content
stringlengths
0
1.05M
origin
stringclasses
2 values
type
stringclasses
2 values
from .forms import UserProfileForm, ProfileUpdateForm
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from .models import Profile


def display_login(request):
    """Render the login landing page."""
    context = {}
    return render(request, 'login/index.html', context)


@login_required
def make_profile(request):
    """Create a profile for the logged-in user.

    POST: validate the submitted form, attach the current user and save,
    then redirect to the profile page.  An invalid POST falls through and
    re-renders the bound form so its errors are shown.
    GET: render the form.
    """
    if request.method == "POST":
        form = UserProfileForm(request.POST)
        if form.is_valid():
            post = form.save(commit=False)
            # The user field is never trusted from the client; it is always
            # taken from the authenticated session.
            post.user = request.user
            post.save()
            return redirect('login:display_profile')
    else:
        # NOTE(review): passing a data dict binds the form (it will run
        # validation on render). If the intent is only to pre-fill the user,
        # `UserProfileForm(initial={'user': request.user})` is the usual
        # pattern — confirm against the template before changing.
        form = UserProfileForm({'user': request.user})
    return render(request, 'login/blank_profile.html', {'profile_form': form})


@login_required
def display_profile(request):
    """Show the current user's profile page."""
    args = {'user': request.user}
    return render(request, 'login/info_profile.html', args)


@login_required
def update_profile(request):
    """Edit the current user's profile.

    POST: bind the form to the submitted data/files and save on success.
    GET: render an UNBOUND form pre-populated from the existing profile.

    Fixed: the GET branch previously bound the form to the (empty)
    ``request.POST``/``request.FILES``, which made every page view render
    a form full of validation errors.
    """
    if request.method == 'POST':
        p_form = ProfileUpdateForm(request.POST,
                                   request.FILES,
                                   instance=request.user.profile)
        if p_form.is_valid():
            p_form.save()
            return redirect('login:display_profile')
    else:
        # Unbound form: show current profile values without triggering
        # validation errors on a plain page load.
        p_form = ProfileUpdateForm(instance=request.user.profile)
    context = {
        'p_form': p_form
    }
    return render(request, 'login/update_profile.html', context)
nilq/baby-python
python
import tensorflow as tf
import numpy as np
import os
from scipy.io import loadmat
from epi.util import dbg_check
import matplotlib.pyplot as plt

# import tensorflow_probability as tfp

# Small constant added to mean rates so the Fano factor never divides by zero.
FANO_EPS = 1e-6

# Index of each neuron population (excitatory, PV, SST, VIP) in the 4-vector.
neuron_inds = {"E": 0, "P": 1, "S": 2, "V": 3}


def load_SSSN_variable(v, ind=0):
    """Load row `ind` of variable `v` from data/AgosBiorxiv2.mat as a float32 tensor."""
    # npzfile = np.load("data/V1_Zs.npz")
    matfile = loadmat(os.path.join("data", "AgosBiorxiv2.mat"))
    _x = matfile[v][ind]
    x = tf.constant(_x, dtype=tf.float32)
    return x


def euler_sim_stoch(f, x_init, dt, T):
    """Euler-integrate dx = f(x) dt for T steps and return the final state.

    The trailing singleton axis of the [M, N, |r|, 1] state is dropped.
    """
    x = x_init
    for t in range(T):
        x = x + f(x) * dt
    return x[:, :, :, 0]


def tf_ceil(x, max):
    """Differentiable element-wise clamp of x from above at `max`."""
    return max - tf.nn.relu(max - x)


def tf_floor(x, min):
    """Differentiable element-wise clamp of x from below at `min`."""
    return min + tf.nn.relu(x - min)


def euler_sim_stoch_traj(f, x_init, dt, T):
    """Euler-integrate and return the whole trajectory, concatenated on axis 3."""
    x = x_init
    xs = [x_init]
    for t in range(T):
        x = x + f(x) * dt
        xs.append(x)
    return tf.concat(xs, axis=3)


def euler_sim_stoch_traj_bound(f, x_init, dt, T, min=None, max=None):
    """Like euler_sim_stoch_traj, but clamps the state to [min, max] each step."""
    x = x_init
    xs = [x_init]
    for t in range(T):
        x = x + f(x) * dt
        if min is not None:
            x = tf_floor(x, min)
        if max is not None:
            x = tf_ceil(x, max)
        xs.append(x)
    return tf.concat(xs, axis=3)


# Model parameters loaded for one parameter draw of the .mat file.
# (The duplicate `ind = 1070` assignment from the original was removed.)
ind = 1070
W_mat = load_SSSN_variable("W", ind=ind)
HB = load_SSSN_variable("hb", ind=ind)
HC = load_SSSN_variable("hc", ind=ind)

n = 2.0  # power of the rectified-power-law nonlinearity
N = 1
# dt = 0.00025
# T = 100
# Rate and noise time constants, broadcast to [1, 1, 4, 1] (per-population).
tau = 0.001 * np.array([1.0, 1.0, 1.0, 1.0], np.float32)
tau = tau[None, None, :, None]
tau_noise = 0.005 * np.array([1.0, 1.0, 1.0, 1.0], np.float32)
tau_noise = tau_noise[None, None, :, None]
sigma_fac = np.sqrt(1.0 + (tau / tau_noise))


# Dim is [M,N,|r|,T]
def SSSN_sim_traj(sigma_eps, W_mat, N=1, dt=0.0005, T=150, x_init=None):
    """Return a simulator h -> trajectory of the stochastic SSN.

    `sigma_eps` is the per-population noise amplitude; the returned callable
    maps inputs h of shape [M, 4] to trajectories of shape [M, N, 8, T+1]
    (4 rate + 4 noise state variables).
    """
    sigma_eps = sigma_eps[:, None, :, None]

    def _SSSN_sim_traj(h, x_init=x_init):
        h = h[:, None, :, None]
        W = W_mat[None, None, :, :]
        _x_shape = tf.ones((h.shape[0], N, 4, 1), dtype=tf.float32)
        if x_init is None:
            x_init = tf.random.uniform((h.shape[0], N, 4, 1), 0.1, 0.25)
        else:
            x_init = x_init[:, :, :, None]
        eps_init = 0.0 * _x_shape
        # State y stacks rates x (first 4 channels) and OU noise eps (last 4).
        y_init = tf.concat((x_init, eps_init), axis=2)

        def f(y):
            x = y[:, :, :4, :]
            eps = y[:, :, 4:, :]
            B = tf.random.normal(eps.shape, 0.0, np.sqrt(dt))
            dx = (-x + (tf.nn.relu(tf.matmul(W, x) + h + eps) ** n)) / tau
            deps = (
                -eps + (np.sqrt(2.0 * tau_noise) * sigma_eps * sigma_fac * B / dt)
            ) / tau_noise
            return tf.concat((dx, deps), axis=2)

        x_t = euler_sim_stoch_traj(f, y_init, dt, T)
        # x_t = euler_sim_stoch_traj_bound(f, y_init, dt, T, None, 1000)
        return x_t

    return _SSSN_sim_traj


def SSSN_sim_traj_sigma(h, W_mat, N=1, dt=0.0005, T=150):
    """Return a simulator sigma_eps -> trajectory, with the input h fixed.

    Mirrors SSSN_sim_traj with the roles of h and sigma_eps swapped; the
    bounded integrator is used here (states clamped at 1000).
    """
    h = h[:, None, :, None]

    def _SSSN_sim_traj(sigma_eps):
        sigma_eps = sigma_eps[:, None, :, None]
        W = W_mat[None, None, :, :]
        _x_shape = tf.ones((sigma_eps.shape[0], N, 4, 1), dtype=tf.float32)
        x_init = tf.random.uniform((sigma_eps.shape[0], N, 4, 1), 0.1, 0.25)
        eps_init = 0.0 * _x_shape
        y_init = tf.concat((x_init, eps_init), axis=2)

        def f(y):
            x = y[:, :, :4, :]
            eps = y[:, :, 4:, :]
            B = tf.random.normal(eps.shape, 0.0, np.sqrt(dt))
            dx = (-x + (tf.nn.relu(tf.matmul(W, x) + h + eps) ** n)) / tau
            deps = (
                -eps + (np.sqrt(2.0 * tau_noise) * sigma_eps * sigma_fac * B / dt)
            ) / tau_noise
            return tf.concat((dx, deps), axis=2)

        # x_t = euler_sim_stoch_traj(f, y_init, dt, T)
        x_t = euler_sim_stoch_traj_bound(f, y_init, dt, T, None, 1000)
        return x_t

    return _SSSN_sim_traj


def SSSN_sim(sigma_eps, W_mat, N=1, dt=0.0005, T=150):
    """Return a simulator h -> steady state (final state only, shape [M, N, 8])."""
    sigma_eps = sigma_eps * np.array([1.0, 1.0, 1.0, 1.0], np.float32)
    sigma_eps = sigma_eps[None, None, :, None]

    def _SSSN_sim(h):
        h = h[:, None, :, None]
        W = W_mat[None, None, :, :]
        _x_shape = tf.ones((h.shape[0], N, 4, 1), dtype=tf.float32)
        x_init = tf.random.uniform((h.shape[0], N, 4, 1), 0.1, 0.25)
        eps_init = 0.0 * _x_shape
        y_init = tf.concat((x_init, eps_init), axis=2)

        def f(y):
            x = y[:, :, :4, :]
            eps = y[:, :, 4:, :]
            B = tf.random.normal(eps.shape, 0.0, np.sqrt(dt))
            dx = (-x + (tf.nn.relu(tf.matmul(W, x) + h + eps) ** n)) / tau
            deps = (
                -eps + (np.sqrt(2.0 * tau_noise) * sigma_eps * sigma_fac * B / dt)
            ) / tau_noise
            return tf.concat((dx, deps), axis=2)

        x_ss = euler_sim_stoch(f, y_init, dt, T)
        return x_ss

    return _SSSN_sim


def get_drdh(alpha, eps, W_mat, N=1, dt=0.0005, T=150, delta_step=0.01):
    """Return a function computing the finite-difference rate sensitivity dr/dh
    for population `alpha`, stacked with its square as summary statistics."""
    alpha_ind = neuron_inds[alpha]
    sssn_sim = SSSN_sim(eps, W_mat, N=N)
    delta_h = np.zeros((1, 4))
    delta_h[0, alpha_ind] = delta_step

    def _drdh(h):
        x1 = tf.reduce_mean(sssn_sim(h)[:, :, alpha_ind], axis=1)
        x2 = tf.reduce_mean(sssn_sim(h + delta_h)[:, :, alpha_ind], axis=1)
        diff = (x2 - x1) / delta_step
        T_x = tf.stack((diff, diff ** 2), axis=1)
        return T_x

    return _drdh


def get_Fano(
    alpha, sigma_eps, W_mat, N=100, dt=0.0005, T=150, T_ss=100, mu=0.01, k=100.0
):
    """Return a function h -> Fano-factor summary statistics.

    Time steps before T_ss are discarded as burn-in; rates are scaled by k
    (presumably converting to Hz — TODO confirm).  alpha == "all" computes
    the statistic for all four populations at once.
    """
    if not (alpha == "all"):
        alpha_ind = neuron_inds[alpha]
    sssn_sim_traj = SSSN_sim_traj(sigma_eps, W_mat, N=N, dt=dt, T=T)

    def Fano(h):
        if alpha == "all":
            x_t = k * sssn_sim_traj(h)[:, :, :4, T_ss:]
        else:
            x_t = k * sssn_sim_traj(h)[:, :, alpha_ind, T_ss:]
        _means = tf.math.reduce_mean(x_t, axis=-1)
        _vars = tf.square(tf.math.reduce_std(x_t, axis=-1))
        # Fano factor = variance / mean; FANO_EPS guards against zero rates.
        fano = _vars / (_means + FANO_EPS)
        vars_mean = tf.reduce_mean(fano, axis=1)
        if alpha == "all":
            T_x = tf.concat((vars_mean, tf.square(vars_mean - mu)), axis=1)
        else:
            T_x = tf.stack((vars_mean, tf.square(vars_mean - mu)), axis=1)
        return T_x

    return Fano


def get_stddev_sigma(
    alpha, W_mat, h, N=100, dt=0.0005, T=150, T_ss=100, mu=0.01, k=100.0
):
    """Return a function sigma_eps -> rate standard-deviation summary statistics,
    analogous to get_Fano but varying the noise amplitude at fixed input h."""
    if not (alpha == "all"):
        alpha_ind = neuron_inds[alpha]
    sssn_sim_traj = SSSN_sim_traj_sigma(h, W_mat, N=N, dt=dt, T=T)

    def get_stddev(sigma_eps):
        if alpha == "all":
            x_t = k * sssn_sim_traj(sigma_eps)[:, :, :4, T_ss:]
        else:
            x_t = k * sssn_sim_traj(sigma_eps)[:, :, alpha_ind, T_ss:]
        stddevs = tf.math.reduce_std(x_t, axis=-1)
        stddevs_mean = tf.reduce_mean(stddevs, axis=1)
        if alpha == "all":
            T_x = tf.concat((stddevs_mean, tf.square(stddevs_mean - mu)), axis=1)
        else:
            T_x = tf.stack((stddevs_mean, tf.square(stddevs_mean - mu)), axis=1)
        return T_x

    return get_stddev


def plot_contrast_response(
    c, x, title="", ax=None, linestyle="-", colors=None, fontsize=14
):
    """Plot the four population rate curves x[:, i] against contrast c (in %)."""
    if colors is None:
        colors = 4 * ["k"]
    assert x.shape[0] == c.shape[0]
    if ax is None:
        fig, ax = plt.subplots(1, 1)
    for i in range(4):
        ax.plot(100 * c, x[:, i], linestyle, c=colors[i], lw=4)
    ticksize = fontsize - 4
    ax.set_xlabel("contrast (%)", fontsize=fontsize)
    ax.set_ylabel("rate (Hz)", fontsize=fontsize)
    ax.set_title(title, fontsize=fontsize)
    plt.setp(ax.get_xticklabels(), fontsize=ticksize)
    plt.setp(ax.get_yticklabels(), fontsize=ticksize)
    return ax


def ISN_coeff(dh, H):
    """Compute the inhibition-stabilized-network coefficient for inputs H + dh.

    Fixed: SSSN_sim requires W_mat as its second positional argument; the
    original call `SSSN_sim(0.0)` raised a TypeError.  The module-level W_mat
    (the same matrix used in the excitatory-input calculation below) is passed.
    """
    sssn_sim = SSSN_sim(0.0, W_mat)
    h = H + dh
    h_E = h[:, 0]
    r_ss = sssn_sim(h)
    # Net recurrent input to the E population at steady state.
    u_E = tf.linalg.matvec(r_ss[:, 0, :4], W_mat[0, :])
    u_E = u_E + h_E
    u_E = tf.nn.relu(u_E)
    isn_coeff = 1.0 - 2.0 * (u_E) * W_mat[0, 0]
    return isn_coeff
nilq/baby-python
python
from schematics.types import ModelType, StringType, PolyModelType, FloatType, DateTimeType
from spaceone.inventory.model.sqldatabase.data import Database
from spaceone.inventory.libs.schema.metadata.dynamic_field import TextDyField, DateTimeDyField, EnumDyField, \
    ListDyField
from spaceone.inventory.libs.schema.metadata.dynamic_layout import ItemDynamicLayout, TableDynamicLayout, \
    ListDynamicLayout, SimpleTableDynamicLayout
from spaceone.inventory.libs.schema.cloud_service import CloudServiceResource, CloudServiceResponse, CloudServiceMeta

'''
SQL DATABASES
'''

# TAB - Default
# Resource Group, Location, Subscription, Subscription ID, SKU, Backend pool, Health probe,
# Load balancing rule, NAT Rules, Public IP Addresses, Load Balancing Type
sql_databases_info_meta = ItemDynamicLayout.set_fields('SQL Databases', fields=[
    TextDyField.data_source('Database Name', 'name'),
    # Fixed: 'Creating' was listed twice in the 'safe' state set.
    EnumDyField.data_source('Status', 'data.status', default_state={
        'safe': ['Online', 'Creating', 'Copying', 'OnlineChangingDwPerformanceTiers',
                 'Restoring', 'Resuming', 'Scaling', 'Standby'],
        'warning': ['AutoClosed', 'Inaccessible', 'Offline', 'OfflineChangingDwPerformanceTiers',
                    'OfflineSecondary', 'Pausing', 'Recovering', 'RecoveryPending', 'Suspect'],
        'disable': ['Disabled', 'Paused', 'Shutdown'],
        'alert': ['EmergencyMode']
    }),
    TextDyField.data_source('Resource ID', 'data.id'),
    TextDyField.data_source('Resource Group', 'data.resource_group'),
    TextDyField.data_source('Location', 'data.location'),
    TextDyField.data_source('Subscription ID', 'account'),
    TextDyField.data_source('Server Name', 'data.server_name'),
    TextDyField.data_source('Elastic Pool', 'data.elastic_pool_id'),
    TextDyField.data_source('Pricing Tier', 'data.pricing_tier_display'),
    DateTimeDyField.data_source('Earliest Restore Point', 'data.earliest_restore_date'),
    TextDyField.data_source('Collation', 'data.collation'),
    DateTimeDyField.data_source('Creation Date', 'launched_at'),
    TextDyField.data_source('Server Admin Login', 'data.administrator_login'),
])

# TAB - Configure
sql_databases_configure = ItemDynamicLayout.set_fields('Configure', fields=[
    TextDyField.data_source('Service Tier', 'data.service_tier_display'),
    TextDyField.data_source('Compute Tier', 'data.compute_tier'),
    TextDyField.data_source('Compute Hardware', 'data.sku.family'),
    TextDyField.data_source('Licence Type', 'data.license_type'),
    TextDyField.data_source('vCores', 'data.current_sku.capacity'),
    TextDyField.data_source('Data max size', 'instance_size'),
    TextDyField.data_source('Zone Redundant', 'data.zone_redundant'),
    ListDyField.data_source('Sync Groups', 'data.sync_group_display'),
    ListDyField.data_source('Sync Agents', 'data.sync_agent_display'),
    TextDyField.data_source('Collation', 'data.collation'),
    DateTimeDyField.data_source('Creation Date', 'data.creation_date')
])

# TAB - Diagnostic Settings
sql_databases_diagnostic_settings = SimpleTableDynamicLayout.set_fields('Diagnostic Settings',
                                                                       'data.diagnostic_settings_resource', fields=[
    TextDyField.data_source('Name', 'name'),
    TextDyField.data_source('Storage Account', 'storage_account_id'),
    TextDyField.data_source('Event Hub', 'event_hub_name'),
    TextDyField.data_source('Log Analytics Workspace', 'workspace_id'),
])

# TAB - tags
sql_databases_info_tags = TableDynamicLayout.set_fields('Tags', 'data.tags', fields=[
    TextDyField.data_source('Key', 'key'),
    TextDyField.data_source('Value', 'value')
])

sql_databases_meta = CloudServiceMeta.set_layouts(
    [sql_databases_info_meta, sql_databases_configure, sql_databases_diagnostic_settings, sql_databases_info_tags])


class DatabaseResource(CloudServiceResource):
    # All resources in this module belong to the 'Database' service group.
    cloud_service_group = StringType(default='Database')


class SqlDatabaseResource(DatabaseResource):
    # Azure SQL Database cloud-service resource; `data` carries the raw
    # collected Database model and `_metadata` the UI layout defined above.
    cloud_service_type = StringType(default='SQLDatabase')
    data = ModelType(Database)
    _metadata = ModelType(CloudServiceMeta, default=sql_databases_meta, serialized_name='metadata')
    name = StringType()
    account = StringType(serialize_when_none=False)
    instance_type = StringType(serialize_when_none=False)
    instance_size = FloatType(serialize_when_none=False)
    launched_at = DateTimeType(serialize_when_none=False)


class SqlDatabaseResponse(CloudServiceResponse):
    resource = PolyModelType(SqlDatabaseResource)
nilq/baby-python
python
"""Adam Integer Check"""


def prime_adam_check(number: int) -> bool:
    """Return True when *number* is an Adam integer.

    A number is an Adam integer when its square and the square of its digit
    reversal are digit reversals of each other (e.g. 12: 12**2 = 144,
    21**2 = 441, and "144" reversed is "441").
    """
    squared = number * number
    mirrored = int(str(number)[::-1])
    mirrored_squared = mirrored * mirrored
    # Compare the square against the reversed square of the mirrored number.
    return str(squared) == str(mirrored_squared)[::-1]


if __name__ == "__main__":
    print("Program to check whether a number is an Adam Int or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if prime_adam_check(number) else 'not '}an Adam Int.")
nilq/baby-python
python
# Python 2 module: IP/CIDR validation and conversion helpers, plus lookups of
# ASN information (RIPE/Cymru whois) and Google Safe Browsing blacklisting.
import re
import urllib2
import urllib
import urlparse
import socket

# sniff for python2.x / python3k compatibility "fixes'
try:
    basestring = basestring
except NameError:
    # 'basestring' is undefined, must be python3k
    basestring = str

try:
    next = next
except NameError:
    # builtin next function doesn't exist
    def next (iterable):
        return iterable.next()

# One to four dot-separated 1-3 digit groups (partial addresses allowed).
_DOTTED_QUAD_RE = re.compile(r'^(\d{1,3}\.){0,3}\d{1,3}$')

def validate_ip(s):
    """Return True when `s` looks like a (possibly partial) dotted-quad IPv4
    address with every quad in 0..255."""
    if _DOTTED_QUAD_RE.match(s):
        quads = s.split('.')
        for q in quads:
            # The regex only bounds digit count; range must be checked here.
            if int(q) > 255:
                return False
        return True
    return False

_CIDR_RE = re.compile(r'^(\d{1,3}\.){0,3}\d{1,3}/\d{1,2}$')

def validate_cidr(s):
    """Return True when `s` is a valid CIDR block (valid IP, prefix <= 32)."""
    if _CIDR_RE.match(s):
        ip, mask = s.split('/')
        if validate_ip(ip):
            if int(mask) > 32:
                return False
        else:
            return False
        return True
    return False

def ip2long(ip):
    """Convert a dotted-quad (possibly partial) IPv4 address to a 32-bit int.

    Returns None on invalid input.  For partial forms the last quad is the
    host part and the missing middle quads are zero-filled.
    """
    if not validate_ip(ip):
        return None
    quads = ip.split('.')
    if len(quads) == 1:
        # only a network quad
        quads = quads + [0, 0, 0]
    elif len(quads) < 4:
        # partial form, last supplied quad is host address, rest is network
        host = quads[-1:]
        quads = quads[:-1] + [0,] * (4 - len(quads)) + host
    lngip = 0
    for q in quads:
        lngip = (lngip << 8) | int(q)
    return lngip

_MAX_IP = 0xffffffff

def long2ip(l):
    """Convert a 32-bit integer to dotted-quad IPv4 notation."""
    # NOTE(review): `_MAX_IP < l < 0` can never be true, so this range guard
    # is dead code — it was presumably meant to be `l > _MAX_IP or l < 0`.
    if _MAX_IP < l < 0:
        raise TypeError("expected int between 0 and %d inclusive" % _MAX_IP)
    return '%d.%d.%d.%d' % (l>>24 & 255, l>>16 & 255, l>>8 & 255, l & 255)

def cidr2block(cidr):
    """Return (first_ip, last_ip) of the CIDR block, or None when invalid."""
    if not validate_cidr(cidr):
        return None
    ip, prefix = cidr.split('/')
    prefix = int(prefix)
    # convert dotted-quad ip to base network number
    # can't use ip2long because partial addresses are treated as all network
    # instead of network plus host (eg. '127.1' expands to '127.1.0.0')
    quads = ip.split('.')
    baseIp = 0
    for i in range(4):
        baseIp = (baseIp << 8) | int(len(quads) > i and quads[i] or 0)
    # keep left most prefix bits of baseIp
    shift = 32 - prefix
    start = baseIp >> shift << shift
    # expand right most 32 - prefix bits to 1
    mask = (1 << shift) - 1
    end = start | mask
    return (long2ip(start), long2ip(end))

_RIPE_WHOIS = 'riswhois.ripe.net'
_ASN_CACHE = {}  # maps an IP string to its previously-resolved ASN

def ip2asn(ip):
    """Resolve the ASN of one IP (string) or many IPs (list) via RIPE whois.

    Single-IP queries return the ASN string; list queries return a dict
    mapping each IP to its ASN.  Results are memoised in _ASN_CACHE.
    """
    global _ASN_CACHE
    # if the query is not a list, look the single IP up in the cache
    # and return it straight away
    if type(ip) is not list and ip in _ASN_CACHE:
        return _ASN_CACHE[ip]
    try:
        ripeip = socket.gethostbyname(_RIPE_WHOIS)
        s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        s.connect((ripeip,43))
        #s.recv(4096)
    except socket.gaierror:
        raise AsnResolutionError('Could not resolve RIPE server name')
    except socket.error:
        raise AsnResolutionError('Error connecting to whois server')
    if type(ip) is list:
        # when the query is a list of IP addresses, each must be checked
        result = {}
        for i in ip:
            # first check whether the IP address is in the cache; if not,
            # contact the server, send the query and add the result to the cache
            if i in _ASN_CACHE:
                result[i] = _ASN_CACHE[i]
            else:
                try:
                    s.send('-k -F -M %s\r\n' % i)
                    result[i] = _ASN_CACHE[i] = _parse_whois(s.recv(4096))
                # NOTE(review): Python 2 parses this as "catch
                # AsnResolutionError and bind it to the name socket.error",
                # NOT as catching both exception types — the intended form
                # is `except (AsnResolutionError, socket.error):`.
                except AsnResolutionError, socket.error:
                    # TODO: fix this — when an error occurs an empty string
                    # is currently returned, meaning "no ASN"
                    result[i] = ''
        s.close()
        return result
    else:
        # the query is a single IP address that was not found in the cache
        s.send('-F -M %s\r\n' % ip)
        _ASN_CACHE[ip] = asn = _parse_whois(s.recv(4096))
        s.close()
        return asn

def url2host(url):
    """Extract the hostname from a URL, tolerating a missing scheme."""
    host = urlparse.urlparse(url).hostname
    if not host:
        # Retry with an http:// prefix so bare hostnames parse too.
        host = urlparse.urlparse('http://' + url).hostname
    if not host:
        raise HostResolutionError('Could not parse hostname')
    return host

def url2ip(url):
    """Resolve a URL's hostname to an IP address via DNS."""
    try:
        return socket.gethostbyname(url2host(url))
    except socket.gaierror:
        raise HostResolutionError('No IP address for host')

def url2tld(url):
    """Return the last dotted label of the URL's hostname (its TLD)."""
    host = url2host(url)
    if '.' in host:
        return host.split('.')[-1:][0]
    else:
        raise HostResolutionError('No valid TLD in hostname')

#_GOOGLE_KEY = 'ABQIAAAAxYjVDAFhAe3o3ORFz0M4WhSRANfPA86NpChaGS3JPxvpQtPEMg'
_GOOGLE_KEY = 'ABQIAAAA91BfexGg9gwOzbZ1zsgJOBQDSU0_BEb6BufZ5pmVD4AMkVBbaA'
_GOOGLE_URL = 'https://sb-ssl.google.com/safebrowsing/api/lookup?client=python&apikey=%s&appver=1.5.2&pver=3.0&url=%s'

def _check_google(url):
    """Query the Google Safe Browsing lookup API; True means blacklisted.

    NOTE(review): any HTTP status other than 200/204 falls through and
    returns None implicitly — confirm whether callers treat None as False.
    """
    gurl = _GOOGLE_URL % (_GOOGLE_KEY, urllib.quote(url))
    result = urllib2.urlopen(gurl)
    if result.getcode() == 204:
        return False
    if result.getcode() == 200:
        return True

_BLACKLIST_CACHE = {}  # memoises Safe Browsing verdicts per URL

def url_blacklisted(url):
    """Return (and cache) whether `url` is blacklisted by Safe Browsing."""
    global _BLACKLIST_CACHE
    if url in _BLACKLIST_CACHE:
        return _BLACKLIST_CACHE[url]
    try:
        _BLACKLIST_CACHE[url] = _check_google(url)
        return _BLACKLIST_CACHE[url]
    except urllib2.URLError, e:
        raise BlacklistCheckError(str(e))

class IpRange(object):
    """An inclusive range of IPv4 addresses, constructible from two endpoints,
    a (start, end) tuple, a CIDR string, or a single address."""
    def __init__ (self, start, end=None):
        if end is None:
            if isinstance(start, tuple):
                # occurs when IpRangeList calls via map to pass start and end
                start, end = start
            elif validate_cidr(start):
                # CIDR notation range
                start, end = cidr2block(start)
            else:
                # degenerate range
                end = start
        start = ip2long(start)
        end = ip2long(end)
        self.startIp = min(start, end)
        self.endIp = max(start, end)

    def __repr__ (self):
        return (long2ip(self.startIp), long2ip(self.endIp)).__repr__()

    def __contains__ (self, item):
        # Accept either a dotted-quad string or an integer address.
        if isinstance(item, basestring):
            item = ip2long(item)
        if type(item) not in [type(1), type(_MAX_IP)]:
            raise TypeError("expected dotted-quad ip address or 32-bit integer")
        return self.startIp <= item <= self.endIp

    def __iter__ (self):
        # Yields every address in the range as a dotted-quad string.
        i = self.startIp
        while i <= self.endIp:
            yield long2ip(i)
            i += 1

class IpRangeList(object):
    """A collection of IpRange objects supporting membership and iteration."""
    def __init__ (self, args):
        self.ips = tuple(map(IpRange, args))

    def __repr__ (self):
        return self.ips.__repr__()

    def __contains__ (self, item):
        for r in self.ips:
            if item in r:
                return True
        return False

    def __iter__ (self):
        for r in self.ips:
            for ip in r:
                yield ip

_RIPE_IP_URL = 'ftp://ftp.ripe.net/ripe/stats/delegated-ripencc-latest'
_RANGE_CACHE = {}   # country code -> IpRangeList of delegated IPv4 blocks
_RIPE_DATA = None   # raw RIPE delegation file lines, fetched once per process

class AddressSpace(object):
    """The IPv4 address space delegated to one country (TLD), built from the
    RIPE NCC delegation statistics file unless an explicit range is given."""
    def __init__(self, tld, range=None):
        self._tld = tld.upper()
        if range:
            self._range = IpRangeList(range)
        else:
            self._range = self._load_range(tld.upper())

    def _load_range(self, cc):
        # Downloads (once) and parses the RIPE delegation file, caching the
        # resulting range list per country code.
        global _RANGE_CACHE
        global _RIPE_DATA
        if cc.upper() in _RANGE_CACHE:
            return _RANGE_CACHE[cc.upper()]
        if _RIPE_DATA==None:
            _RIPE_DATA = urllib.urlopen(_RIPE_IP_URL).readlines()
        ranges = filter(lambda x: x.find(cc.upper()) is not -1 and x.find('ipv4') is not -1, _RIPE_DATA)
        ranges_list = []
        for r in ranges:
            # Delegation format: registry|cc|type|start|count|date|status
            start = r.split('|')[3]
            end = long2ip(ip2long(start) + int(r.split('|')[4]))
            ranges_list.append((start,end))
        _RANGE_CACHE[cc.upper()] = IpRangeList(ranges_list)
        return _RANGE_CACHE[cc.upper()]

    def __contains__ (self, item):
        # `item` may be an IP address or a URL; URLs match either by TLD
        # or by the resolved IP falling inside the delegated ranges.
        if validate_ip(item):
            if item in self._range:
                return True
        else:
            if url2tld(item).lower() == self._tld:
                return True
            if url2ip(item) in self._range:
                return True
        return False

class HostResolutionError(Exception):
    pass

class AsnResolutionError(Exception):
    pass

class BlacklistCheckError(Exception):
    pass

# internal function that parses the whois server reply and extracts the ASN
# number from it; raises an exception when it cannot do so
def _parse_whois(data):
    data = filter(lambda x: x and not x.startswith('%'), ''.join(data).split('\n'))
    if len(data) <> 1:
        raise AsnResolutionError('Invalid reply from whois server')
    asn = data[0].split('\t')
    if len(asn) <> 2:
        raise AsnResolutionError('Invalid reply from whois server')
    # NOTE(review): ASN 3303 is deliberately mapped to '' — presumably a
    # sentinel the original author wanted to ignore; confirm before reuse.
    if asn[0] == '3303':
        return ''
    else:
        return asn[0]

def ip_in_tlds(ipurl, tlds):
    """Check whether the IP/URL falls in the address range of any of the
    provided TLDs (countries); returns the matching TLD or False.
    (c) fvlasic
    """
    for tld in tlds:
        if ipurl in AddressSpace(tld=tld):
            return tld.upper()
    return False

def cymruIP2ASN(ips):
    """Bulk-resolve IPs to (country code, ASN) via the Team Cymru whois
    service; accepts a single IP string or a list and returns a dict."""
    addr = ('whois.cymru.com', '43')
    sock = socket.create_connection(addr)
    query='begin\ncountrycode\nasnumber\nnoasname\n[insert]end\n'
    ips_on_wire = ""
    if isinstance(ips, basestring):
        ips = [ips]
    for ip in ips:
        ips_on_wire = ips_on_wire + ip + "\n"
    query = query.replace('[insert]', ips_on_wire)
    # Send the full query, looping until every byte is written.
    sent = 0
    while not sent==len(query):
        sendbytes = sock.send(query[sent:])
        sent += sendbytes
    # Read the reply until the server closes the connection.
    data=''
    more = True
    while more:
        more = sock.recv(8192)
        data += more
    sock.close()
    # Reply lines look like "ASN | IP | CC"; build {ip: (asn, cc)}.
    return dict(map(lambda x: (x.split('|')[1].strip(), \
        (x.split('|')[0].strip(), x.split('|')[2].strip())), filter(lambda x: '|' in x, data.split('\n'))))
nilq/baby-python
python
class SPI(object):
    """Describes a SPI bus pin assignment.

    Holds the clock pin and the optional MOSI/MISO data pins; either data
    pin may be left as None for half-duplex or clock-only configurations.
    """

    def __init__(self, clk, MOSI=None, MISO=None):
        # Store all three pins in one pass; missing data pins stay None.
        self.clk, self.MOSI, self.MISO = clk, MOSI, MISO
nilq/baby-python
python
from core.entity.entity_exceptions import EntityOperationNotPermitted, EntityNotFoundException
from core.test.media_files_test_case import MediaFilesTestCase


class BaseTestClass(MediaFilesTestCase):
    """
    Provides the base class for all entity test cases.

    _check_default_fields and _check_default_change must be re-implemented since they test whether the entity
    re-implement all its default fields after reloading. Given these fields were re-implemented the diagram
    state test cases will be defined automatically by inheritance mechanism. (see test_project how)

    However, field validation tests must be written. Use _test_field function for convenience.
    (see test_project). If some field contains file use FileMixin. If some field contains password use
    PasswordMixin.

    _check_reload function usually tests whether object fields were preserved during the object reload.
    If you need additional checks extend this function (see test_project)

    Use setUpTestData to create all related entities if you want. However, don't forget to call the same
    method from the superclass because it temporally moves all media root files to some safe directory.
    Also, tearDownClass and tearDown method must call similar ones from the superclass.

    DO NOT FORGET to use: del BaseTestClass AFTER EACH subclass declaration
    """

    # Route identifiers for _test_field: which path through the entity state
    # diagram (create / change / load) the field value should travel.
    TEST_CREATE_AND_LOAD = 0
    TEST_CHANGE_CREATE_AND_LOAD = 1
    TEST_CREATE_CHANGE_AND_LOAD = 2
    TEST_CREATE_LOAD_AND_CHANGE = 3

    _entity_object_class = None
    """ The entity object class. New entity object will be created from this class """

    _entity_model_class = None
    """ The entity model class is a Django model that is used for storing entities """

    def test_object_creating_default(self):
        """
        Tests how the object will be created with default values

        :return: nothing
        """
        obj = self.get_entity_object_class()()
        self._check_creating_entity(obj.entity, False)
        self._check_fields_changed(obj.entity, obj.default_field_key)
        # update()/delete() are illegal while the entity is still 'creating'.
        with self.assertRaises(EntityOperationNotPermitted,
                               msg="entity update() is possible when it was still creating"):
            obj.entity.update()
        with self.assertRaises(EntityOperationNotPermitted,
                               msg="entity delete() is possible when is was still creating"):
            obj.entity.delete()
        obj.create_entity()

    def test_object_creating_default_plus_changed(self):
        """
        This test case will create new entity then changes some entity fields and at last store entity
        data to the database

        :return: nothing
        """
        obj = self.get_entity_object_class()()
        obj.change_entity_fields()
        self._check_creating_entity(obj.entity, True)
        self._check_fields_changed(obj.entity, obj.entity_fields.keys())
        obj.create_entity()

    def test_object_created_default(self):
        """
        Tests how the object can be created with default values

        :return: nothing
        """
        obj = self.get_entity_object_class()()
        obj.create_entity()
        self._check_created_entity(obj.entity)
        self._check_fields_changed(obj.entity, [])
        with self.assertRaises(EntityOperationNotPermitted, msg="entity object can be created twice"):
            obj.create_entity()
        with self.assertRaises(EntityOperationNotPermitted, msg="the 'saved' object can be saved again"):
            obj.entity.update()

    def test_object_created_plus_changed_default(self):
        """
        Tests if the object has been created and correctly changed

        :return: nothing
        """
        obj = self.get_entity_object_class()()
        obj.create_entity()
        obj.change_entity_fields()
        self._check_changed_entity(obj.entity, obj.id)
        self._check_fields_changed(obj.entity, obj.changed_field_key)
        with self.assertRaises(EntityOperationNotPermitted,
                               msg="The entity can be created, changed plus created again"):
            obj.create_entity()

    def test_object_created_plus_updated_default(self):
        """
        Tests if the object can be both created and updated

        :return: nothing
        """
        obj = self.get_entity_object_class()()
        obj.create_entity()
        self._do_entity_update(obj)
        self._check_updated_entity(obj.entity, obj.id)
        self._check_fields_changed(obj.entity, [])
        with self.assertRaises(EntityOperationNotPermitted,
                               msg="The entity can be re-created when this is created -> changed -> saved"):
            obj.create_entity()
        with self.assertRaises(EntityOperationNotPermitted,
                               msg="The entity can be re-updated when this is created -> changed -> updated"):
            obj.entity.update()

    def test_object_created_updated_and_loaded_default(self):
        """
        Tests whether the object can be successfully loaded after it has been created and updated

        :return: nothing
        """
        obj = self.get_entity_object_class()()
        obj.create_entity()
        self._do_entity_update(obj)
        obj.reload_entity()
        self._check_reload(obj)
        self._check_fields_changed(obj.entity, [])
        with self.assertRaises(EntityOperationNotPermitted, msg="The entity can be re-created when loaded"):
            obj.create_entity()
        with self.assertRaises(EntityOperationNotPermitted,
                               msg="The entity can be updated when it still loaded and not changed"):
            obj.entity.update()

    def test_object_created_and_loaded_default(self):
        # Same as the previous case but without the intermediate update step.
        obj = self.get_entity_object_class()()
        obj.create_entity()
        obj.reload_entity()
        self._check_reload(obj)
        self._check_fields_changed(obj.entity, [])
        with self.assertRaises(EntityOperationNotPermitted, msg="The entity can be re-created when loaded"):
            obj.create_entity()
        with self.assertRaises(EntityOperationNotPermitted,
                               msg="The entity can be updated when it still loaded and not changed"):
            obj.entity.update()

    def test_object_created_loaded_and_changed(self):
        # Create -> reload -> change: the reloaded entity must enter the
        # 'changed' state and refuse a second create.
        obj = self.get_entity_object_class()()
        obj.create_entity()
        obj.reload_entity()
        obj.change_entity_fields()
        self._check_changed_entity(obj.entity, obj.id)
        self._check_fields_changed(obj.entity, obj.changed_field_key)
        with self.assertRaises(EntityOperationNotPermitted, msg="The entity can't be re-created when 'changing'"):
            obj.create_entity()

    def test_object_created_and_deleted(self):
        obj = self.get_entity_object_class()()
        obj.create_entity()
        obj.entity.delete()
        self._check_entity_delete(obj)

    def test_object_created_loaded_and_deleted(self):
        obj = self.get_entity_object_class()()
        obj.create_entity()
        obj.reload_entity()
        obj.entity.delete()
        self._check_entity_delete(obj)

    def test_object_created_changed_and_deleted(self):
        obj = self.get_entity_object_class()()
        obj.create_entity()
        obj.change_entity_fields()
        obj.entity.delete()
        self._check_entity_delete(obj)

    def _do_entity_update(self, obj):
        # Convenience: change the fields and persist the change in one call.
        obj.change_entity_fields()
        obj.entity.update()

    def _test_field(self, field_name, field_value, updated_value, exception_to_throw, route_number,
                    use_defaults=True, **additional_kwargs):
        """
        Provides the test for a standalone field

        :param field_name: the field name
        :param field_value: the field value
        :param updated_value: another field value to set
        :param exception_to_throw: None if the field value shall be assigned successfully (positive test).
            An exception class if attempt of field assignment must throw an exception (negative test).
        :param route_number: Number of route in the transient state diagram (TEST_CREATE_AND_LOAD,
            TEST_CHANGE_CREATE_AND_LOAD, TEST_CREATE_CHANGE_AND_LOAD, TEST_CREATE_LOAD_AND_CHANGE)
        :param use_defaults: True to use defaults put into the entity_object. False if additional arguments
            shall be put instead of defaults
        :param additional_kwargs: Some additional create object arguments to put
        :return: nothing
        """
        initial_kwargs = {field_name: field_value}
        initial_kwargs.update(additional_kwargs)
        if exception_to_throw is None:
            obj = self.get_entity_object_class()(use_defaults=use_defaults, **initial_kwargs)
        else:
            # Negative test: the constructor itself must reject the value.
            with self.assertRaises(exception_to_throw,
                                   msg="An invalid value '%s' was successfully assigned to field '%s'"
                                       % (field_value, field_name)):
                self.get_entity_object_class()(use_defaults=use_defaults, **initial_kwargs)
            return
        # Walk the requested route through the create/change/load diagram.
        if route_number == self.TEST_CHANGE_CREATE_AND_LOAD:
            obj.change_entity_fields(use_defaults=False, **{field_name: updated_value})
        obj.create_entity()
        if route_number == self.TEST_CREATE_CHANGE_AND_LOAD:
            obj.change_entity_fields(use_defaults=False, **{field_name: updated_value})
            obj.entity.update()
        obj.reload_entity()
        if route_number == self.TEST_CREATE_LOAD_AND_CHANGE:
            obj.change_entity_fields(use_defaults=False, **{field_name: updated_value})
            obj.entity.update()
        if route_number == self.TEST_CREATE_AND_LOAD:
            last_value = field_value
        else:
            last_value = updated_value
        actual_value = getattr(obj.entity, field_name)
        self.assertEquals(actual_value, last_value,
                          "The value '%s' for field '%s' doesn't either stored or retrieved correctly"
                          % (last_value, field_name))

    def _test_read_only_field(self, field_name, sample_value, throwing_exception=ValueError):
        """
        Checks whether some value can be assigned to the read-only field

        :param field_name: the field name
        :param sample_value: value to assign
        :param throwing_exception: exception to throw
        :return: nothing
        """
        with self.assertRaises(throwing_exception,
                               msg="The read-only field '%s' has been successfully changed" % field_name):
            self.get_entity_object_class()(**{field_name: sample_value})

    def get_entity_object_class(self):
        """
        Returns new entity object class. New entity object will be created exactly from such a class

        :return: the entity object class
        """
        if self._entity_object_class is None:
            raise NotImplementedError("Please, define the _entity_object_class protected variable")
        else:
            return self._entity_object_class

    def get_entity_model_class(self):
        """
        Returns the entity model class. The entity model class is used for storing entities

        :return: the entity model class
        """
        if self._entity_model_class is None:
            raise NotImplementedError("Please, define the _entity_model_class protected variable")
        else:
            return self._entity_model_class

    def _check_creating_entity(self, entity, fields_changed):
        """
        Checks whether all entity fields were in place when the entity is 'CREATING'. The entity fields
        will be checked given that the entity object was created with no keyword arguments.

        :return: nothing
        """
        self.assertIsNone(entity.id, "Entity ID is not None before the entity create")
        self.assertEquals(entity.state, "creating", "Entity state is not 'creating' before the entity create")
        self.assertIsNone(entity._wrapped, "Somebody has wrapped to the entity when the entity is creating")
        if fields_changed:
            self._check_default_change(entity)
        else:
            self._check_default_fields(entity)

    def _check_created_entity(self, entity):
        """
        Checks whether all entity fields were in place when the entity has already been created

        :param entity: the entity to check
        :return: nothing
        """
        self.assertIsNotNone(entity.id, "Entity ID is None when the entity has been created")
        self.assertEquals(entity.state, "saved", "The entity state is not 'saved' after the entity create")
        self._check_default_fields(entity)

    def _check_changed_entity(self, entity, expected_id):
        """
        Checks whether the entity was changed

        :param entity: the entity to check
        :param expected_id: the entity ID to be expected
        :return: nothing
        """
        self.assertEquals(entity.id, expected_id, "The entity ID is not correct")
        self.assertEquals(entity.state, "changed",
                          "The entity state is not 'changed' after entity fields were corrected")
        self._check_default_change(entity)

    def _check_updated_entity(self, entity, expected_id):
        """
        Checks whether the entity can be updated

        :param entity: the entity to be checked
        :return: nothing
        """
        self.assertEquals(entity.id, expected_id, "The entity ID changed during the update process")
        self.assertEquals(entity.state, "saved", "The entity state was not proper")
        self._check_default_change(entity)

    def _check_reload(self, obj):
        """
        Checks whether the entity is successfully and correctly reloaded.

        :param obj: the entity object within which the entity was reloaded
        :return: nothing
        """
        self.assertIsInstance(obj.entity, obj.get_entity_class(), "Unexpected entity class")
        self.assertEquals(obj.entity.id, obj.id, "The entity ID was not properly retrieved")
        self.assertEquals(obj.entity.state, "loaded",
                          "The entity state is not 'loaded' when the entity is loaded")
        self._check_field_consistency(obj)

    def _check_field_consistency(self, obj):
        # Every recorded field value must survive the reload round-trip.
        for name, expected_value in obj.entity_fields.items():
            actual_value = getattr(obj.entity, name)
            self.assertEquals(actual_value, expected_value,
                              "The entity field '%s' doesn't retrieved correctly" % name)

    def _check_entity_delete(self, obj):
        """
        Checks whether the entity is properly deleted

        :param obj: the entity object
        :return: nothing
        """
        self.assertIsNone(obj.entity.id, "The deleted entity still have an ID")
        self.assertEquals(obj.entity.state, "deleted", "The deleted entity has incorrect status")
        with self.assertRaises(EntityOperationNotPermitted, msg="the deleted entity can be created"):
            obj.create_entity()
        with self.assertRaises(EntityOperationNotPermitted, msg="The deleted entity can be saved"):
            obj.entity.update()
        with self.assertRaises(EntityOperationNotPermitted, msg="The deleted entity can be deleted again"):
            obj.entity.delete()
        with self.assertRaises(EntityNotFoundException,
                               msg="The entity can't be deleted carefully since the entity already deleted can be "
                                   "easily re-created"):
            obj.reload_entity()

    def _check_fields_changed(self, entity, field_list):
        """
        Checks whether the certain and only certain fields in the entity was changed

        :param entity: the entity to test
        :param field_list: field list to check in the entity object
        :return: nothing
        """
        self.assertEquals(len(entity._edited_fields), len(field_list),
                          "the Entity._edited_fields doesn't contain appropriate field number")
        for field in field_list:
            self.assertIn(field, entity._edited_fields,
                          "The field '%s' is not within the list of the edited fields")

    def _check_default_fields(self, entity):
        """
        Checks whether the default fields were properly stored. The method deals with default data only.

        :param entity: the entity which default fields shall be checked
        :return: nothing
        """
        raise NotImplementedError("The _check_default_fields method must be implemented when extending this base class")

    def _check_default_change(self, entity):
        """
        Checks whether the fields were properly change. The method deals with default data only.

        :param entity: the entity to store
        :return: nothing
        """
        raise NotImplementedError("The _check_default_change method must be implemented when extending this base class")


# Prevents the test runner from discovering and executing the (abstract)
# parent class as a standalone test case.
del MediaFilesTestCase
nilq/baby-python
python
import numpy as np
import torch

from .mask_gen import BoxMaskGenerator


class CutmixCollateWrapper(object):
    """Collate wrapper that attaches CutMix box masks to each batch.

    Wraps an optional upstream collate/augmentation function and adds a
    ``'cutmix_masks'`` tensor keyed into the batch dict.
    """

    def __init__(self, batch_aug_fn=None):
        # Optional collate/augmentation callable applied to the raw batch
        # before masks are generated.
        self.batch_aug_fn = batch_aug_fn
        # Mask generator configuration is fixed here: up to 3 inverted boxes
        # covering 25-50% of the image area each.
        self.mask_generator = BoxMaskGenerator(
            prop_range = (0.25, 0.5),
            n_boxes = 3,
            random_aspect_ratio = True,
            prop_by_area = False,
            within_bounds = False,
            invert = True
        )

    def _generate_cutmix_mask(self, num_masks: int, width: int, height: int):
        # Delegate to the project's BoxMaskGenerator; returns a numpy array of
        # per-sample masks (shape determined by the generator).
        return self.mask_generator.generate_params(num_masks, (width, height))

    def __call__(self, batch):
        # Run the wrapped collate function first (if any) so masks match the
        # final batch layout.
        if self.batch_aug_fn is not None:
            batch = self.batch_aug_fn(batch)
        # batch['inputs'] is expected to be (B, C, W, H) — TODO confirm
        # axis order against the dataset's collate_fn.
        batch_size, _, w, h = batch['inputs'].shape
        masks = self._generate_cutmix_mask(batch_size, w, h)
        batch['cutmix_masks'] = torch.from_numpy(masks.astype(np.float32))
        return batch


class CutmixLoader(torch.utils.data.DataLoader):
    r"""CutmixLoader, also return cutmix mask

    dataset: `torch.utils.data.Dataset`
        dataset, must have classes_dict and collate_fn attributes
    batch_size: `int`
        number of samples in one batch
    """

    def __init__(self, dataset: torch.utils.data.Dataset, batch_size: int, **kwargs):
        # Reuse the dataset's own collate_fn (if present) as the inner
        # collate step, then layer mask generation on top of it.
        if hasattr(dataset, 'collate_fn'):
            pre_collate_fn = dataset.collate_fn
        else:
            pre_collate_fn = None
        collate_fn = CutmixCollateWrapper(pre_collate_fn)
        super(CutmixLoader, self).__init__(
            dataset,
            batch_size=batch_size,
            collate_fn = collate_fn,
            **kwargs
        )
nilq/baby-python
python
""" # BEGIN BINGO_DEMO >>> bingo = BingoCage(range(3)) >>> bingo() 2 >>> bingo() 0 >>> callable(bingo) True # END BINGO_DEMO """ # BEGIN BINGO import random class BingoCage: def __init__(self, items): self._items = list(items) # <1> random.shuffle(self._items) # <2> def __call__(self): if not self._items: # <3> raise IndexError('pop from empty BingoCage') return self._items.pop() # END BINGO
nilq/baby-python
python
"""Base AST node types for the compiler: the generic Node plus the
program/class/feature skeleton of the class hierarchy."""
from json import dumps


class Node:
    """Root of the AST hierarchy; records the source position of a node."""

    def __init__(self, line = 0, column = 0):
        self.line = line
        self.column = column

    @property
    def clsname(self):
        # Concrete subclass name, used in to_tuple/to_readable output.
        return str(self.__class__.__name__)

    def to_tuple(self):
        return tuple([
            ("node_class_name", self.clsname)
        ])

    def to_readable(self):
        return "{}".format(self.clsname)

    def __repr__(self):
        return self.toJSON()

    def __str__(self):
        return str(self.to_readable())

    # Dict-like access to a node's attributes by name.
    def __getitem__(self, x):
        return self.__dict__[x]

    def __setitem__(self, x, y):
        self.__dict__[x]= y

    def __iter__(self):
        return self.__dict__.__iter__()

    def __eq__(self, other):
        # NOTE(review): equality assumes both nodes define `idName`; plain
        # Node instances (and subclasses without idName) will raise
        # AttributeError here — confirm this is intended.
        return type(self) == type(other) and self.idName == other.idName

    def toJSON(self):
        # Serialize the whole node tree via each object's __dict__.
        return dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4, separators=(',', ': '))


class NodeProgram(Node):
    """Top-level program node: the list of class definitions."""

    def __init__(self, class_list):
        super().__init__()
        self.class_list = class_list

    def to_tuple(self):
        return tuple([
            ("class_name", self.clsname),
            ("classes", self.class_list)
        ])

    def to_readable(self):
        return "{}(classes={})".format(self.clsname, self.class_list)


class NodeClassTuple(Node, tuple):
    # NOTE(review): inherits from tuple, so instances are created empty via
    # tuple.__new__; `classes` is stored as a plain attribute — confirm the
    # tuple base is actually needed.
    def __init__(self, classes):
        self.classes = classes


class NodeClass(Node):
    """A class declaration: name, optional parent, methods and attributes."""

    def __init__(self, idName: str, methods, attributes, parent, line = 0, column = 0, parent_col = -1):
        super().__init__(line= line, column= column)
        self.idName = idName
        self.methods = methods
        self.attributes = attributes
        self.parent = parent
        # Column of the parent name in `class X inherits Y`; -1 if absent.
        self.parent_col = parent_col

    def to_readable(self):
        return "{}(name='{}', parent={}, methods={}, attributes={})".format(
            self.clsname, self.idName, self.parent, self.methods, self.attributes)


class NodeFeature(Node):
    """Common base for class features (methods, attributes, formal params)."""

    def __init__(self, line, column):
        super(NodeFeature, self).__init__(line= line, column= column)

# Not sure whether to add an inheritable feature_class here.
# Need to check whether I will later have to iterate over the elements of a
# class in an abstract way.
# Feature nodes: methods, attributes and formal parameters of a class.
class NodeClassMethod(NodeFeature):
    """A method declaration: name, formals, declared return type and body."""

    def __init__(self, idName: str, formal_param_list, returnType: str, body, line, column, columnType):
        super().__init__(line=line, column=column)
        self.idName = idName
        self.formal_param_list = formal_param_list
        self.returnType = returnType
        self.body = body
        # Column of the declared return type (for error reporting).
        self.columnType = columnType

    def to_readable(self):
        return "{}(name='{}', formal_param_list={}, returnType={}, body={})".format(
            self.clsname, self.idName, self.formal_param_list, self.returnType, self.body)


class NodeAttr(NodeFeature):
    """An attribute declaration with optional initializer expression."""

    def __init__(self, idName, _type, line, column, expr=None, columnTypeAttr=None):
        super().__init__(line=line, column=column)
        self.idName = idName
        self._type = _type
        self.expr = expr
        self.columnTypeAttr = columnTypeAttr


class NodeFormalParam(NodeFeature):
    """A formal parameter of a method: name and declared type."""

    def __init__(self, idName, param_type, line, column):
        super().__init__(line=line, column=column)
        self.idName = idName
        self._type = param_type

    def to_tuple(self):
        return tuple([
            ("class_name", self.clsname),
            # FIX: was `self.idNme` (undefined attribute -> AttributeError).
            ("name", self.idName),
            ("param_type", self._type)
        ])

    def to_readable(self):
        return "{}(name='{}', param_type={})".format(self.clsname, self.idName, self._type)


class NodeObject(Node):
    """An identifier reference."""

    def __init__(self, idName, line, column):
        super().__init__(line=line, column=column)
        self.idName = idName

    def to_tuple(self):
        return tuple([
            ("class_name", self.clsname),
            ("name", self.idName)
        ])

    def to_readable(self):
        return "{}(name='{}')".format(self.clsname, self.idName)


class NodeSelf(NodeObject):
    """The `self` reference (a fixed-name NodeObject)."""

    def __init__(self, line, column):
        super().__init__(idName="SELF", line=line, column=column)

    def to_tuple(self):
        return tuple([
            ("class_name", self.clsname)
        ])

    def to_readable(self):
        return "{}".format(self.clsname)


class NodeConstant(Node):
    """Common base for literal constants."""

    def __init__(self, line, column):
        super().__init__(line=line, column=column)


class NodeInteger(NodeConstant):
    def __init__(self, content, line, column):
        super().__init__(line=line, column=column)
        self.content = content

    def to_tuple(self):
        return tuple([
            ("class_name", self.clsname),
            ("content", self.content)
        ])

    def to_readable(self):
        return "{}(content={})".format(self.clsname, self.content)


class NodeBoolean(NodeConstant):
    def __init__(self, content, line, column):
        super().__init__(line=line, column=column)
        self.content = content

    def to_tuple(self):
        return tuple([
            ("class_name", self.clsname),
            ("content", self.content)
        ])

    def to_readable(self):
        return "{}(content={})".format(self.clsname, self.content)


class NodeString(NodeConstant):
    def __init__(self, content, line, column):
        super().__init__(line=line, column=column)
        self.content = content

    def to_tuple(self):
        return tuple([
            ("class_name", self.clsname),
            ("content", self.content)
        ])

    def to_readable(self):
        # repr() so quotes/escapes inside the literal stay visible.
        return "{}(content={})".format(self.clsname, repr(self.content))


# Every expression must have an associated evaluation function with a
# return value.
class NodeExpr(Node):
    """Common base for all expression nodes."""

    def __init__(self, line, column):
        super().__init__(line=line, column=column)


class NodeNewObject(NodeExpr):
    """`new T` expression."""

    def __init__(self, new_type, line, column):
        super().__init__(line=line, column=column)
        self.type = new_type

    def to_tuple(self):
        return tuple([
            ("class_name", self.clsname),
            ("type", self.type)
        ])

    def to_readable(self):
        return "{}(type={})".format(self.clsname, self.type)


class NodeIsVoid(NodeExpr):
    """`isvoid expr` test."""

    def __init__(self, expr, line, column):
        super().__init__(line=line, column=column)
        self.expr = expr

    def to_tuple(self):
        return tuple([
            ("class_name", self.clsname),
            ("expr", self.expr)
        ])

    def to_readable(self):
        return "{}(expr={})".format(self.clsname, self.expr)


class NodeAssignment(NodeExpr):
    """`id <- expr` assignment."""

    def __init__(self, nodeObject, expr, line, column, columnAssign):
        super().__init__(line=line, column=column)
        self.nodeObject = nodeObject
        self.expr = expr
        # Column of the assignment operator itself.
        self.columnAssign = columnAssign

    def to_tuple(self):
        return tuple([
            ("class_name", self.clsname),
            ("nodeObject", self.nodeObject),
            ("expr", self.expr)
        ])

    def to_readable(self):
        return "{}(nodeObject={}, expr={})".format(self.clsname, self.nodeObject, self.expr)


class NodeBlock(NodeExpr):
    """`{ expr; ...; }` sequence; value is the last expression's."""

    def __init__(self, expr_list, line, column):
        super().__init__(line=line, column=column)
        self.expr_list = expr_list

    def to_tuple(self):
        return tuple([
            ("class_name", self.clsname),
            ("expr_list", self.expr_list)
        ])

    def to_readable(self):
        return "{}(expr_list={})".format(self.clsname, self.expr_list)


class NodeDynamicDispatch(NodeExpr):
    """`expr.method(args)` dispatch resolved on the dynamic type."""

    def __init__(self, expr, method, arguments, line, column):
        super().__init__(line=line, column=column)
        self.expr = expr
        self.method = method
        # Normalize a missing argument list to an empty tuple.
        self.arguments = arguments if arguments is not None else tuple()

    def to_tuple(self):
        return tuple([
            ("class_name", self.clsname),
            ("expr", self.expr),
            ("method", self.method),
            ("arguments", self.arguments)
        ])

    def to_readable(self):
        return "{}(expr={}, method={}, arguments={})".format(
            self.clsname, self.expr, self.method, self.arguments)


class NodeStaticDispatch(NodeExpr):
    """`expr@Type.method(args)` dispatch with an explicit static type."""

    def __init__(self, expr, dispatch_type, method, arguments, line, column, columnType, columnIdMethod):
        super().__init__(line=line, column=column)
        self.expr = expr
        self.dispatch_type = dispatch_type
        self.method = method
        self.arguments = arguments if arguments is not None else tuple()
        self.columnType = columnType
        self.columnIdMethod = columnIdMethod

    def to_tuple(self):
        return tuple([
            ("class_name", self.clsname),
            ("expr", self.expr),
            ("dispatch_type", self.dispatch_type),
            ("method", self.method),
            ("arguments", self.arguments)
        ])

    def to_readable(self):
        return "{}(expr={}, dispatch_type={}, method={}, arguments={})".format(
            self.clsname, self.expr, self.dispatch_type, self.method, self.arguments)


class NodeLetComplex(NodeExpr):
    """A `let` with one or more bindings and a body expression."""

    def __init__(self, nested_lets, body, line, column):
        super().__init__(line=line, column=column)
        # Accept either a single binding or a list of bindings.
        self.nestedLets = nested_lets if type(nested_lets) is list else [nested_lets]
        self.body = body

    def to_tuple(self):
        return tuple([
            ("class_name", self.clsname),
            ("nested_lets", self.nestedLets),
            ("body", self.body)
        ])

    def to_readable(self):
        return "{}(nested_lets={}, body={})".format(
            self.clsname, self.nestedLets, self.body)


class NodeLet(NodeExpr):
    """A single `let` binding: name, declared type and initializer."""

    def __init__(self, idName, returnType, body, line, column):
        super().__init__(line=line, column=column)
        self.idName = idName
        self.type = returnType
        self.body = body

    def to_tuple(self):
        return tuple([
            ("class_name", self.clsname),
            ("idName", self.idName),
            ("returnType", self.type),
            ("body", self.body)
        ])

    def to_readable(self):
        return "{}(idName={}, returnType={}, body={})".format(
            self.clsname, self.idName, self.type, self.body)


class NodeIf(NodeExpr):
    """`if pred then a else b fi` conditional."""

    def __init__(self, predicate, then_body, else_body, line, column):
        super().__init__(line=line, column=column)
        self.predicate = predicate
        self.then_body = then_body
        self.else_body = else_body

    def to_tuple(self):
        return tuple([
            ("class_name", self.clsname),
            ("predicate", self.predicate),
            ("then_body", self.then_body),
            ("else_body", self.else_body)
        ])

    def to_readable(self):
        return "{}(predicate={}, then_body={}, else_body={})".format(
            self.clsname, self.predicate, self.then_body, self.else_body)


class NodeWhileLoop(NodeExpr):
    """`while pred loop body pool` loop."""

    def __init__(self, predicate, body, line, column):
        super().__init__(line=line, column=column)
        self.predicate = predicate
        self.body = body

    def to_tuple(self):
        return tuple([
            ("class_name", self.clsname),
            ("predicate", self.predicate),
            ("body", self.body)
        ])

    def to_readable(self):
        return "{}(predicate={}, body={})".format(self.clsname, self.predicate, self.body)


class NodeCase(NodeExpr):
    """`case expr of branches esac` expression."""

    def __init__(self, expr, actions, line, column):
        super().__init__(line=line, column=column)
        self.expr = expr
        self.actions = actions

    def to_tuple(self):
        return tuple([
            ("class_name", self.clsname),
            ("expr", self.expr),
            ("actions", self.actions)
        ])

    def to_readable(self):
        return "{}(expr={}, actions={})".format(self.clsname, self.expr, self.actions)


class NodeCaseAction(NodeExpr):
    """One `id : Type => expr` branch of a case expression."""

    def __init__(self, idName, expr, _type, line, column, typeColumn):
        super().__init__(line=line, column=column)
        self.idName = idName
        self.expr = expr
        self.type = _type
        self.typeColumn = typeColumn


# ############################## UNARY OPERATIONS ##################################


class NodeUnaryOperation(NodeExpr):
    """Common base for unary operators."""

    def __init__(self, line, column):
        super().__init__(line=line, column=column)


class NodeIntegerComplement(NodeUnaryOperation):
    """Arithmetic negation `~expr`."""

    def __init__(self, integer_expr, line, column):
        super().__init__(line=line, column=column)
        self.symbol = "~"
        self.integer_expr = integer_expr

    def to_tuple(self):
        return tuple([
            ("class_name", self.clsname),
            ("integer_expr", self.integer_expr)
        ])

    def to_readable(self):
        return "{}(expr={})".format(self.clsname, self.integer_expr)


class NodeBooleanComplement(NodeUnaryOperation):
    """Boolean negation `not expr`."""

    def __init__(self, boolean_expr, line, column):
        super().__init__(line=line, column=column)
        self.symbol = "!"
        self.boolean_expr = boolean_expr

    def to_tuple(self):
        return tuple([
            ("class_name", self.clsname),
            ("boolean_expr", self.boolean_expr)
        ])

    def to_readable(self):
        return "{}(expr={})".format(self.clsname, self.boolean_expr)


# ############################## BINARY OPERATIONS ##################################


class NodeBinaryOperation(NodeExpr):
    """Common base for binary operators; `type` is filled by the checker."""

    def __init__(self, line, column):
        super().__init__(line=line, column=column)
        self.type = ''


class NodeAddition(NodeBinaryOperation):
    def __init__(self, first, second, line, column):
        super().__init__(line=line, column=column)
        self.symbol = "+"
        self.first = first
        self.second = second

    def to_tuple(self):
        return tuple([
            ("class_name", self.clsname),
            ("first", self.first),
            ("second", self.second)
        ])

    def to_readable(self):
        return "{}(first={}, second={})".format(self.clsname, self.first, self.second)


class NodeSubtraction(NodeBinaryOperation):
    def __init__(self, first, second, line, column):
        super().__init__(line=line, column=column)
        self.symbol = "-"
        self.first = first
        self.second = second

    def to_tuple(self):
        return tuple([
            ("class_name", self.clsname),
            ("first", self.first),
            ("second", self.second)
        ])

    def to_readable(self):
        return "{}(first={}, second={})".format(self.clsname, self.first, self.second)


class NodeMultiplication(NodeBinaryOperation):
    def __init__(self, first, second, line, column):
        super().__init__(line=line, column=column)
        self.symbol = "*"
        self.first = first
        self.second = second

    def to_tuple(self):
        return tuple([
            ("class_name", self.clsname),
            ("first", self.first),
            ("second", self.second)
        ])

    def to_readable(self):
        return "{}(first={}, second={})".format(self.clsname, self.first, self.second)


class NodeDivision(NodeBinaryOperation):
    def __init__(self, first, second, line, column):
        super().__init__(line=line, column=column)
        self.symbol = "/"
        self.first = first
        self.second = second

    def to_tuple(self):
        return tuple([
            ("class_name", self.clsname),
            ("first", self.first),
            ("second", self.second)
        ])

    def to_readable(self):
        return "{}(first={}, second={})".format(self.clsname, self.first, self.second)


class NodeEqual(NodeBinaryOperation):
    def __init__(self, first, second, line, column):
        super().__init__(line=line, column=column)
        self.symbol = "="
        self.first = first
        self.second = second

    def to_tuple(self):
        return tuple([
            ("class_name", self.clsname),
            ("first", self.first),
            ("second", self.second)
        ])

    def to_readable(self):
        return "{}(first={}, second={})".format(self.clsname, self.first, self.second)


class NodeLessThan(NodeBinaryOperation):
    def __init__(self, first, second, line, column):
        super().__init__(line=line, column=column)
        self.symbol = "<"
        self.first = first
        self.second = second

    def to_tuple(self):
        return tuple([
            ("class_name", self.clsname),
            ("first", self.first),
            ("second", self.second)
        ])

    def to_readable(self):
        return "{}(first={}, second={})".format(self.clsname, self.first, self.second)


class NodeLessThanOrEqual(NodeBinaryOperation):
    def __init__(self, first, second, line, column):
        super().__init__(line=line, column=column)
        self.symbol = "<="
        self.first = first
        self.second = second

    def to_tuple(self):
        return tuple([
            ("class_name", self.clsname),
            ("first", self.first),
            ("second", self.second)
        ])

    def to_readable(self):
        return "{}(first={}, second={})".format(self.clsname, self.first, self.second)
nilq/baby-python
python
from ckan.common import config

# CKAN config option holding the reCAPTCHA site key for this extension.
_SITEKEY_OPTION = 'ckanext.ytp_recommendation.recaptcha_sitekey'


def get_ytp_recommendation_recaptcha_sitekey():
    """Return the configured reCAPTCHA site key (None when unset)."""
    return config.get(_SITEKEY_OPTION)
nilq/baby-python
python
# The MIT License(MIT) # Copyright (c) 2013-2014 Matt Thomson # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
import sys

from setuptools import setup
from setuptools.command.test import test as TestCommand


def _read(file_name):
    """Return the text of a sibling file, closing the handle promptly.

    FIX: the original called open(...).read() three times without closing,
    leaking file handles for the lifetime of the process.
    """
    with open(file_name) as f:
        return f.read()


class PyTest(TestCommand):
    """`python setup.py test` command that delegates to pytest."""

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = ['pyembed']
        self.test_suite = True

    def run_tests(self):
        # import here, cause outside the eggs aren't loaded
        import pytest
        errno = pytest.main(self.test_args)
        sys.exit(errno)


setup(
    name='pyembed',
    version='1.3.3',
    author='Matt Thomson',
    author_email='mattjohnthomson@gmail.com',
    url='http://pyembed.github.io',
    description='Python OEmbed consumer library with automatic discovery of ' +
                'producers',
    long_description=_read('README.rst') + '\n\n' + _read('CHANGES.rst'),
    download_url='https://pypi.python.org/pypi/pyembed/',
    license=_read('LICENSE.txt'),
    provides=['pyembed.core'],
    packages=['pyembed.core'],
    namespace_packages=['pyembed'],

    package_data={
        "pyembed.core": [
            "config/providers.json"
        ]
    },

    install_requires=[
        'beautifulsoup4',
        'requests'
    ],

    tests_require=[
        'mock',
        'pytest',
        'vcrpy'
    ],

    cmdclass={'test': PyTest},

    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Topic :: Text Processing'
    ]
)
nilq/baby-python
python
import itertools

# Probability that a random size-k combination of the given letters
# contains at least one 'a', printed to four decimal places.
_ = input()                       # declared element count (len() suffices)
letters = input().split()
k = int(input())

combos = list(itertools.combinations(letters, k))
hits = sum(1 for combo in combos if 'a' in combo)
print("{:.4f}".format(hits / len(combos)))
nilq/baby-python
python
from django.conf.urls import include, url

# URL routing for the osmaxx project.
# NOTE(review): django.conf.urls.url was deprecated in Django 2.0 and removed
# in 4.0 — migrate to django.urls.re_path when upgrading Django.
urlpatterns = [
    # browsable REST API
    url(r'^api/', include('osmaxx.rest_api.urls')),
    url(r'^version/', include('osmaxx.version.urls', namespace='version')),
]
nilq/baby-python
python
""" Copyright MIT and Harvey Mudd College MIT License Summer 2020 Lab 1 - Driving in Shapes """ ######################################################################################## # Imports ######################################################################################## import sys sys.path.insert(1, "../../library") import racecar_core # import racecar_utils as rc_utils ######################################################################################## # Global variables ######################################################################################## rc = racecar_core.create_racecar() # Put any global variables here ######################################################################################## # Functions ######################################################################################## def start(): """ This function is run once every time the start button is pressed """ # Begin at a full stop rc.drive.stop() # Print start message # TODO (main challenge): add a line explaining what the Y button does print( ">> Lab 1 - Driving in Shapes\n" "\n" "Controls:\n" " Right trigger = accelerate forward\n" " Left trigger = accelerate backward\n" " Left joystick = turn front wheels\n" " A button = drive in a circle\n" " B button = drive in a square\n" " X button = drive in a figure eight\n" " Y button = drive in a wave\n" ) # Setting up constants counter = 0 driveA = False driveB = False driveX = False driveY = False turnTimeIncrement = 6.3 forwardsTimeIncrement = 5 limits = [0] def update(): """ After start() is run, this function is run every frame until the back button is pressed """ # Use global constants global counter global driveA global driveB global driveX global driveY # for square challenge global turnTimeIncrement global forwardsTimeIncrement global limits # TODO (warmup): Implement manual acceleration and steering manual_speed = 0 manual_angle = 0 manual_speed -= rc.controller.get_trigger(rc.controller.Trigger.LEFT) 
manual_speed += rc.controller.get_trigger(rc.controller.Trigger.RIGHT) manual_angle = rc.controller.get_joystick(rc.controller.Joystick.LEFT)[0] # print(manual_speed, manual_angle) rc.drive.set_speed_angle(manual_speed, manual_angle) # TODO (main challenge): Drive in a circle if rc.controller.was_pressed(rc.controller.Button.A): print("Driving in a circle...") driveA = True counter = 0 if driveA == True: rc.drive.set_speed_angle(0.5, -1) counter += rc.get_delta_time() if counter > 11.7: rc.drive.stop() driveA = False print("STOPPED CIRCLE") # TODO (main challenge): Drive in a square when the B button is pressed if rc.controller.was_pressed(rc.controller.Button.B): print("Driving in a square...") counter = 0 driveB = True limits = [0] for i in range(8): if i % 2 == 0: limits.append(limits[-1] + forwardsTimeIncrement) else: limits.append(limits[-1] + turnTimeIncrement) print(limits) limits[1] += 1 if driveB == True: forward_speed = 0.5 turn_speed = 0.15 counter += rc.get_delta_time() if counter < limits[1]: rc.drive.set_speed_angle(forward_speed, 0) elif counter < limits[2]: rc.drive.set_speed_angle(turn_speed, -1) elif counter < limits[3]: rc.drive.set_speed_angle(forward_speed, 0) elif counter < limits[4]: rc.drive.set_speed_angle(turn_speed, -1) elif counter < limits[5]: rc.drive.set_speed_angle(forward_speed, 0) elif counter < limits[6]: rc.drive.set_speed_angle(turn_speed, -1) elif counter < limits[7]: rc.drive.set_speed_angle(forward_speed, 0) elif counter < limits[8]: rc.drive.set_speed_angle(turn_speed, -1) else: driveB = False rc.drive.stop() print("STOPPED SQUARE") # TODO (main challenge): Drive in a figure eight when the X button is pressed if rc.controller.was_pressed(rc.controller.Button.X): print("Driving in a figure eight...") driveX = True counter = 0 if driveX == True: rc.drive.set_speed_angle(0.5, -1) counter += rc.get_delta_time() if counter < 11.7: rc.drive.set_speed_angle(0.5, 1) elif counter > 23.4: rc.drive.stop() driveX = False print("STOPPED 
FIGURE EIGHT") # TODO (main challenge): Drive in a shape of your choice when the Y button # is pressed if rc.controller.was_pressed(rc.controller.Button.Y): print("Driving in a wave...") driveY = True counter = 0 if driveY == True: print(counter) counter += rc.get_delta_time() if counter < 3: rc.drive.set_speed_angle(1, -1) elif counter < 6: rc.drive.set_speed_angle(1, 1) elif counter < 9: rc.drive.set_speed_angle(1, -1) elif counter < 12: rc.drive.set_speed_angle(1, 1) elif counter < 15: rc.drive.set_speed_angle(1, -1) else: driveY = False rc.drive.stop() print("STOPPED WAVE") ######################################################################################## # DO NOT MODIFY: Register start and update and begin execution ######################################################################################## if __name__ == "__main__": rc.set_start_update(start, update) rc.go()
nilq/baby-python
python
import pyOcean_cpu as ocean

# Demo: build a 5x5 zero tensor and set one element through an
# Ellipsis-containing index, then print the result.
a = ocean.zeros([5,5])
a[1,...,3] = 99
print(a)
nilq/baby-python
python
import sqlite3
import pandas as pd
from .. import CONFIG
import datetime
from ..scrape.utils import PROPER_DATE_FORMAT


class DB_Query():
    """Result of a SQL query: the column names and the fetched rows."""

    def __init__(self, column_names, rows):
        self.column_names = column_names
        self.rows = rows

    def to_df(self):
        """Return the result set as a pandas DataFrame."""
        return pd.DataFrame(self.rows, columns=self.column_names)


def execute_sql(sql, input_con=None, params=()):
    """
    Executes a sql query and commits the result. params is a list of values
    that will be used in place of question marks in the sql statement.
    The input_con parameter is useful for creating temporary tables that will
    be persisted across a connection. Returns a DB_Query.
    """
    if input_con is None:
        con = get_db_connection()
    else:
        con = input_con
    # Allow a bare scalar param for convenience; wrap it into a 1-tuple.
    if type(params) != tuple and type(params) != list:
        params = (params, )
    cur = con.execute(sql, params)
    results = cur.fetchall()
    column_names = [description[0] for description in cur.description] if cur.description is not None else None
    # Only commit/close connections this function created; a caller-supplied
    # connection stays open (and uncommitted) for further statements.
    if input_con is None:
        close_db_connection(con)
    return DB_Query(column_names, results)


execute_sql_persist = execute_sql


def execute_many_sql(sql, seq_of_params):
    """
    Executes a sql statement for a batch of values.
    Returns a DB_Query.
    """
    con = get_db_connection()
    cur = con.executemany(sql, seq_of_params)
    results = cur.fetchall()
    column_names = [description[0] for description in cur.description] if cur.description is not None else None
    close_db_connection(con)
    return DB_Query(column_names, results)


def get_table_column_names(table_name: str):
    """
    Returns a list of column names of the specified table. This function is in
    this module because it deals with the cursor and connection abstractipn
    layer.

    NOTE: table_name is interpolated directly into the SQL, so it must come
    from trusted code, never from user input.
    """
    con = get_db_connection()
    try:
        # FIX: the original never closed this connection, leaking it on every
        # call; the try/finally also releases it if the query raises.
        cur = con.execute("""SELECT * FROM {} LIMIT 1;""".format(table_name))
        column_names = [description[0] for description in cur.description]
    finally:
        close_db_connection(con)
    return column_names


def clear_scrape_logs(date=None):
    """
    Removes all entries from the table scrape_log that come before the given
    date. date must be in YYYY-MM-DD format, otherwise a ValueError will be
    thrown.
    """
    if date is None:
        execute_sql("""DELETE FROM scrape_log WHERE TRUE;""")
    else:
        try:
            datetime.datetime.strptime(date, PROPER_DATE_FORMAT)
        except ValueError:
            raise ValueError('date was not in the correct format: YYYY-MM-DD')
        execute_sql("""DELETE FROM scrape_log WHERE date < ?;""", date)


def drop_tables():
    """
    Drops all tables in the database.
    """
    table_names = [l[0] for l in execute_sql("""SELECT name FROM sqlite_master WHERE type='table';""").rows]
    for table_name in table_names:
        execute_sql("""DROP TABLE {};""".format(table_name))


def get_table_names():
    """
    Returns a list of table names (list of strings).
    """
    return [l[0] for l in execute_sql("""SELECT name FROM sqlite_master WHERE type='table';""").rows]


def execute_sql_file(file_name, input_con=None):
    """
    Executes sql in the given file. Returns the output of the last sql
    statement in the file. Statements are separated by the semicolon (;)
    character.

    NOTE: the split on ';' is naive — semicolons inside string literals will
    break statement boundaries.
    """
    if input_con is None:
        con = get_db_connection()
    else:
        con = input_con
    output = None
    with open(file_name, 'r') as f:
        for cmd in f.read().split(';')[:-1]:
            try:
                output = execute_sql(cmd, con)
            except Exception as e:
                print('Tried to execute this command but failed: {}'.format(cmd))
                raise e
    # close the con if it was created by this function
    if input_con is None:
        close_db_connection(con)
    return output


execute_sql_file_persist = execute_sql_file


def get_db_connection():
    """Open a connection to the configured sqlite database file."""
    con = sqlite3.connect('{}/{}.db'.format(CONFIG['DB_PATH'], CONFIG['DB_NAME']))
    return con


def close_db_connection(con):
    """Commit any pending transaction and close the connection."""
    con.commit()
    con.close()
nilq/baby-python
python
#!/usr/bin/env python3 """ This is the player application called by remctl or through SSH; it is the replacement for the Perl command 'acoustics' and provides the same functionality. """ import sys import os import importlib sys.path.append(os.path.dirname(sys.argv[0]) + '/lib') from amp import db, config if __name__ == "__main__": # Arguments are [player] [command] [arguments] if len(sys.argv) < 2: print("Expected player_id") sys.exit(1) # And we expect at least a player and a command if len(sys.argv) < 3: print("Expected command") sys.exit(1) conf = config.AcousticsConfig() if sys.argv[1] not in conf["{}"]['players'].split(","): sys.exit(1) player_module = importlib.import_module( conf.translate(conf['player.' + sys.argv[1]]["module"])) DB = db.Sqlite(conf['database']['data_source'].split(":")[-1]) player = player_module.PlayerImpl(sys.argv[1], DB) # Execute the player command. player.execute(sys.argv[2], sys.argv[3:])
nilq/baby-python
python
import argparse
import yaml
import os
import anndata as ad

from morphelia.preprocessing import *
from morphelia.features import *


def run(inp):
    """Preprocess morphological annotated data.

    :param inp: configuration dict loaded from the YAML file; expected keys
        include 'output', 'pp_inp', 'pp_name', 'filter_debris', 'norm_method',
        'batch_id', 'treat_var', 'ctrl_name', 'condition_group'.
    """
    # where to save figures
    figdir = os.path.join(inp['output'], './figures/')

    # load data
    inp_data = os.path.join(inp['output'], inp[inp['pp_inp']])
    if not os.path.exists(inp_data):
        raise OSError(f"File with subsample data does not exist: {inp_data}")
    adata = ad.read_h5ad(inp_data)

    # clean data
    print("[STEP 1] Clean data: Drop features containing Nan values, "
          "duplicated features or invariant features.")
    adata = drop_nan(adata, verbose=True)
    adata = drop_duplicates(adata, verbose=True)
    adata = drop_invariant(adata, verbose=True)

    # filter debris
    print("[STEP 2] Filter debris.")
    if inp['filter_debris']:
        adata = filter_debris(adata, show=True, save=figdir, verbose=True)
    else:
        print('Skipped.')

    # normalize data
    print("[STEP 3] Normalize data.")
    adata = normalize(adata,
                      method=inp['norm_method'],
                      by=inp['batch_id'],
                      pop_var=inp['treat_var'],
                      norm_pop=inp['ctrl_name'],
                      drop_nan=True,
                      verbose=True)

    print("[STEP 4] Drop noise.")
    adata = drop_noise(adata, verbose=True, by=inp['condition_group'])

    print("[STEP 5] Drop features with near zero variance.")
    adata = drop_near_zero_variance(adata, verbose=True)

    print("[STEP 6] Drop outlier.")
    adata = drop_outlier(adata, verbose=True)

    # FIX: the progress labels below were duplicated/off by one — "[STEP 6]"
    # was printed twice and "Write file" was labeled STEP 7.
    print("[STEP 7] Drop highly correlated features.")
    adata = drop_highly_correlated(adata, thresh=0.95, verbose=True,
                                   show=True, save=figdir)

    # write file
    print("[STEP 8] Write file.")
    adata.write(os.path.join(inp['output'], inp['pp_name']))


def main(args=None):
    """Implements the commandline tool to Preprocess an AnnData object
    with morphological data from single cells."""
    # initiate the arguments parser
    parser = argparse.ArgumentParser(description=f'Preprocess data.')
    parser.add_argument('config', type=str, help='config file in yaml format.')

    # parse
    args = parser.parse_args(args)

    yml_path = args.config

    with open(yml_path, 'r') as f:
        data = yaml.load(f, Loader=yaml.FullLoader)

    # run
    run(data)
nilq/baby-python
python
"""Encoder definition for transformer-transducer models.""" import torch from espnet.nets.pytorch_backend.transducer.blocks import build_blocks from espnet.nets.pytorch_backend.transducer.vgg2l import VGG2L from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm from espnet.nets.pytorch_backend.transformer.subsampling import Conv2dSubsampling class Encoder(torch.nn.Module): """Transformer encoder module. Args: idim (int): input dim enc_arch (list): list of encoder blocks (type and parameters) input_layer (str): input layer type repeat_block (int): repeat provided block N times if N > 1 self_attn_type (str): type of self-attention positional_encoding_type (str): positional encoding type positionwise_layer_type (str): linear positionwise_activation_type (str): positionwise activation type conv_mod_activation_type (str): convolutional module activation type normalize_before (bool): whether to use layer_norm before the first block padding_idx (int): padding_idx for embedding input layer (if specified) """ def __init__( self, idim, enc_arch, input_layer="linear", repeat_block=0, self_attn_type="selfattn", positional_encoding_type="abs_pos", positionwise_layer_type="linear", positionwise_activation_type="relu", conv_mod_activation_type="relu", normalize_before=True, padding_idx=-1, ): """Construct an Transformer encoder object.""" super().__init__() self.embed, self.encoders, self.enc_out = build_blocks( "encoder", idim, input_layer, enc_arch, repeat_block=repeat_block, self_attn_type=self_attn_type, positional_encoding_type=positional_encoding_type, positionwise_layer_type=positionwise_layer_type, positionwise_activation_type=positionwise_activation_type, conv_mod_activation_type=conv_mod_activation_type, padding_idx=padding_idx, ) self.normalize_before = normalize_before if self.normalize_before: self.after_norm = LayerNorm(self.enc_out) def forward(self, xs, masks): """Encode input sequence. 
Args: xs (torch.Tensor): input tensor masks (torch.Tensor): input mask Returns: xs (torch.Tensor): position embedded input mask (torch.Tensor): position embedded mask """ if isinstance(self.embed, (Conv2dSubsampling, VGG2L)): xs, masks = self.embed(xs, masks) else: xs = self.embed(xs) xs, masks = self.encoders(xs, masks) if isinstance(xs, tuple): xs = xs[0] if self.normalize_before: xs = self.after_norm(xs) return xs, masks
nilq/baby-python
python
import gym

import quadruped_gym.gym  # noqa: F401


def test_reset():
    """reset() must yield an observation that lies inside the observation space."""
    environment = gym.make('A1BulletGymEnv-v0')
    first_observation = environment.reset()
    assert first_observation in environment.observation_space


def test_step():
    """The environment must accept ten randomly sampled actions after a reset."""
    environment = gym.make('A1BulletGymEnv-v0')
    environment.reset()
    for _ in range(10):
        environment.step(environment.action_space.sample())
nilq/baby-python
python
import numpy as np

if __name__ == "__main__":
    # Read the desired array shape from stdin as whitespace-separated ints.
    shape = [int(token) for token in input().split()]
    print(np.zeros(shape, int))
    print(np.ones(shape, int))
nilq/baby-python
python
from jivago.jivago_application import JivagoApplication
from jivago.wsgi.annotations import Resource
from jivago.wsgi.methods import GET


@Resource("/")
class HelloResource(object):
    """Root resource: serves a plain hello-world string."""

    @GET
    def get_hello(self) -> str:
        """Handle GET / by returning a greeting."""
        return "Hello World!"


# WSGI application object; discovered by the framework at import time.
app = JivagoApplication()

if __name__ == '__main__':
    # Development server only — not for production use.
    app.run_dev()
nilq/baby-python
python
import pandas
from matplotlib import pyplot

# One-row frame: 431 items total, 106 of them incomplete.
counts = pandas.DataFrame([[431-106, 106]], columns=['complete', 'incomplete'])
counts.plot(kind='bar', stacked=True, legend=False)
pyplot.show()
nilq/baby-python
python
from typing import Tuple, Union, List, Optional

from sectionproperties.pre import sections
import numpy as np
import shapely
import shapely.geometry  # explicit: `import shapely` alone does not guarantee .geometry


def create_line_segment(
    point_on_line: Union[Tuple[float, float], np.ndarray],
    vector: np.ndarray,
    bounds: tuple,
):
    """
    Return a LineString of a line that contains 'point_on_line' in the
    direction of 'vector' bounded by 'bounds'. 'bounds' is a tuple of float
    containing a max ordinate and min ordinate.

    For a non-vertical line the segment spans [min, max] in x; for a
    vertical line it spans [min, max] in y.
    """
    p_x, p_y = point_on_line
    b_2 = max(bounds)
    b_1 = min(bounds)
    if vector[0] != 0:  # Not a vertical line
        # Scale the direction vector so the end points land on x = b_1, b_2.
        scale_factor_2 = (b_2 - p_x) / vector[0]
        y_2 = scale_factor_2 * vector[1] + p_y

        scale_factor_1 = (b_1 - p_x) / vector[0]
        y_1 = scale_factor_1 * vector[1] + p_y
        return shapely.geometry.LineString([(b_1, y_1), (b_2, y_2)])
    else:  # Vertical line: scale so the end points land on y = b_1, b_2.
        scale_factor_2 = (b_2 - p_y) / vector[1]
        x_2 = scale_factor_2 * vector[0] + p_x

        scale_factor_1 = (b_1 - p_y) / vector[1]
        x_1 = scale_factor_1 * vector[0] + p_x
        return shapely.geometry.LineString([(x_1, b_1), (x_2, b_2)])


def group_top_and_bottom_polys(
    polys: shapely.geometry.GeometryCollection,
    line: shapely.geometry.LineString,
) -> Tuple[list, list]:
    """
    Returns tuple of two lists representing the list of Polygons in 'polys'
    on the "top" side of 'line' and the list of Polygons on the "bottom"
    side of the 'line' after the original geometry has been split by 'line'.

    The 0-th tuple element is the "top" polygons and the 1-st element is the
    "bottom" polygons.

    In the event that 'line' is a perfectly vertical line, the "top" polys
    are the polygons on the "right" of the 'line' and the "bottom" polys are
    the polygons on the "left" of the 'line'.
    """
    top_acc = []
    bot_acc = []

    # The line equation does not change per polygon; compute it once
    # instead of on every loop iteration.
    m, b = line_mx_plus_b(line)

    # Shapely >= 1.8 deprecates iterating a GeometryCollection directly;
    # use .geoms when available, fall back for plain iterables of polygons.
    geoms = getattr(polys, "geoms", polys)

    for poly in geoms:
        # representative_point() is guaranteed to be inside the polygon, so
        # classifying it classifies the whole (already split) polygon.
        px, py = poly.representative_point().coords[0]

        if b is not None:  # Not a vertical line (special case)
            y_test = m * px + b
            if py < y_test:
                bot_acc.append(poly)
            elif py > y_test:
                top_acc.append(poly)
        else:  # The special case of vertical line: compare x-ordinates.
            lx, _ = line.coords[0]
            if px < lx:
                bot_acc.append(poly)
            elif px > lx:
                top_acc.append(poly)
    return top_acc, bot_acc


def line_mx_plus_b(line: shapely.geometry.LineString,) -> Tuple[float, Optional[float]]:
    """
    Returns a tuple representing the values of "m" and "b" from the
    definition of 'line' as "y = mx + b".

    A vertical line has no finite slope/intercept; the sentinel (1, None)
    is returned in that case and callers test ``b is None`` to detect it.
    """
    y2, y1 = line.coords[1][1], line.coords[0][1]
    x2, x1 = line.coords[1][0], line.coords[0][0]
    if x2 - x1 == 0:
        return (1, None)
    m_slope = (y2 - y1) / (x2 - x1)
    point_on_line = line.coords[0]
    p_x, p_y = point_on_line
    # Solve y = mx + b for b using a known point on the line.
    b_intercept = p_y - m_slope * p_x
    return (m_slope, b_intercept)


def perp_mx_plus_b(
    m_slope: float, point_on_line: Tuple[float, float],
) -> Tuple[float, float]:
    """
    Returns a tuple representing the values of "m" and "b" for a line that
    is perpendicular to a line of slope 'm_slope' and contains
    'point_on_line', an (x, y) coordinate.
    """
    m_perp = -1 / m_slope
    p_x, p_y = point_on_line
    b_intercept = p_y - m_perp * p_x
    return (m_perp, b_intercept)


def line_intersection(
    m_1: float, b_1: float, m_2: float, b_2: float,
) -> Optional[float]:
    """
    Returns a float representing the x-ordinate of the intersection point of
    the lines defined by y = m_1*x + b_1 and y = m_2*x + b_2.

    Returns None if the lines are parallel (equal slopes).
    """
    try:
        x = (b_2 - b_1) / (m_1 - m_2)
    except ZeroDivisionError:
        x = None
    return x


def sum_poly_areas(lop: List[shapely.geometry.Polygon],) -> float:
    """
    Returns a float representing the total area of all polygons in 'lop',
    the list of polygons.
    """
    # Built-in sum over a generator replaces the manual accumulator loop.
    return sum(poly.area for poly in lop)
nilq/baby-python
python
# -*- coding: utf-8 -*-


class Config(object):
    """Thin read-only wrapper around a JSON configuration file.

    Values are looked up with ``config["key"]``; missing keys yield ``None``
    rather than raising KeyError.
    """

    def __init__(self, config_path):
        """Validate 'config_path' and eagerly parse the JSON it points to."""
        config_path = Config.validate_path(config_path)
        self.config_path = config_path
        self._config = Config.validate_format_and_parse(config_path)

    def __getitem__(self, key):
        # dict.get: absent keys return None instead of raising.
        return self._config.get(key)

    @staticmethod
    def validate_path(path):
        """Return 'path' if it names an existing .json file.

        Raises:
            ValueError: if the file does not have a .json extension.
            FileNotFoundError: if the file does not exist.
        """
        import os
        if os.path.splitext(path)[1] != '.json':
            # ValueError is more precise than the bare Exception previously
            # raised (and still caught by any `except Exception`).
            raise ValueError('Config file must be json format')
        if not os.path.exists(path):
            # Include the offending path in the error for easier debugging.
            raise FileNotFoundError(path)
        return path

    @staticmethod
    def validate_format_and_parse(path):
        """Parse 'path' as UTF-8 JSON and return the resulting object."""
        import json
        import codecs
        # 'r' instead of the original 'rb+': reading configuration must not
        # require write access to the file.
        with codecs.open(path, 'r', 'utf-8') as rcf:
            return json.load(rcf)


def concat_config_path(file_located, filename):
    """Return '<parent of the directory containing file_located>/<filename>'.

    Typically called as ``concat_config_path(__file__, "config.json")`` to
    locate a config file one level above the calling module's directory.
    """
    import os
    return os.path.abspath(os.path.join(os.path.split(file_located)[0], os.pardir)) + '/' + filename
nilq/baby-python
python
from django.conf.urls import include, url
from django.contrib import admin
#handler400 = 'world.views.error400page' #AEN: THIS doesn't work!

import voxel_globe.main.views

# Top-level URL routing: the admin site, the main home page, a couple of
# placeholder views, the DRF auth endpoint, and one include per sub-app.
urlpatterns = [
    #Admin site apps
    url(r'^admin/', include(admin.site.urls)),

    #Test app for development reasons
    url(r'^world/', include('voxel_globe.world.urls', namespace='world')),

    # pages
    #Main home page
    url(r'', include('voxel_globe.main.urls', namespace='main')),

    #Placeholders
    # url(r'^apps/imageIngest/$', voxel_globe.main.views.imageIngest,
    #     name='imageIngest'),
    url(r'^apps/voxelCreator/$', voxel_globe.main.views.voxelCreator,
        name='voxelCreator'),
    url(r'^apps/voxelWorldViewer/$', voxel_globe.main.views.voxelWorldViewer,
        name='voxelWorldViewer'),
    # url(r'^apps/ingest/upload$', 'voxel_globe.ingest.views.upload',
    #     name="uploadEndpoint"),

    #REST auth endpoint
    url(r'^rest/', include('rest_framework.urls', namespace='rest_framework')),

    #apps
    url(r'^meta/', include('voxel_globe.meta.urls', namespace='meta')),
    url(r'^apps/task/', include('voxel_globe.task.urls', namespace='task')),
    url(r'^apps/tiepoint/', include('voxel_globe.tiepoint.urls',
        namespace='tiepoint')),
    url(r'^apps/voxel_viewer/', include('voxel_globe.voxel_viewer.urls',
        namespace='voxel_viewer')),
    url(r'^apps/ingest/', include('voxel_globe.ingest.urls',
        namespace='ingest')),
    url(r'^apps/sfm/', include('voxel_globe.visualsfm.urls',
        namespace='visualsfm')),
    url(r'^apps/voxel_world/', include('voxel_globe.build_voxel_world.urls',
        namespace='build_voxel_world')),
    url(r'^apps/generate_point_cloud/',
        include('voxel_globe.generate_point_cloud.urls',
        namespace='generate_point_cloud')),
    url(r'^apps/tiepoint_registration/',
        include('voxel_globe.tiepoint_registration.urls',
        namespace='tiepoint_registration')),
    url(r'^apps/height_map/', include('voxel_globe.height_map.urls',
        namespace='height_map')),
    url(r'^apps/filter_number_observations/',
        include('voxel_globe.filter_number_observations.urls',
        namespace='filter_number_observations')),
    url(r'^download/', include('voxel_globe.download.urls',
        namespace='download')),
    url(r'^apps/create_site/', include('voxel_globe.create_site.urls',
        namespace='create_site')),
    url(r'^apps/image_view/', include('voxel_globe.image_view.urls',
        namespace='image_view')),
    url(r'^apps/event_trigger/', include('voxel_globe.event_trigger.urls',
        namespace='event_trigger')),
    url(r'^apps/channels/', include('voxel_globe.channel_test.urls',
        namespace='channel_test')),
    url(r'^apps/websockets/', include('voxel_globe.websockets.urls',
        namespace='websockets')),

    #Other static protected assets
    url(r'^images/', include('voxel_globe.security.urls',
        namespace='security')),
]
nilq/baby-python
python
"""added column to DT and created NewTable Revision ID: 1100598db8eb Revises: 66362e7784fd Create Date: 2021-02-21 13:56:05.307362 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '1100598db8eb' down_revision = '66362e7784fd' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.add_column('dummy_table', sa.Column('mobile_number', sa.String(length=15), nullable=True)) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_column('dummy_table', 'mobile_number') # ### end Alembic commands ###
nilq/baby-python
python
import time
import multiprocessing
import subprocess
import sys
import os


def run(name):
    """Launch test_remote.py (located next to this file) in a subprocess.

    ``name`` is forwarded as the script's first command-line argument.
    """
    dir_path = os.path.dirname(os.path.realpath(__file__))
    # sys.executable is already a string; the original "'%s' % sys.executable"
    # formatting was a no-op and has been dropped.
    subprocess.Popen((sys.executable, os.path.join(dir_path, "test_remote.py"), name, "etc etc"))


if __name__ == '__main__':
    # Spawn the launcher in a separate process, then idle so the parent
    # interpreter stays alive while the child runs.
    multiprocessing.Process(target=run, args=("subprocess",)).start()
    while True:
        time.sleep(0.1)
nilq/baby-python
python
# -*- coding: utf-8 -*-
from __future__ import print_function

import os
from concurrent.futures import TimeoutError
from kikimr.public.sdk.python import client as ydb
import random

# Number of expiration_queue_<N> tables; each can be drained by its own worker.
EXPIRATION_QUEUE_COUNT = 4
# Number of uniform partitions for the documents table.
DOC_TABLE_PARTITION_COUNT = 4

ADD_DOCUMENT_TRANSACTION = """PRAGMA TablePathPrefix("%s");
DECLARE $url AS Utf8;
DECLARE $html AS Utf8;
DECLARE $timestamp AS Uint64;
$doc_id = Digest::CityHash($url);

REPLACE INTO documents (doc_id, url, html, timestamp)
VALUES ($doc_id, $url, $html, $timestamp);

REPLACE INTO expiration_queue_%d (timestamp, doc_id)
VALUES ($timestamp, $doc_id);
"""

READ_DOCUMENT_TRANSACTION = """PRAGMA TablePathPrefix("%s");
DECLARE $url AS Utf8;
$doc_id = Digest::CityHash($url);

SELECT doc_id, url, html, timestamp
FROM documents
WHERE doc_id = $doc_id;
"""

READ_EXPIRED_BATCH_TRANSACTION = """PRAGMA TablePathPrefix("%s");
DECLARE $timestamp AS Uint64;
DECLARE $prev_timestamp AS Uint64;
DECLARE $prev_doc_id AS Uint64;

$data = (
    SELECT * FROM expiration_queue_%d
    WHERE timestamp <= $timestamp AND timestamp > $prev_timestamp
    ORDER BY timestamp, doc_id
    LIMIT 100

    UNION ALL

    SELECT * FROM expiration_queue_%d
    WHERE timestamp = $prev_timestamp AND doc_id > $prev_doc_id
    ORDER BY timestamp, doc_id
    LIMIT 100
);

SELECT timestamp, doc_id
FROM $data
ORDER BY timestamp, doc_id
LIMIT 100;
"""

DELETE_EXPIRED_DOCUMENT = """PRAGMA TablePathPrefix("%s");
DECLARE $doc_id AS Uint64;
DECLARE $timestamp AS Uint64;

DELETE FROM documents
WHERE doc_id = $doc_id AND timestamp = $timestamp;

DELETE FROM expiration_queue_%d
WHERE timestamp = $timestamp AND doc_id = $doc_id;
"""


def is_directory_exists(driver, path):
    """Return True if 'path' exists in the scheme and is a directory."""
    try:
        return driver.scheme_client.describe_path(path).is_directory()
    except ydb.SchemeError:
        return False


def ensure_path_exists(driver, database, path):
    """Create every missing directory component of 'path' under 'database'."""
    paths_to_create = list()
    path = path.rstrip("/")
    # Walk up until an existing ancestor is found, collecting missing dirs.
    while path != "":
        full_path = os.path.join(database, path)
        if is_directory_exists(driver, full_path):
            break
        paths_to_create.append(full_path)
        path = os.path.dirname(path).rstrip("/")

    # Create the missing directories from the top down.
    while len(paths_to_create) > 0:
        full_path = paths_to_create.pop(-1)
        driver.scheme_client.make_directory(full_path)


# Creates Documents table and multiple ExpirationQueue tables
def create_tables(table_client, path):
    session = table_client.session().create()

    # Documents table stores the contents of web pages.
    # The table is partitioned by hash(Url) in order to evenly distribute the load.
    session.create_table(
        os.path.join(path, "documents"),
        ydb.TableDescription()
        .with_primary_keys("doc_id")
        .with_columns(
            ydb.Column("doc_id", ydb.OptionalType(ydb.PrimitiveType.Uint64)),
            ydb.Column("url", ydb.OptionalType(ydb.PrimitiveType.Utf8)),
            ydb.Column("html", ydb.OptionalType(ydb.PrimitiveType.Utf8)),
            ydb.Column("timestamp", ydb.OptionalType(ydb.PrimitiveType.Uint64)),
        )
        .with_profile(
            ydb.TableProfile()
            # Partition Documents table by DocId
            .with_partitioning_policy(
                ydb.PartitioningPolicy().with_uniform_partitions(
                    DOC_TABLE_PARTITION_COUNT
                )
            )
        )
    )

    # Multiple ExpirationQueue tables allow to scale the load.
    # Each ExpirationQueue table can be handled by a dedicated worker.
    for expiration_queue in range(EXPIRATION_QUEUE_COUNT):
        session.create_table(
            os.path.join(path, "expiration_queue_%d" % expiration_queue),
            ydb.TableDescription()
            .with_primary_keys("timestamp", "doc_id")
            .with_columns(
                ydb.Column("doc_id", ydb.OptionalType(ydb.PrimitiveType.Uint64)),
                ydb.Column("timestamp", ydb.OptionalType(ydb.PrimitiveType.Uint64)),
            )
        )


# Insert or replaces a document.
def add_document(session, path, url, html, timestamp):
    # Pick a random expiration queue to spread the write load.
    queue = random.randint(0, EXPIRATION_QUEUE_COUNT - 1)
    # this will keep prepared query in cache
    prepared = session.prepare(ADD_DOCUMENT_TRANSACTION % (path, queue))

    print(
        "> AddDocument: \n"
        "    Url: %s\n"
        "    Timestamp %d" % (
            url,
            timestamp,
        )
    )

    session.transaction().execute(
        prepared,
        {'$url': url, '$html': html, '$timestamp': timestamp},
        commit_tx=True,
    )


# Reads document contents.
def read_document(session, path, url):
    prepared = session.prepare(READ_DOCUMENT_TRANSACTION % path)

    print("> ReadDocument %s:" % url)

    result_sets = session.transaction().execute(prepared, {'$url': url}, commit_tx=True)
    result_set = result_sets[0]
    if len(result_set.rows) > 0:
        document = result_sets[0].rows[0]
        print(
            " DocId: %s\n"
            " Url: %s\n"
            " Timestamp: %d\n"
            " Html: %s" % (
                document.doc_id,
                document.url,
                document.timestamp,
                document.html,
            )
        )
    else:
        print(" Not found")


def read_expired_document(session, path, expiration_queue, timestamp, last_timestamp, last_doc_id):
    """Read the next batch (<=100) of expired docs after (last_timestamp, last_doc_id)."""
    prepared = session.prepare(READ_EXPIRED_BATCH_TRANSACTION % (path, expiration_queue, expiration_queue))

    result_sets = session.transaction().execute(
        prepared,
        {'$timestamp': timestamp, '$prev_timestamp': last_timestamp, '$prev_doc_id': last_doc_id},
        commit_tx=True,
    )
    return result_sets[0]


def delete_expired_document(session, path, expiration_queue, doc_id, timestamp):
    """Delete one document row and its matching expiration-queue row."""
    prepared = session.prepare(DELETE_EXPIRED_DOCUMENT % (path, expiration_queue))

    session.transaction().execute(
        prepared,
        {'$doc_id': doc_id, '$timestamp': timestamp},
        commit_tx=True,
    )


def delete_expired(session, path, expiration_queue, timestamp):
    """Drain one expiration queue: delete every document expired at or before 'timestamp'."""
    print("> DeleteExpired from queue #%d:" % expiration_queue)
    last_timestamp = 0
    last_doc_id = 0

    # Keyset pagination: (last_timestamp, last_doc_id) tracks the last row
    # processed so each batch resumes after the previous one.
    while True:
        result_set = read_expired_document(
            session,
            path,
            expiration_queue,
            timestamp,
            last_timestamp,
            last_doc_id
        )

        if not result_set.rows:
            break

        for document in result_set.rows:
            last_doc_id = document.doc_id
            last_timestamp = document.timestamp
            print("  DocId: %s Timestamp: %d" % (last_doc_id, timestamp))

            delete_expired_document(
                session,
                path,
                expiration_queue,
                last_doc_id,
                last_timestamp)


def _run(driver, database, path):
    """Demo scenario: create tables, insert, read, expire, and re-read documents."""
    ensure_path_exists(driver, database, path)

    full_path = os.path.join(database, path)

    create_tables(driver.table_client, full_path)

    session = driver.table_client.session().create()

    add_document(
        session,
        full_path,
        "https://yandex.ru/",
        "<html><body><h1>Yandex</h1></body></html>",
        1,
    )

    add_document(
        session,
        full_path,
        "https://ya.ru/",
        "<html><body><h1>Yandex</h1></body></html>",
        2
    )

    read_document(session, full_path, "https://yandex.ru/")
    read_document(session, full_path, "https://ya.ru/")

    for expiration_queue in range(EXPIRATION_QUEUE_COUNT):
        delete_expired(
            session,
            full_path,
            expiration_queue,
            1
        )

    read_document(session, full_path, "https://ya.ru/")

    add_document(
        session,
        full_path,
        "https://yandex.ru/",
        "<html><body><h1>Yandex</h1></body></html>",
        2
    )

    add_document(
        session,
        full_path,
        "https://yandex.ru/",
        "<html><body><h1>Yandex</h1></body></html>",
        3
    )

    for expiration_queue in range(EXPIRATION_QUEUE_COUNT):
        delete_expired(
            session,
            full_path,
            expiration_queue,
            2
        )

    read_document(session, full_path, "https://yandex.ru/")
    read_document(session, full_path, "https://ya.ru/")


def run(endpoint, database, path):
    """Connect to YDB at 'endpoint'/'database' and run the demo under 'path'."""
    driver_config = ydb.DriverConfig(endpoint, database, credentials=ydb.construct_credentials_from_environ())

    with ydb.Driver(driver_config) as driver:
        try:
            driver.wait(timeout=5)
        except TimeoutError:
            raise RuntimeError("Connect failed to YDB")

        _run(driver, database, path)
nilq/baby-python
python
#Elsa by Frostmeister
import discord
import math
import time
import googlesearch as gs
import urbandictionary as ud
import random
import asyncio
from discord.ext import commands

####### General

class General:
    """General-purpose informational commands for the Elsa bot."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def invite(self):
        """Invite link for Elsa"""
        embed = discord.Embed(title="Elsa's Invite Link ", description="You can invite me to your server", color=0xf5f5dc)
        embed.add_field(name="Name ", value="Elsa", inline=True)
        embed.add_field(name="Prefix for commands", value="e! , E!", inline=True)
        embed.add_field(name="Invite Link", value=" https://discordapp.com/oauth2/authorize?client_id=396540743877001216&scope=bot&permissions=2146958591", inline=True)
        embed.set_thumbnail(url="https://cdn.discordapp.com/avatars/396540743877001216/85e72bb348b8e0646f25c2926cd7cea5.jpg?size=1024 ")
        embed.set_footer(text=" Feel free to uncheck some permissions ")
        await self.bot.say(embed=embed)

    @commands.command(pass_context=True)
    async def info(self, ctx, user: discord.Member = None):
        """Gives info about the someone"""
        # Default to the command author when no member was mentioned.
        user = user or ctx.message.author
        try:
            embed = discord.Embed(title="{}'s info".format(user.name), description="Here's what I could find in my bag...", color=0xf5f5dc)
            embed.add_field(name="Username", value=user.name, inline=True)
            embed.add_field(name="Nickname", value=user.nick, inline=True)
            embed.add_field(name="ID", value=user.id, inline=True)
            embed.add_field(name="Bot", value=user.bot, inline=True)
            embed.add_field(name="Status", value=user.status, inline=True)
            embed.add_field(name="Highest role", value=user.top_role)
            embed.add_field(name="Joined Server", value=user.joined_at)
            embed.add_field(name="Joined Discord", value=user.created_at)
            embed.set_thumbnail(url=user.avatar_url)
            await self.bot.say(embed=embed)
        except Exception:
            # Bug fix: the original bare `except:` also swallowed SystemExit
            # and KeyboardInterrupt; catch only ordinary exceptions.
            await self.bot.say("Error")

    @commands.command(pass_context=True, aliases=['svinfo'])
    async def serverinfo(self, ctx):
        """Gives info about the server"""
        embed = discord.Embed(title="{}'s info".format(ctx.message.server.name), description="Here's what I could find in my bag...", color=0xf5f5dc)
        embed.add_field(name="Servername", value=ctx.message.server.name, inline=True)
        embed.add_field(name="ID", value=ctx.message.server.id, inline=True)
        embed.add_field(name="Verification Level", value=ctx.message.server.verification_level, inline=True)
        embed.add_field(name="Server Region", value=ctx.message.server.region, inline=True)
        embed.add_field(name="Owner", value=ctx.message.server.owner, inline=True)
        embed.add_field(name="Channels", value=len(ctx.message.server.channels))
        embed.add_field(name="Roles", value=len(ctx.message.server.roles))
        embed.add_field(name="Members", value=len(ctx.message.server.members))
        embed.add_field(name="Emojis", value=len(ctx.message.server.emojis))
        embed.set_thumbnail(url=ctx.message.server.icon_url)
        await self.bot.say(embed=embed)


###### SETUP

def setup(bot):
    """Entry point used by discord.py's extension loader."""
    bot.add_cog(General(bot))
nilq/baby-python
python
# -*- coding:utf-8 -*-
"""
jsondict <-> dict <-> model object
         \______________________/
"""


def _datetime(*args):
    # Build a timezone-aware (UTC) datetime from positional parts.
    import pytz
    from datetime import datetime

    args = list(args)
    args.append(pytz.utc)
    return datetime(*args)


def _getTarget():
    # The factory under test.
    from alchemyjsonschema.mapping import Draft4MappingFactory

    return Draft4MappingFactory


def _makeOne(schema_factory, model, *args, **kwargs):
    # Bind the mapping factory to the test models module and return the
    # mapping for the given model class.
    import alchemyjsonschema.tests.models as models

    module = models
    mapping_factory = _getTarget()(schema_factory, module, *args, **kwargs)
    return mapping_factory(model)


def test_it__dict_from_model_object():
    # model object -> plain dict (datetimes stay datetime objects)
    from alchemyjsonschema import StructuralWalker, SchemaFactory
    from .models import Group, User

    schema_factory = SchemaFactory(StructuralWalker)
    target = _makeOne(schema_factory, Group)

    group = Group(
        name="ravenclaw", color="blue", created_at=_datetime(2000, 1, 1, 10, 0, 0, 0)
    )
    group.users = [User(name="foo", created_at=_datetime(2000, 1, 1, 10, 0, 0, 0))]

    group_dict = target.dict_from_object(group)
    assert group_dict == {
        "color": "blue",
        "users": [
            {
                "created_at": _datetime(2000, 1, 1, 10, 0, 0, 0),
                "pk": None,
                "name": "foo",
            }
        ],
        "created_at": _datetime(2000, 1, 1, 10, 0, 0, 0),
        "pk": None,
        "name": "ravenclaw",
    }


def test_it__jsondict_from_model():
    # model object -> JSON-serializable dict (datetimes become ISO strings)
    from alchemyjsonschema import StructuralWalker, SchemaFactory
    from .models import Group, User

    schema_factory = SchemaFactory(StructuralWalker)
    target = _makeOne(schema_factory, Group)

    group = Group(
        name="ravenclaw", color="blue", created_at=_datetime(2000, 1, 1, 10, 0, 0, 0)
    )
    group.users = [User(name="foo", created_at=_datetime(2000, 1, 1, 10, 0, 0, 0))]

    jsondict = target.jsondict_from_object(group, verbose=True)
    import json

    # Must round-trip through json.dumps without a custom encoder.
    assert json.dumps(jsondict)
    assert jsondict == {
        "color": "blue",
        "name": "ravenclaw",
        "users": [
            {"name": "foo", "pk": None, "created_at": "2000-01-01T10:00:00+00:00"}
        ],
        "pk": None,
        "created_at": "2000-01-01T10:00:00+00:00",
    }


def test_it__validate__jsondict():
    # A well-formed jsondict must pass schema validation without raising.
    from alchemyjsonschema import StructuralWalker, SchemaFactory
    from .models import Group

    schema_factory = SchemaFactory(StructuralWalker)
    target = _makeOne(schema_factory, Group)

    jsondict = {
        "color": "blue",
        "name": "ravenclaw",
        "users": [{"name": "foo", "pk": 1, "created_at": "2000-01-01T10:00:00+00:00"}],
        "pk": 1,
        "created_at": "2000-01-01T10:00:00+00:00",
    }
    target.validate_jsondict(jsondict)


def test_it__dict_from_jsondict():
    # JSON dict -> plain dict (ISO strings parsed back to datetimes)
    from alchemyjsonschema import StructuralWalker, SchemaFactory
    from .models import Group

    schema_factory = SchemaFactory(StructuralWalker)
    target = _makeOne(schema_factory, Group)

    jsondict = {
        "color": "blue",
        "name": "ravenclaw",
        "users": [{"name": "foo", "pk": 10, "created_at": "2000-01-01T10:00:00+00:00"}],
        "pk": None,
        "created_at": "2000-01-01T10:00:00+00:00",
    }
    group_dict = target.dict_from_jsondict(jsondict)
    assert group_dict == {
        "color": "blue",
        "users": [
            {"created_at": _datetime(2000, 1, 1, 10, 0, 0, 0), "pk": 10, "name": "foo"}
        ],
        "created_at": _datetime(2000, 1, 1, 10, 0, 0, 0),
        "pk": None,
        "name": "ravenclaw",
    }


def test_it__object_from_dict():
    # plain dict -> model object graph (Group with nested Users)
    from alchemyjsonschema import StructuralWalker, SchemaFactory
    from .models import Group, User

    schema_factory = SchemaFactory(StructuralWalker)
    target = _makeOne(schema_factory, Group)

    group_dict = {
        "color": "blue",
        "users": [
            {
                "created_at": _datetime(2000, 1, 1, 10, 0, 0, 0),
                "pk": None,
                "name": "foo",
            }
        ],
        "created_at": _datetime(2000, 1, 1, 10, 0, 0, 0),
        "pk": None,
        "name": "ravenclaw",
    }
    group = target.object_from_dict(group_dict, strict=False)

    assert isinstance(group, Group)
    assert group.color == "blue"
    assert group.name == "ravenclaw"
    assert group.pk is None
    assert group.created_at == _datetime(2000, 1, 1, 10, 0, 0, 0)
    assert (len(group.users) == 1) and (isinstance(group.users[0], User))
    assert group.users[0].name == "foo"
    assert group.users[0].pk is None
    assert group.users[0].created_at == _datetime(2000, 1, 1, 10, 0, 0, 0)
nilq/baby-python
python
import numpy as np


def orient_mesh(vertices, axis):
    """Reorient the mesh represented by @vertices so that the z-axis is
    aligned with @axis.

    Applies a rotation about y (to remove the x-component of @axis) and
    then about x (to remove the y-component). Zero-length axes, or axes
    with no y/z component, skip the corresponding rotation.
    """
    norm_3d = np.sqrt(axis[0]**2 + axis[1]**2 + axis[2]**2)
    norm_yz = np.sqrt(axis[1]**2 + axis[2]**2)

    if norm_3d != 0:
        # Rotate around the y-axis
        theta_y = np.arccos(norm_yz / norm_3d)
        c, s = np.cos(theta_y), np.sin(theta_y)
        vertices = np.dot(vertices, [[c, 0, s], [0, 1, 0], [-s, 0, c]])

    if norm_yz != 0:
        # Rotate around the x-axis
        theta_x = np.arccos(axis[2] / norm_yz)
        c, s = np.cos(theta_x), np.sin(theta_x)
        vertices = np.dot(vertices, [[1, 0, 0], [0, c, -s], [0, s, c]])

    return vertices
nilq/baby-python
python
# -*- coding: utf-8 -*- import logging import math import random from PIL import Image, ImageDraw from .wallpaper_filter import WallpaperFilter from ..geom.point import Point from ..geom.size import Size logger = logging.getLogger(__name__) class Tunnel(WallpaperFilter): def _centroid(self, size) -> Point: return Point(size.width // 2, size.height // 2) def _filter(self, image: Image.Image, monitor: 'Monitor', position: Point) -> Image.Image: m_centre = self._centroid(monitor.size) i_centre = (Point(*position) + self._centroid(Size(*image.size))) angle = (math.degrees(-math.atan2(m_centre.y - i_centre.y, m_centre.x - i_centre.x))) if angle > 90: angle = 180 - angle if angle < -90: angle = 180 + angle logger.info('P1: %s, P2:%s, Angle: %s', m_centre, i_centre, angle) image = image.rotate(angle, expand=1, fillcolor=(0, 0, 0, 0)) return image class Jiggle(WallpaperFilter): def _filter(self, image: Image.Image, monitor: 'Monitor', position: Point) -> Image.Image: angle = 20 * random.random() - 10.0 image = image.rotate(angle, expand=1, fillcolor=(0, 0, 0, 0)) return image
nilq/baby-python
python
#!/usr/bin/env python
#-*- coding=UTF-8 -*-

import httplib2
import json
import random
import string
import time
import urllib

from securUtil import SecurUtil


class smsUtil():
    """Helper for sending template SMS through the NetEase (netease.im) API."""

    @staticmethod
    def baseHTTPSRequest(url, data):
        """POST 'data' (form-encoded) to 'url' with NetEase auth headers.

        Returns a (status, content) tuple from the HTTP response.
        """
        # Build the HTTP request headers required by the NetEase API.
        AppKey = '1958cd7bc542a299b0c3bc428f14006e'
        AppSecret = 'a3774be7f5a4'
        Nonce = ''.join(random.sample(string.ascii_letters + string.digits, random.randint(1, 62)))
        CurTime = '%.0f' % time.time()
        # Per the API contract: CheckSum = SHA1(AppSecret + Nonce + CurTime).
        CheckSum = SecurUtil.hashForString('sha1', '%s%s%s' % (AppSecret, Nonce, CurTime))

        headers = {
            'AppKey': AppKey,
            'Nonce': Nonce,
            'CurTime': CurTime,
            'CheckSum': CheckSum,
            'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8',
        }
        print(headers)

        conn = httplib2.Http(disable_ssl_certificate_validation=True)
        # Bug fix: the 'url' argument was previously ignored in favour of a
        # hard-coded endpoint; honour the caller-supplied URL instead.
        # NOTE: urllib.urlencode is Python 2 only (urllib.parse.urlencode on 3).
        resp, content = conn.request(url, method='POST', body=urllib.urlencode(data), headers=headers)
        print(resp.status)
        print(content)
        return resp.status, content

    @staticmethod
    def sendTemplate(templateid, mobiles, params):
        """Send SMS template 'templateid' to 'mobiles' with 'params'."""
        url = 'https://api.netease.im:443/sms/sendtemplate.action'
        data = {
            'templateid': templateid,
            'mobiles': mobiles,
            'params': params,
        }
        smsUtil.baseHTTPSRequest(url, data)
nilq/baby-python
python
""" drift ===== Drift calculation methods. """ from .continuous import drift_continuous from .roman import drift_roman
nilq/baby-python
python
# Copyright (c) 2019, Digi International, Inc. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import time from machine import Pin from xbee import relay # Pin D9 (ON/SLEEP/DIO9) LED_PIN_ID = "D9" print(" +------------------------------------------+") print(" | XBee MicroPython Relay Frames LED Sample |") print(" +------------------------------------------+\n") # Set up the LED pin object to manage the LED status. Configure the pin # as output and set its initial value to off (0). led_pin = Pin(LED_PIN_ID, Pin.OUT, value=0) while True: # Start reading relay frames. relay_frame = relay.receive() # If a relay frame is received, parse the data. if relay_frame is not None: data = relay_frame["message"].decode("utf-8") if data == "ON": led_pin.on() elif data == "OFF": led_pin.off() time.sleep(0.1)
nilq/baby-python
python
import math, glm


class Camera:
    """Free-look camera: a position plus yaw/pitch angles, from which the
    direction/right/up basis vectors are derived."""

    def __init__(self, position):
        self.position = position
        self.up = glm.vec3(0, 1, 0)
        self.worldUp = glm.vec3(0, 1, 0)
        self.pitch = 0
        self.yaw = 0
        self.speed = 20
        self.sensitivity = 0.25
        self.updateVectors()

    def moveRight(self, time):
        """Strafe right, scaled by elapsed 'time'."""
        self.position += self.right * (self.speed * time)

    def moveLeft(self, time):
        """Strafe left, scaled by elapsed 'time'."""
        self.position -= self.right * (self.speed * time)

    def moveTop(self, time):
        """Move forward along the view direction."""
        self.position += self.direction * (self.speed * time)

    def moveBottom(self, time):
        """Move backward along the view direction."""
        self.position -= self.direction * (self.speed * time)

    def rotate(self, offsetX, offsetY):
        """Apply mouse deltas to yaw/pitch, clamping pitch to +/-89 degrees."""
        self.yaw += offsetX * self.sensitivity
        self.pitch += offsetY * self.sensitivity
        # Clamp so the camera can never flip over the vertical.
        self.pitch = max(-89, min(89, self.pitch))
        self.updateVectors()

    def updateVectors(self):
        """Recompute the direction/right/up basis from yaw and pitch."""
        yaw_rad = glm.radians(self.yaw)
        pitch_rad = glm.radians(self.pitch)
        heading = glm.vec3(
            math.cos(yaw_rad) * math.cos(pitch_rad),
            math.sin(pitch_rad),
            math.sin(yaw_rad) * math.cos(pitch_rad),
        )
        self.direction = glm.normalize(heading)
        self.right = glm.normalize(glm.cross(self.direction, self.worldUp))
        self.up = glm.normalize(glm.cross(self.right, self.direction))

    def getViewMatrix(self):
        """Return the look-at view matrix for the current pose."""
        return glm.lookAt(self.position, self.position + self.direction, self.up)
nilq/baby-python
python
"""Tests for bfieldtools.contour: orientation of extracted contour
polylines and the direction of the magnetic field they generate."""

from bfieldtools import contour

import pytest

import numpy as np
from numpy.testing import (
    assert_array_almost_equal,
    assert_array_equal,
    assert_allclose,
    assert_equal,
)


def setup_contour_input():
    """ Load example mesh and create scalars data
    """
    from bfieldtools.utils import load_example_mesh

    mesh = load_example_mesh("unit_disc")

    # Radially increasing stream function: scalars = r**2.
    r = np.linalg.norm(mesh.vertices, axis=1)
    scalars = r ** 2

    return mesh, scalars


def compare_contour_direction_to_rotated_gradient(mesh, scalars, polyline):
    """
    Check inner product between the polyline edges the rotated gradient
    vectors closes to the initial points of those edges. These should point
    to the same direction.

    Parameters
    ----------
    mesh : trimesh mesh
    scalars : ndarray
        stream function
    polyline : ndarray (N, 3)
        coordinates of points representing a polyline

    """
    from bfieldtools.mesh_calculus import gradient

    edges = polyline[1:] - polyline[:-1]
    g = gradient(scalars, mesh, rotated=True).T
    fc = mesh.vertices[mesh.faces].mean(axis=1)
    norm = np.linalg.norm
    p = polyline
    # Find closest face centers to polyline nodes
    f_inds = np.argmin(norm(p[:, None, :] - fc[None, :, :], axis=-1), axis=1)
    g_poly = g[f_inds]
    # Edge vectors and the rotated gradient must point the same way.
    assert np.all(np.sum(g_poly[:-1] * edges, axis=1) > 0)


def compare_magnetic_field_directions(mesh, scalars, polys, test_point):
    """
    Check direction of magnetic calculated from mesh and stream function
    versus the direction of the field generated by contours

    Parameters
    ----------
    mesh : trimesh mesh
    scalars : ndarray
        stream function
    polys : list of ndarray (N, 3)
        coordinates of points representing a many polylines
    test_point: ndarray(1, 3)
        coordinates of a test point

    """
    from bfieldtools.line_magnetics import magnetic_field
    from bfieldtools.mesh_conductor import magnetic_field_coupling

    B_mesh = magnetic_field_coupling(mesh, test_point)
    b_stream_func = B_mesh @ scalars

    b_polys = np.array([magnetic_field(p, test_point) for p in polys])
    b_poly_sum = b_polys.sum(axis=0)

    # The summed contour field must have a positive projection on the
    # field computed directly from the stream function.
    assert np.sum(b_poly_sum * b_stream_func) > 0


def test_scalar_contour_direction():
    """ Test the direction of scalar_contour and the field direction
        generated by the scalar contour
    """
    mesh, scalars = setup_contour_input()
    N = 10
    polys, vals = contour.scalar_contour(
        mesh, scalars, N_contours=N, return_values=True
    )

    compare_contour_direction_to_rotated_gradient(mesh, scalars, polys[-1])

    test_point = np.array([[0, 0, 1]])
    compare_magnetic_field_directions(mesh, scalars, polys, test_point)


def test_simplify_contour():
    # TODO: placeholder — no real assertions for simplify_contour yet.
    assert True


if __name__ == "__main__":
    test_scalar_contour_direction()
nilq/baby-python
python
# Read a length in yards and print it converted to metres.
# Conversion formula used by this exercise: M = 0.91 * J

J = float(input("Digite um valor em jardas: "))
M = 0.91 * J
print("O valor em de jardas para metros é: %0.2f" % M)
nilq/baby-python
python
from .arch import Arch from .debian import Debian from .ubuntu import Ubuntu from .redhat import RedHat from .centos import CentOS
nilq/baby-python
python
"""Training script for multi-label Planet (Amazon) image classification
with PyTorch: training/validation loops, checkpointing and early stopping."""

import argparse
import torch
from torch.autograd import Variable
import model
import util
import data
import time
import torchvision.transforms as transforms
import shutil

# All model constructors exported by the local `model` module.
model_names = sorted(name for name in model.__dict__
                     if name.startswith("Planet") and callable(model.__dict__[name]))
print(model_names)


def weighted_multi_label_loss(p, y):
    """Binary cross-entropy over all labels, with the positive term
    down-weighted by 0.1; epsilons guard against log(0)."""
    return torch.neg(torch.mean(y * torch.log(p + 1e-10) * 0.1
                                + (1. - y) * torch.log(1. - p + 1e-10)))


def train(net, loader, criterion, optimizer, decay=0.):
    """Run one training epoch; returns the mean batch loss.

    :param net: network to train (switched to train mode).
    :param loader: DataLoader yielding (input, target) batches.
    :param criterion: loss function.
    :param optimizer: optimizer updated every batch.
    :param decay: optional L1 regularization coefficient.
    """
    net.train()
    avg_loss = 0.
    start = time.time()
    for i, (X, y) in enumerate(loader):
        input_var = torch.autograd.Variable(X)
        target_var = torch.autograd.Variable(y)

        output = net(input_var)
        loss = criterion(output, target_var)
        avg_loss += loss.data[0]

        # Only pay for the L1 penalty graph when a decay is requested.
        if decay:
            l1_crit = torch.nn.L1Loss(size_average=False)
            reg_loss = 0
            for param in net.parameters():
                reg_loss += l1_crit(param, Variable(torch.zeros(param.size()),
                                                    requires_grad=False))
            loss += decay * reg_loss

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if i % 20 == 0:
            dt = time.time() - start
            pct = float(i + 1) / len(loader)
            curr_loss = avg_loss / (i + 1)
            print('%fs elapsed \t'
                  '%f done \t'
                  '%f loss \t'
                  '%fs remaining' % (dt, pct * 100, curr_loss, dt / pct * (1. - pct)))
    return avg_loss / len(loader)


def validate(net, loader, criterion):
    """Evaluate the network on a loader; returns the mean batch loss."""
    net.eval()
    avg_loss = 0.
    for i, (X, y) in enumerate(loader):
        input_var = torch.autograd.Variable(X, volatile=True)  # no backprop
        target_var = torch.autograd.Variable(y)
        # Single forward pass (the original ran it twice per batch).
        output = net(input_var)
        loss = criterion(output, target_var)
        avg_loss += loss.data[0]
    return avg_loss / len(loader)


def save_model(model_state, filename='checkpoint.pth.tar', is_best=False):
    """Persist a checkpoint; additionally copy it to '<arch>-best.pth.tar'
    when it is the best one so far."""
    fname = model_state['arch'] + '-' + filename
    torch.save(model_state, fname)
    if is_best:
        shutil.copyfile(
            fname,
            model_state['arch'] + '-best.pth.tar')


def main(args):
    # create model and optimizer
    train_trans = []
    val_trans = []
    debug_trans = []
    siz = (256, 256)
    if args.flip:
        train_trans.append(transforms.RandomHorizontalFlip())
        train_trans.append(util.RandomVerticalFlip())
    if args.rotate:
        # NOTE(review): this appends RandomVerticalFlip again rather than a
        # rotation transform — looks like a copy/paste slip; confirm intent.
        train_trans.append(util.RandomVerticalFlip())
    if args.translate:
        train_trans.append(util.RandomTranslation())
    if args.scale > 0:
        train_trans.append(transforms.CenterCrop(224))
        train_trans.append(transforms.Scale(args.scale))
        val_trans.append(transforms.CenterCrop(224))
        val_trans.append(transforms.Scale(args.scale))
        debug_trans.append(transforms.CenterCrop(224))
        debug_trans.append(transforms.Scale(args.scale))
        siz = (args.scale, args.scale)
    if args.crop > 0:
        train_trans.append(transforms.RandomCrop(args.crop))
        val_trans.append(transforms.CenterCrop(args.crop))
        debug_trans.append(transforms.CenterCrop(args.crop))
        siz = (args.crop, args.crop)
    train_trans.append(transforms.ToTensor())
    val_trans.append(transforms.ToTensor())
    debug_trans.append(transforms.ToTensor())

    net = model.__dict__[args.model](input_size=siz, num_labels=17,
                                     dropout=args.dropout,
                                     feature_maps=args.features)
    print(net)
    optimizer = torch.optim.Adam(net.parameters(), weight_decay=args.l2_decay)
    criterion = torch.nn.MultiLabelSoftMarginLoss()
    print(net.feature_size)

    # optionally restore weights
    if args.resume is not None:
        print("=> loading checkpoint '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        best_loss = checkpoint['score']
        net.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
    else:
        best_loss = 1e10

    # load data
    debug_data = data.PlanetData(args.datapath + '/debug',
                                 args.datapath + '/img_labels.csv',
                                 args.datapath + '/labels.txt',
                                 transform=debug_trans)
    train_data = data.PlanetData(args.datapath + '/train',
                                 args.datapath + '/img_labels.csv',
                                 args.datapath + '/labels.txt',
                                 transform=train_trans)
    val_data = data.PlanetData(args.datapath + '/val',
                               args.datapath + '/img_labels.csv',
                               args.datapath + '/labels.txt',
                               transform=val_trans)
    debug_loader = torch.utils.data.DataLoader(
        debug_data, batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers)
    train_loader = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers)
    val_loader = torch.utils.data.DataLoader(
        val_data, batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers)

    # run training
    patience = args.patience
    for e in range(args.nepochs):
        start = time.time()
        # run 1 training epoch
        if args.debug:
            train_loss = train(net, debug_loader, criterion, optimizer,
                               decay=args.l1_decay)
            val_loss = 0.
        else:
            train_loss = train(net, train_loader, criterion, optimizer,
                               decay=args.l1_decay)
            val_loss = validate(net, val_loader, criterion)  # validate
        end = time.time()

        # checkpoint
        print('epoch %d \t'
              'time %f \t'
              'train loss %f \t'
              'val loss %f \t' % (e, end - start, train_loss, val_loss))
        model_state = {
            'epoch': e,
            'score': val_loss,
            'cfg': net.cfg,
            'arch': args.model,
            'state_dict': net.state_dict(),
            'optimizer': optimizer.state_dict()
        }
        save_model(model_state, 'checkpoint.pth.tar', val_loss < best_loss)

        # early stopping
        if val_loss < best_loss:
            best_loss = val_loss
            patience = args.patience
        else:
            patience -= 1
            if patience == 0:
                print('early_stopping')
                break


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-model", type=str, default='PlanetNet', help="model name")
    parser.add_argument("-patience", type=int, default=5, help="early stopping patience")
    parser.add_argument("-crop", type=int, default=0, help="crop size")
    parser.add_argument("-scale", type=int, default=0, help="scale size")
    parser.add_argument("-features", type=int, default=64, help="feature maps")
    parser.add_argument("-flip", type=bool, default=True, help="random flips")
    parser.add_argument("-rotate", type=bool, default=True, help="random rotation")
    parser.add_argument("-translate", type=bool, default=True, help="random translation")
    parser.add_argument("-debug", action="store_true", help="run on debug set")
    parser.add_argument("-dropout", type=float, default=0.5, help="dropout")
    parser.add_argument("-l1_decay", type=float, default=0., help="l1 weight decay")
    parser.add_argument("-l2_decay", type=float, default=0., help="l2 weight decay")
    parser.add_argument("-batch_size", type=int, default=128, help="batch size")
    parser.add_argument("-resume", type=str, default=None, help="resume training model file")
    parser.add_argument("-nepochs", type=int, default=100, help="max epochs")
    parser.add_argument("-workers", type=int, default=2, help="number of data loaders")
    parser.add_argument("datapath", type=str, help="data path")
    args = parser.parse_args()
    main(args)
nilq/baby-python
python
import abc


class RecurrentSupervisedLearningEnv(metaclass=abc.ABCMeta):
    """
    An environment that's really just a supervised learning task.
    """

    @abc.abstractmethod
    def get_batch(self, batch_size):
        """
        :param batch_size: Size of the batch size
        :return: tuple (X, Y) where

        X is a numpy array of size (
            batch_size, self.sequence_length, self.feature_dim
        )
        Y is a numpy array of size (
            batch_size, self.sequence_length, self.target_dim
        )
        """
        pass

    @property
    @abc.abstractmethod
    def feature_dim(self):
        """
        :return: Integer. Dimension of the features.
        """
        pass

    @property
    @abc.abstractmethod
    def target_dim(self):
        """
        :return: Integer. Dimension of the target.
        """
        pass

    @property
    @abc.abstractmethod
    def sequence_length(self):
        """
        :return: Integer. Length of each sequence.
        """
        pass
nilq/baby-python
python
'''
Created on Aug 9, 2013

@author: salchoman@gmail.com - salcho
'''


class wsResponse:
    """Value object for a single web-service probe response.

    Bundles the request id/params/size, the raw response object and body,
    the payload that produced the response and the plugin that issued it.
    """

    def __init__(self, id=-1, params=None, size=-1, response=None,
                 payload=None, plugin=None):
        """
        :param id: numeric identifier of the request (-1 when unknown).
        :param params: parameters sent with the request, if any.
        :param size: size of the exchange (-1 when unknown).
        :param response: 2-tuple (response object, body) or None.
        :param payload: payload used for the request, if any.
        :param plugin: plugin that generated this response, if any.
        """
        self.id = id
        self.params = params
        self.size = size
        # Tolerate a missing response tuple instead of raising TypeError;
        # falsy elements are normalized to None as before.
        if response is not None:
            self.response = response[0] if response[0] else None
            self.body = response[1] if response[1] else None
        else:
            self.response = None
            self.body = None
        # NOTE(review): the status code used to be parsed from the response
        # (see the commented-out line in history); currently hard-coded.
        self.http_code = 200
        self.payload = payload
        self.plugin = plugin

    def getID(self):
        return self.id

    def getParams(self):
        return self.params

    def getSize(self):
        return self.size

    def getBody(self):
        return self.body

    def getHTTPCode(self):
        return self.http_code

    def getResponse(self):
        return self.response

    def getPayload(self):
        return self.payload

    def getPlugin(self):
        return self.plugin
nilq/baby-python
python
import tensorflow as tf


def iou(source, target):
    """Calculates intersection over union (IoU) for two sets of objects
    with box representations.

    This uses simple arithmetic and outer products to calculate the IoU
    between all pairs without looping.

    Parameters
    ----------
    source: tensor (float32)
        M x 4 tensor where each row contains the x,y location of the upper left
        corner of a box and its width and height in that order. Typically the
        predictions.
    target: tensor (float32)
        N x 4 tensor where each row contains the x,y location of the upper left
        corner of a box and its width and height in that order. Typically the
        ground truth.

    Returns
    -------
    iou: tensor (float32)
        M x N tensor containing IoU values between source and target boxes.
    """

    # split into corners and sizes
    xs, ys, ws, hs = tf.split(source, 4, axis=1)
    xt, yt, wt, ht = tf.split(target, 4, axis=1)

    # signed overlap along each axis (negative when boxes are disjoint)
    horizontal = tf.minimum(xs + ws, tf.transpose(xt + wt)) - tf.maximum(
        xs, tf.transpose(xt)
    )
    vertical = tf.minimum(ys + hs, tf.transpose(yt + ht)) - tf.maximum(
        ys, tf.transpose(yt)
    )

    # calculate intersection
    intersection = tf.maximum(0.0, horizontal) * tf.maximum(0.0, vertical)

    # calculate iou
    iou = intersection / (ws * hs + tf.transpose(wt * ht) - intersection)

    return iou


def _greedy_iou_mapping_iter(i, ious, source_mask, target_mask, matches):
    """Performs one iteration of greedy IoU mapping.

    This is the loop body of the greedy IoU mapping algorithm. This identifies
    the best match having the highest IoU and removes the corresponding
    prediction and ground truth element from future consideration in matching.

    Parameters
    ----------
    i: int32
        Iteration number in mapping. Used for writing to output TensorArray.
    ious: tensor (float32)
        M x N tensor of IoU values used to generate mapping. Regression
        predictions are in rows and ground truth elements are in columns.
        This array is masked to remove previous matches when identifying the
        highest IoU match.
    source_mask: tensor (bool)
        1D M-length tensor where unmatched predictions are represented
        by 'True'.
    target_mask: tensor (bool)
        1D M-length tensor where unmatched ground truth elements are
        represented by 'True'.
    matches: tensor (float32)
        2D tensor where each row represents a match, containing the indices
        of the matched prediction and ground truth element in that order.

    Returns
    -------
    i: int32
        Loop iteration counter.
    ious: tensor (float32)
        Same as input but updated with current iteration match.
    source_mask: tensor (bool)
        Same as input but updated with current iteration match.
    target_mask: tensor (bool)
        Same as input but updated with current iteration match.
    matches: tensor (float32)
        Same as input but updated with current iteration match.
    """

    # mask targets and get best match for each source (mask applied once)
    masked_ious = tf.boolean_mask(ious, target_mask, axis=1)
    maxima = tf.reduce_max(masked_ious, axis=1)
    target_indices = tf.argmax(masked_ious, axis=1)

    # mask sources that were already matched
    maxima = tf.boolean_mask(maxima, source_mask)
    target_indices = tf.boolean_mask(target_indices, source_mask)

    # get source and target indices ('best_iou' avoids shadowing builtin max)
    best_iou = tf.reduce_max(maxima)
    source_index = tf.argmax(maxima)
    target_index = tf.gather(target_indices, source_index)

    # correct for masked sources and targets
    source_index = tf.gather(tf.where(source_mask), source_index)
    target_index = tf.gather(tf.where(target_mask), target_index)

    # update masks
    source_mask = tf.tensor_scatter_nd_update(source_mask, [source_index],
                                              [tf.constant(False)])
    target_mask = tf.tensor_scatter_nd_update(target_mask, [target_index],
                                              [tf.constant(False)])

    # write (source, target, iou) to TensorArray
    matches = matches.write(
        i,
        tf.concat([tf.cast(source_index, tf.float32),
                   tf.cast(target_index, tf.float32),
                   [best_iou]], axis=0)
    )

    # update index
    i = i + 1

    return i, ious, source_mask, target_mask, matches


def greedy_iou_mapping(ious, min_iou):
    """Calculates greedy IoU mapping between predictions and ground truth.

    Uses intersection-over-union scores to compute a greedy mapping between
    ground truth and predicted objects. Greedy mapping can produce suboptimal
    results compared to the Kuhn–Munkres algorithm since matching is greedy.

    Parameters
    ----------
    ious: tensor (float32)
        M x N tensor of IoU values used to generate mapping. Regression
        predictions are in rows and ground truth elements are in columns.
        This array is masked to remove previous matches when identifying the
        highest IoU match.
    min_iou: float32
        Minimum IoU threshold for defining a match between a regression
        prediction and a ground truth box.

    Returns
    -------
    tp: int32
        True positive count of IoU mapping.
    fp: int32
        False positive count of IoU mapping.
    fn: int32
        False negative count of IoU mapping.
    tp_list: int32
        Two-dimensional tensor containing indices of true positive predictions
        in first column, and corresponding matching ground truth indices in
        second column.
    fp_list: int32
        One-dimensional tensor containing indices of false positive
        predictions.
    fn_list: int32
        One-dimensional tensor containing indices of false negative
        ground truth.
    """

    # initialize masks
    source_mask = tf.ones(tf.shape(ious)[0], tf.bool)
    target_mask = tf.ones(tf.shape(ious)[1], tf.bool)

    # define loop counter, condition, store for output
    i = tf.constant(0)
    matches = tf.TensorArray(tf.float32, size=tf.shape(ious)[0],
                             dynamic_size=False)

    def condition(i, a, b, c, d):
        return tf.less(i, tf.minimum(tf.shape(ious)[0], tf.shape(ious)[1]))

    # loop to perform greedy mapping
    _, _, _, _, matches = tf.while_loop(
        condition,
        _greedy_iou_mapping_iter,
        [i, ious, source_mask, target_mask, matches],
        parallel_iterations=10,
    )

    # stack outputs
    matches = matches.stack()

    # discard matches that do not meet min_iou
    matches = tf.boolean_mask(matches,
                              tf.greater_equal(matches[:, 2], min_iou), axis=0)

    # calculate TP, FP, FN
    tp = tf.shape(matches)[0]
    fp = tf.shape(ious)[0] - tf.shape(matches)[0]
    fn = tf.shape(ious)[1] - tf.shape(matches)[0]

    # generate lists of indexes for TP, FP, FN
    tp_list = tf.cast(matches[:, 0:2], tf.int32)
    fp_list = tf.sets.difference(
        [tf.range(tf.shape(ious)[0], dtype=tf.int32)],
        [tf.cast(matches[:, 0], dtype=tf.int32)]
    ).values
    fn_list = tf.sets.difference(
        [tf.range(tf.shape(ious)[1], dtype=tf.int32)],
        [tf.cast(matches[:, 1], dtype=tf.int32)]
    ).values

    return tp, fp, fn, tp_list, fp_list, fn_list
nilq/baby-python
python
# Generated by Django 3.0.3 on 2020-08-10 13:48

from django.db import migrations


class Migration(migrations.Migration):
    """Auto-generated migration: renames Coupon.user to Coupon.users."""

    dependencies = [
        ('ecommerce_platform', '0015_auto_20200810_1252'),
    ]

    operations = [
        migrations.RenameField(
            model_name='coupon',
            old_name='user',
            new_name='users',
        ),
    ]
nilq/baby-python
python
"""Tests for tarpan.cmdstanpy.waic: WAIC values, model comparison tables,
and their CSV/TXT/plot outputs (numeric expectations are regression values)."""

import pytest
from pytest import approx
import os
import shutil
import numpy as np
import pandas as pd
from tarpan.testutils.a03_cars.cars import get_fit

from tarpan.cmdstanpy.waic import (
    waic, compare_waic, save_compare_waic_csv,
    save_compare_waic_txt, waic_compared_to_df,
    WaicData, WaicModelCompared, compare_waic_tree_plot,
    save_compare_waic_tree_plot)

from tarpan.testutils.a04_height.height import (
    get_fit1_intercept, get_fit2_fungus_treatment, get_fit3_treatment)


def test_waic():
    fit = get_fit()

    result = waic(fit)

    assert result.waic == approx(421.5135196466395, rel=1e-15)
    assert len(result.waic_pointwise) == 50
    assert result.waic_pointwise[0] == approx(7.284060083431996, rel=1e-15)
    assert result.waic_pointwise[49] == approx(7.324510608904949, rel=1e-15)
    assert result.waic_std_err == approx(16.327468671341204, rel=1e-15)

    assert result.lppd == approx(-206.5875738029627, rel=1e-15)
    assert len(result.lppd_pointwise) == 50
    assert result.lppd_pointwise[0] == approx(-3.6203241203579615, rel=1e-15)
    assert result.lppd_pointwise[49] == approx(-3.641419673133626, rel=1e-15)

    assert result.penalty == approx(4.169186020357044, rel=1e-15)
    assert len(result.penalty_pointwise) == 50
    assert result.penalty_pointwise[0] == approx(0.021705921358036437,
                                                 rel=1e-15)
    assert result.penalty_pointwise[49] == approx(0.020835631318848448,
                                                  rel=1e-15)


def test_compare_waic():
    fit1_intercept = get_fit1_intercept()
    fit2_fungus_treatment = get_fit2_fungus_treatment()
    fit3_treatment = get_fit3_treatment()

    models = {
        "Itercept": fit1_intercept,
        "Fungus+treatment": fit2_fungus_treatment,
        "Treatment": fit3_treatment
    }

    result = compare_waic(models=models)

    assert [model.name for model in result] == ['Fungus+treatment',
                                                'Treatment',
                                                'Itercept']

    assert [round(model.waic_data.waic, 2) for model in result] == \
        [361.45, 402.71, 405.93]

    assert [round(model.waic_data.waic_std_err, 2) for model in result] == \
        [13.34, 10.78, 11.29]

    difference = [
        None if model.waic_difference_best is None else
        round(model.waic_difference_best, 2)
        for model in result
    ]

    assert difference == [None, 41.27, 44.48]

    std_err = [
        None if model.waic_difference_best_std_err is None else
        round(model.waic_difference_best_std_err, 2)
        for model in result
    ]

    assert std_err == [None, 9.82, 11.55]

    assert [round(model.waic_data.penalty, 1) for model in result] == \
        [3.4, 2.6, 1.6]

    actual_weight = [
        round(model.weight, 5) for model in result
    ]

    assert actual_weight == [0.99986, 2e-05, 0.00012]


def test_compare_waic__model_with_different_data_points():
    cars_fit = get_fit()
    plants_fit = get_fit1_intercept()

    models = {
        "Cars": cars_fit,
        "Plants": plants_fit
    }

    with pytest.raises(AttributeError,
                       match=r"different number of data points"):
        compare_waic(models=models)


def test_waic_compared_to_df():
    # Build three synthetic compared models with arithmetic-progression
    # fields, then check the DataFrame conversion row by row.
    compared = []

    for i in range(1, 4):
        waic = WaicData(
            waic=i,
            waic_pointwise=[i] * 3,
            waic_std_err=i * 1.1,
            lppd=i * 1.2,
            lppd_pointwise=[i * 1.2] * 3,
            penalty=i * 0.3,
            penalty_pointwise=[i * 0.3] * 3,
        )

        compared_element = WaicModelCompared(
            name=f"Model {i}",
            waic_data=waic,
            waic_difference_best=i * 1.3,
            waic_difference_best_std_err=i * 1.4,
            weight=i * 1.7
        )

        compared.append(compared_element)

    result = waic_compared_to_df(compared=compared)

    assert len(result) == 3

    row = result.loc["Model 1"]
    assert row["WAIC"] == 1
    assert row["SE"] == 1.1
    assert row["dWAIC"] == 1.3
    assert row["dSE"] == 1.4
    assert row["pWAIC"] == 0.3
    assert row["Weight"] == 1.7

    row = result.loc["Model 2"]
    assert row["WAIC"] == 2
    assert row["SE"] == 2.2
    assert row["dWAIC"] == 2.6
    assert row["dSE"] == 2.8
    assert row["pWAIC"] == 0.6
    assert row["Weight"] == 3.4


def test_save_compare_waic_csv():
    fit1_intercept = get_fit1_intercept()
    fit2_fungus_treatment = get_fit2_fungus_treatment()
    fit3_treatment = get_fit3_treatment()

    models = {
        "Itercept": fit1_intercept,
        "Fungus+treatment": fit2_fungus_treatment,
        "Treatment": fit3_treatment
    }

    outdir = "tarpan/cmdstanpy/model_info/waic_test"

    if os.path.isdir(outdir):
        shutil.rmtree(outdir)

    save_compare_waic_csv(models=models)

    assert os.path.isfile(os.path.join(outdir, "compare_waic.csv"))

    df = pd.read_csv(os.path.join(outdir, "compare_waic.csv"),
                     index_col="Name")

    assert len(df) == 3

    row = df.loc["Fungus+treatment"]
    assert row["WAIC"] == approx(361.44, rel=1e-3)
    assert row["SE"] == approx(13.33, rel=1e-3)
    assert np.isnan(row["dWAIC"])
    assert np.isnan(row["dSE"])
    assert row["pWAIC"] == approx(3.4388, rel=1e-3)
    assert row["Weight"] == approx(0.99985, rel=1e-3)

    row = df.loc["Itercept"]
    assert row["WAIC"] == approx(405.93, rel=1e-3)
    assert row["SE"] == approx(11.292, rel=1e-3)
    assert row["dWAIC"] == approx(44.48, rel=1e-3)
    assert row["dSE"] == approx(11.55, rel=1e-3)
    assert row["pWAIC"] == approx(1.5745, rel=1e-3)
    assert row["Weight"] == approx(0.00012332, rel=1e-3)


def test_save_compare_waic_txt():
    fit1_intercept = get_fit1_intercept()
    fit2_fungus_treatment = get_fit2_fungus_treatment()
    fit3_treatment = get_fit3_treatment()

    models = {
        "Itercept": fit1_intercept,
        "Fungus+treatment": fit2_fungus_treatment,
        "Treatment": fit3_treatment
    }

    outdir = "tarpan/cmdstanpy/model_info/waic_test"

    if os.path.isdir(outdir):
        shutil.rmtree(outdir)

    save_compare_waic_txt(models=models)

    assert os.path.isfile(os.path.join(outdir, "compare_waic.txt"))

    with open(os.path.join(outdir, "compare_waic.txt"), 'r') as file:
        data = file.read()
        assert "dWAIC" in data
        assert "Treatment" in data
        assert "402.71" in data


def test_compare_waic_tree_plot():
    fit1_intercept = get_fit1_intercept()
    fit2_fungus_treatment = get_fit2_fungus_treatment()
    fit3_treatment = get_fit3_treatment()

    models = {
        "Itercept": fit1_intercept,
        "Fungus+treatment": fit2_fungus_treatment,
        "Treatment": fit3_treatment
    }

    fig, ax = compare_waic_tree_plot(models=models)

    assert ax.get_xlabel() == "WAIC (deviance)"


def test_save_compare_waic_tree_plot():
    fit1_intercept = get_fit1_intercept()
    fit2_fungus_treatment = get_fit2_fungus_treatment()
    fit3_treatment = get_fit3_treatment()

    models = {
        "Itercept": fit1_intercept,
        "Fungus+treatment": fit2_fungus_treatment,
        "Treatment": fit3_treatment
    }

    outdir = "tarpan/cmdstanpy/model_info/waic_test"

    if os.path.isdir(outdir):
        shutil.rmtree(outdir)

    save_compare_waic_tree_plot(models=models)

    assert os.path.isfile(os.path.join(outdir, "compare_waic.pdf"))
nilq/baby-python
python
# -*- coding: utf-8 -*-
# @Date    : 2019-07-26
# @Author  : Xinyu Gong (xy_gong@tamu.edu)
# @Link    : None
# @Version : 0.0

# Precomputes Inception activation statistics (mu, sigma) for a directory
# of JPEG images and stores them as a compressed .npz for later FID use.

import os
import glob
import argparse
import numpy as np
# NOTE(review): scipy.misc.imread was removed in scipy >= 1.2; newer
# environments need imageio.imread instead — confirm the pinned scipy.
from scipy.misc import imread
import tensorflow as tf

import utils.fid_score as fid


def parse_args():
    """Parse --data_path (required) and --output_file CLI arguments."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--data_path',
        type=str,
        required=True,
        help='set path to training set jpg images dir')
    parser.add_argument(
        '--output_file',
        type=str,
        default='fid_stat/fid_stats_cifar10_train.npz',
        help='path for where to store the statistics')
    opt = parser.parse_args()
    print(opt)
    return opt


def main():
    args = parse_args()

    ########
    # PATHS
    ########
    data_path = args.data_path
    output_path = args.output_file
    # if you have downloaded and extracted
    # http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
    # set this path to the directory where the extracted files are, otherwise
    # just set it to None and the script will later download the files for you
    inception_path = None
    print("check for inception model..", end=" ", flush=True)
    # download inception if necessary
    inception_path = fid.check_or_download_inception(inception_path)
    print("ok")

    # loads all images into memory (this might require a lot of RAM!)
    print("load images..", end=" ", flush=True)
    image_list = glob.glob(os.path.join(data_path, '*.jpg'))
    images = np.array([imread(str(fn)).astype(np.float32) for fn in image_list])
    print("%d images found and loaded" % len(images))

    print("create inception graph..", end=" ", flush=True)
    # load the graph into the current TF graph
    fid.create_inception_graph(inception_path)
    print("ok")

    print("calculte FID stats..", end=" ", flush=True)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        mu, sigma = fid.calculate_activation_statistics(images, sess,
                                                        batch_size=100)
        np.savez_compressed(output_path, mu=mu, sigma=sigma)
    print("finished")


if __name__ == '__main__':
    main()
nilq/baby-python
python
# -*- coding: utf-8 -*- from __future__ import absolute_import ''' @author: Jinpeng LI @contact: mr.li.jinpeng@gmail.com @organization: I2BM, Neurospin, Gif-sur-Yvette, France @organization: CATI, France @organization: U{IFR 49<http://www.ifr49.org>} @license: U{CeCILL version 2<http://www.cecill.info/licences/Licence_CeCILL_V2-en.html>} ''' ''' start to check the requirement on the server side ''' import os import sys resName = None i = 0 while i < len(sys.argv): if sys.argv[i] == "-r": resName = sys.argv[i + 1] break i = i + 1 lines2cmd = [ "kill $(ps -ef | grep 'python -m soma_workflow.start_database_server' | grep '%s' \ | grep -v grep | awk '{print $2}')" % (resName), "rm ~/.soma-workflow.cfg" ] for line2cmd in lines2cmd: os.system("echo '%s' " % (line2cmd)) os.system(line2cmd)
nilq/baby-python
python
from dataclasses import dataclass, field
from typing import Dict

from lf3py.serialization.deserializer import DictDeserializer


@dataclass
class SNSMessage(DictDeserializer):
    """Deserializable container for an SNS notification.

    message: the notification body text.
    attributes: SNS message attributes, keyed by attribute name; each value
        is a string-to-string mapping (e.g. Type/Value pairs).
    """
    message: str = ''
    attributes: Dict[str, Dict[str, str]] = field(default_factory=dict)
nilq/baby-python
python
from utils import *


def cross_val_split(dataset, folds):
    """Shuffle the dataset and split it into `folds` subsets of almost equal
    size, for cross validation.

    :param dataset: The dataset to be splitted (2-D array, one row per sample).
        NOTE: it is shuffled IN PLACE (np.random.shuffle mutates its argument).
    :param folds: The number of folds to be created.
    :return: The dataset in cuts (list of array chunks).
    """
    np.random.shuffle(dataset)
    return np.array_split(dataset, folds)


def train_folds_merge(folds, test_id):
    """Glue together the folds of training splits into a unified train set.

    :param folds: The folds produced from the dataset segmentation for
        crossvalidation.
    :param test_id: The one fold that should be excluded to play the role of
        the test set.
    :return: The unified train set (stacked rows of every non-test fold).
    """
    return np.vstack([folds[j] for j in range(len(folds)) if j != test_id])


def plot_accuracies(accuracies, num_of_models):
    """Plot a graph with the accuracies on the y axis and the different
    models on the x axis; the best model is highlighted.

    The figure is saved to "2.3.png" and then displayed.
    """
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots(1, 1, figsize=(15, 10), dpi=100)
    xs = np.arange(1, num_of_models + 1)
    ax.plot(xs, accuracies)
    acc_best = np.argmax(accuracies) + 1
    maxim = ax.scatter(acc_best, np.max(accuracies), marker='o', color='red')
    # NOTE(review): grid's `b=` kwarg was renamed `visible=` in matplotlib
    # >= 3.6 -- confirm the pinned matplotlib version before upgrading.
    ax.grid(b=True, color='grey', linestyle='-.', linewidth=0.5, zorder=0)
    ax.legend([maxim], ["Best Accuracy"])
    ax.set_title('Accuracy vs Algorithm selected')
    ax.set_ylabel('Accuracy')
    ax.set_xlabel('Methods')
    # Set the tick positions before the labels so they line up.
    ax.set_xticks(xs)
    ax.set_xticklabels(['quest_a', 'quest_b', 'quest_c', 'quest_d'])
    plt.xticks(rotation=25)
    # BUG FIX: save BEFORE show(). With non-interactive backends show()
    # finishes the figure, so the original savefig-after-show wrote a blank
    # image file.
    plt.savefig("2.3.png")
    plt.show()


def predictClass(x, mus, sigmas, X_train, number_of_classes, class_probabilities):
    """For every model, calculate the likelihood for each class, and pick
    the class with max likelihood.

    :param x: The datapoint we want to derive the class for.
    :param mus: A list with the mean vector for each method.
        First three are for first class, next three for second class, etc.
    :param sigmas: A list with the covariance matrix for each method.
        Same layout as mus.
    :param X_train: The train set - needed for Parzen Windows method.
    :param number_of_classes: The number of different classes in the dataset.
    :param class_probabilities: An array with the probability of each class.
    :return: A vector with the predicted classes by each model.
    """
    predictions = []

    # For the parametric methods (questions a-c).
    # FIX: derive the per-class model count from number_of_classes instead of
    # the previous hard-coded `len(mus) / 2`, which was only valid for
    # binary problems. Identical result for two classes.
    number_of_models = len(mus) // number_of_classes
    for i in range(number_of_models):
        method_likelihoods = []
        for j in range(number_of_classes):
            # the index "jumps" over the other methods in the lists.
            index = i + j * number_of_models
            # The Bayes classifier rule: likelihood times class prior.
            prob = gaussian(x, mus[index], sigmas[index]) * class_probabilities[j]
            method_likelihoods.append(prob)
        predictions.append(np.argmax(method_likelihoods))

    # For the non-parametric method (question d).
    # NOTE(review): question_d is evaluated on the WHOLE train set for every
    # class, so the class loop only scales by the prior -- presumably it
    # should use the per-class subset; confirm against utils.question_d.
    method_likelihoods = []
    for j in range(number_of_classes):
        sumlog_pi = question_d(X_train, x)
        p_i = sumlog_pi * class_probabilities[j]  # The Bayes classifier rule
        method_likelihoods.append(p_i)
    predictions.append(np.argmax(method_likelihoods))

    return predictions


def main():
    """Dataset is split in folds in order to implement k-fold cross
    validation, based on which the average accuracy is computed. For each
    fold, the underlying pdfs are derived and accuracy is measured.
    """
    data = loaddataset()
    folds = 5
    num_of_models = 4
    data_splits = cross_val_split(data, folds)

    # One row per model, one column per fold. Holds correct-prediction counts
    # while a fold is processed, then each column is converted to a percentage
    # in place before moving to the next fold.
    # BUG FIX: float dtype -- the original `dtype=int` array truncated the
    # fractional part of those percentages on assignment.
    accuracies = np.zeros((num_of_models, folds))

    # For each fold:
    for t, test_set in enumerate(data_splits):
        train_set = train_folds_merge(data_splits, t)
        X_train = train_set[:, :-1]
        y_train = train_set[:, -1:]
        X_test = test_set[:, :-1]
        y_test = test_set[:, -1:]

        number_of_classes = len(np.unique(y_train))
        # array with the (empirical prior) probability for each class
        class_probabilities = np.zeros(number_of_classes)

        # Lists with model parameters, utilized in predictClass().
        mus = []
        sigmas = []

        # Training
        for i in range(number_of_classes):
            # Rows of the training set labelled with class i (boolean mask
            # replaces the original per-row list comprehension).
            subset = X_train[y_train[:, 0] == i]

            # The class probability for this class and fold.
            class_probabilities[i] = subset.shape[0] / X_train.shape[0]

            # Get pdf parameters for each class.
            mus_a, sigmas_a = question_a(subset)
            mus.append(mus_a)
            sigmas.append(sigmas_a)

            mus_b, sigmas_b = question_b(subset)
            mus.append(mus_b)
            sigmas.append(sigmas_b)

            mus_c, sigmas_c = question_c(subset)
            mus.append(mus_c)
            sigmas.append(sigmas_c)

            # Question d: training for the non-parametric model takes place
            # right before testing, because there aren't any parameters to be
            # learned beforehand.

        # Testing:
        for i in range(X_test.shape[0]):
            # Take the predictions from all methods.
            preds = predictClass(X_test[i], mus, sigmas, X_train,
                                 number_of_classes, class_probabilities)
            # Evaluate these predictions.
            target = y_test[i]
            for j, pred in enumerate(preds):
                if pred == target:
                    accuracies[j, t] += 1

        # Lastly, the accuracies for this fold are calculated in place.
        accuracies[:, t] = accuracies[:, t] * 100 / X_test.shape[0]

    # We average the accuracies of the k-fold crossvalidation.
    accurates_counts_avg = accuracies.mean(axis=1)
    print("Average Accuracy over {}-fold cross validation:\n".format(folds))
    print("Assumption A: {}%".format(round(accurates_counts_avg[0], 1)))
    print("Assumption B: {}%".format(round(accurates_counts_avg[1], 1)))
    print("Assumption C: {}%".format(round(accurates_counts_avg[2], 1)))
    print("Assumption D: {}%".format(round(accurates_counts_avg[3], 1)))

    # ---------------plot accuracies
    plot_accuracies(accurates_counts_avg, num_of_models)


if __name__ == "__main__":
    main()
nilq/baby-python
python
import random
from abc import ABCMeta, abstractmethod
from collections import defaultdict, Counter, OrderedDict
import math

import numpy as np

from gtd.log import indent
from wge.rl import Trace


def normalize_counts(counts):
    """Return a normalized Counter object.

    Values are divided by their total so they sum to 1. The input Counter
    is not modified; a new Counter is returned.
    """
    normed = Counter()
    total = float(sum(list(counts.values()), 0.0))
    assert total > 0  # cannot normalize empty Counter
    for key, ct in list(counts.items()):
        normed[key] = ct / total
    return normed


class ReplayBuffer(object, metaclass=ABCMeta):
    """Abstract interface for a buffer of Episodes to replay."""

    @abstractmethod
    def sample(self, num_episodes):
        """Sample WITH replacement from the buffer.

        Args:
            num_episodes (int): number of episodes to return.

        Returns:
            sampled_episodes (list[Episode])
            sample_probs (list[float]): probability of sampling the episode
            trace (ReplayBufferSampleTrace)
        """
        raise NotImplementedError

    @abstractmethod
    def extend(self, episodes):
        """Extends the buffer with the given episodes.

        Randomly evicts episodes from the buffer as necessary.

        Args:
            episodes (list[Episode])
        """
        raise NotImplementedError

    @abstractmethod
    def __len__(self):
        raise NotImplementedError

    @abstractmethod
    def status(self):
        """A human-readable string describing the status of the buffer."""
        raise NotImplementedError


class UniformReplayBuffer(ReplayBuffer):
    """Minimalist replay buffer.

    Unbounded storage; sampling is uniform with replacement and every
    reported sample probability is 1.0.
    """

    def __init__(self):
        self._episodes = []

    def __len__(self):
        return len(self._episodes)

    def sample(self, num_episodes):
        # Uniform with replacement over everything currently stored.
        indices = np.random.choice(len(self._episodes), size=num_episodes, replace=True)
        episodes = [self._episodes[i] for i in indices]
        probs = [1.] * len(episodes)
        trace = None
        return episodes, probs, trace

    def extend(self, episodes):
        # No eviction: the buffer grows without bound.
        self._episodes.extend(episodes)

    def status(self):
        return 'size: {}'.format(len(self))


class ReplayBufferNotReadyException(Exception):
    """Raised when no group buffer has enough episodes to sample from yet."""
    pass


class RewardPrioritizedReplayBuffer(ReplayBuffer):
    def __init__(self, max_size, sampling_quantile, discount_factor):
        """RewardPrioritizedReplayBuffer.

        Lowest-reward episodes are evicted when the buffer becomes full.
        Buffer only samples from the top K-quantile of what it contains.
        (where K = sampling_quantile)

        Args:
            max_size (int): max size of the buffer.
            sampling_quantile (float): should be in (0, 1]
            discount_factor (float)
        """
        self.max_size = max_size
        self.sampling_quantile = sampling_quantile
        self._discount_factor = discount_factor
        # this should always be sorted from highest-reward to lowest-reward
        self._episodes = []

    def __len__(self):
        return len(self._episodes)

    def sample(self, num_episodes):
        """Sample WITHOUT repeats from the top quantile of stored episodes."""
        n = len(self)
        if n == 0:
            raise RuntimeError('Cannot sample from an empty buffer.')

        # only sample as many as are contained in the buffer
        num_episodes = min(num_episodes, len(self))

        # only sample from the top k-quantile
        sample_limit = int(math.ceil(n * self.sampling_quantile))
        # if the top k-quantile isn't large enough to get num_episodes
        # unique episodes, expand it
        sample_limit = max(sample_limit, num_episodes)

        # don't ever sample the same thing twice (replace=False)
        sample_indices = list(np.random.choice(sample_limit, size=num_episodes, replace=False))
        sample_episodes = [self._episodes[i] for i in sample_indices]
        sample_probs = [1.] * len(sample_episodes)
        # TODO(kelvin): similar to the old replay buffer, we are just
        # hacking sample_probs to be all 1s right now
        trace = PrioritizedRewardReplayBufferTrace(self._episodes)
        return sample_episodes, sample_probs, trace

    def extend(self, episodes):
        # only add episodes with full reward (undiscounted return == 1)
        episodes = [ep for ep in episodes if ep.discounted_return(0, 1.) == 1]
        # TODO(kelvin): just create a FullRewardOnlyBuffer, rather than
        # hacking RewardPrioritizedBuffer

        # DISABLED: only add episodes with positive reward
        # episodes = [ep for ep in episodes if ep.discounted_return(0, 1.) > 0]

        self._episodes.extend(episodes)
        if len(self._episodes) > self.max_size:
            # sorted() is stable, so without shuffling, equal-reward episodes
            # would keep their insertion order and eviction would undesirably
            # favor older episodes; the shuffle breaks those ties randomly.
            shuffled_episodes = list(self._episodes)
            random.shuffle(shuffled_episodes)
            sorted_episodes = sorted(shuffled_episodes,
                                     key=lambda ep: ep.discounted_return(0, 1.),
                                     reverse=True)
            # keep only the max_size highest-reward episodes
            self._episodes = sorted_episodes[:self.max_size]

    def status(self):
        if len(self) == 0:
            return 'empty'
        rewards = sorted(ep.discounted_return(0, 1.) for ep in self._episodes)
        median = rewards[int(len(rewards) / 2)]
        min = rewards[0]
        max = rewards[-1]
        mean = sum(rewards) / len(rewards)
        return 'n={n:<4} mean={mean:.2f} range=[{min:.2f}, {max:.2f}] median={median:.2f}'.format(
            n=len(rewards), min=min, median=median, max=max, mean=mean)


class GroupedReplayBuffer(ReplayBuffer):
    """Buffer of Episodes to replay."""

    def __init__(self, episode_grouper, episode_identifier, buffer_factory,
                 min_group_size):
        """Construct replay buffer.

        WARNING:
        We assume that the probability of sampling an episode is just 1.
        Compared to using the real sample prob (which can be easily computed),
        this is more stable for downstream importance sampling.

        We already violate the assumptions of importance sampling, because
        our proposal distribution doesn't have full support over the target
        distribution. Exact sample probs actually exacerbate the problem.
        Approximate sample probs somewhat mitigate the problem.

        Args:
            episode_grouper (Callable[Episode, object]): see self._sample_from_groups
            episode_identifier (Callable[Episode, object]): see self._sample_from_groups
            buffer_factory (Callable[[], ReplayBuffer): creates a brand new buffer
            min_group_size (int): if a group's buffer is smaller than this size,
                we will not sample from it.
        """
        # defaultdict lazily creates a fresh sub-buffer per new group label
        self._group_buffers = defaultdict(buffer_factory)
        self._episode_grouper = episode_grouper
        self._episode_identifier = episode_identifier
        self._min_group_size = min_group_size

    def sample(self, num_episodes):
        # only groups that have reached the minimum size are eligible
        group_labels = [label for label, buffer in list(self._group_buffers.items())
                        if len(buffer) >= self._min_group_size]

        if len(group_labels) == 0:
            # none of the buffers are ready
            raise ReplayBufferNotReadyException()

        # split the requested episode count uniformly across eligible groups
        num_groups = len(group_labels)
        uniform_probs = [1. / num_groups] * num_groups
        group_counts = np.random.multinomial(num_episodes, uniform_probs)

        # sample uniformly from groups
        sampled_episodes = []
        sample_probs = []
        traces = {}
        assert len(group_labels) == len(group_counts)
        for label, group_count in zip(group_labels, group_counts):
            group_buffer = self._group_buffers[label]
            eps, probs, trace = group_buffer.sample(group_count)
            sampled_episodes.extend(eps)
            sample_probs.extend(probs)
            traces[label] = trace

        group_counts_dict = dict(list(zip(group_labels, group_counts)))
        full_trace = GroupedReplayBufferTrace(traces, group_counts_dict)
        return sampled_episodes, sample_probs, full_trace

    def extend(self, episodes):
        # group the episodes
        grouped_episodes = defaultdict(list)
        for ep in episodes:
            grouped_episodes[self._episode_grouper(ep)].append(ep)

        # add the episodes to their respective buffers
        for label, group in list(grouped_episodes.items()):
            self._group_buffers[label].extend(group)

    def __len__(self):
        # total episodes across every group's sub-buffer
        return sum(len(buffer) for buffer in list(self._group_buffers.values()))

    def status(self):
        if len(self._group_buffers) == 0:
            return 'empty'
        return '\n'.join('{}: {}'.format(buffer.status(), label)
                         for label, buffer in list(self._group_buffers.items()))


class GroupedReplayBufferTrace(Trace):
    """Trace of one GroupedReplayBuffer.sample call: per-group sub-traces
    plus how many episodes were drawn from each group."""

    def __init__(self, group_traces, group_counts):
        def trace_sort_key(item):
            group_label, trace = item
            if isinstance(trace, PrioritizedRewardReplayBufferTrace):
                return -trace.mean  # sort by mean reward of group
            else:
                return repr(group_label)  # sort by group label

        # NOTE(review): trace_sort_key returns a float for prioritized traces
        # and a str otherwise; a mix of both trace types in one sort raises
        # TypeError on Python 3 -- presumably all sub-buffers share one type.
        self._group_traces = OrderedDict(sorted(list(group_traces.items()), key=trace_sort_key))
        # group counts sorted from most-sampled to least-sampled
        self._group_counts = OrderedDict(sorted(list(group_counts.items()), key=lambda x: -x[1]))

    def to_json_dict(self):
        # repr() the labels so arbitrary label objects become JSON-safe keys
        return {'group_counts': {repr(label): count for label, count in list(self._group_counts.items())},
                'group_traces': {repr(label): stat.to_json_dict() for label, stat in list(self._group_traces.items())}
                }

    def dumps(self):
        return 'group stats:\n{}\nsample counts:\n{}'.format(
            indent('\n'.join('{}: {}'.format(trace.dumps(), label)
                             for label, trace in list(self._group_traces.items()))),
            indent('\n'.join('{:<5}: {}'.format(c, k)
                             for k, c in list(self._group_counts.items()))),
        )


class PrioritizedRewardReplayBufferTrace(Trace):
    """Summary statistics (min/median/mean/max) of the undiscounted returns
    of the episodes stored in a RewardPrioritizedReplayBuffer."""

    def __init__(self, episodes):
        # ascending returns; assumes episodes is non-empty
        self._rewards = sorted(ep.discounted_return(0, 1.) for ep in episodes)
        self.median = self._rewards[int(len(self._rewards) / 2)]
        self.min = self._rewards[0]
        self.max = self._rewards[-1]
        self.mean = sum(self._rewards) / len(self._rewards)

    def dumps(self):
        return 'n={n:<4} mean={mean:.2f} range=[{min:.2f}, {max:.2f}] median={median:.2f}'.format(
            n=len(self._rewards), min=self.min, median=self.median, max=self.max, mean=self.mean)

    def to_json_dict(self):
        return {'median': self.median,
                'mean': self.mean,
                'min': self.min,
                'max': self.max
                }
python
# # PySNMP MIB module FNCNMS (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/FNCNMS # Produced by pysmi-0.3.4 at Wed May 1 13:14:13 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ConstraintsUnion, SingleValueConstraint, ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint") netsmart, = mibBuilder.importSymbols("FNC-COMMON-SMI", "netsmart") ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup") system, = mibBuilder.importSymbols("SNMPv2-MIB", "system") Unsigned32, NotificationType, MibIdentifier, TimeTicks, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, iso, Bits, IpAddress, Gauge32, ModuleIdentity, Counter32, Counter64, enterprises, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "NotificationType", "MibIdentifier", "TimeTicks", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso", "Bits", "IpAddress", "Gauge32", "ModuleIdentity", "Counter32", "Counter64", "enterprises", "ObjectIdentity") TextualConvention, DisplayString, DateAndTime = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString", "DateAndTime") netsmart1500 = ModuleIdentity((1, 3, 6, 1, 4, 1, 3861, 4, 1500)) netsmart1500.setRevisions(('2012-02-06 16:00', '2011-06-16 16:00', '2003-08-02 16:00',)) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): if mibBuilder.loadTexts: netsmart1500.setRevisionsDescriptions(('Added keep alive 
message event', 'Added following NETypes to neType object FLASHWAVE 9500, FLASHWAVE CDS, FLASHWAVE 9410, FLASHWAVE 9420, FLASHWAVE 7120, FLASHWAVE 7420', 'Initial Version.',)) if mibBuilder.loadTexts: netsmart1500.setLastUpdated('201202061600Z') if mibBuilder.loadTexts: netsmart1500.setOrganization('Fujitsu Network Communications Inc.') if mibBuilder.loadTexts: netsmart1500.setContactInfo('Fujitsu Network Communications Tel: I-800-USE-FTAC i.e (800) 873 3822') if mibBuilder.loadTexts: netsmart1500.setDescription(" This MIB represents the interface definition between a third party SNMP Manager and FNC's NETSMART Management System. The functions provided in this MIB are: - Network Element (NE) Target IDentifier (TID) Discovery - Dynamic Alarm reporting - Alarm Discovery and Reconciliation ") nmsNEMgmtMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1)) nmsNEMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 1)) nmsNEAlarm = MibIdentifier((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2)) nmsNotificationTrapBase = MibIdentifier((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 0)) class NMSSeverity(TextualConvention, Integer32): description = ' Severity represents the severity of the Notification. It is assigned by the NE. Cleared indicates that this notification clears a previously sent trap with a severity of critical,major,minor or info. ' status = 'current' subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5)) namedValues = NamedValues(("cleared", 1), ("info", 2), ("minor", 3), ("major", 4), ("critical", 5)) class NMSCondDirection(TextualConvention, Integer32): description = 'Whether the fault is on the transmit side or on the receive side. 
' status = 'current' subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3)) namedValues = NamedValues(("na", 1), ("transmit", 2), ("receive", 3)) class NMSCondLocation(TextualConvention, Integer32): description = ' Location indicates whether the failure occurred on the nearEnd of this NE or on the farEnd. ' status = 'current' subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3)) namedValues = NamedValues(("na", 1), ("nearEnd", 2), ("farEnd", 3)) class NMSServiceEffect(TextualConvention, Integer32): description = 'Service Affecting indicates whether the alarm affects traffic or not. ' status = 'current' subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3)) namedValues = NamedValues(("na", 1), ("serviceAffecting", 2), ("nonServiceAffecting", 3)) class NMSTrapSeqNumber(TextualConvention, Integer32): description = ' A value in the range 1-99999 is reported. The value 0 is never reported but can only be obtained through a GET.A value of 0 indicates that agent has not forwarded any notifications since it booted up. When the value of this field reaches 99999, the number resets and the subsequent notification is sent with a number 1. This value is not unique per NE.It is a global number used across all NEs. ' status = 'current' subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 99999) class NMSMgdNE(DisplayString): subtypeSpec = DisplayString.subtypeSpec + ValueSizeConstraint(1, 20) class NMSNEConnState(TextualConvention, Integer32): description = ' Describes the state of connection between an NE and agent.Connected indicates that the agent is connected to the NE. NotConnected indicates that there is no communication between NE and agent. 
' status = 'current' subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2)) namedValues = NamedValues(("connected", 1), ("notConnected", 2)) nmsNETable = MibTable((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 1, 1), ) if mibBuilder.loadTexts: nmsNETable.setStatus('current') if mibBuilder.loadTexts: nmsNETable.setDescription(' This Table contains a row for each managed NE in NETSMART. The row contains: - the TID (20 characters max) of each NE. - the NE Type ( A string representing the FNC Product Name) . - the NE Connection State. @see NMSNEConnState ') nmsNEEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 1, 1, 1), ).setIndexNames((1, "FNCNMS", "neTID")) if mibBuilder.loadTexts: nmsNEEntry.setStatus('current') if mibBuilder.loadTexts: nmsNEEntry.setDescription(' Provides information about a managed NE in NETSMART. ') neTID = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 1, 1, 1, 1), NMSMgdNE()).setMaxAccess("readonly") if mibBuilder.loadTexts: neTID.setStatus('current') if mibBuilder.loadTexts: neTID.setDescription('TID of the NE. @see NMSMgdNE. ') neType = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 1, 1, 1, 2), OctetString()).setMaxAccess("readonly") if mibBuilder.loadTexts: neType.setStatus('current') if mibBuilder.loadTexts: neType.setDescription(' Describes the type of Fujitsu NETWORK Element Current Types are FLM6, FACTR, FLM150, FLM600, FLM2400, FLX 600A, FLX 2500A, FLASH 192, FLASH 10G, FLASHWAVE 4010, FLASHWAVE 4020, FLASHWAVE 4100, FLASHWAVE 4300, FLASHWAVE 4500, FLASHWAVE 4560, FLASHWAVE 7200, FLASHWAVE 7300, FLASHWAVE 7500, FLASHWAVE 7700, FLASHWAVE 9500, FLASHWAVE CDS, FLASHWAVE 9410, FLASHWAVE 9420, FLASHWAVE 7120, FLASHWAVE 7420, Fujitsu NE, Generic NE, Unknown. -Unknown NEType represents NEs whose type is not determined yet. -Fujitsu NE NEType represents the NEs which are minimally (only fault management) supported by the agent. 
-Generic NE NEType represents non Fujitsu NEs whose fault management functionality is supported by the agent. ') neConnState = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 1, 1, 1, 3), NMSNEConnState()).setMaxAccess("readonly") if mibBuilder.loadTexts: neConnState.setStatus('current') if mibBuilder.loadTexts: neConnState.setDescription(' Describes connection state between agent and NE. @see NMSNEConnState. A nmsNEStateChangeEvent NOTIFICATION generated whenever there is a change in neConnState. Also see neOperation and nmsNEOperationEvent for the default state when an NE is added. ') nmsNEAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 1), ) if mibBuilder.loadTexts: nmsNEAlarmTable.setStatus('current') if mibBuilder.loadTexts: nmsNEAlarmTable.setDescription(' This table contains a NE TID and a List of nmsNEAlarmList for this TID. ') nmsNEAlarmListEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 1, 1), ).setIndexNames((0, "FNCNMS", "alarmTID"), (0, "FNCNMS", "alarmIndex")) if mibBuilder.loadTexts: nmsNEAlarmListEntry.setStatus('current') if mibBuilder.loadTexts: nmsNEAlarmListEntry.setDescription(' A row in the table indicating the specific Alarm for the given TID. ') alarmTID = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 1, 1, 1), NMSMgdNE()).setMaxAccess("readonly") if mibBuilder.loadTexts: alarmTID.setStatus('current') if mibBuilder.loadTexts: alarmTID.setDescription(' NE TID against which this Alarm is being raised. @see NMSMgdNE. ') alarmIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 99999))).setMaxAccess("readonly") if mibBuilder.loadTexts: alarmIndex.setStatus('current') if mibBuilder.loadTexts: alarmIndex.setDescription('The instance of the alarm for this NE. This variable is used to identify a unique row for each alarm. 
In the nmsNEAlarmTable a sequence of alarms will look like: TID alarmIndex Other Alarm Data ======================================================== TID1 1 1-1 oc3 CR LOS SA RCV Loss Of Signal neTimeStamp1 nmsTimeStamp2 TID1 2 2-1 oc3 CR LOS SA RCV Loss Of Signal neTimeStamp1 nmsTimeStamp2 TID1 3 3-1 oc3 CR LOS SA RCV Loss Of Signal neTimeStamp1 nmsTimeStamp2 TID2 1 1-1 oc3 CR LOS SA RCV Loss Of Signal neTimeStamp1 nmsTimeStamp2 TID2 2 1-1 oc3 CR LOS SA RCV Loss Of Signal neTimeStamp1 nmsTimeStamp2 TID3 1 2-1 oc3 CR LOS SA RCV Loss Of Signal neTimeStamp1 nmsTimeStamp2 TID3 2 3-1 oc3 CR LOS SA RCV Loss Of Signal neTimeStamp1 nmsTimeStamp2 TID3 3 4-1 oc3 CR LOS SA RCV Loss Of Signal neTimeStamp1 nmsTimeStamp2 TID3 4 5-1 oc3 CR LOS SA RCV Loss Of Signal neTimeStamp1 nmsTimeStamp2 TID3 5 6-1 oc3 CR LOS SA RCV Loss Of Signal neTimeStamp1 nmsTimeStamp2 TID4 1 1-1 oc3 CR LOS SA RCV Loss Of Signal neTimeStamp1 nmsTimeStamp2 i.e. For each TID alarmIndex starts with 1 and is incremented by 1. This is to help retrieve alarms on an NE basis. ') alarmEntityId = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 1, 1, 3), OctetString()).setMaxAccess("readonly") if mibBuilder.loadTexts: alarmEntityId.setStatus('current') if mibBuilder.loadTexts: alarmEntityId.setDescription('Describes the AID of the entity against which this notification is being sent. ') alarmEntityType = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 1, 1, 4), OctetString()).setMaxAccess("readonly") if mibBuilder.loadTexts: alarmEntityType.setStatus('current') if mibBuilder.loadTexts: alarmEntityType.setDescription(' Describes the kind the entity against which this notification is being sent . The combination of (alarmEntity, alarmEntityType) represents a addressable entity on the NE. Examples of kind of entities are EQPT,OC3,STS1,COM,ENV etc. 
') alarmSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 1, 1, 5), NMSSeverity()).setMaxAccess("readonly") if mibBuilder.loadTexts: alarmSeverity.setStatus('current') if mibBuilder.loadTexts: alarmSeverity.setDescription('Describes the severity of the notification being sent. @see NMSSeverity ') alarmCondType = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 1, 1, 6), OctetString()).setMaxAccess("readonly") if mibBuilder.loadTexts: alarmCondType.setStatus('current') if mibBuilder.loadTexts: alarmCondType.setDescription(' Describes the condition type i.e RMVD or LOS etc. ') alarmServEffect = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 1, 1, 7), NMSServiceEffect()).setMaxAccess("readonly") if mibBuilder.loadTexts: alarmServEffect.setStatus('current') if mibBuilder.loadTexts: alarmServEffect.setDescription(' Describes whether the notification is serviceAffecting or not. @see NMSServiceEffect. ') alarmLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 1, 1, 8), NMSCondLocation()).setMaxAccess("readonly") if mibBuilder.loadTexts: alarmLocation.setStatus('current') if mibBuilder.loadTexts: alarmLocation.setDescription(' Describes if the location is applicable and if applicable the appropriate location. @see NMSCondLocation. ') alarmDirection = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 1, 1, 9), NMSCondDirection()).setMaxAccess("readonly") if mibBuilder.loadTexts: alarmDirection.setStatus('current') if mibBuilder.loadTexts: alarmDirection.setDescription(' Describes if the direction is a applicable and if applicable the appropriate direction. @see NMSCondLocation. ') alarmDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 1, 1, 10), OctetString()).setMaxAccess("readonly") if mibBuilder.loadTexts: alarmDescription.setStatus('current') if mibBuilder.loadTexts: alarmDescription.setDescription(' Gives a textual description of the condition. 
') neAlarmTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 1, 1, 11), DateAndTime()).setMaxAccess("readonly") if mibBuilder.loadTexts: neAlarmTimeStamp.setStatus('current') if mibBuilder.loadTexts: neAlarmTimeStamp.setDescription('Time stamp at which the NE generated the fault. The correctness of the value depends whether this fault is reported to the agent or agent retrieved this information from NE. For faults reported from the NE this value is correct where as for faults retrieved the timestamp indicates the time on NE at which this fault was retrieved. ') nmsAlarmTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 1, 1, 12), DateAndTime()).setMaxAccess("readonly") if mibBuilder.loadTexts: nmsAlarmTimeStamp.setStatus('current') if mibBuilder.loadTexts: nmsAlarmTimeStamp.setDescription(' Time at which the NMS received the fault. ') nmsLastMsgNumber = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 2), NMSTrapSeqNumber()).setMaxAccess("readonly") if mibBuilder.loadTexts: nmsLastMsgNumber.setStatus('current') if mibBuilder.loadTexts: nmsLastMsgNumber.setDescription(' This variable describes the value of the last NMSTrapIndex reported by the agent in the notifications. @see NMSTrapSeqNumber. ') nmsTrapHistoryTable = MibTable((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 3), ) if mibBuilder.loadTexts: nmsTrapHistoryTable.setStatus('current') if mibBuilder.loadTexts: nmsTrapHistoryTable.setDescription(' Thus table contains a sequence of trap seq number and the corresponding neTID.A Management system upon detecting that it has not received a particular sequence number, can determine the NE against which the sequence number was used and do a alarm resynchornization for that NE. 
') nmsTrapHistoryTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 3, 1), ).setIndexNames((0, "FNCNMS", "nmsTrapHistoryIndex")) if mibBuilder.loadTexts: nmsTrapHistoryTableEntry.setStatus('current') if mibBuilder.loadTexts: nmsTrapHistoryTableEntry.setDescription(' A row indicating the trap sequence number and NE TID which caused this event to be sent. ') nmsTrapHistoryIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 3, 1, 1), NMSTrapSeqNumber()).setMaxAccess("readonly") if mibBuilder.loadTexts: nmsTrapHistoryIndex.setStatus('current') if mibBuilder.loadTexts: nmsTrapHistoryIndex.setDescription(' A Sequence Number that was previously sent. @see NMSTrapSeqNumber. ') nmsTrapHistoryTID = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 3, 1, 2), NMSMgdNE()).setMaxAccess("readonly") if mibBuilder.loadTexts: nmsTrapHistoryTID.setStatus('current') if mibBuilder.loadTexts: nmsTrapHistoryTID.setDescription(' The tid for which the sequence entry was sent. @see NMSMGgNE. ') nmsNotificationBase = MibIdentifier((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4)) notifTID = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 1), NMSMgdNE()).setMaxAccess("readonly") if mibBuilder.loadTexts: notifTID.setStatus('current') if mibBuilder.loadTexts: notifTID.setDescription(' NE TID against which this notification is being sent. @see NMSMgdNE. ') neEntityID = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 2), OctetString()).setMaxAccess("readonly") if mibBuilder.loadTexts: neEntityID.setStatus('current') if mibBuilder.loadTexts: neEntityID.setDescription(' Describes the AID of the entity against which this notification is being sent. ') neEntityType = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 3), OctetString()).setMaxAccess("readonly") if mibBuilder.loadTexts: neEntityType.setStatus('current') if mibBuilder.loadTexts: neEntityType.setDescription(' Describes the type the entity against which this notification is being sent . 
The combination (neEntityID,neEntityType) represents a addressable entity on the network element. Examples of neEntityType would be EQPT,OC3,COM,STS1,ENV etc. ') neSeverity = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 4), NMSSeverity()).setMaxAccess("readonly") if mibBuilder.loadTexts: neSeverity.setStatus('current') if mibBuilder.loadTexts: neSeverity.setDescription(' Describes the severity of the notification being sent. @see NMSSeverity. ') neCondType = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 5), OctetString()).setMaxAccess("readonly") if mibBuilder.loadTexts: neCondType.setStatus('current') if mibBuilder.loadTexts: neCondType.setDescription(' Describes the condition type i.e RMVD or LOS etc. ') neServEffect = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 6), NMSServiceEffect()).setMaxAccess("readonly") if mibBuilder.loadTexts: neServEffect.setStatus('current') if mibBuilder.loadTexts: neServEffect.setDescription(' Describes whether the notification is ServiceAffecting or not. @see NMSServiceEffect. ') neLocation = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 7), NMSCondLocation()).setMaxAccess("readonly") if mibBuilder.loadTexts: neLocation.setStatus('current') if mibBuilder.loadTexts: neLocation.setDescription(' Describes if the location is applicable for this notification, if it is applicable describes the location. @see NMSCondLocation. ') neDirection = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 8), NMSCondDirection()).setMaxAccess("readonly") if mibBuilder.loadTexts: neDirection.setStatus('current') if mibBuilder.loadTexts: neDirection.setDescription('Describes if the direction is a applicable and if it is applicable the direction. @see NMSCondDirection. 
') neCondDescription = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 9), OctetString()).setMaxAccess("readonly") if mibBuilder.loadTexts: neCondDescription.setStatus('current') if mibBuilder.loadTexts: neCondDescription.setDescription(' Textual Description of the notification. ') nmsNotifTimeStamp = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 10), DateAndTime()).setMaxAccess("readonly") if mibBuilder.loadTexts: nmsNotifTimeStamp.setStatus('current') if mibBuilder.loadTexts: nmsNotifTimeStamp.setDescription(' Time when NMS received this event. ') neNotifTimeStamp = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 11), DateAndTime()).setMaxAccess("readonly") if mibBuilder.loadTexts: neNotifTimeStamp.setStatus('current') if mibBuilder.loadTexts: neNotifTimeStamp.setDescription(' Time when NE raised this event. ') nmsTrapSeqNumber = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 12), NMSTrapSeqNumber()).setMaxAccess("readonly") if mibBuilder.loadTexts: nmsTrapSeqNumber.setStatus('current') if mibBuilder.loadTexts: nmsTrapSeqNumber.setDescription(' @See NMSTrapSeqNumber. ') nmsNEConnState = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 13), NMSNEConnState()).setMaxAccess("readonly") if mibBuilder.loadTexts: nmsNEConnState.setStatus('current') if mibBuilder.loadTexts: nmsNEConnState.setDescription(' Describes the state of connection between an NE and NMS. ') neOperation = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("created", 1), ("deleted", 2)))).setMaxAccess("readonly") if mibBuilder.loadTexts: neOperation.setStatus('current') if mibBuilder.loadTexts: neOperation.setDescription(' Indicates if a new NE is added to NETSMART or if it is removed. Addition always implies a nmsNEConnState with value notConnected and NEType of Unknown. 
') notifServer = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 15), OctetString()).setMaxAccess("readonly") if mibBuilder.loadTexts: notifServer.setStatus('current') if mibBuilder.loadTexts: notifServer.setDescription(' Describes the Server which this notification is being sent. ') nmsKeepAliveState = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("alive", 1)))).setMaxAccess("readonly") if mibBuilder.loadTexts: nmsKeepAliveState.setStatus('current') if mibBuilder.loadTexts: nmsKeepAliveState.setDescription(' Describes the keep alive state which this notification is being sent. ') nmsNEEvent = NotificationType((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 0, 1)).setObjects(("FNCNMS", "notifTID"), ("FNCNMS", "neEntityID"), ("FNCNMS", "neEntityType"), ("FNCNMS", "neSeverity"), ("FNCNMS", "neCondType"), ("FNCNMS", "neServEffect"), ("FNCNMS", "neLocation"), ("FNCNMS", "neDirection"), ("FNCNMS", "neCondDescription"), ("FNCNMS", "nmsNotifTimeStamp"), ("FNCNMS", "neNotifTimeStamp"), ("FNCNMS", "nmsTrapSeqNumber")) if mibBuilder.loadTexts: nmsNEEvent.setStatus('current') if mibBuilder.loadTexts: nmsNEEvent.setDescription(' The agent generates this notification when a rept alarm or rept evt is generated. ') nmsNEStateChangeEvent = NotificationType((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 0, 2)).setObjects(("FNCNMS", "notifTID"), ("FNCNMS", "nmsNEConnState"), ("FNCNMS", "nmsTrapSeqNumber")) if mibBuilder.loadTexts: nmsNEStateChangeEvent.setStatus('current') if mibBuilder.loadTexts: nmsNEStateChangeEvent.setDescription(' The agent generates this notification when a NMS changes its connection state to the NE. @see NMSConnState. 
') nmsNEOperationEvent = NotificationType((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 0, 3)).setObjects(("FNCNMS", "notifTID"), ("FNCNMS", "neOperation"), ("FNCNMS", "nmsTrapSeqNumber")) if mibBuilder.loadTexts: nmsNEOperationEvent.setStatus('current') if mibBuilder.loadTexts: nmsNEOperationEvent.setDescription(' The agent generates this notification when a NE is created or deleted. @see neOperation. ') nmsKeepAliveEvent = NotificationType((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 0, 4)).setObjects(("FNCNMS", "notifServer"), ("FNCNMS", "nmsKeepAliveState")) if mibBuilder.loadTexts: nmsKeepAliveEvent.setStatus('current') if mibBuilder.loadTexts: nmsKeepAliveEvent.setDescription(' The agent generates this notification when reaching the keep alive trap interval. @see nmsKeepAliveState. ') fncNMSMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 3)) fncNMSMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 3, 1)) fncNMSMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 3, 2)) fncNMSMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 3, 1, 1)).setObjects(("SNMPv2-MIB", "system"), ("FNCNMS", "nmsNEMgmtGroup"), ("FNCNMS", "nmsNEAlarmGroup"), ("FNCNMS", "nmsNETrapGroup"), ("FNCNMS", "nmsNETrapObjects"), ("FNCNMS", "nmsServerTrapGroup"), ("FNCNMS", "nmsServerTrapObjects")) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): fncNMSMIBCompliance = fncNMSMIBCompliance.setStatus('current') if mibBuilder.loadTexts: fncNMSMIBCompliance.setDescription(' Module Compliancy ') nmsNEMgmtGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 3, 2, 1)).setObjects(("FNCNMS", "neTID"), ("FNCNMS", "neType"), ("FNCNMS", "neConnState")) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): nmsNEMgmtGroup = nmsNEMgmtGroup.setStatus('current') if mibBuilder.loadTexts: nmsNEMgmtGroup.setDescription(' This Group defines objects which are common to all NE Table related retrievals ') nmsNEAlarmGroup = ObjectGroup((1, 3, 6, 1, 4, 
1, 3861, 4, 1500, 1, 3, 2, 2)).setObjects(("FNCNMS", "alarmTID"), ("FNCNMS", "alarmIndex"), ("FNCNMS", "alarmEntityId"), ("FNCNMS", "alarmEntityType"), ("FNCNMS", "alarmSeverity"), ("FNCNMS", "alarmCondType"), ("FNCNMS", "alarmServEffect"), ("FNCNMS", "alarmLocation"), ("FNCNMS", "alarmDirection"), ("FNCNMS", "alarmDescription"), ("FNCNMS", "neAlarmTimeStamp"), ("FNCNMS", "nmsAlarmTimeStamp"), ("FNCNMS", "nmsLastMsgNumber"), ("FNCNMS", "nmsTrapHistoryIndex"), ("FNCNMS", "nmsTrapHistoryTID")) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): nmsNEAlarmGroup = nmsNEAlarmGroup.setStatus('current') if mibBuilder.loadTexts: nmsNEAlarmGroup.setDescription(' This Group defines objects which are common to all Alarm Table related retrievals ') nmsNETrapGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 3, 2, 3)).setObjects(("FNCNMS", "nmsNEEvent"), ("FNCNMS", "nmsNEStateChangeEvent"), ("FNCNMS", "nmsNEOperationEvent")) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): nmsNETrapGroup = nmsNETrapGroup.setStatus('current') if mibBuilder.loadTexts: nmsNETrapGroup.setDescription(' This Group defines objects which are used part of notifications ') nmsNETrapObjects = ObjectGroup((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 3, 2, 4)).setObjects(("FNCNMS", "notifTID"), ("FNCNMS", "neEntityID"), ("FNCNMS", "neEntityType"), ("FNCNMS", "neSeverity"), ("FNCNMS", "neCondType"), ("FNCNMS", "neServEffect"), ("FNCNMS", "neLocation"), ("FNCNMS", "neDirection"), ("FNCNMS", "neCondDescription"), ("FNCNMS", "nmsNotifTimeStamp"), ("FNCNMS", "neNotifTimeStamp"), ("FNCNMS", "nmsTrapSeqNumber"), ("FNCNMS", "nmsNEConnState"), ("FNCNMS", "neOperation")) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): nmsNETrapObjects = nmsNETrapObjects.setStatus('current') if mibBuilder.loadTexts: nmsNETrapObjects.setDescription(' This Group defines objects reported as part of notifications. 
') nmsServerTrapGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 3, 2, 5)).setObjects(("FNCNMS", "nmsKeepAliveEvent")) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): nmsServerTrapGroup = nmsServerTrapGroup.setStatus('current') if mibBuilder.loadTexts: nmsServerTrapGroup.setDescription(' This Group defines objects which are used part of notifications ') nmsServerTrapObjects = ObjectGroup((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 3, 2, 6)).setObjects(("FNCNMS", "notifServer"), ("FNCNMS", "nmsKeepAliveState")) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): nmsServerTrapObjects = nmsServerTrapObjects.setStatus('current') if mibBuilder.loadTexts: nmsServerTrapObjects.setDescription(' This Group defines objects reported as part of notifications. ') mibBuilder.exportSymbols("FNCNMS", nmsTrapHistoryTableEntry=nmsTrapHistoryTableEntry, nmsNEAlarm=nmsNEAlarm, alarmIndex=alarmIndex, NMSMgdNE=NMSMgdNE, alarmEntityType=alarmEntityType, alarmServEffect=alarmServEffect, NMSServiceEffect=NMSServiceEffect, PYSNMP_MODULE_ID=netsmart1500, nmsTrapHistoryTable=nmsTrapHistoryTable, nmsTrapHistoryIndex=nmsTrapHistoryIndex, nmsNotificationBase=nmsNotificationBase, NMSNEConnState=NMSNEConnState, nmsNEAlarmTable=nmsNEAlarmTable, alarmTID=alarmTID, nmsNEAlarmGroup=nmsNEAlarmGroup, neAlarmTimeStamp=neAlarmTimeStamp, NMSTrapSeqNumber=NMSTrapSeqNumber, fncNMSMIBConformance=fncNMSMIBConformance, neType=neType, fncNMSMIBCompliance=fncNMSMIBCompliance, neEntityType=neEntityType, nmsTrapSeqNumber=nmsTrapSeqNumber, alarmEntityId=alarmEntityId, neServEffect=neServEffect, nmsLastMsgNumber=nmsLastMsgNumber, neEntityID=neEntityID, notifTID=notifTID, nmsTrapHistoryTID=nmsTrapHistoryTID, nmsNEMgmtGroup=nmsNEMgmtGroup, nmsNEEvent=nmsNEEvent, nmsNETrapObjects=nmsNETrapObjects, neLocation=neLocation, nmsNEAlarmListEntry=nmsNEAlarmListEntry, nmsNEOperationEvent=nmsNEOperationEvent, nmsNETable=nmsNETable, nmsNEEntry=nmsNEEntry, neSeverity=neSeverity, 
nmsKeepAliveState=nmsKeepAliveState, alarmDescription=alarmDescription, fncNMSMIBGroups=fncNMSMIBGroups, nmsNETrapGroup=nmsNETrapGroup, nmsNotificationTrapBase=nmsNotificationTrapBase, NMSCondDirection=NMSCondDirection, neTID=neTID, neConnState=neConnState, nmsNEMgmtMIB=nmsNEMgmtMIB, neDirection=neDirection, notifServer=notifServer, nmsKeepAliveEvent=nmsKeepAliveEvent, nmsServerTrapObjects=nmsServerTrapObjects, neCondType=neCondType, nmsAlarmTimeStamp=nmsAlarmTimeStamp, NMSCondLocation=NMSCondLocation, NMSSeverity=NMSSeverity, neCondDescription=neCondDescription, nmsNEStateChangeEvent=nmsNEStateChangeEvent, alarmCondType=alarmCondType, fncNMSMIBCompliances=fncNMSMIBCompliances, alarmLocation=alarmLocation, alarmDirection=alarmDirection, alarmSeverity=alarmSeverity, netsmart1500=netsmart1500, neNotifTimeStamp=neNotifTimeStamp, nmsNEConnState=nmsNEConnState, neOperation=neOperation, nmsServerTrapGroup=nmsServerTrapGroup, nmsNEMgmt=nmsNEMgmt, nmsNotifTimeStamp=nmsNotifTimeStamp)
nilq/baby-python
python
"""PyPassword generator.

Builds a random password from a requested number of letters, symbols and
digits, shuffles the characters so the classes are interleaved, and prints
the result.
"""
import random
import string

# Character pools. string.ascii_letters / string.digits replace the original
# hand-typed lists, which accidentally dropped 'k'/'K' and fused 'x' 'y' into
# a single "xy" entry (missing comma between the two literals).
LETTERS = list(string.ascii_letters)
NUMBERS = list(string.digits)
SYMBOLS = ['!', '#', '$', '&', '(', ')', '*', '+']


def generate_password(n_letters, n_symbols, n_numbers):
    """Return a random password string.

    Args:
        n_letters: number of letters to include.
        n_symbols: number of symbols to include.
        n_numbers: number of digits to include.

    Returns:
        A shuffled string of length ``n_letters + n_symbols + n_numbers``,
        drawn uniformly (with replacement) from each character pool.
    """
    chars = [random.choice(LETTERS) for _ in range(n_letters)]
    chars += [random.choice(SYMBOLS) for _ in range(n_symbols)]
    chars += [random.choice(NUMBERS) for _ in range(n_numbers)]
    # Shuffle so the password does not start with all letters, then symbols.
    random.shuffle(chars)
    return "".join(chars)


if __name__ == "__main__":
    print("Welcome to the PyPassword Generator!!!")
    # int() instead of eval(): eval() would execute arbitrary user input.
    n_letters = int(input("Number of letters: "))
    n_symbols = int(input("Number of symbols: "))
    n_numbers = int(input("Number of numbers: "))
    print("Your strong password: "
          + generate_password(n_letters, n_symbols, n_numbers))
nilq/baby-python
python
""" Determine an optimal list of hotel to visit. ``` $ python src/domain/solver.py \ -s "/Users/fpaupier/projects/samu_social/data/hotels_subset.csv ``` Note that the first record should be the adress of the starting point (let's say the HQ of the Samu Social) """ import argparse import numpy as np from ortools.constraint_solver import pywrapcp from ortools.constraint_solver import routing_enums_pb2 from src.services.map import Map from src.services.csv_reader import parse_csv MAX_DISTANCE = 15000 # Maximum distance (meters) that a worker can cover in a day MAX_VISIT_PER_DAY = 8 # Maximum number of various hotel a worker can cover within a day def get_distances_matrix(hotels, workers): """Compute the distance matrix (distance between each hotels). Returns a triangular matrix and the labels of the hotels. Note: 1) That the first address shall be the address of the depot. 2) If the API doesn't returna a match for the address, we drop the point. This may not be the expected behavior. TODO Args: hotels (list[dict]): list of address, each dict has the struct {'address': 'Avenue Winston Churchill', 'postcode': 27000} workers (dict(int: int)) Returns: distances(list[list[int]]): matrix of distances labels(dict[int, string]): the index of the address and it's name Warnings: Function seems to break if size of input hotels is too big ? Returns empty distances that leads to a segmentation fault down the processing pipeline. 
""" map = Map() distances = [] labels = dict() index = 0 hotels_and_workers = workers + workers + hotels for hotel1 in hotels_and_workers: src_address = { "address": hotel1.get("address"), "postcode": hotel1.get("postcode"), } # point1 = map.point(src_address) point1 = hotel1["point"] src_dist = [] if not point1: continue labels[index] = "{} {}".format( src_address.get("address"), src_address.get("postcode") ) # Store the address as labels for the node index = index + 1 for hotel2 in hotels_and_workers: target_address = { "address": hotel2.get("address"), "postcode": hotel2.get("postcode"), } # point2 = map.point(target_address) point2 = hotel2["point"] if not point2: continue distance = map.distance(point1, point2) distance = int(np.round(distance * 1000)) # Distance expressed in meters src_dist.append(distance) if src_dist: distances.append(src_dist) return distances, labels ########################### # Problem Data Definition # ########################### def create_data_model(hotels, workers, from_raw_data): """Creates the data for the example. Args: hotels(list[dict]) workers(dict(int: int): number of couple of Samu Social workers available from_raw_data(bool): """ data = {} n_workers = len(workers) data["num_vehicles"] = n_workers # Precise start and end locations of the workers # The number_workers-th first line correspond to the start locations of the workers start_locations = [idx for idx in range(n_workers)] # The number_workers-th to the 2*number_workers-th line correspond to the end locations of the workers end_locations = [idx for idx in range(n_workers, 2 * n_workers)] data["start_locations"] = start_locations data["end_locations"] = end_locations # Matrix of distances between locations. 
if from_raw_data: hotels_data = parse_csv(hotels, "hotel", write=False) else: hotels_data = hotels _distances, labels = get_distances_matrix(hotels_data, workers) data["distances"] = _distances data["labels"] = labels num_locations = len(_distances) data["num_locations"] = num_locations # The problem is to find an assignment of routes to vehicles that has the shortest total distance # and such that the total amount a vehicle is carrying never exceeds its capacity. Capacities can be understood # as the max number of visits that a worker can do in a day demands = [1] * num_locations capacities = [MAX_VISIT_PER_DAY] * n_workers data["demands"] = demands data["vehicle_capacities"] = capacities return data ####################### # Problem Constraints # ####################### def create_distance_callback(data): """Creates callback to return distance between points.""" distances = data["distances"] def distance_callback(from_node, to_node): """Returns the manhattan distance between the two nodes""" return distances[from_node][to_node] return distance_callback def create_demand_callback(data): """Creates callback to get demands at each location.""" def demand_callback(from_node, to_node): return data["demands"][from_node] return demand_callback def add_capacity_constraints(routing, data, demand_callback): """Adds capacity constraint""" capacity = "Capacity" routing.AddDimensionWithVehicleCapacity( demand_callback, 0, # null capacity slack data["vehicle_capacities"], # vehicle maximum capacities True, # start cumul to zero capacity, ) ########### # FORMATTER # ########### def format_solution(data, routing, assignment): """Print routes on console.""" plan_output = [] for vehicle_id in range(data["num_vehicles"]): route = [] index = routing.Start(vehicle_id) route_dist = 0 while not routing.IsEnd(index): node_index = routing.IndexToNode(index) next_node_index = routing.IndexToNode( assignment.Value(routing.NextVar(index)) ) route_dist += routing.GetArcCostForVehicle( 
node_index, next_node_index, vehicle_id ) route.append(("{0}".format(data["labels"].get(node_index)))) index = assignment.Value(routing.NextVar(index)) # Add return address to the route route.append((data["labels"].get(routing.IndexToNode(index)))) plan_output.append(route) return plan_output ######## # Main # ######## def solve_routes(hotels, number_workers, from_raw_data=False): """ Entry point of the program Args: hotels: number_workers: from_raw_data (bool): should we consider the raw csv file or not Returns: """ # Instantiate the data problem. data = create_data_model(hotels, number_workers, from_raw_data) # Create Routing Model routing = pywrapcp.RoutingModel( data["num_locations"], data["num_vehicles"], data["start_locations"], data["end_locations"], ) # Define weight of each edge distance_callback = create_distance_callback(data) routing.SetArcCostEvaluatorOfAllVehicles(distance_callback) # Add Capacity constraint demand_callback = create_demand_callback(data) add_capacity_constraints(routing, data, demand_callback) # Setting first solution heuristic (cheapest addition). search_parameters = pywrapcp.RoutingModel.DefaultSearchParameters() search_parameters.first_solution_strategy = ( routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC ) # Solve the problem. 
assignment = routing.SolveWithParameters(search_parameters) if assignment: itinerary = format_solution(data, routing, assignment) return itinerary else: return None if __name__ == "__main__": """ Solve a Vehicle Routing Problem Note: The first record should be the address of the starting point (let's say the HQ of the Samu Social) """ parser = argparse.ArgumentParser(description="Solve a Vehicle Routing Problem") parser.add_argument( "-s", "--source", help="path to the source address csv file", type=str ) parser.add_argument( "-n", "--number_workers", help="Number of workers available to perform the visit", type=int, default=4, ) args = parser.parse_args() solve_routes(args.source, args.number_workers, from_raw_data=True)
nilq/baby-python
python
import torch.nn as nn import math import torch.nn.functional as F __all__ = ['SENet', 'Sphere20a', 'senet50'] def conv3x3(in_planes, out_planes, stride=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) # This SEModule is not used. class SEModule(nn.Module): def __init__(self, planes, compress_rate): super(SEModule, self).__init__() self.conv1 = nn.Conv2d(planes, planes // compress_rate, kernel_size=1, stride=1, bias=True) self.conv2 = nn.Conv2d(planes // compress_rate, planes, kernel_size=1, stride=1, bias=True) self.relu = nn.ReLU(inplace=True) self.sigmoid = nn.Sigmoid() def forward(self, x): module_input = x x = F.avg_pool2d(module_input, kernel_size=module_input.size(2)) x = self.conv1(x) x = self.relu(x) x = self.conv2(x) x = self.sigmoid(x) return module_input * x class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = 
nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride # SENet compress_rate = 16 # self.se_block = SEModule(planes * 4, compress_rate) # this is not used. self.conv4 = nn.Conv2d(planes * 4, planes * 4 // compress_rate, kernel_size=1, stride=1, bias=True) self.conv5 = nn.Conv2d(planes * 4 // compress_rate, planes * 4, kernel_size=1, stride=1, bias=True) self.sigmoid = nn.Sigmoid() def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) ## senet out2 = F.avg_pool2d(out, kernel_size=out.size(2)) out2 = self.conv4(out2) out2 = self.relu(out2) out2 = self.conv5(out2) out2 = self.sigmoid(out2) # out2 = self.se_block.forward(out) # not used if self.downsample is not None: residual = self.downsample(x) out = out2 * out + residual # out = out2 + residual # not used out = self.relu(out) return out class SENet(nn.Module): def __init__(self, block, layers, num_classes=8631, include_top=True): self.inplanes = 64 super(SENet, self).__init__() self.include_top = include_top self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = nn.BatchNorm2d(64) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=2) self.avgpool = nn.AvgPool2d(7, stride=1) if self.include_top: self.fc = nn.Linear(512 * block.expansion, num_classes) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. 
/ n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() def _make_layer(self, block, planes, blocks, stride=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes)) return nn.Sequential(*layers) def forward(self, x, get_feat=True): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x0 = self.maxpool(x) x1 = self.layer1(x0) x2 = self.layer2(x1) x3 = self.layer3(x2) x4 = self.layer4(x3) x_avg = self.avgpool(x4) if not self.include_top: if get_feat: return [x0, x1, x2, x3, x4] else: return x_avg else: x_fc = x_avg.view(x_avg.size(0), -1) x_fc = self.fc(x_fc) if get_feat: return [x0, x1, x2, x3, x4] else: return x_fc def senet50(**kwargs): """Constructs a SENet-50 model. 
""" model = SENet(Bottleneck, [3, 4, 6, 3], **kwargs) return model class Sphere20a(nn.Module): def __init__(self,classnum=10574,feature=False): super(Sphere20a, self).__init__() self.classnum = classnum self.feature = feature #input = B*3*112*96 self.conv1_1 = nn.Conv2d(3,64,3,2,1) #=>B*64*56*48 self.relu1_1 = nn.PReLU(64) self.conv1_2 = nn.Conv2d(64,64,3,1,1) self.relu1_2 = nn.PReLU(64) self.conv1_3 = nn.Conv2d(64,64,3,1,1) self.relu1_3 = nn.PReLU(64) self.conv2_1 = nn.Conv2d(64,128,3,2,1) #=>B*128*28*24 self.relu2_1 = nn.PReLU(128) self.conv2_2 = nn.Conv2d(128,128,3,1,1) self.relu2_2 = nn.PReLU(128) self.conv2_3 = nn.Conv2d(128,128,3,1,1) self.relu2_3 = nn.PReLU(128) self.conv2_4 = nn.Conv2d(128,128,3,1,1) #=>B*128*28*24 self.relu2_4 = nn.PReLU(128) self.conv2_5 = nn.Conv2d(128,128,3,1,1) self.relu2_5 = nn.PReLU(128) self.conv3_1 = nn.Conv2d(128,256,3,2,1) #=>B*256*14*12 self.relu3_1 = nn.PReLU(256) self.conv3_2 = nn.Conv2d(256,256,3,1,1) self.relu3_2 = nn.PReLU(256) self.conv3_3 = nn.Conv2d(256,256,3,1,1) self.relu3_3 = nn.PReLU(256) self.conv3_4 = nn.Conv2d(256,256,3,1,1) #=>B*256*14*12 self.relu3_4 = nn.PReLU(256) self.conv3_5 = nn.Conv2d(256,256,3,1,1) self.relu3_5 = nn.PReLU(256) self.conv3_6 = nn.Conv2d(256,256,3,1,1) #=>B*256*14*12 self.relu3_6 = nn.PReLU(256) self.conv3_7 = nn.Conv2d(256,256,3,1,1) self.relu3_7 = nn.PReLU(256) self.conv3_8 = nn.Conv2d(256,256,3,1,1) #=>B*256*14*12 self.relu3_8 = nn.PReLU(256) self.conv3_9 = nn.Conv2d(256,256,3,1,1) self.relu3_9 = nn.PReLU(256) self.conv4_1 = nn.Conv2d(256,512,3,2,1) #=>B*512*7*6 self.relu4_1 = nn.PReLU(512) self.conv4_2 = nn.Conv2d(512,512,3,1,1) self.relu4_2 = nn.PReLU(512) self.conv4_3 = nn.Conv2d(512,512,3,1,1) self.relu4_3 = nn.PReLU(512) self.fc5 = nn.Linear(512*7*6, 512) def forward(self, x): feat_outs = [] x = self.relu1_1(self.conv1_1(x)) x = x + self.relu1_3(self.conv1_3(self.relu1_2(self.conv1_2(x)))) feat_outs.append(x) x = self.relu2_1(self.conv2_1(x)) x = x + 
self.relu2_3(self.conv2_3(self.relu2_2(self.conv2_2(x)))) x = x + self.relu2_5(self.conv2_5(self.relu2_4(self.conv2_4(x)))) feat_outs.append(x) x = self.relu3_1(self.conv3_1(x)) x = x + self.relu3_3(self.conv3_3(self.relu3_2(self.conv3_2(x)))) x = x + self.relu3_5(self.conv3_5(self.relu3_4(self.conv3_4(x)))) x = x + self.relu3_7(self.conv3_7(self.relu3_6(self.conv3_6(x)))) x = x + self.relu3_9(self.conv3_9(self.relu3_8(self.conv3_8(x)))) feat_outs.append(x) x = self.relu4_1(self.conv4_1(x)) x = x + self.relu4_3(self.conv4_3(self.relu4_2(self.conv4_2(x)))) feat_outs.append(x) x = x.view(x.size(0), -1) x = self.fc5(x) feat_outs.append(x) return feat_outs
nilq/baby-python
python
# Create your views here.
from django.conf import settings
from django.core.cache import cache
from django.db.models import Prefetch
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from rest_framework.generics import RetrieveAPIView, ListAPIView

from question.models import Question, Testcase
from question.permissions import IsQuestionAllowed, IsInTime, \
    IsQuestionListInTime
from question.serializers import QuestionDetailSerializer, \
    QuestionListSerializer


class QuestionDetail(RetrieveAPIView):
    """Retrieve a single question, exposing only its public test cases."""

    serializer_class = QuestionDetailSerializer
    lookup_url_kwarg = 'que_id'
    lookup_field = 'id'
    permission_classes = [IsQuestionAllowed, IsInTime]

    def get_queryset(self):
        # Prefetch only the public test cases so hidden ones are never
        # serialized and the serializer avoids an extra query per question.
        return Question.objects.all().prefetch_related(
            Prefetch('test_cases',
                     queryset=Testcase.objects.filter(is_public=True))
        )

    @method_decorator(cache_page(settings.CACHE_TTLS['QUESTION_DETAIL']))
    def get(self, request, *args, **kwargs):
        # retrieve() is a bound method: pass only the request.  The original
        # passed self twice (self.retrieve(self, request, ...)), handing the
        # view instance to retrieve() as its request argument.
        return self.retrieve(request, *args, **kwargs)


class QuestionList(ListAPIView):
    """List every question of a contest, cached per contest id."""

    serializer_class = QuestionListSerializer
    permission_classes = [IsQuestionListInTime]
    pagination_class = None

    def get_queryset(self):
        cache_key = 'contest-{}-questions'.format(self.kwargs['contest_id'])
        que_list = cache.get(cache_key)
        if not que_list:
            # NOTE(review): an empty queryset is falsy, so contests without
            # questions are re-fetched on every request — confirm intended.
            que_list = Question.objects.filter(
                contests__id=self.kwargs['contest_id']
            )
            cache.set(cache_key, que_list,
                      settings.CACHE_TTLS['CONTEST_QUESTIONS'])
        return que_list
nilq/baby-python
python
# Decoy library generation for DreamDIA: derives decoy precursors from the
# target spectral library (shuffle / pseudo_reverse / shift / reverse / mutate)
# so downstream scoring has matched negative examples for FDR estimation.
import os.path
import random
import multiprocessing
import pandas as pd
from utils import load_library, correct_full_sequence, get_precursor_indice, tear_library, flatten_list
from mz_calculator import calc_fragment_mz


def shuffle_seq(seq = None, seed = None):
    """Fisher-Yates algorithm. Modified from PECAN's decoyGenerator.py

    Deterministically shuffles ``seq`` (any sequence) for a given ``seed`` and
    returns the result as a tuple; returns None when ``seq`` is None.
    """
    if seq is None:
        return None
    else:
        l = list(seq)
        random.seed(seed)  # fixed seed so decoy generation is reproducible
        for i in range(len(l) - 1, 0, -1):
            j = int(random.random() * (i + 1))
            if i == j:
                continue
            else:
                (l[i], l[j]) = (l[j], l[i])
        return tuple(l)


def reverse(seq):
    # Full reversal of the sequence.
    return seq[::-1]


def shift_seq(seq):
    # Rotate the sequence by half its length (second half moved to the front).
    i = len(seq) // 2
    return seq[i::] + seq[:i:]


def mutate_seq(seq):
    """Mutate the 2nd and 2nd-to-last residues of a peptide.

    ``seq`` is a list of single-residue strings; first/last residues and the
    middle of the peptide are left unchanged.  The substitution table maps each
    amino acid to a chemically distinct replacement.
    """
    mutations = {"G" : "L", "A" : "L", "V" : "L", "L" : "V", "I" : "V",
                 "F" : "L", "M" : "L", "P" : "L", "W" : "L", "S" : "T",
                 "C" : "S", "T" : "S", "Y" : "S", "H" : "S", "K" : "L",
                 "R" : "L", "Q" : "N", "E" : "D", "N" : "Q", "D" : "E"}
    return [seq[0], mutations[seq[1]]] + seq[2:-2] + [mutations[seq[-2]], seq[-1]]


def get_mod_indice(sort_base):
    """Parse modifications out of a full sequence like ``AC(UniMod:4)D``.

    Returns ``(poses, mods)``: ``poses`` holds, for each modification, the
    0-based index of the residue it is attached to (an index into the *pure*
    sequence), and ``mods`` the corresponding "(...)" modification strings.
    """
    # cursor tracks the pure-sequence index of the residue just passed;
    # lock flags that we are currently inside a "(...)" modification.
    cursor, lock = -1, 0
    poses, mods = [], []
    for i, lett in enumerate(sort_base):
        if lett == "(":
            lock = 1
            poses.append(cursor)  # modification belongs to the preceding residue
            mod = ""
        elif lett == ")":
            lock = 0
            cursor -= 1  # cancels the += 1 below so cursor is net-unchanged by a mod
            mods.append(mod + ")")
        if not lock:
            cursor += 1
        else:
            mod += sort_base[i]
    return poses, mods


def decoy_generator(library, lib_cols, decoy_method, precursor_indice, original_colnames, result_collector, fixed_colnames, seed):
    """Worker: build decoy records for the precursors in ``precursor_indice``.

    Runs in a child process; results are appended (as one 9-element list of
    parallel columns plus the fixed-column DataFrame slice) to
    ``result_collector``, a multiprocessing.Manager list shared with the
    parent.  Precursors already flagged as decoys in the input are skipped.
    """
    product_mz, peptide_sequence, full_uniMod_peptide_name = [], [], []
    transition_group_id, decoy, protein_name = [], [], []
    transition_name, peptide_group_label = [], []
    valid_indice = []
    for idx, pep in enumerate(precursor_indice):  # idx unused; pep = row indices of one precursor
        target_record = library.iloc[pep, :]
        if ("decoy" in list(library.columns)) and (list(target_record["decoy"])[0] == 1):
            continue  # input row already is a decoy; do not derive a decoy from it
        valid_indice.extend(pep)
        target_fullseq = list(target_record[lib_cols["FULL_SEQUENCE_COL"]])[0]
        target_pureseq = list(target_record[lib_cols["PURE_SEQUENCE_COL"]])[0]
        if decoy_method in ["shuffle", "pseudo_reverse", "shift"]:
            # These methods keep the N-terminal UniMod:5 (if any) and the
            # C-terminal K/R (possibly modified with UniMod:259/267) in place,
            # rearranging only the interior residues.
            unimod5, KR_end, KR_mod_end = False, False, False
            sort_base = target_fullseq[:]
            if sort_base.startswith("(UniMod:5)"):
                unimod5 = True
                sort_base = sort_base[10:]  # strip "(UniMod:5)" (10 chars)
            if sort_base[-1] in ["K", "R"]:
                KR_end = sort_base[-1]
                sort_base = sort_base[:-1]
            elif (sort_base.endswith("(UniMod:259)") or sort_base.endswith("(UniMod:267)")):
                # 13 chars = terminal residue + its 12-char UniMod tag
                KR_mod_end = sort_base[-13:]
                sort_base = sort_base[:-13]
            mod_indice, mod_list = get_mod_indice(sort_base)
            if KR_end or KR_mod_end:
                pure_seq_list = [i for i in target_pureseq[:-1]]
            else:
                pure_seq_list = [i for i in target_pureseq]
            # seq_list carries each residue together with its modification tag
            # so mods travel with their residue during the rearrangement.
            seq_list = pure_seq_list[:]
            for mod_id, mod in zip(mod_indice, mod_list):
                seq_list[mod_id] += mod
            if decoy_method == "shuffle":
                shuffled_indice = shuffle_seq([i for i in range(len(seq_list))], seed = seed)
            elif decoy_method == "pseudo_reverse":
                shuffled_indice = reverse([i for i in range(len(seq_list))])
            elif decoy_method == "shift":
                shuffled_indice = shift_seq([i for i in range(len(seq_list))])
            decoy_fullseq = "".join([seq_list[i] for i in shuffled_indice])
            decoy_pureseq = "".join([pure_seq_list[i] for i in shuffled_indice])
            if unimod5:
                decoy_fullseq = "(UniMod:5)" + decoy_fullseq
            if KR_end:
                decoy_fullseq += KR_end
                decoy_pureseq += KR_end
            elif KR_mod_end:
                decoy_fullseq += KR_mod_end
                decoy_pureseq += KR_mod_end[0]  # pure sequence gets the bare residue
        elif decoy_method == "reverse":
            # Full reversal; only the N-terminal UniMod:5 is pinned in place.
            unimod5 = False
            sort_base = target_fullseq[:]
            if sort_base.startswith("(UniMod:5)"):
                unimod5 = True
                sort_base = sort_base[10:]
            mod_indice, mod_list = get_mod_indice(sort_base)
            pure_seq_list = [i for i in target_pureseq]
            seq_list = pure_seq_list[:]
            for mod_id, mod in zip(mod_indice, mod_list):
                seq_list[mod_id] += mod
            shuffled_indice = reverse([i for i in range(len(seq_list))])
            decoy_fullseq = "".join([seq_list[i] for i in shuffled_indice])
            decoy_pureseq = "".join([pure_seq_list[i] for i in shuffled_indice])
            if unimod5:
                decoy_fullseq = "(UniMod:5)" + decoy_fullseq
        elif decoy_method == "mutate":
            # Point mutation: residue order is preserved, so modifications
            # stay at their original positions.
            unimod5 = False
            sort_base = target_fullseq[:]
            if sort_base.startswith("(UniMod:5)"):
                unimod5 = True
                sort_base = sort_base[10:]
            mod_indice, mod_list = get_mod_indice(sort_base)
            pure_seq_list = [i for i in target_pureseq]
            mutated_pure_seq_list = mutate_seq(pure_seq_list)
            mutated_seq_list = mutated_pure_seq_list[:]
            for mod_id, mod in zip(mod_indice, mod_list):
                mutated_seq_list[mod_id] += mod
            decoy_fullseq = "".join(mutated_seq_list)
            decoy_pureseq = "".join(mutated_pure_seq_list)
            if unimod5:
                decoy_fullseq = "(UniMod:5)" + decoy_fullseq
        # Recompute fragment m/z for every fragment row of this precursor
        # against the decoy sequence.
        for charge, tp, series in zip(target_record[lib_cols["FRAGMENT_CHARGE_COL"]],
                                      target_record[lib_cols["FRAGMENT_TYPE_COL"]],
                                      target_record[lib_cols["FRAGMENT_SERIES_COL"]]):
            product_mz.append(calc_fragment_mz(decoy_fullseq, decoy_pureseq, charge, "%s%d" % (tp, series)))
            peptide_sequence.append(decoy_pureseq)
            full_uniMod_peptide_name.append(decoy_fullseq)
        if "transition_name" in original_colnames:
            transition_name.extend(["DECOY_" + list(target_record["transition_name"])[0]] * target_record.shape[0])
        if "PeptideGroupLabel" in original_colnames:
            peptide_group_label.extend(["DECOY_" + list(target_record["PeptideGroupLabel"])[0]] * target_record.shape[0])
        transition_group_id.extend(["DECOY_" + list(target_record[lib_cols["PRECURSOR_ID_COL"]])[0]] * target_record.shape[0])
        decoy.extend([1] * target_record.shape[0])
        protein_name.extend(["DECOY_" + list(target_record[lib_cols["PROTEIN_NAME_COL"]])[0]] * target_record.shape[0])
    result_collector.append([product_mz, peptide_sequence, full_uniMod_peptide_name,
                             transition_group_id, decoy, protein_name,
                             transition_name, peptide_group_label,
                             library.iloc[valid_indice, :].loc[:, fixed_colnames]])


def generate_decoys(lib, do_not_output_library, n_threads, seed, mz_min, mz_max, n_frags_each_precursor, decoy_method, logger):
    """Load a spectral library, filter it, and append generated decoys.

    Returns ``(lib_cols, library_with_decoys)``.  Unless
    ``do_not_output_library`` is set, the combined library is also written as
    a .tsv next to the input file.  Decoy generation is parallelized across
    ``n_threads`` processes via :func:`decoy_generator`.
    """
    # NOTE(review): assumes the input filename has a 3-char extension
    # (lib[:-4]) -- confirm upstream guarantees this.
    output_filename = os.path.join(os.path.dirname(lib), os.path.basename(lib)[:-4] + ".DreamDIA.with_decoys.tsv")
    lib_cols, library = load_library(lib)
    library = correct_full_sequence(library, lib_cols["PRECURSOR_ID_COL"], lib_cols["FULL_SEQUENCE_COL"])
    # Keep only precursors and fragments within the [mz_min, mz_max) window.
    library = library[(library[lib_cols["PRECURSOR_MZ_COL"]] >= mz_min) & (library[lib_cols["PRECURSOR_MZ_COL"]] < mz_max)]
    library = library[(library[lib_cols["FRAGMENT_MZ_COL"]] >= mz_min) & (library[lib_cols["FRAGMENT_MZ_COL"]] < mz_max)]
    library.index = [i for i in range(library.shape[0])]
    precursor_indice = get_precursor_indice(library[lib_cols["PRECURSOR_ID_COL"]])
    # Drop precursors that no longer have enough fragment rows after filtering.
    too_few_indice = flatten_list([i for i in precursor_indice if len(i) < n_frags_each_precursor])
    library.drop(too_few_indice, inplace = True)
    library.index = [i for i in range(library.shape[0])]
    precursor_indice, chunk_indice = tear_library(library, lib_cols, n_threads)
    original_colnames = list(library.columns)
    # Columns rewritten for decoy rows; everything else is copied verbatim.
    modifiable_colnames = [lib_cols["FRAGMENT_MZ_COL"], lib_cols["PURE_SEQUENCE_COL"],
                           lib_cols["FULL_SEQUENCE_COL"], lib_cols["PRECURSOR_ID_COL"],
                           lib_cols["PROTEIN_NAME_COL"], "transition_name", "decoy", "PeptideGroupLabel"]
    fixed_colnames = [i for i in original_colnames if i not in modifiable_colnames]
    if "decoy" in original_colnames:
        decoy_types = library["decoy"].value_counts()
        if 0 in decoy_types and 1 in decoy_types:
            # If the library already carries a substantial decoy fraction
            # (> 50% of targets), reuse it instead of generating new ones.
            if decoy_types[1] > 0.5 * decoy_types[0]:
                logger.info("The spectral library has enough decoys, so DreamDIA-XMBD will not generate more.")
                if not do_not_output_library:
                    library.to_csv(output_filename, sep = "\t", index = False)
                return lib_cols, library
    # Fan out decoy generation: one process per chunk, results gathered
    # through Manager-backed shared lists.
    generators = []
    mgr = multiprocessing.Manager()
    result_collectors = [mgr.list() for _ in range(n_threads)]
    for i, chunk_index in enumerate(chunk_indice):
        precursor_index = [precursor_indice[idx] for idx in chunk_index]
        p = multiprocessing.Process(target = decoy_generator,
                                    args = (library, lib_cols, decoy_method, precursor_index, original_colnames, result_collectors[i], fixed_colnames, seed, ))
        generators.append(p)
        p.daemon = True
        p.start()
    for p in generators:
        p.join()
    # Each collector holds exactly one 9-element result list (see
    # decoy_generator); unpack the parallel columns and re-assemble.
    product_mz = flatten_list([collector[0][0] for collector in result_collectors])
    peptide_sequence = flatten_list([collector[0][1] for collector in result_collectors])
    full_uniMod_peptide_name = flatten_list([collector[0][2] for collector in result_collectors])
    transition_group_id = flatten_list([collector[0][3] for collector in result_collectors])
    decoy = flatten_list([collector[0][4] for collector in result_collectors])
    protein_name = flatten_list([collector[0][5] for collector in result_collectors])
    transition_name = flatten_list([collector[0][6] for collector in result_collectors])
    peptide_group_label = flatten_list([collector[0][7] for collector in result_collectors])
    fixed_part = pd.concat([collector[0][8] for collector in result_collectors])
    modified_part = pd.DataFrame({lib_cols["FRAGMENT_MZ_COL"] : product_mz,
                                  lib_cols["PURE_SEQUENCE_COL"] : peptide_sequence,
                                  lib_cols["FULL_SEQUENCE_COL"] : full_uniMod_peptide_name,
                                  lib_cols["PRECURSOR_ID_COL"] : transition_group_id,
                                  lib_cols["DECOY_OR_NOT_COL"] : decoy,
                                  lib_cols["PROTEIN_NAME_COL"] : protein_name})
    if "transition_name" in original_colnames:
        modified_part["transition_name"] = transition_name
    if "PeptideGroupLabel" in original_colnames:
        modified_part["PeptideGroupLabel"] = peptide_group_label
    # Align row indices so the column-wise concat below pairs rows correctly.
    modified_part.index = [nn for nn in range(modified_part.shape[0])]
    fixed_part.index = [nn for nn in range(fixed_part.shape[0])]
    if "decoy" in original_colnames:
        decoy_data = pd.concat([modified_part, fixed_part], axis = 1).loc[:, original_colnames]
    else:
        decoy_data = pd.concat([modified_part, fixed_part], axis = 1).loc[:, original_colnames + ["decoy"]]
        library["decoy"] = [0 for _ in range(library.shape[0])]  # mark all targets
    library_with_decoys = pd.concat([library, decoy_data])
    # Sort so each precursor's fragments are grouped, strongest first.
    library_with_decoys = library_with_decoys.sort_values(by = [lib_cols["PRECURSOR_ID_COL"], lib_cols["LIB_INTENSITY_COL"]], ascending = [True, False])
    library_with_decoys.index = [i for i in range(library_with_decoys.shape[0])]
    if (not do_not_output_library) and (not os.path.exists(output_filename)):
        library_with_decoys.to_csv(output_filename, index = False, sep = "\t")
    return lib_cols, library_with_decoys
nilq/baby-python
python
#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2020-2021 Micron Technology, Inc. All rights reserved.

# Periodically snapshot the cn-tree shape of one or more HSE KVSes (queried
# over the HSE REST unix socket) into per-KVS YAML files.
import argparse
import datetime
import os
import time
import subprocess
import sys

import requests_unixsocket
import yaml

TZ_LOCAL = datetime.datetime.now(datetime.timezone.utc).astimezone().tzinfo


def dt():
    # Current local time as an ISO-8601 string; used as a log-line prefix.
    return datetime.datetime.now(tz=TZ_LOCAL).isoformat()


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--interval", "-i", type=int, default=10)
    parser.add_argument("--output-dir", "-d", default="cn_tree_shapes")
    # --kvs and --mpool are alternatives: either name KVSes explicitly or
    # enumerate every KVS of an mpool/KVDB.
    grp = parser.add_mutually_exclusive_group()
    grp.add_argument("--kvs", nargs="+")
    grp.add_argument("--mpool")
    args = parser.parse_args()

    if not args.kvs and not args.mpool:
        print("One of --kvs, or --mpool is required. Quitting.")
        sys.exit(1)

    # Refuse to overwrite a previous capture directory.
    if os.path.exists(args.output_dir):
        print("%s already exists. Quitting." % args.output_dir)
        sys.exit(1)

    print(args.mpool)
    if args.mpool:
        # Discover all KVSes of the given mpool via `hse kvdb list -v`
        # (YAML output with a "kvdbs" list, each entry carrying "kvslist").
        kvslist = []
        cmd = ["hse", "kvdb", "list", "-v"]
        cmd += [args.mpool]
        out = subprocess.check_output(cmd)
        data = yaml.safe_load(out)
        if "kvdbs" in data and data["kvdbs"]:
            for record in data["kvdbs"]:
                kvslist.extend(record["kvslist"])
    else:
        kvslist = args.kvs

    # Per-KVS REST URL, output directory, and socket path.  KVS names are
    # "kvdb/kvs" pairs.
    url = {}
    dirpath = {}
    sockpath = {}
    for kvs in kvslist:
        kvdbname, kvsname = kvs.split("/")
        # NOTE(review): crashes with AttributeError if HSE_REST_SOCK_PATH is
        # unset -- presumably the environment is expected to provide it.
        socket_path = os.getenv('HSE_REST_SOCK_PATH')
        # requests_unixsocket encodes the socket path into the URL host part,
        # hence the "/" -> "%2F" escaping.
        url[kvs] = "http+unix://%s/mpool/%s/kvs/%s/cn/tree" % (
            socket_path.replace("/", "%2F"),
            kvdbname,
            kvsname,
        )
        dirpath[kvs] = os.path.join(args.output_dir, kvdbname, kvsname)
        os.makedirs(dirpath[kvs])
        sockpath[kvs] = socket_path

    session = requests_unixsocket.Session()

    # Poll forever: one numbered YAML snapshot per KVS per interval.
    counter = 1
    while True:
        t1 = time.time()
        for kvs in kvslist:
            path = os.path.join(dirpath[kvs], "cn_tree_%06d.yaml" % counter)
            if os.path.exists(sockpath[kvs]):
                response = session.get(url[kvs])
            else:
                # Socket missing -> KVS (or KVDB) not open; skip this round.
                print(
                    "[%s] Iteration #%06d of KVS %s path %s does not exist, "
                    "KVS not open? Skipping."
                    % (dt(), counter, kvs, sockpath[kvs])
                )
                continue
            if response.text.startswith("Usage:"):
                # REST endpoint answered with its usage text instead of a
                # tree dump -- treat as "not open" and skip.
                print(
                    "[%s] Iteration #%06d of KVS %s returned usage message, "
                    "KVS not open? Skipping." % (dt(), counter, kvs)
                )
            else:
                print("[%s] Writing to path %s" % (dt(), path))
                with open(path, "w") as fp:
                    fp.write(response.text)
        # Sleep the remainder of the interval so iterations stay on a fixed
        # cadence regardless of how long the requests took.
        time.sleep(args.interval - ((time.time() - t1) % args.interval))
        counter += 1


if __name__ == "__main__":
    main()
nilq/baby-python
python
import csv
import six
import io
import json
import logging
# NOTE(review): ``collections.Mapping`` was removed in Python 3.10; this
# should be ``from collections.abc import Mapping`` on modern Pythons.
from collections import Mapping
from ..util import resolve_file_path

logger = logging.getLogger(__name__)
EPILOG = __doc__  # module docstring reused as CLI epilog, presumably -- verify against entry point


class MappingTableIntakeException(Exception):
    """ Raised when processing fails at the mapping-table intake stage
        due to an error with the table itself. """
    pass


class GeneTableIntakeException(Exception):
    """ Raised when processing fails at the gene-table intake stage
        due to an error with the table itself. """
    pass


class MappingTableHeader:
    """ Constant class describing the fixed structure of the mapping table
        (csv): which header row holds the field names and how each known
        field name is typed/handled during intake. """
    HEADER_ROW_INDEX = 2  # 0-based row holding the column names (rows 0-1 are version/date)
    # Fields parsed as ints.
    INTEGER_FIELDS = ['no', 'maximum_length_of_value', 'default', 'min', 'max']
    # Fields parsed as booleans ('Y' -> True, anything else -> False).
    BOOLEAN_FIELDS = ['is_list', 'calculated_property', 'embedded_field', 'do_import', 'add_no_value']
    # Fields copied through as strings.
    STRING_FIELDS = ['field_name', 'vcf_field', 'source_name', 'source_version', 'sub_embedding_group',
                     'annotation_category', 'separator', 'description', 'scope', 'schema_title', 'pattern', 'link',
                     'abbreviation']
    # Fields with bespoke handling during intake.
    SPECIAL_FIELDS = ['field_type', 'enum_list', 'links_to']
    # Fields parsed as comma-separated enum value lists (typed by field_type).
    ENUM_FIELDS = ['enum_list']
    # Fields present in the table but not imported.
    IGNORED_FIELDS = ['source', 'priority', 'annotation_space_location', 'comments', 'value_example']
    ALL_FIELDS = INTEGER_FIELDS + BOOLEAN_FIELDS + STRING_FIELDS + SPECIAL_FIELDS + ENUM_FIELDS + IGNORED_FIELDS


class VariantTableParser(object):
    """ Class that encapsulates data/functions related to the annotation field mapping table.
""" FIELD_TYPE_INDEX = 10 # XXX: hardcoded, must change if field_type is moved on mapping table EMBEDDED_VARIANT_FIELDS = resolve_file_path('schemas/variant_embeds.json') EMBEDDED_VARIANT_SAMPLE_FIELDS = resolve_file_path('schemas/variant_sample_embeds.json') # XXX: unused currently EMBEDS_TO_GENERATE = [('variant', EMBEDDED_VARIANT_FIELDS), ('variant_sample', EMBEDDED_VARIANT_SAMPLE_FIELDS)] NAME_FIELD = 'field_name' def __init__(self, _mp, schema, skip_embeds=False): self.mapping_table = _mp self.annotation_field_schema = json.load(io.open(schema, 'r')) self.version, self.date, self.fields = self.read_mp_meta() if not skip_embeds: # if calling from gene, do not wipe variant/variant_sample embeds self.provision_embeds() @staticmethod def process_fields(row): """ Takes in the row of field names and processes them. At this point fields are all lowercased and use underscores, such as 'field_name' Args: row: row of fields to be processed from the mapping table Raises: MappingTableIntakeException if a duplicate field is detected or no fields are detected Returns: list of fields """ fields = {} for name in row: if name not in fields: fields[name] = True else: raise MappingTableIntakeException('Found duplicate field in %s' % row) if not fields: raise MappingTableIntakeException('Did not find any fields on row %s' % row) return fields.keys() def provision_embeds(self): """ Does setup necessary for writing embeds to JSON files in the schemas directory Called by initializer based on EMBEDS_TO_GENERATE, overwrite this to control this functionality (for genes). """ for field, f in self.EMBEDS_TO_GENERATE: with io.open(f, 'w+') as fd: json.dump({field: {}}, fd, indent=4) def read_mp_meta(self): """ Reads mapping table from file given to class. First 3 rows of the mapping table contain this information. Version and Date are in the second column while fields are across the third row, as below: ,version=v1.5, ... ,date=12/1/2019, ... field1, field2, field3, ... 
Returns: 3 tuple - version, date, fields """ version, date, fields = None, None, None with io.open(self.mapping_table, 'r', encoding='utf-8-sig') as f: reader = csv.reader(f) for row_idx, row in enumerate(reader): if row_idx == 0: version = row[1].split('=')[1].strip() elif row_idx == 1: date = row[1].split('=')[1].strip() elif row_idx == 2: fields = self.process_fields(row) else: break # we are done with this step logger.info('Mapping table Version: %s, Date: %s\n' % (version, date)) logger.info('Mapping table fields: %s\n' % (", ".join(fields))) return version, date, fields def process_annotation_field_inserts(self): """ Processes the annotation fields in the mapping table to produce inserts Note that project and institution are required fields on the annotation field schema and are not set here :returns: list of annotation field inserts """ inserts = [] with io.open(self.mapping_table, 'r', encoding='utf-8-sig') as f: reader = csv.reader(f) for row_idx, row in enumerate(reader): insert = {} if row_idx <= MappingTableHeader.HEADER_ROW_INDEX: # skip header rows continue for field_name, entry in zip(self.fields, row): if field_name not in self.annotation_field_schema['properties'] or not entry: continue # IMPORTANT: skip entry not in field schema if field_name in MappingTableHeader.INTEGER_FIELDS: # handle int fields if entry is not None: # entry=0 is a normal value insert[field_name] = int(entry) elif field_name in MappingTableHeader.BOOLEAN_FIELDS: # handle bool fields if entry is not None: if entry == 'Y': insert[field_name] = True else: # assume False if anything other than 'Y' is present insert[field_name] = False elif field_name in MappingTableHeader.ENUM_FIELDS: # handle enum fields if entry is not None: field_type = row[self.FIELD_TYPE_INDEX] val_list = [] if field_type == 'string': val_list = [en.strip() for en in entry.split(',') if en.strip()] elif field_type == 'number': val_list = [float(en.strip()) for en in entry.split(',') if en.strip()] elif 
field_type == 'integer': val_list = [int(en.strip()) for en in entry.split(',') if en.strip()] insert[field_name] = val_list else: # handle all other fields with direct copy if they exist if field_name == 'pattern': # must decode escape characters insert[field_name] = entry.encode().decode('unicode-escape') else: insert[field_name] = entry inserts.append(insert) return inserts @staticmethod def filter_fields_by_sample(inserts): """ Returns annotation fields that belong on the sample variant schema :param inserts: annotation field inserts processed from above :return: only annotations fields that are part of the sample variant """ return [field for field in inserts if field.get('scope', '') == 'sample_variant'] @staticmethod def filter_fields_by_variant(inserts): """ Returns annotation fields that belong on the variant schema :param inserts: all raw annotation field inserts :return: only annotation fields that are part of the sample variant """ return [field for field in inserts if field.get('scope', '') == 'variant'] def update_embeds(self, item, scope): """ Updates the EMBEDDED_FIELDS location JSON containing the embeds for Variant. NOTE: the files are overwritten every time you run the process! :param item: embedded field to be written :param scope: which item type this embed is for """ # XXX: This does NOT work properly if for linkTos, embeds required .keyword! for t, f in self.EMBEDS_TO_GENERATE: if scope == t: with io.open(f, 'rb') as fd: embeds = json.load(fd) link_type = 'embedded_field' prefix = '' if item.get('sub_embedding_group', None): prefix = self.format_sub_embedding_group_name(item.get('sub_embedding_group'), t='key') + '.' 
if link_type not in embeds[t]: embeds[t][link_type] = [prefix + item[self.NAME_FIELD]] else: embeds[t][link_type].append(prefix + item[self.NAME_FIELD]) with io.open(f, 'w+') as wfd: json.dump(embeds, wfd, indent=4) wfd.write('\n') # write newline at EOF @staticmethod def format_sub_embedding_group_name(json_or_str, t='key'): """ Helper method that will extract the appropriate value from sub_embedding_group :param json_or_str: entry in mapping table, could be string or json, so we try both :param t: one of key or title :return: title that you wanted based on inputs """ if t not in ['key', 'title']: raise MappingTableIntakeException('Tried to parse sub_embedded_group with' 'key other than "key" or "title": %s ' % t) try: fmt = json.loads(json_or_str) except Exception: # just a string is given, use for both name and title return json_or_str else: return fmt[t] def generate_properties(self, inserts, variant=True): """ Generates variant/variant sample properties. :param inserts: result of one of the above two functions :param variant: whether or not we are generating variant props or sample_variant props :return: properties """ # TODO: refactor this process, as it is a little hard to follow - Will 1/21/2021 props = {} cols = {} facs = {} # inner functions to be used as helper def get_prop(item): if item.get('embedded_field', False): self.update_embeds(item, item.get('scope', 'gene')) # XXX: HACK - how to get around? 
-Will return {} if not item.get('do_import', True): # DROP fields that explicitly have do_import = False return {} temp = {} prop_name = item[self.NAME_FIELD] features = {} features.update({ "title": item.get('schema_title', prop_name), self.NAME_FIELD: prop_name, "type": item['field_type'] }) # handle fields where key changes directly if item.get('schema_description'): features['description'] = item['schema_description'] if item.get('links_to'): features['linkTo'] = item['links_to'] if item.get('enum_list'): features['enum'] = item['enum_list'] if item.get('field_priority'): features['lookup'] = item['field_priority'] # handle boolean fields for a_field in MappingTableHeader.BOOLEAN_FIELDS: if item.get(a_field) and a_field != 'is_list': features[a_field] = item[a_field] # handle string fields for a_field in MappingTableHeader.STRING_FIELDS: if item.get(a_field) is not None: features[a_field] = item[a_field] # handle int fields for a_field in MappingTableHeader.INTEGER_FIELDS: if item.get(a_field) is not None: features[a_field] = int(item[a_field]) # handle sub_embedded object if item.get('sub_embedding_group'): sub_temp = {} prop = {} sum_ob_name = self.format_sub_embedding_group_name(item['sub_embedding_group'], t='key') sub_title = self.format_sub_embedding_group_name(item['sub_embedding_group'], t='title') # handle sub-embedded object that is an array if item.get('is_list'): prop[prop_name] = { 'title': item.get(self.NAME_FIELD, 'None provided'), 'type': 'array', 'items': features } sub_temp.update({ 'title': sum_ob_name, 'type': 'array', 'items': { 'title': sub_title, 'type': 'object', 'properties': prop } }) else: prop[prop_name] = features sub_temp.update({ 'title': sub_title, 'type': 'array', 'items': { 'title': sub_title, 'type': 'object', 'properties': prop, } }) temp[sum_ob_name] = sub_temp return temp # convert to array structure if item.get('is_list'): array_item = {} array_item.update({ "title": item.get('schema_title', item[self.NAME_FIELD]), "type": 
"array", self.NAME_FIELD: item[self.NAME_FIELD] }) if item.get('schema_description'): array_item['description'] = item['schema_description'] array_item['items'] = features temp[prop_name] = array_item return temp else: temp[prop_name] = features return temp def update(d, u): for k, v in six.iteritems(u): dv = d.get(k, {}) if not isinstance(dv, Mapping): d[k] = v elif isinstance(v, Mapping): d[k] = update(dv, v) else: d[k] = v return d def is_variant(o): return o.get('scope') == 'variant' def is_sub_embedded_object(o): return o.get('sub_embedding_group') def is_facet(o): return o.get('facet_order', None) def is_column(o): return o.get('column_order') def is_link_to(o): return o.get('links_to') def is_numbered_field(o): return o.get('field_type') in ['integer', 'number'] def has_grouping(o): return o.get('annotation_category', False) def is_default_hidden(o): return o.get('facet_default_hidden', 'N') == 'Y' def insert_column_or_facet(d, o, facet=True): val = {'title': o.get('schema_title', o.get(self.NAME_FIELD))} if is_default_hidden(o): val['default_hidden'] = True if is_numbered_field(o) and is_facet(o): val['aggregation_type'] = 'stats' if "number_step" in o: val['number_step'] = o["number_step"] elif o['field_type'] == "integer": val['number_step'] = 1 else: # Default. Is assumed to be "any" on frontend if absent, # but adding 'documentation through redundancy', if such thing is a thing. val['number_step'] = "any" # add facet (or column) order/grouping if facet and is_facet(o) is not None: val['order'] = is_facet(o) if has_grouping(o) is not False: val['grouping'] = o.get('annotation_category') if not facet and is_column(o) is not None: val['order'] = is_column(o) if is_sub_embedded_object(o): if is_link_to(o): # add .display_title if we are a linkTo d[self.format_sub_embedding_group_name(o.get('sub_embedding_group')) + '.' + o[self.NAME_FIELD] + '.display_title'] = val else: d[self.format_sub_embedding_group_name(o.get('sub_embedding_group')) + '.' 
+ o[self.NAME_FIELD]] = val else: if is_link_to(o): d[o[self.NAME_FIELD] + '.display_title'] = val else: d[o[self.NAME_FIELD]] = val # go through all annotation objects generating schema properties and # adding columns/facets as defined by the mapping table for obj in inserts: update(props, get_prop(obj)) if variant: # we are doing variant, so take columns only from variant context if is_variant(obj): if is_facet(obj): insert_column_or_facet(facs, obj) if is_column(obj): insert_column_or_facet(cols, obj, facet=False) else: # we are doing variant_sample, so we should take columns/facets from BOTH if is_facet(obj): insert_column_or_facet(facs, obj) if is_column(obj): insert_column_or_facet(cols, obj, facet=False) if not props: raise MappingTableIntakeException('Got no properties on schema!') return props, cols, facs @staticmethod def add_default_schema_fields(schema): """ Adds default schema fields Args: schema: schema to add fields to """ schema['$schema'] = 'http://json-schema.org/draft-04/schema#' schema['type'] = 'object' schema['required'] = ['institution', 'project'] # for display_title schema['identifyingProperties'] = ['uuid', 'aliases', 'annotation_id'] schema['additionalProperties'] = False @staticmethod def add_variant_required_fields(schema): schema['required'].extend(['CHROM', 'REF', 'ALT', 'POS']) @staticmethod def add_variant_sample_required_fields(schema): schema['required'].extend(['CALL_INFO', 'variant', 'file']) @staticmethod def add_identifier_field(props): """ Adds the 'annotation_id' field, the unique_key constraint on variant/variant_sample which is an alias for the display_title. 
""" props['annotation_id'] = { 'title': 'Annotation ID', 'type': 'string', 'uniqueKey': True, } @staticmethod def add_extra_variant_sample_columns(cols): """ Adds href, variant display title to columns (fields not on mapping table) """ cols['display_title'] = { "title": "Position", "order": 0, "sort_fields" : [ { "field" : "variant.display_title", "title" : "Variant Display Title" }, { "field" : "variant.csq_rs_dbsnp151", "title": "dbSNP RS Number" } ] } cols['bam_snapshot'] = { "title": 'Genome Snapshot', "order": 81 } cols["associated_genotype_labels.proband_genotype_label"] = { "title": "Genotype", "order": 39, "sort_fields": [ { "field": "associated_genotype_labels.proband_genotype_label", "title": "Proband GT" }, { "field": "associated_genotype_labels.mother_genotype_label", "title": "Mother GT" }, { "field": "associated_genotype_labels.father_genotype_label", "title": "Father GT" } ] } # Redundant - display_title column renders this as well. # cols['variant.display_title'] = { # 'title': 'Variant', # } @staticmethod def extend_variant_sample_columns(cols): if "variant.genes.genes_most_severe_gene.display_title" in cols: # We combine `genes_most_severe_gene` + `genes_most_severe_transcript` columns in the UI column render func for compactness. cols["variant.genes.genes_most_severe_gene.display_title"].update({ "title": "Gene, Transcript", "sort_fields": [ { "field": "variant.genes.genes_most_severe_gene.display_title", "title": "Gene" }, { "field": "variant.genes.genes_most_severe_transcript", "title": "Most Severe Transcript" } ] }) if "DP" in cols: # We combine `DP` + `AF` columns in the UI column render func for compactness. cols["DP"].update({ "title": "Coverage, VAF", "sort_fields": [ { "field": "DP", "title": "Coverage" }, { "field": "AF", "title": "VAF" } ] }) if "variant.csq_gnomadg_af" in cols: # We combine `csq_gnomadg_af` + `csq_gnomadg_af_popmax` columns in the UI column render func for compactness. 
cols["variant.csq_gnomadg_af"].update({ "title" : "gnomAD", "sort_fields": [ { "field": "variant.csq_gnomadg_af", "title": "gnomad AF" }, { "field": "variant.csq_gnomadg_af_popmax", "title": "gnomad AF Population Max" } ] }) if "variant.csq_cadd_phred" in cols: cols["variant.csq_cadd_phred"].update({ "title": "Predictors", "sort_fields": [ { "field": "variant.csq_cadd_phred", "title": "Cadd Phred Score" }, { "field": "variant.spliceaiMaxds", "title": "SpliceAI Max DS"}, { "field": "variant.csq_phylop100way_vertebrate", "title": "PhyloP 100 Score"} ] }) if "variant.genes.genes_most_severe_hgvsc" in cols: cols["variant.genes.genes_most_severe_hgvsc"].update({ "title": "Variant", "sort_fields": [ { "field": "variant.genes.genes_most_severe_hgvsc", "title": "Coding Sequence" }, { "field": "variant.genes.genes_most_severe_hgvsp", "title": "Protein Sequence" } ] }) # Default Hidden Columns: if "variant.csq_clinvar" in cols: cols["variant.csq_clinvar"].update({ "default_hidden": True }) if "GT" in cols: cols["GT"].update({ "default_hidden": True }) @staticmethod def add_extra_variant_sample_facets(facs): """ Order of a Facet Group within top-level FacetList is determined by `min(grouped facet 1, grouped facet 2, ...)` which is then used for sorting relative to all other top-level facets' and facet groups' orders. Facets within a group are sorted relative to each other. 
""" facs["variant.genes.genes_most_severe_gene.display_title"] = { "title": "Gene", "order": 1, "grouping": "Genes", "search_type": "sayt_without_terms", # Enables search-as-you-type via AJAX (SAYT-AJAX) for this facet "sayt_item_type": "Gene" # Required if "search_type" == "sayt_without_terms" } facs["variant.genes.genes_most_severe_gene.gene_lists.display_title"] = { "title": "Gene List", "order": 2, "grouping": "Genes", "description": "Groups of genes that are relevant for a disease or condition" } facs['inheritance_modes'] = { 'title': 'Inheritance Modes', 'order': 15, } # Range facets using range aggregation_type (ranges will be defined from Q2Q tab in future) facs['variant.csq_gnomadg_af'] = { "title": "GnomAD Alt Allele Frequency", "aggregation_type": "range", "number_step": "any", "order": 18, "grouping": "Population Frequency", "ranges": [ { "from": 0, "to": 0, "label": "unobserved" }, { "from": 0, "to": 0.001, "label": "ultra-rare" }, { "from": 0.001, "to": 0.01, "label": "rare" }, { "from": 0.01, "to": 1, "label": "common" } ] } facs['variant.csq_gnomadg_af_popmax'] = { "title": "GnomAD Alt AF - PopMax", "aggregation_type": "range", "number_step": "any", "order": 19, "grouping": "Population Frequency", "ranges": [ { "from": 0, "to": 0, "label": "unobserved" }, { "from": 0, "to": 0.001, "label": "ultra-rare" }, { "from": 0.001, "to": 0.01, "label": "rare" }, { "from": 0.01, "to": 1, "label": "common" } ] } facs['variant.csq_phylop100way_vertebrate'] = { "title": "PhyloP (100 Vertebrates)", "aggregation_type": "range", "number_step": "any", "order": 22, "grouping": "Effect Predictors", "ranges": [ { "from": -20, "to": -3, "label": "strong positive selection" }, { "from": -3, "to": -2, "label": "positive selection" }, { "from": -2, "to": 2, "label": "low selection" }, { "from": 2, "to": 3, "label": "conserved" }, { "from": 3, "to": 10, "label": "highly conserved"} ] } facs['FS'] = { "title": "Strand Fisher Score", "aggregation_type": "range", "number_step": 
"any", "order": 12, "grouping": "Variant Quality", "ranges": [ { "to": 20, "label": "Low Strand Bias (P ≥ 0.01)" }, { "from": 20, "label": "High Strand Bias (P < 0.01)" } ] } facs['AD_ALT'] = { "title": "AD (Alt)", "aggregation_type": "range", "number_step": 1, "order": 10, "grouping": "Variant Quality", "ranges": [ { "from": 1, "to": 4, "label": "Very Low" }, { "from": 5, "to": 9, "label": "Low" }, { "from": 10, "to": 19, "label": "Medium" }, { "from": 20, "label": "High" } ] } facs['novoPP'] = { "title": "novoCaller PP", "aggregation_type": "range", "number_step": "any", "order": 16, "grouping": "Genotype", "ranges": [ { "from": 0.1, "to": 0.9, "label": "de novo candidate (weak)" }, { "from": 0.9, "to": 1, "label": "de novo candidate (strong)" } ] } # Genotype labels (calculated properties) facs.update({ "associated_genotype_labels.proband_genotype_label": { "title": "Proband Genotype", "order": 12, "grouping": "Genotype" }, "associated_genotype_labels.mother_genotype_label": { "title": "Mother Genotype", "order": 13, "grouping": "Genotype", "default_hidden": True }, "associated_genotype_labels.father_genotype_label": { "title": "Father Genotype", "order": 14, "grouping": "Genotype", "default_hidden": True }, # Below facets are default-hidden unless e.g. additional_facet=associated_genotype_labels.co_parent_genotype_label # URL param is supplied in filter block flags or search href. 
"associated_genotype_labels.co_parent_genotype_label": { "title": "Co-Parent Genotype", "order": 1000, "grouping": "Genotype", "default_hidden": True }, "associated_genotype_labels.sister_genotype_label": { "title": "Sister Genotype", "order": 1001, "grouping": "Genotype", "default_hidden": True }, "associated_genotype_labels.sister_II_genotype_label": { "title": "Sister II Genotype", "order": 1002, "grouping": "Genotype", "default_hidden": True }, "associated_genotype_labels.sister_III_genotype_label": { "title": "Sister III Genotype", "order": 1003, "grouping": "Genotype", "default_hidden": True }, "associated_genotype_labels.sister_IV_genotype_label": { "title": "Sister IV Genotype", "order": 1004, "grouping": "Genotype", "default_hidden": True }, "associated_genotype_labels.brother_genotype_label": { "title": "Brother Genotype", "order": 1005, "grouping": "Genotype", "default_hidden": True }, "associated_genotype_labels.brother_II_genotype_label": { "title": "Brother II Genotype", "order": 1006, "grouping": "Genotype", "default_hidden": True }, "associated_genotype_labels.brother_III_genotype_label": { "title": "Brother III Genotype", "order": 1007, "grouping": "Genotype", "default_hidden": True }, "associated_genotype_labels.brother_IV_genotype_label": { "title": "Brother IV Genotype", "order": 1008, "grouping": "Genotype", "default_hidden": True }, "associated_genotype_labels.daughter_genotype_label": { "title": "Daughter Genotype", "order": 1009, "grouping": "Genotype", "default_hidden": True }, "associated_genotype_labels.daughter_II_genotype_label": { "title": "Daughter II Genotype", "order": 1010, "grouping": "Genotype", "default_hidden": True }, "associated_genotype_labels.daughter_III_genotype_label": { "title": "Daughter III Genotype", "order": 1011, "grouping": "Genotype", "default_hidden": True }, "associated_genotype_labels.daughter_IV_genotype_label": { "title": "Daughter IV Genotype", "order": 1012, "grouping": "Genotype", "default_hidden": True 
}, "associated_genotype_labels.son_genotype_label": { "title": "Son Genotype", "order": 1013, "grouping": "Genotype", "default_hidden": True }, "associated_genotype_labels.son_II_genotype_label": { "title": "Son II Genotype", "order": 1014, "grouping": "Genotype", "default_hidden": True }, "associated_genotype_labels.son_III_genotype_label": { "title": "Son III Genotype", "order": 1015, "grouping": "Genotype", "default_hidden": True }, "associated_genotype_labels.son_IV_genotype_label": { "title": "Son IV Genotype", "order": 1016, "grouping": "Genotype", "default_hidden": True } }) @staticmethod def extend_variant_sample_facets(facs): pass def generate_variant_sample_schema(self, sample_props, cols, facs, variant_cols, variant_facs): """ Builds the variant_sample.json schema based on sample_props. Will also add variant columns and facets since this information is embedded. Args: sample_props: first output of generate_properties Returns: Variant sample schema """ schema = {} self.add_default_schema_fields(schema) self.add_variant_sample_required_fields(schema) schema['title'] = 'Sample Variant' schema['description'] = "Schema for variant info for sample" schema['id'] = '/profiles/variant_sample.json' schema['mixinProperties'] = [ {"$ref": "mixins.json#/schema_version"}, {"$ref": "mixins.json#/uuid"}, {"$ref": "mixins.json#/aliases"}, {"$ref": "mixins.json#/submitted"}, {"$ref": "mixins.json#/modified"}, {"$ref": "mixins.json#/status"}, {"$ref": "mixins.json#/attribution"}, {"$ref": "mixins.json#/notes"}, {"$ref": "mixins.json#/static_embeds"}, ] schema['properties'] = sample_props schema['properties']['schema_version'] = {'default': '1'} schema['properties']['variant'] = { # link to single variant 'title': 'Variant', 'type': 'string', 'linkTo': 'Variant', } schema['properties']['gene_notes'] = { 'title': 'Gene Notes', 'description': 'Note item related to this Gene', 'type': 'string', 'linkTo': 'NoteStandard' } schema['properties']['variant_notes'] = { 'title': 
'Variant Notes', 'description': 'Notes related to the relevant Variant', 'type': 'string', 'linkTo': 'NoteStandard' } schema['properties']['interpretation'] = { 'title': 'Clinical Interpretation', 'description': 'Clinical Interpretation Note connected to this item', 'type': 'string', 'linkTo': 'NoteInterpretation' } schema['properties']['discovery_interpretation'] = { 'title': 'Discovery Interpretation', 'description': 'Gene/Variant Discovery interpretation note connected to this item', 'type': 'string', 'linkTo': 'NoteDiscovery' } schema['properties']['file'] = { # NOT a linkTo as the ID is sufficient for filtering 'title': 'File', 'description': 'String Accession of the vcf file used in digestion', 'type': 'string', } schema['properties']['bam_snapshot'] = { 'title': 'Genome Snapshot', 'description': 'Link to Genome Snapshot Image', 'type': 'string', } schema['properties']['genotype_labels'] = { 'title': 'Genotype Labels', 'type': 'array', 'items': { 'type': 'object', 'properties': { 'role': { 'title': 'Role', 'type': 'string', }, 'labels': { 'title': 'Genotype Labels', 'type': 'array', 'items': { 'type': 'string' } } } } } schema['properties']['inheritance_modes'] = { 'title': 'Inheritance Modes', 'type': 'array', 'items': { 'type': 'string' } } schema['properties']['samplegeno']['items']['properties']['samplegeno_role'] = { # noqa structure is there 'title': 'Familial Relation', 'description': 'Relationship of the person who submitted this sample relative to the proband', 'type': 'string', 'suggested_enum': ['proband', 'father', 'mother', 'brother', 'sister', 'sibling', 'half-brother', 'half-sister', 'half-sibling', 'wife', 'husband', 'son', 'daughter', 'child', 'grandson', 'granddaughter', 'grandchild', 'grandmother', 'family-in-law', 'extended-family', 'not linked'], } schema['properties']['samplegeno']['items']['properties']['samplegeno_sex'] = { # noqa structure is there 'title': 'Sex', 'description': 'Sex of the donor of this sample ID', 'type': 'string', 
'enum': ['M', 'F', 'U'], # XXX: what others should be included? } # adds annotation ID field, effectively making display_title a primary key constraint self.add_identifier_field(schema['properties']) # helper so variant facets work on variant sample # XXX: Behavior needs testing def format_variant_cols_or_facs(d): cp = {} for k, v in d.items(): cp['variant.' + k] = v return cp variant_cols = format_variant_cols_or_facs(variant_cols) variant_facs = format_variant_cols_or_facs(variant_facs) cols.update(variant_cols) # add variant stuff since we are embedding this info facs.update(variant_facs) self.add_extra_variant_sample_columns(cols) self.extend_variant_sample_columns(cols) self.add_extra_variant_sample_facets(facs) self.extend_variant_sample_facets(facs) schema['columns'] = cols schema['facets'] = facs schema['facets'] = self.sort_schema_properties(schema, key='facets') schema['columns'] = self.sort_schema_properties(schema, key='columns') logger.info('Built variant_sample schema') return schema def generate_variant_schema(self, var_props, cols, facs): """ Builds the variant.json schema based on var_props Args: var_props: first output of generate_properties for variant cols: second output of generate_properties for variant facs: third output of generate_properties for variant Returns: Variant schema """ schema = {} self.add_default_schema_fields(schema) self.add_variant_required_fields(schema) schema['title'] = 'Variants' schema['description'] = "Schema for variants" schema['id'] = '/profiles/variant.json' schema['mixinProperties'] = [ {"$ref": "mixins.json#/schema_version"}, {"$ref": "mixins.json#/uuid"}, {"$ref": "mixins.json#/aliases"}, {"$ref": "mixins.json#/submitted"}, {"$ref": "mixins.json#/modified"}, {"$ref": "mixins.json#/status"}, {"$ref": "mixins.json#/attribution"}, {"$ref": "mixins.json#/notes"}, {"$ref": "mixins.json#/interpretation"}, {"$ref": "mixins.json#/static_embeds"}, ] schema['properties'] = var_props schema['properties']['hg19'] = { # 
required for testing :( - will 1-8-2021 "title": "hg19 Coordinates", "type": "array", "items": { "title": "hg19 Coordinates", "enable_nested": True, "type": "object", "properties": { "hg19_hgvsg": { "title": "Variant", "field_name": "hg19_hgvsg", "type": "string", "description": "HGVS genome sequence name (hg19)", }, "hg19_chrom": { "title": "Chromosome (hg19)", "field_name": "hg19_chrom", "type": "string", "description": "hg19 coordinate chromosome", }, "hg19_pos": { "title": "Position (hg19)", "field_name": "hg19_pos", "type": "integer", "description": "hg19 coordinate position", } } } } schema['properties']['variant_notes'] = { "title": "Variant Notes", "description": "Notes related to this Variant", "type": "array", "items": { "title": "Variant Note", "type": "string", "linkTo": "NoteStandard" } } schema['properties']['schema_version'] = {'default': '2'} schema['facets'] = facs schema['columns'] = cols schema['facets'] = self.sort_schema_properties(schema, key='facets') schema['columns'] = self.sort_schema_properties(schema, key='columns') # adds annotation ID field, effectively making display_title a primary key constraint self.add_identifier_field(schema['properties']) logger.info('Build variant schema') return schema @staticmethod def sort_schema_properties(schema, key='properties'): """ Helper method that sorts schema properties by key by inserting sorted key, values into a new dictionary (since in Python3.6>= all dicts are ordered). Schemas from this point forward will have their properties sorted alphabetically so it is easier to visualize changes. 
Args:
            schema: schema with key 'properties' to be sorted
            key: optional arg to use as key to resolve dictionary to sort, intended to allow us to
                 sort properties, columns and facets
        """
        # Rebuild the dict in sorted key order (insertion order is preserved in py3.6+)
        sorted_properties = {}
        for key, value in sorted(schema[key].items()):
            sorted_properties[key] = value
        return sorted_properties

    def write_schema(self, schema, fname):
        """ Writes the given schema (JSON) to the given file 'fname'

        Args:
            schema: dictionary to write as json as the schema
            fname: file to write out to
        """
        schema['properties'] = self.sort_schema_properties(schema)
        with io.open(fname, 'w+') as out:
            json.dump(schema, out, indent=4)
        logger.info('Successfully wrote schema: %s to file: %s\n' % (schema['title'], fname))

    def run(self, vs_out=None, v_out=None, institution=None, project=None, write=True):
        """ Runs the mapping table intake program, generates and writes schemas
            and returns inserts to be posted in main

        Args:
            vs_out: where to write variant_sample schema
            v_out: where to write variant schema
            institution: what institution to attach to these inserts
            project: what project to attach to these inserts
            write: whether to write the schemas - default True

        Returns:
            inserts: annotation field inserts
        """
        inserts = self.process_annotation_field_inserts()
        variant_sample_props, _, _ = self.generate_properties(self.filter_fields_by_sample(inserts), variant=False)
        variant_props, _, _ = self.generate_properties(self.filter_fields_by_variant(inserts))
        # as of 3/9/2021, this is now just the 'properties' of the schema
        # columns/facets are edited directly - they are read in here from the
        # output location (read in schema/overwrite when done, don't touch columns/facets)
        new_variant_sample_schema = self.generate_variant_sample_schema(variant_sample_props,
                                                                        cols={}, facs={},
                                                                        variant_cols={},
                                                                        variant_facs={})
        new_variant_schema = self.generate_variant_schema(variant_props, cols={}, facs={})
        if write:
            if not vs_out or not v_out:
                raise MappingTableIntakeException('Write specified but no output file given')
            # Read/replace columns/facets and update properties
            # NOTE: This will not function correctly if you wipe the schemas!
            # Although this isn't ideal, I'm not convinced it's a good use of time to do
            # the refactoring necessary to pull the column/facet logic out. It's much easier
            # to just ignore that info.
            variant_sample_schema = json.load(io.open(vs_out))
            new_variant_sample_schema['facets'] = variant_sample_schema['facets']
            new_variant_sample_schema['columns'] = variant_sample_schema['columns']
            self.write_schema(new_variant_sample_schema, vs_out)
            variant_schema = json.load(io.open(v_out))
            new_variant_schema['facets'] = variant_schema['facets']
            new_variant_schema['columns'] = variant_schema['columns']
            self.write_schema(new_variant_schema, v_out)
            logger.info('Successfully wrote schemas')
        if project or institution:
            # Stamp attribution on every insert before returning them for posting
            for insert in inserts:
                if project:
                    insert['project'] = project
                if institution:
                    insert['institution'] = institution
        return inserts


class StructuralVariantTableParser(VariantTableParser):
    """
    Subclass of VariantTableParser used for intake of SV mapping table.
    Main differences from the parent class are:
        - Explicitly updates methods only related to "properties" field of the
          relevant schema; all other fields in schema will be same as in
          existing schema.
        - Searches schema "properties" objects and embedded objects for field
          indicative of the property coming from the mapping table as implied
          by presence of VCF_FIELD_KEY
        - All "properties" objects that do not come from the mapping table are
          included in the new schema, while those from previous mapping table
          ingestion are dropped and will only be re-generated if present in
          current mapping table.
    """

    SV_SCHEMA_PATH = resolve_file_path("schemas/structural_variant.json")
    SV_SAMPLE_SCHEMA_PATH = resolve_file_path("schemas/structural_variant_sample.json")
    EMBEDDED_VARIANT_FIELDS = resolve_file_path("schemas/structural_variant_embeds.json")
    EMBEDDED_VARIANT_SAMPLE_FIELDS = resolve_file_path(
        "schemas/structural_variant_sample_embeds.json"
    )
    EMBEDS_TO_GENERATE = [
        ("variant", EMBEDDED_VARIANT_FIELDS),
        ("variant_sample", EMBEDDED_VARIANT_SAMPLE_FIELDS),
    ]
    VCF_FIELD_KEY = "vcf_field"  # marks props generated by mapping-table ingestion

    def __init__(self, *args, **kwargs):
        super(StructuralVariantTableParser, self).__init__(*args, **kwargs)
        # Filled in by get_vcf_props() below
        self.sv_non_vcf_props = {}
        self.sv_sample_non_vcf_props = {}
        self.get_vcf_props()

    @property
    def old_sv_schema(self):
        """Explicit property for easier mocking."""
        return json.load(io.open(self.SV_SCHEMA_PATH))

    @property
    def old_sv_sample_schema(self):
        """Explicit property for easier mocking."""
        return json.load(io.open(self.SV_SAMPLE_SCHEMA_PATH))

    def get_vcf_props(self):
        """
        Searches through existing SV and SV sample schemas to identify existing
        "properties" objects that did not come from previous mapping table
        ingestion, as indicated by lack of VCF_FIELD_KEY on the object.

        Updates self.sv_non_vcf_props and self.sv_sample_non_vcf_props dicts
        with keys as top-level "properties" fields to keep and values as list
        of sub-embedded fields to keep if applicable.

        Expects sub-embedded objects from previous mapping table ingestion to
        be one-layer deep, e.g. an array of objects that are not themselves
        arrays of objects.

        NOTE: This will obviously fail if the VCF_FIELD_KEY is dropped from
        the mapping table.
""" for key, value in self.old_sv_schema["properties"].items(): vcf_field = self._is_vcf_field(key, value) if not vcf_field: self.sv_non_vcf_props[key] = "" else: sub_embeds_to_keep = self._collect_non_vcf_sub_embeds(key, value) if sub_embeds_to_keep: self.sv_non_vcf_props[key] = sub_embeds_to_keep for key, value in self.old_sv_sample_schema["properties"].items(): vcf_field = self._is_vcf_field(key, value) if not vcf_field: self.sv_sample_non_vcf_props[key] = "" else: sub_embeds_to_keep = self._collect_non_vcf_sub_embeds(key, value) if sub_embeds_to_keep: self.sv_sample_non_vcf_props[key] = sub_embeds_to_keep def _is_vcf_field(self, key, value): """ Helper function to self.get_vcf_props() to identify "properties" fields that stem from previous mapping table ingestion as indicated by VCF_FIELD_KEY. :param key: str field name :param value: dict corresponding to key :return result: bool if key corresponds to a vcf field """ result = False item_type = value.get("type", "") vcf_field = value.get(self.VCF_FIELD_KEY, "") if not vcf_field: if item_type == "array": item_dict = value["items"] if "properties" in item_dict: # Array of objects for item_key, item_value in item_dict["properties"].items(): result = self._is_vcf_field(item_key, item_value) if result: break else: result = self._is_vcf_field(key, item_dict) else: result = True return result def _collect_non_vcf_sub_embeds(self, key, value): """ Helper function to self.get_vcf_props that collects non-vcf fields nested within an object that contains at least one vcf field. 
:param key: str field name :param value: dict corresponding to key :return result: list of nested non-vcf fields """ result = [] item_type = value.get("type", "") vcf_field = value.get(self.VCF_FIELD_KEY, "") if not vcf_field: if item_type == "array": item_dict = value["items"] if "properties" in item_dict: # Array of objects for item_key, item_value in item_dict["properties"].items(): sub_item_type = item_value.get("type", "") sub_item_vcf_field = item_value.get(self.VCF_FIELD_KEY, "") if sub_item_type == "array": sub_item_vcf_field = ( item_value["items"].get(self.VCF_FIELD_KEY, "") ) if not sub_item_vcf_field: result.append(item_key) return result def provision_embeds(self): """ Does setup necessary for writing embeds to JSON files in the schemas directory. Called by initializer based on EMBEDS_TO_GENERATE. """ for field, f in self.EMBEDS_TO_GENERATE: field = "structural_" + field with open(f, 'w+') as fd: json.dump({field: {}}, fd, indent=4) def update_embeds(self, item, scope): """ Updates the EMBEDDED_FIELDS location JSON containing the embeds for structural variant. NOTE: the files are overwritten every time you run the process! :param item: embedded field to be written :param scope: which item type this embed is for """ # XXX: This does NOT work properly if for linkTos, embeds required .keyword! for t, f in self.EMBEDS_TO_GENERATE: if scope == t: t = "structural_" + t with open(f, 'rb') as fd: embeds = json.load(fd) link_type = 'embedded_field' prefix = '' if item.get('sub_embedding_group', None): prefix = self.format_sub_embedding_group_name( item.get('sub_embedding_group'), t='key' ) + '.' 
if link_type not in embeds[t]: embeds[t][link_type] = [prefix + item[self.NAME_FIELD]] else: embeds[t][link_type].append(prefix + item[self.NAME_FIELD]) with open(f, 'w+') as wfd: json.dump(embeds, wfd, indent=4) wfd.write('\n') # write newline at EOF @staticmethod def generate_schema(var_props, old_schema, props_to_keep): """ Generate new schema by updating the properties of the old schema according to the new mapping table, leaving the remainder of the schema the same. :param var_props: dict of new props from mapping table ingested :param old_schema: dict of existing schema :param props_to_keep: dict of non-vcf fields and sub-embedded non-vcf fields to keep, if applicable :return schema: dict of updated schema with "properties" field containing all new props and existing non-vcf fields """ schema = {} old_schema_props = old_schema["properties"] for field in props_to_keep: sub_embeds_to_keep = props_to_keep[field] if not sub_embeds_to_keep: var_props[field] = old_schema_props[field] else: if field in var_props: try: var_prop_sub_embeds = var_props[field]["items"]["properties"] old_schema_prop_sub_embeds = ( old_schema_props[field]["items"]["properties"] ) for sub_embed in sub_embeds_to_keep: var_prop_sub_embeds[sub_embed] = ( old_schema_prop_sub_embeds[sub_embed] ) except KeyError: # Field went from array of objects to other type, so don't # attempt to sub-embed previous non-vcf fields continue else: var_props[field] = old_schema_props[field] tmp = [ key for key in old_schema_props[field]["items"]["properties"] ] for sub_embed in tmp: if sub_embed not in sub_embeds_to_keep: del var_props[field]["items"]["properties"][sub_embed] for key in old_schema: if key == "properties": schema["properties"] = var_props else: schema[key] = old_schema[key] return schema def run(self, project=None, institution=None, write=True): """ Runs mapping table intake for SVs, writing new 'properties' fields for structural variants and structural variant samples. 
:param project: str project identifier
        :param institution: str institution identifier
        :param write: bool to write new schema
        :return inserts: list of dicts corresponding to props of ingested
            mapping table
        """
        inserts = self.process_annotation_field_inserts()
        sv_props, _, _ = self.generate_properties(
            self.filter_fields_by_variant(inserts)
        )
        sv_sample_props, _, _ = self.generate_properties(
            self.filter_fields_by_sample(inserts), variant=False
        )
        new_sv_schema = self.generate_schema(
            sv_props, self.old_sv_schema, self.sv_non_vcf_props
        )
        new_sv_sample_schema = self.generate_schema(
            sv_sample_props, self.old_sv_sample_schema, self.sv_sample_non_vcf_props
        )
        if write:
            self.write_schema(new_sv_schema, self.SV_SCHEMA_PATH)
            self.write_schema(new_sv_sample_schema, self.SV_SAMPLE_SCHEMA_PATH)
            logger.info("Successfully wrote schemas")
        if project or institution:
            # Stamp attribution on every insert before returning them for posting
            for insert in inserts:
                if project:
                    insert['project'] = project
                if institution:
                    insert['institution'] = institution
        return inserts


class GeneTableParser(VariantTableParser):
    """
    Subclass of MappingTableParser that overrides methods required for
    any differences across tables.
    """

    def __init__(self, *args, **kwargs):
        self.FIELD_TYPE_INDEX = 8
        kwargs['skip_embeds'] = True  # do not clear embeds when running gene intake
        super(GeneTableParser, self).__init__(*args, **kwargs)

    @staticmethod
    def add_default_schema_fields(schema):
        """ Adds default schema fields

        Args:
            schema: schema to add fields to
        """
        schema['$schema'] = 'http://json-schema.org/draft-04/schema#'
        schema['type'] = 'object'
        schema['required'] = ['institution', 'project', 'gene_symbol', 'ensgid']
        schema['identifyingProperties'] = ['uuid', 'aliases']
        schema['additionalProperties'] = False
        schema['mixinProperties'] = [
            {"$ref": "mixins.json#/schema_version"},
            {"$ref": "mixins.json#/uuid"},
            {"$ref": "mixins.json#/aliases"},
            {"$ref": "mixins.json#/submitted"},
            {"$ref": "mixins.json#/modified"},
            {"$ref": "mixins.json#/status"},
            {"$ref": "mixins.json#/attribution"},
            {"$ref": "mixins.json#/notes"},
            {"$ref": "mixins.json#/static_embeds"},
            {"$ref": "mixins.json#/interpretation"}
        ]

    def generate_gene_schema(self, gene_props, columns, facets):
        """
        Builds gene.json schema based on gene_props

        :param gene_props: dictionary of 'properties' based on the gene fields
        :param columns: columns to attach
        :param facets: facets to compute
        :return: gene schema
        """
        schema = {}
        self.add_default_schema_fields(schema)
        schema['title'] = 'Genes'
        schema['description'] = "Schema for Genes"
        schema['id'] = '/profiles/gene.json'
        gene_props['ensgid']['uniqueKey'] = True  # XXX: This is required for genes
        schema['properties'] = gene_props
        schema['properties']['schema_version'] = {'default': '1'}
        schema['properties']['gene_notes'] = {
            "title": "Gene Notes",
            "description": "Notes related to this Gene",
            "type": "array",
            "items": {
                "title": "Gene Note",
                "type": "string",
                "linkTo": "NoteStandard"
            }
        }
        schema['facets'] = facets
        schema['columns'] = columns
        logger.info('Build gene schema')
        return schema

    def run(self, gs_out=None, write=False):  # noqa - args are different then in superclass but we don't care
        """ Ingests the gene table, producing the gene schema

        :param gs_out: path where to write the gene schema
        :param write: whether or not to actually write the schema (can do dry-run)
        :return: gene_annotation_field inserts
        """
        inserts = self.process_annotation_field_inserts()
        gene_props, columns, facets = self.generate_properties(inserts)
        gene_schema = self.generate_gene_schema(gene_props, columns, facets)
        if write:
            if not gs_out:
                raise GeneTableIntakeException('Write specified but no output file given')
            self.write_schema(gene_schema, gs_out)
            logger.info('Successfully wrote gene schema to %s' % gs_out)
        return inserts
nilq/baby-python
python
import numpy as np
import torch
import itertools
from torch.autograd import Variable


def getGridMask(frame, dimensions, num_person, neighborhood_size, grid_size, is_occupancy=False):
    '''
    This function computes the binary mask that represents the
    occupancy of each ped in the other's grid.

    params:
    frame : tensor whose first two columns are the (x, y) position of each
            pedestrian (positions read via frame.data.numpy())
    dimensions : This will be a list [width, height]
    neighborhood_size : Scalar value representing the size of neighborhood considered
    grid_size : Scalar value representing the size of the grid discretization
    num_person : number of people exist in given frame
    is_occupancy : A flag used for calculation of occupancy map; when True the
                   result is a (num_person, grid_size**2) occupancy map,
                   otherwise a (num_person, num_person, grid_size**2) mask
                   where mask[i, j, c] == 1 iff ped j falls in cell c of ped
                   i's neighborhood grid
    '''
    mnp = num_person
    width, height = dimensions[0], dimensions[1]
    if is_occupancy:
        frame_mask = np.zeros((mnp, grid_size**2))
    else:
        frame_mask = np.zeros((mnp, mnp, grid_size**2))
    frame_np = frame.data.numpy()

    # Full extent of the neighborhood window, normalized by scene size
    width_bound, height_bound = (neighborhood_size / (width * 1.0)) * 2, (neighborhood_size / (height * 1.0)) * 2

    # Instead of 2 inner loops, we check all possible 2-permutations,
    # which is 2 times faster.
    list_indices = list(range(0, mnp))
    for real_frame_index, other_real_frame_index in itertools.permutations(list_indices, 2):
        current_x, current_y = frame_np[real_frame_index, 0], frame_np[real_frame_index, 1]

        width_low, width_high = current_x - width_bound / 2, current_x + width_bound / 2
        height_low, height_high = current_y - height_bound / 2, current_y + height_bound / 2

        other_x, other_y = frame_np[other_real_frame_index, 0], frame_np[other_real_frame_index, 1]

        # Ped not in surrounding neighborhood, so binary mask stays zero
        if (other_x >= width_high) or (other_x < width_low) or (other_y >= height_high) or (other_y < height_low):
            continue

        # If in surrounding, calculate the grid cell
        cell_x = int(np.floor(((other_x - width_low) / width_bound) * grid_size))
        cell_y = int(np.floor(((other_y - height_low) / height_bound) * grid_size))

        if cell_x >= grid_size or cell_x < 0 or cell_y >= grid_size or cell_y < 0:
            continue

        if is_occupancy:
            frame_mask[real_frame_index, cell_x + cell_y * grid_size] = 1
        else:
            # Other ped is in the corresponding grid cell of current ped
            frame_mask[real_frame_index, other_real_frame_index, cell_x + cell_y * grid_size] = 1

    return frame_mask


def getSequenceGridMask(sequence, dimensions, pedlist_seq, neighborhood_size, grid_size, using_cuda, is_occupancy=False):
    '''
    Get the grid masks for all the frames in the sequence.

    params:
    sequence : A numpy matrix of shape SL x MNP x 3
    dimensions : This will be a list [width, height]
    pedlist_seq : per-frame list of pedestrian ids; only its per-frame length
                  is used (as num_person for that frame)
    neighborhood_size : Scalar value representing the size of neighborhood considered
    grid_size : Scalar value representing the size of the grid discretization
    using_cuda : Boolean value denoting if using GPU or not
    is_occupancy : A flag used for calculation of occupancy map
    '''
    sl = len(sequence)
    sequence_mask = []
    for i in range(sl):
        mask = Variable(torch.from_numpy(getGridMask(sequence[i], dimensions, len(pedlist_seq[i]),
                                                     neighborhood_size, grid_size, is_occupancy)).float())
        if using_cuda:
            mask = mask.cuda()
        sequence_mask.append(mask)
    return sequence_mask
nilq/baby-python
python
# Copyright 2021 The KaiJIN Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
from logging import log
import socket
import json

from .tfevent import TFEventAccumulator
from .logger import logger


class DaemonClient():
  """Client side of the resource-monitor daemon.

  Connects to one or more server daemons, receives a JSON status blob
  (cpu/gpu/event sections) and renders it to the log.
  """

  def __init__(self):
    logger.init('rs.client.log', './')
    logger.info('start client daemon.')

  def start(self, ip, port, mode='all', verbose=True):
    """Fetch the status JSON from a single server daemon.

    Args:
      ip: IPv4 address of the server daemon.
      port: TCP port the daemon listens on.
      mode: section to log when verbose ('cpu', 'gpu' or 'all').
      verbose: if True, log the received data.

    Returns:
      dict parsed from the JSON payload sent by the server.
    """
    # Use a context manager so the socket is always closed; the previous
    # implementation never closed it and leaked a descriptor per call.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
      s.connect((ip, port))
      # recv data until the server closes the connection
      bufs = bytes()
      while True:
        buf = s.recv(1024)
        if len(buf) == 0:
          break
        bufs += buf
    data = json.loads(bufs.decode())

    # print mode
    if verbose and mode == 'all':
      logger.info(json.dumps(data, indent=2))
    elif verbose and mode == 'cpu':
      logger.info(json.dumps(data['cpu'], indent=2))
    elif verbose and mode == 'gpu':
      logger.info(json.dumps(data['gpu'], indent=2))

    return data

  def start_file(self, file, mode='all'):
    """Fetch status from every `ip:port` listed in `file` and render tables.

    Args:
      file: path to a text file with one `ip:port` entry per line.
      mode: which table(s) to render ('cpu', 'gpu', 'event' or 'all').
    """
    machines = []
    with open(file) as fp:
      for line in fp:
        ip, port = line.replace('\n', '').split(':')
        machines.append((ip, int(port), mode, False))

    results = []
    for m in machines:
      try:
        results.append((m, self.start(*m)))
        logger.info(f'Successfully to receive data from {m[0]}:{m[1]}.')
      except Exception as e:
        # best-effort: unreachable machines are logged and skipped
        logger.warn(f'Failed to receive data from {m[0]}:{m[1]} due to {e}')

    def sep(length=200):
      # horizontal rule between table sections
      return '\n' + '-' * length

    s = sep(200)
    if mode in ['all', 'cpu']:
      s += '\n{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}'.format(
          'ip', 'port', 'cpu_count', 'cpu_current_freq', 'cpu_percent',
          'memory_total(GB)', 'memory_used(GB)', 'memory_free(GB)',
          'memory_percent(GB)', 'memory_shared(GB)')
      s += sep(200)
      for res in results:
        m, data = res[0], res[1]
        s += '\n{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}'.format(
            m[0], m[1],
            data['cpu']['cpu_count'],
            data['cpu']['cpu_current_freq'],
            data['cpu']['cpu_percent'],
            data['cpu']['memory_total'],
            data['cpu']['memory_used'],
            data['cpu']['memory_free'],
            data['cpu']['memory_percent'],
            data['cpu']['memory_shared'],
        )
      s += sep(200)
    if mode in ['all', 'gpu']:
      s += sep(200)
      s += '\n{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}'.format(
          'ip', 'port', 'driver_version', 'cuda_version', 'product_name',
          'fan_speed', 'total_memory', 'used_memory', 'memory_percent',
          'utilization')
      s += sep(200)
      for res in results:
        m, data = res[0], res[1]
        for gpu in data['gpu']['gpus']:
          s += '\n{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}'.format(
              m[0], m[1],
              data['gpu']['driver_version'],
              data['gpu']['cuda_version'],
              gpu['product_name'].replace('NVIDIA ', ''),
              gpu['fan_speed'],
              gpu['total_memory'],
              gpu['used_memory'],
              # used/total memory, both formatted like "<number> <unit>"
              '{:.2f} %'.format(float(gpu['used_memory'].split(' ')[0]) * 100 / float(gpu['total_memory'].split(' ')[0])),
              gpu['utilization'],
          )
      s += sep(200)
    if mode in ['all', 'event']:
      s += sep(200)
      s += '\n{:^20}{:^20}{:^100}{:^40}{:^20}'.format('ip', 'port', 'expr', 'update', 'epoch')
      s += sep(200)
      for res in results:
        m, data = res[0], res[1]
        for k, v in data['event'].items():
          s += '\n{:^20}{:^20}{:^100}{:^40}{:^20}'.format(
              m[0], m[1], k,
              data['event'][k]['modify'],
              data['event'][k]['epoch'],
          )
      s += sep(200)
    logger.info(s)


if __name__ == "__main__":
  parser = argparse.ArgumentParser()
  parser.add_argument('--ip', type=str, help='local ipv4 addr.')
  parser.add_argument('--port', type=int, help='bind or listen port.')
  parser.add_argument('--mode', type=str, default='all', choices=['cpu', 'gpu', 'event', 'all'])
  parser.add_argument('--file', type=str, default=None)
  args, _ = parser.parse_known_args()
  print(args)

  daemon = DaemonClient()
  if args.file is None:
    daemon.start(ip=args.ip, port=args.port, mode=args.mode)
  else:
    daemon.start_file(file=args.file, mode=args.mode)
python
import anyio from anyio_mqtt import AnyIOMQTTClient import logging _LOG = logging.getLogger(__name__) logging.basicConfig(level=logging.DEBUG) logging.getLogger("anyio_mqtt").setLevel(logging.DEBUG) PAHO_LOGGER = logging.getLogger("paho") PAHO_LOGGER.setLevel(logging.DEBUG) async def main() -> None: _LOG.debug("Creating client") async with AnyIOMQTTClient() as client: client.enable_logger(PAHO_LOGGER) client.username_pw_set("test", "tesffffft") _LOG.debug("Subscribing to a/b/c") client.subscribe("a/b/c") _LOG.debug("Connecting to broker") client.connect("walternate") _LOG.debug("Subscribing to d/e/f") client.subscribe("d/e/f") _LOG.debug("Publishing message to a/b/c with QoS 0") client.publish("a/b/c", "hi0", qos=0) _LOG.debug("Publishing message to a/b/c with QoS 1") client.publish("a/b/c", "hi1", qos=1) _LOG.debug("Publishing message to a/b/c with QoS 2") client.publish("a/b/c", "hi2", qos=2) i = 0 _LOG.debug("Waiting for messages (1)") async for msg in client.messages: print( f"Message received in test.py (1): {msg.topic} - {msg.payload.decode('utf8')}" ) i += 1 if i >= 5: break _LOG.debug("Publishing message to a/b/c with QoS 0") client.publish("a/b/c", "2hi0", qos=0) _LOG.debug("Not listening for messages for 3 seconds") await anyio.sleep(3) i = 0 _LOG.debug("Waiting for messages (2)") async for msg in client.messages: print( f"Message received in test.py (2): {msg.topic} - {msg.payload.decode('utf8')}" ) i += 1 if i >= 5: _LOG.debug("Calling client.disconnect()") client.disconnect() break _LOG.debug("Publishing message to a/b/c with QoS 0") client.publish("a/b/c", "3hi0", qos=0) _LOG.debug("Publishing message to a/b/c with QoS 1") client.publish("a/b/c", "3hi1", qos=1) _LOG.debug("Publishing message to a/b/c with QoS 2") client.publish("a/b/c", "3hi2", qos=2) _LOG.debug("Waiting 3 seconds") await anyio.sleep(3) _LOG.debug("Connecting to broker") client.connect("localhost") i = 0 _LOG.debug("Waiting for messages (3)") async for msg in client.messages: print( 
f"Message received in test.py (3): {msg.topic} - {msg.payload.decode('utf8')}" ) i += 1 if i >= 5: print("Breaking out of last msg loop") break print("Now leaving async context...") print("Finished!") if __name__ == "__main__": anyio.run(main)
nilq/baby-python
python
import time import random import numpy as np import torch from torchtuples import tuplefy, TupleTree def make_name_hash(name='', file_ending='.pt'): year, month, day, hour, minute, second = time.localtime()[:6] ascii_letters_digits = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789' random_hash = ''.join(random.choices(ascii_letters_digits, k=20)) path = f"{name}_{year}-{month}-{day}_{hour}-{minute}-{second}_{random_hash}{file_ending}" return path class TimeLogger: def __init__(self, start=None): self.start = self.time() if start is None else start self.prev = self.start @staticmethod def time(): return time.time() def diff(self): prev, self.prev = (self.prev, self.time()) return self.prev - self.start, self.prev - prev @staticmethod def _hms_from_sec(sec): """Hours, minutes, seconds.""" m, s = divmod(sec, 60) h, m = divmod(m, 60) return h, m, s @staticmethod def _hms_str(h, m, s, shorten=True): """Hours, minutes, seconds.""" hs = f"{int(h)}h:" ms = f"{int(m)}m:" ss = f"{int(s)}s" if shorten: if h == 0: hs = '' if m == 0: ms = '' return f"{hs}{ms}{ss}" # return f"{int(h)}h:{int(m)}m:{int(s)}s" def hms_diff(self, shorten=True): diff_start, diff_prev = self.diff() hms_start = self._hms_from_sec(diff_start) hms_prev = self._hms_from_sec(diff_prev) return self._hms_str(*hms_start, shorten), self._hms_str(*hms_prev, shorten) def array_or_tensor(tensor, numpy, input): """Returs a tensor if numpy is False or input is tensor. Else it returns numpy array, even if input is a DataLoader. 
""" is_tensor = None if numpy is False: is_tensor = True elif (numpy is True) or is_dl(input): is_tensor = False elif not (is_data(input) or is_dl(input)): raise ValueError(f"Do not understand type of `input`: {type(input)}") elif tuplefy(input).type() is torch.Tensor: is_tensor = True elif tuplefy(input).type() is np.ndarray: is_tensor = False else: raise ValueError("Something wrong") if is_tensor: tensor = tuplefy(tensor).to_tensor().val_if_single() else: tensor = tuplefy(tensor).to_numpy().val_if_single() return tensor def is_data(input): """Returns True if `input` is data of type tuple, list, TupleTree, np.array, torch.Tensor.""" datatypes = [np.ndarray, torch.Tensor, tuple, list, TupleTree] return any([isinstance(input, ct) for ct in datatypes]) def is_dl(input): """Returns True if `input` is a DataLoader (inherit from DataLoader).""" return isinstance(input, torch.utils.data.DataLoader)
nilq/baby-python
python
# Copyright (c) Andrey Sobolev, 2019. Distributed under MIT license, see LICENSE file. GEOMETRY_LABEL = 'Geometry optimization' PHONON_LABEL = 'Phonon frequency' ELASTIC_LABEL = 'Elastic constants' PROPERTIES_LABEL = 'One-electron properties'
nilq/baby-python
python
import django # this verifies local libraries can be packed into the egg def addition(first, second): return first + second
nilq/baby-python
python
import sys import os import argparse import pandas as pd from fr.tagc.rainet.core.util.exception.RainetException import RainetException from fr.tagc.rainet.core.util.log.Logger import Logger from fr.tagc.rainet.core.util.time.Timer import Timer from fr.tagc.rainet.core.util.subprocess.SubprocessUtil import SubprocessUtil from fr.tagc.rainet.core.util.sql.SQLManager import SQLManager #=============================================================================== # Started 28-Dec-2016 # Diogo Ribeiro # Based on LncRNAScore.py DESC_COMMENT = "Script to map attributes from Mukherjee2016 to groups of lncRNAs." SCRIPT_NAME = "LncRNAGroupAnalysis.py" #=============================================================================== #=============================================================================== # General plan: # 1) Read file with RNA annotation, gene IDs # 2) Read Mukherjee2016 file with data for lncRNAs # 3) Output into R-readable format #=============================================================================== #=============================================================================== # Processing notes: # 1) A category including all RNAs in Mukherjee2016 is created while reading its file #=============================================================================== class LncRNAGroupAnalysis(object): #======================================================================= # Constants #======================================================================= ANNOTATION_FILE_ID_COLUMN = 0 ANNOTATION_FILE_ANNOTATION_COLUMN = 1 DATA_FILE_ID_COLUMN = 0 # DATA_FILE_ANNOTATION_COLUMN = 9 OUTPUT_FILE = "lncRNA_group_analysis.tsv" ALL_MRNA_ANNOTATION = "0-All_mRNAs" ALL_LNCRNA_ANNOTATION = "1-All_lncRNAs" def __init__(self, annotationFile, dataFile, outputFolder, dataColumns, useMRNA, dataAnnotationColumn): self.annotationFile = annotationFile self.dataFile = dataFile self.outputFolder = outputFolder try: self.dataColumns = [] sp = 
dataColumns.split(",") for s in sp: self.dataColumns.append( int( s)) except: raise RainetException("LncRNAGroupAnalysis.__init__: data column input in wrong format:", dataColumns) self.useMRNA = useMRNA self.dataAnnotationColumn = dataAnnotationColumn # make output folder if not os.path.exists( self.outputFolder): os.mkdir( self.outputFolder) # # # Read list of annotation per RNA. def read_annotation_file( self): #======================================================================= # Example file # # ENSG00000256751 Predicted # ENSG00000256750 Predicted # ENSG00000261773 Interacting # ENSG00000237402 Interacting #======================================================================= # The same gene can have several annotations #======================================================================= # initialising #======================================================================= transcriptAnnotation = {} # Key -> transcript ensemblID, value -> set of annotations groupTranscripts = {} # Key -> annotation, value -> set of transcripts lineCounter = 0 #======================================================================= # read file #======================================================================= with open( self.annotationFile, "r") as inFile: for line in inFile: line = line.strip() lineCounter+=1 spl = line.split( "\t") geneID = spl[ LncRNAGroupAnalysis.ANNOTATION_FILE_ID_COLUMN] # select column to use as annotation annotationItem = spl[ LncRNAGroupAnalysis.ANNOTATION_FILE_ANNOTATION_COLUMN] if not geneID.startswith( "ENS"): raise RainetException("read_annotation_file: entry is not ENS*:", geneID) if "." 
in geneID: geneID = geneID.split( ".")[0] # storing tx as key if geneID not in transcriptAnnotation: transcriptAnnotation[ geneID] = set() transcriptAnnotation[ geneID].add( annotationItem) # storing annotation as key if annotationItem not in groupTranscripts: groupTranscripts[ annotationItem] = set() groupTranscripts[ annotationItem].add( geneID) print "read_annotation_file: number of entries read:", lineCounter print "read_annotation_file: number of transcripts with annotation:", len( transcriptAnnotation) print "read_annotation_file: number of annotations:", len( groupTranscripts) self.transcriptAnnotation = transcriptAnnotation self.groupTranscripts = groupTranscripts for group in sorted(groupTranscripts): print group, len( groupTranscripts[ group]) # # # Read Mukherjee 2016 file with data def read_data_file(self): #======================================================================= # Example file # # Gene Syn Proc Deg CytNuc PolyCyt TrP Copies Exon Annotation Cluster Host Complex # ENSG00000005206.12 0.3240500888 -0.0260844809 0.1373502068 -0.5552417614 -0.2815917912 0.6640126412 0.2623901975 MultiExon lncRNA c3 None processed_transcript # ENSG00000006062.9 0.1118696177 -0.0129556703 0.3003516672 -0.4050632081 0.0920502949 -0.5617828392 -0.0963797176 MultiExon lncRNA c4 None processed_transcript # ENSG00000031544.10 -1.050910308 -0.254916842 0.9567499553 -0.9364242934 -0.1898011997 -2.9665750821 -1.9313555304 MultiExon lncRNA c7 None processed_transcript #======================================================================= # Note the gene ID has a value after the "." # Some classifications are as floats others as strings. 
#======================================================================= # Output file, a melted file # # Gene Group Metric Value # ENSG00000005206 Predicted Syn 0.3240500888 # ENSG00000005206 Predicted Proc -0.0260844809 # ENSG00000006062 Interacting Syb 0.1118696177 outFile = open( self.outputFolder + "/" + LncRNAGroupAnalysis.OUTPUT_FILE, "w") # write header outFile.write("Gene\tGroup\tMetric\tValue\n") numbersPerGroup = {} # key -> group, value -> count of transcripts numbersPerGroup[ LncRNAGroupAnalysis.ALL_LNCRNA_ANNOTATION] = 0 if self.useMRNA: numbersPerGroup[ LncRNAGroupAnalysis.ALL_MRNA_ANNOTATION] = 0 #======================================================================= # read input file and write output #======================================================================= table = pd.read_table( self.dataFile, header = 0, sep = "\t", skip_blank_lines = True) columnNames = list(table.columns.values) newTable = table[ :] for index, gene in newTable.iterrows(): geneID = gene[LncRNAGroupAnalysis.DATA_FILE_ID_COLUMN] ## process geneID if not geneID.startswith( "ENS"): raise RainetException("read_data_file: entry is not ENS*:", geneID) # Note: some entries contain ENSGR*, this is a small modification due to chromosome Y/X, it can safely be changed to ENSG0* if geneID.startswith( "ENSGR"): geneID = geneID.replace("ENSGR","ENSG0") if "." 
in geneID: geneID = geneID.split( ".")[0] # if gene has annotation if geneID in self.transcriptAnnotation: # for each of its annotations, write a line for annotation in self.transcriptAnnotation[ geneID]: for metric in self.dataColumns: outFile.write( "%s\t%s\t%s\t%s\n" % (geneID, annotation, columnNames[ metric], gene[ metric])) if annotation not in numbersPerGroup: numbersPerGroup[ annotation] = 0 numbersPerGroup[ annotation]+= 1 # if mRNA if gene[ self.dataAnnotationColumn] == "protein_coding": if self.useMRNA: # add to mRNA category numbersPerGroup[ LncRNAGroupAnalysis.ALL_MRNA_ANNOTATION]+= 1 for metric in self.dataColumns: outFile.write( "%s\t%s\t%s\t%s\n" % (geneID, LncRNAGroupAnalysis.ALL_MRNA_ANNOTATION, columnNames[ metric], gene[ metric])) elif gene[ self.dataAnnotationColumn] == "lncRNA": # add lncRNA to all lncRNA group regardless of its existence in our annotations numbersPerGroup[ LncRNAGroupAnalysis.ALL_LNCRNA_ANNOTATION]+= 1 for metric in self.dataColumns: outFile.write( "%s\t%s\t%s\t%s\n" % (geneID, LncRNAGroupAnalysis.ALL_LNCRNA_ANNOTATION, columnNames[ metric], gene[ metric])) else: # neither lncRNA nor mRNA continue outFile.close() print "read_data_file: number of lines in input data:", len(newTable) print "read_data_file: number of lncRNAs per group", numbersPerGroup if __name__ == "__main__": try: # Start chrono Timer.get_instance().start_chrono() print "STARTING " + SCRIPT_NAME #=============================================================================== # Get input arguments, initialise class #=============================================================================== parser = argparse.ArgumentParser(description= DESC_COMMENT) # positional args parser.add_argument('annotationFile', metavar='annotationFile', type=str, help='TSV file with annotation per transcript (gene). No header. Can have several annotations for same transcript, one per line. E.g. 
transcriptID\tannotation.') parser.add_argument('dataFile', metavar='dataFile', type=str, help='File with data per lncRNA from Mukherjee2016. Header is important. Already filtered for lncRNAs.') parser.add_argument('outputFolder', metavar='outputFolder', type=str, help='Folder where to write output files.') parser.add_argument('--dataColumns', metavar='dataColumns', type=str, default = "1,2,3,4,5,7,10", help='Which 0-based columns in the input data file we want to process. At least the gene ID column needs to be included and as the first in list. Give attribute as comma-separated.') parser.add_argument('--useMRNA', metavar='useMRNA', type=int, default = 1, help='Whether to create protein_coding category, if available on file.') parser.add_argument('--dataAnnotationColumn', metavar='dataAnnotationColumn', type=int, default = 9, help='Which 0-based column to use as transcript biotype/group annotation.') #gets the arguments args = parser.parse_args( ) # init run = LncRNAGroupAnalysis( args.annotationFile, args.dataFile, args.outputFolder, args.dataColumns, args.useMRNA, args.dataAnnotationColumn) # read annotations file Timer.get_instance().step( "Reading annotation file..") run.read_annotation_file( ) # read data file and write output Timer.get_instance().step( "Reading data file..") run.read_data_file() # Stop the chrono Timer.get_instance().stop_chrono( "FINISHED " + SCRIPT_NAME ) # Use RainetException to catch errors except RainetException as rainet: Logger.get_instance().error( "Error during execution of %s. Aborting :\n" % SCRIPT_NAME + rainet.to_string())
nilq/baby-python
python
""" 参数及配置 """ # main.py small_dataset = False # 选择数据集规模 small_train_path = "../data/small_dataset/train.conll" # 小数据集-训练集 small_dev_path = "../data/small_dataset/dev.conll" # 小数据集-验证集 big_train_path = "../data/big_dataset/train" # 大数据集-训练集 big_dev_path = "../data/big_dataset/dev" # 大数据集-验证集 big_test_path = "../data/big_dataset/test" # 大数据集-测试集 embedding_path = "../data/embedding/giga.100.txt" # 预训练好的词向量文件 result_path = "../result/small_dataset_result.txt" # 保存结果的文件 max_epoch = 50 # 最大的epoch(一次epoch是训练完所有样本一次) max_no_rise = 10 # epoch连续几轮(验证集最大正确率)没有上升时结束训练 # embedding.py window = 5 # 上下文窗口大小 embedding_dim = 100 # 每个词向量的维度 # dataloader.py shuffle = True # 是否打乱数据集 batch_size = 50 # 多少样本更新一次 # bpnn.py hidden_layer_size = 354 # 隐藏层中的神经元数量, 2/3(input + output) activation = 'relu' # 隐藏层的激活函数 Lambda = 0.01 # L2正则化项系数λ learning_rate = 0.5 # 初始学习率 embedding_trainable = True # 是否训练embedding decay_rate = 0.96 # 学习率衰减速率(防止 loss function 在极小值处不停震荡) random_seed = 1110 # 随机数种子
nilq/baby-python
python
from cakechat.utils.data_structures import create_namedtuple_instance SPECIAL_TOKENS = create_namedtuple_instance( 'SPECIAL_TOKENS', PAD_TOKEN=u'_pad_', UNKNOWN_TOKEN=u'_unk_', START_TOKEN=u'_start_', EOS_TOKEN=u'_end_') DIALOG_TEXT_FIELD = 'text' DIALOG_CONDITION_FIELD = 'condition'
nilq/baby-python
python
from models.models import Departamento
nilq/baby-python
python
# -*- coding: utf-8 -*- """ Python implementation of Tanner Helland's color color conversion code. http://www.tannerhelland.com/4435/convert-temperature-rgb-algorithm-code/ """ import math # Aproximate colour temperatures for common lighting conditions. COLOR_TEMPERATURES = { 'candle': 1900, 'sunrise': 2000, 'incandescent': 2500, 'tungsten': 3200, 'halogen': 3350, 'sunlight': 5000, 'overcast': 6000, 'shade': 7000, 'blue-sky': 10000, 'warm-fluorescent': 2700, 'fluorescent': 37500, 'cool-fluorescent': 5000, } def correct_output(luminosity): """ :param luminosity: Input luminosity :return: Luminosity limited to the 0 <= l <= 255 range. """ if luminosity < 0: val = 0 elif luminosity > 255: val = 255 else: val = luminosity return round(val) def kelvin_to_rgb(kelvin): """ Convert a color temperature given in kelvin to an approximate RGB value. :param kelvin: Color temp in K :return: Tuple of (r, g, b), equivalent color for the temperature """ temp = kelvin / 100.0 # Calculate Red: if temp <= 66: red = 255 else: red = 329.698727446 * ((temp - 60) ** -0.1332047592) # Calculate Green: if temp <= 66: green = 99.4708025861 * math.log(temp) - 161.1195681661 else: green = 288.1221695283 * ((temp - 60) ** -0.0755148492) # Calculate Blue: if temp > 66: blue = 255 elif temp <= 19: blue = 0 else: blue = 138.5177312231 * math.log(temp - 10) - 305.0447927307 return tuple(correct_output(c) for c in (red, green, blue))
nilq/baby-python
python
# The MIT license: # # Copyright 2017 Andre Netzeband # # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated # documentation files (the "Software"), to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and # to permit persons to whom the Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or substantial portions of # the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO # THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # # Note: The DeepDriving project on this repository is derived from the DeepDriving project devloped by the princeton # university (http://deepdriving.cs.princeton.edu/). The above license only applies to the parts of the code, which # were not a derivative of the original DeepDriving project. For the derived parts, the original license and # copyright is still valid. Keep this in mind, when using code from this project. from .Initializer import *
nilq/baby-python
python
# Generated by Django 2.2.5 on 2019-10-07 17:50 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('cases', '0016_datetimes_to_dates'), ] operations = [ migrations.AlterField( model_name='preliminarycase', name='completed_on', field=models.DateField(blank=True, null=True, verbose_name='Completed On'), ), migrations.AlterField( model_name='preliminarycase', name='date_recorded', field=models.DateField(blank=True, null=True, verbose_name='Date Recorded'), ), ]
nilq/baby-python
python
from pyvisdk.esxcli.executer import execute_soap from pyvisdk.esxcli.base import Base class IscsiNetworkportalIpconfig(Base): ''' Operations that can be performed on iSCSI Network Portal (iSCSI vmknic)'s IP configuration ''' moid = 'ha-cli-handler-iscsi-networkportal-ipconfig' def set(self, adapter, ip, subnet, dns1=None, dns2=None, gateway=None, nic=None): ''' Set iSCSI network portal IP configuration :param adapter: string, The iSCSI adapter name. :param dns1: string, The iSCSI network portal primary DNS address :param dns2: string, The iSCSI network portal secondary DNS address :param gateway: string, The iSCSI network portal gateway address :param ip: string, The iSCSI network portal IP address :param nic: string, The iSCSI network portal (vmknic) :param subnet: string, The iSCSI network portal subnet mask :returns: void ''' return execute_soap(self._client, self._host, self.moid, 'vim.EsxCLI.iscsi.networkportal.ipconfig.Set', adapter=adapter, dns1=dns1, dns2=dns2, gateway=gateway, ip=ip, nic=nic, subnet=subnet, ) def get(self, adapter, nic=None): ''' Get iSCSI network portal ip configuration :param adapter: string, The iSCSI adapter name. :param nic: string, The iSCSI network portal (vmknic) :returns: vim.EsxCLI.iscsi.networkportal.ipconfig.get.NetworkPortal[] ''' return execute_soap(self._client, self._host, self.moid, 'vim.EsxCLI.iscsi.networkportal.ipconfig.Get', adapter=adapter, nic=nic, )
nilq/baby-python
python
# -*- coding: utf-8 -*- """Tests dict input objects for `tackle.providers.system.hooks.lists` module.""" from tackle.main import tackle def test_provider_system_hook_lists(change_dir): """Verify the hook call works properly.""" output = tackle('.', no_input=True) assert 'donkey' in output['appended_list'] assert 'donkey' in output['appended_lists'] assert 'chickens' in output['appended_lists']
nilq/baby-python
python
from flask_wtf import FlaskForm from wtforms import StringField,TextAreaField,SubmitField,SelectField from wtforms.validators import Required class PitchForm(FlaskForm): title = StringField('Pitch title',validators=[Required()]) text = TextAreaField('Text',validators=[Required()]) category = SelectField('Type',choices=[('job','Job pitch'),('event','Event pitch'),('advert','Advert pitch')],validators=[Required()]) submit = SubmitField('Submit') class UpdateProfile(FlaskForm): bio = TextAreaField('Bio.',validators = [Required()]) submit = SubmitField('Submit') class CommentForm(FlaskForm): text = TextAreaField('Leave a comment:',validators=[Required()]) submit = SubmitField('Submit')
nilq/baby-python
python
#!/usr/bin/env python """ ROS node implementing Rhinohawk global mission state. See rh_msgs.msg.State for a full description of the state data. The state node aggregates state from many different sources and makes it available to other nodes in the Rhinohawk System, particularly the controller node which is responsible for autonomous mission control. Here it's important to make the distinction between two types of waypoints used in the Rhinohawk System: Mission waypoints, sometimes called "transit waypoints", are the top-level waypoints which need to be reached in order to satisfy the mission. In the context of the MedExpress Challenge, the mission waypoints break down as follows: 1,2 - Must be traversed in this order 3,N - Can be traversed in any other N+1 - Joe's reported location, area must be searched for a landing location On the way back: N,3 - Can be traversed in any order 2,1 - Must be traversed in this order APM waypoints are the low-level waypoints used by the navigation system to navigate no-fly-zones and geofence restrictions and to (eventually) complete mission objectives. Typically, completing any of the above mission waypoints will require M number of APM waypoints. For performance purposes, only the waypoints necessary to complete the next mission objective are uploaded to the autopilot at any given time. 
""" from functools import partial import rospy from std_msgs.msg import Float64 from sensor_msgs.msg import NavSatFix import mavros_msgs.msg as mrm from rh_msgs.msg import State, Mission, GPSCoord, VehicleState from rh_msgs.srv import GetState, GetStateResponse from rh_msgs.srv import SetMission, SetMissionResponse from rh_msgs.srv import StartMission, StartMissionResponse from rh_msgs.srv import AbortMission, AbortMissionResponse from rh_msgs.srv import SetNoFlyZones, SetNoFlyZonesResponse from rh_autonomy.aggregator import LatchMap from rh_autonomy.util import waypoints_to_str, gps_dist from rh_autonomy import constants as rhc class MissionStatus: NOT_READY = 1 READY = 2 RUNNING = 3 ABORTING = 4 COMPLETE = 5 class VehicleStatus: GROUNDED = 1 FLYING = 2 CONTROL_RATE_HZ = 1 gps_topic = "/mavros/global_position/global" compass_topic = "/mavros/global_position/compass_hdg" vfr_topic = "/mavros/vfr_hud" wp_change_topic = "/mavros/mission/waypoints" def log(s): rospy.loginfo("STATE: %s" % s) def warn(s): rospy.logwarn("STATE: %s" % s) class StateNode(): def __init__(self): self.values = LatchMap() self.mission = Mission() self.dynamic_nfzs = [] self.target_mission_wp = 0 self.apm_wps = None self.reached_apm_wp = -1 self.mission_status = MissionStatus.NOT_READY self.vehicle_status = VehicleStatus.GROUNDED self.landing_location = GPSCoord() rospy.init_node("state") self.sub(gps_topic, NavSatFix) self.sub(compass_topic, Float64) self.sub(vfr_topic, mrm.VFR_HUD) #self.sub(wp_change_topic, mrm.WaypointList, max_age=None) rospy.Subscriber("/mavros/state", mrm.State, self.mavros_state_change) rospy.Subscriber("/mavros/mission/reached", mrm.WaypointReached, self.waypoint_reached) rospy.Subscriber("/mavros/statustext/recv", mrm.StatusText, self.mavlink_statustext) rospy.Subscriber(wp_change_topic, mrm.WaypointList, self.waypoints_changed) self.state_pub = rospy.Publisher("state", State, queue_size = 5) rospy.Service('command/set_mission', SetMission, self.handle_set_mission) 
rospy.Service('command/start_mission', StartMission, self.handle_start_mission) rospy.Service('command/abort_mission', AbortMission, self.handle_abort_mission) rospy.Service('command/set_dnfzs', SetNoFlyZones, self.handle_set_dnfzs) rospy.Service('command/get_state', GetState, self.handle_get_state) def run_forever(self): rate = rospy.Rate(CONTROL_RATE_HZ) while True: if rospy.is_shutdown(): break self.check_goal() if self.state_pub.get_num_connections() > 0: self.state_pub.publish(self.get_state()) rate.sleep() def check_goal(self): if self.mission_status != MissionStatus.RUNNING: return if self.target_mission_wp > len(self.mission.mission_wps.points)-1: return target = self.mission.mission_wps.points[self.target_mission_wp] gps_position = self.values.get_value(gps_topic) curr_pos = GPSCoord(gps_position.latitude, gps_position.longitude, 1) d = gps_dist(curr_pos, target) log("Distance from goal: %2.6fm" % d) if rhc.PERFORM_SEARCH and self.target_mission_wp == len(self.mission.mission_wps.points)-1: # Searching for landing marker #TODO: actually search! # For SITL testing -- midway through the search, act like we found the marker if self.apm_wps and self.reached_apm_wp > len(self.apm_wps)/2: midpoint = self.apm_wps[len(self.apm_wps)/2] self.landing_location = GPSCoord(midpoint.x_lat, midpoint.y_long, 0) rospy.loginfo("SITL SIMULATION - Found landing marker") else: # Navigating toward a goal waypoint #for i, point in enumerate(self.mission.mission_wps.points): # d = gps_dist(curr_pos, point) # rospy.loginfo("Goal %d - distance %f"%(i,d)) # are we close to the goal? 
# TODO: get distance in meters #if d_in_meters < rhc.WAYPOINT_ACCEPTANCE_RADIUS: if d < 0.0002: self.goal_reached(self.target_mission_wp) self.reached_apm_wp = 0 def goal_reached(self, index): log("Reached goal %d" % self.target_mission_wp) log("----------------------------------------------------") if index == len(self.mission.mission_wps.points)-1: rospy.loginfo("Landing at remote location") # get next goal self.target_mission_wp += 1 def sub(self, topic, data_type, max_age=10): rospy.Subscriber(topic, data_type, \ partial(self.values.latch_value, topic, max_age=max_age)) def mavros_state_change(self, msg): if msg.system_status==4: if self.vehicle_status != VehicleStatus.FLYING: self.vehicle_status = VehicleStatus.FLYING log("Flying") else: if self.vehicle_status != VehicleStatus.GROUNDED: self.vehicle_status = VehicleStatus.GROUNDED log("Landed") if self.target_mission_wp == len(self.mission.mission_wps.points): log("MISSION COMPLETE") self.mission_status = MissionStatus.COMPLETE def waypoints_changed(self, msg): self.apm_wps = msg.waypoints if self.apm_wps: rospy.loginfo("Received waypoints (curr=%d):\n%s" % \ (msg.current_seq, waypoints_to_str(self.apm_wps))) #rospy.logdebug("Got waypoint list for goal %d"%mission_goal_id) def waypoint_reached(self, msg): """ Called whenever an APM waypoint is reached """ if not self.apm_wps: warn("Reached waypoint, but no waypoints known") return apm_wps = self.apm_wps if self.reached_apm_wp < msg.wp_seq: log("Reached APM waypoint %s" % msg.wp_seq) self.reached_apm_wp = msg.wp_seq else: log("Already reached APM waypoint %s" % msg.wp_seq) def mavlink_statustext(self, msg): #log("Got Mavlink msg: %s " % msg.text) #if msg == 'Land complete': # log("On the ground") pass def handle_set_mission(self, msg): """ Set mission parameters """ if not msg.mission.mission_wps: warn("Mission submitted with no mission waypoints") return SetMissionResponse(False) self.mission = msg.mission self.mission_status = MissionStatus.READY log("New 
mission has been set:\n%s" % self.mission) return SetMissionResponse(True) def handle_start_mission(self, msg): """ Start mission """ if not self.mission.mission_wps.points: rospy.loginfo("No mission waypoints defined") return StartMissionResponse(False) if self.mission_status != MissionStatus.READY: rospy.loginfo("Status not ready. Cannot begin mission.") return StartMissionResponse(False) # TODO: verify that mission controller is spinning self.mission_status = MissionStatus.RUNNING log("STARTING MISSION") return StartMissionResponse(True) def handle_abort_mission(self, msg): """ Abort mission with extreme prejudice """ self.mission_status = MissionStatus.ABORTING return AbortMissionResponse(True) def handle_set_dnfzs(self, msg): self.dynamic_nfzs = msg.dynamic_nfzs log("New dynamic no-fly-zones have been set") return SetNoFlyZonesResponse(True) def handle_get_state(self, msg): return GetStateResponse(self.get_state()) def get_state(self): state = State() state.mission = self.mission state.dynamic_nfzs = self.dynamic_nfzs state.target_mission_wp = self.target_mission_wp vehicle_state = VehicleState() vehicle_state.status = self.vehicle_status state.vehicle_state = vehicle_state gps_position = self.values.get_value(gps_topic) if gps_position: vehicle_state.position.lat = gps_position.latitude vehicle_state.position.lon = gps_position.longitude vehicle_state.position.alt = gps_position.altitude compass = self.values.get_value(compass_topic) if compass: vehicle_state.heading = compass.data vfr = self.values.get_value(vfr_topic) if vfr: vehicle_state.position.alt = vfr.altitude vehicle_state.airspeed = vfr.airspeed vehicle_state.groundspeed = vfr.groundspeed if self.apm_wps: state.apm_wps = self.apm_wps state.landing_location = self.landing_location state.mission_status = self.mission_status return state if __name__ == "__main__": node = StateNode() rospy.loginfo("Mission state ready.") node.run_forever()
nilq/baby-python
python
class Tuners(object): """Enum class for mapping symbols to string names.""" UNIFORM = "uniform" GP = "gp" GP_EI = "gp_ei" GP_EI_VEL = "gp_eivel"
nilq/baby-python
python
# coding=utf-8 import re from jinja2 import Environment, PackageLoader class ViewModel(object): class Property(object): def __init__(self, name, type_name): super(ViewModel.Property, self).__init__() self.name = name self.type_name = type_name def __str__(self): return "let {}: Stream<{}>".format(self.name, self.type_name) def __init__(self, vm_text): super(ViewModel, self).__init__() self.vm_text = vm_text @property def view_model_name(self): try: regex = re.compile(r'(?:struct|extension) (\w+)ViewModel') mo = regex.search(self.vm_text) return mo.group(1) except Exception: print("The ViewModel in the pasteboard is invalid.") exit(1) @property def properties(self): try: str = self.vm_text input_block_regex = re.compile("struct Input {([^}]+)") input_block = input_block_regex.search(str).group(1) input_properties_regex = re.compile(r'let (\w+): Stream<([^>]+)>') input_properties = [ ViewModel.Property(p[0], p[1]) for p in input_properties_regex.findall(input_block) ] output_block_regex = re.compile("struct Output {([^}]+)") output_block = output_block_regex.search(str).group(1) output_properties_regex = re.compile(r'let (\w+): Stream<([^>]+)>') output_properties = [ ViewModel.Property(p[0], p[1]) for p in output_properties_regex.findall(output_block) ] return (input_properties, output_properties) except Exception: print("The ViewModel in the pasteboard is invalid.") exit(1) class UnitTest(ViewModel): def create_tests(self): input_properties, output_properties = self.properties env = Environment( loader=PackageLoader('ptfgen_templates', 'commands'), trim_blocks=True, lstrip_blocks=True ) template = env.get_template("unit_test.dart") content = template.render( name=self.view_model_name, input_properties=input_properties, output_properties=output_properties ) return content class BindViewModel(ViewModel): def create_bind_view_model(self): input_properties, output_properties = self.properties env = Environment( loader=PackageLoader('ptfgen_templates', 'commands'), 
trim_blocks=True, lstrip_blocks=True ) template = env.get_template("bindviewmodel.dart") content = template.render( name=self.view_model_name, input_properties=input_properties, output_properties=output_properties ) return content
nilq/baby-python
python
import matplotlib.pyplot as plt
import numpy as np
from plotting import *


def PID(x, I, dx, KP, KI, KD):
    """Return the PID control u = -KP*x - KI*I - KD*dx for error state (x, I, dx)."""
    u = -np.dot(KP, x) - np.dot(KI, I) - np.dot(KD, dx)
    return u


def PID_trajectory(A, B, c, D, x0, dx0, KP, KI, KD, dt=1e-3, T=10, xdes=None, dxdes=None):
    """Simulate a PID-controlled 2nd-order system with explicit Euler integration.

    The system is ddx = A(x-xd) + B(dx-dxd) + c + D u with u from the PID law.

    Args:
        A, B, c, D: system matrices / constant vector.
        x0, dx0: initial state and velocity (copied, never mutated).
        KP, KI, KD: proportional / integral / derivative gain matrices.
        dt: time step; T: total simulation time.
        xdes, dxdes: optional callables ``t -> desired state / velocity``.

    Returns:
        Dict of per-step trajectories with keys
        ['t', 'xdes', 'dxdes', 'x', 'I', 'dx', 'ddx', 'u', 'uP', 'uI', 'uD'].
        'xdes'/'dxdes' stay empty when no setpoint callable is given.
    """
    x = x0.copy()
    dx = dx0.copy()
    I = np.zeros(len(x0))
    res = dict((idx, []) for idx in
               ['t', 'xdes', 'dxdes', 'x', 'I', 'dx', 'ddx', 'u', 'uP', 'uI', 'uD'])
    t = 0
    while t < T:
        xd = xdes(t) if xdes is not None else np.zeros(len(x0))
        dxd = dxdes(t) if dxdes is not None else np.zeros(len(x0))
        u = PID(x - xd, I, dx - dxd, KP, KI, KD)
        ddx = np.dot(A, x - xd) + np.dot(B, dx - dxd) + c + np.dot(D, u)
        res['t'].append(t)
        res['x'].append(x.copy())
        if xdes is not None:
            res['xdes'].append(xd)
        if dxdes is not None:
            res['dxdes'].append(dxd)
        # BUG FIX: append a snapshot of the integrator. The original appended
        # the mutable array itself, then updated it in place with ``I += ...``,
        # so every recorded 'I' sample aliased the final value.
        res['I'].append(I.copy())
        res['dx'].append(dx.copy())
        res['u'].append(u)
        res['ddx'].append(ddx.copy())
        res['uP'].append(-np.dot(KP, x - xd))
        res['uD'].append(-np.dot(KD, dx - dxd))
        res['uI'].append(-np.dot(KI, I))
        I += dt * (x - xd)
        t += dt
        # Semi-explicit Euler step: position first, then velocity.
        x += dx * dt
        dx += ddx * dt
    return res


# --- Demo: compare three derivative gains on a lightly coupled 2D system ---
A = np.zeros((2, 2))
A[1, 0] = 0.4
A[0, 1] = -0.4
B = np.zeros((2, 2))
B[1, 0] = 0
B[0, 1] = -0
c = np.zeros(2)
D = np.eye(2) * 1
x0 = np.array([1., 0.])
dx0 = np.array([0., 0.])
res1 = PID_trajectory(A, B, c, D, x0=x0, dx0=dx0, KP=np.eye(2) * 1, KI=np.eye(2) * 0.25, KD=np.eye(2) * 2, dt=0.01, T=20)
res2 = PID_trajectory(A, B, c, D, x0=x0, dx0=dx0, KP=np.eye(2) * 1, KI=np.eye(2) * 0.25, KD=np.eye(2) * 1, dt=0.01, T=20)
res3 = PID_trajectory(A, B, c, D, x0=x0, dx0=dx0, KP=np.eye(2) * 1, KI=np.eye(2) * 0.25, KD=np.eye(2) * 0.5, dt=0.01, T=20)
plt.figure(figsize=(6, 6))
plotxy([res1, res2, res3], ['kD=2', 'kD=1', 'kD=0.5'], ['x', 'x', 'x'], [0, 1], ['black', 'green', 'red', 'blue'])
plt.show()
nilq/baby-python
python
""" Copyright (c) 2018 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import traceback import numpy as np from caffe._caffe import log as LOG from caffe._caffe import Layer as BaseLayer class AdaptiveWeightingLossLayer(BaseLayer): """Layer for adaptive weighting between the input losses.""" def _load_params(self, param_str, num_variables): """Loads layer parameters. :param param_str: Input str of parameters """ layer_params = eval(param_str) self._scale = float(layer_params['scale']) if 'scale' in layer_params else 1.0 self._init = layer_params['init'] if 'init' in layer_params else 0.0 self._weights = layer_params['weights'] if 'weights' in layer_params else None if self._weights is None: self._weights = np.ones([num_variables], dtype=np.float32) else: assert len(self._weights) == num_variables assert np.all([w > 0.0 for w in self._weights]) def _create_variables(self, num_params, init_value): """Initializes internal state""" self.blobs.add_blob(num_params) self.blobs[0].data[...] = init_value def setup(self, bottom, top): """Initializes layer. :param bottom: List of bottom blobs :param top: List of top blobs """ try: self._load_params(self.param_str, num_variables=len(bottom)) num_variables = len(bottom) self._create_variables(num_variables, self._init) except Exception: LOG('AdaptiveWeightingLossLayer setup exception: {}'.format(traceback.format_exc())) exit() def forward(self, bottom, top): """Carry out forward pass. 
:param bottom: List of bottom blobs :param top: List of top blobs """ try: num_variables = len(bottom) assert num_variables > 0 assert len(top) == 1 or len(top) == 1 + num_variables samples = [] losses = [] for i in xrange(num_variables): loss_value = np.array(bottom[i].data, dtype=np.float32).reshape([-1]) assert len(loss_value) == 1 loss_value = loss_value[0] if loss_value > 0.0: param_value = self.blobs[0].data[i] loss_factor = np.exp(-param_value) new_loss_value = param_value + self._scale * loss_factor * loss_value samples.append((i, self._scale * loss_factor, self._scale * loss_factor * loss_value)) losses.append(self._weights[i] * new_loss_value) top[0].data[...] = np.sum(losses) if len(losses) > 0 else 0.0 if len(top) == 1 + num_variables: for i in xrange(num_variables): top[i + 1].data[...] = np.copy(bottom[i].data) self._samples = samples except Exception: LOG('AdaptiveWeightingLossLayer forward pass exception: {}'.format(traceback.format_exc())) exit() def backward(self, top, propagate_down, bottom): """Carry out backward pass. :param top: List of top blobs :param propagate_down: List of indicators to carry out back-propagation for the specified bottom blob :param bottom: List of bottom blobs """ try: num_variables = len(bottom) for i in xrange(num_variables): bottom[i].diff[...] = 0.0 top_diff_value = top[0].diff[0] for i, loss_scale, var_scale in self._samples: if propagate_down[i]: bottom[i].diff[...] = self._weights[i] * loss_scale * top_diff_value self.blobs[0].diff[i] += self._weights[i] * (1.0 - var_scale) * top_diff_value except Exception: LOG('AdaptiveWeightingLossLayer backward pass exception: {}'.format(traceback.format_exc())) exit() def reshape(self, bottom, top): """Carry out blob reshaping. :param bottom: List of bottom blobs :param top: List of top blobs """ top[0].reshape(1) num_variables = len(bottom) if len(top) == 1 + num_variables: for i in xrange(num_variables): top[i + 1].reshape(1)
nilq/baby-python
python
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Tests for merlin-models block composition and the shared ModelContext.
# ``testing_data`` / ``ecommerce_data`` are pytest fixtures supplying Datasets.
from typing import List

import pytest
import tensorflow as tf

import merlin.models.tf as ml
from merlin.io.dataset import Dataset
from merlin.schema import Tags


def test_sequential_block_yoochoose(testing_data: Dataset):
    # InputBlock -> MLP should project a 100-row batch to shape [100, 64].
    body = ml.InputBlock(testing_data.schema).connect(ml.MLPBlock([64]))

    outputs = body(ml.sample_batch(testing_data, batch_size=100, include_targets=False))

    assert list(outputs.shape) == [100, 64]


class DummyFeaturesBlock(ml.Block):
    # Test helper: multiplies its input by the item-id embeddings fetched
    # from the shared ModelContext.

    def add_features_to_context(self, feature_shapes) -> List[str]:
        # Request that the item-id feature be stored on the context.
        return [Tags.ITEM_ID.value]

    def call(self, inputs, **kwargs):
        items = self.context[Tags.ITEM_ID]
        emb_table = self.context.get_embedding(Tags.ITEM_ID)
        item_embeddings = tf.gather(emb_table, tf.cast(items, tf.int32))
        # Drop the extra axis gather introduces when items has shape [batch, 1].
        if tf.rank(item_embeddings) == 3:
            item_embeddings = tf.squeeze(item_embeddings)

        return inputs * item_embeddings

    def compute_output_shape(self, input_shapes):
        return input_shapes

    @property
    def item_embedding_table(self):
        return self.context.get_embedding(Tags.ITEM_ID)


def test_block_context(ecommerce_data: Dataset):
    inputs = ml.InputBlock(ecommerce_data.schema)
    dummy = DummyFeaturesBlock()
    model = inputs.connect(ml.MLPBlock([64]), dummy, context=ml.ModelContext())
    out = model(ml.sample_batch(ecommerce_data, batch_size=100, include_targets=False))

    # The embedding table reachable through the context must be the same
    # shape as the one owned by the input block.
    embeddings = inputs.select_by_name(Tags.CATEGORICAL.value)
    assert (
        dummy.context.get_embedding(Tags.ITEM_ID).shape
        == embeddings.embedding_tables[Tags.ITEM_ID.value].shape
    )
    assert out.shape[-1] == 64


@pytest.mark.parametrize("run_eagerly", [True])
def test_block_context_model(ecommerce_data: Dataset, run_eagerly: bool, tmp_path):
    dummy = DummyFeaturesBlock()
    model = ml.Model(
        ml.InputBlock(ecommerce_data.schema),
        ml.MLPBlock([64]),
        dummy,
        ml.BinaryClassificationTask("click"),
    )
    model.compile(optimizer="adam", run_eagerly=run_eagerly)
    model.fit(ecommerce_data, batch_size=50, epochs=1)
    model.save(str(tmp_path))

    # Reload and verify the context round-trips through save/load.
    copy_model = tf.keras.models.load_model(str(tmp_path))
    assert copy_model.context == copy_model.block.layers[0].context
    assert list(copy_model.context._feature_names) == ["item_id"]
    # 23 feature dtypes expected from the ecommerce_data fixture schema.
    assert len(dict(copy_model.context._feature_dtypes)) == 23

    copy_model.compile(optimizer="adam", run_eagerly=run_eagerly)
    # TODO: Fix prediction-task output name so that we can retrain a model after saving
    # copy_model.fit(ecommerce_data.tf_dataloader(), epochs=1)
nilq/baby-python
python
"""Minimal SentenceTransformers fine-tuning example using CosineSimilarityLoss."""
from sentence_transformers import SentenceTransformer, InputExample, losses
from torch.utils.data import DataLoader

# Define the model. Either from scratch or by loading a pre-trained model.
model = SentenceTransformer('distilbert-base-nli-mean-tokens')

# Define your train examples. You need more than just two examples...
# Each label is the target cosine similarity for the sentence pair (0..1).
train_examples = [InputExample(texts=['My first sentence', 'My second sentence'], label=0.8),
    InputExample(texts=['Another pair', 'Unrelated sentence'], label=0.3)]

# Define your train dataset, the dataloader and the train loss.
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=16)
train_loss = losses.CosineSimilarityLoss(model)

# Tune the model. warmup_steps linearly ramps up the learning rate over the
# first 100 optimizer steps before the regular schedule takes over.
model.fit(train_objectives=[(train_dataloader, train_loss)], epochs=1, warmup_steps=100)
nilq/baby-python
python
"""Package init: re-exports the collector API plus optional pandas/seaborn
interfaces, and computes ``__all__`` from the public base classes."""
from ..core import Dimensioned, AttrTree

# Optional dependencies: only expose the pandas/seaborn interfaces when the
# corresponding package is importable.
# BUG FIX: the original used bare ``except:`` clauses, which also swallowed
# KeyboardInterrupt/SystemExit and hid genuine errors inside the submodules.
try:
    import pandas
    from .pandas import DFrame                 # noqa (API import)
except ImportError:
    pandas = None

try:
    import seaborn
    from .seaborn import *                     # noqa (API import)
except ImportError:
    seaborn = None

from .collector import *                       # noqa (API import)


def public(obj):
    """Return True if *obj* is a class belonging to the public API."""
    if not isinstance(obj, type):
        return False
    baseclasses = [Dimensioned, Collector, AttrTree]
    return any(issubclass(obj, bc) for bc in baseclasses)


__all__ = list(set([_k for _k, _v in locals().items() if public(_v)]))
nilq/baby-python
python
#! /bin/python3
"""Single-client command listener: accepts one connection on 127.0.0.1:8080,
forwards typed commands to the peer and prints each response."""
import socket

listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Allow quick restarts without waiting for TIME_WAIT to clear.
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
listener.bind(('127.0.0.1', 8080))
listener.listen(0)
print("[+] Esperando por conexiones")
connection, addr = listener.accept()
print("[+] Conexion de " + str(addr))

try:
    while True:
        command = input('>>')
        # BUG FIX: sendall() retries partial writes; plain send() may
        # silently truncate the command.
        connection.sendall(command.encode())
        result = connection.recv(1024)
        if not result:
            # recv() returning b'' means the peer closed the connection;
            # the original looped forever printing empty responses.
            break
        # Decode for readable output instead of printing the bytes repr.
        print(result.decode(errors='replace'))
finally:
    connection.close()
    listener.close()
nilq/baby-python
python
"""Undirected geometric graph with Dijkstra shortest paths over Node models."""
from collections import defaultdict, deque
import math


class Graph(object):
    """Undirected graph over node objects exposing ``x_coord``/``y_coord``."""

    def __init__(self):
        self.nodes = set()               # all vertices
        self.edges = defaultdict(list)   # adjacency lists (both directions)
        self.distances = {}              # (from, to) -> Euclidean edge length

    def add_node(self, value):
        """Add a vertex to the graph."""
        self.nodes.add(value)

    def add_edge(self, from_node, to_node):
        """Add an undirected edge whose weight is the Euclidean distance
        between the two nodes' coordinates."""
        self.edges[from_node].append(to_node)
        self.edges[to_node].append(from_node)
        distance = math.hypot(from_node.x_coord - to_node.x_coord,
                              from_node.y_coord - to_node.y_coord)
        # BUG FIX: record the length in BOTH directions. The original stored
        # only (from, to), so dijkstra() silently skipped every reverse edge
        # (the missing key was swallowed by a bare ``except: continue``).
        self.distances[(from_node, to_node)] = distance
        self.distances[(to_node, from_node)] = distance


def dijkstra(graph, initial):
    """Single-source shortest paths from *initial*.

    Returns:
        (visited, path) where ``visited`` maps each reachable node to its
        distance from *initial* and ``path`` maps each node to its
        predecessor on the shortest path.
    """
    visited = {initial: 0}
    path = {}
    nodes = set(graph.nodes)

    while nodes:
        # Pick the unprocessed node with the smallest known distance.
        min_node = None
        for node in nodes:
            if node in visited:
                if min_node is None or visited[node] < visited[min_node]:
                    min_node = node
        if min_node is None:
            break  # every remaining node is unreachable

        nodes.remove(min_node)
        current_weight = visited[min_node]

        for neighbor in graph.edges[min_node]:
            edge_length = graph.distances.get((min_node, neighbor))
            if edge_length is None:
                continue  # no recorded length for this pair
            weight = current_weight + edge_length
            if neighbor not in visited or weight < visited[neighbor]:
                visited[neighbor] = weight
                path[neighbor] = min_node

    return visited, path


def shortest_path(graph, origin, destination):
    """Return ``(total_distance, [origin, ..., destination])``.

    Raises KeyError if *destination* is unreachable from *origin*.
    """
    visited, paths = dijkstra(graph, origin)
    full_path = deque()
    _destination = paths[destination]

    while _destination != origin:
        full_path.appendleft(_destination)
        _destination = paths[_destination]

    full_path.appendleft(origin)
    full_path.append(destination)

    return visited[destination], list(full_path)


def graph_test():
    """Smoke test: shortest path on floor 3 between two named nodes."""
    # Imported lazily so the pure-graph code above stays usable without Django.
    from .models import Node, Edge
    graph = Graph()
    all_nodes = Node.objects.filter(floor=3)
    for node in all_nodes:
        graph.add_node(node)
    for node in all_nodes:
        edges = Edge.objects.filter(FromNode=node)
        for edge in edges:
            graph.add_edge(edge.FromNode, edge.ToNode)
    orgin = Node.objects.get(name="3_enter", floor=3)
    destination = Node.objects.get(name="136_o", floor=3)
    path_distance, path_list = shortest_path(graph, orgin, destination)
    return path_list


def get_path(star, end, level):
    """Return the shortest path (list of Nodes) between the nodes named
    *star* and *end* on the given floor *level*."""
    from .models import Node, Edge
    graph = Graph()
    all_nodes = Node.objects.filter(floor=level)
    for node in all_nodes:
        graph.add_node(node)
        edges = Edge.objects.filter(FromNode=node)
        for edge in edges:
            graph.add_edge(edge.FromNode, edge.ToNode)
    orgin = Node.objects.get(name=star, floor=level)
    destination = Node.objects.get(name=end, floor=level)
    path_distance, path_list = shortest_path(graph, orgin, destination)
    return path_list
nilq/baby-python
python
# Entry point: launch the command-line (no-GUI) directory manager client.
from client.nogui import DirManager

dm = DirManager()
dm.run()
nilq/baby-python
python
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright CNRS 2012
# Roman Yurchak (LULI)
# This software is governed by the CeCILL-B license under French law and
# abiding by the rules of distribution of free software.
"""Plasma parameter formulas: critical density, Coulomb logarithms,
collision rates, sound speed, Spitzer conductivities and mean free paths."""

import numpy as np
from scipy.constants import e, c, m_e, epsilon_0, k, N_A
from scipy.constants import physical_constants

eV2K = physical_constants['electron volt-kelvin relationship'][0]
m_p_e = physical_constants['proton-electron mass ratio'][0]


def critical_density(lmbda):
    """Compute critical density for a plasma

    Parameters:
    -----------
     - lmbda [ndarray or float]: wavelength [nm]

    Returns:
     - Nc: [cm⁻³]
    """
    # BUG FIX: the original used ``lmbda *= 1e-9`` which mutated the caller's
    # array in place when an ndarray was passed.
    lmbda_m = lmbda * 1e-9  # nm -> m
    omega = 2 * np.pi * c / lmbda_m
    return 1e-6 * epsilon_0 * m_e * omega**2 / e**2


def coulomb_logarithm(nele, znuc, tele):
    """Compute Coulomb logarithm

    Warning: untested implementation! Use log_lambda instead!

    Parameters:
    -----------
     - nele: electron density in [cm⁻³]
     - znuc: nuclear charge (unused by this fit; kept for API compatibility)
     - tele: mean temperature in [eV]

    Returns:
    --------
    ln Λ
    """
    # Clipped below at 2 to stay physical at high density / low temperature.
    return np.fmax(2, 24. - np.log(nele**0.5 / tele))


def log_lambda(nele, Znuc, temp, spec='e', source='Atzeni2004'):
    """Compute the Coulomb logarithm for electrons or ions

    Parameters:
    -----------
     - nele: electron density in [cm⁻³]
     - Znuc: nuclear charge
     - temp: mean temperature in [eV]
     - spec: specie 'i' or 'e'
     - source: literature source from where the formula is taken.
       Possible options are:
         * Atzeni2004: The Physics of Inertial Fusion, 2004,
           page 367, section 10.9.1
         * Drake2006: High-Energy-Density Physics, page 48

    Returns:
    --------
    ln Λ_spec
    """
    if spec not in ['e', 'i']:
        raise ValueError("The 'spec' argument {} must be either 'i' (ions) or 'e' (electrons)".format(spec))
    if source == 'Atzeni2004':
        if spec == 'e':
            if not np.all(temp > 10):
                print('Warning: computing Ln Λ_e outside of its validity range Te > 10 eV !')
            res = 7.1 - 0.5 * np.log(nele * 1e-21) + np.log(temp * 1e-3)
        elif spec == 'i':
            if not np.all(temp < Znuc / 2. * 10e3):
                print('Warning: computing Ln Λ_e outside of its validity range Ti < 10 A keV !')
            res = 9.2 - 0.5 * np.log(nele * 1e-21) + 1.5 * np.log(temp * 1e-3)
    elif source == 'Drake2006':
        if spec == 'e':
            print('Warning: validity domain for Ln Λ_e not defined in Drake (2006)!')
            res = 24. - np.log(nele**0.5 / temp)
        else:
            raise NotImplementedError('Ln Λ_i not defined in the Drake (2006) book!')
    else:
        raise NotImplementedError('Source = {} for calculating the Coulomb logarithm is not implemented!'.format(source))
    # Never return an unphysically small (or negative) logarithm.
    return np.fmax(1, res)


def collision_rate(dens, temp, abar, zbar, kind='ei', source='Atzeni2004',
                   ln_lambda_source=None):
    """Compute the electron-ion (or ee/ii) collision rate

    Parameters:
    -----------
     - dens: density in [g.cm⁻³]
     - temp: temperature in [eV]
     - abar: mean atomic mass
     - zbar: mean ionization
     - kind: type of collision rate: 'ei', 'e' (ee) or 'i' (ii)
     - source: formula used (see also `log_lambda`)
     - ln_lambda_source: optional separate source for the Coulomb logarithm

    Returns:
    --------
    ν_kind [s⁻¹]
    """
    if kind in ['e', 'ei']:
        spec = 'e'
    elif kind == 'i':
        spec = 'i'
    else:
        raise ValueError("kind must be one of 'e', 'ei', 'i', got {!r}".format(kind))

    if ln_lambda_source is None:
        ln_lambda_source = source

    nion = dens * N_A / abar
    nele = nion * zbar
    lnLambda = log_lambda(nele, zbar, temp, spec=spec, source=ln_lambda_source)

    if source == 'Atzeni2004':
        # ``res`` is the collision *time* in seconds; the rate is 1/res.
        if kind == 'i':
            res = 6.60e-19 * (abar**0.5 * (temp / 1e3)**(3. / 2)) / ((nion / 1e21) * zbar**4 * lnLambda)
        elif kind in ['e', 'ei']:
            res = 1.09e-11 * ((temp / 1e3)**(3. / 2)) / ((nion / 1e21) * zbar**2 * lnLambda)
            if kind == 'ei':
                # e-i momentum-exchange time: scaled by the mass ratio.
                res *= m_p_e / 2
        return 1. / res
    else:
        raise NotImplementedError


def ff_collision_frequency(nele, zbar, tele, lmbda):
    """Compute inverse bremsstrahlung coefficient

        ν_ib = (ne * ν_ei / nc) * 1/√(1 - ne/nc)

    Parameters:
    -----------
     - nele: electron density in [cm⁻³] (ndarray)
     - zbar: mean ionization
     - tele: mean temperature in eV
     - lmbda [ndarray or float]: wavelength [nm]
    """
    nc = critical_density(lmbda)
    # FIXME(review): ``ei_collision_rate`` is not defined anywhere in this
    # module — calling this function raises NameError. It presumably predates
    # the current ``collision_rate`` API (which needs dens/abar); left as-is
    # rather than guessing the intended formula.
    nu_ei = ei_collision_rate(nele, zbar, tele)
    nu_ff = (nele * nu_ei / nc) * (1 / (1 - nele / nc)**0.5)
    nu_ff[nele > nc] = np.nan  # no propagation above the critical density
    return nu_ff


def isentropic_sound_speed(abar, zbar, gamma, tele):
    """Compute the ion sound speed for an ideal gas (NRL formulary)

    Parameters:
    -----------
     - abar: atomic number
     - zbar: mean ionization
     - gamma: adiabatic index
     - tele: electron temperature [eV]

    Returns:
    --------
    adiabatic sound speed [km/s]
    """
    return 9.79 * (gamma * zbar * tele / abar)**0.5


def spitzer_conductivity(nele, tele, znuc, zbar):
    """Compute the Spitzer conductivity

    Parameters:
    -----------
     - nele [cm⁻³]
     - tele [eV]
     - znuc: nuclear charge
     - zbar: mean ionization

    Returns:
    --------
     - Spitzer conductivity [Ω⁻¹.cm⁻¹]
    """
    lnLam = coulomb_logarithm(nele, znuc, tele)
    return 1. / (1.03e-2 * lnLam * zbar * (tele)**(-3. / 2))


def spitzer_conductivity2(nele, tele, znuc, zbar):
    """Compute the Spitzer (thermal) conductivity

    Parameters:
    -----------
     - nele [cm⁻³]
     - tele [eV]
     - znuc: nuclear charge
     - zbar: mean ionization

    Returns:
    --------
     - Spitzer conductivity [cm².s⁻¹]
    """
    lnLam = coulomb_logarithm(nele, znuc, tele)
    return 2e21 * tele**(5. / 2) / (lnLam * nele * (zbar + 1))


def thermal_speed(temp, abar=1.0, spec='e'):
    """Calculate the thermal speed for electrons or ions

    Parameters
    ----------
     - temp [eV]
     - abar: mean atomic number (ions only)
     - spec: species, 'e' or 'i'

    Returns
    -------
    speed in cm/s

    Source: https://en.wikipedia.org/wiki/Plasma_parameters
    """
    if spec == 'e':
        return 4.19e7 * temp**0.5
    elif spec == 'i':
        return 9.79e5 * abar**(-0.5) * temp**0.5
    else:
        raise ValueError("spec must be 'e' or 'i', got {!r}".format(spec))


def collisional_mfp(dens, temp, abar, zbar, source='Atzeni2004'):
    """Calculate the electron collisional mean free path

    Parameters:
    -----------
     - dens: density in [g.cm⁻³]
     - temp: temperature in [eV]
     - abar: mean atomic mass
     - zbar: mean ionization
     - source: formula source (see `collision_rate`)

    Returns
    -------
     - collisional mean free path [cm]

    Source: Drake (2006) book.
    """
    # BUG FIX: the original hard-coded source='Atzeni2004' here, silently
    # ignoring the caller's ``source`` argument.
    nu_ei = collision_rate(dens, temp, abar, zbar, kind='ei', source=source)
    vel = thermal_speed(temp, abar, spec='e')
    return vel / nu_ei
nilq/baby-python
python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Generates Rust opcode sources (src/op.rs, src/dis/mod.rs, src/asm/mod.rs)
from the opcode table in utils/opcodes.txt (8080-style instruction set)."""

import humps
import re


def main():
    """Parse the opcode table and emit the Rust enum/disassembler/assembler files.

    Each table line has the form ``0xNN MNEMONIC SIZE``; lines with an empty
    mnemonic are skipped. Operand placeholders in the mnemonic (``D8``/``D16``/
    ``adr``) decide how many immediate bytes each opcode consumes.
    """
    # Groups: (hex code, mnemonic with spaces/commas, byte size).
    comp = re.compile(r"^(0x[\da-f]+) ([\w \,]*) (\d)", re.M | re.I)
    match = None

    op_input = "utils/opcodes.txt"
    op_output = "src/op.rs"
    dis_output = "src/dis/mod.rs"
    asm_output = "src/asm/mod.rs"

    with open(op_input) as f:
        match = comp.findall(f.read())

    # "do not edit" banner written at the top of every generated file.
    header = (
        "//! `{}` is automatically generated by `" + __file__ + "` from `{}`.\n"
        "//! don't modify this file directly, instead run `python3 " + __file__ + "`.\n\n"
    )

    raw_opcode = "RawOpcode"
    wrap_opcode = "Opcode"
    opcode_err = "OpError"

    with open(op_output, "w") as o:
        with open(dis_output, "w") as f2:
            o.write(header.format(op_output, op_input))
            f2.write(header.format(dis_output, op_input))

            # --- dis/mod.rs: OpError type + disassemble_raw() prologue ---
            f2.write("use super::op::*;\nuse std::fmt;\n\n")
            f2.write("#[derive(Debug, Clone)]\n")
            f2.write(f"pub struct {opcode_err}(usize);\n\n")
            f2.write(f"impl fmt::Display for {opcode_err} {{\n")
            f2.write(
                f"{' ' * 4}fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {{\n"
            )
            f2.write(f"{' ' * 4 * 2}write!(f, \"expected {{}} bytes\", self.0)\n")
            f2.write(f"{' ' * 4}}}\n}}\n\n")
            f2.write(
                f"pub fn disassemble_raw(bin: &[u8]) -> Result<Vec<{raw_opcode}>, {opcode_err}> {{\n"
            )
            f2.write(f"{' ' * 4}let mut ops = Vec::new();\n\n")
            f2.write(f"{' ' * 4}let mut i = 0;\n")
            f2.write(f"{' ' * 4}while i < bin.len() {{\n")
            f2.write(f"{' ' * 4 * 2}ops.push(match bin[i] {{\n")

            # --- op.rs: RawOpcode enum (operand-less mnemonic variants) ---
            o.write("use std::{fmt, mem};\n\n")
            o.write("#[allow(non_camel_case_types)]\n")
            o.write("#[repr(u8)]\n")
            o.write("#[derive(Debug, Clone, Copy, PartialEq, Eq)]\n")
            o.write(f"pub enum {raw_opcode} {{\n")
            for m in match:
                if m[1] != "":
                    # op keeps the operand suffix; op2 strips it for the variant name.
                    op = m[1].replace(" ", "_").replace(",", "_")
                    op2 = op.replace("__D16", "")
                    op2 = op2.replace("_D16", "")
                    op2 = op2.replace("D16", "")
                    op2 = op2.replace("__D8", "")
                    op2 = op2.replace("_D8", "")
                    op2 = op2.replace("D8", "")
                    op2 = op2.replace("_adr", "")
                    o.write(f"{' ' * 4}{op2} = {m[0]},\n")

                    # Matching disassemble_raw arm: advance i by the operand width
                    # and bounds-check multi-byte instructions.
                    f2.write(f"{' ' * 4 * 3}{m[0]} => {{\n{' ' * 4 * 4}i += ")
                    if op.endswith("D16") or op.endswith("_adr"):
                        f2.write(
                            f"3;\n{' ' * 4 * 4}if i >= bin.len() {{\n{' ' * 4 * 5}return Err(OpError(i - bin.len()));\n"
                        )
                        f2.write(
                            f"{' ' * 4 * 4}}} else {{\n{' ' * 4 * 5}{raw_opcode}::{op2}\n{' ' * 4 * 4}}}\n"
                        )
                        f2.write(f"{' ' * 4 * 3}}}\n")
                    elif op.endswith("D8"):
                        f2.write(
                            f"2;\n{' ' * 4 * 4}if i >= bin.len() {{\n{' ' * 4 * 5}return Err(OpError(i - bin.len()));\n"
                        )
                        f2.write(
                            f"{' ' * 4 * 4}}} else {{\n{' ' * 4 * 5}{raw_opcode}::{op2}\n{' ' * 4 * 4}}}\n"
                        )
                        f2.write(f"{' ' * 4 * 3}}}\n")
                    else:
                        f2.write(
                            f"1;\n{' ' * 4 * 4}{raw_opcode}::{op2}\n{' ' * 4 * 3}}}\n"
                        )
            o.write("}\n\n")

            # --- op.rs: RawOpcode::size() ---
            o.write(f"impl {raw_opcode} {{\n")
            o.write(f"{' ' * 4}pub fn size(&self) -> usize {{\n")
            o.write(f"{' ' * 4 * 2}match *self {{\n")
            for m in match:
                if m[1] != "":
                    op = m[1].replace(" ", "_").replace(",", "_")
                    op2 = op.replace("__D16", "")
                    op2 = op2.replace("_D16", "")
                    op2 = op2.replace("D16", "")
                    op2 = op2.replace("__D8", "")
                    op2 = op2.replace("_D8", "")
                    op2 = op2.replace("D8", "")
                    op2 = op2.replace("_adr", "")
                    o.write(f"{' ' * 4 * 3}{raw_opcode}::{op2} => {m[2]},\n")
            o.write(f"{' ' * 4 * 2}}}\n{' ' * 4}}}\n}}\n\n")

            # --- op.rs: conversions u8 <-> RawOpcode and Display ---
            o.write(f"impl From<u8> for {raw_opcode} {{\n")
            o.write(f"{' ' * 4}fn from(t: u8) -> {raw_opcode} {{\n")
            o.write(f"{' ' * 4 * 2}match t {{\n")
            o.write(f"{' ' * 4 * 3}// Undocumented ops\n")
            # Undocumented 8080 opcodes are remapped to documented equivalents
            # so the unsafe transmute below only sees valid discriminants.
            o.write(
                f"{' ' * 4 * 3}0x08 | 0x10 | 0x18 | 0x20 | 0x28 | 0x30 | 0x38 => RawOpcode::NOP,\n"
            )
            o.write(f"{' ' * 4 * 3}0xd9 => RawOpcode::RET,\n")
            o.write(f"{' ' * 4 * 3}0xdd | 0xed | 0xfd => RawOpcode::CALL,\n")
            o.write(f"{' ' * 4 * 3}0xcb => RawOpcode::JMP,\n")
            o.write(f"{' ' * 4 * 3}_ => unsafe {{ mem::transmute(t) }},\n")
            o.write(f"{' ' * 4 * 2}}}\n{' ' * 4}}}\n}}\n\n")
            o.write(f"impl From<&u8> for {raw_opcode} {{\n")
            o.write(f"{' ' * 4}fn from(t: &u8) -> {raw_opcode} {{\n")
            o.write(f"{' ' * 4 * 2}From::from(*t)\n")
            o.write(f"{' ' * 4}}}\n}}\n\n")
            o.write(f"impl Into<u8> for {raw_opcode} {{\n")
            o.write(f"{' ' * 4}fn into(self) -> u8 {{\n")
            o.write(f"{' ' * 4 * 2}unsafe {{ mem::transmute(self) }}\n")
            o.write(f"{' ' * 4}}}\n}}\n\n")
            o.write(f"impl fmt::Display for {raw_opcode} {{\n")
            o.write(
                f"{' ' * 4}fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {{\n"
            )
            o.write(
                f"{' ' * 4 * 2}write!(f, \"{{:?}}(0x{{:02x?}})\", self, *self as u8)\n"
            )
            o.write(f"{' ' * 4}}}\n}}\n\n")

            # Close disassemble_raw(): default arm + epilogue.
            f2.write(f"{' ' * 4 * 3}_ => {{\n{' ' * 4 * 4}i += ")
            f2.write(f"1;\n{' ' * 4 * 4}{raw_opcode}::NOP\n{' ' * 4 * 3}}}\n")
            f2.write(f"{' ' * 4 * 2}}});\n")
            f2.write(f"{' ' * 4}}}\n\n{' ' * 4}Ok(ops)\n}}\n\n")

            # --- op.rs: Opcode enum (variants carry decoded operands) and
            # --- dis/mod.rs: disassemble() emitting Opcode values ---
            o.write("#[derive(Debug, Clone, Copy, PartialEq, Eq)]\n")
            o.write(f"pub enum {wrap_opcode} {{\n")
            f2.write(
                f"pub fn disassemble(bin: &[u8]) -> Result<Vec<{wrap_opcode}>, {opcode_err}> {{\n"
            )
            f2.write(f"{' ' * 4}let mut ops = Vec::new();\n\n")
            f2.write(f"{' ' * 4}let mut i = 0;\n")
            f2.write(f"{' ' * 4}while i < bin.len() {{\n")
            f2.write(f"{' ' * 4 * 2}ops.push(match bin[i] {{\n")
            for m in match:
                if m[1] != "":
                    # PascalCase the mnemonic, replace operand suffixes with
                    # payload types, and re-uppercase register letters that
                    # humps.pascalize lowercased after underscores.
                    op = m[1].replace(" ", "_").replace(",", "_").lower()
                    op = humps.pascalize(op)
                    op = op.replace("__D16", "(u8, u8)")
                    op = op.replace("_D16", "(u8, u8)")
                    op = op.replace("D16", "(u8, u8)")
                    op = op.replace("__D8", "(u8)")
                    op = op.replace("_D8", "(u8)")
                    op = op.replace("D8", "(u8)")
                    op = op.replace("Adr", "(u16)")
                    op = op.replace("_B", "B")
                    op = op.replace("_C", "C")
                    op = op.replace("_D", "D")
                    op = op.replace("_E", "E")
                    op = op.replace("_H", "H")
                    op = op.replace("_L", "L")
                    op = op.replace("_M", "M")
                    op = op.replace("_A", "A")
                    o.write(f"{' ' * 4}{op},\n")

                    # op2: same variant but with constructor expressions that
                    # read the immediate bytes (note: D16 operands are stored
                    # swapped, little-endian order in the byte stream).
                    op2 = op.replace("(u8, u8)", "(*b2, *b1)")
                    op2 = op2.replace("(u8)", "(*b1)")
                    op2 = op2.replace("(u16)", "(u16::from_le_bytes([*b1, *b2]))")

                    f2.write(f"{' ' * 4 * 3}{m[0]} => {{\n{' ' * 4 * 4}i += ")
                    if op2.endswith("(*b2, *b1)") or op2.endswith(
                        "(u16::from_le_bytes([*b1, *b2]))"
                    ):
                        f2.write(
                            f"3;\n{' ' * 4 * 4}let b1 = bin.get(i - 2).ok_or({opcode_err}(2))?;"
                        )
                        f2.write(
                            f"\n{' ' * 4 * 4}let b2 = bin.get(i - 1).ok_or({opcode_err}(1))?;"
                        )
                        f2.write(
                            f"\n{' ' * 4 * 4}{wrap_opcode}::{op2}\n{' ' * 4 * 3}}}\n"
                        )
                    elif op2.endswith("(*b1)"):
                        f2.write(
                            f"2;\n{' ' * 4 * 4}let b1 = bin.get(i - 1).ok_or({opcode_err}(1))?;"
                        )
                        f2.write(
                            f"\n{' ' * 4 * 4}{wrap_opcode}::{op2}\n{' ' * 4 * 3}}}\n"
                        )
                    else:
                        f2.write(
                            f"1;\n{' ' * 4 * 4}{wrap_opcode}::{op2}\n{' ' * 4 * 3}}}\n"
                        )
            # Close disassemble(): unknown bytes decode as Nop.
            f2.write(f"{' ' * 4 * 3}_ => {{\n{' ' * 4 * 4}i += ")
            f2.write(f"1;\n{' ' * 4 * 4}{wrap_opcode}::Nop\n{' ' * 4 * 3}}}\n")
            f2.write(f"{' ' * 4 * 2}}});\n")
            f2.write(f"{' ' * 4}}}\n\n{' ' * 4}Ok(ops)\n}}\n")
            o.write("}\n\n")

            # --- op.rs: Opcode::size() (payloads matched with wildcards) ---
            o.write(f"impl {wrap_opcode} {{\n")
            o.write(f"{' ' * 4}pub fn size(&self) -> usize {{\n")
            o.write(f"{' ' * 4 * 2}match *self {{\n")
            for m in match:
                if m[1] != "":
                    op = m[1].replace(" ", "_").replace(",", "_").lower()
                    op = humps.pascalize(op)
                    op = op.replace("__D16", "(_, _)")
                    op = op.replace("_D16", "(_, _)")
                    op = op.replace("D16", "(_, _)")
                    op = op.replace("__D8", "(_)")
                    op = op.replace("_D8", "(_)")
                    op = op.replace("D8", "(_)")
                    op = op.replace("Adr", "(_)")
                    op = op.replace("_B", "B")
                    op = op.replace("_C", "C")
                    op = op.replace("_D", "D")
                    op = op.replace("_E", "E")
                    op = op.replace("_H", "H")
                    op = op.replace("_L", "L")
                    op = op.replace("_M", "M")
                    op = op.replace("_A", "A")
                    o.write(f"{' ' * 4 * 3}{wrap_opcode}::{op} => {m[2]},\n")
            o.write(f"{' ' * 4 * 2}}}\n{' ' * 4}}}\n}}\n")

    # --- asm/mod.rs: codegen() re-encoding Opcode values to bytes ---
    with open(asm_output, "w") as f:
        f.write(header.format(asm_output, op_input))
        f.write("use super::op::*;\npub mod lexer;\n\n")
        f.write(f"pub fn codegen(ops: &[{wrap_opcode}]) -> Vec<u8> {{\n")
        f.write(f"{' ' * 4}let mut bin = Vec::new();\n\n")
        f.write(f"{' ' * 4}let mut i = 0;\n")
        f.write(f"{' ' * 4}while i < ops.len() {{\n")
        f.write(f"{' ' * 4 * 2}match ops[i] {{\n")
        for m in match:
            if m[1] != "":
                # Same variant naming as above, but binding payloads to
                # b1/b2 (immediates) or s1 (16-bit address).
                op = m[1].replace(" ", "_").replace(",", "_").lower()
                op = humps.pascalize(op)
                op = op.replace("__D16", "(b2, b1)")
                op = op.replace("_D16", "(b2, b1)")
                op = op.replace("D16", "(b2, b1)")
                op = op.replace("__D8", "(b1)")
                op = op.replace("_D8", "(b1)")
                op = op.replace("D8", "(b1)")
                op = op.replace("Adr", "(s1)")
                op = op.replace("_B", "B")
                op = op.replace("_C", "C")
                op = op.replace("_D", "D")
                op = op.replace("_E", "E")
                op = op.replace("_H", "H")
                op = op.replace("_L", "L")
                op = op.replace("_M", "M")
                op = op.replace("_A", "A")
                f.write(f"{' ' * 4 * 3}{wrap_opcode}::{op} => {{\n")
                if op.endswith("(b2, b1)"):
                    f.write(f"{' ' * 4 * 4}bin.push({m[0]}u8);\n")
                    f.write(
                        f"{' ' * 4 * 4}bin.push(b1);\n{' ' * 4 * 4}bin.push(b2);\n{' ' * 4 * 3}}}\n"
                    )
                elif op.endswith("(b1)"):
                    f.write(f"{' ' * 4 * 4}bin.push({m[0]}u8);\n")
                    f.write(f"{' ' * 4 * 4}bin.push(b1);\n{' ' * 4 * 3}}}\n")
                elif op.endswith("(s1)"):
                    f.write(f"{' ' * 4 * 4}bin.push({m[0]}u8);\n")
                    f.write(f"{' ' * 4 * 4}let b = s1.to_le_bytes();\n")
                    f.write(
                        f"{' ' * 4 * 4}bin.push(b[0]);\n{' ' * 4 * 4}bin.push(b[1]);\n{' ' * 4 * 3}}}\n"
                    )
                else:
                    f.write(f"{' ' * 4 * 4}bin.push({m[0]}u8);\n{' ' * 4 * 3}}}\n")
        f.write(f"{' ' * 4 * 2}}}\n{' ' * 4 * 2}i += 1;\n")
        f.write(f"{' ' * 4}}}\n\n{' ' * 4}bin\n}}\n")


if __name__ == "__main__":
    main()
nilq/baby-python
python
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Automl Tables Batch Predict wrapper."""

import logging
from pathlib import Path

from google.cloud import automl_v1beta1 as automl


def predict(project_id, region, model_id, datasource, destination_prefix,
            output_destination):
    """Runs batch predict on an AutoML tables model and blocks until done.

    Args:
        project_id: A project ID for AutoML.
        region: A region for AutoML processing.
        model_id: An ID of a trained AutoML model.
        datasource: The URL of a dataset to score. Should start with 'bq://'
            for BigQuery and 'gs://' for Cloud Storage (comma-separated URIs
            allowed for Cloud Storage).
        destination_prefix: A destination prefix for the output. 'bq://...'
            for BigQuery, 'gs://...' for Cloud Storage.
        output_destination: Local file path where the resulting output
            location (BigQuery dataset or GCS directory) is written, so a
            downstream KFP component can consume it.
    """
    logging.basicConfig(level=logging.INFO)
    client = automl.PredictionServiceClient()

    # Prepare the prediction query config
    model_full_id = client.model_path(project_id, region, model_id)
    if datasource.startswith("bq"):
        input_config = {"bigquery_source": {"input_uri": datasource}}
    else:
        input_uris = datasource.split(",")
        input_config = {"gcs_source": {"input_uris": input_uris}}
    if destination_prefix.startswith("bq"):
        output_config = {"bigquery_destination": {"output_uri": destination_prefix}}
    else:
        output_config = {
            "gcs_destination": {
                "output_uri_prefix": destination_prefix
            }
        }

    # Run the prediction query
    logging.info("Starting batch scoring using: {}".format(datasource))
    response = client.batch_predict(model_full_id, input_config, output_config)

    # Wait for completion (long-running operation).
    response.result()
    result = response.metadata
    logging.info("Batch scoring completed: {}".format(str(result)))

    # Save the output location for downstream pipeline steps.
    if destination_prefix.startswith("bq"):
        output = result.batch_predict_details.output_info.bigquery_output_dataset
    else:
        output = result.batch_predict_details.output_info.gcs_output_directory
    Path(output_destination).parent.mkdir(parents=True, exist_ok=True)
    Path(output_destination).write_text(output)
nilq/baby-python
python
import CustomVLCClass
import serial
import time
import threading

# Give the system (and the serial device) time to come up before starting.
time.sleep(20)

while True:
    def inputListener():
        """Keyboard fallback: toggle play/pause (0/1) or mute/unmute (00/01).

        NOTE(review): this listener is defined but never started in the
        visible code -- confirm whether a thread for it was intended.
        """
        # A loop replaces the original tail-recursion, which would grow the
        # call stack without bound on a long session.
        while True:
            inputdata = input('0 to quit the first song, 1 to quit the second song')
            if inputdata == '0':
                if a.mediaplayer.is_playing():
                    a.pause()
                else:
                    a.play()
                print("Quiting 0")
            elif inputdata == '1':
                if b.mediaplayer.is_playing():
                    b.pause()
                else:
                    b.play()
                print("Quiting 1")
            elif inputdata == '00':
                a.mute()
            elif inputdata == '01':
                a.unmute()

    def arduinoListener():
        """Translate serial messages into mute/unmute commands.

        Each message is two ASCII digits 'sc' followed by CRLF: sensor s in
        0..4, chip c in 0..6.  c == 0 means the chip left sensor s, so the
        player that chip had selected is muted; c in 1..6 selects (unmutes)
        player number c.
        """
        past = [0] * 5  # last chip seen per sensor; 0 == no chip on the field
        while True:
            try:
                line = ser.readline()
                if not line:
                    continue
                msg = line.decode('ascii', errors='replace').strip()
                if len(msg) != 2 or not msg.isdigit():
                    continue
                sensor, chip = int(msg[0]), int(msg[1])
                if sensor > 4 or chip > 6:
                    continue
                print(msg)
                if chip == 0:
                    # Chip removed: mute whatever this sensor had selected.
                    # (The original copy-pasted branches reset past1 instead
                    # of past2 in the sensor-2 case; the table fixes that.)
                    if past[sensor] != 0:
                        players[past[sensor] - 1].mute()
                    past[sensor] = 0
                else:
                    past[sensor] = chip
                    players[chip - 1].unmute()
            except KeyboardInterrupt:
                print("exiting")
                break

    ser = serial.Serial('/dev/ttyAMA0', 9600, timeout=1.0)
    # Toggle DTR to reset the Arduino and drop any stale buffered input.
    ser.setDTR(False)
    time.sleep(1)
    ser.flushInput()
    ser.setDTR(True)

    # One player per track; keep the original single-letter names, which the
    # listeners above close over.
    players = [
        CustomVLCClass.CustomVLCClass(
            filename="/acien101/AudioMixer/audio/{}.mp3".format(i))
        for i in range(1, 7)
    ]
    a, b, c, d, e, f = players

    inputArduinoThread = threading.Thread(target=arduinoListener, name="inputAduino")
    inputArduinoThread.start()

    # The original tested ``b.mediaplayer.is_playing`` without calling it,
    # which is always truthy; fixed to actually test playback.
    while a.mediaplayer.is_playing() and b.mediaplayer.is_playing():
        time.sleep(0.1)
nilq/baby-python
python
import torch from kobart import get_kobart_tokenizer from transformers.models.bart import BartForConditionalGeneration class KoBART_title(): def __init__(self, ckpt_path="./n_title_epoch_3"): self.model = BartForConditionalGeneration.from_pretrained(ckpt_path).cuda() self.tokenizer = get_kobart_tokenizer() def infer(self, text): input_ids = self.tokenizer.encode(text) input_ids = torch.tensor(input_ids) input_ids = input_ids.unsqueeze(0).cuda() output = self.model.generate(input_ids, eos_token_id=1, max_length=512, num_beams=5) output = self.tokenizer.decode(output[0], skip_special_tokens=True) return output if __name__ == "__main__": num = 0 title_class = KoBART_title() while(1): num += 1 c = input(f'{num}: context> ').strip() t = title_class.infer(c) print(f"Title: {t}")
nilq/baby-python
python
from scripts.game_objects.game_object import GameObject from scripts.consts import IRON_SWORD, WOODEN_BOW class Weapon(GameObject): def __init__(self, game, type, damage, x, y): weapon_dict = {'iron_sword': IRON_SWORD, 'wooden_bow': WOODEN_BOW} super().__init__(game, weapon_dict[type], x, y, game.pickable_objects, game.all_sprites) self.damage = damage self.type = type def use(self): if self.type == 'iron_sword': if self.game.inventory.sword_slot.item: if self.game.inventory.sword_slot.item == self: self.game.inventory.add_item(self) self.game.inventory.sword_slot.item = None self.game.inventory.sword_slot.selected = False self.game.player.damage = 3 else: self.game.inventory.sword_slot.item = self for cell in self.game.inventory.cells: if cell.item == self: cell.item = None cell.selected = False self.game.all_sprites.remove(self) self.game.pickable_objects.remove(self) self.game.player.damage = self.damage else: if self.game.inventory.bow_slot.item: if self.game.inventory.bow_slot.item == self: self.game.inventory.add_item(self) self.game.inventory.bow_slot.item = None self.game.inventory.bow_slot.selected = False self.game.player.bow_damage = 3 else: self.game.inventory.bow_slot.item = self for cell in self.game.inventory.cells: if cell.item == self: cell.item = None cell.selected = False self.game.all_sprites.remove(self) self.game.pickable_objects.remove(self) self.game.player.bow_damage = self.damage
nilq/baby-python
python
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Unit tests for the rack WSGI service wrappers."""

import mox
import testtools

from oslo.config import cfg

from rack import exception
from rack import service
from rack import test
from rack.tests import utils
from rack import wsgi

# Test-only options: a fake manager plus the host/port to bind the
# throw-away service to (port 0 lets the OS pick a free port).
test_service_opts = [
    cfg.StrOpt("fake_manager",
               default="rack.tests.test_service.FakeManager",
               help="Manager for testing"),
    cfg.StrOpt("test_service_listen",
               default='127.0.0.1',
               help="Host to bind test service to"),
    cfg.IntOpt("test_service_listen_port",
               default=0,
               help="Port number to bind test service to"),
]

CONF = cfg.CONF
CONF.register_opts(test_service_opts)


class TestWSGIService(test.TestCase):
    """Tests for WSGIService start/stop behaviour."""

    def setUp(self):
        super(TestWSGIService, self).setUp()
        # Stub out app loading so no real WSGI application is required.
        self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything())

    def test_service_random_port(self):
        # Binding to port 0 should make the OS assign a non-zero port.
        test_service = service.WSGIService("test_service")
        test_service.start()
        self.assertNotEqual(0, test_service.port)
        test_service.stop()

    def test_service_start_with_illegal_workers(self):
        # A negative worker count must be rejected at construction time.
        CONF.set_override("rackapi_workers", -1)
        self.assertRaises(exception.InvalidInput,
                          service.WSGIService, "rackapi")

    @testtools.skipIf(not utils.is_ipv6_supported(), "no ipv6 support")
    def test_service_random_port_with_ipv6(self):
        # Same random-port check, but bound to the IPv6 loopback.
        CONF.set_default("test_service_listen", "::1")
        test_service = service.WSGIService("test_service")
        test_service.start()
        self.assertEqual("::1", test_service.host)
        self.assertNotEqual(0, test_service.port)
        test_service.stop()


class TestLauncher(test.TestCase):
    """Tests for serving a WSGIService through service.serve()."""

    def setUp(self):
        super(TestLauncher, self).setUp()
        self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything())
        self.service = service.WSGIService("test_service")

    def test_launch_app(self):
        # serve() should start the service on a real (non-zero) port.
        service.serve(self.service)
        self.assertNotEqual(0, self.service.port)
        service._launcher.stop()
nilq/baby-python
python
from __future__ import unicode_literals __all__ = ( 'Key', 'Keys', ) class Key(object): def __init__(self, name): #: Descriptive way of writing keys in configuration files. e.g. <C-A> #: for ``Control-A``. self.name = name def __repr__(self): return 'Key(%s)' % self.name class Keys(object): Escape = Key('<Escape>') ControlA = Key('<C-A>') ControlB = Key('<C-B>') ControlC = Key('<C-C>') ControlD = Key('<C-D>') ControlE = Key('<C-E>') ControlF = Key('<C-F>') ControlG = Key('<C-G>') ControlH = Key('<C-H>') ControlI = Key('<C-I>') # Tab ControlJ = Key('<C-J>') # Enter ControlK = Key('<C-K>') ControlL = Key('<C-L>') ControlM = Key('<C-M>') # Enter ControlN = Key('<C-N>') ControlO = Key('<C-O>') ControlP = Key('<C-P>') ControlQ = Key('<C-Q>') ControlR = Key('<C-R>') ControlS = Key('<C-S>') ControlT = Key('<C-T>') ControlU = Key('<C-U>') ControlV = Key('<C-V>') ControlW = Key('<C-W>') ControlX = Key('<C-X>') ControlY = Key('<C-Y>') ControlZ = Key('<C-Z>') ControlSpace = Key('<C-Space>') ControlBackslash = Key('<C-Backslash>') ControlSquareClose = Key('<C-SquareClose>') ControlCircumflex = Key('<C-Circumflex>') ControlUnderscore = Key('<C-Underscore>') Up = Key('<Up>') Down = Key('<Down>') Right = Key('<Right>') Left = Key('<Left>') Home = Key('<Home>') End = Key('<End>') Delete = Key('<Delete>') ShiftDelete = Key('<ShiftDelete>') PageUp = Key('<PageUp>') PageDown = Key('<PageDown>') BackTab = Key('<BackTab>') # shift + tab Tab = ControlI Backspace = ControlH F1 = Key('<F1>') F2 = Key('<F2>') F3 = Key('<F3>') F4 = Key('<F4>') F5 = Key('<F5>') F6 = Key('<F6>') F7 = Key('<F7>') F8 = Key('<F8>') F9 = Key('<F9>') F10 = Key('<F10>') F11 = Key('<F11>') F12 = Key('<F12>') F13 = Key('<F13>') F14 = Key('<F14>') F15 = Key('<F15>') F16 = Key('<F16>') F17 = Key('<F17>') F18 = Key('<F18>') F19 = Key('<F19>') F20 = Key('<F20>') # Matches any key. Any = Key('<Any>') # Special CPRResponse = Key('<Cursor-Position-Response>')
nilq/baby-python
python
"""Weighted linear regression plus weight/offset sensitivity analysis.

Uses autograd-compatible numpy so gradients of the estimates and standard
errors with respect to observation weights and moment offsets can be taken
with ``autograd`` and ``vittles``.
"""
import autograd
import autograd.numpy as np
import scipy as sp
from copy import deepcopy

import paragami
from paragami.autograd_supplement_lib import grouped_sum
import vittles

import time


def _validate(y, x):
    """Check that ``y`` and ``x`` conform; return (n_obs, x_dim)."""
    n_obs = x.shape[0]
    x_dim = x.shape[1]
    if len(y) != n_obs:
        raise ValueError(
            'The length of ``y`` must match the number of rows in ``x``.')
    return n_obs, x_dim


def reg(y, x, w=None, offset=None):
    """The regression parameter at the given perturbations.

    This should be the optimum of ``reg_obj`` with the corresponding
    parameters.
    """
    n_obs, x_dim = _validate(y=y, x=x)
    if offset is None:
        offset = np.zeros(x_dim)
    if w is not None:
        # Per-observation weights scale the rows of x in the moments.
        x_w = x * np.expand_dims(w, axis=1)
    else:
        x_w = x
    x_wt_x = x_w.T @ x / n_obs
    x_wt_y = x_w.T @ y / n_obs
    # Closed-form solution of the (weighted) moment condition.
    return np.linalg.solve(x_wt_x, x_wt_y - offset).flatten()


def reg_obj(beta, y, x, w=None, offset=None):
    """The objective function for linear regression.

    Here, I use the weighted method-of-moments objective function defined
    as follows.  Let epsilon = Y - X beta, and let X^T epsilon = m.  The
    objective function is to set m equal to offset using a weighted
    quadratic loss, which is

    (m - offset)^T (X^T X)^{-1} (m - offset)

    The weighting matrix (X^T X)^{-1} improves numerical stability and
    gives a loss function that is identical to OLS when offset = 0.
    """
    n_obs, x_dim = _validate(y=y, x=x)
    if offset is None:
        offset = np.zeros_like(beta)
    if w is not None:
        xw = x * w[:, None]
        xtx = xw.T @ x / n_obs
        xty = xw.T @ y / n_obs
    else:
        xtx = x.T @ x / n_obs
        xty = x.T @ y / n_obs

    # This is the method of moments objective after expanding, dropping
    # terms that do not depend on beta, and collecting.
    result = \
        np.dot(beta, xtx @ beta) + \
        2 * np.dot(beta, offset - xty)
    assert result.shape == ()
    return result


def get_standard_error_matrix(betahat, y, x, w, se_group=None):
    """Return the standard error matrix for the regression estimate betahat.

    If se_group is None, compute the ordinary regression standard error.
    Otherwise, compute the robust standard errors using the grouping given by
    se_group, which is assumed to be integers 0:(num_groups - 1).

    Note that se_group must be zero-indexed, and the number of groups is
    taken to be the largest index plus one.  (This behavior is implicitly
    assumed in group_sum.)

    With the se_group option, no finite-sample bias adjustment is applied.
    For example, the resulting ses should be equivalent to calling the R
    function
    sandwich::vcovCL(..., cluster=se_group, type="HC0", cadjust=FALSE)
    """
    resid = y - x @ betahat

    # For now, I am taking the weights to parameterize a change to the
    # objective function rather than a change to the empirical distribution.
    # See email from me to Rachael and Tamara on Jan 31, 2020, 2:50 PM
    # for more discussion of this subtle point.
    if se_group is None:
        # I am using num_obs instead of np.sum(w) because w does not
        # parameterize the empirical distribution.
        num_obs = len(y)
        xtx_bar = np.einsum('ni,nj,n->ij', x, x, w) / num_obs
        sigma2hat = np.sum(w * (resid ** 2)) / (num_obs - len(betahat))
        xtx_inv = np.linalg.inv(xtx_bar)
        se2 = sigma2hat * xtx_inv / num_obs
        return se2
    else:
        if len(se_group) != len(y):
            raise ValueError("se_group must be the same length as the data.")
        #resid = y - x @ betahat
        if np.min(se_group) != 0:
            raise ValueError('se_group must be zero-indexed ' +
                             '(its minimum must be zero)')

        # Calculate the sample variance of the gradient where each group
        # is treated as a single observation.
        grad = w[:, None] * resid[:, None] * x
        grad_grouped = grouped_sum(grad, se_group)
        num_groups = grad_grouped.shape[0]
        grad2_mean = np.einsum('gi,gj->ij', grad_grouped, grad_grouped) / num_groups
        grad_mean = np.einsum('gi->i', grad_grouped) / num_groups
        grad_cov = grad2_mean - np.outer(grad_mean, grad_mean)

        # Weight by the Hessian (the sandwich estimator's "bread").
        xtx_bar = np.einsum('ni,nj,n->ij', x, x, w) / num_groups
        hinv_grad_cov = np.linalg.solve(xtx_bar, grad_cov)
        se2 = np.linalg.solve(xtx_bar, hinv_grad_cov.T) / num_groups
        return se2


def get_regression_w_grads(beta, y, x, w0, se_group=None):
    """Return standard errors at ``w0`` and the Jacobians of ``betahat``
    and of the standard errors with respect to the observation weights.

    NOTE(review): assumes ``beta`` optimizes ``reg_obj`` at ``w0`` -- the
    linear sensitivity approximation is only valid at an optimum
    (``validate_optimum=True`` checks this).
    """
    sens_reg_obj = lambda beta, w: reg_obj(beta, y=y, x=x, w=w)
    obs_w_sens = vittles.HyperparameterSensitivityLinearApproximation(
        objective_fun=sens_reg_obj,
        opt_par_value=beta,
        hyper_par_value=w0,
        validate_optimum=True,
        grad_tol=1e-08)

    get_betahat = obs_w_sens.get_opt_par_function()

    def get_se(w):
        # Standard errors evaluated at the (approximate) optimum for w.
        betahat = get_betahat(w)
        se_cov = get_standard_error_matrix(betahat, y, x, w=w,
                                           se_group=se_group)
        return np.sqrt(np.diag(se_cov))

    se = get_se(w0)
    betahat_grad = obs_w_sens.get_dopt_dhyper()
    se_grad = autograd.jacobian(get_se)(w0)
    return se, betahat_grad, se_grad


#############################################################
# Sensitivity to the `offset`, i.e to the moment condition E[X eps] = offset.

# This is actually a little silly, since the regression solution is
# linear in the offset.  But this shows how you would to it in general and
# it isn't expensive.

def get_regression_offset_grads(beta, y, x, offset0, se_group=None):
    """Return standard errors at ``offset0`` and the Jacobians of
    ``betahat`` and of the standard errors with respect to the offset."""
    sens_reg_obj = lambda beta, offset: reg_obj(beta, y=y, x=x, offset=offset)
    offset_sens = vittles.HyperparameterSensitivityLinearApproximation(
        objective_fun=sens_reg_obj,
        opt_par_value=beta,
        hyper_par_value=offset0,
        validate_optimum=True,
        grad_tol=1e-08)

    get_betahat = offset_sens.get_opt_par_function()

    # I believe that using an offset should not affect the values of the
    # standard errors.
    def get_se(offset):
        betahat = get_betahat(offset)
        se_cov = get_standard_error_matrix(
            betahat, y, x, w=np.ones(x.shape[0]), se_group=se_group)
        return np.sqrt(np.diag(se_cov))

    se = get_se(offset0)
    betahat_grad = offset_sens.get_dopt_dhyper()
    se_grad = autograd.jacobian(get_se)(offset0)
    return se, betahat_grad, se_grad


##########################################################
# The below functions are now being done in the R library.

# Estimate how many datapoints we would have to remove to effect a change
# of delta.
def inds_to_effect_change(leverage, desired_delta):
    """Return the indices to remove to change an estimate by
    ``desired_delta``, assuming the per-observation ``leverage`` effects
    combine linearly, or None if no subset suffices."""
    # Argsort sorts low to high.
    # We are removing points, so multiply by -1.
    sort_inds = np.argsort(leverage * np.sign(desired_delta))
    deltas = -1 * np.cumsum(leverage[sort_inds])
    change_sign_inds = np.argwhere(
        np.sign(desired_delta) * (desired_delta - deltas) <= 0.)
    if len(change_sign_inds) > 0:
        first_ind_change_sign = np.min(change_sign_inds)
        remove_inds = sort_inds[:(first_ind_change_sign + 1)]
        return remove_inds
    else:
        return None


def print_change_results(inds, effect_str, lev_len):
    """Print a human-readable summary of ``inds_to_effect_change``."""
    print('Assuming linearity, which may not hold for large numbers of points,')
    if inds is not None:
        print('removing {} observations ({:0.2f}%) would {}.'.format(
            len(inds), 100 * len(inds) / lev_len, effect_str))
    else:
        print('no number of observations would {}.'.format(effect_str))
nilq/baby-python
python
import json
import numpy as np
import os.path as osp
import warnings
from collections import defaultdict
from plyfile import PlyData
from six import b
from ..utils.point_clouds import uniform_sample
from ..utils import invert_dictionary, read_dict
from ..utils.plotting import plot_pointcloud
from .three_d_object import ThreeDObject
import ipdb
st = ipdb.set_trace


class ScannetDataset(object):
    """
    Holds Scannet mesh and labels data paths and some needed class labels mappings
    Note: data downloaded from: http://www.scan-net.org/changelog#scannet-v2-2018-06-11
    """

    def __init__(self, top_scan_dir, idx_to_semantic_cls_file,
                 instance_cls_to_semantic_cls_file, axis_alignment_info_file):
        self.top_scan_dir = top_scan_dir

        # Mappings between semantic class indices/names and between
        # instance class names and semantic class names.
        self.idx_to_semantic_cls_dict = read_dict(idx_to_semantic_cls_file)
        self.semantic_cls_to_idx_dict = invert_dictionary(self.idx_to_semantic_cls_dict)
        self.instance_cls_to_semantic_cls_dict = read_dict(instance_cls_to_semantic_cls_file)

        # Inverse of the above is one-to-many: semantic cls -> instance classes.
        self.semantic_cls_to_instance_cls_dict = defaultdict(list)
        for k, v in self.instance_cls_to_semantic_cls_dict.items():
            self.semantic_cls_to_instance_cls_dict[v].append(k)

        # scan_id -> 4x4 axis-alignment matrix (flattened), per ScanNet meta-data.
        self.scans_axis_alignment_matrices = read_dict(axis_alignment_info_file)

    def idx_to_semantic_cls(self, semantic_idx):
        return self.idx_to_semantic_cls_dict[str(semantic_idx)]

    def semantic_cls_to_idx(self, semantic_cls):
        return self.semantic_cls_to_idx_dict[str(semantic_cls)]

    def instance_cls_to_semantic_cls(self, instance_cls):
        return self.instance_cls_to_semantic_cls_dict[str(instance_cls)]

    def get_axis_alignment_matrix(self, scan_id):
        return self.scans_axis_alignment_matrices[scan_id]


class ScannetScan(object):
    """
    Keep track of the point-cloud associated with the scene of Scannet. Includes meta-information such as the
    object that exist in the scene, their semantic labels and their RGB color.
    """

    def __init__(self, scan_id, scannet_dataset, apply_global_alignment=True, hardcode_boxes_path=None):
        """
        :param scan_id: (string) e.g. 'scene0705_00'
        :scannet_dataset: (ScannetDataset) captures the details about the class-names, top-directories etc.
        """
        self.dataset = scannet_dataset
        self.scan_id = scan_id
        # BUGFIX: the original call passed ``self.scan_id`` positionally,
        # which silently bound it to ``load_semantic_label`` (the method has
        # no scan_id parameter -- it reads ``self.scan_id`` itself).  It only
        # worked because a non-empty string is truthy.
        self.pc, self.semantic_label, self.color = \
            self.load_point_cloud_with_meta_data(
                apply_global_alignment=apply_global_alignment)

        self.three_d_objects = None  # A list with ThreeDObject contained in this Scan

        self.hardcoded_boxes = None
        if hardcode_boxes_path is not None:
            self.hardcoded_objects = None  # A list with ThreeDObject contained in this Scan
            self.hardcoded_boxes = self.load_hardcoded_boxes(hardcode_boxes_path)

    def __str__(self, verbose=True):
        res = '{}'.format(self.scan_id)
        if verbose:
            res += ' with {} points'.format(self.n_points())
        return res

    def n_points(self):
        return len(self.pc)

    def verify_read_data_correctness(self, scan_aggregation, segment_file, segment_indices):
        """Sanity-check that aggregation/segment files match this scan."""
        c1 = scan_aggregation['sceneId'][len('scannet.'):] == self.scan_id
        scan_segs_suffix = '_vh_clean_2.0.010000.segs.json'
        segment_dummy = self.scan_id + scan_segs_suffix
        c2 = segment_file == segment_dummy
        c3 = len(segment_indices) == self.n_points()
        c = np.array([c1, c2, c3])
        if not np.all(c):
            warnings.warn('{} has some issue'.format(self.scan_id))
        return c

    def load_point_cloud_with_meta_data(self, load_semantic_label=True, load_color=True, apply_global_alignment=True):
        """
        :param load_semantic_label: if True, also read per-point semantic labels.
        :param load_color: if True, also read per-point RGB (normalized to [0, 1]).
        :param apply_global_alignment: rotation/translation of scan according to Scannet meta-data.
        :return: (pc, label, color) -- label/color are None when not requested.
        """
        scan_ply_suffix = '_vh_clean_2.labels.ply'
        mesh_ply_suffix = '_vh_clean_2.ply'

        scan_data_file = osp.join(self.dataset.top_scan_dir, self.scan_id, self.scan_id + scan_ply_suffix)
        data = PlyData.read(scan_data_file)
        x = np.asarray(data.elements[0].data['x'])
        y = np.asarray(data.elements[0].data['y'])
        z = np.asarray(data.elements[0].data['z'])
        pc = np.stack([x, y, z], axis=1)

        label = None
        if load_semantic_label:
            label = np.asarray(data.elements[0].data['label'])

        color = None
        if load_color:
            # Colors live in the mesh ply (not the labels ply).
            scan_data_file = osp.join(self.dataset.top_scan_dir, self.scan_id, self.scan_id + mesh_ply_suffix)
            data = PlyData.read(scan_data_file)
            r = np.asarray(data.elements[0].data['red'])
            g = np.asarray(data.elements[0].data['green'])
            b = np.asarray(data.elements[0].data['blue'])
            color = (np.stack([r, g, b], axis=1) / 256.0).astype(np.float32)

        # Global alignment of the scan
        if apply_global_alignment:
            pc = self.align_to_axes(pc)

        return pc, label, color

    def load_point_clouds_of_all_objects(self, exclude_instances=None):
        """Populate ``self.three_d_objects`` from the aggregation/segment
        files; returns the correctness-check array of
        ``verify_read_data_correctness``."""
        scan_aggregation_suffix = '.aggregation.json'
        aggregation_file = osp.join(self.dataset.top_scan_dir, self.scan_id,
                                    self.scan_id + scan_aggregation_suffix)
        with open(aggregation_file) as fin:
            scan_aggregation = json.load(fin)

        scan_segs_suffix = '_vh_clean_2.0.010000.segs.json'
        segment_file = self.scan_id + scan_segs_suffix
        segments_file = osp.join(self.dataset.top_scan_dir, self.scan_id, segment_file)
        with open(segments_file) as fin:
            segments_info = json.load(fin)
            segment_indices = segments_info['segIndices']

        segment_dummy = scan_aggregation['segmentsFile'][len('scannet.'):]
        check = self.verify_read_data_correctness(scan_aggregation, segment_dummy, segment_indices)

        # Add to each segment, its point indices.
        segment_indices_dict = defaultdict(list)
        for i, s in enumerate(segment_indices):
            segment_indices_dict[s].append(i)

        # iterate over every object
        all_objects = []
        for object_info in scan_aggregation['segGroups']:
            object_instance_label = object_info['label']
            object_id = object_info['objectId']
            if exclude_instances is not None:
                if object_instance_label in exclude_instances:
                    continue
            segments = object_info['segments']
            pc_loc = []
            # Loop over the object segments and get the all point indices of the object
            for s in segments:
                pc_loc.extend(segment_indices_dict[s])
            object_pc = pc_loc
            all_objects.append(ThreeDObject(self, object_id, object_pc, object_instance_label))
        self.three_d_objects = all_objects
        return check

    def load_point_clouds_of_all_hardcoded_boxes(self):
        """Turn ``self.hardcoded_boxes`` into ThreeDObjects by collecting the
        scan points inside each box; returns how many boxes were empty."""
        count_bad_boxes = 0
        all_objects = []
        for idx, box in enumerate(self.hardcoded_boxes):
            xmin, ymin, zmin, xmax, ymax, zmax = box
            object_pc = np.where((self.pc[:, 0] >= xmin) & (self.pc[:, 0] <= xmax)
                                 & (self.pc[:, 1] >= ymin) & (self.pc[:, 1] <= ymax)
                                 & (self.pc[:, 2] >= zmin) & (self.pc[:, 2] <= zmax))[0]
            if len(object_pc) == 0:
                # Box contains no scan points; skip it.
                count_bad_boxes += 1
                continue
            object_id = idx
            object_label = "group_free_object"
            all_objects.append(ThreeDObject(self, object_id, object_pc, object_label))
        self.hardcoded_objects = all_objects
        return count_bad_boxes

    def load_hardcoded_boxes(self, path_to_boxes):
        """Load (N, 7) center/size boxes and return them as (N, 6) min/max
        corner coordinates."""
        boxes = np.load(path_to_boxes)  # N, 7
        centroid = boxes[:, :3]
        lengths = boxes[:, 3:6]
        min_coords = centroid - (lengths / 2)
        max_coords = centroid + (lengths / 2)
        return np.concatenate((min_coords, max_coords), axis=1)

    def override_instance_labels_by_semantic_labels(self):
        for o in self.three_d_objects:
            o._use_true_instance = False

    def activate_instance_labels(self):
        for o in self.three_d_objects:
            o._use_true_instance = True

    def all_semantic_types(self):
        """Return the sorted human-readable semantic classes in this scan."""
        unique_types = np.unique(self.semantic_label)
        human_types = []
        for t in unique_types:
            human_types.append(self.dataset.idx_to_semantic_cls(t))
        return sorted(human_types)

    def instance_occurrences(self):
        """
        :return: (dict) instance_type (string) -> number of occurrences in the scan (int)
        """
        res = defaultdict(int)
        for o in self.three_d_objects:
            res[o.instance_label] += 1
        return res

    def clone(self):
        raise NotImplementedError('Implement me.')

    def points_of_instance_types(self, valid_instance_types, exclude_instance_types):
        """Return the point indices of objects whose instance label is in
        ``valid_instance_types`` (None == all) and not in
        ``exclude_instance_types`` (None == none)."""
        idx = []
        for o in self.three_d_objects:
            o_label_valid = True if (valid_instance_types is None) else (o.instance_label in valid_instance_types)
            o_label_excluded = False if (exclude_instance_types is None) else (
                o.instance_label in exclude_instance_types)
            if o_label_valid and not o_label_excluded:
                idx.extend(o.points)
        return np.array(idx)

    def sample_indices(self, subsample=None, valid_instance_types=None, seed=None, exclude_instance_types=None):
        """
        Sample ids from the scan point cloud.
        :param exclude_instance_types:
        :param seed: Random seed (default=None)
        :param subsample: The number of ids to be sampled from the scan point cloud
        :param valid_instance_types: The instances to be sampled from
        :return: sampled point indices
        """
        if valid_instance_types is not None or exclude_instance_types is not None:
            valid_idx = self.points_of_instance_types(valid_instance_types, exclude_instance_types)
        else:
            valid_idx = np.arange(self.n_points())

        if subsample is None:
            return valid_idx  # return all valid points
        else:
            return uniform_sample(points=valid_idx, n_samples=subsample, random_seed=seed)

    def plot(self, subsample=None, valid_instance_types=None):
        """
        Plot the scan point cloud
        :param subsample: The number of points to be sampled from the scan point cloud
        :param valid_instance_types: The instances to be plotted
        :return: matplotlib.pyplot.fig of the scan
        """
        pt = self.sample_indices(subsample, valid_instance_types)
        x, y, z = self.pc[pt, 0], self.pc[pt, 1], self.pc[pt, 2]
        color = self.color[pt]
        return plot_pointcloud(x, y, z, color=color)

    def align_to_axes(self, point_cloud):
        """
        Align the scan to xyz axes using the alignment matrix found in scannet.
        """
        # Get the axis alignment matrix
        alignment_matrix = self.dataset.get_axis_alignment_matrix(self.scan_id)
        alignment_matrix = np.array(alignment_matrix, dtype=np.float32).reshape(4, 4)

        # Transform the points (homogeneous coordinates)
        pts = np.ones((point_cloud.shape[0], 4), dtype=point_cloud.dtype)
        pts[:, 0:3] = point_cloud
        point_cloud = np.dot(pts, alignment_matrix.transpose())[:, :3]  # Nx4

        # Make sure no nans are introduced after conversion
        assert (np.sum(np.isnan(point_cloud)) == 0)

        return point_cloud


def scan_and_target_id_to_context_info(scan_id, target_id, all_scans_in_dict):
    """
    Get context information (e.g., same instance-class objects) of the object specified by the target_id in the scene
    specified by the scene_id.
    :param scan_id: (string) scene0010_00
    :param target_id: (int) 36
    :param all_scans_in_dict: dict from strings: scene0010_00 to objects of ScannetScan
    :return: (chair, [35, 37, 38, 39], scene0010_00-chair-5-36-35-37-38-39)
    """
    scene_objects = all_scans_in_dict[scan_id].three_d_objects
    target = scene_objects[target_id]
    instance_label = target.instance_label
    distractors = [x.object_id for x in scene_objects
                   if x.instance_label == instance_label and x != target]
    half_context_info = [scan_id, instance_label, str(len(distractors) + 1), str(target_id)]
    context_string = '-'.join(half_context_info + [str(d) for d in distractors])
    context_string = context_string.replace(' ', '_')
    return instance_label, distractors, context_string
nilq/baby-python
python
"""Voorstudie voor een DTD editor (wxPython versie) - not actively maintained """ import os,sys,shutil,copy from xml.etree.ElementTree import Element, ElementTree, SubElement import parsedtd as pd ELTYPES = ('pcdata','one','opt','mul','mulopt') ATTTYPES = ('cdata','enum','id') VALTYPES = ('opt','req','fix','dflt') ENTTYPES = ('ent', 'ext') SYMBOLS = { 'elsrt': { 'pcdata': ('<#PCDATA>', 'parsed character data'), 'one': ('<1>', 'single'), 'opt': ('<?>', 'single optional'), 'mul': ('<+>', 'multiple'), 'mulopt': ('<*>', 'multiple optional'), }, 'elopt': ('<|>', 'either/or'), 'attsrt': { 'cdata': ('[CDATA]', 'character data'), 'enum': ('[enum]', 'enumerated values'), 'id': ('[ID]', 'id'), ## 'IDREF': ('[=>]', 'related id'), ## 'IDREFS': ('[=>>]', 'list of related ids') }, 'attwrd': { 'fix': ('[#FIXED]', 'fixed value'), 'dflt': ('[:]', 'default value'), 'req': ('[#REQUIRED]', 'required'), 'opt': ('[#IMPLIED]', 'optional'), }, 'entsrt': { 'ent': ('{&}', 'internal (value)'), 'ext': ('{&url}','external (url) ')} } TITEL = "Albert's (Simple) DTD-editor" HMASK = "DTD files (*.dtd)|*.dtd|All files (*.*)|*.*" IMASK = "All files|*.*" testdtd = """\ <!ELEMENT note (to,from,heading,body)> <!ELEMENT to (#PCDATA)> <!ELEMENT from (#PCDATA)> <!ELEMENT heading (#PCDATA)> <!ELEMENT body (#PCDATA)> <!ATTLIST body NAME CDATA #IMPLIED CATEGORY (HandTool|Table|Shop-Professional) "HandTool" PARTNUM CDATA #IMPLIED PLANT (Pittsburgh|Milwaukee|Chicago) "Chicago" INVENTORY (InStock|Backordered|Discontinued) "InStock"> <!ENTITY writer "Donald Duck."> <!ENTITY copyright SYSTEM "http://www.w3schools.com/entities.dtd"> """ import wx if os.name == 'ce': DESKTOP = False else: DESKTOP = True def getshortname(x,attr=False,ent=False): if attr: name,srt,opt,val = x strt = ' '.join((SYMBOLS['attsrt'][srt][0],name, SYMBOLS['attwrd'][opt][0],val)) t = ''.join(('[',']')) elif ent: name,srt,val = x strt = ' '.join((SYMBOLS['entsrt'][srt][0],name,":",val)) else: tag,type,opt = x strt = ' 
'.join((SYMBOLS['elsrt'][type][0],tag)) if opt: strt = SYMBOLS['elopt'][0] + strt return strt def is_element(data): test = data.split()[0] if test in [x[0] for x in SYMBOLS['elsrt'].values()]: return True else: return False def is_pcdata(data): test = data.split()[0] if test == SYMBOLS['elsrt']['pcdata'][0]: return True else: return False def is_attribute(data): test = data.split()[0] if test in [x[0] for x in SYMBOLS['attsrt'].values()]: return True else: return False def is_entitydef(data): test = data.split()[0] if test in [x[0] for x in SYMBOLS['entsrt'].values()]: return True else: return False #~ def ParseDTD(data=None,file=None): #~ root = Element('Root_Element') #~ return ElementTree(root) class ElementDialog(wx.Dialog): def __init__(self,parent,title='',size=wx.DefaultSize, pos=wx.DefaultPosition, style=wx.DEFAULT_DIALOG_STYLE, item=None, not_root=True): self.not_root = not_root size = (320,200) if not_root else (320,100) wx.Dialog.__init__(self,parent,-1,title=title,size=size) #, pos, size, style) self._parent = parent self.pnl = wx.Panel(self,-1) lblName = wx.StaticText(self.pnl, -1,"element name: ") self.txtTag = wx.TextCtrl(self.pnl,-1, size=(200,-1)) self.txtTag.Bind(wx.EVT_KEY_UP,self.OnKeyUp) if not_root: lblType = wx.StaticText(self.pnl, -1,"choose one: ") self.rbTypes = [wx.RadioButton(self.pnl,-1,label=SYMBOLS['elsrt'][name][1]) for name in ELTYPES] self.cbOpt = wx.CheckBox(self.pnl,-1,label=SYMBOLS['elopt'][1]) self.bOk = wx.Button(self.pnl,id=wx.ID_SAVE) self.bOk.Bind(wx.EVT_BUTTON,self.on_ok) self.bCancel = wx.Button(self.pnl,id=wx.ID_CANCEL) ## self.bCancel.Bind(wx.EVENT_BUTTON,self.on_cancel) self.SetAffirmativeId(wx.ID_SAVE) tag = '' type = '' opt = False if item: tag = item["tag"] opt = item['opt'] type = item["type"] self.txtTag.SetValue(tag) if not_root: if type: for ix,name in enumerate(ELTYPES): if name == type: self.rbTypes[ix].SetValue(True) ## else: ## self.rbTypes[2].SetValue(True) self.cbOpt.Value = opt sizer = 
wx.BoxSizer(wx.VERTICAL) hsizer = wx.BoxSizer(wx.HORIZONTAL) hsizer.Add(lblName,0,wx.ALIGN_CENTER_VERTICAL) hsizer.Add(self.txtTag,0,wx.ALIGN_CENTER_VERTICAL) sizer.Add(hsizer,0, wx.ALIGN_CENTER_HORIZONTAL | wx.EXPAND | wx.ALL,5) if not_root: hsizer = wx.BoxSizer(wx.HORIZONTAL) hsizer.Add(lblType,0) vsizer = wx.BoxSizer(wx.VERTICAL) for rb in self.rbTypes: vsizer.Add(rb) hsizer.Add(vsizer,0,wx.TOP,3) sizer.Add(hsizer,0, wx.ALIGN_CENTER_HORIZONTAL | wx.EXPAND | wx.ALL,5) hsizer = wx.BoxSizer(wx.HORIZONTAL) hsizer.Add(self.cbOpt) sizer.Add(hsizer,0, wx.ALIGN_CENTER_HORIZONTAL | wx.EXPAND | wx.ALL,5) hsizer = wx.BoxSizer(wx.HORIZONTAL) hsizer.Add(self.bOk,0,wx.EXPAND | wx.ALL, 2) hsizer.Add(self.bCancel,0,wx.EXPAND | wx.ALL, 2) sizer.Add(hsizer,0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL,2) self.pnl.SetSizer(sizer) self.pnl.SetAutoLayout(True) sizer.Fit(self.pnl) sizer.SetSizeHints(self.pnl) self.pnl.Layout() def on_ok(self, ev): self._parent.data = {} tag = self.txtTag.GetValue() if self.not_root and self.rbTypes[0].Value: if tag: self.txtTag.SetFocus() wx.MessageBox('Element name must be empty for PCDATA', self._parent.title, wx.OK|wx.ICON_ERROR) return else: if tag == '' or len(tag.split()) > 1: self.txtTag.SetFocus() wx.MessageBox('Element name cannot be empty or contain spaces', self._parent.title, wx.OK|wx.ICON_ERROR) return self._parent.data["tag"] = tag if self.not_root: typed = False for ix,rb in enumerate(self.rbTypes): if rb.Value: self._parent.data["type"] = ELTYPES[ix] #rb.LabelText typed = True if not typed: ## self.rbTypes[0].SetFocus() wx.MessageBox('You MUST choose a type for this element', self._parent.title, wx.OK|wx.ICON_ERROR) return self._parent.data['opt'] = self.cbOpt.Value else: self._parent.data["type"] = 'one' self._parent.data['opt'] = False print self._parent.data ev.Skip() ## self.end('ok') def OnKeyUp(self,ev): ky = ev.GetKeyCode() mod = ev.GetModifiers() if ky == 65 and mod == wx.MOD_CONTROL: win = 
ev.GetEventObject() if win in (self.txtTag): win.SelectAll() class AttributeDialog(wx.Dialog): def __init__(self,parent,title='',size=wx.DefaultSize, pos=wx.DefaultPosition, style=wx.DEFAULT_DIALOG_STYLE,item=None): wx.Dialog.__init__(self,parent,-1,title=title,size=(320,225)) #,pos.size,style) self._parent = parent self.pnl = wx.Panel(self,-1) lblName = wx.StaticText(self.pnl,-1, "Attribute name:") self.txtName = wx.TextCtrl(self.pnl,-1, size=(200,-1)) self.txtName.Bind(wx.EVT_KEY_UP,self.OnKeyUp) lblType = wx.StaticText(self.pnl, -1,"Attribute type: ") self.cmbType = wx.ComboBox(self.pnl, -1, style=wx.CB_READONLY, choices=[SYMBOLS['attsrt'][name][1] for name in ATTTYPES]) lblWrd = wx.StaticText(self.pnl, -1,"choose one: ") self.rbWrds = [wx.RadioButton(self.pnl,-1,label=SYMBOLS['attwrd'][name][1]) for name in VALTYPES] lblValue = wx.StaticText(self.pnl, -1,"Fixed/default value:") self.txtValue = wx.TextCtrl(self.pnl,-1, size=(100,-1)) # self.bList = wx.Button(self.pnl,-1,'Edit List',action=self.EditList) self.bOk = wx.Button(self.pnl,id=wx.ID_SAVE) self.bOk.Bind(wx.EVT_BUTTON,self.on_ok) self.bCancel = wx.Button(self.pnl,id=wx.ID_CANCEL) self.SetAffirmativeId(wx.ID_SAVE) nam = val = srt = opt = '' if item: nam = item["name"] srt = item['srt'] opt = item['opt'] val = item.get('val','') self.txtName.SetValue(nam) if srt: for name in ATTTYPES: if name == srt: self.cmbType.Value = SYMBOLS['attsrt'][name][1] else: self.cmbType.Value = SYMBOLS['attsrt'][ATTTYPES[0]][1] if opt: for ix,name in enumerate(VALTYPES): if name == opt: self.rbWrds[ix].Value = True else: self.rbWrds[0].Value = True self.txtValue.SetValue(val) sizer = wx.BoxSizer(wx.VERTICAL) hsizer = wx.BoxSizer(wx.HORIZONTAL) hsizer.Add(lblName,0,wx.ALIGN_CENTER_VERTICAL | wx.LEFT|wx.RIGHT,5) hsizer.Add(self.txtName,1,wx.EXPAND | wx.ALIGN_CENTER_VERTICAL) sizer.Add(hsizer,0, wx.ALIGN_CENTER_HORIZONTAL | wx.EXPAND | wx.ALL,5) hsizer = wx.BoxSizer(wx.HORIZONTAL) hsizer.Add(lblType,0,wx.ALIGN_CENTER_VERTICAL | 
wx.LEFT|wx.RIGHT,5) hsizer.Add(self.cmbType,0,wx.EXPAND | wx.ALIGN_CENTER_VERTICAL) sizer.Add(hsizer,0, wx.ALIGN_CENTER_HORIZONTAL | wx.EXPAND | wx.ALL,5) hsizer = wx.BoxSizer(wx.HORIZONTAL) hsizer.Add(lblWrd,0, wx.LEFT|wx.RIGHT,5) vsizer = wx.BoxSizer(wx.VERTICAL) ## print self.rbTypes for rb in self.rbWrds: vsizer.Add(rb) hsizer.Add(vsizer,0,wx.TOP,3) sizer.Add(hsizer,0, wx.ALIGN_CENTER_HORIZONTAL | wx.EXPAND | wx.ALL,5) hsizer = wx.BoxSizer(wx.HORIZONTAL) hsizer.Add(lblValue,0,wx.ALIGN_CENTER_VERTICAL | wx.LEFT|wx.RIGHT,5) hsizer.Add(self.txtValue,0,wx.EXPAND | wx.ALIGN_CENTER_VERTICAL) #hsizer.Add(self.bList,0,wx.EXPAND | wx.ALIGN_CENTER_VERTICAL) sizer.Add(hsizer,0, wx.ALIGN_CENTER_HORIZONTAL | wx.EXPAND | wx.ALL,5) hsizer = wx.BoxSizer(wx.HORIZONTAL) hsizer.Add(self.bOk,0,wx.EXPAND | wx.ALL, 2) hsizer.Add(self.bCancel,0,wx.EXPAND | wx.ALL, 2) sizer.Add(hsizer,0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL,2) self.pnl.SetSizer(sizer) self.pnl.SetAutoLayout(True) sizer.Fit(self.pnl) sizer.SetSizeHints(self.pnl) self.pnl.Layout() def on_ok(self, ev): self._parent.data = {} nam = self.txtName.GetValue() typ = self.cmbType.GetValue() val = self.txtValue.GetValue() print nam if nam == '' or len(nam.split()) > 1: self.txtName.SetFocus() wx.MessageBox('Attribute name cannot be empty or contain spaces', self._parent.title,wx.OK|wx.ICON_ERROR) return if self.rbWrds[2].Value and val == '': self.txtValue.SetFocus() wx.MessageBox('Vaste waarde opgeven', self._parent.title,wx.OK|wx.ICON_ERROR) return if self.rbWrds[3].Value and val == '': self.txtValue.SetFocus() wx.MessageBox('Default waarde opgeven', self._parent.title,wx.OK|wx.ICON_ERROR) return self._parent.data["name"] = nam for key,wrd in SYMBOLS['attsrt'].items(): if typ == wrd[1]: self._parent.data["srt"] = key for ix,rb in enumerate(self.rbWrds): if rb.Value: self._parent.data["opt"] = VALTYPES[ix] self._parent.data["val"] = val ## self.end('ok') print self._parent.data ev.Skip() def 
on_cancel(self, ev): self.end('cancel') def OnKeyUp(self,ev): ky = ev.GetKeyCode() mod = ev.GetModifiers() if ky == 65 and mod == wx.MOD_CONTROL: win = ev.GetEventObject() if win in (self.txtName, self.txtValue): win.SelectAll() def EditList(self,ev): data = {'item': self.item, 'tag': tag, 'type': type, 'opt': opt} edt = ListDialog(self,title='Edit enumerated list',item=data) if edt.ShowModal() == wx.ID_SAVE: h = (self.data["tag"],self.data['type'],self.data['opt']) self.tree.SetItemText(self.item,getshortname(h)) self.tree.SetItemPyData(self.item,h) class EntityDialog(wx.Dialog): def __init__(self,parent,title='',size=wx.DefaultSize, pos=wx.DefaultPosition, style=wx.DEFAULT_DIALOG_STYLE,item=None): wx.Dialog.__init__(self,parent,-1,title=title,size=(320,160)) #,pos.size,style) self._parent = parent self.pnl = wx.Panel(self,-1) lblName = wx.StaticText(self.pnl,-1, "Entity name:") self.txtName = wx.TextCtrl(self.pnl,-1, size=(200,-1)) self.txtName.Bind(wx.EVT_KEY_UP,self.OnKeyUp) #lblValue = wx.StaticText(self.pnl,-1, "Attribute value:") #self.txtValue = wx.TextCtrl(self.pnl,-1, size=(200,-1)) #self.txtValue.Bind(wx.EVT_KEY_UP,self.OnKeyUp) lblType = wx.StaticText(self.pnl, -1,"Definition: ") self.rbTypes = [wx.RadioButton(self.pnl,-1, label=SYMBOLS['entsrt'][name][1] + ": ") for name in ENTTYPES] self.txtVal = wx.TextCtrl(self.pnl,-1, size=(100,-1)) self.txtUrl = wx.TextCtrl(self.pnl,-1, size=(100,-1)) self.bOk = wx.Button(self.pnl,id=wx.ID_SAVE) self.bOk.Bind(wx.EVT_BUTTON,self.on_ok) self.bCancel = wx.Button(self.pnl,id=wx.ID_CANCEL) ## self.bCancel.Bind(wx.EVT_BUTTON,self.on_cancel) self.SetAffirmativeId(wx.ID_SAVE) nam = val = srt = url = '' if item: nam = item["name"] #val = item["value"] srt = item['srt'] val = item.get('val','') self.txtName.SetValue(nam) #self.txtValue.SetValue(val) if srt == ENTTYPES[0]: self.rbTypes[0].SetValue(True) self.txtVal.SetValue(val) elif srt == ENTTYPES[1]: self.rbTypes[1].SetValue(True) self.txtUrl.SetValue(val) else: 
self.rbTypes[0].SetValue(True) sizer = wx.BoxSizer(wx.VERTICAL) hsizer = wx.BoxSizer(wx.HORIZONTAL) hsizer.Add(lblName,0,wx.ALIGN_CENTER_VERTICAL | wx.LEFT|wx.RIGHT,5) hsizer.Add(self.txtName,1,wx.EXPAND) sizer.Add(hsizer,1, wx.ALIGN_CENTER_HORIZONTAL | wx.EXPAND | wx.ALL,5) hsizer = wx.BoxSizer(wx.HORIZONTAL) hsizer.Add(lblType,0,wx.TOP|wx.LEFT|wx.RIGHT,5) #hsizer.Add(lblValue,0,wx.ALIGN_CENTER_VERTICAL | wx.LEFT|wx.RIGHT,5) #hsizer.Add(self.txtValue,1,wx.EXPAND | wx.ALIGN_CENTER_VERTICAL) #sizer.Add(hsizer,1, wx.EXPAND | wx.ALL,5) vsizer = wx.BoxSizer(wx.VERTICAL) for ix,rb in enumerate(self.rbTypes): hhsizer = wx.BoxSizer(wx.HORIZONTAL) hhsizer.Add(rb,0,wx.ALIGN_CENTER_VERTICAL | wx.ALL,1) if ix == 0: hhsizer.Add(self.txtVal,0,wx.ALIGN_CENTER_VERTICAL | wx.ALL,1) elif ix == 1: hhsizer.Add(self.txtUrl,0,wx.ALIGN_CENTER_VERTICAL | wx.ALL,1) vsizer.Add(hhsizer) hsizer.Add(vsizer,0) sizer.Add(hsizer,0, wx.ALIGN_CENTER_HORIZONTAL | wx.EXPAND | wx.ALL,5) hsizer = wx.BoxSizer(wx.HORIZONTAL) hsizer.Add(self.bOk,0,wx.EXPAND | wx.ALL, 2) hsizer.Add(self.bCancel,0,wx.EXPAND | wx.ALL, 2) sizer.Add(hsizer,0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL,2) self.pnl.SetSizer(sizer) self.pnl.SetAutoLayout(True) sizer.Fit(self.pnl) sizer.SetSizeHints(self.pnl) self.pnl.Layout() def on_ok(self, ev): self._parent.data = {} nam = self.txtName.GetValue() ent = self.rbTypes[0].Value val = self.txtVal.GetValue() url = self.txtUrl.GetValue() ext = self.rbTypes[1].Value print nam if nam == '' or len(nam.split()) > 1: self.txtName.SetFocus() wx.MessageBox('Entity name cannot be empty or contain spaces', self._parent.title,wx.OK|wx.ICON_ERROR) return if ent and val == '': self.txtVal.SetFocus() wx.MessageBox('Waarde opgeven', self._parent.title,wx.OK|wx.ICON_ERROR) return if ext and url == '': self.txtUrl.SetFocus() wx.MessageBox('Url opgeven', self._parent.title,wx.OK|wx.ICON_ERROR) return self._parent.data["name"] = nam #self._parent.data["value"] = 
self.txtValue.GetValue() if ent: self._parent.data["srt"] = 'ent' self._parent.data["val"] = val elif ext: self._parent.data["srt"] = 'ext' self._parent.data["val"] = url ## self.end('ok') print self._parent.data ev.Skip() def on_cancel(self, ev): self.end('cancel') def OnKeyUp(self,ev): ky = ev.GetKeyCode() mod = ev.GetModifiers() if ky == 65 and mod == wx.MOD_CONTROL: win = ev.GetEventObject() if win in (self.txtName, self.txtValue): win.SelectAll() class MainFrame(wx.Frame): def __init__(self,parent,id,fn=''): self.parent = parent self.title = "Albert's XML Editor" self.xmlfn = fn wx.Frame.__init__(self,parent,id, pos=(2,2), size=(320,450) ) self.SetIcon(wx.Icon("axe.ico",wx.BITMAP_TYPE_ICO)) self.init_menus() menuBar = wx.MenuBar() filemenu, viewmenu, editmenu = self.init_menus() menuBar.Append(filemenu, "&File") menuBar.Append(viewmenu, "&View") menuBar.Append(editmenu, "&Edit") self.SetMenuBar(menuBar) self.enable_pasteitems(False) ## self.helpmenu.append('About', callback = self.about) self.pnl = wx.Panel(self,-1) self.tree = wx.TreeCtrl(self.pnl,-1) self.tree.Bind(wx.EVT_LEFT_DCLICK, self.onLeftDClick) self.tree.Bind(wx.EVT_RIGHT_DOWN, self.OnRightDown) self.tree.Bind(wx.EVT_KEY_UP,self.OnKeyUp) sizer0 = wx.BoxSizer(wx.VERTICAL) sizer1 = wx.BoxSizer(wx.HORIZONTAL) sizer1.Add(self.tree,1,wx.EXPAND) sizer0.Add(sizer1,1,wx.EXPAND) self.pnl.SetSizer(sizer0) self.pnl.SetAutoLayout(True) sizer0.Fit(self.pnl) sizer0.SetSizeHints(self.pnl) self.pnl.Layout() self.Show(True) self.cut_att = None self.cut_ent = None self.cut_el = None if self.xmlfn == '': self.rt = Element('New_Root') self.openxml() else: self.rt = parse_dtd(file=self.xmlfn).getroot() self.init_tree() def init_menus(self,popup=False): if popup: viewmenu = wx.Menu() else: filemenu = wx.Menu() mitem = wx.MenuItem(filemenu, -1, "&New") self.Bind(wx.EVT_MENU,self.newxml,mitem) filemenu.AppendItem(mitem) mitem = wx.MenuItem(filemenu, -1, "&Open") self.Bind(wx.EVT_MENU,self.openxml,mitem) 
filemenu.AppendItem(mitem) mitem = wx.MenuItem(filemenu, -1, '&Save') self.Bind(wx.EVT_MENU,self.savexml,mitem) filemenu.AppendItem(mitem) mitem = wx.MenuItem(filemenu, -1, 'Save &As') self.Bind(wx.EVT_MENU,self.savexmlas,mitem) filemenu.AppendItem(mitem) filemenu.AppendSeparator() mitem = wx.MenuItem(filemenu, -1, 'E&xit') self.Bind(wx.EVT_MENU,self.quit,mitem) filemenu.AppendItem(mitem) viewmenu = wx.Menu() mitem = wx.MenuItem(viewmenu, -1, "&Expand All (sub)Levels") self.Bind(wx.EVT_MENU,self.expand,mitem) viewmenu.AppendItem(mitem) mitem = wx.MenuItem(viewmenu, -1, "&Collapse All (sub)Levels") self.Bind(wx.EVT_MENU,self.collapse,mitem) viewmenu.AppendItem(mitem) if popup: editmenu = viewmenu editmenu.AppendSeparator() else: editmenu = wx.Menu() mitem = wx.MenuItem(editmenu, -1, "&Edit") self.Bind(wx.EVT_MENU,self.edit,mitem) editmenu.AppendItem(mitem) editmenu.AppendSeparator() mitem = wx.MenuItem(editmenu, -1, "&Delete") self.Bind(wx.EVT_MENU,self.delete,mitem) editmenu.AppendItem(mitem) mitem = wx.MenuItem(editmenu, -1, "C&ut") self.Bind(wx.EVT_MENU,self.cut,mitem) editmenu.AppendItem(mitem) mitem = wx.MenuItem(editmenu, -1, "&Copy") self.Bind(wx.EVT_MENU,self.copy,mitem) editmenu.AppendItem(mitem) mitem = wx.MenuItem(editmenu, -1, "Paste Before") self.Bind(wx.EVT_MENU,self.paste,mitem) if popup: if not self.cut_el and not self.cut_att and not self.cut_ent: mitem.SetItemLabel("Nothing to Paste") mitem.Enable(False) else: self.pastebefore_item = mitem editmenu.AppendItem(mitem) mitem = wx.MenuItem(editmenu, -1, "Paste After") self.Bind(wx.EVT_MENU,self.paste_aft,mitem) if popup: if not self.cut_el and not self.cut_att and not self.cut_ent: ## mitem.SetItemLabel(" ") mitem.Enable(False) else: self.pasteafter_item = mitem editmenu.AppendItem(mitem) mitem = wx.MenuItem(editmenu, -1, "Paste Under") self.Bind(wx.EVT_MENU,self.paste_und,mitem) if popup: if not self.cut_el and not self.cut_att and not self.cut_ent: ## mitem.SetItemLabel(" ") mitem.Enable(False) else: 
self.pasteunder_item = mitem editmenu.AppendItem(mitem) editmenu.AppendSeparator() mitem = wx.MenuItem(editmenu, -1, 'Insert Element Before') self.Bind(wx.EVT_MENU,self.insert,mitem) editmenu.AppendItem(mitem) mitem = wx.MenuItem(editmenu, -1, 'Insert Element After') self.Bind(wx.EVT_MENU,self.ins_aft,mitem) editmenu.AppendItem(mitem) mitem = wx.MenuItem(editmenu, -1, 'Insert Element Under') self.Bind(wx.EVT_MENU,self.ins_chld,mitem) editmenu.AppendItem(mitem) mitem = wx.MenuItem(editmenu, -1, "Add Attribute") self.Bind(wx.EVT_MENU,self.add_attr,mitem) editmenu.AppendItem(mitem) mitem = wx.MenuItem(editmenu, -1, "Add Entity") self.Bind(wx.EVT_MENU,self.add_ent,mitem) editmenu.AppendItem(mitem) if popup: return editmenu else: return filemenu, viewmenu, editmenu def enable_pasteitems(self,active=False): if active: self.pastebefore_item.SetItemLabel("Paste Before") else: self.pastebefore_item.SetItemLabel("Nothing to Paste") self.pastebefore_item.Enable(active) self.pasteafter_item.Enable(active) def newxml(self,ev=None): h = wx.Dialog.askstring("AXE", "Enter a name (tag) for the root element") if h is not None: self.init_tree("(untitled)") def openxml(self,ev=None): ## self.openfile() ## try: email = pd.DTDParser(fromstring=testdtd) ## except pd.DTDParsingError,msg: ## print msg ## return self.rt = email self.init_tree() def _openfile(self,h): try: rt = ElementTree(file=h).getroot() except: return False else: self.rt = rt self.xmlfn = h return True def openfile(self,ev=None): dlg = wx.FileDialog( self, message="Choose a file", defaultDir=os.getcwd(), wildcard=HMASK, style=wx.OPEN ) if dlg.ShowModal() == wx.ID_OK: h = dlg.GetPath() if not self._openfile(h): dlg = wx.MessageBox('dtd parsing error', self.title, wx.OK | wx.ICON_INFORMATION) dlg.ShowModal() dlg.Destroy() dlg.Destroy() def savexmlfile(self,oldfile=''): def expandnode(rt,root): tag,c = self.tree.GetFirstChild(rt) while tag.IsOk(): text = self.tree.GetItemText(tag) data = self.tree.GetItemPyData(tag) ## 
print text,data[0],data[1] if text.startswith(ELSTART): node = SubElement(root,data[0]) if data[1]: node.text = data[1] expandnode(tag,node) else: root.set(data[0],data[1]) tag,c = self.tree.GetNextChild(rt,c) print "savexmlfile():",self.xmlfn try: shutil.copyfile(self.xmlfn,self.xmlfn + '.bak') except IOError as mld: ## wx.MessageBox(str(mld),self.title,wx.OK|wx.ICON_ERROR) pass top = self.tree.GetRootItem() rt = self.tree.GetLastChild(top) text = self.tree.GetItemText(rt) data = self.tree.GetItemPyData(rt) root = Element(data[0]) # .split(None,1) expandnode(rt,root) h = ElementTree(root).write(self.xmlfn,encoding="iso-8859-1") def savexml(self,ev=None): ## print "savexml(): ", self.xmlfn if self.xmlfn == '': self.savexmlas() else: self.savexmlfile() def savexmlas(self,ev=None): d,f = os.path.split(self.xmlfn) ## print "savexmlas(): ", d,f dlg = wx.FileDialog( self, message="Save file as ...", defaultDir=d, defaultFile=f, wildcard=HMASK, style=wx.SAVE ) if dlg.ShowModal() == wx.ID_OK: self.xmlfn = dlg.GetPath() ## print "savexmlas(): ", self.xmlfn self.savexmlfile() # oldfile=os.path.join(d,f)) self.tree.SetItemText(self.top,self.xmlfn) self.SetTitle(" - ".join((os.path.split(self.xmlfn)[-1],TITEL))) dlg.Destroy() def about(self,ev=None): wx.MessageBox("Made in 2009 by Albert Visser\nWritten in (wx)Python", self.title,wx.OK|wx.ICON_INFORMATION ) def quit(self,ev=None): self.Close() def init_tree(self,name=''): def add_to_tree(el,rt): h = (el.tag,el.text) rr = self.tree.AppendItem(rt,getshortname(h)) self.tree.SetItemPyData(rr,h) for attr in el.keys(): h = el.get(attr) if not h: h = '""' h = (attr,h) rrr = self.tree.AppendItem(rr,getshortname(h,attr=True)) self.tree.SetItemPyData(rrr,h) for subel in list(el): add_to_tree(subel,rr) self.tree.DeleteAllItems() if name: titel = name elif self.xmlfn: titel = self.xmlfn else: titel = '[untitled]' self.top = self.tree.AddRoot(titel) self.SetTitle(" - ".join((os.path.split(titel)[-1],TITEL))) ## de in self.openxml 
ingelezen structuur self.rt bevat (in dit geval): ## [<parsedtd._Element object at 0x2527c50>, ## <parsedtd._Entity object at 0x2527f90>, ## <parsedtd._Entity object at 0x2527f50>] ## het _Element object bevat de attributen: ## type: 'ANY', ## name: 'note', ## occurrence: '', ## entity_list: [], ## attribute_list: [], ## is_alternative: False, ## subelement_list: [ ## <parsedtd._Element object at 0x176dcd0>, ## <parsedtd._Element object at 0x176dd50>, ## <parsedtd._Element object at 0x176dd90>, ## <parsedtd._Element object at 0x176ddd0>], ## en de _entity objecten: ## type: 'ent', ## name: 'writer', ## value: 'Donald Duck.' ## en ## type: 'ext', ## name: 'copyright', ## value: 'http://www.w3schools.com/entities.dtd' h = (self.rt.tag,'one',False) rt = self.tree.AppendItem(self.top,getshortname(h)) self.tree.SetItemPyData(rt,h) for el in list(self.rt): add_to_tree(el,rt) #self.tree.selection = self.top # set_selection() def on_bdown(self, ev=None): if wx.recon_context(self.tree, ev): self.item = self.tree.selection if self.item == self.top: wx.context_menu(self, ev, self.filemenu) elif self.item is not None: wx.context_menu(self, ev, self.editmenu) else: wx.Message.ok(self.title,'You need to select a tree item first') #menu.append() else: ev.skip() def onLeftDClick(self,ev=None): pt = ev.GetPosition() item, flags = self.tree.HitTest(pt) if item: if item == self.top: edit = False else: data = self.tree.GetItemText(item) edit = True ## if data.startswith(ELSTART): ## if self.tree.GetChildrenCount(item): ## edit = False if edit: self.edit() ev.Skip() def OnRightDown(self, ev=None): pt = ev.GetPosition() item, flags = self.tree.HitTest(pt) if item and item != self.top: self.tree.SelectItem(item) menu = self.init_menus(popup=True) self.PopupMenu(menu) ## print "klaar met menu" menu.Destroy() ## pass def OnKeyUp(self, ev=None): pt = ev.GetPosition() ky = ev.GetKeyCode() item, flags = self.tree.HitTest(pt) if item and item != self.top: if ky == wx.WXK_DELETE: self.delete() 
elif ky == wx.WXK_F2: self.edit() ev.Skip() def checkselection(self): sel = True self.item = self.tree.Selection if self.item is None or self.item == self.top: wx.MessageBox('You need to select an element or attribute first', self.title,wx.OK | wx.ICON_INFORMATION) return sel def expand(self,ev=None): item = self.tree.Selection if item: self.tree.ExpandAllChildren(item) def collapse(self,ev=None): item = self.tree.Selection if item: self.tree.CollapseAllChildren(item) def edit(self, ev=None): if DESKTOP and not self.checkselection(): return test = self.tree.GetItemText(self.item) # self.item.get_text() if is_element(test): tag,type,opt = self.tree.GetItemPyData(self.item) # self.item.get_data() data = {'item': self.item, 'tag': tag, 'type': type, 'opt': opt} if tag == self.rt.tag: edt = ElementDialog(self,title='Edit root element',item=data,not_root=False) else: edt = ElementDialog(self,title='Edit an element',item=data) if edt.ShowModal() == wx.ID_SAVE: h = (self.data["tag"],self.data['type'],self.data['opt']) self.tree.SetItemText(self.item,getshortname(h)) self.tree.SetItemPyData(self.item,h) elif is_attribute(test): nam,srt,opt,val = self.tree.GetItemPyData(self.item) # self.item.get_data() data = {'item': self.item, 'name': nam, 'srt': srt, 'opt': opt, 'val': val} edt = AttributeDialog(self,title='Edit an attribute',item=data) if edt.ShowModal() == wx.ID_SAVE: h = (self.data["name"],self.data["srt"], self.data["opt"],self.data["val"]) self.tree.SetItemText(self.item,getshortname(h,attr=True)) self.tree.SetItemPyData(self.item,h) elif is_entitydef(test): nam,srt,val = self.tree.GetItemPyData(self.item) # self.item.get_data() data = {'item': self.item, 'name': nam, 'srt': srt, 'val': val} edt = EntityDialog(self,title='Edit an attribute',item=data) if edt.ShowModal() == wx.ID_SAVE: h = (self.data["name"],self.data["srt"],self.data["val"]) self.tree.SetItemText(self.item,getshortname(h,attr=True)) self.tree.SetItemPyData(self.item,h) else: return edt.Destroy() 
def cut(self, ev=None): self.copy(cut=True) def delete(self, ev=None): self.copy(cut=True, retain=False) def copy(self, ev=None, cut=False, retain=True): # retain is t.b.v. delete functie def push_el(el,result): # print "start: ",result text = self.tree.GetItemText(el) data = self.tree.GetItemPyData(el) y = [] # print "before looping over contents:",text,y if is_element(text): x,c = self.tree.GetFirstChild(el) while x.IsOk(): z = push_el(x,y) x,c = self.tree.GetNextChild(el,c) # print "after looping over contents: ",text,y result.append((text,data,y)) # print "end: ",result return result if DESKTOP and not self.checkselection(): return text = self.tree.GetItemText(self.item) data = self.tree.GetItemPyData(self.item) txt = 'cut' if cut else 'copy' txt = txt if retain else 'delete' if data == (self.rt.tag,'one',False): wx.MessageBox("Can't %s the root" % txt, self.title,wx.OK | wx.ICON_ERROR) return ## print "copy(): print text,data" ## print text,data if retain: self.cut_el = None self.cut_att = None self.cut_ent = None if is_element(text): ## self.cut_el = self.item # hmmm... 
hier moet de aanroep van push_el komen self.cut_el = [] self.cut_el = push_el(self.item,self.cut_el) elif is_attribute(text): self.cut_att = data elif is_entitydef(text): self.cut_ent = data if cut: self.tree.Delete(self.item) self.enable_pasteitems(True) print "copy(): print self.cut_el, _att, _ent" print self.cut_el, self.cut_att, self.cut_ent def paste(self, ev=None,before=True,pastebelow=False): if DESKTOP and not self.checkselection(): return data = self.tree.GetItemPyData(self.item) if pastebelow: text = self.tree.GetItemText(self.item) if not is_element(text): wx.MessageBox("Can't paste under a non-element",self.title, wx.OK | wx.ICON_ERROR) return if is_pcdata(text): wx.MessageBox("Can't paste under PCDATA",self.title, wx.OK | wx.ICON_ERROR) return if data == self.rt: if before: wx.MessageBox("Can't paste before the root", self.title,wx.OK | wx.ICON_ERROR) return else: wx.MessageBox("Pasting as first element under root", self.title,wx.OK | wx.ICON_INFORMATION) pastebelow = True if self.cut: self.enable_pasteitems(False) print "paste(): print self.cut_el, _att, _ent" print self.cut_el, self.cut_att, self.cut_ent if self.cut_el: def zetzeronder(node,el,pos=-1): ## print "zetzeronder()" ## print "node: ",node ## print "el:", el ## item = self.tree.GetItemText(el) ## data = self.tree.GetItemPyData(el) if pos == -1: subnode = self.tree.AppendItem(node,el[0]) self.tree.SetItemPyData(subnode,el[1]) else: subnode = self.tree.InsertItemBefore(node,i,el[0]) self.tree.SetItemPyData(subnode,el[1]) for x in el[2]: zetzeronder(subnode,x) if pastebelow: node = self.item i = -1 else: node = self.tree.GetItemParent(self.item) # self.item.get_parent() x,c = self.tree.GetFirstChild(node) cnt = self.tree.GetChildrenCount(node) for i in range(cnt): if x == self.item: if not before: i += 1 break x,c = self.tree.GetNextChild(node,c) if i == cnt: i = -1 zetzeronder(node,self.cut_el[0],i) else: if self.cut_att: item = getshortname(self.cut_att,attr=True) data = self.cut_att else: 
item = getshortname(self.cut_ent,ent=True) data = self.cut_ent if pastebelow: node = self.tree.AppendItem(self.item,item) self.tree.SetItemPyData(node,data) else: add_to = self.tree.GetItemParent(self.item) # self.item.get_parent() added = False x,c = self.tree.GetFirstChild(add_to) for i in range(self.tree.GetChildrenCount(add_to)): if x == self.item: if not before: i += 1 node = self.tree.InsertItemBefore(add_to,i,item) self.tree.SetItemPyData(node,data) added = True break x,c = self.tree.GetNextChild(add_to,c) if not added: node = self.tree.AppendItem(add_to,item) self.tree.SetItemPyData(node,data) def paste_aft(self, ev=None): self.paste(before=False) def paste_und(self, ev=None): self.paste(pastebelow=True) def add_attr(self, ev=None): if DESKTOP and not self.checkselection(): return text = self.tree.GetItemText(self.item) if not is_element(text): wx.MessageBox("Can't insert under a non-element",self.title, wx.OK | wx.ICON_ERROR) return if is_pcdata(text): wx.MessageBox("Can't insert under PCDATA", self.title,wx.OK | wx.ICON_ERROR) return edt = AttributeDialog(self,title="New attribute") if edt.ShowModal() == wx.ID_SAVE: h = (self.data["name"],self.data["srt"], self.data["opt"],self.data["val"]) rt = self.tree.AppendItem(self.item,getshortname(h,attr=True)) self.tree.SetItemPyData(rt,h) edt.Destroy() def add_ent(self, ev=None): if DESKTOP and not self.checkselection(): return text = self.tree.GetItemText(self.item) if not is_element(text): wx.MessageBox("Can't insert under a non-element",self.title, wx.OK | wx.ICON_ERROR) return if is_pcdata(text): wx.MessageBox("Can't insert under PCDATA", self.title,wx.OK | wx.ICON_ERROR) return edt = EntityDialog(self,title="New entity") if edt.ShowModal() == wx.ID_SAVE: h = (self.data["name"],self.data["srt"],self.data["val"]) rt = self.tree.AppendItem(self.item,getshortname(h,ent=True)) self.tree.SetItemPyData(rt,h) edt.Destroy() def insert(self, ev=None,before=True,below=False): if DESKTOP and not self.checkselection(): 
return if below: text = self.tree.GetItemText(self.item) if not is_element(text): wx.MessageBox("Can't insert under a non-element",self.title, wx.OK | wx.ICON_ERROR) return if is_pcdata(text): wx.MessageBox("Can't insert under PCDATA", self.title,wx.OK | wx.ICON_ERROR) return if self.tree.GetItemPyData(self.item) == (self.rt.tag,'one',False) and not below: wx.MessageBox("Can't insert before/after the root", self.title,wx.OK | wx.ICON_ERROR) return edt = ElementDialog(self,title="New element") if edt.ShowModal() == wx.ID_SAVE: data = (self.data['tag'],self.data['type'],self.data['opt']) text = getshortname(data) if below: rt = self.tree.AppendItem(self.item,text) self.tree.SetItemPyData(rt,data) else: parent = self.tree.GetItemParent(self.item) item = self.item if not before else self.tree.GetPrevSibling(self.item) node = self.tree.InsertItem(parent,item,text) self.tree.SetPyData(node,data) edt.Destroy() def ins_aft(self, ev=None): self.insert(before=False) def ins_chld(self, ev=None): self.insert(below=True) def on_click(self, event): self.close() class MainGui(object): def __init__(self,args): app = wx.App(redirect=False) # True,filename="axe.log") if len(args) > 1: frm = MainFrame(None, -1, fn=args[1]) else: frm = MainFrame(None, -1) app.MainLoop() def test_is_element(): for test in [x[0] for x in SYMBOLS['elsrt'].values()]: assert is_element(" ".join((test,"testdata")))," ".join((test,'wordt niet herkend als element')) for test in ('<15> hallo','','xxxxx',"hallo daar vrienden"): assert not is_element(" ".join((test,"testdata")))," ".join((test,'wordt herkend als element')) def test_is_attribute(): for test in [x[0] for x in SYMBOLS['attsrt'].values()]: assert is_attribute(" ".join((test,"testdata")))," ".join((test,'wordt niet herkend als attribuut')) for test in ('<15> hallo','','xxxxx',"hallo daar vrienden"): assert not is_attribute(" ".join((test,"testdata")))," ".join((test,'wordt herkend als attribuut')) def test_is_entitydef(): for test in [x[0] for x in 
SYMBOLS['entsrt'].values()]: assert is_entitydef(" ".join((test,"testdata")))," ".join((test,'wordt niet herkend als entiteit')) for test in ('<15> hallo','','xxxxx',"hallo daar vrienden"): assert not is_entitydef(" ".join((test,"testdata")))," ".join((test,'wordt herkend als entiteit')) if __name__ == "__main__": ## print sys.argv ## test_is_element() ## test_is_attribute() ## test_is_entitydef() MainGui(sys.argv)
nilq/baby-python
python
def register(mf): mf.overwrite_defaults({ "testvar": 42 }, scope="..notexistentmodule")
nilq/baby-python
python
# <caret>
nilq/baby-python
python