index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
983,300 | de4ea3fdb7925dc757c6060871664cdd4bcf280b | import re
# Find the first word that starts with a lowercase letter.
text = "Hello Ram, How are you ?"
# Raw string: '[a-z]\w+' without the r-prefix is an invalid escape
# sequence (DeprecationWarning, a SyntaxError in future Python versions).
data = re.search(r'[a-z]\w+', text)
print(data)
# data = re.findall('[A-Z]\w+', text)
# print(data) |
983,301 | 305c5a5b7de872720d4680e5a842b96aafe5d092 | from common import *
import user
# Enable debug mode (development settings; presumably Django-style -- TODO confirm).
DEBUG=True
# Use the SQLite database backend.
DATABASE_ENGINE='sqlite3'
# Database file lives in the user's home directory (``user.home`` comes from
# the project-local ``user`` module imported above).
DATABASE_NAME='%s/nextroom.db'%user.home
|
983,302 | 25a9d3666e352f7fc0433e86c0c230bc8a7cab4f | import tensorflow as tf
import numpy as np
from sklearn import preprocessing
import math
def conv_relu(input, size, depth, in_depth=None):
    """Build a conv2d -> bias-add -> ReLU layer in the current variable scope.

    :param input: 4-D input tensor (layout assumed NHWC -- TODO confirm).
    :param size: spatial filter extent (a ``size`` x ``size`` kernel).
    :param depth: number of output channels.
    :param in_depth: number of input channels of ``input``.
    :return: the ReLU-activated convolution output tensor.
    """
    # Xavier initialization; see
    # http://stats.stackexchange.com/questions/47590/what-are-good-initial-weights-in-a-neural-network
    # (Removed the unused ``sqared`` local -- math.sqrt(size*size) is just
    # ``size`` -- and the dead commented-out initializer alternatives.)
    weights = tf.get_variable("weights", (size, size, in_depth, depth),
        initializer=tf.contrib.layers.xavier_initializer())
    bias = tf.get_variable("bias", [depth],
        initializer=tf.constant_initializer(value=0.0))
    # No padding ('VALID') and unit strides, so the spatial output shrinks
    # by size-1 in each dimension.
    conv = tf.nn.conv2d(input, weights,
        strides=[1, 1, 1, 1], padding='VALID', use_cudnn_on_gpu=True)
    return tf.nn.relu(tf.nn.bias_add(conv, bias))
def input_definition(inputs, size=30, depths=[], filters=[]):
    """Build the per-camera conv layers and the shared conv tower on top.

    :param inputs: dict with keys "cam0", "cam1", "cam3", "cam4", each a
        4-channel image tensor.
    :param size: nominal input resolution; currently unused in the body
        (the first-layer filter size is hard-coded to 5). Kept for
        interface compatibility.
    :param depths: output depths for conv1/conv2 and, when a third entry
        is present, conv3.
    :param filters: filter sizes, parallel to ``depths``.
    :return: output tensor of the last conv layer.
    """
    # Guard against the shared mutable default-argument pitfall: never
    # mutate the defaults, and normalize None the same way.
    depths = [] if depths is None else depths
    filters = [] if filters is None else filters
    with tf.variable_scope("conv0_cam0"):
        conv_cam0 = conv_relu(inputs["cam0"], size=5, in_depth=4, depth=64)
    with tf.variable_scope("conv0_cam1"):
        conv_cam1 = conv_relu(inputs["cam1"], size=5, in_depth=4, depth=64)
    with tf.variable_scope("conv0_cam3"):
        conv_cam2 = conv_relu(inputs["cam3"], size=5, in_depth=4, depth=64)
    with tf.variable_scope("conv0_cam4"):
        conv_cam3 = conv_relu(inputs["cam4"], size=5, in_depth=4, depth=64)
    # Concatenate the four camera feature maps along channels (4 * 64 = 256,
    # matching the hard-coded in_depth of conv1 below).
    conc_input = tf.concat([conv_cam0, conv_cam1, conv_cam2, conv_cam3], 3)
    with tf.variable_scope("conv1"):
        r1 = conv_relu(conc_input, size=filters[0], in_depth=256, depth=depths[0])
    with tf.variable_scope("conv2"):
        r2 = conv_relu(r1, size=filters[1], in_depth=depths[0], depth=depths[1])
    # Optional third conv layer, only when a third depth is supplied.
    if len(depths) == 3:
        with tf.variable_scope("conv3"):
            r3 = conv_relu(r2, size=filters[2], in_depth=depths[1], depth=depths[2])
    else:
        r3 = r2
    return r3
def input_structure(input, plane_num):
    """Assemble the multi-resolution feature tower for a single plane.

    Four crops of the same plane (30/18/12/10 px) each run through their
    own conv stack; the three smaller branches are upsampled and
    concatenated with the 30 px branch, then batch-normalized.

    :param input: object exposing ``get_for_plane_size(plane, size)``
        (an InputOrganizer instance).
    :param plane_num: index of the plane whose placeholders to use.
    :return: the batch-normalized, channel-concatenated feature tensor.
    """
    with tf.variable_scope("in30"):
        in_30 = input_definition(input.get_for_plane_size(plane_num, 30),
                                size=30,
                                depths=[96, 48, 16],
                                filters=[3, 5, 5])
    with tf.variable_scope("in18"):
        in_18 = input_definition(input.get_for_plane_size(plane_num, 18),
                                size=18,
                                depths=[40, 40, 8],
                                filters=[3, 3, 3])
    with tf.variable_scope("in12"):
        in_12 = input_definition(input.get_for_plane_size(plane_num, 12),
                                size=12,
                                depths=[32, 8],
                                filters=[3, 3])
    with tf.variable_scope("in10"):
        in_10 = input_definition(input.get_for_plane_size(plane_num, 10),
                                size=10,
                                depths=[32, 8],
                                filters=[3, 3])
    # Nearest-neighbor resize the smaller branches to 16x16 before channel
    # concatenation -- assumes the 30px branch also comes out at 16x16;
    # TODO confirm against the VALID-conv shrinkage of input_definition.
    c = tf.concat([
        in_30,
        tf.image.resize_images(in_18, [16, 16], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR),
        tf.image.resize_images(in_12, [16, 16], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR),
        tf.image.resize_images(in_10, [16, 16], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
        ], 3)
    # Fixed (non-trainable) batch-norm parameters: identity scale/offset with
    # unit variance, i.e. the normalization is effectively a fixed affine map.
    n_out = 1
    mean = tf.Variable(tf.constant(0.0, shape=[n_out]),
                        name='mean', trainable=False)
    variance = tf.Variable(tf.constant(1.0, shape=[n_out]),
                        name='variance', trainable=False)
    # NOTE(review): tf.nn.batch_normalization documents variance_epsilon as a
    # small float; here a shape-[1] Variable is passed -- confirm intended.
    variance_epsilon = tf.Variable(tf.constant(0.0001, shape=[n_out]),
                        name='epsilon', trainable=False)
    offset = tf.Variable(tf.constant(0.0, shape=[n_out]),
                        name='offset', trainable=False)
    scale = tf.Variable(tf.constant(1.0, shape=[n_out]),
                        name='scale', trainable=False)
    normalized = tf.nn.batch_normalization(c, mean=mean,
                                            variance=variance,
                                            variance_epsilon=variance_epsilon,
                                            offset=offset,
                                            scale=scale)
    return normalized
class InputOrganizer(object):
    """Owns the tf.placeholder tensors for every (plane, size, camera)
    combination plus the 8x8x3 target patch, and converts batches of
    loaded images into TensorFlow feed dicts.
    """
    def __init__(self, batch_size, num_planes=96, meanzero=False):
        """
        :param batch_size: leading dimension of every placeholder.
        :param num_planes: number of planes to create placeholders for.
        :param meanzero: Use mean zero when retrieving feed dict
        """
        self.meanzero = meanzero
        if self.meanzero:
            print("Input data will be normalized on range [-1, 1]")
        else:
            print("Not using mean 0 range [0.0, 1.0]!")
        # Note: camera 2 is intentionally absent from the list.
        self.cameras = ["cam0", "cam1", "cam3", "cam4"]
        self.sizes = [30, 18, 12, 10]
        self.num_planes = num_planes
        self.num_channels = 4
        # Nested dict: placeholders[plane][size][cam] -> tf.placeholder.
        self.placeholders = {}
        with tf.name_scope("input_placeholders"):
            for plane in range(self.num_planes):
                self.placeholders[plane] = {}
                for size in self.sizes:
                    self.placeholders[plane][size] = {}
                    for cam in self.cameras:
                        # Placeholder name format: "<plane>_<size>_<cam>"
                        # (differs from the feed-dict key format used in
                        # get_feed_dict below).
                        name = "%s_%s_%s" % (plane, size, cam)
                        self.placeholders[plane][size][cam] = tf.placeholder(tf.float32,
                                                name=name,
                                                shape=(batch_size, size, size, self.num_channels))
            # Target patch
            self.target = tf.placeholder(tf.float32,
                            name="target",
                            shape=(batch_size, 8, 8, 3))
    def get_for_plane_size(self, plane, size):
        """Return the {cam: placeholder} dict for one (plane, size) pair."""
        input = self.placeholders[plane][size]
        # All four cameras must be present.
        assert len(input) == 4
        return input
    def get_target_placeholder(self):
        """Return the target-patch placeholder."""
        return self.target
    def preprocess_batch(self, image_batch):
        """Linearly rescale values from [0, 1] to [-1, 1] (assumes inputs
        are in [0, 1] -- matches the normalization message in __init__)."""
        #s = image_batch.shape
        #for im_idx in range(s[0]):
        #    for ch_idx in range(s[3]):
        #        image_batch[im_idx, :, :, ch_idx] = preprocessing.scale(image_batch[im_idx, :, :, ch_idx])
        return (image_batch * 2.0) - 1.0
    def get_feed_dict(self, images_feed):
        """Build a feed dict mapping placeholder names to batched arrays.

        :param images_feed: iterable of per-example dicts with a 'target'
            array and a 'planes' dict keyed "plane<plane>_<cam>_<size>"
            (note: cam before size, unlike the placeholder names).
        :return: dict suitable for session.run(feed_dict=...).
        """
        feed_dict = {}
        # Target images
        tar = np.concatenate([t['target'] for t in images_feed], axis=0)
        if self.meanzero:
            feed_dict[self.target.name] = self.preprocess_batch(tar)
        else:
            feed_dict[self.target.name] = tar
        # add 4 images with 4 resolutions for each plane (96 planes in total)
        for plane in range(self.num_planes):
            for size in self.sizes:
                for cam in self.cameras:
                    item_name = "plane%s_%s_%s" % (plane, cam, size)
                    images = [im['planes'][item_name] for im in images_feed]
                    c_images = np.concatenate(images, axis=0)
                    if self.meanzero:
                        feed_dict[self.placeholders[plane][size][cam].name] = self.preprocess_batch(c_images)
                    else:
                        feed_dict[self.placeholders[plane][size][cam].name] = c_images
        return feed_dict
|
983,303 | 9290e1ebc5d5ebd540b30ce6650c9731a0eabde4 | from typing import Any
from kgx.sink import Sink
class NullSink(Sink):
    """
    A NullSink just ignores any data written to it,
    effectively a /dev/null device for Transformer
    data flows, in which the inspection of the input
    knowledge graph is the important operation, but
    the graph itself is not persisted in the output
    (in particular, not in memory, where the huge
    memory footprint may be problematic, e.g. when
    stream processing huge graphs).
    Parameters
    ----------
    owner: Transformer
        Transformer to which the NullSink belongs
    n/a (**kwargs allowed, but ignored)
    """
    def __init__(self, owner, **kwargs: Any):
        # Extra keyword arguments are accepted for interface
        # compatibility with other Sink subclasses, but ignored.
        super().__init__(owner)
    def write_node(self, record) -> None:
        """
        Discard a node record (no-op).
        Parameters
        ----------
        record: Any
            A node record
        """
        pass
    def write_edge(self, record) -> None:
        """
        Discard an edge record (no-op).
        Parameters
        ----------
        record: Any
            An edge record
        """
        pass
    def finalize(self) -> None:
        """
        Operations that ought to be done after
        writing all the incoming data should be called
        by this method. For a NullSink there is nothing
        to flush or close, so this is a no-op.
        """
        pass
|
983,304 | 5ea5e66e461bbd98a9e022d92e2429f591feb954 | import datetime
import urllib2
import json
import pandas as pd
def _parse(date):
mmddyy = date.split("/")
return datetime.date(month=int(mmddyy[0]), day=int(mmddyy[1]), year=int(mmddyy[2]))
def load_csv(filepath):
    """Read a 3-column CSV (date, ignored, value) into a DataFrame
    indexed by parsed date.

    The first column holds "MM/DD/YYYY" dates and becomes the index;
    the middle column is dropped via usecols.
    """
    def _to_date(text):
        # Inlined date parser: "MM/DD/YYYY" -> datetime.date.
        pieces = text.split("/")
        return datetime.date(month=int(pieces[0]), day=int(pieces[1]), year=int(pieces[2]))

    frame = pd.read_csv(
        filepath,
        names=['time', '', 'value'],
        usecols=[0, 2],
        header=0,
        converters={"time": _to_date},
    )
    frame.set_index("time", inplace=True)
    return frame
|
983,305 | 214359b999f4cffcc98a2c65056d57d044fe3fc9 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Generated by get_e212.py
# Based on https://en.wikipedia.org/wiki/Mobile_country_code
operators = \
{202: {1: ('GR', 'COSMOTE - Mobile Telecommunications S.A.'),
2: ('GR', 'COSMOTE - Mobile Telecommunications S.A.'),
3: ('GR', 'OTE'),
4: ('GR', 'OSE'),
5: ('GR', 'Vodafone Greece'),
7: ('GR', 'AMD Telecom'),
9: ('GR', 'Wind Hellas Telecommunications S.A.'),
10: ('GR', 'Wind Hellas Telecommunications S.A.'),
11: ('GR', 'interConnect'),
12: ('GR', 'Yuboto'),
13: ('GR', 'Compatel Limited'),
14: ('GR', 'CYTA'),
15: ('GR', 'BWS'),
16: ('GR', 'Inter Telecom')},
204: {1: ('NL', 'RadioAccess Network Services'),
2: ('NL', 'Tele2 Nederland B.V.'),
3: ('NL', 'Voiceworks B.V.'),
4: ('NL', 'Vodafone Libertel B.V.'),
5: ('NL', 'Elephant Talk Communications Premium Rate Services'),
6: ('NL', 'Mundio Mobile (Netherlands) Ltd'),
7: ('NL', 'Teleena (MVNE)'),
8: ('NL', 'KPN Mobile The Netherlands B.V.'),
9: ('NL', 'Lycamobile Netherlands Limited'),
10: ('NL', 'KPN B.V.'),
11: ('NL', 'VoipIT B.V.'),
12: ('NL', 'KPN Mobile The Netherlands B.V.'),
13: ('NL', 'Unica Installatietechniek B.V.'),
14: ('NL', '6GMOBILE B.V.'),
15: ('NL', 'Ziggo B.V.'),
16: ('NL', 'T-Mobile Netherlands B.V'),
17: ('NL', 'Intercity Mobile Communications B.V.'),
18: ('NL', 'UPC Nederland B.V.'),
19: ('NL', 'Mixe Communication Solutions B.V.'),
20: ('NL', 'T-Mobile Netherlands B.V'),
21: ('NL', 'ProRail B.V.'),
22: ('NL', 'Ministerie van Defensie'),
23: ('NL', 'Wyless Nederland B.V.'),
24: ('NL', 'Private Mobility Nederland B.V.'),
25: ('NL', 'CapX B.V.'),
26: ('NL', 'SpeakUp B.V.'),
27: ('NL', 'Breezz Nederland B.V.'),
28: ('NL', 'Lancelot B.V.'),
29: ('NL', 'Private Mobile Ltd'),
60: ('NL', 'Nextgen Mobile Ltd'),
61: ('NL', 'BodyTrace Netherlands B.V.'),
62: ('NL', 'Voxbone mobile'),
64: ('NL', 'Zetacom B.V.'),
65: ('NL', 'AGMS Netherlands B.V.'),
66: ('NL', 'Utility Connect B.V.'),
67: ('NL', 'Koning en Hartman B.V.'),
68: ('NL', 'Roamware (Netherlands) B.V.'),
69: ('NL', 'KPN Mobile The Netherlands B.V.')},
206: {1: ('BE', 'Belgacom Mobile'),
2: ('BE', 'Infrabel'),
5: ('BE', 'Telenet'),
6: ('BE', 'Lycamobile sprl'),
7: ('BE', 'Mundio Mobile Belgium nv'),
8: ('BE', 'Nethys'),
10: ('BE', 'Orange S.A.'),
20: ('BE', 'Telenet'),
25: ('BE', 'Voyacom SPRL'),
28: ('BE', 'BICS'),
30: ('BE', 'Unleashed NV'),
33: ('BE', 'Ericsson NV'),
40: ('BE', 'JOIN Experience (Belgium)'),
50: ('BE', 'IP Nexia')},
208: {1: ('MC', 'Orange S.A.'),
2: ('FR', 'Orange S.A.'),
3: ('FR', 'MobiquiThings'),
4: ('FR', "Societe d'ingenierie systeme telecom et reseaux"),
5: ('FR', 'Globalstar Europe'),
6: ('FR', 'Globalstar Europe'),
7: ('FR', 'Globalstar Europe'),
8: ('FR', 'Altice'),
9: ('FR', 'Altice'),
10: ('MC', 'Altice'),
11: ('FR', 'Altice'),
13: ('FR', 'Altice'),
14: ('FR', 'SNCF Réseau'),
15: ('FR', 'Iliad'),
16: ('FR', 'Iliad'),
17: ('FR', 'Local Exchange Global Operation Services'),
20: ('MC', 'Bouygues Telecom'),
21: ('FR', 'Bouygues Telecom'),
22: ('FR', 'Transatel'),
24: ('FR', 'MobiquiThings'),
25: ('FR', 'LycaMobile'),
26: ('FR', 'Euro-Information Telecom SAS'),
27: ('FR', 'Coriolis Telecom'),
28: ('FR', 'Airbus Defence and Space SAS'),
30: ('FR', 'Syma Mobile'),
31: ('FR', 'Mundio Mobile'),
88: ('FR', 'Bouygues Telecom'),
91: ('FR', 'Orange S.A.'),
94: ('FR', 'Halys')},
212: {10: ('MC', 'Monaco Telecom')},
213: {3: ('AD', 'Servei De Tele. DAndorra')},
214: {1: ('ES', 'Vodafone Spain'),
2: ('ES', 'Alta Tecnologia en Comunicacions SL'),
3: ('ES', 'Orange Espagne S.A.U'),
4: ('ES', 'Xfera Moviles SA'),
5: ('ES', 'Telefónica Móviles España'),
6: ('ES', 'Vodafone Spain'),
7: ('ES', 'Telefónica Móviles España'),
8: ('ES', ''),
9: ('ES', 'Orange Espagne S.A.U'),
10: ('ES', 'ZINNIA TELECOMUNICACIONES, S.L.U.'),
11: ('ES', 'TELECOM CASTILLA-LA MANCHA, S.A.'),
12: ('ES', 'SAC CONVERGENT AGGREGATION SERVICES, S.L.U.'),
16: ('ES', 'Telecable de Asturias S.A.U.'),
17: ('ES', 'R Cable y Telecomunicaciones Galicia S.A.'),
19: ('ES', 'Orange España Virtual Sl.'),
21: ('ES', 'Orange Espagne S.A.U'),
22: ('ES', 'Best Spain Telecom'),
23: ('ES', 'Barablu Móvil España'),
24: ('ES', 'Eroski Móvil España'),
25: ('ES', 'LycaMobile S.L.'),
26: ('ES', 'Lleida Networks Serveis Telemátics, SL'),
27: ('ES', 'SCN Truphone, S.L.'),
28: ('ES', 'Consorcio de Telecomunicaciones Avanzadas, S.A.'),
29: ('ES', 'NEO-SKY 2002, S.A.'),
30: ('ES', 'Compatel Limited'),
31: ('ES',
'Red Digital De Telecomunicaciones de las Islas Baleares, S.L.'),
32: ('ES', 'Telefónica Móviles España'),
33: ('ES', 'EURONA WIRELESS TELECOM, S.A.'),
34: ('ES', 'Aire Networks del Mediterráneo, S.L.U.'),
35: ('ES', 'INGENIUM OUTSOURCING SERVICES, S.L.'),
36: ('ES', 'OPEN CABLE TELECOMUNICACIONES, S.L.'),
37: ('ES', 'Vodafone Spain'),
51: ('ES', 'Administrador de Infraestructuras Ferroviarias')},
216: {1: ('HU', 'Telenor Magyarország Zrt.'),
2: ('HU', 'MVM Net Ltd.'),
4: ('HU', 'Invitech Solutions'),
30: ('HU', 'Magyar Telekom Plc'),
70: ('HU', 'Vodafone Magyarország Zrt.'),
71: ('HU', 'UPC Hungary Ltd.'),
99: ('HU', 'Magyar Államvasutak')},
218: {3: ('BA', 'Public Enterprise Croatian Telecom Ltd.'),
5: ('BA', 'RS Telecommunications JSC Banja Luka'),
90: ('BA', 'BH Telecom')},
219: {1: ('HR', 'T-Hrvatski Telekom'),
2: ('HR', 'Tele2'),
10: ('HR', 'Vipnet'),
12: ('HR', 'TELE FOCUS d.o.o.')},
220: {1: ('RS', 'Telenor Serbia'),
3: ('RS', 'Telekom Srbija'),
5: ('RS', 'VIP Mobile'),
7: ('RS', 'Orion Telekom'),
9: ('RS', 'MUNDIO MOBILE d.o.o.'),
11: ('RS', 'GLOBALTEL d.o.o.')},
221: {1: ('XK', 'Telecom of Kosovo J.S.C.'),
2: ('XK', 'IPKO'),
6: ('XK', 'Dardaphone.Net LLC')},
222: {1: ('IT', 'Telecom Italia S.p.A'),
4: ('IT', ''),
5: ('IT', ''),
6: ('IT', 'Vodafone Italia S.p.A.'),
8: ('IT', 'Fastweb S.p.A.'),
10: ('IT', 'Vodafone Italia S.p.A.'),
30: ('IT', 'Rete Ferroviaria Italiana'),
33: ('IT', 'Poste Mobile S.p.A.'),
34: ('IT', 'BT Italia'),
35: ('IT', 'Lycamobile'),
36: ('IT', 'Digi Italy S.r.l.'),
37: ('IT', 'Wind Tre'),
38: ('IT', 'Linkem S.p.A.'),
39: ('IT', 'SMS Italia S.r.l.'),
43: ('IT', 'Telecom Italia S.p.A.'),
47: ('IT', 'Tiscali S.p.A.'),
48: ('IT', 'Telecom Italia S.p.A.'),
50: ('IT', 'Iliad Italia'),
88: ('IT', 'Wind Tre'),
99: ('IT', 'Wind Tre')},
226: {1: ('RO', 'Vodafone România'),
3: ('RO', 'Telekom Romania'),
5: ('RO', 'RCS&RDS'),
6: ('RO', 'Telekom Romania'),
10: ('RO', 'Orange România'),
11: ('RO', 'Enigma-System'),
15: ('RO', 'Idilis'),
16: ('RO', 'Lycamobile Romania')},
228: {1: ('CH', 'Swisscom AG'),
2: ('CH', 'Sunrise Communications AG'),
3: ('CH', 'Salt Mobile SA'),
6: ('CH', 'SBB AG'),
8: ('CH', 'TelCommunication Services AG'),
9: ('CH', 'Comfone AG'),
10: ('CH', 'Stadt Polizei Zürich'),
11: ('CH', 'Swisscom Broadcast AG'),
51: ('CH', 'relario AG'),
53: ('CH', 'UPC Schweiz GmbH'),
54: ('CH', 'Lycamobile AG'),
55: ('CH', 'WeMobile SA'),
57: ('CH', 'Mitto AG'),
58: ('CH', 'Beeone Communications SA'),
60: ('CH', 'Sunrise Communications AG'),
61: ('CH', 'Compatel Ltd.'),
99: ('CH', 'Swisscom Broadcast AG')},
230: {1: ('CZ', 'T-Mobile Czech Republic'),
2: ('CZ', 'O2 Czech Republic'),
3: ('CZ', 'Vodafone Czech Republic'),
4: ('CZ', 'Nordic Telecom s.r.o.'),
5: ('CZ', 'PODA a.s.'),
8: ('CZ', 'Compatel s.r.o.'),
9: ('CZ', 'Mundio Distribution Czech Republic s.r.o.'),
98: ('CZ', 'Správa železniční dopravní cesty, s.o.'),
99: ('CZ', 'Vodafone Czech Republic')},
231: {1: ('SK', 'Orange Slovensko'),
2: ('SK', 'Slovak Telekom'),
3: ('SK', 'SWAN Mobile, a.s.'),
4: ('SK', 'Slovak Telekom'),
5: ('SK', 'Orange Slovensko'),
6: ('SK', 'Telefónica O2 Slovakia'),
7: ('SK', 'Towercom, a. s.'),
8: ('SK', 'IPfon, s.r.o.'),
99: ('SK', 'Železnice Slovenskej Republiky')},
232: {1: ('AT', 'A1 Telekom Austria'),
2: ('AT', 'A1 Telekom Austria'),
3: ('AT', 'T-Mobile Austria'),
4: ('AT', 'T-Mobile Austria Gmbh'),
5: ('AT', 'Hutchison Drei Austria'),
7: ('AT', 'T-Mobile Austria'),
8: ('AT', 'Lycamobile Austria'),
9: ('AT', 'A1 Telekom Austria'),
10: ('AT', 'Hutchison Drei Austria'),
11: ('AT', 'A1 Telekom Austria'),
12: ('AT', 'A1 Telekom Austria'),
13: ('AT', 'UPC Austria'),
14: ('AT', 'Hutchison Drei Austria'),
15: ('AT', 'Mundio Mobile Austria'),
16: ('AT', 'Hutchison Drei Austria'),
17: ('AT', 'MASS Response Service GmbH'),
18: ('AT', 'smartspace GmbH'),
19: ('AT', 'Tele2 Telecommunication GmbH'),
20: ('AT', 'MTEL Austrija GmbH'),
21: ('AT', 'Salzburg AG für Energie, Verkehr und Telekommunikation'),
22: ('AT', 'Plintron Austria Limited'),
23: ('AT', 'T-Mobile Austria GmbH'),
91: ('AT', 'ÖBB'),
92: ('AT', 'ArgoNET GmbH')},
234: {0: ('GB', 'BT Group'),
1: ('GB', 'Mundio Mobile Limited'),
2: ('GB', 'Telefónica Europe'),
3: ('GB', 'Jersey Airtel Ltd'),
4: ('GB', 'FMS Solutions Ltd'),
8: ('GB', 'BT OnePhone (UK) Ltd'),
9: ('GB', 'Tismi BV'),
10: ('GB', 'Telefónica Europe'),
11: ('GB', 'Telefónica Europe'),
12: ('GB', 'Network Rail Infrastructure Ltd'),
13: ('GB', 'Network Rail Infrastructure Ltd'),
14: ('GB', 'Hay Systems Ltd'),
15: ('GB', 'Vodafone'),
16: ('GB', 'TalkTalk Communications Limited'),
17: ('GB', 'FleXtel Limited'),
18: ('GB', 'Cloud9'),
19: ('GB', 'Teleware plc'),
20: ('GB', 'Hutchison 3G UK Ltd'),
22: ('GB', 'Telesign Mobile Limited'),
23: ('GB', 'Icron Network Limited'),
24: ('GB', 'Stour Marine Limited'),
25: ('GB', 'Truphone'),
26: ('GB', 'Lycamobile UK Limited'),
27: ('GB', 'Teleena UK Limited'),
28: ('GB', 'Marathon Telecom Limited'),
29: ('GB', '(aq) Limited'),
30: ('GB', 'EE'),
31: ('GB', 'EE'),
32: ('GB', 'EE'),
33: ('GB', 'EE'),
34: ('GB', 'EE'),
36: ('GB', 'Sure Isle of Man Ltd.'),
37: ('GB', 'Synectiv Ltd'),
38: ('GB', 'Virgin Media'),
39: ('GB', 'Gamma Telecom Holdings Ltd.'),
50: ('GB', 'JT Group Limited'),
51: ('GB', 'UK Broadband Limited'),
52: ('GB', 'Shyam Telecom UK Ltd'),
53: ('GB', 'Limitless Mobile Ltd'),
54: ('GB', 'The Carphone Warehouse Limited'),
55: ('GB', 'Sure (Guernsey) Limited'),
56: ('GB', 'CESG'),
57: ('GB', 'Sky UK Limited'),
58: ('GB', 'Manx Telecom'),
59: ('GB', 'Limitless Mobile Ltd'),
70: ('GB', 'AMSUK Ltd.'),
71: ('GB', 'Home Office'),
72: ('GB', 'Hanhaa Limited'),
76: ('GB', 'BT Group'),
78: ('GB', 'Airwave Solutions Ltd'),
86: ('GB', 'EE')},
235: {0: ('GB', 'Mundio Mobile Limited'),
1: ('GB', 'EE'),
2: ('GB', 'EE'),
3: ('GB', 'UK Broadband Limited'),
77: ('GB', 'BT Group'),
88: ('GB', 'Telet Research (N.I.) Limited'),
91: ('GB', 'Vodafone United Kingdom'),
94: ('GB', 'Hutchison 3G UK Ltd'),
95: ('GB', 'Network Rail Infrastructure Limited')},
238: {1: ('DK', 'TDC A/S'),
2: ('DK', 'Telenor Denmark'),
3: ('DK', 'Syniverse Technologies'),
4: ('DK', 'NextGen Mobile Ltd T/A CardBoardFish'),
5: ('DK', 'Dansk Beredskabskommunikation A/S'),
6: ('DK', 'Hi3G Denmark ApS'),
8: ('DK', 'Voxbone mobile'),
9: ('DK', 'Dansk Beredskabskommunikation A/S'),
10: ('DK', 'TDC A/S'),
11: ('DK', 'Dansk Beredskabskommunikation A/S'),
12: ('DK', 'Lycamobile Denmark Ltd'),
13: ('DK', 'Compatel Limited'),
14: ('DK', 'Monty UK Global Limited'),
15: ('DK', 'Ice Danmark ApS'),
16: ('DK', 'Tismi B.V.'),
18: ('DK', 'Cubic Telecom'),
20: ('DK', 'Telia'),
23: ('DK', 'Banedanmark'),
25: ('DK', 'SMS Provider Corp.'),
28: ('DK', 'LINK Mobile A/S'),
30: ('DK', 'Interactive digital media GmbH'),
42: ('DK', 'Greenwave Mobile IoT ApS'),
66: ('DK', 'TT-Netværket P/S'),
73: ('DK', 'Onomondo ApS'),
77: ('DK', 'Telenor Denmark')},
240: {1: ('SE', 'TeliaSonera Sverige AB'),
2: ('SE', 'HI3G Access AB'),
3: ('SE', 'Netett Sverige AB'),
4: ('SE', '3G Infrastructure Services AB'),
5: ('SE', 'Svenska UMTS-Nät AB'),
6: ('SE', 'Telenor Sverige AB'),
7: ('SE', 'Tele2 Sverige AB'),
9: ('SE', 'Communication for Devices in Sweden AB'),
10: ('SE', 'Tele2 Sverige AB'),
11: ('SE', 'ComHem AB'),
12: ('SE', 'Lycamobile Sweden Limited'),
13: ('SE', 'Alltele Företag Sverige AB'),
14: ('SE', 'Tele2 Business AB'),
15: ('SE', 'Wireless Maingate Nordic AB'),
16: ('SE', '42 Telecom AB'),
17: ('SE', 'Götalandsnätet AB'),
18: ('SE', 'Generic Mobile Systems Sweden AB'),
19: ('SE', 'Mundio Mobile (Sweden) Limited'),
20: ('SE', 'Wireless Maingate Messaging Services AB'),
21: ('SE', 'Trafikverket ICT'),
22: ('SE', 'EuTel AB'),
24: ('SE', 'Net4Mobility HB'),
25: ('SE', 'Monty UK Global Ltd'),
26: ('SE', 'Twilio Sweden AB'),
27: ('SE', 'GlobeTouch AB'),
28: ('SE', 'LINK Mobile A/S'),
29: ('SE', 'Mercury International Carrier Services'),
30: ('SE', 'NextGen Mobile Ltd.'),
31: ('SE', 'RebTel Network AB'),
32: ('SE', 'Compatel Limited'),
33: ('SE', 'Mobile Arts AB'),
35: ('SE', '42 Telecom LTD'),
36: ('SE', 'interactive digital media GmbH'),
37: ('SE', 'CLX Networks AB'),
38: ('SE', 'Voxbone mobile'),
39: ('SE', 'Borderlight AB'),
40: ('SE', 'North net connect AB'),
41: ('SE', 'Shyam Telecom UK Ltd.'),
42: ('SE', 'Telenor Connexion AB'),
43: ('SE', 'MobiWeb Ltd.'),
44: ('SE', 'Telenabler AB'),
45: ('SE', 'Spirius AB'),
46: ('SE', 'SMS Provider Corp.'),
47: ('SE', 'Viatel Sweden AB'),
60: ('SE', 'Telefonaktiebolaget LM Ericsson'),
61: ('SE', 'MessageBird B.V.')},
242: {1: ('NO', 'Telenor Norge AS'),
2: ('NO', 'TeliaSonera Norge AS'),
6: ('NO', 'ICE Norge AS'),
8: ('NO', 'TDC Mobil AS'),
9: ('NO', 'Com4 AS'),
10: ('NO', 'Norwegian Communications Authority'),
11: ('NO', 'SystemNet AS'),
12: ('NO', 'Telenor Norge AS'),
14: ('NO', 'ICE Communication Norge AS'),
20: ('NO', 'Jernbaneverket AS'),
21: ('NO', 'Jernbaneverket AS'),
22: ('NO', 'Altibox AS'),
23: ('NO', 'Lyca Mobile Ltd'),
25: ('NO', 'Forsvarets kompetansesenter KKIS'),
90: ('NO', 'Nokia Solutions and Networks Norge AS'),
99: ('NO', 'TampNet AS')},
244: {3: ('FI', 'DNA Oy'),
4: ('FI', 'DNA Oy'),
5: ('FI', 'Elisa Oyj'),
7: ('FI', 'Nokia Solutions and Networks Oy'),
8: ('FI', 'Nokia Solutions and Networks Oy'),
9: ('FI', 'Nokia Solutions and Networks Oy'),
10: ('FI', 'Viestintävirasto'),
11: ('FI', 'Viestintävirasto'),
12: ('FI', 'DNA Oy'),
14: ('FI', 'Ålands Telekommunikation Ab'),
17: ('FI', 'Liikennevirasto'),
21: ('FI', 'Elisa Oyj'),
26: ('FI', 'Compatel Ltd'),
27: ('FI', 'Teknologian tutkimuskeskus VTT Oy'),
28: ('FI', 'Teknologian tutkimuskeskus VTT Oy'),
32: ('FI', 'Voxbone SA'),
33: ('FI', 'Virve Tuotteet ja Palvelut Oy'),
34: ('FI', 'Bittium Wireless Oy'),
35: ('FI', 'Ukkoverkot Oy'),
36: ('FI', 'TeliaSonera Finland Oyj / Suomen Yhteisverkko Oy'),
37: ('FI', 'Tismi BV'),
38: ('FI', 'Nokia Solutions and Networks Oy'),
39: ('FI', 'Nokia Solutions and Networks Oy'),
40: ('FI', 'Nokia Solutions and Networks Oy'),
41: ('FI', 'Nokia Solutions and Networks Oy'),
42: ('FI', 'SMS Provider Corp.'),
43: ('FI', 'Telavox AB / Telavox Oy'),
44: ('FI', 'Turun ammattikorkeakoulu Oy'),
91: ('FI', 'TeliaSonera Finland Oyj')},
246: {1: ('LT', 'Telia Lietuva'),
2: ('LT', 'UAB Bitė Lietuva'),
3: ('LT', 'UAB Tele2 (Tele2 AB, Sweden)'),
4: ('LT', 'LR vidaus reikalų ministerija (Ministry of the Interior)'),
5: ('LT', 'Lietuvos geležinkeliai (Lithuanian Railways)'),
6: ('LT', 'UAB Mediafon'),
7: ('LT', 'Compatel Ltd.'),
8: ('LT', 'Lietuvos radijo ir televizijos centras'),
9: ('LT', 'Interactive Digital Media GmbH')},
247: {1: ('LV', 'Latvian Mobile Telephone'),
2: ('LV', 'Tele2'),
3: ('LV', 'Telekom Baltija'),
5: ('LV', 'Bite Latvija'),
7: ('LV', 'SIA "MEGATEL"'),
9: ('LV', 'Camel Mobile')},
248: {1: ('EE', 'Estonian Mobile Telecom'),
2: ('EE', 'Elisa Eesti'),
3: ('EE', 'Tele2 Eesti'),
4: ('EE', 'OY Top Connect'),
11: ('EE', 'UAB Raystorm Eesti filiaal'),
71: ('EE', 'Siseministeerium (Ministry of Interior)')},
250: {1: ('RU', 'Mobile TeleSystems'),
2: ('RU', 'MegaFon PJSC'),
3: ('RU', 'Nizhegorodskaya Cellular Communications'),
5: ('RU', 'Yeniseytelecom'),
6: ('RU', 'CJSC Saratov System of Cellular Communications'),
7: ('RU', 'Zao SMARTS'),
8: ('RU', 'CS "VainahTelecom"'),
9: ('RU', 'Khabarovsky Cellular Phone'),
11: ('RU', 'Scartel'),
12: ('RU', ''),
15: ('RU', 'SMARTS Ufa, SMARTS Uljanovsk'),
16: ('RU', 'New Telephone Company'),
17: ('RU', 'JSC Uralsvyazinform'),
20: ('RU', 'Tele2'),
21: ('RU', 'JSC "GlobalTel"'),
22: ('RU', 'Vainakh Telecom'),
23: ('RU', 'GTNT'),
29: ('RU', 'Iridium Communications'),
32: ('RU', 'K-Telecom'),
33: ('RU', 'Sevtelekom'),
34: ('RU', 'Krymtelekom'),
35: ('RU', 'EKATERINBURG-2000'),
38: ('RU', 'Central Telecommunication Company'),
50: ('RU', 'Bezlimitno.ru'),
54: ('RU', 'Tattelecom'),
60: ('RU', 'KTK Telecom'),
62: ('RU', 'Tinkoff Mobile'),
99: ('RU', 'OJSC Vimpel-Communications')},
255: {0: ('UA', 'Interdnestrcom'),
1: ('UA', 'PRJSC VF Ukraine'),
3: ('UA', 'Kyivstar JSC'),
4: ('UA', 'Intertelecom LLC'),
6: ('UA', 'Turkcell'),
7: ('UA', 'Trymob LLC'),
21: ('UA', 'Telesystems of Ukraine'),
25: ('UA', 'CST Invest')},
257: {1: ('BY', ''),
2: ('BY', 'Mobile TeleSystems'),
4: ('BY', 'Belarusian Telecommunications Network'),
6: ('BY', 'Belorussian Cloud Technologies')},
259: {1: ('MD', 'Orange Moldova'),
2: ('MD', ''),
3: ('MD', 'Moldtelecom'),
5: ('MD', 'Moldtelecom'),
15: ('MD', 'Interdnestrcom'),
99: ('MD', 'Moldtelecom')},
260: {1: ('PL', 'Polkomtel Sp. z o.o.'),
2: ('PL', 'T-Mobile Polska S.A.'),
3: ('PL', 'Polska Telefonia Komórkowa Centertel Sp. z o.o.'),
6: ('PL', 'P4 Sp. z o.o.'),
7: ('PL', 'Netia S.A.'),
9: ('PL', 'Lycamobile Sp. z o.o.'),
10: ('PL', 'T-Mobile Polska S.A.'),
11: ('PL', 'Nordisk Polska Sp. z o.o.'),
12: ('PL', 'Cyfrowy Polsat S.A.'),
13: ('PL', 'Move Telecom S.A.'),
15: ('PL', 'Aero 2 Sp. z o.o.'),
16: ('PL', 'Aero 2 Sp. z o.o.'),
17: ('PL', 'Aero 2 Sp. z o.o.'),
18: ('PL', 'AMD Telecom S.A.'),
22: ('PL', 'Arcomm Sp. z o.o.'),
24: ('PL', 'IT Partners Telco Sp. z o.o.'),
32: ('PL', 'Compatel Limited'),
33: ('PL', 'Truphone Poland Sp. z o.o.'),
34: ('PL', 'T-Mobile Polska S.A.'),
35: ('PL', 'PKP Polskie Linie Kolejowe S.A.'),
37: ('PL', 'NEXTGEN MOBILE LTD'),
38: ('PL', 'CALLFREEDOM Sp. z o.o.'),
39: ('PL', 'VOXBONE SA'),
40: ('PL', 'Interactive Digital Media GmbH'),
41: ('PL', 'EZ PHONE MOBILE Sp. z o.o.'),
42: ('PL', 'MobiWeb Telecom Limited'),
43: ('PL', 'Smart Idea International Sp. z o.o.'),
44: ('PL', 'Rebtel Poland Sp. z o.o.'),
45: ('PL', 'Virgin Mobile Polska Sp. z o.o.'),
46: ('PL', 'Terra Telekom Sp. z o.o.'),
47: ('PL', 'SMShighway Limited'),
48: ('PL', 'AGILE TELECOM S.P.A.'),
49: ('PL', 'Messagebird B.V.')},
262: {1: ('DE', 'Telekom Deutschland GmbH'),
2: ('DE', 'Vodafone D2 GmbH'),
3: ('DE', 'Telefónica Germany GmbH & Co. oHG'),
4: ('DE', 'Vodafone D2 GmbH'),
5: ('DE', 'Telefónica Germany GmbH & Co. oHG'),
6: ('DE', 'Telekom Deutschland GmbH'),
8: ('DE', 'Telefónica Germany GmbH & Co. oHG'),
9: ('DE', 'Vodafone D2 GmbH'),
10: ('DE', 'DB Netz AG'),
11: ('DE', 'Telefónica Germany GmbH & Co. oHG'),
12: ('DE', 'sipgate GmbH'),
15: ('DE', ''),
17: ('DE', 'Telefónica Germany GmbH & Co. oHG'),
18: ('DE', 'NetCologne'),
19: ('DE', 'Inquam Deutschland'),
20: ('DE', 'Voiceworks GmbH'),
21: ('DE', 'Multiconnect GmbH'),
22: ('DE', 'sipgate Wireless GmbH'),
23: ('DE', 'Drillisch Online AG'),
33: ('DE', 'sipgate GmbH'),
42: ('DE', 'Chaos Computer Club'),
43: ('DE', 'Lycamobile'),
60: ('DE', 'DB Telematik'),
72: ('DE', 'Ericsson GmbH'),
73: ('DE', 'Xantaro Deutschland GmbH'),
74: ('DE', 'Qualcomm CDMA Technologies GmbH'),
75: ('DE', 'Core Network Dynamics GmbH'),
77: ('DE', 'Telefónica Germany GmbH & Co. oHG'),
78: ('DE', 'Telekom Deutschland GmbH'),
92: ('DE', 'Nash Technologies')},
266: {1: ('GI', 'Gibtelecom'), 9: ('GI', 'Eazitelecom')},
268: {1: ('PT', 'Vodafone Portugal'),
2: ('PT', 'Telecomunicações Móveis Nacionais'),
3: ('PT', 'NOS Comunicações'),
4: ('PT', 'LycaMobile'),
6: ('PT', 'Telecomunicações Móveis Nacionais'),
7: ('PT', 'Mundio Mobile (Portugal) Limited'),
11: ('PT', 'Compatel, Limited'),
12: ('PT', 'Infraestruturas de Portugal, S.A.'),
13: ('PT', 'G9Telecom, S.A.'),
80: ('PT', 'Telecomunicações Móveis Nacionais')},
270: {1: ('LU', 'POST Luxembourg'),
2: ('LU', 'MTX Connect S.a.r.l.'),
7: ('LU', 'Bouygues Telecom S.A.'),
10: ('LU', 'Blue Communications'),
77: ('LU', 'Tango SA'),
78: ('LU', 'Interactive digital media GmbH'),
79: ('LU', 'Mitto A.G.'),
80: ('LU', 'Syniverse Technologies S.à r.l.'),
81: ('LU', 'E-Lux Mobile Telecommunication S.A.'),
99: ('LU', 'Orange S.A.')},
272: {1: ('IE', 'Vodafone Ireland'),
2: ('IE', 'Hutchison 3G Ireland limited'),
3: ('IE', 'Eir Group plc'),
4: ('IE', 'Access Telecom'),
5: ('IE', 'Hutchison 3G Ireland limited'),
7: ('IE', 'Eir Group plc'),
8: ('IE', 'Eir Group plc'),
11: ('IE', 'Liffey Telecom'),
13: ('IE', 'Lycamobile'),
15: ('IE', 'UPC'),
16: ('IE', 'Carphone Warehouse'),
17: ('IE', 'Hutchison 3G Ireland limited')},
274: {1: ('IS', 'Iceland Telecom'),
2: ('IS', 'Og fjarskipti hf'),
3: ('IS', 'Og fjarskipti hf'),
4: ('IS', 'IMC Island ehf'),
8: ('IS', 'Iceland Telecom'),
11: ('IS', 'Nova ehf'),
12: ('IS', 'IP fjarskipti'),
16: ('IS', 'Tismi BV'),
22: ('IS', 'Landhelgisgæslan (Icelandic Coast Guard)'),
31: ('IS', 'Iceland Telecom')},
276: {1: ('AL', 'Telekom Albania'),
2: ('AL', 'Vodafone Albania'),
3: ('AL', 'Albtelecom'),
4: ('AL', 'Plus Communication')},
278: {1: ('MT', 'Vodafone Malta'),
11: ('MT', 'YOM Ltd.'),
21: ('MT', 'Mobile Communications Limited'),
30: ('MT', 'Mobile Communications Limited'),
77: ('MT', 'Melita')},
280: {1: ('CY', 'Cyprus Telecommunications Authority'),
10: ('CY', 'MTN Group'),
20: ('CY', 'PrimeTel PLC'),
22: ('CY', 'Lemontel Ltd'),
23: ('CY', 'Mundio Mobile Cyprus Ltd.')},
282: {1: ('GE', 'Geocell Limited'),
2: ('GE', 'Magticom GSM'),
3: ('GE', 'Magtifix'),
4: ('GE', 'Mobitel LLC'),
5: ('GE', 'JSC Silknet'),
6: ('GE', 'JSC Compatel'),
7: ('GE', 'GlobalCell Ltd'),
8: ('GE', 'JSC Silknet'),
9: ('GE', 'Gmobile Ltd'),
10: ('GE', 'Premium Net International SRL Ltd'),
11: ('GE', 'Mobilive Ltd')},
283: {1: ('AM', 'ArmenTel'),
4: ('AM', 'Karabakh Telecom'),
5: ('AM', 'K Telecom CJSC'),
10: ('AM', 'Ucom LLC')},
284: {1: ('BG', 'A1 Bulgaria'),
3: ('BG', 'BTC'),
5: ('BG', 'Telenor (Bulgaria)'),
7: ('BG', 'НАЦИОНАЛНА КОМПАНИЯ ЖЕЛЕЗОПЪТНА ИНФРАСТРУКТУРА'),
11: ('BG', 'Bulsatcom'),
13: ('BG', 'Max Telecom LTD')},
286: {1: ('TR', 'Turkcell Iletisim Hizmetleri A.S.'),
2: ('TR', 'Vodafone Turkey'),
3: ('TR', 'Türk Telekom')},
288: {1: ('FO', 'Faroese Telecom'), 2: ('FO', 'Vodafone Faroe Islands')},
290: {1: ('GL', 'TELE Greenland A/S'), 2: ('GL', 'inu:it a/s')},
292: {1: ('SM', 'San Marino Telecom')},
293: {10: ('SI', 'SŽ - Infrastruktura, d.o.o.'),
20: ('SI', 'COMPATEL Ltd'),
40: ('SI', 'A1 Slovenija'),
41: ('SI', 'Telekom Slovenije'),
64: ('SI', 'T-2 d.o.o.'),
70: ('SI', 'Tušmobil d.o.o.')},
294: {1: ('MK', 'Makedonski Telekom'),
2: ('MK', 'ONE.VIP DOO'),
3: ('MK', 'ONE.VIP DOO'),
4: ('MK', 'Lycamobile LLC'),
11: ('MK', 'MOBIK TELEKOMUNIKACII DOOEL Skopje')},
295: {1: ('LI', 'Swisscom Schweiz AG'),
2: ('LI', 'Salt Liechtenstein AG'),
5: ('LI', 'Telecom Liechtenstein AG'),
6: ('LI', 'Cubic Telecom AG'),
7: ('LI', 'First Mobile AG'),
9: ('LI', 'EMnify GmbH'),
10: ('LI', 'Soracom LI Ltd.')},
297: {1: ('ME', 'Telenor Montenegro'),
2: ('ME', 'T-Mobile Montenegro LLC'),
3: ('ME', 'MTEL CG')},
302: {130: ('CA', 'Xplornet Communications'),
131: ('CA', 'Xplornet Communications'),
220: ('CA', 'Telus Mobility'),
221: ('CA', 'Telus Mobility'),
222: ('CA', 'Telus Mobility'),
250: ('CA', 'ALO Mobile Inc.'),
270: ('CA', 'Bragg Communications'),
290: ('CA', 'Airtel Wireless'),
300: ('CA', 'ECOTEL Inc.'),
320: ('CA', 'Rogers Communications'),
340: ('CA', 'Execulink'),
370: ('CA', 'Fido Solutions (Rogers Wireless)'),
380: ('CA', 'Keewaytinook Okimakanak Mobile'),
420: ('CA', 'A.B.C. Allen Business Communications Ltd.'),
480: ('CA', 'SSi Connexions'),
490: ('CA', 'Shaw Communications'),
491: ('CA', 'Shaw Communications'),
500: ('CA', 'Videotron'),
510: ('CA', 'Videotron'),
520: ('CA', 'Videotron'),
530: ('CA', 'Keewaytinook Okimakanak Mobile'),
540: ('CA', 'Rovvr Communications Inc.'),
560: ('CA', 'Lynx Mobility'),
570: ('CA', 'LightSquared'),
590: ('CA', 'Quadro Communications Co-op'),
610: ('CA', 'Bell Mobility'),
620: ('CA', 'ICE Wireless'),
630: ('CA', 'Bell Aliant'),
650: ('CA', 'Thunder Bay Telephone'),
655: ('CA', 'MTS Mobility'),
660: ('CA', 'Bell MTS'),
670: ('CA', 'CityWest'),
680: ('CA', 'SaskTel Mobility'),
690: ('CA', 'Bell Mobility'),
701: ('CA', 'MB Tel Mobility'),
710: ('CA', ''),
720: ('CA', 'Rogers Communications'),
730: ('CA', 'TerreStar Networks'),
750: ('CA', 'SaskTel Mobility'),
760: ('CA', 'Telus Mobility'),
770: ('CA', 'TNW Wireless Inc.'),
780: ('CA', 'SaskTel Mobility'),
790: ('CA', 'NetSet Communications'),
820: ('CA', 'Rogers Communications'),
860: ('CA', 'Telus Mobility'),
880: ('CA', 'Shared Telus, Bell, and SaskTel'),
940: ('CA', 'Wightman Telecom'),
990: ('CA', '')},
308: {1: ('PM', 'St. Pierre-et-Miquelon Télécom'), 2: ('PM', 'GLOBALTEL')},
310: {4: ('US', 'Verizon Wireless'),
5: ('US', 'Verizon Wireless'),
6: ('US', 'Verizon Wireless'),
12: ('US', 'Verizon Wireless'),
13: ('US', 'Verizon Wireless'),
14: ('US', ''),
15: ('US', 'Southern Communications'),
20: ('US', 'Union Telephone Company'),
30: ('US', 'AT&T Mobility'),
32: ('US', 'IT&E Overseas, Inc'),
33: ('US', 'Guam Telephone Authority'),
34: ('US', 'Airpeak'),
35: ('US', 'ETEX Communications, LP'),
50: ('US', 'Alaska Communications'),
53: ('US', 'Sprint'),
54: ('US', 'Alltel US'),
59: ('US', ''),
60: ('US', 'Consolidated Telcom'),
66: ('US', 'U.S. Cellular'),
70: ('US', 'AT&T Mobility'),
80: ('US', 'AT&T Mobility'),
90: ('US', 'AT&T Mobility'),
100: ('US', 'New Mexico RSA 4 East LP'),
110: ('US', 'PTI Pacifica Inc.'),
120: ('US', 'Sprint Corporation'),
130: ('US', 'Carolina West Wireless'),
140: ('US', 'Teleguam Holdings, LLC'),
150: ('US', 'AT&T Mobility'),
160: ('US', 'T-Mobile US'),
170: ('US', 'AT&T Mobility'),
180: ('US', 'West Central Wireless'),
190: ('US', 'Alaska Wireless Communications, LLC'),
260: ('US', 'T-Mobile USA'),
320: ('US', 'Smith Bagley, Inc.'),
330: ('US', 'Wireless Partners, LLC'),
340: ('US', 'Limitless Mobile, LLC'),
360: ('US', 'Cellular Network Partnership'),
370: ('US', 'NTT Docomo Pacific'),
390: ('US', 'TX-11 Acquisition, LLC'),
400: ('US', 'Wave Runner LLC'),
410: ('US', 'AT&T Mobility'),
430: ('US', 'GCI Communications Corp.'),
440: ('US', 'Numerex'),
450: ('US', 'Viaero Wireless'),
460: ('US', 'NewCore Wireless LLC'),
470: ('US', 'Shenandoah Telecommunications Company'),
480: ('US', 'Wave Runner LLC'),
490: ('US', 'T-Mobile'),
500: ('US', 'Public Service Cellular Inc.'),
510: ('US', 'Nsighttel Wireless LLC'),
520: ('US', 'Transaction Network Services'),
530: ('US', 'Iowa Wireless Services LLC'),
540: ('US', 'Hilliary Communications'),
550: ('US', 'Syniverse Technologies'),
570: ('US', 'TX-10, LLC and Central Louisiana Cellular, LLC (MTPCS)'),
580: ('US', 'Inland Cellular Telephone Company'),
590: ('US', 'Verizon Wireless'),
600: ('US', 'New-Cell Inc.'),
620: ('US', 'Nsighttel Wireless LLC'),
640: ('US', 'Numerex'),
650: ('US', 'Jasper Technologies'),
670: ('US', 'AT&T Mobility'),
680: ('US', 'AT&T Mobility'),
690: ('US', 'Limitless Mobile, LLC'),
700: ('US', 'Cross Valiant Cellular Partnership'),
710: ('US', 'Arctic Slope Telephone Association Cooperative'),
720: ('US', 'Syniverse Technologies'),
730: ('US', 'U.S. Cellular'),
740: ('US', 'Viaero Wireless'),
750: ('US', 'East Kentucky Network, LLC'),
770: ('US', 'Iowa Wireless Services'),
780: ('US', 'D. D. Inc.'),
790: ('US', 'PinPoint Communications Inc.'),
820: ('US', 'Verizon Wireless'),
840: ('US', 'Telecom North America Mobile, Inc.'),
850: ('US', 'Aeris Communications, Inc.'),
860: ('US', 'TX RSA 15B2, LP'),
880: ('US', 'Advantage Cellular Systems, Inc.'),
890: ('US', 'Verizon Wireless'),
900: ('US', 'Cable & Communications Corporation'),
910: ('US', 'Verizon Wireless'),
920: ('US', 'James Valley Wireless, LLC'),
930: ('US', 'Copper Valley Wireless'),
940: ('US', 'Tyntec Inc.'),
950: ('US', 'AT&T Mobility'),
960: ('US', 'UBET Wireless'),
970: ('US', 'Globalstar'),
990: ('US', 'Worldcall Interconnect Inc.')},
311: {0: ('US', 'Mid-Tex Cellular Ltd.'),
10: ('US', 'Chariton Valley Communications'),
12: ('US', 'Verizon Wireless'),
20: ('US', 'Missouri RSA 5 Partnership'),
30: ('US', 'Americell PA 3 Partnership'),
40: ('US', 'Commnet Wireless'),
50: ('US', 'Thumb Cellular LP'),
60: ('US', 'Space Data Corporation'),
70: ('US', 'AT&T Mobility'),
80: ('US', 'Pine Telephone Company'),
90: ('US', 'AT&T Mobility'),
100: ('US', 'Nex-Tech Wireless'),
110: ('US', 'Verizon Wireless'),
120: ('US', 'Wave Runner LLC'),
140: ('US', 'Cross Telephone Company'),
150: ('US', 'Wilkes Cellular'),
170: ('US', 'Broadpoint Inc.'),
190: ('US', 'AT&T Mobility'),
210: ('US', 'Telnyx LLC'),
220: ('US', 'U.S. Cellular'),
230: ('US', 'Cellular South Inc.'),
240: ('US', 'Cordova Wireless'),
250: ('US', 'Wave Runner LLC'),
270: ('US', 'Verizon Wireless'),
271: ('US', 'Verizon Wireless'),
272: ('US', 'Verizon Wireless'),
273: ('US', 'Verizon Wireless'),
274: ('US', 'Verizon Wireless'),
275: ('US', 'Verizon Wireless'),
276: ('US', 'Verizon Wireless'),
277: ('US', 'Verizon Wireless'),
278: ('US', 'Verizon Wireless'),
279: ('US', 'Verizon Wireless'),
280: ('US', 'Verizon Wireless'),
281: ('US', 'Verizon Wireless'),
282: ('US', 'Verizon Wireless'),
283: ('US', 'Verizon Wireless'),
284: ('US', 'Verizon Wireless'),
285: ('US', 'Verizon Wireless'),
286: ('US', 'Verizon Wireless'),
287: ('US', 'Verizon Wireless'),
288: ('US', 'Verizon Wireless'),
289: ('US', 'Verizon Wireless'),
290: ('US', 'PinPoint Communications Inc.'),
320: ('US', 'Commnet Wireless'),
330: ('US', 'Bug Tussel Wireless LLC'),
340: ('US', 'Illinois Valley Cellular '),
350: ('US', 'Sagebrush Cellular, Inc.'),
370: ('US', 'General Communication Inc.'),
380: ('US', 'New Dimension Wireless Ltd.'),
390: ('US', 'Verizon Wireless'),
400: ('US', ''),
410: ('US', 'Iowa RSA No. 2 LP'),
420: ('US', 'Northwest Missouri Cellular LP'),
430: ('US', 'RSA 1 LP'),
440: ('US', 'Bluegrass Cellular LLC'),
450: ('US', 'Panhandle Telecommunication Systems Inc.'),
460: ('US', 'Electric Imp Inc.'),
470: ('US', 'Vitelcom Cellular Inc.'),
480: ('US', 'Verizon Wireless'),
490: ('US', 'Sprint Corporation'),
530: ('US', 'NewCore Wireless LLC'),
550: ('US', 'Commnet Midwest LLC'),
560: ('US', 'OTZ Communications, Inc.'),
580: ('US', 'U.S. Cellular'),
590: ('US', 'Verizon Wireless'),
600: ('US', 'Limitless Mobile, LLC'),
630: ('US', 'Cellular South Inc.'),
640: ('US', 'Standing Rock Telecommunications'),
650: ('US', 'United Wireless'),
660: ('US', 'MetroPCS Wireless Inc.'),
670: ('US', 'Pine Belt Cellular Inc.'),
680: ('US', 'GreenFly LLC'),
690: ('US', 'TeleBEEPER of New Mexico'),
700: ('US', 'Midwest Network Solutions Hub LLC'),
710: ('US', 'Northeast Wireless Networks LLC'),
730: ('US', 'Proximiti Mobility Inc.'),
740: ('US', 'Telalaska Cellular'),
750: ('US', 'Flat Wireless LLC'),
770: ('US', 'Altiostar Networks, Inc.'),
790: ('US', 'Coleman County Telephone Cooperative, Inc.'),
800: ('US', 'Bluegrass Cellular LLC'),
810: ('US', 'Bluegrass Cellular LLC'),
820: ('US', 'Sonus Networks'),
830: ('US', 'Thumb Cellular LP'),
840: ('US', 'Nsight Spectrum LLC'),
850: ('US', 'Nsight Spectrum LLC'),
860: ('US', 'Uintah Basin Electronic Telecommunications '),
870: ('US', 'Sprint Corporation'),
880: ('US', 'Sprint Corporation'),
890: ('US', 'Globecomm Network Services Corporation '),
900: ('US', 'GigSky'),
910: ('US', 'SI Wireless LLC'),
920: ('US', 'Missouri RSA 5 Partnership'),
950: ('US', 'Enhanced Telecommmunications Corp.'),
960: ('US', 'Lycamobile USA Inc.'),
970: ('US', 'Big River Broadband, LLC'),
980: ('US', 'LigTel Communications'),
990: ('US', 'VTel Wireless')},
312: {10: ('US', 'Chariton Valley Communication Corporation, Inc'),
20: ('US', 'Infrastructure Networks, LLC'),
30: ('US', 'Cross Wireless'),
40: ('US', 'Custer Telephone Co-op (CTCI)'),
60: ('US', 'CoverageCo'),
70: ('US', 'Adams Networks Inc'),
80: ('US', 'South Georgia Regional Information Technology Authority'),
90: ('US', 'AT&T Mobility'),
100: ('US', 'ClearSky Technologies, Inc.'),
120: ('US', 'East Kentucky Network, LLC'),
130: ('US', 'East Kentucky Network, LLC'),
150: ('US', 'Northwest Missouri Cellular LP'),
160: ('US', 'RSA1 Limited Partnership'),
170: ('US', 'Iowa RSA No. 2 LP'),
180: ('US', 'Limiteless Mobile LLC'),
190: ('US', 'Sprint Corporation'),
210: ('US', 'Aspenta International, Inc.'),
220: ('US', 'Chariton Valley Communication Corporation, Inc.'),
240: ('US', 'Sprint Corporation'),
250: ('US', 'Sprint Corporation'),
260: ('US', 'Central LTE Holdings'),
270: ('US', 'Cellular Network Partnership'),
280: ('US', 'Cellular Network Partnership'),
290: ('US', 'Uintah Basin Electronic Telecommunications '),
300: ('US', 'Telecom North America Mobile, Inc.'),
310: ('US', 'Clear Stream Communications, LLC'),
320: ('US', 'RTC Communications LLC'),
330: ('US', 'Nemont Communications, Inc.'),
340: ('US', 'Matanuska Telephone Association, Inc.'),
350: ('US', 'Triangle Communication System Inc.'),
360: ('US', 'Wes-Tex Telecommunications, Ltd.'),
370: ('US', 'Commnet Wireless'),
380: ('US', 'Copper Valley Wireless'),
390: ('US', 'FTC Communications LLC'),
400: ('US', 'Mid-Rivers Telephone Cooperative'),
410: ('US', 'Eltopia Communications, LLC'),
420: ('US', 'Nex-Tech Wireless'),
430: ('US', 'Silver Star Communications'),
440: ('US', 'Consolidated Telcom'),
450: ('US', 'Cable & Communications Corporation'),
460: ('US', 'Ketchikan Public Utilities (KPU)'),
470: ('US', 'Carolina West Wireless'),
480: ('US', 'Sagebrush Cellular, Inc.'),
490: ('US', 'TrustComm, Inc.'),
510: ('US', 'WUE Inc.'),
530: ('US', 'Sprint Corporation'),
550: ('US', 'Great Plains Communications, Inc.'),
570: ('US', 'Buffalo-Lake Erie Wireless Systems Co., LLC'),
580: ('US', 'Morgan, Lewis & Bockius LLP'),
590: ('US', 'Northern Michigan University'),
600: ('US', 'Sagebrush Cellular, Inc.'),
620: ('US', 'GlobeTouch Inc.'),
630: ('US', 'NetGenuity, Inc.'),
650: ('US', '365 Wireless LLC'),
670: ('US', 'AT&T Mobility'),
680: ('US', 'AT&T Mobility'),
690: ('US', 'TGS, LLC'),
700: ('US', 'Wireless Partners, LLC'),
710: ('US', 'Great North Woods Wireless LLC'),
720: ('US', 'Southern Communications Services'),
730: ('US', 'Triangle Communication System Inc.'),
740: ('US', 'KDDI America, Inc.'),
750: ('US', 'Artemis Networks LLC'),
760: ('US', 'Arctic Slope Telephone Association Cooperative'),
770: ('US', 'Verizon Wireless'),
780: ('US', 'Redzone Wireless'),
790: ('US', 'Gila Electronics'),
800: ('US', 'Cirrus Core Networks'),
810: ('US', 'Bristol Bay Telephone Cooperative'),
820: ('US', 'Santel Communications Cooperative, Inc.'),
830: ('US', 'Kings County Office of Education'),
840: ('US', 'South Georgia Regional Information Technology Authority'),
850: ('US', 'Onvoy Spectrum, LLC'),
860: ('US', 'Flat Wireless, LLC'),
870: ('US', 'GigSky Mobile, LLC'),
880: ('US', 'Albemarle County Public Schools'),
890: ('US', 'Circle Gx'),
900: ('US', 'Flat West Wireless, LLC'),
910: ('US', 'East Kentucky Network, LLC'),
920: ('US', 'Northeast Wireless Networks LLC'),
930: ('US', 'Hewlett-Packard Communication Services, LLC'),
940: ('US', 'Webformix'),
950: ('US', 'Custer Telephone Co-op (CTCI)'),
960: ('US', 'M&A Technology, Inc.'),
970: ('US', 'IOSAZ Intellectual Property LLC'),
980: ('US', 'Mark Twain Communications Company'),
990: ('US', 'Premier Holdings LLC')},
313: {0: ('US', 'Tennessee Wireless '),
10: ('US', 'Cross Wireless LLC'),
20: ('US', 'Cambridge Telephone Company Inc.'),
30: ('US', 'Eagle Telephone System Inc.'),
40: ('US', 'Nucla-Naturita Telephone Company'),
60: ('US', 'Country Wireless'),
70: ('US', 'Midwest Network Solutions Hub LLC'),
80: ('US', 'Speedwavz LLP'),
90: ('US', 'Vivint Wireless, Inc.'),
100: ('US', '700\xa0MHz Public Safety Broadband'),
200: ('US', 'Mercury Network Corporation'),
210: ('US', 'AT&T Mobility'),
220: ('US', 'Custer Telephone Co-op (CTCI)'),
230: ('US', 'Velocity Communications Inc.'),
240: ('US', 'Fundamental Holdings, Corp.'),
250: ('US', 'Imperial County Office of Education'),
260: ('US', 'Expeto Wireless Inc.'),
270: ('US', 'Blackstar Management'),
280: ('US', 'King Street Wireless, LP'),
290: ('US', 'Gulf Coast Broadband LLC'),
300: ('US', 'Cambio WiFi of Delmarva, LLC'),
310: ('US', 'CAL.NET, Inc.'),
320: ('US', 'Paladin Wireless'),
330: ('US', 'CenturyTel Broadband Services LLC'),
340: ('US', 'Dish Network'),
350: ('US', 'Dish Network'),
360: ('US', 'Dish Network'),
370: ('US', 'Red Truck Wireless, LLC'),
380: ('US', 'OptimERA Inc.'),
390: ('US', 'Altice USA Wireless, Inc.'),
400: ('US', 'Texoma Communications, LLC'),
410: ('US', 'pdvWireless')},
316: {11: ('US', 'Southern Communications Services')},
330: {0: ('PR', 'PR Wireless'),
110: ('PR', 'América Móvil'),
120: ('PR', 'PR Wireless')},
334: {1: ('MX', 'Comunicaciones Digitales Del Norte, S.A. de C.V.'),
10: ('MX', 'AT&T Mexico'),
20: ('MX', 'América Móvil'),
30: ('MX', 'Telefónica'),
50: ('MX', 'AT&T Mexico'),
60: ('MX', 'Servicios de Acceso Inalambrico, S.A. de C.V.'),
66: ('MX', 'Telefonos de México, S.A.B. de C.V.'),
70: ('MX', 'AT&T Mexico'),
80: ('MX', 'AT&T Mexico'),
90: ('MX', 'AT&T Mexico'),
140: ('MX', 'Altán Redes S.A.P.I. de C.V.')},
338: {40: ('JM', 'Symbiote Investment Limited'),
50: ('JM', 'Digicel (Turks & Caicos) Limited'),
110: ('JM', 'Cable & Wireless Communications'),
180: ('JM', 'Cable & Wireless Communications')},
340: {1: ('GP', 'Orange Caraïbe Mobiles'),
2: ('GP', 'Outremer Telecom'),
3: ('GP', 'UTS Caraïbe'),
8: ('GP', 'Dauphin Telecom'),
20: ('GP', 'DIGICEL Antilles Française Guyane')},
342: {600: ('BB', 'LIME (formerly known as Cable & Wireless)'),
750: ('BB', 'Digicel (Barbados) Limited'),
800: ('BB', 'Ozone Wireless Inc.')},
344: {30: ('AG', 'Antigua Public Utilities Authority'),
50: ('AG', 'Antigua Wireless Ventures Limited'),
920: ('AG', 'Cable & Wireless Caribbean Cellular (Antigua) Limited'),
930: ('AG', 'AT&T Wireless')},
346: {50: ('KY', 'Digicel Cayman Ltd.'),
140: ('KY', 'Cable & Wireless (Cayman Islands) Limited')},
348: {170: ('VG', 'Cable & Wireless'),
370: ('VG', 'BVI Cable TV Ltd'),
570: ('VG', 'Caribbean Cellular Telephone'),
770: ('VG', 'Digicel (BVI) Limited')},
350: {0: ('BM', 'Bermuda Digital Communications Ltd.'),
1: ('BM', 'Telecommunications (Bermuda & West Indies) Ltd'),
2: ('BM', 'M3 Wireless'),
5: ('BM', 'Telecom Networks'),
11: ('BM', 'Deltronics')},
352: {30: ('GD', 'Digicel Grenada Ltd.'),
110: ('GD', 'Cable & Wireless Grenada Ltd.')},
354: {860: ('MS', 'Cable & Wireless')},
356: {50: ('KN', 'Wireless Ventures (St Kitts-Nevis) Limited'),
70: ('KN', 'UTS'),
110: ('KN', 'Cable & Wireless St. Kitts & Nevis Ltd')},
358: {110: ('LC', 'Cable & Wireless')},
360: {50: ('VC', 'Digicel (St. Vincent and the Grenadines) Limited'),
100: ('VC', ''),
110: ('VC', 'Cable & Wireless (St. Vincent & the Grenadines) Ltd')},
362: {31: ('CW', 'Eutel N.V.'),
33: ('CW', 'WICC N.V.'),
51: ('CW', 'Telcell N.V.'),
54: ('CW', 'East Caribbean Cellular'),
59: ('CW', 'United Telecommunication Service N.V. (UTS)'),
60: ('CW', 'United Telecommunication Service N.V. (UTS)'),
63: ('CW', 'CSC N.V.'),
68: ('CW', 'Curaçao Telecom N.V.'),
69: ('CW', 'Curaçao Telecom N.V.'),
74: ('CW', 'PCS N.V.'),
76: ('CW', 'Antiliano Por N.V.'),
78: ('CW', 'Telefonia Bonairiano N.V.'),
91: ('CW', 'United Telecommunication Service N.V. (UTS)'),
94: ('CW', 'Bòbò Frus N.V.')},
363: {1: ('AW', 'Servicio di Telecomunicacion di Aruba'),
2: ('AW', 'Digicel Aruba')},
364: {39: ('BS', 'The Bahamas Telecommunications Company Ltd (BaTelCo)'),
49: ('BS', 'Cable Bahamas Ltd')},
365: {10: ('AI', 'Weblinks Limited'), 840: ('AI', 'Cable & Wireless')},
366: {20: ('DM', 'Digicel Group Limited'), 110: ('DM', 'Cable & Wireless')},
368: {1: ('CU', 'Empresa de Telecomunicaciones de Cuba, SA')},
370: {1: ('DO', 'Altice Group'),
2: ('DO', 'Compañía Dominicana de Teléfonos'),
3: ('DO', 'Altice Group'),
4: ('DO', 'Trilogy Dominicana, S.A.'),
5: ('DO', 'WIND Telecom, S.A')},
372: {2: ('HT', 'Unigestion Holding S.A.'), 3: ('HT', 'NATCOM S.A.')},
374: {12: ('TT', 'TSTT'), 130: ('TT', 'Digicel (Trinidad & Tobago) Limited')},
376: {350: ('TC', 'Cable & Wireless West Indies Ltd (Turks & Caicos)'),
352: ('TC', 'Cable & Wireless West Indies Ltd (Turks & Caicos)'),
360: ('TC', 'Cable & Wireless West Indies Ltd (Turks & Caicos)')},
400: {1: ('AZ', ''),
2: ('AZ', ''),
3: ('AZ', 'CATEL'),
4: ('AZ', 'Azerfon'),
5: ('AZ',
'Special State Protection Service of the Republic of Azerbaijan'),
6: ('AZ', 'Nakhtel LLC')},
401: {1: ('KZ', 'KaR-Tel LLP'),
2: ('KZ', 'Kcell JSC'),
7: ('KZ', 'Altel'),
8: ('KZ', ''),
77: ('KZ', 'MTS')},
402: {11: ('BT', 'B-Mobile / Bhutan Telecom Ltd.'),
77: ('BT', 'Tashi InfoComm Limited')},
404: {1: ('IN', 'Haryana'),
2: ('IN', 'Punjab'),
3: ('IN', 'Himachal Pradesh'),
4: ('IN', 'Delhi & NCR'),
5: ('IN', 'Gujarat'),
7: ('IN', 'Andhra Pradesh and Telangana'),
9: ('IN', 'Assam'),
10: ('IN', 'Delhi & NCR'),
11: ('IN', 'Delhi & NCR'),
12: ('IN', 'Haryana'),
13: ('IN', 'Andhra Pradesh and Telangana'),
14: ('IN', 'Punjab'),
15: ('IN', 'Uttar Pradesh (East)'),
16: ('IN', 'North East'),
17: ('IN', 'West Bengal'),
18: ('IN', 'Himachal Pradesh'),
19: ('IN', 'Kerala'),
20: ('IN', 'Mumbai'),
21: ('IN', 'Mumbai'),
22: ('IN', 'Maharashtra & Goa'),
24: ('IN', 'Gujarat'),
25: ('IN', 'Bihar'),
27: ('IN', 'Maharashtra & Goa'),
28: ('IN', 'Orissa'),
29: ('IN', 'Assam'),
30: ('IN', 'Kolkata'),
31: ('IN', 'Kolkata'),
34: ('IN', 'Haryana'),
35: ('IN', 'Himachal Pradesh'),
36: ('IN', 'Bihar & Jharkhand'),
37: ('IN', 'Jammu & Kashmir'),
38: ('IN', 'Assam'),
40: ('IN', 'Chennai'),
41: ('IN', 'Chennai'),
42: ('IN', 'Tamil Nadu'),
43: ('IN', 'Tamil Nadu'),
44: ('IN', 'Karnataka'),
45: ('IN', 'Karnataka'),
46: ('IN', 'Kerala'),
48: ('IN', 'Unknown'),
49: ('IN', 'Andhra Pradesh and Telangana'),
50: ('IN', 'North East'),
51: ('IN', 'Himachal Pradesh'),
52: ('IN', 'Orissa'),
53: ('IN', 'Punjab'),
54: ('IN', 'Uttar Pradesh (West)'),
55: ('IN', 'Uttar Pradesh (East)'),
56: ('IN', 'Uttar Pradesh (West)'),
57: ('IN', 'Gujarat'),
58: ('IN', 'Madhya Pradesh & Chhattisgarh'),
59: ('IN', 'Rajasthan'),
60: ('IN', 'Rajasthan'),
62: ('IN', 'Jammu & Kashmir'),
64: ('IN', 'Chennai'),
66: ('IN', 'Maharashtra & Goa'),
67: ('IN', 'Madhya Pradesh & Chhattisgarh'),
68: ('IN', 'Delhi & NCR'),
69: ('IN', 'Mumbai'),
70: ('IN', 'Rajasthan'),
71: ('IN', 'Karnataka (Bangalore)'),
72: ('IN', 'Kerala'),
73: ('IN', 'Andhra Pradesh and Telangana'),
74: ('IN', 'West Bengal'),
75: ('IN', 'Bihar'),
76: ('IN', 'Orissa'),
77: ('IN', 'North East'),
78: ('IN', 'Madhya Pradesh & Chattishgarh'),
79: ('IN', 'Andaman Nicobar'),
80: ('IN', 'Tamil Nadu'),
81: ('IN', 'Kolkata'),
82: ('IN', 'Himachal Pradesh'),
83: ('IN', 'Kolkata'),
84: ('IN', 'Chennai'),
85: ('IN', 'West Bengal'),
86: ('IN', 'Karnataka'),
87: ('IN', 'Rajasthan'),
88: ('IN', 'Vodafone Punjab'),
89: ('IN', 'Uttar Pradesh (East)'),
90: ('IN', 'Maharashtra'),
91: ('IN', 'Kolkata'),
92: ('IN', 'Mumbai'),
93: ('IN', 'Madhya Pradesh'),
94: ('IN', 'Tamil Nadu'),
95: ('IN', 'Kerala'),
96: ('IN', 'Haryana'),
97: ('IN', 'Uttar Pradesh (West)'),
98: ('IN', 'Gujarat')},
405: {1: ('IN', 'Andhra Pradesh and Telangana'),
3: ('IN', 'Bihar'),
4: ('IN', 'Chennai'),
5: ('IN', 'Delhi & NCR'),
6: ('IN', 'Gujarat'),
7: ('IN', 'Haryana'),
8: ('IN', 'Himachal Pradesh'),
9: ('IN', 'Jammu & Kashmir'),
10: ('IN', 'Karnataka'),
11: ('IN', 'Kerala'),
12: ('IN', 'Kolkata'),
13: ('IN', 'Maharashtra & Goa'),
14: ('IN', 'Madhya Pradesh'),
15: ('IN', 'Mumbai'),
17: ('IN', 'Orissa'),
18: ('IN', 'Punjab'),
19: ('IN', 'Rajasthan'),
20: ('IN', 'Tamil Nadu'),
21: ('IN', 'Uttar Pradesh (East)'),
22: ('IN', 'Uttar Pradesh (West)'),
23: ('IN', 'West Bengal'),
25: ('IN', 'Andhra Pradesh and Telangana'),
26: ('IN', 'Assam'),
27: ('IN', 'Bihar/Jharkhand'),
28: ('IN', 'Chennai'),
29: ('IN', 'Delhi'),
30: ('IN', 'Gujarat'),
31: ('IN', 'Haryana'),
32: ('IN', 'Himachal Pradesh'),
33: ('IN', 'Jammu & Kashmir'),
34: ('IN', 'Karnataka'),
35: ('IN', 'Kerala'),
36: ('IN', 'Kolkata'),
37: ('IN', 'Maharashtra & Goa'),
38: ('IN', 'Madhya Pradesh'),
39: ('IN', 'Mumbai'),
41: ('IN', 'Orissa'),
42: ('IN', 'Punjab'),
43: ('IN', 'Rajasthan'),
44: ('IN', 'Tamil Nadu including Chennai'),
45: ('IN', 'Uttar Pradesh (E)'),
46: ('IN', 'Uttar Pradesh (W) & Uttarakhand'),
47: ('IN', 'West Bengal'),
51: ('IN', 'West Bengal'),
52: ('IN', 'Bihar & Jharkhand'),
53: ('IN', 'Orissa'),
54: ('IN', 'Uttar Pradesh (East)'),
55: ('IN', 'Jammu & Kashmir'),
56: ('IN', 'Assam'),
66: ('IN', 'Uttar Pradesh (West)'),
67: ('IN', 'West Bengal'),
70: ('IN', 'Bihar & Jharkhand'),
750: ('IN', 'Jammu & Kashmir'),
751: ('IN', 'Assam'),
752: ('IN', 'Bihar & Jharkhand'),
753: ('IN', 'Orissa'),
754: ('IN', 'Himachal Pradesh'),
755: ('IN', 'North East'),
756: ('IN', 'Madhya Pradesh & Chhattisgarh'),
799: ('IN', 'Mumbai'),
800: ('IN', 'Delhi & NCR'),
801: ('IN', 'Andhra Pradesh and Telangana'),
803: ('IN', 'Karnataka'),
804: ('IN', 'Maharashtra & Goa'),
805: ('IN', 'Mumbai'),
806: ('IN', 'Rajasthan'),
809: ('IN', 'Kerala'),
810: ('IN', 'Uttar Pradesh (East)'),
811: ('IN', 'Uttar Pradesh (West)'),
818: ('IN', 'Uttar Pradesh (West)'),
819: ('IN', 'Andhra Pradesh and Telangana'),
820: ('IN', 'Karnataka'),
821: ('IN', 'Kerala'),
822: ('IN', 'Kolkata'),
824: ('IN', 'Assam'),
827: ('IN', 'Gujarat'),
834: ('IN', 'Madhya Pradesh'),
840: ('IN', 'West Bengal'),
845: ('IN', 'Assam'),
846: ('IN', 'Jammu & Kashmir'),
847: ('IN', 'Karnataka'),
848: ('IN', 'Kolkata'),
849: ('IN', 'North East'),
850: ('IN', 'Orissa'),
851: ('IN', 'Punjab'),
852: ('IN', 'Tamil Nadu'),
853: ('IN', 'West Bengal'),
854: ('IN', 'Andhra Pradesh'),
855: ('IN', 'Assam'),
856: ('IN', 'Bihar'),
857: ('IN', 'Gujarat'),
858: ('IN', 'Haryana'),
859: ('IN', 'Himachal Pradesh'),
860: ('IN', 'Jammu & Kashmir'),
861: ('IN', 'Karnataka'),
862: ('IN', 'Kerala'),
863: ('IN', 'Madhya Pradesh'),
864: ('IN', 'Maharashtra'),
865: ('IN', 'North East'),
866: ('IN', 'Orissa'),
867: ('IN', 'Punjab'),
868: ('IN', 'Rajasthan'),
869: ('IN', 'Tamil Nadu (incl. Chennai)'),
870: ('IN', 'Uttar Pradesh (West)'),
871: ('IN', 'Uttar Pradesh (East)'),
872: ('IN', 'Delhi'),
873: ('IN', 'Kolkata'),
874: ('IN', 'Mumbai'),
875: ('IN', 'Assam'),
880: ('IN', 'West Bengal'),
881: ('IN', 'Assam'),
908: ('IN', 'Andhra Pradesh and Telangana'),
909: ('IN', 'Delhi'),
910: ('IN', 'Haryana'),
911: ('IN', 'Maharashtra'),
927: ('IN', 'Gujarat'),
929: ('IN', 'Maharashtra')},
410: {1: ('PK', 'Mobilink-PMCL'),
2: ('PK', 'PTCL'),
3: ('PK', 'Pakistan Telecommunication Mobile Ltd'),
4: ('PK', 'China Mobile'),
5: ('PK', 'SCO Mobile Ltd'),
6: ('PK', 'Telenor Pakistan'),
7: ('PK', 'WaridTel'),
8: ('PK', 'SCO Mobile Ltd')},
412: {1: ('AF', 'Afghan Wireless Communication Company'),
20: ('AF', 'Telecom Development Company Afghanistan Ltd.'),
40: ('AF', 'MTN Group Afghanistan'),
50: ('AF', 'Etisalat Afghanistan'),
55: ('AF', 'WASEL Afghanistan'),
80: ('AF', 'Afghan Telecom'),
88: ('AF', 'Afghan Telecom')},
413: {1: ('LK', 'Mobitel (Pvt) Ltd'),
2: ('LK', 'Dialog Axiata PLC'),
3: ('LK', 'Etisalat Lanka (Pvt) Ltd'),
4: ('LK', 'Lanka Bell Ltd'),
5: ('LK', 'Bharti Airtel Lanka (Pvt) Ltd'),
8: ('LK', 'Hutchison Telecommunications Lanka (Pvt) Ltd'),
11: ('LK', 'Dialog Broadband Networks (Pvt) Ltd'),
12: ('LK', 'Sri Lanka Telecom PLC'),
13: ('LK', 'Lanka Bell Ltd')},
414: {0: ('MM', 'Myanmar Posts and Telecommunications'),
1: ('MM', 'Myanmar Posts and Telecommunications'),
2: ('MM', 'Myanmar Posts and Telecommunications'),
3: ('MM', 'Myanmar Economic Corporation'),
4: ('MM', 'Myanmar Posts and Telecommunications'),
5: ('MM', 'Ooredoo Myanmar'),
6: ('MM', 'Telenor Myanmar'),
9: ('MM', 'Myanmar National Tele & Communication Co., Ltd'),
20: ('MM', 'Amara Communication Co.,Ltd'),
21: ('MM', 'Amara Communication Co.,Ltd')},
415: {1: ('LB', 'MIC 1'), 3: ('LB', 'MIC 2')},
416: {1: ('JO', 'Jordan Mobile Telephone Services'),
3: ('JO', 'Umniah Mobile Company'),
77: ('JO',
'Petra Jordanian Mobile Telecommunications Company (MobileCom)')},
417: {1: ('SY', 'Syriatel Mobile Telecom'),
2: ('SY', 'MTN Syria'),
9: ('SY', 'Syrian Telecom')},
418: {0: ('IQ', 'Asia Cell Telecommunications Company'),
5: ('IQ', 'Asia Cell Telecommunications Company'),
8: ('IQ', ''),
20: ('IQ', 'Zain Iraq'),
30: ('IQ', 'Zain Iraq'),
40: ('IQ', 'Telecom Ltd'),
45: ('IQ', 'Mobitel Co. Ltd.'),
62: ('IQ', 'Itisaluna Wireless CO.'),
92: ('IQ', 'Omnnea Wireless')},
419: {2: ('KW', 'Zain Kuwait'),
3: ('KW', 'National Mobile Telecommunications'),
4: ('KW', 'Kuwait Telecommunication Company')},
420: {1: ('SA', 'Saudi Telecom Company'),
3: ('SA', 'Etihad Etisalat Company'),
4: ('SA', 'Zain Saudi Arabia'),
5: ('SA', 'Virgin Mobile Saudi Arabia'),
21: ('SA', 'Saudi Railways GSM')},
421: {1: ('YE', ''),
2: ('YE', 'Spacetel Yemen'),
3: ('YE', 'Yemen Mobile'),
4: ('YE', 'Y')},
422: {2: ('OM', 'Oman Telecommunications Company'),
3: ('OM', 'Omani Qatari Telecommunications Company SAOC'),
4: ('OM', 'Oman Telecommunications Company')},
424: {2: ('AE', 'Emirates Telecom Corp'),
3: ('AE', 'Emirates Integrated Telecommunications Company')},
425: {1: ('IL', 'Partner Communications Company Ltd.'),
2: ('IL', 'Cellcom Israel Ltd.'),
3: ('IL', 'Pelephone Communications Ltd.'),
4: ('IL', 'Globalsim Ltd'),
5: ('IL', 'Palestine Cellular Communications, Ltd.'),
6: ('IL', 'Wataniya Palestine Mobile Telecommunications Company'),
7: ('IL', 'Hot Mobile Ltd.'),
8: ('IL', 'Golan Telecom Ltd'),
9: ('IL', 'Marathon 018 Xphone Ltd.'),
11: ('IL', '365 Telecom'),
12: ('IL', 'Free Telecom'),
13: ('IL', 'Ituran Cellular Communications'),
14: ('IL', 'Alon Cellular Ltd.'),
15: ('IL', 'Home Cellular'),
16: ('IL', 'Rami Levy Communications Ltd.'),
17: ('IL', 'Gale Phone'),
18: ('IL', 'Cellact Communications Ltd.'),
19: ('IL', 'Azi Communications Ltd.'),
20: ('IL', 'Bezeq The Israeli Telecommunication Corp Ltd.'),
21: ('IL', 'B.I.P. Communications Ltd.'),
23: ('IL', 'Beezz Communication Solutions Ltd.'),
24: ('IL', 'Partner Communications Company Ltd.'),
26: ('IL', 'LB Annatel Ltd.'),
28: ('IL', 'PHI Networks'),
29: ('IL', 'CG Networks')},
426: {1: ('BH', 'Bahrain Telecommunications Company'),
2: ('BH', 'Zain Bahrain'),
3: ('BH', 'Civil Aviation Authority'),
4: ('BH', 'Viva Bahrain'),
5: ('BH', 'Bahrain Telecommunications Company')},
427: {1: ('QA', 'ooredoo'),
2: ('QA', 'Vodafone Qatar'),
5: ('QA', 'Ministry of Interior'),
6: ('QA', 'Ministry of Interior')},
428: {88: ('MN', 'Unitel LLC'),
91: ('MN', 'Skytel LLC'),
98: ('MN', 'G-Mobile LLC'),
99: ('MN', 'Mobicom Corporation')},
429: {1: ('NP', 'Nepal Telecom (NDCL)'),
2: ('NP', 'Ncell Pvt. Ltd.'),
3: ('NP', 'United Telecom Limited'),
4: ('NP', 'Smart Telecom Pvt. Ltd. (STPL)')},
432: {1: ('IR', 'Ertebatat Iran'),
2: ('IR', 'Azartel Mobile'),
8: ('IR', 'Shatel Mobile'),
10: ('IR', 'Samantel Mobile'),
11: ('IR', 'Mobile Communications Company of Iran (MCI)'),
12: ('IR',
'Dadeh Dostar asr Novin p.j.s. co & Information Technology Company '
'of Iran'),
14: ('IR', 'Telecommunication Kish Company'),
19: ('IR', 'Mobile Telecommunications Company of Esfahan'),
20: ('IR', 'Social Security Investment Co.'),
21: ('IR', 'Social Security Investment Co.'),
32: ('IR', 'TCI of Iran and Iran Mobin'),
35: ('IR', 'MTN Irancell Telecommunications Services Company'),
40: ('IR', 'Ertebatat Mobinnet'),
50: ('IR', 'Arya Resaneh Tadbir'),
70: ('IR', 'Telephone Communications Company of Iran'),
71: ('IR', 'Telephone Communications Company of Iran'),
90: ('IR', 'IRAPHONE GHESHM of Iran'),
93: ('IR', 'Iraphone'),
99: ('IR', 'TCI of Iran and Rightel')},
434: {3: ('UZ', 'Uzbektelekom'),
4: ('UZ', 'Unitel LLC'),
5: ('UZ', 'Coscom'),
6: ('UZ', 'RUBICON WIRELESS COMMUNICATION'),
7: ('UZ', 'Universal Mobile Systems'),
8: ('UZ', 'Uzbektelekom')},
436: {1: ('TJ', 'JV Somoncom'),
2: ('TJ', 'Indigo Tajikistan'),
3: ('TJ', 'TT Mobile'),
4: ('TJ', 'Babilon-Mobile'),
5: ('TJ', 'Tacom'),
10: ('TJ', 'Babilon-T'),
12: ('TJ', 'Indigo')},
437: {1: ('KG', 'Sky Mobile LLC'),
3: ('KG', '7 Mobile'),
5: ('KG', 'Alfa Telecom CJSC'),
9: ('KG', 'NurTelecom LLC'),
10: ('KG', 'Saima Telecom'),
11: ('KG', 'iTel')},
438: {1: ('TM', 'MTS Turkmenistan'),
2: ('TM', 'Altyn Asyr'),
3: ('TM', 'AŞTU')},
440: {0: ('JP', 'SoftBank Corp.'),
1: ('JP', 'UQ Communications Inc.'),
2: ('JP', 'Hanshin Cable Engineering Co., Ltd.'),
3: ('JP', 'Internet Initiative Japan Inc.'),
4: ('JP', 'Japan Radio Company, Ltd.'),
5: ('JP', 'Wireless City Planning Inc.'),
6: ('JP', 'SAKURA Internet Inc.'),
7: ('JP', 'LTE-X, Inc.'),
10: ('JP', 'NTT DoCoMo, Inc.'),
20: ('JP', 'SoftBank Corp.'),
21: ('JP', 'SoftBank Corp.'),
50: ('JP', 'KDDI Corporation'),
51: ('JP', 'KDDI Corporation'),
52: ('JP', 'KDDI Corporation'),
53: ('JP', 'KDDI Corporation'),
54: ('JP', 'KDDI Corporation'),
70: ('JP', 'KDDI Corporation'),
71: ('JP', 'KDDI Corporation'),
72: ('JP', 'KDDI Corporation'),
73: ('JP', 'KDDI Corporation'),
74: ('JP', 'KDDI Corporation'),
75: ('JP', 'KDDI Corporation'),
76: ('JP', 'KDDI Corporation'),
78: ('JP', 'Okinawa Cellular Telephone'),
91: ('JP',
'Tokyo Organising Committee of the Olympic and Paralympic Games')},
441: {0: ('JP', 'Wireless City Planning Inc.'),
1: ('JP', 'SoftBank Corp.'),
10: ('JP', 'UQ Communications Inc.')},
450: {1: ('KR', 'Globalstar Asia Pacific'),
2: ('KR', 'KT'),
4: ('KR', 'KT'),
5: ('KR', 'SK Telecom'),
6: ('KR', 'LG Telecom'),
7: ('KR', 'KT'),
8: ('KR', 'KT'),
11: ('KR', 'Korea Cable Telecom'),
12: ('KR', 'SK Telecom')},
452: {1: ('VN', 'Vietnam Mobile Telecom Services Company'),
2: ('VN', 'Vietnam Telecom Services Company'),
4: ('VN', 'Viettel Telecom'),
5: ('VN', 'Hanoi Telecom'),
7: ('VN', 'GTEL Mobile JSC')},
454: {0: ('HK', 'CSL Limited'),
1: ('HK', 'CITIC Telecom 1616'),
2: ('HK', 'CSL Limited'),
3: ('HK', 'Hutchison Telecom'),
4: ('HK', 'Hutchison Telecom'),
6: ('HK', 'SmarTone Mobile Communications Limited'),
7: ('HK', 'China Unicom (Hong Kong) Limited'),
8: ('HK', 'Truphone Limited'),
9: ('HK', 'China Motion Telecom'),
11: ('HK', 'China-Hong Kong Telecom'),
12: ('HK', 'China Mobile Hong Kong Company Limited'),
13: ('HK', 'China Mobile Hong Kong Company Limited'),
14: ('HK', 'Hutchison Telecom'),
15: ('HK', 'SmarTone Mobile Communications Limited'),
16: ('HK', 'PCCW'),
17: ('HK', 'SmarTone Mobile Communications Limited'),
19: ('HK', 'PCCW-HKT'),
20: ('HK', 'PCCW-HKT'),
21: ('HK', '21Vianet Mobile Ltd.'),
22: ('HK', '263 Mobile Communications (HongKong) Limited'),
24: ('HK', 'Multibyte Info Technology Ltd'),
25: ('HK', 'Hong Kong Government'),
26: ('HK', 'Hong Kong Government'),
29: ('HK', 'PCCW-HKT'),
30: ('HK', 'China Data Enterprises Ltd'),
31: ('HK', 'China Telecom Global Limited'),
32: ('HK', 'Hong Kong Broadband Network Ltd'),
35: ('HK', 'Webbing Hong Kong Ltd')},
455: {0: ('MO', 'Smartone – Comunicações Móveis, S.A.'),
1: ('MO', 'Companhia de Telecomunicações de Macau, S.A.R.L.'),
2: ('MO', 'China Telecom (Macau) Company Limited'),
3: ('MO', 'Hutchison Telephone (Macau), Limitada'),
4: ('MO', 'Companhia de Telecomunicações de Macau, S.A.R.L.'),
5: ('MO', 'Hutchison Telephone (Macau), Limitada'),
6: ('MO', 'Smartone – Comunicações Móveis, S.A.'),
7: ('MO', 'China Telecom (Macau) Limitada')},
456: {1: ('KH', 'CamGSM / The Royal Group'),
2: ('KH', 'Smart Axiata Co. Ltd'),
3: ('KH', 'Cambodia Advance Communications Co. Ltd'),
4: ('KH', 'Cambodia Advance Communications Co. Ltd'),
5: ('KH', 'Smart Axiata Co. Ltd'),
6: ('KH', 'Smart Axiata Co. Ltd'),
8: ('KH', 'Viettel'),
9: ('KH', 'Viettel'),
11: ('KH', 'SEATEL Cambodia'),
18: ('KH', 'The Royal Group')},
457: {1: ('LA', 'Lao Telecom'),
2: ('LA', 'Enterprise of Telecommunications Lao'),
3: ('LA', 'Star Telecom Co., Ltd'),
8: ('LA', 'VimpelCom Lao Ltd')},
460: {0: ('CN', 'China Mobile'),
1: ('CN', 'China Unicom'),
3: ('CN', 'China Telecom'),
4: ('CN', 'Global Star Satellite'),
8: ('CN', 'China Mobile'),
9: ('CN', 'China Unicom'),
11: ('CN', 'China Telecom'),
20: ('CN', 'China Tietong')},
466: {1: ('TW', 'Far EasTone Telecommunications Co Ltd'),
2: ('TW', 'Far EasTone Telecommunications Co Ltd'),
3: ('TW', 'Far EasTone Telecommunications Co Ltd'),
5: ('TW', 'Asia Pacific Telecom'),
6: ('TW', 'Far EasTone Telecommunications Co Ltd'),
9: ('TW', 'Vmax Telecom'),
10: ('TW', 'Global Mobile Corp.'),
11: ('TW', 'LDTA/Chunghwa Telecom'),
12: ('TW', 'Ambit Microsystems'),
88: ('TW', 'Far EasTone Telecommunications Co Ltd'),
89: ('TW', 'Taiwan Star Telecom'),
90: ('TW', 'Taiwan Star Telecom'),
92: ('TW', 'Chunghwa Telecom'),
97: ('TW', 'Taiwan Mobile Co. Ltd')},
467: {5: ('KR', 'Cheo Technology Jv Company'),
6: ('KR', 'Cheo Technology Jv Company')},
470: {1: ('BD', 'Grameenphone Ltd.'),
2: ('BD', 'Axiata Bangladesh Ltd.'),
3: ('BD', 'Banglalink Digital Communications Ltd.'),
4: ('BD', 'Teletalk Bangladesh Limited'),
5: ('BD', 'Pacific Bangladesh Telecom Limited'),
7: ('BD', 'Bharti Airtel Bangladesh Ltd.'),
9: ('BD', 'Bangladesh Internet Exchange Limited (BIEL)'),
10: ('BD', 'Banglalion Communications Ltd.')},
472: {1: ('MV', 'Dhivehi Raajjeyge Gulhun'),
2: ('MV', 'Wataniya Telecom Maldives')},
502: {10: ('MY', 'Maxis, DiGi, Celcom, XOX'),
11: ('MY', 'Telekom Malaysia Bhd'),
12: ('MY', 'Maxis Communications Berhad'),
13: ('MY', 'Celcom Axiata Berhad'),
14: ('MY', 'Telekom Malaysia Berhad for PSTN SMS'),
16: ('MY', 'DiGi Telecommunications'),
17: ('MY', 'Maxis Communications Berhad'),
18: ('MY', 'U Mobile Sdn Bhd'),
19: ('MY', 'Celcom Axiata Berhad'),
20: ('MY', 'Electcoms Berhad'),
150: ('MY', 'Tune Talk Sdn Bhd'),
152: ('MY', 'YTL Communications Sdn Bhd'),
153: ('MY', 'Webe Digital Sdn Bhd'),
154: ('MY', 'Talk Focus Sdn Bhd'),
156: ('MY', 'Altel Communications Sdn Bhd'),
157: ('MY', 'Telekomunikasi Indonesia International (M) Sdn Bhd')},
505: {1: ('AU', 'Telstra Corporation Limited'),
2: ('AU', 'Singtel Optus Proprietary Limited'),
3: ('AU', 'Vodafone Hutchison Australia Proprietary Limited'),
4: ('AU', 'Department of Defence'),
7: ('AU', 'Vodafone Network Pty. Ltd.'),
10: ('AU', 'Norfolk Telecom'),
11: ('AU', 'Telstra Corporation Ltd.'),
13: ('AU', 'Railcorp, Transport for New South Wales'),
14: ('AU', 'TPG Telecom'),
16: ('AU', 'Victorian Rail Track'),
17: ('AU', 'Optus'),
18: ('AU', 'Pactel International Pty Ltd'),
19: ('AU', 'Lycamobile Pty Ltd'),
20: ('AU', 'Ausgrid Corporation'),
21: ('AU', 'Queensland Rail Limited'),
22: ('AU', 'iiNet Ltd'),
23: ('AU', 'Challenge Networks Pty. Ltd.'),
24: ('AU', 'Advanced Communications Technologies Pty. Ltd.'),
25: ('AU', 'Pilbara Iron Company Services Pty Ltd'),
26: ('AU', 'Dialogue Communications Pty. Ltd.'),
27: ('AU', 'Nexium Telecommunications'),
28: ('AU', 'RCOM International Pty Ltd'),
30: ('AU', 'Compatel Limited'),
31: ('AU', 'BHP Billiton'),
32: ('AU', 'Thales Australia'),
33: ('AU', 'CLX Networks Pty Ltd'),
34: ('AU', 'Santos Limited'),
35: ('AU', 'MessageBird Pty Ltd'),
36: ('AU', 'Optus Mobile Pty. Ltd.'),
37: ('AU', 'Yancoal Australia Ltd'),
38: ('AU', 'Truphone Pty Ltd'),
39: ('AU', 'Telstra Corporation Ltd.'),
40: ('AU', 'CITIC Pacific Mining'),
41: ('AU', 'Aqura Technologies Pty'),
42: ('AU', 'Groote Eylandt Mining Company Pty Ltd'),
43: ('AU', 'Arrow Energy Pty Ltd'),
44: ('AU', 'Roy Hill Iron Ore Pty Ltd'),
50: ('AU', 'Pivotel Group Pty Limited'),
61: ('AU', 'Commtel Network Solutions Pty Ltd'),
62: ('AU', 'National Broadband Network Co.'),
68: ('AU', 'National Broadband Network Co.'),
71: ('AU', 'Telstra Corporation Limited'),
72: ('AU', 'Telstra Corporation Limited'),
88: ('AU', 'Pivotel Group Pty Limited'),
90: ('AU', 'Singtel Optus Proprietary Limited')},
510: {0: ('ID', 'PT Pasifik Satelit Nusantara (ACeS)'),
1: ('ID', 'PT Indonesian Satellite Corporation Tbk (INDOSAT)'),
9: ('ID', 'PT Smartfren Telecom'),
10: ('ID', 'PT Telekomunikasi Selular'),
11: ('ID', 'PT XL Axiata Tbk'),
27: ('ID', 'PT Sampoerna Telekomunikasi Indonesia'),
28: ('ID', 'PT Mobile-8 Telecom'),
88: ('ID', 'PT Internux'),
89: ('ID', 'PT Hutchison CP Telecommunications'),
99: ('ID', 'PT Bakrie Telecom')},
514: {1: ('TL', 'PT Telekomunikasi Indonesia International'),
2: ('TL', 'Timor Telecom'),
3: ('TL', 'Viettel Timor-Leste')},
515: {2: ('PH', 'Globe Telecom'),
3: ('PH', 'PLDT via Smart Communications'),
5: ('PH', 'Digital Telecommunications Philippines'),
11: ('PH', 'PLDT via ACeS Philippines'),
24: ('PH', 'ABS-CBN Convergence with Globe Telecom'),
88: ('PH', 'Next Mobile Inc.')},
520: {0: ('TH', 'CAT Telecom'),
3: ('TH', 'Advanced Wireless Network Company Ltd.'),
4: ('TH', 'True Move H Universal Communication Company Ltd.'),
5: ('TH', 'DTAC TriNet Company Ltd.'),
9: ('TH', 'Royal Thai Police'),
15: ('TH', 'TOT Public Company Limited'),
18: ('TH', 'Total Access Communications Public Company Ltd.'),
20: ('TH', 'ACeS'),
47: ('TH', 'TOT Public Company Limited')},
525: {1: ('SG', 'Singapore Telecom'),
3: ('SG', 'M1 Limited'),
5: ('SG', 'StarHub Mobile'),
6: ('SG', 'StarHub Mobile'),
7: ('SG', 'Singapore Telecom'),
8: ('SG', 'StarHub Mobile'),
9: ('SG', 'Liberty Wireless Pte Ltd'),
10: ('SG', 'TPG Telecom Pte Ltd'),
12: ('SG', 'GRID Communications Pte Ltd.')},
528: {1: ('BN', 'Jabatan Telekom Brunei'),
2: ('BN', 'B-Mobile Communications Sdn Bhd'),
11: ('BN', 'Data Stream Technology')},
530: {1: ('NZ', 'Vodafone New Zealand'),
3: ('NZ', 'Woosh Wireless'),
5: ('NZ', 'Spark New Zealand'),
6: ('NZ', 'Spark New Zealand'),
7: ('NZ', 'Bluereach Limited'),
24: ('NZ', '2degrees')},
536: {2: ('NR', 'Digicel (Nauru) Corporation')},
537: {1: ('PG', 'Bemobile Limited'),
2: ('PG', 'Telikom PNG Ltd.'),
3: ('PG', 'Digicel PNG')},
539: {1: ('TO', 'Tonga Communications Corporation'),
43: ('TO', 'Shoreline Communication'),
88: ('TO', 'Digicel (Tonga) Limited')},
540: {1: ('SB', 'Our Telekom'), 2: ('SB', 'BMobile (SI) Ltd')},
541: {0: ('VU', 'ACeS International (AIL)'),
1: ('VU', 'Telecom Vanuatu Ltd'),
5: ('VU', 'Digicel Vanuatu Ltd'),
7: ('VU', 'WanTok Vanuatu Ltd')},
542: {1: ('FJ', 'Vodafone Fiji'),
2: ('NR', 'Digicel Fiji'),
3: ('FJ', 'Telecom Fiji Ltd')},
543: {1: ('WF',
'Service des Postes et Télécommunications des Îles Wallis et Futuna '
'(SPT)')},
544: {11: ('AS', 'Bluesky')},
545: {1: ('KI', 'Telecom Services Kiribati Ltd'),
2: ('KI', 'OceanLink'),
9: ('KI', 'Telecom Services Kiribati Ltd')},
546: {1: ('NC', 'OPT New Caledonia')},
547: {5: ('PF', 'VITI'),
15: ('PF', 'Pacific Mobile Telecom'),
20: ('PF', 'Tikiphone SA')},
548: {1: ('CK', 'Telecom Cook Islands')},
549: {0: ('WS', 'Digicel Pacific Ltd.'),
1: ('WS', 'Digicel Pacific Ltd.'),
27: ('WS', 'Bluesky Samoa Ltd')},
550: {1: ('FM', 'FSMTC')},
551: {1: ('MH',
'Marshall Islands National Telecommunications Authority (MINTA)')},
552: {1: ('PW', 'Palau National Communications Corp.'),
2: ('PW', 'Palau Equipment Company Inc.')},
553: {1: ('TV', 'Tuvalu Telecom')},
554: {1: ('TK', 'Teletok')},
555: {1: ('NU', 'Telecom Niue')},
602: {1: ('EG', 'Orange Egypt'),
2: ('EG', 'Vodafone Egypt'),
3: ('EG', 'Etisalat Egypt'),
4: ('EG', 'Telecom Egypt')},
603: {1: ('DZ', 'Algérie Télécom'),
2: ('DZ', 'Optimum Telecom Algérie Spa'),
3: ('DZ', 'Wataniya Telecom Algérie'),
7: ('DZ', 'Algérie Télécom'),
9: ('DZ', 'Algérie Télécom'),
21: ('DZ', 'Anesrif')},
604: {0: ('MA', 'Médi Télécom'),
1: ('MA', 'Ittissalat Al-Maghrib (Maroc Telecom)'),
2: ('MA', 'Wana Corporate'),
4: ('MA', 'Al Houria Telecom'),
5: ('MA', 'Wana Corporate'),
6: ('MA', 'Ittissalat Al-Maghrib (Maroc Telecom)'),
99: ('MA', 'Al Houria Telecom')},
605: {1: ('TN', 'Orange Tunisie'),
2: ('TN', 'Tunisie Telecom'),
3: ('TN', 'ooredoo Tunisiana')},
606: {0: ('LY', 'Libyana'),
1: ('LY', 'Al-Madar Al-Jadeed'),
2: ('LY', 'Al-Jeel Al-Jadeed'),
3: ('LY', 'Libya Telecom & Technology (LTT)'),
6: ('LY', 'Hatef Libya')},
607: {1: ('GM', 'Gamcel'),
2: ('GM', 'Africell'),
3: ('GM', 'Comium'),
4: ('GM', 'QCell Gambia'),
5: ('GM', 'GAMTEL-Ecowan'),
6: ('GM', 'NETPAGE')},
608: {1: ('SN', 'Sonatel'),
2: ('SN', 'Millicom International Cellular S.A.'),
3: ('SN', 'Sudatel'),
4: ('SN', 'CSU-SA')},
609: {1: ('MR', 'Mattel'),
2: ('MR', 'Chinguitel'),
10: ('MR', 'Mauritel Mobiles')},
610: {1: ('ML', 'Malitel SA'),
2: ('ML', 'Orange Mali SA'),
3: ('ML', 'Alpha Telecommunication Mali S.A.')},
611: {1: ('GN', 'Orange S.A.'),
2: ('GN', 'Sotelgui Lagui'),
3: ('GN', 'INTERCEL Guinée'),
4: ('GN', 'Areeba Guinea'),
5: ('GN', 'Cellcom')},
612: {2: ('CI', 'Atlantique Cellulaire'),
3: ('CI', 'Orange'),
4: ('CI', 'Comium Ivory Coast Inc'),
5: ('CI', 'Loteny Telecom'),
6: ('CI', 'Oricel'),
7: ('CI', 'Aircomm'),
18: ('CI', 'YooMee')},
613: {1: ('BF', 'Onatel'),
2: ('BF', 'Orange Burkina Faso'),
3: ('BF', 'Telecel Faso SA')},
614: {1: ('NE', 'La Société Sahélienne de Télécommunications (SahelCom)'),
2: ('NE', 'Bharti Airtel Limited'),
3: ('NE', 'Atlantique Telecom (subsidiary of Etisalat)'),
4: ('NE', 'Orange Niger')},
615: {1: ('TG', 'Togo Telecom'), 3: ('TG', 'Moov Togo')},
616: {1: ('BJ', 'Benin Telecoms Mobile'),
2: ('BJ', 'Telecel Benin'),
3: ('BJ', 'Spacetel Benin'),
4: ('BJ', 'Bell Benin Communications'),
5: ('BJ', 'Glo Communication Benin')},
617: {1: ('MU', 'Cellplus Mobile Communications Ltd.'),
2: ('MU', 'Mahanagar Telephone Mauritius Limited (MTML)'),
3: ('MU', 'Mahanagar Telephone Mauritius Limited (MTML)'),
10: ('MU', 'Emtel Ltd.')},
618: {1: ('LR', 'Lonestar Communications Corporation'),
4: ('LR', 'Novafone Inc.'),
7: ('LR', 'Orange Liberia'),
20: ('LR', 'Liberia Telecommunications Corporation')},
619: {1: ('SL', 'Orange SL Limited'),
2: ('SL', 'Lintel Sierra Leone Limited'),
3: ('SL', 'Lintel Sierra Leone Limited'),
5: ('SL', 'Lintel Sierra Leone Limited'),
6: ('SL', 'Sierra Leone Telephony'),
7: ('SL', 'Qcell Sierra Leone'),
9: ('SL', 'InterGroup Telecom SL'),
25: ('SL', 'Mobitel'),
40: ('SL', 'Datatel (SL) Ltd.'),
50: ('SL', 'Datatel (SL) Ltd.')},
620: {1: ('GH', 'MTN Group'),
2: ('GH', 'Vodafone Group'),
3: ('GH', 'Millicom Ghana'),
4: ('GH', 'Kasapa / Hutchison Telecom'),
6: ('GH', 'Airtel'),
7: ('GH', 'Globacom Group'),
8: ('GH', 'Surfline Communications Ltd'),
10: ('GH', 'Blu Telecommunications'),
11: ('GH', 'Netafrique Dot Com Ltd')},
621: {20: ('NG', 'Bharti Airtel Limited'),
22: ('NG', 'InterC Network Ltd.'),
24: ('NG', 'Spectranet'),
26: ('NG', 'Swift'),
27: ('NG', 'Smile Communications Nigeria'),
30: ('NG', 'MTN Nigeria Communications Limited'),
40: ('NG', 'Nigerian Mobile Telecommunications Limited'),
50: ('NG', 'Globacom Ltd'),
60: ('NG', '')},
622: {1: ('TD', 'Bharti Airtel SA'),
2: ('TD', 'SotelTchad'),
3: ('TD', 'Millicom'),
7: ('TD', 'SotelTchad')},
623: {1: ('CF', 'Centrafrique Telecom Plus'),
2: ('CF', 'Telecel Centrafrique'),
3: ('CF', 'Orange RCA'),
4: ('CF', 'Nationlink Telecom RCA')},
624: {1: ('CM', 'Mobile Telephone Network Cameroon Ltd'),
2: ('CM', 'Orange Cameroun S.A.'),
4: ('CM', 'Viettel Cameroun')},
625: {1: ('CV', 'CVMóvel, S.A.'),
2: ('CV', 'UNITEL T+ TELECOMUNICACÕES, S.A.')},
626: {1: ('ST', 'Companhia Santomese de Telecomunicaçôe'),
2: ('ST', 'Unitel Sao Tome and Principe')},
627: {1: ('GQ', 'GETESA'), 3: ('GQ', 'HiTs EG.SA')},
628: {1: ('GA', 'Gabon Telecom & Libertis S.A.'),
2: ('GA', 'Atlantique Télécom (Etisalat Group) Gabon S.A.'),
3: ('GA', 'Airtel Gabon S.A.'),
4: ('GA', 'USAN Gabon S.A.'),
5: ('GA', 'Réseau de l’Administration Gabonaise')},
629: {1: ('CG', 'Celtel Congo'),
7: ('CG', 'Warid Telecom'),
10: ('CG', 'MTN CONGO S.A')},
630: {1: ('CD', 'Vodacom Congo RDC sprl'),
2: ('CD', 'Airtel sprl'),
4: ('CD', 'Cellco'),
5: ('CD', 'Supercell SPRL'),
10: ('CD', ''),
86: ('CD', 'Orange RDC sarl'),
89: ('CD', 'OASIS sprl'),
90: ('CD', 'Africell RDC sprl')},
631: {2: ('AO', 'UNITEL S.a.r.l.'),
4: ('AO', 'MOVICEL Telecommunications S.A.')},
632: {1: ('GW', 'Guinétel S.A.'),
2: ('GW', 'Spacetel Guiné-Bissau S.A.'),
3: ('GW', ''),
7: ('GW', 'Guinétel S.A.')},
633: {1: ('SC', 'Cable & Wireless Seychelles'),
10: ('SC', 'Telecom Seychelles Ltd')},
634: {1: ('SD', 'Zain Group - Sudan'),
2: ('SD', 'MTN Sudan'),
3: ('SD', 'MTN Sudan'),
5: ('SD', 'Canar Telecom'),
7: ('SD', 'Sudatel Group'),
9: ('SD', 'NEC')},
635: {10: ('RW', 'MTN Rwandacell SARL'),
13: ('RW', 'TIGO RWANDA S.A'),
14: ('RW', 'Airtel RWANDA'),
17: ('RW', 'Olleh Rwanda Networks')},
636: {1: ('ET', 'Ethio Telecom')},
637: {1: ('SO', 'Telesom'),
4: ('SO', 'Somafone FZLLC'),
10: ('SO', 'NationLink Telecom'),
20: ('SO', 'SOMNET'),
30: ('SO', 'Golis Telecom Somalia'),
50: ('SO', 'Hormuud Telecom Somalia Inc'),
57: ('SO', 'UNITEL S.a.r.l.'),
60: ('SO', 'Nationlink Telecom'),
67: ('SO', 'HTG Group Somalia'),
71: ('SO', 'Somtel'),
82: ('SO', 'Telcom Somalia')},
638: {1: ('DJ', 'Djibouti Telecom SA')},
639: {1: ('KE', 'Safaricom Limited'),
2: ('KE', 'Safaricom Limited'),
3: ('KE', 'Bharti Airtel'),
4: ('KE', 'Mobile Pay Kenya Limited'),
6: ('KE', 'Finserve Africa Limited'),
7: ('KE', 'Telkom Kenya'),
8: ('KE', 'Sema Mobile Services Limited'),
9: ('KE', 'Homeland Media Group Limited'),
10: ('KE', 'Jamii Telecommunications Limited'),
11: ('KE', 'WiAfrica Kenya Limited')},
640: {2: ('TZ', 'MIC Tanzania Limited'),
3: ('TZ', 'Zanzibar Telecom Ltd'),
4: ('TZ', 'Vodacom Tanzania Limited'),
5: ('TZ', 'Bharti Airtel'),
7: ('TZ', 'Tanzania Telecommunication Company LTD (TTCL)'),
8: ('TZ', 'Benson Informatics Limited'),
9: ('TZ', 'Viettel Tanzania Limited'),
11: ('TZ', 'Smile Telecoms Holdings Ltd. '),
13: ('TZ', 'Wiafrica Tanzania Limited')},
641: {1: ('UG', 'Bharti Airtel'),
4: ('UG', 'Tangerine Uganda Limited'),
6: ('UG', 'Afrimax Uganda'),
10: ('UG', 'MTN Uganda'),
11: ('UG', 'Uganda Telecom Ltd.'),
14: ('UG', 'Africell Uganda'),
16: ('UG', 'SimbaNET Uganda Limited'),
18: ('UG', 'Suretelecom Uganda Ltd.'),
22: ('UG', 'Bharti Airtel'),
26: ('UG', 'Lycamobile Network Services Uganda Limited'),
33: ('UG', 'Smile Communications Uganda Limited'),
40: ('UG', 'Civil Aviation Authority (CAA)'),
44: ('UG', 'K2 Telecom Ltd')},
642: {1: ('BI', 'Econet Wireless Burundi PLC'),
3: ('BI', 'Onatel'),
7: ('BI', 'LACELL SU'),
8: ('BI', 'Viettel Burundi'),
82: ('BI', 'Econet Wireless Burundi PLC')},
643: {1: ('MZ', 'Mocambique Celular S.A.'),
3: ('MZ', 'Movitel, SA'),
4: ('MZ', 'Vodacom Mozambique, S.A.')},
645: {1: ('ZM', 'Bharti Airtel'),
2: ('ZM', 'MTN Group'),
3: ('ZM', 'Zambia Telecommunications Company Ltd')},
646: {1: ('MG', 'Bharti Airtel'),
2: ('MG', 'Orange Madagascar S.A.'),
4: ('MG', 'Telma Mobile S.A.')},
647: {0: ('RE', 'Orange La Réunion'),
1: ('RE', 'BJT Partners'),
2: ('RE', 'Telco OI'),
3: ('RE', 'Telco OI'),
4: ('RE', 'Zeop mobile'),
10: ('RE', 'Société Réunionnaise du Radiotéléphone')},
648: {1: ('ZW', 'Net*One Cellular (Pvt) Ltd'),
3: ('ZW', 'Telecel Zimbabwe (PVT) Ltd'),
4: ('ZW', 'Econet Wireless')},
649: {1: ('NA', 'MTC Namibia'),
2: ('NA', 'Telecom Namibia'),
3: ('NA', 'Telecom Namibia'),
4: ('NA', 'Paratus Telecommunications (Pty)'),
5: ('NA', 'Demshi Investments CC'),
6: ('NA', 'MTN Namibia')},
650: {1: ('MW', 'Telecom Network Malawi'),
2: ('MW', 'Access Communications Ltd'),
10: ('MW', 'Bharti Airtel Limited')},
651: {1: ('LS', 'Vodacom Lesotho (Pty) Ltd'), 2: ('LS', 'Econet Ezi-cel')},
652: {1: ('BW', 'Mascom Wireless (Pty) Limited'),
2: ('BW', 'Orange (Botswana) Pty Limited'),
4: ('BW', 'Botswana Telecommunications Corporation')},
653: {1: ('SZ', 'SPTC'),
2: ('SZ', 'Swazi Mobile Limited'),
10: ('SZ', 'Swazi MTN Limited')},
654: {1: ('KM', 'Comoros Telecom'), 2: ('KM', 'Telecom Malagasy (Telma)')},
655: {1: ('ZA', 'Vodacom'),
2: ('ZA', 'Telkom SA SOC Ltd'),
4: ('ZA', 'Sasol (Pty) Ltd.'),
5: ('ZA', 'Telkom SA Ltd'),
6: ('ZA', 'Sentech (Pty) Ltd'),
7: ('ZA', 'Cell C (Pty) Ltd'),
10: ('ZA', 'MTN Group'),
11: ('ZA', 'South African Police Service Gauteng'),
12: ('ZA', 'MTN Group'),
13: ('ZA', 'Neotel Pty Ltd'),
14: ('ZA', 'Neotel Pty Ltd'),
17: ('ZA', 'Sishen Iron Ore Company (Ltd) Pty'),
19: ('ZA', 'Wireless Business Solutions (Pty) Ltd'),
21: ('ZA', 'Cape Town Metropolitan Council'),
24: ('ZA', 'SMSPortal (Pty) Ltd.'),
25: ('ZA', 'Wirels Connect'),
27: ('ZA', 'A to Z Vaal Industrial Supplies Pty Ltd'),
28: ('ZA', 'Hymax Talking Solutions (Pty) Ltd'),
30: ('ZA', 'Bokamoso Consortium'),
31: ('ZA', 'Karabo Telecoms (Pty) Ltd.'),
32: ('ZA', 'Ilizwi Telecommunications'),
33: ('ZA', 'Thinta Thinta Telecommunications Pty Ltd'),
34: ('ZA', 'Bokone Telecoms Pty Ltd'),
35: ('ZA', 'Kingdom Communications Pty Ltd'),
36: ('ZA', 'Amatole Telecommunications Pty Ltd'),
38: ('ZA', 'Wireless Business Solutions (Pty) Ltd'),
41: ('ZA', 'South African Police Service'),
46: ('ZA', 'SMS Cellular Services (Pty) Ltd'),
50: ('ZA', 'Ericsson South Africa (Pty) Ltd'),
51: ('ZA', 'Integrat (Pty) Ltd'),
53: ('ZA', 'Lycamobile (Pty) Ltd'),
73: ('ZA', 'Wireless Business Solutions (Pty) Ltd'),
74: ('ZA', 'Wireless Business Solutions (Pty) Ltd'),
75: ('ZA', 'Airports Company South Africa')},
657: {1: ('ER', 'Eritrea Telecommunications Services Corporation')},
658: {1: ('SH', 'Sure South Atlantic Ltd.')},
659: {2: ('SS', 'MTN South Sudan'),
3: ('SS', 'Gemtel'),
4: ('SS', 'Network of the World (NOW)'),
6: ('SS', 'Zain South Sudan'),
7: ('SS', 'Sudani')},
702: {67: ('BZ', 'Belize Telemedia Limited (BTL)'),
69: ('BZ', 'Speednet Communications Limited'),
99: ('BZ', 'Speednet Communications Limited')},
704: {1: ('GT', 'Telecomunicaciones de Guatemala, S.A.'),
2: ('GT', 'Millicom / Local partners'),
3: ('GT', 'Telefónica Móviles Guatemala (Telefónica)')},
706: {1: ('SV', 'CTE Telecom Personal, S.A. de C.V.'),
2: ('SV', 'Digicel, S.A. de C.V.'),
3: ('SV', 'Telemovil El Salvador S.A.'),
4: ('SV', 'Telefónica Móviles El Salvador'),
5: ('SV', 'INTELFON, S.A. de C.V.')},
708: {1: ('HN', 'Servicios de Comunicaciones de Honduras S.A. de C.V.'),
2: ('HN', 'Celtel'),
30: ('HN', 'Empresa Hondureña de Telecomunicaciones'),
40: ('HN', 'Digicel de Honduras')},
710: {21: ('NI',
'Empresa Nicaragüense de Telecomunicaciones, S.A. (ENITEL) '
'(América Móvil)'),
30: ('NI', 'Telefonía Celular de Nicaragua, S.A. (Telefónica, S.A.)'),
73: ('NI', 'Servicios de Comunicaciones S.A.')},
712: {1: ('CR', 'Instituto Costarricense de Electricidad'),
2: ('CR', 'Instituto Costarricense de Electricidad'),
3: ('CR', 'Claro CR Telecomunicaciones (Aló)'),
4: ('CR', 'Telefónica Móviles Costa Rica'),
20: ('CR', 'Virtualis S.A.')},
714: {1: ('PA', 'Cable & Wireless Panama S.A.'),
2: ('PA', 'Telefónica Moviles Panama S.A, Bell South Corp. (BSC)'),
3: ('PA', 'América Móvil'),
4: ('PA', 'Digicel Group'),
20: ('PA', 'Telefónica Móviles de Panama S.A')},
716: {6: ('PE', 'Telefónica del Perú S.A.A.'),
7: ('PE', 'Entel Perú S.A.'),
10: ('PE', 'América Móvil Perú'),
15: ('PE', 'Viettel Peru S.A.C.'),
17: ('PE', 'Entel Perú S.A.')},
722: {10: ('AR', 'Telefónica Móviles Argentina S.A.'),
20: ('AR', 'NII Holdings'),
34: ('AR', 'Telecom Personal S.A.'),
40: ('AR', 'TE.SA.M Argentina S.A.'),
70: ('AR', 'Telefónica Móviles Argentina S.A.'),
310: ('AR', 'AMX Argentina S.A.'),
320: ('AR', 'AMX Argentina S.A.'),
330: ('AR', 'AMX Argentina S.A.'),
341: ('AR', 'Telecom Personal S.A.')},
724: {0: ('BR', 'NII Holdings, Inc.'),
1: ('BR', 'SISTEER DO BRASIL TELECOMUNICAÇÔES'),
2: ('BR', 'Telecom Italia Mobile'),
3: ('BR', 'Telecom Italia Mobile'),
4: ('BR', 'Telecom Italia Mobile'),
5: ('BR', 'Claro'),
6: ('BR', 'Vivo S.A.'),
10: ('BR', 'Vivo S.A.'),
11: ('BR', 'Vivo S.A.'),
15: ('BR', 'Sercomtel Celular'),
17: ('BR', 'Correios Celular'),
18: ('BR', 'Datora (Vodafone)'),
23: ('BR', 'Vivo S.A.'),
24: ('BR', 'Amazonia Celular'),
28: ('BR', ''),
30: ('BR', 'TNL PCS Oi'),
31: ('BR', 'TNL PCS Oi'),
32: ('BR', 'Algar Telecom S.A.'),
33: ('BR', 'Algar Telecom S.A.'),
34: ('BR', 'Algar Telecom S.A.'),
35: ('BR', 'Telcom Telecomunicações'),
36: ('BR', 'Options Telecomunicações'),
38: ('BR', 'Claro'),
39: ('BR', 'NII Holdings, Inc.'),
54: ('BR', 'PORTO SEGURO TELECOMUNICAÇÔES'),
99: ('BR', '')},
730: {1: ('CL', 'Entel Telefonía Móvil S.A.'),
2: ('CL', 'Telefónica Móvil de Chile'),
3: ('CL', 'Claro Chile S.A.'),
4: ('CL', 'Novator Partners'),
5: ('CL', 'Multikom S.A.'),
6: ('CL', 'Blue Two Chile S.A.'),
7: ('CL', 'Telefónica Móvil de Chile'),
8: ('CL', 'VTR S.A.'),
9: ('CL', 'Novator Partners'),
10: ('CL', 'Entel Telefonía Móvil S.A.'),
11: ('CL', 'Celupago S.A.'),
12: ('CL', 'Telestar Móvil S.A.'),
13: ('CL', 'Tribe Mobile Chile SPA'),
14: ('CL', 'Netline Telefónica Móvil Ltda'),
15: ('CL', 'Cibeles Telecom S.A.'),
16: ('CL', 'Nomade Telecomunicaciones S.A.'),
17: ('CL', 'COMPATEL Chile Limitada'),
18: ('CL', 'Empresas Bunker S.A.'),
19: ('CL', 'Sociedad Falabella Móvil SPA'),
20: ('CL', 'Inversiones Santa Fe Limitada'),
22: ('CL', 'Cellplus SpA'),
23: ('CL', 'Claro Servicios Empresariales S. A.'),
99: ('CL', 'WILL Telefonía')},
732: {1: ('CO', 'Colombia Telecomunicaciones S.A. ESP'),
2: ('CO', 'Edatel S.A. ESP'),
3: ('CO', 'LLEIDA S.A.S.'),
4: ('CO', 'COMPATEL COLOMBIA SAS'),
20: ('CO', 'Une EPM Telecomunicaciones S.A. E.S.P.'),
99: ('CO', 'Empresas Municipales de Cali'),
101: ('CO', 'COMCEL S.A.'),
103: ('CO', 'Colombia Móvil S.A. ESP'),
111: ('CO', 'Colombia Móvil S.A. ESP'),
123: ('CO', 'Colombia Telecomunicaciones S.A. ESP'),
130: ('CO', 'Avantel S.A.S'),
142: ('CO', 'Une EPM Telecomunicaciones S.A. E.S.P.'),
154: ('CO', 'Virgin Mobile Colombia S.A.S.'),
165: ('CO', 'Colombia Móvil S.A. ESP'),
176: ('CO', 'DirecTV Colombia Ltda'),
187: ('CO', 'Empresa de Telecomunicaciones de Bogotá S.A. ESP'),
199: ('CO', 'SUMA Movil SAS'),
208: ('CO', 'UFF Movil SAS')},
734: {2: ('VE', 'Corporacion Digitel C.A.'),
3: ('VE', 'Galaxy Entertainment de Venezuela C.A.'),
4: ('VE', 'Telefónica Móviles Venezuela'),
6: ('VE', 'Telecomunicaciones Movilnet')},
736: {1: ('BO', 'Nuevatel PCS De Bolivia SA'),
2: ('BO', 'Entel SA'),
3: ('BO', 'Telefónica Celular De Bolivia S.A')},
738: {1: ('GY', 'U-Mobile (Cellular) Inc.'),
2: ('GY', 'Guyana Telephone & Telegraph Co.'),
3: ('GY', 'Quark Communications Inc.'),
5: ('GY', 'eGovernment Unit, Ministry of the Presidency')},
740: {0: ('EC', 'Otecel S.A.'),
1: ('EC', 'CONECEL S.A.'),
2: ('EC', 'Corporación Nacional de Telecomunicaciones (CNT EP)'),
3: ('EC', 'Otecel S.A.')},
744: {1: ('PY', 'Hola Paraguay S.A'),
2: ('PY', 'AMX Paraguay S.A.'),
3: ('PY', 'Compañia Privada de Comunicaciones S.A.'),
4: ('PY', 'Telefónica Celular Del Paraguay S.A. (Telecel)'),
5: ('PY', 'Núcleo S.A(TIM)'),
6: ('PY', 'Copaco S.A.')},
746: {2: ('SR', 'Telecommunications Company Suriname (Telesur)'),
3: ('SR', 'Digicel Group Limited'),
5: ('SR', 'Telecommunications Company Suriname (Telesur)')},
748: {0: ('UY', 'Administración Nacional de Telecomunicaciones'),
1: ('UY', 'Administración Nacional de Telecomunicaciones'),
3: ('UY', 'Administración Nacional de Telecomunicaciones'),
7: ('UY', 'Telefónica Móviles Uruguay'),
10: ('UY', 'AM Wireless Uruguay S.A.')},
750: {1: ('FK', 'Sure South Atlantic Ltd.')}}
countries = \
{'AD': [(213, 3)],
'AE': [(424, 2), (424, 3)],
'AF': [(412, 1),
(412, 20),
(412, 40),
(412, 50),
(412, 55),
(412, 80),
(412, 88)],
'AG': [(344, 30), (344, 50), (344, 920), (344, 930)],
'AI': [(365, 10), (365, 840)],
'AL': [(276, 1), (276, 2), (276, 3), (276, 4)],
'AM': [(283, 1), (283, 4), (283, 5), (283, 10)],
'AO': [(631, 2), (631, 4)],
'AR': [(722, 10),
(722, 20),
(722, 34),
(722, 40),
(722, 70),
(722, 310),
(722, 320),
(722, 330),
(722, 341)],
'AS': [(544, 11)],
'AT': [(232, 1),
(232, 2),
(232, 3),
(232, 4),
(232, 5),
(232, 7),
(232, 8),
(232, 9),
(232, 10),
(232, 11),
(232, 12),
(232, 13),
(232, 14),
(232, 15),
(232, 16),
(232, 17),
(232, 18),
(232, 19),
(232, 20),
(232, 21),
(232, 22),
(232, 23),
(232, 91),
(232, 92)],
'AU': [(505, 1),
(505, 2),
(505, 3),
(505, 4),
(505, 7),
(505, 10),
(505, 11),
(505, 13),
(505, 14),
(505, 16),
(505, 17),
(505, 18),
(505, 19),
(505, 20),
(505, 21),
(505, 22),
(505, 23),
(505, 24),
(505, 25),
(505, 26),
(505, 27),
(505, 28),
(505, 30),
(505, 31),
(505, 32),
(505, 33),
(505, 34),
(505, 35),
(505, 36),
(505, 37),
(505, 38),
(505, 39),
(505, 40),
(505, 41),
(505, 42),
(505, 43),
(505, 44),
(505, 50),
(505, 61),
(505, 62),
(505, 68),
(505, 71),
(505, 72),
(505, 88),
(505, 90)],
'AW': [(363, 1), (363, 2)],
'AZ': [(400, 1), (400, 2), (400, 3), (400, 4), (400, 5), (400, 6)],
'BA': [(218, 3), (218, 5), (218, 90)],
'BB': [(342, 600), (342, 750), (342, 800)],
'BD': [(470, 1),
(470, 2),
(470, 3),
(470, 4),
(470, 5),
(470, 7),
(470, 9),
(470, 10)],
'BE': [(206, 1),
(206, 2),
(206, 5),
(206, 6),
(206, 7),
(206, 8),
(206, 10),
(206, 20),
(206, 25),
(206, 28),
(206, 30),
(206, 33),
(206, 40),
(206, 50)],
'BF': [(613, 1), (613, 2), (613, 3)],
'BG': [(284, 1), (284, 3), (284, 5), (284, 7), (284, 11), (284, 13)],
'BH': [(426, 1), (426, 2), (426, 3), (426, 4), (426, 5)],
'BI': [(642, 1), (642, 3), (642, 7), (642, 8), (642, 82)],
'BJ': [(616, 1), (616, 2), (616, 3), (616, 4), (616, 5)],
'BM': [(350, 0), (350, 1), (350, 2), (350, 5), (350, 11)],
'BN': [(528, 1), (528, 2), (528, 11)],
'BO': [(736, 1), (736, 2), (736, 3)],
'BR': [(724, 0),
(724, 1),
(724, 2),
(724, 3),
(724, 4),
(724, 5),
(724, 6),
(724, 10),
(724, 11),
(724, 15),
(724, 17),
(724, 18),
(724, 23),
(724, 24),
(724, 28),
(724, 30),
(724, 31),
(724, 32),
(724, 33),
(724, 34),
(724, 35),
(724, 36),
(724, 38),
(724, 39),
(724, 54),
(724, 99)],
'BS': [(364, 39), (364, 49)],
'BT': [(402, 11), (402, 77)],
'BW': [(652, 1), (652, 2), (652, 4)],
'BY': [(257, 1), (257, 2), (257, 4), (257, 6)],
'BZ': [(702, 67), (702, 69), (702, 99)],
'CA': [(302, 130),
(302, 131),
(302, 220),
(302, 221),
(302, 222),
(302, 250),
(302, 270),
(302, 290),
(302, 300),
(302, 320),
(302, 340),
(302, 370),
(302, 380),
(302, 420),
(302, 480),
(302, 490),
(302, 491),
(302, 500),
(302, 510),
(302, 520),
(302, 530),
(302, 540),
(302, 560),
(302, 570),
(302, 590),
(302, 610),
(302, 620),
(302, 630),
(302, 650),
(302, 655),
(302, 660),
(302, 670),
(302, 680),
(302, 690),
(302, 701),
(302, 710),
(302, 720),
(302, 730),
(302, 750),
(302, 760),
(302, 770),
(302, 780),
(302, 790),
(302, 820),
(302, 860),
(302, 880),
(302, 940),
(302, 990)],
'CD': [(630, 1),
(630, 2),
(630, 4),
(630, 5),
(630, 10),
(630, 86),
(630, 89),
(630, 90)],
'CF': [(623, 1), (623, 2), (623, 3), (623, 4)],
'CG': [(629, 1), (629, 7), (629, 10)],
'CH': [(228, 1),
(228, 2),
(228, 3),
(228, 6),
(228, 8),
(228, 9),
(228, 10),
(228, 11),
(228, 51),
(228, 53),
(228, 54),
(228, 55),
(228, 57),
(228, 58),
(228, 60),
(228, 61),
(228, 99)],
'CI': [(612, 2), (612, 3), (612, 4), (612, 5), (612, 6), (612, 7), (612, 18)],
'CK': [(548, 1)],
'CL': [(730, 1),
(730, 2),
(730, 3),
(730, 4),
(730, 5),
(730, 6),
(730, 7),
(730, 8),
(730, 9),
(730, 10),
(730, 11),
(730, 12),
(730, 13),
(730, 14),
(730, 15),
(730, 16),
(730, 17),
(730, 18),
(730, 19),
(730, 20),
(730, 22),
(730, 23),
(730, 99)],
'CM': [(624, 1), (624, 2), (624, 4)],
'CN': [(460, 0),
(460, 1),
(460, 3),
(460, 4),
(460, 8),
(460, 9),
(460, 11),
(460, 20)],
'CO': [(732, 1),
(732, 2),
(732, 3),
(732, 4),
(732, 20),
(732, 99),
(732, 101),
(732, 103),
(732, 111),
(732, 123),
(732, 130),
(732, 142),
(732, 154),
(732, 165),
(732, 176),
(732, 187),
(732, 199),
(732, 208)],
'CR': [(712, 1), (712, 2), (712, 3), (712, 4), (712, 20)],
'CU': [(368, 1)],
'CV': [(625, 1), (625, 2)],
'CW': [(362, 31),
(362, 33),
(362, 51),
(362, 54),
(362, 59),
(362, 60),
(362, 63),
(362, 68),
(362, 69),
(362, 74),
(362, 76),
(362, 78),
(362, 91),
(362, 94)],
'CY': [(280, 1), (280, 10), (280, 20), (280, 22), (280, 23)],
'CZ': [(230, 1),
(230, 2),
(230, 3),
(230, 4),
(230, 5),
(230, 8),
(230, 9),
(230, 98),
(230, 99)],
'DE': [(262, 1),
(262, 2),
(262, 3),
(262, 4),
(262, 5),
(262, 6),
(262, 8),
(262, 9),
(262, 10),
(262, 11),
(262, 12),
(262, 15),
(262, 17),
(262, 18),
(262, 19),
(262, 20),
(262, 21),
(262, 22),
(262, 23),
(262, 33),
(262, 42),
(262, 43),
(262, 60),
(262, 72),
(262, 73),
(262, 74),
(262, 75),
(262, 77),
(262, 78),
(262, 92)],
'DJ': [(638, 1)],
'DK': [(238, 1),
(238, 2),
(238, 3),
(238, 4),
(238, 5),
(238, 6),
(238, 8),
(238, 9),
(238, 10),
(238, 11),
(238, 12),
(238, 13),
(238, 14),
(238, 15),
(238, 16),
(238, 18),
(238, 20),
(238, 23),
(238, 25),
(238, 28),
(238, 30),
(238, 42),
(238, 66),
(238, 73),
(238, 77)],
'DM': [(366, 20), (366, 110)],
'DO': [(370, 1), (370, 2), (370, 3), (370, 4), (370, 5)],
'DZ': [(603, 1), (603, 2), (603, 3), (603, 7), (603, 9), (603, 21)],
'EC': [(740, 0), (740, 1), (740, 2), (740, 3)],
'EE': [(248, 1), (248, 2), (248, 3), (248, 4), (248, 11), (248, 71)],
'EG': [(602, 1), (602, 2), (602, 3), (602, 4)],
'ER': [(657, 1)],
'ES': [(214, 1),
(214, 2),
(214, 3),
(214, 4),
(214, 5),
(214, 6),
(214, 7),
(214, 8),
(214, 9),
(214, 10),
(214, 11),
(214, 12),
(214, 16),
(214, 17),
(214, 19),
(214, 21),
(214, 22),
(214, 23),
(214, 24),
(214, 25),
(214, 26),
(214, 27),
(214, 28),
(214, 29),
(214, 30),
(214, 31),
(214, 32),
(214, 33),
(214, 34),
(214, 35),
(214, 36),
(214, 37),
(214, 51)],
'ET': [(636, 1)],
'FI': [(244, 3),
(244, 4),
(244, 5),
(244, 7),
(244, 8),
(244, 9),
(244, 10),
(244, 11),
(244, 12),
(244, 14),
(244, 17),
(244, 21),
(244, 26),
(244, 27),
(244, 28),
(244, 32),
(244, 33),
(244, 34),
(244, 35),
(244, 36),
(244, 37),
(244, 38),
(244, 39),
(244, 40),
(244, 41),
(244, 42),
(244, 43),
(244, 44),
(244, 91)],
'FJ': [(542, 1), (542, 3)],
'FK': [(750, 1)],
'FM': [(550, 1)],
'FO': [(288, 1), (288, 2)],
'FR': [(208, 2),
(208, 3),
(208, 4),
(208, 5),
(208, 6),
(208, 7),
(208, 8),
(208, 9),
(208, 11),
(208, 13),
(208, 14),
(208, 15),
(208, 16),
(208, 17),
(208, 21),
(208, 22),
(208, 24),
(208, 25),
(208, 26),
(208, 27),
(208, 28),
(208, 30),
(208, 31),
(208, 88),
(208, 91),
(208, 94)],
'GA': [(628, 1), (628, 2), (628, 3), (628, 4), (628, 5)],
'GB': [(234, 3),
(234, 50),
(234, 55),
(234, 36),
(234, 58),
(234, 0),
(234, 1),
(234, 2),
(234, 4),
(234, 8),
(234, 9),
(234, 10),
(234, 11),
(234, 12),
(234, 13),
(234, 14),
(234, 15),
(234, 16),
(234, 17),
(234, 18),
(234, 19),
(234, 20),
(234, 22),
(234, 23),
(234, 24),
(234, 25),
(234, 26),
(234, 27),
(234, 28),
(234, 29),
(234, 30),
(234, 31),
(234, 32),
(234, 33),
(234, 34),
(234, 37),
(234, 38),
(234, 39),
(234, 51),
(234, 52),
(234, 53),
(234, 54),
(234, 56),
(234, 57),
(234, 59),
(234, 70),
(234, 71),
(234, 72),
(234, 76),
(234, 78),
(234, 86),
(235, 0),
(235, 1),
(235, 2),
(235, 3),
(235, 77),
(235, 88),
(235, 91),
(235, 94),
(235, 95)],
'GD': [(352, 30), (352, 110)],
'GE': [(282, 1),
(282, 2),
(282, 3),
(282, 4),
(282, 5),
(282, 6),
(282, 7),
(282, 8),
(282, 9),
(282, 10),
(282, 11)],
'GH': [(620, 1),
(620, 2),
(620, 3),
(620, 4),
(620, 6),
(620, 7),
(620, 8),
(620, 10),
(620, 11)],
'GI': [(266, 1), (266, 9)],
'GL': [(290, 1), (290, 2)],
'GM': [(607, 1), (607, 2), (607, 3), (607, 4), (607, 5), (607, 6)],
'GN': [(611, 1), (611, 2), (611, 3), (611, 4), (611, 5)],
'GP': [(340, 1), (340, 2), (340, 3), (340, 8), (340, 20)],
'GQ': [(627, 1), (627, 3)],
'GR': [(202, 1),
(202, 2),
(202, 3),
(202, 4),
(202, 5),
(202, 7),
(202, 9),
(202, 10),
(202, 11),
(202, 12),
(202, 13),
(202, 14),
(202, 15),
(202, 16)],
'GT': [(704, 1), (704, 2), (704, 3)],
'GW': [(632, 1), (632, 2), (632, 3), (632, 7)],
'GY': [(738, 1), (738, 2), (738, 3), (738, 5)],
'HK': [(454, 0),
(454, 1),
(454, 2),
(454, 3),
(454, 4),
(454, 6),
(454, 7),
(454, 8),
(454, 9),
(454, 11),
(454, 12),
(454, 13),
(454, 14),
(454, 15),
(454, 16),
(454, 17),
(454, 19),
(454, 20),
(454, 21),
(454, 22),
(454, 24),
(454, 25),
(454, 26),
(454, 29),
(454, 30),
(454, 31),
(454, 32),
(454, 35)],
'HN': [(708, 1), (708, 2), (708, 30), (708, 40)],
'HR': [(219, 1), (219, 2), (219, 10), (219, 12)],
'HT': [(372, 2), (372, 3)],
'HU': [(216, 1),
(216, 2),
(216, 4),
(216, 30),
(216, 70),
(216, 71),
(216, 99)],
'ID': [(510, 0),
(510, 1),
(510, 9),
(510, 10),
(510, 11),
(510, 27),
(510, 28),
(510, 88),
(510, 89),
(510, 99)],
'IE': [(272, 1),
(272, 2),
(272, 3),
(272, 4),
(272, 5),
(272, 7),
(272, 8),
(272, 11),
(272, 13),
(272, 15),
(272, 16),
(272, 17)],
'IL': [(425, 1),
(425, 2),
(425, 3),
(425, 4),
(425, 5),
(425, 6),
(425, 7),
(425, 8),
(425, 9),
(425, 11),
(425, 12),
(425, 13),
(425, 14),
(425, 15),
(425, 16),
(425, 17),
(425, 18),
(425, 19),
(425, 20),
(425, 21),
(425, 23),
(425, 24),
(425, 26),
(425, 28),
(425, 29)],
'IN': [(404, 1),
(404, 2),
(404, 3),
(404, 4),
(404, 5),
(404, 7),
(404, 9),
(404, 10),
(404, 11),
(404, 12),
(404, 13),
(404, 14),
(404, 15),
(404, 16),
(404, 17),
(404, 18),
(404, 19),
(404, 20),
(404, 21),
(404, 22),
(404, 24),
(404, 25),
(404, 27),
(404, 28),
(404, 29),
(404, 30),
(404, 31),
(404, 34),
(404, 35),
(404, 36),
(404, 37),
(404, 38),
(404, 40),
(404, 41),
(404, 42),
(404, 43),
(404, 44),
(404, 45),
(404, 46),
(404, 48),
(404, 49),
(404, 50),
(404, 51),
(404, 52),
(404, 53),
(404, 54),
(404, 55),
(404, 56),
(404, 57),
(404, 58),
(404, 59),
(404, 60),
(404, 62),
(404, 64),
(404, 66),
(404, 67),
(404, 68),
(404, 69),
(404, 70),
(404, 71),
(404, 72),
(404, 73),
(404, 74),
(404, 75),
(404, 76),
(404, 77),
(404, 78),
(404, 79),
(404, 80),
(404, 81),
(404, 82),
(404, 83),
(404, 84),
(404, 85),
(404, 86),
(404, 87),
(404, 88),
(404, 89),
(404, 90),
(404, 91),
(404, 92),
(404, 93),
(404, 94),
(404, 95),
(404, 96),
(404, 97),
(404, 98),
(405, 1),
(405, 25),
(405, 26),
(405, 27),
(405, 28),
(405, 29),
(405, 3),
(405, 30),
(405, 31),
(405, 32),
(405, 33),
(405, 34),
(405, 35),
(405, 36),
(405, 37),
(405, 38),
(405, 39),
(405, 4),
(405, 41),
(405, 42),
(405, 43),
(405, 44),
(405, 45),
(405, 46),
(405, 47),
(405, 5),
(405, 6),
(405, 7),
(405, 8),
(405, 9),
(405, 10),
(405, 11),
(405, 12),
(405, 13),
(405, 14),
(405, 15),
(405, 17),
(405, 18),
(405, 19),
(405, 20),
(405, 21),
(405, 22),
(405, 23),
(405, 51),
(405, 52),
(405, 53),
(405, 54),
(405, 55),
(405, 56),
(405, 66),
(405, 67),
(405, 70),
(405, 750),
(405, 751),
(405, 752),
(405, 753),
(405, 754),
(405, 755),
(405, 756),
(405, 799),
(405, 800),
(405, 801),
(405, 803),
(405, 804),
(405, 805),
(405, 806),
(405, 809),
(405, 810),
(405, 811),
(405, 819),
(405, 818),
(405, 820),
(405, 821),
(405, 822),
(405, 824),
(405, 827),
(405, 834),
(405, 840),
(405, 845),
(405, 846),
(405, 847),
(405, 848),
(405, 849),
(405, 850),
(405, 851),
(405, 852),
(405, 853),
(405, 854),
(405, 855),
(405, 856),
(405, 857),
(405, 858),
(405, 859),
(405, 860),
(405, 861),
(405, 862),
(405, 863),
(405, 864),
(405, 865),
(405, 866),
(405, 867),
(405, 868),
(405, 869),
(405, 870),
(405, 871),
(405, 872),
(405, 873),
(405, 874),
(405, 875),
(405, 880),
(405, 881),
(405, 908),
(405, 909),
(405, 910),
(405, 911),
(405, 927),
(405, 929)],
'IQ': [(418, 0),
(418, 5),
(418, 8),
(418, 20),
(418, 30),
(418, 40),
(418, 45),
(418, 62),
(418, 92)],
'IR': [(432, 1),
(432, 2),
(432, 8),
(432, 10),
(432, 11),
(432, 12),
(432, 14),
(432, 19),
(432, 20),
(432, 21),
(432, 32),
(432, 35),
(432, 40),
(432, 50),
(432, 70),
(432, 71),
(432, 90),
(432, 93),
(432, 99)],
'IS': [(274, 1),
(274, 2),
(274, 3),
(274, 4),
(274, 8),
(274, 11),
(274, 12),
(274, 16),
(274, 22),
(274, 31)],
'IT': [(222, 1),
(222, 4),
(222, 5),
(222, 6),
(222, 8),
(222, 10),
(222, 30),
(222, 33),
(222, 34),
(222, 35),
(222, 36),
(222, 37),
(222, 38),
(222, 39),
(222, 43),
(222, 47),
(222, 48),
(222, 50),
(222, 88),
(222, 99)],
'JM': [(338, 50), (338, 40), (338, 110), (338, 180)],
'JO': [(416, 1), (416, 3), (416, 77)],
'JP': [(440, 0),
(440, 1),
(440, 2),
(440, 3),
(440, 4),
(440, 5),
(440, 6),
(440, 7),
(440, 10),
(440, 20),
(440, 21),
(440, 50),
(440, 51),
(440, 52),
(440, 53),
(440, 54),
(440, 70),
(440, 71),
(440, 72),
(440, 73),
(440, 74),
(440, 75),
(440, 76),
(440, 78),
(440, 91),
(441, 0),
(441, 1),
(441, 10)],
'KE': [(639, 1),
(639, 2),
(639, 3),
(639, 4),
(639, 6),
(639, 7),
(639, 8),
(639, 9),
(639, 10),
(639, 11)],
'KG': [(437, 1), (437, 3), (437, 5), (437, 9), (437, 10), (437, 11)],
'KH': [(456, 1),
(456, 2),
(456, 3),
(456, 4),
(456, 5),
(456, 6),
(456, 8),
(456, 9),
(456, 11),
(456, 18)],
'KI': [(545, 1), (545, 2), (545, 9)],
'KM': [(654, 1), (654, 2)],
'KN': [(356, 50), (356, 70), (356, 110)],
'KR': [(467, 5),
(467, 6),
(450, 1),
(450, 2),
(450, 4),
(450, 5),
(450, 6),
(450, 7),
(450, 8),
(450, 11),
(450, 12)],
'KW': [(419, 2), (419, 3), (419, 4)],
'KY': [(346, 140), (346, 50)],
'KZ': [(401, 1), (401, 2), (401, 7), (401, 8), (401, 77)],
'LA': [(457, 1), (457, 2), (457, 3), (457, 8)],
'LB': [(415, 1), (415, 3)],
'LC': [(358, 110)],
'LI': [(295, 1), (295, 2), (295, 5), (295, 6), (295, 7), (295, 9), (295, 10)],
'LK': [(413, 1),
(413, 2),
(413, 3),
(413, 4),
(413, 5),
(413, 8),
(413, 11),
(413, 12),
(413, 13)],
'LR': [(618, 1), (618, 4), (618, 7), (618, 20)],
'LS': [(651, 1), (651, 2)],
'LT': [(246, 1),
(246, 2),
(246, 3),
(246, 4),
(246, 5),
(246, 6),
(246, 7),
(246, 8),
(246, 9)],
'LU': [(270, 1),
(270, 2),
(270, 7),
(270, 10),
(270, 77),
(270, 78),
(270, 79),
(270, 80),
(270, 81),
(270, 99)],
'LV': [(247, 1), (247, 2), (247, 3), (247, 5), (247, 7), (247, 9)],
'LY': [(606, 0), (606, 1), (606, 2), (606, 3), (606, 6)],
'MA': [(604, 0), (604, 1), (604, 2), (604, 4), (604, 5), (604, 6), (604, 99)],
'MC': [(208, 1), (208, 10), (208, 20), (212, 10)],
'MD': [(259, 1), (259, 2), (259, 3), (259, 5), (259, 15), (259, 99)],
'ME': [(297, 1), (297, 2), (297, 3)],
'MG': [(646, 1), (646, 2), (646, 4)],
'MH': [(551, 1)],
'MK': [(294, 1), (294, 2), (294, 3), (294, 4), (294, 11)],
'ML': [(610, 1), (610, 2), (610, 3)],
'MM': [(414, 0),
(414, 1),
(414, 2),
(414, 3),
(414, 4),
(414, 5),
(414, 6),
(414, 9),
(414, 20),
(414, 21)],
'MN': [(428, 88), (428, 91), (428, 98), (428, 99)],
'MO': [(455, 0),
(455, 1),
(455, 2),
(455, 3),
(455, 4),
(455, 5),
(455, 6),
(455, 7)],
'MR': [(609, 1), (609, 2), (609, 10)],
'MS': [(354, 860)],
'MT': [(278, 1), (278, 11), (278, 21), (278, 30), (278, 77)],
'MU': [(617, 1), (617, 2), (617, 3), (617, 10)],
'MV': [(472, 1), (472, 2)],
'MW': [(650, 1), (650, 2), (650, 10)],
'MX': [(334, 1),
(334, 10),
(334, 20),
(334, 30),
(334, 50),
(334, 60),
(334, 66),
(334, 70),
(334, 80),
(334, 90),
(334, 140)],
'MY': [(502, 10),
(502, 11),
(502, 12),
(502, 13),
(502, 14),
(502, 150),
(502, 152),
(502, 153),
(502, 154),
(502, 156),
(502, 157),
(502, 16),
(502, 17),
(502, 18),
(502, 19),
(502, 20)],
'MZ': [(643, 1), (643, 3), (643, 4)],
'NA': [(649, 1), (649, 2), (649, 3), (649, 4), (649, 5), (649, 6)],
'NC': [(546, 1)],
'NE': [(614, 1), (614, 2), (614, 3), (614, 4)],
'NG': [(621, 20),
(621, 22),
(621, 24),
(621, 26),
(621, 27),
(621, 30),
(621, 40),
(621, 50),
(621, 60)],
'NI': [(710, 21), (710, 30), (710, 73)],
'NL': [(204, 1),
(204, 2),
(204, 3),
(204, 4),
(204, 5),
(204, 6),
(204, 7),
(204, 8),
(204, 9),
(204, 10),
(204, 11),
(204, 12),
(204, 13),
(204, 14),
(204, 15),
(204, 16),
(204, 17),
(204, 18),
(204, 19),
(204, 20),
(204, 21),
(204, 22),
(204, 23),
(204, 24),
(204, 25),
(204, 26),
(204, 27),
(204, 28),
(204, 29),
(204, 60),
(204, 61),
(204, 62),
(204, 64),
(204, 65),
(204, 66),
(204, 67),
(204, 68),
(204, 69)],
'NO': [(242, 1),
(242, 2),
(242, 6),
(242, 8),
(242, 9),
(242, 10),
(242, 11),
(242, 12),
(242, 14),
(242, 20),
(242, 21),
(242, 22),
(242, 23),
(242, 25),
(242, 90),
(242, 99)],
'NP': [(429, 1), (429, 2), (429, 3), (429, 4)],
'NR': [(542, 2), (536, 2)],
'NU': [(555, 1)],
'NZ': [(530, 1), (530, 3), (530, 5), (530, 6), (530, 7), (530, 24)],
'OM': [(422, 2), (422, 3), (422, 4)],
'PA': [(714, 1), (714, 2), (714, 20), (714, 3), (714, 4)],
'PE': [(716, 6), (716, 7), (716, 10), (716, 15), (716, 17)],
'PF': [(547, 5), (547, 15), (547, 20)],
'PG': [(537, 1), (537, 2), (537, 3)],
'PH': [(515, 2), (515, 3), (515, 5), (515, 11), (515, 24), (515, 88)],
'PK': [(410, 1),
(410, 2),
(410, 3),
(410, 4),
(410, 5),
(410, 6),
(410, 7),
(410, 8)],
'PL': [(260, 1),
(260, 2),
(260, 3),
(260, 6),
(260, 7),
(260, 9),
(260, 10),
(260, 11),
(260, 12),
(260, 13),
(260, 15),
(260, 16),
(260, 17),
(260, 18),
(260, 22),
(260, 24),
(260, 32),
(260, 33),
(260, 34),
(260, 35),
(260, 37),
(260, 38),
(260, 39),
(260, 40),
(260, 41),
(260, 42),
(260, 43),
(260, 44),
(260, 45),
(260, 46),
(260, 47),
(260, 48),
(260, 49)],
'PM': [(308, 1), (308, 2)],
'PR': [(330, 0), (330, 110), (330, 120)],
'PT': [(268, 1),
(268, 2),
(268, 3),
(268, 4),
(268, 6),
(268, 7),
(268, 11),
(268, 12),
(268, 13),
(268, 80)],
'PW': [(552, 1), (552, 2)],
'PY': [(744, 1), (744, 2), (744, 3), (744, 4), (744, 5), (744, 6)],
'QA': [(427, 1), (427, 2), (427, 5), (427, 6)],
'RE': [(647, 0), (647, 1), (647, 2), (647, 3), (647, 4), (647, 10)],
'RO': [(226, 1),
(226, 3),
(226, 5),
(226, 6),
(226, 10),
(226, 11),
(226, 15),
(226, 16)],
'RS': [(220, 1), (220, 3), (220, 5), (220, 7), (220, 9), (220, 11)],
'RU': [(250, 1),
(250, 2),
(250, 3),
(250, 5),
(250, 6),
(250, 7),
(250, 8),
(250, 9),
(250, 11),
(250, 12),
(250, 15),
(250, 16),
(250, 17),
(250, 20),
(250, 21),
(250, 22),
(250, 23),
(250, 29),
(250, 32),
(250, 33),
(250, 34),
(250, 35),
(250, 38),
(250, 50),
(250, 54),
(250, 60),
(250, 62),
(250, 99)],
'RW': [(635, 10), (635, 13), (635, 14), (635, 17)],
'SA': [(420, 1), (420, 3), (420, 4), (420, 5), (420, 21)],
'SB': [(540, 1), (540, 2)],
'SC': [(633, 1), (633, 10)],
'SD': [(634, 1), (634, 2), (634, 3), (634, 5), (634, 7), (634, 9)],
'SE': [(240, 1),
(240, 2),
(240, 3),
(240, 4),
(240, 5),
(240, 6),
(240, 7),
(240, 9),
(240, 10),
(240, 11),
(240, 12),
(240, 13),
(240, 14),
(240, 15),
(240, 16),
(240, 17),
(240, 18),
(240, 19),
(240, 20),
(240, 21),
(240, 22),
(240, 24),
(240, 25),
(240, 26),
(240, 27),
(240, 28),
(240, 29),
(240, 30),
(240, 31),
(240, 32),
(240, 33),
(240, 35),
(240, 36),
(240, 37),
(240, 38),
(240, 39),
(240, 40),
(240, 41),
(240, 42),
(240, 43),
(240, 44),
(240, 45),
(240, 46),
(240, 47),
(240, 60),
(240, 61)],
'SG': [(525, 1),
(525, 3),
(525, 5),
(525, 6),
(525, 7),
(525, 8),
(525, 9),
(525, 10),
(525, 12)],
'SH': [(658, 1)],
'SI': [(293, 10), (293, 20), (293, 40), (293, 41), (293, 64), (293, 70)],
'SK': [(231, 1),
(231, 2),
(231, 3),
(231, 4),
(231, 5),
(231, 6),
(231, 7),
(231, 8),
(231, 99)],
'SL': [(619, 1),
(619, 2),
(619, 3),
(619, 5),
(619, 6),
(619, 7),
(619, 9),
(619, 25),
(619, 40),
(619, 50)],
'SM': [(292, 1)],
'SN': [(608, 1), (608, 2), (608, 3), (608, 4)],
'SO': [(637, 1),
(637, 4),
(637, 10),
(637, 20),
(637, 50),
(637, 30),
(637, 57),
(637, 60),
(637, 67),
(637, 71),
(637, 82)],
'SR': [(746, 2), (746, 3), (746, 5)],
'SS': [(659, 2), (659, 3), (659, 4), (659, 6), (659, 7)],
'ST': [(626, 1), (626, 2)],
'SV': [(706, 1), (706, 2), (706, 3), (706, 4), (706, 5)],
'SY': [(417, 1), (417, 2), (417, 9)],
'SZ': [(653, 1), (653, 2), (653, 10)],
'TC': [(376, 350), (376, 352), (376, 360)],
'TD': [(622, 1), (622, 2), (622, 3), (622, 7)],
'TG': [(615, 1), (615, 3)],
'TH': [(520, 0),
(520, 3),
(520, 4),
(520, 5),
(520, 9),
(520, 15),
(520, 18),
(520, 20),
(520, 47)],
'TJ': [(436, 1), (436, 2), (436, 3), (436, 4), (436, 5), (436, 10), (436, 12)],
'TK': [(554, 1)],
'TL': [(514, 1), (514, 2), (514, 3)],
'TM': [(438, 1), (438, 2), (438, 3)],
'TN': [(605, 1), (605, 2), (605, 3)],
'TO': [(539, 1), (539, 43), (539, 88)],
'TR': [(286, 1), (286, 2), (286, 3)],
'TT': [(374, 12), (374, 130)],
'TV': [(553, 1)],
'TW': [(466, 1),
(466, 2),
(466, 3),
(466, 5),
(466, 6),
(466, 9),
(466, 10),
(466, 11),
(466, 12),
(466, 88),
(466, 89),
(466, 90),
(466, 92),
(466, 97)],
'TZ': [(640, 2),
(640, 3),
(640, 4),
(640, 5),
(640, 7),
(640, 8),
(640, 9),
(640, 11),
(640, 13)],
'UA': [(255, 0),
(255, 1),
(255, 3),
(255, 4),
(255, 6),
(255, 7),
(255, 21),
(255, 25)],
'UG': [(641, 1),
(641, 4),
(641, 6),
(641, 10),
(641, 11),
(641, 14),
(641, 16),
(641, 18),
(641, 22),
(641, 26),
(641, 33),
(641, 40),
(641, 44)],
'US': [(310, 59),
(310, 32),
(310, 33),
(310, 140),
(310, 370),
(310, 400),
(310, 480),
(310, 110),
(310, 4),
(310, 5),
(310, 6),
(310, 12),
(310, 13),
(310, 14),
(310, 15),
(310, 20),
(310, 30),
(310, 34),
(310, 35),
(310, 50),
(310, 53),
(310, 54),
(310, 60),
(310, 66),
(310, 70),
(310, 80),
(310, 90),
(310, 100),
(310, 120),
(310, 130),
(310, 150),
(310, 160),
(310, 170),
(310, 180),
(310, 190),
(310, 260),
(310, 320),
(310, 330),
(310, 340),
(310, 360),
(310, 390),
(310, 410),
(310, 430),
(310, 440),
(310, 450),
(310, 460),
(310, 470),
(310, 490),
(310, 500),
(310, 510),
(310, 520),
(310, 530),
(310, 540),
(310, 550),
(310, 570),
(310, 580),
(310, 590),
(310, 600),
(310, 620),
(310, 640),
(310, 650),
(310, 670),
(310, 680),
(310, 690),
(310, 700),
(310, 710),
(310, 720),
(310, 730),
(310, 740),
(310, 750),
(310, 770),
(310, 780),
(310, 790),
(310, 820),
(310, 840),
(310, 850),
(310, 860),
(310, 880),
(310, 890),
(310, 900),
(310, 910),
(310, 920),
(310, 930),
(310, 940),
(310, 950),
(310, 960),
(310, 970),
(310, 990),
(311, 120),
(311, 250),
(311, 0),
(311, 10),
(311, 12),
(311, 20),
(311, 30),
(311, 40),
(311, 50),
(311, 60),
(311, 70),
(311, 80),
(311, 90),
(311, 100),
(311, 110),
(311, 140),
(311, 150),
(311, 170),
(311, 190),
(311, 210),
(311, 220),
(311, 230),
(311, 240),
(311, 270),
(311, 271),
(311, 272),
(311, 273),
(311, 274),
(311, 275),
(311, 276),
(311, 277),
(311, 278),
(311, 279),
(311, 280),
(311, 281),
(311, 282),
(311, 283),
(311, 284),
(311, 285),
(311, 286),
(311, 287),
(311, 288),
(311, 289),
(311, 290),
(311, 320),
(311, 330),
(311, 340),
(311, 350),
(311, 370),
(311, 380),
(311, 390),
(311, 400),
(311, 410),
(311, 420),
(311, 430),
(311, 440),
(311, 450),
(311, 460),
(311, 470),
(311, 480),
(311, 490),
(311, 530),
(311, 550),
(311, 560),
(311, 580),
(311, 590),
(311, 600),
(311, 630),
(311, 640),
(311, 650),
(311, 660),
(311, 670),
(311, 680),
(311, 690),
(311, 700),
(311, 710),
(311, 730),
(311, 740),
(311, 750),
(311, 770),
(311, 790),
(311, 800),
(311, 810),
(311, 820),
(311, 830),
(311, 840),
(311, 850),
(311, 860),
(311, 870),
(311, 880),
(311, 890),
(311, 900),
(311, 910),
(311, 920),
(311, 950),
(311, 960),
(311, 970),
(311, 980),
(311, 990),
(312, 10),
(312, 20),
(312, 30),
(312, 40),
(312, 60),
(312, 70),
(312, 80),
(312, 90),
(312, 100),
(312, 120),
(312, 130),
(312, 150),
(312, 160),
(312, 170),
(312, 180),
(312, 190),
(312, 210),
(312, 220),
(312, 240),
(312, 250),
(312, 260),
(312, 270),
(312, 280),
(312, 290),
(312, 300),
(312, 310),
(312, 320),
(312, 330),
(312, 340),
(312, 350),
(312, 360),
(312, 370),
(312, 380),
(312, 390),
(312, 400),
(312, 410),
(312, 420),
(312, 430),
(312, 440),
(312, 450),
(312, 460),
(312, 470),
(312, 480),
(312, 490),
(312, 510),
(312, 530),
(312, 550),
(312, 570),
(312, 580),
(312, 590),
(312, 600),
(312, 620),
(312, 630),
(312, 650),
(312, 670),
(312, 680),
(312, 690),
(312, 700),
(312, 710),
(312, 720),
(312, 730),
(312, 740),
(312, 750),
(312, 760),
(312, 770),
(312, 780),
(312, 790),
(312, 800),
(312, 810),
(312, 820),
(312, 830),
(312, 840),
(312, 850),
(312, 860),
(312, 870),
(312, 880),
(312, 890),
(312, 900),
(312, 910),
(312, 920),
(312, 930),
(312, 940),
(312, 950),
(312, 960),
(312, 970),
(312, 980),
(312, 990),
(313, 0),
(313, 10),
(313, 20),
(313, 30),
(313, 40),
(313, 60),
(313, 70),
(313, 80),
(313, 90),
(313, 100),
(313, 200),
(313, 210),
(313, 220),
(313, 230),
(313, 240),
(313, 250),
(313, 260),
(313, 270),
(313, 280),
(313, 290),
(313, 300),
(313, 310),
(313, 320),
(313, 330),
(313, 340),
(313, 350),
(313, 360),
(313, 370),
(313, 380),
(313, 390),
(313, 400),
(313, 410),
(316, 11)],
'UY': [(748, 0), (748, 1), (748, 3), (748, 7), (748, 10)],
'UZ': [(434, 3), (434, 4), (434, 5), (434, 6), (434, 7), (434, 8)],
'VC': [(360, 50), (360, 100), (360, 110)],
'VE': [(734, 2), (734, 3), (734, 4), (734, 6)],
'VG': [(348, 170), (348, 370), (348, 570), (348, 770)],
'VN': [(452, 1), (452, 2), (452, 4), (452, 5), (452, 7)],
'VU': [(541, 0), (541, 1), (541, 5), (541, 7)],
'WF': [(543, 1)],
'WS': [(549, 0), (549, 1), (549, 27)],
'XK': [(221, 1), (221, 2), (221, 6)],
'YE': [(421, 1), (421, 2), (421, 3), (421, 4)],
'ZA': [(655, 1),
(655, 2),
(655, 4),
(655, 5),
(655, 6),
(655, 7),
(655, 10),
(655, 11),
(655, 12),
(655, 13),
(655, 14),
(655, 17),
(655, 19),
(655, 21),
(655, 24),
(655, 25),
(655, 27),
(655, 28),
(655, 30),
(655, 31),
(655, 32),
(655, 33),
(655, 34),
(655, 35),
(655, 36),
(655, 38),
(655, 41),
(655, 46),
(655, 50),
(655, 51),
(655, 53),
(655, 73),
(655, 74),
(655, 75)],
'ZM': [(645, 1), (645, 2), (645, 3)],
'ZW': [(648, 1), (648, 3), (648, 4)]}
|
983,306 | f4998c8999c5d0367617871bbb48f3d86a2b25cd | # Generated by Django 3.2.5 on 2021-07-02 18:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace benificial's split date/time fields with combined DateTimeFields.

    Drops date_of_registration/time_of_registration and slot_date/slot_time in
    favour of single registration_timing and slot_timing columns, and adds a
    roll_number integer (default 0 so existing rows stay valid).
    NOTE(review): RemoveField discards any data already stored in the dropped
    columns — assumed acceptable when this migration was authored.
    """

    dependencies = [
        ('registration', '0002_auto_20210702_1650'),
    ]

    operations = [
        # Old split date/time columns being removed.
        migrations.RemoveField(
            model_name='benificial',
            name='date_of_registration',
        ),
        migrations.RemoveField(
            model_name='benificial',
            name='slot_date',
        ),
        migrations.RemoveField(
            model_name='benificial',
            name='slot_time',
        ),
        migrations.RemoveField(
            model_name='benificial',
            name='time_of_registration',
        ),
        # Combined replacements (nullable so existing rows migrate cleanly).
        migrations.AddField(
            model_name='benificial',
            name='registration_timing',
            field=models.DateTimeField(null=True),
        ),
        migrations.AddField(
            model_name='benificial',
            name='roll_number',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='benificial',
            name='slot_timing',
            field=models.DateTimeField(null=True),
        ),
    ]
|
983,307 | c7c2f5394655f05627bd688e873115db01212ccb | from typing import List
from aiogram.types import InlineKeyboardButton, InlineKeyboardMarkup
def arrange_inline_schema(buttons: List[InlineKeyboardButton], count: List[int]) -> InlineKeyboardMarkup:
kb = InlineKeyboardMarkup()
kb.row_width = max(count)
if sum(count) != len(buttons):
raise ValueError('Количество кнопок не совпадает со схемой')
tmplist = []
for a in count:
tmplist.append([])
for _ in range(a):
tmplist[-1].append(buttons.pop(0))
kb.inline_keyboard = tmplist
return kb
|
983,308 | 34b69e4ca05bc9cc9b81c2a44a6dc4d579915efa | # Generated by Django 3.2.2 on 2021-05-09 04:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Recreate the admissions model: create Reception, drop Receptions.

    All data fields are nullable so partially-filled admission forms can be
    saved; the four ImageFields collect an application's supporting documents.
    """

    dependencies = [
        ('polls', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Reception',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('name', models.CharField(max_length=100, null=True)),
                # NOTE(review): default 'SODEXO' is not among the Class I–VII
                # choices — looks like a copy-paste default; confirm intent.
                ('admissionClass', models.TextField(choices=[('Class I', 'Class I'), ('Class II', 'Class II'), ('Class III', 'Class III'), ('Class IV', 'Class IV'), ('Class V', 'Class V'), ('Class VI', 'Class VI'), ('Class VII', 'Class VII')], default='SODEXO', null=True)),
                ('gender', models.CharField(max_length=10, null=True)),
                ('dob', models.DateField(null=True)),
                ('email', models.EmailField(max_length=100, null=True)),
                # Parent / guardian contact details.
                ('father_name', models.CharField(max_length=100, null=True)),
                ('f_occupation', models.CharField(max_length=100, null=True)),
                ('f_qualification', models.CharField(max_length=100, null=True)),
                ('f_mobile', models.CharField(max_length=100, null=True)),
                ('mother_name', models.CharField(max_length=100, null=True)),
                ('m_occupation', models.CharField(max_length=100, null=True)),
                ('m_qualification', models.CharField(max_length=100, null=True)),
                ('alternateNo', models.CharField(max_length=100, null=True)),
                ('l_address', models.TextField(max_length=5000, null=True)),
                ('p_address', models.TextField(max_length=5000, null=True)),
                ('testDate', models.DateTimeField(auto_now_add=True)),
                ('testTime', models.DateTimeField(auto_now_add=True)),
                # Uploaded supporting documents for the application.
                ('studentImage', models.ImageField(null=True, upload_to='static/images')),
                ('studentAadhar', models.ImageField(null=True, upload_to='static/images')),
                ('studentDOBCer', models.ImageField(null=True, upload_to='static/images')),
                ('studentTC', models.ImageField(null=True, upload_to='static/images')),
            ],
        ),
        migrations.DeleteModel(
            name='Receptions',
        ),
    ]
|
983,309 | 766afda6f8c80994ea98de6ce8b5bb232c2ec166 | # Copyright 2019-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import json
from unittest.mock import MagicMock, Mock, patch
import pytest
from botocore.exceptions import ClientError
import braket._schemas as braket_schemas
import braket._sdk as braket_sdk
from braket.aws import AwsSession
TEST_S3_OBJ_CONTENTS = {
"TaskMetadata": {
"Id": "blah",
}
}
@pytest.fixture
def boto_session():
    """Return a mocked boto3 session pinned to the us-west-2 region."""
    _boto_session = Mock()
    _boto_session.region_name = "us-west-2"
    return _boto_session
@pytest.fixture
def aws_session(boto_session):
    """Return an AwsSession backed by the mocked boto session and a mock Braket client."""
    return AwsSession(boto_session=boto_session, braket_client=Mock())
def test_initializes_boto_client_if_required(boto_session):
    """When no braket_client is supplied, one is built via boto_session.client."""
    AwsSession(boto_session=boto_session)
    # config=None: no custom botocore Config was supplied.
    boto_session.client.assert_called_with("braket", config=None)
def test_uses_supplied_braket_client():
    """An explicitly supplied Braket client is used as-is, not re-created."""
    session_mock = Mock()
    session_mock.region_name = "foobar"
    client_mock = Mock()
    session = AwsSession(boto_session=session_mock, braket_client=client_mock)
    assert session.braket_client == client_mock
def test_config(boto_session):
    """A custom botocore Config is forwarded to the generated braket client."""
    custom_config = Mock()
    AwsSession(boto_session=boto_session, config=custom_config)
    boto_session.client.assert_called_with("braket", config=custom_config)
@patch("os.path.exists")
@pytest.mark.parametrize(
    "metadata_file_exists, initial_user_agent",
    [
        (True, None),
        (False, None),
        (True, ""),
        (False, ""),
        (True, "Boto3/1.17.18 Python/3.7.10"),
        (False, "Boto3/1.17.18 Python/3.7.10 exec-env/AWS_Lambda_python3.7"),
    ],
)
def test_populates_user_agent(os_path_exists_mock, metadata_file_exists, initial_user_agent):
    """AwsSession appends SDK/schema/notebook tags to the client user agent.

    os.path.exists is patched to simulate the SageMaker notebook metadata
    file being present or absent; the initial user agent covers None, empty,
    and typical boto-generated strings.
    """
    boto_session = Mock()
    boto_session.region_name = "foobar"
    braket_client = Mock()
    # Seed the pre-existing user agent on the mocked client config.
    braket_client._client_config.user_agent = initial_user_agent
    nbi_metadata_path = "/opt/ml/metadata/resource-metadata.json"
    os_path_exists_mock.return_value = metadata_file_exists
    aws_session = AwsSession(boto_session=boto_session, braket_client=braket_client)
    # NOTE(review): "0 if metadata_file_exists else None" mirrors how the SDK
    # reports the notebook-instance tag — confirm against AwsSession if this
    # assertion ever drifts.
    expected_user_agent = (
        f"{initial_user_agent} BraketSdk/{braket_sdk.__version__} "
        f"BraketSchemas/{braket_schemas.__version__} "
        f"NotebookInstance/{0 if metadata_file_exists else None}"
    )
    os_path_exists_mock.assert_called_with(nbi_metadata_path)
    assert aws_session.braket_client._client_config.user_agent == expected_user_agent
def test_retrieve_s3_object_body_success(boto_session):
    """retrieve_s3_object_body reads, decodes and returns the S3 object body.

    Also verifies the S3 resource is created with the session's config: None
    by default, or the explicitly supplied Config in the second call.
    """
    bucket_name = "braket-integ-test"
    filename = "tasks/test_task_1.json"
    # Wire up resource -> Object -> get() -> Body -> read() -> decode().
    mock_resource = Mock()
    boto_session.resource.return_value = mock_resource
    mock_object = Mock()
    mock_resource.Object.return_value = mock_object
    mock_body_object = Mock()
    mock_object.get.return_value = {"Body": mock_body_object}
    mock_read_object = Mock()
    mock_body_object.read.return_value = mock_read_object
    mock_read_object.decode.return_value = json.dumps(TEST_S3_OBJ_CONTENTS)
    # BUG FIX: a stray no-op `json.dumps(TEST_S3_OBJ_CONTENTS)` bare
    # expression was removed here.
    aws_session = AwsSession(boto_session=boto_session)
    return_value = aws_session.retrieve_s3_object_body(bucket_name, filename)
    assert return_value == json.dumps(TEST_S3_OBJ_CONTENTS)
    boto_session.resource.assert_called_with("s3", config=None)

    config = Mock()
    AwsSession(boto_session=boto_session, config=config).retrieve_s3_object_body(
        bucket_name, filename
    )
    boto_session.resource.assert_called_with("s3", config=config)
@pytest.mark.xfail(raises=ClientError)
def test_retrieve_s3_object_body_client_error(boto_session):
    """A missing S3 key surfaces as a ClientError from retrieve_s3_object_body."""
    s3_resource = Mock()
    boto_session.resource.return_value = s3_resource
    s3_object = Mock()
    s3_resource.Object.return_value = s3_object
    s3_object.get.side_effect = ClientError(
        {"Error": {"Code": "ValidationException", "Message": "NoSuchKey"}}, "Operation"
    )
    session = AwsSession(boto_session=boto_session)
    session.retrieve_s3_object_body("braket-integ-test", "tasks/test_task_1.json")
def test_get_device(boto_session):
    """get_device returns the Braket client's response unchanged."""
    client = Mock()
    device_metadata = {"deviceArn": "arn1", "deviceName": "name1"}
    client.get_device.return_value = device_metadata
    session = AwsSession(boto_session=boto_session, braket_client=client)
    assert session.get_device("arn1") == device_metadata
def test_cancel_quantum_task(aws_session):
    """cancel_quantum_task forwards the ARN and returns None."""
    task_arn = "foo:bar:arn"
    aws_session.braket_client.cancel_quantum_task.return_value = {"quantumTaskArn": task_arn}
    assert aws_session.cancel_quantum_task(task_arn) is None
    aws_session.braket_client.cancel_quantum_task.assert_called_with(quantumTaskArn=task_arn)
def test_create_quantum_task(aws_session):
    """create_quantum_task passes kwargs through and returns the new task ARN."""
    task_arn = "foo:bar:arn"
    aws_session.braket_client.create_quantum_task.return_value = {"quantumTaskArn": task_arn}
    request_kwargs = {
        "backendArn": "arn:aws:us-west-2:abc:xyz:abc",
        "cwLogGroupArn": "arn:aws:us-west-2:abc:xyz:abc",
        "destinationUrl": "http://s3-us-west-2.amazonaws.com/task-output-derebolt-1/output.json",
        "program": {"ir": '{"instructions":[]}', "qubitCount": 4},
    }
    assert aws_session.create_quantum_task(**request_kwargs) == task_arn
    aws_session.braket_client.create_quantum_task.assert_called_with(**request_kwargs)
def test_get_quantum_task(aws_session):
    """get_quantum_task returns the client response for the requested ARN."""
    task_arn = "foo:bar:arn"
    response = {"quantumTaskArn": task_arn}
    aws_session.braket_client.get_quantum_task.return_value = response
    assert aws_session.get_quantum_task(task_arn) == response
    aws_session.braket_client.get_quantum_task.assert_called_with(quantumTaskArn=task_arn)
def test_get_quantum_task_retry(aws_session):
    """get_quantum_task retries through ResourceNotFound and Throttling errors.

    The first two calls raise retryable ClientErrors; the third succeeds and
    its response is returned.
    """
    arn = "foo:bar:arn"
    return_value = {"quantumTaskArn": arn}
    resource_not_found_response = {
        "Error": {
            "Code": "ResourceNotFoundException",
            "Message": "unit-test-error",
        }
    }
    throttling_response = {
        "Error": {
            "Code": "ThrottlingException",
            "Message": "unit-test-error",
        }
    }
    aws_session.braket_client.get_quantum_task.side_effect = [
        ClientError(resource_not_found_response, "unit-test"),
        ClientError(throttling_response, "unit-test"),
        return_value,
    ]
    assert aws_session.get_quantum_task(arn) == return_value
    aws_session.braket_client.get_quantum_task.assert_called_with(quantumTaskArn=arn)
    # BUG FIX: this comparison was a bare expression and never asserted.
    assert aws_session.braket_client.get_quantum_task.call_count == 3
def test_get_quantum_task_fail_after_retries(aws_session):
    """When every attempt raises a retryable error, the last one propagates."""
    resource_not_found_response = {
        "Error": {
            "Code": "ResourceNotFoundException",
            "Message": "unit-test-error",
        }
    }
    throttling_response = {
        "Error": {
            "Code": "ThrottlingException",
            "Message": "unit-test-error",
        }
    }
    aws_session.braket_client.get_quantum_task.side_effect = [
        ClientError(resource_not_found_response, "unit-test"),
        ClientError(throttling_response, "unit-test"),
        ClientError(throttling_response, "unit-test"),
    ]
    with pytest.raises(ClientError):
        aws_session.get_quantum_task("some-arn")
    # BUG FIX: this comparison was a bare expression and never asserted.
    assert aws_session.braket_client.get_quantum_task.call_count == 3
def test_get_quantum_task_does_not_retry_other_exceptions(aws_session):
    """Non-retryable ClientError codes propagate immediately without retries."""
    exception_response = {
        "Error": {
            "Code": "SomeOtherException",
            "Message": "unit-test-error",
        }
    }
    aws_session.braket_client.get_quantum_task.side_effect = [
        ClientError(exception_response, "unit-test"),
    ]
    with pytest.raises(ClientError):
        aws_session.get_quantum_task("some-arn")
    # BUG FIX: this comparison was a bare expression and never asserted.
    assert aws_session.braket_client.get_quantum_task.call_count == 1
# Canonical device records shared by the search_devices cases below; they were
# previously duplicated verbatim throughout the parametrize table.
_SIM_ONLINE = {
    "deviceArn": "arn1",
    "deviceName": "name1",
    "deviceType": "SIMULATOR",
    "deviceStatus": "ONLINE",
    "providerName": "pname1",
}
_SIM_OFFLINE = {
    "deviceArn": "arn2",
    "deviceName": "name2",
    "deviceType": "SIMULATOR",
    "deviceStatus": "OFFLINE",
    "providerName": "pname1",
}
_QPU_ONLINE = {
    "deviceArn": "arn3",
    "deviceName": "name3",
    "deviceType": "QPU",
    "deviceStatus": "ONLINE",
    "providerName": "pname2",
}


@pytest.mark.parametrize(
    "input,output",
    [
        # No filters: everything comes back.
        ({}, [_SIM_ONLINE, _SIM_OFFLINE, _QPU_ONLINE]),
        ({"names": ["name1"]}, [_SIM_ONLINE]),
        ({"types": ["SIMULATOR"]}, [_SIM_ONLINE, _SIM_OFFLINE]),
        ({"statuses": ["ONLINE"]}, [_SIM_ONLINE, _QPU_ONLINE]),
        ({"provider_names": ["pname2"]}, [_QPU_ONLINE]),
        (
            {
                "provider_names": ["pname2"],
                "types": ["QPU"],
                "statuses": ["ONLINE"],
                "names": ["name3"],
            },
            [_QPU_ONLINE],
        ),
        (
            {
                "provider_names": ["pname1"],
                "types": ["SIMULATOR"],
                "statuses": ["ONLINE"],
            },
            [_SIM_ONLINE],
        ),
    ],
)
def test_search_devices(input, output, aws_session):
    """search_devices applies name/type/status/provider filters client-side."""
    return_value = [{"devices": [_SIM_ONLINE, _SIM_OFFLINE, _QPU_ONLINE]}]
    mock_paginator = Mock()
    mock_iterator = MagicMock()
    aws_session.braket_client.get_paginator.return_value = mock_paginator
    mock_paginator.paginate.return_value = mock_iterator
    mock_iterator.__iter__.return_value = return_value
    assert aws_session.search_devices(**input) == output
def test_search_devices_arns(aws_session):
    """Searching by explicit ARNs forwards a deviceArn filter to the paginator."""
    devices = [
        {
            "deviceArn": "arn1",
            "deviceName": "name1",
            "deviceType": "SIMULATOR",
            "deviceStatus": "ONLINE",
            "providerName": "pname1",
        }
    ]
    paginator = Mock()
    page_iterator = MagicMock()
    aws_session.braket_client.get_paginator.return_value = paginator
    paginator.paginate.return_value = page_iterator
    page_iterator.__iter__.return_value = [{"devices": devices}]
    assert aws_session.search_devices(arns=["arn1"]) == devices
    paginator.paginate.assert_called_with(
        filters=[
            {"name": "deviceArn", "values": ["arn1"]},
        ],
        PaginationConfig={"MaxItems": 100},
    )
|
983,310 | e186917f992656150c501db6876b939b40696192 | from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .forms import HDFSBrowserForm
from .functions import handle_uploaded_file
@login_required
def fileBrowser(request):
    """Render the HDFS upload form and handle file submissions.

    On a valid POST, stores the uploaded file via handle_uploaded_file,
    flashes a success message and redirects (post/redirect/get). On GET —
    or when validation fails — renders the form (bound with errors in the
    failure case).
    """
    if request.method == 'POST':
        files = HDFSBrowserForm(request.POST, request.FILES)
        if files.is_valid():
            handle_uploaded_file(request.FILES['file'])
            # Plain string: the original used an f-string with no placeholders.
            messages.success(request, 'Load datas success')
            return redirect('filebrowser')
    else:
        files = HDFSBrowserForm()
    return render(request, 'browser/index.html', {'form': files})
983,311 | c23d978b691a51bb553ee16ae4ab3931f03edb3e | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
# Register your models here.
from .models import BlogPost
class MomentAdmin(admin.ModelAdmin):
    """Admin configuration for BlogPost entries.

    The edit form is grouped into a content section (title/body/timestamp)
    and an author section; the changelist shows title, author and timestamp.
    """

    # Fieldset labels are user-facing and intentionally localized (Chinese).
    fieldsets = (
        ("消息内容", {  # "Message content"
            'fields': ('title', 'body', 'timestamp')
        }),
        ("用户消息", {  # "User message"
            'fields': ('author',),
        }),
    )
    # Columns shown in the admin changelist.
    list_display = (
        'title',
        'author',
        'timestamp'
    )


admin.site.register(BlogPost, MomentAdmin)
|
983,312 | ed30cca5d76a61d782e6040fd023fc50f810ab90 | """
Example usage for the TurbSimFile class.
- Read a TurbSim file and display main properties
- Extract time series at a given y, z location and plot it
- Extract a horizontal plane and plot it
- Compute vertical profile/shear and plot it
- Fit vertical profile with a power law
- Compute cross correlation in y and z directions
- Modify field (add a constant velocity in the streamwise direction) and write to disk
- Write to Mann Box format
- Write time series at given locations to a CSV file
NOTE: this example uses an extremely small TurbSim box.
Results will be more "physical" on a more realistic box.
"""
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pyFAST.input_output import TurbSimFile
def main():
    """Demonstrate the TurbSimFile API end to end.

    Reads a .bts box, extracts and plots time series, a horizontal
    plane and vertical profiles, fits a power law, computes
    cross-correlations, then modifies the field and writes it out in
    TurbSim, Mann-box and CSV probe formats.
    """
    MyDir = os.path.dirname(__file__)

    # --- Read a TurbSim file and display main properties
    filename = os.path.join(MyDir, '../tests/example_files/TurbSim_WithTwr.bts')
    ts = TurbSimFile(filename)
    print(ts)

    # --- Extract time series at a given y, z location and plot it
    # Method 1 - use object method
    u,v,w = ts.valuesAt(y=0, z=90, method='nearest')
    # Method 2 - use data directly
    # (ts['u'] appears indexed [component, time, iy, iz] -- see usage below)
    iy, iz = ts.closestPoint(y=0, z=90)
    u2,v2,w2 = ts['u'][0, :, iy, iz], ts['u'][1, :, iy, iz], ts['u'][2, :, iy, iz]
    fig,ax = plt.subplots(1, 1)
    ax.plot(ts.t, u, label='u')
    ax.plot(ts.t, v, label='v')
    ax.plot(ts.t, w, label='w')
    # Dashed black lines from method 2 should overlay method 1 exactly.
    ax.plot(ts.t, u2, 'k--')
    ax.plot(ts.t, v2, 'k--')
    ax.plot(ts.t, w2, 'k--')
    ax.legend()
    ax.set_xlabel('Time [s]')
    ax.set_ylabel('Velocity [m/s]')
    ax.set_title('Velocity at y=0 z=90')

    # --- Extract a horizontal plane and plot it
    U, V, W = ts.horizontalPlane(z=90)
    T, Y = np.meshgrid(ts.t, ts.y)
    fig,ax = plt.subplots(1, 1)
    ax.contourf(T, Y, U.T)
    ax.set_xlabel('Time [s]')
    ax.set_ylabel('y [m]')
    ax.set_title('Velocity at z=90')

    # --- Compute vertical profile/shear and plot it
    # NOTE: the example file has only three points in y&z
    z, u_mean, u_std = ts.vertProfile(y_span='full')
    # Fit a power law
    u_fit, pfit, model, z_ref = ts.fitPowerLaw()
    print('Power law: alpha={:.5f}, u_ref={:.5f}, z_ref={:.5f}'.format(pfit[1],pfit[0],z_ref))
    print('Formula: {} '.format(model['formula']))
    fig,ax = plt.subplots(1, 1)
    ax.plot(u_mean[0,:], z, label='u')
    ax.plot(u_mean[1,:], z, label='v')
    ax.plot(u_mean[2,:], z, label='w')
    ax.plot(u_fit, z, 'k--', label='u fit (power law)')
    if 'uTwr' in ts:
        # Tower points, when present, extend the profile below the box.
        ax.plot(np.mean(ts['uTwr'][0,:,:], axis=0), ts['zTwr'], label='u on tower')
    ax.legend()
    ax.set_xlabel('Velocity [m/s]')
    ax.set_ylabel('z [m]')
    ax.set_title('Vertical profiles (averaged over y and time)')

    # --- Compute cross correlation in y and z directions
    # NOTE: the example file has only three points in y&z
    iy0, iz0 = ts.iMid # Index at middle of the box
    y, rho_uu_y, rho_vv_y, rho_ww_y = ts.crosscorr_y(iy0, iz0)
    z, rho_uu_z, rho_vv_z, rho_ww_z = ts.crosscorr_z(iy0, iz0)
    fig,ax = plt.subplots(1, 1)
    ax.plot(y, rho_uu_y, label=r'rho_uu}')
    ax.plot(y, rho_vv_y, label=r'rho_vv}')
    ax.plot(y, rho_ww_y, label=r'rho_ww}')
    ax.set_xlabel('y [m]')
    ax.set_ylabel('Cross correlation')
    ax.set_title('Cross correlation in y direction at middle of the box')
    ax.legend()

    # --- Convert to "DataFrame"
    # Contains relevant time series like vertical profile, midline, coherence, cross correlation
    # dfs = ts.toDataFrame()

    # --- Modify field and write to disk
    ts['u'][0,:,:,:] += 1 # Adding 1 m/s in the streamwise
    ts.makePeriodic() # Make the field periodic by mirroring it in the streamwise direction
    ts.write('_MyNewTurbBox.bts')

    # --- Write to Mann Box format
    ts.toMannBox()

    # --- Write time series at given locations to a CSV file
    ts.writeProbes('_Probes.csv', yProbe=[0], zProbe=[65,115])
if __name__ == "__main__":
    main()
    plt.show()

if __name__=='__test__':
    main()
    # Clean up files generated by main().  Remove each file in its own
    # try block so that one missing file does not prevent the remaining
    # ones from being deleted (the original wrapped all removals in a
    # single bare try/except, which aborted the cleanup on first error
    # and silently swallowed every exception type).
    for _outfile in ('_MyNewTurbBox.bts',
                     '_MyNewTurbBox_198x3x4.u',
                     '_MyNewTurbBox_198x3x4.v',
                     '_MyNewTurbBox_198x3x4.w',
                     '_Probes.csv'):
        try:
            os.remove(_outfile)
        except OSError:
            # Best-effort cleanup: missing file is fine.
            pass
|
983,313 | 038b116914eb74a63112fe1191dc960976d6a5ee | import numpy as np
from PIL import Image
from scipy import *
from scipy.sparse import *
from scipy.sparse.linalg import *
import time
from eval import bde, region_based_eval
from parser import import_berkeley
from tqdm import tqdm
'''Compute difference between two pixels in the np array of rgb tuples by euclidean
distance between rgb values and euclidean distance between pixel locations'''
def differenceMetricRGBAndDist(coords1, coords2, pixels):
    """Edge weight between two pixels.

    Combines the squared Euclidean distance in RGB space with the
    squared Euclidean distance between the pixel coordinates, each
    scaled by a tunable weight (both currently 1, the sigma values of
    the Shi & Malik edge-weight equation).

    NOTE kept from the original authors: the exponential form specified
    in the Shi & Malik paper produced static-looking images, and this
    linear form assigns *higher* weights to less similar pixels, but it
    is the version used to produce all reported results.
    """
    p1 = pixels[coords1[0], coords1[1]]
    p2 = pixels[coords2[0], coords2[1]]
    # Squared RGB distance between the two pixels.
    colorTerm = (p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2 + (p1[2] - p2[2]) ** 2
    # Squared spatial distance between the two pixel locations.
    spatialTerm = (coords1[0] - coords2[0]) ** 2 + (coords1[1] - coords2[1]) ** 2
    featureWeight = 1
    spatialWeight = 1
    return featureWeight * colorTerm + spatialWeight * spatialTerm
'''Compute difference between two pixels in the np array of rgb tuples by euclidean
distance between rgb values'''
def differenceMetricRGB(coords1, coords2, pixels):
    """Edge weight between two pixels: squared Euclidean distance in
    RGB space only (no spatial term).

    NOTE kept from the original authors: this assigns higher weights to
    less similar pixels rather than lower ones, but it is the version
    that was used to run on all the reported images.
    """
    p1 = pixels[coords1[0], coords1[1]]
    p2 = pixels[coords2[0], coords2[1]]
    dr = p1[0] - p2[0]
    dg = p1[1] - p2[1]
    db = p1[2] - p2[2]
    return dr * dr + dg * dg + db * db
'''D is the diagonal matrix and W is the adjacency matrix of edge weights (both csc sparse matrices)
uses scipy sparse linalg eigenvector solver for the Lanczos method
returns a tuple containing an array of eigenvalues and an array of eigenvectors'''
def findEigens(D, W):
    """Solve the spectral clustering eigenproblem with the Lanczos method.

    Computes eigenpairs of inv(D) * (D - W) -- the random-walk
    normalized Laplacian -- using scipy's sparse eigsh solver.

    Returns a tuple (eigenvalues, eigenvectors).
    """
    laplacian = D - W
    return eigsh(inv(D).dot(laplacian))
'''Takes in an np array of pixels and returns a sparse adjacency matrix (csc_matrix) with edge weights for this image'''
def pixelsToAdjMatrix(pixels, edge_weight_function, connectivity):
    """Build the sparse (csc) adjacency matrix of edge weights for an image.

    Each pixel is compared with every pixel in a window of radius
    `connectivity` around it; weights come from edge_weight_function.
    Pixels are flattened row-major: pixel at (row j, col i) -> j*x + i.
    """
    r = connectivity # 12 was the value used in running our experiments
    y,x,_ = pixels.shape #assuming tuples of 3 rgb values are the third coordinate of the shape
    N = x * y
    # COO-style triplet lists; csc_matrix sums duplicate (row, col) entries.
    row = []
    col = []
    data = []
    #go through each pixel in the image and compare it to the r pixels on all sides of it
    for i in tqdm(range(x)):
        for j in range(y):
            # compare (i,j) to each pixel in range r arround it
            # NOTE(review): range(i-r, i+r) is asymmetric (it excludes
            # i+r but includes i-r) and includes the pixel itself when
            # k==i and l==j -- confirm this is intended.
            for k in range(i-r,i+r): # x coordinate of offset pixel
                for l in range(j-r,j+r): # y coordinate of offset pixel
                    if k >= 0 and l >= 0 and k < x and l < y: # make sure this pixel isn't out of bounds
                        diff = edge_weight_function((j,i), (l,k), pixels)
                        row.append(j*x + i) #flattened index of pixel (j,i)
                        col.append(l*x + k) #flattened index of pixel (l,k)
                        data.append(diff) #the value that belongs at (j*x + i, l*x + k) in the adjacency matrix
    return csc_matrix((np.array(data), (np.array(row), np.array(col))), shape=(N, N))
'''Takes in a csc_matrix and returns a diagonal matrix (scipy.sparse.dia.dia_matrix) converted to a csc_matrix'''
def adjMatrixToDiagMatrix(matrix):
    """Return the degree matrix D of `matrix` as a csc_matrix.

    Each diagonal entry is the corresponding column sum of the
    adjacency matrix.  Zero sums are replaced by 1 so that D stays
    non-singular (the eigensolver inverts D); whether this is the best
    handling of isolated pixels was left undecided by the authors.
    """
    degrees = np.asarray(matrix.sum(axis=0)).ravel()
    degrees[degrees == 0] = 1
    return diags(degrees, offsets=0).tocsc()
'''Takes in an image to segment by mincut, returns an np array of the segment numbers
for each pixel'''
def mincut(img, edge_weight_function, connectivity=12):
    """Segment `img` into two regions by spectral min-cut.

    Builds the weighted adjacency and degree matrices, solves the
    normalized-Laplacian eigenproblem, and thresholds the second
    eigenvector at zero to produce a binary label image.

    Returns a uint8 array with the same height/width as the image,
    containing 0/1 segment labels per pixel.
    """
    data = np.array(img).astype("int")
    height, width, _ = data.shape
    adjacency = pixelsToAdjMatrix(data, edge_weight_function, connectivity)
    degree = adjMatrixToDiagMatrix(adjacency)
    eigen_pairs = findEigens(degree, adjacency)
    # Second-smallest eigenvector (second column of the eigenvector array).
    fiedler = eigen_pairs[1][:, 1]
    # Indicator: 1 where the eigenvector is positive, 0 elsewhere.
    labels = np.where(fiedler > 0, 1, 0)
    labels = np.reshape(labels, (height, width))
    return labels.astype('uint8')
if __name__ == "__main__":
    # Sample run of mincut on one Berkeley segmentation dataset image.
    filename = "15088.jpg"
    img = Image.open(filename)
    print(filename)
    start=time.time()
    array = mincut(img, differenceMetricRGB)
    stop=time.time()
    print("total runtime is", stop-start)
    # Scale the 0/1 segment labels to 0/255 for an 8-bit grayscale image.
    img = Image.fromarray(array*255, mode="L")
    img.show()
    img.save("15088-12-fweight.jpg", "JPEG")
    # Compare against the human-labelled ground-truth segmentation.
    groundTruth = import_berkeley("15088.seg")
    print("region based is ", region_based_eval(groundTruth, array))
    print("edge based is ", bde(groundTruth, array))
|
983,314 | c39c72527e73b7dd8773fd6b45c566463611cb0f | ## LAMMPS Dump: python 2 script
# This script was created to read a lammps dump file and return values to a
# lammps run as an include in a lammps input script.
# the script reads the dump file produced by lammps using a run 0 command
# then reads all the lines after the atoms id, line number 9. it then creates an
# include file called celist which makes the list of atoms to be turned into ce3+
# find the dump files
# Dump files to read: one or more lists of Ce atom ids plus the vacancy
# ids file last.  NOTE: this is a Python 2 script (later branches rely
# on integer division such as cesum/3).
names=["ceids1","vacids"]
#names=["ceids1","ceids2","vacids"]
#names=["ceids1","ceids2","ceids3","vacids"]
#names=["ceids1","ceids2","ceids3","ceids4","vacids"]
counts=[]   # number of atom-id lines found in each file
export=[]   # atom ids from all files, concatenated in input order
# read the files into export
for name in names:
    fpath=name
    counter=0
    fileopen=open(fpath)
    for line in fileopen:
        # Skip the 9-line LAMMPS dump header; atom ids start at line 9.
        if counter >= 9:
            line=line.strip()
            line=int(line)
            export.append(line)
        counter+=1
    fileopen.close()
    counts.append(counter-9)
# parse out export into ceidslist ( all the cerium ids )
idlength=3*(len(names)-1)   # NOTE(review): computed but never used below -- confirm
cesum=0
# Total count of ce ids: every file except the trailing vacids file.
for i in range( len(names)-1 ):
    ce=counts[i]
    cesum+=ce
# find the number of unique id's
# NOTE(review): uniqueID actually counts *duplicate* adjacent pairs in
# the sorted list, not the number of unique ids -- confirm naming/intent.
uniqueID=0
sortedID=sorted(export)
for line in range(len(sortedID)-1):
    if sortedID[line] == sortedID[line+1]:
        uniqueID+=1
## if statement for the 101 surface
# Branch 1: the (101) surface special case, detected when the first file
# holds exactly 11 ce ids (NOTE(review): magic number -- confirm).
if counts[0] == 11:
    array=[[]]*counts[1]
    counter=0
    # Group the ids three at a time; the last (partial) group is handled
    # separately below.
    for i in range(counts[1]-1):
        temp=[[]]*3
        for x in range(3):
            num=int(export[counter])
            temp[x]=num
            counter+=1
        array[i]=sorted(temp)
    # Final group has only two ids; the third slot is zeroed.
    temp=[[]]*3
    for x in range(2):
        num=int(export[counter])
        temp[x]=num
        counter+=1
    temp=sorted(temp)
    temp[2]=0
    # NOTE(review): hard-coded index 3 assumes counts[1] == 4 -- confirm.
    array[3]=temp
    fill=[[]]*counts[1]
    for i in range(counts[1]):
        # Drop the last (largest, since sorted) id of each triple.
        array[i].remove(array[i][2])
        temp=array[i]
        # make into a string to add to new file
        fill[i]=' '.join([str(x) for x in temp]) # string together each element of temp and place into array
    # create the new file: LAMMPS include that promotes the ids to Ce3+
    filenew='celist'
    listce=' '.join([str(x) for x in fill])
    fileopen=open(filenew,'w')
    fileopen.write('group ce3 id ' + listce + ' \n')
    fileopen.write('set group ce3 type 3 \n')
    fileopen.write('set type 3 charge 3 \n')
    fileopen.close()
# if there is no overlap between ce 1NN lists
elif counts[-1] == (cesum-uniqueID)/float(3):
    counterq=0
    # Write three alternative celist files, each removing a different
    # symmetric member from every sorted ce triple.
    for q in range(1,4): # make 3 celist files to pick from
        array=[[]]*(cesum/3)
        counter=0
        for i in range(cesum/3): # loop over the total number of vacancy groups
            temp=[[]]*3 # parse the data into appropriate dimensions
            for x in range(3): # make each group x number of ce atoms long
                temp[x]=export[counter] # place components of export into ce temp-array
                counter+=1
            temp=sorted(temp) # sort the list
            ## remove the atom symmetrically from each place in temp
            temp.remove(temp[q-1])
            array[i]=' '.join([str(x) for x in temp]) # string together each element of temp and place into array
        # create the new file
        fileID=str(q) # get the celist id number
        includefile='celist' # base name for the include file
        filenew=includefile+fileID # concatenate the file base name with file id
        listce=' '.join([str(x) for x in array]) # string together the array numbers
        fileopen=open(filenew,'w')
        fileopen.write('group ce3 id ' + listce + ' \n')
        fileopen.write('set group ce3 type 3 \n')
        fileopen.write('set type 3 charge 3 \n')
        fileopen.close()
# hard coded: delete ce3+ from list
elif (cesum-uniqueID)%3 != 0:
    # Fallback: the triples overlap; emit every ce id unmodified.
    temp=[[]]*cesum
    for i in range(cesum): # pull out the ce ids from export
        temp[i]=export[i] # put number into temp
    temp=sorted(temp) # sort the array
    listce=' '.join([str(x) for x in temp]) # string together each element of temp and place into array
    # create the new file
    filenew='celist'
    fileopen=open(filenew,'w')
    fileopen.write('group ce3 id ' + listce + ' \n')
    fileopen.write('set group ce3 type 3 \n')
    fileopen.write('set type 3 charge 3 \n')
    fileopen.close()
## elif statement Future work
# counts[0]%5 == 0:
# # 101
# span=counts[0]/5
# sortarray=[[]]*counts[0]
# array=[[]]*span
# counter=0
# # sort the list of ce values from highest to lowest and get all id's into sortarray
# for i in range(counts[0]): # break sortarray into x groups of 5
# num=int(export[counter]) # make each element of export a number
# sortarray[i]=num # put number into sortarray
# counter+=1 # increment the index to append sortarray
# sortarray=sorted(sortarray) # sort the array
# counter=0 # re-initialize counter
# # split up sortarray and append to array
# for i in range(span):
# temp=[[]]*5 # initialize the temporary array to append each segment into array
# for x in range(5): # loop over 5 values
# temp[x]=sortarray[counter] # append 5 values from sortarray into temp
# counter+=1 # increment counter
# ## remove the atom symmetrically from each place in temp
# temp.remove(temp[3])
# # make into a string to add to new file
# array[i]=' '.join([str(x) for x in temp]) # string together each element of temp and place into array
#
# # create the new file
# filenew='celist'
# listce=' '.join([str(x) for x in array])
# fileopen=open(filenew,'w')
# fileopen.write('group ce3 id ' + listce + ' \n')
# fileopen.write('set group ce3 type 3 \n')
# fileopen.write('set type 3 charge 3 \n')
# fileopen.close()
|
983,315 | 375ceb774bad114fdf88d184070244ecd86ac4fd | # For CS club - result by Julian
for n in range(1, 101):
    # Multiply each word by the truth of its divisibility test: the
    # string multiplication keeps "fizz"/"buzz"/"fizzbuzz" exactly as
    # the original concatenation did.
    word = "fizz" * (n % 3 == 0) + "buzz" * (n % 5 == 0)
    if not word:
        # Neither divisor matched: fall back to the number itself.
        word = str(n)
    print(word.title())  # Title-case ("Fizz", "Buzz", "Fizzbuzz") and print
|
983,316 | 7798634555e205a039ee60ed81f6c3e7d1d21d82 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 29 18:20:09 2021
@author: loki
"""
|
983,317 | 53976c3606c336e3d79146d3f3dab2409244c19f | # Generated by Django 3.1.6 on 2021-04-11 06:32
from django.db import migrations, models
# Auto-generated Django migration: adds Job.progress and Job.vinner and
# widens the Job.type_of choices.  Do not hand-edit the operations.
class Migration(migrations.Migration):

    dependencies = [
        ('i_job', '0002_auto_20210222_0648'),
    ]

    operations = [
        # Nullable integer for tracking job progress.
        migrations.AddField(
            model_name='job',
            name='progress',
            field=models.IntegerField(blank=True, null=True),
        ),
        # Nullable char field (name 'vinner' kept exactly as generated).
        migrations.AddField(
            model_name='job',
            name='vinner',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        # Re-declare type_of with the expanded set of job-type choices
        # (labels are user-facing Russian strings; must stay unchanged).
        migrations.AlterField(
            model_name='job',
            name='type_of',
            field=models.IntegerField(choices=[(0, 'Лайки по лайктайму'), (1, 'Лайки по хэштегам'), (2, 'Подписки по хэштегам'), (3, 'Отписка от неподписаных'), (4, 'Отписка от всех'), (5, 'Игра'), (6, 'Розыгрыш')]),
        ),
    ]
|
983,318 | abb6bb560185c8f353b31fc9782121bc77dbfbde | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 3 10:54:06 2018
@author: Administrator
"""
import numpy as np
import scipy.stats as st
from scipy import signal
import scipy
def lbf(u0, Img, Ksigma, KI, KONE, nu, timestep, mu, lambda1, lambda2, epsilon):
    """One gradient-descent step of Local Binary Fitting level-set
    evolution (Li et al., IEEE TIP 2008, eq. (15)).

    Parameters
    ----------
    u0 : current level set function
    Img : input image
    Ksigma : Gaussian kernel
    KI, KONE : precomputed convolutions used by the local fit
        (presumably conv(Img, Ksigma) and conv(ones, Ksigma) -- TODO confirm
        against the caller)
    nu : weight of the length (curvature) term
    timestep : descent step size
    mu : weight of the distance-regularization term
    lambda1, lambda2 : inside/outside fitting weights
    epsilon : width of the smoothed Dirac/Heaviside functions

    Returns the updated level set u.
    """
    u = u0
    u = NeumannBoundCond(u)  # enforce Neumann boundary conditions
    K = curvature_central(u)  # curvature of the level set
    DrcU= (epsilon/np.pi)/(epsilon**2+u**2)  # smoothed Dirac delta of u
    [f1, f2] = localBinaryFit(Img, u, KI, KONE, Ksigma, epsilon)
    s1 = lambda1*(f1**2) - lambda2*(f2**2) #compute lambda1*e1-lambda2*e2 in the 1st term in eq. (15) in IEEE TIP 08
    s2 = lambda1*f1 - lambda2*f2
    # Data force of eq. (15), combining the image and the local fits.
    dataForce = (lambda1-lambda2)*KONE*Img*Img+signal.convolve2d(s1,Ksigma,'same')-2*Img*signal.convolve2d(s2,Ksigma,'same')
    A = -DrcU*dataForce           # data attachment term
    P = mu*(4*del2(u)-K)          # distance-regularization (penalty) term
    L = nu*DrcU*K                 # length (curvature) term
    u = u + timestep*(L+P+A)
    return u
def localBinaryFit(Img, u, KI, KONE, Ksigma, epsilon):
    """Local binary fitting: compute the two local fitting functions
    f1/f2 of eq. (14) (Li et al., IEEE TIP 2008) from the level set u.
    """
    # Smoothed Heaviside of the level set: ~1 inside, ~0 outside.
    Hu = 0.5 * (1 + (2 / np.pi) * np.arctan(u / epsilon))
    masked = Img * Hu
    c1 = signal.convolve2d(Hu, Ksigma, 'same')
    c2 = signal.convolve2d(masked, Ksigma, 'same')
    f1 = c2 / c1                        # local average inside the contour
    f2 = (KI - c2) / (KONE - c1)        # local average outside the contour
    return f1, f2
def NeumannBoundCond(f):
    """Impose Neumann (zero normal derivative) boundary conditions.

    The outermost ring of the array is overwritten with the values two
    cells inside, so finite differences at the border see a zero
    gradient.  NOTE: g aliases f, so this modifies and returns the
    *same* array (matching the original MATLAB-style code).

    Fix: the original repeated the top/bottom row assignments twice
    (copy-paste duplication); the redundant, idempotent pair is removed.
    """
    g = f
    # Corners mirror the diagonally interior values.
    g[0,0] = g[2,2]
    g[0,-1] = g[2,-3]
    g[-1, 0] = g[-3, 2]
    g[-1,-1] = g[-3,-3]
    # Top and bottom rows mirror the rows two cells inside.
    g[0][1:-1] = g[2][1:-1]
    g[-1][1:-1] = g[-3][1:-1]
    # Left and right columns mirror the columns two cells inside.
    g[1:-1,0] = g[1:-1,2]
    g[1:-1,-1] = g[1:-1,-3]
    return g
def curvature_central(u):
    """Curvature of the level set via central differences:
    K = div(grad u / |grad u|)."""
    eps = 1e-10  # keeps the norm positive where the gradient vanishes
    grad0, grad1 = np.gradient(u)
    norm = np.sqrt(grad0 ** 2 + grad1 ** 2 + eps)
    unit0 = grad0 / norm
    unit1 = grad1 / norm
    # Divergence of the normalized gradient field (central differences).
    d00, _ = np.gradient(unit0)
    _, d11 = np.gradient(unit1)
    return d00 + d11
def del2(M):
    """Discrete Laplacian of M divided by 4 (mimics MATLAB's del2).

    Fix: `scipy.ndimage.filters.laplace` lives in a deprecated namespace
    that has been removed from recent SciPy releases; use the supported
    `scipy.ndimage.laplace` instead (same filter, same output).
    """
    from scipy import ndimage
    return ndimage.laplace(M)/4
#def del2(M): #LAPLACE模拟matlab del2()
# dx = 1
# dy = 1
# rows, cols = M.shape
# dx = dx * np.ones ((1, cols - 1))
# dy = dy * np.ones ((rows-1, 1))
#
# mr, mc = M.shape
# D = np.zeros ((mr, mc))
#
# if (mr >= 3):
# ## x direction
# ## left and right boundary
# D[:, 0] = (M[:, 0] - 2 * M[:, 1] + M[:, 2]) / (dx[:,0] * dx[:,1])
# D[:, mc-1] = (M[:, mc - 3] - 2 * M[:, mc - 2] + M[:, mc-1]) \
# / (dx[:,mc - 3] * dx[:,mc - 2])
#
# ## interior points
# tmp1 = D[:, 1:mc - 1]
# tmp2 = (M[:, 2:mc] - 2 * M[:, 1:mc - 1] + M[:, 0:mc - 2])
# tmp3 = np.kron (dx[:,0:mc -2] * dx[:,1:mc - 1], np.ones ((mr, 1)))
# D[:, 1:mc - 1] = tmp1 + tmp2 / tmp3
#
# if (mr >= 3):
# ## y direction
# ## top and bottom boundary
# D[0, :] = D[0,:] + \
# (M[0, :] - 2 * M[1, :] + M[2, :] ) / (dy[0,:] * dy[1,:])
#
# D[mr-1, :] = D[mr-1, :] \
# + (M[mr-3,:] - 2 * M[mr-2, :] + M[mr-1, :]) \
# / (dy[mr-3,:] * dx[:,mr-2])
#
# ## interior points
# tmp1 = D[1:mr-1, :]
# tmp2 = (M[2:mr, :] - 2 * M[1:mr - 1, :] + M[0:mr-2, :])
# tmp3 = np.kron (dy[0:mr-2,:] * dy[1:mr-1,:], np.ones ((1, mc)))
# D[1:mr-1, :] = tmp1 + tmp2 / tmp3
#
# return D / 4
def gaussian_kern(nsig):
    """Return a normalized 2D Gaussian kernel with std ~nsig.

    The kernel side length is round(2*nsig)*2 + 2 samples.  Each 1D cell
    integrates the Gaussian over its support (difference of the normal
    CDF); the outer product is normalized so the kernel sums to 1.
    """
    # Fix: np.around returns a float, and np.linspace requires an
    # integer sample count (a float `num` raises TypeError on modern
    # NumPy) -- cast explicitly.
    kernlen = int(np.around(nsig*2)*2+2)
    interval = (2*nsig+1.)/(kernlen)
    x = np.linspace(-nsig-interval/2., nsig+interval/2., kernlen+1)
    kern1d = np.diff(st.norm.cdf(x))
    kernel_raw = np.sqrt(np.outer(kern1d, kern1d))
    kernel = kernel_raw/kernel_raw.sum()
    return kernel
983,319 | 260e34b4e96afae7a794bf467ae0aa7e9bdefddc | from Phase import *
from qutip import *
from Fields import *
from Fields_spin12 import *
import numpy as np
import os
from Storage import *
from Utility import *
# =============================================================================
# Building mass term with gauge field
# =============================================================================
def mass_spin(x, y, r, m, a, C):
    """Build (or load from cache) the fermion mass term on an x-by-y lattice.

    Operators are cached as pickles under ./Data/Operators/; when a
    cached file exists it is loaded instead of being rebuilt.

    Parameters
    ----------
    x, y : lattice extent in each direction
    r : parameter entering the (m + 2*r/a) prefactor
        (appears Wilson-like -- confirm against the model definition)
    m : bare mass
    a : lattice spacing
    C : coefficient array; C[1] scales the whole term

    Returns the assembled mass Hamiltonian H_mass.
    """
    save_dir = './Data/Operators/Mass/'
    filename = 'Mass_a=' + str(a) + '_m=' + str(m) + '_x=' + str(x) + '_y=' +str(y)
    if os.path.isfile(save_dir + filename + '.p'):
        # Cached full operator exists -- reuse it.
        H_mass = load_data(save_dir, filename)
    else:
        Mass = []
        j = 0
        while j < y:
            i = 0
            while i < x:
                save_dir2 = './Data/Operators/Simple/'
                filename_P = 'Site_mass_P=' + '_x=' + str(x) + '_y=' + str(y) + '_i=' + str(i) + '_j=' + str(j)
                filename_A = 'Site_mass_A=' + '_x=' + str(x) + '_y=' + str(y) + '_i=' + str(i) + '_j=' + str(j)
                # NOTE(review): flattened site index uses j*y + i; for an
                # x-wide grid this would normally be j*x + i -- confirm.
                target = (j*y + i)*2
                # First-component number operator, built from
                # ferm_dag_1 * ferm_1 at site (i, j).
                if os.path.isfile(save_dir2 + filename_P + '.p'):
                    P = load_data(save_dir2, filename_P)
                else:
                    P = ope_prod(ferm_dag_1(x, y, i, j), ferm_1(x, y, i, j), 0, 0)
                    P = project_op(x, y, P)
                    save_data(P, save_dir2, filename_P)
                # Second-component operator (ferm_dag_2 * ferm_2), with a
                # sign flip applied at position target+1 before projection.
                if os.path.isfile(save_dir2 + filename_A + '.p'):
                    A = load_data(save_dir2, filename_A)
                else:
                    A = ope_prod(ferm_dag_2(x, y, i, j), ferm_2(x, y, i, j), 0, 0)
                    A[target+1] = -A[target+1]
                    A = project_op(x, y, A)
                    save_data(A, save_dir2, filename_A)
                term = P + A
                Mass.append(term)
                i += 1
            j += 1
        H_mass = sum(Mass)
        # Overall prefactor: C[1]*(m + 2*r/a).
        H_mass = H_mass*C[1]*(m + 2*r/a)
        save_data(H_mass, save_dir, filename)
    return H_mass
# =============================================================================
# Building electric potential term
# =============================================================================
def electric_pot(x, y, a, g):
    """Build (or load from cache) the electric-field energy term.

    Sums Ex^2 + Ey^2 over every lattice site, scaled by
    J = 1/2 * (-a*g/2)**2.  Site operators are cached as pickles.

    Parameters
    ----------
    x, y : lattice extent
    a : lattice spacing
    g : coupling constant
    """
    save_dir = './Data/Operators/Electric_potential/'
    filename = 'Electric_term' + '_x=' + str(x) + '_y=' + str(y) + '_a=' + str(a) + '_e=' + str(g)
    E_pot = 0   # NOTE(review): never used below -- confirm safe to drop
    N = x*y     # NOTE(review): never used below -- confirm safe to drop
    J = 1/2*(-a*g/2)**2
    if os.path.isfile(save_dir + filename + '.p'):
        # Cached full operator exists -- reuse it.
        H_E = load_data(save_dir, filename)
    else:
        E_field = []
        j = 0
        while j < y:
            i = 0
            while i < x:
                save_dir2 = './Data/Operators/Simple/'
                filename_x = 'Link_Sz_x_' + '_x=' + str(x) + '_y=' + str(y) + '_i=' + str(i) + '_j=' + str(j)
                filename_y = 'Link_Sz_y_' + '_x=' + str(x) + '_y=' + str(y) + '_i=' + str(i) + '_j=' + str(j)
                # Ex^2 on the x-link of site (i, j).
                if os.path.isfile(save_dir2 + filename_x + '.p'):
                    E_pot_x = load_data(save_dir2, filename_x)
                else:
                    E_pot_x = ope_prod(Ex(x, y, i, j), Ex(x, y, i, j), 0, 0)
                    E_pot_x = project_op(x, y, E_pot_x)
                    save_data(E_pot_x, save_dir2, filename_x)
                # Ey^2 on the y-link of site (i, j).
                if os.path.isfile(save_dir2 + filename_y + '.p'):
                    E_pot_y = load_data(save_dir2, filename_y)
                else:
                    E_pot_y = ope_prod(Ey(x, y, i, j), Ey(x, y, i, j), 0, 0)
                    E_pot_y = project_op(x, y, E_pot_y)
                    save_data(E_pot_y, save_dir2, filename_y)
                term = E_pot_x + E_pot_y
                E_field.append(term)
                i += 1
            j += 1
        H_E = sum(E_field)
        H_E = J*H_E
        save_data(H_E, save_dir, filename)
    return H_E
# =============================================================================
# Building magnetic potential term
# =============================================================================
def magnetic_pot(x, y, a, g):
    """Build (or load from cache) the magnetic (plaquette) energy term.

    For each site, forms the plaquette Ux*Uy*Ux^dag*Uy^dag, adds
    plaquette + plaquette^dag, and scales the sum by J = 1/(4*a^4*g^2).
    Plaquette operators are cached as pickles.
    """
    save_dir = './Data/Operators/Magnetic_potential/'
    filename = 'Magnetic_term' + '_x=' + str(x) + '_y=' + str(y) + '_a=' + str(a) + '_e=' + str(g)
    M_pot = 0   # NOTE(review): never used below -- confirm safe to drop
    N = x*y     # NOTE(review): never used below -- confirm safe to drop
    J = 1/(4*a**4*g**2)
    if os.path.isfile(save_dir + filename + '.p'):
        # Cached full operator exists -- reuse it.
        H_B = load_data(save_dir, filename)
    else:
        B_field = []
        j = 0
        while j < y:
            i = 0
            while i < x:
                save_dir2 = './Data/Operators/Simple/'
                filename_R = 'plaquette_' + '_x=' + str(x) + '_y=' + str(y) + '_i=' + str(i) + '_j=' + str(j)
                # NOTE(review): filename_L is computed but never used (the
                # dagger is taken in-memory below) -- confirm safe to drop.
                filename_L = 'plaquette_dag_' + '_x=' + str(x) + '_y=' + str(y) + '_i=' + str(i) + '_j=' + str(j)
                if os.path.isfile(save_dir2 + filename_R + '.p'):
                    plaquette = load_data(save_dir2, filename_R)
                else:
                    plaquette = ope_prod(Ux(x, y, i, j), Uy(x, y, i+1, j), Ux_dag(x, y, i, j+1), Uy_dag(x, y, i, j))
                    plaquette = project_op(x, y, plaquette)
                    save_data(plaquette, save_dir2, filename_R)
                # Hermitian combination: plaquette + its dagger.
                term = (plaquette + plaquette.dag())
                B_field.append(term)
                i += 1
            j += 1
        H_B = sum(B_field)
        H_B = J*H_B
        save_data(H_B, save_dir, filename)
    return H_B
|
983,320 | d483c86401791728e7d28785442838eafafe034e | '''
Optimizers
'''
import tensorflow as tf
from app.hparams import hparams
@hparams.register_optimizer('sgd')
def sgd_ozer(learn_rate, lr_decay=None, lr_decay_epoch=2, **kwargs):
    """Plain SGD optimizer.

    lr_decay / lr_decay_epoch are accepted for interface compatibility
    but not used in this builder.
    """
    kwargs['learning_rate'] = learn_rate
    return tf.train.GradientDescentOptimizer(**kwargs)
@hparams.register_optimizer('adam')
def adam_ozer(learn_rate, lr_decay=None, lr_decay_epoch=2, **kwargs):
    """Adam optimizer.

    lr_decay / lr_decay_epoch are accepted for interface compatibility
    but not used in this builder.
    """
    kwargs['learning_rate'] = learn_rate
    return tf.train.AdamOptimizer(**kwargs)
|
983,321 | 57f5936c5d74913cd0b6f93129c2f501e3b6712f | #!/usr/bin/env python
import h5py
import numpy as np
def check_h5(fname):
    """Sanity-check an HDF5 feature file.

    Every key is expected to look like '<speaker>.<utterance>'.
    Verifies that no dataset contains NaNs and prints the number of
    distinct speakers.

    Fixes: the original `print(len(seter)` was missing its closing
    parenthesis (SyntaxError); `f[k].value` uses the accessor removed in
    h5py 3.x (replaced by `f[k][()]`); the file is now opened with an
    explicit read-only mode.

    Raises:
        AssertionError: if any dataset contains NaN values.
    """
    speakers = set()
    with h5py.File(fname, 'r') as f:
        for k in f.keys():
            spk, _ = k.split('.')
            speakers.add(spk)
            assert not np.isnan(f[k][()]).any(), 'Bad value:: %s' % k
    print(len(speakers))
if __name__ == '__main__':
    import fire
    # Expose check_h5 on the command line:
    #   python <thisfile>.py check <fname>
    fire.Fire({
        'check': check_h5
    })
|
983,322 | 687755fbf60f0c96ab3f7f99dff395d75102ba50 | """
Example full run of pv-pro analysis using synthetic data. Synthetic data is
generated using file 'synth01_generate_synthetic_data.py'.
@author: toddkarin
"""
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from pvpro import PvProHandler
from pvpro.preprocess import Preprocessor
from pvpro.postprocess import analyze_yoy
from pvpro.plotting import plot_results_timeseries
# ---------------------------------------------------------------------------
# Script body: load synthetic data, preprocess, fit the pv-pro model over
# time windows, and plot the parameter time series.
# ---------------------------------------------------------------------------

# Import synthetic data
df = pd.read_pickle('synth01_out.pkl')

# Load preprocessor.
pre = Preprocessor(df,
                   voltage_dc_key='v_dc',
                   current_dc_key='i_dc',
                   temperature_module_key='temperature_module_meas',
                   irradiance_poa_key='poa_meas',
                   modules_per_string=1,
                   parallel_strings=1,
                   )

# Calculate cell temperature from module temperature and POA.
pre.calculate_cell_temperature(delta_T=3)

# Two preprocessing modes, 'fast' and 'sdt'. Since this is clean synthetic
# data, we will use the 'fast' pipeline.
method='fast'

if method=='sdt':
    pre.run_preprocess_sdt(correct_dst=True)
    pre.classify_points_sdt()
    pre.build_operating_cls()
elif method=='fast':
    pre.classify_points_pva()
    pre.build_operating_cls()

# Make PvProHandler object to store data.
pvp = PvProHandler(pre.df,
                   system_name='synthetic',
                   cells_in_series=60,
                   resistance_shunt_ref=df['resistance_shunt_ref'].mean(),
                   alpha_isc=0.001,
                   voltage_key='v_dc',
                   current_key='i_dc',
                   temperature_cell_key='temperature_cell',
                   irradiance_poa_key='poa_meas',
                   modules_per_string=1,
                   parallel_strings=1,
                   )

# Estimate startpoint.
pvp.estimate_p0()
print('Estimated startpoint:')
print(pvp.p0)

# Can set a custom startpoint if auto-chosen startpoint isn't great.
pvp.p0 = {'diode_factor': 1.12,
          'photocurrent_ref': 5.9,
          'saturation_current_ref': 2e-9,
          'resistance_series_ref': 0.4,
          'conductance_shunt_extra': 0.001}

# Plot startpoint on top of data.
# plt.figure(0)
# plt.clf()
# pvp.plot_Vmp_Imp_scatter(df=pvp.df[:5000],
#                          p_plot=pvp.p0,
#                          figure_number=4,
#                          plot_vmp_max=37,
#                          plot_imp_max=6)
# plt.title('Startpoint')
# plt.show()

# Set boolean mask for which points to include
# (only points with POA irradiance above 100).
boolean_mask = pvp.df['poa_meas'] > 100

# Set hyperparameters for running model.
hyperparams = {
    'use_voc_points': False,
    'use_mpp_points': True,
    'use_clip_points': False,
    # 'method': 'basinhopping',
    'method': 'minimize',
    'solver': 'L-BFGS-B',
    # 'solver': 'nelder-mead',
    'days_per_run': 30,
    'iterations_per_year': 6,
    'save_figs': False,
    'verbose' : False,
    # 'saturation_current_multistart':[0.8,1.2],
    'start_point_method': 'last',
    'save_figs_directory': 'figures',
    'plot_imp_max': 7,
    'plot_vmp_max': 35,
    'boolean_mask': boolean_mask,
    'singlediode_method':'fast'
}

# Run the fit over all time windows.
ret = pvp.execute(iteration='all',
                  **hyperparams)

# Get results
pfit = pvp.result['p']
pfit.index = pfit['t_start']
print(pfit)

# Analyze year-on-year trend.
yoy_result = analyze_yoy(pfit)

# Text box summarizing the run settings for the results plot.
extra_text = 'System: {}\n'.format(pvp.system_name) + \
             'Use mpp points: {}\n'.format(hyperparams['use_mpp_points']) + \
             'Use voc points: {}\n'.format(hyperparams['use_voc_points']) + \
             'Use clip points: {}\n'.format(hyperparams['use_clip_points']) + \
             'Irrad: {}\n'.format(pvp.irradiance_poa_key) + \
             'Days per run: {}\n'.format(hyperparams['days_per_run']) + \
             'start point method: {}\n'.format(hyperparams['start_point_method']) + \
             'Median residual: {:.4f}\n'.format(1000*np.median(pfit['residual']))

# Monthly means of the measured data for comparison plotting.
compare = pvp.df.resample('M').mean()
compare['t_years'] = np.array(
    [t.year + (t.dayofyear - 1) / 365.25 for t in compare.index])

# Plot results
# NOTE(review): yoy_result is computed above but yoy_result=None is
# passed here -- confirm which is intended.
plot_results_timeseries(pfit,yoy_result=None,
                        extra_text=extra_text,
                        compare=compare)
# plt.savefig('figures/synth02_result.pdf',bbox_inches='tight') |
983,323 | 6560ae57b41b7685267cb81be7ae0e31fb32c5ce | def f(n):
if n > 0:
return g(n-1)
return 0
def g(n):
ans = 1
if n > 1:
ans += f(n-3)
return ans
print(f(11)) |
983,324 | 5d5dec896db5ee7c9d92e413ca2165172cb4e7b2 | import numpy as np
import cv2
# Drawing-primitives demo on an image loaded from disk.
img = cv2.imread('pro.png', cv2.IMREAD_COLOR)
# NOTE: all colors below are (B, G, R) tuples -- OpenCV is BGR, not RGB.
cv2.line(img, (0,0), (150, 150), (255, 255, 255), 15) #opencv is BGR
# Rectangle from top-left (15,25) to bottom-right (200,150), thickness 5.
cv2.rectangle(img, (15,25), (200,150), (255, 0, 0), 5)
# Filled circle (thickness -1) of radius 55 centered at (100,63).
cv2.circle(img, (100,63), 55, (0,255,0), -1)
pts = np.array([[10,5],[20,30],[70,30],[50, 10]],np.int32)
#pts = pts.reshape((-1,1,2))
# Closed (True) red polyline through the four points above.
cv2.polylines(img, [pts], True, (0,0,255), 3)
font = cv2.FONT_HERSHEY_SIMPLEX
# Anti-aliased white text at (0, 400).
cv2.putText(img, 'OPENCV TUTS!', (0, 400), font, 1, (255,255,255), 2, cv2.LINE_AA)
# Show the result until a key is pressed.
cv2.imshow('image',img)
cv2.waitKey(0)
cv2.destroyAllWindows() |
983,325 | b736e9eb5677c8349a7676dc34cb8713a4355ba3 | from MDRSREID.Settings.parser_args.parser_args import parser_args
import os.path as osp
from MDRSREID.utils.src_copy_to_dst import src_copy_to_dst
from MDRSREID.utils.config_utils.overwrite_config_file import overwrite_config_file
from MDRSREID.utils.import_file import import_file
def init_config(args=None):
    """Build the experiment configuration.

    args can be parsed from the command line, or provided by the caller.
    Steps: resolve the experiment directory, copy the default config
    file into it, apply any overwrites (file first, then string), import
    the resulting config module and record the experiment dir on it.
    """
    args = parser_args() if args is None else args

    # Resolve the experiment directory, deriving a default from the
    # model name and the config file's base name when not given.
    exp_dir = args.exp_dir
    if exp_dir is None:
        config_stem = osp.splitext(osp.basename(args.default_config_path))[0]
        exp_dir = 'experiment/' + args.model_name + '/' + config_stem

    # Copy the default config into the experiment directory.
    dst_config_path = osp.join(exp_dir, osp.basename(args.default_config_path))
    src_copy_to_dst(args.default_config_path, dst_config_path)

    # Apply overwrites: a config file first, then an inline string.
    if args.ow_config_path != 'None':
        print('ow_config_path is: {}'.format(args.ow_config_path))
        overwrite_config_file(dst_config_path, ow_file=args.ow_config_path)
    if args.ow_str != 'None':
        print('ow_str is: {}'.format(args.ow_str))
        overwrite_config_file(dst_config_path, ow_str=args.ow_str)

    # Import the (possibly overwritten) config and stamp the exp dir.
    cfg = import_file(dst_config_path).cfg
    cfg.log.exp_dir = exp_dir
    return cfg
if __name__ == '__main__':
    # Allow running this module directly to build a config for inspection.
    cfg = init_config()
|
983,326 | 942a441b7ca78ad70d30e1d9968a1f561fae6e84 | def factorial(x):
if x == 1:
return 1
else:
return(x * factorial(x-1))
if __name__ == "__main__":
    # Interactive entry point: input() returns a string, so it is
    # converted to int before computing the factorial.
    num = input("What do you want the factorial of? ")
    print("The factorial of " , num, "is", factorial(int(num)))
|
983,327 | d947383e97ba3816880bda2052b48173740a36b0 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets, model_selection
# 1. Load the iris dataset
iris = datasets.load_iris() # load iris dataset
X = iris.data[:, :2]        # keep only the first two features (sepal length/width)
Y = iris.target             # store the labels

# Split: 30% training, 70% held out; the held-out pool is then split
# evenly into validation and test sets.
seed = 666
# separate training data from validation and test
X_train, X_val_test, Y_train, Y_val_test = model_selection.train_test_split(X, Y, test_size=0.7, shuffle=True, random_state=seed)
seed = 221
# separate validation and test data.
# BUG FIX: the original split X_train again here, so the validation and
# test sets overlapped the training data and X_val_test went unused.
# The held-out pool (X_val_test/Y_val_test) is what must be split.
X_val, X_test, Y_val, Y_test = model_selection.train_test_split(X_val_test, Y_val_test, test_size=0.5, shuffle=True, random_state=seed)

# 2. Perform an k-NN classification for each k in 1, 5, 10, 20, 30
N_train = len(X_train)
N_val = len(X_val)
N_test = len(X_test)
print("Datapoints used for training: ", N_train)
print("Datapoints used for validation: ", N_val)
print("Datapoints used for testing: ", N_test)

# Plot params: light colormap for regions/held-out points, bold for training.
cmap_light = ListedColormap(['orange', 'cyan', 'cornflowerblue'])
cmap_bold = ListedColormap(['red', 'darkcyan', 'darkblue'])
def plot_iris(X_train, Y_train, X_val_test, Y_val_test):
    """Scatter-plot the iris data on the current axes.

    Validation/test points are drawn in light colors first, then the
    training points in bold colors on top (both at zorder=2, i.e. above
    the decision-region mesh drawn at zorder=1).

    Returns the current matplotlib axes.
    """
    ax = plt.gca()
    # plot validation and testing points
    ax.scatter(X_val_test[:,0], X_val_test[:,1], c=Y_val_test, cmap=cmap_light, edgecolor='k', s=20, zorder=2)
    # plot the training data in bold colors
    ax.scatter(X_train[:,0], X_train[:,1], c=Y_train, cmap=cmap_bold, edgecolor='k', s=20, zorder=2)
    # add labels to the x/y axis
    plt.xlabel('Sepal length')
    plt.ylabel('Sepal width')
    return ax
def draw_knn_boundaries(knn, h=0.02): # h = Step size in the mesh
    """Shade the current axes with the decision regions of a trained knn.

    The current axes limits are sampled on a mesh with step h; every
    mesh point is classified by `knn` and the predicted labels are
    drawn with pcolormesh underneath the data points (zorder=1).

    Fix: the original left `decision_boundaries = ...` as a literal
    Ellipsis placeholder (an unfinished TODO), which crashed on the
    subsequent reshape.  It is completed with knn.predict(grid).

    Returns the current matplotlib axes.
    """
    ax = plt.gca()
    [xmin, xmax] = ax.get_xlim()
    [ymin, ymax] = ax.get_ylim()
    # Generate the axis associated to the first feature:
    x_axis = np.arange(xmin, xmax, h)
    # Generate the axis associated to the 2nd feature:
    y_axis = np.arange(ymin, ymax, h)
    # Generate a meshgrid (2D grid) from the 2 axis:
    x_grid, y_grid = np.meshgrid(x_axis, y_axis)
    # Vectorize the grids into column vectors:
    x_grid_vectorized = x_grid.flatten()
    x_grid_vectorized = np.expand_dims(x_grid_vectorized, axis=1)
    y_grid_vectorized = y_grid.flatten()
    y_grid_vectorized = np.expand_dims(y_grid_vectorized, axis=1)
    # Concatenate the vectorized grids into an (n_points, 2) matrix:
    grid = np.concatenate((x_grid_vectorized, y_grid_vectorized), axis=1)
    # Classify every mesh point to obtain the decision regions:
    decision_boundaries = knn.predict(grid)
    # Reshape the decision boundaries into a 2D matrix:
    decision_boundaries = decision_boundaries.reshape(x_grid.shape)
    plt.pcolormesh(x_grid, y_grid, decision_boundaries, cmap=cmap_light, zorder=1)
    return ax
# Main work here:
def knn_on_iris(k, X_train, Y_train, X_val, Y_val):
    """
    Fit a k-NN classifier on Iris, plot its decision boundaries, and
    compute accuracies.

    Returns (fitted_classifier, train_accuracy, val_accuracy).
    """
    # Show the raw data the classifier is trained/evaluated on.
    plot_iris(X_train, Y_train, X_val, Y_val)
    # Fit a k-nearest-neighbours classifier for this value of k.
    k_NN = KNeighborsClassifier(n_neighbors=k)
    k_NN.fit(X_train, Y_train)
    # Overlay the class regions chosen by the fitted model.
    draw_knn_boundaries(k_NN)
    plt.title("k-NN classification on Iris, k = " + str(k_NN.get_params().get("n_neighbors")))
    plt.show()
    # Accuracy = fraction of correctly classified points on each split.
    train_predictions = k_NN.predict(X_train)
    train_accuracy = np.sum(train_predictions == Y_train) / len(X_train)
    val_predictions = k_NN.predict(X_val)
    val_accuracy = np.sum(val_predictions == Y_val) / len(X_val)
    return (k_NN, train_accuracy, val_accuracy)
### k-NN on the Iris dataset for different values of k:
# Create vectors to store the results for each k:
train_accuracies = []
val_accuracies = []
# Train a knn for each value of k in k_list
k_list = [1, 5, 10, 20, 30]
for k in k_list:
    knn, train_acc, val_acc = knn_on_iris(k, X_train, Y_train, X_val, Y_val)
    print("K-nn trained with k = ", k)
    print("Train accuracy: ", train_acc, " ----- ", "Validation accuracy: ", val_acc)
    train_accuracies.append(train_acc)
    val_accuracies.append(val_acc)
# Plot accuracy curves: train vs validation accuracy as a function of k,
# to visualize under/over-fitting as the neighborhood grows.
plt.plot(k_list, train_accuracies)
plt.plot(k_list, val_accuracies)
plt.ylim(0, 1)
plt.xlabel('k')
plt.ylabel('Accuracy')
plt.legend(['Train', 'Validation'], loc='best')
plt.title("k-NN accuracy curves on Iris")
# Display plots:
plt.show()
983,328 | e7523e12dc1dd6826b8d056f7b6f5651518be1e9 | from itertools import chain
from hathor.conf import HathorSettings
from hathor.daa import TestMode, _set_test_mode, get_weight_decay_amount
from hathor.transaction import sum_weights
from hathor.transaction.storage import TransactionMemoryStorage
from tests import unittest
from tests.utils import add_new_blocks, add_new_transactions
# Module-wide Hathor network settings (reward schedule, DAA constants, ...).
settings = HathorSettings()
class BaseBlockchainTestCase(unittest.TestCase):
    # Abstract base: concrete sync-v1/v2/bridge subclasses set __test__ = True.
    __test__ = False
    # NOTE(review): the string below follows the __test__ assignment, so it is
    # a plain expression statement, not the class docstring; kept in place to
    # avoid reordering code.
    """
    Thus, there are eight cases to be handled when a new block arrives, which are:
    (i) Single best chain, connected to the head of the best chain
    (ii) Single best chain, connected to the tail of the best chain
    (iii) Single best chain, connected to the head of a side chain
    (iv) Single best chain, connected to the tail of a side chain
    (v) Multiple best chains, connected to the head of a best chain
    (vi) Multiple best chains, connected to the tail of a best chain
    (vii) Multiple best chains, connected to the head of a side chain
    (viii) Multiple best chains, connected to the tail of a side chain
    """
    def setUp(self):
        """Create an in-memory tx storage and split genesis into blocks/txs."""
        super().setUp()
        self.tx_storage = TransactionMemoryStorage()
        self.genesis = self.tx_storage.get_all_genesis()
        self.genesis_blocks = [tx for tx in self.genesis if tx.is_block]
        self.genesis_txs = [tx for tx in self.genesis if not tx.is_block]
    def test_single_chain(self):
        """ All new blocks belong to case (i).
        """
        self.assertEqual(len(self.genesis_blocks), 1)
        manager = self.create_peer('testnet', tx_storage=self.tx_storage)
        # The initial score is the sum of the genesis
        score = self.genesis_blocks[0].weight
        for tx in self.genesis_txs:
            score = sum_weights(score, tx.weight)
        # Mine 100 blocks in a row with no transaction but the genesis
        blocks = add_new_blocks(manager, 100, advance_clock=15)
        for i, block in enumerate(blocks):
            meta = block.get_metadata(force_reload=True)
            score = sum_weights(score, block.weight)
            self.assertAlmostEqual(score, meta.score)
        # Add some transactions between blocks
        txs = add_new_transactions(manager, 30, advance_clock=15)
        for tx in txs:
            score = sum_weights(score, tx.weight)
        # Mine 50 more blocks in a row with no transactions between them
        blocks = add_new_blocks(manager, 50)
        for i, block in enumerate(blocks):
            meta = block.get_metadata()
            score = sum_weights(score, block.weight)
            self.assertAlmostEqual(score, meta.score)
            # The consensus algorithm must agree with the incrementally
            # tracked score.
            consensus_context = manager.consensus_algorithm.create_context()
            self.assertAlmostEqual(consensus_context.block_algorithm.calculate_score(block), meta.score)
        # Mine 15 more blocks with 10 transactions between each block
        for _ in range(15):
            txs = add_new_transactions(manager, 10, advance_clock=15)
            for tx in txs:
                score = sum_weights(score, tx.weight)
            blocks = add_new_blocks(manager, 1)
            for i, block in enumerate(blocks):
                meta = block.get_metadata()
                score = sum_weights(score, block.weight)
                self.assertAlmostEqual(score, meta.score)
                consensus_context = manager.consensus_algorithm.create_context()
                self.assertAlmostEqual(consensus_context.block_algorithm.calculate_score(block), meta.score)
        self.assertConsensusValid(manager)
    def test_single_fork_not_best(self):
        """ New blocks belong to cases (i), (ii), (iii), and (iv).
        The best chain never changes. All other chains are side chains.
        """
        self.assertEqual(len(self.genesis_blocks), 1)
        manager = self.create_peer('testnet', tx_storage=self.tx_storage)
        # The initial score is the sum of the genesis
        score = self.genesis_blocks[0].weight
        for tx in self.genesis_txs:
            score = sum_weights(score, tx.weight)
        # Mine 30 blocks in a row with no transactions
        blocks = add_new_blocks(manager, 30, advance_clock=15)
        for i, block in enumerate(blocks):
            meta = block.get_metadata()
            score = sum_weights(score, block.weight)
            self.assertAlmostEqual(score, meta.score)
        # Add some transactions between blocks
        txs = add_new_transactions(manager, 5, advance_clock=15)
        for tx in txs:
            score = sum_weights(score, tx.weight)
        # Mine 1 blocks
        blocks = add_new_blocks(manager, 1, advance_clock=15)
        for i, block in enumerate(blocks):
            meta = block.get_metadata()
            score = sum_weights(score, block.weight)
            self.assertAlmostEqual(score, meta.score)
        # Generate a block which will be a fork in the middle of the chain
        # Change the order of the transactions to change the hash
        fork_block1 = manager.generate_mining_block()
        fork_block1.parents = [fork_block1.parents[0]] + fork_block1.parents[:0:-1]
        fork_block1.resolve()
        fork_block1.verify()
        # Mine 8 blocks in a row
        blocks = add_new_blocks(manager, 8, advance_clock=15)
        for i, block in enumerate(blocks):
            meta = block.get_metadata()
            score = sum_weights(score, block.weight)
            self.assertAlmostEqual(score, meta.score)
        # Fork block must have the same parents as blocks[0] as well as the same score
        self.assertEqual(set(blocks[0].parents), set(fork_block1.parents))
        # Propagate fork block.
        # This block belongs to case (ii).
        self.assertTrue(manager.propagate_tx(fork_block1))
        fork_meta1 = fork_block1.get_metadata()
        # The late fork loses: it is voided by itself.
        self.assertEqual(fork_meta1.voided_by, {fork_block1.hash})
        # Add some transactions between blocks
        txs = add_new_transactions(manager, 5, advance_clock=15)
        for tx in txs:
            score = sum_weights(score, tx.weight)
        # Mine 5 blocks in a row
        # These blocks belong to case (i).
        blocks = add_new_blocks(manager, 5, advance_clock=15)
        for i, block in enumerate(blocks):
            meta = block.get_metadata()
            score = sum_weights(score, block.weight)
            self.assertAlmostEqual(score, meta.score)
        # Add some transactions between blocks
        txs = add_new_transactions(manager, 2, advance_clock=15)
        for tx in txs:
            score = sum_weights(score, tx.weight)
        # Propagate a block connected to the voided chain
        # These blocks belongs to case (iii).
        sidechain1 = add_new_blocks(manager, 3, parent_block_hash=fork_block1.hash)
        for block in sidechain1:
            meta = block.get_metadata(force_reload=True)
            self.assertEqual(meta.voided_by, {block.hash})
        # Add some transactions between blocks
        txs = add_new_transactions(manager, 2, advance_clock=15)
        for tx in txs:
            score = sum_weights(score, tx.weight)
        # Propagate a block connected to the voided chain
        # This block belongs to case (iv).
        fork_block3 = manager.generate_mining_block(parent_block_hash=fork_block1.hash)
        fork_block3.resolve()
        fork_block3.verify()
        self.assertTrue(manager.propagate_tx(fork_block3))
        fork_meta3 = fork_block3.get_metadata()
        self.assertEqual(fork_meta3.voided_by, {fork_block3.hash})
        self.assertConsensusValid(manager)
    def test_multiple_forks(self):
        """Exercise all eight cases (i)-(viii) with competing side chains."""
        self.assertEqual(len(self.genesis_blocks), 1)
        manager = self.create_peer('testnet', tx_storage=self.tx_storage)
        # The initial score is the sum of the genesis
        score = self.genesis_blocks[0].weight
        for tx in self.genesis_txs:
            score = sum_weights(score, tx.weight)
        # Mine 30 blocks in a row with no transactions, case (i).
        blocks = add_new_blocks(manager, 30, advance_clock=15)
        for i, block in enumerate(blocks):
            meta = block.get_metadata()
            score = sum_weights(score, block.weight)
            self.assertAlmostEqual(score, meta.score)
        # Add some transactions between blocks
        txs1 = add_new_transactions(manager, 5, advance_clock=15)
        for tx in txs1:
            score = sum_weights(score, tx.weight)
        # Mine 1 blocks, case (i).
        blocks = add_new_blocks(manager, 1, advance_clock=15)
        block_before_fork = blocks[0]
        for i, block in enumerate(blocks):
            meta = block.get_metadata()
            score = sum_weights(score, block.weight)
            self.assertAlmostEqual(score, meta.score)
        # txs1 must be confirmed by the first block mined after them.
        for tx in txs1:
            meta = tx.get_metadata(force_reload=True)
            self.assertEqual(meta.first_block, blocks[0].hash)
        # Add some transactions between blocks
        txs2 = add_new_transactions(manager, 3, advance_clock=15)
        for tx in txs2:
            score = sum_weights(score, tx.weight)
        # Mine 5 blocks in a row, case (i).
        blocks = add_new_blocks(manager, 5, advance_clock=15)
        for i, block in enumerate(blocks):
            meta = block.get_metadata()
            score = sum_weights(score, block.weight)
            self.assertAlmostEqual(score, meta.score)
        # Mine 4 blocks, starting a fork.
        # All these blocks belong to case (ii).
        sidechain = add_new_blocks(manager, 4, advance_clock=15, parent_block_hash=blocks[0].parents[0])
        # Fork block must have the same parents as blocks[0] as well as the same score
        self.assertEqual(set(blocks[0].parents), set(sidechain[0].parents))
        # Best chain stays valid; the shorter side chain is voided.
        for block in blocks:
            meta = block.get_metadata(force_reload=True)
            self.assertEqual(meta.voided_by, None)
        for block in sidechain:
            meta = block.get_metadata(force_reload=True)
            self.assertEqual(meta.voided_by, {block.hash})
        # Propagate a block connected to the voided chain, case (iii).
        fork_block2 = manager.generate_mining_block(parent_block_hash=sidechain[-1].hash)
        fork_block2.resolve()
        fork_block2.verify()
        self.assertTrue(manager.propagate_tx(fork_block2))
        sidechain.append(fork_block2)
        # Now, both chains have the same score: everything is voided (tie).
        for block in blocks:
            meta = block.get_metadata(force_reload=True)
            self.assertEqual(meta.voided_by, {block.hash})
        for block in sidechain:
            meta = block.get_metadata(force_reload=True)
            self.assertEqual(meta.voided_by, {block.hash})
        for tx in txs1:
            meta = tx.get_metadata(force_reload=True)
            self.assertEqual(meta.first_block, block_before_fork.hash)
        # txs2 lose their confirming block while the tie lasts.
        for tx in txs2:
            meta = tx.get_metadata(force_reload=True)
            self.assertIsNone(meta.first_block)
        # Mine 1 block, starting another fork.
        # This block belongs to case (vi).
        sidechain2 = add_new_blocks(manager, 1, advance_clock=15, parent_block_hash=sidechain[0].hash)
        for block in sidechain2:
            meta = block.get_metadata(force_reload=True)
            self.assertEqual(meta.voided_by, {block.hash})
        # Mine 2 more blocks in the new fork.
        # These blocks belong to case (vii).
        sidechain2 += add_new_blocks(manager, 2, advance_clock=15, parent_block_hash=sidechain2[-1].hash)
        for block in sidechain2:
            meta = block.get_metadata(force_reload=True)
            self.assertEqual(meta.voided_by, {block.hash})
        # Mine 1 block, starting another fork from sidechain2.
        # This block belongs to case (viii).
        sidechain3 = add_new_blocks(manager, 1, advance_clock=15, parent_block_hash=sidechain2[-2].hash)
        for block in sidechain3:
            meta = block.get_metadata(force_reload=True)
            self.assertEqual(meta.voided_by, {block.hash})
        # Propagate a block connected to the side chain, case (v).
        fork_block3 = manager.generate_mining_block(parent_block_hash=fork_block2.hash)
        fork_block3.resolve()
        fork_block3.verify()
        self.assertTrue(manager.propagate_tx(fork_block3))
        sidechain.append(fork_block3)
        # The side chains have exceeded the score (after it has the same score)
        for block in blocks:
            meta = block.get_metadata(force_reload=True)
            self.assertEqual(meta.voided_by, {block.hash})
        for block in sidechain:
            meta = block.get_metadata(force_reload=True)
            self.assertEqual(meta.voided_by, None)
        # from hathor.graphviz import GraphvizVisualizer
        # dot = GraphvizVisualizer(manager.tx_storage, include_verifications=True, include_funds=True).dot()
        # dot.render('dot0')
        # txs2 are now confirmed by the winning side chain's first block.
        for tx in txs2:
            meta = tx.get_metadata(force_reload=True)
            self.assertEqual(meta.first_block, sidechain[0].hash)
        # Propagate a block connected to the side chain, case (v).
        # Another side chain has direcly exceeded the best score.
        fork_block4 = manager.generate_mining_block(parent_block_hash=sidechain3[-1].hash)
        fork_block4.weight = 10
        fork_block4.resolve()
        fork_block4.verify()
        self.assertTrue(manager.propagate_tx(fork_block4))
        sidechain3.append(fork_block4)
        # Chain through sidechain3 wins; everything not on it is voided.
        for block in blocks:
            meta = block.get_metadata(force_reload=True)
            self.assertEqual(meta.voided_by, {block.hash})
        for block in sidechain[1:]:
            meta = block.get_metadata(force_reload=True)
            self.assertEqual(meta.voided_by, {block.hash})
        for block in sidechain2[-1:]:
            meta = block.get_metadata(force_reload=True)
            self.assertEqual(meta.voided_by, {block.hash})
        for block in chain(sidechain[:1], sidechain2[:-1], sidechain3):
            meta = block.get_metadata(force_reload=True)
            self.assertEqual(meta.voided_by, None)
        for tx in txs2:
            meta = tx.get_metadata(force_reload=True)
            self.assertEqual(meta.first_block, sidechain[0].hash)
        # dot = manager.tx_storage.graphviz(format='pdf')
        # dot.render('test_fork')
        self.assertConsensusValid(manager)
    def test_block_height(self):
        """Height metadata must increase by one per block, starting at 0."""
        genesis_block = self.genesis_blocks[0]
        self.assertEqual(genesis_block.get_metadata().height, 0)
        manager = self.create_peer('testnet', tx_storage=self.tx_storage)
        # Mine 50 blocks in a row with no transaction but the genesis
        blocks = add_new_blocks(manager, 50, advance_clock=15)
        for i, block in enumerate(blocks):
            expected_height = i + 1
            self.assertEqual(block.get_metadata().height, expected_height)
    def test_tokens_issued_per_block(self):
        """Reward halves every BLOCKS_PER_HALVING until the minimum reward."""
        manager = self.create_peer('testnet', tx_storage=self.tx_storage)
        # this test is pretty dumb in that it test every possible height until halving has long stopped
        initial_reward = settings.INITIAL_TOKENS_PER_BLOCK
        final_reward = settings.MINIMUM_TOKENS_PER_BLOCK
        expected_reward = initial_reward
        height = 1
        # check that there are BLOCKS_PER_HALVING with each reward, starting at the first rewardable block (height=1)
        for _i_halving in range(0, settings.MAXIMUM_NUMBER_OF_HALVINGS):
            for _i_block in range(0, settings.BLOCKS_PER_HALVING):
                reward = manager.get_tokens_issued_per_block(height)
                self.assertEqual(reward, expected_reward, f'reward at height {height}')
                height += 1
            expected_reward /= 2
        self.assertEqual(expected_reward, final_reward)
        # check that halving stops, for at least two "halving rounds"
        for _i_block in range(0, 2 * settings.BLOCKS_PER_HALVING):
            reward = manager.get_tokens_issued_per_block(height)
            self.assertEqual(reward, expected_reward, f'reward at height {height}')
            height += 1
    def test_block_rewards(self):
        """Each mined block's single output must pay the height's reward."""
        # even dumber test that only check if manager.get_tokens_issued_per_block was used correctly for a really large
        # number of blocks, probably not worth running all the time
        manager = self.create_peer('testnet', tx_storage=self.tx_storage)
        block_count = (settings.MAXIMUM_NUMBER_OF_HALVINGS + 1) * settings.BLOCKS_PER_HALVING
        blocks = add_new_blocks(manager, block_count, advance_clock=block_count * 30)
        for block in blocks:
            outputs = block.outputs
            self.assertEqual(len(outputs), 1)
            output = outputs[0]
            height = block.get_metadata().height
            self.assertEqual(output.value, manager.get_tokens_issued_per_block(height))
    def test_daa_sanity(self):
        """Weight must rise for faster-than-target blocks and fall for slower."""
        # sanity test the DAA
        _set_test_mode(TestMode.DISABLED)
        manager = self.create_peer('testnet', tx_storage=self.tx_storage)
        N = settings.BLOCK_DIFFICULTY_N_BLOCKS
        T = settings.AVG_TIME_BETWEEN_BLOCKS
        manager.avg_time_between_blocks = T
        # stabilize weight on 2 and lower the minimum to 1, so it can vary around 2
        manager.min_block_weight = 2
        add_new_blocks(manager, N * 2, advance_clock=T)
        manager.min_block_weight = 1
        for i in range(N):
            # decreasing solvetime should increase weight
            base_weight = manager.generate_mining_block().weight
            add_new_blocks(manager, i, advance_clock=T)
            add_new_blocks(manager, 1, advance_clock=T * 0.9)
            add_new_blocks(manager, N - i, advance_clock=T)
            new_weight = manager.generate_mining_block().weight
            self.assertGreater(new_weight, base_weight)
            add_new_blocks(manager, N, advance_clock=T)
            # increasing solvetime should decrease weight
            base_weight = manager.generate_mining_block().weight
            add_new_blocks(manager, i, advance_clock=T)
            add_new_blocks(manager, 1, advance_clock=T * 1.1)
            add_new_blocks(manager, N - i, advance_clock=T)
            new_weight = manager.generate_mining_block().weight
            self.assertLess(new_weight, base_weight)
    def test_daa_weight_decay_amount(self):
        """Decay is 0 before the activation distance, then grows stepwise."""
        _set_test_mode(TestMode.DISABLED)
        amount = settings.WEIGHT_DECAY_AMOUNT
        for distance in range(0, settings.WEIGHT_DECAY_ACTIVATE_DISTANCE, 10):
            self.assertEqual(get_weight_decay_amount(distance), 0)
        distance = settings.WEIGHT_DECAY_ACTIVATE_DISTANCE - 1
        self.assertAlmostEqual(get_weight_decay_amount(distance), 0)
        distance = settings.WEIGHT_DECAY_ACTIVATE_DISTANCE
        # One extra `amount` of decay per full window past activation.
        for k in range(1, 11):
            for _ in range(settings.WEIGHT_DECAY_WINDOW_SIZE):
                self.assertAlmostEqual(get_weight_decay_amount(distance), k * amount)
                distance += 1
        self.assertAlmostEqual(get_weight_decay_amount(distance), 11 * amount)
    def test_daa_weight_decay_blocks(self):
        """Wrapper that restores the module-level DAA constants afterwards."""
        from hathor import daa
        orig_avg_time_between_blocks = daa.AVG_TIME_BETWEEN_BLOCKS
        orig_min_block_weight = daa.MIN_BLOCK_WEIGHT
        try:
            self._test_daa_weight_decay_blocks()
        finally:
            daa.AVG_TIME_BETWEEN_BLOCKS = orig_avg_time_between_blocks
            daa.MIN_BLOCK_WEIGHT = orig_min_block_weight
    def _test_daa_weight_decay_blocks(self):
        """Mining-block weight must step down by `amount` per decay window."""
        _set_test_mode(TestMode.DISABLED)
        manager = self.create_peer('testnet', tx_storage=self.tx_storage)
        amount = settings.WEIGHT_DECAY_AMOUNT
        from hathor import daa
        daa.AVG_TIME_BETWEEN_BLOCKS = settings.AVG_TIME_BETWEEN_BLOCKS
        daa.MIN_BLOCK_WEIGHT = 2 + 2 * settings.WEIGHT_DECAY_AMOUNT
        add_new_blocks(manager, 2 * settings.BLOCK_DIFFICULTY_N_BLOCKS, advance_clock=settings.AVG_TIME_BETWEEN_BLOCKS)
        daa.MIN_BLOCK_WEIGHT = 1
        base_weight = manager.generate_mining_block().weight
        self.assertGreater(base_weight, daa.MIN_BLOCK_WEIGHT)
        add_new_blocks(manager, 20, advance_clock=settings.AVG_TIME_BETWEEN_BLOCKS)
        dt = settings.AVG_TIME_BETWEEN_BLOCKS  # the latest call to add_new_blocks will advance the clock
        # Before the activation distance: no decay at all.
        while dt < settings.WEIGHT_DECAY_ACTIVATE_DISTANCE:
            weight = manager.generate_mining_block().weight
            self.assertAlmostEqual(weight, base_weight)
            manager.reactor.advance(1)
            dt += 1
        # First decay window: one `amount` subtracted.
        dt = 0
        while dt < settings.WEIGHT_DECAY_WINDOW_SIZE:
            weight = manager.generate_mining_block().weight
            self.assertAlmostEqual(weight, base_weight - amount)
            manager.reactor.advance(1)
            dt += 1
        # Second decay window: two `amount`s subtracted.
        dt = 0
        while dt < settings.WEIGHT_DECAY_WINDOW_SIZE:
            weight = manager.generate_mining_block().weight
            self.assertAlmostEqual(weight, base_weight - 2*amount)
            manager.reactor.advance(1)
            dt += 1
        # Past that, the weight is clamped at the configured minimum.
        manager.reactor.advance(1)
        weight = manager.generate_mining_block().weight
        self.assertAlmostEqual(weight, daa.MIN_BLOCK_WEIGHT)
# Concrete suite: run the base blockchain tests with sync-v1 peer params.
class SyncV1BlockchainTestCase(unittest.SyncV1Params, BaseBlockchainTestCase):
    __test__ = True
# Concrete suite: run the base blockchain tests with sync-v2 peer params.
class SyncV2BlockchainTestCase(unittest.SyncV2Params, BaseBlockchainTestCase):
    __test__ = True
# sync-bridge should behave like sync-v2, so it reuses that suite and only
# swaps in the bridge peer parameters.
class SyncBridgeBlockchainTestCase(unittest.SyncBridgeParams, SyncV2BlockchainTestCase):
    pass
|
983,329 | 6c95bce52ce5140187f9c969bfe92db191cbe3e2 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2016-09-19 15:18:34
# @Author : BaoXuebin (baoxbin@hotmail.com)
# @Link : ${link}
# @Version : $Id$
import uuid
import socket
from urllib import request
# Get the local machine's host name
def getHostName():
    """Return the local machine's host name."""
    hostname = socket.gethostname()
    return hostname
def getComName():
    """Return this machine's fully qualified domain name."""
    local_host = getHostName()
    return socket.getfqdn(local_host)
# Get the local (LAN) IP address
def getInnerIP():
    """Resolve the host name to this machine's LAN IP address."""
    local_host = getHostName()
    return socket.gethostbyname(local_host)
# Get this machine's MAC address
def getMacAddress():
    """Return this machine's MAC address formatted as "aa:bb:cc:dd:ee:ff"."""
    # uuid.getnode() yields the 48-bit hardware address; keep its last
    # 12 hex digits and group them into colon-separated byte pairs.
    raw = uuid.UUID(int=uuid.getnode()).hex[-12:]
    pairs = [raw[i:i + 2] for i in range(0, 11, 2)]
    return ":".join(pairs)
def getByBrowser(url):
    """Fetch *url* while pretending to be a desktop Chrome browser.

    Returns the response body decoded as GBK on HTTP 200, the sentinel
    string "1" on a non-200 status, or "2" on any exception (network
    failure, bad URL, decode error).
    NOTE(review): the sentinels could collide with a legitimate body and
    the decode assumes a GBK-encoded page (true for ip138.com) -- confirm
    before reusing for other URLs.
    """
    result = ''
    req = request.Request(url)
    # Spoof a desktop Chrome User-Agent so the site serves its normal page.
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.101 Safari/537.36')
    result = ""
    try:
        with request.urlopen(req) as f:
            if f.status == 200:
                result = f.read().decode('gbk')
            else:
                result = "1"
    except Exception as e:
        result = "2"
    return result
def getOuterIP():
    """Return the public IP plus a coarse location string, via ip138.com.

    Interprets getByBrowser's sentinels: "1" -> HTTP error, "2" -> network
    failure. The Chinese return strings are user-facing messages
    ("network error" / "network connection error") and are kept verbatim.
    """
    query_url = 'http://1212.ip138.com/ic.asp'
    result = getByBrowser(query_url)
    if result == "1":
        return "网络错误"
    elif result == "2":
        return "网络连接错误"
    else:
        # Page content looks like:
        # ['您的IP是:[116.226.120.26]', '来自:上海市徐汇区', '电信']
        # i.e. "Your IP is [x.x.x.x]", "From: <region>", "<carrier>".
        # NOTE(review): this scraping depends on ip138's exact markup and
        # will break if the page layout changes.
        result = ''.join(result.split('/')).split('<center>')[1].split(" ")
        outerIP = result[0].split(':')[1]
        local = result[1].split(':')[1] + " " + result[2]
        return outerIP + ' ' + local
def getComputerInfo():
    """Assemble host name / MAC / inner IP / outer IP into one display string."""
    return '主机名:%s\nMAC地址:%s\n内网IP:[%s]\n外网IP:%s\n' % (getHostName(), getMacAddress(), getInnerIP(), getOuterIP())
# Plugin registration: command help text and the handler it dispatches to.
desc = '\\ip: 查看电脑基本网络信息\n'
handle_func = getComputerInfo
983,330 | 3cfd4fe360ce6f8b805321cbbbd4a9666257146e | '''
drizzle -- Drizzle
vfs -- Virtual File System
Defines utilities for exposing an interface via a virtual
file system.
This implementation currently depends on llfuse.
'''
|
983,331 | 2c1996c0c516b1e517cfd2e4e0ddea16f5c9ed27 | # -*- coding: utf-8 -*-
"""
Copyright [2009-2020] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import itertools as it
import typing as ty
from functools import lru_cache
import attr
from attr.validators import instance_of as is_a
import networkx as nx
from rnacentral_pipeline.databases.sequence_ontology import tree as so_tree
# SO ids whose own names are used directly as the normalized RNA type:
# if a term's ontology path matches one of these, it is reported under
# the matching parent's name.
NORMALIZED_IDS = {
    "SO:0000253", # tRNA
    "SO:0000274", # snRNA
    "SO:0000275", # snoRNA
    "SO:0000375", # 5.8S rRNA
    "SO:0000650", # SSU rRNA
    "SO:0000651", # LSU rRNA
    "SO:0000652", # 5S rRNA
    "SO:0002128", # mito rRNA
}
# Explicit SO-id -> normalized-name overrides, checked before any tree walk.
# Currently only a placeholder entry; populate as needed.
NORM_TO = {
    "": "",
}
@attr.s(auto_attribs=True, frozen=True, slots=True)
class SoTermInfo:
    """A Sequence Ontology term: human-readable name plus SO accession."""
    name: str
    so_id: str
    @classmethod
    def ncRNA(cls):
        """Build the generic non-coding RNA term (SO:0000655)."""
        return cls("ncRNA", "SO:0000655")
    def is_a(self, value: str):
        """True when *value* matches either the term name or its SO id."""
        return value in (self.name, self.so_id)
@attr.s(frozen=True, slots=True)
class NormalizedSoTermInfo:
    """Wrapper holding a normalized SO term name (a plain string)."""
    so_term = attr.ib(validator=is_a(str))
# A path of SO terms from root toward a specific term.
SoTree = ty.Tuple[SoTermInfo]
@lru_cache()
def normalized_term(so_id: str, ontology: so_tree.SoOntology) -> NormalizedSoTermInfo:
    """Map an SO id to the normalized term name used for RNA types.

    Resolution order: explicit NORM_TO overrides, then an exact match
    against NORMALIZED_IDS, then a match of the term's ontology path
    against each normalized parent's path; falls back to the term's own
    name. Results are memoized via lru_cache.
    """
    # Overrides short-circuit everything; in particular we skip the
    # rna_type_tree walk entirely (the original computed it up front
    # even when this early return fired).
    if so_id in NORM_TO:
        return NormalizedSoTermInfo(NORM_TO[so_id])
    target_path = so_tree.rna_type_tree(ontology, so_id)
    for parent_so_id in NORMALIZED_IDS:
        if parent_so_id == so_id:
            return NormalizedSoTermInfo(ontology.id_to_name[so_id])
        parent_path = so_tree.rna_type_tree(ontology, parent_so_id)
        if target_path == parent_path:
            return NormalizedSoTermInfo(ontology.id_to_name[parent_so_id])
    # No normalization applies; report the term under its own name.
    return NormalizedSoTermInfo(ontology.id_to_name[so_id])
@attr.s(auto_attribs=True, frozen=True, slots=True, hash=True)
class RnaType:
    """An RNA type: INSDC name, SO term name, its ontology path, and the
    normalized display term."""
    insdc: str
    so_term: str
    ontology_terms: SoTree
    normalized_term: SoTermInfo
    @classmethod
    def build(cls, insdc, so_term, ontology):
        """Construct an RnaType from an INSDC/SO pair, resolving the SO
        tree path and the normalized term via the given ontology."""
        # Apply any SO-name renames before looking the term up.
        so_term = so_tree.RENAME.get(so_term, so_term)
        subtree = so_tree.rna_type_tree(ontology, so_term)
        subtree = tuple([SoTermInfo(p[1], p[0]) for p in subtree])
        so_id = ontology.name_to_id[so_term]
        return cls(
            insdc=insdc,
            so_term=so_term,
            ontology_terms=subtree,
            normalized_term=normalized_term(so_id, ontology),
        )
    def is_a(self, so_name):
        """True if *so_name* matches the INSDC name, the SO term, or any
        ancestor on the ontology path."""
        return (
            self.insdc == so_name
            or self.so_term == so_name
            or any(p.is_a(so_name) for p in self.ontology_terms)
        )
    def common(self, other: "RnaType") -> ty.Optional[ty.List[ty.Tuple[SoTermInfo, SoTermInfo]]]:
        """Return the shared ontology-path prefix with *other* as a list of
        (self_term, other_term) pairs, or None when nothing is shared."""
        zipped = zip(self.ontology_terms, other.ontology_terms)
        common = list(it.takewhile(lambda z: z[0] == z[1], zipped))
        if not common:
            return None
        return common
    def is_parent_of(self, other: "RnaType") -> bool:
        """True if this type's ontology path is a strict prefix of *other*'s."""
        # A parent path must be strictly shorter than the child's.
        if len(other.ontology_terms) >= len(self.ontology_terms):
            return False
        pairs = zip(self.ontology_terms, other.ontology_terms)
        for (left, right) in pairs:
            if left != right:
                return False
        return True
@attr.s(auto_attribs=True, frozen=True, slots=True, hash=True)
class RnaTypeGraph:
    """Graph of SO term names; each RNA-type node accumulates the
    containers that carry that type in a 'data' node attribute."""
    graph: nx.Graph
    @classmethod
    def from_containers(cls, containers):
        """Build the graph from objects exposing an .rna_type attribute.

        Fixes relative to the original draft:
        - `add_node(name, {"data", []})` passed a *set* positionally,
          which networkx rejects; attributes must be keyword args.
        - Node attributes live at `graph.nodes[name]`, not `graph[name]`
          (which is the adjacency view).
        - `rna_type.so_term` is already the term-name string (see
          RnaType.build), so `.so_term.name` raised AttributeError --
          TODO confirm the intended node key with the author.
        - The constructed instance is now actually returned.
        """
        graph = nx.Graph()
        for container in containers:
            rna_type = container.rna_type
            node_name = rna_type.so_term
            # Create the node with an empty accumulator the first time we
            # see it (re-adding with attrs would clobber earlier data).
            if node_name not in graph or "data" not in graph.nodes[node_name]:
                graph.add_node(node_name, data=[])
            graph.nodes[node_name]["data"].append(container)
            for so_term in rna_type.ontology_terms:
                graph.add_node(so_term.name)
            # Chain consecutive terms of the ontology path as edges.
            ns = [n.name for n in rna_type.ontology_terms]
            if len(ns) > 1:
                edges = tuple(zip(ns, ns[1:]))
                graph.add_edges_from(edges)
        return cls(graph=graph)
|
983,332 | abe835475f063d65f7431158187775ac29a17415 | import torch
import numpy as np
from skimage import transform
class ToTensor(object):
    """Convert ndarrays in sample to Tensors."""
    def __call__(self, sample):
        """Turn an (H, W, C) uint8 image into a (C, H, W) float32 tensor
        scaled to [0, 1]; the label array is wrapped as-is."""
        image, label = sample['image'], sample['label']
        # numpy images are channels-last; torch expects channels-first.
        chw = np.transpose(image, (2, 0, 1))
        chw = chw.astype('float32') / 255
        return {'image': torch.from_numpy(chw),
                'label': torch.from_numpy(label)}
class Rescale(object):
    """Rescale the image in a sample to a given size.

    Args:
        output_size (tuple or int): Desired output size. If tuple, output is
            matched to output_size. If int, smaller of image edges is matched
            to output_size keeping aspect ratio the same.
    """
    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        self.output_size = output_size
    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        h, w = image.shape[:2]
        if isinstance(self.output_size, int):
            # Match the smaller edge to output_size, preserving aspect ratio.
            if h > w:
                new_h, new_w = self.output_size * h / w, self.output_size
            else:
                new_h, new_w = self.output_size, self.output_size * w / h
        else:
            new_h, new_w = self.output_size
        new_h, new_w = int(new_h), int(new_w)
        # The original looped forever on a bare `except`, spinning if the
        # failure was persistent and hiding its cause. Retry a bounded
        # number of times and re-raise so real errors surface.
        last_error = None
        for _attempt in range(3):
            try:
                img = transform.resize(image, (new_h, new_w), mode='constant', anti_aliasing=True)
                break
            except Exception as exc:
                last_error = exc
                print('Issue resizing. Trying again.')
        else:
            raise last_error
        return {'image': img, 'label': label}
class Rotate(object):
    """Rotate the image in a sample by a random angle.

    Args:
        max_angle (int or float): Maximum absolute rotation in degrees;
            the angle is drawn uniformly from [-max_angle, max_angle].
    """
    def __init__(self, max_angle):
        assert isinstance(max_angle, (int, float))
        self.max_angle = max_angle
    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        # Uniform draw in [-max_angle, max_angle].
        angle = (np.random.rand()-0.5)*2*self.max_angle
        # The original looped forever on a bare `except`; retry a bounded
        # number of times and re-raise so genuine failures are not hidden.
        last_error = None
        for _attempt in range(3):
            try:
                img = transform.rotate(image, angle)
                break
            except Exception as exc:
                last_error = exc
                print('Issue rotating. Trying again.')
        else:
            raise last_error
        return {'image': img, 'label': label}
class VerticalFlip(object):
    """Flip the image in a sample with probability p.

    Args:
        p (float): Probability of a flip.

    NOTE(review): np.fliplr flips along the *horizontal* axis (left/right),
    so despite the class name this is a horizontal flip. Behavior is kept
    unchanged for backward compatibility; confirm the intended axis.
    """
    def __init__(self, p = 0.5):
        assert isinstance(p, (float))
        self.p = p
    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        if np.random.rand() < self.p:
            # .copy() materializes the flip: fliplr returns a view with
            # negative strides, which torch.from_numpy (used downstream
            # in ToTensor) cannot consume.
            image = np.fliplr(image).copy()
        return {'image': image, 'label': label}
class RandomCrop(object):
    """Crop randomly the image in a sample.

    Args:
        output_size (tuple or int): Desired output size. If int, square crop
            is made.
    """
    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            self.output_size = (output_size, output_size)
        else:
            assert len(output_size) == 2
            self.output_size = output_size
    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        h, w = image.shape[:2]
        new_h, new_w = self.output_size
        # np.random.randint's `high` is exclusive, so +1 allows a zero
        # offset; the original raised ValueError (randint(0, 0)) whenever
        # the image exactly matched the crop size.
        top = np.random.randint(0, h - new_h + 1)
        left = np.random.randint(0, w - new_w + 1)
        image = image[top: top + new_h,
                      left: left + new_w]
        return {'image': image, 'label': label}
983,333 | 4f607774a9681fa86a16475fc11098293f5c58f7 | from tensorflow.keras.layers import Input, Dense, Flatten, Conv2D, MaxPool2D, BatchNormalization, Dropout
from tensorflow.keras.models import Model
# Which constructor parameters to extract for each supported layer type,
# mapped to a (string) description of the expected value type.
layers_schema = {
    'Input': {
        'shape': 'tuple'
    },
    'Conv2D': {
        'filters': 'int',
        'kernel_size': 'int',
        'padding': 'str',
        'activation': 'str'
    },
    'MaxPool2D': {
        'pool_size': 'tuple'
    },
    'BatchNormalization': {},
    'Flatten': {},
    'Dense': {
        'units': 'int',
        'activation': 'str'
    },
    'Dropout': {
        'rate': 'float'
    }
}
# Keras reports some classes under different names than the schema keys;
# normalize them before the schema lookup.
alias_for_parsing = {
    'InputLayer': 'Input',
    'MaxPooling2D': 'MaxPool2D'
}
# Some schema keys are not plain layer attributes; these expressions are
# evaluated relative to the layer object instead (see parse_model).
embedded_attributes = {
    'activation': 'activation.__name__',
    'shape': 'input_shape',
    'kernel_size': 'kernel_size[0]'
}
# (layer type -> parameter keys) that need post-processing in special_process.
sp_list = {'Input': ['shape']}
def special_process(info, layer_type, param_key):
    """Post-process an extracted parameter value for layers that need it.

    Only the Input layer's 'shape' is rewritten: Keras reports input_shape
    as [(None, H, W, C)], and the leading batch dimension is dropped.
    Every other (layer_type, param_key) combination passes through as-is.
    """
    if layer_type == 'Input' and param_key == 'shape':
        return info[0][1:]
    return info
def build_model():
    """Build the demo CNN: three conv/pool/batch-norm stages followed by a
    dense head with dropout.

    Input is a 224x224 RGB image; output is an 8-way softmax.
    """
    # NOTE(review): `input` shadows the builtin; left unchanged here.
    input = Input(shape=(224, 224, 3))
    x = Conv2D(filters=16, kernel_size=3, padding='same', activation='relu')(input)
    x = MaxPool2D(pool_size=(2, 2))(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=32, kernel_size=3, padding='same', activation='relu')(x)
    x = MaxPool2D(pool_size=(2, 2))(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=64, kernel_size=3, padding='same', activation='relu')(x)
    x = MaxPool2D(pool_size=(2, 2))(x)
    x = BatchNormalization()(x)
    x = Flatten()(x)
    x = Dense(units=64, activation='relu')(x)
    x = Dropout(rate=0.2)(x)
    x = Dense(units=32, activation='relu')(x)
    x = Dropout(rate=0.2)(x)
    x = Dense(units=16, activation='relu')(x)
    x = Dense(units=8, activation='softmax')(x)
    model = Model(inputs=input, outputs=x)
    return model
def parse_model(model):
    """Walk a Keras model and extract a node/edge description of its layers.

    Returns:
        nodes: list of {'name', 'type', 'params'} dicts, one per layer,
            with params chosen per layers_schema.
        rels: list of {'from', 'to'} dicts describing layer connectivity.
    """
    nodes = []
    rels = []
    for layer in model.layers:
        to_node = layer.name
        print(to_node)
        # Class name, e.g. "<class '...layers.Dense'>" -> "Dense".
        raw_type = str(type(layer)).split('.')[-1].replace("'>", "")
        if raw_type in alias_for_parsing:
            raw_type = alias_for_parsing[raw_type]
        print('Layer type:', raw_type)
        schema = layers_schema[raw_type]
        node_properties = {}
        for key in schema:
            if key in embedded_attributes:
                attr = embedded_attributes[key]
            else:
                attr = key
            # NOTE(review): eval executes attribute paths such as
            # 'activation.__name__' / 'kernel_size[0]' from the trusted
            # embedded_attributes table above. Never feed it user input.
            value = eval('layer.{}'.format(attr))
            print(key, ':', value)
            node_properties[key] = value
        nodes.append({'name': to_node, 'type': raw_type, 'params': node_properties})
        # Producing layer of the first input tensor; assumes single-input
        # layers (true for this sequential-style model) -- TODO confirm
        # for branching architectures.
        from_node = layer.input.name.split('/')[0]
        print('Relationship:', from_node, '->', to_node)
        if from_node != to_node:
            rels.append({'from': from_node, 'to': to_node})
        print()
    return nodes, rels
if __name__ == "__main__":
    # Demo entry point: build the sample CNN and dump its graph description.
    model = build_model()
    nodes, rels = parse_model(model)
    print('Done')
|
983,334 | a2cdb39f2c658c9df588315825c0db9fab800d43 | from django.db import models
# Create your models here.
class Autor(models.Model):
    """Author: basic contact details plus an optional age."""
    # NOTE(review): correo could be models.EmailField for validation; left
    # as CharField to avoid a schema/migration change.
    nombre=models.CharField(max_length=200)
    telefono=models.CharField(max_length=20)
    correo =models.CharField(max_length=50)
    edad = models.IntegerField(null= True)  # nullable: age is optional
    def __str__(self):
        """Display authors by name in the admin and in listings."""
        return self.nombre
983,335 | eb174796a4cb019c701d6f841c459ba5aa308781 | import sqlite3
def get_user_id(uri='db.sqlite', query='SELECT 1'):
    """Fetch the first row produced by *query* from the SQLite DB at *uri*.

    Note: sqlite3's connection context manager only scopes a transaction
    (commit/rollback); it does NOT close the connection, so the original
    leaked the handle. We close explicitly in a finally block.

    Args:
        uri: Path of the SQLite database file (default keeps the original
            hard-coded value for backward compatibility).
        query: SQL statement to execute.

    Returns:
        The first result row as a tuple, or None if there are no rows.
    """
    connection = sqlite3.connect(uri)
    try:
        with connection:  # transaction scope: commit on success, rollback on error
            cursor = connection.execute(query)
            row = cursor.fetchone()
    finally:
        connection.close()
    return row
if __name__ == '__main__':
    # Smoke test: run the probe query and print the resulting row.
    user_id = get_user_id()
    print(user_id)
983,336 | 7e7b935dcbef228074f0af6ae5d48ec60001b084 | """Module providing the meta-models."""
from .cnn import build_cnn_meta_model
from .mlp import build_mlp_meta_model
# Public API of the meta-models package (controls `from ... import *`).
__all__ = [
    "build_cnn_meta_model",
    "build_mlp_meta_model"
]
|
983,337 | 601bf8af947891206e2666f2b00de3128321a938 | from __future__ import print_function
import argparse
import skimage
import os
import numpy as np
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from IPython.display import HTML
from torchvision.utils import save_image
from torch.autograd import Variable
import torch.autograd as autograd
import torch.nn.functional as F
def generate_mask(b, c, w, h):
    '''
    Generate a random rectangular hole mask.

    Args:
        b: batch size (number of identical masks stacked along dim 0).
        c: unused; kept for interface compatibility with existing callers.
        w, h: spatial size of the mask.

    Returns:
        mask: (b, 1, w, h) float array -- 1 inside the rectangle, 0 outside.
        idx:  [y, y+y_len, x, x+x_len] rectangle coordinates.

    NOTE(review): the returned coordinates are (y, x) ordered while the mask
    is written at [..., x:x+x_len, y:y+y_len]; for non-square inputs callers
    indexing with idx get a transposed region -- confirm this is intended.
    '''
    # Random top-left corner, restricted to the upper-left quadrant so a
    # non-degenerate box always fits.
    x, y = np.random.randint(0, w // 2, size=1)[0], np.random.randint(0, h // 2, size=1)[0]
    left_x, left_y = w - x, h - y
    # Random box extent within the remaining space (may be zero).
    x_len, y_len = np.random.randint(0, left_x, size=1)[0], np.random.randint(0, left_y, size=1)[0]
    mask = np.zeros((b, 1, w, h))
    # Vectorized fill of the box region; replaces the original Python-list
    # construction ([1]*x_len*y_len ... .reshape) with a direct assignment.
    mask[:, :, x:x + x_len, y:y + y_len] = 1.0
    return mask, [y, y + y_len, x, x + x_len]
def compute_gradient_penalty(D, real_samples, fake_samples_g, fake_samples_l):
    '''
    WGAN-GP style gradient penalty for discriminator D.

    Interpolates between real and generated (global) samples with a random
    per-sample weight, runs D on the interpolates, and penalizes deviation
    of the gradient norm from 1. Smooths/stabilizes training.

    Args:
        D: discriminator taking (global_input, local_input).
        real_samples: real image batch.
        fake_samples_g: generated global images (interpolated against reals).
        fake_samples_l: generated local patches -- passed to D unchanged;
            NOTE(review): only the global samples are interpolated, the local
            branch sees pure fakes. Confirm this asymmetry is intended.

    Returns:
        Scalar tensor: mean squared deviation of the interpolates' gradient
        L2 norm from 1.

    Relies on the module-level ``Tensor`` alias (cuda/cpu FloatTensor) set
    in the __main__ block.
    '''
    # Random weight term for interpolation between real and fake samples
    alpha = Tensor(np.random.random((real_samples.size(0), 1, 1, 1)))
    # Get random interpolation between real and fake samples
    interpolates = (alpha * real_samples + ((1 - alpha) * fake_samples_g)).requires_grad_(True)
    d_interpolates = D(interpolates, fake_samples_l)
    # grad_outputs of ones: we want d(D)/d(interpolates) summed over outputs.
    fake = Variable(Tensor(real_samples.shape[0], 1).fill_(1.0), requires_grad=False)
    # Get gradient w.r.t. interpolates
    gradients = autograd.grad(
        outputs=d_interpolates,
        inputs=interpolates,
        grad_outputs=fake,
        create_graph=True,
        retain_graph=True,
        only_inputs=True,
    )[0]
    gradients = gradients.view(gradients.size(0), -1)
    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
    return gradient_penalty
# custom weights initialization called on netG and netD
def weights_init(m):
    """DCGAN-style init: N(0, 0.02) for conv weights; N(1, 0.02) weights
    and zero bias for batch-norm layers. Other module types are untouched."""
    layer_name = m.__class__.__name__
    if 'Conv' in layer_name:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm' in layer_name:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
# Generator Code
class Generator(nn.Module):
    """Image-completion generator.

    Input: 4-channel tensor (RGB image with the hole zeroed out, concatenated
    with the binary mask). Output: 3-channel RGB image in [0, 1] (Sigmoid).
    Encoder downsamples 64x64 -> 16x16, a stack of dilated convolutions
    (dilation 2/4/8) widens the receptive field at 16x16, and two transposed
    convolutions upsample back to 64x64.
    """
    def __init__(self, ngpu):
        super(Generator, self).__init__()
        self.ngpu = ngpu  # number of GPUs (stored; parallelism handled by caller)
        self.main = nn.Sequential(
            # First Layer 64x64x4
            nn.Conv2d(in_channels = 4, out_channels = 64, kernel_size = 5, stride = 1, padding=2),
            nn.ReLU(),
            nn.BatchNorm2d(64),
            # Second Layer 64x64x64
            nn.Conv2d(in_channels = 64, out_channels = 128, kernel_size = 3, stride = 2, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(128),
            # Third Layer 32x32x128
            nn.Conv2d(in_channels = 128, out_channels = 128, kernel_size = 3, stride = 1, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(128),
            # Fourth Layer 32x32x128
            nn.Conv2d(in_channels = 128, out_channels = 256, kernel_size = 3, stride = 2, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(256),
            # Fifth Layer 16x16x256
            nn.Conv2d(in_channels = 256, out_channels = 256, kernel_size = 3, stride = 1, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(256),
            # Sixth Layer 16x16x256 dilated
            nn.Conv2d(in_channels = 256, out_channels = 256, kernel_size = 3, stride = 1, padding=2, dilation = 2),
            nn.ReLU(),
            nn.BatchNorm2d(256),
            # Seventh Layer 16x16x256 dilated
            nn.Conv2d(in_channels = 256, out_channels = 256, kernel_size = 3, stride = 1, padding=4, dilation = 4),
            nn.ReLU(),
            nn.BatchNorm2d(256),
            # Eighth Layer 16x16x256 dilated
            nn.Conv2d(in_channels = 256, out_channels = 256, kernel_size = 3, stride = 1, padding=8, dilation = 8),
            nn.ReLU(),
            nn.BatchNorm2d(256),
            # 9th Layer 16x16x256
            nn.Conv2d(in_channels = 256, out_channels = 256, kernel_size = 3, stride = 1, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(256),
            # 10th Layer 16x16x256 -- upsample to 32x32
            nn.ConvTranspose2d(in_channels = 256, out_channels = 128, kernel_size = 4, stride = 2, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(128),
            nn.Conv2d(in_channels = 128, out_channels = 128, kernel_size = 3, stride = 1, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(128),
            # upsample to 64x64
            nn.ConvTranspose2d(in_channels = 128, out_channels = 64, kernel_size = 4, stride = 2, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(64),
            nn.Conv2d(in_channels = 64, out_channels = 32, kernel_size = 3, stride = 1, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(32),
            # Final projection to RGB, squashed to [0, 1]
            nn.Conv2d(in_channels = 32, out_channels = 3, kernel_size = 3, stride = 1, padding=1),
            nn.Sigmoid(),
        )

    def forward(self, input):
        """Run the completion network; input (B, 4, 64, 64) -> (B, 3, 64, 64)."""
        return self.main(input)
class Discriminator(nn.Module):
    """Two-branch discriminator (global + local).

    The global branch consumes the full 64x64 image, the local branch the
    32x32 patch around the hole. Each branch is reduced to a 1024-d feature
    via a Linear+Sigmoid head; the concatenated 2048-d vector is mapped to a
    single real/fake probability.
    """
    def __init__(self, ngpu):
        super(Discriminator, self).__init__()
        self.ngpu = ngpu  # number of GPUs (stored; parallelism handled by caller)
        self.global_dis = nn.Sequential(
            # input is 3 x 64 x 64
            nn.Conv2d(in_channels = 3, out_channels = 64, kernel_size = 5, stride = 2, padding=2, bias=False), # floor(64-5+2*2)/2 + 1 = 32
            nn.ReLU(),
            # state size. 64 x 32 x 32
            nn.Conv2d(in_channels = 64, out_channels = 128, kernel_size = 5, stride = 2, padding=2, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            # state size. 128 x 16 x 16
            nn.Conv2d(in_channels = 128, out_channels = 256, kernel_size = 5, stride = 2, padding=2, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            # state size. 256 x 8 x 8
            nn.Conv2d(in_channels = 256, out_channels = 512, kernel_size = 5, stride = 2, padding=2, bias=False), # floor(8-5+2*2)/2 + 1 = 4
            nn.BatchNorm2d(512),
            nn.ReLU(),
        )
        # Global head: 512*4*4 -> 1024
        self.l1 = nn.Sequential(
            nn.Linear(512*4*4, 1024),
            nn.Sigmoid()
        )
        # Local head: 256*4*4 -> 1024
        self.l2 = nn.Sequential(
            nn.Linear(256*4*4, 1024),
            nn.Sigmoid()
        )
        self.local_dis = nn.Sequential(
            # state size. 3 x 32 x 32
            nn.Conv2d(in_channels = 3, out_channels = 64, kernel_size = 5, stride = 2, padding=2, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            # state size. 64 x 16 x 16
            nn.Conv2d(in_channels = 64, out_channels = 128, kernel_size = 5, stride = 2, padding=2, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            # state size. 128 x 8 x 8
            nn.Conv2d(in_channels = 128, out_channels = 256, kernel_size = 5, stride = 2, padding=2, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(),
        )
        # Joint classifier over concatenated global+local features (1024+1024)
        self.l3 = nn.Sequential(
            nn.Linear(2048, 1),
            nn.Sigmoid()
        )

    def forward(self, input1, input2):
        """input1: global 64x64 image; input2: local 32x32 patch.
        Returns a (B, 1) real/fake probability."""
        gd = self.global_dis(input1)
        ld = self.local_dis(input2)
        # Linearlize (flatten conv features before the linear heads)
        gd_l = gd.view(-1, 512*4*4)
        gd_l= self.l1(gd_l)
        ld_l = ld.view(-1, 256*4*4)
        ld_l = self.l2(ld_l)
        x = torch.cat((gd_l, ld_l), dim=1)
        return self.l3(x)
if __name__ == "__main__":
    # ------------------------------------------------------------------
    # Training script for the inpainting GAN: hyperparameters, data setup,
    # network construction, then the alternating D/G training loop.
    # ------------------------------------------------------------------
    # minimum mask size
    MIN_MASK_SIZE = 25
    # Local dict
    LOCAL_DICT = 'loacalMSE_noPenalty'
    # LOCAL == 1, Generator only use local MSE; otherwise use both global and local
    LOCAL = 1
    # The path for the training set
    dataroot = "./train"
    # The path for storing intermediate result (with training data)
    os.makedirs(LOCAL_DICT, exist_ok=True)
    # Add WGAN_GP penalty
    ADD_GP_PEN = 0
    # Number of workers for dataloader
    workers = 2
    # Batch size during training
    batch_size = 32
    # Spatial size of training images. All images will be resized to this
    # Size using a transformer.
    image_size = 64
    # Number of channels in the training images. For color images this is 3
    nc = 3
    # Size of z latent vector (i.e. size of generator input)
    nz = 100
    # Size of feature maps in generator
    ngf = 64
    # Size of feature maps in discriminator
    ndf = 64
    # Number of training epochs
    num_epochs = 100
    # Learning rate for optimizers
    lr = 0.00015
    # Beta1 hyperparam for Adam optimizers
    beta1 = 0.5
    # Number of GPUs available. Use 0 for CPU mode.
    ngpu = 1
    dataset = dset.ImageFolder(root=dataroot,
                               transform=transforms.Compose([
                                   transforms.Resize(image_size),
                                   transforms.CenterCrop(image_size),
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                               ]))
    # Create the dataloader
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                             shuffle=True, num_workers=workers)
    # Decide which device we want to run on
    device = torch.device("cuda:0" if (torch.cuda.is_available() and ngpu > 0) else "cpu")
    cuda_ava = True if torch.cuda.is_available() else False
    # Module-level alias used by compute_gradient_penalty().
    Tensor = torch.cuda.FloatTensor if cuda_ava else torch.FloatTensor
    print("cuda available: ", cuda_ava)
    print("add wgan_gp penalty coefficient: ", ADD_GP_PEN)
    # Create the generator
    netG = Generator(ngpu).to(device)
    # Handle multi-gpu if desired
    if (device.type == 'cuda') and (ngpu > 1):
        netG = nn.DataParallel(netG, list(range(ngpu)))
    # Apply the weights_init function to randomly initialize all weights
    # to mean=0, stdev=0.2.
    netG.apply(weights_init)
    # Create the Discriminator
    netD = Discriminator(ngpu).to(device)
    # Handle multi-gpu if desired
    if (device.type == 'cuda') and (ngpu > 1):
        netD = nn.DataParallel(netD, list(range(ngpu)))
    # Apply the weights_init function to randomly initialize all weights
    # to mean=0, stdev=0.2.
    netD.apply(weights_init)
    # Grab one fixed batch + mask for periodic visual checkpoints, then stop.
    for i, data in enumerate(dataloader, 0):
        fixed_img = data[0].to(device)
        # Keep sampling until the mask is reasonably large.
        while(1):
            mask1, _ = generate_mask(batch_size, 3, 64, 64)
            if np.sum(mask1)/batch_size > 800:
                break
        fixed_mask = torch.Tensor(mask1).to(device)
        holed_img = fixed_img*(1-fixed_mask)
        fixed_input = torch.cat((holed_img, fixed_mask), dim=1)
        save_image(holed_img[:16], LOCAL_DICT + "/holed_img.png", nrow=4, normalize=True)
        save_image(fixed_img[:16], LOCAL_DICT + "/origin_img.png", nrow=4, normalize=True)
        break
    '''
    The first thing is that the BCE objective for the Generator can more accurately be stated as
    "the images output by the generator should be assigned a high probability by the Discriminator."
    It's not BCE as you might see in a binary reconstruction loss, which would be BCE(G(Z),X)
    where G(Z) is a generated image and X is a sample, it's BCE(D(G(Z)),1) where D(G(Z)) is the probability
    assigned to the generated image by the discriminator. Given a "perfect" generator which always has photorealistic
    outputs, the D(G(Z)) values should always be close to 1. Obviously in practice there's difficulties getting
    this kind of convergence (the training is sort of inherently unstable) but that is the goal.
    The second is that in the standard GAN algorithm, the latent vector (the "random noise" which the generator
    receives as input and has to turn into an image) is sampled independently of training data.
    If you were to use the MSE between the outputs of the GAN and a single image, you might get some sort of result out,
    but you'd effectively be saying "given this (random) Z, produce this specific X" and you'd be implicitly forcing the
    generator to learn a nonsensical embedding of the image. If you think of the Z vector as a high-level description of
    the image, it would be like showing it a dog three times and asking it to generate the same dog given three different
    (and uncorrelated) descriptions of the dog. Compare this with something like a VAE which has an explicit inference
    mechanism (the encoder network of the VAE infers Z values given an image sample) and then attempts to reconstruct a
    given image using those inferred Zs. The GAN does not attempt to reconstruct an image, so in its vanilla form it
    doesn't make sense to compare its outputs to a set of samples using MSE or MAE.
    '''
    criterion = nn.BCELoss()
    iters = 0
    # img_list = []
    optimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(beta1, 0.999))
    optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999))
    running_loss_g = 0.0
    running_loss_d = 0.0
    print("Starting Training Loop...")
    # For each epoch
    for epoch in range(num_epochs):
        # For each batch in the dataloader
        for i, data in enumerate(dataloader, 0):
            # Calculate real image for gradient penalty
            real = Variable(data[0].type(Tensor))
            # Load images from dataloader
            img_batch = data[0].to(device)
            real_batch_size = img_batch.shape[0]
            # generate related mask with batch size
            # Dimention batch_size x channels x width x height
            # NOTE(review): size check divides by the configured batch_size,
            # not real_batch_size (the last batch can be smaller) -- confirm.
            while(1):
                mask_batch, idx = generate_mask(real_batch_size, 3, 64, 64)
                if np.sum(mask_batch)/batch_size > MIN_MASK_SIZE:
                    break
            mask_batch = torch.Tensor(mask_batch).to(device)
            ############## Discriminator ###############
            netD.zero_grad()
            # NOTE(review): torch.full with an int fill value yields a long
            # tensor on newer PyTorch; BCELoss expects float -- confirm version.
            label = torch.full((real_batch_size,), 1, device=device)
            # True image for global discriminator
            global_img_batch = img_batch
            # Create the mask with smaller size
            local_img_batch1 = global_img_batch[:, :, idx[0]:idx[1], idx[2]:idx[3]]
            # Resize (F.upsample is deprecated in favor of F.interpolate)
            local_img_batch = F.upsample(local_img_batch1, size=(32,32), mode='bilinear')
            output = netD(global_img_batch, local_img_batch).view(-1)
            errD_real = criterion(output, label)
            errD_real.backward()
            # Get the noise output from generator
            real_batch_size = img_batch.shape[0]
            img_holed_batch = img_batch*(1 - mask_batch).to(device)
            train_batch = torch.cat((img_holed_batch, mask_batch), dim=1)
            # Generate the predict image batch
            predicted_img_batch = netG(train_batch)
            global_img_fake_batch = predicted_img_batch
            local_img_fake_batch1 = global_img_fake_batch[:, :, idx[0]:idx[1], idx[2]:idx[3]]
            local_img_fake_batch = F.upsample(local_img_fake_batch1, size=(32,32), mode='bilinear')
            label.fill_(0)
            output = netD(global_img_fake_batch.detach(), local_img_fake_batch.detach()).view(-1)
            if ADD_GP_PEN != 0:
                gradPenalty = compute_gradient_penalty(netD, real.data, global_img_fake_batch.data, local_img_fake_batch.data)
                errD_fake = criterion(output, label) + gradPenalty*ADD_GP_PEN
            else:
                errD_fake = criterion(output, label)
            errD_fake.backward()
            errD = errD_real + errD_fake
            optimizerD.step()
            running_loss_d += errD.item()
            if iters % 500 == 0: # print every 2000 mini-batches
                print('[%d/%d][%d/%d] Discriminator loss: %.3f' %
                      (epoch, num_epochs, i, len(dataloader), running_loss_d / 500))
                running_loss_d = 0.0
            ############## Generator ###############
            netG.zero_grad()
            # NOTE(review): the adversarial G loss below is computed on pure
            # random noise with target label 0 -- standard GAN training would
            # use netD(predicted_img_batch, ...) with label 1. Confirm intent.
            label.fill_(0)
            noise_global = torch.randn(real_batch_size, 3, 64, 64, device=device)
            noise_local = torch.randn(real_batch_size, 3, 32, 32, device=device)
            output = netD(noise_global, noise_local)
            errG = criterion(output, label)
            errG.backward(retain_graph=True)
            # Calculate the MSE (Mean Squared Error)
            diff_batch = predicted_img_batch - img_batch
            MSE = torch.sum((diff_batch**2)*mask_batch)/(real_batch_size * 64 * 64)
            # Determine to use overall MSE or just mask MSE criteria
            if LOCAL:
                MSE = torch.sum((diff_batch**2)*mask_batch)/(real_batch_size * 64 * 64)
            else:
                MSE_local = torch.sum((diff_batch**2)*mask_batch)/(real_batch_size * 64 * 64)
                MSE_global = torch.sum((diff_batch**2))/(real_batch_size * 64 * 64)
                MSE = MSE_local + MSE_global
            MSE.backward()
            loss = MSE + errG
            optimizerG.step()
            running_loss_g += loss.item()
            # print every 500 mini-batches
            if iters % 500 == 0:
                print('[%d/%d][%d/%d] Generator loss: %.3f' %
                      (epoch, num_epochs, i, len(dataloader), running_loss_g / 500))
                running_loss_g = 0.0
            # Save predicted image every 500 iterations
            if (iters % 500 == 0) or ((epoch == num_epochs-1) and (i == len(dataloader)-1)):
                with torch.no_grad():
                    fake = netG(fixed_input).detach().cpu()
                # Composite: generated pixels inside the hole, originals outside.
                input_fake = (fake * (fixed_mask.detach().cpu())) + holed_img.detach().cpu()
                save_image(input_fake.data[:16], LOCAL_DICT + "/%d.png" % iters, nrow=4, normalize=True)
            # Save model every 500 iterations, use validate.py to get validation result
            if (iters % 10000 == 0) or ((epoch == num_epochs-1) and (i == len(dataloader)-1)):
                os.makedirs('./models/%d_' % iters + LOCAL_DICT, exist_ok=True)
                torch.save(netG.state_dict(), './models/%d_' % iters + LOCAL_DICT + '/generator')
                torch.save(netD.state_dict(), './models/%d_' % iters + LOCAL_DICT + '/discriminator')
            iters += 1;
|
983,338 | 6ce326dd13fae654feebbccf6a3d09295aabd6d2 | import logging.handlers
import os
import sys
# Choose the server log directory and file name.
# NOTE(review): "LOF" in SERVER_LOF_FILE_PATH looks like a typo for "LOG";
# left unchanged because the name may be imported by other modules.
LOG_FOLDER_PATH = os.path.dirname(os.path.abspath(__file__))
SERVER_LOF_FILE_PATH = os.path.join(LOG_FOLDER_PATH, '../logs/server.log')
# Record format shared by all handlers.
formatter = logging.Formatter("%(asctime)s - %(module)s - %(levelname)s : %(message)s")
# Create and configure the console (stderr) handler.
stream = logging.StreamHandler(sys.stderr)
stream.setFormatter(formatter)
stream.setLevel(logging.INFO)
# Create and configure the file handler (rotated daily).
server_handler = logging.handlers.TimedRotatingFileHandler(SERVER_LOF_FILE_PATH, encoding='utf8', when='d')
server_handler.setFormatter(formatter)
# Create and configure the logger instance used by the server.
server_logger = logging.getLogger('server_logger_instance')
server_logger.addHandler(stream)
server_logger.addHandler(server_handler)
server_logger.setLevel(logging.INFO)
|
983,339 | 7c01cfdac1eefd67c20aaa203f86c5857a244a40 | '''
Created on Aug 2, 2012
@author: hossain
'''
from django.db import models
class Company(models.Model):
    """Django model for a company and its location details.

    NOTE(review): backed by the legacy table 'location', not 'company' --
    presumably intentional for an existing schema; confirm before renaming.
    """
    id = models.AutoField(primary_key=True, db_column='ID')
    name = models.CharField(max_length=135, blank=True)
    address = models.CharField(max_length=135, blank=True)
    region = models.CharField(max_length=135, blank=True)
    state = models.CharField(max_length=135, blank=True)
    country = models.CharField(max_length=135, blank=True)

    def __unicode__(self):
        # Display companies by name (Python 2 / old Django convention).
        return self.name

    class Meta:
        db_table = u'location'
class Product(models.Model):
    """Django model for a product sold by a Company (legacy table 'products').

    NOTE(review): field names 'ingridient1..3' are misspellings of
    'ingredient' but map to existing DB columns -- renaming requires a
    migration. related_name='company' on the FK is also unusual (it names
    the *reverse* accessor on Company) -- confirm before changing.
    """
    id = models.AutoField(primary_key=True, db_column='ID')
    company = models.ForeignKey(Company, related_name='company')
    name = models.CharField(max_length=500, blank=True)
    pcode = models.CharField(max_length=500, blank=True)      # product code
    dosage_form = models.CharField(max_length=500, blank=True)
    manufacturer = models.CharField(max_length=500, blank=True)
    ingridient1 = models.CharField(max_length=500, blank=True)
    ingridient2 = models.CharField(max_length=500, blank=True)
    ingridient3 = models.CharField(max_length=500, blank=True)
    pclass = models.CharField(max_length=500, blank=True)     # product class
    pbrand = models.CharField(max_length=500, blank=True)     # product brand
    packet_size = models.CharField(max_length=500, blank=True)
    strength = models.CharField(max_length=500, blank=True)
    dose_type = models.CharField(max_length=500, blank=True)
    price = models.DecimalField(decimal_places=2, max_digits=5)
    #buy_price = CurrencyField(decimal_places=2,max_digits=10, null=True, blank=True)
    isenabled = models.IntegerField(null=True, blank=True)    # soft enable/disable flag

    def __unicode__(self):
        return self.name

    class Meta:
        db_table = u'products'
'''
class ProductStat(models.Model):
id = models.AutoField(primary_key=True, db_column='ID')
machine = models.ForeignKey(Machines, related_name='machines')
product = models.ForeignKey(Products, related_name='sold_products')
quantity = models.IntegerField(null=True, blank=True)
sold_price = models.DecimalField(decimal_places=2, max_digits=5)
date = models.DateTimeField(null=True, blank=True)
def __unicode__(self):
return str(self.id)
class Meta:
db_table = u'sales'
'''
|
983,340 | e3a0fa49be2ba4c155b455264b3f43bde1459efe | import os
import sys
import time
import glob
import numpy as np
import random
import torch
import utils
import time
import logging
import argparse
import torch.nn as nn
import torch.utils
import datetime
import torch.nn.functional as F
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
import math
import pickle
import re
import copy
from torch.autograd import Variable
from model import NetworkCIFAR as Network
from model import NetworkEnsemble
from GP import GPUCB, Environment
import genotypes
# ----------------------------------------------------------------------
# Command-line configuration for the model-selection / transfer experiments,
# followed by run-timestamp and logging setup (console + per-run log file).
# ----------------------------------------------------------------------
parser = argparse.ArgumentParser("model selection")
parser.add_argument('--data', type=str, default='data', help='location of the data corpus')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--epochs', type=int, default=30, help='num of training epochs')
parser.add_argument('--batch_size', type=int, default=96, help='batch size') # here
parser.add_argument('--learning_rate', type=float, default=0.01, help='init learning rate')
parser.add_argument('--learning_rate_min', type=float, default=0.001, help='min learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--report_freq', type=float, default=200, help='report frequency')
parser.add_argument('--init_channels', type=int, default=16, help='num of init channels')
parser.add_argument('--model_path', type=str, default='saved_models', help='path to save the model')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--auxiliary_weight', type=float, default=0.4, help='weight for auxiliary loss')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path probability')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--arch', type=str, default='DARTS', help='the previously stored genotype')
parser.add_argument('--max_acc', action='store_true',default=False, help='whether maximum acc')
parser.add_argument('--ensemble_size',type=int,default=3,help='maximum number of ensemble size')
parser.add_argument('--transfer', type=str, default='fine_tune', help='transfer method')
parser.add_argument('--mode',type=str,default='simulation',help='experiment mode')
# string default '3' is converted by argparse via type=int
parser.add_argument('--discount',type=int,default='3',help=' a parameter used in discount selection strategy')
parser.add_argument('--selection_strategy',type=str,default='random',help='the model selecton strategy')
parser.add_argument('--ensemble_strategy',type=str,default='simple_voting',help='the model selecton strategy')
parser.add_argument('--source_meta',type=str,default='None',help='the version of dataset meta file')
parser.add_argument('--target_meta',type=str,default='None',help='the version of dataset meta file')
parser.add_argument('--target_stem',type=str,default='None',help='the name of the target stem dataset')
parser.add_argument('--source_stem',type=str,default='None',help='the name of the source stem dataset')
parser.add_argument('--target_dataset', type=str, default='None', help='the name of target')
parser.add_argument('--source_models', type=str, default='None', help='the name of target')
parser.add_argument('--EXP', type=str, default='None', help='specific experiments')
parser.add_argument('--stem_list',type=str,default='None',help='the dirs of target experiments')
args = parser.parse_args()
sourceDir=args.source_stem
targetDir=args.target_stem
# Timestamp identifying this run; reused for model/EXP directory names.
now=datetime.datetime.now()
now=datetime.datetime.strftime(now,'%m-%d %H:%M:%S')
# Log to stdout and to <save>/log.txt with the same format.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
def getOptimizer(model):
    """Build the SGD optimizer for *model* according to args.transfer.

    'fine_tune' optimizes all parameters; 'extractor' optimizes only the
    classifier head (frozen feature extractor). Any other value logs an
    error and exits.
    """
    if args.transfer=='fine_tune':
        trainable = model.parameters()
    elif args.transfer=='extractor':
        trainable = model.classifier.parameters()
    else:
        logging.info('invalid transfer method')
        logging.info('only support fine_tune and extractor now')
        sys.exit(1)
    return torch.optim.SGD(
        trainable,
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay)
def transfer(source,target,epochs,lastepoch=0):
    """Transfer a pretrained source model to *target* and fine-tune it.

    Args:
        source: (source_dataset, source_model_name) pair.
        target: name of the target dataset.
        epochs: number of additional epochs to train.
        lastepoch: epoch to resume from (0 = fresh transfer: reload the
            source checkpoint and replace the classifier head).

    Returns:
        (elapsed_seconds, final_top1_acc). In 'simulation' mode no training
        happens -- results are replayed from a previous experiment log.

    Side effects: writes per-epoch metrics to an EXP log file and saves the
    model/optimizer checkpoint under <target_stem>/models/<target>/<now>/.
    Relies on module globals: args, now, and helpers train/infer defined
    elsewhere in this file.
    """
    source_dataset=source[0]
    source_model=source[1]
    if args.mode=='simulation':
        # Simulation: replay metrics from an existing experiment log.
        source_path=os.path.join(args.target_stem,'EXP',target, source_model+"_"+source_dataset)
        if not os.path.exists(source_path):
            logging.info('failed in simulation mode: %s does not exist'%source_path)
            sys.exit(1)
        else:
            return utils.getTransferData(source_path,epochs+lastepoch,lastepoch,max_acc=args.max_acc)
    start=time.time()
    oldTime=0
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    cudnn.enabled=True
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    # Fresh transfers load the source checkpoint; resumes load the partially
    # fine-tuned checkpoint from this run's timestamped directory.
    if lastepoch==0:
        source_path=os.path.join(args.source_stem,'models',source_dataset,source_model+".pt")
    else:
        source_path=os.path.join(args.target_stem,'models',target,now,source_model+"_"+source_dataset+".pt")
    if not os.path.exists(source_path):
        logging.info('path: %s'%source_path)
        logging.info('source model does not exist')
        sys.exit(1)
    target_path=os.path.join(args.target_stem,'models',target,now)
    if not os.path.exists(target_path):
        os.makedirs(target_path)
    target_path=os.path.join(target_path,source_model+"_"+source_dataset+".pt")
    exp_path=os.path.join(args.target_stem,'EXP',target,now)
    if not os.path.exists(exp_path):
        os.makedirs(exp_path)
    exp_path=os.path.join(exp_path,source_model+"_"+source_dataset)
    # Accumulated wall-clock time from the portion trained before a resume.
    oldTime,__=utils.getTransferData(exp_path,lastepoch,max_acc=args.max_acc)
    f=open(exp_path,"a+")
    genotype = eval("genotypes.%s" % args.arch)
    if lastepoch==0:
        # Build with the source head, load weights, then swap in a new
        # classifier sized for the target's class count.
        model = Network(args.init_channels, utils.getClasses(source_dataset,args.source_meta), utils.getLayers(source_model), args.auxiliary, genotype)
        optimizer=getOptimizer(model)
        utils.load(model,source_path)
        num_ftrs = model.classifier.in_features
        model.classifier=nn.Linear(num_ftrs,utils.getClasses(target,args.target_meta))
    else:
        # Resume: model already has the target head; restore optimizer too.
        model = Network(args.init_channels, utils.getClasses(target,args.target_meta), utils.getLayers(source_model), args.auxiliary, genotype)
        optimizer=getOptimizer(model)
        utils.load(model,source_path,optimizer)
    model = model.cuda()
    model_size=utils.count_parameters_in_MB(model)
    logging.info("param size = %fMB", model_size)
    train_queue,valid_queue=utils.getData(target,args.target_meta,args)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs), last_epoch=-1 if lastepoch==0 else lastepoch)
    top1_acc=0
    epochs=lastepoch+epochs
    for epoch in range(lastepoch,epochs):
        logging.info('epoch %d lr %e', epoch+1, scheduler.get_lr()[0])
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        train_acc, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('epoch %d train_acc %f', epoch+1,train_acc)
        scheduler.step()
        top1_acc, top5_acc, valid_obj = infer(valid_queue, model, criterion)
        duration=time.time()-start+oldTime
        f.write('epoch: %d valid_obj: %f time: %.6f top5_acc: %f top1_acc: %f \n'%(epoch+1,valid_obj,duration,top5_acc,top1_acc))
        logging.info('epoch: %d valid_obj: %f time: %.6f top5_acc: %f top1_acc: %f \n'%(epoch+1,valid_obj,duration,top5_acc,top1_acc))
    f.close()
    torch.save({
        'epoch': epochs,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict' : optimizer.state_dict(),
    },target_path)
    torch.cuda.empty_cache()
    return time.time()-start,top1_acc
############################################ ensemble strategy ########################################
def greedyForwardSelection(models,target):
    """Build an ensemble on *target*'s validation set by greedy forward selection.

    Precondition (per the original comment): *models* must already be sorted
    by descending validation accuracy -- the first model seeds the ensemble.
    Each round, the candidate whose added logits most improve top-1 accuracy
    joins the ensemble; stops when no candidate improves or none remain.

    Args:
        models: list of (source_dataset, source_model_name) pairs.
        target: name of the target dataset.

    Returns:
        (elapsed_seconds, best_top1_acc, ensemble_size, total_model_size_MB).

    Relies on module globals: args, now, and project helpers (utils,
    Network, genotypes, get_logit).
    """
    # assume the input models have already beed sorted according to validation accuracy
    start=time.time()
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    cudnn.enabled=True
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    genotype = eval("genotypes.%s" % args.arch)
    valid_queue=utils.getData(target,args.target_meta,args,validOnly=True)
    logit_list=[]
    acc_list=[]
    model_size_list=[]
    counter=0
    # Phase 1: load every candidate once and cache its validation logits.
    for pair in models:
        source_dataset=pair[0]
        source_model=pair[1]
        if args.mode=='simulation':
            target_path=os.path.join(args.target_stem,'models',target)
        else:
            target_path=os.path.join(args.target_stem,'models',target,now)
        target_path=os.path.join(target_path,source_model+"_"+source_dataset+".pt")
        # Prefer an epoch-suffixed checkpoint ("<path>_<epochs>") if present.
        target_trial_path=target_path+'_'+str(args.epochs)
        if os.path.exists(target_trial_path):
            target_path=target_trial_path
        model = Network(args.init_channels, utils.getClasses(target,args.target_meta), utils.getLayers(source_model), args.auxiliary, genotype)
        utils.load(model,target_path)
        model.drop_path_prob = args.drop_path_prob
        model = model.cuda()
        model_size=utils.count_parameters_in_MB(model)
        model_size_list.append(model_size)
        logging.info('cal logits for model %s'%str(pair))
        logits,acc=get_logit(valid_queue,model,criterion)
        acc_list.append(acc)
        torch.cuda.empty_cache()
        logit_list.append(logits)
    K=len(models)
    # Phase 2: greedy rounds over the cached logits (no further GPU passes).
    best_acc=acc_list[0]
    best_digit=copy.deepcopy(logit_list[0])
    best_current_digit=[]
    model_index=[0]
    cadidate_index=[i for i in range(1,K)]
    t1=time.time()-start
    logging.info('cal logit time: %f'%t1)
    while True:
        best_candidate=-1
        for index in cadidate_index:
            ## combine with best digit
            # Copy so a rejected candidate does not pollute the running sum.
            stem=copy.deepcopy(best_digit)
            temp=logit_list[index]
            top1 = utils.AvgrageMeter()
            top5 = utils.AvgrageMeter()
            for step, (input, target) in enumerate(valid_queue):
                stem[step]=stem[step]+temp[step]
                prec1, prec5 = utils.accuracy(stem[step], target, topk=(1, 5))
                batchsize = input.size(0)
                top1.update(prec1.data.item(), batchsize)
                top5.update(prec5.data.item(), batchsize)
            if top1.avg>best_acc:
                best_acc=top1.avg
                best_candidate=index
                best_current_digit=stem
            counter=counter+1
            t=time.time()-start
            logging.info('trail: %d time: %f'%(counter,t))
        if best_candidate!=-1:
            model_index.append(best_candidate)
            cadidate_index.remove(best_candidate)
            best_digit=best_current_digit
            size=len(model_index)
            logging.info('###### Ensemble %d models: %s ######'%(size,str(model_index)))
            logging.info('Top1 acc: %f'%best_acc)
        if best_candidate==-1 or len(cadidate_index)==0:
            if best_candidate==-1:
                reason='no better candidate'
            else:
                reason='run out of candidates'
            logging.info('Greedy forward selection terminated: %s'%reason)
            break
    total_model_size=0
    for index in model_index:
        total_model_size=total_model_size+model_size_list[index]
    duration=time.time()-start
    return duration, best_acc, len(model_index) ,total_model_size
def simpleVoting(models, target):
    """Ensemble *models* on *target*'s validation set by unweighted soft voting.

    Loads each model once, caches its validation logits, then sums logits
    across progressively larger prefixes (2..K models) and reports top-1
    accuracy after each addition.

    Args:
        models: list of (source_dataset, source_model_name) pairs.
        target: name of the target dataset.

    Returns:
        (elapsed_seconds, top1_acc_of_full_ensemble, len(models),
         total_model_size_MB).

    Relies on module globals: args, now, and project helpers (utils,
    Network, genotypes, get_logit).
    """
    start = time.time()
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    cudnn.enabled = True
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    genotype = eval("genotypes.%s" % args.arch)
    valid_queue = utils.getData(target, args.target_meta, args, validOnly=True)
    K = len(models)
    logit_list = []
    total_model_size = 0
    for pair in models:
        source_dataset = pair[0]
        source_model = pair[1]
        if args.mode == 'simulation':
            target_path = os.path.join(args.target_stem, 'models', target)
        else:
            target_path = os.path.join(args.target_stem, 'models', target, now)
        target_path = os.path.join(target_path, source_model + "_" + source_dataset + ".pt")
        # Fixed: was os.path.join(target_path, '_'+epochs), which produced a
        # path *inside* the .pt file. Use the "<path>_<epochs>" suffix naming
        # consistent with greedyForwardSelection.
        target_trial_path = target_path + '_' + str(args.epochs)
        if os.path.exists(target_trial_path):
            target_path = target_trial_path
        model = Network(args.init_channels, utils.getClasses(target, args.target_meta), utils.getLayers(source_model), args.auxiliary, genotype)
        utils.load(model, target_path)
        model.drop_path_prob = args.drop_path_prob
        model = model.cuda()
        model_size = utils.count_parameters_in_MB(model)
        total_model_size = total_model_size + model_size
        logging.info('cal logits for model %s' % str(pair))
        logits, acc = get_logit(valid_queue, model, criterion)
        torch.cuda.empty_cache()
        logit_list.append(logits)
    # Running logit sum; note this aliases (and mutates) logit_list[0],
    # which is not reused afterwards.
    stem = logit_list[0]
    best_acc = 0
    average_acc = 0
    for n in range(2, K + 1):
        print('****************Ensemble %d models******************' % n)
        temp = logit_list[n - 1]
        top1 = utils.AvgrageMeter()
        top5 = utils.AvgrageMeter()
        for step, (input, target) in enumerate(valid_queue):
            stem[step] = stem[step] + temp[step]
            prec1, prec5 = utils.accuracy(stem[step], target, topk=(1, 5))
            batchsize = input.size(0)
            top1.update(prec1.data.item(), batchsize)
            top5.update(prec5.data.item(), batchsize)
        average_acc = top1.avg
        if top1.avg > best_acc:
            best_acc = top1.avg
        logging.info('Simple Soft Voting: valid acc %f', top1.avg)
    duration = time.time() - start
    return duration, average_acc, len(models), total_model_size
def weightedVoting(models,target):
    """Ensemble the fine-tuned source models by training a weighted combiner.

    Loads each fine-tuned checkpoint, wraps them all in a NetworkEnsemble
    whose classifier head is trained with SGD + cosine LR for args.epochs,
    and tracks the best validation top-1 accuracy.

    models: list of (source_dataset, source_model) pairs.
    target: name of the target dataset.
    Returns (duration_seconds, best_top1_valid_acc).
    """
    start=time.time()
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    cudnn.enabled=True
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    genotype = eval("genotypes.%s" % args.arch)
    total_model_size=0
    modellist=[]
    for pair in models:
        source_dataset=pair[0]
        source_model=pair[1]
        if args.mode=='simulation':
            target_path=os.path.join(args.target_stem,'models',target)
        else:
            target_path=os.path.join(args.target_stem,'models',target,now)
        target_path=os.path.join(target_path,source_model+"_"+source_dataset+".pt")
        # FIX: was os.path.jpin — an AttributeError the moment this branch
        # ran.  Matches the identical lookup in simpleVoting.
        target_trial_path=os.path.join(target_path,'_'+str(args.epochs))
        if os.path.exists(target_trial_path):
            target_path=target_trial_path
        model = Network(args.init_channels, utils.getClasses(target,args.target_meta), utils.getLayers(source_model), args.auxiliary, genotype)
        utils.load(model,target_path)
        model.drop_path_prob = args.drop_path_prob
        model.cuda()
        model_size=utils.count_parameters_in_MB(model)
        total_model_size=total_model_size+model_size
        logging.info('load model %s'%str(pair))
        modellist.append(model)
    ensemble=NetworkEnsemble(modellist,utils.getClasses(target,args.target_meta))
    ensemble.cuda()
    # Only the combiner head is optimized; the member networks stay fixed.
    optimizer = torch.optim.SGD(
        ensemble.classifier.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay)
    train_queue,valid_queue=utils.getData(target,args.target_meta,args)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))
    best_acc=0
    for epoch in range(args.epochs):
        logging.info('epoch %d lr %e', epoch+1, scheduler.get_lr()[0])
        train_acc, train_obj = trainEnsemble(train_queue, ensemble, criterion, optimizer)
        logging.info('epoch %d train_acc %f', epoch+1,train_acc)
        scheduler.step()
        top1_acc, top5_acc, valid_obj = inferEnsemble(valid_queue, ensemble, criterion)
        duration=time.time()-start
        if top1_acc>best_acc:
            best_acc=top1_acc
        logging.info('epoch: %d valid_obj: %f time: %.6f top5_acc: %f top1_acc: %f \n'%(epoch+1,valid_obj,duration,top5_acc,top1_acc))
    duration=time.time()-start
    return duration, best_acc
############################################# Selection Strategy ##########################################
def normalize(a):
    """Min-max scale `a` into [0, 1] and return it as a numpy array.

    FIX: a constant input previously divided by zero (yielding NaN/inf);
    it now maps to all zeros, which is the natural degenerate scaling.
    """
    a = np.array(a)
    lo = np.min(a)
    hi = np.max(a)
    if hi == lo:
        return np.zeros_like(a, dtype=float)
    return (a - lo) / (hi - lo)
def getStats(models,target):
    """Collect bandit features and histories for the candidate source models.

    For every (source_dataset, source_model) pair this gathers:
      x: normalized source-dataset sizes,
      y: normalized layer counts,
      rewards: per-model list of per-epoch accuracies (parsed from EXP logs),
      cost: per-model list of per-epoch wall-clock times.
    Returns (x, y, rewards, cost).
    """
    path=os.path.join('data',args.source_meta)
    # NOTE(review): file handles below are never closed — consider `with`.
    f=open(path,'rb')
    dataset=pickle.load(f)
    x=[]
    y=[]
    for pair in models:
        source_dataset=pair[0]
        source_model=pair[1]
        length=len(dataset[source_dataset])
        layer=utils.getLayers(source_model)
        x.append(length)
        y.append(layer)
    x=normalize(x)
    y=normalize(y)
    rewards=[]
    cost=[]
    for pair in models:
        source_dataset=pair[0]
        source_model=pair[1]
        target_model=source_model+'_'+source_dataset
        path=os.path.join(args.target_stem,'EXP',target,target_model)
        acc_list=[]
        time_list=[]
        f=open(path,"r")
        lines=f.readlines()
        for line in lines:
            time=0
            acc=0
            line=line.split()
            # Each log line contains "... time: <seconds> ... <acc-percent>";
            # the trailing token is assumed to be the accuracy in percent.
            for index,seg in enumerate(line):
                if seg=='time:':
                    time=float(line[index+1])
                    break
            acc=float(line[-1])/100
            time_list.append(time)
            acc_list.append(acc)
        rewards.append(acc_list)
        cost.append(time_list)
    return x,y,rewards,cost
def selectTopK(models,acc,K):
    """Pick the K entries of `models` with the highest scores in `acc`.

    Returns (selected_models, selected_scores), both ordered best-first.
    """
    scores = np.array(acc)
    best_first = np.argsort(-scores)[:K]
    chosen = [models[i] for i in best_first]
    chosen_scores = [scores[i] for i in best_first]
    return chosen, chosen_scores
def randomSelection(models,target,K):
    """Pick K source models uniformly at random and fully fine-tune each.

    Returns (selected_models, per_model_acc, [total_fine_tune_time]).
    """
    if len(models) <= K:
        chosen = models
    else:
        chosen = random.sample(models, K)
    tuning_time = 0
    tuning_acc = []
    for pair in chosen:
        spent, acc = transfer(pair, target, args.epochs)
        tuning_time += spent
        tuning_acc.append(acc)
    logging.info('################## experiments summary ##################')
    logging.info('selection strategy: Random')
    logging.info('selected models %s'%str(chosen))
    logging.info('fine-tuning acc %s'%str(tuning_acc))
    logging.info('fine-tuning time %f'%tuning_time)
    return chosen,tuning_acc,[tuning_time]
def GPSelection(models,target,K):
    """Select K source models via a GP-UCB bandit over dataset/model stats,
    then finish fine-tuning each pick for the remaining epochs.

    Returns (selected_models, per_model_acc, [selection_time, tune_time]).
    """
    feat_x, feat_y, rewards, cost = getStats(models, target)
    bandit_env = Environment(rewards, cost)
    bandit = GPUCB(np.array([feat_x, feat_y]), bandit_env, K)
    selectionTime, picked_idx, samples = bandit.selection(K)
    picked = [models[i] for i in picked_idx]
    tune_acc = []
    tune_time = 0
    for pair in picked:
        spent, acc = transfer(pair, target, args.epochs - 1, 1)
        tune_time += spent
        tune_acc.append(acc)
    logging.info('################## experiments summary ##################')
    logging.info('number of samples: %d'%samples)
    logging.info('selection strategy: GP-UCB')
    logging.info('selection time %s'%str(selectionTime))
    logging.info('selected models %s'%str(picked))
    logging.info('fine-tuning acc %s'%str(tune_acc))
    logging.info('fine-tuning time %f'%tune_time)
    return picked,tune_acc,[selectionTime,tune_time]
def oneShotSelection(models,target,K):
    """Fine-tune every candidate for a single epoch, keep the K best, then
    finish fine-tuning those for the remaining epochs.

    Returns (selected_models, per_model_acc, [selection_time, tune_time]).
    """
    probe_time = 0
    probe_acc = []
    for pair in models:
        spent, acc = transfer(pair, target, 1)
        probe_time += spent
        probe_acc.append(acc)
    chosen, _unused = selectTopK(models, probe_acc, K)
    tune_time = 0
    tune_acc = []
    for pair in chosen:
        spent, acc = transfer(pair, target, args.epochs - 1, 1)
        tune_time += spent
        tune_acc.append(acc)
    logging.info('################## experiments summary ##################')
    logging.info('selection strategy: OneShot')
    logging.info('selection time: %f'%probe_time)
    logging.info('selected models %s'%str(chosen))
    logging.info('fine-tuning acc %s'%str(tune_acc))
    logging.info('fine-tuning time %f'%tune_time)
    return chosen,tune_acc,[probe_time,tune_time]
def transferAllSelection(models,target,K):
    """Fully fine-tune every candidate and keep the K most accurate (the
    exhaustive "FA" baseline).

    Returns (selected_models, selected_acc, [total_fine_tune_time]).
    """
    total_time = 0
    all_acc = []
    for pair in models:
        spent, acc = transfer(pair, target, args.epochs)
        total_time += spent
        all_acc.append(acc)
    chosen, chosen_acc = selectTopK(models, all_acc, K)
    logging.info('################## experiments summary ##################')
    logging.info('selection strategy: FA')
    logging.info('selected models %s'%str(chosen))
    logging.info('fine-tuning acc %s'%str(chosen_acc))
    logging.info('fine-tuning time %f'%total_time)
    return chosen,chosen_acc,[total_time]
def halvingSeection(models,target,K,double=True):
    """Successive-halving ("SH") bandit selection.

    Repeatedly fine-tunes the surviving candidates for a growing budget of
    epochs (doubling each round when `double` is True), dropping the worse
    half after each round, until at most K survivors remain or the total
    epoch budget args.epochs is exhausted.  Survivors are then fine-tuned
    for any remaining epochs.

    Returns (selected_models, per_model_acc, [bandit_time, tune_time]).
    """
    n=len(models)
    selectedModels=models
    # Number of halving rounds needed to go from n candidates down to K.
    nIter=int(math.log(n/K,2))
    B=1
    lastepoch=0
    banditTime=0
    transferTime=0
    transferAcc=[]
    for k in range(nIter):
        banditAcc=[]
        epochs=B
        if double==True:
            B=B*2
        # Never exceed the overall epoch budget.
        if epochs+lastepoch>=args.epochs:
            epochs=args.epochs-lastepoch
        for pair in selectedModels:
            source_dataset=pair[0]
            source_model=pair[1]
            # `lastepoch` resumes fine-tuning from the previous round's
            # checkpoint rather than restarting from scratch.
            time,acc=transfer(pair,target,epochs,lastepoch)
            banditTime=banditTime+time
            banditAcc.append(acc)
        selectedModels,transferAcc=selectTopK(selectedModels,banditAcc,int(len(selectedModels)/2))
        lastepoch=lastepoch+epochs
        if lastepoch>=args.epochs:
            break
    # Halving may stop with more than K survivors (budget exhausted early).
    if len(selectedModels)>K:
        selectedModels,transferAcc=selectTopK(selectedModels,transferAcc,K)
    if lastepoch<args.epochs:
        transferAcc=[]
        epochs=args.epochs-lastepoch
        for pair in selectedModels:
            source_dataset=pair[0]
            source_model=pair[1]
            time,acc=transfer(pair,target,epochs,lastepoch)
            transferTime=transferTime+time
            transferAcc.append(acc)
    logging.info('################## experiments summary ##################')
    logging.info('selection strategy: SH')
    logging.info('selection time: %f'%banditTime)
    logging.info('selected models %s'%str(selectedModels))
    logging.info('fine-tuning acc %s'%str(transferAcc))
    logging.info('fine-tuning time %f'%transferTime)
    return selectedModels,transferAcc,[banditTime,transferTime]
def getSourceModels():
    """Build the list of 100 (dataset_name, model_name) source pairs.

    Dataset names are args.source_models + i (1-based); each model name is
    args.arch plus the layer count stored for it in data/models.meta.
    FIX: the metadata file handle was never closed — now opened via `with`.
    """
    model_path = os.path.join('data', 'models.meta')
    with open(model_path, 'rb') as f:
        layers = pickle.load(f)
    return [(args.source_models + str(i), args.arch + str(layers[i - 1]))
            for i in range(1, 100 + 1)]
def topKSelection(models,target,K):
    """Dispatch to the selection strategy named by args.selection_strategy.

    Returns whatever the strategy returns: (selected_models, per_model_acc,
    time_breakdown).  An unknown strategy name yields None.
    """
    strategies = {
        'Random': randomSelection,
        'OneShot': oneShotSelection,
        'SH': halvingSeection,
        'FA': transferAllSelection,
        'GP': GPSelection,
    }
    picker = strategies.get(args.selection_strategy)
    if picker is not None:
        return picker(models, target, K)
def ensemble(models,target):
    """Run the ensembling strategy selected via args.ensemble_strategy.

    An unrecognized strategy yields (0, 0).
    """
    strategy = args.ensemble_strategy
    if strategy == 'simple_voting':
        return simpleVoting(models, target)
    if strategy == 'weighted_voting':
        return weightedVoting(models, target)
    if strategy == 'greedy_forward':
        return greedyForwardSelection(models, target)
    return 0,0
def main():
    """Entry point: select the top-K source models for the target dataset,
    then ensemble the selection with the configured strategy."""
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    models=getSourceModels()
    target=args.target_dataset
    K=args.ensemble_size
    selectedModels,transferAcc,time=topKSelection(models,target,K)
    # NOTE(review): this unpacks four values, but weightedVoting returns a
    # 2-tuple and the ensemble() fallback returns (0, 0) — only the
    # simpleVoting 4-tuple fits.  Confirm, otherwise this raises ValueError.
    ensembleTime,ensembleAcc,_,_=ensemble(selectedModels,target)
def get_logit(valid_queue, model, criterion):
    """Run `model` over the validation queue and collect per-batch logits.

    `criterion` is accepted for signature symmetry but unused.
    Returns (list_of_cpu_logit_tensors_per_batch, top1_accuracy).
    """
    model.eval()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    collected = []
    for step, (input, target) in enumerate(valid_queue):
        with torch.no_grad():
            images = Variable(input).cuda()
            labels = Variable(target).cuda()
            outputs, _ = model(images)
            prec1, prec5 = utils.accuracy(outputs, labels, topk=(1, 5))
            count = images.size(0)
            top1.update(prec1.data.item(), count)
            top5.update(prec5.data.item(), count)
            collected.append(outputs.cpu().data)
    return collected, top1.avg
def trainEnsemble(train_queue, model, criterion, optimizer):
    """Train the ensemble for one epoch; returns (top1_acc, mean_loss)."""
    loss_meter = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.train()
    for step, (input, target) in enumerate(train_queue):
        images = Variable(input).cuda()
        labels = Variable(target).cuda()
        optimizer.zero_grad()
        outputs = model(images)
        loss = criterion(outputs, labels)
        loss.backward()
        # Clip gradients to keep the combiner update stable.
        nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
        optimizer.step()
        prec1, prec5 = utils.accuracy(outputs, labels, topk=(1, 5))
        count = images.size(0)
        loss_meter.update(loss.data.item(), count)
        top1.update(prec1.data.item(), count)
        top5.update(prec5.data.item(), count)
        if step % args.report_freq == 0:
            logging.info('train %03d %e %f %f', step, loss_meter.avg, top1.avg, top5.avg)
    return top1.avg, loss_meter.avg
def inferEnsemble(valid_queue, model, criterion):
    """Evaluate the ensemble on the validation queue.

    Returns (top1_acc, top5_acc, mean_loss).
    """
    loss_meter = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.eval()
    for step, (input, target) in enumerate(valid_queue):
        with torch.no_grad():
            images = Variable(input).cuda()
            labels = Variable(target).cuda()
            outputs = model(images)
            loss = criterion(outputs, labels)
            prec1, prec5 = utils.accuracy(outputs, labels, topk=(1, 5))
            count = images.size(0)
            loss_meter.update(loss.data.item(), count)
            top1.update(prec1.data.item(), count)
            top5.update(prec5.data.item(), count)
            if step % args.report_freq == 0:
                logging.info('valid %03d %e %f %f', step, loss_meter.avg, top1.avg, top5.avg)
    return top1.avg, top5.avg, loss_meter.avg
def infer(valid_queue, model, criterion,split=1):
    """Evaluate `model` on (a fraction of) the validation queue.

    split: fraction of the validation set to evaluate, in (0, 1]; the loop
        stops after enough batches to cover that fraction.
    Returns (top1_acc, top5_acc, mean_loss).
    """
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.eval()
    nEpoch=0
    dataNum=len(valid_queue.dataset)
    batchSize=valid_queue.batch_size
    validNum=int(dataNum*split)
    # Number of batches that covers `split` of the dataset.
    nEpoch=math.ceil(validNum/batchSize)
    for step, (input, target) in enumerate(valid_queue):
        if step==nEpoch:
            break
        with torch.no_grad():
            input = Variable(input).cuda()
            target = Variable(target).cuda()
            logits, _ = model(input)
            loss = criterion(logits, target)
            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            n = input.size(0)
            objs.update(loss.data.item(), n)
            top1.update(prec1.data.item(), n)
            top5.update(prec5.data.item(), n)
            if step % args.report_freq == 0:
                logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
    return top1.avg, top5.avg, objs.avg
def train(train_queue, model, criterion, optimizer, split=1):
    """Train `model` for one (partial) epoch.

    split: fraction of the training set to use, in (0, 1].
    Returns (top1_acc, mean_loss).
    """
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.train()
    nEpoch=0
    dataNum=len(train_queue.dataset)
    batchSize=train_queue.batch_size
    trainNum=int(dataNum*split)
    # Number of batches that covers `split` of the dataset.
    nEpoch=math.ceil(trainNum/batchSize)
    for step, (input, target) in enumerate(train_queue):
        if step==nEpoch:
            break
        input = Variable(input).cuda()
        target = Variable(target).cuda()
        optimizer.zero_grad()
        logits, logits_aux = model(input)
        loss = criterion(logits, target)
        # Auxiliary-head loss (DARTS-style) added with a configured weight.
        if args.auxiliary:
            loss_aux = criterion(logits_aux, target)
            loss += args.auxiliary_weight*loss_aux
        loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
        optimizer.step()
        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        n = input.size(0)
        objs.update(loss.data.item(), n)
        top1.update(prec1.data.item(), n)
        top5.update(prec5.data.item(), n)
        if step % args.report_freq == 0:
            logging.info('train %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
    return top1.avg, objs.avg
# Script entry point.
if __name__ == '__main__':
    main()
|
983,341 | 7a5a691339b1fa51cf7b9ce15f4227e759909a5c | """
This module is for evaluating and converting time to less accurate but more human-understandable format:
1. An hour is divided into three intervals of 20 minutes.
2. Based on the interval the module builds a string with the remaining time.
3. The string is put to the message.
This formatting spares your interlocutors from having to work out for themselves how long it will take you to return.
The module helps build messages only if the user is out for a few hours, but not days.
"""
from datetime import datetime
# 01: 0 h, 0 <= min <= 19; -> None
# 02: 0 h, 20 <= min <= 39
# 03: 0 h, 40 <= min <= 59
# 11: 1 h, 0 <= min <= 19
# 12: 1 h, 20 <= min <= 39; -> 2 h
# 13: 1 h, 40 <= min <= 59
# 1: 2 <= h <= 4, 0 <= min <= 19
# 2: 2 <= h <= 4, 20 <= min <= 39
# Maps an "<hours-bucket><minutes-bucket>" key (see the table above) to a
# human-readable duration phrase; None means the wait is short enough that
# no message is needed.  Keys '1'/'2' are suffixes appended after a
# spelled-out hour count from HOURS.
TIME = {
    '01': None,
    '02': 'half an hour',
    '03': 'an hour',
    '11': 'an hour',
    '12': 'an hour and a half',
    '13': 'two hours',
    '1': ' hours',
    '2': ' hours and a half'
}
# Spelled-out hour counts used for durations of two hours and more.
HOURS = {
    2: 'two',
    3: 'three',
    4: 'four',
    5: 'five',
    6: 'six',
    7: 'seven'
}
def get_message(seconds):
    """Build the away-message for Telegram users, or None when the remaining
    time is too short to mention."""
    human = get_human_time(seconds)
    if not human:
        return None
    return (f'Hello! I am busy right now and I will get back to work in {human}, sorry.\n\n'
            f'If it is something urgent, write {...}, please.\n\n'
            'Or wait for me if you can. I will take on your task as soon as I am free.')
def get_remaining_seconds(str_time):
    """Return the remaining time in seconds.

    str_time: the planned return time as 'HH:MM' — presumably Moscow local
    time, given the fixed offset below; verify against callers.
    """
    # NOTE(review): datetime.utcnow() is naive, and timedelta.seconds is
    # taken modulo one day, so a target time earlier than "now" wraps to the
    # next day; the result can also go negative after the offset — confirm
    # callers handle both cases.
    current_time = datetime.utcnow().strftime('%H:%M')
    # 10_800 - the difference in seconds from Moscow (3 hours)
    return (datetime.strptime(str_time, '%H:%M') - datetime.strptime(current_time, '%H:%M')).seconds - 10_800
def get_human_time(seconds):
    """Count how long it will take you before coming back.

    Maps `seconds` onto the coarse 20-minute buckets documented at the top
    of the module and returns a human phrase such as 'an hour and a half',
    or None when fewer than 20 minutes remain.
    """
    # FIX: was `seconds // 3660`, which under-counted hours (an hour is
    # 3600 seconds), shifting every result one bucket early near hour marks.
    hours = seconds // 3600
    minutes = (seconds // 60) % 60
    if minutes <= 19:
        m = '1'
    elif 20 <= minutes <= 39:
        m = '2'
    elif minutes >= 40:
        m = '3'
    if not hours:
        h = '0'
    elif hours == 1:
        h = '1'
    elif hours >= 2:
        # Two hours and up: spell out the hour count; 40+ remaining minutes
        # round up to the next full hour.
        if m == '3':
            return f'{HOURS[hours+1]}{TIME["1"]}'
        return f'{HOURS[hours]}{TIME[m]}'
    return TIME[f'{h}{m}']
|
983,342 | 670e372b3b40a01681f21f0bf359562b7d7b61bd | from config.dbconfig import pg_config
import psycopg2
class ReservationDAO:
    """Data-access object for reservations and their reserved resources.

    Relevant schema:
      reservation:           reservation_id, customer_id, request_id,
                             reservation_date, reservation_status
      resource_reservations: id, reservation_id, resource_id,
                             reservation_quantity
    """
    def __init__(self):
        connection_url = "dbname=%s user=%s password=%s" % (pg_config['dbname'], pg_config['user'], pg_config['passwd'])
        # FIX: use the public psycopg2.connect() instead of the private
        # psycopg2._connect alias.
        self.conn = psycopg2.connect(connection_url)

    def _fetch_all(self, query, params=None):
        """Run a SELECT and return every row as a list of tuples."""
        cursor = self.conn.cursor()
        cursor.execute(query, params)
        return [row for row in cursor]

    def getAllReservations(self):
        """All reservations joined with their reserved resources."""
        return self._fetch_all(
            "SELECT reservation_id, customer_id, request_id, reservation_date, reservation_status, resource_id, resource_name, reservation_quantity FROM reservation natural inner join resource_reservations natural inner join resource;")

    def getReservationById(self, reservation_id):
        """Rows (one per reserved resource) for a single reservation."""
        return self._fetch_all(
            "SELECT reservation_id, customer_id, request_id, reservation_date, reservation_status, resource_id, resource_name, reservation_quantity FROM reservation natural inner join resource_reservations natural inner join resource WHERE reservation_id = %s;",
            (reservation_id,))

    def getReservationsByDate(self, reservation_date):
        """Reservations (with resources) placed on a given date."""
        return self._fetch_all(
            "SELECT reservation_id, customer_id, request_id, reservation_date, reservation_status, resource_id, resource_name, reservation_quantity FROM reservation natural inner join resource_reservations natural inner join resource WHERE reservation_date = %s;",
            (reservation_date,))

    def getReservationsByStatus(self, reservation_status):
        """Reservations (with resources) in a given status."""
        return self._fetch_all(
            "SELECT reservation_id, customer_id, request_id, reservation_date, reservation_status, resource_id, resource_name, reservation_quantity FROM reservation natural inner join resource_reservations natural inner join resource WHERE reservation_status = %s;",
            (reservation_status,))

    def getReservationsByDateAndStatus(self, reservation_date, reservation_status):
        """Reservations (with resources) matching both date and status."""
        return self._fetch_all(
            "SELECT reservation_id, customer_id, request_id, reservation_date, reservation_status, resource_id, resource_name, reservation_quantity FROM reservation natural inner join resource_reservations natural inner join resource WHERE reservation_date = %s AND reservation_status = %s;",
            (reservation_date, reservation_status,))

    def getReservationsByCustomerId(self, customer_id):
        """Reservations (with resources) belonging to a customer."""
        return self._fetch_all(
            "SELECT reservation_id, customer_id, request_id, reservation_date, reservation_status, resource_id, resource_name, reservation_quantity FROM reservation natural inner join resource_reservations natural inner join resource WHERE customer_id = %s;",
            (customer_id,))

    def getResourcesByReservationId(self, reservation_id):
        """Resource detail rows reserved under a given reservation."""
        return self._fetch_all(
            "SELECT resource_id, supplier_id, category_id, resource_name, resource_brand, resource_quantity, resource_price FROM reservation natural inner join resource_reservations natural inner join resource WHERE reservation_id = %s;",
            (reservation_id,))

    def insert(self, customer_id, request_id, reservation_date, reservation_status):
        """Create a reservation (request_id optional) and return its new id."""
        cursor = self.conn.cursor()
        if request_id:
            query = "INSERT INTO reservation (customer_id, request_id, reservation_date, reservation_status) VALUES (%s, %s, %s, %s) RETURNING reservation_id;"
            cursor.execute(query, (customer_id, request_id, reservation_date, reservation_status,))
        else:
            query = "INSERT INTO reservation (customer_id, reservation_date, reservation_status) VALUES (%s, %s, %s) RETURNING reservation_id;"
            cursor.execute(query, (customer_id, reservation_date, reservation_status,))
        reservation_id = cursor.fetchone()[0]
        self.conn.commit()
        return reservation_id

    def update(self, reservation_id, customer_id, request_id, reservation_date, reservation_status):
        """Overwrite every mutable column of a reservation; returns its id."""
        cursor = self.conn.cursor()
        query = "update reservation set customer_id = %s, request_id = %s, reservation_date = %s, reservation_status = %s where reservation_id = %s returning reservation_id;"
        cursor.execute(query, (customer_id, request_id, reservation_date, reservation_status, reservation_id,))
        reservation_id = cursor.fetchone()[0]
        self.conn.commit()
        return reservation_id

    def delete(self, reservation_id):
        """Delete a reservation; returns the id that was passed in."""
        cursor = self.conn.cursor()
        query = "delete from reservation where reservation_id = %s;"
        cursor.execute(query,(reservation_id,))
        self.conn.commit()
        return reservation_id
983,343 | a68d7852eae30fa7132b24847f54b7cf85fa0091 | from django.test import TestCase
import plyplus
import os
# Paths are resolved from the current working directory, so these tests
# assume they are run from the project root — TODO confirm.
MODULE_DIR = os.getcwd() + '/raws_parser'
TEST_ASSETS = MODULE_DIR + '/tests/assets'
class RawsParsingTestCase(TestCase):
    """Parse a Dwarf-Fortress-style raws fixture with the item grammar and
    check selected nodes of the resulting tree."""
    def setUp(self):
        # Grammar is rebuilt per test; token filtering is disabled so the
        # raw bracket tokens stay selectable below.
        self.grammar = plyplus.Grammar(plyplus.grammars.open(
                                       MODULE_DIR + '/dictionaries/item.g'),
                                       auto_filter_tokens=False)

    def test_parsing_action(self):
        """Parsing actually ends successful"""
        self.grammar.parse(open(TEST_ASSETS + '/test_item.txt').read())

    def test_elements(self):
        # NOTE(review): fixture files are opened without being closed.
        result = self.grammar.parse(open(TEST_ASSETS + '/test_item.txt').read())
        self.assertEqual(result.select('main > object_stm *')[0],
                         '[OBJECT:ITEM]')
        self.assertEqual(result.select('token_item_name *')[:]
                         , ['ITEM_PANTS_PANTS', 'ITEM_PANTS_GREAVES'])
class TransformsTestCase(TestCase):
    """Check that the tree-to-JSON transformers reproduce the expected
    plain-JSON and d3-shaped fixtures."""
    def setUp(self):
        self.grammar = plyplus.Grammar(plyplus.grammars.open(
                                       MODULE_DIR + '/dictionaries/base.g'),
                                       auto_filter_tokens=False
                                       )
        # NOTE(review): fixture file opened without being closed.
        self.result = self.grammar.parse(open(TEST_ASSETS + '/test_item.txt')
                                         .read())

    def test_json_formating(self):
        import json
        from raws_parser.tojson import MakeJson
        with open(TEST_ASSETS + '/test_json.json') as f:
            test_json = json.load(f)
        make_json = MakeJson()
        # Compare parsed JSON, not raw strings, to ignore formatting.
        self.assertEqual(json.loads(make_json.transform(self.result.tail[0])),
                         test_json)

    def test_d3_json_formating(self):
        import json
        from raws_parser.tojson import MakeD3Json
        with open(TEST_ASSETS + '/test_d3_json.json') as f:
            test_d3_json = json.load(f)
        make_json = MakeD3Json()
        self.assertEqual(json.loads(make_json.transform(self.result.tail[0])),
                         test_d3_json)
|
983,344 | f6006ea15c6500a1c682d38b6714b285c1509cb5 | def print_list(list):
for item in list:
print(item)
list1 = ["Ro.bin", "S.mith", "is", "fu.ll", "o.f.", "d.o.t.s!"]
list2 = [word.replace(".", "") for word in list1]
print_list(list2)
|
983,345 | b3f764eda1f419e31dabc4b4a26de3a63d4947d7 | # -*- coding: utf-8 -*-
"""
Created on Fri May 18 20:44:22 2018
@author: Mauro
"""
#==============================================================================
# Matrix class
# utilities for an nxm 2D matrix
#==============================================================================
# errors
class MatExcept(Exception):
    """Raised for invalid Matrix operations (bad index, size mismatch)."""
    pass
# class
class Matrix:
    ''' MxN matrix class, stored column-major in a flat list.

    Construction:
      - Matrix([[...], [...]])  from a nested list
      - Matrix([a, b, c])       an m x 1 column vector
      - Matrix(m, n)            a zero-filled m x n matrix
    Supports +, -, * (matrix product), scalar multiplication, transpose and
    printing.  Invalid indices and size mismatches raise MatExcept.
    '''
    def __init__(self, m, n = None):
        """Initialize from a nested list, a flat list, or explicit sizes."""
        self.mat = []
        if type(m) == list and type(m[0]) is list and n is None:
            # Nested list: copy element by element.
            self.m = len(m)
            self.n = len(m[0])
            self.init_mat()
            for i in range(self.m):
                for j in range(self.n):
                    self[i, j] = m[i][j]
        elif type(m) is list and n is None:
            # Flat list: build an m x 1 column vector.
            self.m = len(m)
            self.n = 1
            self.init_mat()
            for i in range(self.m):
                self[i, 0] = m[i]
        else:
            # Sizes only: zero-filled m x n matrix.
            self.m = m
            self.n = n
            self.init_mat()

    def init_mat(self):
        """Reset the backing store to m*n zeros."""
        self.mat = [0] * (self.m * self.n)

    def _linear_index(self, idx):
        """Map (row, col) to the column-major flat index, checking bounds."""
        if idx[0] >= self.m or idx[0] < 0: raise MatExcept("Matrix: row out of range")
        if idx[1] >= self.n or idx[1] < 0: raise MatExcept("Matrix: col out of range")
        return idx[1] * self.m + idx[0]

    # getter
    def __getitem__(self, idx):
        # FIX: reads are now bounds-checked like writes; previously an
        # out-of-range index could silently return the wrong element.
        return self.mat[self._linear_index(idx)]

    # setter
    def __setitem__(self, idx, c):
        self.mat[self._linear_index(idx)] = c

    def _elementwise(self, m2, op, opname):
        """Combine with `m2` element by element using `op`."""
        if self.m != m2.m or self.n != m2.n:
            raise MatExcept("Matrix: %s matrices not same size" % opname)
        mnew = Matrix(self.m, self.n)
        mnew.mat = [op(a, b) for a, b in zip(self.mat, m2.mat)]
        return mnew

    # operator + elementwise sum
    def __add__(self, m2):
        return self._elementwise(m2, lambda a, b: a + b, "addition")

    # operator - elementwise
    def __sub__(self, m2):
        return self._elementwise(m2, lambda a, b: a - b, "subtraction")

    # matrix multiplication
    def __mul__(self, m2):
        """Matrix product of self (m x n) with m2 (n x p)."""
        if self.n != m2.m:
            raise MatExcept("Matrix: multiplication matrix columns different then other matrix rows")
        mulmat = Matrix(self.m, m2.n)
        for i in range(mulmat.m):
            for j in range(mulmat.n):
                for k in range(self.n):
                    mulmat[i, j] += self[i, k] * m2[k, j]
        return mulmat

    def scalar(self, k):
        """Return a copy with every element multiplied by scalar k."""
        mres = Matrix(self.m, self.n)
        mres.mat = [v * k for v in self.mat]
        return mres

    def transpose(self):
        """Return the n x m transpose."""
        tmat = Matrix(self.n, self.m)
        for i in range(self.m):
            for j in range(self.n):
                tmat[j, i] = self[i, j]
        return tmat

    def __str__(self):
        """Row-per-line rendering with a trailing space after each entry."""
        s = ""
        for i in range(self.m):
            for j in range(self.n):
                s += str(self[i, j]) + " "
            s += "\n"
        return s
#==============================================================================
# Squared Matrix utilities
# for a squared matrix (mxm)
#==============================================================================
class SquareMatrix(Matrix):
    """Square (m x m) matrix with shape predicates and an identity factory."""
    def __init__(self, m):
        if type(m) is list:
            if len(m) != len(m[0]): raise MatExcept("SqMat: Not a square matrix")
            super().__init__(m)
        else:
            super().__init__(m, m)

    def is_diagonal(self):
        """True when every off-diagonal entry is zero.

        FIX: the old check also demanded nonzero diagonal entries, wrongly
        rejecting e.g. the zero matrix.
        """
        for i in range(self.m):
            for j in range(self.n):
                if i != j and self[i, j] != 0:
                    return False
        return True

    def is_lower_triangular(self):
        """True when every entry above the main diagonal is zero.

        FIX: the old check also demanded nonzero entries on/below the
        diagonal, wrongly rejecting e.g. the identity matrix.
        """
        for i in range(self.m):
            for j in range(self.n):
                if j > i and self[i, j] != 0:
                    return False
        return True

    def is_upper_triangular(self):
        """True when every entry below the main diagonal is zero.

        FIX: same nonzero-entry over-restriction removed as above.
        """
        for i in range(self.m):
            for j in range(self.n):
                if i > j and self[i, j] != 0:
                    return False
        return True

    def get_identity(self):
        """Return the identity matrix of the same size."""
        imatrix = SquareMatrix(self.m)
        for i in range(self.m):
            imatrix[i, i] = 1
        return imatrix
# Smoke-test demo: exercises construction, indexing, transpose, arithmetic
# and the SquareMatrix predicates, printing each result for eyeballing.
if __name__ == "__main__":
    print("Test size initialization")
    m = Matrix(2, 3)
    print(m)
    print ("Test list initialization")
    m_ini = [ [2, 3, 4],
              [1, 0, 0] ]
    m = Matrix(m_ini)
    print(m)
    print("Test setter")
    m[1, 2] = 1
    print(m)
    print("Test transpose")
    m2 = Matrix(2, 3)
    m2[1, 2] = 3
    print(m2)
    print(m2.transpose())
    print("Test addition and scalar multiplication")
    print("m + m2*4")
    print(m + m2.scalar(4))
    print("Test multiplication")
    m1 = Matrix(2, 3)
    m1[0, 0] = 2
    m1[0, 1] = 3
    m1[0, 2] = 4
    m1[1, 0] = 1
    print(m1)
    m2 = Matrix(3, 2)
    m2[0, 1] = 1000
    m2[1, 0] = 1
    m2[1, 1] = 100
    m2[2, 1] = 10
    print(m2)
    print("m1 * m2")
    print(m1 * m2)
    print("m1 and m2")
    m1 = [ [1, 2],
           [3, 4] ]
    m1 = Matrix(m1)
    print(m1)
    m2 = [ [0, 1],
           [0, 0] ]
    m2 = Matrix(m2)
    print(m2)
    mres = m1 * m2
    print(mres)
    print("m1 * m2")
    mres = m2 * m1
    print(mres)
    print("Test square matrix")
    m = [ [1, 1, 1],
          [0, 1, 1],
          [0, 0, 1] ]
    m = SquareMatrix(m)
    print(m)
    print("Is diagonal, is lower, is upper triangular")
    print(m.is_diagonal())
    print(m.is_lower_triangular())
    print(m.is_upper_triangular())
    print()
    print("Test identity")
    print(m.get_identity())
|
983,346 | 8fcddf96dd722888aa7f65fad4d619d2a3f28e4b | from __future__ import print_function
from __future__ import print_function
import json
import datetime
from django.db import IntegrityError
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from gcm.models import get_device_model
from django.http import HttpResponse
from icebreaker_backend.forms import UploadFileForm
import time
from icebreaker_backend.models import *
@csrf_exempt
def testing(request):
    """Debug endpoint: on POST, print the current epoch millis and return a
    record for every registered GCM device (reg_id + dev_id + name)."""
    if request.method == 'POST':
        milli_sec = int(round(time.time() * 1000))
        print(milli_sec)
        Device = get_device_model()
        # FIX: dropped commented-out experimentation code and replaced the
        # manual append loop with a comprehension.
        projects_donated = [
            {"name": phone.reg_id + phone.dev_id + "dev_name " + phone.name}
            for phone in Device.objects.all()
        ]
        return JsonResponse({'name': projects_donated})
@csrf_exempt
def send(request):
    """Push a chat message over GCM to the device named by body['to'].

    Returns {'status': 'true', 'time': millis} on success, or
    {'status': 'false'} when the target device is unknown.
    """
    if request.method == 'POST':
        body = json.loads(request.body)
        Device = get_device_model()
        # FIX: removed the unused `now = datetime.datetime.now()` local and
        # the unused capture of send_message's return value.
        millis = int(round(time.time() * 1000))
        try:
            phone = Device.objects.get(name=body['to'])
            phone.send_message({'title': body['from'], 'message': body['message'], 'id': body['id'], 'time':
                               str(millis), 'type': body['type']},
                               collapse_key=str(millis))
            return JsonResponse({'status': 'true', 'time': millis})
        except Device.DoesNotExist:
            return JsonResponse({'status': 'false'})
@csrf_exempt
def signup(request):
    """Complete a user's profile (gender/branch/college/batch) and return it
    together with the enrollment numbers of their contacts."""
    if request.method == 'POST':
        body = json.loads(request.body)
        # NOTE(review): no existence check — an unknown enroll raises
        # User.DoesNotExist and surfaces as a 500; confirm that is acceptable.
        user = User.objects.get(enroll=body['enroll'])
        user.gender = body['gender']
        user.branch = body['branch']
        user.college = body['college']
        user.batch = body['batch']
        user.save()
        contacts = []
        for contact in user.contacts.all():
            contacts.append(contact.enroll)
        profile = {"id": user.pk, "enroll": user.enroll, "gender": user.gender, "branch": user.branch,
                   "college": user.college, "batch": user.batch,"contacts":contacts}
        return JsonResponse({"status": "created", "data": profile})
@csrf_exempt
def block(request):
    """Add body['block_enroll'] to the block list of body['user_enroll']."""
    if request.method == 'POST':
        body = json.loads(request.body)
        if User.objects.filter(enroll=body['user_enroll']).count() > 0:
            user = User.objects.get(enroll=body['user_enroll'])
            # NOTE(review): a new Blocked row is created on every call —
            # duplicates are not checked here; confirm that is intended.
            block = Blocked(enroll=body['block_enroll'])
            block.save()
            user.blocked.add(block)
            return JsonResponse({'status': 'Added to block list'})
        else:
            return JsonResponse({'status': 'error'})
@csrf_exempt
def block_list(request):
    """Return the enrollment numbers blocked by the user in body['enroll']."""
    if request.method == 'POST':
        # FIX: the body was previously parsed BEFORE the method check, so
        # any non-POST (bodyless) request crashed in json.loads.
        body = json.loads(request.body)
        if User.objects.filter(enroll=body['enroll']).count() > 0:
            user = User.objects.get(enroll=body['enroll'])
            blocked = []
            for entry in user.blocked.all():
                print(entry.enroll)
                blocked.append(entry.enroll)
            return JsonResponse({'blocked': blocked}, safe=False)
        else:
            return JsonResponse({'status': 'error'})
@csrf_exempt
def random_chat(request):
    """Match the requesting user with a waiting user of the opposite gender.

    If a candidate is waiting in the Random queue, both devices get a GCM
    push with each other's profile and the queue entry is removed
    (status 'found'); otherwise the requester is enqueued (status 'wait').
    """
    if request.method == 'POST':
        Device = get_device_model()
        body = json.loads(request.body)
        milli_sec = int(round(time.time() * 1000))
        user = User.objects.get(enroll=body['enroll'])
        user_profile = {"id": user.pk, "enroll": user.enroll, "gender": user.gender, "branch": user.branch,
                        "college": user.college, "batch": user.batch}
        if str(user.gender) == 'male':
            try:
                # Oldest waiting female first (FIFO by enqueue time).
                female = Random.objects.filter(gender='female').order_by('time').first()
                female_random = User.objects.get(enroll=female.enroll)
                female_profile = {"id": female_random.pk, "enroll": female_random.enroll,
                                  "gender": female_random.gender,
                                  "branch": female_random.branch,
                                  "college": female_random.college, "batch": female_random.batch}
                female_device = Device.objects.get(name=female_random.enroll)
                female_device.send_message(
                    {'title': user.enroll, 'message': 'We have found a match for you!!', 'id': 2, 'time':
                        str(milli_sec), 'type': 'random', 'profile': user_profile},
                    collapse_key=str(milli_sec))
                male_device = Device.objects.get(name=user.enroll)
                male_device.send_message(
                    {'title': female_random.enroll, 'message': 'We have found a match for you!!', 'id': 2, 'time':
                        str(milli_sec), 'type': 'random', 'profile': female_profile},
                    collapse_key=str(milli_sec))
                female.delete()
                return JsonResponse({"status": "found", "profile": None})
            # NOTE(review): bare except — `first()` returning None, missing
            # devices and GCM failures are all treated alike as "no match";
            # consider narrowing so real errors are not swallowed.
            except:
                new_random = Random(enroll=user.enroll,
                                    gender=user.gender,
                                    time=milli_sec
                                    )
                new_random.save()
                return JsonResponse({"status": "wait", "profile": None})
        elif str(user.gender) == 'female':
            try:
                # Mirror of the branch above with genders swapped.
                male = Random.objects.filter(gender='male').order_by('time').first()
                male_random = User.objects.get(enroll=male.enroll)
                male_profile = {"id": male_random.pk, "enroll": male_random.enroll, "gender": male_random.gender,
                                "branch": male_random.branch,
                                "college": male_random.college, "batch": male_random.batch}
                male_device = Device.objects.get(name=male_random.enroll)
                male_device.send_message(
                    {'title': user.enroll, 'message': 'We have found a match for you!!', 'id': 2, 'time':
                        str(milli_sec), 'type': 'random', 'profile': user_profile},
                    collapse_key=str(milli_sec))
                female_device = Device.objects.get(name=user.enroll)
                female_device.send_message(
                    {'title': male_random.enroll, 'message': 'We have found a match for you!!', 'id': 2, 'time':
                        str(milli_sec), 'type': 'random', 'profile': male_profile},
                    collapse_key=str(milli_sec))
                male.delete()
                return JsonResponse({"status": "found", "profile": None})
            except:
                new_random = Random(enroll=user.enroll,
                                    gender=user.gender,
                                    time=milli_sec
                                    )
                new_random.save()
                return JsonResponse({"status": "wait", "profile": None})
        else:
            return JsonResponse({"status": "error", "profile": None})
@csrf_exempt
def search(request):
    """Add the searched user to the sender's contact list.

    POST body: {"search": <enroll to add>, "sender": <requester's enroll>}.
    Replies "found" with the new contact's profile, "already" when the
    contact exists, or "error" when either user is unknown / not a POST.
    """
    if request.method != 'POST':
        return JsonResponse({'status': 'error'})
    body = json.loads(request.body)
    target_exists = User.objects.filter(enroll=body['search'])
    sender_exists = User.objects.filter(enroll=body['sender'])
    if not (target_exists and sender_exists):
        return JsonResponse({'status': 'error'})
    contact_user = User.objects.get(enroll=body['search'])
    user = User.objects.get(enroll=body['sender'])
    contact = Contacts(enroll=contact_user.enroll)
    try:
        if user.contacts.filter(enroll=contact.enroll).count() == 0:
            contact.save()
            user.contacts.add(contact)
            profile = {"id": contact_user.pk, "enroll": contact_user.enroll,
                       "gender": contact_user.gender, "branch": contact_user.branch,
                       "college": contact_user.college, "batch": contact_user.batch,
                       "status": contact_user.status}
            return JsonResponse({'status': 'found',
                                 'contact_status': contact_user.status,
                                 'profile': profile})
        return JsonResponse({'status': 'already'})
    except IntegrityError:
        # A concurrent insert of the same contact lands here.
        return JsonResponse({'status': 'already'})
@csrf_exempt
def delivered(request):
    """Push a delivery receipt to the original sender's device.

    POST body: {"to": <recipient enroll>, "from": <sender enroll>, "id": <msg id>}.
    """
    if request.method != 'POST':
        return JsonResponse({'status': 'false'})
    Device = get_device_model()
    body = json.loads(request.body)
    millis = int(round(time.time() * 1000))
    try:
        recipient = Device.objects.get(name=body['to'])
        recipient.send_message(
            {'title': body['from'], 'message': True, 'id': body['id'], 'type': 'deliver'},
            collapse_key=str(millis))
    except Device.DoesNotExist:
        return JsonResponse({'status': 'false'})
    return JsonResponse({'status': 'true'})
@csrf_exempt
def removeRandom(request):
    """Remove a user from the random-match waiting queue for their gender."""
    if request.method != 'POST':
        return JsonResponse({'status': 'false'})
    body = json.loads(request.body)
    user = User.objects.get(enroll=body['enroll'])
    queue = Random()
    gender = str(user.gender)
    if gender == 'male':
        queue.slice_male(user.enroll)
    elif gender == 'female':
        queue.slice_female(user.enroll)
    return JsonResponse({'status': 'true'})
@csrf_exempt
def upload_pic(request, enroll):
    """Store an uploaded profile picture and attach it to the given user."""
    if request.method == 'POST':
        form = UploadFileForm(request.POST, request.FILES)
        if form.is_valid():
            picture = Picture()
            picture.picture = form.cleaned_data['picture']
            picture.save()
            # Re-fetch by the stored image so the user points at the saved row.
            user = User.objects.get(enroll=enroll)
            user.picture = Picture.objects.get(picture=picture.picture)
            user.save()
            return JsonResponse({"status": "true"}, safe=False)
        else:
            return JsonResponse({"status": "false"}, safe=False)
@csrf_exempt
def show_image(request, enroll):
    """Serve a user's profile picture, falling back to a per-gender default.

    Fixes: the fallback images were opened in *text* mode and the file
    object itself (never `.read()`) was handed to HttpResponse; all three
    file handles were leaked. Files are now read as bytes via `with`.
    """
    if request.method == 'GET':
        user = User.objects.get(enroll=enroll)
        try:
            with open(user.picture.picture.url, "rb") as image_file:
                image_data = image_file.read()
        except Exception:
            # Broad on purpose: user.picture may be unset (AttributeError)
            # or the file missing (OSError) — both fall back to a default.
            fallback = ("images/uploads/male.jpg" if user.gender == 'male'
                        else "images/uploads/female.jpg")
            with open(fallback, "rb") as image_file:
                image_data = image_file.read()
        # NOTE(review): content_type says png while the fallbacks are jpg —
        # browsers sniff this, but confirm the intended type.
        return HttpResponse(image_data, content_type="image/png")
    # return JsonResponse({"url":user.picture.picture.url})
@csrf_exempt
def verify(request):
    """Return an existing user's profile (with contacts) or create the user."""
    if request.method != 'POST':
        return JsonResponse({'status': 'error'})
    body = json.loads(request.body)
    if User.objects.filter(enroll=body['enroll']).count() > 0:
        user = User.objects.get(enroll=body['enroll'])
        contacts = [contact.enroll for contact in user.contacts.all()]
        profile = {"id": user.pk, "enroll": user.enroll, "gender": user.gender,
                   "branch": user.branch, "college": user.college,
                   "batch": user.batch, 'contacts': contacts}
        return JsonResponse({"status": "exist", "data": profile},
                            safe=False)
    user = User(enroll=body['enroll'])
    user.save()
    return JsonResponse({"status": "created"})
@csrf_exempt
def edit(request):
    """Update a user's profile fields from a JSON POST body.

    Expects keys: enroll, gender, branch, college, batch, status.
    Returns {"status": "true"} on success, {"status": "false"} otherwise.

    Fixes: the bare `except:` is narrowed (it hid programming errors), and
    a non-POST request no longer falls through returning None — Django
    views must return an HttpResponse (consistent with the sibling views).
    """
    if request.method != 'POST':
        return JsonResponse({'status': 'false'})
    body = json.loads(request.body)
    try:
        user = User.objects.get(enroll=body['enroll'])
    except (KeyError, User.DoesNotExist):
        # KeyError preserves the old behaviour when 'enroll' is missing.
        return JsonResponse({'status': 'false'})
    user.gender = body['gender']
    user.branch = body['branch']
    user.college = body['college']
    user.batch = body['batch']
    user.status = body['status']
    user.save()
    return JsonResponse({'status': 'true'})
|
983,347 | ac60db1fc98caaf81cc1d58d166eb7a2ba6fbb82 | import pytest
from zhiliao.config import RunConfig
# 登录用户名/密码
# Credential pairs consumed by the back_account fixture below.
# Index 0 is used when RunConfig.flag == 0, index 1 otherwise —
# presumably local/default vs. an alternate deployment; TODO confirm.
b_account = [
    {
        'username': 'admin',
        'password': '123456'
    },
    {
        'username': 'niejun',
        'password': 'Shengjiang@1541%'
    }
]
# 管理后台登录用户名/密码
@pytest.fixture(scope='function')
def back_account():
    """Return the backend-admin credentials for the active run environment."""
    index = 0 if RunConfig.flag == 0 else 1
    return b_account[index]
|
983,348 | d960c5da6cfd425c56d6bf42d8f10ffc00e9997d | import pGMT
import viscojapan as vj
# Bottom depth (km) of the fault model to render.
dep = 80
# Plot a map view of the fault geometry stored in the HDF5 model file.
plt = vj.gmt.FaultModelPlotter(
    '../fault_bott%dkm.h5'%dep
    )
plt.plot('fault_model_map_view_%dkm.pdf'%dep)
# Remove intermediate artifacts produced during plotting.
plt.clean()
|
983,349 | da184e5a16c07f3a7bb3c6f9a148cff4a7f0be85 | """
In front of you is a row of N coins, with values v1, v1, ..., vn.
You are asked to play the following game. You and an opponent take turns choosing either the first or last coin from the row, removing it from the row, and receiving the value of the coin.
Write a program that returns the maximum amount of money you can win with certainty, if you move first, assuming your opponent plays optimally.
"""
from typing import List
def maxMoney(values: List[int], curr: int = 0, me: bool = True) -> int:
    """Return the maximum total you can *guarantee* from the coin row.

    ``curr`` is the amount already banked and ``me`` flags whose turn it is;
    both are kept for backward compatibility with the original recursive
    signature.

    Fix: the previous version modelled the opponent as greedy (always
    grabbing the larger end coin), which overestimates the guaranteed
    amount whenever a greedy move is not the opponent's best play (e.g.
    [3, 1, 1, 100, 1] returned 104, but only 5 is guaranteed).  The game is
    zero-sum, so an optimal opponent minimises *our* total — their turn is
    a ``min`` over our outcomes.  Memoised over (lo, hi, turn): O(n^2)
    states instead of exponential list-slicing recursion.
    """
    coins = tuple(values)
    memo = {}

    def best(lo: int, hi: int, mine: bool) -> int:
        # Amount WE collect from coins[lo..hi] with `mine` to move.
        if lo > hi:
            return 0
        key = (lo, hi, mine)
        if key not in memo:
            take_left = best(lo + 1, hi, not mine)
            take_right = best(lo, hi - 1, not mine)
            if mine:
                memo[key] = max(coins[lo] + take_left, coins[hi] + take_right)
            else:
                # Optimal opponent leaves us the worse of the two subgames.
                memo[key] = min(take_left, take_right)
        return memo[key]

    return curr + best(0, len(coins) - 1, me)
assert maxMoney([9, 4, 5, 7, 6, 10, 2, 5]) == 30
print('passed')
|
983,350 | 5077ac005445442331d97542879131474131e19c | # -*- coding: utf-8 -*-
import csv
def load_access():
    """Load the acesso.csv dataset.

    Returns (X, Y): each X row is [home, como_funciona, contato] as ints,
    Y holds the matching `comprou` labels as ints; the header is skipped.

    Fixes: Python-2-only `reader.next()` and binary-mode open broke under
    Python 3 (csv needs text mode there); the file handle was also never
    closed. `next(reader)` and text-mode `with open(...)` work on both.
    """
    X = []
    Y = []
    with open('acesso.csv', 'r') as csv_file:
        reader = csv.reader(csv_file)
        next(reader)  # skip header row
        for home, como_funciona, contato, comprou in reader:
            X.append([int(home), int(como_funciona), int(contato)])
            Y.append(int(comprou))
    return X, Y
# x, y = load_access()
# print(x)
def load_find():
    """Load the cursos.csv dataset.

    Returns (X, Y): each X row is [home:int, busca:str, logado:int], Y is
    the raw `comprou` column (kept as strings, matching the original).

    Fixes: same Python-3 incompatibilities as load_access — `.next()`,
    binary-mode open, and the leaked file handle.
    """
    X = []
    Y = []
    with open('cursos.csv', 'r') as csv_file:
        reader = csv.reader(csv_file)
        next(reader)  # skip header row
        for home, busca, logado, comprou in reader:
            X.append([int(home), busca, int(logado)])
            Y.append(comprou)
    return X, Y
983,351 | 2850126e929651213a94abf09e93b4344f8b69c3 | ################################################################################
# Filename: test_language.py
# Author: Brandon Milton, http://brandonio21.com
# Date: 17 September 2015
#
# Tests to ensure the functionality of util/language.py
################################################################################
import unittest
from util.language import Language, Languages
from unittest import mock
from util.pathmapper import PathMapper
import os
from nose.plugins.deprecated import DeprecatedTest
class TestLanguage(unittest.TestCase):
    """Unit tests for the Language value object."""

    def test_init(self):
        """Verify Language.__init__ stores every constructor argument."""
        lang = Language('name', compileExtension='ext',
            compileCommand='cc', compileArguments='ca', runExtension='re',
            runCommand='rc', runArguments='ra')
        expected = {
            'name': 'name',
            '_compileExtension': 'ext',
            '_compileCommand': 'cc',
            '_compileArguments': 'ca',
            '_runExtension': 're',
            '_runCommand': 'rc',
            '_runArguments': 'ra',
        }
        for attr, value in expected.items():
            self.assertEqual(getattr(lang, attr), value)

    def test_load_from_dict(self):
        """Verify Language.load_from_dict maps every dict key onto the object."""
        source = {'language' : 'RUST',
                  'compileExtension' : 'rs',
                  'compileCommand' : 'rustc',
                  'compileArguments' : [],
                  'runExtension' : '',
                  'runCommand' : '{directory}/{fileNameWoExtension}',
                  'runArguments' : []
                  }
        lang = Language.load_from_dict(source)
        expected = {
            'name': 'RUST',
            '_compileExtension': 'rs',
            '_compileCommand': 'rustc',
            '_compileArguments': [],
            '_runExtension': '',
            '_runCommand': '{directory}/{fileNameWoExtension}',
            '_runArguments': [],
        }
        for attr, value in expected.items():
            self.assertEqual(getattr(lang, attr), value)
class TestLanguages(unittest.TestCase):
    """Unit tests for the Languages registry helper."""

    def tearDown(self):
        """
        Relinquish any languagesDict assignments
        """
        Languages._languagesDict = None

    def test_globals(self):
        """
        Ensure Languages globals are not bad
        """
        self.assertNotEqual(Languages.LANGUAGES_FILE, None)
        self.assertEqual(Languages._languagesDict, None)

    @mock.patch.object(Language, 'load_from_dict')
    @mock.patch.object(Languages, 'get_prevalent_extension_from_block')
    @mock.patch.object(Languages, 'get_languages_filepath')
    @mock.patch('util.language.fileops')
    def test_load_languages(self, mocked_language_fileops,
            mocked_languages_get_filepath, mocked_get_prev_extension,
            mocked_language_load_from_dict):
        """
        Ensure Languages.load_languages properly delegates to Language's load from dict
        """
        raise DeprecatedTest
        # NOTE: everything below is unreachable until the raise above is removed.
        mocked_language_fileops.get_json_dict.return_value = {
            'languages' : [
                { 'block' : 1 },
                { 'block' : 2 }
            ]
        }
        mocked_languages_get_filepath.return_value = 'path'
        mocked_get_prev_extension.side_effect = lambda x : 'ext1' if x == { 'block' : 1} else 'ext2'
        mocked_language_load_from_dict.return_value = 'haha'
        Languages.load_languages()
        self.assertEqual(Languages._languagesDict, {'ext1' : 'haha', 'ext2' : 'haha'})
        mocked_language_fileops.get_json_dict.assert_called_with('path')
        # Fixed: Mock has no `assert_called_any` — that name just created a
        # child mock and the checks silently passed. The real API is
        # assert_any_call.
        mocked_get_prev_extension.assert_any_call({'block' : 1})
        mocked_get_prev_extension.assert_any_call({'block' : 2})
        mocked_language_load_from_dict.assert_any_call({'block' : 1})
        mocked_language_load_from_dict.assert_any_call({'block' : 2})

    def test_get_prev_extension_from_block(self):
        """
        Ensure Languages.get_prevalent_extension_from_block works as intended
        """
        # compileExtension wins when both are present.
        block = {'compileExtension' : 'cpp',
                 'runExtension' : 'o'}
        self.assertEqual(Languages.get_prevalent_extension_from_block(block), 'cpp')
        block = { 'runExtension' : 'o'}
        self.assertEqual(Languages.get_prevalent_extension_from_block(block), 'o')
        block = {'compileExtension' : 'cpp'}
        self.assertEqual(Languages.get_prevalent_extension_from_block(block), 'cpp')

    @mock.patch.object(PathMapper, 'get_config_path')
    def test_get_languages_filepath(self, mocked_get_config_path):
        """
        Ensure Languages.get_languages_filepath properly delegates to PathMapper
        """
        mocked_get_config_path.return_value = 'confPath'
        self.assertEqual(Languages.get_languages_filepath(), os.path.join(
            'confPath', Languages.LANGUAGES_FILE))

    @mock.patch.object(Languages, 'load_languages')
    def test_get_language_from_extension(self, mocked_load_languages):
        """
        Ensure Languages.get_language_from_extension properly gets a language from ext
        """
        def populateDict():
            Languages._languagesDict = {'ext1' : 'lang1', 'ext2' : 'lang2'}
        mocked_load_languages.side_effect = populateDict
        self.assertEqual(Languages.get_language_from_extension('ext1'), 'lang1')
        mocked_load_languages.assert_called_with()
        self.assertEqual(Languages.get_language_from_extension('ext3'), None)

    @mock.patch.object(Languages, 'load_languages')
    def test_get_language_by_name(self, mocked_load_languages):
        """
        Ensure Languages.get_language_by_name properly gets a language from name
        """
        mockedLangOne = mock.MagicMock(spec=Language)
        mockedLangTwo = mock.MagicMock(spec=Language)
        mockedLangOne.name = 'Python'
        mockedLangTwo.name = 'C++'
        def populateDict():
            Languages._languagesDict = {'ext1' :mockedLangOne, 'ext2' : mockedLangTwo}
        mocked_load_languages.side_effect = populateDict
        self.assertEqual(Languages.get_language_by_name('C++'), mockedLangTwo)
        mocked_load_languages.assert_called_with()
        self.assertEqual(Languages.get_language_by_name('non'), None)
|
983,352 | 1545c7c4c9497d94741a96a1beb12c2836d877a0 | # https://leetcode.com/problems/minimum-size-subarray-sum/
class Solution:
    """Solutions for LeetCode 209 — Minimum Size Subarray Sum."""

    def minSubArrayLen(self, target: int, nums: [int]) -> int:
        # Sliding window: extend to the right, then shrink from the left
        # while the running sum still meets the target.
        subarrayLen = float('inf')
        runSum = 0
        loIdx = 0
        for i,n in enumerate(nums):
            runSum += n
            while runSum >= target:
                subarrayLen = min(subarrayLen, i-loIdx+1)
                runSum -= nums[loIdx]
                loIdx += 1
        # inf means no window ever reached the target.
        return subarrayLen if subarrayLen != float('inf') else 0
    '''
    Time complexity: O(n) where n ~ len(nums)
    Space complexity: O(1), since we aren't using any data structures that expand with nums
    '''
    def minSubArrayLen2(self, target: int, nums: [int]) -> int:
        # Alternative formulation driving both window edges from one loop:
        # `i` is the exclusive right edge, `loIdx` the inclusive left edge.
        subarrayLen = float('inf')
        runSum = 0
        loIdx = 0
        i = 0
        while i < len(nums):
            if runSum >= target:
                # `loIdx < i` keeps the window non-empty before measuring.
                if loIdx < len(nums) and loIdx < i:
                    subarrayLen = min(subarrayLen, i - loIdx)
                runSum -= nums[loIdx]
                loIdx += 1
            else:
                runSum += nums[i]
                i += 1
        # Drain: the final window may still satisfy the target after the
        # input is exhausted.
        while loIdx < len(nums):
            if runSum >= target:
                subarrayLen = min(subarrayLen, i - loIdx)
            runSum -= nums[loIdx]
            loIdx += 1
        return subarrayLen if subarrayLen != float('inf') else 0

    def test(self):
        # Ad-hoc driver; earlier cases were left commented out by the author.
        cases = [
            #dict(target=7, nums=[2,3,1,2,4,3], expected=2),
            #dict(target=4, nums=[1,4,4], expected=1),
            #dict(target=11, nums=[1,1,1,1,1,1,1,1], expected=0)
            dict(target=11, nums=[1,2,3,4,5], expected=3)
        ]
        for c in cases:
            print("executing case: ", c)
            result = self.minSubArrayLen(c["target"], c["nums"])
            print("result: ", result)
            assert result == c["expected"]
|
983,353 | f7947240ea1318520e43c86777cbf886153fe4e8 | # -*- coding:utf-8 -*-
# Author: Evan Mi
import threading
import time
# Event 类似java中的synchronized中的signal和wait
event = threading.Event()
event.is_set()
def ligther():
    """Traffic-light driver (sic: 'lighter') on a 1-second tick.

    Green (shared `event` set) while count <= 20 or exactly 30, red
    (event cleared) for counts 21-29; above 30 the event is re-set and the
    counter restarts.  Runs forever.
    """
    count = 0
    event.set()  # start green so cars can run immediately
    while True:
        if 20 < count < 30:
            if event.is_set():
                event.clear()
            print('red')
        elif count > 30:
            if not event.is_set():
                event.set()
                count = 0  # restart the cycle
        else:
            print('green')
        time.sleep(1)
        count += 1
def car(name):
    """Simulated car: prints a running message only while the light is green.

    `event.wait()` blocks whenever ligther() has cleared the shared event
    (red light).  Runs forever.
    """
    while True:
        event.wait()
        print('car name # %s # is running' % name)
        time.sleep(2)
# Start the light controller and one car; both loop forever, so the script
# must be interrupted to stop.
light = threading.Thread(target=ligther)
light.start()
car = threading.Thread(target=car, args=('car1',))  # NOTE: rebinds `car`, shadowing the function
car.start()
|
983,354 | 21a781ced741c4570915a157422b2b36e706c1af | import time
start_time = time.time()
import math
import csv
import json
import sys
import TSP_DPC
#csv.field_size_limit(sys.maxsize)
csv.field_size_limit(sys.maxsize)
n = 1500
paso = int(75000/n)
def csv2txt():
    """Sample every `paso`-th usable row from LE.csv (up to `n` rows).

    Each row is a single ';'-separated cell: CODCPSIG;CEN_EDU_L;NOMCPSIG;y;x.
    Coordinates are scaled by 100.  Returns (node, distancias) where node
    holds (x, y, CODCP) tuples and distancias the full [x, y, CODCP, CEN,
    NOM] records.
    """
    distancias = []
    node = []
    global n
    global paso
    with open('LE.csv') as file:
        reader = csv.reader(file)
        cont = 0  # accepted-row counter (row 0 is the header)
        p = 1     # rows seen since the last accepted one
        for fila in reader:
            if p == paso:
                if cont != 0:
                    f = fila[0].split(";")
                    if len(f)>=5:
                        try:
                            CODCP = int(f[0]) # CODCPSIG
                        except ValueError:
                            print("Except: ", ValueError)
                            break
                        CEN = f[1] # CEN_EDU_L
                        NOM = f[2] # NOMCPSIG
                        y = float(f[3])*100.0
                        x = float(f[4])*100.0
                        distancias.append([x, y, CODCP, CEN, NOM])
                        node.append((x,y,CODCP))
                    else:
                        # Malformed row: report it and retry on the next row
                        # without consuming this sampling slot.
                        print(f)
                        p-=1
                        continue
                if cont>n:
                    break
                cont+=1
                p = 0
            p+=1
    return node, distancias
def createLinks(path):
    """Convert (source, target) edge pairs into d3-style link dicts.

    Fix: removed the unused local `mn = len(path)` and collapsed the
    append loop into a comprehension.
    """
    return [{"source": edge[0], "target": edge[1]} for edge in path]
def createNodes(node):
    """Map (x, y, id) tuples to plot-space node dicts: axes swapped, y negated."""
    return [{"x": pt[1], "y": -pt[0], "cod": pt[2]} for pt in node]
def crearLugar(distancias):
    """Build the info.json place records from the raw distancias rows."""
    return [{"CODCPSIG": row[2], "CEN_EDU_L": row[3], "NOMCPSIG": row[4]}
            for row in distancias]
# Load the sampled settlements and solve the travelling-salesman route.
node, distancias = csv2txt()
path, w = TSP_DPC.TSP(node)
print("Distancia recorrida: ", w)
# Emit the route graph for the visualisation front-end.
nd = createNodes(node)
lk = createLinks(path)
data = {"nodes": nd, "links": lk}
with open('graph.json', 'w') as outfile:
    json.dump(data, outfile)
# Emit the per-place metadata alongside it.
lg = crearLugar(distancias)
data = {"lugar": lg}
with open('info.json', 'w') as outfile:
    json.dump(data, outfile)
nombres = []
for a in distancias:
    nombres.append(a[3])  # NOTE(review): `nombres` is built but never used below
print("--- %s seconds ---" % (time.time() - start_time))
print("nodos:",n);
|
983,355 | 439afe0d860ff81a867916cab528264c3292aa07 | # i = 0
# while i < 11:
# print(i)
# i += 1
# while True:
# i = 0
# for i in range(0,11):
# print(i)
# i += 1
# break
# i = input()
# print(i)
# i = int(input())
# print(i)
# import re
#
#
# def is_number(num):
# pattern = re.compile(r'^[-+]?[-0-9]\d*\.\d*|[-+]?\.?[0-9]\d*$')
# result = pattern.match(num)
# if result:
# return True
# else:
# return False
#
#
# while True:
# i = input("Enter Number:")
# if i == 'q':
# print('-----End----')
# break
# if is_number(i):
# if type(eval(i)) == int:
# i = int(i)
# if i > 0:
# print('{} is positive number'.format(i))
# elif i < 0:
# print('{} is negative number'.format(i))
# elif i == 0:
# print('{} is Zero'.format(i))
# else:
# print('{} is decimals'.format(i))
# else:
# print('{} is a string'.format(i))
# str = input("Enter something:")
# for i in str:
# print(i)
# while True:
# for i in str:
# print(i)
# break
# lists = []
# # i = 0
# # while i < 5:
# # list_num = input('enter number:')
# # if list_num == 'q':
# # break
# # else:
# # list_num = int(list_num)
# # lists.append(list_num)
# # print(lists)
# # sum_num = sum(lists)
# # print('sum:{}'.format(sum_num))
# # i += 1
|
983,356 | 56c7ca147a29c6c3f6029c96d133c2ad1de9fb42 | import subprocess
import csv
if __name__ == '__main__':
    # Benchmark grid: every agent on every layout (search agents also across
    # depths); each configuration plays 7 quiet games with 2 ghosts, and the
    # average score / average step time are collected into experiments.csv.
    players = ["ReflexAgent", "MinimaxAgent", "AlphaBetaAgent", "RandomExpectimaxAgent"]
    layouts = ["capsuleClassic", "contestClassic", "mediumClassic", "minimaxClassic", "openClassic", "originalClassic",
               "smallClassic", "testClassic", "trappedClassic", "trickyClassic"]
    depths = [2,3,4]
    #depths = [2, 3]
    #experiments = pd.DataFrame(columns=['Name', 'Depth Limit', 'Layout', 'Average Score', 'Average Time'])
    experiments = list()
    i = 0
    for layout in layouts:
        for player in players:
            if not player == 'ReflexAgent':
                for depth in depths:
                    # NOTE(review): subprocess.run is given one command string
                    # without shell=True — this works on Windows but raises on
                    # POSIX; confirm the intended platform.
                    command = 'python pacman.py -l ' + layout + ' -q -k 2 ' + '-n 7 -p ' + player + ' -a depth=' + str(depth)
                    res = subprocess.run(command, stdout=subprocess.PIPE).stdout.decode('utf-8')
                    # NOTE(review): after split(" ") no token contains a space,
                    # so the '"Average Score: " in c' filter looks like it can
                    # never match — verify against the actual pacman.py output.
                    avgScore = float([c for c in res.replace('\n', ' ').replace('\r', ' ').split(" ") if "Average Score: " in c][0].split()[2])
                    times = [float(c.split()[2]) for c in res.replace('\n', ' ').replace('\r', ' ').split(" ") if "Step Time: " in c]
                    avgTime = sum(times) / float(len(times))
                    experiment = [player, depth, layout, avgScore, avgTime]
                    experiments.append(experiment)
                    i += 1
                    print('[INFO] - finished exp No.' + str(i) + ": " + command)
            else:
                # ReflexAgent takes no depth option; recorded with depth 1.
                command = 'python pacman.py -l ' + layout + ' -q -k 2 ' + '-n 7 -p ' + player
                res = subprocess.run(command, stdout=subprocess.PIPE).stdout.decode('utf-8')
                avgScore = float([c for c in res.replace('\n', ' ').replace('\r', ' ').split(" ") if "Average Score: " in c][0].split()[2])
                times = [float(c.split()[2]) for c in res.replace('\n', ' ').replace('\r', ' ').split(" ") if "Step Time: " in c]
                avgTime = sum(times) / float(len(times))
                experiment = [player, 1, layout, avgScore, avgTime]
                experiments.append(experiment)
                i += 1
                print('[INFO] - finished exp No.' + str(i) + ": " + command)
    with open('experiments.csv', 'w') as f:
        writer = csv.writer(f)
        writer.writerows(experiments)
        f.close()  # NOTE(review): redundant inside the with-block
983,357 | 0380275428c1a197b12cf5f2df7668f9c9090784 | from typing import Any, Dict, Union
from .base import Request, TelegramMethod
class DeleteChatStickerSet(TelegramMethod[bool]):
    """
    Use this method to delete a group sticker set from a supergroup. The bot must be an
    administrator in the chat and must have the appropriate admin rights; the
    can_set_sticker_set field optionally returned by getChat tells whether the bot may
    call this method. Returns True on success.

    Source: https://core.telegram.org/bots/api#deletechatstickerset
    """

    __returning__ = bool

    chat_id: Union[int, str]
    """Unique identifier for the target chat or username of the target supergroup (in the format
    @supergroupusername)"""

    def build_request(self) -> Request:
        # Serialize the method fields and wrap them in the API request envelope.
        payload: Dict[str, Any] = self.dict()
        return Request(method="deleteChatStickerSet", data=payload)
|
983,358 | 76e147f5ae592510ea7aed3c8e24b6ef8fa00dfe | import sys, os, re
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from nuc_tools import io, util
from formats import bed, sam
# Command line: one or more SAM/BAM paths to bin into BED data tracks.
sam_paths = sys.argv[1:]
# Molecule size ranges (bp) binned into separate output tracks.
size_ranges = [(0, 200), (200, 400), (400, 500), (500, 1000)]
for bin_size in (1000, 100):
    msg = 'Bin size: {}'.format(bin_size)
    print(msg)
    for size_range in size_ranges:
        a, b = size_range
        msg = '  Mol size range: {}-{}'.format(a, b)  # NOTE(review): built but never printed
        # Output suffix encodes the size range and bin width (bp vs kb).
        if bin_size < 1e3:
            file_end = '_sz{}-{}_{}bp.bed'.format(a, b, int(bin_size))
        else:
            file_end = '_sz{}-{}_{}k.bed'.format(a, b, int(bin_size/1e3))
        for sam_path in sam_paths:
            bed_path = os.path.splitext(sam_path)[0] + file_end
            util.info('Loading and binning {}'.format(sam_path))
            data_dict = sam.load_data_track(sam_path, bin_size, min_qual=10, num_cpu=8, mol_size_range=size_range)
            #for chromo in data_dict:
            #  data_dict[chromo]['strand'] = True
            util.info('Saving {}'.format(bed_path))
            bed.save_data_track(bed_path, data_dict, as_float=True)
983,359 | a9d4dce5f4c7ef1c614c91391eb0c451818dc5d3 | def selection_sort(arr):
for i in range(len(arr)):
min = i
for j in range(i+1, len(arr)):
if arr[j] < arr[min]:
min = j
swap = arr[min]
arr[min] = arr[i]
arr[i] = swap
return arr
def insertion_sort(arr):
    """Sort `arr` in place in ascending order via insertion sort; return it."""
    for idx in range(len(arr)):
        # Bubble the new element left until it meets a smaller-or-equal one.
        pos = idx
        while pos > 0 and arr[pos] < arr[pos - 1]:
            arr[pos - 1], arr[pos] = arr[pos], arr[pos - 1]
            pos -= 1
    return arr
def shell_sort(arr):
    """Sort `arr` in place via shell sort (Knuth gap sequence 1, 4, 13, ...)."""
    n = len(arr)
    gap = 1
    while gap < n / 3:
        gap = 3 * gap + 1
    while gap >= 1:
        # Gapped insertion sort for the current gap.
        for idx in range(gap, n):
            pos = idx
            while pos >= gap and arr[pos] < arr[pos - gap]:
                arr[pos - gap], arr[pos] = arr[pos], arr[pos - gap]
                pos -= gap
        gap //= 3
    return arr
983,360 | 8a647a4651b4cedb85ba942979e1e3f1076576a0 | from __future__ import print_function
from datetime import datetime
from fairness.drivers.libvirt_driver import LibvirtConnection
class RUI(object):
    """Resource utilization information (RUI) for one libvirt domain.

    Keeps the previous cumulative counters on the instance so each call to
    get_utilization() can return per-period deltas.
    """

    # Class-level map shared by all instances.
    server_greediness = {}

    def __init__(self):
        # Cumulative counters as of the last sample; zero before the first call.
        self.cpu_time = 0
        self.memory_used = 0
        self.disk_bytes_read = 0
        self.disk_bytes_written = 0
        self.network_bytes_received = 0
        self.network_bytes_transmitted = 0
        self.time_stamp = datetime.now()

    def get_utilization(self, domain_id):
        """Sample the domain and return the deltas since the previous call.

        Returns [cpu_time, memory_used, disk_bytes_read, disk_bytes_written,
        network_bytes_rx, network_bytes_tx, time_lapse]; memory_used is an
        absolute value (memory is not time-shared) and time_lapse is the
        timedelta covering the sampling period.

        Fix: the disk and network stat tuples are now fetched ONCE each —
        the original issued two libvirt queries per subsystem (one per
        index), which was wasteful and could mix values from two samples.
        """
        # Memorize the values of the last period.
        last_cpu_time = self.cpu_time
        last_disk_bytes_read = self.disk_bytes_read
        last_disk_bytes_written = self.disk_bytes_written
        last_network_bytes_rx = self.network_bytes_received
        last_network_bytes_tx = self.network_bytes_transmitted
        last_time_stamp = self.time_stamp

        # Retrieve the new values and calculate the differences.
        conn = LibvirtConnection()

        # Difference in cumulative vCPU time used.
        self.cpu_time = conn.get_vcpu_stats(domain_id)
        cpu_time = self.cpu_time - last_cpu_time

        # Current memory in use; no delta against the previous period needed.
        memory_used = conn.get_memory_stats(domain_id)

        # Differences in disk I/O (single query per subsystem).
        disk_stats = conn.get_disk_stats(domain_id)
        self.disk_bytes_read = disk_stats[0]  # index 2 for IOPS
        disk_bytes_read = self.disk_bytes_read - last_disk_bytes_read
        self.disk_bytes_written = disk_stats[1]  # index 3 for IOPS
        disk_bytes_written = self.disk_bytes_written - last_disk_bytes_written

        # Differences in network I/O.
        net_stats = conn.get_network_stats(domain_id)
        self.network_bytes_received = net_stats[0]
        network_bytes_rx = self.network_bytes_received - last_network_bytes_rx
        self.network_bytes_transmitted = net_stats[1]
        network_bytes_tx = self.network_bytes_transmitted - last_network_bytes_tx

        # Elapsed wall-clock time between this and the last measurement.
        self.time_stamp = datetime.now()
        time_lapse = self.time_stamp - last_time_stamp
        return [cpu_time, memory_used, disk_bytes_read, disk_bytes_written,
                network_bytes_rx, network_bytes_tx, time_lapse]
|
983,361 | 7fb643d60b43c562189e664d640b173ae9b0f7c6 | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy import Field
from scrapy.loader import ItemLoader
from scrapy.loader.processors import TakeFirst,MapCompose
import re
from datetime import datetime
def change_date(_values):
    """Parse a 'YYYY-MM-DD HH:MM' string into a datetime object."""
    return datetime.strptime(_values, '%Y-%m-%d %H:%M')
def change_blank(_values):
    """Trim surrounding whitespace and return the text as UTF-8 bytes."""
    stripped = _values.strip()
    return stripped.encode('utf-8')
def change_int(_values):
    """Extract the first run of digits from the string; 0 when none exist."""
    found = re.match(".*?(\d+).*", _values)
    return int(found.group(1)) if found else 0
class BeihuaItemLoader(ItemLoader):
    # Custom ItemLoader: collapse each field's extracted value list to its
    # first element instead of keeping the whole list.
    default_output_processor = TakeFirst()
class BeihuaItem(scrapy.Item):
    """Scraped forum post: title/author/content plus counters and timestamps."""
    # Cleaned via the processors defined above this class.
    title = Field(
        input_processor = MapCompose(change_blank)
    )
    url = Field()
    author_name = Field()
    content = Field(
        input_processor = MapCompose(change_blank)
    )
    created_time = Field(
        input_processor = MapCompose(change_date)
    )
    comments_num = Field(
        input_processor = MapCompose(change_int)
    )
    crawl_time = Field()

    def get_insert_sql(self):
        # Return (sql, params) for an upsert of this item; duplicates update
        # only the comment counter.
        # NOTE(review): the table name `your_name` looks like a template
        # placeholder — confirm the real table before deploying.
        insert_sql = """
            INSERT INTO your_name(url,title,author_name,content,comments_num,
            created_time,crawl_time)
            VALUES (%s, %s, %s, %s, %s, %s, %s)
            ON DUPLICATE KEY UPDATE comments_num=VALUES(comments_num)
        """
        params = (
            self["url"],self["title"],self["author_name"],self["content"],self["comments_num"],
            self["created_time"],self["crawl_time"]
        )
        return insert_sql,params
|
983,362 | 1d2a98772a5a271566a46f42d7c9cbe6886973ec | from openerp import api
from openerp import models, fields
from openerp.osv import fields as Fields
from openerp.exceptions import Warning
from openerp.exceptions import UserError, ValidationError
class master_deliver(models.Model):
_name= "master.deliver"
_inherit = ['mail.thread']
_order = "create_date"
    def _get_default(self):
        # Stub returning None — apparently an unused/leftover default hook.
        return
    @api.model
    def default_get(self, fields):
        """Prefill revision/status defaults from the caller's context.

        When the context flags an in-progress revision update
        (`is_updating_rev`), the external status and status date defaults
        come from the context; when it flags a status update, the revision
        number and revision date come from the context instead.
        """
        rec = super(master_deliver, self).default_get(fields)
        if self._context.get('is_updating_rev'):
            rec.update({
                'external_status': self._context.get('update_external_status'),
                'status_date': self._context.get('update_status_date'),
            })
        elif self._context.get('is_updating_status'):
            rec.update({
                'rev_num': self._context.get('update_rev_num'),
                'revision_date': self._context.get('update_revision_date'),
            })
        return rec
    def _set_external(self):
        # Inverse handler for the computed `external_status` field;
        # direct writes are intentionally ignored.
        pass
    @api.multi
    def renotes(self):
        # Used by the IFI/IFR/IFA/IFC dashboards (how many were submitted):
        # the transmit date on each child must be rewritten — any document
        # whose children were ever sent must carry a trans_date.
        # Delegates to the compute method; the commented block below is the
        # previous direct-write implementation, kept for reference.
        self._get_external()
        # last_history = self.history_ids.sorted(key=lambda r: r.status_date, reverse=True).sorted(key=lambda r: r.rev_num_seq, reverse=True)
        # # last_history = self.history_ids.filtered(lambda r: r.trans_date <> False).sorted(key=lambda r: r.status_date, reverse=True).sorted(key=lambda r: r.rev_num_seq, reverse=True)
        # if len(last_history) > 0:
        #     self.write({
        #         'trans_date': last_history[0].trans_date,
        #         'recv_rece_date': last_history[0].recv_rece_date,
        #         # 'external_status': last_history[0].external_status,
        #         # 'status_date': last_history[0].status_date,
        #         # 'rev_num': last_history[0].rev_num,
        #         # 'revision_date': last_history[0].revision_date,
        #     })
        return
    @api.one
    def active_all(self):
        # One-off data fix: mark every record with an unset state as Active.
        doc_to_change = self.search([['state', '=', False]])
        doc_to_change.write({'state': 'Active'})
        return
    @api.one
    def _change_rev(self, param):
        # Deduplicate conf.rev.num rows named `param`: re-point every
        # document at the first duplicate and delete the rest.
        # NOTE(review): pop(0) raises when no row matches `param` — callers
        # appear to rely on at least one existing; confirm.
        rev_all_ids = self.env['conf.rev.num'].search([['name', '=', param]]).ids
        doc_to_change = self.search([['rev_num', 'in', rev_all_ids]])
        jadi_kesini = rev_all_ids.pop(0)
        doc_to_change.write({'rev_num': jadi_kesini})
        self.env['conf.rev.num'].search([['id', 'in', rev_all_ids]]).unlink()
        return
    @api.one
    def change_rev(self):
        # Deduplicate the common revision-number rows "0".."3".
        # NOTE(review): "4" is commented out — confirm it was excluded on purpose.
        master_deliver._change_rev(self, "0")
        master_deliver._change_rev(self, "1")
        master_deliver._change_rev(self, "2")
        master_deliver._change_rev(self, "3")
        # master_deliver._change_rev(self, "4")
    @api.one
    def _change_exstat(self, param):
        # Deduplicate conf.external.status rows named `param` (same scheme
        # as _change_rev, but tolerant of zero matches).
        rev_all_ids = self.env['conf.external.status'].search([['name', '=', param]]).ids
        if len(rev_all_ids)>0:
            doc_to_change = self.search([['external_status', 'in', rev_all_ids]])
            jadi_kesini = rev_all_ids.pop(0)
            doc_to_change.write({'external_status': jadi_kesini})
            self.env['conf.external.status'].search([['id', 'in', rev_all_ids]]).unlink()
        return
    @api.one
    def change_exstat(self):
        # Deduplicate the known external status values.
        # NOTE(review): "IFR" is invoked three times — the repeats are
        # harmless no-ops after the first, but look accidental; confirm.
        master_deliver._change_exstat(self, "AFC")
        master_deliver._change_exstat(self, "APC")
        master_deliver._change_exstat(self, "IFR")
        master_deliver._change_exstat(self, "RE-AFC")
        master_deliver._change_exstat(self, "RE-IFA")
        master_deliver._change_exstat(self, "IFC")
        master_deliver._change_exstat(self, "IFR")
        master_deliver._change_exstat(self, "IFR")
    @api.one
    @api.depends('history_ids')
    def _get_external(self):
        """Derive status/revision fields from the latest history entry.

        For non-history records, the most recent history line (ordered by
        status_date, then rev_num_seq) supplies external_status,
        status_date, rev_num, revision_date, trans_date and recv_rece_date;
        with no history, values fall back to the caller's context.
        """
        for record in self:
            if record.is_history is False :
                # Latest history entry: sort by status_date, then rev_num_seq.
                tes = record.history_ids.sorted(key=lambda r: r.status_date, reverse=True).sorted(key=lambda r: r.rev_num_seq, reverse=True)
                # tes = record.history_ids.filtered(lambda r: r.trans_date <> False).sorted(key=lambda r: r.status_date, reverse=True).sorted(key=lambda r: r.rev_num_seq, reverse=True)
                # tes = record.history_ids.sorted(key=lambda r: r.status_date, reverse=True).sorted(key=lambda r: r.rev_num_seq, reverse=True)
                if len(tes) > 0:
                    record.external_status = tes[0].external_status
                    record.status_date = tes[0].status_date
                    record.rev_num = tes[0].rev_num
                    record.revision_date = tes[0].revision_date
                    record.trans_date = tes[0].trans_date
                    record.recv_rece_date = tes[0].recv_rece_date
                elif self._context.get('external_status') is not None:
                    record.external_status = self._context.get('external_status')
                    record.status_date = self._context.get('status_date')
                elif self._context.get('rev_num') is not None:
                    record.rev_num = self._context.get('rev_num')
                    record.revision_date = self._context.get('revision_date')
discipline = fields.Many2one('conf.discipline', 'Discipline', ondelete='restrict', copy=True)
doc_categ = fields.Many2one('conf.doc.categ', 'Category', ondelete='restrict', copy=True)
doc_sub = fields.Many2one('conf.doc.sub', 'Subsystem', ondelete='restrict', copy=True)
name = fields.Char(string='Document Number', copy=True)
doc_title = fields.Char(string='Document Title', copy=True)
# --- Document identity & classification -------------------------------------
doc_type = fields.Many2one('conf.doc.type', 'Document Type', ondelete='restrict', copy=True)
doc_type_desc = fields.Char(string='Type Description', related='doc_type.desc', store=True)
originator = fields.Many2one('res.partner', string='Originator', copy=False)
# --- Versioning: a record is either a live version or a history snapshot ----
# version_id points a history snapshot at its live parent; history_ids is the
# inverse list of snapshots hanging off a live record.
version_id = fields.Many2one('master.deliver', string='Versions', copy=False)
history_ids = fields.One2many('master.deliver', 'version_id', 'History', copy=False)
revision_date = fields.Date(string='Revision Date')
status_date = fields.Date(string='Status Date')
is_history = fields.Boolean(string='Is History')
doc_status = fields.Many2one('conf.doc.status', 'Status', ondelete='restrict', copy=False)
rev_num = fields.Many2one('conf.rev.num', 'Revision Number', ondelete='restrict', copy=False)
rev_num_seq = fields.Integer(string='Sequence', related='rev_num.sequence', store=False)
# External review status (e.g. IFA / IFI / RE-IDC codes)
external_status = fields.Many2one('conf.external.status', 'External Status', compute='_get_external', inverse='_set_external', store=True)
doc_pred = fields.Char(string='Predecessor', copy=True)
alt_doc = fields.Char(string='Alternative Document #', copy=True)
notes = fields.Text(string='Notes', copy=True)
# --- IDC (inter-discipline check) linkage -----------------------------------
idc_id = fields.Many2one('doc.idc', 'IDC', ondelete='restrict', copy=False)
# idc_id = fields.Many2many('doc.idc', 'master_to_idc', 'line_ids', 'idc_id', string="Related IDC", copy=False)
idc_number = fields.Char(string='IDC Number', copy=False)
created_date = fields.Date(string='Created Date', default=lambda self: self._context.get('date', fields.Date.context_today(self)))
sched_plan = fields.Date(string='Schedule Plan')
sched_date = fields.Date(string='Schedule Date')
send_date = fields.Date(string='IDC Sending Date', copy=False)
rece_date = fields.Date(string='IDC Receiving Date', copy=False)
due_date = fields.Date(string='Due Date', copy=False)
status_comment = fields.Many2one('conf.rec.comment', 'Status Comment')
# SEND_ID and RECE_ID must always be kept separate:
# envelopes from third parties are always tidy and are recorded as such,
# unlike the IDC flow, where the third-party envelope is ignored once the
# document has been received.
send_id = fields.Many2many('doc.rece', 'master_to_send', 'line_ids', 'send_id', string="Related Sending", copy=False)
trans_number = fields.Char(string='Outgoing Transmittal Number', copy=False)
trans_date = fields.Date(string='Transmittal Date', copy=False)
trans_due_date = fields.Date(string='Transmittal Due Date', copy=False)
recipient_rece_date = fields.Date(string='Recipient Receive Date', copy=False)
rece_id = fields.Many2many('doc.rece', 'master_to_rece', 'line_ids', 'rece_id', string="Related Receiving", copy=False)
recv_trans_number = fields.Char(string='Incoming Transmittal Number', copy=False)
recv_rece_date = fields.Date(string='Receiving Date', store=True, copy=False)
recv_comment = fields.Many2one('conf.rec.comment', 'Status Comment', ondelete='restrict', copy=False)
state = fields.Selection(selection=[('Active', 'Active'), ('Inactive', 'Inactive')], string='Active')
# Old-API default: new records start in the 'Active' state.
_defaults = {
    'state': 'Active',
}
# NOTE(review): Check(1=1) is a no-op placeholder — real uniqueness is
# enforced in Python by _check_name below; the commented-out unique(name)
# constraint shows the earlier DB-level approach.
_sql_constraints = [
    ('name', 'Check(1=1)', "The system record Can't be duplicate value for this field!")
    # ('name', 'unique(name)', "The system record Can't be duplicate value for this field!")
]
@api.constrains('name')
def _check_name(self):
    """Forbid two non-history MDR records from sharing the same Doc Number.

    Raises:
        ValidationError: if another live (non-history) record already
            carries the same ``name``.
    """
    # @api.constrains may be invoked on a multi-record set; the original
    # read self.name directly, which fails on non-singleton recordsets.
    # Validate each record individually instead.
    for record in self:
        # The search includes the record itself, hence the "> 1" test.
        duplicates = record.search([('name', '=', record.name), ('is_history', '=', False)])
        if len(duplicates) > 1:
            raise ValidationError("There is already existing MDR with similiar name Doc Number")
@api.model
def create(self, vals):
    """Create a master.deliver record.

    When the context carries an ``update`` key ("status" or "rev"), the
    record being created is a history snapshot: immutable master data is
    copied down from the parent version, and the revision metadata is
    forwarded to downstream logic via the context.

    :param vals: field values for the new record
    :return: the newly created record
    """
    if self._context.get("update") is not None:
        # Default to an empty context so the super() call below cannot fail
        # with a NameError when no snapshot context is built (the original
        # only assigned context_to_pass inside the status/rev branches).
        context_to_pass = {}
        if vals['is_history']:
            if vals['version_id']:
                parent_obj = self.browse(vals['version_id'])
                # Snapshot the parent's immutable master data onto the
                # history record.
                vals['discipline'] = parent_obj.discipline.id
                vals['doc_categ'] = parent_obj.doc_categ.id
                vals['doc_sub'] = parent_obj.doc_sub.id
                vals['name'] = parent_obj.name
                vals['doc_title'] = parent_obj.doc_title
                vals['doc_pred'] = parent_obj.doc_pred
                vals['alt_doc'] = parent_obj.alt_doc
                vals['doc_type'] = parent_obj.doc_type.id
                vals['sched_plan'] = parent_obj.sched_plan
                vals['notes'] = parent_obj.notes
                vals['is_history'] = True
            # "status" and "rev" updates forward identical revision
            # metadata (merged from two byte-identical branches in the
            # original).
            if self._context.get("update") in ("status", "rev"):
                context_to_pass = {
                    'rev_num': vals['rev_num'],
                    'revision_date': vals['revision_date'],
                    'external_status': vals['external_status'],
                    'status_date': vals['status_date'],
                    'parent_id': vals['version_id'],
                }
        return super(master_deliver, self.with_context(context_to_pass)).create(vals)
    return super(master_deliver, self).create(vals)
@api.one
def unlink(self):
    """Delete the record unless it is still referenced elsewhere.

    A document that is still linked to an IDC, an outgoing transmittal
    (sending) or an incoming transmittal must be detached first —
    deleting it directly would orphan those links.

    Raises:
        Warning: if any of idc_id / send_id / rece_id is still set.
    """
    text = "Please unlink this document from IDC / Sending / Incoming Transmittal"
    # Recordset truthiness replaces the original ".id != False" checks
    # (which raise on multi-record m2m values); the original's `return`
    # statements after each raise were unreachable and are dropped, as is
    # the unused local `eja`.
    if self.idc_id or self.send_id or self.rece_id:
        raise Warning(text)
    return super(master_deliver, self).unlink()
@api.multi
def unlink_doc_send(self):
    """Detach this line from the parent doc.send record and clear the
    outgoing transmittal date, then refresh the parent version's notes.

    Expects the parent doc.send id in the context under 'parent_id'.
    """
    send_doc = self.env['doc.send'].browse(self._context['parent_id'])
    # (3, id) is the Odoo x2many "unlink" command: drop the relation
    # without deleting the record itself.
    send_doc.write({'line_ids': [(3, self.id)]})
    self.write({'trans_date': False})
    self.version_id.renotes()
    return
@api.multi
def unlink_doc_rece(self):
    """Detach this line from the parent doc.rece record and clear the
    incoming receiving date, then refresh the parent version's notes.

    Expects the parent doc.rece id in the context under 'parent_id'.
    """
    rece_doc = self.env['doc.rece'].browse(self._context['parent_id'])
    # (3, id) is the Odoo x2many "unlink" command: drop the relation
    # without deleting the record itself.
    rece_doc.write({'line_ids': [(3, self.id)]})
    self.write({'recv_rece_date': False})
    self.version_id.renotes()
    return
|
983,363 | a9eae23a608b4260a167c6f205bf797b56b32dbb | #!/usr/bin/python
#-*- coding:utf-8 -*-
#****************************************************
# Author: nile cui - nile.cui@gmail.com
# Last modified: 2012-12-18 16:23
# Filename: unicode.py
# Description:
#****************************************************
# Build a unicode string directly from a u'' literal (Python 2).
s1=u'哈'
print s1
# Decode a UTF-8 byte string into a unicode object.
s2=unicode('蛤','utf-8')
print s2
# repr() of a unicode string shows the escaped code points.
s3=u'test汉'
print repr(s3)
# Encode the unicode string back into a UTF-8 byte string.
s4=s3.encode('utf-8')
print s4
|
983,364 | bda8d8241ae1c63012acc1ecbc7e36884f4735a2 | #스쿨맘 매크로
# Interactive macro: fill schmomtext.xlsx with per-student class-start
# notification rows for the "SchoolMom" messaging workbook.
import openpyxl
wb = openpyxl.load_workbook('schmomtext.xlsx')
sheet1 = wb.active
# Prompts collect grade, class, subject and period (Korean UI strings).
classnum=input("(1, 2와 같이 숫자만 입력할 것 )/// 학년을 입력해주세요: ")
classnum2=input("(1, 2와 같이 숫자만 입력할 것 )/// 반을 입력해주세요: ")
classroom=input("(정보, 과학B)/// 수업을 입력하세요 : ")
classtime=input("(1, 2와 같이 숫자만 입력할 것 )/// 수업 교시를 입력하세요 : ")
temid=0
stuid=list()
# Reset: clear any previous run's rows (rows 2-29, columns A-G).
for i in range(2,30):
    for j in range(1,8):
        sheet1.cell(row=i, column=j).value=None
print(("(1, 2와 같이 숫자만 입력할 것 ) 다 입력시에는 t입력 후 엔터"))
# Collect student numbers until the sentinel 't' is entered.  NOTE: the
# sentinel itself is appended to stuid; the fill loop below compensates
# by iterating range(len(stuid)-1), which skips that last element.
while(temid!='t'):
    temid=input("학생 번호를 입력해주세요 : ")
    stuid.append(temid)
# Write one notification row per student, starting at row 2.
for i in range(len(stuid)-1):
    sheet1.cell(row=i+2, column=1).value=classnum
    sheet1.cell(row=i+2, column=2).value=classnum2
    sheet1.cell(row=i+2, column=3).value=stuid[i]
    sheet1.cell(row=i+2, column=4).value=classroom
    sheet1.cell(row=i+2, column=5).value='담당교사입니다. '
    sheet1.cell(row=i+2, column=6).value=classtime+'교시'
    sheet1.cell(row=i+2, column=7).value='수업이 시작되었으니 즉시 수강 바랍니다.'
wb.save('schmomtext.xlsx')
|
983,365 | b7ab2a21d3ab349919d2527e781d53933ecb78dd | """
Performs software triggered tomography
"""
#from pcoDetectorWrapper import PCODetectorWrapper
from gda.configuration.properties import LocalProperties
from gda.data.scan.datawriter import NXSubEntryWriter, NXTomoEntryLinkCreator
from gda.data.scan.datawriter.DefaultDataWriterFactory import \
createDataWriterFromFactory
from gda.device.scannable import ScannableBase, ScannableUtils, SimpleScannable
from gda.device.scannable.scannablegroup import ScannableGroup
from gda.jython import InterfaceProvider
from gda.jython.commands.ScannableCommands import createConcurrentScan
from gda.scan import ScanPositionProvider
from gda.util import OSCommandRunner
from gdascripts.messages import handle_messages
from gdascripts.metadata.metadata_commands import setTitle
from gdascripts.parameters import beamline_parameters
from java.lang import InterruptedException
import sys
from gdascripts.metadata.metadata_commands import meta_add
class EnumPositionerDelegateScannable(ScannableBase):
    """
    Adapter mapping numeric shutter demands onto an enum positioner:
    1 -> "Open", 0 -> "Close"; any other demand is ignored.
    """
    def __init__(self, name, delegate):
        self.name = name
        self.inputNames = [name]
        self.delegate = delegate
    def isBusy(self):
        # Busy state is entirely delegated to the wrapped positioner.
        return self.delegate.isBusy()
    def rawAsynchronousMoveTo(self, new_position):
        demand = int(new_position)
        if demand == 1:
            self.delegate.asynchronousMoveTo("Open")
        elif demand == 0:
            self.delegate.asynchronousMoveTo("Close")
    def rawGetPosition(self):
        # Report 1 only when the delegate is fully "Open"; any other
        # enum position (including "Close") reads back as 0.
        return 1 if self.delegate.getPosition() == "Open" else 0
def make_tomoScanDevice(tomography_theta, tomography_shutter, tomography_translation,
                        tomography_optimizer, image_key, tomography_imageIndex):
    """Bundle the tomography axes into a single configured ScannableGroup
    named "tomoScanDevice".  The shutter is wrapped so the scan can drive
    it with numeric 0/1 demands instead of "Close"/"Open" strings.
    Member order matters: it must match the scan-point tuple layout.
    """
    members = (
        tomography_theta,
        EnumPositionerDelegateScannable("tomography_shutter", tomography_shutter),
        tomography_translation,
        tomography_optimizer,
        image_key,
        tomography_imageIndex,
    )
    device = ScannableGroup()
    for member in members:
        device.addGroupMember(member)
    device.setName("tomoScanDevice")
    device.configure()
    return device
def generateScanPoints(inBeamPosition, outOfBeamPosition, theta_points, darkFieldInterval, flatFieldInterval,
                        imagesPerDark, imagesPerFlat, optimizeBeamInterval, pattern="default"):
    """Build the ordered list of scan points for a tomography step scan.

    Each scan point is a 6-tuple matching the tomoScanDevice member order:
        (theta, shutter_demand, translation, optimize_flag, image_key, image_index)
    where shutter_demand is 1=open / 0=closed / 2=no-change, and image_key
    uses the module-level NXtomo constants (dark/flat/projection).

    pattern 'default' (alias 'DFPFD') brackets the projections with dark and
    flat sub-sequences at both ends; 'PFD' skips the leading darks/flats.
    Any other pattern yields an empty list (with a printed warning).
    """
    numberSteps = len(theta_points) - 1
    optimizeBeamNo = 0
    optimizeBeamYes = 1
    shutterOpen = 1
    shutterClosed = 0
    shutterNoChange = 2
    scan_points = []
    if pattern == 'default' or pattern == 'DFPFD':
        print "Using scan-point pattern:", pattern
        theta_pos = theta_points[0]
        index = 0
        #Added shutterNoChange state for the shutter. The scan points are added using the (pseudo) ternary operator,
        #if index is 0 then the shutterPosition is added to the scan point, else shutterNoChange is added to scan points.
        for d in range(imagesPerDark):
            scan_points.append((theta_pos, [shutterClosed, shutterNoChange][d != 0], inBeamPosition, optimizeBeamNo, image_key_dark, index)) #dark
            index = index + 1
        for f in range(imagesPerFlat):
            scan_points.append((theta_pos, [shutterOpen, shutterNoChange][f != 0], outOfBeamPosition, optimizeBeamNo, image_key_flat, index)) #flat
            index = index + 1
        scan_points.append((theta_pos, shutterOpen, inBeamPosition, optimizeBeamNo, image_key_project, index)) #first
        index = index + 1
        imageSinceDark = 1
        imageSinceFlat = 1
        optimizeBeam = 0
        for i in range(numberSteps):
            theta_pos = theta_points[i + 1]
            scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], inBeamPosition, optimizeBeamNo, image_key_project, index))#main image
            index = index + 1
            # Interleave flat-field sub-sequences every flatFieldInterval projections.
            imageSinceFlat = imageSinceFlat + 1
            if imageSinceFlat == flatFieldInterval and flatFieldInterval != 0:
                for f in range(imagesPerFlat):
                    scan_points.append((theta_pos, [shutterOpen, shutterNoChange][f != 0], outOfBeamPosition, optimizeBeamNo, image_key_flat, index))
                    index = index + 1
                imageSinceFlat = 0
            # Interleave dark-field sub-sequences every darkFieldInterval projections.
            imageSinceDark = imageSinceDark + 1
            if imageSinceDark == darkFieldInterval and darkFieldInterval != 0:
                for d in range(imagesPerDark):
                    scan_points.append((theta_pos, [shutterClosed, shutterNoChange][d != 0], inBeamPosition, optimizeBeamNo, image_key_dark, index))
                    index = index + 1
                imageSinceDark = 0
            # Insert a beam-optimisation point every optimizeBeamInterval projections.
            optimizeBeam = optimizeBeam + 1
            if optimizeBeam == optimizeBeamInterval and optimizeBeamInterval != 0:
                scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], inBeamPosition, optimizeBeamYes, image_key_project, index))
                index = index + 1
                optimizeBeam = 0
        #add dark and flat only if not done in last steps
        if imageSinceFlat != 0:
            for f in range(imagesPerFlat):
                scan_points.append((theta_pos, [shutterOpen, shutterNoChange][f != 0], outOfBeamPosition, optimizeBeamNo, image_key_flat, index)) #flat
                index = index + 1
        if imageSinceDark != 0:
            for d in range(imagesPerDark):
                scan_points.append((theta_pos, [shutterClosed, shutterNoChange][d != 0], inBeamPosition, optimizeBeamNo, image_key_dark, index)) #dark
                index = index + 1
    elif pattern == 'PFD':
        print "Using scan-point pattern:", pattern
        theta_pos = theta_points[0]
        index = 0
        # Don't take any dark or flat images at the beginning
        scan_points.append((theta_pos, shutterOpen, inBeamPosition, optimizeBeamNo, image_key_project, index)) #first
        index = index + 1
        imageSinceDark = 1
        imageSinceFlat = 1
        optimizeBeam = 0
        for i in range(numberSteps):
            theta_pos = theta_points[i + 1]
            scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], inBeamPosition, optimizeBeamNo, image_key_project, index))#main image
            index = index + 1
            imageSinceFlat = imageSinceFlat + 1
            if imageSinceFlat == flatFieldInterval and flatFieldInterval != 0:
                for f in range(imagesPerFlat):
                    scan_points.append((theta_pos, [shutterOpen, shutterNoChange][f != 0], outOfBeamPosition, optimizeBeamNo, image_key_flat, index))
                    index = index + 1
                imageSinceFlat = 0
            imageSinceDark = imageSinceDark + 1
            if imageSinceDark == darkFieldInterval and darkFieldInterval != 0:
                for d in range(imagesPerDark):
                    scan_points.append((theta_pos, [shutterClosed, shutterNoChange][d != 0], inBeamPosition, optimizeBeamNo, image_key_dark, index))
                    index = index + 1
                imageSinceDark = 0
            optimizeBeam = optimizeBeam + 1
            if optimizeBeam == optimizeBeamInterval and optimizeBeamInterval != 0:
                scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], inBeamPosition, optimizeBeamYes, image_key_project, index))
                index = index + 1
                optimizeBeam = 0
        #add dark and flat only if not done in last steps
        if imageSinceFlat != 0:
            for f in range(imagesPerFlat):
                scan_points.append((theta_pos, [shutterOpen, shutterNoChange][f != 0], outOfBeamPosition, optimizeBeamNo, image_key_flat, index)) #flat
                index = index + 1
        if imageSinceDark != 0:
            for d in range(imagesPerDark):
                scan_points.append((theta_pos, [shutterClosed, shutterNoChange][d != 0], inBeamPosition, optimizeBeamNo, image_key_dark, index)) #dark
                index = index + 1
    else:
        print "Unsupported scan-point pattern:", pattern
    return scan_points
def addNXTomoSubentry(scanObject, tomography_detector_name, tomography_theta_name):
    """Attach an NXtomo sub-entry writer to the scan's data writer.

    Configures an NXTomoEntryLinkCreator with the NeXus link targets for
    this scan (rotation angle, image key, detector data) and registers it
    as a data-writer extender on scanObject, so the resulting file carries
    a standards-compliant NXtomo entry alongside the raw GDA entry.

    :param scanObject: the scan whose data writer is extended (must not be None)
    :param tomography_detector_name: detector name; selects the detector-data
        link target ("pco4000_dio_hdf", "pco4000_dio_tif" and "pco" are known)
    :param tomography_theta_name: name of the rotation scannable inside
        tomoScanDevice, used to build the rotation-angle link target
    :raises ValueError: if scanObject is None
    """
    if scanObject is None:
        raise ValueError("Input scanObject must not be None")
    nxLinkCreator = NXTomoEntryLinkCreator()
    # Placeholder for targets that have no real source in this scan.
    default_placeholder_target = "entry1:NXentry/scan_identifier:SDS"
    # detector independent items
    nxLinkCreator.setControl_data_target("entry1:NXentry/instrument:NXinstrument/source:NXsource/current:SDS")
    #nxLinkCreator.setInstrument_detector_distance(default_placeholder_target)
    nxLinkCreator.setInstrument_detector_image_key_target("entry1:NXentry/instrument:NXinstrument/tomoScanDevice:NXpositioner/image_key:SDS")
    #nxLinkCreator.setInstrument_detector_x_pixel_size_target(default_placeholder_target)
    #nxLinkCreator.setInstrument_detector_y_pixel_size_target(default_placeholder_target)
    nxLinkCreator.setInstrument_source_target("entry1:NXentry/instrument:NXinstrument/source:NXsource")
    sample_rotation_angle_target = "entry1:NXentry/instrument:NXinstrument/tomoScanDevice:NXpositioner/"
    sample_rotation_angle_target += tomography_theta_name + ":SDS"
    nxLinkCreator.setSample_rotation_angle_target(sample_rotation_angle_target);
    nxLinkCreator.setSample_x_translation_target(default_placeholder_target)
    nxLinkCreator.setSample_y_translation_target(default_placeholder_target)
    nxLinkCreator.setSample_z_translation_target(default_placeholder_target)
    nxLinkCreator.setTitle_target("entry1:NXentry/title:SDS")
    # detector dependent items
    if tomography_detector_name == "pco4000_dio_hdf":
        # external file (leading "!" marks a link into an external HDF file)
        instrument_detector_data_target = "!entry1:NXentry/instrument:NXinstrument/"
        instrument_detector_data_target += tomography_detector_name + ":NXdetector/"
        instrument_detector_data_target += "data:SDS"
        nxLinkCreator.setInstrument_detector_data_target(instrument_detector_data_target)
    elif tomography_detector_name == "pco4000_dio_tif":
        # image filenames
        instrument_detector_data_target = "entry1:NXentry/instrument:NXinstrument/"
        instrument_detector_data_target += tomography_detector_name + ":NXdetector/"
        instrument_detector_data_target += "image_data:SDS"
        nxLinkCreator.setInstrument_detector_data_target(instrument_detector_data_target)
    elif tomography_detector_name == "pco":
        # image filenames
        instrument_detector_data_target = "entry1:NXentry/instrument:NXinstrument/"
        instrument_detector_data_target += tomography_detector_name + ":NXdetector/"
        instrument_detector_data_target += "data_file:NXnote/file_name:SDS"
        nxLinkCreator.setInstrument_detector_data_target(instrument_detector_data_target)
    else:
        print "Default target used for unsupported tomography detector in addNXTomoSubentry: " + tomography_detector_name
        instrument_detector_data_target = default_placeholder_target
        nxLinkCreator.setInstrument_detector_data_target(instrument_detector_data_target)
    nxLinkCreator.afterPropertiesSet()
    # Install the sub-entry writer as an extender on a fresh data writer.
    dataWriter = createDataWriterFromFactory()
    subEntryWriter = NXSubEntryWriter(nxLinkCreator)
    dataWriter.addDataWriterExtender(subEntryWriter)
    scanObject.setDataWriter(dataWriter)
def reportJythonNamespaceMapping():
    """Print the tomography-related Jython namespace mappings.

    Lists, for step scans and fly scans separately, which concrete beamline
    objects the tomography_* names currently resolve to ("object
    undefined!" when a name is unmapped), so beamline staff can verify the
    configuration before running a scan.
    """
    jns = beamline_parameters.JythonNameSpaceMapping()
    # Essential step-scan objects.
    objectOfInterestSTEP = {}
    objectOfInterestSTEP['tomography_theta'] = jns.tomography_theta
    objectOfInterestSTEP['tomography_shutter'] = jns.tomography_shutter
    objectOfInterestSTEP['tomography_translation'] = jns.tomography_translation
    objectOfInterestSTEP['tomography_detector'] = jns.tomography_detector
    # Optional step-scan objects (extra per-point metadata only).
    objectOfInterestSTEP_INFO = {}
    objectOfInterestSTEP_INFO['tomography_camera_stage'] = jns.tomography_camera_stage
    objectOfInterestSTEP_INFO['tomography_sample_stage'] = jns.tomography_sample_stage
    # Essential fly-scan objects.
    objectOfInterestFLY = {}
    objectOfInterestFLY['tomography_shutter'] = jns.tomography_shutter
    objectOfInterestFLY['tomography_translation'] = jns.tomography_translation
    objectOfInterestFLY['tomography_flyscan_theta'] = jns.tomography_flyscan_theta
    objectOfInterestFLY['tomography_flyscan_det'] = jns.tomography_flyscan_det
    objectOfInterestFLY['tomography_flyscan_flat_dark_det'] = jns.tomography_flyscan_flat_dark_det
    msg = "\n These mappings can be changed by editing a file named jythonNamespaceMapping_live, "
    msg += "\n located in GDA Client under Scripts: Config (this can be done by beamline staff)."
    msg += "\n Note that PRIMARY SETTINGS for the desired type of tomography scan should normally"
    msg += "\n not include any dummy objects because they are typically used only for testing."
    print "\n ****** STEP-SCAN PRIMARY SETTINGS (essential for running a tomography step-scan) ******"
    idx=1
    for key, val in objectOfInterestSTEP.iteritems():
        name = "object undefined!"
        if val is not None:
            name = str(val.getName())
        # Backticks are the Jython/Python-2 repr() shorthand.
        print `idx` + "."+ key + ' = ' + name
        idx += 1
    #print msg
    print "\n ****** STEP-SCAN SECONDARY SETTINGS (for additional, per-scan-point data, ie NOT essential for running a tomography step-scan) ******"
    idx=1
    for key, val in objectOfInterestSTEP_INFO.iteritems():
        name = "object undefined!"
        if val is not None:
            name = str(val.getName())
        print `idx` + "."+ key + ' = ' + name
        idx += 1
    #print msg
    print "\n ****** FLY-SCAN PRIMARY SETTINGS (essential for running a tomography fly-scan) ******"
    idx=1
    for key, val in objectOfInterestFLY.iteritems():
        name = "object undefined!"
        if val is not None:
            name = str(val.getName())
        print `idx` + "."+ key + ' = ' + name
        idx += 1
    print msg
def reportTomo():
    """Convenience alias for reportJythonNamespaceMapping()."""
    return reportJythonNamespaceMapping()
class tomoScan_positions(ScanPositionProvider):
    """Position provider holding a precomputed list of tomography scan
    points together with the parameters they were generated from (kept
    only for the human-readable summary produced by __str__).
    """
    def __init__(self, start, stop, step, darkFieldInterval, imagesPerDark, flatFieldInterval, imagesPerFlat,
                 inBeamPosition, outOfBeamPosition, optimizeBeamInterval, points):
        self.start, self.stop, self.step = start, stop, step
        self.darkFieldInterval, self.imagesPerDark = darkFieldInterval, imagesPerDark
        self.flatFieldInterval, self.imagesPerFlat = flatFieldInterval, imagesPerFlat
        self.inBeamPosition, self.outOfBeamPosition = inBeamPosition, outOfBeamPosition
        self.optimizeBeamInterval = optimizeBeamInterval
        self.points = points
    def get(self, index):
        # Scan framework pulls points one at a time by index.
        return self.points[index]
    def size(self):
        return len(self.points)
    def __str__(self):
        summary_fmt = "Start: %f Stop: %f Step: %f Darks every:%d imagesPerDark:%d Flats every:%d imagesPerFlat:%d InBeamPosition:%f OutOfBeamPosition:%f Optimize every:%d numImages %d "
        values = (self.start, self.stop, self.step, self.darkFieldInterval, self.imagesPerDark,
                  self.flatFieldInterval, self.imagesPerFlat, self.inBeamPosition,
                  self.outOfBeamPosition, self.optimizeBeamInterval, self.size())
        return summary_fmt % values
    def toString(self):
        # Java-style accessor expected by the GDA framework.
        return self.__str__()
# NXtomo image_key values: tag each acquired frame with its role so
# reconstruction can separate darks, flats and projections.
image_key_dark = 2
image_key_flat = 1 # also known as bright
image_key_project = 0 # also known as sample
"""
perform a simple tomography scan
"""
def tomoScan(description, inBeamPosition, outOfBeamPosition, exposureTime=1., start=0., stop=180., step=0.1, darkFieldInterval=0, flatFieldInterval=0,
             imagesPerDark=10, imagesPerFlat=10, optimizeBeamInterval=0, pattern="default", tomoRotationAxis=0, addNXEntry=True, autoAnalyse=True, additionalScannables=[]):
    """
    Function to collect a tomography step scan
    Arguments:
    description - description of the scan or the sample that is being scanned. This is generally user-specific information that may be used to map to this scan later and is available in the NeXus file)
    inBeamPosition - position of X drive to move sample into the beam to take a projection
    outOfBeamPosition - position of X drive to move sample out of the beam to take a flat field image
    exposureTime - exposure time in seconds (default=1.0)
    start - first rotation angle (default=0.0)
    stop - last rotation angle (default=180.0)
    step - rotation step size (default=0.1)
    darkFieldInterval - number of projections between each dark-field sub-sequence.
        NOTE: at least 1 dark is ALWAYS taken both at the start and end of the scan provided imagesPerDark>0
        (default=0: use this value if you DON'T want to take any darks between projections)
    flatFieldInterval - number of projections between each flat-field sub-sequence.
        NOTE: at least 1 flat is ALWAYS taken both at the start and end the scan provided imagesPerFlat>0
        (default=0: use this value if you DON'T want to take any flats between projections)
    imagesPerDark - number of images to be taken for each dark-field sub-sequence (default=10)
    imagesPerFlat - number of images to be taken for each flat-field sub-sequence (default=10)
    optimizeBeamInterval - number of projections between beam-optimisation points (default=0: never optimize)
    pattern - scan-point pattern, 'default'/'DFPFD' or 'PFD' (see generateScanPoints)
    tomoRotationAxis - approximate centre of rotation, recorded as scan metadata (default=0)
    addNXEntry - if True, add an NXtomo sub-entry to the data file (default=True)
    autoAnalyse - if True, launch the reconstruction script when the scan ends (default=True)
    additionalScannables - extra scannables to record at each scan point (default=[])
    General scan sequence is: D, F, P,..., P, F, D
    where D stands for dark field, F - for flat field, and P - for projection.
    """
    # Remember the configured data format so it can be restored in `finally`.
    dataFormat = LocalProperties.get("gda.data.scan.datawriter.dataFormat")
    try:
        darkFieldInterval = int(darkFieldInterval)
        flatFieldInterval = int(flatFieldInterval)
        optimizeBeamInterval = int(optimizeBeamInterval)
        # Resolve all required beamline objects from the Jython namespace;
        # fail fast with a NameError if any mapping is missing.
        jns = beamline_parameters.JythonNameSpaceMapping(InterfaceProvider.getJythonNamespace())
        tomography_theta = jns.tomography_theta
        if tomography_theta is None:
            raise NameError("tomography_theta is not defined in Jython namespace")
        tomography_shutter = jns.tomography_shutter
        if tomography_shutter is None:
            raise NameError("tomography_shutter is not defined in Jython namespace")
        tomography_translation = jns.tomography_translation
        if tomography_translation is None:
            raise NameError("tomography_translation is not defined in Jython namespace")
        tomography_detector = jns.tomography_detector
        if tomography_detector is None:
            raise NameError("tomography_detector is not defined in Jython namespace")
        tomography_optimizer = jns.tomography_optimizer
        if tomography_optimizer is None:
            raise NameError("tomography_optimizer is not defined in Jython namespace")
        tomography_time = jns.tomography_time
        if tomography_time is None:
            raise NameError("tomography_time is not defined in Jython namespace")
        tomography_beammonitor = jns.tomography_beammonitor
        if tomography_beammonitor is None:
            raise NameError("tomography_beammonitor is not defined in Jython namespace")
        tomography_camera_stage = jns.tomography_camera_stage
        if tomography_camera_stage is None:
            raise NameError("tomography_camera_stage is not defined in Jython namespace")
        tomography_sample_stage = jns.tomography_sample_stage
        if tomography_sample_stage is None:
            raise NameError("tomography_sample_stage is not defined in Jython namespace")
        tomo_additional_scannables = jns.tomography_additional_scannables
        if tomo_additional_scannables is None:
            raise NameError("tomo_additional_scannables is not defined in Jython namespace")
        # Soft scannables that carry the image index and NXtomo image key
        # through the scan alongside the motors.
        index = SimpleScannable()
        index.setCurrentPosition(0.0)
        index.setInputNames(["imageNumber"])
        index.setName("imageNumber")
        index.configure()
        image_key = SimpleScannable()
        image_key.setCurrentPosition(0.0)
        image_key.setInputNames(["image_key"])
        image_key.setName("image_key")
        image_key.configure()
        tomoScanDevice = make_tomoScanDevice(tomography_theta, tomography_shutter,
                                             tomography_translation, tomography_optimizer, image_key, index)
        # return tomoScanDevice
        #generate list of positions
        numberSteps = ScannableUtils.getNumberSteps(tomography_theta, start, stop, step)
        theta_points = []
        theta_points.append(start)
        previousPoint = start
        for i in range(numberSteps):
            nextPoint = ScannableUtils.calculateNextPoint(previousPoint, step);
            theta_points.append(nextPoint)
            previousPoint = nextPoint
        #generateScanPoints
        # NOTE(review): the scan-point generation below duplicates
        # generateScanPoints(pattern='default'); the duplicate result is
        # cross-checked against that function further down.
        optimizeBeamNo = 0
        optimizeBeamYes = 1
        shutterOpen = 1
        shutterClosed = 0
        shutterNoChange = 2
        scan_points = []
        theta_pos = theta_points[0]
        index = 0
        #Added shutterNoChange state for the shutter. The scan points are added using the (pseudo) ternary operator,
        #if index is 0 then the shutterPosition is added to the scan point, else shutterNoChange is added to scan points.
        for i in range(imagesPerDark):
            scan_points.append((theta_pos, [shutterClosed, shutterNoChange][i != 0], inBeamPosition, optimizeBeamNo, image_key_dark, index)) #dark
            index = index + 1
        for i in range(imagesPerFlat):
            scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], outOfBeamPosition, optimizeBeamNo, image_key_flat, index)) #flat
            index = index + 1
        scan_points.append((theta_pos, shutterOpen, inBeamPosition, optimizeBeamNo, image_key_project, index)) #first
        index = index + 1
        imageSinceDark = 1
        imageSinceFlat = 1
        optimizeBeam = 0
        for i in range(numberSteps):
            theta_pos = theta_points[i + 1]
            scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], inBeamPosition, optimizeBeamNo, image_key_project, index))#main image
            index = index + 1
            imageSinceFlat = imageSinceFlat + 1
            if imageSinceFlat == flatFieldInterval and flatFieldInterval != 0:
                for i in range(imagesPerFlat):
                    scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], outOfBeamPosition, optimizeBeamNo, image_key_flat, index))
                    index = index + 1
                imageSinceFlat = 0
            imageSinceDark = imageSinceDark + 1
            if imageSinceDark == darkFieldInterval and darkFieldInterval != 0:
                for i in range(imagesPerDark):
                    scan_points.append((theta_pos, [shutterClosed, shutterNoChange][i != 0], inBeamPosition, optimizeBeamNo, image_key_dark, index))
                    index = index + 1
                imageSinceDark = 0
            optimizeBeam = optimizeBeam + 1
            if optimizeBeam == optimizeBeamInterval and optimizeBeamInterval != 0:
                scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], inBeamPosition, optimizeBeamYes, image_key_project, index))
                index = index + 1
                optimizeBeam = 0
        #add dark and flat only if not done in last steps
        if imageSinceFlat != 0:
            for i in range(imagesPerFlat):
                scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], outOfBeamPosition, optimizeBeamNo, image_key_flat, index)) #flat
                index = index + 1
        if imageSinceDark != 0:
            for i in range(imagesPerDark):
                scan_points.append((theta_pos, [shutterClosed, shutterNoChange][i != 0], inBeamPosition, optimizeBeamNo, image_key_dark, index)) #dark
                index = index + 1
        # Cross-check the inline generation against generateScanPoints();
        # mismatches are reported but do not abort the scan.
        scan_points1 = generateScanPoints(inBeamPosition, outOfBeamPosition, theta_points, darkFieldInterval, flatFieldInterval,
                                          imagesPerDark, imagesPerFlat, optimizeBeamInterval, pattern=pattern)
        if pattern == 'default' or pattern == 'DFPFD':
            i = 0
            for pt1 in scan_points1:
                pt = scan_points[i]
                if pt1 != pt:
                    print "Mismatch - please tell Kaz about your scan and its arguments!"
                    print "i = ", i
                    print "pt = ", pt
                    print "pt1 = ", pt1
                i += 1
        #return None
        positionProvider = tomoScan_positions(start, stop, step, darkFieldInterval, imagesPerDark, flatFieldInterval, imagesPerFlat, \
                                              inBeamPosition, outOfBeamPosition, optimizeBeamInterval, scan_points)
        scan_args = [tomoScanDevice, positionProvider, tomography_time, tomography_beammonitor, tomography_detector, exposureTime, tomography_camera_stage, tomography_sample_stage]
        #scan_args.append(RotationAxisScannable("approxCOR", tomoRotationAxis))
        #meta_add(RotationAxisScannable("approxCOR", tomoRotationAxis))
        #meta_add("RotationCoord_as_list", [tomoRotationAxis])
        # Record the approximate centre of rotation as scan metadata.
        meta_add("approxCOR", tomoRotationAxis)
        for scannable in additionalScannables:
            scan_args.append(scannable)
        for scannable in tomo_additional_scannables:
            scan_args.append(scannable)
        ''' setting the description provided as the title'''
        if not description == None:
            setTitle(description)
        else :
            setTitle("undefined")
        # NXtomo links require the Nexus writer; switch temporarily if needed
        # (restored in `finally`).
        dataFormat = LocalProperties.get("gda.data.scan.datawriter.dataFormat")
        if not dataFormat == "NexusDataWriter":
            handle_messages.simpleLog("Data format inconsistent. Setting 'gda.data.scan.datawriter.dataFormat' to 'NexusDataWriter'")
            LocalProperties.set("gda.data.scan.datawriter.dataFormat", "NexusDataWriter")
        scanObject = createConcurrentScan(scan_args)
        if addNXEntry:
            addNXTomoSubentry(scanObject, tomography_detector.name, tomography_theta.name)
        scanObject.runScan()
        if autoAnalyse:
            # Kick off the post-scan reconstruction script without waiting.
            lsdp=jns.lastScanDataPoint()
            OSCommandRunner.runNoWait(["/dls_sw/apps/tomopy/tomopy/bin/gda/tomo_at_scan_end_kz", lsdp.currentFilename], OSCommandRunner.LOGOPTION.ALWAYS, None)
        return scanObject;
    except InterruptedException:
        exceptionType, exception, traceback = sys.exc_info()
        handle_messages.log(None, "User interrupted the scan", exceptionType, exception, traceback, False)
        raise InterruptedException("User interrupted the scan")
    except:
        exceptionType, exception, traceback = sys.exc_info()
        handle_messages.log(None, "Error during tomography scan", exceptionType, exception, traceback, False)
        raise Exception("Error during tomography scan", exception)
    finally:
        handle_messages.simpleLog("Data Format reset to the original setting: " + dataFormat)
        LocalProperties.set("gda.data.scan.datawriter.dataFormat", dataFormat)
def tomoScanWithFrames(description, inBeamPosition, outOfBeamPosition, exposureTime=1., start=0., stop=180., step=0.1, darkFieldInterval=0, flatFieldInterval=0,
imagesPerDark=10, imagesPerFlat=10, optimizeBeamInterval=0, pattern="default", nframes=1, tomoRotationAxis=0, addNXEntry=True, autoAnalyse=True, additionalScannables=[]):
"""
Function to collect a tomography step scan with multiple projection frames per scan point
Arguments:
description - description of the scan or the sample that is being scanned. This is generally user-specific information that may be used to map to this scan later and is available in the NeXus file)
inBeamPosition - position of X drive to move sample into the beam to take a projection
outOfBeamPosition - position of X drive to move sample out of the beam to take a flat field image
exposureTime - exposure time in seconds (default=1.0)
start - first rotation angle (default=0.0)
stop - last rotation angle (default=180.0)
step - rotation step size (default=0.1)
darkFieldInterval - number of projection-frame sub-series between each dark-field sub-series.
NOTE: at least 1 dark is ALWAYS taken both at the start and end of the scan, provided imagesPerDark>0
(default=0: use this value if you DON'T want to take any darks between projections)
flatFieldInterval - number of projection-frame sub-series between each flat-field sub-series.
NOTE: at least 1 flat is ALWAYS taken both at the start and end the scan, provided imagesPerFlat>0
(default=0: use this value if you DON'T want to take any flats between projections)
imagesPerDark - number of images to be taken in each dark-field sub-series (default=10)
imagesPerFlat - number of images to be taken in each flat-field sub-series (default=10)
nframes - number of projection frames per angular position (default=1)
General scan sequence is: D, F, P,..., P, F, D
where D stands for dark field, F - for flat field, and P - for projection.
"""
dataFormat = LocalProperties.get("gda.data.scan.datawriter.dataFormat")
try:
darkFieldInterval = int(darkFieldInterval)
flatFieldInterval = int(flatFieldInterval)
optimizeBeamInterval = int(optimizeBeamInterval)
image_key_frame = 3
nframes = int(nframes)
if nframes < 1:
nframes = 1
jns = beamline_parameters.JythonNameSpaceMapping(InterfaceProvider.getJythonNamespace())
tomography_theta = jns.tomography_theta
if tomography_theta is None:
raise NameError("tomography_theta is not defined in Jython namespace")
tomography_shutter = jns.tomography_shutter
if tomography_shutter is None:
raise NameError("tomography_shutter is not defined in Jython namespace")
tomography_translation = jns.tomography_translation
if tomography_translation is None:
raise NameError("tomography_translation is not defined in Jython namespace")
tomography_detector = jns.tomography_detector
if tomography_detector is None:
raise NameError("tomography_detector is not defined in Jython namespace")
tomography_optimizer = jns.tomography_optimizer
if tomography_optimizer is None:
raise NameError("tomography_optimizer is not defined in Jython namespace")
tomography_time = jns.tomography_time
if tomography_time is None:
raise NameError("tomography_time is not defined in Jython namespace")
tomography_beammonitor = jns.tomography_beammonitor
if tomography_beammonitor is None:
raise NameError("tomography_beammonitor is not defined in Jython namespace")
tomography_camera_stage = jns.tomography_camera_stage
if tomography_camera_stage is None:
raise NameError("tomography_camera_stage is not defined in Jython namespace")
tomography_sample_stage = jns.tomography_sample_stage
if tomography_sample_stage is None:
raise NameError("tomography_sample_stage is not defined in Jython namespace")
tomo_additional_scannables = jns.tomography_additional_scannables
if tomo_additional_scannables is None:
raise NameError("tomo_additional_scannables is not defined in Jython namespace")
index = SimpleScannable()
index.setCurrentPosition(0.0)
index.setInputNames(["imageNumber"])
index.setName("imageNumber")
index.configure()
image_key = SimpleScannable()
image_key.setCurrentPosition(0.0)
image_key.setInputNames(["image_key"])
image_key.setName("image_key")
image_key.configure()
tomoScanDevice = make_tomoScanDevice(tomography_theta, tomography_shutter,
tomography_translation, tomography_optimizer, image_key, index)
# return tomoScanDevice
#generate list of positions
numberSteps = ScannableUtils.getNumberSteps(tomography_theta, start, stop, step)
theta_points = []
theta_points.append(start)
previousPoint = start
for i in range(numberSteps):
nextPoint = ScannableUtils.calculateNextPoint(previousPoint, step);
theta_points.append(nextPoint)
previousPoint = nextPoint
#generateScanPoints
optimizeBeamNo = 0
optimizeBeamYes = 1
shutterOpen = 1
shutterClosed = 0
shutterNoChange = 2
scan_points = []
theta_pos = theta_points[0]
index = 0
#Added shutterNoChange state for the shutter. The scan points are added using the (pseudo) ternary operator,
#if index is 0 then the shutterPosition is added to the scan point, else shutterNoChange is added to scan points.
for i in range(imagesPerDark):
scan_points.append((theta_pos, [shutterClosed, shutterNoChange][i != 0], inBeamPosition, optimizeBeamNo, image_key_dark, index)) #dark
index = index + 1
for i in range(imagesPerFlat):
scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], outOfBeamPosition, optimizeBeamNo, image_key_flat, index)) #flat
index = index + 1
for frm in range(nframes):
scan_points.append((theta_pos, shutterOpen, inBeamPosition, optimizeBeamNo, image_key_project if frm==0 else image_key_frame, index)) #first
index = index + 1
imageSinceDark = 1
imageSinceFlat = 1
optimizeBeam = 0
for i in range(numberSteps):
theta_pos = theta_points[i + 1]
for frm in range(nframes):
scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], inBeamPosition, optimizeBeamNo, image_key_project if frm==0 else image_key_frame, index))#main image
index = index + 1
imageSinceFlat = imageSinceFlat + 1
if imageSinceFlat == flatFieldInterval and flatFieldInterval != 0:
for i in range(imagesPerFlat):
scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], outOfBeamPosition, optimizeBeamNo, image_key_flat, index))
index = index + 1
imageSinceFlat = 0
imageSinceDark = imageSinceDark + 1
if imageSinceDark == darkFieldInterval and darkFieldInterval != 0:
for i in range(imagesPerDark):
scan_points.append((theta_pos, [shutterClosed, shutterNoChange][i != 0], inBeamPosition, optimizeBeamNo, image_key_dark, index))
index = index + 1
imageSinceDark = 0
optimizeBeam = optimizeBeam + 1
if optimizeBeam == optimizeBeamInterval and optimizeBeamInterval != 0:
scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], inBeamPosition, optimizeBeamYes, image_key_project, index))
index = index + 1
optimizeBeam = 0
#add dark and flat only if not done in last steps
if imageSinceFlat != 0:
for i in range(imagesPerFlat):
scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], outOfBeamPosition, optimizeBeamNo, image_key_flat, index)) #flat
index = index + 1
if imageSinceDark != 0:
for i in range(imagesPerDark):
scan_points.append((theta_pos, [shutterClosed, shutterNoChange][i != 0], inBeamPosition, optimizeBeamNo, image_key_dark, index)) #dark
index = index + 1
# scan_points1 = generateScanPoints(inBeamPosition, outOfBeamPosition, theta_points, darkFieldInterval, flatFieldInterval,
# imagesPerDark, imagesPerFlat, optimizeBeamInterval, pattern=pattern)
# if pattern == 'default' or pattern == 'DFPFD':
# i = 0
# for pt1 in scan_points1:
# pt = scan_points[i]
# if pt1 != pt:
# print "Mismatch - please tell Kaz about your scan and its arguments!"
# print "i = ", i
# print "pt = ", pt
# print "pt1 = ", pt1
# i += 1
#return None
positionProvider = tomoScan_positions(start, stop, step, darkFieldInterval, imagesPerDark, flatFieldInterval, imagesPerFlat, \
inBeamPosition, outOfBeamPosition, optimizeBeamInterval, scan_points)
scan_args = [tomoScanDevice, positionProvider, tomography_time, tomography_beammonitor, tomography_detector, exposureTime, tomography_camera_stage, tomography_sample_stage]
#scan_args.append(RotationAxisScannable("approxCOR", tomoRotationAxis))
#meta_add(RotationAxisScannable("approxCOR", tomoRotationAxis))
#meta_add("RotationCoord_as_list", [tomoRotationAxis])
meta_add("approxCOR", tomoRotationAxis)
for scannable in additionalScannables:
scan_args.append(scannable)
for scannable in tomo_additional_scannables:
scan_args.append(scannable)
''' setting the description provided as the title'''
if not description == None:
setTitle(description)
else :
setTitle("undefined")
dataFormat = LocalProperties.get("gda.data.scan.datawriter.dataFormat")
if not dataFormat == "NexusDataWriter":
handle_messages.simpleLog("Data format inconsistent. Setting 'gda.data.scan.datawriter.dataFormat' to 'NexusDataWriter'")
LocalProperties.set("gda.data.scan.datawriter.dataFormat", "NexusDataWriter")
scanObject = createConcurrentScan(scan_args)
if addNXEntry:
addNXTomoSubentry(scanObject, tomography_detector.name, tomography_theta.name)
scanObject.runScan()
if autoAnalyse:
lsdp=jns.lastScanDataPoint()
OSCommandRunner.runNoWait(["/dls_sw/apps/tomopy/tomopy/bin/gda/tomo_at_scan_end_kz", lsdp.currentFilename], OSCommandRunner.LOGOPTION.ALWAYS, None)
return scanObject;
except InterruptedException:
exceptionType, exception, traceback = sys.exc_info()
handle_messages.log(None, "User interrupted the scan", exceptionType, exception, traceback, False)
raise InterruptedException("User interrupted the scan")
except:
exceptionType, exception, traceback = sys.exc_info()
handle_messages.log(None, "Error during tomography scan", exceptionType, exception, traceback, False)
raise Exception("Error during tomography scan", exception)
finally:
handle_messages.simpleLog("Data Format reset to the original setting: " + dataFormat)
LocalProperties.set("gda.data.scan.datawriter.dataFormat", dataFormat)
def __test1_tomoScan():
    """Self-test: tomoScan with dark- and flat-field intervals of 5.

    Prints an error if the last scan data point does not match the expected
    values (theta 180.0 and 54.0 in the 5th position for this configuration).
    """
    jns = beamline_parameters.JythonNameSpaceMapping()
    sc = tomoScan(step=5, darkFieldInterval=5, flatFieldInterval=5,
              inBeamPosition=0., outOfBeamPosition=10., exposureTime=1.)
    # Jython/Python 2: backticks are shorthand for repr().
    print `jns`
    lsdp = jns.lastScanDataPoint()
    positions = lsdp.getPositionsAsDoubles()
    if positions[0] != 180. or positions[4] != 54.:
        print "Error - points are not correct :" + `positions`
    return sc
def __test2_tomoScan():
    """Self-test: tomoScan with dark interval 5 and flat fields disabled.

    Expects theta 180.0 and 47.0 in the 5th position of the last data point.
    """
    jns = beamline_parameters.JythonNameSpaceMapping()
    sc = tomoScan(step=5, darkFieldInterval=5, flatFieldInterval=0,
              inBeamPosition=0., outOfBeamPosition=10., exposureTime=1.)
    lsdp = jns.lastScanDataPoint()
    positions = lsdp.getPositionsAsDoubles()
    if positions[0] != 180. or positions[4] != 47.:
        print "Error - points are not correct :" + `positions`
    return sc
def __test3_tomoScan():
    """Self-test: tomoScan with dark fields disabled and flat interval 5.

    Expects theta 180.0 and 47.0 in the 5th position of the last data point.
    """
    jns = beamline_parameters.JythonNameSpaceMapping()
    sc = tomoScan(step=5, darkFieldInterval=0, flatFieldInterval=5,
              inBeamPosition=0., outOfBeamPosition=10., exposureTime=1.)
    lsdp = jns.lastScanDataPoint()
    positions = lsdp.getPositionsAsDoubles()
    if positions[0] != 180. or positions[4] != 47.:
        print "Error - points are not correct :" + `positions`
    return sc
def __test4_tomoScan():
    """Self-test: tomoScan with both dark and flat fields disabled.

    Expects theta 180.0 and 40.0 in the 5th position of the last data point.
    """
    jns = beamline_parameters.JythonNameSpaceMapping()
    sc = tomoScan(step=5, darkFieldInterval=0, flatFieldInterval=0,
              inBeamPosition=0., outOfBeamPosition=10., exposureTime=1.)
    lsdp = jns.lastScanDataPoint()
    positions = lsdp.getPositionsAsDoubles()
    if positions[0] != 180. or positions[4] != 40.:
        print "Error - points are not correct :" + `positions`
    return sc
def __test5_tomoScan():
    """
    Test optimizeBeamInterval=10

    Runs tomoScan with beam optimization every 10 images (darks/flats off)
    and expects theta 180.0 and 43.0 in the 5th position of the last point.
    """
    jns = beamline_parameters.JythonNameSpaceMapping()
    sc = tomoScan(step=5, darkFieldInterval=0, flatFieldInterval=0,
              inBeamPosition=0., outOfBeamPosition=10., exposureTime=1., optimizeBeamInterval=10)
    lsdp = jns.lastScanDataPoint()
    positions = lsdp.getPositionsAsDoubles()
    if positions[0] != 180. or positions[4] != 43.:
        print "Error - points are not correct :" + `positions`
    return sc
def test_all():
    """Run the full tomoScan self-test suite.

    Fix: __test5_tomoScan (optimizeBeamInterval coverage) was defined but
    never invoked from this suite; it is now included so all five checks run.
    """
    __test1_tomoScan()
    __test2_tomoScan()
    __test3_tomoScan()
    __test4_tomoScan()
    __test5_tomoScan()
def standardtomoScan():
    """Run a standard tomography scan (1 deg steps, flats every 20 images).

    Prints an error if the last scan point does not match the expected
    values; returns the scan object.
    """
    jns = beamline_parameters.JythonNameSpaceMapping()
    sc = tomoScan(step=1, darkFieldInterval=0, flatFieldInterval=20,
              inBeamPosition=0., outOfBeamPosition=10., exposureTime=1.)
    lsdp = jns.lastScanDataPoint()
    positions = lsdp.getPositionsAsDoubles()
    if positions[0] != 180. or positions[4] != 40.:
        print "Error - points are not correct :" + `positions`
    return sc
class RotationAxisScannable(ScannableBase):
    """Read-only scannable reporting a fixed value.

    Presumably used to record the approximate centre of rotation with the
    scan metadata (see the commented-out "approxCOR" usage above) -- confirm.
    Move requests are accepted but ignored.
    """
    def __init__(self, name, value):
        self.name = name
        self.value = value
#        self.count = 0
        pass
    def isBusy(self):
        # Never moves, so never busy.
        return False
    def rawAsynchronousMoveTo(self, new_position):
        # Ignore moves: the reported position is constant.
        return
    def rawGetPosition(self):
#        if self.count > 0:
#            return None
#        self.count = 1
        return self.value
|
983,366 | 991f5811385a548f8c8f57a48505af7a387ee8f4 | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# --- Interactive mortgage amortization calculator --------------------------
# Reads loan terms from stdin, computes the fixed monthly payment with the
# standard annuity formula, accumulates per-month interest and remaining
# balance, prints a summary and plots the monthly interest over the term.
sales_price = float(input('Please enter the cost of the house in US$: '))
down_payment = float(input('Please enter the amount of down payment as a porcentage of Sales Price. eg 10 for a 10%: '))
loan_amount = sales_price*(1-down_payment/100)
mortgage_time = float(input('Please enter the amount of time of you mortgage loan: eg 10 for 10 years: '))
loan_term = int(12*mortgage_time)
interest_rate = float(input('Please enter loan interest rate: eg 2 for 2%: '))
# R: monthly growth factor; X: fixed payment from the closed-form annuity
# formula X = P * R^n * (1-R) / (1-R^n).
R = 1 + (interest_rate)/(12*100)
X = loan_amount *(R**loan_term)* (1-R)/ (1-R**loan_term)
monthly_interestrate = []
monthly_balance = []
for i in range(1,loan_term +1):
    Interest = loan_amount*(R-1)              # interest accrued this month
    loan_amount = loan_amount - (X-Interest)  # pay down the principal
    monthly_interestrate = np.append(monthly_interestrate,Interest)
    monthly_balance = np.append(monthly_balance, loan_amount)
print('The home sales price is: = '+ str('$') + str(sales_price))
print('The down payment as a percentage of the sales price is: = ' + str(down_payment) + str(' %'))
# loan_amount was mutated in the loop above, so the original principal is
# recomputed here from sales price and down payment.
print('The Loan Amount is: = ' + str(sales_price*(1-down_payment/100)) + str(' $'))
print('The interest rate on annual percentage basis is: = ' + str(interest_rate) + str(' %'))
print('The duration of this loan in months: ' + str(loan_term) + str('months'))
print('Monthly payment for this mortgage(P & I) is: = ' + str('$') + str(np.round(X,2)) )
print('Total interest paid over life cycle of the loan is: = ' + str('$') + str(np.round(np.sum(monthly_interestrate),2)))
#produce visualization of monthly loan balance and interest
# Bug fix: the original call was plt.plot(range,(loan_term+1), ...) -- it
# passed the `range` builtin itself plus a stray scalar, raising at runtime.
# The x axis is the month number 1..loan_term.
plt.plot(range(1, loan_term + 1), monthly_interestrate, 'r', lw=2)
plt.xlabel('month')
plt.ylabel('monthly interest ($)')
plt.show()
#plt.plot(range(1, loan_term + 1), monthly_balance, 'b', lw=2)
#plt.xlabel('month')
#plt.ylabel('monthly loan balance ($) ')
#plt.show()
983,367 | 9b0f1a252e45b6da24c935ec4246f951e36ba326 | import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from numpy import dot, multiply, diag, power,ones,average
from scipy.signal import convolve2d
from numpy import pi, exp, sin, cos, cosh, tanh, real, imag
from numpy.linalg import inv, eig, pinv
from scipy.linalg import svd, svdvals
from scipy.integrate import odeint, ode, complex_ode
from warnings import warn
import glob,sys,os
from scipy import array,log2,shape, argsort,loadtxt
from numpy.lib.stride_tricks import as_strided as ast
from itertools import product
# --- Run configuration and global plotting style ---------------------------
Rfactor=1       # scale factor; not referenced elsewhere in this script
inc_full0=200   # last increment included in the "full" (reference) curve
inc_test0=140   # last increment included in the "test" (DMD input) set
num_pred=20     # number of time points reconstructed by the DMD prediction
import matplotlib as mpl
import numpy as np
from scipy.stats import gaussian_kde
# Global matplotlib styling applied to every figure produced below.
mpl.rc('lines', linewidth=1, color='black')
mpl.rc('font', size=20,family='serif')
mpl.rc('text',color='black')
mpl.rcParams['xtick.major.size']=16
mpl.rcParams['xtick.minor.size']=10
mpl.rcParams['xtick.labelsize']=20
mpl.rcParams['ytick.labelsize']=20
mpl.rcParams['ytick.major.size']=16
mpl.rcParams['ytick.minor.size']=10
mpl.rcParams['grid.linewidth']=2.0
mpl.rcParams['axes.labelsize']=28
mpl.rcParams['legend.fontsize']=20
mpl.rcParams['savefig.dpi']=250
# Marker/line/color cycles available for multi-series plots.
mtype=['o','s','>','<','^','v','p','*','h','D','x','H','.']
ltype=['-','--','-.','-','--','-.','--','-','-.']
col=['b','g','r','c','m','y','brown','cyan','black']
G=1e11   # strain-to-stress scale used in DislocationState (Pa assumed -- TODO confirm)
def DislocationState(f):
    """Load a 2-D field from text file *f*.

    Returns (strain_zz, sigma_zz, shape): a copy of the raw field, the field
    scaled by the module-level constant G (strain -> stress), and its shape.
    """
    field = loadtxt(f)
    strain_zz = field.copy()
    sigma_zz = strain_zz.copy() * G
    return strain_zz, sigma_zz, shape(field)
def Make2DImageField(z,inc):
    """Render 2-D field *z* and save it as Fig5_damage_<inc>.png.

    Axes are hidden, a colorbar is added, and the (figure, axes) pair is
    returned. The figure is closed after saving, consistent with the sibling
    Make2DImageSigma/Strain/Texture helpers.
    """
    fig=plt.figure()
    ax1=fig.add_subplot(111)
    mpb=ax1.imshow(z)
    plt.axis('off')
    ax1.set_yticklabels([])
    ax1.set_xticklabels([])
    #colorbar_ax=fig.add_axes([0.7,0.1,0.05,0.8])
    fig.colorbar(mpb)
    fig.savefig('Fig5_damage_'+str(inc).rjust(5,'0')+'.png', bbox_inches='tight', pad_inches = 0, transparent=True)
    # Bug fix: unlike its siblings, this helper never closed its figure, so a
    # long run accumulated open matplotlib figures (memory leak / warnings).
    plt.close(fig)
    return fig,ax1
def Make2DImageSigma(z,inc):
    """Save 2-D stress field *z* as Fig5_sigma_<inc>.png (axes hidden, colorbar on)."""
    fig=plt.figure()
    ax1=fig.add_subplot(111)
    mpb=ax1.imshow(z)
    plt.axis('off')
    ax1.set_yticklabels([])
    ax1.set_xticklabels([])
    #colorbar_ax=fig.add_axes([0.7,0.1,0.05,0.8])
    fig.colorbar(mpb)
    fig.savefig('Fig5_sigma_'+str(inc).rjust(5,'0')+'.png', bbox_inches='tight', pad_inches = 0, transparent=True)
    plt.close(fig)
    # NOTE: the figure is closed before being returned, so the handles are
    # only useful for inspection, not further drawing.
    return fig,ax1
def Make2DImageStrain(z,inc):
    """Save 2-D strain field *z* as Fig5_strain_<inc>.png (axes hidden, colorbar on)."""
    fig=plt.figure()
    ax1=fig.add_subplot(111)
    mpb=ax1.imshow(z)
    plt.axis('off')
    ax1.set_yticklabels([])
    ax1.set_xticklabels([])
    #colorbar_ax=fig.add_axes([0.7,0.1,0.05,0.8])
    fig.colorbar(mpb)
    fig.savefig('Fig5_strain_'+str(inc).rjust(5,'0')+'.png', bbox_inches='tight', pad_inches = 0, transparent=True)
    plt.close(fig)
    # NOTE: figure is closed before return; handles are inspection-only.
    return fig,ax1
def Make2DImageTexture(z,inc):
    """Save field *z* (scaled x10, gist_ncar colormap) as Fig5_texture_<inc>.png.

    Unlike the sigma/strain helpers, no colorbar is drawn.
    """
    fig=plt.figure()
    ax1=fig.add_subplot(111)
    # Local import keeps the colormap dependency out of module scope.
    from matplotlib import cm
    mpb=ax1.imshow(z*10,cmap=cm.gist_ncar,alpha=0.9)
    plt.axis('off')
    ax1.set_yticklabels([])
    ax1.set_xticklabels([])
    #colorbar_ax=fig.add_axes([0.7,0.1,0.05,0.8])
    #fig.colorbar(mpb)
    fig.savefig('Fig5_texture_'+str(inc).rjust(5,'0')+'.png', bbox_inches='tight', pad_inches = 0, transparent=True)
    plt.close(fig)
    return fig,ax1
def MakeStressStrainPlot(s_test, e_test, s_full, e_full, e_pred , inc):
    """Plot test and full stress-strain curves and save Fig5_s-e-d_<inc>.png.

    Left axis (blue): average stress vs. strain; right twin axis (red) is
    styled for the strain-invariant series but its data plots are currently
    commented out. NOTE(review): the e_pred argument is not used in the body
    (e_pred2/s_pred2 are aliased to the *full* series) -- confirm intent.
    """
    fig=plt.figure()
    ax=fig.add_subplot(111)
    ax.plot(e_test, s_test, 's',c='blue',alpha=0.75)
    axt=ax.twinx()
    #axt.plot(e_test, d_test, 's',c='red' ,alpha=0.75)
    ax.plot(e_full, s_full, '-' ,c='blue')
    #axt.plot(e_full, d_full, '-',c='maroon',lw=1)
    #ax.plot(e_full, d_full, '-',c='maroon',lw=1,alpha=0.45,label=' ')
    #axt.plot(e_pred, d_pred, '--',c='purple',lw=3,alpha=0.75)
    # Dummy plots with blank labels reserve legend entries.
    ax.plot([0], [0], '--',c='purple',lw=3,alpha=0.75,label=' ')
    #from signalsmooth import smooth
    w0=35
    e_pred2=e_full #smooth(e_full,window_len=w0)
    #print len(e_pred2)
    s_pred2=s_full #smooth(s_full,window_len=w0)
    #d_pred2=smooth(d_full,window_len=w0)
    print(len(s_pred2))
    ax.plot([0], [0], '-',c='red',lw=5,alpha=0.4,label=' ')
    ax.plot(e_pred2, s_pred2, '-',c='navy',lw=5,alpha=0.5,label=' ')
    #axt.plot(e_pred2, 0.95*d_pred2, '-',c='red',lw=5,alpha=0.5)
    ax.set_xlabel(r'$\langle\epsilon\rangle$')
    ax.set_ylabel(r'$\langle \sigma_{zz}\rangle$'+'(MPa)')
    axt.set_ylabel(r'$\langle I_{1}^{(\epsilon)}\rangle$')
    """
    ax.set_xlim((0.0,0.0012))
    axt.set_ylim(bottom=-0.005)
    axt.set_xlim((0.0,0.0012))
    ax.set_ylim(bottom=-0.5)
    """
    #ax.set_xticks(ax.get_xticks()[::2])
    # Color-code each y axis to match its data series.
    axt.spines['right'].set_color('red')
    axt.yaxis.label.set_color('red')
    axt.tick_params(axis='y', colors='red')
    ax.spines['left'].set_color('blue')
    ax.yaxis.label.set_color('blue')
    l=ax.legend(loc='upper left')
    l.draw_frame(False)
    #l=axt.legend(loc='upper left', bbox_to_anchor=(0., 0.))
    #l.draw_frame(False)
    ax.tick_params(axis='y', colors='blue')
    fig.savefig('Fig5_s-e-d_'+str(inc).rjust(5,'0')+'.png', bbox_inches='tight', pad_inches = 0, transparent=True)
    plt.close(fig)
    #plt.show()
    return fig , ax , axt
def BuildDataMatrix(Dms):
    """Identity hook: return the snapshot matrix unchanged.

    Placeholder for future preprocessing of the DMD data matrix.
    """
    return Dms
def Energy(p):
    """Return, for each row of 2-D array *p*, the sum of squares of its entries."""
    squared_rows = (row * row for row in p)
    return array([sq.sum() for sq in squared_rows])
def MakeImage(P,col,s1,cnter,char):
    """Save DMD mode *col* of matrix *P* (reshaped to *s1*) as JPG/PNG images.

    The real part, normalized by its maximum, is rendered; mode 1 is also
    copied up one directory as <char>.<cnter>.jpg.
    """
    fig41=plt.figure()
    ax41=fig41.add_subplot(111)
    p0=P[:,col].reshape(s1)
    #p0=Energy(p).reshape(s1)
    rp0=real(p0)
    # Normalize by the (flattened) maximum so the colormap spans the mode.
    mpb=plt.imshow(rp0/max(rp0.flatten()))
    #plt.clim(0,1e5) # For dislocation examples
    plt.axis('off')
    ax41.set_yticklabels([])
    ax41.set_xticklabels([])
    sc=str(col)
    fname='Fig5_'+sc+'th-StrainInvariantMode_NoCBar.jpg'
    fig41.savefig(fname,bbox_inches='tight', pad_inches = 0, transparent=True)
    if sc=='1': # Second-Predominant Mode
        # NOTE(review): shell string built from char/cnter -- fine for trusted
        # local runs, but shutil.copy would avoid shell interpolation.
        os.system('cp Fig5_'+sc+'th-StrainInvariantMode_NoCBar.jpg '+'../'+char+'.'+cnter+'.jpg')
    plt.colorbar(mpb)#,extend='both')
    fig41.savefig('Fig5_'+sc+'th-StrainInvariantMode.png',bbox_inches='tight', pad_inches = 0, transparent=True)
    plt.close(fig41)
    #plt.title(sc+'-th Damage Mode')
    #fig=plt.figure()
    #ax=fig.add_subplot(111)
    #colorbar_ax=fig.add_axes([0.7,0.1,0.05,0.8])
    #fig.colorbar(mpb)
    #fig.savefig('Fig5_colorbar.png',bbox_inches='tight', pad_inches = 0, transparent=True)
    return None
def MakeImagePred(P,col,s1,eps):
    """Save predicted field *P* (reshaped to *s1*) at strain *eps* as PNGs.

    Writes one image without and one with a colorbar; returns the spatial
    mean of the real part of the field. The *col* argument is unused here.
    """
    fig41=plt.figure()
    ax41=fig41.add_subplot(111)
    p=P.reshape(s1)
    sav=real(p.flatten().mean())
    p0=p #Energy(p).reshape(s1)
    rp0=real(p0)
    print(rp0.flatten().mean(),rp0.flatten().max())
    mpb=plt.imshow(rp0)
    # Fixed color limits so frames are comparable across time steps.
    plt.clim(-.1,.1)
    plt.axis('off')
    ax41.set_yticklabels([])
    ax41.set_xticklabels([])
    # Strain value in scientific notation becomes part of the file name.
    sc=str(format(eps,'.0e'))[:]
    fig41.savefig('Fig5_'+sc+'th-TimeStepStrainInvariant.png',bbox_inches='tight', pad_inches = 0, transparent=True)
    plt.colorbar(mpb)#,extend='both')
    fig41.savefig('Fig5_'+sc+'th-TimeStepStrainInvariant_WithCbar.png',bbox_inches='tight', pad_inches = 0, transparent=True)
    plt.close(fig41)
    #plt.title(' Damage Field '+r'$\phi$'+' at '+r'$\epsilon=$'+sc)
    return sav
def MakePlot_SV(Sig,r):
    """Plot the singular-value spectrum *Sig* up to index *r*; save Fig5_SV.png."""
    ####Plotting
    fig2=plt.figure()
    ax2=fig2.add_subplot(111)
    ax2.plot(Sig,'s',markersize=20)
    ax2.set_xlabel('index '+r'$j$')
    ax2.set_ylabel(r'$\varsigma_j$')
    ax2.set_xlim((-0.2,r))
    fig2.tight_layout()
    fig2.savefig('Fig5_SV.png',bbox_inches='tight', pad_inches = 0, transparent=True)
    plt.close(fig2)
    ############
    return fig2,ax2
def MakePlot_Eigen(mu):
    """Plot DMD eigenvalues *mu* in the complex plane with the unit circle.

    Saves Fig5_Eigen.png and returns (figure, axes, circle parameter array).
    """
    # Parameter for drawing the unit circle (stability boundary).
    t0 = np.linspace(0, 2*pi, 20)
    fig3=plt.figure()
    ax3=fig3.add_subplot(111)
    ax3.plot(real(mu),imag(mu),'s',markersize=20)
    ax3.plot(cos(t0), sin(t0),'--')
    ax3.set_xlabel(r'$Re(\mu)$')
    ax3.set_ylabel(r'$Im(\mu)$')
    fig3.tight_layout()
    fig3.savefig('Fig5_Eigen.png',bbox_inches='tight', pad_inches = 0, transparent=True)
    plt.close(fig3)
    return fig3,ax3,t0
def Predict(Phi,b,mu,s,t,r):
    """Reconstruct/predict the field at num_pred time points from DMD modes.

    Phi: DMD modes; b: mode amplitudes; mu: DMD eigenvalues; s: 2-D field
    shape for image output; t: original time/strain samples (assumed evenly
    spaced -- dt is taken from the first pair); r: truncation rank.
    Returns (tps, sigmaps): the prediction times and, per time point, the
    spatial mean of the (background-subtracted) field plus the strain value.
    """
    print(t,'--t')
    dt=t[1]-t[0]
    tmin=min(t)
    tmax=max(t)
    t2 = np.linspace(tmin, tmax, num_pred)
    # Time dynamics: Psi[:, i] = mu**(t_i/dt) * b for each prediction time.
    Psi = np.zeros([r, len(t2)], dtype='complex')
    for i,_x in enumerate(t2):
        Psi[:,i] = multiply(power(mu, _x/dt), b)
    # compute DMD reconstruction
    D2 = dot(Phi, Psi)
    #np.allclose(D, D2) # True
    sigmaps=[]
    tps=[]
    for i in range(len(D2[0,:])):
        print(str(i)+'--predicted...'+str(t2[i]))
        F=D2[:,i]
        if i==0: #subtract background
            # F0 from the first frame is reused for all later frames.
            F0=average(F)
        eps=t2[i]
        sigma=MakeImagePred((F-F0),i,s,eps)
        tps.append(t2[i])
        sigmaps.append(sigma+eps)
    return tps,sigmaps
def Perform_and_PredictFuture(D0,eps,s,cnter,char):
    """Run exact DMD on snapshot matrix *D0* and predict future fields.

    D0: snapshots (time x space; transposed internally to space x time);
    eps: strain/time samples passed to Predict; s: 2-D field shape; cnter,
    char: naming tags forwarded to MakeImage. Saves singular-value,
    eigenvalue and mode images as side effects; returns Predict's output.
    """
    D=D0.T #Data Matrix
    # Shifted snapshot pairs: Y approx A @ X.
    X=D[:,:-1]
    Y=D[:,1:]
    # SVD of input matrix
    U2,Sig2,Vh2 = svd(X, False)
    r = 7 # rank-7 truncation (keep the r leading SVD modes)
    fig_SV,ax_SV=MakePlot_SV(Sig2,r)
    U = U2[:,:r]
    Sig = diag(Sig2)[:r,:r]
    V = Vh2.conj().T[:,:r]
    # build A tilde
    Atil = dot(dot(dot(U.conj().T, Y), V), inv(Sig))
    mu,W = eig(Atil)
    fig_Eigen,ax_Eigen,t0=MakePlot_Eigen(mu)
    # build DMD modes
    Phi = dot(dot(dot(Y, V), inv(Sig)), W)
    # Save images of the first seven spatial modes.
    MakeImage(Phi,0,s,cnter,char)
    MakeImage(Phi,1,s,cnter,char)
    MakeImage(Phi,2,s,cnter,char)
    MakeImage(Phi,3,s,cnter,char)
    MakeImage(Phi,4,s,cnter,char)
    MakeImage(Phi,5,s,cnter,char)
    MakeImage(Phi,6,s,cnter,char)
    # compute time evolution
    # NOTE(review): amplitudes are fit to the *second* snapshot X[:,1], not
    # the usual X[:,0] -- confirm this is intentional.
    b = dot(pinv(Phi), X[:,1])
    tps,sigmaps=Predict(Phi,b,mu,s,eps,r)
    return tps,sigmaps
# --- Driver: run DMD analysis over every dataset case directory ------------
# Usage: python RunSEAmodes.py <Nucleation|Glide> <Training|Testing>
if len(sys.argv)!=3:
    sys.exit('python RunSEAmodes.py Nucleation/Glide Training/Testing')
character=sys.argv[1]
character2=sys.argv[2]
dirp=os.getcwd()
dir0s=glob.glob('datasets/'+character2+'/'+character+'/Case*')
for dir0 in dir0s:
    print(dir0)
    # Case id and N parameter are parsed from the directory name.
    c=dir0.split('Case')[1].split('_'+character+'_N')[0]
    N=dir0.split('_'+character+'_N')[1]
    os.chdir(dir0)
    fs=glob.glob('I-Field*.txt')
    i_incs=[]
    Zs_test=[]
    Sigmas=[]
    from os.path import getsize
    inc_test=[]
    inc_full=[]
    s_test=[]
    s_full=[]
    e_test=[]
    e_full=[]
    d_test=[]
    d_full=[]
    cnt2=0
    for f in fs:
        # Increment number is encoded at the end of the file name.
        inc0=int(f.split('_')[-1].split('.txt')[0])+10
        sizef=getsize(f)
        # Skip empty/corrupt files and increments beyond the full range.
        if sizef > 100 and inc0 <= inc_full0:
            Out = DislocationState(f)
            cnt2+=1
            strain, stress,s = Out
            szz_av=average(stress.flatten())/1e6
            ezz_av=1e-8 + inc0 * 0.01 #average(strain.flatten())
            inc_full.append(inc0)
            e_full.append(ezz_av)
            s_full.append(szz_av)
            d_av=0.
            # Increments up to inc_test0 also feed the DMD training set.
            if inc0 <= inc_test0:
                inc_test.append(inc0)
                s_test.append(szz_av)
                e_test.append(ezz_av)
                d_test.append(d_av)
                Zs_test.append(strain.flatten())
    # NOTE(review): f0 uses the loop variables after the loop and is unused.
    f0=ezz_av/float(inc0)
    # Sort every series by increment number (glob order is arbitrary).
    i0_full=argsort(inc_full)
    i0_test=argsort(inc_test)
    s_full=array(s_full)[i0_full[:]]
    #d_full=array(d_full)[i0_full[:]]
    e_full=array(e_full)[i0_full[:]]
    s_test=array(s_test)[i0_test[:]]
    #d_test=array(d_test)[i0_test[:]]
    e_test=array(e_test)[i0_test[:]]
    Zs_test=array(Zs_test)[i0_test[:]]
    #figesd,axesd,axtesd=MakeStressStrainPlot(s_test, e_test, s_full, e_full, d_test, d_full,inc_full0)
    #DMD part follows
    D0=BuildDataMatrix(Zs_test)
    cnter=str(c).rjust(3,'0')+str(N).rjust(3,'0')
    e_pred,d_pred=Perform_and_PredictFuture(abs(D0),e_full,s,cnter,character)
    figesd,axesd,axtesd=MakeStressStrainPlot(s_test, e_test, s_full, e_full, e_pred,inc_full0)
    # Return to the top directory before the next case.
    os.chdir(dirp)
|
983,368 | 50c83b958fa60c4929af5d2f27899274bec0d015 | # coding: utf-8
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.core.validators import EmailValidator
from django.utils.translation import pgettext_lazy as _
from email_confirm_la.models import EmailConfirmation
class AuthUserEmailValidator(EmailValidator):
    """Email validator that also rejects addresses already used by an auth User."""

    def __call__(self, value):
        # Standard syntactic validation first.
        super(AuthUserEmailValidator, self).__call__(value)
        # Imported lazily so the auth app is not required at module load time.
        from django.contrib.auth.models import User
        already_taken = User.objects.filter(email__iexact=value).exists()
        if already_taken:
            raise ValidationError(_('ecla', 'This email has already been taken.'))
class EmailConfirmationValidator(EmailValidator):
    """Email validator that rejects addresses with a pending EmailConfirmation.

    The confirmation lookup is scoped to one model (via its ContentType) and
    one email field name.
    """

    def __init__(self, content_object_model, email_field_name='email', regex=None, message=None, code=None):
        super(EmailConfirmationValidator, self).__init__(regex, message, code)
        self.content_type = ContentType.objects.get_for_model(content_object_model)
        self.email_field_name = email_field_name

    def __call__(self, value):
        # Standard syntactic validation first.
        super(EmailConfirmationValidator, self).__call__(value)
        matches = EmailConfirmation.objects.filter(
            content_type=self.content_type,
            email_field_name=self.email_field_name,
        ).filter(email__iexact=value)
        if matches.exists():
            raise ValidationError(_('ecla', 'This email has already been taken.'))
|
983,369 | 86eb2d9ffe883418cca0e0258a8df2ace661200c | import datetime
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pandas.tseries.offsets import BDay
from data_fetcher import get_data
class BackTestingBase(object):
    """Base class for simple signal-driven backtests.

    Subclasses implement ``generate_signals``/``plot_signals``; this base
    fetches price history, turns signals into share positions, computes the
    resulting portfolio value series, and provides basic performance metrics.
    """

    DEFAULT_INITIAL_CAPITAL = float(100000.0)  # starting cash for the backtest
    DEFAULT_QTY_TRADES = 100                   # shares held per unit of signal

    def __init__(self, ticker, look_back_days=None):
        """Initialize state and fetch price history for *ticker*.

        Args:
            ticker: symbol understood by the project ``get_data`` helper.
            look_back_days: optional number of business days of history.
        """
        self.ticker = ticker
        self.asset_prices = pd.DataFrame()
        self.signals = None
        self.portfolio = None
        self.positions = None
        self.get_underlying_data(look_back_days)

    def get_underlying_data(self, look_back_days=None):
        """Populate ``self.asset_prices`` via ``get_data`` (Quandl source)."""
        if look_back_days:
            start_date = datetime.datetime.today() - BDay(look_back_days)
            self.asset_prices = get_data(self.ticker, start=start_date, useQuandl=True)
        else:
            self.asset_prices = get_data(self.ticker, useQuandl=True)

    def generate_signals(self):
        """Produce ``self.signals`` (must contain a 'signal' column)."""
        raise NotImplementedError("Child class needs to implement this method.")

    def plot_signals(self):
        """Visualize the generated signals."""
        raise NotImplementedError("Child class needs to implement this method.")

    def _generate_positions(self):
        # One column per ticker holding the signed share count implied by the signal.
        self.positions = pd.DataFrame(index=self.signals.index).fillna(0.0)
        self.positions[self.ticker] = self.DEFAULT_QTY_TRADES * self.signals['signal']

    def backtest_portfolio(self):
        """Compute holdings, cash, total value and period returns."""
        self._generate_positions()
        print(self.positions)
        # Initialize the portfolio with value owned
        self.portfolio = self.positions.multiply(self.asset_prices['Adj. Close'], axis=0)
        # Store the difference in shares owned
        pos_diff = self.positions.diff()
        # Add `holdings` to portfolio
        self.portfolio['holdings'] = (self.positions.multiply(self.asset_prices['Adj. Close'], axis=0)).sum(axis=1)
        # Add `cash` to portfolio: initial capital minus cumulative cost of trades.
        self.portfolio['cash'] = self.DEFAULT_INITIAL_CAPITAL - \
            (pos_diff.multiply(self.asset_prices['Adj. Close'], axis=0)).sum(axis=1).cumsum()
        self.portfolio['total'] = self.portfolio['cash'] + self.portfolio['holdings']
        self.portfolio['returns'] = self.portfolio['total'].pct_change()

    def plot_portfolio(self):
        """Plot total portfolio value with buy (^, magenta) / sell (v, black) markers."""
        fig = plt.figure()
        ax1 = fig.add_subplot(111, ylabel='Portfolio value in $')
        ax1.plot(self.signals.index.map(mdates.date2num), self.portfolio['total'])
        ax1.plot(self.portfolio.loc[self.signals.positions == 1.0].index,
                 self.portfolio.total[self.signals.positions == 1.0],
                 '^', markersize=10, color='m')
        ax1.plot(self.portfolio.loc[self.signals.positions == -1.0].index,
                 self.portfolio.total[self.signals.positions == -1.0],
                 'v', markersize=10, color='k')
        plt.show()

    def sharpe_ratio(self):
        """Return the annualized (sqrt(252)-scaled) Sharpe ratio of strategy returns."""
        returns = self.portfolio['returns']
        sharpe_ratio = np.sqrt(252) * (returns.mean() / returns.std())
        print("Sharpe Ratio", sharpe_ratio)
        return sharpe_ratio

    def cagr(self):
        """Return the Compound Annual Growth Rate of the underlying asset.

        Bug fix: the growth ratio now divides the last price by the *first*
        price (``.iloc[0]``); the original used ``[1]`` (second price), which
        is inconsistent with ``days`` being measured from the first index
        entry. ``.iloc`` also avoids deprecated integer-label lookups.
        """
        days = (self.asset_prices.index[-1] - self.asset_prices.index[0]).days
        cagr = ((self.asset_prices['Adj. Close'].iloc[-1] / self.asset_prices['Adj. Close'].iloc[0])
                ** (365.0 / days)) - 1
        print("CAGR ", cagr)
        return cagr
|
983,370 | 2ca4f64a3760265dda5bc55fcd8f3ebe2c811a0a | # -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2020 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Class for reading and writing Mir files."""
import numpy as np
from .uvdata import UVData
from . import mir_parser
from .. import utils as uvutils
from .. import get_telescope
__all__ = ["Mir"]
class Mir(UVData):
    """
    A class for Mir file objects.

    This class defines an Mir-specific subclass of UVData for reading and
    writing Mir files. This class should not be interacted with directly,
    instead use the read_mir and write_mir methods on the UVData class.
    """

    def read_mir(self, filepath, isource=None, irec=None, isb=None, corrchunk=None):
        """
        Read in data from an SMA MIR file, and map to the UVData model.

        Note that with the exception of filepath, the rest of the parameters are
        used to sub-select a range of data that matches the limitations of the current
        instantiation of pyuvdata -- namely 1 spectral window, 1 source. These could
        be dropped in the future, as pyuvdata capabilities grow.

        Parameters
        ----------
        filepath : str
            The file path to the MIR folder to read from.
        isource : int
            Source code for MIR dataset (defaults to the first source found).
        irec : int
            Receiver code for MIR dataset (defaults to the first found).
        isb : int
            Sideband code for MIR dataset (defaults to the first found).
        corrchunk : int
            Correlator chunk code for MIR dataset (defaults to the first found).

        Raises
        ------
        IndexError
            If no records match the requested selection.
        """
        # Use the mir_parser to read in metadata, which can be used to select data.
        mir_data = mir_parser.MirParser(filepath)
        # Select out data that we want to work with.
        if isource is None:
            isource = mir_data.in_read["isource"][0]
        if irec is None:
            irec = mir_data.bl_read["irec"][0]
        if isb is None:
            isb = mir_data.bl_read["isb"][0]
        if corrchunk is None:
            corrchunk = mir_data.sp_read["corrchunk"][0]
        mir_data.use_in = mir_data.in_read["isource"] == isource
        # Baselines must match sideband and receiver; only ipol == 0 is kept
        # since a single polarization is mapped (see Npols below).
        mir_data.use_bl = np.logical_and(
            np.logical_and(
                mir_data.bl_read["isb"] == isb, mir_data.bl_read["ipol"] == 0
            ),
            mir_data.bl_read["irec"] == irec,
        )
        mir_data.use_sp = mir_data.sp_read["corrchunk"] == corrchunk
        # Load up the visibilities into the MirParser object. This will also update the
        # filters, and will make sure we're looking at the right metadata.
        mir_data._update_filter()
        if len(mir_data.in_data) == 0:
            raise IndexError("No valid records matching those selections!")
        mir_data.load_data(load_vis=True, load_raw=True)
        # Create a simple array/list for broadcasting values stored on a
        # per-integration basis in MIR into the (tasty) per-blt records in UVDATA.
        bl_in_maparr = [mir_data.inhid_dict[idx] for idx in mir_data.bl_data["inhid"]]
        # Derive Nants_data from baselines.
        self.Nants_data = len(
            np.unique(
                np.concatenate((mir_data.bl_data["iant1"], mir_data.bl_data["iant2"]))
            )
        )
        # SMA array size is fixed at 8 antennas (matches antenna_names below).
        self.Nants_telescope = 8
        # N*(N-1)/2 cross-correlation baselines (assumes no autocorrelations).
        self.Nbls = int(self.Nants_data * (self.Nants_data - 1) / 2)
        self.Nblts = len(mir_data.bl_data)
        self.Nfreqs = int(mir_data.sp_data["nch"][0])
        self.Npols = 1  # todo: We will need to go back and expand this.
        self.Nspws = 1  # todo: We will need to go back and expand this.
        self.Ntimes = len(mir_data.in_data)
        # MIR antennas are 1-indexed; UVData antennas are 0-indexed.
        self.ant_1_array = mir_data.bl_data["iant1"] - 1
        self.ant_2_array = mir_data.bl_data["iant2"] - 1
        self.antenna_names = [
            "Ant 1",
            "Ant 2",
            "Ant 3",
            "Ant 4",
            "Ant 5",
            "Ant 6",
            "Ant 7",
            "Ant 8",
        ]
        self.antenna_numbers = np.arange(8)
        # Prepare the XYZ coordinates of the antenna positions.
        antXYZ = np.zeros([self.Nants_telescope, 3])
        for idx in range(self.Nants_telescope):
            if (idx + 1) in mir_data.antpos_data["antenna"]:
                antXYZ[idx] = mir_data.antpos_data["xyz_pos"][
                    mir_data.antpos_data["antenna"] == (idx + 1)
                ]
        # Get the coordinates from the entry in telescope.py
        lat, lon, alt = get_telescope("SMA")._telescope_location.lat_lon_alt()
        self.telescope_location_lat_lon_alt = (lat, lon, alt)
        # Calculate antenna positions in ECEF frame. Note that since both
        # coordinate systems are in relative units, no subtraction from
        # telescope geocentric position is required , i.e we are going from
        # relRotECEF -> relECEF
        self.antenna_positions = uvutils.ECEF_from_rotECEF(antXYZ, lon)
        self.baseline_array = self.antnums_to_baseline(
            self.ant_1_array, self.ant_2_array, attempt256=False
        )
        fsky = mir_data.sp_data["fsky"][0] * 1e9  # GHz -> Hz
        fres = mir_data.sp_data["fres"][0] * 1e6  # MHz -> Hz
        nch = mir_data.sp_data["nch"][0]
        self.channel_width = fres
        # Need the half-channel offset below because of the weird way
        # in which MIR identifies the "center" of the band
        self.freq_array = fsky + fres * (np.arange(nch) - (nch / 2 - 0.5))
        # TODO: This will need to be fixed when spw > 1
        self.freq_array = np.reshape(self.freq_array, (1, -1))
        self.history = "Raw Data"
        self.instrument = "SWARM"
        # todo: This won't work when we have multiple spectral windows.
        self.integration_time = mir_data.sp_data["integ"]
        # todo: Using MIR V3 convention, will need to be V2 compatible eventually.
        # LST stored in hours; converted to radians (pi/12 per hour).
        self.lst_array = (
            mir_data.in_data["lst"][bl_in_maparr].astype(float) + (0.0 / 3600.0)
        ) * (np.pi / 12.0)
        # todo: We change between xx yy and rr ll, so we will need to update this.
        self.polarization_array = np.asarray([-5])
        self.spw_array = np.asarray([0])
        self.telescope_name = "SMA"
        time_array_mjd = mir_data.in_read["mjd"][bl_in_maparr]
        # Convert MJD to Julian Date (JD = MJD + 2400000.5).
        self.time_array = time_array_mjd + 2400000.5
        # Need to flip the sign convention here on uvw, since we use a1-a2 versus the
        # standard a2-a1 that uvdata expects
        self.uvw_array = (-1.0) * np.transpose(
            np.vstack(
                (mir_data.bl_data["u"], mir_data.bl_data["v"], mir_data.bl_data["w"])
            )
        )
        # todo: Raw data is in correlation coefficients, we may want to convert to Jy.
        self.vis_units = "uncalib"
        self._set_phased()
        # Resolve the selected source code to its name via the codes table.
        sou_list = mir_data.codes_data[mir_data.codes_data["v_name"] == b"source"]
        self.object_name = sou_list[sou_list["icode"] == isource]["code"][0].decode(
            "utf-8"
        )
        self.phase_center_ra = mir_data.in_data["rar"][0]
        self.phase_center_dec = mir_data.in_data["decr"][0]
        self.phase_center_epoch = mir_data.in_data["epoch"][0]
        self.phase_center_epoch = float(self.phase_center_epoch)
        # All SMA dishes are 6 m.
        self.antenna_diameters = np.zeros(self.Nants_telescope) + 6
        self.blt_order = ("time", "baseline")
        self.data_array = np.reshape(
            np.array(mir_data.vis_data),
            (self.Nblts, self.Nspws, self.Nfreqs, self.Npols),
        )
        # Don't need the data anymore, so drop it
        mir_data.unload_data()
        # No flags or nsample info in the raw data: nothing flagged, unit weight.
        self.flag_array = np.zeros(self.data_array.shape, dtype=bool)
        self.nsample_array = np.ones(self.data_array.shape, dtype=np.float32)

    def write_mir(self, filename):
        """
        Write out the SMA MIR files.

        Parameters
        ----------
        filename : str
            The path to the folder on disk to write data to.

        Raises
        ------
        NotImplementedError
            Always; MIR writing is not yet supported.
        """
        raise NotImplementedError
|
983,371 | 6052cc730a2a5faede5bb3efe242880f9f5d8236 | """
Contains the PredictionTrainer class which handles the training of the prediction decoders,
PredictionDecoder and PredictionDecoderVelocity, and records the training and validation losses.
Also contains the encode_inputs helper function, which encodes inputs.
Based on:
File Name: trainer.py
Developed by Nikolas Pitsillos, PhD Candidate in Computer Vision and Autonomous Systems, University of Glasgow
Taken from: https://github.com/npitsillos/productivity_efficiency/blob/master/torch_trainer/trainer.py
"""
import torch
from torch import nn
class PredictionTrainer:
    """
    Handles the training of the PredictionDecoder and PredictionDecoderVelocity neural networks.

    Only ``decoder`` is optimized here; ``encoder1`` and ``encoder2`` are used as
    fixed feature extractors via the module-level ``encode_inputs`` helper.
    """
    def __init__(self, encoder1, encoder2, decoder, num_epochs, train_loader_enc1, train_loader_enc2, train_loader_pred,
                 val_loader_enc1, val_loader_enc2, val_loader_pred,
                 device, optimizer):
        """
        Initializes internal PredictionTrainer state.
        Args:
            encoder1 (Encoder): first encoder (fixed; not optimized here).
            encoder2 (Encoder): second encoder (fixed; not optimized here).
            decoder: decoder network to train.
            num_epochs (int): number of epochs for training.
            train_loader_enc1: data loader holding training data for encoder1.
            train_loader_enc2: data loader holding training data for encoder2.
            train_loader_pred: data loader holding training data to be predicted by the decoder.
            val_loader_enc1: data loader holding validation data for encoder1.
            val_loader_enc2: data loader holding validation data for encoder2.
            val_loader_pred: data loader holding validation data to be predicted by the decoder.
            device (torch.device): torch device.
            optimizer (torch.optim.adam.Adam): Adam optimizer for VAE.
        """
        self.loss_criterion = nn.MSELoss()
        self.encoder1 = encoder1
        self.encoder2 = encoder2
        self.decoder = decoder
        self.num_epochs = num_epochs
        self.train_loader_enc1 = train_loader_enc1
        self.train_loader_enc2 = train_loader_enc2
        self.train_loader_pred = train_loader_pred
        self.val_loader_enc1 = val_loader_enc1
        self.val_loader_enc2 = val_loader_enc2
        self.val_loader_pred = val_loader_pred
        self.device = device
        self.optimizer = optimizer
        self.epoch = 0

    def train_model(self):
        """
        Trains self.decoder for self.num_epochs epochs with the data in
        self.train_loader_enc1, self.train_loader_enc2 and self.train_loader_pred,
        evaluating on the validation loaders after each epoch.

        Returns:
            tuple: (list: average training losses, list: average validation losses).
        """
        self.encoder1.to(self.device)
        self.encoder2.to(self.device)
        self.decoder.to(self.device)
        average_training_losses = []
        average_validation_losses = []
        # epochs loop
        while self.epoch < self.num_epochs:
            self.decoder.train()
            training_losses_in_epoch = []
            # iterations loop over the three parallel training loaders
            for inputs1, inputs2, targets in zip(self.train_loader_enc1, self.train_loader_enc2,
                                                 self.train_loader_pred):
                # get encoded inputs and targets
                encoded_inputs_tensor = encode_inputs(inputs1, inputs2, self.encoder1, self.encoder2, self.device)
                targets = targets[0]
                # zero gradient and get outputs
                self.optimizer.zero_grad()
                outputs = self.decoder(encoded_inputs_tensor)
                # calculate loss and do backpropagation
                loss = self.loss_criterion(outputs, targets)
                loss.backward()
                self.optimizer.step()
                # add training loss to list
                loss_item = loss.cpu().detach().item()
                training_losses_in_epoch.append(loss_item)
            # print and add average training loss for epoch to list
            average_training_loss = sum(training_losses_in_epoch) / len(training_losses_in_epoch)
            average_training_losses.append(average_training_loss)
            print("Epoch {}: Average Training Loss: {}".format(self.epoch, average_training_loss))
            # print and add average validation loss for epoch to list
            average_validation_loss = self.eval_model()
            average_validation_losses.append(average_validation_loss)
            print("Epoch {}: Average Validation Loss: {}".format(self.epoch, average_validation_loss))
            # increment epoch
            self.epoch += 1
        return average_training_losses, average_validation_losses

    def eval_model(self):
        """
        Evaluates self.decoder with the data in self.val_loader_enc1, self.val_loader_enc2
        and self.val_loader_pred.

        Returns:
            float: average validation loss.
        """
        self.decoder.eval()
        validation_losses_in_epoch = []
        with torch.no_grad():
            # validation iterations loop
            # BUG FIX: this previously iterated the *training* loaders, so the
            # reported "validation" loss was actually a second pass over training data.
            for inputs1, inputs2, targets in zip(self.val_loader_enc1, self.val_loader_enc2,
                                                 self.val_loader_pred):
                # get encoded inputs and targets
                encoded_inputs_tensor = encode_inputs(inputs1, inputs2, self.encoder1, self.encoder2, self.device)
                targets = targets[0]
                # get outputs
                outputs = self.decoder(encoded_inputs_tensor)
                # calculate loss and add to list
                loss = self.loss_criterion(outputs, targets)
                loss_item = loss.cpu().detach().item()
                validation_losses_in_epoch.append(loss_item)
        # calculate average validation loss for epoch
        average_validation_loss = sum(validation_losses_in_epoch) / len(validation_losses_in_epoch)
        return average_validation_loss
def encode_inputs(inputs1, inputs2, encoder1, encoder2, device):
    """
    Encodes inputs1 via encoder1 and inputs2 via encoder2, then packs the
    resulting per-sample means and log-variances into one tensor suitable for
    PredictionDecoder / PredictionDecoderVelocity.

    Args:
        inputs1 (list): list containing a Tensor with the inputs for encoder1.
        inputs2 (list): list containing a Tensor with the inputs for encoder2.
        encoder1 (Encoder): encoder for inputs1.
        encoder2 (Encoder): encoder for inputs2.
        device (torch.device): torch device the result is moved to.
    Returns:
        Tensor: encoded inputs, one row of [mean1, logvar1, mean2, logvar2] per sample.
    """
    # each loader batch is wrapped in a one-element list
    batch1 = inputs1[0]
    batch2 = inputs2[0]
    # run both encoders to obtain (mean, log-variance) pairs
    mean1, logvar1 = encoder1(batch1)
    mean2, logvar2 = encoder2(batch2)
    # one row per sample; .item() assumes each latent component is scalar
    rows = [
        [m1.item(), lv1.item(), m2.item(), lv2.item()]
        for m1, lv1, m2, lv2 in zip(mean1, logvar1, mean2, logvar2)
    ]
    # materialize on the requested device
    return torch.tensor(rows).to(device)
|
983,372 | d1fc0850482275e20ab8ea70a585b5aaeb2f150d | # encoding: utf-8
"""
@version: v1.0
@author: Richard
@license: Apache Licence
@contact: billions.richard@qq.com
@site:
@software: PyCharm
@file: 线程demo01.py
@time: 2018/5/22 8:20
"""
from pprint import pprint as P
import threading
import time
def hello(num):
    """Sleep five seconds, then greet *num* on stdout."""
    delay = 5
    time.sleep(delay)
    message = 'hello %s' % num
    print(message)
if __name__ == '__main__':
    # launch two greeter threads (args 10 then 9), then return immediately;
    # the threads finish after their five-second sleep
    for n in (10, 9):
        worker = threading.Thread(target=hello, args=(n,))
        worker.start()
    print('ending...')
|
983,373 | b2cbd02c129a7238434fa8a5dc1e789487748341 | name = 'Alice'
place = 'Main Street'
time = '6pm'
food = 'pizza'
print ('Hello %s, you are invited to a party located at %s during %s. Please bring %s to eat' %(name, place, time, food)) |
983,374 | 0ea5554afe986ef9aee1bfcffcc01a1cf78dd10a | x = input()
m = int(input())
d = 0
for y in x:
d = max(d, int(y))
def base(x, n):
    """Interpret the digit string *x* as a base-*n* numeral and return its value."""
    # Horner's rule: left-to-right accumulation instead of positional powers
    value = 0
    for digit in x:
        value = value * n + int(digit)
    return value
def okay(n):
    # True when x, read as a base-n numeral, does not exceed the limit m.
    return m >= base(x, n)
def meguru_bisect(ng, ok):
    """Binary search: shrink (ng, ok) until adjacent; ok always satisfies okay()."""
    while abs(ok - ng) > 1:
        midpoint = (ng + ok) // 2
        if okay(midpoint):
            ok = midpoint
        else:
            ng = midpoint
    return ok
# A single digit is the same value in every base, so the answer is 1 iff it fits.
if len(x) == 1:
    print(1 if int(x) <= m else 0)
else:
    # smallest legal base is d + 1; if even that overflows m there is no answer
    if okay(d + 1):
        # count bases in [d + 1, largest base whose value still fits]
        print(meguru_bisect(m + 1, d + 1) - d)
    else:
        print(0)
983,375 | b71f965d221bf32d657a4b5724ea16dfbc964615 | from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import random
import sys
# --- run configuration; presumably overwritten at startup elsewhere in the file ---
XDIM = 0              # per-position input vector width (used as xdim slice size) -- TODO confirm where it is set
USE_TRIPLES = False   # include trigram label features in incremental scoring
USE_QUADS = False     # include 4-gram label features in incremental scoring
LABELS = ""           # label alphabet; empty until configured
#prepare a set of pseudo-random numbers indices into labels in advance, of prime length, such that randint() doesn't need to be called at high frequency
RAND_LABEL_INDICES = []   # pre-generated random indices into LABELS
RAND_RING_INDEX = 0       # current read position in the RAND_LABEL_INDICES ring
RAND_INTS_LEN = 0         # length of RAND_LABEL_INDICES
#labels = list("ACDIGOMN")
K = len(LABELS)  # NOTE(review): 0 at import time since LABELS is ""; presumably recomputed after LABELS is set -- confirm
#create the global vector index dicts
g_unaryFeatureVectorIndices = {} #key=alpha (the class) val=tuple of (start,end+1) indices of the z/w vector components corresponding with each x-yi
g_pairwiseFeatureVectorIndices = {}  # key = 2-char label string, val = single feature-vector index
g_tripleFeatureVectorIndices = {}    # key = 3-char label string, val = single feature-vector index
g_quadFeatureVectorIndices = {}      # key = 4-char label string, val = single feature-vector index
"""
Builds the set of dictionaries for looking up indices into the feature vector.
"""
def _buildVectorIndexDicts(xdim,labels):
global g_unaryFeatureVectorIndices
global g_pairwiseFeatureVectorIndices
global g_tripleFeatureVectorIndices
global g_quadFeatureVectorIndices
k = len(labels)
#build the lookup table of feature vector indices
g_unaryFeatureVectorIndices = {} #key=alpha (the class) val=tuple of (start,end+1) indices of the z/w vector components corresponding with each x-yi
for i in range(len(labels)):
g_unaryFeatureVectorIndices[labels[i]] = (i*xdim, (i+1)*xdim)
#k**2 pairwise indices come immediately after the k unary indices,
g_pairwiseFeatureVectorIndices = {}
i = g_unaryFeatureVectorIndices[labels[-1]][1]
for alpha1 in labels:
for alpha2 in labels:
g_pairwiseFeatureVectorIndices[alpha1+alpha2] = i
i += 1
g_tripleFeatureVectorIndices = {}
for alpha1 in labels:
for alpha2 in labels:
for alpha3 in labels:
g_tripleFeatureVectorIndices[alpha1+alpha2+alpha3] = i
i += 1
g_quadFeatureVectorIndices = {}
for alpha1 in labels:
for alpha2 in labels:
for alpha3 in labels:
for alpha4 in labels:
g_quadFeatureVectorIndices[alpha1+alpha2+alpha3+alpha4] = i
i += 1
"""
Gets a random label via a list of cached random numbers in the range of the length of the label set.
"""
def _getRandLabel():
global RAND_RING_INDEX
RAND_RING_INDEX += 1
if RAND_RING_INDEX >= RAND_INTS_LEN:
RAND_RING_INDEX = 0
return LABELS[ RAND_LABEL_INDICES[RAND_RING_INDEX] ]
"""
[print(str(item)) for item in g_unaryFeatureVectorIndices.items() if item[0] == "A" or item[0] == "Z" or item[0]=="Y"]
[print(str(item)) for item in g_pairwiseFeatureVectorIndices.items() if item[0] == "AA" or item[0] == "ZZ" or item[0]=="YZ"]
exit()
print(str(g_unaryFeatureVectorIndices))
print(str(g_pairwiseFeatureVectorIndices))
print(str(g_tripleFeatureVectorIndices))
exit()
"""
"""
Returns a random label sequence of the same length as x, as a list.
"""
def _getRandomY(yLabels, length):
#c = _getRandLabel()
#return [c for i in range(0,length)]
return [_getRandLabel() for i in range(0,length)]
#opted to use a prepared list of random ints instead of randint(), which is likely to be slow
#return [yLabels[random.randint(0,len(yLabels)-1)] for i in range(0,length)]
"""
Inner driver for feature function Phi1.
@x: The x sequence of inputs
@y: The y sequence of inputs
@i: The index into the sequence; NOTE the index may be less than the span of the features, for instance i==0, when we're considering
pairwise features (y_i-1,y_i). For these cases 0 is returned.
@d: The dimension of the weight vector
def _phi1(x,y,i,d):
#init an all zero vector
z = np.zeros((1,d), dtype=np.float32)
#print("y: "+str(y))
#unary features
unaryIndex = g_unaryFeatureVectorIndices[y[i]]
z[0,unaryIndex] = x[i]
#pairwise features; z_yi_i == 1 iff y_i == alpha and y_i-1 == alpha
if i > 0:
pairwiseIndex = g_pairwiseFeatureVectorIndices[y[i-1]+y[i]]
z[0, pairwiseIndex] = 1.0
return z
"""
"""
The structured feature function, mapping x and y to some score.
Given a structured input and output, returns a d-dimensional vector of the
combined features, eg, to be dotted with some w vector in a perceptron or other method.
This is phi_1 since it uses only up to pairwise features: phi(x,y) + phi(y_k,y_k-1) giving (d = m*k + k**2).
@x: A list of m-dimensional binary strings; here, likely 128-bit binary vectors of optical character data
@y: The structured output for this input as a list of labels,
@d: The dimension of the Phi() and little phi() functions
returns: an R^d numpy vector representing the sum over all little phi features for the entire sequence
"""
def _Phi1(xseq, yseq, d):
z = np.zeros((1,d), dtype=np.float32)
#print("z shape: "+str(z.shape[1]))
#0th unary features are done first to avert if-checking index-bounds for pairwise features inside this high-frequency loop
urange = g_unaryFeatureVectorIndices[yseq[0]]
z[0,urange[0]:urange[1]] = xseq[0][0,:]
#iterate pairwise and other y sequence features
for i in range(1,len(yseq)):
#unary features
urange = g_unaryFeatureVectorIndices[yseq[i]]
z[0,urange[0]:urange[1]] += xseq[i][0,:]
#pairwise features; z_yi_i == 1 iff y_i == alpha and y_i-1 == alpha
pairwiseIndex = g_pairwiseFeatureVectorIndices[yseq[i-1]+yseq[i]]
z[0, pairwiseIndex] += 1.0
return z
"""
Includes up to triple-gram features
"""
def _Phi2(xseq,yseq,d):
z = np.zeros((1,d), dtype=np.float32)
#print("z shape: "+str(z.shape[1]))
#0th unary features are done first to avert if-checking index-bounds for pairwise features inside this high-frequency loop
urange = g_unaryFeatureVectorIndices[yseq[0]]
z[0,urange[0]:urange[1]] = xseq[0][0,:]
#initialize the first unary, pairwise features at index 1
urange = g_unaryFeatureVectorIndices[yseq[1]]
z[0,urange[0]:urange[1]] += xseq[1][0,:]
pairwiseIndex = g_pairwiseFeatureVectorIndices[yseq[0]+yseq[1]]
z[0, pairwiseIndex] = 1.0 #assignment, since this is the first pairwise feature
#iterate pairwise, and triples from index 2 forward
for i in range(2,len(yseq)):
#unary features
urange = g_unaryFeatureVectorIndices[yseq[i]]
z[0,urange[0]:urange[1]] += xseq[i][0,:]
#pairwise features; z_yi_i == 1 iff y_i == alpha and y_i-1 == alpha
pairwiseIndex = g_pairwiseFeatureVectorIndices[yseq[i-1]+yseq[i]]
z[0, pairwiseIndex] += 1.0
#triple features; z_yi_i == 1 iff y_i == alpha and y_i-1 == alpha
tripleIndex = g_tripleFeatureVectorIndices[yseq[i-2]+yseq[i-1]+yseq[i]]
z[0, tripleIndex] += 1.0
return z
def _Phi3(xseq,yseq,d):
    # Feature map with unary, pairwise, trigram, and 4-gram label features.
    # NOTE(review): assumes len(yseq) >= 3; yseq[1] / yseq[2] below raise IndexError otherwise -- confirm callers guarantee this.
    z = np.zeros((1,d), dtype=np.float32)
    #print("z shape: "+str(z.shape[1]))
    #0th unary features are done first to avert if-checking index-bounds for pairwise features inside this high-frequency loop
    urange = g_unaryFeatureVectorIndices[yseq[0]]
    z[0,urange[0]:urange[1]] = xseq[0][0,:]
    #initialize the first unary and pairwise features at index 1
    urange = g_unaryFeatureVectorIndices[yseq[1]]
    z[0,urange[0]:urange[1]] += xseq[1][0,:]
    pairwiseIndex = g_pairwiseFeatureVectorIndices[yseq[0]+yseq[1]]
    z[0, pairwiseIndex] = 1.0 #assignment, since this is the first pairwise feature increment
    #initialize the first unary, pairwise, and triple at index 2
    urange = g_unaryFeatureVectorIndices[yseq[2]]
    z[0,urange[0]:urange[1]] += xseq[2][0,:]
    pairwiseIndex = g_pairwiseFeatureVectorIndices[yseq[1]+yseq[2]]
    z[0, pairwiseIndex] += 1.0
    tripleIndex = g_tripleFeatureVectorIndices[yseq[0]+yseq[1]+yseq[2]]
    z[0, tripleIndex] = 1.0 #assignment, since this is the first triple feature increment
    #iterate pairwise, triples, and quads from index 2 forward
    for i in range(3,len(yseq)):
        #unary features
        urange = g_unaryFeatureVectorIndices[yseq[i]]
        z[0,urange[0]:urange[1]] += xseq[i][0,:]
        #pairwise features
        pairwiseIndex = g_pairwiseFeatureVectorIndices[yseq[i-1]+yseq[i]]
        z[0, pairwiseIndex] += 1.0
        #triple features
        tripleIndex = g_tripleFeatureVectorIndices[yseq[i-2]+yseq[i-1]+yseq[i]]
        z[0, tripleIndex] += 1.0
        #quad features: one increment per occurrence of the 4-gram ending at i
        quadIndex = g_quadFeatureVectorIndices[yseq[i-3]+yseq[i-2]+yseq[i-1]+yseq[i]]
        z[0, quadIndex] += 1.0
    return z
"""
Just implements score(x,y,w) = w dot _Phi(x,y). Returns float.
@x
"""
def _score(x,y,w,phi):
"""
if len(x) != len(y):
print("ERROR |x| != |y| in _score()! exiting")
exit()
"""
return w.dot(phi(x,y,w.shape[1]).T)[0,0]
"""
"""
def SaveLosses(accuracies, finalAccuracy, beamWidth, dataPath, searchMethod, beamUpdateType, titleStr, show, isTraining):
#write the losses to file, for repro
if isTraining:
fnamePrefix = "Results/"+dataPath[0:4]+"Train_b"+str(beamWidth)+"_"+searchMethod+"_"+beamUpdateType
else:
fnamePrefix = "Results/"+dataPath[0:4]+"Test_b"+str(beamWidth)+"_"+searchMethod+"_"+beamUpdateType
ofile = open(fnamePrefix+".txt","w+")
ofile.write(str(accuracies)+"\n")
ofile.write(str(finalAccuracy)+"\n")
ofile.close()
if isTraining:
#plot the training loss
xs = [i for i in range(0,len(accuracies))]
plt.ylim([0,max(accuracies)])
plt.title(titleStr)
plt.xlabel("Iteration")
plt.ylabel("Hamming Accuracy")
plt.plot(xs, accuracies)
x1,x2,y1,y2 = plt.axis()
plt.axis((x1,x2,0.0,1.0))
plt.savefig(fnamePrefix+".png")
if show:
plt.show()
"""
#Old; this was for InferRGS methods, hw1
def eSaveLosses(losses, datapath, show=True):
def SaveLosses(losses, datapath, show=True):
#plot the losses
xs = [i for i in range(0,len(losses))]
plt.ylim([0,max(losses)])
plt.title("Total Hamming Loss per Iteration")
plt.xlabel("Iteration")
plt.ylabel("Sum Hamming Loss")
plt.plot(xs, losses)
plt.savefig(datapath[0:5]+"_HamLoss_Phi"+str(phiNum)+"_R"+str(R)+"_maxIt"+str(maxIt)+".png")
if show:
plt.show()
"""
"""
Randomized greedy search inference, as specified in the hw1 spec.
@xseq: Structured input
@w: the weights for the structured featured function as a 1xd numpy vector
@phi: the structured feature function
@R: the number of random restarts for the greedy search
def InferRGS(xseq, w, phi, R):
d = w.shape[1]
#intialize y_hat structured output to random labels
#y_max = _getRandomY(LABELS, len(xseq))
#phi_y_max = np.zeros((1,d), dtype=np.float32) #the vector phi(x,y_hat,d), stored and returned so the caller need not recompute it
#maxScore = _score(xseq, y_max, w, phi)
yLen = len(xseq)
maxScore = -1000000
#print("y_max: "+str(y_max)+" score: "+str(maxScore))
for dummy in range(R):
y_r = _getRandomY(LABELS, yLen)
#there is an optimization here, since only changing one label at a time, only certain components of z change in the loop below; hence one can leverage this to avoid repeated calls to phi()
z = phi(xseq, y_r, d)
baseScore = w.dot(z.T)[0,0]
#print("y_test: "+str(y_test))
#evaluate all one label changes for y_test, a search space of size len(y_test)*k, where k is the number of labels/colors
for i in range(yLen):
#c_original = str(y_test[j])
cOriginal = y_r[i]
xi = xseq[i][0,:]
#get the unary feature component
urange = g_unaryFeatureVectorIndices[cOriginal]
w_xy_c_original = w[0, urange[0]:urange[1]]
#decrement the original unary component/feature
tempScore = baseScore - w_xy_c_original.dot(xi)
#decrement all the other relevant pairwise, triple, or quad components from score
if i > 0:
pairwiseIndex = g_pairwiseFeatureVectorIndices[y_r[i-1]+cOriginal]
tempScore -= w[0, pairwiseIndex]
if USE_TRIPLES and i > 1:
tripleIndex = g_tripleFeatureVectorIndices[y_r[i-2]+y_r[i-1]+cOriginal]
tempScore -= w[0, tripleIndex]
if USE_QUADS and i > 2:
quadIndex = g_quadFeatureVectorIndices[y_r[i-3]+y_r[i-2]+y_r[i-1]+cOriginal]
tempScore -= w[0, quadIndex]
#evaluate all k different modifications to this label
for j in range(len(LABELS)):
c = LABELS[j]
urange = g_unaryFeatureVectorIndices[c]
w_c = w[0, urange[0]:urange[1]]
cScore = tempScore + w_c.dot(xi)
#add the pairwise component
if i > 0:
pairwiseIndex = g_pairwiseFeatureVectorIndices[y_r[i-1]+c]
cScore += w[0, pairwiseIndex]
if USE_TRIPLES and i > 1:
tripleIndex = g_tripleFeatureVectorIndices[y_r[i-2]+y_r[i-1]+c]
cScore += w[0, tripleIndex]
if USE_QUADS and i > 2:
quadIndex = g_quadFeatureVectorIndices[y_r[i-3]+y_r[i-2]+y_r[i-1]+c]
cScore += w[0, quadIndex]
if cScore > maxScore:
maxScore = cScore
#save y_hat, z_y_hat
y_max = list(y_r)
y_max[i] = str(c)
#print("ymax: "+str(y_max))
#print("score: "+str(maxScore))
return y_max, phi(xseq, y_max, d), maxScore
"""
def InferRGS(xseq, w, phi, R):
    """
    Randomized greedy search inference: R random restarts, each hill-climbing over
    all single-label changes until no improvement, returning the best sequence found.

    @xseq: structured input (list of 1 x XDIM numpy row vectors)
    @w: 1 x d weight vector
    @phi: structured feature function phi(xseq, yseq, d) -> 1 x d vector
    @R: number of random restarts
    Returns: (y_max, phi(xseq, y_max, d), maxScore)
    NOTE(review): if R == 0, or if no candidate ever beats the -10000000 floor,
    y_max / y_local_max are referenced before assignment (NameError) -- confirm
    callers always pass R >= 1 with realistic weights.
    """
    d = w.shape[1]
    #intialize y_hat structured output to random labels
    #y_max = _getRandomY(LABELS, len(xseq))
    #phi_y_max = np.zeros((1,d), dtype=np.float32) #the vector phi(x,y_hat,d), stored and returned so the caller need not recompute it
    #maxScore = _score(xseq, y_max, w, phi)
    yLen = len(xseq)
    maxIterations = 10000
    yLenSeq = [i for i in range(yLen)]
    maxScore = -10000000
    #print("y_max: "+str(y_max)+" score: "+str(maxScore))
    for _ in range(R):
        y_test = _getRandomY(LABELS, yLen)
        #print("y_test: "+str(y_test))
        #there is an optimization here, since only changing one label at a time, only certain components of z change in the loop below; hence one can leverage this to avoid repeated calls to phi()
        #z = phi(xseq, y_test, d)
        #get the initial/base score for this 'good' instance; note the math below only modifies this base score based on the single label component that changes, instead of recalculating the complete score
        #baseScore = w.dot(z.T)[0,0]
        #y_local_max = y_test
        # localMaxScore carries over between while-iterations: a pass that finds
        # no improvement leaves y_local_max == y_test, which triggers convergence
        localMaxScore = -10000000
        iterations = 0
        convergence = False #convergence satisfied when there is no further improvemet to be made via one character changes, eg, the label sequence does not change
        while not convergence:
            #there is an optimization here, since only changing one label at a time, only certain components of z change in the loop below; hence one can leverage this to avoid repeated calls to phi()
            z = phi(xseq, y_test, d)
            #get the initial/base score for this 'good' instance; note the math below only modifies this base score based on the single label component that changes, instead of recalculating the complete score
            baseScore = w.dot(z.T)[0,0]
            #until convergence, evaluate all one label changes for y_test, a search space of size len(y_test)*k, where k is the number of labels
            for i in yLenSeq:
                ######## begin by decrementing the score by the original ith-label's components ###################
                cOriginal = y_test[i]
                xi = xseq[i][0,:]
                #get the unary feature component
                urange = g_unaryFeatureVectorIndices[cOriginal]
                w_xy_c_original = w[0, urange[0]:urange[1]]
                #decrement the original unary component/feature
                tempScore = baseScore - w_xy_c_original.dot(xi)
                #decrement all the other relevant pairwise, triple, or quad components from score
                if i > 0:
                    pairwiseIndex = g_pairwiseFeatureVectorIndices[y_test[i-1]+cOriginal]
                    tempScore -= w[0, pairwiseIndex]
                if USE_TRIPLES and i > 1:
                    tripleIndex = g_tripleFeatureVectorIndices[y_test[i-2]+y_test[i-1]+cOriginal]
                    tempScore -= w[0, tripleIndex]
                if USE_QUADS and i > 2:
                    quadIndex = g_quadFeatureVectorIndices[y_test[i-3]+y_test[i-2]+y_test[i-1]+cOriginal]
                    tempScore -= w[0, quadIndex]
                ######## end decrements; now we can add individual components for each label change, below #######
                ###### evaluate all k different modifications to this label, incrementing the base score by each component ###
                for c in LABELS:
                    if c != cOriginal:
                        urange = g_unaryFeatureVectorIndices[c]
                        w_c = w[0, urange[0]:urange[1]]
                        cScore = tempScore + w_c.dot(xi)
                        #add active ngram components to cScore
                        if i > 0:
                            pairwiseIndex = g_pairwiseFeatureVectorIndices[y_test[i-1]+c]
                            cScore += w[0, pairwiseIndex]
                        #add the triple components
                        if USE_TRIPLES and i > 1:
                            tripleIndex = g_tripleFeatureVectorIndices[y_test[i-2]+y_test[i-1]+c]
                            cScore += w[0, tripleIndex]
                        #add the quad components
                        if USE_QUADS and i > 2:
                            quadIndex = g_quadFeatureVectorIndices[y_test[i-3]+y_test[i-2]+y_test[i-1]+c]
                            cScore += w[0, quadIndex]
                        if cScore > localMaxScore:
                            localMaxScore = cScore
                            #save max character; y_local_max list can be updated outside this loop, to spare repeated list-construction
                            y_local_max = list(y_test)
                            y_local_max[i] = c
                ### end-for: evaluate all k label changes for this position, and possibly obtained max as y_local_max and localMaxScore
            ### end-for (over entire sequence), check for convergence
            if y_local_max == y_test or iterations > maxIterations:
                convergence = True
                #for debugging only; i just want to know if I ever bottom out in terms of iterations
                if iterations >= maxIterations:
                    print("WARNING: ITERATIONS BOTTOMED OUT IN INFER_RGS()")
            else:
                y_test = y_local_max
                baseScore = localMaxScore
                iterations += 1
            #print("iterations: "+str(iterations))+" y_test: "+str(y_test)+" y_local_max: "+str(y_local_max), end="")
        ### end while: converged to single label sequence, so update the global greedy max over R iterations as needed
        if localMaxScore > maxScore:
            maxScore = localMaxScore
            y_max = list(y_local_max)
    return y_max, phi(xseq, y_max, d), maxScore
"""
The old core loop from InferRGS
#manipulate z cleverly to avoid full computation of score = w.dot(phi(x,y_test,d)) on every iteration;
#update the unary features f(x|y), decrementing the previous ones and incrementing the new ones
if j > 0:
#decrement the previous f(x|y) features on successive iterations
urange = g_unaryFeatureVectorIndices[prevC]
z[0,urange[0]:urange[1]] -= x[0,:]
#increment current f(x,y) components by x vector
urange = g_unaryFeatureVectorIndices[c]
z[0,urange[0]:urange[1]] += x[0,:]
#update the pairwise features
if i > 0:
#adjust the pairwise component of the z vector
curYYIndex = g_pairwiseFeatureVectorIndices[y_test[i-1]+c]
#add the new feature at its proper index
z[0,curYYIndex] += 1.0
if i > 1:
#subtract the old feature
z[0,prevYYIndex] -= 1.0
prevYYIndex = curYYIndex
prevC = c
#all components updated (without calling phi), so just get the score directly
score = w.dot(z.T)[0,0]
#print("score: "+str(score))
if score > maxScore:
y_max = list(y_test)
y_max[i] = c
maxScore = score
phi_y_max[0,:] = z[0,:]
#print("new y_max: "+str(y_max)+" score: "+str(maxScore))
"""
def InferRGS_Inefficient(x, w, phi, R):
    """
    Reference implementation of randomized greedy search that recomputes the full
    feature vector (phi) for every candidate label change -- O(len(x) * k) phi
    calls per hill-climbing pass, versus the incremental scoring in InferRGS.

    @x: structured input sequence
    @w: 1 x d weight vector
    @phi: structured feature function phi(x, y, d) -> 1 x d vector
    @R: number of random restarts
    Returns: (y_max, phi(x, y_max, d), maxScore)
    NOTE(review): with R == 0, y_max is referenced before assignment -- confirm
    callers always pass R >= 1.
    """
    d = w.shape[1]
    yLen = len(x)
    #phi_y_max = np.zeros((1,d), dtype=np.float32)
    maxScore = -10000000
    #print("y_max: "+str(y_max)+" score: "+str(maxScore))
    for r in range(0,R):
        #get a random y as a starting point for greedy search
        y_test = _getRandomY(LABELS, yLen)
        #print("y_test: "+str(y_test))
        convergence = False
        while not convergence: #until y_test == y_local_max, keep updating single char changes
            #evaluate all one label changes for y_test, a search space of size len(x)*k, where k is the number of labels/colors
            # localMaxScore is reset every pass (unlike InferRGS, where it persists)
            localMaxScore = -10000000
            for i in range(yLen):
                c_original = str(y_test[i])
                #evaluate all k different modifications to this label
                for c in LABELS:
                    y_test[i] = c
                    z = phi(x, y_test, d)
                    cScore = w.dot(z.T)[0,0]
                    #print("score: "+str(score)+" maxscore: "+str(maxScore))
                    if cScore > localMaxScore:
                        y_local_max = list(y_test)
                        localMaxScore = cScore
                        #print("new y_max: "+str(y_max)+" score: "+str(maxScore))
                #replace original character and continue to next position
                y_test[i] = c_original
            #loop convergence check/update
            if y_test == y_local_max:
                convergence = True
            else:
                y_test = list(y_local_max)
        #end while
        #update local max score and sequence as needed
        if localMaxScore > maxScore:
            maxScore = localMaxScore
            y_max = list(y_local_max)
        #print("ymax: "+str(y_max))
        #print("score: "+str(maxScore))
    return y_max, phi(x, y_max, d), maxScore
"""
Given some x sequence, returns the top scoring b nodes representing the first character
in a y sequence, where b is the beam width.
"""
def _beamSearchInitialization(xseq, w, beamWidth):
beam = []
for c in LABELS:
urange = g_unaryFeatureVectorIndices[c]
score = w[0,urange[0]:urange[1]].dot(xseq[0][0,:].T)
beam.append(([c],score))
#sort and beamify the beam
beam.sort(key = lambda tup: tup[1], reverse=True)
if beamWidth > 0:
beam = beam[0:beamWidth]
#print("BEAM: "+str(beam))
return beam
"""
Performs best-first-search beam update: expand only the highest scoring node on the beam,
delete that node from beam, and append all of its children. Re-sort the beam, then truncate the beam
by beamWidth.
@sortedBeam: A sorted beam, with the highest scoring node at the 0th index.
@beamWidth: The beam width; if <= 0, beam is infinity
@xseq: The input x-sequence, required here just for scoring the nodes
@w: The current weight vector, again required just for scoring nodes
@phi: Feature function, also just for scoring nodes
Returns: new, sorted beam, with max at 0th index
"""
def _bestFirstBeamUpdate(sortedBeam,beamWidth,xseq,w,phi):
d = w.shape[1]
#get the max on the beam
y_max = sortedBeam[0][0]
cxSeqIndex = len(y_max)
baseScore = sortedBeam[0][1]
#remove y_max from beam, to prevent cycling
sortedBeam = sortedBeam[1:]
xvec = xseq[cxSeqIndex][0,:]
prevC = y_max[-1]
#expand the beam with children (candidate set) of highest scoring node
for c in LABELS:
candidate = y_max + [c]
#adjust unary score component
urange = g_unaryFeatureVectorIndices[c]
candidateScore = baseScore + w[0,urange[0]:urange[1]].dot(xvec)
#pairwise features
pairwiseIndex = g_pairwiseFeatureVectorIndices[prevC+c]
candidateScore += w[0, pairwiseIndex]
#candidateScore = w.dot( phi(xseq,candidate,d).T ) #TODO: if necessary, optimize this score update; full-score computation is repetitive and unnecessary
sortedBeam.append( (candidate,candidateScore) )
#sort beam by node scores
sortedBeam.sort(key = lambda tup: tup[1],reverse=True)
#prune beam
if len(sortedBeam) > beamWidth and beamWidth > 0:
sortedBeam = sortedBeam[0:beamWidth]
#print("BEAM: "+str(sortedBeam))
return sortedBeam
"""
Performs breadth-first-search beam update: replace all nodes on the beam with their children, re-score, sort by score,
and truncate by @beamWidth.
@beam: The current beam
@beamWidth: The beam width; if <= 0, beam is infinity
@xseq: The input x-sequence, required here just for scoring the nodes
@w: The current weight vector, again required just for scoring nodes
@phi: Feature function, also just for scoring nodes
Returns: new, sorted beam, with max at 0th index
urange = g_unaryFeatureVectorIndices[yseq[0]]
z[0,urange[0]:urange[1]] = xseq[0][0,:]
#iterate pairwise and other y sequence features
for i in range(1,len(yseq)):
#unary features
urange = g_unaryFeatureVectorIndices[yseq[i]]
z[0,urange[0]:urange[1]] += xseq[i][0,:]
#pairwise features; z_yi_i == 1 iff y_i == alpha and y_i-1 == alpha
pairwiseIndex = g_pairwiseFeatureVectorIndices[yseq[i-1]+yseq[i]]
z[0, pairwiseIndex] += 1.0
"""
def _breadthFirstBeamUpdate(beam,beamWidth,xseq,w,phi):
d = w.shape[1]
newBeam = []
#expand all nodes on the beam
for node in beam:
baseScore = node[1]
#if int(baseScore) % 6 == 5: #periodically recalculate full score calculation, to mitigate accumulating error in code optimization
# baseScore = w.dot( phi(xseq, node[0], d).T )[0,0]
cxSeqIndex = len(node[0])
for c in LABELS:
candidate = node[0] + [c]
#adjust unary score component
urange = g_unaryFeatureVectorIndices[c]
candidateScore = baseScore + w[0, urange[0]:urange[1]].dot(xseq[cxSeqIndex][0,:])
#pairwise features; z_yi_i == 1 iff y_i == alpha and y_i-1 == alpha
pairwiseIndex = g_pairwiseFeatureVectorIndices[candidate[-2]+candidate[-1]]
candidateScore += w[0, pairwiseIndex]
#candidateScore = w.dot( phi(xseq,candidate,d).T ) #TODO: if necessary, optimize this score update; full-score computation is repetitive and unnecessary
newBeam.append( (candidate,candidateScore) )
#sort beam by node scores
newBeam.sort(key = lambda tup: tup[1],reverse=True)
#prune beam
if len(newBeam) > beamWidth and beamWidth > 0:
newBeam = newBeam[0:beamWidth]
#print("BEAM: "+str(newBeam))
return newBeam
"""
Verifies whether or not any node in the beam is a prefix of the correct output, @y_star
"""
def _beamHasGoodNode(beam,y_star):
#early update check: if beam contains no y_good nodes, return highest scoring node to perform update
for node in beam:
if node[0] == y_star[0:len(node[0])]:
return True
return False
def _getFirstCompleteNode(sortedBeam,yLen):
for node in sortedBeam:
if len(node[0]) >= yLen:
return node
return None
"""
Just checks if any node on the beam is a complete output
"""
def _beamHasCompleteOutput(beam,yLen):
return _getFirstCompleteNode(beam,yLen) != None
"""
For two lists, check if the members of y_hat are a prefix of y_star.
This is degenerately true if y_star is a prefix of y_hat, and y_hat is longer.
"""
def _isPrefix(y_hat,y_star):
for i in range(min(len(y_hat),len(y_star))):
if y_hat[i] != y_star[i]:
return False
return True
"""
This is a utility of max-violation, which finds the first wrong node in a beam instance,
where "wrong" means node is not a prefix of y_star. The beam is assumed sorted,
such that the first non-matching node is the max wrong one.
Returns: first non-matching node, or None if no such node (which should happen only very rarely)
"""
def _getFirstWrongNode(sortedBeam,y_star):
for node in sortedBeam:
if not _isPrefix(node[0],y_star):
return node
return None
"""
Returns the top-scoring example of the lowest scoring beam.
"""
def _maxViolationUpdate(xseq, y_star, w, phi, beamUpdateMethod, beamWidth):
    """Max-violation update: advance the beam step by step and return the wrong
    partial output whose score differs most from the score of the gold prefix of
    the same step, together with its features and score.

    @xseq: input sequence; @y_star: gold output sequence
    @w: 1 x d weight vector; @phi: joint feature function phi(xseq, y, d)
    @beamUpdateMethod: beam expansion function (best-first or breadth-first)
    @beamWidth: beam size
    Returns: (yMaxViolation, phi(xseq, yMaxViolation, d), maxViolationScore)
    """
    d = w.shape[1]
    #beam initialization function, I() in the lit. The beam contains (sequence,score) tuples
    beam = _beamSearchInitialization(xseq, w, beamWidth)
    #max-score y_hat of the min-scoring beam instance
    #yMaxViolation = beam[0][0]
    #maxViolationScore = beam[0][1]
    yMaxViolation = beam[0][0]
    maxViolationScore = beam[0][1]
    maxViolationDelta = -10000000
    yLen = len(y_star)
    #until beam highest scoring node in beam is a complete structured output or terminal node
    #t tracks the gold prefix length; NOTE(review): assumes each beam update extends
    #every node by exactly one symbol so y_star[0:t] matches node depth — confirm
    t = 1
    while not _beamHasCompleteOutput(beam, yLen):
        beam = beamUpdateMethod(beam, beamWidth, xseq, w, phi)
        #update the max-violation delta param
        wrongNode = _getFirstWrongNode(beam,y_star)
        if wrongNode != None:
            #score of the gold prefix of length t under the current weights
            correctPrefixScore = w.dot( phi(xseq, y_star[0:t], d).T )[0,0]
            delta = abs(wrongNode[1] - correctPrefixScore)
            if delta > maxViolationDelta:
                maxViolationDelta = delta
                yMaxViolation = wrongNode[0]
                maxViolationScore = wrongNode[1]
        #else:
            #in this case all nodes in the beam matched the y_star prefix, which should be extremely unlikely, except for very small beams
            #print("WARNING wrongNode == None in maxViolationUpdate()")
        t += 1
    return yMaxViolation, phi(xseq, yMaxViolation, d), maxViolationScore
def _standardUpdate(xseq, y_star, w, phi, beamUpdateMethod, beamWidth):
    """Standard (full-inference) perceptron update: run beam search until a
    complete output of length len(y_star) is found, then return the top node,
    its feature vector, and its score."""
    featureDim = w.shape[1]
    targetLen = len(y_star)
    #beam of (sequence, score) tuples; see _beamSearchInitialization
    beam = _beamSearchInitialization(xseq, w, beamWidth)
    while not _beamHasCompleteOutput(beam, targetLen):
        beam = beamUpdateMethod(beam, beamWidth, xseq, w, phi)
    #the beam is kept sorted, so the max-scoring node is first
    best = beam[0]
    return best[0], phi(xseq, best[0], featureDim), best[1]
"""
def _maxViolationUpdate(xseq, y_star, w, phi, beamUpdateMethod, beamWidth):
d = w.shape[1]
#beam initialization function, I() in the lit. The beam contains (sequence,score) tuples
beam = _beamSearchInitialization(xseq, w, beamWidth)
#max-score y_hat of the min-scoring beam instance
#yMaxViolation = beam[0][0]
#maxViolationScore = beam[0][1]
yMaxViolation = ""
maxViolationScore = 10000000
yLen = len(y_star)
#until beam highest scoring node in beam is a complete structured output or terminal node
t = 0
while not _beamHasCompleteOutput(beam, yLen):
beam = beamUpdateMethod(beam, beamWidth, xseq, w, phi)
completeMaxNode = _getMaxWrongNodeInBeam(beam, yLen)
if completeMaxNode != None and completeMaxNode[1] < maxViolationScore:
#print("hit it")
maxViolationScore = completeMaxNode[1]
yMaxViolation = completeMaxNode[0]
return yMaxViolation, phi(xseq, yMaxViolation, d), maxViolationScore
"""
"""
Returns the max-scoring complete node in beam; None if there is no complete node.
Returns: max-complete-Node (or None)
"""
def _getMaxCompleteNodeInBeam(beam,completeLen):
maxCompleteScore = -100000000.0
maxCompleteNode = None
for node in beam:
if len(node[0]) == completeLen and node[1] > maxCompleteScore:
maxCompleteScore = node[1]
maxCompleteNode = node
return maxCompleteNode
"""
Runs early update: progress until beam contains no y_good node (no node that could lead to a solution),
and return the highest-scoring node in the beam at that point as the y_hat by which to make perceptron updates.
@beamUpdateMethod: A function for performing the beam update, in this case either best-first or breadth-first beam update.
"""
def _earlyUpdate(xseq, y_star, w, phi, beamUpdateMethod, beamWidth):
    """Early update (Collins & Roark): advance the beam only while it still
    contains a good node (one that could lead to @y_star); as soon as every good
    node falls off the beam — a search error — stop and return the current
    highest-scoring node so the perceptron can update against it.

    @beamUpdateMethod: beam expansion function (best-first or breadth-first)
    Returns: (y_hat, phi(xseq, y_hat, d), score) for the beam's top node.
    Fix: removed the dead local `searchError`, which was assigned but never used.
    """
    d = w.shape[1]
    #beam initialization function, I() in the lit. The beam contains (sequence,score) tuples
    beam = _beamSearchInitialization(xseq,w,beamWidth)
    yLen = len(y_star)
    #advance until the beam holds a complete output, or loses every good (gold-prefix) node
    while not _beamHasCompleteOutput(beam, yLen) and _beamHasGoodNode(beam,y_star):
        beam = beamUpdateMethod(beam,beamWidth,xseq,w,phi)
    #the beam is sorted, so the highest-scoring node is first
    return beam[0][0], phi(xseq, beam[0][0], d), beam[0][1]
"""
The test version of beam-inference. Currently this implements best-first search, but can be easily modified to do BFS instead.
"""
def _beamSearchInference(xseq, w, phi, outputLen, beamUpdateMethod, beamWidth):
    """Test-time beam inference: expand the beam until it contains a complete
    output of length @outputLen, then return the top node, its features, and its
    score. Search order (best-first vs breadth-first) is set by @beamUpdateMethod."""
    featureDim = w.shape[1]
    #beam of (sequence, score) tuples; see _beamSearchInitialization
    beam = _beamSearchInitialization(xseq,w,beamWidth)
    while not _beamHasCompleteOutput(beam, outputLen):
        beam = beamUpdateMethod(beam,beamWidth,xseq,w,phi)
    #the beam is sorted, so the highest-scoring node is first
    best = beam[0]
    return best[0], phi(xseq, best[0], featureDim), best[1]
"""
Returns hamming loss for two strings. See wiki.
Returns: hamming loss, which is not the total number of incorrect characters, but rather the number
of incorrect characters weighed by length.
"""
def _getHammingDist(y_star, y_hat):
loss = 0.0
#count character by character losses
for i in range(min(len(y_star), len(y_hat))):
if y_star[i] != y_hat[i]:
loss += 1.0
#loss, accounting for differences in length
loss += float(abs(len(y_star) - len(y_hat)))
return loss
#return loss / float(max(len(y_star), len(y_hat))) #TODO: Div zero
"""
Util for getting the correct phi function to pass around
@phiNum: The integer number of the phi funtion (1 for pairwise, 2 for triples, 3 for quadruples)
"""
def _getPhi(phiNum):
    """Map a phi number to its feature function.

    @phiNum: 1 for pairwise features, 2 for triples, 3 for quadruples
    Exits the program with an error message on any other value.
    """
    if phiNum == 1:
        return _Phi1  #first order features
    if phiNum == 2:
        return _Phi2  #second order features
    if phiNum == 3:
        return _Phi3  #third order features
    print("ERROR phi not found "+str(phiNum))
    exit()
"""
@phiNum: phi number (1 for pairwise/bigram y features, 2 for triplets, etc)
@xdim: The dimensionality of each x; here, likely 128, for 128-bit inpu vectors
"""
def _getDim(phiNum, xdim):
    """Dimensionality of the weight vector for the given feature order.

    @phiNum: feature order (1 for pairwise/bigram y features, 2 for triples, 3 for quadruples)
    @xdim: dimensionality of each x, e.g. 128 for 128-bit input vectors
    Exits the program with an error message on an unknown phi number.
    """
    if phiNum not in (1, 2, 3):
        print("ERROR phi not found "+str(phiNum))
        exit()
    #unary block (xdim * K) plus one K^n block for each modeled label n-gram order:
    #phi 1 adds K^2, phi 2 adds K^2 + K^3, phi 3 adds K^2 + K^3 + K^4
    return xdim * K + sum(K ** n for n in range(2, phiNum + 2))
"""
Configures all global parameters. Note that xdim is determined beforehand by the dataset.
"""
def _configureGlobalParameters(xdim, phiNum, dataPath):
    """Initialize the module globals used throughout training/testing.

    @xdim: input vector dimensionality (determined beforehand by the dataset)
    @phiNum: feature order (1/2/3); enables USE_TRIPLES / USE_QUADS
    @dataPath: used only to choose the label set (nettalk vs ocr)
    Side effects: sets LABELS, K, XDIM, USE_TRIPLES, USE_QUADS and the
    pseudo-random label index pool, and builds the vector index dictionaries.
    """
    global LABELS
    global K
    global XDIM
    global USE_TRIPLES
    global USE_QUADS
    global RAND_LABEL_INDICES
    global RAND_RING_INDEX
    global RAND_INTS_LEN
    XDIM = xdim
    #label set is simply hardcoded to nettalk or ocr datasets; this assumes nettalk dataset has been manually modified to map 01 to 'A', 02 to 'B' and so on, for simplicity
    if "nettalk" in dataPath:
        LABELS = "ABCDE"
    else:
        LABELS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    K = len(LABELS)
    #prepare a set of pseudo-random numbers indices into labels in advance, of prime length, such that randint() doesn't need to be called at high frequency
    RAND_LABEL_INDICES = [random.randint(0,len(LABELS)-1) for i in range(0,1000000)]
    RAND_RING_INDEX = 0
    RAND_INTS_LEN = len(RAND_LABEL_INDICES)
    #build the vector index dictionaries
    _buildVectorIndexDicts(XDIM,LABELS)
    USE_TRIPLES = False
    USE_QUADS = False
    if phiNum >= 2:
        USE_TRIPLES = True
    if phiNum >= 3:
        USE_QUADS = True
"""
Utility for getting the ocr data as a dataset.
Expects a dataset of tab delimited records, formatted as in the ocr and nettalk data:
"3 im000000010000000000000000000000010000000000000000000000100000000000000000000000000 01 _"
Note that as a side-effect, this function configures the feature vector dimension component XDIM, which is determined by the dataset.
Returns: A list of training examples. Each training example is a pairwise x and y sequence: ([xsequence], [ysequence])
xsequence: A list of real-valued numpy vectors of size m; each x vector is binary
ysequence: A list of symbols/labels
Also returns xdim, the dimension of the data.
"""
def _getData(dataPath):
#build the dataset as a list of x/y tuples
D = []
dataFile = open(dataPath,"r")
records = [line.strip().replace("\t"," ") for line in dataFile.readlines()]
dataFile.close()
xseq = []
yseq = []
#get dimension from the first x example
xdim = len(records[0].split(" ")[1].replace("im","").strip())
#exit()
for line in records:
#print("line: "+line)
if len(line) > 10:
binaryString = line.split(" ")[1].replace("im","")
#x = int(binaryString, 2)
x = np.zeros((1,xdim), dtype=np.float32)
for i in range(len(binaryString)):
if binaryString[i] == "1":
x[0,i] = 1
xseq.append(x)
yseq.append(line.split(" ")[2][0].strip().upper())
if len(line.strip()) < 10 and not high:
D.append((xseq,yseq))
high = True
xseq = []
yseq = []
else:
high = False
return D, xdim
"""
_getData returns a dataset as a list of x/y sequence pairs: [ (["0000101","00010110"],["a","t"]), etc. ]
This is inefficient for the structured perceptron construction, as the binary strings must be parsed during learning to map
each x_i to its component in an input vector.
The solution is obviously to format the dataset in the proper manner before training, instead of doing so many thousands of more times
during training. Recall that each x_i binary string maps to some component of the unary components of the weight vector;
Returns: The new dataset as a list of x+y numpy vectors of the required dimension, under the construction required by the Phi function.
"""
def _preformatData(D,phi,d):
return [phi(example[0], example[1], d) for example in D]
"""
Given some test data, get a prediction from InferRGS()
"""
def TestPerceptron(w, phiNum, R, beamUpdateMethod, beamWidth, testData):
    """Evaluate trained weights @w on @testData via beam-search inference.

    @w: trained 1 x d weight vector; @phiNum: feature order (selects phi)
    @R: RGS restart count (unused on the beam-search path; kept for interface parity)
    @beamUpdateMethod / @beamWidth: beam expansion function and beam size
    Returns: (losses, accuracy) — the per-example Hamming losses and an aggregate
    accuracy percentage.
    NOTE(review): accuracy divides the summed loss by K * #examples, i.e. it
    treats every sequence as though it were K symbols long — confirm intended.
    """
    losses = []
    phi = _getPhi(phiNum)
    #filter the data of too-short examples
    testData = _filterShortData(testData, phiNum)
    print("Testing weights over "+str(len(testData))+" examples. This may take a while.")
    i = 0
    for example in testData:
        xseq = example[0]
        y_star = example[1]
        #y_hat, phi_y_hat, score = InferRGS(xseq, w, phi, R)
        y_hat, phi_y_hat, score = _beamSearchInference(xseq, w, phi, len(y_star), beamUpdateMethod, beamWidth)
        #y_hat, phi_y_hat, score = InferRGS_Inefficient(xseq, w, phi, R)
        #print("hat: "+str(y_hat))
        #print("star: "+str(y_star))
        loss = _getHammingDist(y_star, y_hat)
        losses.append(loss)
        i += 1
        if i % 50 == 49:
            print("\rTest datum "+str(i)+" of "+str(len(testData))+" ",end="")
    #get Hamming accuracy
    accuracy = 100.0 * (1.0 - sum(losses) / (float(K) * float(len(testData))))
    print("Accuracy: "+str(accuracy)+"%")
    return losses, accuracy
def _filterShortData(D, phiNum):
requiredLength = 2
if phiNum == 2:
requiredLength = 3
if phiNum == 3:
requiredLength = 4
return [d for d in D if len(d[1]) >= requiredLength]
"""
@D: A list of training examples in the pairwise form [ (xseq,yseq), (xseq, yseq) ... ].
@R: Number of restarts for RGS
@phiNum: The structured feature function phi(x,y)
@maxIt: max iterations
@eta: learning rate
@errorUpdateMethod: A function performing either bfs or early update
@beamUpdateMethod: A function for performing beam updates, either best-first or breadth-first beam update
Returns: trained weight vector @w, the losses, and the accuracy of the final iteration
"""
def OnlinePerceptronTraining(D, R, phiNum, maxIt, eta, errorUpdateMethod, beamUpdateMethod, beamWidth):
    """Online structured-perceptron training over dataset @D.

    @D: list of (xseq, yseq) training examples
    @R: RGS restart count (unused on the beam-search path; kept for interface parity)
    @phiNum: feature order (selects phi and the weight dimensionality)
    @maxIt: number of passes over the data; @eta: learning rate
    @errorUpdateMethod: standard/early/max-violation update function
    @beamUpdateMethod / @beamWidth: beam expansion function and beam size
    Returns: (w, losses, finalAccuracy) — trained weights, per-iteration summed
    losses (divided by K), and the accuracy of the last iteration.
    Relies on globals XDIM and K set by _configureGlobalParameters().
    """
    phi = _getPhi(phiNum)
    xdim = XDIM
    print("xdim: "+str(xdim))
    dim = _getDim(phiNum, xdim)
    print("wdim: "+str(dim))
    #intialize weights of scoring function to 0
    w = np.zeros((1,dim), dtype=np.float32)
    #w[0,:] = 1.0
    d = w.shape[1]
    #filter out the training examples of length less than the n-gram features
    D = _filterShortData(D,phiNum)
    #get a preformatted cache of the data, to help cut down on constant phi re-computations
    preprocessedData = _preformatData(D,phi,d)
    #a list of sum losses over an entire iteration
    losses = []
    print("num training examples: "+str(len(D)))
    for i in range(maxIt):
        sumItLoss = 0.0
        for j in range(len(D)):
            #sequential training
            xseq, y_star = D[j]
            #stochastic training: select a random training example (stochastic training)
            #x, y_star = D[random.randint(0,len(D)-1)] #IF USED, MAKE SURE TO USE CORRECT zStar BELOW IN preprocessedData[] index!
            #print("y_star: "+str(y_star))
            #get predicted structured output
            #print("j="+str(j)+" of "+str(len(D)))
            #y_hat = _getRandomY(labels, len(y_star))
            #y_hat, phi_y_hat, score = InferBestFirstBeamSearch(xseq, w, phi, beamWidth)
            y_hat, phi_y_hat, score = errorUpdateMethod(xseq, y_star, w, phi, beamUpdateMethod, beamWidth)
            #y_hat, phi_y_hat, score = InferRGS(xseq, w, phi, R)
            #y_hat, phi_y_hat, score = InferRGS_Inefficient(xseq, w, phi, R)
            #print("y_hat: "+str(y_hat)+" score: "+str(score))
            #get the hamming loss
            #print("ystar: "+str(y_star))
            #print("yhat: "+str(y_hat))
            loss = _getHammingDist(y_star, y_hat)
            if loss > 0:
                #perceptron update toward the gold features, away from the prediction
                #zStar = preprocessedData[j] #effectively this is phi(x, y_star, d), but preprocessed beforehand to cut down on computations
                #w = w + eta * (phi(xseq, y_star, d) - phi_y_hat)
                #w = w + eta * (phi(xseq, y_star, d) - phi(xseq, y_hat, d))
                w = w + eta * (preprocessedData[j] - phi_y_hat)
            sumItLoss += loss
        #append the total loss for this iteration, for plotting
        losses.append(sumItLoss/float(K))
        print("iter: "+str(i)+" it-loss: "+str(losses[-1])+" it-accuracy: "+str(1.0 - losses[-1]/float(len(D))))
    return w, losses, 1.0 - losses[-1]/float(len(D))
def usage():
    """Print the command-line usage banner for the structured perceptron script."""
    print("python StructuredPerceptron --trainPath=[] --testPath=[]")
    print("Optional (prefixed '--'): eta, maxIt, R, beamWidth, showPlot, phi, searchError (mv, early, std), beamUpdate (bfs, best) ")
#--- command-line defaults ---------------------------------------------------
trainPath = None
testPath = None
showPlot = False
searchMethod = "std"
beamUpdateType = "best"
beamUpdateMethod = _bestFirstBeamUpdate
searchErrorUpdateMethod = _standardUpdate
R = 10
phiNum = 1
maxIt = 50
eta = 0.01
beamWidth = 10
#--- parse command-line options (simple substring matching, see usage()) -----
for arg in sys.argv:
    if "--trainPath=" in arg:
        trainPath = arg.split("=")[1]
    if "--testPath=" in arg:
        testPath = arg.split("=")[1]
    if "--eta=" in arg:
        eta = float(arg.split("=")[1])
    if "--phi=" in arg:
        phiNum = int(arg.split("=")[1])
    if "--maxIt=" in arg:
        maxIt = int(arg.split("=")[1])
    if "--R=" in arg:
        R = int(arg.split("=")[1])
    if "--showPlot" in arg:
        showPlot = True
    if "--beamWidth=" in arg:
        beamWidth = int(arg.split("=")[1])
    if "--beamUpdate=" in arg:
        if arg.split("=")[1].lower() == "bfs":
            beamUpdateType = "bfs"
            beamUpdateMethod = _breadthFirstBeamUpdate
        elif arg.split("=")[1].lower() == "best":
            beamUpdateType = "best"
            beamUpdateMethod = _bestFirstBeamUpdate
        else:
            print("ERROR beam update option not found: "+arg.split("=")[1])
            usage()
            exit()
    if "--searchError=" in arg:
        if arg.split("=")[1].lower() == "mv":
            searchMethod = "mv"
            searchErrorUpdateMethod = _maxViolationUpdate
        elif arg.split("=")[1].lower() == "early":
            searchMethod = "early"
            searchErrorUpdateMethod = _earlyUpdate
        elif arg.split("=")[1].lower() == "std" or arg.split("=")[1].lower() == "standard":
            searchMethod = "std"
            searchErrorUpdateMethod = _standardUpdate
        else:
            print("ERROR search update option not found: "+arg.split("=")[1])
            usage()
            exit()
#NOTE(review): these flags are overwritten by _configureGlobalParameters() below,
#which sets them from phiNum anyway — presumably redundant; confirm before removing
if phiNum == 2:
    USE_TRIPLES = True
elif phiNum == 3:
    USE_QUADS = True
if trainPath == None:
    print("ERROR no trainPath passed")
    usage()
    exit()
if testPath == None:
    print("ERROR no testPath passed")
    usage()
    exit()
#--- load data, configure globals, train, then test --------------------------
trainData, xdim = _getData(trainPath)
testData, _ = _getData(testPath)
_configureGlobalParameters(xdim, phiNum, trainPath)
print("Global params configured")
print("\tLABELS: "+LABELS)
print("\tK: "+str(K))
print("\tXDIM: "+str(XDIM))
print("\tUSE_TRIPLES: "+str(USE_TRIPLES))
print("\tUSE_QUADS: "+str(USE_QUADS))
print("Executing with maxIt="+str(maxIt)+" R="+str(R)+" eta="+str(eta)+" phiNum="+str(phiNum)+" beam="+str(beamWidth)+" trainPath="+trainPath+" testPath="+testPath)
print("searchUpdate="+searchMethod+" beamUpdate="+beamUpdateType+" maxIt="+str(maxIt))
print("xdim="+str(xdim))
#print(str(trainData[0]))
#print("lenx: "+str(len(trainData[0][0]))+" leny: "+str(len(trainData[0][1])))
w, trainingLosses, trainAccuracy = OnlinePerceptronTraining(trainData, R, phiNum, maxIt, eta, searchErrorUpdateMethod, beamUpdateMethod, beamWidth)
#normalize the summed-losses to make them accuracy measures
trainingLosses = [1.0 - loss / float(len(trainData)) for loss in trainingLosses]
SaveLosses(trainingLosses, trainAccuracy, beamWidth, trainPath, searchMethod, beamUpdateType, "Hamming Accuracy per Training Iteration",False,True)
#run testing on only 1/4th of test data, since for this assignment we have way more test data than training data
print("WARNING: Truncating test data from "+str(len(testData))+" data points to "+str(len(testData)/4))
testData = testData[0:int(len(testData)/4)]
testLosses, testAccuracy = TestPerceptron(w, phiNum, R, beamUpdateMethod, beamWidth, testData)
SaveLosses(testLosses, testAccuracy, beamWidth, trainPath, searchMethod, beamUpdateType, "Hamming Accuracy per Test Iteration",False,False)
|
def get_divisor(num):
    """Return all positive divisors of num, sorted ascending.

    Walks candidates only up to sqrt(num); each divisor i found is paired with
    its complement num // i, so the scan is O(sqrt(num)) instead of O(num).
    Fixes: num == 1 now returns [1] (the old num // 2 loop bound produced an
    empty result), and the redundant special cases for 2 and 3 are removed.
    """
    divisors = set()
    i = 1
    while i * i <= num:
        if num % i == 0:
            divisors.add(i)
            divisors.add(num // i)  # complement divisor; equals i for perfect squares
        i += 1
    return sorted(divisors)
if __name__ == "__main__":
    # Quick manual check over a few representative inputs
    for value in (2, 3, 4, 10, 30, 49):
        print(get_divisor(value))
|
import torch
from torch import nn
import torchvision
from torchvision import transforms
import matplotlib.pyplot as plt
import numpy as np
import time
from tqdm import tqdm
import urllib.request
import argparse
from resnet import *
def main(opt):
    """Sanity-check the residual blocks, fetch the ImageNet64 dataset, and train
    ResNet50, saving the final weights to resnet50.pt.

    @opt: argparse namespace with batch_size, force_cpu, channels, classes,
          epochs, learning_rate, num_workers, weight_decay.
    """
    # Acquiring device
    device = "cpu"
    if not opt.force_cpu:
        device = "cuda" if torch.cuda.is_available() else "cpu"
    print("{}\n".format(device))
    if device == "cuda":
        current = torch.cuda.current_device()
        print(torch.cuda.device(current))
        print(torch.cuda.get_device_name(current))
        print(torch.cuda.get_device_capability(current))
        print(torch.cuda.get_device_properties(current))
    # Testing old residual block
    module = Old_ResidualBlock(256, 128, 1)
    input = torch.zeros((3, 256, 7, 7))
    output = module(input)
    print(output.shape)
    # Testing new residual block
    module = New_ResidualBlock(256, 128, 1).to(device)
    input = torch.zeros((3, 256, 7, 7)).to(device)
    output = module(input)
    print(output.shape)
    # Testing resnet50
    net = ResNet50(opt.classes, opt.channels)
    x = torch.randn((1, opt.channels, opt.classes, opt.classes))
    print(net(x).shape)
    # Downloading ImageNet
    import os
    import shutil
    checkpoints = '/home/imagenet'
    if not os.path.exists(checkpoints):
        os.makedirs(checkpoints)
    # fix: the original concatenated checkpoints + 'imagenet64.tar' without a path
    # separator, so the existence check and copy never matched the downloaded file
    archive = os.path.join(checkpoints, 'imagenet64.tar')
    if not os.path.exists('imagenet64'):
        if not os.path.exists(archive):
            print("Downloading archive...")
            os.chdir(checkpoints)
            urllib.request.urlretrieve("https://pjreddie.com/media/files/imagenet64.tar", filename="imagenet64.tar")
            os.chdir('..')
        print("Copying to local runtime...")
        shutil.copy(archive, './imagenet64.tar')
        print("Uncompressing...")
        os.system("tar -xf imagenet64.tar")
        print("Data ready!")
    # Loading Dataset
    transform_train = transforms.Compose([
        transforms.Resize(250),
        transforms.RandomCrop(224, padding=1, padding_mode='edge'),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(),
        transforms.ToTensor(),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
    ])
    trainset = torchvision.datasets.ImageFolder(root='./imagenet64/train/', transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=opt.batch_size, shuffle=True, num_workers=opt.num_workers)
    testset = torchvision.datasets.ImageFolder(root='./imagenet64/val/', transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset, batch_size=opt.batch_size, shuffle=True, num_workers=opt.num_workers)
    # Show some images
    trainiter = iter(trainloader)
    # fix: the .next() method was removed from DataLoader iterators; use next()
    images, labels = next(trainiter)
    images = images[:8]
    print(images.size())
    def imshow(img):
        # CHW tensor -> HWC numpy array for matplotlib
        npimg = img.numpy()
        plt.imshow(np.transpose(npimg, (1, 2, 0)))
        plt.show()
    imshow(torchvision.utils.make_grid(images))
    print("Labels:" + ' '.join('%9s' % labels[j] for j in range(8)))
    flat = torch.flatten(images, 1)
    print(images.size())
    print(flat.size())
    # Create the model
    model = ResNet50(opt.classes, opt.channels).to(device)
    # Train
    crit = nn.CrossEntropyLoss()
    optim = torch.optim.Adam(model.parameters(), lr=opt.learning_rate, weight_decay=opt.weight_decay)
    model.zero_grad()
    model.train()
    for e in range(opt.epochs):
        pbar = tqdm(trainloader, desc="Epoch {} - Batch idx: {} - loss: {}".format(e, 0, "calculating..."))
        for batch_idx, (x,y) in enumerate(pbar):
            optim.zero_grad()
            x = x.to(device)
            # Fixes vs. the original loop:
            #  - CrossEntropyLoss takes (input, target); the arguments were swapped.
            #  - No softmax before CrossEntropyLoss: the loss already applies
            #    log-softmax internally, so the extra softmax double-normalized.
            #  - Integer class targets replace the hand-built one-hot tensor, which
            #    was sized by opt.batch_size and broke on the final partial batch.
            y = y.to(device)
            start = time.process_time()
            logits = model(x)
            stop = time.process_time()
            loss = crit(logits, y)
            loss.backward()
            optim.step()
            pbar.set_description(desc="Epoch {} - Batch idx: {} - loss: {:.6f} - runtime: {:.3f}ms".format(e, batch_idx, loss.item(), (stop-start)*1000), refresh=True)
    torch.save(model.state_dict(), "resnet50.pt")
if __name__ == "__main__":
    # Build the CLI from a (flags, options) table to keep the definitions compact
    parser = argparse.ArgumentParser(prog = 'ResNet', description = 'Image Classification Network', epilog = 'Ciao')
    option_table = [
        (('-bs', '--batch-size'), dict(type=int, default=256, help='Training and Testing batch size')),
        (('-cpu', '--force-cpu'), dict(action='store_true', default=False, help='Force CPU usage during training and testing')),
        (('-chn', '--channels'), dict(type=int, default=3, help='Number of channels in input images')),
        (('-cls', '--classes'), dict(type=int, default=1000, help='Number of classes')),
        (('-ep', '--epochs'), dict(type=int, default=5, help='Training epochs')),
        (('-lr', '--learning-rate'), dict(type=float, default=0.001, help='Learning rate')),
        (('-nw', '--num-workers'), dict(type=int, default=0, help='Number of workers in dataloader')),
        (('-wd', '--weight-decay'), dict(type=float, default=0.00001, help='Weight decay')),
    ]
    for flags, options in option_table:
        parser.add_argument(*flags, **options)
    opt = parser.parse_args()
    main(opt)
|
import sys
import hashlib
from hashlib import sha1
import hmac
import base64
from datetime import datetime
from datetime import timezone
import requests
import json
import asyncio
import aiohttp
from soliscloud_api.soliscloud_api import *
DELAY = 1 #seconds to pause between API calls (SolisCloud rate limiting)
plant_ids = None #station ids; autodetected in call_solis() when left as None
inverter_ids = {} #station id -> [inverter serial numbers], filled by inverter_details_per_station()
# Provided by Solis Support
KeyId = 'XXXXXXXXXXXXXX'
secretKey = b'YYYYYYYYYYYY'
#plant_ids = [1234567891234567891] # 19-digit value, separate multiple ID's with a comma ([ID1, ID2, ID3])
async def details_per_station(api, station_ids):
    """Fetch and report station_detail for every station id in @station_ids,
    pausing DELAY seconds around each call to respect API rate limits."""
    for station_id in station_ids:
        try:
            await asyncio.sleep(DELAY)
            response = await api.station_detail(KeyId, secretKey, station_id=station_id)
            print("station_detail(station_id=",station_id,"): [OK]")
            if verbose: print(json.dumps(response,sort_keys=True))
            await asyncio.sleep(DELAY)
        except SoliscloudAPI.SolisCloudError as err:
            print("station_detail(station_id=",station_id,"): Failed with:",err)
async def collector_details_per_station(api, station_ids):
    """List each station's data collectors and fetch each collector's detail.

    Fix: a rate-limit sleep now precedes the collector_list call as well,
    matching every sibling helper (e.g. inverter_details_per_station), so
    back-to-back stations no longer hit the API without pausing.
    """
    for sid in station_ids:
        try:
            await asyncio.sleep(DELAY)
            response = await api.collector_list(KeyId, secretKey, station_id=sid)
            if verbose: print(json.dumps(response,sort_keys=True))
            if response is not None:
                print("collector_list(station_id=",sid,"): [OK]")
                for record in response:
                    await asyncio.sleep(DELAY)
                    try:
                        response = await api.collector_detail(KeyId, secretKey, collector_sn=record["sn"])
                        if response is not None:
                            print("collector_detail(station_id=",sid,", collector_sn=",record["sn"],"): [OK]")
                            if verbose: print(json.dumps(response,sort_keys=True))
                    except SoliscloudAPI.SolisCloudError as err:
                        print("collector_detail(station_id=",sid,", collector_sn=",record["sn"],"): Failed with:",err)
        except SoliscloudAPI.SolisCloudError as err:
            print("collector_list(",sid,"): Failed with:",err)
async def inverter_details_per_station(api, station_ids):
    """List each station's inverters, fetch their details, and record the serial
    numbers in the global inverter_ids map (consumed later by inverter_graphs)."""
    global inverter_ids
    for sid in station_ids:
        inverter_ids[sid] = []
        try:
            await asyncio.sleep(DELAY)
            response = await api.inverter_list(KeyId, secretKey, station_id=sid)
            if response is not None:
                print("inverter_list(",sid,"): [OK]")
                if verbose: print(json.dumps(response,sort_keys=True))
                for record in response:
                    await asyncio.sleep(DELAY)
                    try:
                        inverter_ids[sid].append(record["sn"])
                        response = await api.inverter_detail(KeyId, secretKey, inverter_sn=record["sn"])
                        if response is not None:
                            print("inverter_detail(station_id =",sid,", inverter_sn =",record["sn"],"): [OK]")
                            if verbose: print(json.dumps(response,sort_keys=True))
                    except SoliscloudAPI.SolisCloudError as err:
                        print("inverter_detail(station_id=",sid,", inverter_sn=",record["sn"],"): Failed with:",err)
        except SoliscloudAPI.SolisCloudError as err:
            print("inverter_list(station_id =",sid,"): Failed with:",err)
async def station_graphs(api, station_ids):
    """Exercise the per-station graph endpoints (day/month/year/all) for each station.

    Fix: the original only rate-limited the first (station_day) call; a DELAY
    sleep now precedes every request, matching inverter_graphs and the helpers.
    """
    for sid in station_ids:
        try:
            await asyncio.sleep(DELAY)
            response = await api.station_day(KeyId, secretKey, currency="EUR", time="2022-12-27", time_zone=1, station_id=sid)
            if response is not None:
                print("station_day(",sid,"): [OK]")
                if verbose: print(json.dumps(response,sort_keys=True))
        except SoliscloudAPI.SolisCloudError as err:
            print("station_day(station_id =",sid,"): Failed with:",err)
        try:
            await asyncio.sleep(DELAY)
            response = await api.station_month(KeyId, secretKey, currency="EUR", month="2022-12", station_id=sid)
            if response is not None:
                print("station_month(",sid,"): [OK]")
                if verbose: print(json.dumps(response,sort_keys=True))
        except SoliscloudAPI.SolisCloudError as err:
            print("station_month(station_id =",sid,"): Failed with:",err)
        try:
            await asyncio.sleep(DELAY)
            response = await api.station_year(KeyId, secretKey, currency="EUR", year="2022", station_id=sid)
            if response is not None:
                print("station_year(",sid,"): [OK]")
                if verbose: print(json.dumps(response,sort_keys=True))
        except SoliscloudAPI.SolisCloudError as err:
            print("station_year(station_id =",sid,"): Failed with:",err)
        try:
            await asyncio.sleep(DELAY)
            response = await api.station_all(KeyId, secretKey, currency="EUR", station_id=sid)
            if response is not None:
                print("station_all(",sid,"): [OK]")
                if verbose: print(json.dumps(response,sort_keys=True))
        except SoliscloudAPI.SolisCloudError as err:
            print("station_all(station_id =",sid,"): Failed with:",err)
async def inverter_graphs(api, station_ids):
    """Exercise the per-inverter graph endpoints (day/month/year/all) for every
    inverter previously recorded in the global inverter_ids map, sleeping DELAY
    seconds before each API call."""
    #endpoint name -> extra keyword arguments for that endpoint
    graph_calls = [
        ("inverter_day", {"currency": "EUR", "time": "2022-12-27", "time_zone": 1}),
        ("inverter_month", {"currency": "EUR", "month": "2022-12"}),
        ("inverter_year", {"currency": "EUR", "year": "2022"}),
        ("inverter_all", {"currency": "EUR"}),
    ]
    for sid in station_ids:
        for isn in inverter_ids[sid]:
            for name, extra in graph_calls:
                try:
                    await asyncio.sleep(DELAY)
                    response = await getattr(api, name)(KeyId, secretKey, inverter_sn=isn, **extra)
                    if response is not None:
                        print(name+"(",isn,"): [OK]")
                        if verbose: print(json.dumps(response,sort_keys=True))
                except SoliscloudAPI.SolisCloudError as err:
                    print(name+"(inverter_id =",isn,"): Failed with:",err)
async def call_solis(api):
    """Resolve the list of station ids (unless preset via the global plant_ids)
    and then run every per-station/collector/inverter check in sequence."""
    global plant_ids
    if plant_ids is None:
        try:
            response = await api.user_station_list(KeyId, secretKey)
            if response is not None:
                print("user_station_list: [OK]")
                if verbose: print(json.dumps(response,sort_keys=True))
                plant_ids = [int(record["id"]) for record in response]
                print(plant_ids)
        except SoliscloudAPI.SolisCloudError as err:
            print("user_station_list(): Failed with:",err)
            print("Falling back to station_detail_list")
            #secondary discovery endpoint, tried only when user_station_list fails
            try:
                response = await api.station_detail_list(KeyId, secretKey)
                if response is not None:
                    print("station_detail_list: [OK]")
                    if verbose: print(json.dumps(response,sort_keys=True))
                    plant_ids = [record["id"] for record in response]
            except SoliscloudAPI.SolisCloudError as err:
                print("station_detail_list(): Failed with:",err)
    else:
        print("Using predefined station list")
    if plant_ids is None:
        print("Cannot retrieve station ID's, giving up")
        return
    await details_per_station(api, plant_ids)
    await collector_details_per_station(api, plant_ids)
    await inverter_details_per_station(api, plant_ids)
    await station_graphs(api, plant_ids)
    await inverter_graphs(api, plant_ids)
async def main():
    """Create an aiohttp session and fan out the API exercise via call_solis().

    Fix: asyncio.get_event_loop() inside a coroutine is deprecated (and an error
    in newer Python); asyncio.get_running_loop() is the supported replacement.
    """
    loop = asyncio.get_running_loop()
    async with aiohttp.ClientSession(loop=loop) as session:
        api = SoliscloudAPI('https://www.soliscloud.com:13333', session)
        #single task today; the list form makes it easy to fan out more clients
        cwlist = [loop.create_task(call_solis(api)) for i in range(1)]
        responses = await asyncio.gather(*cwlist, return_exceptions=True)
        print("Exceptions: ",responses)
#global verbosity flag read by all the helper coroutines above
verbose = False
#-v/--verbose as the first argument turns on raw JSON dumps of every API response
if len(sys.argv) > 1 and (sys.argv[1] == '--verbose' or sys.argv[1] == '-v'):
    print("Verbose")
    verbose = True
asyncio.run(main())
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
# class custom_apps(models.Model):
# _name = 'custom_apps.custom_apps'
# name = fields.Char()
# value = fields.Integer()
# value2 = fields.Float(compute="_value_pc", store=True)
# description = fields.Text()
#
# @api.depends('value')
# def _value_pc(self):
# self.value2 = float(self.value) / 100
class PurchaseOrder(models.Model):
    """Extends purchase.order with a supplier-quote reference, an amount-in-words
    field for printed reports, and links to a project and originating sale order."""
    _inherit = "purchase.order"
    # Supplier's quotation ("devis fournisseur") reference number
    proforma_invoice = fields.Char(string="N° Devis Fournisseur :", store=True)
    # Total amount spelled out in words, for printed reports
    total_amount_letter = fields.Text(string="Montant total en lettre:")
    project_id = fields.Many2one("project.project", "Project", ondelete="set null")
    sale_order_id = fields.Many2one("sale.order", "Sale Order", ondelete="set null")
    #product_code = fields.Many2one("product.product", "Product Code", related="product_id.default_code")
class PurchaseOrderLine(models.Model):
    """Extends purchase.order.line with an item number, an optional schedule date,
    and related product code/description fields for reporting."""
    _inherit = "purchase.order.line"
    # Line item number shown on the printed purchase order
    item = fields.Integer(string="Item", store=True)
    # Made optional (required=False) so lines can be saved without a schedule date
    date_planned = fields.Datetime(string='Scheduled Date', required=False, index=True)
    # Internal reference and name of the selected product (related, read-only)
    product_code = fields.Char(string = "Product Code",
        related="product_id.default_code")
    product_name = fields.Char(string="Product Description",
        related="product_id.name")
    '''
    @api.onchange('product_id')
    def onchange_product_id(self):
        result = {}
        if not self.product_id:
            return result
        # Reset date, price and quantity since _onchange_quantity will provide default values
        self.date_planned = datetime.today().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        self.price_unit = self.product_qty = 0.0
        self.product_uom = self.product_id.uom_po_id or self.product_id.uom_id
        #Added By Halltech Africa
        self.product_code = self.product_id.code
        result['domain'] = {'product_uom': [('category_id', '=', self.product_id.uom_id.category_id.id)]}
        product_lang = self.product_id.with_context(
            lang=self.partner_id.lang,
            partner_id=self.partner_id.id,
        )
        self.name = product_lang.display_name
        if product_lang.description_purchase:
            self.name += '\n' + product_lang.description_purchase
        fpos = self.order_id.fiscal_position_id
        if self.env.uid == SUPERUSER_ID:
            company_id = self.env.user.company_id.id
            self.taxes_id = fpos.map_tax(self.product_id.supplier_taxes_id.filtered(lambda r: r.company_id.id == company_id))
        else:
            self.taxes_id = fpos.map_tax(self.product_id.supplier_taxes_id)
        self._suggest_quantity()
        self._onchange_quantity()
        return result
    '''
|
"""This is an ugly brute-force solution since I did not know about
https://en.wikipedia.org/wiki/Summed-area_table"""
def power_level(x, y, serial_number):
rack_id = x + 10
power = rack_id * y
power += serial_number
power *= rack_id
hundred_digit = int(str(power)[-3])
return hundred_digit - 5
def square_power(power_levels, x, y, side=3):
    """Total power of the side x side square whose top-left corner is (x, y),
    read from the precomputed {(x, y): power} mapping."""
    return sum(power_levels[(cx, cy)]
               for cx in range(x, x + side)
               for cy in range(y, y + side))
def all_sides(x, y):
    """Every square side length that still fits inside the 300x300 grid when the
    square's top-left corner is at (x, y)."""
    largest = 301 - max(x, y)
    return range(1, largest + 1)
def power_diff_from_one_step_left(power_levels, x, y, side):
    """Change in square power when the square one step to the left, at (x-1, y),
    slides right to (x, y): add the arriving right column and subtract the
    departing left column."""
    rows = range(y, y + side)
    entering = sum(power_levels[(x + side - 1, row)] for row in rows)
    leaving = sum(power_levels[(x - 1, row)] for row in rows)
    return entering - leaving
def power_diff_from_one_step_up(power_levels, x, y, side):
    """Change in square power when the square one step up, at (x, y-1), slides
    down to (x, y): add the arriving bottom row and subtract the departing top row."""
    cols = range(x, x + side)
    entering = sum(power_levels[(col, y + side - 1)] for col in cols)
    leaving = sum(power_levels[(col, y - 1)] for col in cols)
    return entering - leaving
def largest_power_coordinate(serial_number, any_size=False):
    """Find the square(s) of maximum total power on the 300x300 fuel grid.

    Returns the list of (x, y, side) keys achieving the maximum total.
    With any_size=False only side 3 is considered (part 1); with
    any_size=True every side from 300 down to 1 is searched (part 2).
    Each square's total is derived incrementally from its left or upper
    neighbour instead of re-summing the whole square.
    NOTE(review): `powers` keeps every (x, y, side) entry, so the
    any_size search is extremely memory-hungry; a summed-area table
    (see module docstring) would avoid both the memory and the time cost.
    """
    # Precompute each individual cell's power level once.
    power_levels = {}
    for x in range(1, 301):
        for y in range(1, 301):
            power_levels[(x, y)] = power_level(x, y, serial_number)
    powers = {}
    sides = range(300, 0, -1) if any_size else [3]
    for side in sides:
        print(side)  # progress output -- the any_size search is very slow
        for x in range(1, 301 - side):
            for y in range(1, 301 - side):
                # Prefer the incremental O(side) updates; fall back to the
                # full O(side^2) sum only for the first square of a size.
                if (x - 1, y, side) in powers:
                    powers[(x, y, side)] = powers[(x - 1, y, side)] + \
                                           power_diff_from_one_step_left(
                                               power_levels, x, y,
                                               side)
                elif (x, y - 1, side) in powers:
                    powers[(x, y, side)] = powers[(x, y - 1, side)] + \
                                           power_diff_from_one_step_up(
                                               power_levels, x, y, side)
                else:
                    powers[(x, y, side)] = square_power(power_levels, x, y,
                                                        side)
    max_val = max(powers.values())
    return [key for key in powers.keys() if powers[key] == max_val]
# record_power = 0
# record_power_coordinate = None
# record_size = 0
# sides = all_sides if any_size else lambda x, y: [3]
# for x in range(1, 299):
# for y in range(1, 299):
# print(x,y, sides(x,y))
# for side in sides(x, y):
# power = square_power(power_levels, x, y, side)
# if power > record_power:
# record_power = power
# record_power_coordinate = (x, y)
# record_size = side
# return record_power_coordinate + ((record_size,) if any_size else ())
def largest_power_square(serial_number):
    """Part-2 entry point: search across every square size, not just 3x3."""
    return largest_power_coordinate(serial_number, any_size=True)
def main():
    """Placeholder entry point; the actual computation runs at module level below."""
    # Intentionally a no-op.
    pass
    # print(largest_power_square(8199))
if __name__ == '__main__':
    main()
    # NOTE(review): both calls below rerun the full grid search for serial
    # 8199; the any_size search inside largest_power_square is very slow.
    print(largest_power_coordinate(8199))
    print(largest_power_square(8199))
|
983,381 | e76a5c9bcc3801e3ca72e3410ec5653aa7b40542 | # Module for augementing image dataset as a preprocessing step
import math
import numpy as np
import cv2
from scipy import ndimage
"""
Augmentations to implement:
- normalization
- rotation
- translation
- pixelation
- mirroring
? change backgrounds
? add random noise
Look into:
- AutoAugment
"""
def unflatten(image):
    """Reshape a flat 1-D image vector back into a square 2-D array.

    Assumes the vector length is a perfect square.
    """
    side = int(math.sqrt(image.shape[0]))
    return np.reshape(image, (side, side))
def make_rotations(dataset, labels, angles):
    """
    Augment dataset with rotations of source images
    Args
        dataset: source dataset
        labels: labels matching the dataset entries
        angles: list of positive angles (in degrees); the negative of each
            angle is applied as well
    Returns
        A tuple of augmented images and their corresponding labels
    """
    flat_input = (len(dataset[0].shape) == 1)
    out_images = []
    out_labels = []
    for src, lbl in zip(dataset, labels):
        img = unflatten(src) if flat_input else src
        for angle in angles:
            # Apply the positive rotation first, then its mirror angle,
            # matching the original output ordering.
            for signed_angle in (angle, -angle):
                rotated = ndimage.rotate(img, signed_angle)
                if flat_input:
                    rotated = rotated.flatten()
                out_images.append(rotated)
                out_labels.append(lbl)
    return (out_images, out_labels)
def make_translations(dataset, labels):
    """
    Augment dataset with translations of source images. Shift each image by
    10 pixels in the 8 compass directions; pixels shifted outside the frame
    are dropped and vacated pixels are left at zero.
    Args
        dataset: source dataset
        labels: labels matching the dataset entries
    Returns
        A tuple of augmented images and their corresponding labels
    """
    offset = 10
    translations = [
        (0, offset),
        (0, -offset),
        (offset, 0),
        (-offset, 0),
        (-offset, -offset),
        (-offset, offset),
        (offset, -offset),
        (offset, offset)
    ]
    was_flattened = (len(dataset[0].shape) == 1)
    augmented_dataset = []
    augmented_labels = []
    for image, label in zip(dataset, labels):
        if was_flattened:
            image = unflatten(image)
        height = image.shape[0]
        width = image.shape[1]
        for t_x, t_y in translations:
            new_image = np.zeros(image.shape)
            # Destination pixel (x + t_x, y + t_y) receives source pixel
            # (x, y). Implemented with array slicing instead of the original
            # per-pixel Python loop; this also fixes an off-by-one where the
            # `> 0` bound check skipped destination row/column 0.
            src_x0, src_x1 = max(0, -t_x), min(width, width - t_x)
            src_y0, src_y1 = max(0, -t_y), min(height, height - t_y)
            if src_x0 < src_x1 and src_y0 < src_y1:
                new_image[src_y0 + t_y:src_y1 + t_y, src_x0 + t_x:src_x1 + t_x] = \
                    image[src_y0:src_y1, src_x0:src_x1]
            if was_flattened:
                # Bug fix: the original called new_image.flatten() without
                # keeping the result, so flattened inputs yielded 2-D outputs
                # (inconsistent with make_rotations/make_mirrored).
                new_image = new_image.flatten()
            augmented_dataset.append(new_image)
            augmented_labels.append(label)
    return (augmented_dataset, augmented_labels)
def make_blurry(dataset, labels, filter_size):
    """
    Augment dataset by box-blurring each image (mean filter).
    Args
        dataset: source dataset
        labels: labels matching the dataset entries
        filter_size: side length of the square averaging kernel
    Returns
        A tuple of augmented images and their corresponding labels
    """
    border_size = int(filter_size / 2)
    was_flattened = (len(dataset[0].shape) == 1)
    augmented_dataset = []
    augmented_labels = []
    for image, label in zip(dataset, labels):
        if was_flattened:
            image = unflatten(image)
        blurry_image = np.zeros_like(image)
        # Pad by replicating edge pixels; np.pad(mode='edge') is equivalent
        # to cv2.copyMakeBorder with BORDER_REPLICATE and removes the OpenCV
        # dependency from this function.
        padded = np.pad(image,
                        ((border_size, border_size + 1),
                         (border_size, border_size + 1)),
                        mode='edge')
        # Loop over the ORIGINAL image extent (the original looped over the
        # padded extent, which over-ran blurry_image for even filter sizes).
        for y in range(image.shape[0]):
            for x in range(image.shape[1]):
                window = padded[y:y + filter_size, x:x + filter_size]
                # NOTE: assigning into zeros_like keeps the input dtype, so
                # integer images get truncated averages (original behaviour).
                blurry_image[y, x] = np.sum(window) / (filter_size * filter_size)
        if was_flattened:
            # Bug fix: the original discarded the flatten() result.
            blurry_image = blurry_image.flatten()
        augmented_dataset.append(blurry_image)
        augmented_labels.append(label)
    return (augmented_dataset, augmented_labels)
def make_mirrored(dataset, labels, fliplist):
    """
    Augment dataset by mirroring source images
    Args
        dataset: source dataset
        labels: labels matching the dataset entries
        fliplist: list of desired flips (cv2.flip codes).
            0:  flips around x-axis
            1:  flips around y-axis
            -1: flips both
    Returns
        A tuple of augmented images and their corresponding labels
    """
    was_flattened = (len(dataset[0].shape) == 1)
    augmented_dataset = []
    augmented_labels = []
    for image, label in zip(dataset, labels):
        if was_flattened:
            image = unflatten(image)
        for flip in fliplist:
            # Same semantics as cv2.flip (0 = vertical, >0 = horizontal,
            # <0 = both), but implemented with NumPy so this function no
            # longer needs OpenCV. copy() materialises the flipped view.
            if flip == 0:
                altered_image = np.flipud(image).copy()
            elif flip > 0:
                altered_image = np.fliplr(image).copy()
            else:
                altered_image = np.flipud(np.fliplr(image)).copy()
            if was_flattened:
                altered_image = altered_image.flatten()
            augmented_dataset.append(altered_image)
            augmented_labels.append(label)
    return (augmented_dataset, augmented_labels)
|
983,382 | ca8e90bdbd40191a1beed175aa534eda7bdfa127 | #! /usr/bin/env python3
import csv
import shutil
import sys
import time
import os
import logging
# http client configuration
# Browser-like User-Agent: some servers reject the default Python UA.
user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/63.0.3239.84 Chrome/63.0.3239.84 Safari/537.36'
# logging configuration
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
python_version = sys.version_info.major
logging.info("executed by python %d" % python_version)
# compatibility with python 2: bind urljoin/urlretrieve/quote to the right
# module for the running interpreter and install the custom User-Agent.
if python_version == 3:
    import urllib.parse
    import urllib.request
    urljoin = urllib.parse.urljoin
    urlretrieve = urllib.request.urlretrieve
    quote = urllib.parse.quote
    # configure headers -- every urlretrieve call now sends the browser UA
    opener = urllib.request.build_opener()
    opener.addheaders = [('User-agent', user_agent)]
    urllib.request.install_opener(opener)
else:
    import urlparse
    import urllib
    urljoin = urlparse.urljoin
    urlretrieve = urllib.urlretrieve
    quote = urllib.quote
    # configure headers (python 2 path: override the default opener class)
    class AppURLopener(urllib.FancyURLopener):
        version = user_agent
    urllib._urlopener = AppURLopener()
def fix_url(url):
    """Percent-encode characters a URL must not contain, leaving URL syntax intact."""
    return quote(url, safe="%/:=&?~#+!$,;'@()*[]")
def download_csv_row_images(row, dest_dir):
    """Download every image referenced by one web-scraper CSV row.

    Image columns are recognised by the '-src' suffix; each file is named
    '<row order>-<column name without -src>'.
    """
    # These two fields are constant per row; read them once up front.
    start_url = row['web-scraper-start-url']
    row_id = row['web-scraper-order']
    for key in row:
        if not key.endswith("-src"):
            continue
        absolute_url = urljoin(start_url, row[key])
        image_filename = "%s-%s" % (row_id, key[0:-4])
        download_image(absolute_url, dest_dir, image_filename)
def download_image(image_url, dest_dir, image_filename):
    """Fetch one image and store it in dest_dir with an extension matching
    its Content-Type.

    Unknown content types and download failures are logged and skipped.
    """
    image_url = fix_url(image_url)
    extension_for = {
        'image/jpeg': 'jpg',
        'image/jpg': 'jpg',
        'image/png': 'png',
        'image/gif': 'gif',
    }
    try:
        logging.info("downloading image %s" % image_url)
        tmp_file_name, headers = urlretrieve(image_url)
        content_type = headers.get("Content-Type")
        ext = extension_for.get(content_type)
        if ext is None:
            logging.warning("unknown image content type %s" % content_type)
            return
        image_path = os.path.join(dest_dir, image_filename + "." + ext)
        shutil.move(tmp_file_name, image_path)
    except Exception as e:
        logging.warning("Image download error. %s" % e)
def get_csv_image_dir(csv_filename):
    """Return (creating if needed) a directory named after the CSV file's basename."""
    dir_name = os.path.splitext(os.path.basename(csv_filename))[0]
    if not os.path.exists(dir_name):
        os.makedirs(dir_name)
    return dir_name
def download_csv_file_images(filename):
    """Download every image referenced by a web-scraper CSV export.

    Tolerates an optional UTF-8 BOM at the start of the file so the first
    header name is not corrupted.
    """
    logging.info("importing data from %s" % filename)
    dest_dir = get_csv_image_dir(filename)
    # Detect a UTF-8 byte-order mark before parsing.
    with open(filename, "rb") as raw:
        bom_offset = 3 if raw.read(3) == b'\xef\xbb\xbf' else 0
    with open(filename, "r") as csvfile:
        # skip the BOM if one was found
        csvfile.seek(bom_offset)
        for row in csv.DictReader(csvfile):
            download_csv_row_images(row, dest_dir)
def main(args):
    """Command-line entry point: download images for the CSV named in args[1]."""
    # filename passed through args; bail out with a warning if missing
    if len(args) < 2:
        logging.warning("no input file found")
        return
    download_csv_file_images(args[1])
    logging.info("image download completed")
# Guard the entry point so importing this module does not trigger a run.
if __name__ == '__main__':
    # NOTE(review): 10s pre-run pause kept from the original -- purpose
    # unclear (possibly to let the environment settle); confirm before removing.
    time.sleep(10)
    main(sys.argv)
|
983,383 | aaf90014b825eeead7cc5133b01b4b427603d37c | import threading
from collections import OrderedDict
from xml.etree import ElementTree as ETree
import logging
import time
import datetime
class FBInterface:
    """In-memory interface of one function block: its input/output events
    and variables (parsed from the block's XML definition), connections to
    other blocks, and an optional behavioural-anomaly monitoring thread.
    """
    # Monitoring - Euclidean distance between two 2D points.
    # NOTE(review): `math` is only imported into module globals by
    # avg_dist(); calling dist() before avg_dist() raises NameError.
    def dist(self,p1, p2):
        (x1, y1), (x2, y2) = p1, p2
        return math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
    def avg_dist(self,_x,_y):
        ## Specific imports
        global combinations
        from itertools import combinations
        global math
        import math
        ## Calculate mean distance between all base points for neighbourhood-based methods (e.g. DBSCAN)
        points = list(zip(_x, _y))
        distances = [self.dist(p1, p2) for p1, p2 in combinations(points, 2)]
        return sum(distances) / len(distances)
    # Append one prediction row to the per-algorithm visualization file.
    def write_2_file(self, vals, method_name, pred_val):
        f = open("{0}{1}__{2}.txt".format(self.monitoring_path, self.fb_name, method_name), "a")
        f.write(str(vals).strip('[]'))
        f.write(",{0}\n".format(pred_val))
        f.close()
    """
    Monitoring - Function that executes the thread routine for monitoring
    """
    def thread_monitoring_classify(self, _new_x):
        # Classify one new sample with every configured anomaly algorithm.
        for neigh, name, algorithm in self.anomaly_algorithms:
            ## For clustering algorithms that do not have the "predict" function
            if neigh:
                ## normalize and stack data
                temp_data = np.vstack((self.scaler.transform(self.anomaly_data),
                                       self.scaler.transform(np.array(_new_x).reshape(1, -1))))
                prediction = algorithm.fit_predict(self.pca.transform(temp_data))[-1] #get last prediction
            ## For supervised algorithms that do have the "predict" function
            else:
                prediction = algorithm.predict(self.pca.transform(self.scaler.transform(np.array(_new_x).reshape(1, -1))))[0]
            self.write_2_file(np.around(self.pca.transform(self.scaler.transform(np.array(_new_x).reshape(1, -1)))).tolist(), name, prediction)
    """
    Monitoring - Function that filters out outliers before training process
    """
    def thread_monitoring_pre_train(self):
        ########################################################
        ## Normalize and apply PCA to the training data
        result = self.pca.fit_transform(self.scaler.fit_transform(self.anomaly_data))
        ## First element of the Tuple is True or False either if it is a neighbourhood-based method or not
        self.anomaly_algorithms[0][2] = EllipticEnvelope(support_fraction=1, contamination=self.contamination)
        self.anomaly_algorithms[1][2] = DBSCAN(eps=self.avg_dist(result[:, 0], result[:, 1]), metric='euclidean', min_samples=2)
        self.anomaly_algorithms[2][2] = OneClassSVM(kernel='rbf', nu=self.contamination, gamma=0.05)
        ########################################################
        ## Predict outliers - use DBSCAN (unsupervised technique) for first fitering of outliers
        ## get predictions for all training data
        DBSCAN_index = 1
        predictions_temp = self.anomaly_algorithms[DBSCAN_index][2].fit_predict(result)
        #########################################################
        ## Filter data - for each element of the training data
        filtered_anomaly = np.array([])
        for temp_i in np.arange(len(self.anomaly_data)):
            ## If sample is not outlier (DBSCAN labels outliers as -1)
            if predictions_temp[temp_i] != -1:
                if len(filtered_anomaly) == 0:
                    filtered_anomaly = self.anomaly_data[temp_i]
                else:
                    filtered_anomaly = np.vstack((filtered_anomaly,self.anomaly_data[temp_i]))
        ##########################################################
        ## Update data
        self.anomaly_data = filtered_anomaly
        ## Train algorithms
        self.thread_monitoring_train()
    """
    Monitoring - Function that executes the thread routine for monitoring
    """
    def thread_monitoring_train(self):
        ## Normalize and apply PCA to the training data
        result = self.pca.fit_transform(self.scaler.fit_transform(self.anomaly_data))
        ## for each algorithm in each function block
        for i in np.arange(len(self.anomaly_algorithms)):
            ## If algorithms do have the "predict" function (Clustering algorithms usually don't have)
            if (not self.anomaly_algorithms[i][0]):
                self.anomaly_algorithms[i][2].fit(result)
            ## Update visualization files (truncate, then rewrite all rows)
            open("{0}{1}__{2}.txt".format(self.monitoring_path, self.fb_name, self.anomaly_algorithms[i][1]), "w").close()
            for r in result:
                f = open("{0}{1}__{2}.txt".format(self.monitoring_path, self.fb_name, self.anomaly_algorithms[i][1]), "a")
                f.write(str(np.around(r, 2).tolist()).strip('[]'))
                f.write(",1\n")
                f.close()
    """
    Monitoring - Function that executes the thread routine for monitoring
    """
    def thread_monitoring_collect(self):
        ##########################################
        ## Declarations
        counter = 0
        ##########################################
        ## Make initial overwrite of dataset files
        for _, name, _ in self.anomaly_algorithms:
            f = open("{0}{1}__{2}.txt".format(self.monitoring_path,self.fb_name, name), "w")
            f.write("")
            f.close()
        ## create file for input and output event count
        f = open("{0}{1}.txt".format(self.monitoring_path, self.fb_name), "w")
        f.write("")
        f.close()
        ## Initial sleep waiting for all function blocks
        time.sleep(self.init_time_wait)
        ## While not received the first event
        while (not self.first_event):
            time.sleep(1)
            ## A way to terminate a thread
            if self.stop_thread: break
        ##############################################################
        ## Collecting ssamples for training cycle
        while(True):
            ## Exit thread
            if self.stop_thread: break
            # reset all statistics
            self.reset_monitoring()
            #############################################
            # time to collect data
            if self.first_delta:
                time.sleep(self.time_per_sample)
                self.first_delta = False
            else:
                time.sleep(self.time_delta)
            #############################################
            ## Exit thread
            if self.stop_thread: break
            ## Get new sample from Anomaly Detection
            new_sample = self.get_monitoring()
            ## Collecting phase for each function block
            if (not self.classify):
                ## Append to the array
                if len(self.anomaly_data) == 0:
                    self.anomaly_data = self.anomaly_data + new_sample
                else:
                    self.anomaly_data = np.vstack((self.anomaly_data,new_sample))
                ################################################
                ## Update file for visualization purposes
                for _, name, _ in self.anomaly_algorithms:
                    ## just for visuzalization purposes - remove for accelerated processing
                    if len(self.anomaly_data) >= 2:
                        result = self.pca.fit_transform(self.scaler.fit_transform(self.anomaly_data))
                        open("{0}{1}__{2}.txt".format(self.monitoring_path, self.fb_name, name), "w").close()
                        for r in result:
                            f = open("{0}{1}__{2}.txt".format(self.monitoring_path, self.fb_name, name), "a")
                            f.write(str(np.around(r,2).tolist()).strip('[]'))
                            f.write(",1\n")
                            f.close()
                ################################################
            ## Classification phase for each function block
            else:
                # Classify asynchronously so sampling cadence is unaffected.
                threading.Thread(target=self.thread_monitoring_classify, args=(new_sample,)).start()
                continue
            #############################################
            ## If in collecting phase
            if (counter < self.training_samples):
                counter += 1
            ## Train algorithms (once enough samples were collected)
            elif (counter >= self.training_samples and not self.classify):
                #threading.Thread(target=self.thread_monitoring_train).start()
                threading.Thread(target=self.thread_monitoring_pre_train).start()
                self.classify = True
    ###############################################################################
    def __init__(self, fb_name, fb_type, xml_root, monitor=None):
        # monitor: None to disable monitoring, or a (training_samples,
        # time_per_sample) tuple to enable the anomaly-detection thread.
        self.fb_name = fb_name
        self.fb_type = fb_type
        self.monitor_fb = monitor
        self.stop_thread = False
        self.event_queue = []
        """
        Each events and variables dictionary contains:
        - name (str): event/variable name
        - type (str): INT, REAL, STRING, BOOL
        - watch (boolean): True, False
        """
        self.input_events = OrderedDict()
        self.output_events = OrderedDict()
        self.input_vars = OrderedDict()
        self.output_vars = OrderedDict()
        logging.info('parsing the fb interface (inputs/outputs events/vars)')
        # Parse the xml (iterates over the root)
        for fb in xml_root:
            # Searches for the interfaces list
            if fb.tag == 'InterfaceList':
                # Iterates over the interface list
                # to find the inputs/outputs
                for interface in fb:
                    # Input events
                    if interface.tag == 'EventInputs':
                        # Iterates over the input events
                        for event in interface:
                            event_name = event.attrib['Name']
                            event_type = event.attrib['Type']
                            self.input_events[event_name] = (event_type, None, False)
                    # Output Events
                    elif interface.tag == 'EventOutputs':
                        # Iterates over the output events
                        for event in interface:
                            event_name = event.attrib['Name']
                            event_type = event.attrib['Type']
                            self.output_events[event_name] = (event_type, None, False)
                    # Input vars
                    elif interface.tag == 'InputVars':
                        # Iterates over the input vars
                        for var in interface:
                            var_name = var.attrib['Name']
                            var_type = var.attrib['Type']
                            self.input_vars[var_name] = (var_type, None, False)
                    # Output vars
                    elif interface.tag == 'OutputVars':
                        # Iterates over the output vars
                        for var in interface:
                            var_name = var.attrib['Name']
                            var_type = var.attrib['Type']
                            self.output_vars[var_name] = (var_type, None, False)
                    # Doesn't expected interface
                    else:
                        logging.error("doesn't expected interface (check interface name in .fbt file)")
        logging.info('parsing successful with:')
        logging.info('input events: {0}'.format(self.input_events))
        logging.info('output events: {0}'.format(self.output_events))
        logging.info('input vars: {0}'.format(self.input_vars))
        logging.info('output vars: {0}'.format(self.output_vars))
        self.output_connections = dict()
        self.new_event = threading.Event()
        self.lock = threading.Lock()
        ###############################################################
        ## If we want to monitor the FB
        if self.monitor_fb is not None:
            #############################################
            ## All key imports (lazy, global so other methods can use them;
            ## only loaded when monitoring is actually enabled)
            global svm
            from sklearn import svm
            global EllipticEnvelope
            from sklearn.covariance import EllipticEnvelope
            global IsolationForest
            from sklearn.ensemble import IsolationForest
            global LocalOutlierFactor
            from sklearn.neighbors import LocalOutlierFactor
            global PCA
            from sklearn.decomposition import PCA
            global StandardScaler
            from sklearn.preprocessing import StandardScaler
            global DBSCAN
            from sklearn.cluster import DBSCAN
            global OneClassSVM
            from sklearn.svm import OneClassSVM
            global np
            import numpy as np
            global pd
            import pandas as pd
            global os
            import os
            global sys
            import sys
            """
            Monitoring variables for Behavioral Anomaly Detection
            """
            self.time_in = pd.DataFrame(columns=['time'])
            self.time_out = pd.DataFrame(columns=['time'])
            #############################################
            ## Monitoring - Create thread
            self.training_samples, self.time_per_sample = self.monitor_fb
            ## Time lag like in predicive maintenance approaches
            self.training_samples = (self.training_samples*4) - 3
            self.time_delta = int(self.time_per_sample/4)
            self.first_delta = True
            self.init_time_wait = 5
            self.moni_thread = threading.Thread(target=self.thread_monitoring_collect)
            self.classify = False
            self.contamination = 1/self.training_samples
            self.anomaly_data = []
            self.first_event = False
            self.monitoring_path = os.path.join(os.path.dirname(sys.path[0]), 'resources', 'monitoring','')
            ## Pre-processing Methods
            self.pca = PCA(n_components=2)
            self.scaler = StandardScaler()
            ## First element of the Tuple is True or False either if it is a neighbourhood-based method or not
            self.anomaly_algorithms = [
                [False, "Empirical Covariance", None],
                [True, "DBSCAN", None],
                [False, "One Class SVM", None]]
            #########################
            ## Monitoring - Start thread
            self.moni_thread.start()
            #########################
        #############################################
    def set_attr(self, name, new_value=None, set_watch=None):
        # Set the value and/or the watch flag of an event or variable,
        # whichever of the four dictionaries contains `name`.
        # Locks the dictionary usage
        self.lock.acquire()
        try:
            # INPUT VAR
            if name in self.input_vars:
                v_type, value, is_watch = self.input_vars[name]
                # Sets the watch
                if set_watch is not None:
                    self.input_vars[name] = (v_type, value, set_watch)
                # Sets the var value
                elif new_value is not None:
                    self.input_vars[name] = (v_type, new_value, is_watch)
            # INPUT EVENT
            elif name in self.input_events:
                event_type, value, is_watch = self.input_events[name]
                # Sets the watch
                if set_watch is not None:
                    self.input_events[name] = (event_type, value, set_watch)
                # Sets the event value
                elif new_value is not None:
                    self.input_events[name] = (event_type, new_value, is_watch)
            # OUTPUT VAR
            elif name in self.output_vars:
                var_type, value, is_watch = self.output_vars[name]
                # Sets the watch
                if set_watch is not None:
                    self.output_vars[name] = (var_type, value, set_watch)
                # Sets the var value
                elif new_value is not None:
                    self.output_vars[name] = (var_type, new_value, is_watch)
            # OUTPUT EVENT
            elif name in self.output_events:
                event_type, value, is_watch = self.output_events[name]
                # Sets the watch
                if set_watch is not None:
                    self.output_events[name] = (event_type, value, set_watch)
                # Sets the event value
                elif new_value is not None:
                    self.output_events[name] = (event_type, new_value, is_watch)
        finally:
            # Unlocks the dictionary usage
            self.lock.release()
    def read_attr(self, name):
        # Return (type, value, watch) for an event/variable; (None, None,
        # None) if the name is unknown.
        v_type = None
        value = None
        is_watch = None
        # Locks the dictionary usage
        self.lock.acquire()
        try:
            # INPUT VAR
            if name in self.input_vars:
                v_type, value, is_watch = self.input_vars[name]
            # INPUT EVENT
            elif name in self.input_events:
                v_type, value, is_watch = self.input_events[name]
            # OUTPUT VAR
            elif name in self.output_vars:
                v_type, value, is_watch = self.output_vars[name]
            # OUTPUT EVENT
            elif name in self.output_events:
                v_type, value, is_watch = self.output_events[name]
        except KeyError as error:
            logging.error('can not find that fb attribute')
            logging.error(error)
        finally:
            # Unlocks the dictionary usage
            self.lock.release()
        return v_type, value, is_watch
    def add_connection(self, value_name, connection):
        # Register an outgoing connection for an output event/variable.
        # If already exists a connection
        if value_name in self.output_connections:
            conns = self.output_connections[value_name]
            conns.append(connection)
        # If don't exists any connection with that value
        else:
            conns = [connection]
            self.output_connections[value_name] = conns
    def push_event(self, event_name, event_value):
        # Queue an incoming event and wake up any waiter; None values are ignored.
        if event_value is not None:
            self.event_queue.append((event_name, event_value))
            # Updates the event value
            self.set_attr(event_name, new_value=event_value)
            # Sets the new event
            self.new_event.set()
    def pop_event(self):
        # NOTE: list.pop() removes the MOST RECENT event (LIFO); implicitly
        # returns None when the queue is empty.
        if len(self.event_queue) > 0:
            ## pop event
            event_name, event_value = self.event_queue.pop()
            if self.monitor_fb:
                ############################################
                ## Event In - read from my own FB queue
                ## NOTE(review): DataFrame.append was removed in pandas 2.x;
                ## pin pandas < 2 or migrate to pd.concat.
                self.time_in = self.time_in.append({'time': datetime.datetime.now()}, ignore_index=True)
                ############################################
            if not self.first_event and event_name != 'INIT':
                self.first_event = True
            return event_name, event_value
    def wait_event(self):
        # Block until the event queue is non-empty.
        while len(self.event_queue) <= 0:
            self.new_event.wait()
            # Clears new_event to wait for new events
            self.new_event.clear()
        # Clears new_event to wait for new events
        self.new_event.clear()
    def read_inputs(self):
        # Build the flat list [event_name, event_value, var0, var1, ...]
        # consumed by the block's execution logic.
        logging.info('reading fb inputs...')
        # First convert the vars dictionary to a list
        events_list = []
        event_name, event_value = self.pop_event()
        self.set_attr(event_name, new_value=event_value)
        events_list.append(event_name)
        events_list.append(event_value)
        # Second converts the event dictionary to a list
        vars_list = []
        logging.info('input vars: {0}'.format(self.input_vars))
        # Get all the vars
        for index, var_name in enumerate(self.input_vars):
            v_type, value, is_watch = self.read_attr(var_name)
            vars_list.append(value)
        # Finally concatenate the 2 lists
        return events_list + vars_list
    def update_outputs(self, outputs):
        # outputs layout: [event values...] followed by [var values...],
        # in the dictionaries' insertion order.
        logging.info('updating the outputs...')
        # Converts the second part of the list to variables
        for index, var_name in enumerate(self.output_vars):
            # Second part of the list delimited by the events dictionary len
            new_value = outputs[index + len(self.output_events)]
            # Updates the var value
            self.set_attr(var_name, new_value=new_value)
            # Verifies if exist any connection
            if var_name in self.output_connections:
                # Updates the connection
                for connection in self.output_connections[var_name]:
                    connection.update_var(new_value)
        # Converts the first part of the list to events
        for index, event_name in enumerate(self.output_events):
            value = outputs[index]
            self.set_attr(event_name, new_value=value)
            # Verifies if exist any connection
            if event_name in self.output_connections:
                if self.monitor_fb and value is not None:
                    ############################################
                    ## Event Out - send events to subsequent FB
                    self.time_out = self.time_out.append({'time': datetime.datetime.now()}, ignore_index=True)
                    ############################################
                # Sends the event ot the new fb
                for connection in self.output_connections[event_name]:
                    connection.send_event(value)
    def read_watches(self, start_time):
        # Serialize all watched vars/events with non-None values to XML;
        # also returns how many watches were emitted.
        # Creates the xml root element
        fb_root = ETree.Element('FB', {'name': self.fb_name})
        # Mixes the vars in 1 dictionary
        var_mix = {**self.input_vars, **self.output_vars}
        # Iterates over the mix dictionary
        for index, var_name in enumerate(var_mix):
            v_type, value, is_watch = self.read_attr(var_name)
            if is_watch and (value is not None):
                port = ETree.Element('Port', {'name': var_name})
                ETree.SubElement(port, 'Data', {'value': str(value),
                                                'forced': 'false'})
                fb_root.append(port)
        # Mixes the vars in 1 dictionary
        event_mix = {**self.input_events, **self.output_events}
        # Iterates over the mix dictionary
        for index, event_name in enumerate(event_mix):
            v_type, value, is_watch = self.read_attr(event_name)
            if is_watch and (value is not None):
                port = ETree.Element('Port', {'name': event_name})
                ETree.SubElement(port, 'Data', {'value': str(value),
                                                'time': str(int((time.time() * 1000) - start_time))})
                fb_root.append(port)
        # Gets the number of watches
        watches_len = len(fb_root.findall('Port'))
        return fb_root, watches_len
    ## Reset the monitoring statistics
    def reset_monitoring(self):
        """
        Monitoring variables for Behavioral Anomaly Detection
        """
        # Drop timestamps older than one sampling window (+5s slack).
        full_delta = datetime.datetime.now() - datetime.timedelta(seconds=self.time_per_sample + 5)
        self.time_in = self.time_in[self.time_in.time > full_delta]
        self.time_out = self.time_out[self.time_out.time > full_delta]
    ## Get the monitoring statistics for further processing
    def get_monitoring(self):
        #print(self.fb_name , self.time_in)
        #print(self.fb_name , self.time_out)
        time_now = datetime.datetime.now()
        # Three nested observation windows over the sampling period.
        full_delta = time_now - datetime.timedelta(seconds=self.time_per_sample)
        half_delta = time_now - datetime.timedelta(seconds=int(self.time_per_sample/2))
        quarter_delta = time_now - datetime.timedelta(seconds=int(self.time_per_sample/4))
        df_in_full = self.time_in[self.time_in.time > full_delta]
        df_in_half = self.time_in[self.time_in.time > half_delta]
        df_in_quarter= self.time_in[self.time_in.time > quarter_delta]
        df_out_full = self.time_out[self.time_out.time > full_delta]
        df_out_half = self.time_out[self.time_out.time > half_delta]
        df_out_quarter = self.time_out[self.time_out.time > quarter_delta]
        ## calculating differences (inter-event gaps, in seconds)
        diff_df_in_full = (df_in_full['time'] - df_in_full['time'].shift(1)) / np.timedelta64(1, 's')
        diff_df_in_full = diff_df_in_full[1:]
        diff_df_in_half = (df_in_half['time'] - df_in_half['time'].shift(1)) / np.timedelta64(1, 's')
        diff_df_in_half = diff_df_in_half[1:]
        diff_df_in_quarter = (df_in_quarter['time'] - df_in_quarter['time'].shift(1)) / np.timedelta64(1, 's')
        diff_df_in_quarter = diff_df_in_quarter[1:]
        diff_df_out_full = (df_out_full['time'] - df_out_full['time'].shift(1)) / np.timedelta64(1, 's')
        diff_df_out_full = diff_df_out_full[1:]
        diff_df_out_half = (df_out_half['time'] - df_out_half['time'].shift(1)) / np.timedelta64(1, 's')
        diff_df_out_half = diff_df_out_half[1:]
        diff_df_out_quarter = (df_out_quarter['time'] - df_out_quarter['time'].shift(1)) / np.timedelta64(1, 's')
        diff_df_out_quarter = diff_df_out_quarter[1:]
        #######################################
        ## write file with event_queue size
        f = open("{0}{1}.txt".format(self.monitoring_path, self.fb_name), "a")
        f.write("{0},{1}\n".format(df_in_full['time'].count(),df_out_full['time'].count()))
        f.close()
        #######################################
        # 18 features: count/mean/std of inter-event gaps over the
        # quarter/half/full windows, inbound first, then outbound.
        return [diff_df_in_quarter.count(),
                0 if diff_df_in_quarter.count() == 0 else round(diff_df_in_quarter.mean(),4),
                0 if diff_df_in_quarter.count() <= 1 else round(diff_df_in_quarter.std(),4),
                diff_df_in_half.count(),
                0 if diff_df_in_half.count() == 0 else round(diff_df_in_half.mean(),4),
                0 if diff_df_in_half.count() <= 1 else round(diff_df_in_half.std(),4),
                diff_df_in_full.count(),
                0 if diff_df_in_full.count() == 0 else round(diff_df_in_full.mean(),4),
                0 if diff_df_in_full.count() <= 1 else round(diff_df_in_full.std(),4),
                diff_df_out_quarter.count(),
                0 if diff_df_out_quarter.count() == 0 else round(diff_df_out_quarter.mean(), 4),
                0 if diff_df_out_quarter.count() <= 1 else round(diff_df_out_quarter.std(), 4),
                diff_df_out_half.count(),
                0 if diff_df_out_half.count() == 0 else round(diff_df_out_half.mean(), 4),
                0 if diff_df_out_half.count() <= 1 else round(diff_df_out_half.std(), 4),
                diff_df_out_full.count(),
                0 if diff_df_out_full.count() == 0 else round(diff_df_out_full.mean(), 4),
                0 if diff_df_out_full.count() <= 1 else round(diff_df_out_full.std(), 4)]
class Connection:
    """One directed link from a function block output to a destination block input."""

    def __init__(self, destination_fb, value_name):
        """Remember the target block and the name of the input it exposes."""
        self.destination_fb = destination_fb
        self.value_name = value_name

    def update_var(self, value):
        """Propagate a data value to the destination block's input variable."""
        self.destination_fb.set_attr(self.value_name, new_value=value)

    def send_event(self, value):
        """Deliver an event occurrence to the destination block's input event."""
        self.destination_fb.push_event(self.value_name, value)
|
983,384 | 01c0c4a2e6d5fb0d070a9b799b2ef462ffd0b8cc |
import pandas as pd
import numpy as np
import xgboost
from sklearn import model_selection, metrics, preprocessing
from gensim.models import doc2vec
import matplotlib.pyplot as plt
from sklearn import manifold
import utils
def top_breweries_strongest_beers(data: pd.DataFrame):
    """Rank the 3 breweries that produce the strongest beers.

    A brewery's strength is the maximum ABV among its beers; the top 3
    brewer ids are printed and returned, strongest first.

    Args:
        data (pd.DataFrame): dataframe containing beer review data
            (needs 'beer_brewerId' and 'beer_ABV' columns)

    Returns:
        list: the 3 brewer ids ordered by their strongest beer's ABV
    """
    # Max ABV per brewery, then take the 3 largest. The original code
    # collected the brewers of the 3 highest DISTINCT ABV values, which
    # can yield fewer or more than 3 breweries and does not rank them.
    strongest_by_brewer = data.groupby('beer_brewerId')['beer_ABV'].max()
    top_3_brewers = strongest_by_brewer.nlargest(3).index.tolist()
    print(f"The top 3 brewers with the strongest beers are {top_3_brewers}")
    return top_3_brewers
def highest_ratings_year(data: pd.DataFrame):
    """Report the year in which the most 5-star (highest) reviews were given.

    Args:
        data (pd.DataFrame): dataframe containing beer review data; needs
            'review_time' (unix seconds) and 'review_overall' columns

    Returns:
        int: the year with the greatest number of 5.0 overall reviews
    """
    # Derive the year locally instead of mutating the caller's dataframe
    # (the original added a 'review_year' column as a side effect).
    years = pd.to_datetime(data['review_time'], unit='s').dt.year
    # Count 5-star reviews per year; value_counts sorts by count descending,
    # so the first index entry is the winning year.
    five_star_years = years[data['review_overall'] == 5]
    highest_year = five_star_years.value_counts().index[0]
    print(f"The year with highest ratings is {highest_year}")
    return highest_year
def important_factors_based_on_ratings(data: pd.DataFrame) -> np.ndarray:
    """Get important features from xgboost classification results

    Args:
        data (pd.DataFrame): dataframe containing data for classification

    Returns:
        np.ndarray: feature importances of the trained xgboost model
    """
    # Binarise the ratings so the two classes are evenly distributed.
    data = utils.add_ratings_binary(data)
    # Split the dataframe into a feature matrix and label vector.
    X, y = utils.get_rating_features_labels(data)
    feature_names = X.columns
    # Hold out 20% of the rows for evaluation.
    X_train, X_test, y_train, y_test = model_selection.train_test_split(
        X.values, y, test_size=0.2)
    # Fit a default gradient-boosted classifier on the training split.
    classifier = xgboost.XGBClassifier()
    classifier.fit(X_train, y_train)
    importances = classifier.feature_importances_
    # argpartition places the indices of the two largest scores last.
    top_two = np.argpartition(importances, -2)[-2:]
    print(f"The top 2 important features are {feature_names[top_two]}")
    return importances
def beer_reccomendations(data: pd.DataFrame):
    """Recommend three beer IDs using written-review sentiment and overall rating.

    Args:
        data (pd.DataFrame): dataframe containing data for classification
    """
    # Annotate each row with TextBlob polarity/subjectivity of its written review.
    data = utils.add_review_polarity_subjectivity(data)
    # A beer qualifies when its review is strongly positive AND it scored 5 stars overall.
    qualifies = (data['review_polarity'] >= 0.85) & (data['review_overall'] == 5)
    best_beers = data.loc[qualifies, 'beer_beerId']
    print(f"These three beer reccomendations have 5 star reviews and top positive scores based on written reviews: {best_beers[0:3]}")
def favorite_beer_based_on_written_reviews(data: pd.DataFrame):
    """Get favorite beer style based on written reviews using sentiment analysis.

    Args:
        data (pd.DataFrame): dataframe containing data for classification
    """
    # Add written review polarity and subjectivity using TextBlob sentiment analysis
    data = utils.add_review_polarity_subjectivity(data)
    # Get top beer styles by selecting reviews with polarity >= 0.65.
    # BUG FIX: the column added above is 'review_polarity' (used elsewhere in this
    # module); the old 'revew_polarity' spelling raised a KeyError.
    top_styles = data['beer_style'].loc[data['review_polarity'] >= 0.65].value_counts()
    print(f"The favorite beer style based on written reviews is {top_styles.index[0]}")
def compare_written_review_with_overall_review_score(data: pd.DataFrame):
    """How does written review compare to overall review score for the beer styles?

    Compares the top beer styles by written-review sentiment polarity with the
    top beer styles by overall review score.

    Args:
        data (pd.DataFrame): dataframe containing data for classification
    """
    # Add written review polarity and subjectivity using TextBlob sentiment analysis
    data = utils.add_review_polarity_subjectivity(data)
    # Top styles by count of strongly positive written reviews (polarity >= 0.65).
    # BUG FIX: column name is 'review_polarity', not 'revew_polarity'.
    top_written_styles = data['beer_style'].loc[data['review_polarity'] >= 0.65].value_counts()
    top_5_written = top_written_styles[0:5]
    # Top styles by count of 5-star overall reviews.
    highest_ratings = data[['review_overall', 'beer_style']].loc[data.review_overall == 5]
    top_highest_ratings = highest_ratings.value_counts().reset_index()
    top_5_ratings = top_highest_ratings.beer_style[0:5]
    # BUG FIX: iterate the style names (value_counts *index*), not the counts, and
    # compare against the Series *values* — `x in series` tests the index labels.
    in_common = [x for x in top_5_written.index if x in top_5_ratings.values]
    print(f"Favorite beer styles based on written reviews and overall score have {len(in_common)} styles in common. They are: {in_common}")
def similar_beer_drinkers_from_written_reviews(review_text: pd.Series) -> np.ndarray:
    """Find similar beer drinkers from written reviews by using Doc2Vec sentence
    embedding; the returned vectors are later projected to 2-D for clustering.

    Args:
        review_text (pd.Series): series containing review sentences as rows

    Returns:
        np.ndarray: Doc2Vec sentence embeddings (document vectors)
    """
    # Preprocess text data for Doc2Vec model
    cleaned_text = utils.preprocess_text(review_text)
    # Convert tokenized text data into gensim formatted tagged data
    texts = [doc2vec.TaggedDocument(
        words=[word for word in review],
        tags=[i]
    ) for i, review in enumerate(cleaned_text)]
    # create Doc2Vec model (PV-DM, 5-dimensional vectors)
    model = doc2vec.Doc2Vec(vector_size=5,
                            alpha=0.025,
                            min_alpha=0.00025,
                            min_count=1,
                            dm=1)
    # build vocabulary
    model.build_vocab(texts)
    # BUG FIX: the model was never trained, so `model.dv.vectors` held only its
    # random initialisation. Train before reading the document vectors.
    model.train(texts, total_examples=model.corpus_count, epochs=model.epochs)
    # Get document vectors
    doc_vecs = model.dv.vectors
    return doc_vecs
def vis_similar_users(doc_vectors: np.ndarray):
    """Visualize similar beer drinkers using written-review sentence embeddings,
    reduced to two dimensions with t-SNE.

    Args:
        doc_vectors (np.ndarray): Doc2Vec sentence embeddings (document vectors)
    """
    # Reduce the embedding space with t-SNE on a subset (5,000 points) for speed
    tsne = manifold.TSNE(n_components=2, random_state=1, perplexity=30)
    Y = tsne.fit_transform(doc_vectors[0:5_000,:])
    # Plot the t-SNE projections to visualize similar users.
    # NOTE(review): `cmap` has no effect without a `c=` argument to scatter — confirm intent.
    fig = plt.figure(figsize=(8, 6))
    ax = fig.add_subplot(111)
    plt.scatter(Y[:,0], Y[:,1], cmap='winter')
    plt.title('t-SNE projections on Doc2Vec Sentence Embeddings')
    plt.show()
if __name__ == "__main__":
    # Load the beer review dataset once and reuse it for every question.
    data = utils.load_data()
    # Q1: brewers producing the strongest beers.
    top_breweries_strongest_beers(data)
    # Q2: year with the highest ratings.
    highest_ratings_year(data)
    # Q3: most important rating factors (xgboost feature importance).
    important_factors_based_on_ratings(data)
    # Q4: beer recommendations from sentiment + overall score.
    beer_reccomendations(data)
    # Q5: favorite beer style according to written reviews.
    favorite_beer_based_on_written_reviews(data)
    # Q6: written-review sentiment vs. overall review score.
    compare_written_review_with_overall_review_score(data)
    # Q7: get Doc2Vec sentence embeddings from written reviews ...
    doc_vecs = similar_beer_drinkers_from_written_reviews(data['review_text'])
    # ... and visualize the clusters using t-SNE.
    vis_similar_users(doc_vecs)
|
983,385 | c906abdd60dda7f970b642fa8a7c83b8f9793803 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2019-10-22 14:29
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Replace the raw integer `product_id` column with a proper FK to EngageboostProducts."""

    dependencies = [
        ('webservices', '0189_engageboostorderproducts_weight'),
    ]

    operations = [
        # Drop the old plain-integer column ...
        migrations.RemoveField(
            model_name='engageboostchannelcurrencyproductprice',
            name='product_id',
        ),
        # ... and re-add it as a nullable ForeignKey that cascades on delete.
        migrations.AddField(
            model_name='engageboostchannelcurrencyproductprice',
            name='product',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='webservices.EngageboostProducts'),
        ),
    ]
|
983,386 | f7fab66730a43c85019d4b574ab817e844f877c9 |
def redact(words, banned_words):
    """Return the items of *words* that do not appear in *banned_words*.

    Args:
        words: iterable of items to filter.
        banned_words: collection of items to remove.

    Returns:
        list: `words` with every banned item removed, original order preserved.
    """
    # BUG FIX: the set was built but membership was tested against the *list*,
    # keeping the O(n) scan per item. Test against the set for O(1) lookups.
    banned_set = set(banned_words)
    return [item for item in words if item not in banned_set]
|
983,387 | 8c9e042ccd4ccb9198783f9dafa8c7a58a7c15ce | import cv2
import random
import numpy as np
from PIL import Image
import PIL.ImageEnhance as ImageEnhance
def random_mirror(img, gt):
    """With probability 0.5, horizontally flip both the image and its label map."""
    if random.random() >= 0.5:
        return cv2.flip(img, 1), cv2.flip(gt, 1)
    return img, gt
def random_scale(img, gt, scales):
    """Resize image (bilinear) and label map (nearest) by a factor drawn from *scales*."""
    factor = random.choice(scales)
    new_h = int(img.shape[0] * factor)
    new_w = int(img.shape[1] * factor)
    img = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
    gt = cv2.resize(gt, (new_w, new_h), interpolation=cv2.INTER_NEAREST)
    return img, gt
def normalize(img, mean, std):
    """Scale pixel values to [0, 1], then standardise channel-wise with mean/std."""
    scaled = img.astype(np.float32) / 255.0
    return (scaled - np.array(mean)) / np.array(std)
def random_crop(img, gt, crop_size):
    """Randomly crop an image/label pair to `crop_size`, padding when smaller.

    The image is padded with 0 and the label with 255 (ignore index) when a
    dimension is smaller than the requested crop.

    Args:
        img: HxWxC image array.
        gt: HxW label array.
        crop_size: (crop_h, crop_w) target size.

    Returns:
        tuple: cropped/padded (img, gt).
    """
    h, w = img.shape[:2]
    crop_h, crop_w = crop_size[0], crop_size[1]
    if h > crop_h:
        # BUG FIX: random.randint is inclusive on both ends, so the upper bound
        # must be h - crop_h; the previous "+ 1" could yield a crop one row
        # short that was then silently padded back by shape_pad.
        y = random.randint(0, h - crop_h)
        img = img[y:y + crop_h, :, :]
        gt = gt[y:y + crop_h, :]
    if w > crop_w:
        x = random.randint(0, w - crop_w)
        img = img[:, x:x + crop_w, :]
        gt = gt[:, x:x + crop_w]
    img, _ = shape_pad(img, crop_size, 0)
    gt, _ = shape_pad(gt, crop_size, 255)
    return img, gt
def color_jitter(img, brightness=0.5, contrast=0.5, saturation=0.5):
    """Randomly jitter brightness, contrast and saturation of an image array.

    Each enhancement factor is drawn uniformly from [max(1 - x, 0), 1 + x].
    """
    pil_img = Image.fromarray(img)
    # Draw the three factors in a fixed order (brightness, contrast, saturation)
    # so RNG consumption matches across runs.
    factors = [random.uniform(max(1 - amount, 0), 1 + amount)
               for amount in (brightness, contrast, saturation)]
    pil_img = ImageEnhance.Brightness(pil_img).enhance(factors[0])
    pil_img = ImageEnhance.Contrast(pil_img).enhance(factors[1])
    pil_img = ImageEnhance.Color(pil_img).enhance(factors[2])
    return np.asarray(pil_img)
def shape_pad(img, shape, value):
    """Pad `img` with `value` up to at least `shape`, centred.

    Returns the padded image and the (top, bottom, left, right) margins.
    """
    pad_h = max(shape[0] - img.shape[0], 0)
    pad_w = max(shape[1] - img.shape[1], 0)
    margin = np.zeros(4, np.uint32)
    # Split each pad evenly; the extra pixel (odd pads) goes to bottom/right.
    margin[0] = pad_h // 2
    margin[1] = pad_h // 2 + pad_h % 2
    margin[2] = pad_w // 2
    margin[3] = pad_w // 2 + pad_w % 2
    img = cv2.copyMakeBorder(img, margin[0], margin[1], margin[2], margin[3],
                             cv2.BORDER_CONSTANT, value=value)
    return img, margin
if __name__ == '__main__':
    # Smoke test: jitter a sample image and display it.
    # NOTE(review): hard-coded local dataset path — only works on the author's machine.
    img = cv2.imread('/media/wangyunnan/Data/datasets/GTAV/images/test/00011.png')
    img = color_jitter(img)
    cv2.imshow('test', img)
    cv2.waitKey()
|
983,388 | c1e6c9e08141b3dbb807241ed5bd0a0c61dfe284 | # -*- coding: utf-8 -*-
"""
Created on Tue May 23 23:26:22 2017
@author: dito-maf
"""
# Load data
import pandas as pd

# The UCI wine-quality CSVs use ';' as the field separator.
white = pd.read_csv("../Data/Wine/winequality-white.csv",sep=";")
red = pd.read_csv("../Data/Wine/winequality-red.csv",sep=";")
# Quick look at the white-wine data; `red` is loaded but not inspected here.
print(white.describe())
white.info()
983,389 | 13acd0d1fdb1e1da17b36b0bc80493feec3ee607 | from Fraction import Fraction,gcd
# Exercise the Fraction class: arithmetic operators, comparisons and accessors.
#fra.show()
f1 = Fraction(3, 2)
f2 = Fraction(5, 2)
# BUG FIX: the script mixed Python 2 print statements (bottom two lines) with
# Python 3 print() calls — a SyntaxError under Python 3. The trailing-comma
# idiom for suppressing the newline is replaced by end="".
print("%s + %s = " % (f1, f2), end="")
print(f1 + f2)
print(f1 - f2)
print(f1 * f2)
print(f1 / f2)
print(f1 == f2)  # checks only for shallow equality i.e.
                 # whether both vars reference the same object
f3 = f1  # assigning reference of f1 to f3, i.e. both point to the same object
print(f1 == f3)  # hence shallow equality is true
print(f1 == f2)
print(f1 <= f2)
print(f1 >= f2)
print(f1.get_num())
print(f1.get_den())
|
983,390 | c57d0271b5a056e8eb851c78aa63cf8dafd663cb | from django.shortcuts import render
def choose_authentication_system(request):
    """Render the landing page that lets the user pick an authentication system."""
    context = {'request': request}
    return render(request, 'auth_test/home.html', context)
983,391 | 262efd1dd63fc664bcacb40f9d806f3dac0eff8b | from http import cookiejar
from urllib import request, parse
__author__ = 'Ryun'
def request_post(url, data=None, cookie_jar=None):
    """POST *data* to *url* (a GET when data is None), threading cookies through a jar.

    Args:
        url: target URL.
        data: dict or string payload, form-encoded before sending; None sends a GET.
        cookie_jar: optional CookieJar; a fresh one is created per call when omitted.

    Returns:
        tuple: (cookie_jar, response).
    """
    # BUG FIX: the old signature used `cookie_jar=cookiejar.CookieJar()`, a mutable
    # default evaluated once at import time — every call without an explicit jar
    # shared the same jar (and its accumulated cookies).
    if cookie_jar is None:
        cookie_jar = cookiejar.CookieJar()
    encoded_data = prepare_urlencode(data)
    opener = request.build_opener(request.HTTPCookieProcessor(cookie_jar))
    resp = opener.open(url, encoded_data)
    return cookie_jar, resp
def request_get(url, cookie_jar):
    """GET *url* using an existing cookie jar and return only the response."""
    _, response = request_post(url, cookie_jar=cookie_jar)
    return response
def request_login(url, data):
    """POST login *data* to *url* and return the resulting cookie jar."""
    jar, _ = request_post(url, data)
    return jar
def prepare_urlencode(data):
    """Encode *data* for use as a request body.

    Plain dicts are form-encoded to UTF-8 bytes, strings are percent-quoted,
    and None passes through unchanged.
    """
    if data is None:
        return None
    # Deliberately `type(...) is dict` (not isinstance) to match only plain dicts.
    if type(data) is dict:
        return parse.urlencode(data).encode('utf-8')
    return parse.quote(data)
|
983,392 | ac5b7f4ddd1ba414761faca8829411129b06a1d0 | from django.shortcuts import render, get_object_or_404
from .models import Tips
def _render_stage(request, template, log_message):
    """Fetch the shared Tips record (id=1), log progress and render *template*."""
    question = get_object_or_404(Tips, id=1)
    print(log_message)
    return render(request, template, {'question': question})


def _render_dica(request, template):
    """Spend one hint ("dica") if any remain, then re-render *template*.

    Decrements Tips.chances, persists it and reloads the record; when no hints
    are left, renders the same page with a warning message instead.
    """
    question = get_object_or_404(Tips, id=1)
    if question.chances > 0:
        question.chances = question.chances - 1
        question.save()
        question = get_object_or_404(Tips, id=1)
        return render(request, template, {'question': question, 'message': 'Esta e a dica 1', 'dica': True})
    else:
        return render(request, template, {'question': question, 'message': 'Voce gastou todas as dicas', 'dica': False})


# The thirteen public views below keep their original names and signatures;
# the 6x duplicated stage/hint logic now lives in the two helpers above.

def index(request):
    """Game landing page."""
    return _render_stage(request, 'index.html', "INICIEI O JOGO!")


def start(request):
    """Stage 1."""
    return _render_stage(request, 'start.html', "ESTOU NA PRIMEIRA FASE")


def startDica(request):
    """Hint for stage 1."""
    return _render_dica(request, 'start.html')


def tword(request):
    """Stage 2."""
    return _render_stage(request, 'tword.html', "ESTOU NA SEGUNDA FASE")


def twordDica(request):
    """Hint for stage 2."""
    return _render_dica(request, 'tword.html')


def threerd(request):
    """Stage 3."""
    return _render_stage(request, 'threerd.html', "ESTOU NA TERCEIRA FASE")


def threerdDica(request):
    """Hint for stage 3."""
    return _render_dica(request, 'threerd.html')


def fourrd(request):
    """Stage 4."""
    return _render_stage(request, 'fourrd.html', "ESTOU NA QUARTA FASE")


def fourrdDica(request):
    """Hint for stage 4."""
    return _render_dica(request, 'fourrd.html')


def fiverd(request):
    """Stage 5."""
    return _render_stage(request, 'fiverd.html', "ESTOU NA QUINTA FASE")


def fiverdDica(request):
    """Hint for stage 5."""
    return _render_dica(request, 'fiverd.html')


def sixrd(request):
    """Stage 6."""
    return _render_stage(request, 'sixrd.html', "ESTOU NA SEXTA FASE")


def sixrdDica(request):
    """Hint for stage 6."""
    return _render_dica(request, 'sixrd.html')
|
983,393 | 98300a2980abfc0b6ffaf4907773046bc28c1cb9 | # Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.appengine.api import users
from model import db, Subject, MinimalSubject
from selenium_test_case import Regex, SeleniumTestCase
class PrintTest(SeleniumTestCase):
    """End-to-end check of the print view.

    setUp seeds three subjects around (51.5, 0); the test selects the center
    subject and verifies the print window's URL parameters, map, subject list,
    distances and summary counts.
    """

    def setUp(self):
        SeleniumTestCase.setUp(self)
        self.put_account(actions=['*:view'])
        # A subject well inside the 10-mile radius, with full details.
        self.put_subject(
            'haiti', 'example.org/10',
            title='title_within_10_miles', location=db.GeoPt(51.5, 0),
            total_beds=10, available_beds=5, address='address_foo',
            contact_name='contact_name_foo')
        # The subject the test selects as the center of the search.
        self.put_subject(
            'haiti', 'example.org/11',
            title='title_center', location=db.GeoPt(51.5, 0.01))
        # A subject beyond the 10-mile radius; must NOT appear in the list.
        self.put_subject(
            'haiti', 'example.org/12',
            title='title_outside_10_miles', location=db.GeoPt(51.6, 0.2))

    def tearDown(self):
        # Remove everything setUp created so tests stay independent.
        self.delete_account()
        self.delete_subject('haiti', 'example.org/10')
        self.delete_subject('haiti', 'example.org/11')
        self.delete_subject('haiti', 'example.org/12')
        SeleniumTestCase.tearDown(self)

    def test_print_page(self):
        """Confirms that the print page renders correctly."""
        # Print link should be initially disabled
        self.login('/?subdomain=haiti')
        self.click('id=print-link')
        assert self.get_alert().startswith('First select a hospital')
        # After a subject is selected, the Print link should work
        self.click('id=subject-1')
        self.wait_for_element('//div[@class="bubble"]//span')
        # Click the link and switch to the new window
        self.click_and_wait_for_new_window('print-link')
        # Verify that this looks like a print window
        params = self.get_location().split('?', 1)[1]
        pairs = set(params.split('&'))
        assert 'subdomain=haiti' in pairs
        assert 'print=yes' in pairs
        assert 'lat=51.500000' in pairs
        assert 'lon=0.010000' in pairs
        # 10 miles in metres is 16093.44; prefix-match to allow float jitter.
        assert any(pair.startswith('rad=16093.') for pair in pairs)
        self.assert_text(
            Regex('Displaying facilities within.*'),
            '//span[@id="header-print-subtitle" and @class="print-subtitle"]')
        # Check map is present with zoom elements
        self.assert_element('map')
        self.wait_for_element('//div[@title="Zoom in"]')
        self.assert_element('//div[@title="Zoom in"]')
        self.assert_element('//div[@title="Zoom out"]')
        # Confirm that exactly two subjects are present in the list.
        self.assert_text(Regex('title_center.*'),
                         "//tr[@id='subject-1']/*[@class='subject-title']")
        self.assert_text(Regex('title_within_10_miles.*'),
                         "//tr[@id='subject-2']/*[@class='subject-title']")
        self.assert_no_element(
            "//tr[@id='subject-3']/*[@class='subject-title']")
        # Confirm that subject-2 shows the right available/total bed counts
        self.assert_text(Regex('5'),
                         "//tr[@id='subject-2']/*[@class='subject-beds-open']")
        self.assert_text(Regex('10'),
                         "//tr[@id='subject-2']/*" +
                         "[@class='subject-beds-total']")
        # Confirm that subject-2 shows the right distance to subject-1
        self.assert_text(Regex('0.4 miles.*'),
                         "//tr[@id='subject-2']/*[@class='subject-distance']")
        # Confirm that subject-2 shows the correct address
        self.assert_text(Regex('address_foo'),
                         "//tr[@id='subject-2']/*[@class='subject-address']")
        # Confirm that subject-2 shows the correct information section
        self.assert_text(Regex('contact_name_foo'),
                         "//tr[@id='subject-2']/*" +
                         "[@class='subject-general-info']")
        # Test to make sure the proper number of subjects are rendering.
        # td[1] is the number of total subjects
        # td[2] is the number of subjects less than 10 miles away
        # td[3] is the number of subjects with availability
        self.assert_text(Regex('3'),
                         '//tbody[@id="print-summary-tbody"]//tr//td[1]')
        self.assert_text(Regex('2'),
                         '//tbody[@id="print-summary-tbody"]//tr//td[2]')
        self.assert_text(Regex('1'),
                         '//tbody[@id="print-summary-tbody"]//tr//td[3]')
983,394 | 7e0b4b616a195c44902f396951aabbf17591a45a | from fastapi import Request, Depends, APIRouter, Body, Path, Query, Response
from asyncpgsa.connection import SAConnection
from sqlalchemy import select
from sqlalchemy.dialects.postgresql import insert
from typing import List, Optional, Union
from app.model import items
from app.schema import Item, ResponseModel
from app.dependencies import get_postgresql_connection
router = APIRouter()
@router.get(
    '/item/',
    name='Получить все элементы',
    response_model=Union[List[Item], ResponseModel]
)
async def get_items(
    request: Request,
    response: Response,
    db: SAConnection = Depends(get_postgresql_connection)
):
    """Getting all items list"""
    # The dependency yields None when Postgres is unavailable: answer 503.
    if db is None:
        response.status_code = 503
        return ResponseModel(result='Service unavailable')
    q = items.select()
    result = await db.fetch(query=q)
    items_list = [Item(**item) for item in result]
    # Warm the cache so subsequent GET /item/{id} calls can skip the database.
    for item in items_list:
        await request.app.extra['cache'].set_cache_item(item=item)
    return items_list
@router.post(
    '/item/',
    name='Создать новый элемент',
    response_model=ResponseModel,
    status_code=201
)
async def create_item(
    request: Request,
    response: Response,
    value: str = Body(
        ...,
        # BUG FIX: the keyword was misspelled "desccription", so the text
        # never reached the generated OpenAPI schema.
        description='Значение для нового элемента',
        example='Булочка с маком',
        embed=True
    ),
    db: SAConnection = Depends(get_postgresql_connection)
):
    """Create new item"""
    # The dependency yields None when Postgres is unavailable: answer 503.
    if db is None:
        response.status_code = 503
        return ResponseModel(result='Service unavailable')
    # Insert and return the new row's id (fetchval returns the first column).
    q = insert(items).values(value=value)
    created_item_id = await db.fetchval(q)
    return ResponseModel(result=created_item_id)
@router.get(
    '/item/{item_id}',
    name='Получение одного элемента',
    response_model=Item
)
async def get_item(
    request: Request,
    response: Response,
    item_id: int,
    db: SAConnection = Depends(get_postgresql_connection)
):
    """Get item by id"""
    # Serve from cache first to avoid hitting Postgres.
    cached_item = await request.app.extra['cache'].get_cache_item(item_id=item_id)
    if cached_item:
        return cached_item
    if db is None:
        response.status_code = 503
        return ResponseModel(result='Service unavailable')
    q = items.select().where(items.c.id == item_id)
    item = await db.fetchrow(query=q)
    if item is not None:
        item = Item(**item)
        # Populate the cache for the next lookup.
        await request.app.extra['cache'].set_cache_item(item=item)
        return item
    else:
        # NOTE(review): returns None with status 404 while response_model=Item —
        # FastAPI may reject a None body here; confirm the intended behaviour.
        response.status_code = 404
@router.put(
    '/item/{item_id}',
    name='Обновить элемент',
    response_model=ResponseModel
)
async def update_item(
    request: Request,
    response: Response,
    item_id: int = Path(..., description='Item ID '),
    value: str = Body(
        None,
        embed=True,
        description='Новое значение элемента', example='Торт "Наполеон"'),
    db: SAConnection = Depends(get_postgresql_connection)
):
    """Update item"""
    # Invalidate any cached copy before touching the database.
    await request.app.extra['cache'].drop_cache_item(item_id=item_id)
    if db is None:
        response.status_code = 503
        return ResponseModel(result='Service unavailable')
    # NOTE(review): `value` defaults to None, so a missing body writes NULL —
    # confirm that is intended.
    q = items.update().values(value=value).where(items.c.id == item_id)
    result = await db.fetch(query=q)
    return ResponseModel(result='ok')
@router.delete(
    '/item/{item_id}',
    name='Удалить элемент',
    response_model=ResponseModel
)
async def delete_item(
    request: Request,
    response: Response,
    item_id: int = Path(..., description='Item ID '),
    db: SAConnection = Depends(get_postgresql_connection)
):
    """Delete item"""
    # BUG FIX: the coroutine was referenced but never called
    # ("await ...drop_cache_item" without arguments), so stale cache entries
    # survived the delete. Mirror update_item's invalidation.
    await request.app.extra['cache'].drop_cache_item(item_id=item_id)
    if db is None:
        response.status_code = 503
        return ResponseModel(result='Service unavailable')
    q = items.delete().where(items.c.id == item_id)
    result = await db.fetch(query=q)
    return ResponseModel(result='ok')
|
983,395 | aedd5ad146fa7f561205a16932dd4bec217a1c69 | from django.shortcuts import render, redirect
from .form import SignUpForm
from django.contrib.auth import login, authenticate
import logging
logger = logging.getLogger(__name__)
def error_404(request, exception):
    """Custom handler for HTTP 404 (page not found)."""
    return render(request,'errors/404.html')

def error_500(request):
    """Custom handler for HTTP 500 (Django passes no exception to 500 handlers)."""
    return render(request,'errors/500.html')

def error_400(request, exception):
    """Custom handler for HTTP 400 (bad request)."""
    return render(request,'errors/400.html')

def error_403(request, exception):
    """Custom handler for HTTP 403 (permission denied)."""
    return render(request,'errors/403.html')
def signup(request):
    """Handle user registration: save the account, authenticate, then redirect.

    On GET (or invalid POST) the registration form is (re-)rendered.
    """
    if request.method == 'POST':
        form = SignUpForm(request.POST)
        if form.is_valid():
            form.save()
            user = authenticate(
                username=form.cleaned_data.get('username'),
                password=form.cleaned_data.get('password1'),
            )
            login(request, user)
            return redirect('login')
    else:
        form = SignUpForm()
    return render(request, 'registration/sign_up.html', {'form': form})
983,396 | d41231b50be278612a0ea3a069432486a68a07cc | # USe bfs for this . since bfs always visit connected vertex first then nodes with two distance and on.
# So keep a separate distance array and update each node's distance inside the BFS loop.
983,397 | 574c3f3f81c3e4503d4efbb42b47fb4566134c1c | from django.db import models
from prescription.models import Prescription
class PrescriptionRecommendation(models.Model):
    """A free-text recommendation attached to a prescription."""
    # NOTE(review): ForeignKey without on_delete — fine on Django < 2.0 (defaults
    # to CASCADE) but a TypeError on Django >= 2.0; confirm the target version.
    prescription = models.ForeignKey(Prescription)
    # Recommendation text, capped at 1000 characters.
    recommendation = models.CharField(max_length=1000)
|
def shard(ymbpw):
    """Aggregate the (year, month, bra, hs, wld) table into six coarser shards.

    Rows are classified by id length: bra_id == 9 chars, hs_id == 6 chars and
    wld_id == 5 chars mark the most detailed ("leaf") level of each dimension.
    Each shard keeps the leaf rows of the dropped dimensions and sums the rest.
    """
    ymbpw = ymbpw.reset_index()
    is_bra_leaf = ymbpw.bra_id.map(lambda v: len(v) == 9)
    is_hs_leaf = ymbpw.hs_id.map(lambda v: len(v) == 6)
    is_wld_leaf = ymbpw.wld_id.map(lambda v: len(v) == 5)

    ymb = ymbpw[is_wld_leaf & is_hs_leaf].groupby(['year', 'month', 'bra_id']).sum()
    ymbp = ymbpw[is_wld_leaf].groupby(['year', 'month', 'bra_id', 'hs_id']).sum()
    ymbw = ymbpw[is_hs_leaf].groupby(['year', 'month', 'bra_id', 'wld_id']).sum()
    ymp = ymbpw[is_bra_leaf & is_wld_leaf].groupby(['year', 'month', 'hs_id']).sum()
    ympw = ymbpw[is_bra_leaf].groupby(['year', 'month', 'hs_id', 'wld_id']).sum()
    ymw = ymbpw[is_bra_leaf & is_hs_leaf].groupby(['year', 'month', 'wld_id']).sum()
    return [ymb, ymbp, ymbw, ymp, ympw, ymw]
983,399 | 4aa2c9786109b2b09a55ec61238d18bf44ec1116 | import re
import hashlib
from classifier.document import Document
from triager import db, config
from jira import Jira
class TrainStatus(object):
    """String constants describing where a project is in its training lifecycle."""
    NOT_TRAINED = "not_trained"
    QUEUED = "queued"
    TRAINING = "training"
    TRAINED = "trained"
    FAILED = "failed"

    @classmethod
    def is_active(cls, status):
        """Return True when *status* denotes an in-flight, queued or failed run."""
        return status in (cls.TRAINING, cls.FAILED, cls.QUEUED)
class Project(db.Model):
    """A triaged project: its data source, training schedule and latest metrics."""
    id = db.Column(db.Integer, primary_key=True)
    #: Name of the project
    name = db.Column(db.String(63), nullable=False)
    datasource_id = db.Column(db.Integer, db.ForeignKey('datasource.id'))
    datasource = db.relationship("DataSource")
    # Lifecycle state; one of the TrainStatus constants.
    # NOTE(review): "not_trained" is 11 characters but the column is String(10) —
    # harmless on SQLite, truncation risk on stricter backends; confirm.
    train_status = db.Column(
        db.String(10), default=TrainStatus.NOT_TRAINED, nullable=False)
    training_message = db.Column(db.String(253))
    # Cron-style retraining schedule (default: daily at midnight).
    schedule = db.Column(db.String(63), default="0 0 * * *")
    # Timestamp of the last training run.
    last_training = db.Column(db.Float(), default=0.0)
    # Evaluation metrics from the last training run.
    accuracy = db.Column(db.Float(), default=0.0)
    precision = db.Column(db.Float(), default=0.0)
    recall = db.Column(db.Float(), default=0.0)

    __table_args__ = {'sqlite_autoincrement': True}
class Feedback(db.Model):
    """Per-document recommendation feedback, keyed by a content digest."""
    # SHA-512 hex digest (128 chars) of project id + title + content — see get_id_from_doc.
    id = db.Column(db.String(128), primary_key=True)
    project_id = db.Column(db.Integer, db.ForeignKey('project.id'))
    project = db.relationship("Project")
    # Counters: how often a recommendation was selected vs. explicitly confirmed.
    selected_recommendation = db.Column(db.Integer, default=0)
    confirmed_recommendation = db.Column(db.Integer, default=0)

    @classmethod
    def get_id_from_doc(cls, document, project=None):
        """Build a stable id for *document*: SHA-512 of the whitespace-stripped
        concatenation of project id, title and content."""
        title = document.title if document.title else ""
        content = document.content if document.content else ""
        project_id = str(project.id) if project else ""
        # Strip all whitespace so formatting differences do not change the id.
        striped_doc = re.sub(r'\s+', '', project_id + title + content)
        digest = hashlib.sha512(striped_doc.encode("utf-8")).hexdigest()
        return digest
class DataSource(db.Model):
    """Polymorphic base for project data sources (discriminated on `type`)."""
    __tablename__ = "datasource"
    id = db.Column(db.Integer, primary_key=True)
    # Discriminator column used by SQLAlchemy's polymorphic loading.
    type = db.Column(db.String(63))

    __mapper_args__ = {
        'polymorphic_on': type
    }

    def get_data(self):
        """Return a list of classifier Documents; implemented by subclasses."""
        raise NotImplementedError()
class JiraDataSource(DataSource):
    """DataSource backed by a JIRA project's resolved/closed, assigned issues."""
    jira_api_url = db.Column(db.String(253))
    jira_project_key = db.Column(db.String(63))
    # Comma-separated lists, spliced directly into the JQL `in (...)` clauses.
    jira_statuses = db.Column(db.String(63), default="Resolved,Closed")
    jira_resolutions = db.Column(db.String(63))

    __mapper_args__ = {
        'polymorphic_identity': 'jira'
    }

    def get_data(self):
        """Fetch finished, assigned issues from JIRA as classifier Documents.

        Returns:
            list[Document]: one document per issue (summary, description,
            assignee name as the label).
        """
        jira = Jira(self.jira_api_url)
        jql = "project=%s and status in (%s) " + \
            "and assignee!=null"
        jql = jql % (self.jira_project_key, self.jira_statuses)
        if self.jira_resolutions:
            # BUG FIX: the clause was appended without the "and" connective
            # (" resolutions in (...)"), producing syntactically invalid JQL;
            # the JQL field is also named "resolution", not "resolutions".
            jql += " and resolution in (%s)" % self.jira_resolutions
        fields = 'summary,description,assignee,created'
        raw_issues = jira.find_all(jql, fields,
                                   limit=int(config.general__ticket_limit))
        data = []
        for issue in raw_issues:
            issue_fields = issue['fields']
            document = Document(issue_fields['summary'],
                                issue_fields['description'],
                                issue_fields['assignee']['name'])
            data.append(document)
        return data
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.