| id | content |
|---|---|
1684457
|
from das.parsers import IAddPortscanOutput
class AddPortscanOutput(IAddPortscanOutput):
"""Child class for processing Nmap output."""
def parse(self):
"""
Nmap raw output parser.
:return: a pair of values (portscan raw output filename, number of hosts added to DB)
:rtype: tuple
"""
hosts = set()
for line in self.portscan_raw:
try:
ip = line.split()[-1]
port, _ = line.split()[3].split('/') # port, proto
except Exception:
pass
else:
item = {'ip': ip, 'port': int(port)}
if item not in self.db:
self.db.insert(item)
hosts.add(ip)
return (self.portscan_out, len(hosts))
|
1684470
|
from .gcn import GCNConv
from .sgc import SGConv
from .gat import GATConv
from .gwnn import WaveletConv
from .robustgcn import GaussionConvF, GaussionConvD
from .graphsage import SAGEAggregator, GCNAggregator
from .chebynet import ChebConv
from .densegcn import DenseConv
from .lgcn import LGConv
from .edgeconv import GCNEdgeConv
from .mediansage import MedianAggregator, MedianGCNAggregator
from .gcna import GCNAConv
from .dagnn import DAGNNConv
from .tagcn import TAGConv
from .appnp import APPNProp, PPNProp
from .ssgc import SSGConv
from .agnn import AGNNConv
from .arma import ARMAConv
|
1684532
|
import tensorflow as tf
import numpy as np
import pyomo.environ as pyo
from relumip import AnnModel
from relumip.utils.visualization import plot_results_2d
# Load the trained tensorflow model which will be embedded into the optimization problem.
tf_model = tf.keras.models.load_model('data/peaks_3x10.h5')
# Create a pyomo model into which the ANN will be embedded.
model = pyo.ConcreteModel()
model.construct()
# All network variables will be added to a user-defined block within the model.
model.ann = pyo.Block()
# The network input and output variables have to be defined by the user.
# For the network input, finite variable bounds have to be supplied (they can be inferred from the data used to train
# the model, for example).
model.ann.Input1 = pyo.Var(within=pyo.Reals, bounds=(-3, 3))
model.ann.Input2 = pyo.Var(within=pyo.Reals, bounds=(-3, 3))
model.ann.Output = pyo.Var(bounds=(-10000, 10000), within=pyo.Reals)
# Input and output variables are stored in lists to be passed to the AnnModel.
input_vars = [model.ann.Input1, model.ann.Input2]
output_vars = [model.ann.Output]
# A solver instance has to be defined for bound tightening. Make sure that an appropriate MIP solver is installed.
solver = pyo.SolverFactory('glpk')
# Now the AnnModel instance can be created.
ann_model = AnnModel(tf_model=tf_model, modeling_language='PYOMO')
# Input and output variables are connected to the network.
# The block dedicated for the ANN model has to be passed as well.
ann_model.connect_network_input(opt_model=model.ann, input_vars=input_vars)
ann_model.connect_network_output(opt_model=model.ann, output_vars=output_vars)
# This call generates the network formulation inside the block.
# The bound tightening strategy has to be specified, for Pyomo the options are 'MIP' or 'LP' (default).
ann_model.embed_network_formulation(bound_tightening_strategy='LP', solver=solver)
# In this example, no additional model components besides the ANN are considered.
# We choose to minimize the network output and display the solved model.
model.obj = pyo.Objective(expr=model.ann.Output, sense=pyo.minimize)
res = solver.solve(model)
model.display()
# To visualize the computed results, a test data set is generated within the ANN input domain and the tensorflow model
# is evaluated on it. The solution point computed above is extracted and shown on the response surface plot.
sample_input = 6 * np.random.rand(10000, 2) - 3
sample_output = tf_model.predict(sample_input)
sol_point = [input_vars[0].value, input_vars[1].value, output_vars[0].value]
plot_results_2d(sample_input, sample_output, sol_point=sol_point)
# The model parameters computed during bound tightening can be saved for future use of the same model. See the
# 'load_precomputed_parameters_example.py' file for more information on how to load precomputed parameters.
ann_model.save_param('data/peaks3x10_param')
|
1684536
|
from tkinter import *
from moviepy.editor import VideoFileClip
from moviepy.editor import AudioFileClip
from tkinter import filedialog
from tkinter import messagebox
from tkinter import ttk
import threading
# Variables for video and audio files
video_file = ''
audio_file = ''
def get_video_file():
"""
Function that gets the video file that needs to be converted
"""
global video_filepath, video_file
video_filepath.set(filedialog.askopenfilename(title="Select your video file", filetypes=[
('MP4 (mp4, m4a, m4v, f4v, f4a, m4b, m4r, f4b, mov)',
'*.mp4 *.m4a *.m4v *.f4v *.f4a *.m4b *.m4r *.f4b *.mov'),
('3GP (3gp, 3gp2, 3g2, 3gpp, 3gpp2)', '*.3gp *.3gp2 *.3g2 *.3gpp *.3gpp2'),
('OGG (ogg, oga, ogv, ogx)', '*.ogg *.oga *.ogv *.ogx'),
('WMV (wmv, wma)', '*.wmv *.wma'),
('FLV', '*.flv'), ('AVI', '*.avi'), ('MPEG-1 (mpg, mp2, mpeg, mpe, mpv )',
'*.mpg *.mp2 *.mpeg *.mpe *.mpv'),
('MPEG-2', '*.mpg *.mpeg *.m2v')]))
video_file = VideoFileClip(str(video_filepath.get()))
def save_audio_file():
"""
Function that converts video file into audio file in a path that the user chooses
"""
global audio_filepath, audio_file, progress_bar
audio_filepath.set(filedialog.asksaveasfilename(defaultextension='.mp3',
title="Select your audio file directory", filetypes=[
('MP3 File', '*.mp3'), ('Wave File', '*.wav')]))
try:
audio_file = video_file.audio
audio_file.write_audiofile(str(audio_filepath.get()))
video_file.close()
audio_file.close()
messagebox.showinfo(message="File converted successfully")
except Exception:
messagebox.showerror(
message="File could not be converted", title="File Error")
# Resetting the video and audio paths
video_filepath.set('')
audio_filepath.set('')
# Resetting the progressbar after function execution
progress_bar['value'] = 0
progress_bar.stop()
def run_program():
"""
Function that runs the process of conversion and loading bar concurrently
"""
global progress_bar
t1 = threading.Thread(target=progress_bar.start)
t2 = threading.Thread(target=save_audio_file)
t2.start()
t1.start()
# Initializing main program settings
main_prog = Tk()
main_prog.title("Video to Audio Converter")
main_prog.maxsize(800, 400)
main_prog.minsize(500, 200)
main_prog.config(bg="ivory")
# Variables for file paths
video_filepath = StringVar()
audio_filepath = StringVar()
# Creating UI Frame
UI_frame = Frame(main_prog, width=500, height=500, bg="ivory")
UI_frame.grid(row=0, column=0)
# Labels and buttons of the program
Label(UI_frame, text="Choose your video file", bg="ivory").grid(
row=1, column=1, padx=5, pady=5, sticky=W)
Button(UI_frame, text="Browse", command=get_video_file,
bg="grey").grid(row=1, column=2, padx=5, pady=5)
Button(UI_frame, text="Convert", command=run_program,
bg="green").grid(row=2, column=2, padx=5, pady=5)
Label(UI_frame, textvariable=video_filepath, bg="ivory").grid(
row=1, column=3, padx=5, pady=5, sticky=W)
progress_bar = ttk.Progressbar(
main_prog, orient=HORIZONTAL, mode='indeterminate', length=500)
progress_bar.grid(padx=25, pady=25)
# Calling main program
main_prog.mainloop()
|
1684541
|
SIGNATURE = '<Signature xmlns="http://www.w3.org/2000/09/xmldsig#">' \
'<SignedInfo>' \
'<CanonicalizationMethod Algorithm="http://www.w3.org/2006/12/xml-c14n11"/>' \
'<SignatureMethod Algorithm="http://www.w3.org/2001/04/xmldsig-more#rsa-sha256"/>' \
'<Reference>' \
'<Transforms>' \
'<Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature"/>' \
'<Transform Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/>' \
'</Transforms>' \
'<DigestMethod Algorithm="http://www.w3.org/2001/04/xmlenc#sha256"/>' \
'<DigestValue />' \
'</Reference>' \
'</SignedInfo>' \
'<SignatureValue />' \
'<KeyInfo><X509Data />' \
'</KeyInfo>' \
'</Signature>'
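# A minimal sketch (not part of the original snippet) showing that the template above
# carries empty DigestValue / SignatureValue placeholders which a later signing step fills in:
import xml.etree.ElementTree as ET
_ns = {'ds': 'http://www.w3.org/2000/09/xmldsig#'}
_root = ET.fromstring(SIGNATURE)
print(_root.find('.//ds:DigestValue', _ns).text)     # -> None (empty placeholder)
print(_root.find('.//ds:SignatureValue', _ns).text)  # -> None (empty placeholder)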
|
1684561
|
from setuptools import setup
import os
def _read(fn):
path = os.path.join(os.path.dirname(__file__), fn)
data = open(path).read()
return data
setup(name='cluster-workers',
version='0.1.0',
description='a client/master/worker system for distributing '
'jobs in a cluster',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/sampsyo/cluster-workers',
license='MIT',
platforms='ALL',
long_description=_read('README.rst'),
packages=['cw'],
install_requires=['bluelet', 'cloud', 'futures'],
classifiers=[
'Topic :: System :: Networking',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
)
|
1684572
|
from analyzer.syntax_kind import SyntaxKind
from tests.utils import TestCaseLexer
class TestLexerComparisonOperatorToken(TestCaseLexer):
def test_less_than_token(self):
self.tokenize_source("<", 2)
self.assertToken(0, SyntaxKind.LessThanToken, [], [])
self.assertToken(1, SyntaxKind.EndOfFileToken, [], [])
def test_greater_than_token(self):
self.tokenize_source(">", 2)
self.assertToken(0, SyntaxKind.GreaterThanToken, [], [])
self.assertToken(1, SyntaxKind.EndOfFileToken, [], [])
def test_equals_token(self):
self.tokenize_source("=", 2)
self.assertToken(0, SyntaxKind.EqualsToken, [], [])
self.assertToken(1, SyntaxKind.EndOfFileToken, [], [])
def test_less_than_equals_token(self):
self.tokenize_source("<=", 2)
self.assertToken(0, SyntaxKind.LessThanEqualsToken, [], [])
self.assertToken(1, SyntaxKind.EndOfFileToken, [], [])
def test_greater_than_equals_token(self):
self.tokenize_source(">=", 2)
self.assertToken(0, SyntaxKind.GreaterThanEqualsToken, [], [])
self.assertToken(1, SyntaxKind.EndOfFileToken, [], [])
def test_less_than_greater_than_token(self):
self.tokenize_source("<>", 2)
self.assertToken(0, SyntaxKind.LessThanGreaterThanToken, [], [])
self.assertToken(1, SyntaxKind.EndOfFileToken, [], [])
|
1684607
|
import uuid
import time
from sdc11073 import pmtypes
from sdc11073.namespaces import domTag
from sdc11073.sdcdevice import SdcDevice
from sdc11073.mdib import DeviceMdibContainer
from sdc11073.pysoap.soapenvelope import DPWSThisModel, DPWSThisDevice
from sdc11073.location import SdcLocation
from sdc11073.wsdiscovery import WSDiscoverySingleAdapter
# example SDC provider (device) that sends out metrics every now and then
# The provider we use, should match the one in consumer example
# The UUID is created from a base
baseUUID = uuid.UUID('{cc013678-79f6-403c-998f-3cc0cc050230}')
my_uuid = uuid.uuid5(baseUUID, "12345")
# setting the local ensemble context upfront
def setLocalEnsembleContext(mdib, ensemble):
descriptorContainer = mdib.descriptions.NODETYPE.getOne(domTag('EnsembleContextDescriptor'))
if not descriptorContainer:
print("No ensemble contexts in mdib")
return
allEnsembleContexts = mdib.contextStates.descriptorHandle.get(descriptorContainer.handle, [])
with mdib.mdibUpdateTransaction() as mgr:
# set all currently associated ensemble contexts to Disassociated
associatedEnsembles = [l for l in allEnsembleContexts if
l.ContextAssociation == pmtypes.ContextAssociation.ASSOCIATED]
for l in associatedEnsembles:
ensembleContext = mgr.getContextState(l.descriptorHandle, l.Handle)
ensembleContext.ContextAssociation = pmtypes.ContextAssociation.DISASSOCIATED
ensembleContext.UnbindingMdibVersion = mdib.mdibVersion # UnbindingMdibVersion is the first version in which it is no longer bound ( == this version)
newEnsState = mgr.getContextState(descriptorContainer.handle) # this creates a new ensemble context state
newEnsState.ContextAssociation = 'Assoc'
newEnsState.Identification = [pmtypes.InstanceIdentifier(root="1.2.3", extensionString=ensemble)]
if __name__ == '__main__':
# start with discovery (MDPWS) that is running on the named adapter "Ethernet" (replace as you need it on your machine, e.g. "enet0" or "Ethernet")
myDiscovery = WSDiscoverySingleAdapter("Ethernet")
# start the discovery
myDiscovery.start()
# create a local mdib that will be sent out on the network, the mdib is based on a XML file
my_mdib = DeviceMdibContainer.fromMdibFile("mdib.xml")
print ("My UUID is {}".format(my_uuid))
# set a location context to allow easy discovery
my_location = SdcLocation(fac='HOSP', poc='CU2', bed='BedSim')
# set model information for discovery
dpwsModel = DPWSThisModel(manufacturer='Draeger',
manufacturerUrl='www.draeger.com',
modelName='TestDevice',
modelNumber='1.0',
modelUrl='www.draeger.com/model',
presentationUrl='www.draeger.com/model/presentation')
dpwsDevice = DPWSThisDevice(friendlyName='TestDevice',
firmwareVersion='Version1',
serialNumber='12345')
# create a device (provider) class that will do all the SDC magic
sdcDevice = SdcDevice(ws_discovery=myDiscovery,
my_uuid=my_uuid,
model=dpwsModel,
device=dpwsDevice,
deviceMdibContainer=my_mdib)
# start the local device and make it discoverable
sdcDevice.startAll()
# set the local ensemble context to ease discovery based on ensemble ID
setLocalEnsembleContext(my_mdib, "MyEnsemble")
# set the location on our device
sdcDevice.setLocation(my_location)
# create one local numeric metric that will change later on
numMetrDescr = domTag("NumericMetricDescriptor")
# get all metrics from the mdib (as described in the file)
allMetricDescrs = [c for c in my_mdib.descriptions.objects if c.NODETYPE == numMetrDescr]
# now change all the metrics in one transaction
with my_mdib.mdibUpdateTransaction() as mgr:
for metricDescr in allMetricDescrs:
# get the metric state of this specific metric
st = mgr.getMetricState(metricDescr.handle)
# create a value in case it is not there yet
st.mkMetricValue()
# set the value and some other fields to a fixed value
st.metricValue.Value = 1.0
st.metricValue.ActiveDeterminationPeriod = "1494554822450"
st.metricValue.Validity = 'Vld'
st.ActivationState = "On"
metricValue = 0
# now iterate forever and change the value every few seconds
while True:
metricValue += 1
with my_mdib.mdibUpdateTransaction() as mgr:
for metricDescr in allMetricDescrs:
st = mgr.getMetricState(metricDescr.handle)
st.metricValue.Value = metricValue
time.sleep(5)
|
1684614
|
def cadena(N):
C1 = 'ESTA ES NUESTRA CADENA DE PRUEBA '
C = ''
for j in range(N):
C += C1*100
return C
|
1684628
|
import numpy
from ..codecs.InflTCorpFileCodec import InflTCorpFileCodec
from ..slexicon.SKey import *
class ModelInflInData(object):
WVEC_LEN = 9 # keep 8 letters from the lemma + 1 for the category
MAX_LETTER_IDX = 28 # a-z plus <oov> and <>
def __init__(self, fn):
self.entries = InflTCorpFileCodec.load(fn)
# Lower-case the word and turn it around (so the last char is always in position 0)
# Empty characters are labeled 0, characters not a-z are labeled 1
@classmethod
def wordToVec(cls, word, category):
vec = numpy.zeros(shape=(cls.WVEC_LEN, cls.MAX_LETTER_IDX), dtype='float32')
word = list(word.lower())[::-1] # lower-case, list, inverted-order
for i, letter in enumerate(word):
if i >= cls.WVEC_LEN-1:
break
val = ord(letter)
one_hot = val-95 if val>=97 and val<=122 else 1
vec[i+1, one_hot] = 1
# Now prepend the category one-hot
if category == SKey.NOUN:
vec[0, 0] = 1
elif category == SKey.VERB:
vec[0, 1] = 1
elif category == SKey.ADJ:
vec[0, 2] = 1
elif category == SKey.ADV:
vec[0, 3] = 1
else:
raise ValueError('Unhandled category: %s' % category)
return vec
# Input letters classes
@staticmethod
def getLetterClasses():
classes = ['<>', '<oov>'] + [chr(i) for i in range(97,123)]
return classes
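# Worked example of the letter indexing above (illustrative, not part of the module):
# wordToVec('Cat', SKey.NOUN) lower-cases and reverses the word to 'tac', then marks
# row 1 column ord('t') - 95 = 21, row 2 column ord('a') - 95 = 2 and row 3 column
# ord('c') - 95 = 4, while row 0 column 0 carries the noun category. This matches
# getLetterClasses(), where index 0 is '<>', index 1 is '<oov>' and 'a'..'z' map to 2..27.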
|
1684634
|
import unittest
from binascii import a2b_hex
import wincrypto
from wincrypto.algorithms import symmetric_algorithms, hash_algorithms
from wincrypto.api import CryptImportKey, CryptExportKey, CryptCreateHash, CryptDeriveKey, \
CryptHashData
from wincrypto.constants import bType_PLAINTEXTKEYBLOB, CALG_MD5, CALG_RC4, CALG_SHA1, CALG_AES_192, bType_SIMPLEBLOB
TEST_RSA_PRIVATE_PEM = '''-----<KEY>'''
TEST_RSA_PUBLIC_PEM = '''-----BEGIN PUBLIC KEY-----
<KEY>'''
TEST_DATA = bytes(bytearray(range(64))) # 64 is a multiple of most blocksizes
class TestSymmetricKey(unittest.TestCase):
def test_export_import_plain(self):
for algorithm in symmetric_algorithms:
instance = algorithm(b'A' * algorithm.key_len)
blob = CryptExportKey(instance, None, bType_PLAINTEXTKEYBLOB)
instance2 = CryptImportKey(blob)
self.assertEqual(instance.key, instance2.key)
def test_export_import_simple(self):
rsa_key = wincrypto.algorithms.RSA_KEYX.from_pem(TEST_RSA_PRIVATE_PEM)
for algorithm in symmetric_algorithms:
instance = algorithm(b'A' * algorithm.key_len)
try:
blob = CryptExportKey(instance, rsa_key, bType_SIMPLEBLOB)
except NotImplementedError:
continue
instance2 = CryptImportKey(blob, rsa_key)
self.assertEqual(instance.key, instance2.key)
def test_encrypt_decrypt(self):
for algorithm in symmetric_algorithms:
instance = algorithm(b'A' * algorithm.key_len)
c = instance.encrypt(TEST_DATA)
p = instance.decrypt(c)
self.assertEqual(TEST_DATA, p)
class TestRsa(unittest.TestCase):
def test_rsa_public_import_export(self):
rsa_key = wincrypto.algorithms.RSA_KEYX.from_pem(TEST_RSA_PUBLIC_PEM)
ms_key = rsa_key.export_publickeyblob()
aes_key2 = wincrypto.CryptImportKey(ms_key)
self.assertEqual(rsa_key.key, aes_key2.key)
def test_rsa_private_import_export(self):
rsa_key = wincrypto.algorithms.RSA_KEYX.from_pem(TEST_RSA_PRIVATE_PEM)
ms_key = rsa_key.export_privatekeyblob()
aes_key2 = wincrypto.CryptImportKey(ms_key)
self.assertEqual(rsa_key.key, aes_key2.key)
def test_rsa_encrypt_decrypt(self):
private_key = wincrypto.algorithms.RSA_KEYX.from_pem(TEST_RSA_PRIVATE_PEM)
public_key = wincrypto.algorithms.RSA_KEYX.from_pem(TEST_RSA_PUBLIC_PEM)
c = public_key.encrypt(TEST_DATA)
p = private_key.decrypt(c)
self.assertEqual(TEST_DATA, p)
class TestHash(unittest.TestCase):
def test_hash_len(self):
for algorithm in hash_algorithms:
instance = algorithm()
hash_val = instance.get_hash_val()
hash_size = instance.get_hash_size()
self.assertEqual(len(hash_val), hash_size)
class TestCryptDeriveKey(unittest.TestCase):
def test_cryptderivekey_md5_rc4(self):
md5_hash = CryptCreateHash(CALG_MD5)
CryptHashData(md5_hash, b'Test')
rc4_key = CryptDeriveKey(md5_hash, CALG_RC4)
known_key = a2b_hex('<KEY>')
self.assertEqual(rc4_key.key, known_key)
def test_cryptderivekey_sha1_aes192(self):
sha1_hash = CryptCreateHash(CALG_SHA1)
CryptHashData(sha1_hash, b'Test')
aes_key = CryptDeriveKey(sha1_hash, CALG_AES_192)
known_key = a2b_hex(b'97d4f8389786352382ce6079c28d6ed3d65021a99b96263e')
self.assertEqual(aes_key.key, known_key)
|
1684649
|
from conekt import db, whooshee
from conekt.models.species import Species
from conekt.models.gene_families import GeneFamilyMethod
SQL_COLLATION = 'NOCASE' if db.engine.name == 'sqlite' else ''
@whooshee.register_model('name')
class XRef(db.Model):
__tablename__ = 'xrefs'
id = db.Column(db.Integer, primary_key=True)
platform = db.Column(db.String(50, collation=SQL_COLLATION), index=True)
name = db.Column(db.String(50, collation=SQL_COLLATION), index=True)
url = db.Column(db.Text())
@staticmethod
def __create_xref_genes(species_id, platform, url):
"""
Creates xrefs for all genes of a species to an external platform
:param species_id: species ID of the species to process
:param platform: name of the external platform
:param url: URL template with a %s placeholder for the gene name
"""
species = Species.query.get(species_id)
sequences = species.sequences.all()
for s in sequences:
xref = XRef()
xref.name = s.name
xref.platform = platform
xref.url = url % s.name.upper()
s.xrefs.append(xref)
try:
db.session.commit()
except Exception as e:
db.session.rollback()
@staticmethod
def create_plaza_xref_genes(species_id):
"""
Creates xrefs to PLAZA 3.0 Dicots
:param species_id: species ID of the species to process
"""
XRef.__create_xref_genes(species_id, "PLAZA 3.0 Dicots", "http://bioinformatics.psb.ugent.be/plaza/versions/plaza_v3_dicots/genes/view/%s")
@staticmethod
def create_evex_xref_genes(species_id):
"""
Creates xrefs to EVEX
:param species_id: species ID of the species to process
"""
XRef.__create_xref_genes(species_id, "EVEX", "http://www.evexdb.org/search/?search=%s")
@staticmethod
def add_xref_genes_from_file(species_id, filename):
species = Species.query.get(species_id)
sequences = species.sequences.all()
seq_dict = {s.name.upper(): s for s in sequences}
with open(filename, "r") as f:
for i, line in enumerate(f):
sequence, name, platform, url = line.rstrip('\n').split('\t')
xref = XRef()
xref.name = name
xref.platform = platform
xref.url = url
if sequence.upper() in seq_dict.keys():
s = seq_dict[sequence.upper()]
s.xrefs.append(xref)
if i % 400 == 0:
# Update every 400 lines
try:
db.session.commit()
except Exception as e:
db.session.rollback()
# Commit final changes
try:
db.session.commit()
except Exception as e:
db.session.rollback()
@staticmethod
def add_xref_families_from_file(gene_family_method_id, filename):
gf_method = GeneFamilyMethod.query.get(gene_family_method_id)
families = gf_method.families.all()
fam_dict = {f.name.upper(): f for f in families}
with open(filename, "r") as f:
for line in f:
family, name, platform, url = line.rstrip('\n').split('\t')
xref = XRef()
xref.name = name
xref.platform = platform
xref.url = url
if family.upper() in fam_dict.keys():
f = fam_dict[family.upper()]
f.xrefs.append(xref)
try:
db.session.commit()
except Exception as e:
db.session.rollback()
|
1684679
|
from unittest import TestCase
from unittest.mock import patch, Mock
from tornado import httpclient
from comms.common_https import CommonHttps
from utilities.test_utilities import async_test, awaitable
URL = "ABC.ABC"
METHOD = "GET"
HEADERS = {"a": "1"}
BODY = "hello"
CLIENT_CERT = "client.cert"
CLIENT_KEY = "client.key"
CA_CERTS = "ca.certs"
HTTP_PROXY_HOST = "http_proxy"
HTTP_PROXY_PORT = 3128
class TestCommonHttps(TestCase):
@async_test
async def test_make_request(self):
with patch.object(httpclient.AsyncHTTPClient(), "fetch") as mock_fetch:
return_value = Mock()
mock_fetch.return_value = awaitable(return_value)
actual_response = await CommonHttps.make_request(url=URL, method=METHOD, headers=HEADERS, body=BODY,
client_cert=CLIENT_CERT, client_key=CLIENT_KEY,
ca_certs=CA_CERTS, validate_cert=False,
http_proxy_host=HTTP_PROXY_HOST,
http_proxy_port=HTTP_PROXY_PORT)
mock_fetch.assert_called_with(URL,
raise_error=True,
method=METHOD,
body=BODY,
headers=HEADERS,
client_cert=CLIENT_CERT,
client_key=CLIENT_KEY,
ca_certs=CA_CERTS,
validate_cert=False,
proxy_host=HTTP_PROXY_HOST,
proxy_port=HTTP_PROXY_PORT)
self.assertIs(actual_response, return_value, "Expected content should be returned.")
@async_test
async def test_make_request_defaults(self):
with patch.object(httpclient.AsyncHTTPClient(), "fetch") as mock_fetch:
return_value = Mock()
mock_fetch.return_value = awaitable(return_value)
actual_response = await CommonHttps.make_request(url=URL, method=METHOD, headers=HEADERS, body=BODY)
mock_fetch.assert_called_with(URL,
raise_error=True,
method=METHOD,
body=BODY,
headers=HEADERS,
client_cert=None,
client_key=None,
ca_certs=None,
validate_cert=True,
proxy_host=None,
proxy_port=None)
self.assertIs(actual_response, return_value, "Expected content should be returned.")
|
1684695
|
from typing import Dict, List, Union
import torch
from torch import Tensor
from deepstochlog.term import Term
class Context:
""" Represents the context of a query: maps logic terms to tensors """
def __init__(self, context: Dict[Term, Tensor], map_default_to_term=False):
self._context = context
self._hash = hash(tuple(sorted(self._context.items())))
self._map_default_to_term = map_default_to_term
def has_tensor_representation(self, term: Term) -> bool:
return term in self._context
def get_tensor_representation(self, term: Term) -> Union[Tensor, str]:
"""
Returns the tensor representation, unless it doesn't contain it, then it returns just the functor
"""
if self._map_default_to_term and not self.has_tensor_representation(term):
return term.functor
if term.is_list():
return torch.cat(
[self.get_tensor_representation(a) for a in term.arguments]
)
return self._context[term]
def get_all_tensor_representations(self, network_input_args) -> List[Tensor]:
return [self.get_tensor_representation(term) for term in network_input_args]
def __eq__(self, other):
if isinstance(other, Context):
return self._context == other._context
return False
def __hash__(self):
return self._hash
class ContextualizedTerm:
def __init__(
self, context: Context, term: Term, probability: float = 1.0, meta=None
):
self.context = context
self.term = term
self.probability = probability
self.meta = meta
def __str__(self):
return "ContextualizedTerm(" + str(self.term) + ")"
def __repr__(self):
return str(self)
def __eq__(self, other):
if isinstance(other, ContextualizedTerm):
return self.context == other.context and self.term == other.term
return False
def __hash__(self):
return hash((self.context, self.term))
def mask_generation_output(self):
return ContextualizedTerm(
term=self.term.mask_generation_output(),
context=self.context,
)
|
1684706
|
class Observavel:
def __init__(self):
self._observers = []
def adicionar_observer(self, observador):
self._observers.append(observador)
def notificar_observers(self, mensagem):
for observador in self._observers:
observador(mensagem)
def observador_email(mensagem):
print(f'observador_email recebeu a mensagem: {mensagem}')
def observador_impressora(mensagem):
print(f'observador_impressora recebeu a mensagem: {mensagem}')
obs = Observavel()
obs.adicionar_observer(observador_email)
obs.adicionar_observer(observador_impressora)
obs.notificar_observers('A live de python é as 22')
|
1684737
|
from shexer.consts import JSON, FIXED_SHAPE_MAP
from shexer.io.shape_map.shape_map_parser import JsonShapeMapParser, FixedShapeMapParser
def get_shape_map_parser(format, sgraph, namespaces_prefix_dict):
if format == JSON:
return JsonShapeMapParser(sgraph=sgraph,
namespaces_prefix_dict=namespaces_prefix_dict)
elif format == FIXED_SHAPE_MAP:
return FixedShapeMapParser(namespaces_prefix_dict=namespaces_prefix_dict,
sgraph=sgraph)
else:
raise ValueError("ShapeMap format not recognized:" + format)
|
1684750
|
self.description = "Scriptlet test (pre/post remove)"
p1 = pmpkg("dummy")
p1.files = ['etc/dummy.conf']
p1.install['pre_remove'] = "echo foobar > pre_remove"
p1.install['post_remove'] = "echo foobar > post_remove"
self.addpkg2db("local", p1)
self.args = "-R %s" % p1.name
self.addrule("PACMAN_RETCODE=0")
self.addrule("FILE_EXIST=pre_remove")
self.addrule("FILE_EXIST=post_remove")
|
1684759
|
import pytest
from app.models.organisation import Organisation
from tests import organisation_json
@pytest.mark.parametrize("purchase_order_number,expected_result", [
[None, None],
["PO1234", [None, None, None, "PO1234"]]
])
def test_organisation_billing_details(purchase_order_number, expected_result):
organisation = Organisation(organisation_json(purchase_order_number=purchase_order_number))
assert organisation.billing_details == expected_result
|
1684762
|
from django import forms
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from .models import ForumUser
class ForumUserCreationForm(UserCreationForm):
email = forms.EmailField()
class Meta:
model = ForumUser
fields = ('username', 'nickname', 'email', '<PASSWORD>', '<PASSWORD>')
class ForumUserChangeForm(UserChangeForm):
class Meta:
model = ForumUser
fields = ('nickname', 'email') # username is permanent, but you can change your nickname
|
1684774
|
from __future__ import absolute_import, division, print_function
import boost_adaptbx.boost.python as bp
from boost_adaptbx.boost.python import streambuf
ext = bp.import_ext("boost_adaptbx_python_streambuf_test_ext")
import subprocess
import sys
def exercise():
proc = subprocess.Popen(args='libtbx.python %s --core' % __file__,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, error = proc.communicate("Veni Vidi Vici")
assert not error, error
assert not output, output
def read_from_stdin():
written = ext.test_read(streambuf(sys.stdin), "read")
assert written == "Veni, Vidi, Vici, [ fail, eof ]", written
def run(core):
if not core:
exercise()
print('OK')
else:
read_from_stdin()
if __name__ == '__main__':
run(core='--core' in sys.argv[1:])
|
1684787
|
import os
import numpy as np
import torch
from .alignment import load_net, batch_detect
def get_project_dir():
current_path = os.path.abspath(os.path.join(__file__, "../"))
return current_path
def relative(path):
path = os.path.join(get_project_dir(), path)
return os.path.abspath(path)
class RetinaFace:
def __init__(
self,
gpu_id=-1,
model_path=relative("weights/mobilenet0.25_Final.pth"),
network="mobilenet",
):
self.gpu_id = gpu_id
self.device = (
torch.device("cpu") if gpu_id == -1 else torch.device("cuda", gpu_id)
)
self.model = load_net(model_path, self.device, network)
def detect(self, images):
if isinstance(images, np.ndarray):
if len(images.shape) == 3:
return batch_detect(self.model, [images], self.device)[0]
elif len(images.shape) == 4:
return batch_detect(self.model, images, self.device)
elif isinstance(images, list):
return batch_detect(self.model, np.array(images), self.device)
elif isinstance(images, torch.Tensor):
if len(images.shape) == 3:
return batch_detect(self.model, images.unsqueeze(0), self.device)[0]
elif len(images.shape) == 4:
return batch_detect(self.model, images, self.device)
else:
raise NotImplementedError()
def __call__(self, images):
return self.detect(images)
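# Hypothetical usage sketch (assumes the default weights file shipped with the package exists):
# detector = RetinaFace(gpu_id=-1)                                  # run inference on CPU
# faces = detector(np.zeros((480, 640, 3), dtype=np.uint8))         # single HxWx3 image
# batch = detector(np.zeros((2, 480, 640, 3), dtype=np.uint8))      # 4D input -> per-image results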
|
1684816
|
import logging
from oic.oic import Client, RegistrationResponse
from oic.utils.authn.client import CLIENT_AUTHN_METHOD
DATAPORTEN_PROVIDER_CONFIG = "https://auth.dataporten.no/"
def client_setup(client_id, client_secret):
"""Sets up an OpenID Connect Relying Party ("client") for connecting to Dataporten"""
logger = logging.getLogger(__name__)
assert (
client_id
), "Missing client id when setting up Dataporten OpenID Connect Relying Party"
assert (
client_secret
), "Missing client secret when setting up Dataporten OpenID Connect Relying Party"
client = Client(client_authn_method=CLIENT_AUTHN_METHOD)
logger.debug(
"Automatically registering Dataporten OpenID Provider.",
extra={"config": DATAPORTEN_PROVIDER_CONFIG},
)
client.provider_config(DATAPORTEN_PROVIDER_CONFIG)
client_args = {"client_id": client_id, "client_secret": client_secret}
client.store_registration_info(RegistrationResponse(**client_args))
logger.debug("Successfully registered the provider.")
return client
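# Hypothetical usage (credentials are placeholders, not real values):
# client = client_setup(client_id="my-client-id", client_secret="my-client-secret")
# The returned oic Client has already fetched the Dataporten provider configuration.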
|
1684820
|
from typing import Tuple
import pytest
import torch
from ludwig.modules import reduction_modules
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
@pytest.mark.parametrize("reduce_mode", ["last", "sum", "mean", "avg", "max", "concat", "attention", None])
@pytest.mark.parametrize("test_input_shape", [(16, 1, 4), (4, 10, 16)])
def test_sequence_reducer(reduce_mode: str, test_input_shape: Tuple[int, ...]):
(batch_size, max_sequence_length, encoding_size) = test_input_shape
sequence_reducer = reduction_modules.SequenceReducer(
reduce_mode=reduce_mode, max_sequence_length=max_sequence_length, encoding_size=encoding_size
).to(DEVICE)
inputs = torch.zeros(test_input_shape)
# Generates random sequence of random length for each instance in batch.
for batch_index in range(batch_size):
sequence_length = torch.randint(max_sequence_length, (1,))
inputs[batch_index, :sequence_length] = torch.rand((sequence_length, encoding_size))
outputs = sequence_reducer(inputs.to(DEVICE))
assert outputs.shape[1:] == sequence_reducer.output_shape
|
1684825
|
nama_hewan = ["gajah", "sapi", "kuda", "buaya"]
# display the contents of the list
print(nama_hewan)
# display list items by index
print(nama_hewan[0])
print(nama_hewan[1])
# replace one list element by index
nama_hewan[0] = "kucing"
print(nama_hewan)
# count the number of items in the list
print(len(nama_hewan))
# mix several data types in one list
list_saya = ["gajah", 12, True, "makan"]
print(list_saya)
# use the list() constructor to create a list
list_hewan1 = list(("kijang", "sapi", "kucing", "ayam"))
print(list_hewan1)
# use a loop to display the list
for jenis_hewan in list_hewan1:
print(jenis_hewan)
|
1684869
|
def plot():
import numpy as np
from matplotlib import pyplot as plt
fig = plt.figure()
x = np.ma.arange(0, 2 * np.pi, 0.4)
y = np.ma.sin(x)
y1 = np.sin(2 * x)
y2 = np.sin(3 * x)
ym1 = np.ma.masked_where(y1 > 0.5, y1)
ym2 = np.ma.masked_where(y2 < -0.5, y2)
lines = plt.plot(x, y, "r", x, ym1, "g", x, ym2, "bo")
plt.setp(lines[0], linewidth=4)
plt.setp(lines[1], linewidth=2)
plt.setp(lines[2], markersize=10)
plt.legend(("No mask", "Masked if > 0.5", "Masked if < -0.5"), loc="upper right")
plt.title("Masked line demo")
return fig
def test():
from .helpers import assert_equality
assert_equality(plot, __file__[:-3] + "_reference.tex")
|
1684889
|
from radixlib.network import Network
from radixlib.actions import (
CreateTokenDefinition,
UnregisterValidator,
RegisterValidator,
TransferTokens,
UnstakeTokens,
StakeTokens,
MintTokens,
BurnTokens,
ActionType
)
from typing import Union, List, overload, Optional
import radixlib as radix
class ActionBuilder():
""" Used to build a list of Radix actions through a series of simple function calls.
Some of the actions in the new Gateway API can be rather confusing to create especially ones
where there is a series of optional arguments that are either required together or not required
together. To solve this problem, this action builder class introduces a set of functions which
may be used to create the desired actions.
This class is written with the idea that it should allow for method chaining to take place when
adding actions. So, you should expect to see most functions return a reference to self in order
to allow for action additions to be chained.
"""
def __init__(
self,
network: Network
) -> None:
""" Instantiates a new ActionBuilder for the given network.
Args:
network (Network): The network which the action builder will be used for.
"""
self.network: Network = network
self.__actions_list: List[ActionType] = []
def new_mutable_token(
self,
owner_address: str,
name: str,
symbol: str,
description: str,
icon_url: str,
url: str,
granularity: int,
) -> 'ActionBuilder':
""" Creates a new CreateTokenDefinition action which defines a mutable token.
Args:
owner_address (str): The address of the owner of the token.
name (str): The name of the token.
symbol (str): The symbol of the token. This should be a 3 to 8 character lowercase symbol
for the token.
description (str): The description of the token.
icon_url (str): The URL of the token icon.
url (str): The URL to the token website.
granularity (int): An integer of the token granularity
Returns:
ActionBuilder: A reference to self to allow for method chaining when adding actions.
"""
# Calculating the RRI of the token based on the information passed to the function
derived_token_rri: str = radix.derive.token_rri(
creator_public_key = radix.derive.public_key_from_wallet_address(owner_address),
token_symbol = symbol,
network = self.network
)
# Creating the action and appending it to the list of actions that have been created so far.
self.__actions_list.append(
CreateTokenDefinition(
name = name,
symbol = symbol,
description = description,
icon_url = icon_url,
url = url,
granularity = granularity,
token_rri = derived_token_rri,
is_supply_mutable = True,
owner = owner_address
)
)
return self
def new_fixed_supply_token(
self,
owner_address: str,
name: str,
symbol: str,
description: str,
icon_url: str,
url: str,
granularity: int,
token_supply: int,
to_account_address: str
) -> 'ActionBuilder':
""" Creates a new CreateTokenDefinition action which defines a fixed supply token.
Args:
owner_address (str): The address of the owner of the token.
name (str): The name of the token.
symbol (str): The symbol of the token. This should be a 3 to 8 character lowercase symbol
for the token.
description (str): The description of the token.
icon_url (str): The URL of the token icon.
url (str): The URL to the token website.
granularity (int): An integer of the token granularity.
token_supply (int): The amount of supply of the token that we wish to have.
to_account_address (str): The address that the tokens will be sent to upon their
creation.
Returns:
ActionBuilder: A reference to self to allow for method chaining when adding actions.
"""
# Calculating the RRI of the token based on the information passed to the function
derived_token_rri: str = radix.derive.token_rri(
creator_public_key = radix.derive.public_key_from_wallet_address(owner_address),
token_symbol = symbol,
network = self.network
)
# Creating the action and appending it to the list of actions that have been created so far.
self.__actions_list.append(
CreateTokenDefinition(
name = name,
symbol = symbol,
description = description,
icon_url = icon_url,
url = url,
granularity = granularity,
token_rri = derived_token_rri,
is_supply_mutable = False,
token_supply = token_supply,
to_account = to_account_address
)
)
return self
def unstake_tokens_by_percentage(
self,
from_validator_address: str,
to_account_address: str,
percentage_amount: Union[int, float],
) -> 'ActionBuilder':
""" Creates a new UnstakeTokens action for a percentage of the tokens to unstake from
the specified validator.
Args:
from_validator_address (str): The validator that tokens will be unstaked from.
to_account_address (str): The address that the tokens will be sent to once unstaked.
percentage_amount (Union[int, float]): The percentage amount to unstake from the given
validator. Keep in mind that this is the percentage amount meaning that it should
be a number between 0 and 100.
Returns:
ActionBuilder: A reference to self to allow for method chaining when adding actions.
"""
# Creating the action and appending it to the list of actions that have been created so far.
self.__actions_list.append(
UnstakeTokens(
to_account = to_account_address,
from_validator = from_validator_address,
unstake_percentage = percentage_amount
)
)
return self
def unstake_tokens_by_amount(
self,
from_validator_address: str,
to_account_address: str,
unstake_amount: int,
) -> 'ActionBuilder':
""" Creates a new UnstakeTokens action for a specific amount of the tokens to unstake from
the specified validator.
Args:
from_validator_address (str): The validator that tokens will be unstaked from.
to_account_address (str): The address that the tokens will be sent to once unstaked.
unstake_amount (int): The amount of XRD to unstake from the validator. Keep in mind that you
must specify this amount in Atto and not in XRD.
Returns:
ActionBuilder: A reference to self to allow for method chaining when adding actions.
"""
# Creating the action and appending it to the list of actions that have been created so far.
self.__actions_list.append(
UnstakeTokens(
to_account = to_account_address,
from_validator = from_validator_address,
amount = unstake_amount,
token_rri = radix.derive.xrd_rri_on_network(self.network)
)
)
return self
def stake_tokens_by_amount(
self,
to_validator_address: str,
from_account_address: str,
stake_amount: int,
) -> 'ActionBuilder':
""" Creates a new UnstakeTokens action for a specific amount of the tokens to unstake from
the specified validator.
Args:
to_validator_address (str): The validators that tokens will be unstaked from.
from_account_address (str): The address that the tokens will be sent to once unstaked.
stake_amount (int): The amount of XRD to unstake from the validator. Keep in mind that
you must specify this amount in Atto and not in XRD.
Returns:
ActionBuilder: A reference to self to allow for method chaining when adding actions.
"""
# Creating the action and appending it to the list of actions that have been created so far.
self.__actions_list.append(
StakeTokens(
from_account = from_account_address,
to_validator = to_validator_address,
amount = stake_amount,
token_rri = radix.derive.xrd_rri_on_network(self.network)
)
)
return self
def token_transfer(
self,
from_account_address: str,
to_account_address: str,
token_rri: str,
transfer_amount: int
) -> 'ActionBuilder':
""" Creates a new TokenTransfer action.
Args:
from_account_address (str): The account which will be sending the tokens.
to_account_address (str): The account which will be getting the tokens.
token_rri (str): The RRI of the token to send.
transfer_amount (int): The amount of tokens to send.
Returns:
ActionBuilder: A reference to self to allow for method chaining when adding actions.
"""
# Creating the action and appending it to the list of actions that have been created so far.
self.__actions_list.append(
TransferTokens(
from_account = from_account_address,
to_account = to_account_address,
amount = transfer_amount,
token_rri = token_rri
)
)
return self
def mint_tokens(
self,
to_account_address: str,
mint_amount: int,
token_rri: str,
) -> 'ActionBuilder':
""" Creates a new MintTokens action.
Args:
to_account_address (str): The account that the tokens will be minted for.
mint_amount (int, optional): The amount of tokens to mint.
token_rri (str, optional): The RRI of the token.
Returns:
ActionBuilder: A reference to self to allow for method chaining when adding actions.
"""
# Creating the action and appending it to the list of actions that have been created so far.
self.__actions_list.append(
MintTokens(
to_account = to_account_address,
amount = mint_amount,
token_rri = token_rri
)
)
return self
def burn_tokens(
self,
from_account_address: str,
burn_amount: int,
token_rri: str,
) -> 'ActionBuilder':
""" Creates a new BurnTokens action.
Args:
from_account_address (str): The account that the tokens will be burned from.
burn_amount (int): The amount of tokens to burn.
token_rri (str): The RRI of the token.
Returns:
ActionBuilder: A reference to self to allow for method chaining when adding actions.
"""
# Creating the action and appending it to the list of actions that have been created so far.
self.__actions_list.append(
BurnTokens(
from_account = from_account_address,
amount = burn_amount,
token_rri = token_rri
)
)
return self
@overload
def register_validator(self, *, validator_address: str) -> 'ActionBuilder': ...
@overload
def register_validator(self, *, node_address: str) -> 'ActionBuilder': ...
@overload
def register_validator(self, *, public_key: str) -> 'ActionBuilder': ...
@overload
def register_validator(self, *, account_address: str) -> 'ActionBuilder': ...
def register_validator(
self,
*,
validator_address: Optional[str] = None,
node_address: Optional[str] = None,
public_key: Optional[str] = None,
account_address: Optional[str] = None,
) -> 'ActionBuilder':
""" Creates a new RegisterValidator action.
This method is used to create a new RegisterValidator action and has four overrides to
allow this method to be called using anything that identifies the validator.
Args:
validator_address (:obj:`str`, optional): A string of the validator address to register.
node_address (:obj:`str`, optional): A string of the node address to register.
public_key (:obj:`str`, optional): A string of the public key of the validator to
register.
account_address (:obj:`str`, optional): A string of the account address of the validator
to register.
Returns:
ActionBuilder: A reference to self to allow for method chaining when adding actions.
"""
# Getting the validator address from the arguments passed
_validator_address: str = ""
if validator_address:
_validator_address = validator_address
elif node_address:
_validator_address = radix.derive.validator_address_from_public_key(
public_key = radix.derive.public_key_from_node_or_validator_address(node_address),
network = self.network
)
elif public_key:
_validator_address = radix.derive.validator_address_from_public_key(
public_key = public_key,
network = self.network
)
elif account_address:
_validator_address = radix.derive.validator_address_from_public_key(
public_key = radix.derive.public_key_from_wallet_address(account_address),
network = self.network
)
else:
raise ValueError(
"At least one argument needs to be passed to this method to build the action."
)
# Creating the action and appending it to the list of actions that have been created so far.
self.__actions_list.append(
RegisterValidator(_validator_address)
)
return self
@overload
def unregister_validator(self, *, validator_address: str) -> 'ActionBuilder': ...
@overload
def unregister_validator(self, *, node_address: str) -> 'ActionBuilder': ...
@overload
def unregister_validator(self, *, public_key: str) -> 'ActionBuilder': ...
@overload
def unregister_validator(self, *, account_address: str) -> 'ActionBuilder': ...
def unregister_validator(
self,
*,
validator_address: Optional[str] = None,
node_address: Optional[str] = None,
public_key: Optional[str] = None,
account_address: Optional[str] = None,
) -> 'ActionBuilder':
""" Creates a new UnregisterValidator action.
This method is used to create a new UnregisterValidator action and has four overrides to
allow this method to be called using anything that identifies the validator.
Args:
validator_address (:obj:`str`, optional): A string of the validator address to unregister.
node_address (:obj:`str`, optional): A string of the node address to unregister.
public_key (:obj:`str`, optional): A string of the public key of the validator to
unregister.
account_address (:obj:`str`, optional): A string of the account address of the validator
to unregister.
Returns:
ActionBuilder: A reference to self to allow for method chaining when adding actions.
"""
# Getting the validator address from the arguments passed
_validator_address: str = ""
if validator_address:
_validator_address = validator_address
elif node_address:
_validator_address = radix.derive.validator_address_from_public_key(
public_key = radix.derive.public_key_from_node_or_validator_address(node_address),
network = self.network
)
elif public_key:
_validator_address = radix.derive.validator_address_from_public_key(
public_key = public_key,
network = self.network
)
elif account_address:
_validator_address = radix.derive.validator_address_from_public_key(
public_key = radix.derive.public_key_from_wallet_address(account_address),
network = self.network
)
else:
raise ValueError(
"At least one argument needs to be passed to this method to build the action."
)
# Creating the action and appending it to the list of actions that have been created so far.
self.__actions_list.append(
UnregisterValidator(_validator_address)
)
return self
def to_action_list(self) -> List[ActionType]:
""" Gets a list of the actions that have been created by the action builder so far """
return self.__actions_list
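# A hypothetical sketch of the method chaining described in the class docstring
# (the network object, addresses and amount below are placeholders, not real values):
# builder = ActionBuilder(network=some_network)   # some_network: a radixlib Network instance
# actions = (
#     builder
#     .token_transfer(
#         from_account_address="rdx1...sender",
#         to_account_address="rdx1...receiver",
#         token_rri=radix.derive.xrd_rri_on_network(some_network),
#         transfer_amount=1000000000000000000,     # amounts are expressed in Atto
#     )
#     .to_action_list()
# )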
|
1684894
|
import warnings
import numpy as np
import copy
from skopt.space import space as skopt_space
from skopt.learning import GaussianProcessRegressor
from scipy.linalg import cho_solve
from sklearn.utils.validation import check_array
from utils import check_parameter_count
class BoundedGaussianProcessRegressor(GaussianProcessRegressor):
"""
Gaussian process regressor where part of the space is not allowed due to parameter count.
"""
def __init__(self, space, hyper_param_names, adapt_param, kernel, normalize_y, noise, n_restarts_optimizer):
self.space = space
self.hyper_param_names = hyper_param_names
self.adapt_param = adapt_param
self.param_thr = adapt_param['param_thr']
self.par_cnt_scheme = adapt_param['par_cnt_scheme']
transformed_categorical = []
for dim in space.dimensions:
if not isinstance(dim, skopt_space.Categorical):
transformed_categorical.append(False)
else:
transformed_categorical += [True] * dim.transformed_size
if len(transformed_categorical) != space.transformed_n_dims:
raise Exception()
self.transformed_categorical = transformed_categorical
super(BoundedGaussianProcessRegressor, self).__init__(
kernel=kernel, normalize_y=normalize_y, noise=noise, n_restarts_optimizer=n_restarts_optimizer)
def predict(self, X, return_std=False, return_cov=False, return_mean_grad=False, return_std_grad=False):
"""
Predict output for X.
In addition to the mean of the predictive distribution, also its
standard deviation (return_std=True) or covariance (return_cov=True),
the gradient of the mean and the standard-deviation with respect to X
can be optionally provided.
Parameters
----------
* `X` [array-like, shape = (n_samples, n_features)]:
Query points where the GP is evaluated.
* `return_std` [bool, default: False]:
If True, the standard-deviation of the predictive distribution at
the query points is returned along with the mean.
* `return_cov` [bool, default: False]:
If True, the covariance of the joint predictive distribution at
the query points is returned along with the mean.
* `return_mean_grad` [bool, default: False]:
Whether or not to return the gradient of the mean.
Only valid when X is a single point.
* `return_std_grad` [bool, default: False]:
Whether or not to return the gradient of the std.
Only valid when X is a single point.
Returns
-------
* `y_mean` [array, shape = (n_samples, [n_output_dims]):
Mean of predictive distribution at query points
* `y_std` [array, shape = (n_samples,), optional]:
Standard deviation of predictive distribution at query points.
Only returned when return_std is True.
* `y_cov` [array, shape = (n_samples, n_samples), optional]:
Covariance of joint predictive distribution at query points.
Only returned when return_cov is True.
* `y_mean_grad` [shape = (n_samples, n_features)]:
The gradient of the predicted mean
* `y_std_grad` [shape = (n_samples, n_features)]:
The gradient of the predicted std.
"""
if return_std and return_cov:
raise RuntimeError(
"Not returning standard deviation of predictions when "
"returning full covariance.")
if return_std_grad and not return_std:
raise ValueError(
"Not returning std_gradient without returning "
"the std.")
X = check_array(X)
if X.shape[0] != 1 and (return_mean_grad or return_std_grad):
raise ValueError("Not implemented for n_samples > 1")
# check if X is within bounds defined by parameter count
for ind1, x in enumerate(X):
for ind2, xi in enumerate(x):
if xi < 0 or xi > 1:
if -1e-10 < xi < 0:
X[ind1][ind2] = 0
elif 1 < xi < 1 + 1e-10:
X[ind1][ind2] = 1
else:
raise Exception('Not all points in space')
all_dim_values = self.space.inverse_transform(X)
all_vals_dict = [
{name: val for (name, val) in zip(self.hyper_param_names, dim_values)} for dim_values in all_dim_values]
suitable_inds = []
unsuitable_inds = []
all_par_cnt_dict = []
for ind, vals_dict in enumerate(all_vals_dict):
values_suitable, par_cnt_dict = check_parameter_count(vals_dict, self.param_thr, self.par_cnt_scheme)
all_par_cnt_dict.append(par_cnt_dict)
if values_suitable:
suitable_inds.append(ind)
else:
unsuitable_inds.append(ind)
if not all_par_cnt_dict:
raise ValueError()
X_suit = np.array([X[ind] for ind in suitable_inds])
X_unsuit = np.array([X[ind] for ind in unsuitable_inds])
if len(X_suit) > 0:
if not hasattr(self, "X_train_"): # Not fit; predict based on GP prior
y_suit_mean = np.zeros(X_suit.shape[0])
if return_cov:
y_suit_cov = self.kernel(X_suit)
elif return_std:
y_suit_var = self.kernel.diag(X_suit)
y_suit_std = np.sqrt(y_suit_var)
else: # Predict based on GP posterior
K_trans = self.kernel_(X_suit, self.X_train_)
y_suit_mean = K_trans.dot(self.alpha_) # Line 4 (y_suit_mean = f_star)
y_suit_mean = self.y_train_mean_ + y_suit_mean # undo normalization
if return_cov:
v = cho_solve((self.L_, True), K_trans.T) # Line 5
y_suit_cov = self.kernel_(X_suit) - K_trans.dot(v) # Line 6
elif return_std:
K_inv = self.K_inv_
# Compute variance of predictive distribution
y_suit_var = self.kernel_.diag(X_suit)
y_suit_var -= np.einsum("ki,kj,ij->k", K_trans, K_trans, K_inv)
# Check if any of the variances is negative because of
# numerical issues. If yes: set the variance to 0.
y_suit_var_negative = y_suit_var < 0
if np.any(y_suit_var_negative):
warnings.warn("Predicted variances smaller than 0. Setting those variances to 0.")
y_suit_var[y_suit_var_negative] = 0.0
y_suit_std = np.sqrt(y_suit_var)
if return_mean_grad:
grad_suit = self.kernel_.gradient_x(X_suit[0], self.X_train_)
grad_suit_mean = np.dot(grad_suit.T, self.alpha_)
if return_std_grad:
grad_suit_std = np.zeros(X_suit.shape[1])
if not np.allclose(y_suit_std, grad_suit_std):
grad_suit_std = -np.dot(K_trans, np.dot(K_inv, grad_suit))[0] / y_suit_std
else:
y_suit_mean = []
y_suit_cov = []
y_suit_std = []
grad_suit_mean = []
grad_suit_std = []
eps = 1e-10
y_unsuit_mean = []
grad_unsuit_mean = []
y_unsuit_std = [eps] * len(X_unsuit)
y_unsuit_cov = [eps] * len(X_unsuit)
grad_unsuit_std = list(np.ones(np.shape(X_unsuit)) * eps)
y_max = np.max(self.y_train_)
for unsuit_ind, x in zip(unsuitable_inds, X_unsuit):
try:
tot_par = all_par_cnt_dict[unsuit_ind]['total']
except KeyError:
# a missing parameter count for an unsuitable point indicates inconsistent bookkeeping; re-raise
raise
tmp_y_unsuit_mean = y_overshoot(tot_par, self.param_thr, y_max)
y_unsuit_mean.append(tmp_y_unsuit_mean)
if return_mean_grad:
tmp_grad_unsuit_mean = []
for dim_ind, is_categorical in enumerate(self.transformed_categorical):
if is_categorical:
tmp_grad_unsuit_mean.append(eps)
else:
# find a numerical gradient: grad_i = [y(xi+delta_xi) - y(xi)] / delta_xi
xi = x[dim_ind]
delta_xi = 0.2
x2i = xi + delta_xi
if x2i > 1:
x2i = 1.0
delta_xi = x2i - xi
if delta_xi <= 0:
tmp_grad_unsuit_mean.append(eps)
continue
x2 = copy.deepcopy(x)
x2[dim_ind] = x2i
dim_values2 = self.space.inverse_transform(np.array([x2]))[0]
vals_dict2 = {name: val for (name, val) in zip(self.hyper_param_names, dim_values2)}
values_suitable2, par_cnt_dict2 = check_parameter_count(
vals_dict2, self.param_thr, self.par_cnt_scheme)
tot_par2 = par_cnt_dict2['total']
if values_suitable2:
tot_par2 = self.param_thr + 1
y2 = y_overshoot(tot_par2, self.param_thr, y_max)
grad_unsuit_i = (y2 - tmp_y_unsuit_mean)/delta_xi
tmp_grad_unsuit_mean.append(grad_unsuit_i)
grad_unsuit_mean.append(tmp_grad_unsuit_mean)
y_mean = []
y_std = []
y_cov = []
grad_mean = []
grad_std = []
for ind in range(len(X)):
if ind in suitable_inds:
pos = [i for i, x in enumerate(suitable_inds) if x == ind]
pos = pos[0]
y_mean.append(y_suit_mean[pos])
if return_cov:
y_cov.append(y_suit_cov[pos])
if return_std:
y_std.append(y_suit_std[pos])
if return_mean_grad:
# only one value allowed
grad_mean.append(grad_suit_mean)
if return_std_grad:
# only one value allowed
grad_std.append(grad_suit_std)
elif ind in unsuitable_inds:
pos = [i for i, x in enumerate(unsuitable_inds) if x == ind]
pos = pos[0]
y_mean.append(y_unsuit_mean[pos])
if return_cov:
y_cov.append(y_unsuit_cov[pos])
if return_std:
y_std.append(y_unsuit_std[pos])
if return_mean_grad:
grad_mean.append(grad_unsuit_mean[pos])
if return_std_grad:
grad_std.append(grad_unsuit_std[pos])
else:
raise Exception()
y_mean = np.array(y_mean)
y_std = np.array(y_std)
y_cov = np.array(y_cov)
if return_mean_grad:
grad_mean = np.array(grad_mean[0])
if return_std_grad:
grad_std = np.array(grad_std[0])
if return_cov:
return y_mean, y_cov
if return_mean_grad:
if return_std_grad:
return y_mean, y_std, grad_mean, grad_std
if return_std:
return y_mean, y_std, grad_mean
else:
return y_mean, grad_mean
else:
if return_std:
return y_mean, y_std
else:
return y_mean
def y_overshoot(tot_par, param_thr, y_max):
eps = 1e-10
y_max = max([y_max, eps])
if tot_par > param_thr:
par_overshoot = (tot_par - param_thr)/param_thr
elif tot_par < param_thr * 0.95:
par_overshoot = -(tot_par - param_thr * 0.95)/param_thr * 0.95
else:
raise Exception()
y = y_max * 1.1 + max([y_max, 10*eps]) * 0.1 * par_overshoot
# y = y_max * (1.1 + 0.1 * par_overshoot)
return y
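# Worked example of the penalty above (illustrative values): with param_thr = 1e6,
# tot_par = 1.2e6 and y_max = 0.8, par_overshoot = 0.2 and
# y = 0.8 * 1.1 + 0.8 * 0.1 * 0.2 = 0.896, i.e. slightly worse than the worst observed
# objective, growing with the size of the parameter-count violation.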
|
1684904
|
class TheSwapsDivTwo:
def find(self, sequence):
l, s = len(sequence), set()
for i in range(l):
for j in range(i + 1, l):
s.add(
tuple(
sequence[:i]
+ (sequence[j],)
+ sequence[i + 1 : j]
+ (sequence[i],)
+ sequence[j + 1 :]
)
)
return len(s)
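# Worked example (sequence is expected to be a tuple): for (1, 2, 1) the three possible
# single swaps yield (2, 1, 1), (1, 2, 1) and (1, 1, 2), so find((1, 2, 1)) returns 3.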
|
1684916
|
from train_nih import *
if __name__ == "__main__":
confs = []
# main experiments
for seed in [0]:
confs += nih_baseline(seed, 256)
confs += nih_li2018(seed, 256)
confs += nih_unet(seed, 256)
confs += nih_fpn(seed, 256)
confs += nih_deeplabv3(seed, 256)
confs += nih_pan(seed, 256)
confs += nih_pylon(seed, 256)
# 512 x 512 experiments
for seed in [0]:
confs += nih_baseline(seed, 512)
confs += nih_li2018(seed, 512)
confs += nih_fpn(seed, 512)
confs += nih_pylon(seed, 512)
debug = False
multiprocess_map(
Run(
namespace='',
debug=debug,
train=True,
test_auc=True,
test_loc=True,
gen_picked=True,
gen_all=False,
),
confs,
num_workers=len(confs),
progress=True,
debug=debug,
)
|
1684931
|
import requests
from bs4 import BeautifulSoup as bs
import pandas as pd
def scrape_divs():
"""This function scrapes all the proposal elements and stores them
in a list.
"""
response = requests.get("https://in.pycon.org/cfp/2020/proposals/")
soup = bs(response.content, "html.parser")
mydivs = soup.findAll("div", {"class": "col-sm-11 col-xs-12"})
return mydivs
def selected_proposals(mydivs, df_columns):
"""This function takes the list of selected proposal elements from the
scrape_divs function as well as a list of columns and stores the value
of the elements in a csv file.
Args:
mydivs (list): List of proposal elements
df_columns (list): List of column names
"""
final = {}
for i, div in enumerate(mydivs[:43]):
title = div.text
titlex = title.split("\n")
test_list = list(filter(lambda x: x != "", titlex))
no_of_votes = test_list[2]
no_of_messages = test_list[0]
title = test_list[4]
tag1 = test_list[5]
tag2 = test_list[7]
author = test_list[11].strip()
date = test_list[14].strip()
final[i] = [no_of_votes, no_of_messages, title, tag1, tag2, author, date]
df1 = pd.DataFrame.from_dict(final, orient="index")
df1.columns = df_columns
df1.to_csv("selected_proposals.csv")
def total_proposals(mydivs, df_columns):
"""This function takes the list of total proposal elements from the scarpe_divs
function as well as a list of columns and stores the value of the
elements in a csv file.
Args:
mydivs (list): List of proposal elements
df_columns (list): List of column names
"""
final_two = {}
for i, div in enumerate(mydivs[43:]):
title = div.text
titlex = title.split("\n")
test_list = list(filter(lambda x: x != "", titlex))
no_of_votes = test_list[2]
no_of_messages = test_list[0]
title = test_list[4]
tag1 = test_list[6]
tag2 = test_list[8]
author = test_list[12].strip()
date = test_list[15].strip()
final_two[i] = [no_of_votes, no_of_messages, title, tag1, tag2, author, date]
df2 = pd.DataFrame.from_dict(final_two, orient="index")
df2.columns = df_columns
df2.to_csv("total_proposals.csv")
if __name__ == "__main__":
df_columns = ["Votes", "Messages", "Title", "Tag1", "Tag2", "Author", "Date"]
mydivs = scrape_divs()
selected_proposals(mydivs, df_columns)
total_proposals(mydivs, df_columns)
print("The proposals have been saved successfully!!!")
|
1684946
|
class SimIo:
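    # Thin wrapper pairing a named simulator I/O channel with optional
    # getter/setter callbacks; get()/set() raise NotImplementedError when the
    # corresponding callback is missing.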
def __init__(self, sim, name, getter, setter):
self.sim = sim
self.name = name
self.getter = getter
self.setter = setter
def get(self):
if self.getter is None:
raise NotImplementedError
return self.getter()
def set(self, val):
if self.setter is None:
raise NotImplementedError
self.setter(val)
|
1684957
|
import logger as log
import mail
import json
def read_json(threatJasonPath):
try:
# read the Threat Dragon JSON file
JSON_file = open(threatJasonPath, 'r')
dataTM = JSON_file.read()
JSON_file.close()
# return a dictionary
obj = json.loads(dataTM)
return obj
except FileNotFoundError as e2:
log.logger.error("File not accessible")
        mail.sendErrorEmail("Threat Model: Exception occurred reading the JSON file", e2)
log.logger.info(f'Finish processing \n')
raise e2
except Exception as e:
log.logger.error("Exception occurred", exc_info=True)
mail.sendErrorEmail("Threat Model: Exception occurred reading the JSON file", e)
log.logger.info(f'Finish processing \n')
raise e
|
1684962
|
def count_parameters(model):
"""Counts the number of parameters in a model."""
    return sum(param.numel() for param in model.parameters() if param.requires_grad)
class AttrDict(dict):
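    # Minimal dict subclass that exposes keys as attributes for both reads
    # and writes.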
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, item):
return self[item]
|
1684965
|
import os
import yaml
import pandas as pd
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', default = os.path.join('configs', 'data_preparation.yaml'), help = 'Config File', type = str)
FLAGS = parser.parse_args()
CFG_FILE = FLAGS.cfg
with open(CFG_FILE, 'r') as cfg_file:
cfg_dict = yaml.load(cfg_file, Loader=yaml.FullLoader)
if 'data_folder' not in cfg_dict.keys():
raise AttributeError('The data folder is left unspecified in configuration file.')
data_folder = cfg_dict['data_folder']
if 'data_name' not in cfg_dict.keys():
raise AttributeError('The data name is left unspecified in configuration file.')
data_name = cfg_dict['data_name']
if 'tiny_data_name' not in cfg_dict.keys():
raise AttributeError('The tiny data name is left unspecified in configuration file.')
tiny_data_name = cfg_dict['tiny_data_name']
if 'tiny_data_year_threshold' not in cfg_dict.keys():
raise AttributeError('The tiny data year threshold is left unspecified in configuration file.')
tiny_data_year_threshold = cfg_dict['tiny_data_year_threshold']
df = pd.read_csv(os.path.join(data_folder, data_name + '.csv'), index_col = 'index', engine = 'python')
tiny_df = df[df.src_year <= tiny_data_year_threshold]
tiny_df.to_csv(os.path.join(data_folder, tiny_data_name + '.csv'))
print('*** Summary Statistics ***')
print('# of reference items:{}'.format(tiny_df.shape[0]))
|
1684972
|
import tensorflow as tf
__all__ = ['get_sess']
def get_sess(sess=None):
"""Get default session if sess is None.
Args:
sess: Valid sess or None.
Returns:
Valid sess or get default sess.
"""
if sess is None:
sess = tf.get_default_session()
assert sess, 'sess should not be None.'
return sess
|
1684980
|
import claripy
from . import MemoryMixin
from ...errors import SimMemoryError
class SimpleInterfaceMixin(MemoryMixin):
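    # Concretizes addresses, sizes and conditions (which must be
    # single-valued) and normalizes data to bitvectors before delegating
    # loads/stores to the next memory mixin.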
def load(self, addr, size=None, endness=None, condition=None, fallback=None, **kwargs):
tsize=self._translate_size(size, None)
return super().load(
self._translate_addr(addr),
size=tsize,
endness=self._translate_endness(endness),
condition=self._translate_cond(condition),
fallback=self._translate_data(fallback, tsize) if fallback is not None else None,
**kwargs
)
def store(self, addr, data, size=None, endness=None, condition=None, **kwargs):
tsize=self._translate_size(size, data)
super().store(
self._translate_addr(addr),
self._translate_data(data, tsize),
size=tsize,
endness=self._translate_endness(endness),
condition=self._translate_cond(condition),
**kwargs
)
def _translate_addr(self, a):
if isinstance(a, claripy.ast.Base) and not a.singlevalued:
raise SimMemoryError("address not supported")
return self.state.solver.eval(a)
def _translate_data(self, d, size):
if type(d) in (bytes, bytearray):
return self.state.solver.BVV(d)
elif type(d) is int:
return self.state.solver.BVV(d, size*self.state.arch.byte_width)
elif isinstance(d, claripy.ast.Base):
return d
else:
raise SimMemoryError("data not supported")
def _translate_size(self, s, data):
if isinstance(s, claripy.ast.Base) and not s.singlevalued:
raise SimMemoryError("size not supported")
if s is None:
if isinstance(data, claripy.ast.BV):
return len(data) // self.state.arch.byte_width
elif isinstance(data, (bytes, bytearray)):
return len(data)
else:
raise SimMemoryError("unknown size")
return self.state.solver.eval(s)
def _translate_cond(self, c):
if isinstance(c, claripy.ast.Base) and not c.singlevalued:
raise SimMemoryError("condition not supported")
if c is None:
return True
else:
return self.state.solver.eval_upto(c, 1)[0]
def _translate_endness(self, endness):
if endness is None:
return self.endness
else:
return endness
|
1685017
|
import os
import sys
sys.path.append("../../../../")
import swhlab
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import numpy as np
class ABF2(swhlab.ABF):
def phasicTonic(self,m1=None,m2=None,chunkMs=50,quietPercentile=10,
histResolution=.5,plotToo=False):
"""
let's keep the chunkMs as high as we reasonably can. 50ms is good.
Things get flakey at lower numbers like 10ms.
IMPORTANT! for this to work, prevent 0s from averaging in, so keep
bin sizes well above the data resolution.
"""
# prepare sectioning values to be used later
m1=0 if m1 is None else m1*self.pointsPerSec
        m2=len(self.sweepY) if m2 is None else m2*self.pointsPerSec
m1,m2=int(m1),int(m2)
# prepare histogram values to be used later
padding=200 # pA or mV of maximum expected deviation
chunkPoints=int(chunkMs*self.pointsPerMs)
histBins=int((padding*2)/histResolution)
# center the data at 0 using peak histogram, not the mean
Y=self.sweepY[m1:m2]
hist,bins=np.histogram(Y,bins=2*padding)
Yoffset=bins[np.where(hist==max(hist))[0][0]]
Y=Y-Yoffset # we don't have to, but PDF math is easier
# calculate all histogram
nChunks=int(len(Y)/chunkPoints)
hist,bins=np.histogram(Y,bins=histBins,range=(-padding,padding))
hist=hist/len(Y) # count as a fraction of total
Xs=bins[1:]
# get baseline data from chunks with smallest variance
chunks=np.reshape(Y[:nChunks*chunkPoints],(nChunks,chunkPoints))
variances=np.var(chunks,axis=1)
percentiles=np.empty(len(variances))
for i,variance in enumerate(variances):
percentiles[i]=sorted(variances).index(variance)/len(variances)*100
blData=chunks[np.where(percentiles<=quietPercentile)[0]].flatten()
# generate the standard curve and pull it to the histogram height
sigma=np.sqrt(np.var(blData))
center=np.average(blData)+histResolution/2
blCurve=mlab.normpdf(Xs,center,sigma)
blCurve=blCurve*max(hist)/max(blCurve)
# determine the phasic current by subtracting-out the baseline
#diff=hist-blCurve
diff=hist
IGNORE_DISTANCE=5 # KEEP THIS FIXED, NOT A FUNCTION OF VARIANCE
ignrCenter=len(Xs)/2
ignrPad=IGNORE_DISTANCE/histResolution
        ignr1,ignr2=int(ignrCenter-ignrPad),int(ignrCenter+ignrPad)
        diff[ignr1:ignr2]=0
# optionally graph all this
if plotToo:
plt.figure(figsize=(15,5))
plt.plot(Y)
plt.figure(figsize=(7,7))
ax1=plt.subplot(211)
            plt.title(self.ID+" phasic analysis")
plt.ylabel("fraction")
plt.plot(Xs,hist,'-',alpha=.8,color='b',lw=3)
plt.plot(Xs,blCurve,lw=3,alpha=.5,color='r')
plt.margins(0,.1)
plt.subplot(212,sharex=ax1)
plt.title("baseline subtracted")
plt.ylabel("fraction")
plt.xlabel("data points (%s)"%abf.units)
plt.plot(Xs,diff,'-',alpha=.8,color='b',lw=3)
plt.axhline(0,lw=3,alpha=.5,color='r')
plt.axvline(0,lw=3,alpha=.5,color='k')
plt.margins(0,.1)
plt.axis([-50,50,None,None])
plt.tight_layout()
plt.show()
print(np.sum(np.split(diff,2),1))
        return diff/len(Y)*self.pointsPerSec # charge/sec
if __name__=="__main__":
#abfPath=r"X:\Data\2P01\2016\2016-09-01 PIR TGOT"
abfPath=r"C:\Users\scott\Documents\important\demodata"
abf=ABF2(os.path.join(abfPath,"16d16007.abf"))
histPoints=len(abf.phasicTonic(.75))
nSweeps=25
plt.figure(figsize=(10,5))
plt.grid()
for title,sweep1 in [["baseline",200],["baseline",240],["baseline",350]]:
hists=np.zeros((nSweeps,histPoints))
for i in range(nSweeps):
sweep=sweep1+i
abf.setsweep(sweep)
hists[i]=abf.phasicTonic(.75)
AV=np.average(hists,axis=0)
plt.plot(AV,lw=5,alpha=.5,label=title)
plt.legend()
# for sweep in abf.setsweeps():
# phasic=abf.phasicTonic(.75)
# neg[sweep],pos[sweep]=np.sum(np.split(phasic,2),1)
#
# plt.plot(pos,'.',color='b',alpha=.5)
# plt.plot(swhlab.common.lowpass(pos),'-',color='b',alpha=.5,lw=5,label="upward")
# plt.plot(neg,'.',color='r',alpha=.5)
# plt.plot(swhlab.common.lowpass(neg),'-',color='r',alpha=.5,lw=5,label="downward")
# for sweep in abf.comment_sweeps:
# plt.axvline(sweep,lw=5,alpha=.5,color='g',ls='--')
# plt.axhline(0,color='k',lw=3,alpha=.5)
plt.xlabel("sweep number")
plt.ylabel("ms * pA / sec")
# plt.legend(loc='upper left',shadow=True)
plt.show()
print("DONE")
|
1685023
|
from PIL import Image
import cv2
import numpy as np
from PySide2.QtGui import QImage
# https://github.com/Mugichoko445/Fast-Digital-Image-Inpainting/blob/master/sources/FastDigitalImageInpainting.hpp
def convertPIL2CV(img:Image):
return cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
def convertCV2PIL(img)->Image:
return Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
def convertQImageToCV2(img):
"""Converts a QImage into CV2 format."""
img = img.convertToFormat(QImage.Format_RGB32)
width = img.width()
height = img.height()
ptr = img.bits()
arr = np.array(ptr).reshape(height, width, 4)
return arr
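# Diffusion kernel weights used by fastInpaint below, following the
# Fast Digital Image Inpainting reference above (corner weight _a, edge
# weight _b, centre weight 0).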
_a = 0.073235
_b = 0.176765
_K = np.array([[_a, _b, _a],
               [_b, 0, _b],
               [_a, _b, _a]])
def fastInpaint(src, mask=None, kernel=None, maxIter=100):
    if kernel is None:
kernel = _K
# Make mask BGR
mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
# Fill masked region with average color
avgColor = cv2.sumElems(src) // (np.prod(src.shape[:2]))
avgColorMat = np.ones((1,1,3))
avgColorMat[0,0] = np.asarray([avgColor[0], avgColor[1], avgColor[2]])
avgColorMat = cv2.resize(avgColorMat, (src.shape[1], src.shape[0]), 0.0, 0.0, cv2.INTER_NEAREST)
print(mask)
result = np.multiply(mask//255, src) + np.multiply((1 - mask//255), avgColorMat)
# Convolution
bSize = _K.shape[0] // 2
# result.convertTo(result, CV_32FC3)
result = (np.float32(result)-np.min(result))
result /= np.max(result)
# kernel3ch = cv2.cvtColor(kernel, cv2.COLOR_BGR2GRAY)
kernel3ch = np.zeros((kernel.shape[0], kernel.shape[1], 3))
for i in range(kernel.shape[0]):
for j in range(kernel.shape[1]):
kernel3ch[i,j,:] = 3*[kernel[i,j]]
inWithBorder = cv2.copyMakeBorder(result, bSize, bSize, bSize, bSize, cv2.BORDER_REPLICATE)
resInWithBorder = np.copy(inWithBorder[bSize:bSize+result.shape[0], bSize:bSize+result.shape[1]])
# ch = result.shape[-1]
for itr in range(maxIter):
inWithBorder = cv2.copyMakeBorder(result, bSize, bSize, bSize, bSize, cv2.BORDER_REPLICATE)
for r in range(result.shape[1]):
for c in range(result.shape[0]):
if np.all(mask[c,r,:] == 255):
roi = inWithBorder[c:c+_K.shape[1], r:r+_K.shape[0]]
s = cv2.sumElems(np.multiply(kernel3ch, roi))
result[c,r,0] = s[0]
result[c,r,1] = s[1]
result[c,r,2] = s[2]
# cv2.imshow("Inpainting...", result)
# cv2.waitKey(1)
result -= np.min(result)
result *= 255/np.max(result)
return np.uint8(result)
# print(avgColor)
# print(src.shape)
|
1685066
|
import logging
import numpy as np
from gym.spaces import Discrete
from ray.rllib.utils.annotations import override
from ray.rllib.env.vector_env import VectorEnv
from ray.rllib.evaluation.rollout_worker import get_global_worker
from ray.rllib.env.base_env import BaseEnv
from ray.rllib.utils.typing import EnvType
logger = logging.getLogger(__name__)
def model_vector_env(env: EnvType) -> BaseEnv:
"""Returns a VectorizedEnv wrapper around the given environment.
To obtain worker configs, one can call get_global_worker().
Args:
env (EnvType): The input environment (of any supported environment
            type) to be converted to a _VectorizedModelGymEnv (wrapped as
an RLlib BaseEnv).
Returns:
BaseEnv: The BaseEnv converted input `env`.
"""
worker = get_global_worker()
worker_index = worker.worker_index
if worker_index:
env = _VectorizedModelGymEnv(
make_env=worker.make_sub_env_fn,
existing_envs=[env],
num_envs=worker.num_envs,
observation_space=env.observation_space,
action_space=env.action_space,
)
return BaseEnv.to_base_env(
env,
make_env=worker.make_sub_env_fn,
num_envs=worker.num_envs,
remote_envs=False,
remote_env_batch_wait_ms=0)
class _VectorizedModelGymEnv(VectorEnv):
"""Vectorized Environment Wrapper for MB-MPO.
Primary change is in the `vector_step` method, which calls the dynamics
models for next_obs "calculation" (instead of the actual env). Also, the
actual envs need to have two extra methods implemented: `reward(obs)` and
(optionally) `done(obs)`. If `done` is not implemented, we will assume
that episodes in the env do not terminate, ever.
"""
def __init__(self,
make_env=None,
existing_envs=None,
num_envs=1,
*,
observation_space=None,
action_space=None,
env_config=None):
self.make_env = make_env
self.envs = existing_envs
self.num_envs = num_envs
while len(self.envs) < num_envs:
self.envs.append(self.make_env(len(self.envs)))
super().__init__(
observation_space=observation_space
or self.envs[0].observation_space,
action_space=action_space or self.envs[0].action_space,
num_envs=num_envs)
worker = get_global_worker()
self.model, self.device = worker.foreach_policy(
lambda x, y: (x.dynamics_model, x.device))[0]
@override(VectorEnv)
def vector_reset(self):
"""Override parent to store actual env obs for upcoming predictions.
"""
self.cur_obs = [e.reset() for e in self.envs]
return self.cur_obs
@override(VectorEnv)
def reset_at(self, index):
"""Override parent to store actual env obs for upcoming predictions.
"""
obs = self.envs[index].reset()
self.cur_obs[index] = obs
return obs
@override(VectorEnv)
def vector_step(self, actions):
if self.cur_obs is None:
raise ValueError("Need to reset env first")
# If discrete, need to one-hot actions
if isinstance(self.action_space, Discrete):
act = np.array(actions)
new_act = np.zeros((act.size, act.max() + 1))
new_act[np.arange(act.size), act] = 1
actions = new_act.astype("float32")
# Batch the TD-model prediction.
obs_batch = np.stack(self.cur_obs, axis=0)
action_batch = np.stack(actions, axis=0)
# Predict the next observation, given previous a) real obs
# (after a reset), b) predicted obs (any other time).
next_obs_batch = self.model.predict_model_batches(
obs_batch, action_batch, device=self.device)
next_obs_batch = np.clip(next_obs_batch, -1000, 1000)
# Call env's reward function.
# Note: Each actual env must implement one to output exact rewards.
rew_batch = self.envs[0].reward(obs_batch, action_batch,
next_obs_batch)
# If env has a `done` method, use it.
if hasattr(self.envs[0], "done"):
dones_batch = self.envs[0].done(next_obs_batch)
# Otherwise, assume the episode does not end.
else:
dones_batch = np.asarray([False for _ in range(self.num_envs)])
info_batch = [{} for _ in range(self.num_envs)]
self.cur_obs = next_obs_batch
return list(next_obs_batch), list(rew_batch), list(
dones_batch), info_batch
@override(VectorEnv)
def get_unwrapped(self):
return self.envs
|
1685102
|
from Bio.PDB import *
# Exclude disordered atoms.
class NotDisordered(Select):
def accept_atom(self, atom):
return not atom.is_disordered() or atom.get_altloc() == 'A'
def extractHelix(helix, infilename, outfilename, chain_ids=None, includeWaters=False,\
invert=False):
parser = PDBParser(QUIET=True)
struct = parser.get_structure(infilename, infilename)
model = Selection.unfold_entities(struct, 'M')[0]
chains = Selection.unfold_entities(struct, 'C')
# Select residues to extract and build new structure
structBuild = StructureBuilder.StructureBuilder()
structBuild.init_structure("output")
structBuild.init_seg(" ")
structBuild.init_model(0)
outputStruct = structBuild.get_structure()
for chain in model:
if chain.get_id() in chain_ids:
structBuild.init_chain(chain.get_id())
for residue in chain:
het = residue.get_id()
if het[0] == ' ' and het in helix:
outputStruct[0][chain.get_id()].add(residue)
# Output the selected residues
pdbio = PDBIO()
pdbio.set_structure(outputStruct)
pdbio.save(outfilename, select=NotDisordered())
|
1685150
|
import click
import re
from .base import Prompter
from agent.modules.tools import infinite_retry
from agent import source
class MongoPrompter(Prompter):
def prompt(self, default_config, advanced=False):
self.prompt_connection(default_config)
self.prompt_auth(default_config)
self.prompt_db(default_config)
self.prompt_collection(default_config)
self.prompt_offset(default_config)
self.source.config[source.MongoSource.CONFIG_BATCH_SIZE] = \
click.prompt('Batch size', type=click.IntRange(1),
default=default_config.get(source.MongoSource.CONFIG_BATCH_SIZE, 1000))
self.prompt_batch_wait_time(default_config)
self.source.set_config(self.source.config)
return self.source
@infinite_retry
def prompt_connection(self, default_config: dict):
self.source.config[source.MongoSource.CONFIG_CONNECTION_STRING] = \
click.prompt('Connection string', type=click.STRING,
default=default_config.get(source.MongoSource.CONFIG_CONNECTION_STRING)).strip()
self.validator.validate_connection()
click.echo('Successfully connected to Mongo server')
@infinite_retry
def prompt_auth(self, default_config: dict):
self.source.config[source.MongoSource.CONFIG_USERNAME] = \
click.prompt('Username', type=click.STRING,
default=default_config.get(source.MongoSource.CONFIG_USERNAME, '')).strip()
if self.source.config[source.MongoSource.CONFIG_USERNAME] == '':
return
self.source.config[source.MongoSource.CONFIG_PASSWORD] = \
click.prompt('Password', type=click.STRING,
default=default_config.get(source.MongoSource.CONFIG_PASSWORD, ''))
self.source.config[source.MongoSource.CONFIG_AUTH_SOURCE] = \
click.prompt('Authentication Source', type=click.STRING,
default=default_config.get(source.MongoSource.CONFIG_AUTH_SOURCE, '')).strip()
self.validator.validate_connection()
print('Successfully connected to the source')
def prompt_batch_wait_time(self, default_config: dict):
default_batch_wait_time = default_config.get(source.MongoSource.CONFIG_MAX_BATCH_WAIT_TIME)
if default_batch_wait_time:
default_batch_wait_time = int(re.findall(r'\d+', default_batch_wait_time)[0])
else:
default_batch_wait_time = 5
batch_wait_time = click.prompt('Max batch wait time (seconds)', type=click.IntRange(1),
default=default_batch_wait_time)
self.source.config[source.MongoSource.CONFIG_MAX_BATCH_WAIT_TIME] = '${' + str(batch_wait_time) + ' * SECONDS}'
def prompt_offset(self, default_config: dict):
self.source.config[source.MongoSource.CONFIG_OFFSET_TYPE] = \
click.prompt('Offset type', type=click.Choice(source.MongoSource.offset_types),
default=default_config.get(source.MongoSource.CONFIG_OFFSET_TYPE, source.MongoSource.OFFSET_TYPE_OBJECT_ID))
default_offset = None if self.source.config[
source.MongoSource.CONFIG_OFFSET_TYPE] == source.MongoSource.OFFSET_TYPE_STRING else '3'
self.source.config[source.MongoSource.CONFIG_INITIAL_OFFSET] = click.prompt(
'Initial offset (amount of days ago or specific date)', type=click.STRING,
default=default_config.get(source.MongoSource.CONFIG_INITIAL_OFFSET, default_offset))
self.source.config[source.MongoSource.CONFIG_OFFSET_FIELD] = \
click.prompt('Offset field', type=click.STRING,
default=default_config.get(source.MongoSource.CONFIG_OFFSET_FIELD, '_id')).strip()
@infinite_retry
def prompt_db(self, default_config):
self.source.config[source.MongoSource.CONFIG_DATABASE] = \
click.prompt('Database', type=click.STRING,
default=default_config.get(source.MongoSource.CONFIG_DATABASE)).strip()
try:
self.validator.validate_db()
except source.validator.ValidationException as e:
raise click.UsageError(e)
def _get_collection(self):
try:
self.validator.validate_collection()
except source.validator.ValidationException as e:
raise click.UsageError(e)
client = source.db.get_mongo_client(
self.source.config[source.MongoSource.CONFIG_CONNECTION_STRING],
self.source.config.get(source.MongoSource.CONFIG_USERNAME),
self.source.config.get(source.MongoSource.CONFIG_PASSWORD),
self.source.config.get(source.MongoSource.CONFIG_AUTH_SOURCE)
)
return client[self.source.config[source.MongoSource.CONFIG_DATABASE]][self.source.config[source.MongoSource.CONFIG_COLLECTION]]
@infinite_retry
def prompt_collection(self, default_config):
self.source.config[source.MongoSource.CONFIG_COLLECTION] = \
click.prompt('Collection', type=click.STRING,
default=default_config.get(source.MongoSource.CONFIG_COLLECTION)).strip()
self.source.config[source.MongoSource.CONFIG_IS_CAPPED] = 'capped' in self._get_collection().options()
|
1685218
|
import pyodbc
def select_all_entries_from_column(table_name, cursor, column_name):
"""Select all entries from a column.
Args:
table_name (str): Table name.
        cursor (object): pyodbc cursor.
column_name (str): Column name.
Returns:
list: List with the selected entries.
**Examples**
.. code-block:: python
conn = pyodbc.connect(connection_string)
cur = conn.cursor()
data = select_all_entries_from_column(tab_name, cur, col_name)
"""
query = "SELECT " + column_name + " FROM " + table_name
cursor.execute(query)
data = cursor.fetchall()
data = [d[0] for d in data]
return data
def select_entry_where_column_equals_value(table_name, cursor, column_name, value):
"""Select one entry where the a column has a specific value.
Args:
table_name (str): Table name.
        cursor (object): pyodbc cursor.
column_name (str): Column name.
value (str or integer): Value to query.
Returns:
list: List of entries for the queried conditions.
**Examples**
.. code-block:: python
conn = pyodbc.connect(connection_string)
cur = conn.cursor()
data = select_entry_where_column_equals_value(tab_name, cur, col_name, 1)
"""
query = "SELECT * FROM " + table_name + " WHERE " + column_name + " = ?"
cursor.execute(query, value)
data = cursor.fetchone()
return data
|
1685231
|
import logging
from pandas import DataFrame
from dbnd import parameter, task
from targets import target
from targets.target_config import FileFormat
logger = logging.getLogger(__name__)
@task(result=parameter.save_options(FileFormat.csv, header=False)[DataFrame])
def read_first_lines(lines=parameter.load_options(FileFormat.csv, nrows=10)[DataFrame]):
assert lines.shape[0] == 10
return lines
class TestTaskInMemoryParameters(object):
def test_load_options(self, tmpdir):
t = target(tmpdir / "file_with_lines.csv")
t.write("\n".join(str(r) for r in range(20)))
r = read_first_lines.dbnd_run(lines=t)
actual = r.task.result.load(DataFrame)
        # the result was saved without a header, so on load the first data value becomes the column name
logger.info("actual : %s", actual)
assert actual.columns[0] == "1"
|
1685262
|
from django.db import models
class PriceHistory(models.Model):
date = models.DateTimeField(auto_now_add=True)
price = models.DecimalField(max_digits=7, decimal_places=2)
volume = models.DecimalField(max_digits=7, decimal_places=3)
|
1685263
|
import os
import re
import sys
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.font_manager import FontProperties
from matplotlib.offsetbox import HPacker, TextArea, AnnotationBbox
from matplotlib.patches import FancyArrowPatch, ArrowStyle, Polygon
from matplotlib.ticker import NullFormatter, NullLocator, MaxNLocator
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.stats import scoreatpercentile
from fluff.color import create_colormap
from fluff.config import FONTSIZE
from fluff.fluffio import load_annotation
from fluff.track import Track
DEFAULT_COLORS = ["#e41a1c", "#4daf4a", "#377eb8"]
GENE_ARROW = "->"
GENE_ARROW = ArrowStyle._Curve(beginarrow=False, endarrow=True, head_length=.4, head_width=.4)
def colortext(x, y, texts, colors, **kwargs):
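    # Draw a sequence of text fragments at (x, y) in axes-fraction
    # coordinates, each fragment in its own color, packed horizontally
    # via HPacker/AnnotationBbox.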
pos = {
"right": 1,
"center": 0.5,
"left": 0,
"top": 0,
"bottom": 1
}
ax = kwargs.get("ax")
verticalalignment = pos[kwargs.get("verticalalignment", "center")]
horizontalalignment = pos[kwargs.get("horizontalalignment", "center")]
annotation_clip = kwargs.get("clip_on", False)
fontproperties = kwargs.get("fontproperties", None)
textprops = {"fontproperties":fontproperties}
transform = kwargs.get("transform", None)
areas = []
for t,c in zip(texts, colors):
textprops["color"] = c
text = TextArea(t, textprops=textprops)
areas.append(text)
txt = HPacker(children=areas,
align="baseline",
pad=0, sep=0)
bbox = AnnotationBbox(txt, xy=(x, y),
xycoords='data',
annotation_clip=annotation_clip,
frameon=False,
boxcoords=("axes fraction"),
box_alignment=(
horizontalalignment,
verticalalignment), # alignment center, center
#bboxprops={"bbox_transmuter":transform},
)
ax.add_artist(bbox)
def hide_axes(ax):
for x in [ax.xaxis, ax.yaxis]:
x.set_major_formatter(NullFormatter())
x.set_major_locator(NullLocator())
for _, spine in ax.spines.items():
spine.set_color('none')
def heatmap_plot(data, ind, outfile, tracks, titles, colors, bgcolors, scale, tscale, labels, fontsize, colorbar=True):
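    # Draw one heatmap per track side by side, optionally with a horizontal
    # colorbar under each panel and a cluster-label column on the right.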
font = FontProperties(size=fontsize / 1.25, family=["Nimbus Sans L", "Helvetica", "sans-serif"])
label_ratio = 4.0
# space between heatmaps
btw_space = 0.02
plot_width = 1.75 * len(tracks) + btw_space * len(tracks)
plot_height = 6
width_ratios = [label_ratio] * len(tracks)
numplots = len(tracks)
if labels is not None and len(labels) == len(ind):
plot_width += 1 / label_ratio
numplots += 1
width_ratios += [1]
# Create figure
fig = plt.figure(figsize=(plot_width, plot_height))
# Create subplot layout
gs = gridspec.GridSpec(1, numplots, width_ratios=width_ratios, )
axes = []
for i, track in enumerate(tracks):
c = create_colormap(bgcolors[i % len(bgcolors)], colors[i % len(colors)])
ax = fig.add_subplot(gs[i])
ax.set_title(titles[i], fontproperties=font, y=1)
axes.append(ax)
cax_mat = ax.pcolormesh(data[track][ind], cmap=c, vmin=0, vmax=scale * tscale[i], rasterized=True)
hide_axes(ax)
ylim = ax.get_ylim()
#fig.colorbar(cax_mat, orientation="horizontal", pad=0.05)
if colorbar:
divider = make_axes_locatable(ax)
ax_cb = divider.new_vertical(size="2%", pad=0.1, pack_start=True)
fig.add_axes(ax_cb)
tick_locator = MaxNLocator(nbins=3)
cbar = fig.colorbar(cax_mat, cax=ax_cb, orientation="horizontal", ticks=tick_locator)
cbar_labels = cbar.ax.get_xticklabels()
for lab in cbar_labels:
lab.set_fontsize(fontsize / 1.25)
cbar_ticks = cbar.ax.get_xticks()
if cbar_ticks[0] == 0:
                # if the label is at the start of the colorbar
# move it a bit inside to avoid overlapping
# with other labels
cbar_labels[0].set_horizontalalignment('left')
if cbar_ticks[-1] == 1:
                # if the label is at the end of the colorbar
# move it a bit inside to avoid overlapping
# with other labels
cbar_labels[-1].set_horizontalalignment('right')
if labels is not None and len(labels) == len(ind):
axcluster = fig.add_subplot(gs[len(tracks)])
axcluster.axis('off')
if colorbar:
divider = make_axes_locatable(axcluster)
ax_cb = divider.new_vertical(size="2%", pad=0.1, pack_start=True)
axbl = fig.add_axes(ax_cb)
axbl.axis('off')
min_y, max_y = ylim
s = 0
axcluster.hlines(y=0, xmin=0, xmax=1, color="grey",
linewidth=0.5, alpha=0.5, linestyle='solid')
labels = np.array(labels)
# Smaller cluster on the top ([::-1])
for i in range(max(labels) + 1)[::-1]:
prev = s
s += sum(labels == i)
axcluster.hlines(y=s, xmin=0, xmax=1, color="grey",
linewidth=0.5, alpha=0.5, linestyle='solid')
axcluster.text(0.5, (prev + s) / 2,
str(i + 1),
verticalalignment="center",
horizontalalignment="center",
fontproperties=font)
axcluster.set_ylim(ylim)
fig.subplots_adjust(wspace=btw_space, hspace=0)
ext = outfile.split(".")[-1]
if ext not in ["png", "svg", "ps", "eps", "pdf"]:
outfile += ".png"
sys.stderr.write("Saving figure\n")
# Object orientated pyplot
fig.savefig(outfile, dpi=600, bbox_inches='tight')
def coverage_plot(ax, x, data, color="red", percs=None):
"""
ax = matplotlib axes instance
x = x-axis coordinates
data = profile data
color = color in any way matplotlib accepts
"""
# Might change this into an argument for the function
if percs is None:
percs = [50, 90]
percs = [(100 - float(p)) / 2 for p in percs[::-1]]
alphas = [0.1, 0.4]
# Convert to numpy array
vals = np.array(data)
# Get the median
m = np.median(vals, axis=0)
# Draw the minimum percentiles
lines = [np.array([scoreatpercentile(vals[:, i], perc) for i in range(len(vals[0]))]) for perc in percs] + [m]
for (line_min, line_max), alpha in zip([(lines[i], lines[i + 1]) for i in range(len(percs))], alphas):
ax.fill_between(x, line_min, line_max, facecolor=color, alpha=alpha, edgecolor='face')
# Draw the maximum percentiles
lines = [m] + [np.array([scoreatpercentile(vals[:, i], 100 - perc) for i in range(len(vals[0]))]) for perc in
percs[::-1]]
for (line_min, line_max), alpha in zip([(lines[i], lines[i + 1]) for i in range(len(percs))], alphas[::-1]):
ax.fill_between(x, line_min, line_max, facecolor=color, alpha=alpha, edgecolor='face')
# Draw the median
ax.plot(x, m, color="black", alpha=0.95, linewidth=0.8)
# ax.plot(x, mean(vals, axis = 0), color = "purple", alpha = 0.95, linewidth = 0.8)
def create_grid_figure(nrows, ncolumns, plotwidth=2.0, plotheight=2.0, pad=0.1, padleft=0.1, padright=0.1, padtop=0.1,
padbottom=0.1, clean=True):
wsize = padleft + (ncolumns * plotwidth) + (pad * (ncolumns - 1)) + padright
hsize = padtop + (nrows * plotheight) + (pad * (nrows - 1)) + padbottom
fig = plt.figure(figsize=(wsize, hsize))
wpadfraction = pad / wsize
hpadfraction = pad / hsize
wplotsize = plotwidth / wsize
hplotsize = plotheight / hsize
axes = {}
# Create all the subplots
for row in range(nrows):
axes[row] = {}
for col in range(ncolumns):
axes[row][col] = plt.subplot(nrows, ncolumns, row * ncolumns + col + 1)
# No labels, ticks, etc.
if clean:
for ax in [axes[row][col].xaxis, axes[row][col].yaxis]:
ax.set_major_formatter(NullFormatter())
ax.set_major_locator(NullLocator())
# Resize all the subplots
for row in range(nrows):
for col in range(ncolumns):
x0 = (padleft / wsize) + (wplotsize + wpadfraction) * col
x1 = wplotsize
y0 = (padbottom / hsize) + (nrows - row - 1) * (hplotsize + hpadfraction)
y1 = hplotsize
coords = [x0, y0, x1, y1]
axes[row][col].set_position(coords)
for s in list(axes[row][col].spines.values()):
s.set_linewidth(0.8)
return fig, axes
def profile_screenshot(fname, interval, tracks, fontsize=None, colors=None, scalegroups=None, scale=None, show_scale=True, annotation=None, bgmode="color", fragmentsize=200, dpi=600, rmdup=False, rmrepeats=False, reverse=False, adjscale=False):
"""
Plot a genome browser like profile
Parameters
----------
fname: string
output file name
interval: string
interval to plot in "chrom:start-end" format
tracks: list
list of filenames
"""
if scalegroups is None:
scalegroups = []
if not fontsize:
fontsize = FONTSIZE
if not colors:
colors = DEFAULT_COLORS
# Plot size and padding definition
plotwidth = 6
plotheight = 0.3
pad = {
"left": 1.5,
"right": 0.05,
"top": 0.05,
"bottom": 0.05,
"row": 0,
"column": 3,
}
# adjust width for track names if they are to long
# kind of a quick hack
max_len = 0
for group in tracks:
names = [os.path.splitext(os.path.basename(t))[0].strip() for t in group]
l = sum([len(name) for name in names])
if l > max_len:
max_len = l
if max_len > 27:
pad["left"] = 3
# Genomic scale
scale_height = 0.1
# Annotation track height
annotation_height = 0.01
chrom, start, end = re.split(r'[-:]', interval)
start, end = int(start), int(end)
if annotation:
ann = load_annotation([chrom,start,end], annotation)
if ann:
annotation_height = 0.2 * len(list(ann.keys()))
else:
annotation = False
nrows = len(tracks)
wsize = pad["left"] + plotwidth + pad["right"]
hsize = pad["top"] + (nrows * plotheight) + (pad["row"] * (nrows - 1)) + pad["bottom"]
hsize += scale_height + pad["row"] + annotation_height + pad["row"]
# initialize figure
fig = plt.figure(figsize=(wsize, hsize))
# initialize profile figure
pfig = ProfileFigure(fig=fig, fontsize=fontsize, pad=pad)
# add the genomic scale
pfig.add_panel(ScalePanel())
if type(scale) != type([]):
scale = [scale]
# add the signal tracks
c = 0
for group in tracks:
for i,track in enumerate(group):
panel = pfig.add_panel(
BamProfilePanel(track,
color = colors[c % len(colors)],
bgmode = bgmode,
name = os.path.splitext(os.path.split(track)[-1])[0],
fragmentsize = fragmentsize,
rmrepeats = rmrepeats,
rmdup = rmdup,
adjscale = adjscale,
show_scale = show_scale,
),
overlay= i != 0
)
panel.ymax = scale[c % len(scale)]
c += 1
# add the annotation panel
if annotation:
pfig.add_panel(AnnotationPanel(annotation))
pfig.plot([chrom, start, end], scalegroups=scalegroups, reverse=reverse)
plt.savefig(fname, dpi=dpi)
class ProfileFigure(object):
def __init__(self, fig=None, gs=None, fontsize=FONTSIZE, pad=None):
self._panels = []
if not fig:
fig = plt.figure()
self.fig = fig
self.pad = {}
if pad:
self.pad.update(pad)
relpad = {}
for k in ["left", "right"]:
relpad[k] = float(self.pad.get(k,0)) / fig.get_figwidth()
for k in ["top", "bottom"]:
relpad[k] = float(self.pad.get(k,0)) / fig.get_figheight()
if gs:
self.gs = gs
else:
gs = gridspec.GridSpec(1, 1)
gs.update(
left=relpad["left"],
right=1 - relpad["right"],
top=1 - relpad["top"],
bottom=relpad["bottom"],
wspace=0,
hspace=0
)
self.gs = gs[0]
self.font = FontProperties(size=fontsize / 1.25, family=["Nimbus Sans L", "Helvetica", "sans-serif"])
def _plot_panel_names(self, ax, panels):
names = [p.name for p in panels]
colors = ["black"]
if len(names) > 1:
tmp_names = []
colors = []
for name,color in zip(names, [p.color for p in panels]):
tmp_names.append("= ")
tmp_names.append(name + ", ")
colors += [color,"black"]
names = tmp_names
names[-1] = names[-1].strip(", ")
colortext(-0.01, 0.5,
names,
colors,
ax=ax,
horizontalalignment='right',
verticalalignment="center",
#transform=ax.transAxes,
clip_on=False,
fontproperties=self.font)
def plot(self, interval, scalegroups=None, reverse=False, **kwargs):
if scalegroups is None:
scalegroups = []
for panels in self._panels:
for panel in panels:
panel._load_data(interval)
gs0 = gridspec.GridSpecFromSubplotSpec(
len(self._panels),
1,
subplot_spec=self.gs,
height_ratios=[max([p.height for p in panels]) for panels in self._panels]
)
for panels in self._panels:
if isinstance(panels[-1], BamProfilePanel):
ymax = max([p.ymax for p in panels])
for panel in panels:
panel.ymax = ymax
if scalegroups and len(scalegroups) > 0:
for group in scalegroups:
ymax = max([self._panels[g][-1].ymax for g in group])
for g in group:
for panel in self._panels[g]:
panel.ymax = ymax
        # These are quick hacks to get the track groups to work
for panels in self._panels:
if len(panels) > 1:
# Set the alpha for overlapping tracks
for panel in panels:
panel.alpha = 0.5
for i, panels in enumerate(self._panels):
ax = plt.Subplot(self.fig, gs0[i])
plt.subplots_adjust(bottom=0, top=1, left=0, right=1, hspace=0)
# add track labels
self._plot_panel_names(ax, panels)
for panel in panels:
panel._plot(ax, interval, fig=self.fig, reverse=reverse, odd=i % 2, font=self.font, **kwargs)
self.fig.add_subplot(ax)
def add_panel(self, panel, overlay=False):
if overlay and len(self._panels) > 0:
self._panels[-1].append(panel)
else:
self._panels.append([panel])
return panel
class ProfilePanel(object):
name = ""
def hide_axes(self, axes):
for ax in [axes.xaxis, axes.yaxis]:
ax.set_major_formatter(NullFormatter())
ax.set_minor_formatter(NullFormatter())
ax.set_major_locator(NullLocator())
ax.set_minor_locator(NullLocator())
for s in list(axes.spines.values()):
s.set_color('none')
class BamProfilePanel(ProfilePanel):
def __init__(self, bamfile, height=1, color=None, bgmode=None, alpha=None, fragmentsize=200, rmdup=True,
rmrepeats=True, **kwargs):
self.height = height
self.track = Track.load(bamfile, fragmentsize=fragmentsize, rmdup=rmdup, rmrepeats=rmrepeats)
self.ymax = None
self.bgmode = bgmode
self.scalepm = kwargs.get("adjscale", False)
self.show_scale = kwargs.get("show_scale", True)
if color:
self.color = color
else:
self.color = "#a7004b"
if alpha:
self.alpha = alpha
else:
self.alpha = 1
self.fragmentsize = fragmentsize
self.rmdup = rmdup
self.rmrepeats = rmrepeats
self.name = kwargs.get('name')
def _load_data(self, interval):
self.profile = self.track.get_profile(interval,
scalepm=self.scalepm)
if not self.ymax:
self.ymax = np.nanmax(self.profile) * 1.10
def _plot(self, ax, interval, reverse=False, fig=None, odd=False, font=None, **kwargs):
# Background of profile
if self.bgmode == "stripes":
bgcol = {0: "white", 1: (0.95, 0.95, 0.95)}[int(odd)]
ax.set_facecolor(bgcol)
elif self.bgmode == "color":
ax.set_facecolor(self.color)
ax.patch.set_alpha(0.07)
# get interval
chrom, start, end = interval
profile = self.profile
if reverse:
profile = profile[::-1]
# plot data
ax.fill_between(
list(range(start, end)),
np.zeros(len(profile)),
profile,
edgecolor='face',
facecolor=self.color,
linewidth=0.5,
alpha=self.alpha)
# set the y-limit
ax.set_ylim(0, self.ymax)
# add y-limit label
if self.show_scale:
ax.text(0.005, 0.90,
int(ax.get_ylim()[-1] + 0.5),
horizontalalignment='left',
verticalalignment="top",
transform=ax.transAxes,
clip_on=False,
fontproperties=font)
ax.set_xlim(start, end)
self.hide_axes(ax)
class AnnotationPanel(ProfilePanel):
def __init__(self, annofile, height=0.3, vis="stack", color="black"):
self.annofile = annofile
self.height = height
self.vis = vis
self.color = color
def _load_data(self, interval):
self.gene_track = load_annotation(interval, self.annofile, vis=self.vis)
self.max_tracks = len(list(self.gene_track.keys()))
self.height *= self.max_tracks
def _plot(self, ax, interval, reverse=False, fig=None, odd=False, font=None, **kwargs):
chrom, start, end = interval
ax.set_ylim(- 1 * self.max_tracks, 0)
for track_id, genes in list(self.gene_track.items()):
for gene in genes:
h_gene = -1 * track_id - 0.5
genestart = gene[1]
geneend = gene[2]
genename = gene[3]
if len(gene) >= 6:
genestrand = gene[5]
else:
genestrand = "+"
# BED12 format
if len(gene) == 12:
exonstarts = [int(x) for x in gene[11].split(",") if x]
exonsizes = [int(x) for x in gene[10].split(",") if x]
else:
exonstarts = [0]
exonsizes = [geneend - genestart]
x1 = (genestart - start)
x2 = (geneend - start)
if reverse:
x1 = end - genestart
x2 = end - geneend
gstart = x1 / float(end - start)
gend = x2 / float(end - start)
# Horizontal line for complete gene
ax.axhline(h_gene,
gstart,
gend,
color=self.color,
solid_capstyle="butt",
)
# Exons
for exonstart, exonsize in zip(exonstarts, exonsizes):
estart = (genestart + exonstart - start)
eend = (genestart + exonstart + exonsize - start)
if reverse:
estart = end - (genestart + exonstart)
eend = end - (genestart + exonstart + exonsize)
ax.axhspan(
h_gene - 0.35,
h_gene + 0.35,
estart / float(end - start),
eend / float(end - start),
linewidth=0.1,
color=self.color)
# Only draw arrows for BED12 entries
if len(gene) == 12:
bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
figwidth, figheight = bbox.width, bbox.height
# Scale with absolute width of figure
step = 0.04 / figwidth
if reverse:
step = -step
for i in np.arange(gstart + step, gend - step, step):
if genestrand == "-":
astart = (i + step, h_gene)
aend = (i, h_gene)
else:
astart = (i, h_gene)
aend = (i + step, h_gene)
arr = FancyArrowPatch(
astart,
aend,
arrowstyle=GENE_ARROW,
mutation_scale=(figheight * fig.dpi) / 8 / self.max_tracks * 1.5,
linewidth=0.5,
color=self.color,
)
ax.add_patch(arr)
if gstart > 0:
ax.text(gstart - 0.01, h_gene, genename,
horizontalalignment="right",
verticalalignment="center",
fontproperties=font)
self.hide_axes(ax)
class ScalePanel(ProfilePanel):
def __init__(self, height=0.3, color=None, alpha=None):
self.height = height
if color:
self.color = color
else:
self.color = "black"
if alpha:
self.alpha = alpha
else:
self.alpha = 1
def _load_data(self, interval):
pass
def _plot(self, ax, interval, reverse=False, fig=None, odd=False, font=None, **kwargs):
chrom, start, end = interval
# Formatting
for s in list(ax.spines.values()):
s.set_color('none')
ax.yaxis.set_major_formatter(NullFormatter())
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_locator(NullLocator())
ax.set_xlim(start, end)
# ax.set_ylim(0,1)
# Set font
# Plot the numbers
ticks = [s for s in ax.xaxis.get_ticklocs()[:-1] if s > start and s < end]
xcoords = [(s - start) / (end - start) + 0.01 for s in ticks]
if reverse:
ticks = ticks[::-1]
for s, x in zip(ticks[:-1], xcoords[:-1]):
ax.text(
x,
0.5,
str(int(s)),
horizontalalignment='left',
verticalalignment='center',
transform=ax.transAxes,
fontproperties=font,
color=self.color)
ax.text(
0,
0.5,
chrom,
horizontalalignment='left',
verticalalignment='center',
transform=ax.transAxes,
fontproperties=font,
color=self.color)
class ConservationPanel(ProfilePanel):
def __init__(self, track, target, height=1):
self.track = track
self.height = height
self.data = []
self.target = target
def _load_data(self, ival1):
for line in open(self.track):
vals = line.strip().split("\t")
for i in [1, 2, 4, 5]:
vals[i] = int(vals[i])
self.data.append(vals)
def _plot(self, ax, interval, reverse=False, fig=None, odd=False, font=None, **kwargs):
reverse_other = reverse
reverse_self = kwargs.get("reverse_self", False)
chrom, start, end = interval
c2, s2, e2 = self.target
span1 = float(end - start)
span2 = float(e2 - s2)
for [chrom1, start1, end1, chrom2, start2, end2] in self.data:
if reverse_self:
if reverse_other:
coords = [
[1 - (end1 - start) / span1, 1],
[1 - (end2 - s2) / span2, 0],
[1 - (start2 - s2) / span2, 0],
[1 - (start1 - start) / span1, 1]
]
else:
coords = [
[1 - (end1 - start) / span1, 1],
[(start2 - s2) / span2, 0],
[(end2 - s2) / span2, 0],
[1 - (start1 - start) / span1, 1]
]
else:
if reverse_other:
coords = [
[(start1 - start) / span1, 1],
[1 - (end2 - s2) / span2, 0],
[1 - (start2 - s2) / span2, 0],
[(end1 - start) / span1, 1]
]
else:
coords = [
[(start1 - start) / span1, 1],
[(start2 - s2) / span2, 0],
[(end2 - s2) / span2, 0],
[(end1 - start) / span1, 1]
]
poly = Polygon(coords,
facecolor="black",
edgecolor='none',
alpha=0.2,
)
ax.add_patch(poly)
self.hide_axes(ax)
|
1685267
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from django.test.testcases import TestCase
from .base import Hook
from kolibri.plugins.hooks import register_hook
class KolibriTagNavigationTestCase(TestCase):
def setUp(self):
super(KolibriTagNavigationTestCase, self).setUp()
Hook.__module__ = "test.kolibri_plugin"
self.test_hook = register_hook(Hook)()
def test_frontend_tag(self):
self.assertIn(
"non_default_frontend", self.test_hook.render_to_page_load_sync_html()
)
|
1685312
|
import os
import argparse
import pickle
import numpy as np
import torch
import torch.nn.functional as F
import dnnlib
import legacy
from util.utilgan import basename, calc_init_res
try: # progress bar for notebooks
get_ipython().__class__.__name__
from util.progress_bar import ProgressIPy as ProgressBar
except: # normal console
from util.progress_bar import ProgressBar
parser = argparse.ArgumentParser()
parser.add_argument('--source', required=True, help='Source model path')
parser.add_argument('--out_dir', default='./', help='Output directory for reduced/reconstructed model')
parser.add_argument('-r', '--reconstruct', action='store_true', help='Reconstruct model (add internal arguments)')
parser.add_argument('-s', '--res', default=None, help='Target resolution in format X-Y')
parser.add_argument('-a', '--alpha', action='store_true', help='Add alpha channel for RGBA processing')
parser.add_argument('-l', '--labels', default=0, type=int, help='Make conditional model')
parser.add_argument('-v', '--verbose', action='store_true')
a = parser.parse_args()
if a.res is not None:
a.res = [int(s) for s in a.res.split('-')][::-1]
if len(a.res) == 1: a.res = a.res + a.res
def load_pkl(filepath):
with dnnlib.util.open_url(filepath) as f:
nets = legacy.load_network_pkl(f, custom=False) # ['G', 'D', 'G_ema', 'training_set_kwargs', 'augment_pipe']
return nets
def save_pkl(nets, filepath):
with open(filepath, 'wb') as file:
pickle.dump(nets, file) # , protocol=pickle.HIGHEST_PROTOCOL
def create_model(net_in, data_shape, labels=0, full=False, custom=False):
init_res, resolution, res_log2 = calc_init_res(data_shape[1:])
net_in['G_ema'].img_resolution = resolution
net_in['G_ema'].img_channels = data_shape[0]
net_in['G_ema'].init_res = init_res
if labels > 0:
net_in['G_ema'].c_dim = labels
net_out = legacy.create_networks(net_in, full=full, custom=custom)
return net_out
def add_channel(x, subnet): # [BCHW]
if subnet == 'D': # pad second dim [1]
padding = [0] * (len(x.shape)-2)*2
padding += [0,1,0,0]
else: # pad last dim [-1]
padding = [0] * (len(x.shape)-1)*2
padding += [0,1]
y = F.pad(x, padding, 'constant', 1)
return y
def pad_up_to(x, size, type='side'):
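    # Symmetrically zero-pad tensor x up to the target `size` (per dimension);
    # returns x unchanged if it already has that shape.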
sh = x.shape
if list(x.shape) == list(size): return x
padding = []
for i, s in enumerate(size):
p0 = (s-sh[i]) // 2
p1 = s-sh[i] - p0
padding = padding + [p0,p1]
y = F.pad(x, padding[::-1], 'constant', 0)
return y
def copy_vars(src_net, tgt_net, add_alpha=False, tile=False) -> None:
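    # Copy parameters from src_net into tgt_net for the G_ema/G/D subnets.
    # When shapes differ, either append an extra (alpha) channel, tile the
    # source tensor, or zero-pad/crop it to the target shape, depending on
    # the flags.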
for subnet in ['G_ema', 'G', 'D']:
if subnet in src_net.keys() and subnet in tgt_net.keys():
src_dict = src_net[subnet].state_dict()
tgt_dict = tgt_net[subnet].state_dict()
vars = [name for name in src_dict.keys() if name in tgt_dict.keys()]
pbar = ProgressBar(len(vars))
for name in vars:
source_shape = src_dict[name].shape
target_shape = tgt_dict[name].shape
if source_shape == target_shape:
tgt_dict[name].copy_(src_dict[name]).requires_grad_(False)
else:
if add_alpha:
update = add_channel(src_dict[name], subnet)
assert target_shape == update.shape, 'Diff shapes yet: src %s tgt %s' % (str(update.shape), str(target_shape))
tgt_dict[name].copy_(update).requires_grad_(False)
elif tile:
assert len(source_shape) == len(target_shape), "Diff shape ranks: src %s tgt %s" % (str(source_shape), str(target_shape))
tile_count = [target_shape[i] // source_shape[i] for i in range(len(source_shape))]
update = np.tile(src_dict[name], tile_count) # [512,512] => [1024,512]
if a.verbose is True: print(name, tile_count, source_shape, '=>', target_shape, '\n\n') # G_mapping/Dense0, D/Output
tgt_dict[name].copy_(torch.from_numpy(update)).requires_grad_(False)
else: # crop/pad
update = pad_up_to(src_dict[name], target_shape)
if a.verbose is True: print(name, source_shape, '=>', update.shape, '\n\n')
tgt_dict[name].copy_(update).requires_grad_(False)
pbar.upd(name)
def main():
net_in = load_pkl(a.source)
Gs_in = net_in['G_ema']
if hasattr(Gs_in, 'output_shape'):
out_shape = Gs_in.output_shape
print(' Loading model', a.source, out_shape)
_, res_in, _ = calc_init_res(out_shape[1:])
else: # original model
res_in = Gs_in.img_resolution
out_shape = [None, Gs_in.img_channels, res_in, res_in]
save_full = False
# netdict = net_in['G_ema'].state_dict()
# for k in netdict.keys():
# print(k, netdict[k].shape)
if a.res is not None or a.alpha is True:
if a.res is None: a.res = out_shape[2:]
colors = 4 if a.alpha is True else out_shape[1]
_, res_out, _ = calc_init_res([colors, *a.res])
if res_in != res_out or a.alpha is True: # add or remove layers
assert 'G' in net_in.keys() and 'D' in net_in.keys(), " !! G/D subnets not found in source model !!"
data_shape = [colors, res_out, res_out]
print(' Reconstructing full model with shape', data_shape)
net_out = create_model(net_in, data_shape, full=True)
copy_vars(net_in, net_out, add_alpha=True)
save_full = True
if a.res[0] != res_out or a.res[1] != res_out: # crop or pad layers
data_shape = [colors, *a.res]
net_out = create_model(net_in, data_shape, full=True)
copy_vars(net_in, net_out)
if a.labels > 0:
assert 'G' in net_in.keys() and 'D' in net_in.keys(), " !! G/D subnets not found in source model !!"
print(' Reconstructing full model with labels', a.labels)
data_shape = out_shape[1:]
net_out = create_model(net_in, data_shape, labels=a.labels, full=True)
copy_vars(net_in, net_out, tile=True)
save_full = True
if a.labels == 0 and a.res is None and a.alpha is not True:
if a.reconstruct is True:
print(' Reconstructing Gs model with same size')
data_shape = out_shape[1:]
net_out = create_model(net_in, data_shape, full=False) # FULL=TRUE - to enable full customization of foreign models ??
else:
net_out = dict(G_ema = Gs_in)
out_name = basename(a.source)
if a.res is not None: out_name += '-%dx%d' % (a.res[1], a.res[0])
if a.alpha is True: out_name += 'a'
if a.labels > 0: out_name += '-c%d' % a.labels
if not save_full: out_name += '-Gs'
save_pkl(net_out, os.path.join(a.out_dir, '%s.pkl' % out_name))
print(' Done')
if __name__ == '__main__':
main()
|
1685326
|
import math
from typing import List, Any
from . import *
class Collection(TopLevel):
"""The Collection class is a class that groups together a set of
TopLevel objects that have something in common.
Some examples of Collection objects:
* Results of a query to find all Component objects in a repository
that function as promoters.
* A set of Component objects representing a library of genetic
logic gates.
* A "parts list" for Component with a complex design, containing
both that component and all of the Component, Sequence, and
Model objects used to provide its full specification.
"""
def __init__(self, identity: str,
*, members: List[str] = None,
namespace: str = None,
attachments: List[str] = None,
name: str = None, description: str = None,
derived_from: List[str] = None,
generated_by: List[str] = None,
measures: List[SBOLObject] = None,
type_uri: str = SBOL_COLLECTION) -> None:
super().__init__(identity=identity, type_uri=type_uri,
namespace=namespace,
attachments=attachments, name=name,
description=description, derived_from=derived_from,
generated_by=generated_by, measures=measures)
self.members = ReferencedObject(self, SBOL_MEMBER, 0, math.inf,
initial_value=members)
def accept(self, visitor: Any) -> Any:
"""Invokes `visit_collection` on `visitor` with `self` as the only
argument.
:param visitor: The visitor instance
:type visitor: Any
:raises AttributeError: If visitor lacks a visit_collection method
:return: Whatever `visitor.visit_collection` returns
:rtype: Any
"""
        return visitor.visit_collection(self)
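# Illustrative construction sketch (identities are hypothetical, and an
# SBOL Document with a no-argument constructor is assumed):
#
#   doc = Document()
#   library = Collection('https://example.org/promoter_library',
#                        members=['https://example.org/pA',
#                                 'https://example.org/pB'])
#   doc.add(library)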
class Experiment(Collection):
"""The purpose of the Experiment class is to aggregate
ExperimentalData objects for subsequent analysis, usually in
accordance with an experimental design. Namely, the member
properties of an Experiment MUST refer to ExperimentalData
objects.
"""
def __init__(self, identity: str,
*, members: List[str] = None,
namespace: str = None,
attachments: List[str] = None,
name: str = None, description: str = None,
derived_from: List[str] = None,
generated_by: List[str] = None,
measures: List[SBOLObject] = None,
type_uri: str = SBOL_EXPERIMENT) -> None:
super().__init__(identity=identity, type_uri=type_uri,
members=members, namespace=namespace,
attachments=attachments, name=name,
description=description, derived_from=derived_from,
generated_by=generated_by, measures=measures)
def accept(self, visitor: Any) -> Any:
"""Invokes `visit_experiment` on `visitor` with `self` as the only
argument.
:param visitor: The visitor instance
:type visitor: Any
:raises AttributeError: If visitor lacks a visit_experiment method
:return: Whatever `visitor.visit_experiment` returns
:rtype: Any
"""
        return visitor.visit_experiment(self)
Document.register_builder(SBOL_COLLECTION, Collection)
Document.register_builder(SBOL_EXPERIMENT, Experiment)
|
1685358
|
from django.db import models, migrations
import core.models
class Migration(migrations.Migration):
dependencies = [
('core', '0002_auto_20150126_1611'),
]
operations = [
migrations.AlterField(
model_name='person',
name='birth_date',
field=models.DateField(blank=True, help_text='Syntym\xe4aika muodossa 24.2.1994', null=True, verbose_name='Syntym\xe4aika', validators=[core.models.birth_date_validator]),
preserve_default=True,
),
]
|
1685362
|
import torch.nn as nn
from .utils import register_model
@register_model('DomainFactorBackbone')
class DomainFactorBackbone(nn.Module):
def __init__(self):
super(DomainFactorBackbone, self).__init__()
self.num_channels = 3
self.setup_net()
def setup_net(self):
self.conv_params = nn.Sequential (
nn.Conv2d(self.num_channels, 64, kernel_size=5, stride=2, padding=2),
nn.BatchNorm2d(64),
nn.Dropout2d(0.1),
nn.ReLU(),
nn.Conv2d(64, 128, kernel_size=5, stride=2, padding=2),
nn.BatchNorm2d(128),
nn.Dropout2d(0.3),
nn.ReLU(),
nn.Conv2d(128, 256, kernel_size=5, stride=2, padding=2),
nn.BatchNorm2d(256),
nn.Dropout2d(0.5),
nn.ReLU()
)
self.fc_params = nn.Sequential (
nn.Linear(256*4*4, 512),
nn.BatchNorm1d(512),
)
def forward(self, x):
x = self.conv_params(x)
x = x.view(x.size(0), -1)
x = self.fc_params(x)
return x
|
1685366
|
import tensorflow as tf
import collections
class TrainModel(
collections.namedtuple("TrainModel",
("graph", "model"))):
pass
def create_train_model(
model_creator,
hparams):
graph = tf.Graph()
with graph.as_default(), tf.container("train"):
model = model_creator(
hparams,
tf.contrib.learn.ModeKeys.TRAIN,
)
return TrainModel(
graph=graph,
model=model,
)
|
1685415
|
import tkinter as tk
from tkinter import Menu, Tk, Text, DISABLED, RAISED,Frame, FLAT, Button, Scrollbar, Canvas, END
from tkinter import messagebox as MessageBox
from tkinter import ttk
import tkinter.simpledialog
from tkinter import *
from tkinter import font as tkFont
# Class that draws the line numbers
class TextLineNumbers(Canvas):
def __init__(self, *args, **kwargs):
Canvas.__init__(self, *args, **kwargs)
self.textwidget = None
def attach(self, text_widget):
self.textwidget = text_widget
def redraw(self, *args):
'''redraw line numbers'''
self.delete("all")
i = self.textwidget.index("@0,0")
while True :
dline= self.textwidget.dlineinfo(i)
if dline is None: break
y = dline[1]
linenum = str(i).split(".")[0]
self.create_text(2,y,anchor="nw", text=linenum)
i = self.textwidget.index("%s+1line" % i)
# Class for the text field
class CustomText(Text):
def __init__(self, *args, **kwargs):
Text.__init__(self, *args, **kwargs)
# create a proxy for the underlying widget
self._orig = self._w + "_orig"
self.tk.call("rename", self._w, self._orig)
self.tk.createcommand(self._w, self._proxy)
def _proxy(self, *args):
# let the actual widget perform the requested action
cmd = (self._orig,) + args
result = self.tk.call(cmd)
# generate an event if something was added or deleted,
# or the cursor position changed
if (args[0] in ("insert", "replace", "delete") or
args[0:3] == ("mark", "set", "insert") or
args[0:2] == ("xview", "moveto") or
args[0:2] == ("xview", "scroll") or
args[0:2] == ("yview", "moveto") or
args[0:2] == ("yview", "scroll")
):
self.event_generate("<<Change>>", when="tail")
# return what the actual widget returned
return result
# Frame that ties everything together
class Campo(Frame):
def __init__(self, *args, **kwargs):
Frame.__init__(self, *args, **kwargs)
self.text = CustomText(self)
self.linenumbers = TextLineNumbers(self, width=30)
self.linenumbers.attach(self.text)
self.linenumbers.pack(side="left", fill="y")
self.text.pack(side="right", fill="both", expand=True)
self.text.bind("<<Change>>", self._on_change)
self.text.bind("<Configure>", self._on_change)
# clone the text widget font and use it as a basis for some tag
bold_font = tkFont.Font(self.text, self.text.cget("font"))
bold_font.configure(weight="bold")
self.text.tag_configure("bold", font=bold_font)
self.text.tag_configure("reserve", foreground="blue", underline=False)
def _on_change(self, event):
self.linenumbers.redraw()
# Class to create a user
class MyDialog(tkinter.simpledialog.Dialog):
def body(self, master):
Label(master, text="Username:").grid(row=0)
Label(master, text="Password:").grid(row=1)
self.result = []
self.accept = False
self.e1 = Entry(master)
self.e2 = Entry(master, show="*")
self.e1.grid(row=0, column=1)
self.e2.grid(row=1, column=1)
return self.e1 # initial focus
def apply(self):
first = self.e1.get()
second = self.e2.get()
self.accept = True
self.result = [first, second]
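# --- Editorial usage sketch (not part of the original file) ---
# Minimal demo of the line-numbered editor defined above, assuming a display is
# available: create a root window, embed the Campo frame and start the event loop.
if __name__ == "__main__":
    root = Tk()
    root.title("Editor demo")
    Campo(root).pack(side="top", fill="both", expand=True)
    root.mainloop()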
|
1685453
|
import numpy as np
import plotly.express as px
import umap
def d3_umap(X, y_km, heat=None):
"""
Args:
X:
y_km:
heat:
"""
reducer = umap.UMAP(random_state=1234, n_components=3)
X_embedded = reducer.fit_transform(X)
node_colors = get_node_colormap(y_km)
x, y, z = X_embedded[:, 0], X_embedded[:, 1], X_embedded[:, 2]
fig = px.scatter_3d(x=x, y=y, z=z, color=node_colors)
fig.show()
return reducer
def get_node_colormap(node_label):
"""
Args:
node_label:
"""
if type(node_label) == list:
node_labels = node_label
sorted_node_labels = sorted(set(node_labels), reverse=True)
colors = np.linspace(0, 1, len(sorted_node_labels))
node_colormap = {f: colors[sorted_node_labels.index(f)] for f in set(node_labels)}
node_colors = [node_colormap[n] if n in node_colormap.keys() else None for n in node_labels]
elif node_label.dtype == "object":
node_labels = node_label.str.split("|", expand=True)[0]
sorted_node_labels = sorted(node_labels.unique(), reverse=True)
colors = np.linspace(0, 1, len(sorted_node_labels))
node_colormap = {f: colors[sorted_node_labels.index(f)] for f in node_labels.unique()}
node_colors = [node_colormap[n] if n in node_colormap.keys() else None for n in node_labels]
elif node_label.dtype == "float":
node_labels = node_label.values
node_colormap = None
node_colors = [n / node_labels.max() for n in node_labels]
return node_colors
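# --- Editorial usage sketch (not part of the original module) ---
# Assumes X is an (n_samples, n_features) array and y_km holds cluster labels
# (e.g. from KMeans); random data is used here purely for illustration.
if __name__ == "__main__":
    X = np.random.rand(100, 16)
    y_km = list(np.random.randint(0, 3, size=100))
    d3_umap(X, y_km)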
|
1685456
|
from __future__ import unicode_literals
from .common import InfoExtractor
class FreespeechIE(InfoExtractor):
IE_NAME = 'freespeech.org'
_VALID_URL = r'https?://(?:www\.)?freespeech\.org/stories/(?P<id>.+)'
_TEST = {
'add_ie': ['Youtube'],
'url': 'http://www.freespeech.org/stories/fcc-announces-net-neutrality-rollback-whats-stake/',
'info_dict': {
'id': 'waRk6IPqyWM',
'ext': 'mp4',
'title': 'What\'s At Stake - Net Neutrality Special',
'description': 'Presented by MNN and FSTV',
'upload_date': '20170728',
'uploader_id': 'freespeechtv',
'uploader': 'freespeechtv',
},
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
youtube_url = self._search_regex(
r'data-video-url="([^"]+)"',
webpage, 'youtube url')
return {
'_type': 'url',
'url': youtube_url,
'ie_key': 'Youtube',
}
|
1685478
|
from typing import Any, Sequence
from tango import Format, JsonFormat, Step
from tango.common import DatasetDict
from tango.common.testing import run_experiment
@Step.register("train_data")
class TrainData(Step):
DETERMINISTIC = True
CACHEABLE = False
def run(self) -> Sequence[int]: # type: ignore
return list(range(10))
@Step.register("val_data")
class ValData(Step):
DETERMINISTIC = True
CACHEABLE = False
def run(self) -> Sequence[int]: # type: ignore
return list(range(10, 20))
@Step.register("save_data")
class SaveData(Step):
DETERMINISTIC = True
CACHEABLE = True
FORMAT: Format = JsonFormat()
def run(self, dataset_dict: DatasetDict) -> Any: # type: ignore
return dataset_dict.splits
def test_experiment():
with run_experiment(
{
"steps": {
"train_data": {
"type": "train_data",
},
"val_data": {
"type": "val_data",
},
"saved_data": {
"type": "save_data",
"dataset_dict": {
"splits": {
"train": {"type": "ref", "ref": "train_data"},
"val": {"type": "ref", "ref": "val_data"},
}
},
},
}
}
) as run_dir:
assert (run_dir / "saved_data").is_dir()
fmt = JsonFormat()
data = fmt.read(run_dir / "saved_data")
assert data["train"] == list(range(10))
assert data["val"] == list(range(10, 20))
|
1685484
|
from gym import Env
class ProxyEnv(Env):
def __init__(self, wrapped_env):
self._wrapped_env = wrapped_env
self.action_space = self._wrapped_env.action_space
self.observation_space = self._wrapped_env.observation_space
@property
def wrapped_env(self):
return self._wrapped_env
def reset(self, **kwargs):
return self._wrapped_env.reset(**kwargs)
def step(self, action):
return self._wrapped_env.step(action)
def render(self, *args, **kwargs):
return self._wrapped_env.render(*args, **kwargs)
@property
def horizon(self):
return self._wrapped_env.horizon
def terminate(self):
if hasattr(self.wrapped_env, "terminate"):
self.wrapped_env.terminate()
def __getattr__(self, attr):
if attr == "_wrapped_env":
raise AttributeError()
return getattr(self._wrapped_env, attr)
def __getstate__(self):
"""
This is useful to override in case the wrapped env has some funky
__getstate__ that doesn't play well with overriding __getattr__.
The main problematic case is/was gym's EzPickle serialization scheme.
:return:
"""
return self.__dict__
def __setstate__(self, state):
self.__dict__.update(state)
def __str__(self):
return "{}({})".format(type(self).__name__, self.wrapped_env)
|
1685522
|
from eclcli.common import command
from eclcli.common import utils
from ..networkclient.common import utils as to_obj
class ListVPNService(command.Lister):
def get_parser(self, prog_name):
parser = super(ListVPNService, self).get_parser(prog_name)
return parser
def take_action(self, parsed_args):
network_client = self.app.client_manager.network
columns = (
'id',
'name',
'zone',
)
column_headers = (
'ID',
'Name',
'Zone',
)
data = [to_obj.VPNService(vpnsv)
for vpnsv in network_client.list_vpn_services().get('vpn_services')]
return (column_headers,
(utils.get_item_properties(
s, columns,
) for s in data))
class ShowVPNService(command.ShowOne):
def get_parser(self, prog_name):
parser = super(ShowVPNService, self).get_parser(prog_name)
parser.add_argument(
'vpn_service_id',
metavar="VPN_SERVICE_ID",
help="ID of VPN Service to show."
)
return parser
def take_action(self, parsed_args):
network_client = self.app.client_manager.network
vpnsv_id = parsed_args.vpn_service_id
dic = network_client.show_vpn_service(vpnsv_id).get('vpn_service')
columns = utils.get_columns(dic)
obj = to_obj.VPNService(dic)
data = utils.get_item_properties(
obj, columns,)
return columns, data
|
1685558
|
from flask import Flask, request, url_for
from flask_mail import Mail, Message
from itsdangerous import URLSafeTimedSerializer, SignatureExpired
app = Flask(__name__)
app.config.from_pyfile('config.cfg')
mail = Mail(app)
s = URLSafeTimedSerializer('Thisisasecret!')
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'GET':
return '<form action="/" method="POST"><input name="email"><input type="submit"></form>'
email = request.form['email']
token = s.dumps(email, salt='email-confirm')
msg = Message('Confirm Email', sender='<EMAIL>', recipients=[email])
link = url_for('confirm_email', token=token, _external=True)
msg.body = 'Your link is {}'.format(link)
mail.send(msg)
return '<h1>The email you entered is {}. The token is {}</h1>'.format(email, token)
@app.route('/confirm_email/<token>')
def confirm_email(token):
try:
email = s.loads(token, salt='email-confirm', max_age=3600)
except SignatureExpired:
return '<h1>The token is expired!</h1>'
return '<h1>The token works!</h1>'
if __name__ == '__main__':
app.run(debug=True)
|
1685620
|
import tensorflow as tf
import universal_transformer_modified
class UGformerV1(object):
def __init__(self, feature_dim_size, hparams_batch_size, ff_hidden_size, seq_length, num_classes, num_self_att_layers, num_GNN_layers=1):
# Placeholders for input, output
self.input_x = tf.compat.v1.placeholder(tf.int32, [None, seq_length], name="input_x")
self.graph_pool = tf.compat.v1.sparse_placeholder(tf.float32, [None, None], name="graph_pool")
self.X_concat = tf.compat.v1.placeholder(tf.float32, [None, feature_dim_size], name="X_concat")
self.one_hot_labels = tf.compat.v1.placeholder(tf.float32, [None, num_classes], name="one_hot_labels")
self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
#Inputs for Universal Transformer
self.input_UT = tf.nn.embedding_lookup(self.X_concat, self.input_x)
self.input_UT = tf.reshape(self.input_UT, [-1, seq_length, 1, feature_dim_size])
#Matrix weights in Universal Transformer are shared across each attention layer (timestep), while they are not in Transformer.
#It's optional to use Transformer Encoder.
self.hparams = universal_transformer_modified.universal_transformer_small1()
self.hparams.hidden_size = feature_dim_size
self.hparams.batch_size = hparams_batch_size * seq_length
self.hparams.max_length = seq_length
self.hparams.num_hidden_layers = num_self_att_layers # Number of attention layers: the number T of timesteps in Universal Transformer
self.hparams.num_heads = 1 # due to the fact that the feature embedding sizes are various
self.hparams.filter_size = ff_hidden_size
self.hparams.use_target_space_embedding = False
self.hparams.pos = None
self.hparams.add_position_timing_signal = False
self.hparams.add_step_timing_signal = False
self.hparams.add_sru = False
self.hparams.add_or_concat_timing_signal = None
# Construct k GNN layers
self.scores = 0
for layer in range(num_GNN_layers): # the number k of multiple stacked layers, each stacked layer includes a number of self-attention layers
# Universal Transformer Encoder
self.ute = universal_transformer_modified.UniversalTransformerEncoder1(self.hparams, mode=tf.estimator.ModeKeys.TRAIN)
self.output_UT = self.ute({"inputs": self.input_UT, "targets": 0, "target_space_id": 0})[0]
self.output_UT = tf.squeeze(self.output_UT, axis=2)
#
self.output_target_node = tf.split(self.output_UT, num_or_size_splits=seq_length, axis=1)[0]
self.output_target_node = tf.squeeze(self.output_target_node, axis=1)
#input for next GNN hidden layer
self.input_UT = tf.nn.embedding_lookup(self.output_target_node, self.input_x)
self.input_UT = tf.reshape(self.input_UT, [-1, seq_length, 1, feature_dim_size])
# graph pooling
self.graph_embeddings = tf.compat.v1.sparse_tensor_dense_matmul(self.graph_pool, self.output_target_node)
self.graph_embeddings = tf.nn.dropout(self.graph_embeddings, keep_prob=self.dropout_keep_prob)
# Concatenate graph representations from all GNN layers
with tf.variable_scope("layer_%d" % layer):
W = tf.compat.v1.get_variable(shape=[feature_dim_size, num_classes],
initializer=tf.contrib.layers.xavier_initializer(),
name="W_layer_%d" % layer)
b = tf.Variable(tf.zeros([num_classes]))
self.scores += tf.compat.v1.nn.xw_plus_b(self.graph_embeddings, W, b)
# Final predictions
self.predictions = tf.argmax(self.scores, 1, name="predictions")
# Calculate mean cross-entropy loss
with tf.name_scope("loss"):
losses = tf.compat.v1.nn.softmax_cross_entropy_with_logits_v2(logits=self.scores, labels=label_smoothing(self.one_hot_labels))
self.total_loss = tf.reduce_mean(losses)
# Accuracy
with tf.name_scope("accuracy"):
correct_predictions = tf.equal(self.predictions, tf.argmax(self.one_hot_labels, 1))
self.accuracy = tf.reduce_sum(tf.cast(correct_predictions, "float"), name="accuracy")
self.saver = tf.compat.v1.train.Saver(tf.global_variables(), max_to_keep=500)
tf.logging.info('Setting up the main structure')
def label_smoothing(inputs, epsilon=0.1):
V = inputs.get_shape().as_list()[-1] # number of channels
return ((1 - epsilon) * inputs) + (epsilon / V)
|
1685633
|
import cv2
import sys
import numpy as np
import depthai as dai
from time import sleep
'''
This example attaches a NeuralNetwork node directly to the SPI output. The corresponding ESP32 example shows how to decode it.
Make sure you have something to handle the SPI protocol on the other end! See the included ESP32 example.
'''
def create_spi_demo_pipeline(nnPath):
print("Creating SPI pipeline: ")
print("COLOR CAM -> DetectionNetwork -> SPI OUT")
pipeline = dai.Pipeline()
# set up NN node
nn1 = pipeline.createNeuralNetwork()
nn1.setBlobPath(nnPath)
# set up color camera and link to NN node
colorCam = pipeline.createColorCamera()
colorCam.setPreviewSize(300, 300)
colorCam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
colorCam.setInterleaved(False)
colorCam.setCamId(0)
colorCam.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)
colorCam.preview.link(nn1.input)
# set up SPI out node and link to nn1
spiOut = pipeline.createSPIOut()
spiOut.setStreamName("spimetaout")
spiOut.setBusId(0)
nn1.out.link(spiOut.input)
return pipeline
def test_pipeline():
nnBlobPath="mobilenet-ssd.blob.sh8cmx8NCE1"
if len(sys.argv) >= 2:
nnBlobPath = sys.argv[1]
pipeline = create_spi_demo_pipeline(nnBlobPath)
print("Creating DepthAI device")
if 1:
device = dai.Device(pipeline)
else: # For debug mode, with the firmware already loaded
found, device_info = dai.XLinkConnection.getFirstDevice(
dai.XLinkDeviceState.X_LINK_UNBOOTED)
if found:
device = dai.Device(pipeline, device_info)
else:
raise RuntimeError("Device not found")
print("Starting pipeline")
device.startPipeline()
print("Pipeline is running. See connected SPI device for output!")
while True:
sleep(1)
print("Closing device")
del device
test_pipeline()
|
1685689
|
from core.advbase import *
from slot.d import *
from slot.a import *
def module():
return Gala_Elisanne
class Gala_Elisanne(Adv):
a3 = ('primed_att',0.10)
conf = {}
conf['slots.a'] = BB()+FWHC()
conf['slots.frostbite.a'] = conf['slots.a']
conf['slots.d'] = Gaibhne_and_Creidhne()
conf['acl'] = """
`s4
`s1
`s3
`fsf, x=4
"""
coab = ['Bow','Tobias', 'Renee']
share = ['Patia','Summer_Luca']
def init(self):
self.buff_class = Teambuff if self.condition('buff all team') else Selfbuff
@staticmethod
def prerun_skillshare(adv, dst):
adv.buff_class = Dummy if adv.slots.c.ele != 'water' else Teambuff if adv.condition('buff all team') else Selfbuff
def prerun(self):
self.s2.autocharge_init(900).on()
def s1_proc(self, e):
self.buff_class(e.name,0.3,15).on()
def s2_proc(self, e):
self.energy.add(3)
if __name__ == '__main__':
import sys
from core.simulate import test_with_argv
test_with_argv(None, *sys.argv)
|
1685729
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import time
import misc.utils as utils
from collections import OrderedDict
import torch
import sys
sys.path.append("cider")
from pyciderevalcap.ciderD.ciderD import CiderD
sys.path.append("coco-caption")
from pycocoevalcap.bleu.bleu import Bleu
CiderD_scorer = None
Bleu_scorer = None
#CiderD_scorer = CiderD(df='corpus')
def init_scorer(cached_tokens):
global CiderD_scorer
CiderD_scorer = CiderD_scorer or CiderD(df=cached_tokens)
global Bleu_scorer
Bleu_scorer = Bleu_scorer or Bleu(4)
def array_to_str(arr):
out = ''
for i in range(len(arr)):
out += str(arr[i]) + ' '
if arr[i] == 0:
break
return out.strip()
def get_self_critical_reward(greedy_res, data_gts, gen_result, opt):
batch_size = gen_result.size(0)# batch_size = sample_size * seq_per_img
seq_per_img = batch_size // len(data_gts)
res = OrderedDict()
gen_result = gen_result.data.cpu().numpy()
greedy_res = greedy_res.data.cpu().numpy()
for i in range(batch_size):
res[i] = [array_to_str(gen_result[i])]
for i in range(batch_size):
res[batch_size + i] = [array_to_str(greedy_res[i])]
gts = OrderedDict()
for i in range(len(data_gts)):
gts[i] = [array_to_str(data_gts[i][j]) for j in range(len(data_gts[i]))]
res_ = [{'image_id':i, 'caption': res[i]} for i in range(2 * batch_size)]
res__ = {i: res[i] for i in range(2 * batch_size)}
gts = {i: gts[i % batch_size // seq_per_img] for i in range(2 * batch_size)}
if opt.cider_reward_weight > 0:
_, cider_scores = CiderD_scorer.compute_score(gts, res_)
print('Cider scores:', _)
else:
cider_scores = 0
if opt.bleu_reward_weight > 0:
_, bleu_scores = Bleu_scorer.compute_score(gts, res__)
bleu_scores = np.array(bleu_scores[3])
print('Bleu scores:', _[3])
else:
bleu_scores = 0
scores = opt.cider_reward_weight * cider_scores + opt.bleu_reward_weight * bleu_scores
scores = scores[:batch_size] - scores[batch_size:]
rewards = np.repeat(scores[:, np.newaxis], gen_result.shape[1], 1)
return rewards
|
1685746
|
class FindObject:
"""An object represented a find(1) command"""
def __init__(self, cmd):
self.exec_cmd = ''
self.path = ''
self.opts = ''
if cmd.startswith('find'):
# find ./find -type f -exec rm -rf {} ;
# 012345 012 0123456
#
# 8 possibilities:
# find
# find -exec rm -rf {} ;
# find -type f
# find -type f -exec rm -rf {} ;
# find ./bla
# find ./bla -exec rm -rf {} ;
# find ./bla -type f
# find ./bla -type f -exec rm -rf {} ;
exec_from = cmd.find('-exec ')
if exec_from != -1:
exec_to = cmd.rfind('{}')
self.exec_cmd = cmd[exec_from+6:exec_to].rstrip()
cmd = cmd[:exec_from]
path_end = cmd.find(' -')
if path_end != -1:
self.path = cmd[5:path_end].rstrip()
self.opts = cmd[path_end:].strip()
else:
self.path = cmd[5:].rstrip()
@classmethod
def build_with(cls, path, opts, exec_cmd):
cmd = cls('')
cmd.path = path
cmd.opts = opts
cmd.exec_cmd = exec_cmd
return cmd
def toCmd(self):
if self.path != '':
cmd = "find %s" % self.path
else:
cmd = "find "
if self.exec_cmd != '':
cmd += " %s %s" % (self.opts, self.exec_cmd)
else:
cmd += " %s" % (self.opts)
return cmd.rstrip()
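# --- Editorial usage sketch (not part of the original module) ---
# Parsing one of the eight command forms listed above; the expected attribute
# values are shown in the comments.
if __name__ == "__main__":
    fo = FindObject('find ./bla -type f -exec rm -rf {} ;')
    print(fo.path)      # ./bla
    print(fo.opts)      # -type f
    print(fo.exec_cmd)  # rm -rf
    print(FindObject.build_with('./bla', '-type f', 'rm -rf').toCmd())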
|
1685765
|
class Error(Exception):
pass
class UnknownObjectException(Error):
pass
class ObjectDisabledException(Error):
pass
class ObjectReadOnlyException(Error):
pass
class NoValueFoundException(Error):
pass
class NoMatchingWindowFoundException(Error):
pass
class UnknownFrameException(Error):
pass
class LocatorException(Error):
pass
|
1685779
|
import unittest
from vaporwavely import vaporize
class VaporwavelyTestCase(unittest.TestCase):
def test_lower(self):
self.assertEqual('Tassoni, patrimonio italiano',
vaporize('Tassoni, patrimonio italiano'))
def test_mixed(self):
self.assertEqual('aesthetic nintendo', vaporize('aesthetic nintendo'))
def test_upper(self):
self.assertEqual('VIOLET ARE RED, ROSES ARE BLUE',
vaporize('VIOLET ARE RED, ROSES ARE BLUE'))
def test_ascii(self):
# test all vaporizable characters
for char in range(33, 127): # '!' is 33 and '~' is 126
self.assertNotEqual(chr(char),
vaporize(chr(char)))
# test limits of vaporizable range
self.assertEqual(chr(127),
vaporize(chr(127)))
self.assertEqual(chr(32),
vaporize(chr(32)))
if __name__ == '__main__':
unittest.main()
|
1685792
|
from app.utils import filters
def test_md_to_html():
assert '<h1>' in filters.md_to_html('# Hello World')
|
1685802
|
from __future__ import unicode_literals
import logging
from django.db import migrations
from osf.utils.migrations import ensure_schemas
logger = logging.getLogger(__file__)
class Migration(migrations.Migration):
dependencies = [
('osf', '0141_merge_20181023_1526'),
]
operations = [
# To reverse this migrations simply revert changes to the schema and re-run
migrations.RunPython(ensure_schemas, ensure_schemas),
]
|
1685829
|
import fnmatch
import os
import subprocess
from airflow import configuration
from airflow.models import DagModel, DagRun, TaskInstance
from airflow.settings import Session, engine
from airflow.utils.state import State
from airflow.www.app import csrf
from flask import Blueprint, make_response
from prometheus_client import generate_latest, REGISTRY, Gauge
MetricsBlueprint = Blueprint(
'metrics', __name__,
url_prefix='/metrics'
)
def initialize_gauges():
"""
Initialize gauges to track in prometheus. Sets the gauges in the global frame.
"""
global dag_state, task_state, log_size_bytes, db_size_bytes, dagbag_size, postgres_connection
dag_state = Gauge('airflow_dag_states', 'Number of DAG runs', ['state'])
task_state = Gauge('airflow_task_states', 'Number of Task runs', ['state'])
db_size_bytes = Gauge('airflow_db_size_bytes', 'Size of Postgres tables in bytes', ['table'])
log_size_bytes = Gauge('airflow_log_size_bytes', 'Size of airflow logs in bytes', ['type'])
dagbag_size = Gauge('airflow_dagbag_size', 'Number of dags')
postgres_connection = Gauge('airflow_postgres_connections', 'Number of connections to this airflow database')
@MetricsBlueprint.route('/', methods=['GET'])
@csrf.exempt
def serve_metrics():
"""
Calculate the metrics for the initialized gauges and send the stats to the http endpoint.
"""
session = Session()
dagbag_size.set(session.query(DagModel).filter(DagModel.is_active.is_(True)).count())
for dag_state_type in State.dag_states:
count = session.query(DagRun).filter(DagRun.state == dag_state_type).count()
dag_state.labels(state=dag_state_type).set(count)
# The SKIPPED state currently is not a part of Airflow.State.task_states, which is a bug.
# A PR has been requested: https://github.com/apache/incubator-airflow/pull/2519 with the
# following Jira issue AIRFLOW-1508 filed by <NAME>, @ahaidrey.
for task_state_type in State.task_states + (State.SKIPPED, ):
count = session.query(TaskInstance).filter(TaskInstance.state == task_state_type).count()
task_state.labels(state=task_state_type).set(count)
session.close()
# For querying table size in postgres
result = engine.execute(
"SELECT "
"quote_ident(tablename), "
"pg_total_relation_size(quote_ident(schemaname) || '.' || quote_ident(tablename))::BIGINT AS size "
"FROM pg_tables WHERE schemaname = 'public';"
)
rows = result.fetchall()
for row in rows:
table, size = row
db_size_bytes.labels(table=table).set(size)
database = configuration.get('core', 'sql_alchemy_conn').rsplit('/', 1)[-1]
result = engine.execute(
"SELECT count(*) as count FROM pg_stat_activity WHERE datname=%s;", database
)
postgres_connection.set(result.fetchone()['count'])
scheduler_size = 0
worker_size = 0
webserver_size = 0
logs_path = configuration.get('core', 'BASE_LOG_FOLDER')
logs = os.listdir(logs_path)
for log in logs:
log_path = os.path.join(logs_path, log)
if os.path.isfile(log_path):
if fnmatch.fnmatch(log, 'airflow-scheduler.log*'):
scheduler_size += os.path.getsize(log_path)
elif fnmatch.fnmatch(log, 'airflow-worker.log*'):
worker_size += os.path.getsize(log_path)
elif fnmatch.fnmatch(log, 'airflow-webserver.log*'):
webserver_size += os.path.getsize(log_path)
folder_size = int(subprocess.check_output(['du', '-sb', logs_path]).split('\t')[0])
log_size_bytes.labels(type='scheduler').set(scheduler_size)
log_size_bytes.labels(type='worker').set(worker_size)
log_size_bytes.labels(type='webserver').set(webserver_size)
log_size_bytes.labels(type='other').set(folder_size - scheduler_size - worker_size - webserver_size)
stats = make_response(generate_latest(REGISTRY))
stats.headers["content-type"] = "text/plain"
return stats
if __name__ == "airflow.blueprints.metrics_blueprint":
"""
This ensures the metric gauges are initialized only once, not every time the DAG files are processed.
"""
initialize_gauges()
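# --- Editorial note (assumption, not part of the original module) ---
# In Airflow 1.x a blueprint like this is typically exposed to the webserver
# through a plugin, e.g.:
#
#   from airflow.plugins_manager import AirflowPlugin
#
#   class MetricsPlugin(AirflowPlugin):
#       name = "metrics_blueprint"
#       flask_blueprints = [MetricsBlueprint]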
|
1685877
|
import time
import datetime
import sys, glob, os, fnmatch, urllib, math
from osgeo import gdal
import numpy
import argparse
import config
import json
force = 0
verbose = 0
BASE_DIR = config.EF5_DIR
def execute( cmd ):
if verbose:
print cmd
os.system(cmd)
def CreateLevel(l, geojsonDir, fileName, src_ds):
projection = src_ds.GetProjection()
geotransform = src_ds.GetGeoTransform()
band = src_ds.GetRasterBand(1)
data = band.ReadAsArray(0, 0, src_ds.RasterXSize, src_ds.RasterYSize )
xorg = geotransform[0]
yorg = geotransform[3]
pres = geotransform[1]
xmax = xorg + geotransform[1]* src_ds.RasterXSize
ymax = yorg - geotransform[1]* src_ds.RasterYSize
if os.path.exists(fileName):
return
driver = gdal.GetDriverByName( "GTiff" )
dst_ds_dataset = driver.Create( fileName, src_ds.RasterXSize, src_ds.RasterYSize, 1, gdal.GDT_Byte, [ 'COMPRESS=DEFLATE' ] )
dst_ds_dataset.SetGeoTransform( geotransform )
dst_ds_dataset.SetProjection( projection )
data[data>=l] = 255
data[data<l] = 0
count = (data >= l).sum()
print "level", l, " count:", count
if count > 0 :
o_band = dst_ds_dataset.GetRasterBand(1)
dst_ds_dataset.SetGeoTransform( geotransform )
dst_ds_dataset.SetProjection( projection )
o_band.WriteArray(data.astype('i1'), 0, 0)
ct = gdal.ColorTable()
ct.SetColorEntry( 0, (255, 255, 255, 255) )
ct.SetColorEntry( 255, (255, 0, 0, 255) )
o_band.SetRasterColorTable(ct)
dst_ds_dataset = None
print "Created", fileName
cmd = "gdal_translate -q -of PNM " + fileName + " "+fileName+".pgm"
execute(cmd)
# -i invert before processing
# -t 2 suppress speckles of up to this many pixels.
# -a 1.5 set the corner threshold parameter
# -z black specify how to resolve ambiguities in path decomposition. Must be one of black, white, right, left, minority, majority, or random. Default is minority
# -x scaling factor
# -L left margin
# -B bottom margin
cmd = str.format("potrace -i -z black -a 1.5 -t 3 -b geojson -o {0} {1} -x {2} -L {3} -B {4} ", fileName+".geojson", fileName+".pgm", pres, xorg, ymax );
execute(cmd)
#cmd = str.format("node set_geojson_property.js --file {0} --prop frost={1}", fileName+".geojson", frost)
#execute(cmd)
cmd = str.format("topojson -o {0} --simplify-proportion 0.5 -p height={1} -- height={2}", fileName+".topojson", l, fileName+".geojson" );
execute(cmd)
# convert it back to json
cmd = "topojson-geojson --precision 4 -o %s %s" % ( geojsonDir, fileName+".topojson" )
execute(cmd)
# rename file
output_file = "height_level_%d.geojson" % l
cmd = "mv %s %s" % (os.path.join(geojsonDir,"height.json"), os.path.join(geojsonDir, output_file))
execute(cmd)
#
# Code from gdal2tiles
#
tileSize = 256
initialResolution = 2 * math.pi * 6378137 / tileSize
# 156543.03392804062 for tileSize 256 pixels
originShift = 2 * math.pi * 6378137 / 2.0
# 20037508.342789244
MAXZOOMLEVEL = 32  # maximum zoom level (as in gdal2tiles); required by ZoomForPixelSize() below
def Resolution(zoom):
return initialResolution / (2**zoom)
def LatLonToMeters( lat, lon ):
"Converts given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:900913"
mx = lon * originShift / 180.0
my = math.log( math.tan((90 + lat) * math.pi / 360.0 )) / (math.pi / 180.0)
my = my * originShift / 180.0
return mx, my
def MetersToLatLon( mx, my ):
"Converts XY point from Spherical Mercator EPSG:900913 to lat/lon in WGS84 Datum"
lon = (mx / originShift) * 180.0
lat = (my / originShift) * 180.0
lat = 180 / math.pi * (2 * math.atan(math.exp(lat * math.pi / 180.0)) - math.pi / 2.0)
return lat, lon
def PixelsToMeters( px, py, zoom):
"Converts pixel coordinates in given zoom level of pyramid to EPSG:900913"
res = Resolution(zoom)
mx = px * res - originShift
my = py * res - originShift
return mx, my
def MetersToPixels( mx, my, zoom):
"Converts EPSG:900913 to pyramid pixel coordinates in given zoom level"
res = Resolution( zoom )
px = (mx + originShift) / res
py = (my + originShift) / res
return px, py
def ZoomForPixelSize( pixelSize ):
"Maximal scaledown zoom of the pyramid closest to the pixelSize."
for i in range(MAXZOOMLEVEL):
if pixelSize > Resolution(i):
if i!=0:
return i-1
else:
return 0 # We don't want to scale up
#
# Generate the BBOX for that center latlon and zoom level
#
def bbox(lat, lon, zoom, width, height):
mx, my = LatLonToMeters( lat, lon )
px, py = MetersToPixels( mx, my, zoom)
mx,my = PixelsToMeters( px - width/2, py + height/2, zoom)
ullat, ullon = MetersToLatLon( mx, my )
mx,my = PixelsToMeters( px + width/2, py - height/2, zoom)
lrlat, lrlon = MetersToLatLon( mx, my )
return ullon, ullat, lrlon, lrlat
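# --- Editorial worked example (not part of the original script) ---
# The helpers above are exact analytical inverses of each other, so a round trip
# should reproduce the input up to floating point error, e.g.:
#
#   mx, my = LatLonToMeters(38.0, -77.0)
#   lat, lon = MetersToLatLon(mx, my)   # lat ~= 38.0, lon ~= -77.0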
def mapbox_image(centerlat, centerlon, z, rasterXSize, rasterYSize, osm_bg_image):
#if force or not os.path.isfile(app.osm_bg_image):
mapbox_url = str.format("https://api.tiles.mapbox.com/v3/cappelaere.map-1d8e1acq/{0},{1},{2}/{3}x{4}.png32",centerlon, centerlat, z, rasterXSize,rasterYSize)
if verbose:
print "wms url:" , mapbox_url
urllib.urlretrieve(mapbox_url, osm_bg_image)
if verbose:
print "created:" , osm_bg_image
def MakeBrowseImage(src_ds, browse_filename, subset_filename, osm_bg_image, sw_osm_image):
projection = src_ds.GetProjection()
geotransform = src_ds.GetGeoTransform()
band = src_ds.GetRasterBand(1)
data = band.ReadAsArray(0, 0, src_ds.RasterXSize, src_ds.RasterYSize )
xorg = geotransform[0]
yorg = geotransform[3]
pres = geotransform[1]
xmax = xorg + geotransform[1]* src_ds.RasterXSize
ymax = yorg - geotransform[1]* src_ds.RasterYSize
deltaX = xmax - xorg
deltaY = ymax - yorg
driver = gdal.GetDriverByName( "GTiff" )
if force or not os.path.isfile(browse_filename):
dst_ds_dataset = driver.Create( browse_filename, src_ds.RasterXSize, src_ds.RasterYSize, 2, gdal.GDT_Byte, [ 'COMPRESS=DEFLATE', 'ALPHA=YES' ] )
dst_ds_dataset.SetGeoTransform( geotransform )
dst_ds_dataset.SetProjection( projection )
data[data <= 0] = 0
data[numpy.logical_and(data>0, data<=1)] = 1
data[numpy.logical_and(data>1, data<=2)] = 2
data[numpy.logical_and(data>2 ,data<=3)] = 3
data[numpy.logical_and(data>3, data<=5)] = 5
data[numpy.logical_and(data>5, data<=8)] = 8
data[numpy.logical_and(data>8, data<=13)] = 13
data[data>13] = 21
dst_ds_dataset.SetGeoTransform( geotransform )
dst_ds_dataset.SetProjection( projection )
o_band = dst_ds_dataset.GetRasterBand(1)
o_band.WriteArray(data.astype('i1'), 0, 0)
a_band = dst_ds_dataset.GetRasterBand(2)
data[data > 0] = 255
data[data < 0] = 0
a_band.WriteArray(data.astype('i1'), 0, 0)
ct = gdal.ColorTable()
ct = gdal.ColorTable()
for i in range(256):
ct.SetColorEntry( i, (0, 0, 0, 0) )
ct.SetColorEntry( 0, (0, 0, 0, 0) )
ct.SetColorEntry( 1, (254, 229, 217, 255) )
ct.SetColorEntry( 2, (252, 187, 161, 255) )
ct.SetColorEntry( 3, (252, 146, 114, 255) )
ct.SetColorEntry( 5, (251, 106, 74, 255) )
ct.SetColorEntry( 8, (239, 59, 44, 255) )
ct.SetColorEntry( 13, (203, 24, 29, 255) )
ct.SetColorEntry( 21, (153, 0, 13, 255) )
o_band.SetRasterColorTable(ct)
band.SetNoDataValue(0)
dst_ds_dataset = None
print "Created Browse Image:", browse_filename
# subset it
minX = xorg + deltaX/4
maxX = xmax - deltaX/4
minY = ymax - deltaY/2
maxY = yorg
#
centerlon = (minX + maxX)/2
centerlat = (minY + maxY)/2
zoom = 8
if force or not os.path.isfile(osm_bg_image):
mapbox_image(centerlat, centerlon, zoom, src_ds.RasterXSize/8, src_ds.RasterYSize/8, osm_bg_image)
ullon, ullat, lrlon, lrlat = bbox(centerlat, centerlon, zoom, src_ds.RasterXSize/8, src_ds.RasterYSize/8)
if force or not os.path.isfile(subset_filename):
ofStr = ' -of GTiff '
bbStr = ' -te %s %s %s %s '%(ullon, lrlat, lrlon, ullat)
#resStr = ' -tr %s %s '%(pres, pres)
resStr = ' '
projectionStr = ' -t_srs EPSG:4326 '
overwriteStr = ' -overwrite ' # Overwrite output if it exists
additionalOptions = ' -co COMPRESS=DEFLATE -setci ' # Additional options
wh = ' -ts %d %d ' % ( src_ds.RasterXSize/8, src_ds.RasterYSize/8)
warpOptions = ofStr + bbStr + projectionStr + resStr + overwriteStr + additionalOptions + wh
warpCMD = 'gdalwarp ' + warpOptions + browse_filename + ' ' + subset_filename
execute(warpCMD)
# superimpose the suface water over map background
#if force or not os.path.isfile(sw_osm_image):
if force or not os.path.isfile(sw_osm_image):
cmd = str.format("composite -gravity center {0} {1} {2}", subset_filename, osm_bg_image, sw_osm_image)
execute(cmd)
# ===============================
# Main
#
# ef5.py --scene 20100203_1200 -v
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate EF5 flood map')
apg_input = parser.add_argument_group('Input')
apg_input.add_argument("-f", "--force", action='store_true', help="HydroSHEDS forces new water image to be generated")
apg_input.add_argument("-v", "--verbose", action='store_true', help="Verbose on/off")
apg_input.add_argument("-s", "--scene", nargs=1, help="scene")
options = parser.parse_args()
scene = options.scene[0]
force = options.force
verbose = options.verbose
print scene, BASE_DIR
inputDir = os.path.join(BASE_DIR, scene)
if not os.path.exists(inputDir):
print "Cannot find dir", inputDir
sys.exit(-1)
inputFile = os.path.join(inputDir, scene+".tif")
if not os.path.exists(inputFile):
print "Cannot find file", inputFile
sys.exit(-1)
geojsonDir = os.path.join(inputDir,"geojson")
if not os.path.exists(geojsonDir):
os.makedirs(geojsonDir)
levelsDir = os.path.join(inputDir,"levels")
if not os.path.exists(levelsDir):
os.makedirs(levelsDir)
merge_filename = os.path.join(geojsonDir, "%s_levels.geojson" % scene)
topojson_filename = os.path.join(geojsonDir, "..", "%s_levels.topojson" % scene)
browse_filename = os.path.join(geojsonDir, "..", "browse_%s.tif" % scene)
subset_filename = os.path.join(geojsonDir, "..", "small_browse_%s.tif" % scene)
osm_bg_image = os.path.join(geojsonDir, "..", "osm_bg.png")
sw_osm_image = os.path.join(geojsonDir, "..", "thn.png")
ds = gdal.Open( inputFile )
levels = [21, 13, 8, 5, 3, 2, 1]
if not os.path.exists(topojson_filename+".gz"):
for l in levels:
fileName = os.path.join(levelsDir, scene+"_level_%d.tif"%l)
CreateLevel(l, geojsonDir, fileName, ds)
jsonDict = dict(type='FeatureCollection', features=[])
for l in reversed(levels):
fileName = os.path.join(geojsonDir, "height_level_%d.geojson"%l)
if os.path.exists(fileName):
print "merge", fileName
with open(fileName) as data_file:
data = json.load(data_file)
if 'features' in data:
for f in data['features']:
jsonDict['features'].append(f)
with open(merge_filename, 'w') as outfile:
json.dump(jsonDict, outfile)
# Convert to topojson
cmd = "topojson -p -o "+ topojson_filename + " " + merge_filename
execute(cmd)
cmd = "gzip --keep "+ topojson_filename
execute(cmd)
MakeBrowseImage(ds, browse_filename, subset_filename, osm_bg_image, sw_osm_image)
# we could remove geojsonDir and levelsDir
#cmd = "rm -rf %s %s" %(geojsonDir, levelsDir)
ds = None
|
1685912
|
from {{appname}}.models.elastic.testelastic import Testelastic
from datetime import datetime
from elasticsearch_dsl import connections
# create the mappings in elasticsearch
Testelastic.init()
# create and save and article
article = Testelastic(meta={'id': 42}, title='Hello world!', tags=['test'])
article.body = ''' looong text '''
article.published_from = datetime.now()
article.save()
article = Testelastic.get(id=42)
print(article.is_published())
# Display cluster health
print(connections.get_connection().cluster.health())
|
1685914
|
import distro
import logging
from lxml import etree
import pkg_resources
log = logging.getLogger(__name__)
emulator_path = None
def parse_rbd_monitor(monitorlist):
monitors = dict()
for monitor in monitorlist.split(','):
port = '6789'
if ':' in monitor:
port = monitor.split(':')[1]
monitor = monitor.split(':')[0]
monitors[monitor] = port
return monitors
def rbd_pool(
name,
pool,
monitorlist,
user,
secret
):
root = etree.Element('pool', type='rbd')
etree.SubElement(root, 'name').text = name
rsource = etree.SubElement(root, 'source')
etree.SubElement(rsource,'name').text = pool
for monitor, port in parse_rbd_monitor(monitorlist).items():
etree.SubElement(rsource, 'host', name=monitor, port=port)
if user:
auth = etree.SubElement(rsource, 'auth', username=user, type='ceph')
etree.SubElement(auth, 'secret', uuid=secret)
return root
def rbd_volume(
name,
capacity,
pool,
):
root = etree.Element('volume')
etree.SubElement(root, 'name').text = name
etree.SubElement(root, 'source')
etree.SubElement(root, 'capacity', unit='bytes').text = str(capacity)
etree.SubElement(root, 'allocation', unit='bytes').text = str(capacity)
target = etree.SubElement(root, 'target')
etree.SubElement(target, 'path').text = 'rbd:{pool}/{name}'.format(pool=pool, name=name)
etree.SubElement(target, 'format', type='unknown')
permissions = etree.SubElement(target, 'permissions')
etree.SubElement(permissions, 'mode').text = '00'
etree.SubElement(permissions, 'owner').text = '0'
etree.SubElement(permissions, 'group').text = '0'
return root
def volume(
name,
capacity=0,
format_=None,
sparse=True,
raw = False,
):
root = etree.Element('volume')
etree.SubElement(root, 'name').text = name
etree.SubElement(root, 'capacity').text = '{0:d}'.format(capacity)
if sparse:
etree.SubElement(root, 'allocation').text = '0'
if raw:
format_ = 'raw'
target = etree.SubElement(root, 'target')
if format_ is None:
format_ = 'qcow2'
etree.SubElement(target, 'format', type=format_)
return root
def volume_clone(
name,
parent_vol,
capacity=None,
raw = False
):
(_type_, parent_capacity, _allocation) = parent_vol.info()
if capacity is None:
capacity = parent_capacity
type = 'qcow2'
sparse = False
if raw:
type = 'raw'
sparse = False
root = volume(name=name, capacity=capacity, sparse=sparse, raw=raw)
backing = etree.SubElement(root, 'backingStore')
etree.SubElement(backing, 'format', type=type)
etree.SubElement(backing, 'path').text = parent_vol.key()
return root
def get_emulator_path():
global emulator_path
if emulator_path:
return emulator_path
log.debug('The host distro id is %s', distro.id())
if any(distro.id().startswith(_)
for _ in ('opensuse', 'sles')):
path = '/usr/bin/qemu-kvm'
elif any(distro.id().startswith(_)
for _ in ('centos', 'fedora', 'rhel')):
path = '/usr/libexec/qemu-kvm'
elif any(distro.id().startswith(_)
for _ in ('ubuntu', 'debian')):
path = '/usr/bin/kvm'
else:
raise Exception("Can't get emulator path, the distro '%s' "
"is not supported yet" % distro.id())
log.debug('Using emulator path: "%s"', path)
emulator_path = path
return emulator_path
def domain(
name,
disk_key,
iso_key,
ram=None,
cpus=None,
networks=None,
additional_disks_key=None,
rbd_disks_key=None,
rbd_details=None,
hypervisor='kvm',
raw = False,
emulator = None,
):
with pkg_resources.resource_stream('downburst', 'template.xml') as f:
tree = etree.parse(f)
(domain,) = tree.xpath('/domain')
domain.set('type', hypervisor)
n = etree.SubElement(domain, 'name')
n.text = name
# <disk type='file' device='disk'>
# <driver name='qemu' type='qcow2'/>
# <source file='/var/lib/libvirt/images/NAME.img'/>
# <target dev='vda' bus='virtio'/>
# </disk>
type = 'qcow2'
if raw:
type = 'raw'
(devices,) = tree.xpath('/domain/devices')
emulator_element = devices.find('emulator')
emulator_path = emulator or get_emulator_path()
if emulator_element is not None:
log.debug('Overriding xpath /domain/devices/emulator in xml template with: %s'
% emulator_path)
emulator_element.text = emulator_path
else:
etree.SubElement(devices, 'emulator').text = emulator_path
disk = etree.SubElement(devices, 'disk', type='file', device='disk')
etree.SubElement(disk, 'driver', name='qemu', type=type)
etree.SubElement(disk, 'source', file=disk_key)
etree.SubElement(disk, 'target', dev='vda', bus='virtio')
letters = 'abcdefghijklmnopqrstuvwxyz'
x = 0
if additional_disks_key is not None:
for key in additional_disks_key:
x += 1
# Skip a because vda = boot drive. Drives should start
# at vdb and continue: vdc, vdd, etc...
blockdevice = 'vd' + letters[x]
# <disk type='file' device='disk'>
# <driver name='qemu' type='raw'/>
# <source file='/var/lib/libvirt/images/NAME.img'/>
# <target dev='vdX' bus='virtio'/>
# </disk>
(devices,) = tree.xpath('/domain/devices')
disk = etree.SubElement(devices, 'disk', type='file', device='disk')
etree.SubElement(disk, 'driver', name='qemu', type='raw')
etree.SubElement(disk, 'source', file=key)
etree.SubElement(disk, 'target', dev=blockdevice, bus='virtio')
if rbd_disks_key is not None:
for key in rbd_disks_key:
x += 1
# Skip a because vda = boot drive. Drives should start
# at vdb and continue: vdc, vdd, etc...
blockdevice = 'vd' + letters[x]
# <disk type='file' device='disk'>
# <driver name='qemu' type='raw'/>
# <source file='/var/lib/libvirt/images/NAME.img'/>
# <target dev='vdX' bus='virtio'/>
# </disk>
(devices,) = tree.xpath('/domain/devices')
disk = etree.SubElement(devices, 'disk', type='network')
etree.SubElement(disk, 'driver', name='qemu', type='raw')
rsource = etree.SubElement(disk, 'source', protocol='rbd', name=key)
for monitor, port in parse_rbd_monitor(rbd_details['ceph_cluster_monitors']).items():
etree.SubElement(rsource, 'host', name=monitor, port=port)
etree.SubElement(disk, 'target', dev=blockdevice, bus='virtio')
if rbd_details['ceph_cluster_user']:
auth = etree.SubElement(disk, 'auth', username=rbd_details['ceph_cluster_user'])
etree.SubElement(auth, 'secret', type='ceph', usage=rbd_details['ceph_cluster_secret'])
# <disk type='file' device='cdrom'>
# <driver name='qemu' type='raw'/>
# <source file='/var/lib/libvirt/images/cloud-init.chef03.iso'/>
# <target dev='hdc' bus='ide'/>
# <readonly/>
# </disk>
disk = etree.SubElement(devices, 'disk', type='file', device='cdrom')
etree.SubElement(disk, 'driver', name='qemu', type='raw')
etree.SubElement(disk, 'source', file=iso_key)
etree.SubElement(disk, 'target', dev='hdc', bus='ide')
if ram is not None:
# default unit is kibibytes, and libvirt <0.9.11 doesn't
# support changing that
ram = int(round(ram/1024.0))
(memory,) = tree.xpath('/domain/memory')
memory.text = '{ram:d}'.format(ram=ram)
if cpus is not None:
(vcpu,) = tree.xpath('/domain/vcpu')
vcpu.text = '{cpus:d}'.format(cpus=cpus)
# <interface type='network'>
# <source network='default'/>
# <model type='virtio'/>
# </interface>
if networks is None:
networks = [{}]
for net in networks:
net_elem = etree.SubElement(
devices,
'interface',
type='network',
)
etree.SubElement(net_elem, 'model', type='virtio')
etree.SubElement(
net_elem,
'source',
network=net.get('source', 'default'),
)
mac = net.get('mac')
if mac is not None:
# <mac address='52:54:00:01:02:03'/>
etree.SubElement(net_elem, 'mac', address=mac)
return tree
|
1685917
|
import sys
import glob
import unittest
def create_test_suite():
test_file_strings = glob.glob('tests/test_*.py')
module_strings = ['tests.' + name[6:-3] for name in test_file_strings]
suites = [unittest.defaultTestLoader.loadTestsFromName(name) \
for name in module_strings]
testSuite = unittest.TestSuite(suites)
return testSuite
testSuite = create_test_suite()
test_runner = unittest.TextTestRunner().run(testSuite)
if len(test_runner.failures) == 0 and len(test_runner.errors) == 0:
sys.exit(0)
else:
sys.exit(1)
|
1685953
|
import json
import click
from textwrap import dedent
from pathlib import Path
import os
from tabulate import tabulate
from PIL import Image, ImageOps
from math import ceil, floor
from ih import palette, helpers
DEFAULT = {
"palette": palette.PALETTE_DEFAULT,
"scale": 1,
"colors": 256,
"render": False,
"guidelines": False,
"fileformat": "html",
"outputfolder": '.',
}
OUTPUT_FORMAT = ["html", "term"]
# Guideline padding
GUIDE = 10
# Assuming no colour will be this in our palette.
GUIDECOL = (0, 0, 0, 0)
def nicename(image_name):
if hasattr(image_name, "name"):
image_name = image_name.name
return Path(image_name).name
def debug_data(image_name, scale, colors, palette_name, chartimage, colorsused, fileformat="html"):
import pkg_resources
ih_version = pkg_resources.require("ih")[0].version
data = [
f"Image: {nicename(image_name)}",
f"Scale: {scale}x",
f"Image size: {chartimage.height} x {chartimage.width}",
f"Palette: {palette_name}",
f"Colors used: {colorsused} (of possible {colors})",
f"ih version: {ih_version}",
]
if fileformat == "html":
return f'<div class="debug">' + "<br />".join(data) + "</div>"
else:
return "\n".join(data)
def preprocess_image(
im,
pal,
colors=DEFAULT["colors"],
scale=DEFAULT["scale"],
guidelines=DEFAULT["guidelines"],
):
# Reduce palette to max 256 colors
reduced_palette = palette.reduce_palette(palette=pal, image=im)
palette_image = palette.get_palette_image(reduced_palette)
im = im.resize((int(im.width / scale), int(im.height / scale)))
# Remove black transparency issues with this one weird trick.
alpha = im.convert("RGBA").split()[-1]
bg = Image.new("RGBA", im.size, (255, 255, 255, 255))
bg.paste(im, mask=alpha)
im = bg
im = (
im.convert("RGB")
.convert("P", palette=Image.ADAPTIVE, colors=colors)
.convert("RGB")
)
_im = im.im.convert("P", 0, palette_image.im)
return im._new(_im).convert("RGB")
def get_legend(chartimage):
legend = {}
styles = {}
histogram = sorted(chartimage.getcolors())
STARS = helpers.STARS
for idx, x in enumerate(histogram):
rgb = x[1]
h = helpers.rgb2hex(rgb)
star = STARS[idx % len(STARS)]
sclass = helpers.col_class(h)
# Choose the best text colour
if (rgb[0] * 0.299 + rgb[1] * 0.587 + rgb[2] * 0.114) > 186:
color = "black"
else:
color = "lightgray"
styles[sclass] = {"bg": h, "rgb": rgb, "c": color, "star": star}
legend[helpers.rgb2hex(x[1])] = STARS[idx % len(STARS)]
return legend, styles, histogram
def generate_html_chart(
image_name,
chartimage,
palette_name,
pal,
render=False,
guidelines=False,
data="",
):
html = [f'<html><meta charset="UTF-8" /><title>ih - {nicename(image_name)}</title>']
html.append('<link rel="icon" href="data:image/svg+xml,%3csvg xmlns=%22http://www.w3.org/2000/svg%22 viewBox=%220 0 100 100%22%3e%3ctext y=%22.9em%22 font-size=%2290%22%3e%f0%9f%a7%b6%3c/text%3e%3c/svg%3e" />')
with open(helpers.base_path("styling").joinpath("styling.css")) as s:
html.append("<style>")
if guidelines:
html.append(":root { --border: lightgrey; }")
else:
html.append(":root { --border: black; }")
html.append("".join(s.readlines()) + "</style>")
if render:
html.append(
dedent(
"""
<style>
.s {
background-image: url('%s');
background-size: cover;
border: none;
}
.r { border: none }
.chart { border: 1px solid black }
</style>
"""
% palette.get_thread_image(palette_name)
)
)
legend, styles, histogram = get_legend(chartimage)
after = {}
html.append("<style>")
for _, x in enumerate(styles):
y = styles[x]
html.append(".%s { background-color: %s; color: %s }" % (x, y["bg"], y["c"]))
if not render:
html.append('.%s::after { content: "%s\ufe0e" }' % (x, y["star"]))
if not render:
html.append(
'.%s::after { content: "%s" }'
% (helpers.col_class(helpers.WHITECOL), helpers.WHITESTAR)
)
html.append("</style>")
html.append('<div class="container">')
html.append('<div class="left-content">')
html.append('<div class="legend_div"><table class="legend">')
html.append(
(
f"<tr><td>X</td><td class='label'>{palette.get_identity_name(palette_name)}</td>"
f"<td class='label'>{palette_name} code</td></tr>"
)
)
# Generate legend
for idx, h in enumerate(reversed(histogram)):
count, rgb = h
color = helpers.rgb2hex(rgb)
thread = palette.thread_name(rgb, pal)
code = thread["code"]
html.append(
"<tr>"
+ helpers.color_cell(
color=color, star=legend[color], thread=False, legend=True
)
+ "<td>{}</td><td>{}</td></tr>".format(count, code)
)
html.append("</table></div>")
html.append(f'<div class="debug">{data}</div>')
# If using guidelines, enable printhacks
if guidelines:
with open(helpers.base_path("styling").joinpath("styling.html")) as s:
html.append("".join(s.readlines()))
html.append("</div>") # end left-content
html.append("<div class='page-break'></div>") # force page break
html.append('<div class="right-content"><div class="chart">')
# If using guidelines, expand the image to a whole number of guidelines first.
if guidelines:
chartimage = chartimage.convert("RGBA")
xpad = GUIDE - (chartimage.width % GUIDE)
ypad = GUIDE - (chartimage.height % GUIDE)
padding = (floor(ypad / 2), ceil(xpad / 2), ceil(ypad / 2), floor(xpad / 2))
chartimage = ImageOps.expand(chartimage, padding, fill=GUIDECOL)
# Generate Chart (TODO(glasnt): make this less terrible)
CENTER = True
for y in range(0, chartimage.height):
row = []
for x in range(0, chartimage.width):
guide_x, guide_y = False, False
if guidelines:
if x % GUIDE == GUIDE - 1:
guide_x = True
if y % GUIDE == GUIDE - 1:
guide_y = True
rgb = chartimage.getpixel((x, y))
if rgb == GUIDECOL:
row.append(helpers.guide_cell([guide_x, guide_y]))
continue
p = helpers.rgb2hex(rgb)
center_flag = False
if not render:
if CENTER:
if chartimage.height / 2 - 1 <= y and chartimage.width / 2 - 1 <= x:
center_flag = True
CENTER = False
row.append(
helpers.color_cell(
color=p,
star=legend[p],
center=center_flag,
guide=[guide_x, guide_y],
)
)
html.append("<div class='r'>" + "".join(row) + "</div>")
html.append("</div></div></div></html>")
return "\n".join(html)
def save_chart(html, image, fileformat, outputfolder):
if fileformat == "html":
os.makedirs(outputfolder, exist_ok=True)
if type(image) == str:
imna = image
else:
imna = image.name
fn = "_".join(imna.split("/")[-1].split(".")[:-1])
outfile = os.path.join(outputfolder, f"{fn}.{fileformat}")
with open(outfile, "w", encoding="utf-8") as f:
f.write(html)
return outfile
def generate_term_chart(chartimage, pal, render, palette_name, data):
def c(text, bg=None, fg=None):
def color(rgb, code):
if not rgb:
return ""
R, G, B = rgb
return f"\033[{code};2;{R};{G};{B}m"
def foreground(rgb=None):
return color(rgb, "38")
def background(rgb=None):
return color(rgb, "48")
def reset():
return "\033[0;00m"
result = foreground(fg) + background(bg) + text + reset()
return result
def star(rgb, render=False):
p = helpers.rgb2hex(rgb)
if render:
return c(" ", bg=rgb)
else:
return c(legend[p], fg=rgb) + " "
legend, styles, histogram = get_legend(chartimage)
headers = ["*", palette.get_identity_name(palette_name), f"{palette_name} code"]
table = []
result = "\n"
for idx, h in enumerate(reversed(histogram)):
count, rgb = h
color = helpers.rgb2hex(rgb)
thread = palette.thread_name(rgb, pal)
code = thread["code"]
symbol = star(rgb, render=render)
table.append([symbol, str(count), code])
result += tabulate(table, headers=headers)
result += "\n\n"
for y in range(0, chartimage.height):
row = []
for x in range(0, chartimage.width):
rgb = chartimage.getpixel((x, y))
row.append(star(rgb, render=render))
result += "".join(row) + "\n"
result += "\n" + data
return result
def chart(
image=None,
image_obj=None,
palette_name=DEFAULT["palette"], # PALETTE_DEFAULT,
scale=DEFAULT["scale"],
colors=DEFAULT["colors"],
render=DEFAULT["render"],
guidelines=DEFAULT["guidelines"],
fileformat=DEFAULT["fileformat"],
outputfolder=DEFAULT["outputfolder"],
):
# can't have both guidelines and rendering
if render:
guidelines = False
image_name = image
if image_name:
im = Image.open(image_name)
elif image_obj:
im = image_obj
else:
raise ValueError("Must provide an image filename or Image object")
pal = palette.get_palette(palette_name)
chartimage = preprocess_image(
im, pal=pal, colors=colors, scale=scale, guidelines=guidelines
)
data = debug_data(
image_name=image_name,
scale=scale,
colors=colors,
palette_name=palette_name,
chartimage=chartimage,
fileformat=fileformat,
colorsused=len(sorted(chartimage.getcolors())),
)
if fileformat == "html":
chart = generate_html_chart(
image_name=image_name,
chartimage=chartimage,
palette_name=palette_name,
pal=pal,
render=render,
guidelines=guidelines,
data=data,
)
saved = save_chart(chart, image_name, fileformat, outputfolder)
return saved
elif fileformat == "term":
chart = generate_term_chart(
chartimage, pal=pal, render=render, palette_name=palette_name, data=data
)
return chart
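# --- Editorial usage sketch (assumption, not part of the original module) ---
# 'pattern.png' is a hypothetical input image; fileformat="term" returns the
# chart as a string instead of writing an HTML file.
#
#   if __name__ == "__main__":
#       print(chart(image="pattern.png", scale=2, fileformat="term"))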
|
1685964
|
import html
import json
import logging
import re
import threading
import time
import urllib.error
import urllib.parse
import urllib.request
from .utils import *
from .upload import uploadFile
from . import auth
CALL_INTERVAL = 0.35
MAX_CALLS_IN_EXECUTE = 25
def retOrCall(s, *p):
return s(*p) if callable(s) else s
def jsonToUTF8(d):
if isinstance(d, str):
try:
return d.encode('latin1').decode('utf-8')
except UnicodeDecodeError:
return d.encode('latin1').decode('cp1251')
elif isinstance(d, list):
return [jsonToUTF8(i) for i in d]
elif isinstance(d, dict):
return {jsonToUTF8(i): jsonToUTF8(d[i]) for i in d}
else:
return d
class VkApi(VkMethodDispatcher):
api_version = '5.95'
longpoll_version = 3
def __init__(self, *, ignored_errors=None, timeout=5, log_file='', captcha_handler=None, token_file=''):
self.log_file = log_file
self.token_file = token_file
if self.log_file:
logger.info('Logging enabled')
open(self.log_file, 'w').close()
self.limiter = RateLimiter(CALL_INTERVAL)
self.ignored_errors = ignored_errors or {}
self.timeout = timeout
self.longpoll = {'server': '', 'key': '', 'ts': 0}
self.ch = captcha_handler
self.token = None
self.login_params = None
self.getToken()
def _callMethod(self, method, kwargs):
return self.apiCall(method, kwargs)
def execute(self, code):
return self.apiCall('execute', {"code": code}, full_response=True)
@staticmethod
def encodeApiCall(method, params):
return "API." + method + '(' + json.dumps({i:params[i] for i in params if not i.startswith(')')}, ensure_ascii=False) + ')'
def writeLog(self, msg):
if self.log_file:
with open(self.log_file, 'a') as f:
f.write('[{}]\n'.format(time.strftime('%d.%m.%Y %H:%M:%S', time.localtime())) + msg + '\n\n')
def apiCall(self, method, params, full_response=False):
params['v'] = self.api_version
encoded = urllib.parse.urlencode({i: params[i] for i in params if not i.startswith('_')})
post_params = None
if len(encoded) > 1024:
url = 'https://api.vk.com/method/' + method + '?access_token=' + (params.get('_token') or self.getToken())
post_params = encoded.encode()
else:
url = 'https://api.vk.com/method/' + method + '?' + encoded + '&access_token=' + (params.get('_token') or self.getToken())
with self.limiter:
now = time.time()
try:
json_string = urllib.request.urlopen(url, data=post_params, timeout=self.timeout).read()
except OSError as e:
err = str(e)
logger.warning(method + ' failed ({})'.format(html.escape(err.strip())))
time.sleep(1)
return self.apiCall(method, params, full_response)
except Exception as e:
if params.get('_retry'):
logger.exception('({}) {}: {}'.format(method, e.__class__.__name__, str(e)))
return None
else:
time.sleep(1)
logger.warning('({}) {}: {}, retrying'.format(method, e.__class__.__name__, str(e)))
params['_retry'] = True
return self.apiCall(method, params, full_response)
try:
try:
data_array = json.loads(json_string.decode('utf-8'))
except UnicodeDecodeError:
logger.warning('Invalid JSON received, trying to parse anyway')
data_array = jsonToUTF8(json.loads(json_string.decode('latin1')))
except json.decoder.JSONDecodeError:
logger.error('Invalid JSON')
data_array = None
self.writeLog('method: {}, params: {}\nresponse: {}'.format(method + (' (POST)' if post_params else ''), json.dumps(params), json.dumps(data_array)))
duration = time.time() - now
if duration > self.timeout:
logger.warning('{} timeout'.format(method))
if data_array is None:
logger.error('data_array is None')
return None
if 'response' in data_array and not full_response:
if self.ch:
self.ch.reset(params)
return data_array['response']
elif 'error' in data_array:
code = data_array['error']['error_code']
if code == 14: # Captcha needed
if self.ch:
self.ch.handle(data_array, params)
else:
logger.warning('Captcha needed')
time.sleep(5)
return self.apiCall(method, params, full_response)
elif code == 5: # Auth error
if data_array['error']['error_msg'] == 'User authorization failed: method is unavailable with group auth.':
raise VkError('User token required')
self.login()
return self.apiCall(method, params, full_response)
elif code == 6: # Too many requests per second
logger.warning('{}: too many requests per second'.format(method))
time.sleep(2)
return self.apiCall(method, params, full_response)
elif code == 17: # Validation required
logger.warning('Validation required')
self.validate(data_array['error']['redirect_uri'])
time.sleep(1)
return self.apiCall(method, params, full_response)
elif self.processError(method, params, data_array):
time.sleep(1)
params['_retry'] = True
return self.apiCall(method, params, full_response)
else:
return None
elif full_response:
return data_array
else:
return self.apiCall(method, params, full_response)
def processError(self, method, params, response):
code = response['error']['error_code']
if (code, method) not in self.ignored_errors and (code, '*') not in self.ignored_errors:
logger.error('{}, params {}\ncode {}: {}'.format(method, json.dumps(params), code, response['error'].get('error_msg')))
return False
try:
handler = self.ignored_errors[(code, method)]
except KeyError:
handler = self.ignored_errors[(code, '*')]
if not handler:
return False
if params.get('_retry') or not handler[1]:
logger.warning(retOrCall(handler[0], params, method))
return False
else:
logger.warning(retOrCall(handler[0], params, method) + ', retrying')
return True
def login(self):
if not self.login_params:
logger.critical('Unable to log in, no login_params provided')
raise VkError('login_params required')
logger.info('Fetching new token')
self.token = auth.login(self.login_params['username'], self.login_params['password'], self.login_params['client_id'], self.login_params['perms'])
if not self.token:
logger.critical('Login failed')
if self.token_file:
with open(self.token_file, 'w') as f:
f.write(self.token)
def getToken(self):
if not self.token:
try:
self.token = open(self.token_file).read().strip()
except FileNotFoundError:
self.token = ''
return self.token
def initLongpoll(self):
r = self.messages.getLongPollServer(lp_version=self.longpoll_version)
if not r:
logger.warning('Unable to initialize longpoll')
self.longpoll = {}
return
self.longpoll = {'server': r['server'], 'key': r['key'], 'ts': self.longpoll.get('ts') or r['ts']}
def getLongpoll(self, mode=2):
if not self.longpoll.get('server'):
self.initLongpoll()
if not self.longpoll:
return []
url = 'https://{}?act=a_check&key={}&ts={}&wait=25&mode={}&version={}'.format(
self.longpoll['server'], self.longpoll['key'], self.longpoll['ts'], mode, self.longpoll_version)
try:
json_string = urllib.request.urlopen(url, timeout=30).read()
except urllib.error.HTTPError as e:
logger.warning('longpoll http error ' + str(e.code))
return []
except OSError as e:
logger.warning('longpoll failed ({})'.format(html.escape(str(e).strip())))
time.sleep(1)
return []
data_array = json.loads(json_string.decode('utf-8'))
self.writeLog('longpoll request\nresponse: {}'.format(json.dumps(data_array)))
if 'ts' in data_array:
self.longpoll['ts'] = data_array['ts']
if 'updates' in data_array:
return data_array['updates']
elif data_array['failed'] != 1:
self.initLongpoll()
return []
else:
return self.getLongpoll(mode)
def validate(self, url):
if not self.login_params or '@' in self.login_params['username']:
logger.critical("I don't know your phone number")
raise VkError('Phone number required')
page = urllib.request.urlopen(url).read().decode()
url_re = re.compile(r'/(login.php\?act=security_check&[^"]+)"')
post_url = 'https://m.vk.com/' + url_re.search(page).group(1)
phone = self.login_params['username'][-10:-2]
urllib.request.urlopen(post_url, ('code=' + phone).encode('utf-8'))
def uploadMessagePhoto(self, paths):
if isinstance(paths, str):
paths = [paths]
server = self.photos.getMessagesUploadServer()
result = []
with self.delayed() as dm:
for path in paths:
resp = uploadFile(server['upload_url'], path, 'photo')
self.writeLog('uploading photo {} to {}\nresponse: {}'.format(path, server['upload_url'], resp))
if resp['photo'] != '[]':
dm.photos.saveMessagesPhoto(photo=resp['photo'], server=resp['server'], hash=resp['hash']).set_callback(lambda a, b: result.extend(b or []))
return result
def delayed(self, *, max_calls=MAX_CALLS_IN_EXECUTE):
return DelayedManager(self, max_calls)
|
1685990
|
import sys
import os
import math
import random
import numpy as np
import matplotlib.pyplot as plt
# our implementations
import run_ridge
import ridge
import theory
import covar
import output_pert
import naive_covar
#MIN_EPS = 0.00001
#MAX_EPS_CAP = 20.0
#MAX_NAIVE_EPS = 1000.0 # this one can be larger due to doubling
usage_str = """
Usage: python3 run_many.py datafilename lambda alpha gamma max_norm max_steps num_trials outputdir
Runs 'num_trials' separate trials, writing the output to the files
outputdir/trial_1.txt, outputdir/trial_2.txt, ....
--------
For other parameters:
""" + run_ridge.usage_str
def main(X, Y, lamb, alpha, gamma, max_norm, max_steps, num_trials, outputdir):
n = len(X)
dim = len(X[0])
if max_norm <= 0.0:
max_norm = ridge.compute_max_norm(lamb)
sv_sens = ridge.get_sv_sensitivity(max_norm, n)
opt_beta_sens = ridge.compute_opt_sensitivity(n, dim, lamb, max_norm)
compute_err_func = lambda X,Y,beta_hat: ridge.compute_err(X, Y, lamb, beta_hat)
# Compute opt
Sigma, R, opt_beta, opt_res = run_ridge.get_matrices_and_opt(X, Y, lamb)
opt_err = opt_res[0]
data = (X, Y, opt_err)
min_eps = 1.0 / n
max_covar_eps = 4.0 * theory.covar_get_epsilon(alpha, n, dim, max_norm)
max_naive_eps = max_covar_eps
max_output_eps = 4.0 * theory.output_pert_linreg_get_epsilon(alpha, n, dim, lamb, max_norm)
# Create output folder and write value of alpha
os.makedirs(outputdir)
with open(outputdir + "/alpha.txt", "w") as f:
f.write(str(alpha) + "\n")
# Compute results of methods and save them
for trial_ind in range(num_trials):
covar_beta_hat, covar_res = covar.run_covar(Sigma, R, alpha, gamma, max_norm, max_steps, min_eps, max_covar_eps, sv_sens, data, compute_err_func)
output_beta_hat, output_res = output_pert.run_output_pert(opt_beta, alpha, gamma, max_norm, max_steps, min_eps, max_output_eps, sv_sens, opt_beta_sens, data, compute_err_func)
naive_beta_hat, naive_res = naive_covar.run_naive(Sigma, R, alpha, gamma, max_norm, min_eps, max_naive_eps, sv_sens, data, compute_err_func)
with open(outputdir + "/trial_" + str(trial_ind+1) + ".txt", "w") as f:
f.write(run_ridge.stringify(opt_res))
f.write("\n")
f.write(run_ridge.stringify(opt_beta))
f.write("\n")
for beta, res in [(covar_beta_hat, covar_res), (output_beta_hat, output_res), (naive_beta_hat, naive_res)]:
success, excess_err, sv_eps, my_eps, index = res
two_norm = np.linalg.norm(beta)
mse = ridge.compute_err(X, Y, 0.0, beta)
f.write(run_ridge.stringify(("1" if success else "0", excess_err, sv_eps, my_eps, index, two_norm, mse)))
f.write("\n")
f.write(run_ridge.stringify(beta))
f.write("\n")
# when run as script, read parameters from input
# (other python scripts can call main(), above, directly)
if __name__ == "__main__":
X, Y, lamb, alpha, gamma, max_norm, max_steps = run_ridge.parse_inputs(sys.argv)
try:
num_trials = int(sys.argv[7])
outputdir = sys.argv[8]
except (IndexError, ValueError):
print(usage_str)
exit(0)
main(X, Y, lamb, alpha, gamma, max_norm, max_steps, num_trials, outputdir)
|
1686010
|
from scout.build.hpo import build_hpo_term
import pytest
def test_build_hpo_term(adapter, test_hpo_info):
## GIVEN a hpo term
## WHEN building the hpo term
hpo_obj = build_hpo_term(test_hpo_info)
## THEN assert that the term has the correct information
assert hpo_obj["_id"] == hpo_obj["hpo_id"] == test_hpo_info["hpo_id"]
assert hpo_obj["description"] == test_hpo_info["description"]
assert len(hpo_obj["genes"]) == 2
@pytest.mark.parametrize("key", ["hpo_id", "description"])
def test_build_hpo_term_missing_key(adapter, test_hpo_info, key):
## GIVEN a dictionary with hpo information
## WHEN deleting the key
test_hpo_info.pop(key)
## THEN calling build_hpo_term() will raise KeyError
with pytest.raises(KeyError):
build_hpo_term(test_hpo_info)
|
1686108
|
import brownie
def test_set_royalties_receiver(token, owner, alice, royalty_wallet):
previous_royalties_receiver = token.royaltiesReceiver().return_value
token.setRoyaltiesReceiver(alice, {"from": owner})
new_royalties_receiver = token.royaltiesReceiver().return_value
assert previous_royalties_receiver == royalty_wallet
assert previous_royalties_receiver != new_royalties_receiver
assert new_royalties_receiver == alice
def test_set_royalties_receiver_not_owner(token, bob):
with brownie.reverts():
token.setRoyaltiesReceiver(bob, {"from": bob})
def test_set_royalties_receiver_same_address(token, owner, royalty_wallet):
with brownie.reverts():
token.setRoyaltiesReceiver(royalty_wallet, {"from": owner})
|
1686130
|
from __future__ import division
import matplotlib
# matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
import numpy as np
from solution import general_secondorder_ode_fd, poisson_square
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
from matplotlib import cm
import matplotlib.colors as mcolors
from scipy.sparse import spdiags
from scipy.sparse.linalg import spsolve
def ExercisePoisson():
from numpy import sin, cos, pi
# Domain: [0,1]x[0,1]
a1,b1 = 0.,1.
c1,d1 = 0.,1.
n=100
# Example1: Laplace's equation (Poisson with no source)
def bcs(x,y):
return x**3.
def source(x,y):
return 0.
# # Example2: Poisson's equation
# def bcs(x,y): return sin(pi*x)*cos(2.*pi*y)
# def source(x,y): return -5.*(pi**2.)*bcs(x,y)
# # Example3: Poisson's equation
# def bcs(x,y): return sin(2.*pi*y)*cos(pi*x)
# def source(x,y): return -5.*(pi**2.)*bcs(x,y)
# # Example4: Poisson's equation
# def bcs(x,y): return 1.-x +x*y + (1./2)*sin(pi*x)*sin(pi*y)
#
# def source(x,y): return -(pi**2)*sin(pi*x)*sin(pi*y)
z=poisson_square(a1,b1,c1,d1,n,bcs,source)
print '---------------'
print "Computation successful"
print '---------------'
# Plotting data
fig = plt.figure()
#---- First subplot: Numerical Solution
# ax = fig.add_subplot(121, projection='3d')
ax = fig.gca(projection='3d')
ax.set_xlabel('X'); ax.set_ylabel('Y'); ax.set_zlabel('Z')
x, y = np.linspace(a1,b1,n+1), np.linspace(c1,d1,n+1)
xv, yv = np.meshgrid(x, y)
xv, yv = xv.T, yv.T
surf = ax.plot_surface(xv, yv, z, rstride=2, cstride=2, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
# #---- Second subplot: Exact Solution
# ax2 = fig.add_subplot(122, projection='3d')
# ax2.set_xlabel('X'); ax2.set_ylabel('Y'); ax2.set_zlabel('Z')
# surf2 = ax2.plot_surface(xv, yv, bcs(xv,yv), rstride=2, cstride=2, cmap=cm.coolwarm,
# linewidth=0, antialiased=False)
print "Maximum Error = \n", np.max(np.abs( z-bcs(xv,yv) ) )
# plt.savefig('Laplace.png',dpi=100)
# plt.clf()
plt.show()
# if True: return
#
# num_approx = 7 # Number of Approximations
# N = np.array([10*2**(j) for j in range(num_approx)])
# h, max_error = (b1-a1)/N[:-1], np.ones(num_approx-1)
#
# num_sol_best = poisson_square(a1,b1,c1,d1,N[-1],bcs,source)
# for j in range(len(N)-1):
# num_sol = poisson_square(a1,b1,c1,d1,N[j],bcs,source)
# max_error[j] = np.max(np.abs( num_sol- num_sol_best[::2**(num_approx-j-1), ::2**(num_approx-j-1)] ) )
# plt.loglog(h,max_error,'.-r',label="$E(h)$")
# plt.loglog(h,h**(2.),'-k',label="$h^{\, 2}$")
# plt.xlabel("$h$")
# plt.legend(loc='best')
# print "The order of the finite difference approximation is about ", ( (np.log(max_error[0]) -
# np.log(max_error[-1]) )/( np.log(h[0]) - np.log(h[-1]) ) ), "."
# plt.savefig('./Poisson_Error.pdf')
# plt.show()
return
def plotRhos():
def source(X,Y):
"""
Takes arbitrary arrays of coordinates X and Y and returns an array of the same shape
representing the charge density of nested charged squares
"""
src = np.zeros(X.shape)
src[ np.logical_or(
np.logical_and( np.logical_or(abs(X-1.5) < .1,abs(X+1.5) < .1) ,abs(Y) < 1.6),
np.logical_and( np.logical_or(abs(Y-1.5) < .1,abs(Y+1.5) < .1) ,abs(X) < 1.6))] = 1
src[ np.logical_or(
np.logical_and( np.logical_or(abs(X-0.9) < .1,abs(X+0.9) < .1) ,abs(Y) < 1.0),
np.logical_and( np.logical_or(abs(Y-0.9) < .1,abs(Y+0.9) < .1) ,abs(X) < 1.0))] = -1
return src
#Generate a color dictionary for use with LinearSegmentedColormap
#that places red and blue at the min and max values of data
#and white when data is zero
def genDict(data):
zero = 1/(1 - np.max(data)/np.min(data))
cdict = {'red': [(0.0, 1.0, 1.0),
(zero, 1.0, 1.0),
(1.0, 0.0, 0.0)],
'green': [(0.0, 0.0, 0.0),
(zero, 1.0, 1.0),
(1.0, 0.0, 0.0)],
'blue': [(0.0, 0.0, 0.0),
(zero, 1.0, 1.0),
(1.0, 1.0, 1.0)]}
return cdict
a1 = -2.
b1 = 2.
c1 = -2.
d1 = 2.
n =100
X = np.linspace(a1,b1,n)
Y = np.linspace(c1,d1,n)
X,Y = np.meshgrid(X,Y)
rho= source(X,Y)
plt.imshow(rho,cmap = mcolors.LinearSegmentedColormap('cmap', genDict(rho)))
plt.colorbar(label="Relative Charge")
# plt.savefig("./pipesRho.pdf",dpi=100)
plt.show()
plt.clf()
return
def plotVs():
#
# def poisson_square(a1,b1,c1,d1,n,bcs, source):
# #n = number of subintervals
# # We discretize in the x dimension by a1 = x_0 < x_1< ... < x_n=b1, and
# # we discretize in the y dimension by c1 = y_0 < y_1< ... < y_n=d1.
# # This means that we have interior points {x_1, ..., x_{n-1}}\times {y_1, ..., y_{n-1}}
# # or {x_1, ..., x_m}\times {y_1, ..., y_m} where m = n-1.
# # In Python, this is indexed as {x_0, ..., x_{m-1}}\times {y_0, ..., y_{m-1}}
# # We will have m**2 pairs of interior points, and m**2 corresponding equations.
# # We will organize these equations by their y coordinates: all equations centered
# # at (x_i, y_0) will be listed first, then (x_i, y_1), and so on till (x_i, y_{m-1})
# delta_x, delta_y, h, m = (b1-a1)/n, (d1-c1)/n, (b1-a1)/n, n-1
#
# #### Here we construct the matrix A ####
# ############################## Slow #################################
# # D, diags = np.ones((1,m**2)), np.array([-m,m])
# # data = np.concatenate((D, D),axis=0)
# # A = h**(-2)*spdiags(data,diags,m**2,m**2).asformat('lil')
# # D = np.ones((1,m))
# # diags, data = np.array([0,-1,1]), np.concatenate((-4.*D,D,D),axis=0)
# # temp = h**(-2)*spdiags(data,diags,m,m).asformat('lil')
# # for i in xrange(m): A[i*m:(i+1)*m,i*m:(i+1)*m] = temp
#
# ############################## Much Faster ################################
# D1,D2,D3 = -4*np.ones((1,m**2)), np.ones((1,m**2)), np.ones((1,m**2))
# Dm1, Dm2 = np.ones((1,m**2)), np.ones((1,m**2))
# for j in range(0,D2.shape[1]):
# if (j%m)==m-1: D2[0,j]=0
# if (j%m)==0: D3[0,j]=0
# diags = np.array([0,-1,1,-m,m])
# data = np.concatenate((D1,D2,D3,Dm1,Dm2),axis=0) # This stacks up rows
# A = 1./h**2.*spdiags(data, diags, m**2,m**2).asformat('csr') # This appears to work correctly
#
# #### Here we construct the vector b ####
# b, Array = np.zeros(m**2), np.linspace(0.,1.,m+2)[1:-1]
# # In the next line, source represents the inhomogenous part of Poisson's equation
# for j in xrange(m): b[j*m:(j+1)*m] = source(a1+(b1-a1)*Array, c1+(j+1)*h*np.ones(m) )
#
# # In the next four lines, bcs represents the Dirichlet conditions on the boundary
# # y = c1+h, d1-h
# b[0:m] = b[0:m] - h**(-2.)*bcs(a1+(b1-a1)*Array,c1*np.ones(m))
# b[(m-1)*m : m**2] = b[(m-1)*m : m**2] - h**(-2.)*bcs(a1+(b1-a1)*Array,d1*np.ones(m))
# # x = a1+h, b1-h
# b[0::m] = b[0::m] - h**(-2.)*bcs(a1*np.ones(m),c1+(d1-c1)*Array)
# b[(m-1)::m] = b[(m-1)::m] - h**(-2.)*bcs(b1*np.ones(m),c1+(d1-c1)*Array)
#
# #### Here we solve the system A*soln = b ####
# soln = spsolve(A,b) # Using the conjugate gradient method: (soln, info) = cg(A,b)
#
# z = np.zeros((m+2,m+2) )
# for j in xrange(m): z[1:-1,j+1] = soln[j*m:(j+1)*m]
#
# x, y = np.linspace(a1,b1,m+2), np.linspace(c1,d1,m+2)
# z[:,0], z[:,m+1] = bcs(x,c1*np.ones(len(x)) ), bcs(x,d1*np.ones(len(x)) )
# z[0,:], z[m+1,:] = bcs(a1*np.ones(len(x)),y), bcs(b1*np.ones(len(x)),y)
# return z
#
#
def source(X,Y):
"""
Takes arbitrary arrays of coordinates X and Y and returns an array of the same shape
representing the charge density of nested charged squares
"""
src = np.zeros(X.shape)
src[ np.logical_or(
np.logical_and( np.logical_or(abs(X-1.5) < .1,abs(X+1.5) < .1) ,abs(Y) < 1.6),
np.logical_and( np.logical_or(abs(Y-1.5) < .1,abs(Y+1.5) < .1) ,abs(X) < 1.6))] = 1
src[ np.logical_or(
np.logical_and( np.logical_or(abs(X-0.9) < .1,abs(X+0.9) < .1) ,abs(Y) < 1.0),
np.logical_and( np.logical_or(abs(Y-0.9) < .1,abs(Y+0.9) < .1) ,abs(X) < 1.0))] = -1
return src
#Generate a color dictionary for use with LinearSegmentedColormap
#that places red and blue at the min and max values of data
#and white when data is zero
def genDict(data):
zero = 1/(1 - np.max(data)/np.min(data))
cdict = {'red': [(0.0, 1.0, 1.0),
(zero, 1.0, 1.0),
(1.0, 0.0, 0.0)],
'green': [(0.0, 0.0, 0.0),
(zero, 1.0, 1.0),
(1.0, 0.0, 0.0)],
'blue': [(0.0, 0.0, 0.0),
(zero, 1.0, 1.0),
(1.0, 1.0, 1.0)]}
return cdict
a1 = -2.
b1 = 2.
c1 = -2.
d1 = 2.
n = 5
# X = np.linspace(a1,b1,n)
# Y = np.linspace(c1,d1,n)
# X,Y = np.meshgrid(X,Y)
#
# rho= source(X,Y)
V = poisson_square(a1,b1,c1,d1,100,lambda x, y:0, lambda X,Y: source(X,Y))
cdict = genDict(V)
plt.imshow(V,cmap = mcolors.LinearSegmentedColormap('CustomMap', cdict))
plt.colorbar(label="Voltage")
# plt.savefig("./pipesV.png",dpi=100)
plt.show()
plt.clf()
return
if __name__ == "__main__":
# example()
# Exercise1()
# ExercisePoisson()
plotRhos()
# plotVs()
|
1686162
|
import numpy as np
import os
import pickle
import re
import sys
import argparse
class Preprocess():
def __init__(self, path_to_babi):
# path_to_babi example: '././babi_original'
self.path_to_babi = os.path.join(path_to_babi, "tasks_1-20_v1-2/en-valid-10k")
self.train_paths = None
self.val_paths = None
self.test_paths = None
self.path_to_processed = "./babi_processed"
self._c_word_set = set()
self._q_word_set = set()
self._a_word_set = set()
self._cqa_word_set = set()
self.c_max_len = 20
self.s_max_len = 0
self.q_max_len = 0
self.mask_index = 0
def set_path(self):
"""
set list of train, val, and test dataset paths
Returns
train_paths: list of train dataset paths for all task 1 to 20
val_paths: list of val dataset paths for all task 1 to 20
test_paths: list of test dataset paths for all task 1 to 20
"""
train_paths = []
val_paths = []
test_paths= []
for dirpath, dirnames, filenames in os.walk(self.path_to_babi):
for filename in filenames:
if 'train' in filename:
train_paths.append(os.path.join(dirpath, filename))
elif 'val' in filename:
val_paths.append(os.path.join(dirpath, filename))
else:
test_paths.append(os.path.join(dirpath, filename))
self.train_paths = sorted(train_paths)
self.val_paths = sorted(val_paths)
self.test_paths = sorted(test_paths)
def _split_paragraphs(self, path_to_file):
"""
split into paragraphs as babi dataset consists of multiple 1~n sentences
Args
file_path: path of the data
Returns
paragraphs: list of paragraph
"""
with open(path_to_file, 'r') as f:
babi = f.readlines()
paragraph = []
paragraphs = []
alphabet = re.compile('[a-zA-Z]')
for d in babi:
if d.startswith('1 '):
if paragraph:
paragraphs.append(paragraph)
paragraph = []
mark = re.search(alphabet, d).span()[0]
paragraph.append(d[mark:])
# flush the final paragraph; the loop above only appends when it sees the next '1 ' line
if paragraph:
paragraphs.append(paragraph)
return paragraphs
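# A minimal sketch (hypothetical lines, not taken from the bAbI files) of how
# _split_paragraphs groups sentences: a new paragraph starts at every '1 ' line.
#
# babi = ['1 Mary moved to the bathroom.\n',
# '2 Where is Mary?\tbathroom\t1\n',
# '1 John went to the hallway.\n']
# -> [['Mary moved to the bathroom.\n', 'Where is Mary?\tbathroom\t1\n'],
# ['John went to the hallway.\n']]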
def _split_clqa(self, paragraphs, show_print= True):
"""
for each paragraph, split into context, label, question and answer
Args
paragraphs: list of paragraphs
Returns
context: list of contexts
label: list of labels
question: list of questions
answer: list of answers
"""
context = []
label = []
question = []
answer = []
for paragraph in paragraphs:
for i, sent in enumerate(paragraph):
if '?' in sent:
related_para = [para.strip().lower() for para in paragraph[:i] if '?' not in para][::-1]
if len(related_para) > 20:
related_para = related_para[:20]
context.append(related_para)
label.append([i for i in range(len(related_para))])
q_a_ah = sent.split('\t')
question.append(q_a_ah[0].strip().lower())
answer.append(q_a_ah[1].strip().lower())
# check
if show_print:
if (len(question) == len(answer)) & (len(answer) == len(context)) & (len(context) == len(label)):
print("bAbI is well separated into question, answer, context, and label!")
print("total: {}".format(len(label)))
else:
print("Something is missing! check again")
print("the number of questions: {}".format(len(question)))
print("the number of answers: {}".format(len(answer)))
print("the number of contexts: {}".format(len(context)))
print("the number of labels: {}".format(len(label)))
return context, label, question, answer
def split_all_clqa(self, paths, show_print= True):
"""
merge all 20 babi tasks into one dataset
Args
paths: list of path of 1 to 20 task dataset
Returns
contexts: list of contexts of all 20 tasks
labels: list of labels of all 20 tasks
questions: list of questions of all 20 tasks
answers: list of answers of all 20 tasks
"""
if paths is None:
print('path is None, run set_path() first!')
else:
contexts = []
labels = []
questions = []
answers = []
for path in paths:
if show_print:
print('=================')
paragraphs = self._split_paragraphs(path)
if show_print:
print("data: {}".format(os.path.basename(path)))
context, label, question, answer = self._split_clqa(paragraphs, show_print=show_print)
contexts.extend(context)
labels.extend(label)
questions.extend(question)
answers.extend(answer)
return contexts, labels, questions, answers
def _set_word_set(self):
c_word_set = set()
q_word_set = set()
a_word_set = set()
train_context, train_label, train_question, train_answer = self.split_all_clqa(self.train_paths, show_print=False)
val_context, val_label, val_question, val_answer = self.split_all_clqa(self.val_paths, show_print=False)
test_context, test_label, test_question, test_answer = self.split_all_clqa(self.test_paths, show_print=False)
list_of_context = [train_context, val_context, test_context]
list_of_question = [train_question, val_question, test_question]
list_of_answer = [train_answer, val_answer, test_answer]
for list_ in list_of_context:
for para in list_:
for sent in para:
sent = sent.replace(".", " .")
sent = sent.replace("?", " ?")
sent = sent.split()
c_word_set.update(sent)
for list_ in list_of_question:
for sent in list_:
sent = sent.replace(".", " .")
sent = sent.replace("?", " ?")
sent = sent.split()
q_word_set.update(sent)
for answers in list_of_answer:
for answer in answers:
answer = answer.split(',')
a_word_set.update(answer)
a_word_set.add(',')
self._c_word_set = c_word_set
self._q_word_set = q_word_set
self._a_word_set = a_word_set
self._cqa_word_set = c_word_set.union(q_word_set).union(a_word_set)
def _index_context(self, contexts):
c_word_index = dict()
for i, word in enumerate(self._c_word_set):
c_word_index[word] = i+1 # index 0 for zero padding
indexed_cs = []
for context in contexts:
indexed_c = []
for sentence in context:
sentence = sentence.replace(".", " .")
sentence = sentence.replace("?", " ?")
sentence = sentence.split()
indexed_s = []
for word in sentence:
indexed_s.append(c_word_index[word])
indexed_c.append(indexed_s)
indexed_cs.append(np.array(indexed_c))
return indexed_cs
def _index_label(self, labels):
indexed_ls = []
for label in labels:
indexed_ls.append(np.eye(self.c_max_len)[label])
return indexed_ls
def _index_question(self, questions):
q_word_index = dict()
for i, word in enumerate(self._q_word_set):
q_word_index[word] = i+1 # index 0 for zero padding
indexed_qs = []
for sentence in questions:
sentence = sentence.replace(".", " .")
sentence = sentence.replace("?", " ?")
sentence = sentence.split()
indexed_s = []
for word in sentence:
indexed_s.append(q_word_index[word])
indexed_qs.append(np.array(indexed_s))
return indexed_qs
def _index_answer(self, answers):
a_word_index = dict()
a_word_dict = dict()
for i, word in enumerate(self._cqa_word_set):
a_word_dict[i] = word
if word in self._a_word_set:
answer_one_hot = np.zeros(len(self._cqa_word_set), dtype=np.float32)
answer_one_hot[i] = 1
a_word_index[word] = answer_one_hot
indexed_as = []
for answer in answers:
if ',' in answer:
multiple_answer = [a_word_index[',']]
for a in answer.split(','):
indexed_a = a_word_index[a]
multiple_answer.append(indexed_a)
indexed_as.append(np.sum(multiple_answer, axis=0))
else:
indexed_a = a_word_index[answer]
indexed_as.append(indexed_a)
if not os.path.exists(self.path_to_processed):
os.makedirs(self.path_to_processed)
with open(os.path.join(self.path_to_processed, 'answer_word_dict.pkl'), 'wb') as f:
pickle.dump(a_word_dict, f)
return indexed_as
def masking(self, context_index, label_index, question_index):
context_masked = []
question_masked = []
label_masked = []
context_real_len = []
question_real_len = []
# cs: one context
for cs, l, q in zip(context_index, label_index, question_index):
context_masked_tmp = []
context_real_length_tmp = []
# cs: many sentences
for context in cs:
context_real_length_tmp.append(len(context))
diff = self.s_max_len - len(context)
if (diff > 0):
context_mask = np.append(context, [self.mask_index]*diff, axis=0)
context_masked_tmp.append(context_mask.tolist())
else:
context_masked_tmp.append(context)
diff_c = self.c_max_len - len(cs)
context_masked_tmp.extend([[0]*self.s_max_len]*diff_c)
context_masked.append(context_masked_tmp)
diff_q = self.q_max_len - len(q)
question_real_len.append(len(q))
question_masked_tmp = np.array(np.append(q, [self.mask_index]*diff_q, axis=0))
question_masked.append(question_masked_tmp.tolist())
diff_l = self.c_max_len - len(l)
label_masked_tmp = np.append(l, np.zeros((diff_l, self.c_max_len)), axis= 0)
label_masked.append(label_masked_tmp.tolist())
context_real_length_tmp.extend([0]*diff_l)
context_real_len.append(context_real_length_tmp)
return context_masked, question_masked, label_masked, context_real_len, question_real_len
def load(self, mode):
if mode == 'train':
path = self.train_paths
elif mode == 'val':
path = self.val_paths
else:
path = self.test_paths
contexts, labels, questions, answers = self.split_all_clqa(path)
context_index = self._index_context(contexts)
label_index = self._index_label(labels)
question_index = self._index_question(questions)
answer_index = self._index_answer(answers)
if mode == 'train':
# check max sentence length
for context in context_index:
for sentence in context:
if len(sentence) > self.s_max_len:
self.s_max_len = len(sentence)
# check max question length
for question in question_index:
if len(question) > self.q_max_len:
self.q_max_len = len(question)
context_masked, question_masked, label_masked, context_real_len, question_real_len = self.masking(context_index, label_index, question_index)
# check masking
cnt = 0
for c, q, l in zip(context_masked, question_masked, label_masked):
for context in c :
if (len(context) != self.s_max_len) | (len(q) != self.q_max_len) | (len(l) != self.c_max_len):
cnt += 1
if cnt == 0:
print("Masking success!")
else:
print("Masking process error")
dataset = (question_masked, answer_index, context_masked, label_masked, context_real_len, question_real_len)
if not os.path.exists(self.path_to_processed):
os.makedirs(self.path_to_processed)
with open(os.path.join(self.path_to_processed, mode + '_dataset.pkl'), 'wb') as f:
pickle.dump(dataset, f)
def get_args_parser():
"""
python preprocessing.py --path ../ --batch_size 64 --hidden_units 32 --learning_rate 2e-4 --iter_time 150 --display_step 100
:return:
"""
_parser = argparse.ArgumentParser()
_parser.add_argument('--path', '--path_to_babi')
_parser.add_argument('--batch_size')
_parser.add_argument('--hidden_units')
_parser.add_argument('--learning_rate')
_parser.add_argument('--iter_time')
_parser.add_argument('--display_step')
return _parser
def default_write(f, string, default_value):
if string is None:
f.write(str(default_value) + "\t")
else:
f.write(str(string) + "\t")
def main():
args = get_args_parser().parse_args()
preprocess = Preprocess(args.path)
preprocess.set_path()
preprocess._set_word_set()
preprocess.load(mode='train')
preprocess.load(mode='val')
preprocess.load(mode='test')
with open(os.path.join('config.txt'), 'w') as f:
f.write(str(preprocess.c_max_len)+"\t")
f.write(str(preprocess.s_max_len)+"\t")
f.write(str(preprocess.q_max_len)+"\t")
f.write(str(preprocess.path_to_processed)+'\t')
default_write(f, args.batch_size, 64)
default_write(f, args.hidden_units, 32)
default_write(f, args.learning_rate, 2e-4)
default_write(f, args.iter_time, 150)
default_write(f, args.display_step, 100)
if __name__ == '__main__':
main()
|
1686166
|
import typer
import subprocess
from clumper import Clumper
from crontab import CronTab
def clean_cron(user):
cron = CronTab(user=user)
cron.remove_all()
cron.write()
def parse_job_from_settings(settings, name):
if len(settings) == 0:
print(f"The name `{name}` doesn't appear in supplied schedule config.")
raise typer.Exit(code=1)
cmd_settings = settings[0]
arguments = " ".join([f"--{k} {v}" for k, v in cmd_settings.get('arguments', {}).items()])
# Ensure we remove the space at the end.
return f"{cmd_settings['command']} {arguments}".rstrip()
class Cron:
def __init__(self, settings_path):
self.settings = Clumper.read_yaml(settings_path).unpack("schedule").collect()
def grab_nums(self, setting):
return int("".join([s for s in setting["every"] if s.isdigit()]))
def parse_cmd(self, setting):
"""
Parse single cron setting into elaborate command for crontab.
"""
# If no venv is given we assume the one you're currently in.
python = "python"
if "venv" not in setting.keys():
output = subprocess.run(["which", "python"], capture_output=True)
python = output.stdout.decode("ascii").replace("\n", "")
# Set base values.
retry = setting.get("retry", 2)
wait = setting.get("wait", 60)
# We only want to replace python if it is at the start.
cmd = setting["command"]
if cmd.startswith("python"):
cmd = cmd.replace("python", python, 1)
print(f"adding command: '{cmd}'")
big_cmd = f'{python} -m skedulord run {setting["name"]} "{cmd}" --retry {retry} --wait {wait}'
return big_cmd.rstrip()
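# A hedged usage sketch for parse_cmd (setting keys follow the reads above;
# the resolved interpreter path depends on the active virtualenv):
#
# setting = {'name': 'daily-job', 'command': 'python jobs/daily.py', 'retry': 3, 'wait': 30}
# -> '/usr/bin/python -m skedulord run daily-job "/usr/bin/python jobs/daily.py" --retry 3 --wait 30'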
def set_new_cron(self):
cron = CronTab(user=self.settings[0]["user"])
cron.remove_all()
for s in self.settings:
s["name"] = s["name"].replace(" ", "-")
cmd = self.parse_cmd(s)
job = cron.new(command=cmd, comment=s["name"])
job.setall(s["cron"])
cron.write()
|
1686184
|
import argparse
import torch
from model.wide_res_net import WideResNet
from model.smooth_cross_entropy import smooth_crossentropy
from data.cifar import Cifar
from utility.log import Log
from utility.initialize import initialize
from utility.step_lr import StepLR
from utility.bypass_bn import enable_running_stats, disable_running_stats
import sys; sys.path.append("..")
from sam import SAM
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--adaptive", default=True, type=bool, help="True if you want to use the Adaptive SAM.")
parser.add_argument("--batch_size", default=128, type=int, help="Batch size used in the training and validation loop.")
parser.add_argument("--depth", default=16, type=int, help="Number of layers.")
parser.add_argument("--dropout", default=0.0, type=float, help="Dropout rate.")
parser.add_argument("--epochs", default=200, type=int, help="Total number of epochs.")
parser.add_argument("--label_smoothing", default=0.1, type=float, help="Use 0.0 for no label smoothing.")
parser.add_argument("--learning_rate", default=0.1, type=float, help="Base learning rate at the start of the training.")
parser.add_argument("--momentum", default=0.9, type=float, help="SGD Momentum.")
parser.add_argument("--threads", default=2, type=int, help="Number of CPU threads for dataloaders.")
parser.add_argument("--rho", default=2.0, type=int, help="Rho parameter for SAM.")
parser.add_argument("--weight_decay", default=0.0005, type=float, help="L2 weight decay.")
parser.add_argument("--width_factor", default=8, type=int, help="How many times wider compared to normal ResNet.")
args = parser.parse_args()
initialize(args, seed=42)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
dataset = Cifar(args.batch_size, args.threads)
log = Log(log_each=10)
model = WideResNet(args.depth, args.width_factor, args.dropout, in_channels=3, labels=10).to(device)
base_optimizer = torch.optim.SGD
optimizer = SAM(model.parameters(), base_optimizer, rho=args.rho, adaptive=args.adaptive, lr=args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)
scheduler = StepLR(optimizer, args.learning_rate, args.epochs)
for epoch in range(args.epochs):
model.train()
log.train(len_dataset=len(dataset.train))
for batch in dataset.train:
inputs, targets = (b.to(device) for b in batch)
# first forward-backward step
enable_running_stats(model)
predictions = model(inputs)
loss = smooth_crossentropy(predictions, targets, smoothing=args.label_smoothing)
loss.mean().backward()
optimizer.first_step(zero_grad=True)
# second forward-backward step
disable_running_stats(model)
smooth_crossentropy(model(inputs), targets, smoothing=args.label_smoothing).mean().backward()
optimizer.second_step(zero_grad=True)
with torch.no_grad():
correct = torch.argmax(predictions.data, 1) == targets
log(model, loss.cpu(), correct.cpu(), scheduler.lr())
scheduler(epoch)
model.eval()
log.eval(len_dataset=len(dataset.test))
with torch.no_grad():
for batch in dataset.test:
inputs, targets = (b.to(device) for b in batch)
predictions = model(inputs)
loss = smooth_crossentropy(predictions, targets)
correct = torch.argmax(predictions, 1) == targets
log(model, loss.cpu(), correct.cpu())
log.flush()
|
1686206
|
from unittest import TestCase
import numpy as np
from skfem import BilinearForm, LinearForm, Functional, asm, condense, solve
from skfem.helpers import dd, ddot
from skfem.mesh import MeshQuad, MeshTri, MeshLine
from skfem.element import (ElementQuadBFS, ElementTriArgyris,
ElementTriMorley, ElementLineHermite,
ElementTri15ParamPlate)
from skfem.assembly import InteriorBasis
class ConvergenceMorley(TestCase):
case = (MeshTri, ElementTriMorley)
prerefs = 3
limits = (1.9, 2.1)
abs_limit = 8e-5
def runTest(self):
m = self.case[0]().refined(self.prerefs)
hs = []
L2s = []
for itr in range(3):
e = self.case[1]()
ib = InteriorBasis(m, e)
t = 1.
E = 1.
nu = 0.3
D = E * t ** 3 / (12. * (1. - nu ** 2))
@BilinearForm
def bilinf(u, v, w):
def C(T):
trT = T[0, 0] + T[1, 1]
return E / (1. + nu) * \
np.array([[T[0, 0] + nu / (1. - nu) * trT, T[0, 1]],
[T[1, 0], T[1, 1] + nu / (1. - nu) * trT]])
return t ** 3 / 12.0 * ddot(C(dd(u)), dd(v))
def load(x):
return np.sin(np.pi * x[0]) * np.sin(np.pi * x[1])
@LinearForm
def linf(v, w):
return load(w.x) * v
K = asm(bilinf, ib)
f = asm(linf, ib)
# TODO fix boundary conditions
# u_x should be zero on top/bottom
# u_y should be zero on left/right
x = solve(*condense(K, f, D=ib.get_dofs().all('u')))
X = ib.interpolate(x)
def exact(x):
return 1. / (4. * D * np.pi ** 4) * load(x)
@Functional
def error(w):
return (w.w - exact(w.x)) ** 2
L2 = np.sqrt(error.assemble(ib, w=X))
L2s.append(L2)
hs.append(m.param())
m = m.refined()
hs = np.array(hs)
L2s = np.array(L2s)
pfit = np.polyfit(np.log10(hs), np.log10(L2s), 1)
self.assertGreater(pfit[0], self.limits[0])
self.assertLess(pfit[0], self.limits[1])
self.assertLess(L2s[-1], self.abs_limit)
class ConvergenceArgyris(ConvergenceMorley):
case = (MeshTri, ElementTriArgyris)
prerefs = 0
limits = (2.9, 3.1)
abs_limit = 5e-7
class Convergence15Param(ConvergenceMorley):
case = (MeshTri, ElementTri15ParamPlate)
prerefs = 1
limits = (1.9, 2.1)
abs_limit = 5e-6
class ConvergenceBFS(ConvergenceMorley):
case = (MeshQuad, ElementQuadBFS)
prerefs = 1
limits = (3.9, 4.5)
abs_limit = 5e-9
class ConvergenceHermite(TestCase):
case = (MeshLine, ElementLineHermite)
prerefs = 3
limits = (3.9, 4.1)
abs_limit = 8e-5
def runTest(self):
m = self.case[0]().refined(self.prerefs)
hs = []
L2s = []
for itr in range(3):
e = self.case[1]()
ib = InteriorBasis(m, e)
@BilinearForm
def bilinf(u, v, w):
return ddot(dd(u), dd(v))
@LinearForm
def linf(v, w):
return 1. * v
K = asm(bilinf, ib)
f = asm(linf, ib)
x = solve(*condense(K, f, D=ib.get_dofs().all()))
X = ib.interpolate(x)
def exact(x):
return (x ** 2 - 2. * x ** 3 + x ** 4) / 24.
@Functional
def error(w):
return (w.w - exact(w.x)) ** 2
L2 = np.sqrt(error.assemble(ib, w=X))
L2s.append(L2)
hs.append(m.param())
m = m.refined()
hs = np.array(hs)
L2s = np.array(L2s)
pfit = np.polyfit(np.log10(hs), np.log10(L2s), 1)
self.assertGreater(pfit[0], self.limits[0])
self.assertLess(pfit[0], self.limits[1])
self.assertLess(L2s[-1], self.abs_limit)
|
1686251
|
from pgdrive.component.map.base_map import BaseMap, MapGenerateMethod
from pgdrive.envs.pgdrive_env import PGDriveEnv
if __name__ == "__main__":
def get_image(env):
env.vehicle.image_sensors[env.vehicle.config["image_source"]].save_image()
env.engine.screenshot()
env = PGDriveEnv(
{
"environment_num": 1,
"traffic_density": 0.1,
"start_seed": 4,
"manual_control": True,
"use_render": True,
"offscreen_render": True,
"rgb_clip": True,
"vehicle_config": dict(depth_camera=(200, 88, True), image_source="depth_camera"),
"headless_machine_render": False,
"map_config": {
BaseMap.GENERATE_TYPE: MapGenerateMethod.BIG_BLOCK_NUM,
BaseMap.GENERATE_CONFIG: 12,
BaseMap.LANE_WIDTH: 3.5,
BaseMap.LANE_NUM: 3,
}
}
)
env.reset()
env.engine.accept("m", get_image, extraArgs=[env])
for i in range(1, 100000):
o, r, d, info = env.step([0, 1])
assert env.observation_space.contains(o)
if env.config["use_render"]:
# for i in range(ImageObservation.STACK_SIZE):
# ObservationType.show_gray_scale_array(o["image"][:, :, i])
env.render()
if d:
print("Reset")
env.reset()
env.close()
|
1686271
|
import os
from tqdm import tqdm
from persia.embedding.data import PersiaBatch
from persia.logger import get_logger
from persia.ctx import DataCtx
from data_generator import make_dataloader
logger = get_logger("data_loader")
train_filepath = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "data/train.npz"
)
if __name__ == "__main__":
with DataCtx() as ctx:
loader = make_dataloader(train_filepath)
for (non_id_type_feature, id_type_features, label) in tqdm(
loader, desc="gen batch data..."
):
persia_batch = PersiaBatch(
id_type_features,
non_id_type_features=[non_id_type_feature],
labels=[label],
requires_grad=True,
)
ctx.send_data(persia_batch)
|
1686275
|
from __future__ import annotations
from result import Err, Ok, Result
def test_pattern_matching_on_ok_type() -> None:
"""
Pattern matching on ``Ok()`` matches the contained value.
"""
o: Result[str, int] = Ok("yay")
match o:
case Ok(value):
reached = True
assert value == "yay"
assert reached
def test_pattern_matching_on_err_type() -> None:
"""
Pattern matching on ``Err()`` matches the contained value.
"""
n: Result[int, str] = Err("nay")
match n:
case Err(value):
reached = True
assert value == "nay"
assert reached
|
1686285
|
import unittest
from matbench.constants import CLF_KEY, REG_KEY
from matbench.metadata import mbv01_metadata, mbv01_validation
class TestMetadata(unittest.TestCase):
def test_mbv01_metadata(self):
# for matbench v0.1
for ds, metadata in mbv01_metadata.items():
for key in ["task_type", "n_samples", "input_type", "target"]:
self.assertIn(key, metadata.keys())
if key == "input_type":
self.assertIn(metadata[key], ["composition", "structure"])
elif key == "task_type":
self.assertIn(metadata[key], [REG_KEY, CLF_KEY])
self.assertEqual(len(list(mbv01_metadata.values())), 13)
self.assertIn("metadata", mbv01_validation)
self.assertIn("splits", mbv01_validation)
self.assertEqual(len(mbv01_validation.splits), 13)
for k in mbv01_metadata:
self.assertEqual(len(mbv01_validation.splits[k]), 5)
|
1686306
|
class DataModelRenderer:
def __init__(self):
self.lines = []
def add(self, template, value=None, **kwargs):
if isinstance(value, bool) and value:
self.lines.append(template.format(**kwargs))
elif isinstance(value, (list, set)):
for elem in value:
if isinstance(elem, dict):
self.lines.append(template.format(**elem))
else:
self.lines.append(template.format(elem))
elif value is None:
self.lines.append(template.format(**kwargs))
return self
def get(self):
return '\n'.join([v.rstrip() for v in self.lines]) + '\n'
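# A minimal usage sketch (names hypothetical) covering the three add() modes:
# an unconditional line, a bool-guarded line, and one line per list element.
#
# renderer = (DataModelRenderer()
# .add('class {name}:', name='Point')
# .add(' frozen = True', value=True)
# .add(' {}: int', value=['x', 'y']))
# renderer.get()
# -> 'class Point:\n frozen = True\n x: int\n y: int\n'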
|
1686326
|
from keras.models import Model
from keras.layers import Input, Convolution1D, Activation, Merge, Lambda
from keras.layers.advanced_activations import PReLU
from keras.optimizers import Nadam
from eva.layers.causal_atrous_convolution1d import CausalAtrousConvolution1D
from eva.layers.wavenet_block import WavenetBlock, WavenetBlocks
def Wavenet(input_shape, filters, depth, stacks, last=0, h=None, build=True):
# TODO: Soft targets? A float to make targets a gaussian with stdev.
# TODO: Train only receptive field. The temporal-first outputs are computed from zero-padding.
# TODO: Global conditioning?
# TODO: Local conditioning?
_, nb_bins = input_shape
input_audio = Input(input_shape, name='audio_input')
model = CausalAtrousConvolution1D(filters, 2, mask_type='A', atrous_rate=1, border_mode='valid')(input_audio)
out, skip_connections = WavenetBlocks(filters, depth, stacks)(model)
out = Merge(mode='sum', name='merging_skips')(skip_connections)
out = PReLU()(out)
out = Convolution1D(nb_bins, 1, border_mode='same')(out)
out = PReLU()(out)
out = Convolution1D(nb_bins, 1, border_mode='same')(out)
# https://storage.googleapis.com/deepmind-live-cms/documents/BlogPost-Fig2-Anim-160908-r01.gif
if last > 0:
out = Lambda(lambda x: x[:, -last:], output_shape=(last, out._keras_shape[2]), name='last_out')(out)
out = Activation('softmax')(out)
if build:
model = Model(input_audio, out)
model.compile(Nadam(), 'sparse_categorical_crossentropy')
return model
def compute_receptive_field(sample_rate, depth, stacks):
receptive_field = stacks * (2 ** depth) - (stacks - 1)
receptive_field_ms = (receptive_field * 1000) / sample_rate
return receptive_field, receptive_field_ms
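# Worked example of the formula above: depth=8 and stacks=2 give a receptive
# field of 2 * 2**8 - 1 = 511 samples, i.e. 511 * 1000 / 16000 = 31.9375 ms
# at a 16 kHz sample rate:
#
# compute_receptive_field(16000, 8, 2) # -> (511, 31.9375)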
|
1686350
|
from __future__ import division
from matplotlib import pyplot as plt
import numpy as np
import math as m
import matplotlib as mlp
pgf_with_rc_fonts = {
"font.family": "serif",
"font.size": 16,
"legend.fontsize": 16,
"font.sans-serif": ["DejaVu Sans"], # use a specific sans-serif font
}
mlp.rcParams.update(pgf_with_rc_fonts)
def estimate_time(x, y):
angle = np.degrees(np.arctan2(y, x))
rot_time = np.abs(angle / velRot)
# calculate the distance
distance = np.hypot(x, y)
distance_time = distance / velWalk
total_time = distance_time + rot_time # type: np.ndarray
# rescale the raw time with an exponential damping term, cap it at 5 s,
# then shift by -5 so every value is non-positive
for d1 in range(len(x)):
for d2 in range(len(y)):
total_time[d1, d2] = 1.5 * total_time[d1, d2] * m.exp(-total_time[d1, d2] * 0.1)
if total_time[d1, d2] >= 5:
total_time[d1, d2] = 5
total_time[d1, d2] -= 5
return total_time
if __name__ == "__main__":
# Constants for the robot
velRot = 60 # degrees per second
velWalk = 200 # mm per second
size = 1000
x_val = np.arange(-size, size, 10)
y_val = np.arange(-size, size, 10)
xm, ym = np.meshgrid(x_val, y_val)
times = estimate_time(xm, ym)
# plot
fig = plt.figure(frameon=False)
ax = fig.gca()
ax.set_aspect("equal")
ax.set_xlabel("x [mm]")
ax.set_ylabel("y [mm]")
ax.axis('on')
ax.set_xlim([-size, size])
ax.set_ylim([-size, size])
ax.spines['left'].set_position(('axes', 0.0))
ax.spines['bottom'].set_position(('axes', 0.0))
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.get_xaxis().tick_bottom() # remove unneeded ticks
ax.get_yaxis().tick_left()
CS1 = plt.contourf(x_val, y_val, times, 10, alpha=0.5, cmap="coolwarm", frameon=False)
CS = plt.contour(CS1, levels=CS1.levels)
plt.clabel(CS, inline=1, fontsize=10)
plt.show()
|
1686368
|
import asyncio
import logging
import typing
from ib_async.errors import UnsupportedFeature
from ib_async.instrument import Instrument
from ib_async.messages import Outgoing
from ib_async.protocol import RequestId, ProtocolInterface, OutgoingMessage
from ib_async.protocol_versions import ProtocolVersion
from ib_async.tick_types import TickTypeGroup, MarketDataTimeliness, TickType, TickAttributes
LOG = logging.getLogger(__name__)
class MarketDataMixin(ProtocolInterface):
def __init__(self):
super().__init__()
self.__instruments = {}
def change_market_data_timeliness(self, timeliness: MarketDataTimeliness):
"""Switches market data timeliness.
The API can receive frozen market data from Trader Workstation. Frozen market data is the last data recorded
in our system. During normal trading hours, the API receives real-time market data. Invoking this function
with argument 2 requests a switch to frozen data immediately or after the close. When the market reopens,
the market data type automatically switches back to real-time data if available."""
self.send_message(Outgoing.REQ_MARKET_DATA_TYPE, 1, timeliness)
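# A hedged usage sketch (the MarketDataTimeliness member names are an
# assumption; per the docstring, the frozen variant corresponds to value 2):
#
# client.change_market_data_timeliness(MarketDataTimeliness.Frozen)
# ... after the close, ticks hold the last recorded values ...
# client.change_market_data_timeliness(MarketDataTimeliness.RealTime)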
def _handle_market_data_type(self, request_id: RequestId, timeliness: MarketDataTimeliness):
instrument = self.__instruments[request_id]
instrument.market_data_timeliness = timeliness
def get_market_data(self, instrument: Instrument,
tick_types: typing.Iterable[TickTypeGroup] = (),
snapshot=False, regulatory_snapshot=False,
market_data_options: typing.Dict[str, str] = None) -> typing.Awaitable[None]:
if regulatory_snapshot:
self.check_feature(ProtocolVersion.REQ_SMART_COMPONENTS, "regulatory snapshots")
if snapshot:
request_id, future = self.make_future()
elif instrument._market_data_request_id:
request_id = instrument._market_data_request_id
future = asyncio.Future()
else:
request_id, future = self.make_future()
message = OutgoingMessage(Outgoing.REQ_MKT_DATA, 11, request_id, protocol_version=self.version)
message.add(instrument)
if instrument.security_type == 'BAG':
raise UnsupportedFeature("BAG orders") # We're currently missing serialization for BAG
if instrument.underlying_component:
message.add(True, instrument.underlying_component)
else:
message.add(False)
# convert to integers using getattr with default. This way the end user can provide plain integers instead of
# GenericTickType values.
tick_type_ids = (getattr(tick_type, "value", tick_type) for tick_type in tick_types)
message.add(','.join(str(tick_type) for tick_type in tick_type_ids))
message.add(snapshot)
message.add(regulatory_snapshot, min_version=ProtocolVersion.REQ_SMART_COMPONENTS)
message.add(market_data_options)
self.send(message)
self.__instruments[request_id] = instrument
if not snapshot:
instrument._market_data_request_id = request_id
# subscriptions are complete as soon as the request is sent.
self.resolve_future(request_id, instrument)
return future
def cancel_market_data(self, instrument: Instrument):
"""Cancels a RT Market Data request."""
message = OutgoingMessage(Outgoing.CANCEL_MKT_DATA, 2, instrument._market_data_request_id)
self.send(message)
instrument._market_data_request_id = None
def _handle_tick_price(self, request_id: RequestId, tick_type: TickType, price: float, size: float,
attributes: int):
instrument = self.__instruments[request_id]
instrument.handle_market_data(tick_type, price, size, TickAttributes.list_from_int(attributes))
def _handle_tick_generic(self, request_id: RequestId, tick_type: TickType, value: float):
instrument = self.__instruments[request_id]
instrument.handle_market_data(tick_type, value)
def _handle_tick_size(self, request_id: RequestId, tick_type: TickType, value: int):
instrument = self.__instruments[request_id]
instrument.handle_market_data(tick_type, value)
def _handle_tick_string(self, request_id: RequestId, tick_type: TickType, value: str):
instrument = self.__instruments[request_id]
instrument.handle_market_data(tick_type, value)
def _handle_tick_req_params(self, request_id: RequestId, min_tick: float, bbo_exchange: str,
snapshot_permissions: int):
instrument = self.__instruments[request_id]
instrument.minimum_tick = min_tick
instrument.bbo_exchange = bbo_exchange
instrument.snapshot_permissions = snapshot_permissions
def _handle_tick_snapshot_end(self, request_id: RequestId):
instrument = self.__instruments.pop(request_id)
self.resolve_future(request_id, instrument)
|
1686410
|
from setuptools import setup, find_packages
setup(
name='corpus2graph', # Required
version='0.0.1', # Required
description='tools to generate graph from corpus', # Required
url='https://github.com/zzcoolj/corpus2graph', # Optional
author='<NAME> and <NAME>', # Optional
author_email='<EMAIL>', # Optional
classifiers=[ # Optional
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3.6',
],
packages=find_packages(), # Required
install_requires=['nltk >= 3.2.5',
'docopt >= 0.6.2'
], # Optional
entry_points={ # Optional
'console_scripts': [
'corpus2graph=corpus2graph.applications.corpus2graph:main',
],
},
)
|
1686415
|
from selenium import webdriver
import os
import time
def save_and_submit():
# click the "save and submit" button
driver.find_element_by_xpath(
"//button[@class='btn btn-icon btn-primary glyphicons circle_ok center']").click()
USERNAME = os.environ['STUDENT_USERNAME']
PASSWORD = os.environ['STUDENT_PASSWORD']
driver = webdriver.Firefox()
driver.get("https://alfarabi.mans.edu.eg/login")
username_input = driver.find_element_by_xpath("//input[@name='username']")
password_input = driver.find_element_by_xpath("//input[@name='password']")
username_input.send_keys(USERNAME)
password_input.send_keys(PASSWORD)
driver.find_element_by_xpath(
"//input[@name='userType'][@value='2']").click() # usertype = student
driver.find_element_by_xpath(
"//button[@class='btn btn-primary']").click() # signin button
time.sleep(2)
subjects = driver.find_elements_by_xpath("//li[@class='glyphicons dropMenu']")
for subject in subjects:
subject.click()
time.sleep(1)
driver.find_element_by_xpath(
"//input[@class='uniform'][@type='checkbox']").click() # check on doctor name
good_ratings = driver.find_elements_by_xpath(
"//input[@type='radio'][@value='3']")
for rating in good_ratings:
rating.click() # check all "I totally agree" radio buttons
strengths, weaknesses = driver.find_elements_by_xpath(
"//input[@type='text']")
strengths.send_keys("...")
weaknesses.send_keys("...")
save_and_submit()
general_questionnaires = driver.find_elements_by_xpath(
"//li[@class='glyphicons']")
for g_q in general_questionnaires:
g_q.click()
time.sleep(1)
good_ratings = driver.find_elements_by_xpath(
"//input[@type='radio'][@value='2']")
for rating in good_ratings:
rating.click()
save_and_submit()
driver.close()
|
1686439
|
from Core.App import App
from Core.Ui import *
import sys
import gc
class TwitchLink:
def run(self):
app = QtWidgets.QApplication(sys.argv)
Translator.load()
while True:
exitCode = App.start(Ui.MainWindow())
DB.save()
gc.collect()
if exitCode != App.EXIT_CODE.REBOOT:
return exitCode
if __name__ == "__main__":
sys.exit(TwitchLink().run())
|
1686492
|
import unittest
from katas.kyu_7.sentence_to_words import splitSentence
class SplitSentenceTestCase(unittest.TestCase):
def test_equal_1(self):
self.assertEqual(splitSentence('This string is splitsville'),
['This', 'string', 'is', 'splitsville'])
def test_equal_2(self):
self.assertEqual(splitSentence('something'), ['something'])
|
1686508
|
import sys
import logging
from optparse import OptionParser
from flvlib import __versionstr__
from flvlib import tags
from flvlib import helpers
from flvlib.astypes import MalformedFLV
log = logging.getLogger('flvlib.debug-flv')
log.setLevel(logging.ERROR)
def debug_file(filename, quiet=False, metadata=False):
try:
f = open(filename, 'rb')
except IOError, (errno, strerror):
log.error("Failed to open `%s': %s", filename, strerror)
return False
flv = tags.FLV(f)
if not quiet:
print "=== `%s' ===" % filename
try:
tag_generator = flv.iter_tags()
for i, tag in enumerate(tag_generator):
if quiet:
# If we're quiet, we just want to catch errors
continue
# Print the tag information
print "#%05d %s" % (i + 1, tag)
# Print the content of onMetaData tags
if (isinstance(tag, tags.ScriptTag)
and tag.name == "onMetaData"):
helpers.pprint(tag.variable)
if metadata:
return True
except MalformedFLV, e:
message = e[0] % e[1:]
log.error("The file `%s' is not a valid FLV file: %s",
filename, message)
return False
except tags.EndOfFile:
log.error("Unexpected end of file on file `%s'", filename)
return False
f.close()
return True
def process_options():
usage = "%prog [options] files ..."
description = ("Checks FLV files for comformance with the FLV "
"specification. Outputs a list of tags and, "
"if present, the content of the onMetaData script tag.")
version = "%%prog flvlib %s" % __versionstr__
parser = OptionParser(usage=usage, description=description,
version=version)
parser.add_option("-s", "--strict", action="store_true",
help="be strict while parsing the FLV file")
parser.add_option("-q", "--quiet", action="store_true",
help="do not output anything unless there are errors")
parser.add_option("-m", "--metadata", action="store_true",
help="exit immediately after printing an onMetaData tag")
parser.add_option("-v", "--verbose", action="count",
default=0, dest="verbosity",
help="be more verbose, each -v increases verbosity")
options, args = parser.parse_args(sys.argv)
if len(args) < 2:
parser.error("You have to provide at least one file path")
if options.strict:
tags.STRICT_PARSING = True
if options.verbosity > 3:
options.verbosity = 3
level = ({0: logging.ERROR, 1: logging.WARNING,
2: logging.INFO, 3: logging.DEBUG}[options.verbosity])
logging.getLogger('flvlib').setLevel(level)
return options, args
def debug_files():
options, args = process_options()
clean_run = True
for filename in args[1:]:
if not debug_file(filename, options.quiet, options.metadata):
clean_run = False
return clean_run
def main():
try:
outcome = debug_files()
except KeyboardInterrupt:
# give the right exit status, 128 + signal number
# signal.SIGINT = 2
sys.exit(128 + 2)
except EnvironmentError, (errno, strerror):
try:
print >>sys.stderr, strerror
except StandardError:
pass
sys.exit(2)
if outcome:
sys.exit(0)
else:
sys.exit(1)
|
1686526
|
def up(cursor, bot):
cursor.execute("DROP TABLE hsbet_game, hsbet_bet")
cursor.execute("DROP TYPE hsbet_outcome")
|
1686542
|
import sqlite3
import sys
import time
import traceback
sys.path.append('..\\SubredditBirthdays')
import sb
sys.path.append('..\\Usernames')
import un4
newnames_sql = sqlite3.connect('..\\Usernames\\newnames.db')
newnames_cur = newnames_sql.cursor()
def migrate():
resume_from = int(open('latest.txt', 'r').read().strip())
sb.cur.execute(
'SELECT name, created FROM subreddits WHERE subreddit_type == 8 AND created >= ? ORDER BY created ASC',
[resume_from]
)
f = sb.cur.fetchall()
resume_from = f[-1][1]
f = [x[0] for x in f]
for x in f:
newnames_cur.execute('INSERT INTO names3 VALUES(?)', [x])
print('Moved %d records' % len(f))
newnames_sql.commit()
open('latest.txt', 'w').write(str(resume_from))
def run_newnames():
while True:
try:
if newnames_cur.execute('SELECT count(*) from names3').fetchone()[0] == 0:
break
un4.process_from_database('..\\Usernames\\newnames.db', 'names3', 'name', True)
except sqlite3.OperationalError:
raise
except Exception:
traceback.print_exc()
time.sleep(60)
def run_modernize():
while True:
try:
sb.modernize(limit=10000)
except Exception:
traceback.print_exc()
time.sleep(60)
|
1686578
|
import sys
import pickle
import utils
from scribe.scribe import Scribe
if len(sys.argv) < 2:
print('Usage:'
'\n python3 {} <output_file_name> [configurations]'
'Generates data based on the configuration files.'.format(sys.argv[0]))
sys.exit(-1)
out_file_name = sys.argv[1]
if not out_file_name.endswith('.pkl'):
out_file_name += '.pkl'
args = utils.read_args(sys.argv[2:])
scriber = Scribe(**args['scribe_args'])
alphabet_chars = scriber.alphabet.chars
xs = []
ys = []
for i in range(args['num_samples']):
x, y = scriber.get_sample()
xs.append(x)
ys.append(y)
print(y, ''.join(alphabet_chars[i] for i in y))
utils.slab_print(x)
with open(out_file_name, 'wb') as f:
pickle.dump({'x': xs, 'y': ys, 'chars': alphabet_chars}, f, -1)
print(scriber)
print('Generated dataset:', out_file_name)
|
1686580
|
from __future__ import division
import numpy as np
model = None
labels_list = None
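# These module-level globals are presumably populated by a multiprocessing
# initializer before the helpers below run inside worker processes, so the
# model is not re-pickled for every task (an assumption based on the naming
# and the lack of other assignments here).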
def _get_sampled_labels(idx):
model.add_data(model.labels_list[idx].data,initialize_from_prior=False)
l = model.labels_list.pop()
return l.z, l._normalizer
def _get_sampled_component_params(idx):
model.components[idx].resample([l.data[l.z == idx] for l in labels_list])
return model.components[idx].parameters
|
1686628
|
import json
import operator
import os
import pickle
import subprocess
from django.contrib.auth import logout, authenticate, login
from django.shortcuts import render, render_to_response
from django.utils.datastructures import MultiValueDictKeyError
from django.contrib.auth.models import User
from carnivora.instabot.config import ConfigLoader, Config
from carnivora.instabot.driver import Driver
from carnivora.instabot.log import Log
from carnivora.instabot.statistics import Statistics
from carnivora.instabot.statistics import frequencies
from carnivora.instabot.statistics import timeranges
from tf_imagenet.imagenet import classify_image
from tf_open_nsfw.classify_nsfw import classify_nsfw
from django.contrib.auth.decorators import user_passes_test
def index(request):
return render_to_response('index.html')
def main_body(request):
if request.user.is_authenticated:
return render(request, 'main-body.html')
else:
return render(request, 'login.html')
def login_user(request):
logout(request)
username = request.GET['username']
password = request.GET['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user=user)
return render(request, 'main-body.html')
return render(request, 'login.html', {'message': 'Login failed. Please try again.'})
def load_registration(request):
username = request.GET['username']
password = request.GET['password']
return render(request, 'register.html', {'username': username, 'password': password})
def logout_user(request):
logout(request)
return render(request, 'logout.html')
def register_user(request):
logout(request)
username = request.GET['username']
email = request.GET['email']
password = request.GET['password']
User.objects.create_user(username=username, email=email, password=password)
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user=user)
return render(request, 'main-body.html')
return render(request, 'register.html', {'message': 'Registration failed. Please try again.'})
def load_button_chain(request):
if not request.user.is_authenticated:
return
username = request.user.username
log_path = Config.bot_path + "log/" + username
if not os.path.exists(log_path):
os.makedirs(log_path)
running_path = log_path + "/running.pickle"
try:
with open(running_path, "rb") as f:
active = bool(pickle.load(f))
except FileNotFoundError:
active = False
return render(request, 'buttonchain.html', {'active': active})
def run_instabot(request):
if not request.user.is_authenticated:
return
username = request.GET['username']
password = request.GET['password']
log_path = Config.bot_path + "log/" + username
if not os.path.exists(log_path):
os.makedirs(log_path)
running_path = log_path + "/running.pickle"
with open(running_path, "wb") as f:
pickle.dump(True, f)
screenshot_folder = "static/img/" + request.user.username
if not os.path.exists(screenshot_folder):
os.makedirs(screenshot_folder)
screenshot_path = screenshot_folder + "/screenshot.png"
driver = Driver(username=username, password=password, screenshot_path=screenshot_path)
driver.start()
return render(request, 'buttonchain.html', {'active': True})
def stop_instabot(request):
if not request.user.is_authenticated:
return
username = request.user.username
log_path = Config.bot_path + "log/" + username
if not os.path.exists(log_path):
os.makedirs(log_path)
running_path = log_path + "/running.pickle"
with open(running_path, "wb") as f:
pickle.dump(False, f)
return render(request, 'buttonchain.html', {'active': False})
@user_passes_test(lambda u: u.is_superuser)
def update_server(request):
commands = [["git", "status"], ["git", "pull"]]
output = []
    for command in commands:
        process = subprocess.Popen(command, stdout=subprocess.PIPE)
        # communicate() reads stdout and waits for the process; the raw
        # process.stdout buffer would render as an object repr in the template.
        stdout, _ = process.communicate(timeout=30)
        output.append(
            (" ".join(command), stdout.decode(errors="replace"), str(process.returncode))
        )
return render(request, 'server_update.html', {'output': output})
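

# Return the latest log lines for the monitor table, filtered by an optional
# search term.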
def table_monitor_update(request):
    if not request.user.is_authenticated:
        return HttpResponse(status=401)
try:
n = int(request.GET['n'])
except (MultiValueDictKeyError, ValueError):
n = 20
try:
search = request.GET['search']
except (MultiValueDictKeyError, ValueError):
search = ''
username = request.user.username
log_path = Config.bot_path + "log/" + username
path = log_path + "/log.pickle"
lines = Log.get(log_path=path, page_size=n, search=search)
return render(request, 'table_monitor_update.html', {'lines': lines})
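

# Serve the most recent bot screenshot, cache-busted with its mtime.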
def load_screenshot(request):
    if not request.user.is_authenticated:
        return HttpResponse(status=401)
path = "static/img/" + request.user.username + "/screenshot.png"
    try:
        time = os.path.getmtime(path)
    except FileNotFoundError:
        time = 0
src = path + "?mtime=" + str(time)
return render(request, 'screenshot.html', {'src': src})
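

# Store a single key/value pair in the bot configuration (superusers only).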
@user_passes_test(lambda u: u.is_superuser)
def submit_to_config(request):
try:
config_key = request.GET['config_key']
config_param = request.GET['config_param']
ConfigLoader.store(config_key, config_param)
return render(request, 'settings_update.html', {'config_key': config_key, 'config_param': config_param})
except MultiValueDictKeyError as e:
print(e)
return render(request, 'settings_update.html')
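

# Render the full monitor page with log lines and the latest screenshot.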
def monitor(request):
    if not request.user.is_authenticated:
        return HttpResponse(status=401)
try:
n = int(request.GET['n'])
except (MultiValueDictKeyError, ValueError):
n = 20
try:
search = request.GET['search']
except (MultiValueDictKeyError, ValueError):
search = ''
path = "static/img/" + request.user.username + "/screenshot.png"
try:
time = os.path.getmtime(path)
except FileNotFoundError:
time = 0
src = path + "?mtime=" + str(time)
username = request.user.username
log_path = Config.bot_path + "log/" + username
path = log_path + "/log.pickle"
lines = Log.get(log_path=path, page_size=n, search=search)
return render(request, 'monitor.html', {'lines': lines, 'src': src})
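

# Aggregate per-user hashtag scores, action counts, timelines, and dispatch
# statistics, then render the statistics page.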
def statistics(request):
if not request.user.is_authenticated:
return
username = request.user.username
try:
freq = request.GET['freq']
except (MultiValueDictKeyError, ValueError):
freq = "Calendar day frequency"
try:
timerange = request.GET['timerange']
except (MultiValueDictKeyError, ValueError):
timerange = None
hashtag_names, hashtag_scores = Statistics.get_hashtags(username=username, n=40, truncated_name_length=20)
    (amount_of_users, amount_of_interactions, amount_of_likes,
     amount_of_follows, amount_of_comments) = Statistics.get_amount_of_actions(username=username)
amount_of_follows_all_time = Statistics.get_amount_of_followed_accounts(username=username)
index, likes_data, comments_data, follows_data = Statistics.get_timelines(
username=username,
freq=freq,
timerange=timerange
)
    (current_likes, remaining_likes,
     current_follows, remaining_follows,
     current_comments, remaining_comments,
     current_unfollows, remaining_unfollows) = Statistics.get_dispatch_statistics(username=username)
tr = timeranges.keys()
render_data = {
'hashtag_names': json.dumps(hashtag_names),
'hashtag_scores': hashtag_scores,
'amount_of_users': amount_of_users,
'amount_of_likes': amount_of_likes,
'amount_of_comments': amount_of_comments,
'amount_of_follows': amount_of_follows,
'amount_of_interactions': amount_of_interactions,
'amount_of_follows_all_time': amount_of_follows_all_time,
'index': index,
'likes_data': likes_data,
'comments_data': comments_data,
'follows_data': follows_data,
'frequencies': frequencies,
'freq': freq,
'timerange': timerange,
'timeranges': tr,
'current_likes': current_likes,
'current_follows': current_follows,
'current_comments': current_comments,
'current_unfollows': current_unfollows,
'remaining_likes': remaining_likes,
'remaining_follows': remaining_follows,
'remaining_comments': remaining_comments,
'remaining_unfollows': remaining_unfollows,
}
return render(request, 'statistics.html', render_data)
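

# Score a linked image with the NSFW classifier and render a progress bar
# (superusers only).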
@user_passes_test(lambda u: u.is_superuser)
def submit_nsfw(request):
try:
link = request.GET['nsfw_link']
sfw, nsfw = classify_nsfw(link)
return render(request, 'nsfw_progress_bar.html', {'nsfw': int(nsfw * 100)})
except MultiValueDictKeyError as e:
print(e)
return render(request, 'nsfw_progress_bar.html', {'nsfw': 0})
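

# Classify a linked image with the ImageNet model and render the top
# predictions (superusers only).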
@user_passes_test(lambda u: u.is_superuser)
def submit_to_classification(request):
username = request.user.username
try:
link = request.GET['link']
top_k = classify_image(image_url=link, num_predictions=5, username=username)
return render(request, 'image_classification.html', {'top_k': top_k})
except MultiValueDictKeyError as e:
print(e)
return render(request, 'image_classification.html', {'top_k': []})
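

# Render the server administration page (superusers only).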
@user_passes_test(lambda u: u.is_superuser)
def server(request):
return render(request, 'server.html')
@user_passes_test(lambda u: u.is_superuser)
def nsfw_check(request):
return render(request, 'nsfw_check.html')
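

# Touch this source file to trigger an application reload, e.g. via the dev
# server's autoreloader or a WSGI touch-reload hook (superusers only).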
@user_passes_test(lambda u: u.is_superuser)
def perform_reboot(request):
    os.system('touch ' + __file__)
    # A view must return an HttpResponse; 204 signals success with no content.
    return HttpResponse(status=204)
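

# Render the settings page; scalar config values go into the main table while
# list-valued keys are passed separately.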
@user_passes_test(lambda u: u.is_superuser)
def settings(request):
config = ConfigLoader.load()
sorted_config = sorted(config.items(), key=operator.itemgetter(0))
    # These keys hold lists that are rendered separately below.
    excluded_keys = {"nsfw_hashtags", "comments", "topics", "smileys"}
    filtered_config = [(k, v) for k, v in sorted_config if k not in excluded_keys]
return render(request, 'settings.html', {
'filtered_config': filtered_config,
'nsfw_hashtags': config['nsfw_hashtags'],
'comments': config['comments'],
'topics': config['topics'],
'smileys': config['smileys'],
})