hexsha
stringlengths
40
40
size
int64
4
1.02M
ext
stringclasses
8 values
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
4
209
max_stars_repo_name
stringlengths
5
121
max_stars_repo_head_hexsha
stringlengths
40
40
max_stars_repo_licenses
listlengths
1
10
max_stars_count
int64
1
191k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
4
209
max_issues_repo_name
stringlengths
5
121
max_issues_repo_head_hexsha
stringlengths
40
40
max_issues_repo_licenses
listlengths
1
10
max_issues_count
int64
1
67k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
4
209
max_forks_repo_name
stringlengths
5
121
max_forks_repo_head_hexsha
stringlengths
40
40
max_forks_repo_licenses
listlengths
1
10
max_forks_count
int64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
content
stringlengths
4
1.02M
avg_line_length
float64
1.07
66.1k
max_line_length
int64
4
266k
alphanum_fraction
float64
0.01
1
27b183d627d557228a5c89c088d2f1e69f710f7a
1,496
py
Python
migrations/versions/6a0accee94b3_.py
npgeorge/twitter-bounty
2730dc4f9b01ac3b632b3a0dfacf92e33df3789b
[ "MIT" ]
3
2020-08-13T20:45:41.000Z
2022-03-25T06:16:59.000Z
migrations/versions/6a0accee94b3_.py
npgeorge/twitter-bounty
2730dc4f9b01ac3b632b3a0dfacf92e33df3789b
[ "MIT" ]
null
null
null
migrations/versions/6a0accee94b3_.py
npgeorge/twitter-bounty
2730dc4f9b01ac3b632b3a0dfacf92e33df3789b
[ "MIT" ]
null
null
null
"""empty message Revision ID: 6a0accee94b3 Revises: Create Date: 2020-06-25 00:44:29.146631 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '6a0accee94b3' down_revision = None branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_table('followers', sa.Column('id', sa.Integer(), nullable=False), sa.Column('screen_name', sa.String(length=120), nullable=True), sa.Column('name', sa.String(length=120), nullable=True), sa.Column('followers_count', sa.Integer(), nullable=True), sa.Column('created_at', sa.String(length=120), nullable=True), sa.Column('friends_count', sa.Integer(), nullable=True), sa.Column('statuses_count', sa.Integer(), nullable=True), sa.Column('verified', sa.Boolean(), nullable=True), sa.Column('location', sa.String(length=120), nullable=True), sa.Column('DM_sent', sa.Boolean(create_constraint=False), nullable=True), sa.PrimaryKeyConstraint('id') ) op.create_table('user', sa.Column('id', sa.Integer(), nullable=False), sa.Column('username', sa.String(length=250), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('username') ) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_table('user') op.drop_table('followers') # ### end Alembic commands ###
31.166667
77
0.680481
700df8498ac108f45c70a448483cd8ec39717239
3,752
py
Python
utility.py
lukovdm/NN-paper
daef51fe1305e4bdc268bba5da9309ace0578b47
[ "MIT" ]
null
null
null
utility.py
lukovdm/NN-paper
daef51fe1305e4bdc268bba5da9309ace0578b47
[ "MIT" ]
null
null
null
utility.py
lukovdm/NN-paper
daef51fe1305e4bdc268bba5da9309ace0578b47
[ "MIT" ]
null
null
null
import tensorflow as tf


def variable_summaries(var, name):
    """Attach a lot of summaries to a Tensor.

    Records mean, stddev, max, min scalar summaries plus a histogram for
    *var*, all tagged under *name*. Uses the pre-1.0 summary API
    (tf.scalar_summary / tf.histogram_summary).
    """
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.scalar_summary('mean/' + name, mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        # NOTE(review): tag spelling 'sttdev' kept from the original —
        # renaming it would change existing summary tags.
        tf.scalar_summary('sttdev/' + name, stddev)
        tf.scalar_summary('max/' + name, tf.reduce_max(var))
        tf.scalar_summary('min/' + name, tf.reduce_min(var))
        tf.histogram_summary(name, var)


def conv_layer(input_tensor, filter_shape, strides, padding, layer_name, act=tf.nn.relu):
    """ a convolutional layer initializer

    :param input_tensor: shape [batch, in_height, in_width, in_channels]
    :param filter_shape: [filter_height, filter_width, in_channels, out_channels]
    :param strides: list of ints defining the stride
    :param padding: "SAME" / "VALID"
    :param layer_name: name of the layer
    :param act: the activation function used
    :return: the activated convolution output; last dimension is
        filter_shape[3] (out_channels), spatial dims depend on padding/strides
    """
    with tf.name_scope('kernels'):
        # Xavier-initialized filter bank, scoped by layer_name so each layer
        # gets its own variable.
        with tf.variable_scope(layer_name):
            kernels = tf.get_variable(
                name="Xavier_initializer",
                shape=filter_shape,
                initializer=tf.contrib.layers.xavier_initializer_conv2d(),
                trainable=True
            )
        with tf.name_scope('image_summary'):
            # Flatten the filters into single-channel images for visualization.
            image_kernel = tf.reshape(kernels, [-1, filter_shape[0], filter_shape[1], 1])
            #tf.image_summary(layer_name + "/kernels", image_kernel, max_images=2)
    with tf.name_scope('biases'):
        # One bias per output channel, initialized to a small positive value.
        biases = tf.constant(0.1, shape=[filter_shape[3]])
        variable_summaries(biases, layer_name + '/biases')
    with tf.name_scope('convolution'):
        # http://stackoverflow.com/questions/34619177/what-does-tf-nn-conv2d-do-in-tensorflow
        with tf.name_scope('preactivation'):
            preactivation = tf.nn.conv2d(input_tensor, kernels, strides, padding)
        with tf.name_scope('activation'):
            activation = act(preactivation + biases, name='activation')
    return activation


def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
    """Reusable code for making a simple neural net layer.

    It does a matrix multiply, bias add, and then uses relu to nonlinearize.
    It also sets up name scoping so that the resultant graph is easy to read,
    and adds a number of summary ops.

    :param input_tensor: a tensor of inputs
    :param input_dim: the dimensionality of the input
    :param output_dim: the dimensionality of the output
    :param layer_name: the name of the layer
    :param act: the activation function. Default is tf.nn.relu
    :return: a tensor of dimensionality output_dim
    """
    with tf.name_scope('weights'):
        # Xavier-initialized dense weight matrix, one variable per layer_name.
        with tf.variable_scope(layer_name):
            weights = tf.get_variable(
                name="Xavier_initializer",
                shape=[input_dim, output_dim],
                initializer=tf.contrib.layers.xavier_initializer(),
                trainable=True
            )
        variable_summaries(weights, layer_name + '/weights')
    with tf.name_scope('biases'):
        biases = tf.constant(0.1, shape=[output_dim])
        variable_summaries(biases, layer_name + '/biases')
    with tf.name_scope('Wx_plus_b'):
        preactivate = tf.matmul(input_tensor, weights) + biases
        tf.histogram_summary(layer_name + '/pre_activations', preactivate)
    activations = act(preactivate, name='activation')
    tf.histogram_summary(layer_name + '/activations', activations)
    return activations
42.636364
93
0.666311
0db662945fc9f7d8eb637fcf73b854a6e0abc7bd
498
py
Python
2015/Day 8+/helpers/file.py
Fluxanoia/AdventOfCode
1e5771860930dfa31bb488db11ee75eff5142b19
[ "MIT" ]
null
null
null
2015/Day 8+/helpers/file.py
Fluxanoia/AdventOfCode
1e5771860930dfa31bb488db11ee75eff5142b19
[ "MIT" ]
null
null
null
2015/Day 8+/helpers/file.py
Fluxanoia/AdventOfCode
1e5771860930dfa31bb488db11ee75eff5142b19
[ "MIT" ]
null
null
null
import os
from typing import Optional


def drop_newline(line: str) -> str:
    """Return *line* without a single trailing newline, if present.

    Note: parameter renamed from ``str``, which shadowed the builtin.
    ``removesuffix`` strips at most one trailing ``'\n'`` — exactly what the
    original index check did — and is a no-op on the empty string.
    """
    return line.removesuffix('\n')


def load_file(number: int, drop_newlines: Optional[bool]) -> list[str]:
    """Read the day-*number* puzzle input from the sibling ``days`` directory.

    :param number: the day number; resolves ``days/_{number}_input.txt``
    :param drop_newlines: when truthy, strip one trailing newline per line
    :return: the file's lines (raw, or newline-stripped)
    """
    path = os.path.dirname(os.path.dirname(__file__))
    path = os.path.join(path, 'days', f'_{number}_input.txt')
    with open(path, 'r') as file:
        lines = file.readlines()
    return list(map(drop_newline, lines)) if drop_newlines else lines
33.2
73
0.64257
f499be08e376e79780fe4aabc5a919717de84b49
469
py
Python
heart/migrations/0012_remove_trading_models.py
DarkoR12/dafi-system
f923ea4273b04f7acc7016b2f7d03e51eb00b85b
[ "MIT" ]
7
2019-08-03T12:25:18.000Z
2021-11-02T12:51:33.000Z
heart/migrations/0012_remove_trading_models.py
DarkoR12/dafi-system
f923ea4273b04f7acc7016b2f7d03e51eb00b85b
[ "MIT" ]
11
2019-08-20T17:07:37.000Z
2021-11-23T14:26:07.000Z
heart/migrations/0012_remove_trading_models.py
DarkoR12/dafi-system
f923ea4273b04f7acc7016b2f7d03e51eb00b85b
[ "MIT" ]
4
2020-04-06T11:33:02.000Z
2021-10-31T09:10:53.000Z
# Generated by Django 2.1.13 on 2019-11-02 12:21 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('heart', '0011_documentmedia_category'), ] operations = [ migrations.RemoveField( model_name='subject', name='year', ), migrations.DeleteModel( name='Subject', ), migrations.DeleteModel( name='Year', ), ]
19.541667
49
0.547974
952c54d00d1f62fc0ea45ffa4b70b8b224f5c175
2,402
py
Python
data/cirq_new/cirq_program/startCirq_Class726.py
UCLA-SEAL/QDiff
d968cbc47fe926b7f88b4adf10490f1edd6f8819
[ "BSD-3-Clause" ]
null
null
null
data/cirq_new/cirq_program/startCirq_Class726.py
UCLA-SEAL/QDiff
d968cbc47fe926b7f88b4adf10490f1edd6f8819
[ "BSD-3-Clause" ]
null
null
null
data/cirq_new/cirq_program/startCirq_Class726.py
UCLA-SEAL/QDiff
d968cbc47fe926b7f88b4adf10490f1edd6f8819
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 5/15/20 4:49 PM
# @File    : grover.py

# qubit number=4
# total number=21

# NOTE(review): auto-generated circuit (the `# number=N` tags look like
# generator bookkeeping). `cirq.google` was removed in later cirq releases —
# this file presumably targets an old cirq version; confirm before upgrading.

import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np

#thatsNoCode

def make_circuit(n: int, input_qubit):
    """Build the fixed 4-qubit circuit; `n` is unused by the gate sequence."""
    c = cirq.Circuit()  # circuit begin
    c.append(cirq.H.on(input_qubit[0])) # number=1
    c.append(cirq.H.on(input_qubit[1])) # number=2
    c.append(cirq.H.on(input_qubit[1])) # number=7
    c.append(cirq.H.on(input_qubit[2])) # number=3
    c.append(cirq.H.on(input_qubit[3])) # number=4
    c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=5
    c.append(cirq.H.on(input_qubit[0])) # number=16
    c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=17
    c.append(cirq.H.on(input_qubit[0])) # number=18
    # NOTE(review): SWAP is self-inverse, so each even-length run of
    # identical SWAPs below is presumably a generated identity block.
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=8
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=9
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=10
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=11
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=12
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=13
    c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=14
    c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=15
    c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=19
    c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=20
    # circuit end
    return c

def bitstring(bits):
    """Render an iterable of bit-like values as a '0'/'1' string."""
    return ''.join(str(int(b)) for b in bits)

if __name__ == '__main__':
    qubit_count = 4

    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    # Compile for the Sycamore gate set before simulating.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')

    circuit_sample_count =2820

    info = cirq.final_state_vector(circuit)

    qubits = round(log2(len(info)))
    # Probability of each basis state = |amplitude|^2, rounded to 3 places.
    frequencies = {
        np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
        for i in range(2 ** qubits)
    }
    writefile = open("../data/startCirq_Class726.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)
    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)

    writefile.close()
33.361111
80
0.676936
c5c51352decdb969233fd587e57936fb32e7a2f9
17,210
py
Python
test/test_user_tests.py
image72/browserscope
44a63558ee376704d996851099bc7703128201cc
[ "Apache-2.0" ]
22
2015-10-26T15:20:37.000Z
2022-03-11T06:38:17.000Z
test/test_user_tests.py
image72/browserscope
44a63558ee376704d996851099bc7703128201cc
[ "Apache-2.0" ]
10
2016-01-22T18:46:19.000Z
2019-07-19T12:49:51.000Z
test/test_user_tests.py
mcauer/browserscope
a9c0e1a250774f14689e06f93ad274d0b9d725e4
[ "Apache-2.0" ]
12
2015-10-17T09:40:44.000Z
2019-06-08T19:54:36.000Z
#!/usr/bin/python2.5
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License')
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Test admin_rankers."""

__author__ = 'elsigh@google.com (Lindsey Simon)'

import datetime
import logging
import re
import unittest

from django.test.client import Client

from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import db

from base import util
from categories import all_test_sets
from models import result_stats
import models.user_test

import mock_data
import settings

from third_party import mox


def repeat_to_length(string_to_expand, length):
  # Tile string_to_expand until it reaches at least `length`, then truncate.
  # NOTE(review): written for Python 2.5 — `/` here is integer division.
  return (string_to_expand * ((length/len(string_to_expand))+1))[:length]


class TestModels(unittest.TestCase):
  # Datastore-model level tests for models.user_test.

  def testUser(self):
    current_user = users.get_current_user()
    u = models.user_test.User.get_or_insert(current_user.user_id())
    u.email = current_user.email()
    u.save()
    user_q = models.user_test.User.get_by_key_name(current_user.user_id())
    # NOTE(review): assertTrue's 2nd arg is a message — this was presumably
    # meant to be assertEqual(user_q.email, current_user.email()).
    self.assertTrue(user_q.email, current_user.email())

  def testGetTestSetFromResultString(self):
    current_user = users.get_current_user()
    u = models.user_test.User.get_or_insert(current_user.user_id())
    test = models.user_test.Test(user=u, name='Fake Test',
                                 url='http://fakeurl.com/test.html',
                                 description='stuff')
    test.save()
    results_str = 'test_1=0,test_2=1'
    test_set_category = 'usertest_%s' % test.key()
    test_set = models.user_test.Test.get_test_set_from_results_str(
        test_set_category, results_str)
    self.assertTrue(test_set != None)
    self.assertEqual(test_set.category, test_set_category)
    self.assertEqual(len(test_set.tests), 2)
    self.assertEqual('test_1', test_set.tests[0].key)
    self.assertEqual('test_2', test_set.tests[1].key)

  def testGetTestSetFromResultStringThrowsOnLongKeys(self):
    current_user = users.get_current_user()
    u = models.user_test.User.get_or_insert(current_user.user_id())
    test = models.user_test.Test(user=u, name='Fake Test',
                                 url='http://fakeurl.com/test.html',
                                 description='stuff')
    test.save()
    too_long_key_name = repeat_to_length('x',
                                         models.user_test.MAX_KEY_LENGTH + 1)
    results_str = 'test_1=0,test_2=1,%s=2' % too_long_key_name
    test_set_category = 'usertest_%s' % test.key()
    self.assertRaises(models.user_test.KeyTooLong,
                      models.user_test.Test.get_test_set_from_results_str,
                      test_set_category, results_str)


class TestBasics(unittest.TestCase):
  # Smoke tests for the user-test web handlers.

  def setUp(self):
    self.client = Client()

  def testHowto(self):
    response = self.client.get('/user/tests/howto')
    self.assertEqual(200, response.status_code)

  def testGetSettings(self):
    response = self.client.get('/user/settings')
    self.assertEqual(200, response.status_code)

  def testCreateTestBad(self):
    # Empty name: handler re-renders the form (200) and stores nothing.
    csrf_token = self.client.get('/get_csrf').content
    data = {
      'name': '',
      'url': 'http://fakeurl.com/test.html',
      'description': 'whatever',
      'csrf_token': csrf_token,
    }
    response = self.client.post('/user/tests/create', data)
    self.assertEqual(200, response.status_code)
    tests = db.Query(models.user_test.Test)
    self.assertEquals(0, tests.count())

  def testCreateTestOk(self):
    csrf_token = self.client.get('/get_csrf').content
    data = {
      'name': 'FakeTest',
      'url': 'http://fakeurl.com/test.html',
      'description': 'whatever',
      'csrf_token': csrf_token,
    }
    response = self.client.post('/user/tests/create', data)
    # Should redirect to /user/settings when all goes well.
    self.assertEqual(302, response.status_code)
    tests = db.Query(models.user_test.Test)
    self.assertEquals(1, tests.count())


class TestWithData(unittest.TestCase):
  # Tests that need one saved Test (with meta and sandboxid) in the datastore.

  def setUp(self):
    self.client = Client()
    current_user = users.get_current_user()
    u = models.user_test.User.get_or_insert(current_user.user_id())
    u.email = current_user.email()
    u.save()
    meta = models.user_test.TestMeta().save()
    self.test = models.user_test.Test(user=u, name='Fake Test',
                                      url='http://fakeurl.com/test.html',
                                      description='stuff',
                                      sandboxid='sand',
                                      meta=meta)
    self.test.save()

  def saveData(self):
    """Other tests call this function to save simple data."""
    params = {
      'category': self.test.get_memcache_keyname(),
      'results': 'apple=1,banana=20000,coconut=400000',
    }
    csrf_token = self.client.get('/get_csrf').content
    params['csrf_token'] = csrf_token
    response = self.client.get('/beacon', params, **mock_data.UNIT_TEST_UA)
    self.assertEqual(204, response.status_code)

  def testDataValueGreateThanMaxFails(self):
    params = {
      'category': self.test.get_memcache_keyname(),
      'results': 'apple=%s,banana=2,coconut=4' %
          str(models.user_test.MAX_VALUE + 1),
    }
    csrf_token = self.client.get('/get_csrf').content
    params['csrf_token'] = csrf_token
    response = self.client.get('/beacon', params, **mock_data.UNIT_TEST_UA)
    self.assertEqual(500, response.status_code)

  def testDataWithTooManyKeysFails(self):
    results_list = []
    for i in range(models.user_test.MAX_KEY_COUNT + 1):
      results_list.append('key%s=data%s' % (i,i))
    params = {
      'category': self.test.get_memcache_keyname(),
      'results': ','.join(results_list),
    }
    csrf_token = self.client.get('/get_csrf').content
    params['csrf_token'] = csrf_token
    response = self.client.get('/beacon', params, **mock_data.UNIT_TEST_UA)
    self.assertEqual(500, response.status_code)

  def testUpdateTestMeta(self):
    # Invoke the deferred handler forcefully since the SDK won't run
    # our deferred tasks.
    params = {
      'category': self.test.get_memcache_keyname(),
      'results': 'apple=1,banana=2,coconut=4',
    }
    csrf_token = self.client.get('/get_csrf').content
    params['csrf_token'] = csrf_token
    response = self.client.get('/beacon', params, **mock_data.UNIT_TEST_UA)
    self.assertFalse(hasattr(self.test.meta, 'apple_min_value'))
    models.user_test.update_test_meta(self.test.key(),
        [['apple', '1'], ['banana', '2'], ['coconut', '3']])
    # update our reference
    meta = models.user_test.TestMeta.get(self.test.meta.key())
    self.assertTrue(hasattr(meta, 'apple_min_value'))
    self.assertTrue(hasattr(meta, 'apple_max_value'))
    self.assertTrue(hasattr(meta, 'coconut_min_value'))
    self.assertTrue(hasattr(meta, 'coconut_max_value'))
    self.assertEquals(1, meta.apple_min_value)
    self.assertEquals(1, meta.apple_max_value)
    self.assertEquals(2, meta.banana_min_value)
    self.assertEquals(2, meta.banana_max_value)
    self.assertEquals(3, meta.coconut_min_value)
    self.assertEquals(3, meta.coconut_max_value)

    models.user_test.update_test_meta(self.test.key(),
        [['apple', '0'], ['banana', '2'], ['coconut', '30']])
    # update our reference
    meta = models.user_test.TestMeta.get(self.test.meta.key())
    self.assertEquals(0, meta.apple_min_value)
    self.assertEquals(1, meta.apple_max_value)
    self.assertEquals(2, meta.banana_min_value)
    self.assertEquals(2, meta.banana_max_value)
    self.assertEquals(3, meta.coconut_min_value)
    self.assertEquals(30, meta.coconut_max_value)

  def testUserBeaconJsReturn(self):
    response = self.client.get('/user/beacon/%s' % self.test.key())
    self.assertEquals('text/javascript', response['Content-type'])
    # There should be no callback setTimeout in the page.
    self.assertFalse(re.search('window.setTimeout', response.content))
    # There should be no sandboxid in the page.
    self.assertFalse(re.search('sandboxid', response.content))
    # There test_result_var name should be the default.
    self.assertTrue(re.search(settings.USER_TEST_RESULTS_VAR_DEFAULT,
                              response.content))

    # Now test a beacon with a callback specified.
    # This is a regex test ensuring it's there in a setTimeout.
    params = {'callback': 'MyFunction', 'sandboxid': 'foobar'}
    response = self.client.get('/user/beacon/%s' % self.test.key(), params)
    self.assertEquals('text/javascript', response['Content-type'])
    self.assertTrue(re.search('window.setTimeout\(%s' % params['callback'],
                              response.content))
    self.assertTrue(re.search("'sandboxid': '%s'" % params['sandboxid'],
                              response.content))

    # Now test a test_results_var specified.
    params = {'test_results_var': 'MyFunkyVar'}
    response = self.client.get('/user/beacon/%s' % self.test.key(), params)
    self.assertEquals('text/javascript', response['Content-type'])
    # The default should not be present, but our custom one should.
    self.assertFalse(re.search(settings.USER_TEST_RESULTS_VAR_DEFAULT,
                               response.content))
    self.assertTrue(re.search('MyFunkyVar', response.content))

  def testBeaconResultsTableGvizData(self):
    self.saveData()
    response = self.client.get('/gviz_table_data',
        {'category': 'usertest_%s' % self.test.key(), 'v': '3'},
        **mock_data.UNIT_TEST_UA)
    self.assertEqual(200, response.status_code)
    # Note that gviz data has thousands formatted with commas.
    self.assertEqual("google.visualization.Query.setResponse({'version':'0.6', 'reqId':'0', 'status':'OK', 'table': {cols:[{id:'ua',label:'UserAgent',type:'string'},{id:'apple',label:'apple',type:'number'},{id:'banana',label:'banana',type:'number'},{id:'coconut',label:'coconut',type:'number'},{id:'numtests',label:'# Tests',type:'number'}],rows:[{c:[{v:'other',f:'Other',p:{'className':'rt-ua-cur'}},{v:100,f:'1',p:{}},{v:100,f:'20,000',p:{}},{v:100,f:'400,000',p:{}},{v:1}]}]}});",
        response.content)

  # NOTE(review): prefixed "NOtest" so unittest skips it — intentional disable.
  def NOtestBeaconResultsTable(self):
    self.saveData()
    response = self.client.get('/user/tests/table/%s' % self.test.key(),
        {'v': '3'}, **mock_data.UNIT_TEST_UA)
    self.assertEqual(200, response.status_code)
    self.assertEqual('text/html', response['Content-type'])
    strings_to_test_for = [
      # test.name
      '<h3>Fake Test</h3>',
      # test.description
      '<p>stuff</p>',
      # Hidden form field in the browser v select.
      #('<input type="hidden" name="category" '
      # 'value="usertest_%s">' % self.test.key()),
      # Ensures that 1 test was saved and that full category update worked.
      #'1\s+test\s+from\s+1\s+browser',
      # test_keys are there as headers
      'apple',
      'banana',
      'coconut',
    ]
    for string_value in strings_to_test_for:
      self.assertTrue(re.search(string_value, response.content), string_value)

  def testBeaconResultsTableJSON(self):
    self.saveData()
    response = self.client.get('/user/tests/table/%s' % self.test.key(),
        {'v': '3', 'o': 'json'}, **mock_data.UNIT_TEST_UA)
    self.assertEqual(200, response.status_code)
    self.assertEqual('application/json', response['Content-type'])
    self.assertTrue(re.search(
        '"category": "usertest_%s"' % self.test.key(), response.content))

    # callback test
    response = self.client.get('/user/tests/table/%s' % self.test.key(),
        {'v': '3', 'o': 'json', 'callback': 'myFn'}, **mock_data.UNIT_TEST_UA)
    self.assertEqual(200, response.status_code)
    self.assertEqual('application/json', response['Content-type'])
    self.assertTrue(re.search(
        '"category": "usertest_%s"' % self.test.key(), response.content))
    self.assertTrue(re.search('^myFn\(\{', response.content))

  def testBeaconWithSandboxId(self):
    params = {
      'category': self.test.get_memcache_keyname(),
      'results': 'apple=1,banana=2,coconut=4',
    }
    # Run 10 times.
    # NOTE(review): range(11) actually runs 11 iterations — the comments here
    # look off by one relative to the code; behavior preserved as-is.
    for i in range(11):
      csrf_token = self.client.get('/get_csrf').content
      params['csrf_token'] = csrf_token
      response = self.client.get('/beacon', params, **mock_data.UNIT_TEST_UA)
      self.assertEqual(204, response.status_code)

    # The 11th should bomb due to IP throttling.
    csrf_token = self.client.get('/get_csrf').content
    params['csrf_token'] = csrf_token
    response = self.client.get('/beacon', params, **mock_data.UNIT_TEST_UA)
    self.assertEqual(util.BAD_BEACON_MSG + 'IP', response.content)

    # But we should be able to run 11 beacons (i.e. 10 + 1) with a sandboxid.
    params['sandboxid'] = self.test.sandboxid
    # Run 11 times
    for i in range(12):
      csrf_token = self.client.get('/get_csrf').content
      params['csrf_token'] = csrf_token
      response = self.client.get('/beacon', params, **mock_data.UNIT_TEST_UA)
      self.assertEqual(204, response.status_code,
                       'Failed on run %s with sandboxid %s' %
                       (i, params['sandboxid']))


class TestAliasedUserTest(unittest.TestCase):
  """Using HTML5 as an example."""

  def setUp(self):
    self.client = Client()
    current_user = users.get_current_user()
    u = models.user_test.User.get_or_insert(current_user.user_id())
    u.email = current_user.email()
    u.save()
    test = models.user_test.Test(user=u, name='Fake Test',
                                 url='http://fakeurl.com/test.html',
                                 description='stuff')
    # Because GAEUnit won't run the deferred taskqueue properly.
    test.test_keys = ['apple', 'coconut', 'banana']
    test.save()
    self.test = test

    self.test_set = mock_data.MockUserTestSet()
    self.test_set.user_test_category = test.get_memcache_keyname()
    #all_test_sets.AddTestSet(self.test_set)

    params = {
      'category': self.test.get_memcache_keyname(),
      'results': 'apple=1,banana=2,coconut=4',
    }
    csrf_token = self.client.get('/get_csrf').content
    params['csrf_token'] = csrf_token
    response = self.client.get('/beacon', params, **mock_data.UNIT_TEST_UA)
    self.assertEqual(204, response.status_code)

  def testResultStats(self):
    stats = {
      'Other': {
        'summary_display': '7/3',
        'total_runs': 1,
        'summary_score': 233,
        'results': {
          'apple': {'score': 100, 'raw_score': 1, 'display': 1},
          'banana': {'score': 100, 'raw_score': 2, 'display': 2},
          'coconut': {'score': 100, 'raw_score': 4, 'display': 4},
        }
      },
      'total_runs': 1,
    }
    # First get results for the UserTest test_set
    test_set = self.test.get_test_set_from_test_keys(
        ['apple', 'banana', 'coconut'])
    results = result_stats.CategoryStatsManager.GetStats(
        test_set, browsers=('Other',),
        test_keys=['apple', 'banana', 'coconut'], use_memcache=False)
    self.assertEqual(stats, results)

    # Our MockTestSet has GetTestScoreAndDisplayValue &
    # GetRowScoreAndDisplayValue
    stats = {
      'Other': {
        'summary_display': '7',
        'total_runs': 1,
        'summary_score': 14,
        'results': {
          'apple': {'score': 2, 'raw_score': 1, 'display': 'd:2'},
          'banana': {'score': 4, 'raw_score': 2, 'display': 'd:4'},
          'coconut': {'score': 8, 'raw_score': 4, 'display': 'd:8'},
        }
      },
      'total_runs': 1,
    }
    # Now see if the test_set with user_test_category gets the same.
    results = result_stats.CategoryStatsManager.GetStats(
        self.test_set, browsers=('Other',),
        test_keys=['apple', 'banana', 'coconut'], use_memcache=False)
    self.assertEqual(stats, results)


class TestAPI(unittest.TestCase):
  # API-key based test creation.

  def setUp(self):
    self.client = Client()

  def testCreateTestFailsWithInvalidApiKey(self):
    data = {
      'name': 'Test test',
      'url': 'http://fakeurl.com/test.html',
      'description': 'whatever',
      'api_key': 'invalid key'
    }
    response = self.client.post('/user/tests/create', data)
    self.assertEqual(200, response.status_code)
    tests = db.Query(models.user_test.Test)
    self.assertEquals(0, tests.count())
    self.assertTrue(re.search('No user was found', response.content))

  def testCreateTestOk(self):
    current_user = users.get_current_user()
    user = models.user_test.User.get_or_insert(current_user.user_id())
    data = {
      'name': 'Test test',
      'url': 'http://fakeurl.com/test.html',
      'description': 'whatever',
      'api_key': user.key().name()
    }
    # NOTE(review): created via GET here (the non-API path above uses POST) —
    # presumably the API handler accepts GET; confirm against the handler.
    response = self.client.get('/user/tests/create', data)
    self.assertEqual(200, response.status_code)
    tests = db.Query(models.user_test.Test)
    self.assertEquals(1, tests.count())
    self.assertEquals('{"test_key": "%s"}' % tests[0].key(), response.content)
36.852248
483
0.65828
9f82608e9e4ca0d5dc38ee2e206f834eab64ce48
2,737
py
Python
tests/data23/recipe-213027.py
JohannesBuchner/pystrict3
f442a89ac6a23f4323daed8ef829d8e9e1197f90
[ "BSD-2-Clause" ]
1
2020-06-05T08:53:26.000Z
2020-06-05T08:53:26.000Z
tests/data23/recipe-213027.py
JohannesBuchner/pystrict3
f442a89ac6a23f4323daed8ef829d8e9e1197f90
[ "BSD-2-Clause" ]
1
2020-06-04T13:47:19.000Z
2020-06-04T13:47:57.000Z
tests/data23/recipe-213027.py
JohannesBuchner/pystrict3
f442a89ac6a23f4323daed8ef829d8e9e1197f90
[ "BSD-2-Clause" ]
1
2020-11-07T17:02:46.000Z
2020-11-07T17:02:46.000Z
import copy

class _IDup(object):
    """Internal class used only to keep a reference on the actual
    iterator, and to do housekeeping.

    One _IDup wraps the shared source iterator. Each duplicated iterator
    (_IDupped) registers itself here and gets its own queue of items that
    were pulled from the source on behalf of some other duplicate.
    """
    def __init__(self,iterin):
        self.__iter = iterin        # the single shared source iterator
        self.__iterno = 0           # next registration number to hand out
        self.__iteritems = []       # one pending-item queue per duplicate
        self.__hasstopped = None    # StopIteration once the source is done
    def registerIter(self,oldno=-1):
        """Register a new duplicate and return its index.

        With *oldno* given (by split()), the new duplicate starts from a
        deep copy of that duplicate's pending queue so both continue from
        the same position.
        """
        iterno = self.__iterno
        self.__iterno += 1
        if oldno == -1:
            self.__iteritems.append([])
        else:
            self.__iteritems.append( copy.deepcopy(self.__iteritems[oldno]) )
        return iterno
    def getNext(self,iterno):
        """Return the next item for duplicate *iterno*.

        Serves from its queue when possible; otherwise pulls one item from
        the source and hands a deep copy to every other duplicate's queue.
        Raises the stored StopIteration once the source is exhausted and
        the queue is empty.
        """
        if self.__iteritems[iterno]:
            iteritem = self.__iteritems[iterno].pop(0)
        elif self.__hasstopped is not None:
            raise self.__hasstopped
        else:
            try:
                iteritem = next(self.__iter)
            except StopIteration as e:
                self.__hasstopped = e
                raise
            # `idx`/`queue` replace the original `id`/`i`, which shadowed
            # the builtin id().
            for idx, queue in enumerate(self.__iteritems):
                if idx != iterno:
                    queue.append(copy.deepcopy(iteritem))
        return iteritem

class _IDupped(object):
    """Duplicated Iterator class. Each iterator you get by calling isplit
    or split on a splitted iterator will be of this type."""
    def __init__(self,idup,oldno=-1):
        self.__idup = idup
        self.__iterno = idup.registerIter(oldno)
    def __next__(self):
        return self.__idup.getNext(self.__iterno)
    def split(self):
        """Split this iterator into two pieces. The original iterator is
        still callable, as is the sub-iterator."""
        return _IDupped(self.__idup,self.__iterno)
    def __iter__(self):
        return self

def isplit(iterin,splitno=2):
    """Split *iterin* into *splitno* independent iterators over the same
    items (items are deep-copied between duplicates)."""
    idup = _IDup(iterin)
    return tuple(_IDupped(idup) for _ in range(splitno))

# Create first few iterators.
test = ["hello","how","are","you?"]
x, y = isplit(iter(test))

# Test print of iterator y.
print("First item of y.")
print(next(y))

# Create new iterator z after first element of y.
z = y.split()

# Print rest of the elements.
print("Rest in x.")
for i in x:
    print(i)
print("Rest in y.")
for i in y:
    print(i)
print("Rest in z.")
for i in z:
    print(i)
31.102273
79
0.500913
4a13aca846061d95fc97727280e81984c3f4de09
1,169
py
Python
scripts/kmer.py
akikuno/rosalind
7015dc63e493d870e5789e99f2ee523a9b1f5ab9
[ "MIT" ]
null
null
null
scripts/kmer.py
akikuno/rosalind
7015dc63e493d870e5789e99f2ee523a9b1f5ab9
[ "MIT" ]
null
null
null
scripts/kmer.py
akikuno/rosalind
7015dc63e493d870e5789e99f2ee523a9b1f5ab9
[ "MIT" ]
null
null
null
# https://rosalind.info/problems/kmer/

from itertools import product


def fmtfa(fasta: list):
    """Collapse raw FASTA lines into parallel header / sequence lists."""
    headers = []
    seqs = []
    expecting_seq = True
    for line in fasta:
        if ">" in line:
            headers.append(line[1:])
            expecting_seq = True
        elif expecting_seq:
            # First line after a header starts a new sequence record.
            seqs.append(line)
            expecting_seq = False
        else:
            # Continuation line: extend the current sequence.
            seqs[-1] += line
    return headers, seqs


# INPUT -------------------------------------------

file_in = "sample/dataset/kmer.txt"
file_out = "sample/output/kmer.txt"

with open(file_in) as f:
    data = f.read().splitlines()

with open(file_out) as f:
    outcome = f.read().splitlines()

file_in = "case/dataset/kmer.txt"
with open(file_in) as f:
    data_case = f.read().splitlines()

# Prefer the real case dataset when it is non-empty.
if data_case:
    data = data_case

# MAIN -------------------------------------------

_, seq = fmtfa(data)
seq = seq[0]

# Lexicographically ordered 4-mer counts (dicts preserve insertion order).
kmer = {"".join(chars): 0 for chars in product("ACGT", repeat=4)}
for start in range(len(seq) - 3):
    kmer[seq[start:start + 4]] += 1

ans = " ".join(str(count) for count in kmer.values())

# OUTPUT -------------------------------------------

with open("case/output/kmer.txt", "w") as f:
    f.write(ans)

# END
19.483333
71
0.492729
a003815a242f8537a2bb5041697a0b5700e51fe0
111
py
Python
Positive number.py
Pavithra120/positive-numbers-
a884e4c126483e983b84e6035e194c92249dbf07
[ "MIT" ]
null
null
null
Positive number.py
Pavithra120/positive-numbers-
a884e4c126483e983b84e6035e194c92249dbf07
[ "MIT" ]
null
null
null
Positive number.py
Pavithra120/positive-numbers-
a884e4c126483e983b84e6035e194c92249dbf07
[ "MIT" ]
null
null
null
start, end = -4, 19

# Print the non-negative values in [start, end], space-separated on one line.
for num in range(start, end + 1):
    if num < 0:
        continue
    print(num, end=" ")
11.1
34
0.45045
f7f47179de699961d9f8b77e8fa295cf2e988277
406
py
Python
secrets.py
jimbobbennett/mandmcounter
1edc39d42e0cd8249695a7f6397675b3291957bb
[ "MIT" ]
1
2021-08-16T07:25:43.000Z
2021-08-16T07:25:43.000Z
secrets.py
jimbobbennett/mandmcounter
1edc39d42e0cd8249695a7f6397675b3291957bb
[ "MIT" ]
2
2020-04-30T16:46:24.000Z
2020-04-30T16:46:51.000Z
secrets.py
jimbobbennett/mandmcounter
1edc39d42e0cd8249695a7f6397675b3291957bb
[ "MIT" ]
null
null
null
# This file is where you keep secret settings, passwords, and tokens!
# Putting them in the code risks committing or sharing them, which would
# be not great — so everything sensitive lives in this one private file.
# NOTE(review): the module is presumably named secrets.py (CircuitPython
# convention), which shadows the stdlib `secrets` module — verify nothing
# here needs the stdlib one.

secrets = dict(
    ssid='<ssid>',
    password='<password>',
    prediction_key='<prediction_key>',
    prediction_endpoint='<prediction_endpoint>',
)
33.833333
73
0.67734
be49bc9b1423fac0842c682b71c4aa731e3e5fd1
13,674
py
Python
schedule/__init__.py
prayashm/schedule
556de4d24cd11e6a79b71e838265636b33785420
[ "MIT" ]
7
2015-09-17T06:49:33.000Z
2021-01-31T10:54:50.000Z
schedule/__init__.py
prayashm/schedule
556de4d24cd11e6a79b71e838265636b33785420
[ "MIT" ]
null
null
null
schedule/__init__.py
prayashm/schedule
556de4d24cd11e6a79b71e838265636b33785420
[ "MIT" ]
8
2015-07-01T14:50:53.000Z
2019-09-26T14:27:29.000Z
""" Python job scheduling for humans. An in-process scheduler for periodic jobs that uses the builder pattern for configuration. Schedule lets you run Python functions (or any other callable) periodically at pre-determined intervals using a simple, human-friendly syntax. Inspired by Addam Wiggins' article "Rethinking Cron" [1] and the "clockwork" Ruby module [2][3]. Features: - A simple to use API for scheduling jobs. - Very lightweight and no external dependencies. - Excellent test coverage. - Works with Python 2.7 and 3.3 Usage: >>> import schedule >>> import time >>> def job(message='stuff'): >>> print("I'm working on:", message) >>> schedule.every(10).minutes.do(job) >>> schedule.every().hour.do(job, message='things') >>> schedule.every().day.at("10:30").do(job) >>> while True: >>> schedule.run_pending() >>> time.sleep(1) [1] http://adam.heroku.com/past/2010/4/13/rethinking_cron/ [2] https://github.com/tomykaira/clockwork [3] http://adam.heroku.com/past/2010/6/30/replace_cron_with_clockwork/ """ import datetime import functools import logging import random import time from dateutil import parser from dateutil.tz import tzlocal from .tz import tz_offsets logger = logging.getLogger('schedule') class Scheduler(object): def __init__(self): self.jobs = [] def run_pending(self): """Run all jobs that are scheduled to run. Please note that it is *intended behavior that tick() does not run missed jobs*. For example, if you've registered a job that should run every minute and you only call tick() in one hour increments then your job won't be run 60 times in between but only once. """ runnable_jobs = (job for job in self.jobs if job.should_run) for job in sorted(runnable_jobs): job.run() def run_all(self, delay_seconds=0): """Run all jobs regardless if they are scheduled to run or not. A delay of `delay` seconds is added between each job. 
This helps distribute system load generated by the jobs more evenly over time.""" logger.info('Running *all* %i jobs with %is delay inbetween', len(self.jobs), delay_seconds) for job in self.jobs: job.run() time.sleep(delay_seconds) def clear(self): """Deletes all scheduled jobs.""" del self.jobs[:] def every(self, interval=1): """Schedule a new periodic job.""" job = Job(interval) self.jobs.append(job) return job def on(self, *days): """Schedule a new job to run on specific weekdays. See the docstring for `Job.on()`. """ job = self.every() job.unit = 'days' return job.on(*days) @property def next_run(self): """Datetime when the next job should run.""" if not self.jobs: return None return min(self.jobs).next_run @property def idle_seconds(self): """Number of seconds until `next_run`.""" return (self.next_run - datetime.datetime.now(tzlocal()) ).total_seconds() class Job(object): """A periodic job as used by `Scheduler`.""" WEEKDAYS = {'sunday': 0, 'monday': 1, 'tuesday': 2, 'wednesday': 3, 'thursday': 4, 'friday': 5, 'saturday': 6} def __init__(self, interval): self.interval = interval # pause interval * unit between runs self.job_func = None # the job job_func to run self.unit = None # time units, e.g. 'minutes', 'hours', ... 
self.at_time = None # optional time at which this job runs self.between_times = () self.run_days = [] self.start_run = None # datetime after which this job will start self.last_run = None # datetime of the last run self.next_run = None # datetime of the next run self.period = None # timedelta between runs, only valid for def __lt__(self, other): """PeriodicJobs are sortable based on the scheduled time they run next.""" return self.next_run < other.next_run def __repr__(self): fmt_dt = "%Y-%m-%d %H:%M:%S %Z" fmt_t = "%H:%M:%S %Z" def format_time(t): return t.strftime(fmt_dt) if t else '[never]' timestats = '(last run: %s, next run: %s)' % ( format_time(self.last_run), format_time(self.next_run)) job_func_name = self.job_func.__name__ args = [repr(x) for x in self.job_func.args] kwargs = ['%s=%s' % (k, repr(v)) for k, v in self.job_func.keywords.items()] call_repr = job_func_name + '(' + ', '.join(args + kwargs) + ')' if self.run_days: final_days = [] for day in self.run_days: days_str = [k.title() for k, i in Job.WEEKDAYS.items() for d in day if i == d] final_days.append(' or '.join(days_str)) repr_str = 'Every %s' % ' and '.join(final_days) else: repr_str = 'Every %s %s' % ( self.interval, self.unit[:-1] if self.interval == 1 else self.unit) if self.between_times: repr_str += ' between %s' % ' and '.join( t.strftime(fmt_t).strip() for t in self.between_times) elif self.at_time: repr_str += ' at %s' % self.at_time.strftime(fmt_t).strip() if self.start_run: repr_str += ' starting %s' % self.start_run.strftime(fmt_dt) repr_str += ' do %s %s' % (call_repr, timestats) return repr_str @property def second(self): assert self.interval == 1 return self.seconds @property def seconds(self): self.unit = 'seconds' return self @property def minute(self): assert self.interval == 1 return self.minutes @property def minutes(self): self.unit = 'minutes' return self @property def hour(self): assert self.interval == 1 return self.hours @property def hours(self): self.unit = 'hours' 
return self @property def day(self): assert self.interval == 1 return self.days @property def days(self): self.unit = 'days' return self @property def week(self): assert self.interval == 1 return self.weeks @property def weeks(self): self.unit = 'weeks' return self def on(self, *days): """Schedule the job to run on specific weekdays. `days` can be a string (or sequence of strings) with the name of the weekday (case insensitive), e.g. 'Monday', 'sunday', etc, or a starting substring of the name of the weekday, e.g. 'tue', 'Sat', etc. If you specify multiple days, e.g. ('mon', 'wed'), the job will run every Monday and Wednesday. You can also specify OR conditions by separating the day names with a pipe, e.g. ('sun|mon', 'wed|thu'). In this case the job will run every Sunday *or* Monday, and every Wednesday *or* Thursday. """ weeknums = [] for day in days: day_or = set() for d in day.split('|'): for n, i in Job.WEEKDAYS.items(): if n.startswith(d.lower()): day_or.add(i) if day_or: weeknums.append(day_or) self.run_days = weeknums return self def at(self, time_str): """Schedule the job every day at a specific time. Calling this is only valid for jobs scheduled to run every N day(s). """ assert self.unit == 'days' self.at_time = parser.parse(time_str, tzinfos=tz_offsets) if not self.at_time.tzinfo: self.at_time = self.at_time.replace(tzinfo=tzlocal()) return self def between(self, time_str): """Schedule the job at a random time between two timestamps.""" times = [] for t in time_str.split('-'): dt = parser.parse(t, tzinfos=tz_offsets) if not dt.tzinfo: dt = dt.replace(tzinfo=tzlocal()) times.append(dt) self.between_times = tuple(times) return self def starting(self, date_str): self.start_run = parser.parse(date_str, tzinfos=tz_offsets) if not self.start_run.tzinfo: self.start_run = self.start_run.replace(tzinfo=tzlocal()) return self def do(self, job_func, *args, **kwargs): """Specifies the job_func that should be called every time the job runs. 
Any additional arguments are passed on to job_func when the job runs. """ self.job_func = functools.partial(job_func, *args, **kwargs) functools.update_wrapper(self.job_func, job_func) self._schedule_next_run() return self @property def should_run(self): """True if the job should be run now.""" return datetime.datetime.now(tzlocal()) >= self.next_run def run(self): """Run the job and immediately reschedule it.""" logger.info('Running job %s', self) self.job_func() self.last_run = datetime.datetime.now(tzlocal()) self._schedule_next_run() def _schedule_next_run(self): """Compute the instant when this job should run next.""" # Allow *, ** magic temporarily: # pylint: disable=W0142 assert self.unit in ('seconds', 'minutes', 'hours', 'days', 'weeks') starting = self.start_run or datetime.datetime.now(tzlocal()) self.period = datetime.timedelta(**{self.unit: self.interval}) self.next_run = starting + self.period if self.run_days: run_days = self.run_days[:] if self.last_run: starting = self.last_run # Don't consider this day group if it has been run already for day in self.run_days: if self.last_run.isoweekday() in day: run_days.remove(day) days = set() for day in run_days: days.add(random.sample(day, 1)[0]) if not days: days_delta = 0 else: # Calculate the closest day from the starting date delta_all = sorted([(i - starting.isoweekday()) % 7 for i in days]) days_delta = delta_all[0] if (days_delta == 0 and self.last_run and self.last_run.date() == starting.date()): # Make sure the job doesn't run today twice if self.unit == 'days': days_delta = 7 elif self.unit == 'weeks': days_delta = self.interval * 7 self.next_run = starting + datetime.timedelta(days=days_delta) if self.between_times: start, end = self.between_times # Choose a random time between both timestamps self.at_time = (start + datetime.timedelta( seconds=random.randint(0, int( (end - start).total_seconds())))) if self.at_time: self.next_run = self.next_run.replace(hour=self.at_time.hour, 
minute=self.at_time.minute, second=self.at_time.second, microsecond=0, tzinfo=self.at_time.tzinfo) # If we are running for the first time, make sure we run # at the specified time *today* as well if (not self.last_run and not self.run_days and self.at_time > datetime.datetime.now(tzlocal())): self.next_run = self.next_run - datetime.timedelta(days=1) logger.info('Scheduled job %s', self) # The following methods are shortcuts for not having to # create a Scheduler instance: default_scheduler = Scheduler() jobs = default_scheduler.jobs # todo: should this be a copy, e.g. jobs()? def every(interval=1): """Schedule a new periodic job.""" return default_scheduler.every(interval) def on(*days): """Schedule a new job to run on specific weekdays. See the docstring for `Job.on()`. """ return default_scheduler.on(*days) def run_pending(): """Run all jobs that are scheduled to run. Please note that it is *intended behavior that run_pending() does not run missed jobs*. For example, if you've registered a job that should run every minute and you only call run_pending() in one hour increments then your job won't be run 60 times in between but only once. """ default_scheduler.run_pending() def run_all(delay_seconds=0): """Run all jobs regardless if they are scheduled to run or not. A delay of `delay` seconds is added between each job. This can help to distribute the system load generated by the jobs more evenly over time.""" default_scheduler.run_all(delay_seconds=delay_seconds) def clear(): """Deletes all scheduled jobs.""" default_scheduler.clear() def next_run(): """Datetime when the next job should run.""" return default_scheduler.next_run def idle_seconds(): """Number of seconds until `next_run`.""" return default_scheduler.idle_seconds
32.712919
79
0.582785
31435573885bcb872f31aaa226bef82c47ca5133
721
py
Python
code_snippets/api-dashboard-update.py
directionless/datadog-documentation
3b5efd463f1c0e2ab9ef984d60798cba418b496e
[ "BSD-3-Clause" ]
null
null
null
code_snippets/api-dashboard-update.py
directionless/datadog-documentation
3b5efd463f1c0e2ab9ef984d60798cba418b496e
[ "BSD-3-Clause" ]
null
null
null
code_snippets/api-dashboard-update.py
directionless/datadog-documentation
3b5efd463f1c0e2ab9ef984d60798cba418b496e
[ "BSD-3-Clause" ]
null
null
null
from datadog import initialize, api options = { 'api_key': '9775a026f1ca7d1c6c5af9d94d9595a4', 'app_key': '87ce4a24b5553d2e482ea8a8500e71b8ad4554ff' } initialize(**options) title = "My Timeboard" description = "A new and improved timeboard!" graphs = [{ "definition": { "events": [], "requests": [ {"q": "avg:system.mem.free{*} by {host}"} ], "viz": "timeseries" }, "title": "Average Memory Free By Host" }] template_variables = [{ "name": "host1", "prefix": "host", "default": "host:my-host" }] read_only = True api.Timeboard.update(4952, title=title, description=description, graphs=graphs, template_variables=template_variables, read_only=read_only)
24.033333
139
0.654646
552f91ec18aa74463ae36f86141dca13ca7d85b1
2,351
py
Python
Application/emotion_detect.py
Thehunk1206/Depression-Analysis
e8fa310bcd24e496fdf930b1ced1e9a77bf6a604
[ "Apache-2.0" ]
5
2020-09-02T20:35:17.000Z
2021-10-10T07:33:36.000Z
Application/emotion_detect.py
Thehunk1206/Depression-Analysis
e8fa310bcd24e496fdf930b1ced1e9a77bf6a604
[ "Apache-2.0" ]
8
2020-09-25T22:14:58.000Z
2021-11-10T19:59:28.000Z
Application/emotion_detect.py
Thehunk1206/Depression-Analysis
e8fa310bcd24e496fdf930b1ced1e9a77bf6a604
[ "Apache-2.0" ]
1
2021-11-12T11:10:42.000Z
2021-11-12T11:10:42.000Z
import cv2 import tensorflow from tensorflow.keras.preprocessing import image from tensorflow.keras.models import load_model import numpy as np import pandas as pd from datetime import datetime from time import sleep import sys emotion_data_dict = {"time": [], "emotions": []} classes = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise'] name = sys.argv[1] age = sys.argv[2] # =====================util functions============================= def load_model_objects(): print("[ info ]Loading Cascade classifier.....") face_cascade = cv2.CascadeClassifier() isload = face_cascade.load('assets/haarcascade_frontalface_alt.xml') print("[ info ]Loading Model.....") emotion_model = load_model("assets/emotion_recogtion.h5") return face_cascade, emotion_model def getDateTime(): return datetime.now().strftime('%Y-%m-%d %H:%M:%S') def clear(): emotion_data_dict["time"].clear() emotion_data_dict["emotions"].clear() # =============Emotion detection module========================= def detect_emotion(frame, model, face_cascade): flipImage = cv2.flip(frame, 1) grayFrame = cv2.cvtColor(flipImage, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale( grayFrame, scaleFactor=1.2, minNeighbors=3, minSize=(40, 40) ) for (x, y, w, h) in faces: roi_gray = grayFrame[y:y+w, x:x+h] roi_gray = cv2.resize(roi_gray, (48, 48)) img = image.img_to_array(roi_gray) img = np.expand_dims(img, axis=0) img /= 255 pred = model.predict(img) emotion = classes[np.argmax(pred[0])] clear() emotion_data_dict["time"].append(getDateTime()) emotion_data_dict["emotions"].append(emotion) df = pd.DataFrame(emotion_data_dict) with open("results/Module1 "+name+" "+age+".csv", 'a') as f: df.to_csv(f, header=f.tell() == 0) print(emotion) f.close() # =================main================================ def main(): cascade, emotion_model = load_model_objects() capture = cv2.VideoCapture(0) try: while True: sleep(0.7) _, frame = capture.read() detect_emotion(frame, emotion_model, cascade) except KeyboardInterrupt: 
capture.release() pass main()
25.835165
72
0.596767
4d3cf6ecb7bbe1562d87a7c553de4807d5d37be3
237
py
Python
faker/providers/automotive/tl_PH/__init__.py
jacksmith15/faker
bc5dda1983e4d055aa2698ccf0806a462cb8370e
[ "MIT" ]
12,077
2015-01-01T18:30:07.000Z
2022-03-31T23:22:01.000Z
faker/providers/automotive/tl_PH/__init__.py
jacksmith15/faker
bc5dda1983e4d055aa2698ccf0806a462cb8370e
[ "MIT" ]
1,306
2015-01-03T05:18:55.000Z
2022-03-31T02:43:04.000Z
faker/providers/automotive/tl_PH/__init__.py
jacksmith15/faker
bc5dda1983e4d055aa2698ccf0806a462cb8370e
[ "MIT" ]
1,855
2015-01-08T14:20:10.000Z
2022-03-25T17:23:32.000Z
from ..en_PH import Provider as EnPhAutomotiveProvider class Provider(EnPhAutomotiveProvider): """Implement automotive provider for ``tl_PH`` locale. There is no difference from the ``en_PH`` implementation. """ pass
21.545455
61
0.725738
cc3f5bec137603f40a6e98a48ddaa36069c1442e
5,777
py
Python
tests/examples/test_longrun_actions.py
CareBT/carebt
44c6da1e36e1f45baa5de5d5d9a5b733423c325d
[ "Apache-2.0" ]
2
2021-11-08T12:19:39.000Z
2021-12-02T16:10:05.000Z
tests/examples/test_longrun_actions.py
CareBT/carebt
44c6da1e36e1f45baa5de5d5d9a5b733423c325d
[ "Apache-2.0" ]
null
null
null
tests/examples/test_longrun_actions.py
CareBT/carebt
44c6da1e36e1f45baa5de5d5d9a5b733423c325d
[ "Apache-2.0" ]
null
null
null
# Copyright 2021 Andreas Steck (steck.andi@gmail.com) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from io import StringIO import re from unittest.mock import patch from carebt.abstractLogger import LogLevel from carebt.behaviorTreeRunner import BehaviorTreeRunner from carebt.examples.longrun_actions import AddTwoNumbersLongRunningAction from carebt.examples.longrun_actions import AddTwoNumbersMultiTickAction from carebt.examples.longrun_actions import AddTwoNumbersMultiTickActionWithTimeout from carebt.nodeStatus import NodeStatus class TestLongrunActions: @patch('sys.stdout', new_callable=StringIO) def test_AddTwoNumbersMultiTickAction_1tick(self, mock_print): bt_runner = BehaviorTreeRunner() bt_runner.run(AddTwoNumbersMultiTickAction, '1 3 4 => ?result') assert bt_runner.get_status() == NodeStatus.SUCCESS assert bt_runner.get_contingency_message() == '' regex = re.compile(r'AddTwoNumbersMultiTickAction: DONE 3 \+ 4 = 7\n') assert bool(re.match(regex, mock_print.getvalue())) @patch('sys.stdout', new_callable=StringIO) def test_AddTwoNumbersMultiTickAction_3ticks(self, mock_print): bt_runner = BehaviorTreeRunner() bt_runner.run(AddTwoNumbersMultiTickAction, '3 3 4 => ?result') assert bt_runner.get_status() == NodeStatus.SUCCESS assert bt_runner.get_contingency_message() == '' regex = re.compile(r'AddTwoNumbersMultiTickAction: \(tick_count = 1/3\)\n' r'AddTwoNumbersMultiTickAction: \(tick_count = 2/3\)\n' r'AddTwoNumbersMultiTickAction: DONE 3 \+ 4 = 7\n') assert 
bool(re.match(regex, mock_print.getvalue())) ######################################################################## @patch('sys.stdout', new_callable=StringIO) def test_AddTwoNumbersMultiTickActionWithTimeout_1tick(self, mock_print): bt_runner = BehaviorTreeRunner() bt_runner.run(AddTwoNumbersMultiTickActionWithTimeout, '1 3 4 => ?result') assert bt_runner.get_status() == NodeStatus.SUCCESS assert bt_runner.get_contingency_message() == '' regex = re.compile(r'AddTwoNumbersMultiTickActionWithTimeout: DONE 3 \+ 4 = 7\n') assert bool(re.match(regex, mock_print.getvalue())) @patch('sys.stdout', new_callable=StringIO) def test_AddTwoNumbersMultiTickActionWithTimeout_3ticks(self, mock_print): bt_runner = BehaviorTreeRunner() bt_runner.run(AddTwoNumbersMultiTickActionWithTimeout, '3 3 4 => ?result') assert bt_runner.get_status() == NodeStatus.SUCCESS assert bt_runner.get_contingency_message() == '' regex = re.compile(r'AddTwoNumbersMultiTickActionWithTimeout: \(tick_count = 1/3\)\n' r'AddTwoNumbersMultiTickActionWithTimeout: \(tick_count = 2/3\)\n' r'AddTwoNumbersMultiTickActionWithTimeout: DONE 3 \+ 4 = 7\n') assert bool(re.match(regex, mock_print.getvalue())) @patch('sys.stdout', new_callable=StringIO) def test_AddTwoNumbersMultiTickActionWithTimeout_15ticks(self, mock_print): bt_runner = BehaviorTreeRunner() bt_runner.get_logger().set_log_level(LogLevel.OFF) bt_runner.run(AddTwoNumbersMultiTickActionWithTimeout, '15 3 4 => ?result') assert bt_runner.get_status() == NodeStatus.ABORTED assert bt_runner.get_contingency_message() == 'TIMEOUT' regex = re.compile(r'AddTwoNumbersMultiTickActionWithTimeout: \(tick_count = 1/15\)\n' r'AddTwoNumbersMultiTickActionWithTimeout: \(tick_count = 2/15\)\n' r'AddTwoNumbersMultiTickActionWithTimeout: \(tick_count = 3/15\)\n' r'AddTwoNumbersMultiTickActionWithTimeout: \(tick_count = 4/15\)\n' r'AddTwoNumbersMultiTickActionWithTimeout: \(tick_count = 5/15\)\n' r'AddTwoNumbersMultiTickActionWithTimeout: \(tick_count = 6/15\)\n' 
r'AddTwoNumbersMultiTickActionWithTimeout: \(tick_count = 7/15\)\n' r'AddTwoNumbersMultiTickActionWithTimeout: \(tick_count = 8/15\)\n' r'AddTwoNumbersMultiTickActionWithTimeout: \(tick_count = 9/15\)\n' r'AddTwoNumbersMultiTickActionWithTimeout: \(tick_count = 10/15\)\n' r'AddTwoNumbersMultiTickActionWithTimeout: on_timeout\n' r'AddTwoNumbersMultiTickActionWithTimeout: on_abort\n') assert bool(re.match(regex, mock_print.getvalue())) ######################################################################## @patch('sys.stdout', new_callable=StringIO) def test_AddTwoNumbersLongRunningAction_ok(self, mock_print): bt_runner = BehaviorTreeRunner() bt_runner.run(AddTwoNumbersLongRunningAction, '2000 3 4 => ?result') assert bt_runner.get_status() == NodeStatus.SUCCESS assert bt_runner.get_contingency_message() == '' regex = re.compile(r'AddTwoNumbersLongRunningAction: calculating 2000 ms ...\n' r'AddTwoNumbersLongRunningAction: DONE 3 \+ 4 = 7\n') assert bool(re.match(regex, mock_print.getvalue()))
56.087379
95
0.67336
6b62189a09adb28357b1388a809f9b974687478a
320
py
Python
botmanager/plugins/transmission.py
edybsd/BotManager
17e091f9b4b8522f605ccc231fdb0828c2cf8dc4
[ "BSD-2-Clause" ]
null
null
null
botmanager/plugins/transmission.py
edybsd/BotManager
17e091f9b4b8522f605ccc231fdb0828c2cf8dc4
[ "BSD-2-Clause" ]
null
null
null
botmanager/plugins/transmission.py
edybsd/BotManager
17e091f9b4b8522f605ccc231fdb0828c2cf8dc4
[ "BSD-2-Clause" ]
null
null
null
def get_instance(config=None): return PluginTransmission(config) class PluginTransmission(object): def __init__(self,config=None): pass def get_commands(self): print("PluginTransmission get_commands") def process_command(self, args): print("PluginTransmission process_command")
26.666667
51
0.721875
30092b6ebc5b3164fdd28437d63b1823e32f13c7
7,691
py
Python
experiments/ashvin/corl2019/debug/pusher2/offpolicy_ccrig1.py
Asap7772/railrl_evalsawyer
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
[ "MIT" ]
null
null
null
experiments/ashvin/corl2019/debug/pusher2/offpolicy_ccrig1.py
Asap7772/railrl_evalsawyer
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
[ "MIT" ]
null
null
null
experiments/ashvin/corl2019/debug/pusher2/offpolicy_ccrig1.py
Asap7772/railrl_evalsawyer
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
[ "MIT" ]
null
null
null
import rlkit.misc.hyperparameter as hyp from experiments.murtaza.multiworld.skew_fit.reacher.generate_uniform_dataset import generate_uniform_dataset_reacher from multiworld.envs.mujoco.cameras import sawyer_init_camera_zoomed_in, sawyer_pusher_camera_upright_v2 from rlkit.launchers.launcher_util import run_experiment from rlkit.torch.grill.launcher import * import rlkit.torch.vae.vae_schedules as vae_schedules from rlkit.torch.vae.conv_vae import imsize48_default_architecture, imsize48_default_architecture_with_more_hidden_layers from rlkit.launchers.arglauncher import run_variants from rlkit.torch.grill.cvae_experiments import ( grill_her_td3_offpolicy_online_vae_full_experiment, ) from multiworld.envs.pygame.multiobject_pygame_env import Multiobj2DWallEnv from multiworld.envs.mujoco.sawyer_xyz.sawyer_push_multiobj_subset import SawyerMultiobjectEnv from rlkit.torch.vae.conditional_conv_vae import DeltaCVAE from rlkit.torch.vae.vae_trainer import DeltaCVAETrainer from rlkit.data_management.online_conditional_vae_replay_buffer import \ OnlineConditionalVaeRelabelingBuffer x_var = 0.2 x_low = -x_var x_high = x_var y_low = 0.5 y_high = 0.7 t = 0 if __name__ == "__main__": variant = dict( double_algo=False, online_vae_exploration=False, imsize=48, init_camera=sawyer_init_camera_zoomed_in, env_class=Multiobj2DWallEnv, env_kwargs=dict( render_onscreen=False, ball_radius=1.2, wall_thickness=1.5, inner_wall_max_dist=1.5, images_are_rgb=True, show_goal=False, change_colors=True, change_walls=True, ), grill_variant=dict( save_video=True, custom_goal_sampler='replay_buffer', online_vae_trainer_kwargs=dict( beta=20, lr=0, ), save_video_period=50, qf_kwargs=dict( hidden_sizes=[400, 300], ), policy_kwargs=dict( hidden_sizes=[400, 300], ), vf_kwargs=dict( hidden_sizes=[400, 300], ), max_path_length=50, algo_kwargs=dict( batch_size=128, num_epochs=1001, num_eval_steps_per_epoch=1000, num_expl_steps_per_train_loop=1000, num_trains_per_train_loop=500, 
min_num_steps_before_training=1000, vae_training_schedule=vae_schedules.never_train, oracle_data=False, vae_save_period=25, parallel_vae_train=False, dataset_path="/private/home/anair17/data/datasets/Multiobj2DWallEnv_N1020_sawyer_init_camera_zoomed_in_imsize48_random_oracle_split_0.npy", rl_offpolicy_num_training_steps=0, ), td3_trainer_kwargs=dict( discount=0.99, # min_num_steps_before_training=4000, reward_scale=1.0, # render=False, tau=1e-2, ), replay_buffer_class=OnlineConditionalVaeRelabelingBuffer, replay_buffer_kwargs=dict( start_skew_epoch=10, max_size=int(100000), fraction_goals_rollout_goals=0.2, fraction_goals_env_goals=0.5, exploration_rewards_type='None', vae_priority_type='vae_prob', priority_function_kwargs=dict( sampling_method='importance_sampling', decoder_distribution='gaussian_identity_variance', # decoder_distribution='bernoulli', num_latents_to_sample=10, ), power=-1, relabeling_goal_sampling_mode='vae_prior', save_decoded_to_internal_keys=False, ), exploration_goal_sampling_mode='vae_prior', evaluation_goal_sampling_mode='reset_of_env', normalize=False, render=False, exploration_noise=0.2, exploration_type='ou', training_mode='train', testing_mode='test', reward_params=dict( epsilon=0.05, ), observation_key='latent_observation', desired_goal_key='latent_desired_goal', vae_wrapped_env_kwargs=dict( sample_from_true_prior=True, ), algorithm='ONLINE-VAE-SAC-BERNOULLI', # vae_path="ashvin/corl2019/offpolicy/dcvae2/run0/id0/itr_800.pkl", ), train_vae_variant=dict( # representation_size=4, beta=10, beta_schedule_kwargs=dict( x_values=(0, 1500), y_values=(1, 50), ), num_epochs=10, dump_skew_debug_plots=False, # decoder_activation='gaussian', decoder_activation='sigmoid', use_linear_dynamics=False, generate_vae_dataset_kwargs=dict( N=102000, n_random_steps=51, test_p=.9, use_cached=False, show=False, oracle_dataset=True, oracle_dataset_using_set_to_goal=True, non_presampled_goal_img_is_garbage=False, random_rollout_data=True, 
conditional_vae_dataset=True, save_trajectories=True, enviorment_dataset=False, ), vae_trainer_class=DeltaCVAETrainer, vae_class=DeltaCVAE, vae_kwargs=dict( input_channels=3, architecture=imsize48_default_architecture_with_more_hidden_layers, decoder_distribution='gaussian_identity_variance', ), # TODO: why the redundancy? algo_kwargs=dict( start_skew_epoch=5000, is_auto_encoder=False, batch_size=32, lr=1e-3, skew_config=dict( method='vae_prob', power=0, ), skew_dataset=False, priority_function_kwargs=dict( decoder_distribution='gaussian_identity_variance', sampling_method='importance_sampling', # sampling_method='true_prior_sampling', num_latents_to_sample=10, ), use_parallel_dataloading=False, ), save_period=25, ), region="us-west-2", logger_variant=dict( tensorboard=True, ), slurm_variant=dict( timeout_min=48 * 60, cpus_per_task=10, gpus_per_node=1, ), ) search_space = { 'seedid': range(5), 'grill_variant.algo_kwargs.rl_offpolicy_num_training_steps': [0, 100, 1000, 10000, 100000], # 'grill_variant.reward_params.type':['latent_bound'], #, 'latent_distance' 'train_vae_variant.latent_sizes': [(2, 4)], #(3 * objects, 3 * colors) # 'train_vae_variant.beta': [1], # 'train_vae_variant.generate_vae_dataset_kwargs.n_random_steps': [100] } sweeper = hyp.DeterministicHyperparameterSweeper( search_space, default_parameters=variant, ) variants = [] for variant in sweeper.iterate_hyperparameters(): variants.append(variant) run_variants(grill_her_td3_offpolicy_online_vae_full_experiment, variants, run_id=0)
37.334951
155
0.596281
02b2d5e14d7abfeb946a6383486c603fe0ad898f
4,410
py
Python
airflow/providers/google/cloud/sensors/dataproc.py
holly-evans/airflow
865406cbab4defd35c95afbf0a8d5987ff7788b1
[ "Apache-2.0" ]
3
2017-08-06T23:19:48.000Z
2018-05-11T05:50:25.000Z
airflow/providers/google/cloud/sensors/dataproc.py
holly-evans/airflow
865406cbab4defd35c95afbf0a8d5987ff7788b1
[ "Apache-2.0" ]
20
2016-11-21T22:54:28.000Z
2018-10-28T20:03:13.000Z
airflow/providers/google/cloud/sensors/dataproc.py
holly-evans/airflow
865406cbab4defd35c95afbf0a8d5987ff7788b1
[ "Apache-2.0" ]
3
2016-10-13T09:59:18.000Z
2017-02-22T11:29:09.000Z
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """This module contains a Dataproc Job sensor.""" # pylint: disable=C0302 import time from typing import TYPE_CHECKING, Optional, Sequence from google.api_core.exceptions import ServerError from google.cloud.dataproc_v1.types import JobStatus from airflow.exceptions import AirflowException from airflow.providers.google.cloud.hooks.dataproc import DataprocHook from airflow.sensors.base import BaseSensorOperator if TYPE_CHECKING: from airflow.utils.context import Context class DataprocJobSensor(BaseSensorOperator): """ Check for the state of a previously submitted Dataproc job. :param dataproc_job_id: The Dataproc job ID to poll. (templated) :param region: Required. The Cloud Dataproc region in which to handle the request. (templated) :param project_id: The ID of the google cloud project in which to create the cluster. (templated) :param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform. :param wait_timeout: How many seconds wait for job to be ready. 
""" template_fields: Sequence[str] = ('project_id', 'region', 'dataproc_job_id') ui_color = '#f0eee4' def __init__( self, *, dataproc_job_id: str, region: str, project_id: Optional[str] = None, gcp_conn_id: str = 'google_cloud_default', wait_timeout: Optional[int] = None, **kwargs, ) -> None: super().__init__(**kwargs) self.project_id = project_id self.gcp_conn_id = gcp_conn_id self.dataproc_job_id = dataproc_job_id self.region = region self.wait_timeout = wait_timeout self.start_sensor_time: Optional[float] = None def execute(self, context: "Context") -> None: self.start_sensor_time = time.monotonic() super().execute(context) def _duration(self): return time.monotonic() - self.start_sensor_time def poke(self, context: "Context") -> bool: hook = DataprocHook(gcp_conn_id=self.gcp_conn_id) if self.wait_timeout: try: job = hook.get_job( job_id=self.dataproc_job_id, region=self.region, project_id=self.project_id ) except ServerError as err: duration = self._duration() self.log.info("DURATION RUN: %f", duration) if duration > self.wait_timeout: raise AirflowException( f"Timeout: dataproc job {self.dataproc_job_id} " f"is not ready after {self.wait_timeout}s" ) self.log.info("Retrying. Dataproc API returned server error when waiting for job: %s", err) return False else: job = hook.get_job(job_id=self.dataproc_job_id, region=self.region, project_id=self.project_id) state = job.status.state if state == JobStatus.State.ERROR: raise AirflowException(f'Job failed:\n{job}') elif state in { JobStatus.State.CANCELLED, JobStatus.State.CANCEL_PENDING, JobStatus.State.CANCEL_STARTED, }: raise AirflowException(f'Job was cancelled:\n{job}') elif JobStatus.State.DONE == state: self.log.debug("Job %s completed successfully.", self.dataproc_job_id) return True elif JobStatus.State.ATTEMPT_FAILURE == state: self.log.debug("Job %s attempt has failed.", self.dataproc_job_id) self.log.info("Waiting for job %s to complete.", self.dataproc_job_id) return False
39.72973
107
0.666213
944067c44c012bc1f1b98b2d90a4711327c54210
544
py
Python
manage.py
shivamsinghal212/Url-Shortener
4127a993272744f6f8592415314c8e8514d43153
[ "MIT" ]
1
2018-11-17T16:19:57.000Z
2018-11-17T16:19:57.000Z
manage.py
Akatsuki06/URLShortener
44dbbbeaae9e610111a50aa6aca4d2bed1ca5f34
[ "MIT" ]
8
2020-06-05T18:23:15.000Z
2022-03-11T23:23:57.000Z
manage.py
Akatsuki06/URLShortener
44dbbbeaae9e610111a50aa6aca4d2bed1ca5f34
[ "MIT" ]
null
null
null
#!/usr/bin/env python import os import sys if __name__ == "__main__": os.environ.setdefault("DJANGO_SETTINGS_MODULE", "URLShortener.settings") try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv)
34
76
0.689338
20a743e300c926ca1ef1015884fd765c45bd02b8
26,296
py
Python
endpoints_management/control/wsgi.py
robcharlwood/endpoints-management-python
9c69f0766f33faf89a69af9e6bd2aeeb3253b588
[ "Apache-2.0" ]
null
null
null
endpoints_management/control/wsgi.py
robcharlwood/endpoints-management-python
9c69f0766f33faf89a69af9e6bd2aeeb3253b588
[ "Apache-2.0" ]
null
null
null
endpoints_management/control/wsgi.py
robcharlwood/endpoints-management-python
9c69f0766f33faf89a69af9e6bd2aeeb3253b588
[ "Apache-2.0" ]
null
null
null
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """wsgi implement behaviour that provides service control as wsgi middleware. It provides the :class:`Middleware`, which is a WSGI middleware implementation that wraps another WSGI application to uses a provided :class:`endpoints_management.control.client.Client` to provide service control. """ # pylint: disable=too-many-arguments from __future__ import absolute_import from datetime import datetime import httplib import logging import os import socket import uuid import urllib2 import urlparse import wsgiref.util from webob.exc import HTTPServiceUnavailable from ..auth import suppliers, tokens from ..config.service_config import ServiceConfigException from . 
import check_request, quota_request, report_request, service, sm_messages logger = logging.getLogger(__name__) _CONTENT_LENGTH = u'content-length' _DEFAULT_LOCATION = u'global' _METADATA_SERVER_URL = u'http://metadata.google.internal' def _running_on_gce(): headers = {u'Metadata-Flavor': u'Google'} try: request = urllib2.Request(_METADATA_SERVER_URL, headers=headers) response = urllib2.urlopen(request) if response.info().getheader(u'Metadata-Flavor') == u'Google': return True except (urllib2.URLError, socket.error): pass return False def _get_platform(): server_software = os.environ.get(u'SERVER_SOFTWARE', u'') if server_software.startswith(u'Development'): return report_request.ReportedPlatforms.DEVELOPMENT elif os.environ.get(u'KUBERNETES_SERVICE_HOST'): return report_request.ReportedPlatforms.GKE elif _running_on_gce(): # We're either in GAE Flex or GCE if os.environ.get(u'GAE_MODULE_NAME'): return report_request.ReportedPlatforms.GAE_FLEX else: return report_request.ReportedPlatforms.GCE elif server_software.startswith(u'Google App Engine'): return report_request.ReportedPlatforms.GAE_STANDARD return report_request.ReportedPlatforms.UNKNOWN platform = _get_platform() def running_on_devserver(): return platform == report_request.ReportedPlatforms.DEVELOPMENT def add_all(application, project_id, control_client, loader=service.Loaders.FROM_SERVICE_MANAGEMENT): """Adds all endpoints middleware to a wsgi application. Sets up application to use all default endpoints middleware. 
Example: >>> application = MyWsgiApp() # an existing WSGI application >>> >>> # the name of the controlled service >>> service_name = 'my-service-name' >>> >>> # A GCP project with service control enabled >>> project_id = 'my-project-id' >>> >>> # wrap the app for service control >>> from endpoints_management.control import wsgi >>> control_client = client.Loaders.DEFAULT.load(service_name) >>> control_client.start() >>> wrapped_app = add_all(application, project_id, control_client) >>> >>> # now use wrapped_app in place of app Args: application: the wrapped wsgi application project_id: the project_id thats providing service control support control_client: the service control client instance loader (:class:`endpoints_management.control.service.Loader`): loads the service instance that configures this instance's behaviour """ try: a_service = loader.load() if not a_service: raise ValueError(u'No service config loaded.') except (ServiceConfigException, ValueError): logger.exception(u'Failed to load service config, installing server error handler.') # This will answer all requests with HTTP 503 Service Unavailable return HTTPServiceUnavailable authenticator = _create_authenticator(a_service) wrapped_app = Middleware(application, project_id, control_client) if authenticator: wrapped_app = AuthenticationMiddleware(wrapped_app, authenticator) return EnvironmentMiddleware(wrapped_app, a_service) def _next_operation_uuid(): return uuid.uuid4().hex class EnvironmentMiddleware(object): """A WSGI middleware that sets related variables in the environment. 
It attempts to add the following vars: - google.api.config.service - google.api.config.service_name - google.api.config.method_registry - google.api.config.reporting_rules - google.api.config.method_info """ # pylint: disable=too-few-public-methods SERVICE = u'google.api.config.service' SERVICE_NAME = u'google.api.config.service_name' METHOD_REGISTRY = u'google.api.config.method_registry' METHOD_INFO = u'google.api.config.method_info' REPORTING_RULES = u'google.api.config.reporting_rules' def __init__(self, application, a_service): """Initializes a new Middleware instance. Args: application: the wrapped wsgi application a_service (:class:`endpoints_management.gen.servicemanagement_v1_messages.Service`): a service instance """ if not isinstance(a_service, sm_messages.Service): raise ValueError(u"service is None or not an instance of Service") self._application = application self._service = a_service method_registry, reporting_rules = self._configure() self._method_registry = method_registry self._reporting_rules = reporting_rules def _configure(self): registry = service.MethodRegistry(self._service) logs, metric_names, label_names = service.extract_report_spec(self._service) reporting_rules = report_request.ReportingRules.from_known_inputs( logs=logs, metric_names=metric_names, label_names=label_names) return registry, reporting_rules def __call__(self, environ, start_response): environ[self.SERVICE] = self._service environ[self.SERVICE_NAME] = self._service.name environ[self.METHOD_REGISTRY] = self._method_registry environ[self.REPORTING_RULES] = self._reporting_rules parsed_uri = urlparse.urlparse(wsgiref.util.request_uri(environ)) http_method = environ.get(u'REQUEST_METHOD') method_info = self._method_registry.lookup(http_method, parsed_uri.path) if method_info: environ[self.METHOD_INFO] = method_info return self._application(environ, start_response) class Middleware(object): """A WSGI middleware implementation that provides service control. 
Example: >>> app = MyWsgiApp() # an existing WSGI application >>> >>> # the name of the controlled service >>> service_name = 'my-service-name' >>> >>> # A GCP project with service control enabled >>> project_id = 'my-project-id' >>> >>> # wrap the app for service control >>> from endpoints_management.control import client, wsgi, service >>> control_client = client.Loaders.DEFAULT.load(service_name) >>> control_client.start() >>> wrapped_app = wsgi.Middleware(app, control_client, project_id) >>> env_app = wsgi.EnvironmentMiddleware(wrapped,app) >>> >>> # now use env_app in place of app """ # pylint: disable=too-few-public-methods, fixme _NO_API_KEY_MSG = ( u'Method does not allow callers without established identity.' u' Please use an API key or other form of API consumer identity' u' to call this API.') def __init__(self, application, project_id, control_client, next_operation_id=_next_operation_uuid, timer=datetime.utcnow): """Initializes a new Middleware instance. Args: application: the wrapped wsgi application project_id: the project_id thats providing service control support control_client: the service control client instance next_operation_id (func): produces the next operation timer (func[[datetime.datetime]]): a func that obtains the current time """ self._application = application self._project_id = project_id self._control_client = control_client self._next_operation_id = next_operation_id self._timer = timer def __call__(self, environ, start_response): # pylint: disable=too-many-locals method_info = environ.get(EnvironmentMiddleware.METHOD_INFO) if not method_info: # just allow the wrapped application to handle the request logger.debug(u'method_info not present in the wsgi environment' u', no service control') return self._application(environ, start_response) latency_timer = _LatencyTimer(self._timer) latency_timer.start() # Determine if the request can proceed http_method = environ.get(u'REQUEST_METHOD') parsed_uri = 
urlparse.urlparse(wsgiref.util.request_uri(environ)) app_info = _AppInfo() # TODO: determine if any of the more complex ways of getting the request size # (e.g) buffering and counting the wsgi input stream is more appropriate here try: app_info.request_size = int(environ.get(u'CONTENT_LENGTH', report_request.NOT_SET)) except ValueError: logger.warn(u'ignored bad content-length: %s', environ.get(u'CONTENT_LENGTH')) app_info.http_method = http_method app_info.url = parsed_uri # Default to 0 for consumer project number to disable per-consumer # metric reporting if the check request doesn't return one. consumer_project_number = 0 check_info = self._create_check_info(method_info, parsed_uri, environ) if not check_info.api_key and not method_info.allow_unregistered_calls: logger.debug(u"skipping %s, no api key was provided", parsed_uri) error_msg = self._handle_missing_api_key(app_info, start_response) else: check_req = check_info.as_check_request() logger.debug(u'checking %s with %s', method_info, check_request) check_resp = self._control_client.check(check_req) error_msg = self._handle_check_response(app_info, check_resp, start_response) if (check_resp and check_resp.checkInfo and check_resp.checkInfo.consumerInfo): consumer_project_number = ( check_resp.checkInfo.consumerInfo.projectNumber) if error_msg is None: quota_info = self._create_quota_info(method_info, parsed_uri, environ) if not quota_info.quota_info: logger.debug(u'no metric costs for this method') else: quota_request = quota_info.as_allocate_quota_request() quota_response = self._control_client.allocate_quota(quota_request) error_msg = self._handle_quota_response( app_info, quota_response, start_response) if error_msg: # send a report request that indicates that the request failed rules = environ.get(EnvironmentMiddleware.REPORTING_RULES) latency_timer.end() report_req = self._create_report_request(method_info, check_info, app_info, latency_timer, rules, consumer_project_number) logger.debug(u'scheduling 
report_request %s', report_req) self._control_client.report(report_req) return error_msg # update the client with the response latency_timer.app_start() # run the application request in an inner handler that sets the status # and response code on app_info def inner_start_response(status, response_headers, exc_info=None): app_info.response_code = int(status.partition(u' ')[0]) for name, value in response_headers: if name.lower() == _CONTENT_LENGTH: app_info.response_size = int(value) break return start_response(status, response_headers, exc_info) result = self._application(environ, inner_start_response) # perform reporting, result must be joined otherwise the latency record # is incorrect result = b''.join(result) latency_timer.end() app_info.response_size = len(result) rules = environ.get(EnvironmentMiddleware.REPORTING_RULES) report_req = self._create_report_request(method_info, check_info, app_info, latency_timer, rules, consumer_project_number) logger.debug(u'scheduling report_request %s', report_req) self._control_client.report(report_req) return result def _create_report_request(self, method_info, check_info, app_info, latency_timer, reporting_rules, consumer_project_number): # TODO: determine how to obtain the consumer_project_id and the location # correctly report_info = report_request.Info( api_key=check_info.api_key, api_key_valid=app_info.api_key_valid, api_method=method_info.selector, api_version=method_info.api_version, consumer_project_id=self._project_id, # TODO: see above consumer_project_number=consumer_project_number, location=_DEFAULT_LOCATION, # TODO: see above method=app_info.http_method, operation_id=check_info.operation_id, operation_name=check_info.operation_name, backend_time=latency_timer.backend_time, overhead_time=latency_timer.overhead_time, platform=platform, producer_project_id=self._project_id, protocol=report_request.ReportedProtocols.HTTP, request_size=app_info.request_size, request_time=latency_timer.request_time, 
response_code=app_info.response_code, response_size=app_info.response_size, referer=check_info.referer, service_name=check_info.service_name, url=app_info.url ) return report_info.as_report_request(reporting_rules, timer=self._timer) def _get_api_key_info(self, method_info, parsed_uri, environ): api_key = _find_api_key_param(method_info, parsed_uri) if not api_key: api_key = _find_api_key_header(method_info, environ) if not api_key: api_key = _find_default_api_key_param(parsed_uri) return api_key def _create_check_info(self, method_info, parsed_uri, environ): service_name = environ.get(EnvironmentMiddleware.SERVICE_NAME) operation_id = self._next_operation_id() api_key = self._get_api_key_info(method_info, parsed_uri, environ) check_info = check_request.Info( android_cert_fingerprint=environ.get('HTTP_X_ANDROID_CERT', ''), android_package_name=environ.get('HTTP_X_ANDROID_PACKAGE', ''), api_key=api_key, api_key_valid=api_key is not None, client_ip=environ.get(u'REMOTE_ADDR', u''), consumer_project_id=self._project_id, # TODO: switch this to producer_project_id ios_bundle_id=environ.get('HTTP_X_IOS_BUNDLE_IDENTIFIER', ''), operation_id=operation_id, operation_name=method_info.selector, referer=environ.get(u'HTTP_REFERER', u''), service_name=service_name ) return check_info def _create_quota_info(self, method_info, parsed_uri, environ): service_name = environ.get(EnvironmentMiddleware.SERVICE_NAME) operation_id = self._next_operation_id() api_key = self._get_api_key_info(method_info, parsed_uri, environ) service = environ.get(EnvironmentMiddleware.SERVICE) return quota_request.Info( api_key=api_key, api_key_valid=api_key is not None, referer=environ.get(u'HTTP_REFERER', u''), consumer_project_id=self._project_id, operation_id=operation_id, operation_name=method_info.selector, service_name=service_name, quota_info=method_info.quota_info, config_id=service.id, client_ip=environ.get(u'REMOTE_ADDR', u''), ) def _handle_check_response(self, app_info, check_resp, 
start_response): code, detail, api_key_valid = check_request.convert_response( check_resp, self._project_id) if code == httplib.OK: return None # the check was OK # there was problem; the request cannot proceed logger.warn(u'Check failed %d, %s', code, detail) error_msg = b'%d %s' % (code, detail.encode('utf-8')) start_response(error_msg, []) app_info.response_code = code app_info.api_key_valid = api_key_valid return error_msg # the request cannot continue def _handle_quota_response(self, app_info, quota_resp, start_response): code, detail = quota_request.convert_response( quota_resp, self._project_id) if code == httplib.OK: return None # the quota was OK # there was problem; the request cannot proceed logger.warn(u'Quota failed %d, %s', code, detail) error_msg = b'%d %s' % (code, detail.encode('utf-8')) start_response(error_msg, []) app_info.response_code = code return error_msg # the request cannot continue def _handle_missing_api_key(self, app_info, start_response): code = httplib.UNAUTHORIZED detail = self._NO_API_KEY_MSG logger.warn(u'Check not performed %d, %s', code, detail) error_msg = b'%d %s' % (code, detail.encode('utf-8')) start_response(error_msg, []) app_info.response_code = code app_info.api_key_valid = False return error_msg # the request cannot continue class _AppInfo(object): # pylint: disable=too-few-public-methods def __init__(self): self.api_key_valid = True self.response_code = httplib.INTERNAL_SERVER_ERROR self.response_size = report_request.NOT_SET self.request_size = report_request.NOT_SET self.http_method = None self.url = None class _LatencyTimer(object): def __init__(self, timer): self._timer = timer self._start = None self._app_start = None self._end = None def start(self): self._start = self._timer() def app_start(self): self._app_start = self._timer() def end(self): self._end = self._timer() if self._app_start is None: self._app_start = self._end @property def request_time(self): if self._start and self._end: return self._end - 
self._start return None @property def overhead_time(self): if self._start and self._app_start: return self._app_start - self._start return None @property def backend_time(self): if self._end and self._app_start: return self._end - self._app_start return None def _find_api_key_param(info, parsed_uri): params = info.api_key_url_query_params if not params: return None param_dict = urlparse.parse_qs(parsed_uri.query) if not param_dict: return None for q in params: value = param_dict.get(q) if value: # param's values are lists, assume the first value # is what's needed return value[0] return None _DEFAULT_API_KEYS = (u'key', u'api_key') def _find_default_api_key_param(parsed_uri): param_dict = urlparse.parse_qs(parsed_uri.query) if not param_dict: return None for q in _DEFAULT_API_KEYS: value = param_dict.get(q) if value: # param's values are lists, assume the first value # is what's needed return value[0] return None def _find_api_key_header(info, environ): headers = info.api_key_http_header if not headers: return None for h in headers: value = environ.get(u'HTTP_' + h.upper()) if value: return value # headers have single values return None def _create_authenticator(a_service): """Create an instance of :class:`google.auth.tokens.Authenticator`. 
Args: a_service (:class:`endpoints_management.gen.servicemanagement_v1_messages.Service`): a service instance """ if not isinstance(a_service, sm_messages.Service): raise ValueError(u"service is None or not an instance of Service") authentication = a_service.authentication if not authentication: logger.info(u"authentication is not configured in service, " u"authentication checks will be disabled") return issuers_to_provider_ids = {} issuer_uri_configs = {} for provider in authentication.providers: issuer = provider.issuer jwks_uri = provider.jwksUri # Enable openID discovery if jwks_uri is unset open_id = jwks_uri is None issuer_uri_configs[issuer] = suppliers.IssuerUriConfig(open_id, jwks_uri) issuers_to_provider_ids[issuer] = provider.id key_uri_supplier = suppliers.KeyUriSupplier(issuer_uri_configs) jwks_supplier = suppliers.JwksSupplier(key_uri_supplier) authenticator = tokens.Authenticator(issuers_to_provider_ids, jwks_supplier) return authenticator class AuthenticationMiddleware(object): """A WSGI middleware that does authentication checks for incoming requests. In environments where os.environ is replaced with a request-local and thread-independent copy (e.g. Google Appengine), authentication result is added to os.environ so that the wrapped application can make use of the authentication result. """ # pylint: disable=too-few-public-methods USER_INFO = u"google.api.auth.user_info" def __init__(self, application, authenticator): """Initializes an authentication middleware instance. 
Args: application: a WSGI application to be wrapped authenticator (:class:`google.auth.tokens.Authenticator`): an authenticator that authenticates incoming requests """ if not isinstance(authenticator, tokens.Authenticator): raise ValueError(u"Invalid authenticator") self._application = application self._authenticator = authenticator def __call__(self, environ, start_response): method_info = environ.get(EnvironmentMiddleware.METHOD_INFO) if not method_info or not method_info.auth_info: # No authentication configuration for this method logger.debug(u"authentication is not configured") return self._application(environ, start_response) auth_token = _extract_auth_token(environ) user_info = None if not auth_token: logger.debug(u"No auth token is attached to the request") else: try: service_name = environ.get(EnvironmentMiddleware.SERVICE_NAME) user_info = self._authenticator.authenticate(auth_token, method_info.auth_info, service_name) except Exception: # pylint: disable=broad-except logger.debug(u"Cannot decode and verify the auth token. The backend " u"will not be able to retrieve user info", exc_info=True) environ[self.USER_INFO] = user_info # pylint: disable=protected-access if user_info and not isinstance(os.environ, os._Environ): # Set user info into os.environ only if os.environ is replaced # with a request-local copy os.environ[self.USER_INFO] = user_info response = self._application(environ, start_response) # Erase user info from os.environ for safety and sanity. if self.USER_INFO in os.environ: del os.environ[self.USER_INFO] return response _ACCESS_TOKEN_PARAM_NAME = u"access_token" _BEARER_TOKEN_PREFIX = u"Bearer " _BEARER_TOKEN_PREFIX_LEN = len(_BEARER_TOKEN_PREFIX) def _extract_auth_token(environ): # First try to extract auth token from HTTP authorization header. 
auth_header = environ.get(u"HTTP_AUTHORIZATION") if auth_header: if auth_header.startswith(_BEARER_TOKEN_PREFIX): return auth_header[_BEARER_TOKEN_PREFIX_LEN:] return # Then try to read auth token from query. parameters = urlparse.parse_qs(environ.get(u"QUERY_STRING", u"")) if _ACCESS_TOKEN_PARAM_NAME in parameters: auth_token, = parameters[_ACCESS_TOKEN_PARAM_NAME] return auth_token
38.22093
94
0.655575
b2a87acaec55bbd5085906a5d7d60aa258f3c3f1
9,580
py
Python
dowhy/causal_estimators/distance_matching_estimator.py
leo-ware/dowhy
3a2a79e2159a7f29456dd419a3c90395a384364e
[ "MIT" ]
2,904
2019-05-07T08:09:33.000Z
2022-03-31T18:28:41.000Z
dowhy/causal_estimators/distance_matching_estimator.py
leo-ware/dowhy
3a2a79e2159a7f29456dd419a3c90395a384364e
[ "MIT" ]
238
2019-05-11T02:57:22.000Z
2022-03-31T23:47:18.000Z
dowhy/causal_estimators/distance_matching_estimator.py
leo-ware/dowhy
3a2a79e2159a7f29456dd419a3c90395a384364e
[ "MIT" ]
527
2019-05-08T16:23:45.000Z
2022-03-30T21:02:41.000Z
from sklearn.neighbors import NearestNeighbors import pandas as pd import numpy as np from dowhy.causal_estimator import CausalEstimate, CausalEstimator class DistanceMatchingEstimator(CausalEstimator): """ Simple matching estimator for binary treatments based on a distance metric. """ Valid_Dist_Metric_Params = ['p', 'V', 'VI', 'w'] def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # Check if the treatment is one-dimensional if len(self._treatment_name) > 1: error_msg = str(self.__class__) + "cannot handle more than one treatment variable" raise Exception(error_msg) # Checking if the treatment is binary if not pd.api.types.is_bool_dtype(self._data[self._treatment_name[0]]): error_msg = "Distance Matching method is applicable only for binary treatments" self.logger.error(error_msg) raise Exception(error_msg) # Setting the number of matches per data point if getattr(self, 'num_matches_per_unit', None) is None: self.num_matches_per_unit = 1 # Default distance metric if not provided by the user if getattr(self, 'distance_metric', None) is None: self.distance_metric = 'minkowski' # corresponds to euclidean metric with p=2 if getattr(self, 'exact_match_cols', None) is None: self.exact_match_cols = None self.logger.debug("Back-door variables used:" + ",".join(self._target_estimand.get_backdoor_variables())) self._observed_common_causes_names = self._target_estimand.get_backdoor_variables() if self._observed_common_causes_names: if self.exact_match_cols is not None: self._observed_common_causes_names = [v for v in self._observed_common_causes_names if v not in self.exact_match_cols] self._observed_common_causes = self._data[self._observed_common_causes_names] # Convert the categorical variables into dummy/indicator variables # Basically, this gives a one hot encoding for each category # The first category is taken to be the base line. 
self._observed_common_causes = pd.get_dummies(self._observed_common_causes, drop_first=True) else: self._observed_common_causes = None error_msg = "No common causes/confounders present. Distance matching methods are not applicable" self.logger.error(error_msg) raise Exception(error_msg) # Dictionary of any user-provided params for the distance metric # that will be passed to sklearn nearestneighbors self.distance_metric_params = {} for param_name in self.Valid_Dist_Metric_Params: param_val = getattr(self, param_name, None) if param_val is not None: self.distance_metric_params[param_name] = param_val self.logger.info("INFO: Using Distance Matching Estimator") self.symbolic_estimator = self.construct_symbolic_estimator(self._target_estimand) self.logger.info(self.symbolic_estimator) self.matched_indices_att = None self.matched_indices_atc = None def _estimate_effect(self): # this assumes a binary treatment regime updated_df = pd.concat([self._observed_common_causes, self._data[[self._outcome_name, self._treatment_name[0]]]], axis=1) if self.exact_match_cols is not None: updated_df = pd.concat([updated_df, self._data[self.exact_match_cols]], axis=1) treated = updated_df.loc[self._data[self._treatment_name[0]] == 1] control = updated_df.loc[self._data[self._treatment_name[0]] == 0] numtreatedunits = treated.shape[0] numcontrolunits = control.shape[0] fit_att, fit_atc = False, False est = None # TODO remove neighbors that are more than a given radius apart if self._target_units == "att": fit_att = True elif self._target_units == "atc": fit_atc = True elif self._target_units == "ate": fit_att = True fit_atc = True else: raise ValueError("Target units string value not supported") if fit_att: # estimate ATT on treated by summing over difference between matched neighbors if self.exact_match_cols is None: control_neighbors = ( NearestNeighbors(n_neighbors=self.num_matches_per_unit, metric=self.distance_metric, algorithm='ball_tree', **self.distance_metric_params) 
.fit(control[self._observed_common_causes.columns].values) ) distances, indices = control_neighbors.kneighbors( treated[self._observed_common_causes.columns].values) self.logger.debug("distances:") self.logger.debug(distances) att = 0 for i in range(numtreatedunits): treated_outcome = treated.iloc[i][self._outcome_name].item() control_outcome = np.mean(control.iloc[indices[i]][self._outcome_name].values) att += treated_outcome - control_outcome att /= numtreatedunits if self._target_units == "att": est = att elif self._target_units == "ate": est = att*numtreatedunits # Return indices in the original dataframe self.matched_indices_att = {} treated_df_index = treated.index.tolist() for i in range(numtreatedunits): self.matched_indices_att[treated_df_index[i]] = control.iloc[indices[i]].index.tolist() else: grouped = updated_df.groupby(self.exact_match_cols) att = 0 for name, group in grouped: treated = group.loc[group[self._treatment_name[0]] == 1] control = group.loc[group[self._treatment_name[0]] == 0] if treated.shape[0] == 0: continue control_neighbors = ( NearestNeighbors(n_neighbors=self.num_matches_per_unit, metric=self.distance_metric, algorithm='ball_tree', **self.distance_metric_params) .fit(control[self._observed_common_causes.columns].values) ) distances, indices = control_neighbors.kneighbors( treated[self._observed_common_causes.columns].values) self.logger.debug("distances:") self.logger.debug(distances) for i in range(numtreatedunits): treated_outcome = treated.iloc[i][self._outcome_name].item() control_outcome = np.mean(control.iloc[indices[i]][self._outcome_name].values) att += treated_outcome - control_outcome #self.matched_indices_att[treated_df_index[i]] = control.iloc[indices[i]].index.tolist() att /= numtreatedunits if self._target_units == "att": est = att elif self._target_units == "ate": est = att*numtreatedunits if fit_atc: #Now computing ATC treated_neighbors = ( NearestNeighbors(n_neighbors=self.num_matches_per_unit, 
metric=self.distance_metric, algorithm='ball_tree', **self.distance_metric_params) .fit(treated[self._observed_common_causes.columns].values) ) distances, indices = treated_neighbors.kneighbors( control[self._observed_common_causes.columns].values) atc = 0 for i in range(numcontrolunits): control_outcome = control.iloc[i][self._outcome_name].item() treated_outcome = np.mean(treated.iloc[indices[i]][self._outcome_name].values) atc += treated_outcome - control_outcome atc /= numcontrolunits if self._target_units == "atc": est = atc elif self._target_units == "ate": est += atc*numcontrolunits est /= (numtreatedunits+numcontrolunits) # Return indices in the original dataframe self.matched_indices_atc = {} control_df_index = control.index.tolist() for i in range(numcontrolunits): self.matched_indices_atc[control_df_index[i]] = treated.iloc[indices[i]].index.tolist() estimate = CausalEstimate(estimate=est, control_value=self._control_value, treatment_value=self._treatment_value, target_estimand=self._target_estimand, realized_estimand_expr=self.symbolic_estimator) return estimate def construct_symbolic_estimator(self, estimand): expr = "b: " + ", ".join(estimand.outcome_variable) + "~" var_list = estimand.treatment_variable + estimand.get_backdoor_variables() expr += "+".join(var_list) return expr
47.425743
134
0.599374
a4b64bd0f91b5e79be86872b9a0ed92efc692be7
9,015
py
Python
test/unit/test_router.py
vzhd1701/plugin.video.kinomanuz
4344ccc351d20c6da92b2bda6982adcdf47b1037
[ "MIT" ]
1
2020-02-22T18:21:09.000Z
2020-02-22T18:21:09.000Z
test/unit/test_router.py
vzhd1701/plugin.video.kinomanuz
4344ccc351d20c6da92b2bda6982adcdf47b1037
[ "MIT" ]
null
null
null
test/unit/test_router.py
vzhd1701/plugin.video.kinomanuz
4344ccc351d20c6da92b2bda6982adcdf47b1037
[ "MIT" ]
null
null
null
# coding=utf-8 # pylint: disable=protected-access, no-self-use import unittest try: import mock except ImportError: from unittest import mock from resources.internal import router class TestPathSections(unittest.TestCase): def test_good_path(self): test_paths = ( ("/", [""]), ("/some/path", ["some", "path"]), ("/some/path/", ["some", "path", ""]), ) for good_path, good_sections in test_paths: path_sections = router._get_path_sections(good_path) self.assertEqual(path_sections, good_sections) def test_exception_start_with_slash(self): bad_path = "some/path" with self.assertRaisesRegexp( ValueError, r'^Path should start with "/" in path .*' ): router._get_path_sections(bad_path) def test_exception_empty_midsection(self): bad_path = "/some//path" with self.assertRaisesRegexp(ValueError, r"^Empty midsection in path .*"): router._get_path_sections(bad_path) class TestRouteParse(unittest.TestCase): def test_good_routes(self): test_paths = [ ("/some/path", ["some", "path"]), ("/some/<path>", ["some", ("string", "path")]), ] for path_type in router.PATH_TYPES: test_paths.append( ("/some/<{}:path>".format(path_type), ["some", (path_type, "path")]) ) for good_path, good_route in test_paths: path_sections = router._route_parse(good_path) self.assertEqual(path_sections, good_route) def test_exception_duplicate_variable_name(self): bad_path = "/some/<path>/<path>" with self.assertRaisesRegexp(ValueError, r"^Duplicate variable name .*"): router._route_parse(bad_path) def test_exception_unknown_variable_type(self): bad_path = "/some/<unknown_variable_type:path>" with self.assertRaisesRegexp(ValueError, r"^Unknown variable type .*"): router._route_parse(bad_path) def test_exception_bad_variable(self): bad_paths = ( "/some/<:path>", "/some/<int:>", "/some/<:>", "/some/<>", ) for bad_path in bad_paths: with self.assertRaisesRegexp(ValueError, r"^Bad variable .*"): router._route_parse(bad_path) class TestRoute(unittest.TestCase): patcher = None @classmethod def setUpClass(cls): 
cls.patcher = mock.patch("resources.internal.router.REGISTERED_ROUTES", {}) cls.patcher.start() @classmethod def tearDownClass(cls): cls.patcher.stop() def setUp(self): router.REGISTERED_ROUTES = {} def test_good_route(self): @router.route("/") def test_function(): pass self.assertEqual(router.REGISTERED_ROUTES, {("",): test_function}) def test_exception_bad_parameters(self): # pylint: disable=unused-variable with self.assertRaisesRegexp( ValueError, r"^Function .* does not have parameters: .*" ): # noinspection PyUnresolvedReferences @router.route("/<missing_argument>") def test_function(): pass def test_exception_duplicate_routes(self): # pylint: disable=unused-variable with self.assertRaisesRegexp(ValueError, r"^Route already defined for path .*"): @router.route("/") def test_function1(): pass @router.route("/") def test_function2(): pass class TestResolve(unittest.TestCase): patcher = None @classmethod def setUpClass(cls): cls.patcher = mock.patch("resources.internal.router.REGISTERED_ROUTES", {}) cls.patcher.start() @classmethod def tearDownClass(cls): cls.patcher.stop() def setUp(self): router.REGISTERED_ROUTES = {} def test_good_resolve_no_args(self): def test_function(): pass test_function = mock.MagicMock(spec=test_function) router.route("/other/path1")(test_function) router.route("/other/path2")(test_function) router.route("/")(test_function) router.resolve("plugin://test_plugin/") router.resolve("plugin://test_plugin/other/path1") test_function.assert_has_calls([mock.call(), mock.call()]) def test_good_resolve_with_args(self): # pylint: disable=unused-argument # noinspection PyUnusedLocal def test_function(search_query): pass test_function = mock.MagicMock(spec=test_function) test_function.__code__.co_varnames = ["search_query"] router.route("/search/<search_query>")(test_function) router.resolve("plugin://test_plugin/search/something") test_function.assert_called_once_with(search_query="something") def test_good_resolve_with_args_multiple_types(self): # 
pylint: disable=unused-argument # noinspection PyUnusedLocal def test_function(test_parameter): pass test_function = mock.MagicMock(spec=test_function) test_function.__code__.co_varnames = ["test_parameter"] router.route("/page/<int:test_parameter>")(test_function) router.route("/page/<float:test_parameter>")(test_function) router.resolve("plugin://test_plugin/page/10") test_function.assert_called_once_with(test_parameter=10) def test_good_resolve_with_args_and_query(self): # pylint: disable=unused-argument # noinspection PyUnusedLocal def test_function(query, search_query): pass test_function = mock.MagicMock(spec=test_function) test_function.__code__.co_varnames = ["search_query"] router.route("/search/<search_query>")(test_function) router.resolve("plugin://test_plugin/search/something?testing=123") test_function.assert_called_once_with( search_query="something", query={"testing": "123"} ) def test_exception_bad_url(self): with self.assertRaisesRegexp(ValueError, r"^Failed to resolve the url .*"): router.resolve("plugin://test_plugin/search/something?testing=123") class TestPathFor(unittest.TestCase): patcher = None @classmethod def setUpClass(cls): cls.patcher = mock.patch("resources.internal.router.REGISTERED_ROUTES", {}) cls.patcher.start() @classmethod def tearDownClass(cls): cls.patcher.stop() def setUp(self): # pylint: disable=unused-variable,unused-argument router.REGISTERED_ROUTES = {} @router.route("/") def test_function_root(): pass # noinspection PyUnusedLocal @router.route("/movies") @router.route("/movies/page/<int:page>") def test_function_args(page): pass @router.route("/ambiguous1") @router.route("/ambiguous2") def test_function_ambiguous(): pass def test_good_paths_for(self): good_path = router.path_for("test_function_root") self.assertEqual(good_path, "/") def test_good_paths_for_query(self): good_path = router.path_for("test_function_root", query={"test": "test"}) self.assertEqual(good_path, "/?test=test") def test_good_paths_for_args(self): 
good_path = router.path_for("test_function_args", path_vars={"page": 10}) self.assertEqual(good_path, "/movies/page/10") def test_exception_bad_vars(self): with self.assertRaisesRegexp(ValueError, r"path_vars must be a dictionary"): router.path_for("test_function_root", path_vars=[]) def test_exception_bad_query(self): with self.assertRaisesRegexp(ValueError, r"query must be a dictionary"): router.path_for("test_function_root", query=[]) def test_exception_bad_function_params(self): with self.assertRaisesRegexp( ValueError, r"^Function .* does not have parameters: .*" ): router.path_for( "test_function_root", path_vars={"non_existing_param": None} ) def test_exception_ambiguous_function_params(self): with self.assertRaisesRegexp( ValueError, r"^Ambiguous paths for .* with this set of variables" ): router.path_for("test_function_ambiguous") def test_exception_no_functions(self): with self.assertRaisesRegexp( ValueError, r"^No functions registered for endpoint .*" ): router.path_for("test_function_nonexistent") def test_exception_bad_args_type(self): with self.assertRaisesRegexp( ValueError, r"^Variable .* has wrong variable type for function .*" ): router.path_for("test_function_args", path_vars={"page": None}) if __name__ == "__main__": unittest.main()
31.193772
88
0.63239
ff072c40b9b43d8d50805e25ea6d19add99f26d7
388
py
Python
python-leetcode/1103.py
MDGSF/interviews
9faa9aacdb0cfbb777d4d3d4d1b14b55ca2c9f76
[ "MIT" ]
12
2020-01-16T08:55:27.000Z
2021-12-02T14:52:39.000Z
python-leetcode/1103.py
MDGSF/interviews
9faa9aacdb0cfbb777d4d3d4d1b14b55ca2c9f76
[ "MIT" ]
null
null
null
python-leetcode/1103.py
MDGSF/interviews
9faa9aacdb0cfbb777d4d3d4d1b14b55ca2c9f76
[ "MIT" ]
1
2019-12-11T12:00:38.000Z
2019-12-11T12:00:38.000Z
class Solution: def distributeCandies(self, candies: int, num_people: int) -> List[int]: result = [0] * num_people i, curCandy = 0, 1 while candies > 0: if candies > curCandy: result[i] += curCandy candies -= curCandy curCandy += 1 i = (i + 1) % num_people else: result[i] += candies candies = 0 return result
25.866667
74
0.554124
0aae5df144d2b8ea9d5fc26d186fd14c2a2416ed
18,271
py
Python
uniswap/analyse.py
amis-erc20/uniswap-analytics
16eb90d29a2266fdd7c81cfdb2c39eeef24a2bff
[ "MIT" ]
null
null
null
uniswap/analyse.py
amis-erc20/uniswap-analytics
16eb90d29a2266fdd7c81cfdb2c39eeef24a2bff
[ "MIT" ]
null
null
null
uniswap/analyse.py
amis-erc20/uniswap-analytics
16eb90d29a2266fdd7c81cfdb2c39eeef24a2bff
[ "MIT" ]
null
null
null
import json import logging import os import pickle from collections import defaultdict from math import sqrt from typing import List, Iterable import requests from retrying import retry from web3.utils.events import get_event_data from config import uniswap_factory, web3, pool, UNISWAP_EXCHANGE_ABI, STR_ERC_20_ABI, HARDCODED_INFO, \ STR_CAPS_ERC_20_ABI, ERC_20_ABI, HISTORY_BEGIN_BLOCK, CURRENT_BLOCK, HISTORY_CHUNK_SIZE, ETH, UNISWAP_BEGIN_BLOCK, \ LIQUIDITY_DATA, PROVIDERS_DATA, TOKENS_DATA, INFOS_DUMP, LAST_BLOCK_DUMP, \ ALL_EVENTS, EVENT_TRANSFER, EVENT_ADD_LIQUIDITY, EVENT_REMOVE_LIQUIDITY, EVENT_ETH_PURCHASE, ROI_DATA, \ EVENT_TOKEN_PURCHASE, VOLUME_DATA, TOTAL_VOLUME_DATA, web3_infura, GRAPHQL_ENDPOINT, GRAPHQL_LOGS_QUERY, \ LOGS_BLOCKS_CHUNK from exchange_info import ExchangeInfo from roi_info import RoiInfo from utils import timeit, bytes_to_str @timeit def load_token_count() -> int: return uniswap_factory.functions.tokenCount().call() @timeit def load_tokens(token_count: int) -> List[str]: if not token_count: token_count = load_token_count() tokens = [uniswap_factory.functions.getTokenWithId(i).call() for i in range(1, token_count + 1)] logging.info('Found {} tokens'.format(len(tokens))) return tokens @timeit def load_exchanges(tokens: List[str]) -> List[str]: if not tokens: tokens = load_tokens() exchanges = [uniswap_factory.functions.getExchange(t).call() for t in tokens] logging.info('Found {} exchanges'.format(len(exchanges))) return exchanges def load_exchange_data_impl(token_address, exchange_address): exchange = web3.eth.contract(abi=UNISWAP_EXCHANGE_ABI, address=exchange_address) token = web3.eth.contract(abi=STR_ERC_20_ABI, address=token_address) if token_address in HARDCODED_INFO: token_name, token_symbol, token_decimals = HARDCODED_INFO[token_address] else: try: token_name = token.functions.name().call() token_symbol = token.functions.symbol().call() token_decimals = token.functions.decimals().call() except: try: token = 
web3.eth.contract(abi=STR_CAPS_ERC_20_ABI, address=token_address) token_name = token.functions.NAME().call() token_symbol = token.functions.SYMBOL().call() token_decimals = token.functions.DECIMALS().call() except: try: token = web3.eth.contract(abi=ERC_20_ABI, address=token_address) token_name = bytes_to_str(token.functions.name().call()) token_symbol = bytes_to_str(token.functions.symbol().call()) token_decimals = token.functions.decimals().call() except: logging.warning('FUCKED UP {}'.format(token_address)) return None try: token_balance = token.functions.balanceOf(exchange.address).call(block_identifier=CURRENT_BLOCK) except: logging.warning('FUCKED UP {}'.format(token_address)) return None eth_balance = web3.eth.getBalance(exchange.address, block_identifier=CURRENT_BLOCK) return ExchangeInfo(token.address, token_name, token_symbol, token_decimals, exchange.address, eth_balance, token_balance) @timeit def load_exchange_infos(infos: List[ExchangeInfo]) -> List[ExchangeInfo]: token_count = load_token_count() tokens = load_tokens(token_count) exchanges = load_exchanges(tokens) new_infos = filter(None, [load_exchange_data_impl(t, e) for (t, e) in zip(tokens, exchanges)]) if infos: known_tokens = dict((info.token_address, info) for info in infos) for new_info in new_infos: info = known_tokens.get(new_info.token_address) if info: info.eth_balance = new_info.eth_balance info.token_balance = new_info.token_balance else: infos.append(new_info) else: infos += new_infos logging.info('Loaded info about {} exchanges'.format(len(exchanges))) return infos def get_chart_range(start: int = HISTORY_BEGIN_BLOCK) -> Iterable[int]: return range(start, CURRENT_BLOCK, HISTORY_CHUNK_SIZE) @timeit def load_timestamps() -> List[int]: return [web3.eth.getBlock(n)['timestamp'] for n in get_chart_range()] def get_logs(address: str, topics: List, start_block: int) -> List: @retry(stop_max_attempt_number=3, wait_fixed=1) def get_chunk(start): resp = requests.post( GRAPHQL_ENDPOINT, 
json={'query': GRAPHQL_LOGS_QUERY.format(fromBlock=start, toBlock=min(start + LOGS_BLOCKS_CHUNK, CURRENT_BLOCK), addresses=json.dumps([address]), topics=json.dumps(topics))} ) return postprocess_graphql_response(resp.json()['data']['logs']) log_chunks = pool.map(get_chunk, range(start_block, CURRENT_BLOCK, LOGS_BLOCKS_CHUNK)) logging.info('Loaded logs for {} successfully'.format(address)) return [log for chunk in log_chunks for log in chunk] def postprocess_graphql_response(logs: List[dict]) -> List[dict]: return [{ 'topics': log['topics'], 'blockNumber': int(log['transaction']['block']['number'], 16), 'data': log['data'] } for log in logs] @timeit def load_logs(infos: List[ExchangeInfo]) -> List[ExchangeInfo]: for info in infos: if info.logs: block_number_to_check = info.logs[-1]['blockNumber'] + 1 else: block_number_to_check = UNISWAP_BEGIN_BLOCK exchange = web3.eth.contract(abi=UNISWAP_EXCHANGE_ABI, address=info.exchange_address) new_logs = get_logs(exchange.address, [ALL_EVENTS], block_number_to_check) info.logs += new_logs logging.info('Loaded transfer logs for {} exchanges'.format(len(infos))) return infos @timeit def populate_providers(infos: List[ExchangeInfo]) -> List[ExchangeInfo]: for info in infos: exchange = web3.eth.contract(abi=UNISWAP_EXCHANGE_ABI, address=info.exchange_address) info.providers = defaultdict(int) for log in info.logs: if log['topics'][0].hex() != EVENT_TRANSFER: continue event = get_event_data(exchange.events.Transfer._get_event_abi(), log) if event['args']['from'] == '0x0000000000000000000000000000000000000000': info.providers[event['args']['to']] += event['args']['value'] elif event['args']['to'] == '0x0000000000000000000000000000000000000000': info.providers[event['args']['from']] -= event['args']['value'] else: info.providers[event['args']['from']] -= event['args']['value'] info.providers[event['args']['to']] += event['args']['value'] logging.info('Loaded info about providers of {} exchanges'.format(len(infos))) return infos 
@timeit def populate_roi(infos: List[ExchangeInfo]) -> List[ExchangeInfo]: for info in infos: info.roi = list() exchange = web3.eth.contract(abi=UNISWAP_EXCHANGE_ABI, address=info.exchange_address) i = 0 eth_balance, token_balance = 0, 0 for block_number in get_chart_range(): dm_numerator, dm_denominator, trade_volume = 1, 1, 0 while i < len(info.logs) and info.logs[i]['blockNumber'] < block_number: log = info.logs[i] i += 1 topic = log['topics'][0].hex() if topic == EVENT_TRANSFER: continue elif topic == EVENT_ADD_LIQUIDITY: event = get_event_data(exchange.events.AddLiquidity._get_event_abi(), log) eth_balance += event['args']['eth_amount'] token_balance += event['args']['token_amount'] elif topic == EVENT_REMOVE_LIQUIDITY: event = get_event_data(exchange.events.RemoveLiquidity._get_event_abi(), log) eth_balance -= event['args']['eth_amount'] token_balance -= event['args']['token_amount'] elif topic == EVENT_ETH_PURCHASE: event = get_event_data(exchange.events.EthPurchase._get_event_abi(), log) eth_new_balance = eth_balance - event['args']['eth_bought'] token_new_balance = token_balance + event['args']['tokens_sold'] dm_numerator *= eth_new_balance * token_new_balance dm_denominator *= eth_balance * token_balance trade_volume += event['args']['eth_bought'] / 0.997 eth_balance = eth_new_balance token_balance = token_new_balance else: event = get_event_data(exchange.events.TokenPurchase._get_event_abi(), log) eth_new_balance = eth_balance + event['args']['eth_sold'] token_new_balance = token_balance - event['args']['tokens_bought'] dm_numerator *= eth_new_balance * token_new_balance dm_denominator *= eth_balance * token_balance trade_volume += event['args']['eth_sold'] eth_balance = eth_new_balance token_balance = token_new_balance try: info.roi.append(RoiInfo(sqrt(dm_numerator / dm_denominator), eth_balance, token_balance, trade_volume)) except ValueError: print(info.token_symbol, info.exchange_address) logging.info('Loaded info about roi of {} 
exchanges'.format(len(infos))) return infos @timeit def populate_volume(infos: List[ExchangeInfo]) -> List[ExchangeInfo]: for info in infos: volume = list() info.volume = list() exchange = web3.eth.contract(abi=UNISWAP_EXCHANGE_ABI, address=info.exchange_address) i = 0 total_trade_volume = defaultdict(int) for block_number in get_chart_range(): trade_volume = defaultdict(int) while i < len(info.logs) and info.logs[i]['blockNumber'] < block_number: log = info.logs[i] i += 1 topic = log['topics'][0].hex() if topic == EVENT_ETH_PURCHASE: event = get_event_data(exchange.events.EthPurchase._get_event_abi(), log) trade_volume[event['args']['buyer']] += event['args']['eth_bought'] / 0.997 total_trade_volume[event['args']['buyer']] += event['args']['eth_bought'] / 0.997 elif topic == EVENT_TOKEN_PURCHASE: event = get_event_data(exchange.events.TokenPurchase._get_event_abi(), log) trade_volume[event['args']['buyer']] += event['args']['eth_sold'] total_trade_volume[event['args']['buyer']] += event['args']['eth_sold'] volume.append(trade_volume) total_volume = sum(total_trade_volume.values()) valuable_traders = {t for (t, v) in total_trade_volume.items() if v > total_volume / 1000} info.valuable_traders = list(valuable_traders) for vol in volume: filtered_vol = defaultdict(int) for (t, v) in vol.items(): if t in valuable_traders: filtered_vol[t] = v else: filtered_vol['Other'] += v info.volume.append(filtered_vol) logging.info('Volumes of {} exchanges populated'.format(len(infos))) return infos def is_valuable(info: ExchangeInfo) -> bool: return info.eth_balance >= 20 * ETH @timeit def populate_liquidity_history(infos: List[ExchangeInfo]) -> List[ExchangeInfo]: for info in infos: history_len = len(info.history) new_history = pool.map( lambda block_number: web3_infura.eth.getBalance(info.exchange_address, block_number) / ETH, get_chart_range(HISTORY_BEGIN_BLOCK + history_len * HISTORY_CHUNK_SIZE)) info.history += new_history print('Loaded history of balances of {} 
exchanges'.format(len(infos))) return infos def save_tokens(infos: List[ExchangeInfo]): with open(TOKENS_DATA, 'w') as out_f: json.dump({'results': [{'id': info.token_symbol.lower(), 'text': info.token_symbol} for info in infos]}, out_f, indent=1) def save_liquidity_data(infos: List[ExchangeInfo]): timestamps = load_timestamps() valuable_infos = [info for info in infos if is_valuable(info)] other_infos = [info for info in infos if not is_valuable(info)] with open(LIQUIDITY_DATA, 'w') as out_f: out_f.write(','.join(['timestamp'] + [i.token_symbol for i in valuable_infos] + ['Other\n'])) for j in range(len(timestamps)): out_f.write(','.join([str(timestamps[j] * 1000)] + ['{:.2f}'.format(i.history[j]) for i in valuable_infos] + ['{:.2f}'.format(sum(i.history[j] for i in other_infos))] ) + '\n') def save_providers_data(infos: List[ExchangeInfo]): for info in infos: with open(PROVIDERS_DATA.format(info.token_symbol.lower()), 'w') as out_f: out_f.write('provider,eth\n') total_supply = sum(info.providers.values()) remaining_supply = total_supply for p, v in sorted(info.providers.items(), key=lambda x: x[1], reverse=True): s = v / total_supply if s >= 0.01: out_f.write('\u200b{},{:.2f}\n'.format(p, info.eth_balance * s / ETH)) remaining_supply -= v if remaining_supply > 0: out_f.write('Other,{:.2f}\n'.format(info.eth_balance * remaining_supply / total_supply / ETH)) def save_roi_data(infos: List[ExchangeInfo]): timestamps = load_timestamps() for info in infos: with open(ROI_DATA.format(info.token_symbol.lower()), 'w') as out_f: out_f.write('timestamp,ROI,Token Price,Trade Volume\n') for j in range(len(timestamps)): if info.roi[j].eth_balance == 0: continue out_f.write(','.join([str(timestamps[j] * 1000), '{}'.format(info.roi[j].dm_change), '{}'.format(info.roi[j].token_balance / info.roi[j].eth_balance), '{:.2f}'.format(info.roi[j].trade_volume / ETH)]) + '\n') def save_volume_data(infos: List[ExchangeInfo]): timestamps = load_timestamps() for info in infos: with 
open(VOLUME_DATA.format(info.token_symbol.lower()), 'w') as out_f: out_f.write(','.join(['timestamp'] + ['\u200b{}'.format(t) for t in info.valuable_traders] + ['Other']) + '\n') for j in range(len(timestamps)): out_f.write(','.join([str(timestamps[j] * 1000)] + ['{:.2f}'.format(info.volume[j][t] / ETH) if info.volume[j][t] else '' for t in info.valuable_traders + ['Other']]) + '\n') def save_total_volume_data(infos: List[ExchangeInfo]): timestamps = load_timestamps() valuable_infos = [info for info in infos if is_valuable(info)] other_infos = [info for info in infos if not is_valuable(info)] with open(TOTAL_VOLUME_DATA, 'w') as out_f: out_f.write(','.join(['timestamp'] + [i.token_symbol for i in valuable_infos] + ['Other\n'])) for j in range(len(timestamps)): out_f.write(','.join([str(timestamps[j] * 1000)] + ['{:.2f}'.format(sum(i.volume[j].values()) / ETH) for i in valuable_infos] + ['{:.2f}'.format(sum(sum(i.volume[j].values()) for i in other_infos) / ETH)] ) + '\n') def save_raw_data(infos: List[ExchangeInfo]): with open(INFOS_DUMP, 'wb') as out_f: pickle.dump(infos, out_f) def load_raw_data() -> List[ExchangeInfo]: with open(INFOS_DUMP, 'rb') as in_f: return pickle.load(in_f) def save_last_block(block_number: int): with open(LAST_BLOCK_DUMP, 'wb') as out_f: pickle.dump(block_number, out_f) def load_last_block() -> int: with open(LAST_BLOCK_DUMP, 'rb') as in_f: return pickle.load(in_f) if __name__ == '__main__': logging.basicConfig(level=logging.INFO, format='%(message)s') if os.path.exists(LAST_BLOCK_DUMP): saved_block = load_last_block() infos = load_raw_data() if saved_block + 1000 < CURRENT_BLOCK: logging.info('Last seen block: {}, current block: {}, loading data for {} blocks...'.format( saved_block, CURRENT_BLOCK, CURRENT_BLOCK - saved_block)) infos = sorted(load_exchange_infos(infos), key=lambda x: x.eth_balance, reverse=True) load_logs(infos) populate_liquidity_history(infos) populate_providers(infos) populate_roi(infos) populate_volume(infos) 
save_last_block(CURRENT_BLOCK) save_raw_data(infos) else: logging.info('Loaded data is up to date') else: logging.info('Starting from scratch...') infos = sorted(load_exchange_infos([]), key=lambda x: x.eth_balance, reverse=True) load_logs(infos) populate_liquidity_history(infos) populate_providers(infos) populate_roi(infos) populate_volume(infos) save_last_block(CURRENT_BLOCK) save_raw_data(infos) valuable_infos = [info for info in infos if is_valuable(info)] save_tokens(valuable_infos) save_liquidity_data(infos) save_providers_data(valuable_infos) save_roi_data(valuable_infos) save_volume_data(valuable_infos) save_total_volume_data(infos)
42.889671
120
0.611461
0fa7e5940a3efaa3078a2054fcc5d5cf3210e535
1,106
py
Python
onlinecourse/admin.py
serenagomez1304/final-cloud-app-with-database-master
33a7b23b9e89951a0e970abd1f9c2630b780699f
[ "Apache-2.0" ]
null
null
null
onlinecourse/admin.py
serenagomez1304/final-cloud-app-with-database-master
33a7b23b9e89951a0e970abd1f9c2630b780699f
[ "Apache-2.0" ]
null
null
null
onlinecourse/admin.py
serenagomez1304/final-cloud-app-with-database-master
33a7b23b9e89951a0e970abd1f9c2630b780699f
[ "Apache-2.0" ]
1
2022-01-05T10:46:15.000Z
2022-01-05T10:46:15.000Z
from django.contrib import admin # <HINT> Import any new Models here from .models import Course, Lesson, Instructor, Learner, Question, Choice, Submission # <HINT> Register QuestionInline and ChoiceInline classes here class QuestionInline(admin.StackedInline): model = Question extra = 5 class ChoiceInline(admin.StackedInline): model = Choice extra = 5 class LessonInline(admin.StackedInline): model = Lesson extra = 5 # Register your models here. class CourseAdmin(admin.ModelAdmin): inlines = [LessonInline] list_display = ('name', 'pub_date') list_filter = ['pub_date'] search_fields = ['name', 'description'] class LessonAdmin(admin.ModelAdmin): list_display = ['title'] class QuestionAdmin(admin.ModelAdmin): inlines = [ChoiceInline] fields = ('question_text', 'grade', 'lesson_id') # <HINT> Register Question and Choice models here admin.site.register(Course, CourseAdmin) admin.site.register(Lesson, LessonAdmin) admin.site.register(Instructor) admin.site.register(Learner) admin.site.register(Question) admin.site.register(Choice)
26.333333
85
0.738698
0e0b4c61469d6d32cd0c2d788f8ca518089c0e1d
5,210
py
Python
Algorithm.Python/IndexOptionShortCallOTMExpiryRegressionAlgorithm.py
szymanskilukasz/Lean
fe2ac131af2d0614494e5c970a57d4b7c89d5f88
[ "Apache-2.0" ]
1
2020-09-01T05:39:38.000Z
2020-09-01T05:39:38.000Z
Algorithm.Python/IndexOptionShortCallOTMExpiryRegressionAlgorithm.py
szymanskilukasz/Lean
fe2ac131af2d0614494e5c970a57d4b7c89d5f88
[ "Apache-2.0" ]
null
null
null
Algorithm.Python/IndexOptionShortCallOTMExpiryRegressionAlgorithm.py
szymanskilukasz/Lean
fe2ac131af2d0614494e5c970a57d4b7c89d5f88
[ "Apache-2.0" ]
null
null
null
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals. # Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at http:#www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License from datetime import datetime, timedelta from QuantConnect.Algorithm import * from QuantConnect.Data import * from QuantConnect.Data.Market import * from QuantConnect.Orders import * from QuantConnect.Securities import * from QuantConnect.Securities.Future import * from QuantConnect import Market from QuantConnect import * ### <summary> ### This regression algorithm tests Out of The Money (OTM) index option expiry for short calls. ### We expect 2 orders from the algorithm, which are: ### ### * Initial entry, sell SPX Call Option (expiring OTM) ### - Profit the option premium, since the option was not assigned. ### ### * Liquidation of SPX call OTM contract on the last trade date ### ### Additionally, we test delistings for index options and assert that our ### portfolio holdings reflect the orders the algorithm has submitted. ### </summary> class IndexOptionShortCallOTMExpiryRegressionAlgorithm(QCAlgorithm): def Initialize(self): self.SetStartDate(2021, 1, 4) self.SetEndDate(2021, 1, 31) self.spx = self.AddIndex("SPX", Resolution.Minute).Symbol # Select a index option expiring ITM, and adds it to the algorithm. 
self.spxOption = list(self.OptionChainProvider.GetOptionContractList(self.spx, self.Time)) self.spxOption = [i for i in self.spxOption if i.ID.StrikePrice >= 4250 and i.ID.OptionRight == OptionRight.Call and i.ID.Date.year == 2021 and i.ID.Date.month == 1] self.spxOption = list(sorted(self.spxOption, key=lambda x: x.ID.StrikePrice))[0] self.spxOption = self.AddIndexOptionContract(self.spxOption, Resolution.Minute).Symbol self.expectedContract = Symbol.CreateOption(self.spx, Market.USA, OptionStyle.European, OptionRight.Call, 4250, datetime(2021, 1, 15)) if self.spxOption != self.expectedContract: raise Exception(f"Contract {self.expectedContract} was not found in the chain") self.Schedule.On(self.DateRules.Tomorrow, self.TimeRules.AfterMarketOpen(self.spx, 1), lambda: self.MarketOrder(self.spxOption, -1)) def OnData(self, data: Slice): # Assert delistings, so that we can make sure that we receive the delisting warnings at # the expected time. These assertions detect bug #4872 for delisting in data.Delistings.Values: if delisting.Type == DelistingType.Warning: if delisting.Time != datetime(2021, 1, 15): raise Exception(f"Delisting warning issued at unexpected date: {delisting.Time}") if delisting.Type == DelistingType.Delisted: if delisting.Time != datetime(2021, 1, 16): raise Exception(f"Delisting happened at unexpected date: {delisting.Time}") def OnOrderEvent(self, orderEvent: OrderEvent): if orderEvent.Status != OrderStatus.Filled: # There's lots of noise with OnOrderEvent, but we're only interested in fills. 
return if orderEvent.Symbol not in self.Securities: raise Exception(f"Order event Symbol not found in Securities collection: {orderEvent.Symbol}") security = self.Securities[orderEvent.Symbol] if security.Symbol == self.spx: raise Exception(f"Expected no order events for underlying Symbol {security.Symbol}") if security.Symbol == self.expectedContract: self.AssertIndexOptionContractOrder(orderEvent, security) else: raise Exception(f"Received order event for unknown Symbol: {orderEvent.Symbol}") def AssertIndexOptionContractOrder(self, orderEvent: OrderEvent, optionContract: Security): if orderEvent.Direction == OrderDirection.Sell and optionContract.Holdings.Quantity != -1: raise Exception(f"No holdings were created for option contract {optionContract.Symbol}") if orderEvent.Direction == OrderDirection.Buy and optionContract.Holdings.Quantity != 0: raise Exception("Expected no options holdings after closing position") if orderEvent.IsAssignment: raise Exception(f"Assignment was not expected for {orderEvent.Symbol}") ### <summary> ### Ran at the end of the algorithm to ensure the algorithm has no holdings ### </summary> ### <exception cref="Exception">The algorithm has holdings</exception> def OnEndOfAlgorithm(self): if self.Portfolio.Invested: raise Exception(f"Expected no holdings at end of algorithm, but are invested in: {', '.join(self.Portfolio.Keys)}")
52.626263
173
0.714012
e00986b493538e0bfc7d007b9dec7675149a0854
4,338
py
Python
test/db_task_history_test.py
vgmartinez/luigi
b5ad3eba1501bdc25e91e98901bc781128f2d8a7
[ "Apache-2.0" ]
1
2021-05-26T09:41:53.000Z
2021-05-26T09:41:53.000Z
test/db_task_history_test.py
vgmartinez/luigi
b5ad3eba1501bdc25e91e98901bc781128f2d8a7
[ "Apache-2.0" ]
null
null
null
test/db_task_history_test.py
vgmartinez/luigi
b5ad3eba1501bdc25e91e98901bc781128f2d8a7
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from helpers import unittest from luigi import six from helpers import with_config import luigi from luigi.db_task_history import DbTaskHistory, Base from luigi.task_status import DONE, PENDING, RUNNING class DummyTask(luigi.Task): foo = luigi.Parameter(default='foo') class ParamTask(luigi.Task): param1 = luigi.Parameter() param2 = luigi.IntParameter() class DbTaskHistoryTest(unittest.TestCase): @with_config(dict(task_history=dict(db_connection='sqlite:///:memory:'))) def setUp(self): self.history = DbTaskHistory() def test_task_list(self): self.run_task(DummyTask()) self.run_task(DummyTask(foo='bar')) tasks = list(self.history.find_all_by_name('DummyTask')) self.assertEqual(len(tasks), 2) for task in tasks: self.assertEqual(task.name, 'DummyTask') self.assertEqual(task.host, 'hostname') def test_task_events(self): self.run_task(DummyTask()) tasks = list(self.history.find_all_by_name('DummyTask')) self.assertEqual(len(tasks), 1) [task] = tasks self.assertEqual(task.name, 'DummyTask') self.assertEqual(len(task.events), 3) for (event, name) in zip(task.events, [DONE, RUNNING, PENDING]): self.assertEqual(event.event_name, name) def test_task_by_params(self): task1 = ParamTask('foo', 'bar') task2 = ParamTask('bar', 'foo') self.run_task(task1) self.run_task(task2) task1_record = self.history.find_all_by_parameters(task_name='ParamTask', param1='foo', param2='bar') task2_record = 
self.history.find_all_by_parameters(task_name='ParamTask', param1='bar', param2='foo') for task, records in zip((task1, task2), (task1_record, task2_record)): records = list(records) self.assertEqual(len(records), 1) [record] = records self.assertEqual(task.task_family, record.name) for param_name, param_value in six.iteritems(task.param_kwargs): self.assertTrue(param_name in record.parameters) self.assertEqual(str(param_value), record.parameters[param_name].value) def run_task(self, task): self.history.task_scheduled(task.task_id) self.history.task_started(task.task_id, 'hostname') self.history.task_finished(task.task_id, successful=True) class MySQLDbTaskHistoryTest(unittest.TestCase): @with_config(dict(task_history=dict(db_connection='mysql+mysqlconnector://travis@localhost/luigi_test'))) def setUp(self): try: self.history = DbTaskHistory() except Exception: raise unittest.SkipTest('DBTaskHistory cannot be created: probably no MySQL available') def test_subsecond_timestamp(self): # Add 2 events in <1s task = DummyTask() self.run_task(task) task_record = six.advance_iterator(self.history.find_all_by_name('DummyTask')) print (task_record.events) self.assertEqual(task_record.events[0].event_name, DONE) def test_utc_conversion(self): from luigi.server import from_utc task = DummyTask() self.run_task(task) task_record = six.advance_iterator(self.history.find_all_by_name('DummyTask')) last_event = task_record.events[0] try: print (from_utc(str(last_event.ts))) except ValueError: self.fail("Failed to convert timestamp {} to UTC".format(last_event.ts)) def run_task(self, task): self.history.task_scheduled(task.task_id) self.history.task_started(task.task_id, 'hostname') self.history.task_finished(task.task_id, successful=True)
35.557377
109
0.681189
653df36c1cb3cb9d80f454ee88d3e1ebcd7108b4
44,231
py
Python
octavia/tests/unit/common/sample_configs/sample_configs_combined.py
buty4649/octavia
a4aa03d3bc98eb27cc353140cd998a623baa505f
[ "Apache-2.0" ]
null
null
null
octavia/tests/unit/common/sample_configs/sample_configs_combined.py
buty4649/octavia
a4aa03d3bc98eb27cc353140cd998a623baa505f
[ "Apache-2.0" ]
2
2018-09-28T08:41:14.000Z
2019-08-01T11:20:37.000Z
octavia/tests/unit/common/sample_configs/sample_configs_combined.py
buty4649/octavia
a4aa03d3bc98eb27cc353140cd998a623baa505f
[ "Apache-2.0" ]
null
null
null
# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import collections from oslo_config import cfg from octavia.common import constants from octavia.tests.common import sample_certs CONF = cfg.CONF def sample_amphora_tuple(id='sample_amphora_id_1', lb_network_ip='10.0.1.1', vrrp_ip='10.1.1.1', ha_ip='192.168.10.1', vrrp_port_id='1234', ha_port_id='1234', role=None, status='ACTIVE', vrrp_interface=None, vrrp_priority=None, api_version='1.0'): in_amphora = collections.namedtuple( 'amphora', 'id, lb_network_ip, vrrp_ip, ha_ip, vrrp_port_id, ' 'ha_port_id, role, status, vrrp_interface,' 'vrrp_priority, api_version') return in_amphora( id=id, lb_network_ip=lb_network_ip, vrrp_ip=vrrp_ip, ha_ip=ha_ip, vrrp_port_id=vrrp_port_id, ha_port_id=ha_port_id, role=role, status=status, vrrp_interface=vrrp_interface, vrrp_priority=vrrp_priority, api_version=api_version) RET_PERSISTENCE = { 'type': 'HTTP_COOKIE', 'cookie_name': None} RET_MONITOR_1 = { 'id': 'sample_monitor_id_1', 'type': 'HTTP', 'delay': 30, 'timeout': 31, 'fall_threshold': 3, 'rise_threshold': 2, 'http_method': 'GET', 'url_path': '/index.html', 'expected_codes': '418', 'enabled': True, 'http_version': 1.0, 'domain_name': None} RET_MONITOR_2 = { 'id': 'sample_monitor_id_2', 'type': 'HTTP', 'delay': 30, 'timeout': 31, 'fall_threshold': 3, 'rise_threshold': 2, 'http_method': 'GET', 'url_path': '/healthmon.html', 'expected_codes': '418', 'enabled': True, 'http_version': 1.0, 'domain_name': None} 
RET_MEMBER_1 = { 'id': 'sample_member_id_1', 'address': '10.0.0.99', 'protocol_port': 82, 'weight': 13, 'subnet_id': '10.0.0.1/24', 'enabled': True, 'operating_status': 'ACTIVE', 'monitor_address': None, 'monitor_port': None, 'backup': False} RET_MEMBER_2 = { 'id': 'sample_member_id_2', 'address': '10.0.0.98', 'protocol_port': 82, 'weight': 13, 'subnet_id': '10.0.0.1/24', 'enabled': True, 'operating_status': 'ACTIVE', 'monitor_address': None, 'monitor_port': None, 'backup': False} RET_MEMBER_3 = { 'id': 'sample_member_id_3', 'address': '10.0.0.97', 'protocol_port': 82, 'weight': 13, 'subnet_id': '10.0.0.1/24', 'enabled': True, 'operating_status': 'ACTIVE', 'monitor_address': None, 'monitor_port': None, 'backup': False} RET_POOL_1 = { 'id': 'sample_pool_id_1', 'protocol': 'http', 'proxy_protocol': None, 'lb_algorithm': 'roundrobin', 'listener_tls_enabled': False, 'members': [RET_MEMBER_1, RET_MEMBER_2], 'health_monitor': RET_MONITOR_1, 'session_persistence': RET_PERSISTENCE, 'enabled': True, 'operating_status': 'ACTIVE', 'stick_size': '10k', constants.HTTP_REUSE: False, 'ca_tls_path': '', 'crl_path': '', 'tls_enabled': False, } RET_POOL_2 = { 'id': 'sample_pool_id_2', 'protocol': 'http', 'proxy_protocol': None, 'lb_algorithm': 'roundrobin', 'listener_tls_enabled': False, 'members': [RET_MEMBER_3], 'health_monitor': RET_MONITOR_2, 'session_persistence': RET_PERSISTENCE, 'enabled': True, 'operating_status': 'ACTIVE', 'stick_size': '10k', constants.HTTP_REUSE: False, 'ca_tls_path': '', 'crl_path': '', 'tls_enabled': False, } RET_DEF_TLS_CONT = {'id': 'cont_id_1', 'allencompassingpem': 'imapem', 'primary_cn': 'FakeCn'} RET_SNI_CONT_1 = {'id': 'cont_id_2', 'allencompassingpem': 'imapem2', 'primary_cn': 'FakeCn'} RET_SNI_CONT_2 = {'id': 'cont_id_3', 'allencompassingpem': 'imapem3', 'primary_cn': 'FakeCn2'} RET_L7RULE_1 = { 'id': 'sample_l7rule_id_1', 'type': constants.L7RULE_TYPE_PATH, 'compare_type': constants.L7RULE_COMPARE_TYPE_STARTS_WITH, 'key': None, 'value': 
'/api', 'invert': False, 'enabled': True} RET_L7RULE_2 = { 'id': 'sample_l7rule_id_2', 'type': constants.L7RULE_TYPE_HEADER, 'compare_type': constants.L7RULE_COMPARE_TYPE_CONTAINS, 'key': 'Some-header', 'value': 'This\\ string\\\\\\ with\\ stuff', 'invert': True, 'enabled': True} RET_L7RULE_3 = { 'id': 'sample_l7rule_id_3', 'type': constants.L7RULE_TYPE_COOKIE, 'compare_type': constants.L7RULE_COMPARE_TYPE_REGEX, 'key': 'some-cookie', 'value': 'this.*|that', 'invert': False, 'enabled': True} RET_L7RULE_4 = { 'id': 'sample_l7rule_id_4', 'type': constants.L7RULE_TYPE_FILE_TYPE, 'compare_type': constants.L7RULE_COMPARE_TYPE_EQUAL_TO, 'key': None, 'value': 'jpg', 'invert': False, 'enabled': True} RET_L7RULE_5 = { 'id': 'sample_l7rule_id_5', 'type': constants.L7RULE_TYPE_HOST_NAME, 'compare_type': constants.L7RULE_COMPARE_TYPE_ENDS_WITH, 'key': None, 'value': '.example.com', 'invert': False, 'enabled': True} RET_L7RULE_6 = { 'id': 'sample_l7rule_id_6', 'type': constants.L7RULE_TYPE_HOST_NAME, 'compare_type': constants.L7RULE_COMPARE_TYPE_ENDS_WITH, 'key': None, 'value': '.example.com', 'invert': False, 'enabled': False} RET_L7POLICY_1 = { 'id': 'sample_l7policy_id_1', 'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL, 'redirect_pool': RET_POOL_2, 'redirect_url': None, 'redirect_prefix': None, 'enabled': True, 'l7rules': [RET_L7RULE_1], 'redirect_http_code': None} RET_L7POLICY_2 = { 'id': 'sample_l7policy_id_2', 'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL, 'redirect_pool': None, 'redirect_url': 'http://www.example.com', 'redirect_prefix': None, 'enabled': True, 'l7rules': [RET_L7RULE_2, RET_L7RULE_3], 'redirect_http_code': 302} RET_L7POLICY_3 = { 'id': 'sample_l7policy_id_3', 'action': constants.L7POLICY_ACTION_REJECT, 'redirect_pool': None, 'redirect_url': None, 'redirect_prefix': None, 'enabled': True, 'l7rules': [RET_L7RULE_4, RET_L7RULE_5], 'redirect_http_code': None} RET_L7POLICY_4 = { 'id': 'sample_l7policy_id_4', 'action': 
constants.L7POLICY_ACTION_REJECT, 'redirect_pool': None, 'redirect_url': None, 'redirect_prefix': None, 'enabled': True, 'l7rules': [], 'redirect_http_code': None} RET_L7POLICY_5 = { 'id': 'sample_l7policy_id_5', 'action': constants.L7POLICY_ACTION_REJECT, 'redirect_pool': None, 'redirect_url': None, 'redirect_prefix': None, 'enabled': False, 'l7rules': [RET_L7RULE_5], 'redirect_http_code': None} RET_L7POLICY_6 = { 'id': 'sample_l7policy_id_6', 'action': constants.L7POLICY_ACTION_REJECT, 'redirect_pool': None, 'redirect_url': None, 'redirect_prefix': None, 'enabled': True, 'l7rules': [], 'redirect_http_code': None} RET_L7POLICY_7 = { 'id': 'sample_l7policy_id_7', 'action': constants.L7POLICY_ACTION_REDIRECT_PREFIX, 'redirect_pool': None, 'redirect_url': None, 'redirect_prefix': 'https://example.com', 'enabled': True, 'l7rules': [RET_L7RULE_2, RET_L7RULE_3], 'redirect_http_code': 302} RET_L7POLICY_8 = { 'id': 'sample_l7policy_id_8', 'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL, 'redirect_pool': None, 'redirect_url': 'http://www.example.com', 'redirect_prefix': None, 'enabled': True, 'l7rules': [RET_L7RULE_2, RET_L7RULE_3], 'redirect_http_code': None} RET_LISTENER = { 'id': 'sample_listener_id_1', 'protocol_port': '80', 'protocol': 'HTTP', 'protocol_mode': 'http', 'default_pool': RET_POOL_1, 'connection_limit': constants.HAPROXY_DEFAULT_MAXCONN, 'user_log_format': '12345\\ sample_loadbalancer_id_1\\ %f\\ %ci\\ %cp\\ ' '%t\\ %{+Q}r\\ %ST\\ %B\\ %U\\ %[ssl_c_verify]\\ ' '%{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ %tsc', 'pools': [RET_POOL_1], 'l7policies': [], 'enabled': True, 'insert_headers': {}, 'timeout_client_data': 50000, 'timeout_member_connect': 5000, 'timeout_member_data': 50000, 'timeout_tcp_inspect': 0, } RET_LISTENER_L7 = { 'id': 'sample_listener_id_1', 'protocol_port': '80', 'protocol': 'HTTP', 'protocol_mode': 'http', 'default_pool': RET_POOL_1, 'connection_limit': constants.HAPROXY_DEFAULT_MAXCONN, 'user_log_format': '12345\\ sample_loadbalancer_id_1\\ 
%f\\ %ci\\ %cp\\ ' '%t\\ %{+Q}r\\ %ST\\ %B\\ %U\\ %[ssl_c_verify]\\ ' '%{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ %tsc', 'l7policies': [RET_L7POLICY_1, RET_L7POLICY_2, RET_L7POLICY_3, RET_L7POLICY_4, RET_L7POLICY_5, RET_L7POLICY_6, RET_L7POLICY_7], 'pools': [RET_POOL_1, RET_POOL_2], 'enabled': True, 'insert_headers': {}, 'timeout_client_data': 50000, 'timeout_member_connect': 5000, 'timeout_member_data': 50000, 'timeout_tcp_inspect': 0, } RET_LISTENER_TLS = { 'id': 'sample_listener_id_1', 'protocol_port': '443', 'protocol': 'TERMINATED_HTTPS', 'protocol_mode': 'http', 'default_pool': RET_POOL_1, 'connection_limit': constants.HAPROXY_DEFAULT_MAXCONN, 'tls_certificate_id': 'cont_id_1', 'default_tls_path': '/etc/ssl/sample_loadbalancer_id_1/fakeCN.pem', 'default_tls_container': RET_DEF_TLS_CONT, 'pools': [RET_POOL_1], 'l7policies': [], 'enabled': True, 'insert_headers': {}} RET_LISTENER_TLS_SNI = { 'id': 'sample_listener_id_1', 'protocol_port': '443', 'protocol': 'TERMINATED_HTTPS', 'default_pool': RET_POOL_1, 'connection_limit': constants.HAPROXY_DEFAULT_MAXCONN, 'tls_certificate_id': 'cont_id_1', 'default_tls_path': '/etc/ssl/sample_loadbalancer_id_1/fakeCN.pem', 'default_tls_container': RET_DEF_TLS_CONT, 'crt_dir': '/v2/sample_loadbalancer_id_1', 'sni_container_ids': ['cont_id_2', 'cont_id_3'], 'sni_containers': [RET_SNI_CONT_1, RET_SNI_CONT_2], 'pools': [RET_POOL_1], 'l7policies': [], 'enabled': True, 'insert_headers': {}} RET_AMPHORA = { 'id': 'sample_amphora_id_1', 'lb_network_ip': '10.0.1.1', 'vrrp_ip': '10.1.1.1', 'ha_ip': '192.168.10.1', 'vrrp_port_id': '1234', 'ha_port_id': '1234', 'role': None, 'status': 'ACTIVE', 'vrrp_interface': None, 'vrrp_priority': None} RET_LB = { 'host_amphora': RET_AMPHORA, 'id': 'sample_loadbalancer_id_1', 'vip_address': '10.0.0.2', 'listeners': [RET_LISTENER], 'peer_port': 1024, 'topology': 'SINGLE', 'enabled': True, 'global_connection_limit': constants.HAPROXY_DEFAULT_MAXCONN, 'amphorae': [sample_amphora_tuple()]} RET_LB_L7 = { 
'host_amphora': RET_AMPHORA, 'id': 'sample_loadbalancer_id_1', 'vip_address': '10.0.0.2', 'listeners': [RET_LISTENER_L7], 'peer_port': 1024, 'topology': 'SINGLE', 'enabled': True, 'global_connection_limit': constants.HAPROXY_DEFAULT_MAXCONN, 'amphorae': [sample_amphora_tuple()]} UDP_SOURCE_IP_BODY = { 'type': constants.SESSION_PERSISTENCE_SOURCE_IP, 'persistence_timeout': 33, 'persistence_granularity': '255.0.0.0' } RET_UDP_HEALTH_MONITOR = { 'id': 'sample_monitor_id_1', 'type': constants.HEALTH_MONITOR_UDP_CONNECT, 'delay': 30, 'timeout': 31, 'enabled': True, 'fall_threshold': 3, 'check_script_path': (CONF.haproxy_amphora.base_path + '/lvs/check/udp_check.sh') } UDP_HEALTH_MONITOR_NO_SCRIPT = { 'id': 'sample_monitor_id_1', 'check_script_path': None, 'delay': 30, 'enabled': True, 'fall_threshold': 3, 'timeout': 31, 'type': 'UDP' } RET_UDP_MEMBER = { 'id': 'member_id_1', 'address': '192.0.2.10', 'protocol_port': 82, 'weight': 13, 'enabled': True, 'monitor_address': None, 'monitor_port': None } RET_UDP_MEMBER_MONITOR_IP_PORT = { 'id': 'member_id_1', 'address': '192.0.2.10', 'protocol_port': 82, 'weight': 13, 'enabled': True, 'monitor_address': '192.168.1.1', 'monitor_port': 9000 } UDP_MEMBER_1 = { 'id': 'sample_member_id_1', 'address': '10.0.0.99', 'enabled': True, 'protocol_port': 82, 'weight': 13, 'monitor_address': None, 'monitor_port': None, } UDP_MEMBER_2 = { 'id': 'sample_member_id_2', 'address': '10.0.0.98', 'enabled': True, 'protocol_port': 82, 'weight': 13, 'monitor_address': None, 'monitor_port': None } RET_UDP_POOL = { 'id': 'sample_pool_id_1', 'enabled': True, 'health_monitor': UDP_HEALTH_MONITOR_NO_SCRIPT, 'lb_algorithm': 'rr', 'members': [UDP_MEMBER_1, UDP_MEMBER_2], 'protocol': 'udp', 'session_persistence': UDP_SOURCE_IP_BODY } RET_UDP_LISTENER = { 'connection_limit': 98, 'default_pool': { 'id': 'sample_pool_id_1', 'enabled': True, 'health_monitor': RET_UDP_HEALTH_MONITOR, 'lb_algorithm': 'rr', 'members': [UDP_MEMBER_1, UDP_MEMBER_2], 'protocol': 
'udp', 'session_persistence': UDP_SOURCE_IP_BODY }, 'enabled': True, 'id': 'sample_listener_id_1', 'protocol_mode': 'udp', 'protocol_port': '80' } def sample_listener_loadbalancer_tuple( topology=None, enabled=True, pools=None): if topology and topology in ['ACTIVE_STANDBY', 'ACTIVE_ACTIVE']: more_amp = True else: more_amp = False topology = constants.TOPOLOGY_SINGLE in_lb = collections.namedtuple( 'load_balancer', 'id, name, vip, amphorae, topology, ' 'pools, listeners, enabled, project_id') return in_lb( id='sample_loadbalancer_id_1', name='test-lb', vip=sample_vip_tuple(), amphorae=[sample_amphora_tuple(role=constants.ROLE_MASTER), sample_amphora_tuple( id='sample_amphora_id_2', lb_network_ip='10.0.1.2', vrrp_ip='10.1.1.2', role=constants.ROLE_BACKUP)] if more_amp else [sample_amphora_tuple()], topology=topology, pools=pools or [], listeners=[], enabled=enabled, project_id='12345', ) def sample_lb_with_udp_listener_tuple( topology=None, enabled=True, pools=None): if topology and topology in ['ACTIVE_STANDBY', 'ACTIVE_ACTIVE']: more_amp = True else: more_amp = False topology = constants.TOPOLOGY_SINGLE listeners = [sample_listener_tuple( proto=constants.PROTOCOL_UDP, persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, persistence_timeout=33, persistence_granularity='255.255.0.0', monitor_proto=constants.HEALTH_MONITOR_UDP_CONNECT)] in_lb = collections.namedtuple( 'load_balancer', 'id, name, vip, amphorae, topology, ' 'pools, enabled, project_id, listeners') return in_lb( id='sample_loadbalancer_id_1', name='test-lb', vip=sample_vip_tuple(), amphorae=[sample_amphora_tuple(role=constants.ROLE_MASTER), sample_amphora_tuple( id='sample_amphora_id_2', lb_network_ip='10.0.1.2', vrrp_ip='10.1.1.2', role=constants.ROLE_BACKUP)] if more_amp else [sample_amphora_tuple()], topology=topology, listeners=listeners, pools=pools or [], enabled=enabled, project_id='12345' ) def sample_vrrp_group_tuple(): in_vrrp_group = collections.namedtuple( 'vrrp_group', 
'load_balancer_id, vrrp_auth_type, vrrp_auth_pass, ' 'advert_int, smtp_server, smtp_connect_timeout, ' 'vrrp_group_name') return in_vrrp_group( vrrp_group_name='sample_loadbalancer_id_1', load_balancer_id='sample_loadbalancer_id_1', vrrp_auth_type='PASS', vrrp_auth_pass='123', advert_int='1', smtp_server='', smtp_connect_timeout='') def sample_vip_tuple(): vip = collections.namedtuple('vip', 'ip_address') return vip(ip_address='10.0.0.2') def sample_listener_tuple(proto=None, monitor=True, alloc_default_pool=True, persistence=True, persistence_type=None, persistence_cookie=None, persistence_timeout=None, persistence_granularity=None, tls=False, sni=False, peer_port=None, topology=None, l7=False, enabled=True, insert_headers=None, be_proto=None, monitor_ip_port=False, monitor_proto=None, monitor_expected_codes=None, backup_member=False, disabled_member=False, connection_limit=constants.DEFAULT_CONNECTION_LIMIT, timeout_client_data=50000, timeout_member_connect=5000, timeout_member_data=50000, timeout_tcp_inspect=0, client_ca_cert=False, client_crl_cert=False, ssl_type_l7=False, pool_cert=False, pool_ca_cert=False, pool_crl=False, tls_enabled=False, hm_host_http_check=False, id='sample_listener_id_1', recursive_nest=False, provisioning_status=constants.ACTIVE, tls_ciphers=constants.CIPHERS_OWASP_SUITE_B, backend_tls_ciphers=None, tls_versions=constants.TLS_VERSIONS_OWASP_SUITE_B, backend_tls_versions=constants. TLS_VERSIONS_OWASP_SUITE_B, alpn_protocols=constants. 
AMPHORA_SUPPORTED_ALPN_PROTOCOLS, sample_default_pool=1, pool_enabled=True): proto = 'HTTP' if proto is None else proto if be_proto is None: be_proto = 'HTTP' if proto == 'TERMINATED_HTTPS' else proto if proto != constants.PROTOCOL_TERMINATED_HTTPS: tls_ciphers = None tls_versions = None alpn_protocols = None if pool_cert is False: backend_tls_versions = None topology = 'SINGLE' if topology is None else topology port = '443' if proto in ['HTTPS', 'TERMINATED_HTTPS'] else '80' peer_port = 1024 if peer_port is None else peer_port insert_headers = insert_headers or {} in_listener = collections.namedtuple( 'listener', 'id, project_id, protocol_port, protocol, default_pool, ' 'connection_limit, tls_certificate_id, ' 'sni_container_ids, default_tls_container, ' 'sni_containers, load_balancer, peer_port, pools, ' 'l7policies, enabled, insert_headers, timeout_client_data,' 'timeout_member_connect, timeout_member_data, ' 'timeout_tcp_inspect, client_ca_tls_certificate_id, ' 'client_ca_tls_certificate, client_authentication, ' 'client_crl_container_id, provisioning_status, ' 'tls_ciphers, tls_versions, alpn_protocols') if l7: pools = [ sample_pool_tuple( proto=be_proto, monitor=monitor, persistence=persistence, persistence_type=persistence_type, persistence_cookie=persistence_cookie, monitor_ip_port=monitor_ip_port, monitor_proto=monitor_proto, pool_cert=pool_cert, pool_ca_cert=pool_ca_cert, pool_crl=pool_crl, tls_enabled=tls_enabled, hm_host_http_check=hm_host_http_check, listener_id='sample_listener_id_1', tls_ciphers=backend_tls_ciphers, tls_versions=backend_tls_versions, enabled=pool_enabled), sample_pool_tuple( proto=be_proto, monitor=monitor, persistence=persistence, persistence_type=persistence_type, persistence_cookie=persistence_cookie, sample_pool=2, monitor_ip_port=monitor_ip_port, monitor_proto=monitor_proto, pool_cert=pool_cert, pool_ca_cert=pool_ca_cert, pool_crl=pool_crl, tls_enabled=tls_enabled, hm_host_http_check=hm_host_http_check, 
listener_id='sample_listener_id_1', tls_ciphers=backend_tls_ciphers, tls_versions=None, enabled=pool_enabled)] l7policies = [ sample_l7policy_tuple('sample_l7policy_id_1', sample_policy=1), sample_l7policy_tuple('sample_l7policy_id_2', sample_policy=2), sample_l7policy_tuple('sample_l7policy_id_3', sample_policy=3), sample_l7policy_tuple('sample_l7policy_id_4', sample_policy=4), sample_l7policy_tuple('sample_l7policy_id_5', sample_policy=5), sample_l7policy_tuple('sample_l7policy_id_6', sample_policy=6), sample_l7policy_tuple('sample_l7policy_id_7', sample_policy=7)] if ssl_type_l7: l7policies.append(sample_l7policy_tuple( 'sample_l7policy_id_8', sample_policy=8)) else: pools = [ sample_pool_tuple( proto=be_proto, monitor=monitor, persistence=persistence, persistence_type=persistence_type, persistence_cookie=persistence_cookie, monitor_ip_port=monitor_ip_port, monitor_proto=monitor_proto, backup_member=backup_member, disabled_member=disabled_member, pool_cert=pool_cert, pool_ca_cert=pool_ca_cert, pool_crl=pool_crl, tls_enabled=tls_enabled, hm_host_http_check=hm_host_http_check, listener_id='sample_listener_id_1', tls_ciphers=backend_tls_ciphers, tls_versions=backend_tls_versions, enabled=pool_enabled)] l7policies = [] listener = in_listener( id=id, project_id='12345', protocol_port=port, protocol=proto, load_balancer=sample_listener_loadbalancer_tuple( topology=topology, pools=pools), peer_port=peer_port, default_pool=sample_pool_tuple( listener_id='sample_listener_id_1', proto=be_proto, monitor=monitor, persistence=persistence, persistence_type=persistence_type, persistence_cookie=persistence_cookie, persistence_timeout=persistence_timeout, persistence_granularity=persistence_granularity, monitor_ip_port=monitor_ip_port, monitor_proto=monitor_proto, monitor_expected_codes=monitor_expected_codes, pool_cert=pool_cert, pool_ca_cert=pool_ca_cert, pool_crl=pool_crl, tls_enabled=tls_enabled, hm_host_http_check=hm_host_http_check, sample_pool=sample_default_pool, 
enabled=pool_enabled ) if alloc_default_pool else '', connection_limit=connection_limit, tls_certificate_id='cont_id_1' if tls else '', sni_container_ids=['cont_id_2', 'cont_id_3'] if sni else [], default_tls_container=sample_tls_container_tuple( id='cont_id_1', certificate=sample_certs.X509_CERT, private_key=sample_certs.X509_CERT_KEY, intermediates=sample_certs.X509_IMDS_LIST, primary_cn=sample_certs.X509_CERT_CN ) if tls else '', sni_containers=[ sample_tls_sni_container_tuple( tls_container_id='cont_id_2', tls_container=sample_tls_container_tuple( id='cont_id_2', certificate=sample_certs.X509_CERT_2, private_key=sample_certs.X509_CERT_KEY_2, intermediates=sample_certs.X509_IMDS_LIST, primary_cn=sample_certs.X509_CERT_CN_2)), sample_tls_sni_container_tuple( tls_container_id='cont_id_3', tls_container=sample_tls_container_tuple( id='cont_id_3', certificate=sample_certs.X509_CERT_3, private_key=sample_certs.X509_CERT_KEY_3, intermediates=sample_certs.X509_IMDS_LIST, primary_cn=sample_certs.X509_CERT_CN_3))] if sni else [], pools=pools, l7policies=l7policies, enabled=enabled, insert_headers=insert_headers, timeout_client_data=timeout_client_data, timeout_member_connect=timeout_member_connect, timeout_member_data=timeout_member_data, timeout_tcp_inspect=timeout_tcp_inspect, client_ca_tls_certificate_id='cont_id_ca' if client_ca_cert else '', client_ca_tls_certificate=sample_tls_container_tuple( id='cont_id_ca', certificate=sample_certs.X509_CA_CERT, primary_cn=sample_certs.X509_CA_CERT_CN ) if client_ca_cert else '', client_authentication=( constants.CLIENT_AUTH_MANDATORY if client_ca_cert else constants.CLIENT_AUTH_NONE), client_crl_container_id='cont_id_crl' if client_crl_cert else '', provisioning_status=provisioning_status, tls_ciphers=tls_ciphers, tls_versions=tls_versions, alpn_protocols=alpn_protocols ) if recursive_nest: listener.load_balancer.listeners.append(listener) return listener def sample_tls_sni_container_tuple(tls_container_id=None, 
tls_container=None): sc = collections.namedtuple('sni_container', 'tls_container_id, ' 'tls_container') return sc(tls_container_id=tls_container_id, tls_container=tls_container) def sample_tls_sni_containers_tuple(tls_container_id=None, tls_container=None): sc = collections.namedtuple('sni_containers', 'tls_container_id, ' 'tls_container') return [sc(tls_container_id=tls_container_id, tls_container=tls_container)] def sample_tls_container_tuple(id='cont_id_1', certificate=None, private_key=None, intermediates=None, primary_cn=None): sc = collections.namedtuple( 'tls_container', 'id, certificate, private_key, intermediates, primary_cn') return sc(id=id, certificate=certificate, private_key=private_key, intermediates=intermediates or [], primary_cn=primary_cn) def sample_pool_tuple(listener_id=None, proto=None, monitor=True, persistence=True, persistence_type=None, persistence_cookie=None, persistence_timeout=None, persistence_granularity=None, sample_pool=1, monitor_ip_port=False, monitor_proto=None, monitor_expected_codes=None, backup_member=False, disabled_member=False, has_http_reuse=True, pool_cert=False, pool_ca_cert=False, pool_crl=False, tls_enabled=False, hm_host_http_check=False, provisioning_status=constants.ACTIVE, tls_ciphers=constants.CIPHERS_OWASP_SUITE_B, tls_versions=constants.TLS_VERSIONS_OWASP_SUITE_B, lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, enabled=True): proto = 'HTTP' if proto is None else proto if not tls_enabled: tls_ciphers = None tls_versions = None monitor_proto = proto if monitor_proto is None else monitor_proto in_pool = collections.namedtuple( 'pool', 'id, protocol, lb_algorithm, members, health_monitor, ' 'session_persistence, enabled, operating_status, ' 'tls_certificate_id, ca_tls_certificate_id, ' 'crl_container_id, tls_enabled, tls_ciphers, ' 'tls_versions, provisioning_status, ' + constants.HTTP_REUSE) if (proto == constants.PROTOCOL_UDP and persistence_type == constants.SESSION_PERSISTENCE_SOURCE_IP): kwargs = 
{'persistence_type': persistence_type, 'persistence_timeout': persistence_timeout, 'persistence_granularity': persistence_granularity} else: kwargs = {'persistence_type': persistence_type, 'persistence_cookie': persistence_cookie} persis = sample_session_persistence_tuple(**kwargs) mon = None if sample_pool == 0: id = 'sample_pool_id_0' members = [] if monitor is True: mon = sample_health_monitor_tuple( proto=monitor_proto, host_http_check=hm_host_http_check, expected_codes=monitor_expected_codes) elif sample_pool == 1: id = 'sample_pool_id_1' members = [sample_member_tuple('sample_member_id_1', '10.0.0.99', monitor_ip_port=monitor_ip_port), sample_member_tuple('sample_member_id_2', '10.0.0.98', monitor_ip_port=monitor_ip_port, backup=backup_member, enabled=not disabled_member)] if monitor is True: mon = sample_health_monitor_tuple( proto=monitor_proto, host_http_check=hm_host_http_check, expected_codes=monitor_expected_codes) elif sample_pool == 2: id = 'sample_pool_id_2' members = [sample_member_tuple('sample_member_id_3', '10.0.0.97', monitor_ip_port=monitor_ip_port)] if monitor is True: mon = sample_health_monitor_tuple( proto=monitor_proto, sample_hm=2, host_http_check=hm_host_http_check, expected_codes=monitor_expected_codes) return in_pool( id=id, protocol=proto, lb_algorithm=lb_algorithm, members=members, health_monitor=mon, session_persistence=persis if persistence is True else None, enabled=enabled, operating_status='ACTIVE', has_http_reuse=has_http_reuse, tls_certificate_id='pool_cont_1' if pool_cert else None, ca_tls_certificate_id='pool_ca_1' if pool_ca_cert else None, crl_container_id='pool_crl' if pool_crl else None, tls_enabled=tls_enabled, tls_ciphers=tls_ciphers, tls_versions=tls_versions, provisioning_status=provisioning_status) def sample_member_tuple(id, ip, enabled=True, operating_status='ACTIVE', provisioning_status=constants.ACTIVE, monitor_ip_port=False, backup=False): in_member = collections.namedtuple('member', 'id, ip_address, 
protocol_port, ' 'weight, subnet_id, ' 'enabled, operating_status, ' 'monitor_address, monitor_port, ' 'backup, provisioning_status') monitor_address = '192.168.1.1' if monitor_ip_port else None monitor_port = 9000 if monitor_ip_port else None return in_member( id=id, ip_address=ip, protocol_port=82, weight=13, subnet_id='10.0.0.1/24', enabled=enabled, operating_status=operating_status, monitor_address=monitor_address, monitor_port=monitor_port, backup=backup, provisioning_status=provisioning_status) def sample_session_persistence_tuple(persistence_type=None, persistence_cookie=None, persistence_timeout=None, persistence_granularity=None): spersistence = collections.namedtuple('SessionPersistence', 'type, cookie_name, ' 'persistence_timeout, ' 'persistence_granularity') pt = 'HTTP_COOKIE' if persistence_type is None else persistence_type return spersistence(type=pt, cookie_name=persistence_cookie, persistence_timeout=persistence_timeout, persistence_granularity=persistence_granularity) def sample_health_monitor_tuple(proto='HTTP', sample_hm=1, host_http_check=False, expected_codes=None, provisioning_status=constants.ACTIVE): proto = 'HTTP' if proto == 'TERMINATED_HTTPS' else proto monitor = collections.namedtuple( 'monitor', 'id, type, delay, timeout, fall_threshold, rise_threshold,' 'http_method, url_path, expected_codes, enabled, ' 'check_script_path, http_version, domain_name, ' 'provisioning_status') if sample_hm == 1: id = 'sample_monitor_id_1' url_path = '/index.html' elif sample_hm == 2: id = 'sample_monitor_id_2' url_path = '/healthmon.html' kwargs = { 'id': id, 'type': proto, 'delay': 30, 'timeout': 31, 'fall_threshold': 3, 'rise_threshold': 2, 'http_method': 'GET', 'url_path': url_path, 'expected_codes': '418', 'enabled': True, 'provisioning_status': provisioning_status, } if host_http_check: kwargs.update({'http_version': 1.1, 'domain_name': 'testlab.com'}) else: kwargs.update({'http_version': 1.0, 'domain_name': None}) if expected_codes: 
kwargs.update({'expected_codes': expected_codes}) if proto == constants.HEALTH_MONITOR_UDP_CONNECT: kwargs['check_script_path'] = (CONF.haproxy_amphora.base_path + 'lvs/check/' + 'udp_check.sh') else: kwargs['check_script_path'] = None return monitor(**kwargs) def sample_l7policy_tuple(id, action=constants.L7POLICY_ACTION_REJECT, redirect_pool=None, redirect_url=None, redirect_prefix=None, enabled=True, redirect_http_code=302, sample_policy=1, provisioning_status=constants.ACTIVE): in_l7policy = collections.namedtuple('l7policy', 'id, action, redirect_pool, ' 'redirect_url, redirect_prefix, ' 'l7rules, enabled,' 'redirect_http_code,' 'provisioning_status') l7rules = [] if sample_policy == 1: action = constants.L7POLICY_ACTION_REDIRECT_TO_POOL redirect_pool = sample_pool_tuple(sample_pool=2) l7rules = [sample_l7rule_tuple('sample_l7rule_id_1')] elif sample_policy == 2: action = constants.L7POLICY_ACTION_REDIRECT_TO_URL redirect_url = 'http://www.example.com' l7rules = [sample_l7rule_tuple('sample_l7rule_id_2', sample_rule=2), sample_l7rule_tuple('sample_l7rule_id_3', sample_rule=3)] elif sample_policy == 3: action = constants.L7POLICY_ACTION_REJECT l7rules = [sample_l7rule_tuple('sample_l7rule_id_4', sample_rule=4), sample_l7rule_tuple('sample_l7rule_id_5', sample_rule=5)] elif sample_policy == 4: action = constants.L7POLICY_ACTION_REJECT elif sample_policy == 5: action = constants.L7POLICY_ACTION_REJECT enabled = False l7rules = [sample_l7rule_tuple('sample_l7rule_id_5', sample_rule=5)] elif sample_policy == 6: action = constants.L7POLICY_ACTION_REJECT l7rules = [sample_l7rule_tuple('sample_l7rule_id_6', sample_rule=6)] elif sample_policy == 7: action = constants.L7POLICY_ACTION_REDIRECT_PREFIX redirect_prefix = 'https://example.com' l7rules = [sample_l7rule_tuple('sample_l7rule_id_2', sample_rule=2), sample_l7rule_tuple('sample_l7rule_id_3', sample_rule=3)] elif sample_policy == 8: action = constants.L7POLICY_ACTION_REDIRECT_TO_URL redirect_url = 
'http://www.ssl-type-l7rule-test.com' l7rules = [sample_l7rule_tuple('sample_l7rule_id_7', sample_rule=7), sample_l7rule_tuple('sample_l7rule_id_8', sample_rule=8), sample_l7rule_tuple('sample_l7rule_id_9', sample_rule=9), sample_l7rule_tuple('sample_l7rule_id_10', sample_rule=10), sample_l7rule_tuple('sample_l7rule_id_11', sample_rule=11)] return in_l7policy( id=id, action=action, redirect_pool=redirect_pool, redirect_url=redirect_url, redirect_prefix=redirect_prefix, l7rules=l7rules, enabled=enabled, redirect_http_code=redirect_http_code if (action in [constants.L7POLICY_ACTION_REDIRECT_TO_URL, constants.L7POLICY_ACTION_REDIRECT_PREFIX] and redirect_http_code) else None, provisioning_status=provisioning_status) def sample_l7rule_tuple(id, type=constants.L7RULE_TYPE_PATH, compare_type=constants.L7RULE_COMPARE_TYPE_STARTS_WITH, key=None, value='/api', invert=False, enabled=True, sample_rule=1, provisioning_status=constants.ACTIVE): in_l7rule = collections.namedtuple('l7rule', 'id, type, compare_type, ' 'key, value, invert, enabled,' 'provisioning_status') if sample_rule == 2: type = constants.L7RULE_TYPE_HEADER compare_type = constants.L7RULE_COMPARE_TYPE_CONTAINS key = 'Some-header' value = 'This string\\ with stuff' invert = True enabled = True if sample_rule == 3: type = constants.L7RULE_TYPE_COOKIE compare_type = constants.L7RULE_COMPARE_TYPE_REGEX key = 'some-cookie' value = 'this.*|that' invert = False enabled = True if sample_rule == 4: type = constants.L7RULE_TYPE_FILE_TYPE compare_type = constants.L7RULE_COMPARE_TYPE_EQUAL_TO key = None value = 'jpg' invert = False enabled = True if sample_rule == 5: type = constants.L7RULE_TYPE_HOST_NAME compare_type = constants.L7RULE_COMPARE_TYPE_ENDS_WITH key = None value = '.example.com' invert = False enabled = True if sample_rule == 6: type = constants.L7RULE_TYPE_HOST_NAME compare_type = constants.L7RULE_COMPARE_TYPE_ENDS_WITH key = None value = '.example.com' invert = False enabled = False if sample_rule == 7: 
type = constants.L7RULE_TYPE_SSL_CONN_HAS_CERT compare_type = constants.L7RULE_COMPARE_TYPE_EQUAL_TO key = None value = 'tRuE' invert = False enabled = True if sample_rule == 8: type = constants.L7RULE_TYPE_SSL_VERIFY_RESULT compare_type = constants.L7RULE_COMPARE_TYPE_EQUAL_TO key = None value = '1' invert = True enabled = True if sample_rule == 9: type = constants.L7RULE_TYPE_SSL_DN_FIELD compare_type = constants.L7RULE_COMPARE_TYPE_REGEX key = 'STREET' value = r'^STREET.*NO\.$' invert = True enabled = True if sample_rule == 10: type = constants.L7RULE_TYPE_SSL_DN_FIELD compare_type = constants.L7RULE_COMPARE_TYPE_STARTS_WITH key = 'OU-3' value = 'Orgnization Bala' invert = True enabled = True return in_l7rule( id=id, type=type, compare_type=compare_type, key=key, value=value, invert=invert, enabled=enabled, provisioning_status=provisioning_status) def sample_base_expected_config(frontend=None, logging=None, backend=None, peers=None, global_opts=None, defaults=None): if frontend is None: frontend = ("frontend sample_listener_id_1\n" " maxconn {maxconn}\n" " bind 10.0.0.2:80\n" " mode http\n" " default_backend sample_pool_id_1:sample_listener_id_1" "\n" " timeout client 50000\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) if logging is None: logging = (" log-format 12345\\ sample_loadbalancer_id_1\\ %f\\ " "%ci\\ %cp\\ %t\\ %{+Q}r\\ %ST\\ %B\\ %U\\ " "%[ssl_c_verify]\\ %{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ " "%tsc\n\n") if backend is None: backend = ("backend sample_pool_id_1:sample_listener_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 weight 13 " "check inter 30s fall 3 rise 2 cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 weight 13 " "check inter 
30s fall 3 rise 2 cookie sample_member_id_2\n" "\n").format(maxconn=constants.HAPROXY_DEFAULT_MAXCONN) if peers is None: peers = "\n\n" if global_opts is None: global_opts = " maxconn {maxconn}\n\n".format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) if defaults is None: defaults = ("defaults\n" " log global\n" " retries 3\n" " option redispatch\n" " option splice-request\n" " option splice-response\n" " option http-keep-alive\n\n") return ("# Configuration for loadbalancer sample_loadbalancer_id_1\n" "global\n" " daemon\n" " user nobody\n" " log /run/rsyslog/octavia/log local0\n" " log /run/rsyslog/octavia/log local1 notice\n" " stats socket /var/lib/octavia/sample_loadbalancer_id_1.sock" " mode 0666 level user\n" + global_opts + defaults + peers + frontend + logging + backend)
38.064544
79
0.607673
a5cf9f6c5bbac2ca12cb22ccc39fdb4660d9d460
1,560
py
Python
Expenses/migrations/0001_initial.py
adithyanps/netprofit-django
7ba87f054d09a201352635bb6cf8d0112208609e
[ "MIT" ]
null
null
null
Expenses/migrations/0001_initial.py
adithyanps/netprofit-django
7ba87f054d09a201352635bb6cf8d0112208609e
[ "MIT" ]
null
null
null
Expenses/migrations/0001_initial.py
adithyanps/netprofit-django
7ba87f054d09a201352635bb6cf8d0112208609e
[ "MIT" ]
null
null
null
# Generated by Django 2.2.4 on 2019-09-23 04:58 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ('Masters', '0001_initial'), ('Journal_Entry', '0001_initial'), ] operations = [ migrations.CreateModel( name='ExpenseCategory', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=60)), ], ), migrations.CreateModel( name='Expenses', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('Doc_no', models.IntegerField()), ('Date', models.DateField()), ('Amount', models.DecimalField(decimal_places=2, max_digits=15)), ('CreditAcct', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='CreditAcct', to='Masters.Account')), ('ExpenseAcct', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Masters.Account')), ('ExpenseCategory', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Expenses.ExpenseCategory')), ('journal_entry', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='Journal_Entry.JournalEntry')), ], ), ]
41.052632
154
0.612821
1479c743fd165ee0c2c6dd08fcf2ba7cdd8165e5
734
py
Python
ffwd_rec/params/red_arec025_params.py
Felix11H/OCNC17_Rosenbaum2017
15c2eea6c46e3bbe8d505365465364b83a30de78
[ "MIT" ]
null
null
null
ffwd_rec/params/red_arec025_params.py
Felix11H/OCNC17_Rosenbaum2017
15c2eea6c46e3bbe8d505365465364b83a30de78
[ "MIT" ]
3
2020-05-06T13:20:50.000Z
2020-05-06T13:20:50.000Z
ffwd_rec/params/red_arec025_params.py
felix11h/OCNC17_Rosenbaum2017
15c2eea6c46e3bbe8d505365465364b83a30de78
[ "MIT" ]
null
null
null
from brian2.units import * param_set = 'red' Ne= 63**2 Ni= 32**2 N = Ne+Ni tau_e = 15*ms tau_i = 10*ms E_L = -60*mV V_T = -50*mV V_re = -65*mV V_th = -10*mV DelT_e = 2*mV DelT_i = 0.5*mV ref_e = 1.5*ms ref_i = 0.5*ms tau_syn_e = 6*ms tau_syn_i = 5*ms tau_syn_f = tau_syn_e j_ee = 40*mV / (N**0.5) j_ie = 120*mV / (N**0.5) j_ei = -400*mV / (N**0.5) j_ii = -400*mV / (N**0.5) j_eF = 120*mV / (N**0.5) j_iF = 120*mV / (N**0.5) a_rec = 0.25 re_nrows, re_ncols = 63,63 ri_nrows, ri_ncols = 32,32 assert((re_nrows**2==Ne) & (ri_nrows**2==Ni)) Kee = 200 Kei = 200 Kie = 50 Kii = 50 a_ffwd = 0.1 f_nrows, f_ncols = 24, 24 KeF = 1000 KiF = 80 Nf = 576 assert(f_nrows**2==Nf) rf = 5*Hz method = 'rk2' #T = 10*ms T = 10000*ms
12.877193
45
0.585831
f06b5443e818839f2f36833a824a52a216d50a17
4,094
py
Python
plaidml/plaidml_setup.py
nitescuc/plaidml
83a81af049154a2701c4fde315b5ee9bc7050a7a
[ "Apache-2.0" ]
null
null
null
plaidml/plaidml_setup.py
nitescuc/plaidml
83a81af049154a2701c4fde315b5ee9bc7050a7a
[ "Apache-2.0" ]
null
null
null
plaidml/plaidml_setup.py
nitescuc/plaidml
83a81af049154a2701c4fde315b5ee9bc7050a7a
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python """Creates a plaidml user configuration file.""" from __future__ import print_function import sys from six.moves import input import plaidml import plaidml.exceptions import plaidml.settings def main(): ctx = plaidml.Context() plaidml.quiet() def choice_prompt(question, choices, default): inp = "" while not inp in choices: inp = input("{0}? ({1})[{2}]:".format(question, ",".join(choices), default)) if not inp: inp = default elif inp not in choices: print("Invalid choice: {}".format(inp)) return inp print(""" PlaidML Setup ({0}) Thanks for using PlaidML! Some Notes: * Bugs and other issues: https://github.com/plaidml/plaidml * Questions: https://stackoverflow.com/questions/tagged/plaidml * Say hello: https://groups.google.com/forum/#!forum/plaidml-dev * PlaidML is licensed under the GNU AGPLv3 """.format(plaidml.__version__)) # Operate as if nothing is set plaidml.settings._setup_for_test(plaidml.settings.user_settings) plaidml.settings.experimental = False devices, _ = plaidml.devices(ctx, limit=100, return_all=True) plaidml.settings.experimental = True exp_devices, unmatched = plaidml.devices(ctx, limit=100, return_all=True) if not (devices or exp_devices): if not unmatched: print(""" No OpenCL devices found. Check driver installation. Read the helpful, easy driver installation instructions from our README: http://github.com/plaidml/plaidml """) else: print(""" No supported devices found. Run 'clinfo' and file an issue containing the full output. 
""") sys.exit(-1) print("Default Config Devices:") if not devices: print(" No devices.") for dev in devices: print(" {0} : {1}".format(dev.id.decode(), dev.description.decode())) print("\nExperimental Config Devices:") if not exp_devices: print(" No devices.") for dev in exp_devices: print(" {0} : {1}".format(dev.id.decode(), dev.description.decode())) print( "\nUsing experimental devices can cause poor performance, crashes, and other nastiness.\n") exp = choice_prompt("Enable experimental device support", ["y", "n"], "n") plaidml.settings.experimental = exp == "y" try: devices = plaidml.devices(ctx, limit=100) except plaidml.exceptions.PlaidMLError: print("\nNo devices available in chosen config. Rerun plaidml-setup.") sys.exit(-1) if devices: dev = 1 if len(devices) > 1: print(""" Multiple devices detected (You can override by setting PLAIDML_DEVICE_IDS). Please choose a default device: """) devrange = range(1, len(devices) + 1) for i in devrange: print(" {0} : {1}".format(i, devices[i - 1].id.decode())) dev = choice_prompt("\nDefault device", [str(i) for i in devrange], "1") plaidml.settings.device_ids = [devices[int(dev) - 1].id.decode()] print("\nSelected device:\n {0}".format(plaidml.devices(ctx)[0])) print("\nAlmost done. Multiplying some matrices...") # Reinitialize to send a usage report print("Tile code:") print(" function (B[X,Z], C[Z,Y]) -> (A) { A[x,y : X,Y] = +(B[x,z] * C[z,y]); }") with plaidml.open_first_device(ctx) as dev: matmul = plaidml.Function( "function (B[X,Z], C[Z,Y]) -> (A) { A[x,y : X,Y] = +(B[x,z] * C[z,y]); }") shape = plaidml.Shape(ctx, plaidml.DType.FLOAT32, 3, 3) a = plaidml.Tensor(dev, shape) b = plaidml.Tensor(dev, shape) c = plaidml.Tensor(dev, shape) plaidml.run(ctx, matmul, inputs={"B": b, "C": c}, outputs={"A": a}) print("Whew. 
That worked.\n") sav = choice_prompt("Save settings to {0}".format(plaidml.settings.user_settings), ["y", "n"], "y") if sav == "y": plaidml.settings.save(plaidml.settings.user_settings) print("Success!\n") if __name__ == "__main__": main()
33.834711
99
0.617
fb97542f8df39a1e521fa46a20a797bd086be65e
723
py
Python
low_pass_filter.py
harshmathur1990/image-processing-learn
3b51b243927f42efe9889310c8dacd5f8d857c76
[ "MIT" ]
null
null
null
low_pass_filter.py
harshmathur1990/image-processing-learn
3b51b243927f42efe9889310c8dacd5f8d857c76
[ "MIT" ]
null
null
null
low_pass_filter.py
harshmathur1990/image-processing-learn
3b51b243927f42efe9889310c8dacd5f8d857c76
[ "MIT" ]
null
null
null
import cv2 import numpy as np def do(): image = cv2.imread('raw_images/dog.png') height, width, channels = image.shape gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) fft_gray = np.fft.fft2(gray_image) fft_shift_gray_image = np.fft.fftshift(fft_gray) center_x, center_y = height/2, width/2 for i in range(0, height): for j in range(0, width): value = (i-center_x)**2 + (j-center_y)**2 if value > 100: fft_shift_gray_image[i][j] = 0 fft_gray = np.fft.fftshift(fft_shift_gray_image) final_image = abs(np.fft.ifft2(fft_gray)) cv2.imwrite('processed_images/low_pass_dog.png', final_image) if __name__ == '__main__': do()
24.931034
65
0.644537
0d122bd8bc4b11065f4416b4a8b904b92a5f2ee9
3,687
py
Python
data/pipeline/run/60_jpl/jpl_lookup.py
oneokorganization/asterank
b3194e3852f9bd4d671a63a4261fb985057ffadc
[ "MIT" ]
184
2015-01-03T21:53:31.000Z
2022-02-25T10:06:36.000Z
data/pipeline/run/60_jpl/jpl_lookup.py
oneokorganization/asterank
b3194e3852f9bd4d671a63a4261fb985057ffadc
[ "MIT" ]
14
2016-06-14T21:01:33.000Z
2022-03-11T23:19:48.000Z
data/pipeline/run/60_jpl/jpl_lookup.py
oneokorganization/asterank
b3194e3852f9bd4d671a63a4261fb985057ffadc
[ "MIT" ]
53
2015-03-25T09:51:17.000Z
2021-11-01T21:11:31.000Z
#!/usr/bin/env python # # Client for live queries to JPL database lookup # import sys import urllib import re import json import time from bs4 import BeautifulSoup from datetime import datetime class Asteroid: def __init__(self, name): self.name = name def load(self): r = JPL_Query(self.name) self.data = {} self.data['Diameter (km)'] = r.physicalParameter('diameter') self.data['GM (km^3/s^2)'] = r.physicalParameter('GM') self.data['Density (g/cm^3)'] = r.physicalParameter('bulk density') self.data['Extent (km)'] = r.physicalParameter('extent') self.data['Rotation (hrs)'] = r.physicalParameter('rotation period') self.data['Albedo'] = r.physicalParameter('geometric albedo') self.data['Inclination (deg)'] = r.orbitalParameter('i') #self.data['Passage (JED)'] = r.orbitalParameter('t') self.data['Perihelion (AU)'] = r.orbitalParameter('q') self.data['Aphelion (AU)'] = r.orbitalParameter('Q') self.data['Semi-major Axis (AU)'] = r.orbitalParameter('a') self.data['Period (days)'] = r.orbitalParameter('period') self.data['EMOID (AU)'] = r.additionalInfoParameter('Earth MOID') close_approaches = r.closeApproaches() if close_approaches: self.data['Next Pass'], self.data['Close Approaches'] = close_approaches class JPL_Query: def __init__(self, query): src = urllib.urlopen('http://ssd.jpl.nasa.gov/sbdb.cgi?sstr=%s;cad=1' % query ).read() self.soup = BeautifulSoup(src.replace('cellspacing="0"0', '')) def orbitalParameter(self, txt): tag = self.soup.find(text=txt) if tag : el = tag.find_parent('td').next_sibling.next_sibling.find('font').next return float(el) return -1 def physicalParameter(self, txt): tag = self.soup.find(text=txt) if tag: el = tag.find_parent('td').next_sibling.next_sibling.next_sibling.next_sibling.find('font').next try: return float(el) except ValueError: return el return -1 def additionalInfoParameter(self, txt): tag = self.soup.find(text=txt) if tag: res = re.sub(r'[^\d.]+', '', tag.parent.next_sibling) return float(res) return -1 def 
closeApproaches(self): tag = self.soup.find(text='Nominal Distance (AU)') if not tag: return None tag = tag.find_parent('tr') if not tag: return None tag = tag.next_sibling.next_sibling results = [] soonest = None while tag: texts = map(lambda x: x.get_text(), tag.find_all('font')) d = {} pydate = datetime.strptime(texts[0], '%Y-%b-%d %H:%M') if pydate >= datetime.today() and texts[2] == 'Earth': d['date'] = pydate.strftime('%b %d, %Y') #texts[0] d['date_iso'] = pydate.isoformat() d['uncertainty'] = texts[1] d['body'] = texts[2] d['nom_dist_au'] = texts[3] d['min_dist_au'] = texts[4] d['max_dist_au'] = texts[5] d['v_relative'] = texts[6] d['v_infinity'] = texts[7] d['jd'] = texts[8] d['uncertainty2'] = texts[9] d['semi_major'] = texts[10] d['semi_minor'] = texts[11] d['range_lov'] = texts[12] d['n_sigma'] = texts[13] d['bp'] = texts[14] d['orbit_ref'] = texts[15] d['ref'] = texts[16] d['modified'] = texts[17] if not soonest: soonest = d results.append(d) tag = tag.next_sibling if tag: tag = tag.next_sibling return soonest, results if __name__ == "__main__": if len(sys.argv) < 2: print 'usage: lookup <name>' sys.exit(1) a = Asteroid(' '.join(sys.argv[1:])) a.load() print json.dumps(a.data)
29.031496
102
0.606184
e2ed001e8cef21fe43b434a2cf2a16e975f15620
135
py
Python
text/_cascade/_form/prefix.py
jedhsu/text
8525b602d304ac571a629104c48703443244545c
[ "Apache-2.0" ]
null
null
null
text/_cascade/_form/prefix.py
jedhsu/text
8525b602d304ac571a629104c48703443244545c
[ "Apache-2.0" ]
null
null
null
text/_cascade/_form/prefix.py
jedhsu/text
8525b602d304ac571a629104c48703443244545c
[ "Apache-2.0" ]
null
null
null
""" Vendor prefixing. """ class VendorPrefixing: EPub = "epub" Moz = "moz" Ms = "ms" O = "o" Webkit = "webkit"
9.642857
22
0.496296
1dd7e63c393496fa6ad555982c5795f1aa199658
342
py
Python
setup.py
awblocker/exif2kmz
0eb0357078b3fa50c7180f518d219138eb87e599
[ "MIT" ]
1
2020-01-11T10:23:41.000Z
2020-01-11T10:23:41.000Z
setup.py
awblocker/exif2kmz
0eb0357078b3fa50c7180f518d219138eb87e599
[ "MIT" ]
null
null
null
setup.py
awblocker/exif2kmz
0eb0357078b3fa50c7180f518d219138eb87e599
[ "MIT" ]
null
null
null
#!/usr/bin/env python from distutils.core import setup setup(name='exif2kmz', version='0.1', description='Converts EXIF geotagged images to KMZ', author='Alexander W Blocker', author_email='ablocker (at) gmail (dot) com', scripts=['scripts/exif2kmz'], requires=['Image (>=1.5)','pyexiv2 (>=0.1)'] )
26.307692
58
0.625731
5fcac3fa74204fe751f1b8a1edc12a08afecdf0c
626
py
Python
Archive/Scattering/data_test/code/plot_data.py
merny93/tornado
a0d92dcede59a63eb6d765d5bbf04af5eb266ea0
[ "MIT" ]
null
null
null
Archive/Scattering/data_test/code/plot_data.py
merny93/tornado
a0d92dcede59a63eb6d765d5bbf04af5eb266ea0
[ "MIT" ]
null
null
null
Archive/Scattering/data_test/code/plot_data.py
merny93/tornado
a0d92dcede59a63eb6d765d5bbf04af5eb266ea0
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ Created on Mon Jan 11 17:31:41 2021 @author: David """ import pandas as pd import numpy as np import matplotlib.pyplot as plt from scipy.optimize import curve_fit data_df = pd.read_csv('Na-22_Calibration_009.csv', skiprows = 6) x = np.array(data_df['Channel']) y = np.array(data_df['Counts']) def gaussian_fit(x, a, mean, sigma): return a*np.exp(-(x - mean)**2/(2*sigma**2)) popt,pcov = curve_fit(gaussian_fit, x, y, p0 = [1, 1350, 2]) print(pcov) plt.plot(x, y) plt.plot(x, gaussian_fit(x, *popt)) plt.xlim(1150,1550) plt.ylim(0,30) plt.xlabel('Channel') plt.ylabel('Counts') plt.show()
20.866667
64
0.680511
d9422b20319f8d9d49400e67c3326311af1b5632
534
py
Python
backend/home/migrations/0001_load_initial_data.py
crowdbotics-apps/beatclub-32518
482bc4a2eba8adadefdd289ce82b0fafd98d02b8
[ "FTL", "AML", "RSA-MD" ]
null
null
null
backend/home/migrations/0001_load_initial_data.py
crowdbotics-apps/beatclub-32518
482bc4a2eba8adadefdd289ce82b0fafd98d02b8
[ "FTL", "AML", "RSA-MD" ]
null
null
null
backend/home/migrations/0001_load_initial_data.py
crowdbotics-apps/beatclub-32518
482bc4a2eba8adadefdd289ce82b0fafd98d02b8
[ "FTL", "AML", "RSA-MD" ]
null
null
null
from django.db import migrations def create_site(apps, schema_editor): Site = apps.get_model("sites", "Site") custom_domain = "beatclub-32518.botics.co" site_params = { "name": "BeatClub", } if custom_domain: site_params["domain"] = custom_domain Site.objects.update_or_create(defaults=site_params, id=1) class Migration(migrations.Migration): dependencies = [ ("sites", "0002_alter_domain_unique"), ] operations = [ migrations.RunPython(create_site), ]
20.538462
61
0.655431
0666f1aa1daa54ca9ea02bcba9f55adf89fd152e
696
py
Python
new_nearby_places.py
Atulbargotra/smart_driving
7356b468260c1f102528df702c412f3e9cbd74d5
[ "MIT" ]
4
2020-02-21T18:25:38.000Z
2021-11-14T20:23:15.000Z
new_nearby_places.py
Atulbargotra/smart_driving
7356b468260c1f102528df702c412f3e9cbd74d5
[ "MIT" ]
3
2020-02-21T17:58:58.000Z
2020-02-21T17:58:59.000Z
new_nearby_places.py
Atulbargotra/smart_driving
7356b468260c1f102528df702c412f3e9cbd74d5
[ "MIT" ]
4
2020-02-21T18:25:43.000Z
2021-01-13T10:04:20.000Z
import requests import json from text_to_speech import speak from location import get_location import os from dotenv import load_dotenv load_dotenv() def get_Nearest_Petrol_Station(longitude,latitude,category,radius=2000): API_KEY = os.getenv('HERE_MAPS_API_KEY') url = f"https://places.sit.ls.hereapi.com/places/v1/browse?apiKey={API_KEY}&in={latitude},{longitude};r={radius}&cat={category}&pretty" x = requests.get(url) #petrol-station data = json.loads(x.text) nearest_place = data['results']['items'][0]['title'] distance = data['results']['items'][0]['distance'] text_input = f"Nearest Petrol pump is {nearest_place} at {distance} meter" speak(text_input)
40.941176
139
0.735632
465e6a1be7cfe5e7a67fc8f2b4db6b085d24a564
3,014
py
Python
docs/report/cloudmesh-openapi/tests/deprecated/test_generator_broken.py
rickotten/cybertraining-dsc.github.io
c8ea59be4f09fd543040ba0908af118df5820a70
[ "Apache-2.0" ]
7
2020-02-29T14:53:19.000Z
2021-01-17T17:08:44.000Z
tests/deprecated/test_generator_broken.py
imishra1/cloudmesh-openapi
89fdc74aae459aba4ebb959f8564bb3ddd967cbb
[ "Apache-2.0" ]
27
2020-02-29T13:38:11.000Z
2020-09-02T20:24:59.000Z
tests/deprecated/test_generator_broken.py
imishra1/cloudmesh-openapi
89fdc74aae459aba4ebb959f8564bb3ddd967cbb
[ "Apache-2.0" ]
6
2020-03-02T17:09:14.000Z
2020-10-30T22:48:01.000Z
############################################################### # pytest -v --capture=no tests/test_03_generator.py # pytest -v tests/test_03_generator.py # pytest -v --capture=no tests/test_generator..py::Test_name::<METHODNAME> ############################################################### import sys import pytest import yaml as yaml sys.path.append("cloudmesh/openapi/function") import tests.sample_function_gen as testfun from cloudmesh.common.util import HEADING from cloudmesh.common.Benchmark import Benchmark @pytest.mark.incremental class TestGenerator: def test_yaml_syntax(self): """ function to check if YAML synatx is correct or not """ HEADING() Benchmark.Start() with open("cloudmesh/openapi/function/sampleFunction.yaml", "r") as stream: try: yaml.safe_load(stream) except yaml.YAMLError as exc: assert False, "Yaml file has syntax error" Benchmark.Stop() def test_openapi_info_servers_paths(self): """ function to check if YAML contains opeaapi, info ,servers, and paths information """ HEADING() Benchmark.Start() with open("cloudmesh/openapi/function/sampleFunction.yaml", "r") as stream: try: keys = yaml.safe_load(stream).keys() assert keys.__contains__("openapi"), "openapi is not found" assert keys.__contains__("info"), "info is not found" assert keys.__contains__("servers"), "servers is not found" assert keys.__contains__("paths"), "paths is not found" except yaml.YAMLError as exc: assert False, "Yaml file has syntax error" Benchmark.Stop() def test_paths(self): """ function to validate paths information """ HEADING() Benchmark.Start() with open("cloudmesh/openapi/function/sampleFunction.yaml", "r") as stream: try: paths = yaml.safe_load(stream).get("paths") assert paths is not None, "paths value should not be null" assert paths.keys().__contains__( "/" + testfun.sampleFunction.__name__), "Resource name should be " + testfun.sampleFunction.__name__ getOperation = paths.get("/" + testfun.sampleFunction.__name__) assert getOperation.keys().__contains__( 
"get"), "get operation is missing " parameters = getOperation.get("get").get("parameters") # assert len(parameters)+1==len(testfun.sampleFunction.__annotations__.items()), "get operation is missing " except yaml.YAMLError as exc: assert False, "Yaml file has syntax error" Benchmark.Stop() def test_benchmark(self): HEADING() Benchmark.print(csv=True, sysinfo=False, tag=cloud)
37.675
124
0.577306
9ad1d136b8e92555bdb0708d9469500892b9f9f3
2,712
py
Python
lib/models/centernet3d.py
sjg02122/monodle
9eaf405b206f71ae13a5b86ad7a47f44f3b060a9
[ "MIT" ]
92
2021-03-31T02:40:27.000Z
2022-03-30T03:35:27.000Z
lib/models/centernet3d.py
sjg02122/monodle
9eaf405b206f71ae13a5b86ad7a47f44f3b060a9
[ "MIT" ]
22
2021-06-17T02:32:26.000Z
2022-01-30T14:23:41.000Z
lib/models/centernet3d.py
sjg02122/monodle
9eaf405b206f71ae13a5b86ad7a47f44f3b060a9
[ "MIT" ]
17
2021-06-13T23:39:30.000Z
2022-03-03T07:09:14.000Z
import os import cv2 import torch import torch.nn as nn import numpy as np from lib.backbones import dla from lib.backbones.dlaup import DLAUp from lib.backbones.hourglass import get_large_hourglass_net from lib.backbones.hourglass import load_pretrian_model class CenterNet3D(nn.Module): def __init__(self, backbone='dla34', neck='DLAUp', num_class=3, downsample=4): """ CenterNet for monocular 3D object detection. :param backbone: the backbone of pipeline, such as dla34. :param neck: the necks of detection, such as dla_up. :param downsample: the ratio of down sample. [4, 8, 16, 32] :param head_conv: the channels of convolution in head. default: 256 """ assert downsample in [4, 8, 16, 32] super().__init__() self.heads = {'heatmap': num_class, 'offset_2d': 2, 'size_2d' :2, 'depth': 2, 'offset_3d': 2, 'size_3d':3, 'heading': 24} self.backbone = getattr(dla, backbone)(pretrained=True, return_levels=True) channels = self.backbone.channels # channels list for feature maps generated by backbone self.first_level = int(np.log2(downsample)) scales = [2 ** i for i in range(len(channels[self.first_level:]))] self.neck = DLAUp(channels[self.first_level:], scales_list=scales) # feature fusion [such as DLAup, FPN] # initialize the head of pipeline, according to heads setting. 
for head in self.heads.keys(): output_channels = self.heads[head] fc = nn.Sequential( nn.Conv2d(channels[self.first_level], 256, kernel_size=3, padding=1, bias=True), nn.ReLU(inplace=True), nn.Conv2d(256, output_channels, kernel_size=1, stride=1, padding=0, bias=True)) # initialization if 'heatmap' in head: fc[-1].bias.data.fill_(-2.19) else: self.fill_fc_weights(fc) self.__setattr__(head, fc) def forward(self, input): feat = self.backbone(input) feat = self.neck(feat[self.first_level:]) ret = {} for head in self.heads: ret[head] = self.__getattr__(head)(feat) return ret def fill_fc_weights(self, layers): for m in layers.modules(): if isinstance(m, nn.Conv2d): nn.init.normal_(m.weight, std=0.001) if m.bias is not None: nn.init.constant_(m.bias, 0) if __name__ == '__main__': import torch net = CenterNet3D(backbone='dla34') print(net) input = torch.randn(4, 3, 384, 1280) print(input.shape, input.dtype) output = net(input) print(output.keys())
33.073171
129
0.619469
f7fd7ca8d0f5e73967a4f5b5743658a0a4422e31
186
py
Python
lib/matplotlib/tri/__init__.py
pierre-haessig/matplotlib
0d945044ca3fbf98cad55912584ef80911f330c6
[ "MIT", "PSF-2.0", "BSD-3-Clause" ]
8
2021-12-14T21:30:01.000Z
2022-02-14T11:30:03.000Z
lib/matplotlib/tri/__init__.py
pierre-haessig/matplotlib
0d945044ca3fbf98cad55912584ef80911f330c6
[ "MIT", "PSF-2.0", "BSD-3-Clause" ]
null
null
null
lib/matplotlib/tri/__init__.py
pierre-haessig/matplotlib
0d945044ca3fbf98cad55912584ef80911f330c6
[ "MIT", "PSF-2.0", "BSD-3-Clause" ]
3
2017-05-31T01:42:22.000Z
2020-06-23T13:57:49.000Z
""" Unstructured triangular grid functions. """ from __future__ import print_function from triangulation import * from tricontour import * from tripcolor import * from triplot import *
18.6
39
0.795699
97c185cba0123127a9989e963eb959275c5b9778
163
py
Python
backend/app/error.py
Orenoid/online-clipboard
2de2d1a096f7e5cc76c6168b5aa9ea41d5f474bd
[ "MIT" ]
2
2020-05-22T07:44:18.000Z
2020-06-07T13:57:26.000Z
app/utils/exception.py
Orenoid/wechat-push
735d77b5a9f693bce99b8e4eb91173ca4dd1e8c6
[ "MIT" ]
2
2022-02-19T02:43:56.000Z
2022-02-27T05:25:24.000Z
app/utils/exception.py
Orenoid/wechat-push
735d77b5a9f693bce99b8e4eb91173ca4dd1e8c6
[ "MIT" ]
null
null
null
from flask import current_app def handle_exception(e: Exception): current_app.logger.exception('Uncaught exception') return 'Internal Server Error', 500
23.285714
54
0.773006
0e59d5e43b142f7db499d95ae9936c011c432d04
6,818
py
Python
tests/test_minas_fake.py
luis-puhl/minas-py
7a665da366ab65bbb05b1713292492cf5ab4a859
[ "MIT" ]
4
2019-05-01T01:29:32.000Z
2019-06-22T08:16:20.000Z
tests/test_minas_fake.py
luis-puhl/minas-py
7a665da366ab65bbb05b1713292492cf5ab4a859
[ "MIT" ]
3
2020-03-24T17:04:13.000Z
2021-06-08T19:50:48.000Z
tests/test_minas_fake.py
luis-puhl/minas-py
7a665da366ab65bbb05b1713292492cf5ab4a859
[ "MIT" ]
null
null
null
import unittest import os import queue import asyncio import time import sys import shutil import logging import csv import io from typing import List from copy import deepcopy import yaml import matplotlib import numpy as np import pandas as pd matplotlib.use('Agg') import matplotlib.pyplot as plt from dask.distributed import Client from minas.timed import Timed from minas.cluster import Cluster from minas.example import Example from minas.minas_algo import MinasAlgorith, MinasConsts from minas.minas_base import MinasBase from .plots import * class MinasFakeExamplesTest(unittest.TestCase): @classmethod def setUpClass(cls): # setupLog() with open('logging.conf.yaml', 'r') as f: config = yaml.load(f, Loader=yaml.FullLoader) logging.config.dictConfig(config) @classmethod def tearDownClass(cls): pass def setUp(self): self.basedir = 'run/seeds/' def tearDown(self): pass def setupFakeExamples(self, seed): np.random.seed(seed) attributes = np.random.randint(2, 40) examples = [] for labelIndex in range(np.random.randint(2, 5)): mu = np.random.random() * 10 sigma = np.random.random() * 5 for exampleIndex in range(np.random.randint(200, 1000)): example = Example(item = [], label = 'Class #' + str(labelIndex)) for i in range(attributes): value = np.random.normal(loc=mu, scale=sigma) example.item.append(float(value)) examples.append(example) np.random.shuffle(examples) return examples def fake_seed(self, seed): dirr = self.basedir + str(seed) + '/' if os.path.exists(dirr): shutil.rmtree(dirr) if not os.path.exists(dirr): os.makedirs(dirr) timed = Timed() TimedMinasAlgorith = timed.timedClass(MinasAlgorith) CONSTS=MinasConsts() logging.info('Next seed: {}'.format(seed)) minas = MinasBase(minasAlgorith=TimedMinasAlgorith(CONSTS=CONSTS)) # rootLogger = logging.getLogger() logHandler = logging.FileHandler(dirr + 'run.log') logHandler.formatter = rootLogger.handlers[0].formatter rootLogger.addHandler(logHandler) # 
------------------------------------------------------------------------------------------------ examples = self.setupFakeExamples(seed) plotExamples2D(dirr, '0-fake_base', examples) # ------------------------------------------------------------------------------------------------ training_set = examples[:int(len(examples) * .1)] with open(dirr + 'training_set.csv', 'w') as training_set_csv: for ex in training_set: training_set_csv.write(','.join([str(i) for i in ex.item]) + ',' + ex.label + '\n') plotExamples2D(dirr, '1-training_set', training_set) trainingDf = pd.DataFrame(map(lambda x: {'item': x.item, 'label': x.label}, training_set)) logging.info('trainingDf' + '\n' + str(trainingDf.groupby('label').describe()) + '\n') minas.offline(trainingDf) minas.storeToFile(dirr + 'minas.yaml') minas.restoreFromFile(dirr + 'minas.yaml') logging.info(str(minas) + str(minas)) self.assertGreater(len(minas.clusters), 0, 'model must be trainded after offline call') plotExamples2D(dirr, '2-offline_clusters', [], minas.clusters) plotExamples2D(dirr, '3-offline_training', training_set, minas.clusters) plotExamples2D(dirr, '4-offline_all_data', examples, minas.clusters) minas.minasAlgorith.checkTraining(trainingDf, minas.clusters) # ------------------------------------------------------------------------------------------------ testSet = examples[int(len(examples) * .1):] minas.online( i.item for i in testSet ) # ------------------------------------------------------------------------------------------------ logging.info('aggregatin resutls') results = [] positiveCount = 0 negativeCount = 0 unknownCount = 0 totalExamples = len(examples) with open(dirr + 'examples.csv', 'w') as examplesCsv: for ex in examples: ex = deepcopy(ex) hasLabel, cluster, d = None, None, None if minas: hasLabel, cluster, d, ex = minas.classify(ex) examplesCsv.write( ','.join([str(i) for i in ex.item]) + ',' + ex.label + ',' + (cluster.label if cluster and hasLabel else 'Unknown') + ',' + ('Positive' if 
cluster and cluster.label == ex.label else 'Negative') + '\n' ) if hasLabel: if cluster.label == ex.label: ex.label = 'Positive' positiveCount += 1 else: ex.label = 'Negative' negativeCount += 1 else: ex.label = 'Unknown' unknownCount += 1 results.append(ex) # end results map result = '[seed {seed}] positive: {p}({pp:.2%}), negative: {n}({nn:.2%}), unknown: {u}({uu:.2%})'.format( seed=seed, p=positiveCount, pp=positiveCount/totalExamples, n=negativeCount, nn=negativeCount/totalExamples, u=unknownCount, uu=unknownCount/totalExamples, ) logging.info('\n\n\t=== Final Results ===\n{model}\n{result}\n'.format(model=str(minas), result=result)) plotExamples2D(dirr, '5-online_clusters', [], minas.clusters if minas else []) plotExamples2D(dirr, '6-online_resutls', results, minas.clusters if minas else []) onlyFalses = [x for x in results if x.label is not 'Positive'] plotExamples2D(dirr, '7-online_neg_unk', onlyFalses, minas.clusters if minas else []) del minas rootLogger.removeHandler(logHandler) # ------------------------------------------------------------------------------------------------ df = timed.statisticSummary() logging.info(f'=========== Timed Functions Summary ===========\n{df}') fig, ax = timed.mkTimedResumePlot() plt.tight_layout(.5) plt.savefig(dirr + 'timed-run.png') plt.close(fig) timed.clearTimes() return result, df.describe() def test_fake_seed200(self): return self.fake_seed(200) def test_fake_seed201(self): return self.fake_seed(201) def test_fake_seed202(self): return self.fake_seed(202) #
42.08642
113
0.544001
e5e840243029a4e7d504e4ebdbf7facab8e65842
10,136
py
Python
affinity_pred/model.py
jglaser/affinity_pred
b51ba839449f828706b9179949015e27ae18960b
[ "BSD-2-Clause" ]
null
null
null
affinity_pred/model.py
jglaser/affinity_pred
b51ba839449f828706b9179949015e27ae18960b
[ "BSD-2-Clause" ]
null
null
null
affinity_pred/model.py
jglaser/affinity_pred
b51ba839449f828706b9179949015e27ae18960b
[ "BSD-2-Clause" ]
null
null
null
from transformers import BertModel, BertConfig from transformers.models.bert.modeling_bert import BertAttention, BertIntermediate, BertOutput from transformers.modeling_utils import apply_chunking_to_forward from transformers.deepspeed import deepspeed_config, is_deepspeed_zero3_enabled import torch import torch.nn as nn from torch.nn import functional as F from torch import Tensor class CrossAttentionLayer(nn.Module): def __init__(self, config, other_config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.crossattention = BertAttention(config) self.intermediate = BertIntermediate(config) self.output = BertOutput(config) self.crossattention.self.key = nn.Linear(other_config.hidden_size, self.crossattention.self.all_head_size) self.crossattention.self.value = nn.Linear(other_config.hidden_size, self.crossattention.self.all_head_size) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): cross_attention_outputs = self.crossattention( hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, past_key_value=None ) attention_output = cross_attention_outputs[0] outputs = cross_attention_outputs[1:] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class EnsembleSequenceRegressor(torch.nn.Module): def 
__init__(self, seq_model_name, smiles_model_name, max_seq_length, sparse_attention=True, output_attentions=False, n_cross_attention_layers=3): super().__init__() # enable gradient checkpointing seq_config = BertConfig.from_pretrained(seq_model_name) seq_config.gradient_checkpointing=True self.seq_model = BertModel.from_pretrained(seq_model_name,config=seq_config) smiles_config = BertConfig.from_pretrained(smiles_model_name) smiles_config.gradient_checkpointing=True self.smiles_model = BertModel.from_pretrained(smiles_model_name,config=smiles_config) self.max_seq_length = max_seq_length # for deepspeed stage 3 (to estimate buffer sizes) self.config = BertConfig(hidden_size = self.seq_model.config.hidden_size + self.smiles_model.config.hidden_size) self.sparsity_config = None if sparse_attention: try: from deepspeed.ops.sparse_attention import FixedSparsityConfig as STConfig self.sparsity_config = STConfig(num_heads=self.seq_model.config.num_attention_heads) except: pass if self.sparsity_config is not None: # replace the self attention layer of the sequence model from deepspeed.ops.sparse_attention import SparseAttentionUtils self.sparse_attention_utils = SparseAttentionUtils config = seq_config sparsity_config = self.sparsity_config layers = self.seq_model.encoder.layer from affinity_pred.sparse_self_attention import BertSparseSelfAttention for layer in layers: deepspeed_sparse_self_attn = BertSparseSelfAttention( config=config, sparsity_config=sparsity_config, max_seq_length=self.max_seq_length) deepspeed_sparse_self_attn.query = layer.attention.self.query deepspeed_sparse_self_attn.key = layer.attention.self.key deepspeed_sparse_self_attn.value = layer.attention.self.value layer.attention.self = deepspeed_sparse_self_attn self.pad_token_id = seq_config.pad_token_id if hasattr( seq_config, 'pad_token_id') and seq_config.pad_token_id is not None else 0 # Cross-attention layers self.n_cross_attention_layers = n_cross_attention_layers self.cross_attention_seq = 
nn.ModuleList([CrossAttentionLayer(config=seq_config,other_config=smiles_config) for _ in range(n_cross_attention_layers)]) self.cross_attention_smiles = nn.ModuleList([CrossAttentionLayer(config=smiles_config,other_config=seq_config) for _ in range(n_cross_attention_layers)]) if is_deepspeed_zero3_enabled(): with deepspeed.zero.Init(config=deepspeed_config()): self.linear = torch.nn.Linear(seq_config.hidden_size+smiles_config.hidden_size,1) else: self.linear = torch.nn.Linear(seq_config.hidden_size+smiles_config.hidden_size,1) self.output_attentions=output_attentions def pad_to_block_size(self, block_size, input_ids, attention_mask, pad_token_id): batch_size, seq_len = input_ids.shape pad_len = (block_size - seq_len % block_size) % block_size if pad_len > 0: if input_ids is not None: input_ids = F.pad(input_ids, (0, pad_len), value=pad_token_id) # pad attention mask without attention on the padding tokens attention_mask = F.pad(attention_mask, (0, pad_len), value=False) return pad_len, input_ids, attention_mask def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, ): outputs = [] input_ids_1 = input_ids[:,:self.max_seq_length] attention_mask_1 = attention_mask[:,:self.max_seq_length] # sequence model with sparse attention input_shape = input_ids_1.size() device = input_ids_1.device extended_attention_mask_1: torch.Tensor = self.seq_model.get_extended_attention_mask(attention_mask_1, input_shape, device) if self.sparsity_config is not None: pad_len_1, input_ids_1, attention_mask_1 = self.pad_to_block_size( block_size=self.sparsity_config.block, input_ids=input_ids_1, attention_mask=attention_mask_1, pad_token_id=self.pad_token_id) embedding_output = self.seq_model.embeddings( input_ids=input_ids_1 ) encoder_outputs = self.seq_model.encoder( embedding_output, attention_mask=extended_attention_mask_1, head_mask=head_mask ) sequence_output = 
encoder_outputs[0] if self.sparsity_config is not None and pad_len_1 > 0: sequence_output = self.sparse_attention_utils.unpad_sequence_output( pad_len_1, sequence_output) # smiles model with full attention input_ids_2 = input_ids[:,self.max_seq_length:] input_shape = input_ids_2.size() attention_mask_2 = attention_mask[:,self.max_seq_length:] encoder_outputs = self.smiles_model(input_ids=input_ids_2, attention_mask=attention_mask_2, return_dict=False) smiles_output = encoder_outputs[0] # cross attentions if self.output_attentions: output_attentions = True # cross-attention masks cross_attention_mask_1 = self.seq_model.invert_attention_mask( attention_mask_1[:,None,:]*attention_mask_2[:,:,None]) cross_attention_mask_2 = self.smiles_model.invert_attention_mask( attention_mask_2[:,None,:]*attention_mask_1[:,:,None]) hidden_seq = sequence_output hidden_smiles = smiles_output for i in range(self.n_cross_attention_layers): attention_output_1 = self.cross_attention_seq[i]( hidden_states=hidden_seq, attention_mask=attention_mask_1, encoder_hidden_states=hidden_smiles, encoder_attention_mask=cross_attention_mask_2, output_attentions=output_attentions) attention_output_2 = self.cross_attention_smiles[i]( hidden_states=hidden_smiles, attention_mask=attention_mask_2, encoder_hidden_states=hidden_seq, encoder_attention_mask=cross_attention_mask_1, output_attentions=output_attentions) hidden_seq = attention_output_1[0] hidden_smiles = attention_output_2[0] mean_seq = torch.mean(hidden_seq,axis=1) mean_smiles = torch.mean(hidden_smiles,axis=1) last_hidden_states = torch.cat([mean_seq, mean_smiles], dim=1) if output_attentions: attentions_seq = attention_output_1[1] attentions_smiles = attention_output_2[1] logits = self.linear(last_hidden_states).squeeze(-1) if labels is not None: loss_fct = torch.nn.MSELoss() loss = loss_fct(logits.view(-1, 1), labels.view(-1,1).half()) return (loss, logits) else: if output_attentions: return logits, (attentions_seq, attentions_smiles) else: 
return logits
41.540984
161
0.666831
acb4031e948d8ccb8e03d7be83f20f365f46336c
1,201
py
Python
snippets/telegram-user-by-id.py
orsinium/notes
3e08977ee3329c09ab8a46b0e35a45ab65b93910
[ "CC-BY-4.0" ]
35
2018-05-17T10:11:45.000Z
2022-02-27T21:22:38.000Z
snippets/telegram-user-by-id.py
orsinium/notes
3e08977ee3329c09ab8a46b0e35a45ab65b93910
[ "CC-BY-4.0" ]
7
2018-07-06T14:00:43.000Z
2019-01-28T11:03:43.000Z
snippets/telegram-user-by-id.py
orsinium/notes
3e08977ee3329c09ab8a46b0e35a45ab65b93910
[ "CC-BY-4.0" ]
13
2018-05-23T06:02:16.000Z
2021-04-25T18:15:10.000Z
""" This script allow you to send in Telegram message with link on user. If user has login, you can just send "@login" message, and telegram transform it into link on user profile. However you cannot do it if user has no login. In this case you need to this snippet. This snippet get `user_id`, transfor it to link on this user and send this link in message to `send_to`. """ from telethon import TelegramClient, tl # https://my.telegram.org API_ID = 12345 API_HASH = '...' user_id = 82752261 # ID of user that profile you want to get in message. send_to = 'orsinium' # login or ID of message receiver. message = 'touch me' # text of message with link client = TelegramClient('session_name', API_ID, API_HASH) client.start() full_user = client(tl.functions.users.GetFullUserRequest(user_id)) input_user = tl.types.InputUser( user_id=full_user.user.id, access_hash=full_user.user.access_hash, ) client( tl.functions.messages.SendMessageRequest( send_to, message, entities=[ tl.types.InputMessageEntityMentionName( offset=0, length=len(message), user_id=input_user, ), ], ), )
26.108696
79
0.685262
b68a63b87562af78135570d473b227f2474ade0c
558
py
Python
packages/python/plotly/plotly/validators/sunburst/marker/colorbar/_showticksuffix.py
mastermind88/plotly.py
efa70710df1af22958e1be080e105130042f1839
[ "MIT" ]
null
null
null
packages/python/plotly/plotly/validators/sunburst/marker/colorbar/_showticksuffix.py
mastermind88/plotly.py
efa70710df1af22958e1be080e105130042f1839
[ "MIT" ]
null
null
null
packages/python/plotly/plotly/validators/sunburst/marker/colorbar/_showticksuffix.py
mastermind88/plotly.py
efa70710df1af22958e1be080e105130042f1839
[ "MIT" ]
null
null
null
import _plotly_utils.basevalidators class ShowticksuffixValidator(_plotly_utils.basevalidators.EnumeratedValidator): def __init__( self, plotly_name="showticksuffix", parent_name="sunburst.marker.colorbar", **kwargs, ): super(ShowticksuffixValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "colorbars"), values=kwargs.pop("values", ["all", "first", "last", "none"]), **kwargs, )
31
80
0.625448
41722bd3779d99fd471e8bc7027cfb51cfa46439
19,726
py
Python
tools/tune.py
Anonymous502/siamfda-for-eccv
72dff5c174b7ebe30c59a6e21bb6f06fdb06c3fb
[ "Apache-2.0" ]
null
null
null
tools/tune.py
Anonymous502/siamfda-for-eccv
72dff5c174b7ebe30c59a6e21bb6f06fdb06c3fb
[ "Apache-2.0" ]
null
null
null
tools/tune.py
Anonymous502/siamfda-for-eccv
72dff5c174b7ebe30c59a6e21bb6f06fdb06c3fb
[ "Apache-2.0" ]
2
2021-01-17T00:17:09.000Z
2022-02-17T14:18:43.000Z
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import argparse import os import cv2 import torch import numpy as np from tqdm import tqdm from multiprocessing import Pool from SiamFDA.core.config import cfg from SiamFDA.models.model_builder import ModelBuilder from SiamFDA.utils.bbox import get_axis_aligned_bbox from SiamFDA.utils.model_load import load_pretrain from toolkit.datasets import DatasetFactory, OTBDataset, UAVDataset, LaSOTDataset, \ VOTDataset, NFSDataset, VOTLTDataset from toolkit.utils.region import vot_overlap, vot_float2str from toolkit.evaluation import OPEBenchmark, AccuracyRobustnessBenchmark, \ EAOBenchmark, F1Benchmark from SiamFDA.tracker.base_tracker import SiameseTracker from SiamFDA.utils.bbox import corner2center import optuna import logging class SiamFDATracker(SiameseTracker): def __init__(self, model): super(SiamFDATracker, self).__init__() self.score_size = (cfg.TRACK.INSTANCE_SIZE - cfg.TRACK.EXEMPLAR_SIZE) // \ cfg.POINT.STRIDE + 1 + cfg.TRACK.BASE_SIZE hanning = np.hanning(self.score_size) window = np.outer(hanning, hanning) self.cls_out_channels = cfg.FDAM.KWARGS.cls_out_channels self.window = window.flatten() self.points = self.generate_points(cfg.POINT.STRIDE, self.score_size) self.model = model self.model.eval() def generate_points(self, stride, size): ori = - (size // 2) * stride x, y = np.meshgrid([ori + stride * dx for dx in np.arange(0, size)], [ori + stride * dy for dy in np.arange(0, size)]) points = np.zeros((size * size, 2), dtype=np.float32) points[:, 0], points[:, 1] = x.astype(np.float32).flatten(), y.astype(np.float32).flatten() return points def _convert_bbox(self, delta, point): delta = delta.permute(1, 2, 3, 0).contiguous().view(4, -1) delta = delta.detach().cpu().numpy() delta[0, :] = point[:, 0] - delta[0, :] delta[1, :] = point[:, 1] - delta[1, :] delta[2, :] = point[:, 0] + delta[2, :] delta[3, :] = point[:, 
1] + delta[3, :] delta[0, :], delta[1, :], delta[2, :], delta[3, :] = corner2center(delta) return delta def _convert_score(self, score): if self.cls_out_channels == 1: score = score.permute(1, 2, 3, 0).contiguous().view(-1) score = score.sigmoid().detach().cpu().numpy() else: score = score.permute(1, 2, 3, 0).contiguous().view(self.cls_out_channels, -1).permute(1, 0) score = score.softmax(1).detach()[:, 1].cpu().numpy() return score def _bbox_clip(self, cx, cy, width, height, boundary): cx = max(0, min(cx, boundary[1])) cy = max(0, min(cy, boundary[0])) width = max(10, min(width, boundary[1])) height = max(10, min(height, boundary[0])) return cx, cy, width, height def init(self, img, bbox): """ args: img(np.ndarray): BGR image bbox: (x, y, w, h) bbox """ self.center_pos = np.array([bbox[0] + (bbox[2] - 1) / 2, bbox[1] + (bbox[3] - 1) / 2]) self.size = np.array([bbox[2], bbox[3]]) # calculate z crop size w_z = self.size[0] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size) h_z = self.size[1] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size) s_z = round(np.sqrt(w_z * h_z)) # calculate channle average self.channel_average = np.mean(img, axis=(0, 1)) # get crop z_crop = self.get_subwindow(img, self.center_pos, cfg.TRACK.EXEMPLAR_SIZE, s_z, self.channel_average) self.model.template(z_crop) #kalman filter self.kalman = cv2.KalmanFilter(4, 2) self.kalman.measurementMatrix = np.array([[1, 0, 0, 0], [0, 1, 0, 0]], np.float32) self.kalman.transitionMatrix = np.array([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32) self.kalman.processNoiseCov = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32) * 0.2 self.kalman.measurementNoiseCov = np.array([[1, 0], [0, 1]], np.float32) * 1 self.initialState = np.array([[np.float32(self.center_pos[0])], [np.float32(self.center_pos[1])]]) def track(self, img): """ args: img(np.ndarray): BGR image return: bbox(list):[x, y, width, height] """ w_z = self.size[0] + cfg.TRACK.CONTEXT_AMOUNT * 
np.sum(self.size) h_z = self.size[1] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size) s_z = np.sqrt(w_z * h_z) scale_z = cfg.TRACK.EXEMPLAR_SIZE / s_z s_x = s_z * (cfg.TRACK.INSTANCE_SIZE / cfg.TRACK.EXEMPLAR_SIZE) x_crop = self.get_subwindow(img, self.center_pos, cfg.TRACK.INSTANCE_SIZE, round(s_x), self.channel_average) outputs = self.model.track(x_crop) score = self._convert_score(outputs['cls']) pred_bbox = self._convert_bbox(outputs['loc'], self.points) def change(r): return np.maximum(r, 1. / r) def sz(w, h): pad = (w + h) * 0.5 return np.sqrt((w + pad) * (h + pad)) # scale penalty s_c = change(sz(pred_bbox[2, :], pred_bbox[3, :]) / (sz(self.size[0] * scale_z, self.size[1] * scale_z))) # aspect ratio penalty r_c = change((self.size[0] / self.size[1]) / (pred_bbox[2, :] / pred_bbox[3, :])) penalty = np.exp(-(r_c * s_c - 1) * cfg.TRACK.PENALTY_K) pscore = penalty * score pscore = pscore * (1 - cfg.TRACK.WINDOW_INFLUENCE) + \ self.window * cfg.TRACK.WINDOW_INFLUENCE best_idx = np.argmax(pscore) bbox = pred_bbox[:, best_idx] / scale_z lr = penalty[best_idx] * score[best_idx] * cfg.TRACK.LR cx = bbox[0] + self.center_pos[0] cy = bbox[1] + self.center_pos[1] # smooth bbox width = self.size[0] * (1 - lr) + bbox[2] * lr height = self.size[1] * (1 - lr) + bbox[3] * lr if type(cy) is np.ndarray: self.current_measurement = np.array([np.float32(cx), np.float32(cy)]) else: self.current_measurement = np.array([[np.float32(cx)], [np.float32(cy)]]) self.kalman.correct(self.current_measurement - self.initialState) self.current_prediction = self.kalman.predict() cx1, cy1 = self.current_prediction[0] + self.initialState[0], self.current_prediction[1] + self.initialState[1] slist = np.argsort(-score) if score[slist[0]] < cfg.TRACK.SCORE: cx = cx1 cy = cy1 width = self.size[0] height = self.size[1] width_ex = width * cfg.TRACK.EXPANSION height_ex = height * cfg.TRACK.EXPANSION # clip boundary cx, cy, width, height = self._bbox_clip(cx, cy, width, height, img.shape[:2]) # udpate 
state self.center_pos = np.array([cx, cy]) self.size = np.array([width, height]) bbox = [cx - width / 2, cy - height / 2, width_ex, height_ex] best_score = score[best_idx] return { 'bbox': bbox, 'best_score': best_score } def eval(dataset, tracker_name): tracker_dir = "./" trackers = [tracker_name] if 'OTB' in args.dataset: dataset.set_tracker(tracker_dir, trackers) benchmark = OPEBenchmark(dataset) eval_auc = benchmark.eval_success(tracker_name) auc = np.mean(list(eval_auc[tracker_name].values())) return auc elif 'LaSOT' == args.dataset: dataset.set_tracker(tracker_dir, trackers) benchmark = OPEBenchmark(dataset) eval_auc = benchmark.eval_success(tracker_name) auc = np.mean(list(eval_auc[tracker_name].values())) return auc elif 'UAV' in args.dataset: dataset.set_tracker(tracker_dir, trackers) benchmark = OPEBenchmark(dataset) eval_auc = benchmark.eval_success(tracker_name) auc = np.mean(list(eval_auc[tracker_name].values())) return auc elif 'NFS' in args.dataset: dataset.set_tracker(tracker_dir, trackers) benchmark = OPEBenchmark(dataset) eval_auc = benchmark.eval_success(tracker_name) auc = np.mean(list(eval_auc[tracker_name].values())) return auc if args.dataset in ['VOT2016', 'VOT2017', 'VOT2018', 'VOT2019']: dataset.set_tracker(tracker_dir, trackers) benchmark = EAOBenchmark(dataset) eval_eao = benchmark.eval(tracker_name) eao = eval_eao[tracker_name]['all'] return eao elif 'VOT2018-LT' == args.dataset: dataset.set_tracker(tracker_dir, trackers) benchmark = F1Benchmark(dataset) f1_result = {} with Pool(processes=num) as pool: for ret in tqdm(pool.imap_unordered(benchmark.eval, trackers), desc='eval f1', total=len(trackers), ncols=100): f1_result.update(ret) benchmark.show_result(f1_result, show_video_level=False) return 0 # fitness function def objective(trial): # different params cfg.TRACK.WINDOW_INFLUENCE = trial.suggest_uniform('window_influence', 0.1, 0.8) cfg.TRACK.PENALTY_K = trial.suggest_uniform('penalty_k', 0, 0.4) cfg.TRACK.LR = 
trial.suggest_uniform('scale_lr', 0.1, 0.8) # rebuild tracker tracker = SiamFDATracker(model) model_name = args.snapshot.split('/')[-1].split('.')[0] tracker_name = os.path.join('tune_results', args.dataset, model_name, model_name + \ '_wi-{:.3f}'.format(cfg.TRACK.WINDOW_INFLUENCE) + \ '_pk-{:.3f}'.format(cfg.TRACK.PENALTY_K) + \ '_lr-{:.3f}'.format(cfg.TRACK.LR)) total_lost = 0 if args.dataset in ['VOT2016', 'VOT2018', 'VOT2019']: # restart tracking for v_idx, video in enumerate(dataset): frame_counter = 0 lost_number = 0 toc = 0 pred_bboxes = [] for idx, (img, gt_bbox) in enumerate(video): if len(gt_bbox) == 4: gt_bbox = [gt_bbox[0], gt_bbox[1], gt_bbox[0], gt_bbox[1] + gt_bbox[3] - 1, gt_bbox[0] + gt_bbox[2] - 1, gt_bbox[1] + gt_bbox[3] - 1, gt_bbox[0] + gt_bbox[2] - 1, gt_bbox[1]] tic = cv2.getTickCount() if idx == frame_counter: cx, cy, w, h = get_axis_aligned_bbox(np.array(gt_bbox)) gt_bbox_ = [cx - (w - 1) / 2, cy - (h - 1) / 2, w, h] tracker.init(img, gt_bbox_) pred_bboxes.append(1) elif idx > frame_counter: outputs = tracker.track(img) pred_bbox = outputs['bbox'] overlap = vot_overlap(pred_bbox, gt_bbox, (img.shape[1], img.shape[0])) if overlap > 0: # not lost pred_bboxes.append(pred_bbox) else: # lost object pred_bboxes.append(2) frame_counter = idx + 5 # skip 5 frames lost_number += 1 else: pred_bboxes.append(0) toc += cv2.getTickCount() - tic if idx == 0: cv2.destroyAllWindows() toc /= cv2.getTickFrequency() # save results video_path = os.path.join(tracker_name, 'baseline', video.name) if not os.path.isdir(video_path): os.makedirs(video_path) result_path = os.path.join(video_path, '{}_001.txt'.format(video.name)) with open(result_path, 'w') as f: for x in pred_bboxes: if isinstance(x, int): f.write("{:d}\n".format(x)) else: f.write(','.join([vot_float2str("%.4f", i) for i in x]) + '\n') print('({:3d}) Video: {:12s} Time: {:4.1f}s Speed: {:3.1f}fps Lost: {:d}'.format( v_idx + 1, video.name, toc, idx / toc, lost_number)) total_lost += lost_number 
lost_info = "{:s} total lost: {:d}".format(model_name, total_lost) logging.getLogger().info(lost_info) print(lost_info) eao = eval(dataset=dataset_eval, tracker_name=tracker_name) info = "{:s} window_influence: {:1.17f}, penalty_k: {:1.17f}, scale_lr: {:1.17f}, EAO: {:1.3f}".format( model_name, cfg.TRACK.WINDOW_INFLUENCE, cfg.TRACK.PENALTY_K, cfg.TRACK.LR, eao) logging.getLogger().info(info) print(info) return eao else: # OPE tracking for v_idx, video in enumerate(dataset): toc = 0 pred_bboxes = [] scores = [] track_times = [] for idx, (img, gt_bbox) in enumerate(video): tic = cv2.getTickCount() if idx == 0: cx, cy, w, h = get_axis_aligned_bbox(np.array(gt_bbox)) gt_bbox_ = [cx - (w - 1) / 2, cy - (h - 1) / 2, w, h] tracker.init(img, gt_bbox_) pred_bbox = gt_bbox_ scores.append(None) if 'VOT2018-LT' == args.dataset: pred_bboxes.append([1]) else: pred_bboxes.append(pred_bbox) else: outputs = tracker.track(img) pred_bbox = outputs['bbox'] pred_bboxes.append(pred_bbox) scores.append(outputs['best_score']) toc += cv2.getTickCount() - tic track_times.append((cv2.getTickCount() - tic) / cv2.getTickFrequency()) if idx == 0: cv2.destroyAllWindows() toc /= cv2.getTickFrequency() # save results if 'VOT2018-LT' == args.dataset: video_path = os.path.join('results', args.dataset, model_name, 'longterm', video.name) if not os.path.isdir(video_path): os.makedirs(video_path) result_path = os.path.join(video_path, '{}_001.txt'.format(video.name)) with open(result_path, 'w') as f: for x in pred_bboxes: f.write(','.join([str(i) for i in x]) + '\n') result_path = os.path.join(video_path, '{}_001_confidence.value'.format(video.name)) with open(result_path, 'w') as f: for x in scores: f.write('\n') if x is None else f.write("{:.6f}\n".format(x)) result_path = os.path.join(video_path, '{}_time.txt'.format(video.name)) with open(result_path, 'w') as f: for x in track_times: f.write("{:.6f}\n".format(x)) elif 'GOT-10k' == args.dataset: video_path = os.path.join('results', args.dataset, 
model_name, video.name) if not os.path.isdir(video_path): os.makedirs(video_path) result_path = os.path.join(video_path, '{}_001.txt'.format(video.name)) with open(result_path, 'w') as f: for x in pred_bboxes: f.write(','.join([str(i) for i in x]) + '\n') result_path = os.path.join(video_path, '{}_time.txt'.format(video.name)) with open(result_path, 'w') as f: for x in track_times: f.write("{:.6f}\n".format(x)) else: if not os.path.isdir(tracker_name): os.makedirs(tracker_name) result_path = os.path.join(tracker_name, '{}.txt'.format(video.name)) with open(result_path, 'w') as f: for x in pred_bboxes: f.write(','.join([str(i) for i in x]) + '\n') print('({:3d}) Video: {:12s} Time: {:5.1f}s Speed: {:3.1f}fps'.format( v_idx + 1, video.name, toc, idx / toc)) auc = eval(dataset=dataset_eval, tracker_name=tracker_name) info = "{:s} window_influence: {:1.17f}, penalty_k: {:1.17f}, scale_lr: {:1.17f}, AUC: {:1.3f}".format( model_name, cfg.TRACK.WINDOW_INFLUENCE, cfg.TRACK.PENALTY_K, cfg.TRACK.LR, auc) logging.getLogger().info(info) print(info) return auc if __name__ == "__main__": parser = argparse.ArgumentParser(description='tuning for SiamFDA') parser.add_argument('--dataset', default='VOT2018', type=str, help='dataset') parser.add_argument("--datapath", default="", type=str, help="path of dataset") parser.add_argument('--config', default='config.yaml', type=str, help='config file') parser.add_argument('--snapshot', default='snapshot/checkpoint_e19.pth', type=str, help='snapshot of models to eval') parser.add_argument("--gpu_id", default="0,1,3", type=str, help="gpu id") args = parser.parse_args() torch.set_num_threads(3) os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id # load config cfg.merge_from_file(args.config) dataset_root = args.datapath # create model model = ModelBuilder() # load model model = load_pretrain(model, args.snapshot).cuda().eval() # create dataset dataset = DatasetFactory.create_dataset(name=args.dataset, dataset_root=dataset_root, load_img=False) # 
Eval dataset root = dataset_root if 'OTB' in args.dataset: dataset_eval = OTBDataset(args.dataset, root) elif 'LaSOT' == args.dataset: dataset_eval = LaSOTDataset(args.dataset, root) elif 'UAV' in args.dataset: dataset_eval = UAVDataset(args.dataset, root) elif 'NFS' in args.dataset: dataset_eval = NFSDataset(args.dataset, root) if args.dataset in ['VOT2016', 'VOT2017', 'VOT2018', 'VOT2019']: dataset_eval = VOTDataset(args.dataset, root) elif 'VOT2018-LT' == args.dataset: dataset_eval = VOTLTDataset(args.dataset, root) tune_result = os.path.join('tune_results', args.dataset) if not os.path.isdir(tune_result): os.makedirs(tune_result) log_path = os.path.join(tune_result, (args.snapshot).split('/')[-1].split('.')[0] + '.log') logging.getLogger().setLevel(logging.INFO) logging.getLogger().addHandler(logging.FileHandler(log_path)) optuna.logging.enable_propagation() study = optuna.create_study(direction='maximize') study.optimize(objective, n_trials=10000) print('Best value: {} (params: {})\n'.format(study.best_value, study.best_params))
40.84058
122
0.547045
aa14a51d1a964f5a4cd9afef0127078c0af984fc
482
py
Python
06. Django + React.js apps/Todo app with CRUD functionality/backend/todolistapp/todoapp/migrations/0002_auto_20201007_0208.py
rexsimiloluwah/Python-Experiments
63291ab7fe11e61cce2a936dd41b31f0eb35cc1f
[ "MIT" ]
1
2021-01-21T11:39:07.000Z
2021-01-21T11:39:07.000Z
06. Django + React.js apps/Todo app with CRUD functionality/backend/todolistapp/todoapp/migrations/0002_auto_20201007_0208.py
rexsimiloluwah/Python-Experiments
63291ab7fe11e61cce2a936dd41b31f0eb35cc1f
[ "MIT" ]
null
null
null
06. Django + React.js apps/Todo app with CRUD functionality/backend/todolistapp/todoapp/migrations/0002_auto_20201007_0208.py
rexsimiloluwah/Python-Experiments
63291ab7fe11e61cce2a936dd41b31f0eb35cc1f
[ "MIT" ]
1
2021-01-21T11:39:19.000Z
2021-01-21T11:39:19.000Z
# Generated by Django 3.1.1 on 2020-10-07 01:08 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('todoapp', '0001_initial'), ] operations = [ migrations.AlterField( model_name='todo', name='priority', field=models.CharField(blank=True, choices=[('High', 'High'), ('Low', 'Low'), ('Normal', 'Normal')], default='Normal', max_length=50, null=True), ), ]
25.368421
157
0.587137
919c1eece5637c80fb502711f07a3788b0b59d0d
4,838
py
Python
bcs-ui/backend/tests/container_service/clusters/tools/test_node.py
liuchenyiwork/bk-bcs
63b3b844e90bbd240e216cce94b5ba08c28d5282
[ "Apache-2.0" ]
1
2021-11-16T08:15:13.000Z
2021-11-16T08:15:13.000Z
bcs-ui/backend/tests/container_service/clusters/tools/test_node.py
Maclon9573/bk-bcs
b062fa6659490087441c0f405af612afcbadeda5
[ "Apache-2.0" ]
null
null
null
bcs-ui/backend/tests/container_service/clusters/tools/test_node.py
Maclon9573/bk-bcs
b062fa6659490087441c0f405af612afcbadeda5
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- """ Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available. Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import mock import pytest from backend.container_service.clusters.constants import ClusterManagerNodeStatus from backend.container_service.clusters.tools import node as node_tools from backend.resources.constants import NodeConditionStatus from backend.tests.container_service.clusters.test_cc_host import fake_fetch_all_hosts, fake_get_agent_status FAKE_INNER_IP = "127.0.0.1" FAKE_NODE_NAME = "bcs-test-node" def test_query_cluster_nodes(client, create_and_delete_node, ctx_cluster): cluster_nodes = node_tools.query_cluster_nodes(ctx_cluster) assert FAKE_INNER_IP in cluster_nodes assert cluster_nodes[FAKE_INNER_IP]["name"] == FAKE_NODE_NAME assert cluster_nodes[FAKE_INNER_IP]["status"] == NodeConditionStatus.Ready assert not cluster_nodes[FAKE_INNER_IP]["unschedulable"] @pytest.mark.parametrize( "cluster_node_status,unschedulable,cm_node_status,expected_status", [ (NodeConditionStatus.Ready, False, ClusterManagerNodeStatus.RUNNING, ClusterManagerNodeStatus.RUNNING), (NodeConditionStatus.Ready, True, ClusterManagerNodeStatus.RUNNING, ClusterManagerNodeStatus.REMOVABLE), (NodeConditionStatus.Ready, True, ClusterManagerNodeStatus.REMOVABLE, ClusterManagerNodeStatus.REMOVABLE), (NodeConditionStatus.NotReady, True, ClusterManagerNodeStatus.NOTREADY, 
ClusterManagerNodeStatus.NOTREADY), (NodeConditionStatus.NotReady, True, ClusterManagerNodeStatus.REMOVABLE, ClusterManagerNodeStatus.NOTREADY), (NodeConditionStatus.Unknown, True, ClusterManagerNodeStatus.REMOVABLE, ClusterManagerNodeStatus.UNKNOWN), ("", False, ClusterManagerNodeStatus.INITIALIZATION, ClusterManagerNodeStatus.INITIALIZATION), ("", False, ClusterManagerNodeStatus.DELETING, ClusterManagerNodeStatus.DELETING), ("", False, ClusterManagerNodeStatus.ADDFAILURE, ClusterManagerNodeStatus.ADDFAILURE), ("", False, ClusterManagerNodeStatus.REMOVEFAILURE, ClusterManagerNodeStatus.REMOVEFAILURE), ], ) def test_transform_status(cluster_node_status, unschedulable, cm_node_status, expected_status): assert expected_status == node_tools.transform_status(cluster_node_status, unschedulable, cm_node_status) @pytest.fixture def cluster_name(): return "cluster_name" class TestNodesData: def test_compose_data_by_cm_nodes(self, cm_nodes, cluster_nodes, cluster_id, cluster_name): client = node_tools.NodesData( cm_nodes=cm_nodes, cluster_nodes=cluster_nodes, cluster_id=cluster_id, cluster_name=cluster_name ) node_data = client._compose_data_by_cm_nodes() assert len(node_data) == len( [node for inner_ip, node in cm_nodes.items() if node["status"] != ClusterManagerNodeStatus.RUNNING] ) assert node_data[0]["cluster_name"] == cluster_name def test_compose_data_by_cluster_nodes(self, cm_nodes, cluster_nodes, cluster_id): client = node_tools.NodesData( cm_nodes=cm_nodes, cluster_nodes=cluster_nodes, cluster_id=cluster_id, cluster_name="cluster_name" ) node_data = client._compose_data_by_cluster_nodes() assert len(node_data) == len(cluster_nodes) assert node_data[0]["status"] == ClusterManagerNodeStatus.RUNNING @pytest.fixture def master_client(ctx_cluster): return node_tools.BcsClusterMaster(ctx_cluster=ctx_cluster, biz_id=1) class TestBcsClusterMaster: @mock.patch("backend.components.cc.HostQueryService.fetch_all", new=fake_fetch_all_hosts) 
@mock.patch("backend.components.gse.get_agent_status", new=fake_get_agent_status) def test_list_masters(self, master_client, create_and_delete_master): masters = master_client.list_masters() # 判断 ip 存在返回的数据中 detail, is_exist = {}, False for master in masters: if master["inner_ip"] == FAKE_INNER_IP: detail, is_exist = master, True break assert is_exist # 判断包含对应的字段 for field_name in ["inner_ip", "idc", "rack", "device_class", "bk_cloud_id", "agent"]: assert field_name in detail
48.868687
116
0.763745
9960cabacbaa8be97139840d045c5b7c82419def
32,368
py
Python
highway_merge.py
NKAmapper/highway_merge
996575b12f515ae9640d592914837d426abc0e63
[ "CC0-1.0" ]
null
null
null
highway_merge.py
NKAmapper/highway_merge
996575b12f515ae9640d592914837d426abc0e63
[ "CC0-1.0" ]
null
null
null
highway_merge.py
NKAmapper/highway_merge
996575b12f515ae9640d592914837d426abc0e63
[ "CC0-1.0" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf8 # highway_merge.py # Replace OSM highways with NVDB (or Elveg) # Usage: python highway_merge.py [command] [input_osm.osm] [input_nvdb.osm] # Commands: - replace: Merge all existing OSM highways with NVDB # - offset: Include all NVDB highways above an certain average offset # - new: Include only NVDB highways not found in OSM # - tag: Update OSM highways with attributes from NVDB (maxspeed, name etc) # Resulting file will be written to a new version of input file import sys import time import math import json import os.path import urllib.request, urllib.parse, urllib.error from xml.etree import ElementTree version = "2.3.1" request_header = {"User-Agent": "osmno/highway_merge/" + version} overpass_api = "https://overpass-api.de/api/interpreter" # Overpass endpoint import_folder = "~/Jottacloud/osm/nvdb/" # Folder containing import highway files (default folder tried first) nvdb_sweden_site = "https://nvdb-osm-map-data.s3.amazonaws.com/osm/" # All Sweden .osm NVDB files country = "Norway" # Argument "-swe" for Sweden # Paramters for matching debug = False # True will provide extra keys in output file debug_gap = False # True will show gap/distance testing in output file merge_all = False # True will delete excess way from OSM if its NVDB match is already merged margin = 15 # Meters of tolarance for matching nodes margin_new = 8 # Meters of tolerance for matching nodes, for "new" command margin_offset = 5 # Minimum average distance in meters for matching ways (used with "offset" command to filter large offsets) match_factor = 0.3 # Minimum percent of length of way matched new_factor = 0.6 # Ditto for "new" command min_nodes = 2 # Min number of nodes in a way to be matched # Do not merge OSM ways with the folowing highway categories avoid_highway = ["path", "bus_stop", "rest_area", "platform", "construction", "proposed"] # Do not merge OSM ways with the following keys avoid_tags = ["area", "railway", "piste:type", "snowmobile", 
"turn:lanes", "turn:lanes:forward", "turn:lanes:backward", \ "destination", "destination:forward", "destination:backward", "destination:ref", "destination:ref:forward", "destination:ref:backward", \ "destination:symbol", "destination:symbol:forward", "destination:symbol:backward", "mtb:scale", "class:bicycle:mtb"] # Overwrite with the following tags from NVDB when merging ways avoid_merge = ["ref", "name", "maxspeed", "oneway", "junction", "foot", "bridge", "tunnel", "layer", "source"] # Do not consider OSM highways of the following types when updating tags avoid_highway_tags = ["cycleway", "footway", "steps"] # Overwrite with the following tags from NVDB when updating tags in OSM update_tags = ["ref", "name", "maxspeed", "maxheight", "bridge", "tunnel", "layer"] # Pedestrian highways which should not be mixed with other highway classes for cars pedestrian_highway = ["footway", "cycleway"] # Public highways which should not be mixed with other highway classes public_highway = ["motorway", "trunk", "primary", "secondary", "motorway_link", "trunk_link", "primary_link", "secondary_link"] # Only consider the following highway categories when merging (leave empty [] to merge all) replace_highway = [] #replace_highway = ["motorway", "trunk", "primary", "secondary", "motorway_link", "trunk_link", "primary_link", "secondary_link"] #replace_highway = ["primary", "secondary", "primary_link", "secondary_link"] # Output message def message (line): sys.stdout.write (line) sys.stdout.flush() # Open file/api, try up to 6 times, each time with double sleep time def open_url (url): tries = 0 while tries < 6: try: return urllib.request.urlopen(url) except urllib.error.HTTPError as e: if e.code in [429, 503, 504]: # Too many requests, Service unavailable or Gateway timed out if tries == 0: message ("\n") message ("\rRetry %i... 
" % (tries + 1)) time.sleep(5 * (2**tries)) tries += 1 error = e elif e.code in [401, 403]: message ("\nHTTP error %i: %s\n" % (e.code, e.reason)) # Unauthorized or Blocked sys.exit() elif e.code in [400, 409, 412]: message ("\nHTTP error %i: %s\n" % (e.code, e.reason)) # Bad request, Conflict or Failed precondition message ("%s\n" % str(e.read())) sys.exit() else: raise message ("\nHTTP error %i: %s\n" % (error.code, error.reason)) sys.exit() # Compute approximation of distance between two coordinates, in meters # Works for short distances def distance(n1_lat, n1_lon, n2_lat, n2_lon): lon1, lat1, lon2, lat2 = map(math.radians, [n1_lon, n1_lat, n2_lon, n2_lat]) x = (lon2 - lon1) * math.cos( 0.5*(lat2+lat1) ) y = lat2 - lat1 return 6371000 * math.sqrt( x*x + y*y ) # Compute closest distance from point p3 to line segment [s1, s2] # Works for short distances def line_distance(s1_lat, s1_lon, s2_lat, s2_lon, p3_lat, p3_lon): x1, y1, x2, y2, x3, y3 = map(math.radians, [s1_lon, s1_lat, s2_lon, s2_lat, p3_lon, p3_lat]) # Simplified reprojection of latitude x1 = x1 * math.cos( y1 ) x2 = x2 * math.cos( y2 ) x3 = x3 * math.cos( y3 ) A = x3 - x1 B = y3 - y1 dx = x2 - x1 dy = y2 - y1 dot = (x3 - x1)*dx + (y3 - y1)*dy len_sq = dx*dx + dy*dy if len_sq != 0: # in case of zero length line param = dot / len_sq else: param = -1 if param < 0: x4 = x1 y4 = y1 elif param > 1: x4 = x2 y4 = y2 else: x4 = x1 + param * dx y4 = y1 + param * dy # Also compute distance from p to segment x = x4 - x3 y = y4 - y3 distance = 6371000 * math.sqrt( x*x + y*y ) # In meters # Project back to longitude/latitude x4 = x4 / math.cos(y4) lon = math.degrees(x4) lat = math.degrees(y4) return (lat, lon, distance) # Compare two ways to determine if they match # Include only segments of the ways which are closer than margin parameter # Then check if the average distance between these segments is closer than the earlier best_distance # Return new best_distance, or None if no match or further appart def 
match_ways (way1, way2, best_distance): way_distance = 0.0 count_distance = 0 match_nodes = [] # Iterate all nodes in way1 and identify distance from node to way2 for node1 in way1['nodes']: min_node_distance = margin prev_node2 = way2['nodes'][0] for node2 in way2['nodes'][1:]: line_lat, line_lon, node_distance = line_distance(nodes[prev_node2]['lat'], nodes[prev_node2]['lon'], \ nodes[node2]['lat'], nodes[node2]['lon'], \ nodes[node1]['lat'], nodes[node1]['lon']) if node_distance < min_node_distance: min_node_distance = node_distance min_node_ref = node1 gap_test = { 'lat1': nodes[node1]['lat'], 'lon1': nodes[node1]['lon'], 'lat2': line_lat, 'lon2': line_lon, 'distance': node_distance } prev_node2 = node2 # Include node in matched nodes list if closer distance than margin if min_node_distance < margin: count_distance += 1 way_distance += min_node_distance if min_node_ref not in match_nodes: match_nodes.append(min_node_ref) if debug_gap: test_lines.append(gap_test) # No match if too few matched nodes or if average distance between matched nodes is higher than best so far. 
# Otherwise calculate length of matched segment to check if sufficiently long (proportion of total length of way1) if count_distance >= min_nodes and way_distance / count_distance < best_distance: match_length = 0 prev_node = None for node in way1['nodes']: if node in match_nodes and prev_node in match_nodes: match_length += distance(nodes[prev_node]['lat'], nodes[prev_node]['lon'], \ nodes[node]['lat'], nodes[node]['lon']) prev_node = node if match_length > match_factor * way1['length']: return way_distance / count_distance # Successful match return None # Identify municipality name, unless more than one hit # Returns municipality number, or input parameter if not found def get_municipality (parameter): if parameter.isdigit(): return parameter else: parameter = parameter found_id = "" duplicate = False for mun_id, mun_name in iter(municipalities.items()): if parameter.lower() == mun_name.lower(): return mun_id elif parameter.lower() in mun_name.lower(): if found_id: duplicate = True else: found_id = mun_id if found_id and not duplicate: return found_id else: return parameter # Load dict of all municipalities def load_municipalities(country): if country == "Sweden": url = "https://catalog.skl.se/rowstore/dataset/b80d412c-9a81-4de3-a62c-724192295677?_limit=400" file = urllib.request.urlopen(url) data = json.load(file) file.close() for municipality in data['results']: municipalities[ municipality['kommunkod'] ] = municipality['kommun'] # message ("%s\n" % municipalities) else: # Default Norway url = "https://ws.geonorge.no/kommuneinfo/v1/fylkerkommuner?filtrer=fylkesnummer%2Cfylkesnavn%2Ckommuner.kommunenummer%2Ckommuner.kommunenavnNorsk" file = urllib.request.urlopen(url) data = json.load(file) file.close() for county in data: for municipality in county['kommuner']: municipalities[ municipality['kommunenummer'] ] = municipality['kommunenavnNorsk'] # Load files and build data structure for analysis def load_files (name_osm): global tree_osm, root_osm, tree_nvdb, 
root_nvdb, count_osm_roads, filename_osm, filename_nvdb # Load OSM file if ".osm" not in name_osm.lower(): municipality_id = get_municipality(name_osm) if municipality_id in municipalities: message ("Loading municipality %s %s from OSM ..." % (municipality_id, municipalities[municipality_id])) filename_osm = "nvdb_%s_%s" % (municipality_id, municipalities[municipality_id].replace(" ", "_")) if country == "Sweden": filename_nvdb = municipalities[municipality_id] + ".osm" query = '[timeout:90];(area["ref:scb"=%s][admin_level=7];)->.a;(nwr["highway"](area.a););(._;>;<;);out meta;' % municipality_id else: # Norway filename_nvdb = filename_osm + ".osm" query = '[timeout:90];(area[ref=%s][admin_level=7][place=municipality];)->.a;(nwr["highway"](area.a););(._;>;<;);out meta;' % municipality_id filename_osm += ".osm" request = urllib.request.Request(overpass_api + "?data=" + urllib.parse.quote(query), headers=request_header) file = open_url(request) data = file.read() file.close() root_osm = ElementTree.fromstring(data) tree_osm = ElementTree.ElementTree(root_osm) else: sys.exit("\n*** Municipality '%s' not found\n\n" % name_osm) else: message ("Loading files '%s' and '%s' ..." 
% (name_osm, filename_nvdb)) if os.path.isfile(name_osm): tree_osm = ElementTree.parse(name_osm) root_osm = tree_osm.getroot() else: sys.exit("\n*** File '%s' not found\n\n" % name_osm) # Load NVDB file if country == "Sweden": request = urllib.request.Request(nvdb_sweden_site + urllib.parse.quote(filename_nvdb), headers=request_header) try: file = urllib.request.urlopen(request) except urllib.error.HTTPError: sys.exit("\n*** File '%s' not available\n\n" % (nvdb_sweden_site + filename_nvdb)) data = file.read() tree_nvdb = ElementTree.ElementTree(ElementTree.fromstring(data)) else: # Norway full_filename_nvdb = filename_nvdb if not os.path.isfile(full_filename_nvdb): test_filename = os.path.expanduser(import_folder + filename_nvdb) if os.path.isfile(test_filename): full_filename_nvdb = test_filename else: sys.exit("\n*** File '%s' not found\n\n" % filename_nvdb) tree_nvdb = ElementTree.parse(full_filename_nvdb) root_nvdb = tree_nvdb.getroot() # Prepare nodes message ("\nLoad nodes ...") count_osm_nodes = 0 for node in root_osm.iter("node"): if not("action" in node.attrib and node.attrib['action'] == "delete"): nodes[ node.attrib['id'] ] = { 'index': node, 'used': 0, # Will have a value larger than zero at time of output to avoid deletion 'lat': float(node.attrib['lat']), 'lon': float(node.attrib['lon']) } # Remove node tags used by early editors for tag in node.iter("tag"): if tag.attrib['k'] == "created_by": node.remove(tag) node.set("action", "modify") count_osm_nodes += 1 count_nvdb_nodes = 0 for node in root_nvdb.iter("node"): nodes[ node.attrib['id'] ] = { 'index': node, 'used': 0, 'lat': float(node.attrib['lat']), 'lon': float(node.attrib['lon']) } count_nvdb_nodes += 1 message (" %i OSM nodes, %i NVDB nodes" % (count_osm_nodes, count_nvdb_nodes)) # Determine bounding box and length of OSM ways message ("\nLoad ways ...") count_osm = 0 count_osm_roads = 0 for way in root_osm.iter("way"): count_osm += 1 way_id = way.attrib['id'] length = 0 way_nodes = [] highway 
= None incomplete = False avoid_match = False min_lat = 0.0 min_lon = 0.0 max_lat = 0.0 max_lon = 0.0 # Iterate tags to determine if way should be excluded for tag in way.iter("tag"): osm_tag = tag.attrib['k'] if osm_tag in avoid_tags: avoid_match = True if osm_tag == "highway": highway = tag.attrib['v'] if highway not in avoid_highway: count_osm_roads += 1 # Iterate nodes to determine if way is complete for node in way.iter("nd"): node_id = node.attrib['ref'] if node_id in nodes: nodes[ node_id ]['used'] += 1 elif not("action" in node.attrib and node.attrib['action'] == "delete"): incomplete = True if "action" in way.attrib and way.attrib['action'] == "delete": incomplete = True # Determine bounding box and length of way if not incomplete: node_tag = way.find("nd") node_id = node_tag.attrib['ref'] min_lat = nodes[ node_id ]['lat'] min_lon = nodes[ node_id ]['lon'] max_lat = min_lat max_lon = min_lon prev_lat = min_lat prev_lon = min_lon for node in way.iter("nd"): if not("action" in node.attrib and node.attrib['action'] == "delete"): node_id = node.attrib['ref'] length += distance(prev_lat, prev_lon, nodes[node_id]['lat'], nodes[node_id]['lon']) # Append node and update bbox prev_lat = nodes[node_id]['lat'] prev_lon = nodes[node_id]['lon'] way_nodes.append(node_id) min_lat = min(min_lat, prev_lat) min_lon = min(min_lon, prev_lon) max_lat = max(max_lat, prev_lat) max_lon = max(max_lon, prev_lon) # Note: Sinple reprojection of bounding box to meters ways_osm[ way_id ] = { 'index': way, 'highway': highway, 'incomplete': incomplete, 'avoid_tag': avoid_match, 'min_lat': min_lat - margin / 111500.0, 'max_lat': max_lat + margin / 111500.0, 'min_lon': min_lon - margin / (math.cos(math.radians(min_lat)) * 111320.0), 'max_lon': max_lon + margin / (math.cos(math.radians(max_lat)) * 111320.0), 'length': length, 'nodes': way_nodes, 'tags': {} } # Determine which nodes are used by relation (should be kept) for relation in root_osm.iter("relation"): for member in 
relation.iter("member"): if member.attrib['type'] == "node" and member.attrib['ref'] in nodes: nodes[ member.attrib['ref'] ]['used'] += 1 message (" %i OSM ways (%i roads)" % (count_osm, count_osm_roads)) # Determine bounding box and length of NVDB ways count_nvdb = 0 for way in root_nvdb.iter('way'): count_nvdb += 1 node_tag = way.find("nd") node_ref = node_tag.attrib['ref'] min_lat = nodes[ node_ref ]['lat'] min_lon = nodes[ node_ref ]['lon'] max_lat = min_lat max_lon = min_lon prev_lat = min_lat prev_lon = min_lon length = 0 way_nodes = [] for node in way.iter("nd"): node_id = node.attrib['ref'] length += distance(prev_lat, prev_lon, nodes[node_id]['lat'], nodes[node_id]['lon']) # Append node and update bbox prev_lat = nodes[node_id]['lat'] prev_lon = nodes[node_id]['lon'] way_nodes.append(node_id) min_lat = min(min_lat, prev_lat) min_lon = min(min_lon, prev_lon) max_lat = max(max_lat, prev_lat) max_lon = max(max_lon, prev_lon) highway_tag = way.find("tag[@k='highway']") if highway_tag != None: highway = highway_tag.attrib['v'] else: highway = "" # Note: Simple reprojection of bounding box to meters ways_nvdb[ way.attrib['id'] ] = { 'index': way, 'highway': highway, 'missing': False, 'min_lat': min_lat - margin / 111500.0, 'max_lat': max_lat + margin / 111500.0, 'min_lon': min_lon - margin / (math.cos(math.radians(min_lat)) * 111320.0), 'max_lon': max_lon + margin / (math.cos(math.radians(max_lat)) * 111320.0), 'length': length, 'nodes': way_nodes } message (", %i NVDB ways" % count_nvdb) # Merge NVDB and OSM highways for the commands "replace", "offset" and "tag" def merge_highways(command): message ("\nMatch highways ...\n") count = count_osm_roads count_swap = 0 total_distance = 0 # Pass 1: Match topology # Iterate OSM ways to identify best match with NVDB way for osm_id, osm_way in iter(ways_osm.items()): if not osm_way['incomplete'] and osm_way['highway'] != None and osm_way['highway'] not in avoid_highway: message ("\r%i " % count) count -= 1 best_id = 
None best_distance = 99999.0 for nvdb_id, nvdb_way in iter(ways_nvdb.items()): # Avoid ways with no overlapping bbox or with incompatible relative lengths if nvdb_way['min_lat'] <= osm_way['max_lat'] and nvdb_way['max_lat'] >= osm_way['min_lat'] and \ nvdb_way['min_lon'] <= osm_way['max_lon'] and nvdb_way['max_lon'] >= osm_way['min_lon'] and \ osm_way['length'] > match_factor * nvdb_way['length'] and nvdb_way['length'] > match_factor * osm_way['length']: # Avoid mixing pedestrian and car highways if nvdb_way['highway'] in pedestrian_highway and osm_way['highway'] not in pedestrian_highway + ["track"] or \ nvdb_way['highway'] not in pedestrian_highway and osm_way['highway'] in pedestrian_highway: continue # Avoid mixing trunk etc with lower highway classes if nvdb_way['highway'] in public_highway and osm_way['highway'] not in public_highway + ['tertiary'] or \ osm_way['highway'] in public_highway and nvdb_way['highway'] not in public_highway + ['road', 'tertiary']: continue # Check if match between OSM and NVDB way, and determine if closest distance between them match_distance = match_ways(nvdb_way, osm_way, best_distance) if match_distance is not None and match_distance < best_distance: # Also check reverse match if match_ways(osm_way, nvdb_way, 99999.0) is not None: best_id = nvdb_id best_distance = match_distance # Store match in data structure, if any match if best_id is not None: if command in ["replace", "offset"]: # Replace earlier match if new match is better if "osm_id" in ways_nvdb[ best_id ] and ways_nvdb[ best_id ]['distance'] > best_distance: count_swap -= 1 total_distance -= ways_nvdb[ best_id ]['distance'] del ways_osm[ ways_nvdb[ best_id ]['osm_id'] ]['nvdb_id'] del ways_nvdb[ best_id ]['osm_id'] if "osm_id" not in ways_nvdb[ best_id ]: count_swap += 1 total_distance += best_distance ways_osm[ osm_id ]['nvdb_id'] = best_id ways_nvdb[ best_id ]['osm_id'] = osm_id ways_nvdb[ best_id ]['swap_no'] = count_swap # Debug ways_nvdb[ best_id ]['distance'] = 
best_distance # Debug elif merge_all: ways_osm[ osm_id ]['remove'] = True # Remove redundant way if it got a match elif command == "tag": count_swap += 1 total_distance += best_distance ways_osm[ osm_id ]['nvdb_id'] = best_id ways_osm[ osm_id ]['swap_no'] = count_swap # Debug ways_osm[ osm_id ]['distance'] = best_distance # Debug # Pass 2: Match type highway # Remove matches to be avoided before output if command == "offset": for nvdb_id, nvdb_way in iter(ways_nvdb.items()): if "osm_id" in nvdb_way and (nvdb_way['distance'] < margin_offset or ways_osm[ nvdb_way['osm_id'] ]['highway'] in avoid_highway or \ replace_highway and (osm_way['highway'] not in replace_highway or ways_nvdb[ osm_way['nvdb_id'] ]['highway'] not in replace_highway)): count_swap -= 1 total_distance -= nvdb_way['distance'] del ways_nvdb[ nvdb_id ]['osm_id'] elif command == "replace": for osm_id, osm_way in iter(ways_osm.items()): if "nvdb_id" in osm_way and (osm_way['avoid_tag'] or \ replace_highway and (osm_way['highway'] not in replace_highway or ways_nvdb[ osm_way['nvdb_id'] ]['highway'] not in replace_highway)): count_swap -= 1 total_distance -= ways_nvdb[ osm_way['nvdb_id'] ]['distance'] del ways_nvdb[ osm_way['nvdb_id'] ]['osm_id'] del ways_osm[ osm_id ]['nvdb_id'] elif command == "tag": for osm_id, osm_way in iter(ways_osm.items()): if "nvdb_id" in osm_way and (osm_way['highway'] in avoid_highway or \ replace_highway and (osm_way['highway'] not in replace_highway or ways_nvdb[ osm_way['nvdb_id'] ]['highway'] not in replace_highway)): count_swap -= 1 total_distance -= osm_way['distance'] del ways_osm[ osm_id ]['nvdb_id'] # Report result message ("\r%i highways matched, %i not matched" % (count_swap, count_osm_roads - count_swap)) if command == "replace": message ("\n%i missing highways added from NVDB" % (len(ways_nvdb) - count_swap)) message ("\nAverage offset: %.1f m" % (total_distance / count_swap)) # Identify missing NVDB highways, for "new" command def add_new_highways(): message 
("\nMatch highways ...\n") count = len(ways_nvdb) count_missing = 0 # Iterate NVDB ways to check if match with any OSM way for nvdb_id, nvdb_way in iter(ways_nvdb.items()): message ("\r%i " % count) count -= 1 if not nvdb_way['highway']: # Skip ferries etc continue best_id = None best_distance = 99999.0 match_nodes = [] for osm_id, osm_way in iter(ways_osm.items()): # Avoid testing ways with no overlapping bbox if not osm_way['incomplete'] and osm_way['highway'] != None and osm_way['highway'] not in avoid_highway and \ osm_way['min_lat'] <= nvdb_way['max_lat'] and osm_way['max_lat'] >= nvdb_way['min_lat'] and \ osm_way['min_lon'] <= nvdb_way['max_lon'] and osm_way['max_lon'] >= nvdb_way['min_lon']: # Iterate all nodes in nvdb_way and identify distance from node to osm_way for node_nvdb in nvdb_way['nodes']: min_node_distance = margin_new prev_node_osm = osm_way['nodes'][0] for node_osm in osm_way['nodes'][1:]: line_lat, line_lon, node_distance = line_distance(nodes[prev_node_osm]['lat'], nodes[prev_node_osm]['lon'], \ nodes[node_osm]['lat'], nodes[node_osm]['lon'], \ nodes[node_nvdb]['lat'], nodes[node_nvdb]['lon']) prev_node_osm = node_osm if node_distance < min_node_distance: min_node_distance = node_distance min_node_ref = node_nvdb gap_test = { 'lat1': nodes[node_nvdb]['lat'], 'lon1': nodes[node_nvdb]['lon'], 'lat2': line_lat, 'lon2': line_lon, 'distance': node_distance } # Include node in matched nodes list if closer distance than margin_new if min_node_distance < margin_new: if min_node_ref not in match_nodes: match_nodes.append(min_node_ref) if debug_gap: test_lines.append(gap_test) # No match if too few matched nodes # Otherwise calculate length of matched segment to check if sufficiently long (proportion of total length of nvdb_way) if len(match_nodes) >= min_nodes: match_length = 0 prev_node = None for node in nvdb_way['nodes']: if prev_node in match_nodes and node in match_nodes: match_length += distance(nodes[prev_node]['lat'], nodes[prev_node]['lon'], 
\ nodes[node]['lat'], nodes[node]['lon']) prev_node = node if match_length > new_factor * nvdb_way['length']: continue # Successfull match, so do not include in output # No match, so include NVDB way in output ways_nvdb[ nvdb_id ]['missing'] = True count_missing += 1 message ("\r%i missing highways" % count_missing) # Indent XML output def indent_tree(elem, level=0): i = "\n" + level*" " if len(elem): if not elem.text or not elem.text.strip(): elem.text = i + " " if not elem.tail or not elem.tail.strip(): elem.tail = i for elem in elem: indent_tree(elem, level+1) if not elem.tail or not elem.tail.strip(): elem.tail = i else: if level and (not elem.tail or not elem.tail.strip()): elem.tail = i # Prepare and output file def output_file (osm_filename): global root_osm, tree_osm, root_nvdb, tree_nvdb message ("\nTransfer elements ...") count_modified_tag = 0 # Empty start for "new" and "offset" if command in ["new", "offset"]: root_osm = ElementTree.Element("osm", version="0.6") tree_osm = ElementTree.ElementTree(root_osm) # Merge NVDB ways with OSM for way in root_osm.findall("way"): osm_id = way.attrib['id'] # Replace geometry and tags if command == "replace" and "nvdb_id" in ways_osm[ osm_id ]: nvdb_id = ways_osm[ osm_id ]['nvdb_id'] nvdb_way = ways_nvdb[ nvdb_id ]['index'] for tag_osm in way.findall("tag"): if tag_osm.attrib['k'] in avoid_merge: way.remove(tag_osm) for tag_nvdb in nvdb_way.iter("tag"): tag_osm = way.find("tag[@k='%s']" % tag_nvdb.attrib['k']) if tag_nvdb.attrib['k'] == "highway": if tag_osm != None and tag_nvdb.attrib['v'] != tag_osm.attrib['v']: way.append(ElementTree.Element("tag", k="NVDB", v=tag_nvdb.attrib['v'])) elif tag_osm != None: tag_osm.set("v", tag_nvdb.attrib['v']) else: way.append(ElementTree.Element("tag", k=tag_nvdb.attrib['k'], v=tag_nvdb.attrib['v'])) if debug: way.append(ElementTree.Element("tag", k="OSMID", v=osm_id)) way.append(ElementTree.Element("tag", k="SWAP", v=str(ways_nvdb[ nvdb_id ]['swap_no']))) 
way.append(ElementTree.Element("tag", k="DISTANCE", v=str(round(ways_nvdb[ nvdb_id ]['distance'])))) for node in way.findall('nd'): nodes[ node.attrib['ref'] ]['used'] -= 1 way.remove(node) for node in nvdb_way.iter("nd"): nodes[ node.attrib['ref'] ]['used'] += 1 way.append(ElementTree.Element("nd", ref=node.attrib['ref'])) way.set("action", "modify") # Remove way elif command == "replace" and "remove" in ways_osm[ osm_id ]: for node in way.findall('nd'): nodes[ node.attrib['ref'] ]['used'] -= 1 way.remove(node) way.set("action", "delete") # Regplace tags only elif command == "tag" and "nvdb_id" in ways_osm[osm_id]: modified = False modified_tags = [] nvdb_id = ways_osm[osm_id]['nvdb_id'] for tag_nvdb in ways_nvdb[ nvdb_id ]['index'].findall("tag"): if tag_nvdb.attrib['k'] in update_tags: tag_osm = way.find("tag[@k='%s']" % tag_nvdb.attrib['k']) if tag_osm != None: if tag_nvdb.attrib['v'] != tag_osm.attrib['v']: modified_tags.append("Modified %s=%s to %s" % (tag_nvdb.attrib['k'], tag_osm.attrib['v'], tag_nvdb.attrib['v'])) tag_osm.set("v", tag_nvdb.attrib['v']) modified = True else: way.append(ElementTree.Element("tag", k=tag_nvdb.attrib['k'], v=tag_nvdb.attrib['v'])) modified_tags.append("Added %s=%s" % (tag_nvdb.attrib['k'], tag_nvdb.attrib['v'])) modified = True if modified: count_modified_tag += 1 way.set("action", "modify") way.append(ElementTree.Element("tag", k="EDIT", v=";".join(modified_tags))) if debug: way.append(ElementTree.Element("tag", k="NVDBID", v=nvdb_id)) way.append(ElementTree.Element("tag", k="SWAP", v=str(ways_osm[ osm_id ]['swap_no']))) way.append(ElementTree.Element("tag", k="DISTANCE", v=str(round(ways_osm[ osm_id ]['distance'])))) if command == "tag": message ("\nUpdated tags for %i highways" % count_modified_tag) # Transfer new NVDB ways to OSM for way in root_nvdb.findall("way"): nvdb_id = way.attrib['id'] if command == "new" and ways_nvdb[ nvdb_id ]['missing'] or \ command == "replace" and "osm_id" not in ways_nvdb[ nvdb_id ] or \ 
command == "offset" and "osm_id" in ways_nvdb[ nvdb_id ]: if command == "offset": if ways_nvdb[ nvdb_id ]['highway'] != ways_osm[ ways_nvdb[nvdb_id]['osm_id'] ]['highway']: tag_highway = way.find("tag[@k='highway']") tag_highway.set("v", ways_osm[ ways_nvdb[nvdb_id]['osm_id'] ]['highway']) way.append(ElementTree.Element("tag", k="NVDB", v=ways_nvdb[ nvdb_id ]['highway'])) if debug: way.append(ElementTree.Element("tag", k="OSMID", v=ways_nvdb[ nvdb_id ]['osm_id'])) way.append(ElementTree.Element("tag", k="SWAP", v=str(ways_nvdb[ nvdb_id ]['swap_no']))) way.append(ElementTree.Element("tag", k="DISTANCE", v=str(round(ways_nvdb[ nvdb_id ]['distance'])))) root_osm.append(way) for node in ways_nvdb[ nvdb_id ]['nodes']: nodes[ node ]['used'] += 1 # Remove OSM nodes which are not used anymore for node in root_osm.iter("node"): node_id = node.attrib['id'] tag = node.find("tag") if tag == None and nodes[ node_id ]['used'] == 0: node.set("action", "delete") # Add new NVDB nodes for node in root_nvdb.iter("node"): node_id = node.attrib['id'] if node_id in nodes and nodes[ node_id ]['used'] > 0: root_osm.append(node) # Remove possible historic NVDB tags from OSM for way in root_osm.findall("way"): tag = way.find("tag[@k='nvdb:id']") if tag != None: way.remove(tag) tag = way.find("tag[@k='nvdb:date']") if tag != None: way.remove(tag) # Add distance markers for debugging if debug_gap: i = -1000000 # Try to avoid osm id conflicts for line in test_lines: way = ElementTree.Element("way", id=str(i), action="modify") root_osm.append(way) root_osm.append(ElementTree.Element("node", id=str(i-1), action="modify", lat=str(line['lat1']), lon=str(line['lon1']))) root_osm.append(ElementTree.Element("node", id=str(i-2), action="modify", lat=str(line['lat2']), lon=str(line['lon2']))) way.append(ElementTree.Element("nd", ref=str(i-1))) way.append(ElementTree.Element("nd", ref=str(i-2))) way.append(ElementTree.Element("tag", k="GAP", v=str(line['distance']))) i -= 3 # Output file message 
("\nSaving file ...") root_osm.set("generator", "highway_merge v"+version) root_osm.set("upload", "false") indent_tree(root_osm) filename_out = filename_osm.replace(" ", "_").replace(".osm", "") + "_%s.osm" % command # if ".osm" not in filename_osm.lower() and command == "new": # filename_out = filename_out.replace("_new.", "_missing.") tree_osm.write(filename_out, encoding='utf-8', method='xml', xml_declaration=True) message ("\nSaved to file '%s'\n" % filename_out) # Main program if __name__ == '__main__': # start_time = time.time() message ("\n*** highway_merge v%s ***\n\n" % version) if len(sys.argv) == 4 and sys.argv[1].lower() in ["-new", "-offset", "-replace", "-tag"]: command = sys.argv[1].lower().strip("-") filename_osm = sys.argv[2] filename_nvdb = sys.argv[3] elif len(sys.argv) == 3 and ".osm" not in sys.argv[2].lower() and sys.argv[1].lower() in ["-new", "-offset", "-replace", "-tag"]: command = sys.argv[1].lower().strip("-") filename_osm = sys.argv[2] filename_nvdb = filename_osm else: message ("Please include 1) '-new'/'-offset'/'-replace'/'-tag' 2) OSM file and 3) NVDB file as parameters\n") sys.exit() municipalities = {} if "-swe" in sys.argv: country = "Sweden" load_municipalities(country) if filename_osm.lower() == "norge": iterate_municipalities = sorted(municipalities.keys()) else: iterate_municipalities = [ filename_osm ] # Iterate all municipalities, or the one selected municipality for municipality in iterate_municipalities: if municipality < "": # Insert starting municipality number, if needed continue start_time = time.time() ways_osm = {} ways_nvdb = {} nodes = {} test_lines = [] # For debug load_files (municipality) if command == "new": add_new_highways() else: merge_highways (command) output_file (filename_osm) time_lapsed = time.time() - start_time message ("Time: %i seconds (%i ways per second)\n\n\n" % (time_lapsed, count_osm_roads / time_lapsed)) message ("Done\n\n")
31.486381
149
0.670137
bb1a85fa37be2826078b8cf05d2ccd279cd7ccce
44
py
Python
accountifie/common/view_components/__init__.py
imcallister/accountifie
094834c9d632e0353e3baf8d924eeb10cba0add4
[ "MIT", "Unlicense" ]
4
2017-06-02T08:48:48.000Z
2021-11-21T23:57:15.000Z
accountifie/common/view_components/__init__.py
imcallister/accountifie
094834c9d632e0353e3baf8d924eeb10cba0add4
[ "MIT", "Unlicense" ]
3
2020-06-05T16:55:42.000Z
2021-06-10T17:43:12.000Z
accountifie/common/view_components/__init__.py
imcallister/accountifie
094834c9d632e0353e3baf8d924eeb10cba0add4
[ "MIT", "Unlicense" ]
4
2015-12-15T14:27:51.000Z
2017-04-21T21:42:27.000Z
from .modals import * from .tables import *
14.666667
21
0.727273
c86a19e93a495a3e851915ed30c0594b51911c58
461
py
Python
lib/constants.py
Ucnt/aws-s3-webpage-searcher
156ba5ad299ad95ebb90ca4acc91a3236b166892
[ "MIT" ]
6
2018-07-08T09:59:56.000Z
2020-09-09T23:35:54.000Z
lib/constants.py
Ucnt/aws-s3-webpage-searcher
156ba5ad299ad95ebb90ca4acc91a3236b166892
[ "MIT" ]
null
null
null
lib/constants.py
Ucnt/aws-s3-webpage-searcher
156ba5ad299ad95ebb90ca4acc91a3236b166892
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
"""Shared constants: directory layout, HTTP request headers, threading knobs."""
import os
from lib.arg_parser import *

# Directory layout, anchored on this file's location on disk.
lib_dir = os.path.dirname(os.path.realpath(__file__))
main_dir = os.path.dirname(os.path.dirname(__file__))
log_dir = f"{main_dir}/log"
list_dir = f"{main_dir}/list"

# Browser-like User-Agent sent with every outgoing request.
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:59.0) Gecko/20100101 Firefox/59.0'}

# Threading knobs.
# NOTE(review): the name "sleep_betwee_checks" looks like a typo for
# "sleep_between_checks", but it is part of this module's public interface
# (importers use it via star-import), so it is kept as-is.
sleep_betwee_checks = .1
max_num_threads = 10
20.954545
104
0.720174
f3fa9f2d094001f1b08ed38b40bfe1703d099ebe
5,146
py
Python
app/src/main/python/analyser.py
Motag8tor/BinaryEye
bb534bd05d5c743bf0df13f78f9ab70b898e7a5e
[ "MIT" ]
null
null
null
app/src/main/python/analyser.py
Motag8tor/BinaryEye
bb534bd05d5c743bf0df13f78f9ab70b898e7a5e
[ "MIT" ]
null
null
null
app/src/main/python/analyser.py
Motag8tor/BinaryEye
bb534bd05d5c743bf0df13f78f9ab70b898e7a5e
[ "MIT" ]
null
null
null
"""QR-code payload analyser.

Classifies a decoded QR payload as a URL, a Wi-Fi network descriptor or a
generic file, and (for URLs/files) submits it to the VirusTotal v3 API and
collects the scan verdict.  Module state (``url_class`` / ``wifi_class`` /
``file_class``) holds the most recent scan of each kind.

Integer status codes used by the ``get_*_analysis`` functions:
    1 - network timeout talking to VirusTotal
    2 - analysis still queued / results not ready yet
    3 - nothing has been submitted (or no usable response)
"""
import validators, requests, re  # Import external
import apikey, wifi, url, file  # Import local (project modules)
from io import BytesIO

# NOTE: rebinding the module name to the key string it returns.
apikey = apikey.apikey()
# Most recent scan objects, one per payload kind (None until first scan).
url_class = None
wifi_class = None
file_class = None

# Default headers for VirusTotal requests (form-encoded POST bodies).
headers = {"Accept": "application/json", "Content-Type": "application/x-www-form-urlencoded", "x-apikey": apikey}

# --------------------------------------------------------
def get_url_conclusion():
    """Return the verdict of the most recent URL scan (delegates to url.URL)."""
    global url_class
    return url_class.get_conclusion()

def get_url_downloadable():
    """Return whether the scanned URL points at downloadable content."""
    global url_class
    return url_class.get_downloadable()

def get_url_creation_date():
    """Return the creation date recorded for the most recent URL scan."""
    global url_class
    return url_class.get_creation_date()

# --------------------------------------------------------
def get_url_analysis():
    """Poll VirusTotal for the pending URL analysis and store its stats.

    Returns "success" when the analysis is complete, 2 while queued,
    1 on timeout, 3 when no URL has been submitted yet.
    """
    global url_class
    if url_class:  # Check if a url has been scanned
        id = url_class.get_ID()
        print(id)
    else:
        return 3  # If not then exit

    VT_url = "https://www.virustotal.com/api/v3/analyses/" + id

    try:
        response = requests.request("GET", VT_url, headers=headers)
    except requests.ConnectTimeout as timeout:
        print(timeout)
        return 1

    if response:
        data = response.json()
        status = data["data"]["attributes"]["status"]
        if status == "completed":
            print(data["data"]["attributes"]["stats"])
            # Cache the engine tallies on the scan object before reporting.
            url_class.set_harmless(data["data"]["attributes"]["stats"]["harmless"])
            url_class.set_malicious(data["data"]["attributes"]["stats"]["malicious"])
            url_class.set_suspicious(data["data"]["attributes"]["stats"]["suspicious"])
            url_class.generate_report()  # Generate report
            return "success"  # Return successful message
        elif status == "queued":
            return 2
        # NOTE(review): an unexpected status value falls through and returns
        # None implicitly - confirm callers treat None like an error code.
    else:
        return 3

# --------------------------------------------------------
def upload_url_for_scanning(address):
    """Submit *address* to VirusTotal for scanning; remember the report id.

    Returns "url" on success, or a user-facing error string on timeout.
    """
    VT_url = "https://www.virustotal.com/api/v3/urls"
    try:
        response = requests.request("POST", VT_url, data="url=" + address, headers=headers)
    except requests.ConnectTimeout as timeout:
        print(timeout)
        return "Unable to submit URL for analysis. Please try again."

    if response:
        global url_class
        data = response.json()
        report_id = data["data"]["id"]
        url_class = url.URL(report_id, address)
        return "url"
    # NOTE(review): a non-2xx response returns None implicitly - verify the
    # caller handles that.

# --------------------------------------------------------
def get_wifi_analysis():
    """Return the report for the most recently parsed Wi-Fi QR payload."""
    global wifi_class
    return wifi_class.get_report()

# --------------------------------------------------------
def wifi_scanner(data):
    """Parse a ``WIFI:...`` QR payload into the module's Wifi object.

    Fields are ``key:value;`` pairs after the 5-char "WIFI:" prefix:
    S = SSID, T = authentication type, P = password, H = hidden flag.
    """
    global wifi_class
    wifi_class = wifi.Wifi()
    # Each match is (key, value); the value class allows escaped characters.
    array = re.findall("(.+?):((?:[^\\;]|\\.)*);", data[5:])
    print(array)
    for i in array:
        if i[0] == "S":
            wifi_class.set_SSID(i[1])
        elif i[0] == "T":
            wifi_class.set_authentication(i[1])
        elif i[0] == "P":
            wifi_class.set_password(i[1])
        elif i[0] == "H":
            wifi_class.set_hidden()
    return get_wifi_analysis()

# --------------------------------------------------------
def get_file_analysis():
    """Fetch VirusTotal results for the most recently uploaded file.

    Returns the file report when results exist, 2 while pending,
    1 on timeout, 3 when no file has been submitted yet.
    """
    global file_class
    if file_class:  # Check if a file has been scanned
        id = file_class.get_ID()
        print(id)
    else:
        return 3  # If not then exit

    # GET requests need no Content-Type; shadow the module-level headers.
    headers = {"Accept": "application/json", "x-apikey": apikey}
    VT_url = "https://www.virustotal.com/api/v3/files/" + id

    try:
        response = requests.request("GET", VT_url, headers=headers)
    except requests.ConnectTimeout as timeout:
        print(timeout)
        return 1

    if response:
        data = response.json()
        status = data["data"]["attributes"]["last_analysis_results"]
        #print(status)
        if status:
            print(data["data"]["attributes"]["last_analysis_stats"])
            # Cache the engine tallies on the scan object before reporting.
            file_class.set_harmless(data["data"]["attributes"]["last_analysis_stats"]["harmless"])
            file_class.set_malicious(data["data"]["attributes"]["last_analysis_stats"]["malicious"])
            file_class.set_suspicious(data["data"]["attributes"]["last_analysis_stats"]["suspicious"])
            return file_class.get_report()  # Generate and return report
        elif not status:
            return 2
    else:
        return 3

# --------------------------------------------------------
def upload_file_for_scanning(contents):
    """Upload *contents* (text) to VirusTotal as a file; remember the id.

    Returns "file" on success, or a user-facing error string on timeout.
    """
    headers = {"x-apikey": apikey}
    VT_url = 'https://www.virustotal.com/api/v3/files'
    # Wrap the payload in an in-memory binary buffer for the multipart upload.
    data_file = BytesIO(bytes(contents, "utf-8"))
    print(data_file.read())
    data_file.seek(0)  # rewind after the debug read so the upload sees all bytes
    files = {'file': ('file.exe', data_file)}
    try:
        response = requests.post(VT_url, headers=headers, files=files)
        print(response.text)
    except requests.ConnectTimeout as timeout:
        print(timeout)
        return "Unable to submit file for analysis. Please try again."

    if response:
        global file_class
        data = response.json()
        report_id = data["data"]["id"]
        file_class = file.File(report_id)
        return "file"
    # NOTE(review): a non-2xx response returns None implicitly - verify the
    # caller handles that.

# --------------------------------------------------------
def analyser(qrcode):
    """Entry point: classify *qrcode* and dispatch to the right handler.

    Returns "url" / "wifi" / "file" (or an error string from the upload
    helpers).  Each branch resets the matching module-level scan object.
    """
    print("\n" + qrcode + "\n")
    data = qrcode.strip()

    valid_url = validators.url(data)
    if valid_url:
        global url_class
        print("URL Found...")
        url_class = None
        return upload_url_for_scanning(data)

    valid_wifi = re.search("^WIFI:((?:.+?:(?:[^\\;]|\\.)*;)+);?$", data)
    if valid_wifi:
        global wifi_class
        print("Wi-Fi Network Found...")
        wifi_class = None
        wifi_scanner(data)
        return "wifi"

    # At this point both checks have failed, so the condition below is
    # always true; kept as written.
    if not valid_url or not valid_wifi:
        global file_class
        print("Generic file upload")
        file_class = None
        return upload_file_for_scanning(data)

    return 0
26.121827
93
0.635445
d6ef329b0e96dd5a888892c4d82acbef0cdab2fe
855
py
Python
alphatwirl/datasetloop/reader.py
shane-breeze/AlphaTwirl
59dbd5348af31d02e133d43fd5bfaad6b99a155e
[ "BSD-3-Clause" ]
null
null
null
alphatwirl/datasetloop/reader.py
shane-breeze/AlphaTwirl
59dbd5348af31d02e133d43fd5bfaad6b99a155e
[ "BSD-3-Clause" ]
7
2018-02-26T10:32:26.000Z
2018-03-19T12:27:12.000Z
alphatwirl/datasetloop/reader.py
shane-breeze/AlphaTwirl
59dbd5348af31d02e133d43fd5bfaad6b99a155e
[ "BSD-3-Clause" ]
null
null
null
# Tai Sakuma <tai.sakuma@gmail.com>

##__________________________________________________________________||
class DatasetReaderComposite(object):
    """Composite dataset reader that fans each call out to its children.

    Child readers are registered with ``add()`` and must provide
    ``begin()``, ``read(dataset)`` and ``end()``.  Calls are forwarded in
    registration order; ``end()`` collects the children's results.
    """

    def __init__(self):
        self.readers = []

    def __repr__(self):
        attrs = dict(readers=self.readers)
        rendered = ', '.join('{}={!r}'.format(key, val) for key, val in attrs.items())
        return '{}({})'.format(self.__class__.__name__, rendered)

    def add(self, reader):
        """Register *reader* as a child of this composite."""
        self.readers.append(reader)

    def begin(self):
        """Call ``begin()`` on every child, in registration order."""
        for child in self.readers:
            child.begin()

    def read(self, dataset):
        """Forward *dataset* to every child's ``read()``."""
        for child in self.readers:
            child.read(dataset)

    def end(self):
        """Call ``end()`` on every child and return their results as a list."""
        results = []
        for child in self.readers:
            results.append(child.end())
        return results

##__________________________________________________________________||
25.909091
77
0.611696
44287667b7f4f8c0ba2e50a482355e9688206b66
780
py
Python
alembic_folder/versions/a025c2020370_.py
shthiago/minimo-char-generator
289b9698e58deabf4b6376b2ac55dd2740b7f30c
[ "MIT" ]
null
null
null
alembic_folder/versions/a025c2020370_.py
shthiago/minimo-char-generator
289b9698e58deabf4b6376b2ac55dd2740b7f30c
[ "MIT" ]
null
null
null
alembic_folder/versions/a025c2020370_.py
shthiago/minimo-char-generator
289b9698e58deabf4b6376b2ac55dd2740b7f30c
[ "MIT" ]
null
null
null
"""empty message Revision ID: a025c2020370 Revises: 6b48a14810ad Create Date: 2020-08-01 20:32:20.713786 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'a025c2020370' down_revision = '6b48a14810ad' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.alter_column('name', 'lastname', existing_type=sa.VARCHAR(length=20), nullable=False) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.alter_column('name', 'lastname', existing_type=sa.VARCHAR(length=20), nullable=True) # ### end Alembic commands ###
23.636364
65
0.652564
17f1fe7fb9488b60eb4b7c911e05402de12b2f4c
36,252
py
Python
pytype/pytd/visitors_test.py
dertilo/pytype
385e4e846a98d352143a3cbeb9dff12c4be850b2
[ "Apache-2.0" ]
null
null
null
pytype/pytd/visitors_test.py
dertilo/pytype
385e4e846a98d352143a3cbeb9dff12c4be850b2
[ "Apache-2.0" ]
null
null
null
pytype/pytd/visitors_test.py
dertilo/pytype
385e4e846a98d352143a3cbeb9dff12c4be850b2
[ "Apache-2.0" ]
null
null
null
import textwrap from pytype.pytd import escape from pytype.pytd import pytd from pytype.pytd import pytd_utils from pytype.pytd import visitors from pytype.pytd.parse import parser_test_base import unittest # All of these tests implicitly test pytd_utils.Print because # parser_test_base.AssertSourceEquals() uses pytd_utils.Print. DEFAULT_PYI = """ from typing import Any def __getattr__(name) -> Any: ... """ def pytd_src(text): text = textwrap.dedent(escape.preprocess_pytd(text)) text = text.replace("`", "") return text class TestVisitors(parser_test_base.ParserTest): """Tests the classes in parse/visitors.""" def test_lookup_classes(self): src = textwrap.dedent(""" from typing import Union class object: pass class A: def a(self, a: A, b: B) -> Union[A, B]: raise A() raise B() class B: def b(self, a: A, b: B) -> Union[A, B]: raise A() raise B() """) tree = self.Parse(src) new_tree = visitors.LookupClasses(tree) self.AssertSourceEquals(new_tree, src) new_tree.Visit(visitors.VerifyLookup()) def test_maybe_fill_in_local_pointers(self): src = textwrap.dedent(""" from typing import Union class A: def a(self, a: A, b: B) -> Union[A, B]: raise A() raise B() """) tree = self.Parse(src) ty_a = pytd.ClassType("A") ty_a.Visit(visitors.FillInLocalPointers({"": tree})) self.assertIsNotNone(ty_a.cls) ty_b = pytd.ClassType("B") ty_b.Visit(visitors.FillInLocalPointers({"": tree})) self.assertIsNone(ty_b.cls) def test_deface_unresolved(self): builtins = self.Parse(textwrap.dedent(""" class int: pass """)) src = textwrap.dedent(""" class A(X): def a(self, a: A, b: X, c: int) -> X: raise X() def b(self) -> X[int]: ... """) expected = textwrap.dedent(""" from typing import Any class A(Any): def a(self, a: A, b: Any, c: int) -> Any: raise Any def b(self) -> Any: ... 
""") tree = self.Parse(src) new_tree = tree.Visit(visitors.DefaceUnresolved([tree, builtins])) new_tree.Visit(visitors.VerifyVisitor()) self.AssertSourceEquals(new_tree, expected) def test_deface_unresolved2(self): builtins = self.Parse(textwrap.dedent(""" from typing import Generic, TypeVar class int: pass T = TypeVar("T") class list(Generic[T]): pass """)) src = textwrap.dedent(""" from typing import Union class A(X): def a(self, a: A, b: X, c: int) -> X: raise X() def c(self) -> Union[list[X], int]: ... """) expected = textwrap.dedent(""" from typing import Any, Union class A(Any): def a(self, a: A, b: Any, c: int) -> Any: raise Any def c(self) -> Union[list[Any], int]: ... """) tree = self.Parse(src) new_tree = tree.Visit(visitors.DefaceUnresolved([tree, builtins])) new_tree.Visit(visitors.VerifyVisitor()) self.AssertSourceEquals(new_tree, expected) def test_replace_types(self): src = textwrap.dedent(""" from typing import Union class A: def a(self, a: Union[A, B]) -> Union[A, B]: raise A() raise B() """) expected = textwrap.dedent(""" from typing import Union class A: def a(self: A2, a: Union[A2, B]) -> Union[A2, B]: raise A2() raise B() """) tree = self.Parse(src) new_tree = tree.Visit(visitors.ReplaceTypes({"A": pytd.NamedType("A2")})) self.AssertSourceEquals(new_tree, expected) def test_superclasses_by_name(self): src = textwrap.dedent(""" class A(): pass class B(): pass class C(A): pass class D(A,B): pass class E(C,D,A): pass """) tree = self.Parse(src) data = tree.Visit(visitors.ExtractSuperClassesByName()) self.assertCountEqual(("object",), data["A"]) self.assertCountEqual(("object",), data["B"]) self.assertCountEqual(("A",), data["C"]) self.assertCountEqual(("A", "B"), data["D"]) self.assertCountEqual(("A", "C", "D"), data["E"]) def test_strip_self(self): src = textwrap.dedent(""" def add(x: int, y: int) -> int: ... class A: def bar(self, x: int) -> float: ... def baz(self) -> float: ... def foo(self, x: int, y: float) -> float: ... 
""") expected = textwrap.dedent(""" def add(x: int, y: int) -> int: ... class A: def bar(x: int) -> float: ... def baz() -> float: ... def foo(x: int, y: float) -> float: ... """) tree = self.Parse(src) new_tree = tree.Visit(visitors.StripSelf()) self.AssertSourceEquals(new_tree, expected) def test_remove_unknown_classes(self): src = pytd_src(""" from typing import Union class `~unknown1`(): pass class `~unknown2`(): pass class A: def foobar(x: `~unknown1`, y: `~unknown2`) -> Union[`~unknown1`, int]: ... """) expected = textwrap.dedent(""" from typing import Any, Union class A: def foobar(x, y) -> Union[Any, int]: ... """) tree = self.Parse(src) tree = tree.Visit(visitors.RemoveUnknownClasses()) tree = tree.Visit(visitors.DropBuiltinPrefix()) self.AssertSourceEquals(tree, expected) def test_find_unknown_visitor(self): src = pytd_src(""" from typing import Any class object: pass class `~unknown1`(): pass class `~unknown_foobar`(): pass class `~int`(): pass class A(): def foobar(self, x: `~unknown1`) -> Any: ... class B(): def foobar(self, x: `~int`) -> Any: ... class C(): x = ... # type: `~unknown_foobar` class D(`~unknown1`): pass """) tree = self.Parse(src) tree = visitors.LookupClasses(tree) find_on = lambda x: tree.Lookup(x).Visit(visitors.RaiseIfContainsUnknown()) self.assertRaises(visitors.RaiseIfContainsUnknown.HasUnknown, find_on, "A") find_on("B") # shouldn't raise self.assertRaises(visitors.RaiseIfContainsUnknown.HasUnknown, find_on, "C") self.assertRaises(visitors.RaiseIfContainsUnknown.HasUnknown, find_on, "D") def test_in_place_lookup_external_classes(self): src1 = textwrap.dedent(""" def f1() -> bar.Bar: ... class Foo: pass """) src2 = textwrap.dedent(""" def f2() -> foo.Foo: ... 
class Bar: pass """) ast1 = self.Parse(src1, name="foo") ast2 = self.Parse(src2, name="bar") ast1 = ast1.Visit(visitors.LookupExternalTypes(dict(foo=ast1, bar=ast2))) ast2 = ast2.Visit(visitors.LookupExternalTypes(dict(foo=ast1, bar=ast2))) f1, = ast1.Lookup("foo.f1").signatures f2, = ast2.Lookup("bar.f2").signatures self.assertIs(ast2.Lookup("bar.Bar"), f1.return_type.cls) self.assertIs(ast1.Lookup("foo.Foo"), f2.return_type.cls) def test_lookup_constant(self): src1 = textwrap.dedent(""" Foo = ... # type: type """) src2 = textwrap.dedent(""" class Bar: bar = ... # type: foo.Foo """) ast1 = self.Parse(src1, name="foo").Visit( visitors.LookupBuiltins(self.loader.builtins)) ast2 = self.Parse(src2, name="bar") ast2 = ast2.Visit(visitors.LookupExternalTypes({"foo": ast1, "bar": ast2})) self.assertEqual(ast2.Lookup("bar.Bar").constants[0], pytd.Constant(name="bar", type=pytd.AnythingType())) def test_lookup_star_alias(self): src1 = textwrap.dedent(""" x = ... # type: int T = TypeVar("T") class A: ... def f(x: T) -> T: ... B = A """) src2 = "from foo import *" ast1 = self.Parse(src1).Replace(name="foo").Visit(visitors.AddNamePrefix()) ast2 = self.Parse(src2).Replace(name="bar").Visit(visitors.AddNamePrefix()) ast2 = ast2.Visit(visitors.LookupExternalTypes( {"foo": ast1, "bar": ast2}, self_name="bar")) self.assertEqual("bar", ast2.name) self.assertSetEqual({a.name for a in ast2.aliases}, {"bar.x", "bar.T", "bar.A", "bar.f", "bar.B"}) def test_lookup_star_alias_in_unnamed_module(self): src1 = textwrap.dedent(""" class A: ... """) src2 = "from foo import *" ast1 = self.Parse(src1).Replace(name="foo").Visit(visitors.AddNamePrefix()) ast2 = self.Parse(src2) name = ast2.name ast2 = ast2.Visit(visitors.LookupExternalTypes( {"foo": ast1}, self_name=None)) self.assertEqual(name, ast2.name) self.assertEqual(pytd_utils.Print(ast2), "from foo import A") def test_lookup_two_star_aliases(self): src1 = "class A: ..." src2 = "class B: ..." 
src3 = textwrap.dedent(""" from foo import * from bar import * """) ast1 = self.Parse(src1).Replace(name="foo").Visit(visitors.AddNamePrefix()) ast2 = self.Parse(src2).Replace(name="bar").Visit(visitors.AddNamePrefix()) ast3 = self.Parse(src3).Replace(name="baz").Visit(visitors.AddNamePrefix()) ast3 = ast3.Visit(visitors.LookupExternalTypes( {"foo": ast1, "bar": ast2, "baz": ast3}, self_name="baz")) self.assertSetEqual({a.name for a in ast3.aliases}, {"baz.A", "baz.B"}) def test_lookup_two_star_aliases_with_same_class(self): src1 = "class A: ..." src2 = "class A: ..." src3 = textwrap.dedent(""" from foo import * from bar import * """) ast1 = self.Parse(src1).Replace(name="foo").Visit(visitors.AddNamePrefix()) ast2 = self.Parse(src2).Replace(name="bar").Visit(visitors.AddNamePrefix()) ast3 = self.Parse(src3).Replace(name="baz").Visit(visitors.AddNamePrefix()) self.assertRaises(KeyError, ast3.Visit, visitors.LookupExternalTypes( {"foo": ast1, "bar": ast2, "baz": ast3}, self_name="baz")) def test_lookup_star_alias_with_duplicate_class(self): src1 = "class A: ..." src2 = textwrap.dedent(""" from foo import * class A: x = ... 
# type: int """) ast1 = self.Parse(src1).Replace(name="foo").Visit(visitors.AddNamePrefix()) ast2 = self.Parse(src2).Replace(name="bar").Visit(visitors.AddNamePrefix()) ast2 = ast2.Visit(visitors.LookupExternalTypes( {"foo": ast1, "bar": ast2}, self_name="bar")) self.assertMultiLineEqual(pytd_utils.Print(ast2), textwrap.dedent(""" class bar.A: x: int """).strip()) def test_lookup_two_star_aliases_with_default_pyi(self): src1 = DEFAULT_PYI src2 = DEFAULT_PYI src3 = textwrap.dedent(""" from foo import * from bar import * """) ast1 = self.Parse(src1).Replace(name="foo").Visit(visitors.AddNamePrefix()) ast2 = self.Parse(src2).Replace(name="bar").Visit(visitors.AddNamePrefix()) ast3 = self.Parse(src3).Replace(name="baz").Visit(visitors.AddNamePrefix()) ast3 = ast3.Visit(visitors.LookupExternalTypes( {"foo": ast1, "bar": ast2, "baz": ast3}, self_name="baz")) self.assertMultiLineEqual(pytd_utils.Print(ast3), textwrap.dedent(""" from typing import Any def baz.__getattr__(name) -> Any: ... """).strip()) def test_lookup_star_alias_with_duplicate_getattr(self): src1 = DEFAULT_PYI src2 = textwrap.dedent(""" from typing import Any from foo import * def __getattr__(name) -> Any: ... """) ast1 = self.Parse(src1).Replace(name="foo").Visit(visitors.AddNamePrefix()) ast2 = self.Parse(src2).Replace(name="bar").Visit(visitors.AddNamePrefix()) ast2 = ast2.Visit(visitors.LookupExternalTypes( {"foo": ast1, "bar": ast2}, self_name="bar")) self.assertMultiLineEqual(pytd_utils.Print(ast2), textwrap.dedent(""" from typing import Any def bar.__getattr__(name) -> Any: ... """).strip()) def test_lookup_two_star_aliases_with_different_getattrs(self): src1 = "def __getattr__(name) -> int: ..." src2 = "def __getattr__(name) -> str: ..." 
src3 = textwrap.dedent(""" from foo import * from bar import * """) ast1 = self.Parse(src1).Replace(name="foo").Visit(visitors.AddNamePrefix()) ast2 = self.Parse(src2).Replace(name="bar").Visit(visitors.AddNamePrefix()) ast3 = self.Parse(src3).Replace(name="baz").Visit(visitors.AddNamePrefix()) self.assertRaises(KeyError, ast3.Visit, visitors.LookupExternalTypes( {"foo": ast1, "bar": ast2, "baz": ast3}, self_name="baz")) def test_lookup_star_alias_with_different_getattr(self): src1 = "def __getattr__(name) -> int: ..." src2 = textwrap.dedent(""" from foo import * def __getattr__(name) -> str: ... """) ast1 = self.Parse(src1).Replace(name="foo").Visit(visitors.AddNamePrefix()) ast2 = self.Parse(src2).Replace(name="bar").Visit(visitors.AddNamePrefix()) ast2 = ast2.Visit(visitors.LookupExternalTypes( {"foo": ast1, "bar": ast2}, self_name="bar")) self.assertMultiLineEqual(pytd_utils.Print(ast2), textwrap.dedent(""" def bar.__getattr__(name) -> str: ... """).strip()) def test_collect_dependencies(self): src = textwrap.dedent(""" from typing import Union l = ... # type: list[Union[int, baz.BigInt]] def f1() -> bar.Bar: ... def f2() -> foo.bar.Baz: ... """) deps = visitors.CollectDependencies() self.Parse(src).Visit(deps) self.assertCountEqual({"baz", "bar", "foo.bar"}, deps.dependencies) def test_expand(self): src = textwrap.dedent(""" from typing import Union def foo(a: Union[int, float], z: Union[complex, str], u: bool) -> file: ... def bar(a: int) -> Union[str, unicode]: ... """) new_src = textwrap.dedent(""" from typing import Union def foo(a: int, z: complex, u: bool) -> file: ... def foo(a: int, z: str, u: bool) -> file: ... def foo(a: float, z: complex, u: bool) -> file: ... def foo(a: float, z: str, u: bool) -> file: ... def bar(a: int) -> Union[str, unicode]: ... 
""") self.AssertSourceEquals( self.ApplyVisitorToString(src, visitors.ExpandSignatures()), new_src) def test_print_imports(self): src = textwrap.dedent(""" from typing import Any, List, Tuple, Union def f(x: Union[int, slice]) -> List[Any]: ... def g(x: foo.C.C2) -> None: ... """) expected = textwrap.dedent(""" import foo from typing import Any, List, Union def f(x: Union[int, slice]) -> List[Any]: ... def g(x: foo.C.C2) -> None: ... """).strip() tree = self.Parse(src) res = pytd_utils.Print(tree) self.AssertSourceEquals(res, expected) self.assertMultiLineEqual(res, expected) def test_print_imports_named_type(self): # Can't get tree by parsing so build explicitly node = pytd.Constant("x", pytd.NamedType("typing.List")) tree = pytd_utils.CreateModule(name=None, constants=(node,)) expected_src = textwrap.dedent(""" from typing import List x: List """).strip() res = pytd_utils.Print(tree) self.assertMultiLineEqual(res, expected_src) def test_print_imports_ignores_existing(self): src = "from foo import b" tree = self.Parse(src) res = pytd_utils.Print(tree) self.assertMultiLineEqual(res, src) @unittest.skip("depended on `or`") def test_print_union_name_conflict(self): src = textwrap.dedent(""" class Union: ... def g(x: Union) -> Union[int, float]: ... """) tree = self.Parse(src) res = pytd_utils.Print(tree) self.AssertSourceEquals(res, src) def test_adjust_type_parameters(self): ast = self.Parse(""" from typing import Union T = TypeVar("T") T2 = TypeVar("T2") def f(x: T) -> T: ... 
class A(Generic[T]): def a(self, x: T2) -> None: self = A[Union[T, T2]] """) f = ast.Lookup("f") sig, = f.signatures p_x, = sig.params self.assertEqual(sig.template, (pytd.TemplateItem(pytd.TypeParameter("T", scope="f")),)) self.assertEqual(p_x.type, pytd.TypeParameter("T", scope="f")) cls = ast.Lookup("A") f_cls, = cls.methods sig_cls, = f_cls.signatures p_self, p_x_cls = sig_cls.params self.assertEqual(cls.template, (pytd.TemplateItem(pytd.TypeParameter("T", scope="A")),)) self.assertEqual(sig_cls.template, (pytd.TemplateItem( pytd.TypeParameter("T2", scope="A.a")),)) self.assertEqual(p_self.type.parameters, (pytd.TypeParameter("T", scope="A"),)) self.assertEqual(p_x_cls.type, pytd.TypeParameter("T2", scope="A.a")) def test_adjust_type_parameters_with_builtins(self): ast = self.ParseWithBuiltins(""" T = TypeVar("T") K = TypeVar("K") V = TypeVar("V") class Foo(List[int]): pass class Bar(Dict[T, int]): pass class Baz(Generic[K, V]): pass class Qux(Baz[str, int]): pass """) foo = ast.Lookup("Foo") bar = ast.Lookup("Bar") qux = ast.Lookup("Qux") foo_parent, = foo.parents bar_parent, = bar.parents qux_parent, = qux.parents # Expected: # Class(Foo, parent=GenericType(List, parameters=(int,)), template=()) # Class(Bar, parent=GenericType(Dict, parameters=(T, int)), template=(T)) # Class(Qux, parent=GenericType(Baz, parameters=(str, int)), template=()) self.assertEqual((pytd.ClassType("int"),), foo_parent.parameters) self.assertEqual((), foo.template) self.assertEqual( (pytd.TypeParameter("T", scope="Bar"), pytd.ClassType("int")), bar_parent.parameters) self.assertEqual( (pytd.TemplateItem(pytd.TypeParameter("T", scope="Bar")),), bar.template) self.assertEqual((pytd.ClassType("str"), pytd.ClassType("int")), qux_parent.parameters) self.assertEqual((), qux.template) def test_adjust_type_parameters_with_duplicates(self): ast = self.ParseWithBuiltins(""" T = TypeVar("T") class A(Dict[T, T], Generic[T]): pass """) a = ast.Lookup("A") self.assertEqual( 
(pytd.TemplateItem(pytd.TypeParameter("T", (), None, "A")),), a.template) def test_adjust_type_parameters_with_duplicates_in_generic(self): src = textwrap.dedent(""" T = TypeVar("T") class A(Generic[T, T]): pass """) self.assertRaises(visitors.ContainerError, lambda: self.Parse(src)) def test_verify_containers(self): ast1 = self.ParseWithBuiltins(""" from typing import SupportsInt, TypeVar T = TypeVar("T") class Foo(SupportsInt[T]): pass """) ast2 = self.ParseWithBuiltins(""" from typing import SupportsInt class Foo(SupportsInt[int]): pass """) ast3 = self.ParseWithBuiltins(""" from typing import Generic class Foo(Generic[int]): pass """) ast4 = self.ParseWithBuiltins(""" from typing import List class Foo(List[int, str]): pass """) self.assertRaises(visitors.ContainerError, lambda: ast1.Visit(visitors.VerifyContainers())) self.assertRaises(visitors.ContainerError, lambda: ast2.Visit(visitors.VerifyContainers())) self.assertRaises(visitors.ContainerError, lambda: ast3.Visit(visitors.VerifyContainers())) self.assertRaises(visitors.ContainerError, lambda: ast4.Visit(visitors.VerifyContainers())) def test_clear_class_pointers(self): cls = pytd.Class("foo", None, (), (), (), (), (), None, ()) t = pytd.ClassType("foo", cls) t = t.Visit(visitors.ClearClassPointers()) self.assertIsNone(t.cls) def test_expand_compatible_builtins(self): src = textwrap.dedent(""" from typing import Tuple, Union, TypeVar T = TypeVar('T', float, bool) def f1(a: float) -> None: ... def f2() -> float: ... def f3(a: bool) -> None: ... def f4() -> bool: ... def f5(a: Union[bool, int]) -> None: ... def f6(a: Tuple[bool, int]) -> None: ... def f7(x: T) -> T: ... """) expected = textwrap.dedent(""" from typing import Tuple, TypeVar, Union T = TypeVar('T', float, bool) def f1(a: Union[float, int]) -> None: ... def f2() -> float: ... def f3(a: Union[bool, None]) -> None: ... def f4() -> bool: ... def f5(a: Union[bool, None, int]) -> None: ... def f6(a: Tuple[Union[bool, None], int]) -> None: ... 
def f7(x: T) -> T: ... """) src_tree, expected_tree = ( self.Parse(s).Visit(visitors.LookupBuiltins(self.loader.builtins)) for s in (src, expected)) new_tree = src_tree.Visit(visitors.ExpandCompatibleBuiltins( self.loader.builtins)) self.AssertSourceEquals(new_tree, expected_tree) def test_add_name_prefix(self): src = textwrap.dedent(""" from typing import TypeVar def f(a: T) -> T: ... T = TypeVar("T") class X(Generic[T]): pass """) tree = self.Parse(src) self.assertIsNone(tree.Lookup("T").scope) self.assertEqual("X", tree.Lookup("X").template[0].type_param.scope) tree = tree.Replace(name="foo").Visit(visitors.AddNamePrefix()) self.assertIsNotNone(tree.Lookup("foo.f")) self.assertIsNotNone(tree.Lookup("foo.X")) self.assertEqual("foo", tree.Lookup("foo.T").scope) self.assertEqual("foo.X", tree.Lookup("foo.X").template[0].type_param.scope) def test_add_name_prefix_twice(self): src = textwrap.dedent(""" from typing import Any, TypeVar x = ... # type: Any T = TypeVar("T") class X(Generic[T]): ... """) tree = self.Parse(src) tree = tree.Replace(name="foo").Visit(visitors.AddNamePrefix()) tree = tree.Replace(name="foo").Visit(visitors.AddNamePrefix()) self.assertIsNotNone(tree.Lookup("foo.foo.x")) self.assertEqual("foo.foo", tree.Lookup("foo.foo.T").scope) self.assertEqual("foo.foo.X", tree.Lookup("foo.foo.X").template[0].type_param.scope) def test_add_name_prefix_on_class_type(self): src = textwrap.dedent(""" x = ... # type: y class Y: ... """) tree = self.Parse(src) x = tree.Lookup("x") x = x.Replace(type=pytd.ClassType("Y")) tree = tree.Replace(constants=(x,), name="foo") tree = tree.Visit(visitors.AddNamePrefix()) self.assertEqual("foo.Y", tree.Lookup("foo.x").type.name) def test_add_name_prefix_on_nested_class_alias(self): src = textwrap.dedent(""" class A: class B: class C: ... D = A.B.C """) expected = textwrap.dedent(""" from typing import Type class foo.A: class foo.A.B: class foo.A.B.C: ... 
D: Type[foo.A.B.C] """).strip() self.assertMultiLineEqual(expected, pytd_utils.Print( self.Parse(src).Replace(name="foo").Visit(visitors.AddNamePrefix()))) def test_add_name_prefix_on_nested_class_outside_ref(self): src = textwrap.dedent(""" class A: class B: ... b: A.B C = A.B def f(x: A.B) -> A.B: ... class D: b: A.B def f(self, x: A.B) -> A.B: ... """) expected = textwrap.dedent(""" from typing import Type foo.b: foo.A.B foo.C: Type[foo.A.B] class foo.A: class foo.A.B: ... class foo.D: b: foo.A.B def f(self, x: foo.A.B) -> foo.A.B: ... def foo.f(x: foo.A.B) -> foo.A.B: ... """).strip() self.assertMultiLineEqual(expected, pytd_utils.Print( self.Parse(src).Replace(name="foo").Visit(visitors.AddNamePrefix()))) def test_add_name_prefix_on_nested_class_method(self): src = textwrap.dedent(""" class A: class B: def copy(self) -> A.B: ... """) expected = textwrap.dedent(""" class foo.A: class foo.A.B: def copy(self) -> foo.A.B: ... """).strip() self.assertMultiLineEqual(expected, pytd_utils.Print( self.Parse(src).Replace(name="foo").Visit(visitors.AddNamePrefix()))) def test_print_merge_types(self): src = textwrap.dedent(""" from typing import Union def a(a: float) -> int: ... def b(a: Union[int, float]) -> int: ... def c(a: object) -> Union[float, int]: ... def d(a: float) -> int: ... def e(a: Union[bool, None]) -> Union[bool, None]: ... """) expected = textwrap.dedent(""" from typing import Optional, Union def a(a: float) -> int: ... def b(a: float) -> int: ... def c(a: object) -> Union[float, int]: ... def d(a: float) -> int: ... def e(a: bool) -> Optional[bool]: ... 
""") self.assertMultiLineEqual(expected.strip(), pytd_utils.Print(self.ToAST(src)).strip()) def test_print_heterogeneous_tuple(self): t = pytd.TupleType(pytd.NamedType("tuple"), (pytd.NamedType("str"), pytd.NamedType("float"))) self.assertEqual("Tuple[str, float]", pytd_utils.Print(t)) def test_verify_heterogeneous_tuple(self): # Error: does not inherit from Generic base = pytd.ClassType("tuple") base.cls = pytd.Class("tuple", None, (), (), (), (), (), None, ()) t1 = pytd.TupleType(base, (pytd.NamedType("str"), pytd.NamedType("float"))) self.assertRaises(visitors.ContainerError, lambda: t1.Visit(visitors.VerifyContainers())) # Error: Generic[str, float] gen = pytd.ClassType("typing.Generic") gen.cls = pytd.Class("typing.Generic", None, (), (), (), (), (), None, ()) t2 = pytd.TupleType(gen, (pytd.NamedType("str"), pytd.NamedType("float"))) self.assertRaises(visitors.ContainerError, lambda: t2.Visit(visitors.VerifyContainers())) # Okay param = pytd.TypeParameter("T") parent = pytd.GenericType(gen, (param,)) base.cls = pytd.Class( "tuple", None, (parent,), (), (), (), (), None, (pytd.TemplateItem(param),)) t3 = pytd.TupleType(base, (pytd.NamedType("str"), pytd.NamedType("float"))) t3.Visit(visitors.VerifyContainers()) def test_typevar_value_conflict(self): # Conflicting values for _T. ast = self.ParseWithBuiltins(""" from typing import List class A(List[int], List[str]): ... """) self.assertRaises(visitors.ContainerError, lambda: ast.Visit(visitors.VerifyContainers())) def test_typevar_value_conflict_hidden(self): # Conflicting value for _T hidden in MRO. ast = self.ParseWithBuiltins(""" from typing import List class A(List[int]): ... class B(A, List[str]): ... """) self.assertRaises(visitors.ContainerError, lambda: ast.Visit(visitors.VerifyContainers())) def test_typevar_value_conflict_related_containers(self): # List inherits from Sequence, so they share a type parameter. 
ast = self.ParseWithBuiltins(""" from typing import List, Sequence class A(List[int], Sequence[str]): ... """) self.assertRaises(visitors.ContainerError, lambda: ast.Visit(visitors.VerifyContainers())) def test_typevar_value_no_conflict(self): # Not an error if the containers are unrelated, even if they use the same # type parameter name. ast = self.ParseWithBuiltins(""" from typing import ContextManager, SupportsAbs class Foo(SupportsAbs[float], ContextManager[Foo]): ... """) ast.Visit(visitors.VerifyContainers()) def test_typevar_value_consistency(self): # Type renaming makes all type parameters represent the same type `T1`. ast = self.ParseWithBuiltins(""" from typing import Generic, TypeVar T1 = TypeVar("T1") T2 = TypeVar("T2") T3 = TypeVar("T3") T4 = TypeVar("T4") T5 = TypeVar("T5") class A(Generic[T1]): ... class B1(A[T2]): ... class B2(A[T3]): ... class C(B1[T4], B2[T5]): ... class D(C[str, str], A[str]): ... """) ast.Visit(visitors.VerifyContainers()) def test_typevar_value_and_alias_conflict(self): ast = self.ParseWithBuiltins(""" from typing import Generic, TypeVar T = TypeVar("T") class A(Generic[T]): ... class B(A[int], A[T]): ... """) self.assertRaises(visitors.ContainerError, lambda: ast.Visit(visitors.VerifyContainers())) def test_typevar_alias_and_value_conflict(self): ast = self.ParseWithBuiltins(""" from typing import Generic, TypeVar T = TypeVar("T") class A(Generic[T]): ... class B(A[T], A[int]): ... """) self.assertRaises(visitors.ContainerError, lambda: ast.Visit(visitors.VerifyContainers())) def test_verify_container_with_mro_error(self): # Make sure we don't crash. ast = self.ParseWithBuiltins(""" from typing import List class A(List[str]): ... class B(List[str], A): ... 
""") ast.Visit(visitors.VerifyContainers()) def test_alias_printing(self): a = pytd.Alias("MyList", pytd.GenericType( pytd.NamedType("typing.List"), (pytd.AnythingType(),))) ty = pytd_utils.CreateModule("test", aliases=(a,)) expected = textwrap.dedent(""" from typing import Any, List MyList = List[Any]""") self.assertMultiLineEqual(expected.strip(), pytd_utils.Print(ty).strip()) def test_print_none_union(self): src = textwrap.dedent(""" from typing import Union def f(x: Union[str, None]) -> None: ... def g(x: Union[str, int, None]) -> None: ... def h(x: Union[None]) -> None: ... """) expected = textwrap.dedent(""" from typing import Optional, Union def f(x: Optional[str]) -> None: ... def g(x: Optional[Union[str, int]]) -> None: ... def h(x: None) -> None: ... """) self.assertMultiLineEqual(expected.strip(), pytd_utils.Print(self.ToAST(src)).strip()) def test_lookup_typing_class(self): node = visitors.LookupClasses(pytd.NamedType("typing.Sequence"), self.loader.concat_all()) assert node.cls def test_create_type_parameters_from_unknowns(self): src = pytd_src(""" from typing import Dict def f(x: `~unknown1`) -> `~unknown1`: ... def g(x: `~unknown2`, y: `~unknown2`) -> None: ... def h(x: `~unknown3`) -> None: ... def i(x: Dict[`~unknown4`, `~unknown4`]) -> None: ... # Should not be changed class `~unknown5`: def __add__(self, x: `~unknown6`) -> `~unknown6`: ... def `~f`(x: `~unknown7`) -> `~unknown7`: ... """) expected = pytd_src(""" from typing import Dict _T0 = TypeVar('_T0') def f(x: _T0) -> _T0: ... def g(x: _T0, y: _T0) -> None: ... def h(x: `~unknown3`) -> None: ... def i(x: Dict[_T0, _T0]) -> None: ... class `~unknown5`: def __add__(self, x: `~unknown6`) -> `~unknown6`: ... def `~f`(x: `~unknown7`) -> `~unknown7`: ... 
""") ast1 = self.Parse(src) ast1 = ast1.Visit(visitors.CreateTypeParametersForSignatures()) self.AssertSourceEquals(ast1, expected) @unittest.skip("We no longer support redefining TypeVar") def test_redefine_typevar(self): src = pytd_src(""" def f(x: `~unknown1`) -> `~unknown1`: ... class `TypeVar`: ... """) ast = self.Parse(src).Visit(visitors.CreateTypeParametersForSignatures()) self.assertMultiLineEqual(pytd_utils.Print(ast), textwrap.dedent(""" import typing _T0 = TypeVar('_T0') class `TypeVar`: ... def f(x: _T0) -> _T0: ...""").strip()) def test_create_type_parameters_for_new(self): src = textwrap.dedent(""" class Foo: def __new__(cls: Type[Foo]) -> Foo: ... class Bar: def __new__(cls: Type[Bar], x, y, z) -> Bar: ... """) ast = self.Parse(src).Visit(visitors.CreateTypeParametersForSignatures()) self.assertMultiLineEqual(pytd_utils.Print(ast), textwrap.dedent(""" from typing import TypeVar _TBar = TypeVar('_TBar', bound=Bar) _TFoo = TypeVar('_TFoo', bound=Foo) class Foo: def __new__(cls: Type[_TFoo]) -> _TFoo: ... class Bar: def __new__(cls: Type[_TBar], x, y, z) -> _TBar: ... """).strip()) def test_keep_custom_new(self): src = textwrap.dedent(""" class Foo: def __new__(cls: Type[X]) -> X: ... class Bar: def __new__(cls, x: Type[Bar]) -> Bar: ... """).strip() ast = self.Parse(src).Visit(visitors.CreateTypeParametersForSignatures()) self.assertMultiLineEqual(pytd_utils.Print(ast), src) def test_print_type_parameter_bound(self): src = textwrap.dedent(""" from typing import TypeVar T = TypeVar("T", bound=str) """) self.assertMultiLineEqual(pytd_utils.Print(self.Parse(src)), textwrap.dedent(""" from typing import TypeVar T = TypeVar('T', bound=str)""").lstrip()) def test_print_cls(self): src = textwrap.dedent(""" class A: def __new__(cls: Type[A]) -> A: ... """) self.assertMultiLineEqual(pytd_utils.Print(self.Parse(src)), textwrap.dedent(""" class A: def __new__(cls) -> A: ... 
""").strip()) def test_print_no_return(self): src = textwrap.dedent(""" def f() -> nothing: ... """) self.assertMultiLineEqual(pytd_utils.Print(self.Parse(src)), textwrap.dedent(""" from typing import NoReturn def f() -> NoReturn: ...""").lstrip()) def test_print_multiline_signature(self): src = textwrap.dedent(""" def f(x: int, y: str, z: bool) -> list[str]: pass """) self.assertMultiLineEqual( pytd_utils.Print(self.Parse(src), multiline_args=True), textwrap.dedent(""" from typing import List def f( x: int, y: str, z: bool ) -> List[str]: ... """).strip()) def test_rename_builtins_prefix(self): """__builtin__.foo should get rewritten to builtins.foo and then to foo.""" src = textwrap.dedent(""" import __builtin__ class MyError(__builtin__.KeyError): ... """) self.assertMultiLineEqual(pytd_utils.Print(self.Parse(src)), "class MyError(KeyError): ...") class ReplaceModulesWithAnyTest(unittest.TestCase): def test_any_replacement(self): class_type_match = pytd.ClassType("match.foo") named_type_match = pytd.NamedType("match.bar") class_type_no_match = pytd.ClassType("match_no.foo") named_type_no_match = pytd.NamedType("match_no.bar") generic_type_match = pytd.GenericType(class_type_match, ()) generic_type_no_match = pytd.GenericType(class_type_no_match, ()) visitor = visitors.ReplaceModulesWithAny(["match."]) self.assertEqual(class_type_no_match, class_type_no_match.Visit(visitor)) self.assertEqual(named_type_no_match, named_type_no_match.Visit(visitor)) self.assertEqual(generic_type_no_match, generic_type_no_match.Visit(visitor)) self.assertEqual(pytd.AnythingType, class_type_match.Visit(visitor).__class__) self.assertEqual(pytd.AnythingType, named_type_match.Visit(visitor).__class__) self.assertEqual(pytd.AnythingType, generic_type_match.Visit(visitor).__class__) class ReplaceUnionsWithAnyTest(unittest.TestCase): def test_any_replacement(self): union = pytd.UnionType((pytd.NamedType("a"), pytd.NamedType("b"))) self.assertEqual( 
union.Visit(visitors.ReplaceUnionsWithAny()), pytd.AnythingType()) if __name__ == "__main__": unittest.main()
33.975633
86
0.593043
5b77f09ef12910c997780635e863cb4eb817bf67
2,584
py
Python
src/pyside2_demo/frontend/example05/example05.py
chihyi-liao/pyside2_demo
410bdaf0876897febf4607ff73928c60186c7be1
[ "MIT" ]
null
null
null
src/pyside2_demo/frontend/example05/example05.py
chihyi-liao/pyside2_demo
410bdaf0876897febf4607ff73928c60186c7be1
[ "MIT" ]
null
null
null
src/pyside2_demo/frontend/example05/example05.py
chihyi-liao/pyside2_demo
410bdaf0876897febf4607ff73928c60186c7be1
[ "MIT" ]
null
null
null
from PySide2.QtCore import Slot, Qt from PySide2.QtWidgets import ( QMainWindow, QWidget, QGridLayout, QHBoxLayout, QPushButton, QTabWidget, QMessageBox) from pyside2_demo.common import QHLine class TabWidget(QWidget): def __init__(self): super(TabWidget, self).__init__() class HelpCornerWidget(QWidget): def __init__(self, parent=None): super(HelpCornerWidget, self).__init__(parent) layout = QHBoxLayout() help_btn = QPushButton("?") help_btn.setFixedWidth(30) help_btn.clicked.connect(self.help_btn_fn) layout.addWidget(help_btn) layout.setSpacing(0) layout.setContentsMargins(0, 0, 0, 0) self.setLayout(layout) @Slot() def help_btn_fn(self): msg = QMessageBox(QMessageBox.NoIcon, '幫助', '幫助說明') msg.exec_() class MainWidget(QWidget): def __init__(self): super(MainWidget, self).__init__() tab_main = QTabWidget() tab_main.addTab(TabWidget(), "Main") tab_main.addTab(TabWidget(), "Settings") tab_main.setCornerWidget(HelpCornerWidget(self), Qt.TopRightCorner) tab_main.setTabBarAutoHide(True) tab_main.currentChanged.connect(self.tab_bar_changed_fn) line = QHLine() add_btn = QPushButton("&Add") add_btn.clicked.connect(self.add_btn_fn) add_btn.setFixedWidth(100) del_btn = QPushButton("&Delete") del_btn.clicked.connect(self.del_btn_fn) del_btn.setFixedWidth(100) # 設定 layout layout = QGridLayout() layout.addWidget(tab_main, 0, 0, 1, 3) layout.addWidget(line, 1, 0, 1, 3) layout.addWidget(del_btn, 2, 1) layout.addWidget(add_btn, 2, 2) self.tab_main = tab_main self.setLayout(layout) @Slot() def add_btn_fn(self): count = self.tab_main.count() self.tab_main.insertTab(count, TabWidget(), "Tab%d" % (count-1, )) @Slot() def del_btn_fn(self): count = self.tab_main.count() if count > 2: self.tab_main.removeTab(count-1) @Slot() def tab_bar_changed_fn(self): idx = self.tab_main.currentIndex() if idx != -1: tab_text = self.tab_main.tabText(idx) msg = QMessageBox(QMessageBox.NoIcon, 'Tab', 'Current Tab is %s' % (tab_text, )) msg.exec_() class MainWindow(QMainWindow): def __init__(self): 
super(MainWindow, self).__init__() self.setCentralWidget(MainWidget()) self.setWindowTitle("Example04") self.resize(600, 400)
31.512195
92
0.634288
0aabd0813ba2a0c8e0a5fc65307104082cc6b4e6
9,611
py
Python
official/resnet/keras/keras_common.py
MedZghal/models
7a4ed44bc52aace1130df848a812cb696560d1e3
[ "Apache-2.0" ]
1
2019-02-09T15:30:17.000Z
2019-02-09T15:30:17.000Z
official/resnet/keras/keras_common.py
MedZghal/models
7a4ed44bc52aace1130df848a812cb696560d1e3
[ "Apache-2.0" ]
null
null
null
official/resnet/keras/keras_common.py
MedZghal/models
7a4ed44bc52aace1130df848a812cb696560d1e3
[ "Apache-2.0" ]
null
null
null
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Common util functions and classes used by both keras cifar and imagenet.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import time import numpy as np # pylint: disable=g-bad-import-order from absl import flags import tensorflow as tf from tensorflow.python.keras.optimizer_v2 import (gradient_descent as gradient_descent_v2) FLAGS = flags.FLAGS BASE_LEARNING_RATE = 0.1 # This matches Jing's version. TRAIN_TOP_1 = 'training_accuracy_top_1' class BatchTimestamp(object): """A structure to store batch time stamp.""" def __init__(self, batch_index, timestamp): self.batch_index = batch_index self.timestamp = timestamp class TimeHistory(tf.keras.callbacks.Callback): """Callback for Keras models.""" def __init__(self, batch_size, log_steps): """Callback for logging performance (# image/second). Args: batch_size: Total batch size. """ self.batch_size = batch_size super(TimeHistory, self).__init__() self.log_steps = log_steps # Logs start of step 0 then end of each step based on log_steps interval. 
self.timestamp_log = [] def on_train_begin(self, logs=None): self.record_batch = True def on_train_end(self, logs=None): self.train_finish_time = time.time() def on_batch_begin(self, batch, logs=None): if self.record_batch: timestamp = time.time() self.start_time = timestamp self.record_batch = False if batch == 0: self.timestamp_log.append(BatchTimestamp(batch, timestamp)) def on_batch_end(self, batch, logs=None): if batch % self.log_steps == 0: timestamp = time.time() elapsed_time = timestamp - self.start_time examples_per_second = (self.batch_size * self.log_steps) / elapsed_time if batch != 0: self.record_batch = True self.timestamp_log.append(BatchTimestamp(batch, timestamp)) tf.compat.v1.logging.info( "BenchmarkMetric: {'num_batches':%d, 'time_taken': %f," "'images_per_second': %f}" % (batch, elapsed_time, examples_per_second)) class LearningRateBatchScheduler(tf.keras.callbacks.Callback): """Callback to update learning rate on every batch (not epoch boundaries). N.B. Only support Keras optimizers, not TF optimizers. Args: schedule: a function that takes an epoch index and a batch index as input (both integer, indexed from 0) and returns a new learning rate as output (float). 
""" def __init__(self, schedule, batch_size, num_images): super(LearningRateBatchScheduler, self).__init__() self.schedule = schedule self.batches_per_epoch = num_images / batch_size self.batch_size = batch_size self.epochs = -1 self.prev_lr = -1 def on_epoch_begin(self, epoch, logs=None): if not hasattr(self.model.optimizer, 'learning_rate'): raise ValueError('Optimizer must have a "learning_rate" attribute.') self.epochs += 1 def on_batch_begin(self, batch, logs=None): """Executes before step begins.""" lr = self.schedule(self.epochs, batch, self.batches_per_epoch, self.batch_size) if not isinstance(lr, (float, np.float32, np.float64)): raise ValueError('The output of the "schedule" function should be float.') if lr != self.prev_lr: self.model.optimizer.learning_rate = lr # lr should be a float here self.prev_lr = lr tf.compat.v1.logging.debug( 'Epoch %05d Batch %05d: LearningRateBatchScheduler ' 'change learning rate to %s.', self.epochs, batch, lr) def get_optimizer(): """Returns optimizer to use.""" # The learning_rate is overwritten at the beginning of each step by callback. return gradient_descent_v2.SGD(learning_rate=0.1, momentum=0.9) def get_callbacks(learning_rate_schedule_fn, num_images): """Returns common callbacks.""" time_callback = TimeHistory(FLAGS.batch_size, FLAGS.log_steps) tensorboard_callback = tf.keras.callbacks.TensorBoard( log_dir=FLAGS.model_dir) lr_callback = LearningRateBatchScheduler( learning_rate_schedule_fn, batch_size=FLAGS.batch_size, num_images=num_images) return time_callback, tensorboard_callback, lr_callback def build_stats(history, eval_output, time_callback): """Normalizes and returns dictionary of stats. Args: history: Results of the training step. Supports both categorical_accuracy and sparse_categorical_accuracy. eval_output: Output of the eval step. Assumes first value is eval_loss and second value is accuracy_top_1. time_callback: Time tracking callback likely used during keras.fit. 
Returns: Dictionary of normalized results. """ stats = {} if eval_output: stats['accuracy_top_1'] = eval_output[1].item() stats['eval_loss'] = eval_output[0].item() if history and history.history: train_hist = history.history # Gets final loss from training. stats['loss'] = train_hist['loss'][-1].item() # Gets top_1 training accuracy. if 'categorical_accuracy' in train_hist: stats[TRAIN_TOP_1] = train_hist['categorical_accuracy'][-1].item() elif 'sparse_categorical_accuracy' in train_hist: stats[TRAIN_TOP_1] = train_hist['sparse_categorical_accuracy'][-1].item() if time_callback: timestamp_log = time_callback.timestamp_log stats['step_timestamp_log'] = timestamp_log stats['train_finish_time'] = time_callback.train_finish_time if len(timestamp_log) > 1: stats['avg_exp_per_second'] = ( time_callback.batch_size * time_callback.log_steps * (len(time_callback.timestamp_log)-1) / (timestamp_log[-1].timestamp - timestamp_log[0].timestamp)) return stats def define_keras_flags(): flags.DEFINE_boolean(name='enable_eager', default=False, help='Enable eager?') flags.DEFINE_boolean(name='skip_eval', default=False, help='Skip evaluation?') flags.DEFINE_integer( name='train_steps', default=None, help='The number of steps to run for training. If it is larger than ' '# batches per epoch, then use # batches per epoch. When this flag is ' 'set, only one epoch is going to run for training.') flags.DEFINE_integer( name='log_steps', default=100, help='For every log_steps, we log the timing information such as ' 'examples per second. Besides, for every log_steps, we store the ' 'timestamp of a batch end.') def get_synth_input_fn(height, width, num_channels, num_classes, dtype=tf.float32): """Returns an input function that returns a dataset with random data. This input_fn returns a data set that iterates over a set of random data and bypasses all preprocessing, e.g. jpeg decode and copy. The host to device copy is still included. 
This used to find the upper throughput bound when tuning the full input pipeline. Args: height: Integer height that will be used to create a fake image tensor. width: Integer width that will be used to create a fake image tensor. num_channels: Integer depth that will be used to create a fake image tensor. num_classes: Number of classes that should be represented in the fake labels tensor dtype: Data type for features/images. Returns: An input_fn that can be used in place of a real one to return a dataset that can be used for iteration. """ # pylint: disable=unused-argument def input_fn(is_training, data_dir, batch_size, *args, **kwargs): """Returns dataset filled with random data.""" # Synthetic input should be within [0, 255]. inputs = tf.random.truncated_normal([height, width, num_channels], dtype=dtype, mean=127, stddev=60, name='synthetic_inputs') labels = tf.random.uniform([1], minval=0, maxval=num_classes - 1, dtype=tf.int32, name='synthetic_labels') # Cast to float32 for Keras model. labels = tf.cast(labels, dtype=tf.float32) data = tf.data.Dataset.from_tensors((inputs, labels)).repeat() # `drop_remainder` will make dataset produce outputs with known shapes. data = data.batch(batch_size, drop_remainder=True) data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) return data return input_fn def get_strategy_scope(strategy): if strategy: strategy_scope = strategy.scope() else: strategy_scope = DummyContextManager() return strategy_scope class DummyContextManager(object): def __enter__(self): pass def __exit__(self, *args): pass
35.464945
80
0.682239
53fe1b5d59090a39a643028db3d776e8998615cd
50,868
py
Python
brownie/network/contract.py
hassoon1986/brownie
8036ef80512251e2d4a55fbbb7ad7ecdf388acb2
[ "MIT" ]
1
2020-10-09T17:23:03.000Z
2020-10-09T17:23:03.000Z
brownie/network/contract.py
hassoon1986/brownie
8036ef80512251e2d4a55fbbb7ad7ecdf388acb2
[ "MIT" ]
null
null
null
brownie/network/contract.py
hassoon1986/brownie
8036ef80512251e2d4a55fbbb7ad7ecdf388acb2
[ "MIT" ]
null
null
null
#!/usr/bin/python3 import json import os import re import warnings from pathlib import Path from textwrap import TextWrapper from typing import Any, Dict, Iterator, List, Optional, Set, Tuple, Union from urllib.parse import urlparse import eth_abi import requests import solcx from eth_utils import remove_0x_prefix from hexbytes import HexBytes from semantic_version import Version from vvm import get_installable_vyper_versions from vvm.utils.convert import to_vyper_version from brownie._config import BROWNIE_FOLDER, CONFIG, REQUEST_HEADERS from brownie.convert.datatypes import Wei from brownie.convert.normalize import format_input, format_output from brownie.convert.utils import ( build_function_selector, build_function_signature, get_type_strings, ) from brownie.exceptions import ( BrownieCompilerWarning, BrownieEnvironmentWarning, ContractExists, ContractNotFound, UndeployedLibrary, VirtualMachineError, ) from brownie.project import compiler, ethpm from brownie.typing import AccountsType, TransactionReceiptType from brownie.utils import color from . 
import accounts, chain from .event import _add_deployment_topics, _get_topics from .state import ( _add_contract, _add_deployment, _find_contract, _get_deployment, _remove_contract, _revert_register, ) from .web3 import _resolve_address, web3 _unverified_addresses: Set = set() class _ContractBase: _dir_color = "bright magenta" def __init__(self, project: Any, build: Dict, sources: Dict) -> None: self._project = project self._build = build.copy() self._sources = sources self.topics = _get_topics(self.abi) self.selectors = { build_function_selector(i): i["name"] for i in self.abi if i["type"] == "function" } # this isn't fully accurate because of overloaded methods - will be removed in `v2.0.0` self.signatures = { i["name"]: build_function_selector(i) for i in self.abi if i["type"] == "function" } @property def abi(self) -> List: return self._build["abi"] @property def _name(self) -> str: return self._build["contractName"] def info(self) -> None: """ Display NatSpec documentation for this contract. """ if self._build.get("natspec"): _print_natspec(self._build["natspec"]) def get_method(self, calldata: str) -> Optional[str]: sig = calldata[:10].lower() return self.selectors.get(sig) class ContractContainer(_ContractBase): """List-like container class that holds all Contract instances of the same type, and is used to deploy new instances of that contract. Attributes: abi: Complete contract ABI. bytecode: Bytecode used to deploy the contract. 
signatures: Dictionary of {'function name': "bytes4 signature"} topics: Dictionary of {'event name': "bytes32 topic"}""" def __init__(self, project: Any, build: Dict) -> None: self.tx = None self.bytecode = build["bytecode"] self._contracts: List["ProjectContract"] = [] super().__init__(project, build, project._sources) self.deploy = ContractConstructor(self, self._name) _revert_register(self) def __iter__(self) -> Iterator: return iter(self._contracts) def __getitem__(self, i: Any) -> "ProjectContract": return self._contracts[i] def __delitem__(self, key: Any) -> None: item = self._contracts[key] self.remove(item) def __len__(self) -> int: return len(self._contracts) def __repr__(self) -> str: if CONFIG.argv["cli"] == "console": return str(self._contracts) return super().__repr__() def _reset(self) -> None: for contract in self._contracts: _remove_contract(contract) contract._reverted = True self._contracts.clear() def _revert(self, height: int) -> None: reverted = [ i for i in self._contracts if (i.tx and i.tx.block_number is not None and i.tx.block_number > height) or len(web3.eth.getCode(i.address).hex()) <= 4 ] for contract in reverted: self.remove(contract) contract._reverted = True def remove(self, contract: "ProjectContract") -> None: """Removes a contract from the container. Args: contract: Contract instance of address string of the contract.""" if contract not in self._contracts: raise TypeError("Object is not in container.") self._contracts.remove(contract) contract._delete_deployment() _remove_contract(contract) def at( self, address: str, owner: Optional[AccountsType] = None, tx: Optional[TransactionReceiptType] = None, ) -> "ProjectContract": """Returns a contract address. Raises ValueError if no bytecode exists at the address. Args: address: Address string of the contract. owner: Default Account instance to send contract transactions from. 
tx: Transaction ID of the contract creation.""" contract = _find_contract(address) if isinstance(contract, ProjectContract): if contract._name == self._name and contract._project == self._project: return contract raise ContractExists( f"'{contract._name}' declared at {address} in project '{contract._project._name}'" ) build = self._build contract = ProjectContract(self._project, build, address, owner, tx) if not _verify_deployed_code(address, build["deployedBytecode"], build["language"]): # prevent trace attempts when the bytecode doesn't match del contract._build["pcMap"] contract._save_deployment() _add_contract(contract) self._contracts.append(contract) if CONFIG.network_type == "live": _add_deployment(contract) return contract def _add_from_tx(self, tx: TransactionReceiptType) -> None: tx._confirmed.wait() if tx.status and tx.contract_address is not None: try: self.at(tx.contract_address, tx.sender, tx) except ContractNotFound: # if the contract self-destructed during deployment pass class ContractConstructor: _dir_color = "bright magenta" def __init__(self, parent: "ContractContainer", name: str) -> None: self._parent = parent try: self.abi = next(i for i in parent.abi if i["type"] == "constructor") self.abi["name"] = "constructor" except Exception: self.abi = {"inputs": [], "name": "constructor", "type": "constructor"} self._name = name @property def payable(self) -> bool: if "payable" in self.abi: return self.abi["payable"] else: return self.abi["stateMutability"] == "payable" def __repr__(self) -> str: return f"<{type(self).__name__} '{self._name}.constructor({_inputs(self.abi)})'>" def __call__(self, *args: Tuple) -> Union["Contract", TransactionReceiptType]: """Deploys a contract. Args: *args: Constructor arguments. The last argument MUST be a dictionary of transaction values containing at minimum a 'from' key to specify which account to deploy this contract from. 
Returns: * Contract instance if the transaction confirms * TransactionReceipt if the transaction is pending or reverts""" args, tx = _get_tx(None, args) if not tx["from"]: raise AttributeError( "No deployer address given. You must supply a tx dict" " as the last argument with a 'from' field." ) return tx["from"].deploy( self._parent, *args, amount=tx["value"], gas_limit=tx["gas"], gas_price=tx["gasPrice"], nonce=tx["nonce"], required_confs=tx["required_confs"], ) @staticmethod def _autosuggest(obj: "ContractConstructor") -> List: return _contract_method_autosuggest(obj.abi["inputs"], True, obj.payable) def encode_input(self, *args: tuple) -> str: bytecode = self._parent.bytecode # find and replace unlinked library pointers in bytecode for marker in re.findall("_{1,}[^_]*_{1,}", bytecode): library = marker.strip("_") if not self._parent._project[library]: raise UndeployedLibrary( f"Contract requires '{library}' library, but it has not been deployed yet" ) address = self._parent._project[library][-1].address[-40:] bytecode = bytecode.replace(marker, address) data = format_input(self.abi, args) types_list = get_type_strings(self.abi["inputs"]) return bytecode + eth_abi.encode_abi(types_list, data).hex() def estimate_gas(self, *args: Tuple) -> int: """ Estimate the gas cost for the deployment. Raises VirtualMachineError if the transaction would revert. Arguments --------- *args Constructor arguments. The last argument MUST be a dictionary of transaction values containing at minimum a 'from' key to specify which account to deploy this contract from. Returns ------- int Estimated gas value in wei. """ args, tx = _get_tx(None, args) if not tx["from"]: raise AttributeError( "Contract has no owner, you must supply a tx dict" " as the last argument with a 'from' field." 
) return tx["from"].estimate_gas( amount=tx["value"], gas_price=tx["gasPrice"], data=self.encode_input(*args) ) class InterfaceContainer: """ Container class that provides access to interfaces within a project. """ def __init__(self, project: Any) -> None: self._project = project # automatically populate with interfaces in `data/interfaces` # overwritten if a project contains an interface with the same name for path in BROWNIE_FOLDER.glob("data/interfaces/*.json"): with path.open() as fp: abi = json.load(fp) self._add(path.stem, abi) def _add(self, name: str, abi: List) -> None: constructor = InterfaceConstructor(name, abi) setattr(self, name, constructor) class InterfaceConstructor: """ Constructor used to create Contract objects from a project interface. """ def __init__(self, name: str, abi: List) -> None: self._name = name self.abi = abi self.selectors = { build_function_selector(i): i["name"] for i in self.abi if i["type"] == "function" } def __call__(self, address: str, owner: Optional[AccountsType] = None) -> "Contract": return Contract.from_abi(self._name, address, self.abi, owner) def __repr__(self) -> str: return f"<{type(self).__name__} '{self._name}'>" class _DeployedContractBase(_ContractBase): """Methods for interacting with a deployed contract. Each public contract method is available as a ContractCall or ContractTx instance, created when this class is instantiated. Attributes: bytecode: Bytecode of the deployed contract, including constructor args. 
tx: TransactionReceipt of the of the tx that deployed the contract.""" _reverted = False def __init__( self, address: str, owner: Optional[AccountsType] = None, tx: TransactionReceiptType = None ) -> None: address = _resolve_address(address) self.bytecode = web3.eth.getCode(address).hex()[2:] if not self.bytecode: raise ContractNotFound(f"No contract deployed at {address}") self._owner = owner self.tx = tx self.address = address _add_deployment_topics(address, self.abi) fn_names = [i["name"] for i in self.abi if i["type"] == "function"] for abi in [i for i in self.abi if i["type"] == "function"]: name = f"{self._name}.{abi['name']}" sig = build_function_signature(abi) natspec: Dict = {} if self._build.get("natspec"): natspec = self._build["natspec"]["methods"].get(sig, {}) if fn_names.count(abi["name"]) == 1: fn = _get_method_object(address, abi, name, owner, natspec) self._check_and_set(abi["name"], fn) continue # special logic to handle function overloading if not hasattr(self, abi["name"]): overloaded = OverloadedMethod(address, name, owner) self._check_and_set(abi["name"], overloaded) getattr(self, abi["name"])._add_fn(abi, natspec) def _check_and_set(self, name: str, obj: Any) -> None: if name == "balance": warnings.warn( f"'{self._name}' defines a 'balance' function, " f"'{self._name}.balance' is unavailable", BrownieEnvironmentWarning, ) elif hasattr(self, name): raise AttributeError(f"Namespace collision: '{self._name}.{name}'") setattr(self, name, obj) def __hash__(self) -> int: return hash(f"{self._name}{self.address}{self._project}") def __str__(self) -> str: return self.address def __repr__(self) -> str: alias = self._build.get("alias") if alias: return f"<'{alias}' Contract '{self.address}'>" return f"<{self._name} Contract '{self.address}'>" def __eq__(self, other: object) -> bool: if isinstance(other, _DeployedContractBase): return self.address == other.address and self.bytecode == other.bytecode if isinstance(other, str): try: address = 
_resolve_address(other) return address == self.address except ValueError: return False return super().__eq__(other) def __getattribute__(self, name: str) -> Any: if super().__getattribute__("_reverted"): raise ContractNotFound("This contract no longer exists.") try: return super().__getattribute__(name) except AttributeError: raise AttributeError(f"Contract '{self._name}' object has no attribute '{name}'") def get_method_object(self, calldata: str) -> Optional["_ContractMethod"]: """ Given a calldata hex string, returns a `ContractMethod` object. """ sig = calldata[:10].lower() if sig not in self.selectors: return None fn = getattr(self, self.selectors[sig], None) if isinstance(fn, OverloadedMethod): return next((v for v in fn.methods.values() if v.signature == sig), None) return fn def balance(self) -> Wei: """Returns the current ether balance of the contract, in wei.""" balance = web3.eth.getBalance(self.address) return Wei(balance) def _deployment_path(self) -> Optional[Path]: if not self._project._path or ( CONFIG.network_type != "live" and not CONFIG.settings["dev_deployment_artifacts"] ): return None chainid = CONFIG.active_network["chainid"] if CONFIG.network_type == "live" else "dev" path = self._project._build_path.joinpath(f"deployments/{chainid}") path.mkdir(exist_ok=True) return path.joinpath(f"{self.address}.json") def _save_deployment(self) -> None: path = self._deployment_path() chainid = CONFIG.active_network["chainid"] if CONFIG.network_type == "live" else "dev" deployment_build = self._build.copy() deployment_build["deployment"] = { "address": self.address, "chainid": chainid, "blockHeight": web3.eth.blockNumber, } if path: self._project._add_to_deployment_map(self) if not path.exists(): with path.open("w") as fp: json.dump(deployment_build, fp) def _delete_deployment(self) -> None: path = self._deployment_path() if path: self._project._remove_from_deployment_map(self) if path.exists(): path.unlink() class Contract(_DeployedContractBase): """ 
Object to interact with a deployed contract outside of a project. """ def __init__( self, address_or_alias: str, *args: Any, owner: Optional[AccountsType] = None, **kwargs: Any ) -> None: """ Recreate a `Contract` object from the local database. The init method is used to access deployments that have already previously been stored locally. For new deployments use `from_abi`, `from_ethpm` or `from_etherscan`. Arguments --------- address_or_alias : str Address or user-defined alias of the deployment. owner : Account, optional Contract owner. If set, transactions without a `from` field will be performed using this account. """ if args or kwargs: warnings.warn( "Initializing `Contract` in this manner is deprecated." " Use `from_abi` or `from_ethpm` instead.", DeprecationWarning, ) kwargs["owner"] = owner return self._deprecated_init(address_or_alias, *args, **kwargs) address = "" try: address = _resolve_address(address_or_alias) build, sources = _get_deployment(address) except Exception: build, sources = _get_deployment(alias=address_or_alias) if build is not None: address = build["address"] if build is None or sources is None: if ( not address or not CONFIG.settings.get("autofetch_sources") or not CONFIG.active_network.get("explorer") ): if not address: raise ValueError(f"Unknown alias: {address_or_alias}") else: raise ValueError(f"Unknown contract address: {address}") contract = self.from_explorer(address, owner=owner, silent=True) build, sources = contract._build, contract._sources address = contract.address _ContractBase.__init__(self, None, build, sources) _DeployedContractBase.__init__(self, address, owner) def _deprecated_init( self, name: str, address: Optional[str] = None, abi: Optional[List] = None, manifest_uri: Optional[str] = None, owner: Optional[AccountsType] = None, ) -> None: if manifest_uri and abi: raise ValueError("Contract requires either abi or manifest_uri, but not both") if manifest_uri is not None: manifest = ethpm.get_manifest(manifest_uri) 
abi = manifest["contract_types"][name]["abi"] if address is None: address_list = ethpm.get_deployment_addresses(manifest, name) if not address_list: raise ContractNotFound( f"'{manifest['package_name']}' manifest does not contain" f" a deployment of '{name}' on this chain" ) if len(address_list) > 1: raise ValueError( f"'{manifest['package_name']}' manifest contains more than one " f"deployment of '{name}' on this chain, you must specify an address:" f" {', '.join(address_list)}" ) address = address_list[0] name = manifest["contract_types"][name]["contract_name"] elif not address: raise TypeError("Address cannot be None unless creating object from manifest") build = {"abi": abi, "contractName": name, "type": "contract"} _ContractBase.__init__(self, None, build, {}) # type: ignore _DeployedContractBase.__init__(self, address, owner, None) @classmethod def from_abi( cls, name: str, address: str, abi: List, owner: Optional[AccountsType] = None ) -> "Contract": """ Create a new `Contract` object from an ABI. Arguments --------- name : str Name of the contract. address : str Address where the contract is deployed. abi : dict Contract ABI, given as a dictionary. owner : Account, optional Contract owner. If set, transactions without a `from` field will be performed using this account. """ address = _resolve_address(address) build = {"abi": abi, "address": address, "contractName": name, "type": "contract"} self = cls.__new__(cls) _ContractBase.__init__(self, None, build, {}) # type: ignore _DeployedContractBase.__init__(self, address, owner, None) _add_deployment(self) return self @classmethod def from_ethpm( cls, name: str, manifest_uri: str, address: Optional[str] = None, owner: Optional[AccountsType] = None, ) -> "Contract": """ Create a new `Contract` object from an ethPM manifest. Arguments --------- name : str Name of the contract. manifest_uri : str erc1319 registry URI where the manifest is located address : str optional Address where the contract is deployed. 
Only required if the manifest contains more than one deployment with the given name on the active chain. owner : Account, optional Contract owner. If set, transactions without a `from` field will be performed using this account. """ manifest = ethpm.get_manifest(manifest_uri) if address is None: address_list = ethpm.get_deployment_addresses(manifest, name) if not address_list: raise ContractNotFound( f"'{manifest['package_name']}' manifest does not contain" f" a deployment of '{name}' on this chain" ) if len(address_list) > 1: raise ValueError( f"'{manifest['package_name']}' manifest contains more than one " f"deployment of '{name}' on this chain, you must specify an address:" f" {', '.join(address_list)}" ) address = address_list[0] manifest["contract_types"][name]["contract_name"] build = { "abi": manifest["contract_types"][name]["abi"], "contractName": name, "natspec": manifest["contract_types"][name]["natspec"], "type": "contract", } self = cls.__new__(cls) _ContractBase.__init__(self, None, build, manifest["sources"]) # type: ignore _DeployedContractBase.__init__(self, address, owner) _add_deployment(self) return self @classmethod def from_explorer( cls, address: str, as_proxy_for: Optional[str] = None, owner: Optional[AccountsType] = None, silent: bool = False, ) -> "Contract": """ Create a new `Contract` object with source code queried from a block explorer. Arguments --------- address : str Address where the contract is deployed. as_proxy_for : str, optional Address of the implementation contract, if `address` is a proxy contract. The generated object will send transactions to `address`, but use the ABI and NatSpec of `as_proxy_for`. This field is only required when the block explorer API does not provide an implementation address. owner : Account, optional Contract owner. If set, transactions without a `from` field will be performed using this account. 
""" address = _resolve_address(address) data = _fetch_from_explorer(address, "getsourcecode", silent) is_verified = bool(data["result"][0].get("SourceCode")) if is_verified: abi = json.loads(data["result"][0]["ABI"]) name = data["result"][0]["ContractName"] else: # if the source is not available, try to fetch only the ABI try: data_abi = _fetch_from_explorer(address, "getabi", True) except ValueError as exc: _unverified_addresses.add(address) raise exc abi = json.loads(data_abi["result"].strip()) name = "UnknownContractName" warnings.warn( f"{address}: Was able to fetch the ABI but not the source code. " "Some functionality will not be available.", BrownieCompilerWarning, ) if as_proxy_for is None and data["result"][0].get("Implementation"): try: # many proxy patterns use an `implementation()` function, so first we # try to determine the implementation address without trusting etherscan contract = cls.from_abi(name, address, abi) as_proxy_for = contract.implementation() except Exception: as_proxy_for = _resolve_address(data["result"][0]["Implementation"]) if as_proxy_for == address: as_proxy_for = None # if this is a proxy, fetch information for the implementation contract if as_proxy_for is not None: implementation_contract = Contract.from_explorer(as_proxy_for) abi = implementation_contract._build["abi"] if not is_verified: return cls.from_abi(name, address, abi, owner) compiler_str = data["result"][0]["CompilerVersion"] if compiler_str.startswith("vyper:"): try: version = to_vyper_version(compiler_str[6:]) is_compilable = version in get_installable_vyper_versions() except Exception: is_compilable = False else: try: version = Version(compiler_str.lstrip("v")).truncate() is_compilable = ( version >= Version("0.4.22") and version in solcx.get_installable_solc_versions() + solcx.get_installed_solc_versions() ) except Exception: is_compilable = False if not is_compilable: if not silent: warnings.warn( f"{address}: target compiler '{compiler_str}' cannot be installed 
or is not " "supported by Brownie. Some debugging functionality will not be available.", BrownieCompilerWarning, ) return cls.from_abi(name, address, abi, owner) optimizer = { "enabled": bool(int(data["result"][0]["OptimizationUsed"])), "runs": int(data["result"][0]["Runs"]), } evm_version = data["result"][0].get("EVMVersion", "Default") if evm_version == "Default": evm_version = None source_str = "\n".join(data["result"][0]["SourceCode"].splitlines()) if source_str.startswith("{{"): # source was verified using compiler standard JSON input_json = json.loads(source_str[1:-1]) sources = {k: v["content"] for k, v in input_json["sources"].items()} evm_version = input_json["settings"].get("evmVersion", evm_version) compiler.set_solc_version(str(version)) input_json.update( compiler.generate_input_json(sources, optimizer=optimizer, evm_version=evm_version) ) output_json = compiler.compile_from_input_json(input_json) build_json = compiler.generate_build_json(input_json, output_json) else: if source_str.startswith("{"): # source was submitted as multiple files sources = {k: v["content"] for k, v in json.loads(source_str).items()} else: # source was submitted as a single file if compiler_str.startswith("vyper"): path_str = f"{name}.vy" else: path_str = f"{name}-flattened.sol" sources = {path_str: source_str} build_json = compiler.compile_and_format( sources, solc_version=str(version), vyper_version=str(version), optimizer=optimizer, evm_version=evm_version, ) build_json = build_json[name] if as_proxy_for is not None: build_json.update(abi=abi, natspec=implementation_contract._build.get("natspec")) if not _verify_deployed_code( address, build_json["deployedBytecode"], build_json["language"] ): warnings.warn( f"{address}: Locally compiled and on-chain bytecode do not match!", BrownieCompilerWarning, ) del build_json["pcMap"] self = cls.__new__(cls) _ContractBase.__init__(self, None, build_json, sources) # type: ignore _DeployedContractBase.__init__(self, address, owner) 
_add_deployment(self) return self def set_alias(self, alias: Optional[str]) -> None: """ Apply a unique alias this object. The alias can be used to restore the object in future sessions. Arguments --------- alias: str | None An alias to apply. If `None`, any existing alias is removed. """ if "chainid" not in CONFIG.active_network: raise ValueError("Cannot set aliases in a development environment") if alias is not None: if "." in alias or alias.lower().startswith("0x"): raise ValueError("Invalid alias") build, _ = _get_deployment(alias=alias) if build is not None: if build["address"] != self.address: raise ValueError("Alias is already in use on another contract") return _add_deployment(self, alias) self._build["alias"] = alias @property def alias(self) -> Optional[str]: return self._build.get("alias") class ProjectContract(_DeployedContractBase): """Methods for interacting with a deployed contract as part of a Brownie project.""" def __init__( self, project: Any, build: Dict, address: str, owner: Optional[AccountsType] = None, tx: TransactionReceiptType = None, ) -> None: _ContractBase.__init__(self, project, build, project._sources) _DeployedContractBase.__init__(self, address, owner, tx) class OverloadedMethod: def __init__(self, address: str, name: str, owner: Optional[AccountsType]): self._address = address self._name = name self._owner = owner self.methods: Dict = {} def _add_fn(self, abi: Dict, natspec: Dict) -> None: fn = _get_method_object(self._address, abi, self._name, self._owner, natspec) key = tuple(i["type"].replace("256", "") for i in abi["inputs"]) self.methods[key] = fn def _get_fn_from_args(self, args: Tuple) -> "_ContractMethod": input_length = len(args) if args and isinstance(args[-1], dict): input_length -= 1 keys = [i for i in self.methods if len(i) == input_length] if not keys: raise ValueError("No function matching the given number of arguments") if len(keys) > 1: raise ValueError( f"Contract has more than one function '{self._name}' 
requiring " f"{input_length} arguments. You must explicitly declare which function " f"you are calling, e.g. {self._name}['{','.join(keys[0])}'](*args)" ) return self.methods[keys[0]] def __getitem__(self, key: Union[Tuple, str]) -> "_ContractMethod": if isinstance(key, str): key = tuple(i.strip() for i in key.split(",")) key = tuple(i.replace("256", "") for i in key) return self.methods[key] def __repr__(self) -> str: return f"<OverloadedMethod '{self._name}'>" def __len__(self) -> int: return len(self.methods) def __call__(self, *args: Tuple) -> Any: fn = self._get_fn_from_args(args) return fn(*args) # type: ignore def call(self, *args: Tuple, block_identifier: Union[int, str, bytes] = None) -> Any: """ Call the contract method without broadcasting a transaction. The specific function called is chosen based on the number of arguments given. If more than one function exists with this number of arguments, a `ValueError` is raised. Arguments --------- *args Contract method inputs. You can optionally provide a dictionary of transaction properties as the last arg. block_identifier : int | str | bytes, optional A block number or hash that the call is executed at. If not given, the latest block used. Raises `ValueError` if this value is too far in the past and you are not using an archival node. Returns ------- Contract method return value(s). """ fn = self._get_fn_from_args(args) return fn.call(*args, block_identifier=block_identifier) def transact(self, *args: Tuple) -> TransactionReceiptType: """ Broadcast a transaction that calls this contract method. The specific function called is chosen based on the number of arguments given. If more than one function exists with this number of arguments, a `ValueError` is raised. Arguments --------- *args Contract method inputs. You can optionally provide a dictionary of transaction properties as the last arg. Returns ------- TransactionReceipt Object representing the broadcasted transaction. 
""" fn = self._get_fn_from_args(args) return fn.transact(*args) def encode_input(self, *args: Tuple) -> Any: """ Generate encoded ABI data to call the method with the given arguments. Arguments --------- *args Contract method inputs Returns ------- str Hexstring of encoded ABI data """ fn = self._get_fn_from_args(args) return fn.encode_input(*args) class _ContractMethod: _dir_color = "bright magenta" def __init__( self, address: str, abi: Dict, name: str, owner: Optional[AccountsType], natspec: Optional[Dict] = None, ) -> None: self._address = address self._name = name self.abi = abi self._owner = owner self.signature = build_function_selector(abi) self._input_sig = build_function_signature(abi) self.natspec = natspec or {} def __repr__(self) -> str: pay = "payable " if self.payable else "" return f"<{type(self).__name__} {pay}'{self.abi['name']}({_inputs(self.abi)})'>" @property def payable(self) -> bool: if "payable" in self.abi: return self.abi["payable"] else: return self.abi["stateMutability"] == "payable" @staticmethod def _autosuggest(obj: "_ContractMethod") -> List: # this is a staticmethod to be compatible with `_call_suggest` and `_transact_suggest` return _contract_method_autosuggest( obj.abi["inputs"], isinstance(obj, ContractTx), obj.payable ) def info(self) -> None: """ Display NatSpec documentation for this method. """ print(f"{self.abi['name']}({_inputs(self.abi)})") _print_natspec(self.natspec) def call(self, *args: Tuple, block_identifier: Union[int, str, bytes] = None) -> Any: """ Call the contract method without broadcasting a transaction. Arguments --------- *args Contract method inputs. You can optionally provide a dictionary of transaction properties as the last arg. block_identifier : int | str | bytes, optional A block number or hash that the call is executed at. If not given, the latest block used. Raises `ValueError` if this value is too far in the past and you are not using an archival node. 
Returns ------- Contract method return value(s). """ args, tx = _get_tx(self._owner, args) if tx["from"]: tx["from"] = str(tx["from"]) del tx["required_confs"] tx.update({"to": self._address, "data": self.encode_input(*args)}) try: data = web3.eth.call({k: v for k, v in tx.items() if v}, block_identifier) except ValueError as e: raise VirtualMachineError(e) from None if HexBytes(data)[:4].hex() == "0x08c379a0": revert_str = eth_abi.decode_abi(["string"], HexBytes(data)[4:])[0] raise ValueError(f"Call reverted: {revert_str}") if self.abi["outputs"] and not data: raise ValueError("No data was returned - the call likely reverted") return self.decode_output(data) def transact(self, *args: Tuple) -> TransactionReceiptType: """ Broadcast a transaction that calls this contract method. Arguments --------- *args Contract method inputs. You can optionally provide a dictionary of transaction properties as the last arg. Returns ------- TransactionReceipt Object representing the broadcasted transaction. """ args, tx = _get_tx(self._owner, args) if not tx["from"]: raise AttributeError( "Contract has no owner, you must supply a tx dict" " as the last argument with a 'from' field." ) return tx["from"].transfer( self._address, tx["value"], gas_limit=tx["gas"], gas_buffer=tx["gas_buffer"], gas_price=tx["gasPrice"], nonce=tx["nonce"], required_confs=tx["required_confs"], data=self.encode_input(*args), ) def decode_input(self, hexstr: str) -> List: """ Decode input call data for this method. Arguments --------- hexstr : str Hexstring of input call data Returns ------- Decoded values """ types_list = get_type_strings(self.abi["inputs"]) result = eth_abi.decode_abi(types_list, HexBytes(hexstr)[4:]) return format_input(self.abi, result) def encode_input(self, *args: Tuple) -> str: """ Generate encoded ABI data to call the method with the given arguments. 
Arguments --------- *args Contract method inputs Returns ------- str Hexstring of encoded ABI data """ data = format_input(self.abi, args) types_list = get_type_strings(self.abi["inputs"]) return self.signature + eth_abi.encode_abi(types_list, data).hex() def decode_output(self, hexstr: str) -> Tuple: """ Decode hexstring data returned by this method. Arguments --------- hexstr : str Hexstring of returned call data Returns ------- Decoded values """ types_list = get_type_strings(self.abi["outputs"]) result = eth_abi.decode_abi(types_list, HexBytes(hexstr)) result = format_output(self.abi, result) if len(result) == 1: result = result[0] return result def estimate_gas(self, *args: Tuple) -> int: """ Estimate the gas cost for a transaction. Raises VirtualMachineError if the transaction would revert. Arguments --------- *args Contract method inputs Returns ------- int Estimated gas value in wei. """ args, tx = _get_tx(self._owner, args) if not tx["from"]: raise AttributeError( "Contract has no owner, you must supply a tx dict" " as the last argument with a 'from' field." ) return tx["from"].estimate_gas( to=self._address, amount=tx["value"], gas_price=tx["gasPrice"], data=self.encode_input(*args), ) class ContractTx(_ContractMethod): """ A public payable or non-payable contract method. Attributes ---------- abi : dict Contract ABI specific to this method. signature : str Bytes4 method signature. """ def __call__(self, *args: Tuple) -> TransactionReceiptType: """ Broadcast a transaction that calls this contract method. Arguments --------- *args Contract method inputs. You can optionally provide a dictionary of transaction properties as the last arg. Returns ------- TransactionReceipt Object representing the broadcasted transaction. """ return self.transact(*args) class ContractCall(_ContractMethod): """ A public view or pure contract method. Attributes ---------- abi : dict Contract ABI specific to this method. signature : str Bytes4 method signature. 
""" def __call__(self, *args: Tuple, block_identifier: Union[int, str, bytes] = None) -> Any: """ Call the contract method without broadcasting a transaction. Arguments --------- args Contract method inputs. You can optionally provide a dictionary of transaction properties as the last arg. block_identifier : int | str | bytes, optional A block number or hash that the call is executed at. If not given, the latest block used. Raises `ValueError` if this value is too far in the past and you are not using an archival node. Returns ------- Contract method return value(s). """ if not CONFIG.argv["always_transact"] or block_identifier is not None: return self.call(*args, block_identifier=block_identifier) args, tx = _get_tx(self._owner, args) tx.update({"gas_price": 0, "from": self._owner or accounts[0]}) pc, revert_msg = None, None try: self.transact(*args, tx) chain.undo() except VirtualMachineError as exc: pc, revert_msg = exc.pc, exc.revert_msg chain.undo() except Exception: pass try: return self.call(*args) except VirtualMachineError as exc: if pc == exc.pc and revert_msg and exc.revert_msg is None: # in case we miss a dev revert string exc.revert_msg = revert_msg raise exc def _get_tx(owner: Optional[AccountsType], args: Tuple) -> Tuple: # set / remove default sender if owner is None: owner = accounts.default default_owner = CONFIG.active_network["settings"]["default_contract_owner"] if CONFIG.mode == "test" and default_owner is False: owner = None # seperate contract inputs from tx dict and set default tx values tx = { "from": owner, "value": 0, "gas": None, "gas_buffer": None, "gasPrice": None, "nonce": None, "required_confs": 1, } if args and isinstance(args[-1], dict): tx.update(args[-1]) args = args[:-1] for key, target in [("amount", "value"), ("gas_limit", "gas"), ("gas_price", "gasPrice")]: if key in tx: tx[target] = tx[key] if isinstance(tx["from"], str): tx["from"] = accounts.at(tx["from"], force=True) return args, tx def _get_method_object( address: str, 
abi: Dict, name: str, owner: Optional[AccountsType], natspec: Dict ) -> Union["ContractCall", "ContractTx"]: if "constant" in abi: constant = abi["constant"] else: constant = abi["stateMutability"] in ("view", "pure") if constant: return ContractCall(address, abi, name, owner, natspec) return ContractTx(address, abi, name, owner, natspec) def _inputs(abi: Dict) -> str: types_list = get_type_strings(abi["inputs"], {"fixed168x10": "decimal"}) params = zip([i["name"] for i in abi["inputs"]], types_list) return ", ".join( f"{i[1]}{color('bright blue')}{' '+i[0] if i[0] else ''}{color}" for i in params ) def _verify_deployed_code(address: str, expected_bytecode: str, language: str) -> bool: actual_bytecode = web3.eth.getCode(address).hex()[2:] expected_bytecode = remove_0x_prefix(expected_bytecode) # type: ignore if expected_bytecode.startswith("730000000000000000000000000000000000000000"): # special case for Solidity libraries return ( actual_bytecode.startswith(f"73{address[2:].lower()}") and actual_bytecode[42:] == expected_bytecode[42:] ) if "_" in expected_bytecode: for marker in re.findall("_{1,}[^_]*_{1,}", expected_bytecode): idx = expected_bytecode.index(marker) actual_bytecode = actual_bytecode[:idx] + actual_bytecode[idx + 40 :] expected_bytecode = expected_bytecode[:idx] + expected_bytecode[idx + 40 :] if language == "Solidity": # do not include metadata in comparison idx = -(int(actual_bytecode[-4:], 16) + 2) * 2 actual_bytecode = actual_bytecode[:idx] idx = -(int(expected_bytecode[-4:], 16) + 2) * 2 expected_bytecode = expected_bytecode[:idx] return actual_bytecode == expected_bytecode def _print_natspec(natspec: Dict) -> None: wrapper = TextWrapper(initial_indent=f" {color('bright magenta')}") for key in [i for i in ("title", "notice", "author", "details") if i in natspec]: wrapper.subsequent_indent = " " * (len(key) + 4) print(wrapper.fill(f"@{key} {color}{natspec[key]}")) for key, value in natspec.get("params", {}).items(): wrapper.subsequent_indent = " 
" * 9 print(wrapper.fill(f"@param {color('bright blue')}{key}{color} {value}")) if "return" in natspec: wrapper.subsequent_indent = " " * 10 print(wrapper.fill(f"@return {color}{natspec['return']}")) for key in sorted(natspec.get("returns", [])): wrapper.subsequent_indent = " " * 10 print(wrapper.fill(f"@return {color}{natspec['returns'][key]}")) print() def _fetch_from_explorer(address: str, action: str, silent: bool) -> Dict: url = CONFIG.active_network.get("explorer") if url is None: raise ValueError("Explorer API not set for this network") if address in _unverified_addresses: raise ValueError(f"Source for {address} has not been verified") params: Dict = {"module": "contract", "action": action, "address": address} if "etherscan" in url: if os.getenv("ETHERSCAN_TOKEN"): params["apiKey"] = os.getenv("ETHERSCAN_TOKEN") elif not silent: warnings.warn( "No Etherscan API token set. You may experience issues with rate limiting. " "Visit https://etherscan.io/register to obtain a token, and then store it " "as the environment variable $ETHERSCAN_TOKEN", BrownieEnvironmentWarning, ) if not silent: print( f"Fetching source of {color('bright blue')}{address}{color} " f"from {color('bright blue')}{urlparse(url).netloc}{color}..." 
) response = requests.get(url, params=params, headers=REQUEST_HEADERS) if response.status_code != 200: raise ConnectionError(f"Status {response.status_code} when querying {url}: {response.text}") data = response.json() if int(data["status"]) != 1: raise ValueError(f"Failed to retrieve data from API: {data['result']}") return data # console auto-completion logic def _call_autosuggest(method: Any) -> List: # since methods are not unique for each object, we use `__reduce__` # to locate the specific object so we can access the correct ABI method = method.__reduce__()[1][0] return _contract_method_autosuggest(method.abi["inputs"], False, False) def _transact_autosuggest(method: Any) -> List: method = method.__reduce__()[1][0] return _contract_method_autosuggest(method.abi["inputs"], True, method.payable) # assign the autosuggest functionality to various methods ContractConstructor.encode_input.__dict__["_autosuggest"] = _call_autosuggest _ContractMethod.call.__dict__["_autosuggest"] = _call_autosuggest _ContractMethod.encode_input.__dict__["_autosuggest"] = _call_autosuggest ContractConstructor.estimate_gas.__dict__["_autosuggest"] = _transact_autosuggest _ContractMethod.estimate_gas.__dict__["_autosuggest"] = _transact_autosuggest _ContractMethod.transact.__dict__["_autosuggest"] = _transact_autosuggest def _contract_method_autosuggest(args: List, is_transaction: bool, is_payable: bool) -> List: types_list = get_type_strings(args, {"fixed168x10": "decimal"}) params = zip([i["name"] for i in args], types_list) if not is_transaction: tx_hint: List = [] elif is_payable: tx_hint = [" {'from': Account", " 'value': Wei}"] else: tx_hint = [" {'from': Account}"] return [f" {i[1]}{' '+i[0] if i[0] else ''}" for i in params] + tx_hint
35.572028
100
0.594971
80937a6295ec4adc9654321c4f5fea5721769eaf
828
py
Python
sdk/python/pulumi_oci/datascience/__init__.py
EladGabay/pulumi-oci
6841e27d4a1a7e15c672306b769912efbfd3ba99
[ "ECL-2.0", "Apache-2.0" ]
5
2021-08-17T11:14:46.000Z
2021-12-31T02:07:03.000Z
sdk/python/pulumi_oci/datascience/__init__.py
pulumi-oci/pulumi-oci
6841e27d4a1a7e15c672306b769912efbfd3ba99
[ "ECL-2.0", "Apache-2.0" ]
1
2021-09-06T11:21:29.000Z
2021-09-06T11:21:29.000Z
sdk/python/pulumi_oci/datascience/__init__.py
pulumi-oci/pulumi-oci
6841e27d4a1a7e15c672306b769912efbfd3ba99
[ "ECL-2.0", "Apache-2.0" ]
2
2021-08-24T23:31:30.000Z
2022-01-02T19:26:54.000Z
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** from .. import _utilities import typing # Export this package's modules as members: from .get_model import * from .get_model_deployment import * from .get_model_deployment_shapes import * from .get_model_deployments import * from .get_model_provenance import * from .get_models import * from .get_notebook_session import * from .get_notebook_session_shapes import * from .get_notebook_sessions import * from .get_project import * from .get_projects import * from .model import * from .model_deployment import * from .model_provenance import * from .notebook_session import * from .project import * from ._inputs import * from . import outputs
31.846154
87
0.778986
47ceb43405e51b11b8ac1b9bedd204f32b75ab56
132
py
Python
pogam/scrapers/exceptions.py
ludaavics/yogam
ee92a8c712f27c132634d55a2c90e2aed8402330
[ "MIT" ]
null
null
null
pogam/scrapers/exceptions.py
ludaavics/yogam
ee92a8c712f27c132634d55a2c90e2aed8402330
[ "MIT" ]
16
2019-12-27T22:06:13.000Z
2020-03-08T01:57:40.000Z
pogam/scrapers/exceptions.py
ludaavics/yogam
ee92a8c712f27c132634d55a2c90e2aed8402330
[ "MIT" ]
1
2019-12-29T13:56:04.000Z
2019-12-29T13:56:04.000Z
import requests


class Captcha(requests.exceptions.RequestException):
    """Raised when the remote site answers with a captcha page instead of results."""


class ListingParsingError(RuntimeError):
    """Raised when a scraped listing cannot be parsed."""
13.2
52
0.787879
4cdf640791e03560eb60e84f25202abd6e3c7206
4,966
py
Python
scitbx/examples/bootstrap.py
dperl-sol/cctbx_project
b9e390221a2bc4fd00b9122e97c3b79c632c6664
[ "BSD-3-Clause-LBNL" ]
155
2016-11-23T12:52:16.000Z
2022-03-31T15:35:44.000Z
scitbx/examples/bootstrap.py
dperl-sol/cctbx_project
b9e390221a2bc4fd00b9122e97c3b79c632c6664
[ "BSD-3-Clause-LBNL" ]
590
2016-12-10T11:31:18.000Z
2022-03-30T23:10:09.000Z
scitbx/examples/bootstrap.py
dperl-sol/cctbx_project
b9e390221a2bc4fd00b9122e97c3b79c632c6664
[ "BSD-3-Clause-LBNL" ]
115
2016-11-15T08:17:28.000Z
2022-02-09T15:30:14.000Z
from __future__ import absolute_import, division, print_function
import scitbx.lbfgs
import scitbx.math
from scitbx.array_family import flex
import math
from six.moves import range

"""
This example shows an easy way of obtaining reasonable estimates
of standard deviations of refinable parameters.
A sample run is shown below:

generated data:
x_obs y_obs
0.0 1.89158676083
1.0 7.26573570715
2.0 17.4139845949
3.0 34.7247015887
4.0 57.2279161206
5.0 86.7943470166
6.0 121.658728013
7.0 163.13278756
8.0 209.591863552
9.0 262.234445997
10.0 321.838939765
11.0 386.669697396
12.0 457.737267767
13.0 534.242112317
14.0 618.481785819
15.0 706.341196433
16.0 801.835917893
17.0 903.488114864
18.0 1010.44961062
19.0 1122.7567835

final residual of fit on 'true' data
2.54570348084

Resulting fit and ESDs.
-------------------------------------------
 True and fitted coeffcients
-------------------------------------------
a 1.0 1.90288360993
b 2.0 1.92697354706
c 3.0 3.00469428576
-------------------------------------------
 Bootstrapped mean and standard deviations
-------------------------------------------
a 1.91734755932 0.225087363373
b 1.92587646901 0.0510724881016
c 3.00476495957 0.00291754050371

Cross-check with GNUPLOT fitting shows a good
correspondence with bootstrap results:
=================================================================
After 5 iterations the fit converged.
final sum of squares of residuals : 2.5457
rel. change during last iteration : -4.58051e-07

degrees of freedom    (ndf) : 17
rms of residuals      (stdfit) = sqrt(WSSR/ndf)      : 0.386972
variance of residuals (reduced chisquare) = WSSR/ndf : 0.149747

Final set of parameters            Asymptotic Standard Error
=======================            ==========================
a               = 1.90288          +/- 0.2356       (12.38%)
b               = 1.92697          +/- 0.05748      (2.983%)
c               = 3.00469          +/- 0.002921     (0.0972%)

correlation matrix of the fit parameters:
               a      b      c
a          1.000
b         -0.840  1.000
c          0.706 -0.965  1.000
=================================================================
"""


class polynomial_fit:
    """Least-squares polynomial fit driven by the scitbx LBFGS minimizer.

    The instance itself is the ``target_evaluator`` passed to
    ``scitbx.lbfgs.run``; the minimizer repeatedly calls
    ``compute_functional_and_gradients`` and updates ``self.x``.
    """

    def __init__(self, x_obs, y_obs, w_obs, n):
        # x_obs, y_obs, w_obs: flex.double arrays of equal length; n: number
        # of polynomial coefficients (must be smaller than the data count).
        assert x_obs.size() == y_obs.size()
        assert n < x_obs.size()
        self.x_obs = x_obs
        self.y_obs = y_obs
        # NOTE(review): the supplied weights are deliberately overwritten
        # with unit weights here (w*w*0 + 1.0) — unweighted least squares.
        self.w_obs = w_obs*w_obs*0+1.0
        self.n = n
        # self.x is the parameter vector the LBFGS minimizer refines.
        self.x = flex.double(self.n, 0)
        self.minimizer = scitbx.lbfgs.run(target_evaluator=self)
        # Expose the refined coefficients as self.a and drop the LBFGS slot.
        self.a = self.x
        del self.x

    def compute_functional_and_gradients(self):
        # Called by the LBFGS driver: returns (residual sum, gradient vector).
        self.a = self.x
        # y_calc = sum_i a[i] * x^i
        y_calc = flex.double(self.x_obs.size(), 0)
        for i in range(self.n):
            y_calc = y_calc + (self.a[i])*flex.pow(self.x_obs, i)
        y_diff = self.y_obs - y_calc
        # Weighted sum of squared residuals (weights are all 1.0, see __init__).
        f = flex.sum(y_diff*y_diff/self.w_obs)
        # Analytic gradient: df/da_i = -2 * sum( (y_diff/w) * x^i )
        g = flex.double(self.n, 0)
        for i in range(self.n):
            g[i] = -flex.sum(2.0*(y_diff/self.w_obs)*flex.pow(self.x_obs, i))
        # Prints the residual at every LBFGS step (progress trace).
        print(f)
        return f, g


class fake_data(object):
    """Non-parametric bootstrap resampler for paired (x, y) observations."""

    def __init__(self, x_data, y_data):
        self.x_data = x_data
        self.y_data = y_data
        ## Make a permuation reference, this allows one to
        ## do non parametric resampling of multidimensional data
        # Resampling index positions (not values) keeps x/y pairs together.
        self.permutation_reference = flex.double(range(x_data.size()))
        self.non_para_bootstrap = scitbx.math.non_parametric_bootstrap(
            self.permutation_reference, 0)

    def fake_it(self, size):
        """Draw ``size`` (x, y) pairs with replacement from the stored data."""
        selection_set = self.non_para_bootstrap.draw(size)
        # Convert the drawn (float) positions into an int selection array.
        isel = flex.int()
        for element in selection_set:
            isel.append(int(element))
        new_x = flex.double(flex.select(self.x_data, isel))
        new_y = flex.double(flex.select(self.y_data, isel))
        return new_x, new_y


def example():
    """Generate noisy quadratic data, fit it, and bootstrap coefficient ESDs."""
    x_obs = flex.double(range(20))
    # True coefficients of y = a + b*x + c*x^2.
    a = flex.double([1, 2, 3])
    w_obs = flex.double(20, 100.0)
    y_ideal = a[0] + a[1]*x_obs + a[2]*x_obs*x_obs
    # Add uniform noise scaled by 1.5.
    y_obs = y_ideal + flex.random_double(size=x_obs.size())*1.5

    for ii in range(20):
        print(x_obs[ii], y_obs[ii])

    faker = fake_data(x_obs, y_obs)
    fit = polynomial_fit(x_obs=x_obs, y_obs=y_obs, w_obs=w_obs, n=3)
    print("------------------------------------------- ")
    print(" True and fitted coeffcients")
    print("------------------------------------------- ")
    for i in range(a.size()):
        print(i, a[i], fit.a[i])

    print("------------------------------------------- ")
    print(" Bootstrapped mean and standard deviations")
    print("------------------------------------------- ")
    # Accumulate first and second moments over 100 bootstrap refits.
    mean = [0, 0, 0]
    std = [0, 0, 0]
    for trial in range(100):
        x_new, y_new = faker.fake_it(20)
        fit = polynomial_fit(x_obs=x_new, y_obs=y_new, w_obs=w_obs, n=3)
        for i in range(a.size()):
            mean[i] += fit.a[i]
            std[i] += fit.a[i]*fit.a[i]
    for i in range(3):
        mean[i] /= 100.0
        std[i] /= 100.0
        # Var = E[a^2] - E[a]^2; std is its square root.
        std[i] -= mean[i]*mean[i]
        std[i] = math.sqrt(std[i])
        print(i, mean[i], std[i])


if (__name__ == "__main__"):
    example()
27.136612
84
0.581957
afd7963733fb3b067eeb6fa4e53f70db500b8a50
11,199
py
Python
equip_property/equipment.py
dkratzert/FinalCif
07ca23dbb4e7439b108a906521a118cdb876d97e
[ "Beerware" ]
13
2020-01-14T16:23:48.000Z
2022-02-16T18:02:08.000Z
equip_property/equipment.py
dkratzert/FinalCif
07ca23dbb4e7439b108a906521a118cdb876d97e
[ "Beerware" ]
24
2021-04-21T05:30:42.000Z
2022-03-31T20:07:29.000Z
equip_property/equipment.py
dkratzert/FinalCif
07ca23dbb4e7439b108a906521a118cdb876d97e
[ "Beerware" ]
1
2021-08-09T16:48:33.000Z
2021-08-09T16:48:33.000Z
from bisect import bisect
from contextlib import suppress
from pathlib import Path
from typing import Tuple

from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QListWidgetItem
from gemmi import cif

from cif.cif_file_io import CifContainer
from cif.core_dict import cif_all_dict
from cif.text import retranslate_delimiter
from equip_property.tools import read_document_from_cif_file
from gui.custom_classes import COL_CIF, COL_DATA, light_green, COL_EDIT
from gui.dialogs import show_general_warning, cif_file_open_dialog, cif_file_save_dialog
from tools import misc
from tools.misc import include_equipment_imports
from tools.settings import FinalCifSettings

# Imported only for the type annotation below; may be absent at runtime.
with suppress(ImportError):
    from appwindow import AppWindow


class Equipment:
    """Manages equipment templates in the FinalCif GUI.

    Wires up the equipment-related widgets of the main window, loads and
    stores templates via ``FinalCifSettings``, and can import/export a
    template from/to a CIF file.
    """

    def __init__(self, app: 'AppWindow', settings: FinalCifSettings):
        self.app = app
        self.settings = settings
        # Page 0 of the stacked widget shows the template list (not the editor).
        self.app.ui.EquipmentTemplatesStackedWidget.setCurrentIndex(0)
        self.app.ui.EquipmentEditTableWidget.verticalHeader().hide()
        self.signals_and_slots()
        self.load_default_equipment()

    def signals_and_slots(self):
        """Connect all equipment-related widget signals to their handlers."""
        ## equipment
        self.app.ui.EditEquipmentTemplateButton.clicked.connect(self.edit_equipment_template)
        self.app.ui.SaveEquipmentButton.clicked.connect(self.save_equipment_template)
        self.app.ui.CancelEquipmentButton.clicked.connect(self.cancel_equipment_template)
        self.app.ui.DeleteEquipmentButton.clicked.connect(self.delete_equipment)
        self.app.ui.ExportEquipmentButton.clicked.connect(self.export_equipment_template)
        self.app.ui.ImportEquipmentTemplateButton.clicked.connect(self.import_equipment_from_file)
        ## equipment
        # Several signals all trigger the same grow-on-demand behaviour of the
        # edit table, so there is always an empty row available for typing.
        self.app.ui.EquipmentEditTableWidget.cellPressed.connect(self.app.ui.EquipmentEditTableWidget.add_row_if_needed)
        self.app.ui.EquipmentEditTableWidget.itemSelectionChanged.connect(
            self.app.ui.EquipmentEditTableWidget.add_row_if_needed)
        self.app.ui.EquipmentEditTableWidget.itemEntered.connect(self.app.ui.EquipmentEditTableWidget.add_row_if_needed)
        self.app.ui.EquipmentEditTableWidget.cellChanged.connect(self.app.ui.EquipmentEditTableWidget.add_row_if_needed)
        self.app.ui.EquipmentEditTableWidget.currentItemChanged.connect(
            self.app.ui.EquipmentEditTableWidget.add_row_if_needed)
        self.app.ui.NewEquipmentTemplateButton.clicked.connect(self.new_equipment)
        self.app.ui.EquipmentTemplatesListWidget.doubleClicked.connect(self.load_selected_equipment)

    def show_equipment(self):
        """Repopulate the template list widget from the stored settings."""
        self.app.ui.EquipmentTemplatesListWidget.clear()
        for eq in self.settings.get_equipment_list():
            if eq:
                item = QListWidgetItem(eq)
                self.app.ui.EquipmentTemplatesListWidget.addItem(item)

    def load_selected_equipment(self) -> None:
        """
        Loads equipment data to be shown in the main Cif table.
        Not for template edititng!
        """
        listwidget = self.app.ui.EquipmentTemplatesListWidget
        equipment_name = listwidget.currentIndex().data()
        if not equipment_name:
            # Nothing selected in the list.
            return None
        equipment = self.settings.load_settings_list_as_dict(property='equipment', item_name=equipment_name)
        if self.app.ui.cif_main_table.vheaderitems:
            for key in equipment:
                if key not in self.app.ui.cif_main_table.vheaderitems:
                    # Key is not in the main table:
                    # insert it at its alphabetically sorted position.
                    self.app.add_row(key, equipment[key], at_start=False,
                                     position=bisect(self.app.ui.cif_main_table.vheaderitems, key))
                # Key is already there:
                # fill the CIF/data/edit columns for both the new and the
                # pre-existing row case.
                self.app.ui.cif_main_table.setText(key, COL_CIF, txt='?')
                self.app.ui.cif_main_table.setText(key, COL_DATA, txt=equipment[key], color=light_green)
                self.app.ui.cif_main_table.setText(key, COL_EDIT, txt=equipment[key])
        else:
            print('Empty main table!')

    def new_equipment(self) -> None:
        """Append an empty, immediately editable list entry for a new template."""
        item = QListWidgetItem('')
        self.app.ui.EquipmentTemplatesListWidget.addItem(item)
        self.app.ui.EquipmentTemplatesListWidget.setCurrentItem(item)
        item.setFlags(Qt.ItemIsEditable | Qt.ItemIsEnabled | Qt.ItemIsSelectable)
        self.app.ui.EquipmentTemplatesListWidget.editItem(item)

    def delete_equipment(self) -> None:
        """Delete the currently selected template from settings and the list."""
        # First delete the list entries
        index = self.app.ui.EquipmentTemplatesListWidget.currentIndex()
        selected_template_text = index.data()
        self.settings.delete_template('equipment', selected_template_text)
        # now make it invisible:
        self.app.ui.EquipmentTemplatesListWidget.takeItem(index.row())
        self.cancel_equipment_template()
        # I do these both to clear the list:
        self.load_default_equipment()

    def load_default_equipment(self):
        """Ensure the predefined templates exist, then refresh the list widget."""
        self.store_predefined_templates()
        self.show_equipment()

    def store_predefined_templates(self):
        """Write the shipped default templates to settings unless already present."""
        equipment_list = self.settings.get_equipment_list() or []
        for item in misc.predef_equipment_templ:
            if not item['name'] in equipment_list:
                self.settings.save_settings_list('equipment', item['name'], item['items'])

    def edit_equipment_template(self) -> None:
        """Gets called when 'edit equipment' button was clicked."""
        # Re-select the current item to force a selection-changed update.
        it = self.app.ui.EquipmentTemplatesListWidget.currentItem()
        self.app.ui.EquipmentTemplatesListWidget.setCurrentItem(None)
        self.app.ui.EquipmentTemplatesListWidget.setCurrentItem(it)
        # Close a possibly open properties editor first.
        self.app.ui.CancelPropertiesButton.click()
        self.load_equipment_to_edit()

    def load_equipment_to_edit(self) -> None:
        """
        Load/Edit the key/value list of an equipment entry.
        """
        table = self.app.ui.EquipmentEditTableWidget
        listwidget = self.app.ui.EquipmentTemplatesListWidget
        # Block signals so programmatic filling does not trigger the
        # add_row_if_needed slots connected above.
        table.blockSignals(True)
        table.clearContents()
        table.setRowCount(0)
        index = listwidget.currentIndex()
        if index.row() == -1:
            # nothing selected
            return
        selected_row_text = listwidget.currentIndex().data()
        table_data = self.settings.load_settings_list(property='equipment', item_name=selected_row_text)
        # first load the previous values:
        if table_data:
            for key, value in table_data:
                if not key or not value:
                    continue
                table.add_equipment_row(key, retranslate_delimiter(value))
        else:
            # new empty equipment:
            for _ in range(8):
                table.add_equipment_row('', '')
        # Always append two spare empty rows for further editing.
        table.add_equipment_row('', '')
        table.add_equipment_row('', '')
        # Page 1 of the stacked widget is the template editor.
        self.app.ui.EquipmentTemplatesStackedWidget.setCurrentIndex(1)
        table.resizeRowsToContents()
        table.blockSignals(False)

    def save_equipment_template(self) -> None:
        """
        Saves the currently selected equipment template to the config file.
        """
        selected_template_text, table_data = self.get_equipment_entry_data()
        # warn if key is not official:
        for key, _ in table_data:
            if key not in cif_all_dict:
                if not key.startswith('_'):
                    # Hard error: keys without a leading underscore are not
                    # CIF keywords at all — refuse to save.
                    show_general_warning('"{}" is not a valid keyword! '
                                         '\nChange the name in order to save.\n'
                                         'Keys must start with an underscore.'.format(key))
                    return
                # Soft warning only: underscore-prefixed but not in the
                # official CIF core dictionary.
                show_general_warning('"{}" is not an official CIF keyword!'.format(key))
        self.settings.save_settings_list('equipment', selected_template_text, table_data)
        self.app.ui.EquipmentTemplatesStackedWidget.setCurrentIndex(0)
        print('saved')

    def import_equipment_from_file(self, filename='') -> None:
        """
        Import an equipment entry from a cif file.
        """
        if not filename:
            filename = cif_file_open_dialog(filter="CIF file (*.cif *.cif_od *.cfx)")
        if not filename:
            print('No file given')
            return
        doc = read_document_from_cif_file(filename)
        if not doc:
            return
        block = doc.sole_block()
        table_data = []
        for item in block:
            if item.pair is not None:
                key, value = item.pair
                # For Oxford Diffraction files only a whitelist of keys is taken.
                if filename.endswith('.cif_od') and key not in include_equipment_imports:
                    continue
                table_data.append([key, retranslate_delimiter(cif.as_string(value).strip('\n\r ;'))])
        if filename.endswith('.cif_od'):
            # .cif_od files carry no usable block name; use the file stem.
            name = Path(filename).stem
        else:
            name = block.name.replace('__', ' ')
        self.settings.save_settings_list('equipment', name, table_data)
        self.show_equipment()

    def get_equipment_entry_data(self) -> Tuple[str, list]:
        """
        Returns the string of the currently selected entry and the table data behind it.
        """
        table = self.app.ui.EquipmentEditTableWidget
        # Set None Item to prevent loss of the currently edited item:
        # The current item is closed and thus saved.
        table.setCurrentItem(None)
        selected_template_text = self.app.ui.EquipmentTemplatesListWidget.currentIndex().data()
        table_data = []
        ncolumns = table.rowCount()
        for rownum in range(ncolumns):
            key = ''
            try:
                key = table.text(rownum, 0)
                value = table.text(rownum, 1).strip('\n\r ')
            except AttributeError:
                # Empty cells have no item widget; treat as blank.
                value = ''
            # Only fully filled rows (key and value) are kept.
            if key and value:
                table_data.append([key, value])
        return selected_template_text, table_data

    def export_equipment_template(self, filename: str = None) -> None:
        """
        Exports the currently selected equipment entry to a file.
        In order to export, we have to run self.edit_equipment_template() first!
        """
        selected_template, table_data = self.get_equipment_entry_data()
        if not selected_template:
            return
        # CIF block names may not contain spaces; join words with '__'.
        blockname = '__'.join(selected_template.split())
        if not filename:
            filename = cif_file_save_dialog(blockname.replace('__', '_') + '.cif')
        if not filename.strip():
            return
        equipment_cif = CifContainer(filename, new_block=blockname)
        for key, value in table_data:
            equipment_cif[key] = value.strip('\n\r ')
        try:
            equipment_cif.save(filename)
            # Path(filename).write_text(doc.as_string(cif.Style.Indent35))
        except PermissionError:
            if Path(filename).is_dir():
                return
            show_general_warning('No permission to write file to {}'.format(Path(filename).resolve()))

    def cancel_equipment_template(self) -> None:
        """
        Cancel Equipment editing.
        """
        table = self.app.ui.EquipmentEditTableWidget
        table.clearContents()
        table.setRowCount(0)
        # Back to the template list page.
        self.app.ui.EquipmentTemplatesStackedWidget.setCurrentIndex(0)
        print('cancelled equipment')
44.61753
120
0.658094
6277506d36fe262dd9739c2f813feffd6d95184a
689
py
Python
test_Valid_Palindrome_125.py
twtrubiks/leetcode-python
b75d84cefc3d11e43c2648cb9156e65587e398eb
[ "MIT" ]
25
2018-01-15T00:09:02.000Z
2022-01-27T02:29:15.000Z
test_Valid_Palindrome_125.py
twtrubiks/leetcode-python
b75d84cefc3d11e43c2648cb9156e65587e398eb
[ "MIT" ]
null
null
null
test_Valid_Palindrome_125.py
twtrubiks/leetcode-python
b75d84cefc3d11e43c2648cb9156e65587e398eb
[ "MIT" ]
18
2018-01-07T09:36:42.000Z
2021-12-14T11:08:05.000Z
import unittest

from Valid_Palindrome_125 import *

'''
Example 1:

Input: "A man, a plan, a canal: Panama"
Output: true

Example 2:

Input: "race a car"
Output: false
'''


class Test_Case(unittest.TestCase):
    """LeetCode 125 — Valid Palindrome."""

    def test_answer_01(self):
        # Mixed case and punctuation are ignored.
        self.assertEqual(Solution().isPalindrome("A man, a plan, a canal: Panama"), True)

    def test_answer_02(self):
        self.assertEqual(Solution().isPalindrome("race a car"), False)

    def test_answer_03(self):
        # Digits count as alphanumeric characters.
        self.assertEqual(Solution().isPalindrome("0P"), False)


if __name__ == '__main__':
    unittest.main()
19.138889
60
0.63135
14cc5c5ee13ac1f067a175515ae7363a908d8649
451
py
Python
miagenda/core/__init__.py
JoulesCH/miagenda
dbc7d68ebb697bfd9f779b2b9435d6384ea98aaa
[ "MIT" ]
null
null
null
miagenda/core/__init__.py
JoulesCH/miagenda
dbc7d68ebb697bfd9f779b2b9435d6384ea98aaa
[ "MIT" ]
null
null
null
miagenda/core/__init__.py
JoulesCH/miagenda
dbc7d68ebb697bfd9f779b2b9435d6384ea98aaa
[ "MIT" ]
null
null
null
""" Módulo que instancia la aplicación """ from flask import Flask import os app = Flask(__name__, template_folder='../templates', static_folder='../static') DATABASE_URL = os.getenv('DATABASE_URL') if 'postgresql' not in DATABASE_URL: DATABASE_URL = DATABASE_URL.replace('postgres', 'postgresql') # Se importa el archivo de configuración app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_URL from Agendas import views from . import database
22.55
80
0.762749
4eeb384656f858397c673d24ef5f8b26958a03e1
1,173
py
Python
code/utils/tests/test_pearson_1d.py
berkeley-stat159/project-zeta-2
7c35423fbc1407751e1aea6aac99d5d02a82dfdc
[ "BSD-3-Clause" ]
null
null
null
code/utils/tests/test_pearson_1d.py
berkeley-stat159/project-zeta-2
7c35423fbc1407751e1aea6aac99d5d02a82dfdc
[ "BSD-3-Clause" ]
null
null
null
code/utils/tests/test_pearson_1d.py
berkeley-stat159/project-zeta-2
7c35423fbc1407751e1aea6aac99d5d02a82dfdc
[ "BSD-3-Clause" ]
null
null
null
""" Test Pearson module, pearson_1d function Run with:: nosetests test_pearson_1d.py This is a test module. It is designed to be run the with the "nose" testing package (via the "nosetests" script. Nose will look for any functions with "test" in their names, and run them. Nose reports any errors, or any failures. A failure is where one of the test conditions run with an "assert" command fails. For example, if I did: assert_almost_equal(1, 2) then this would "fail". So we use the tests to check that the results of our function are (still) as we expect. """ # Python 3 compatibility from __future__ import absolute_import, division, print_function import numpy as np from .. import pearson from numpy.testing import assert_almost_equal def test_pearson_1d(): # Test pearson_1d routine x = np.random.rand(22) y = np.random.rand(22) # Does routine give same answer as np.corrcoef? expected = np.corrcoef(x, y)[0, 1] actual = pearson.pearson_1d(x, y) # Did you, gentle user, forget to return the value? if actual is None: raise RuntimeError("function returned None") assert_almost_equal(expected, actual)
24.957447
79
0.727195
9774bc900bc3972e76c7e032dee724f7fb1f3199
420
py
Python
select_models.py
seyitozztrk/Online-Al-veris-Siteleri-Yorumlar-zerine-Duygu-Analizi
12796f0ad16ba71402ad36fa1415f9eb6a256b2a
[ "MIT" ]
null
null
null
select_models.py
seyitozztrk/Online-Al-veris-Siteleri-Yorumlar-zerine-Duygu-Analizi
12796f0ad16ba71402ad36fa1415f9eb6a256b2a
[ "MIT" ]
null
null
null
select_models.py
seyitozztrk/Online-Al-veris-Siteleri-Yorumlar-zerine-Duygu-Analizi
12796f0ad16ba71402ad36fa1415f9eb6a256b2a
[ "MIT" ]
null
null
null
from basic_abc import basic_operation


class models(basic_operation):
    """Thin dispatcher that runs the shared train/test pipeline per classifier."""

    def __init__(self):
        super().__init__()

    def svmModel(self):
        """Run the pipeline with the SVM backend."""
        self.train_test_split_dataset('svm')

    def NaiveBayesModel(self):
        """Run the pipeline with the Naive Bayes backend."""
        self.train_test_split_dataset('bayes')

    def DecisionTreeModel(self):
        """Run the pipeline with the decision-tree backend."""
        self.train_test_split_dataset('decision')
20
49
0.642857
1367ace05879afe3632b0964dc8453a49d3578f3
3,706
py
Python
oob_collector_baidu.py
ParkJonghyeon/out_of_band_onion_address_collector
0405d68a94edc6f82e8850c939f101f24c77a16c
[ "Apache-2.0" ]
1
2019-04-28T13:07:58.000Z
2019-04-28T13:07:58.000Z
oob_collector_baidu.py
ParkJonghyeon/out_of_band_onion_address_collector
0405d68a94edc6f82e8850c939f101f24c77a16c
[ "Apache-2.0" ]
null
null
null
oob_collector_baidu.py
ParkJonghyeon/out_of_band_onion_address_collector
0405d68a94edc6f82e8850c939f101f24c77a16c
[ "Apache-2.0" ]
null
null
null
from selenium import webdriver from selenium.webdriver.common.keys import Keys from selenium.common.exceptions import NoSuchElementException, WebDriverException from time import strftime, localtime, time, sleep import random from pyvirtualdisplay import Display collect_date = strftime('%y%m%d', localtime(time())) search_keyword = [ 'onion.city', 'onion.to', 'onion.cab', 'onion.link', 'onion.lu', 'onion.rip', 'onion.nu', 'onion.lt', 'onion.direct', 'tor2web.org', 'tor2web.fi', 'torstorm.org', 'onion.gq', 'onion.sh' ] def refresh_if_error_page(driver): trial_count = 3 while check_exists_by_css(driver, 'a#errorCode') == True: try: driver.refresh() sleep(5) trial_count -= 1 print("error page. count = "+str(trial_count)) if trial_count < 1: break except WebDriverException: continue def next_page_exist_check(driver): if check_exists_by_css(driver, "div#page"): try: current_page_num = int(driver.find_element_by_css_selector('div#page strong span.pc').text) if current_page_num > 15 and current_page_num % (random.randrange(6, 8)) == 0: try: driver.refresh() except WebDriverException: refresh_if_error_page(driver) sleep(7) driver.find_element_by_link_text(str(current_page_num + 1)).click() refresh_if_error_page(driver) sleep(random.randrange(7, 15)) new_page_num = int(driver.find_element_by_css_selector('div#page strong span.pc').text) if current_page_num >= new_page_num: return False return True except NoSuchElementException: return False else: return False def check_exists_by_css(driver, css): try: driver.find_element_by_css_selector(css) except NoSuchElementException: return False return True def crawl_onion_address(driver, keyword, result_file): refresh_if_error_page(driver) sleep(7) driver.find_element_by_id("kw").clear() driver.find_element_by_id("kw").send_keys("site:"+keyword) driver.find_element_by_id("su").click() sleep(7) refresh_if_error_page(driver) page_remain = True while page_remain == True: if check_exists_by_css(driver, 'div#content_left') == True: 
link_list = driver.find_elements_by_css_selector('div#content_left div.result.c-container') for link in link_list: page_title = link.find_element_by_css_selector('h3.t').text page_title = page_title.replace("\"", " ") page_link = link.find_element_by_css_selector('div.f13').text result_file.write(page_title + '\t' + page_link + '\t' + collect_date + '\n') #print(page_title+'\t'+page_link) page_remain = next_page_exist_check(driver) # sleep(random.randrange(30, 60)) else: page_remain = False if __name__ == "__main__": virtual_display = Display(visible=0, size=(1024, 960)) virtual_display.start() driver = webdriver.Firefox() driver.get('http://www.baidu.com/') sleep(2) refresh_if_error_page(driver) sleep(1) link_list_file = open('new_linkset/' + collect_date + 'baidu_link_list.tsv', 'a', encoding='utf-8') link_list_file.write('Title\tLink\tDate\n') for keyword in search_keyword: crawl_onion_address(driver, keyword, link_list_file) driver.quit() link_list_file.close() virtual_display.stop()
30.883333
103
0.633028
c147b6eded85fddd3553a1b657545fca83de4d4e
522
py
Python
gaussian-kernel.py
adl1995/image-processing-filters
850e4a6e23ef0f3843cc306cf1e42569f705f07e
[ "MIT" ]
null
null
null
gaussian-kernel.py
adl1995/image-processing-filters
850e4a6e23ef0f3843cc306cf1e42569f705f07e
[ "MIT" ]
null
null
null
gaussian-kernel.py
adl1995/image-processing-filters
850e4a6e23ef0f3843cc306cf1e42569f705f07e
[ "MIT" ]
null
null
null
#!/usr/bin/env python __author__ = "Adeel Ahmad" __email__ = "adeelahmad14@hotmail.com" __status__ = "Production" def gFilter(sigma): kernel = np.zeros((4*sigma+1,4*sigma+1)) x = -2 * sigma while(x<2 * sigma + 1): y = -2 * sigma while(y<2 * sigma + 1): norm = 1/((2 * 3.14159)*(sigma**2)) num = (x**2.0) + (y**2) denm = 2.0 * (sigma**2) kernel[x+2 * sigma, y+2 * sigma] = norm * exp(-num/denm) y+=1 x+=1 return kernel
23.727273
68
0.490421
7b0f78152531e9ed2ea225f987aefaf27cb660e7
2,047
py
Python
read_json_antara.py
kiadzaky/scraping
9d0912808787db8a1909570edc9507e5d574bff5
[ "MIT" ]
null
null
null
read_json_antara.py
kiadzaky/scraping
9d0912808787db8a1909570edc9507e5d574bff5
[ "MIT" ]
null
null
null
read_json_antara.py
kiadzaky/scraping
9d0912808787db8a1909570edc9507e5d574bff5
[ "MIT" ]
null
null
null
import json from urllib2 import urlopen from urllib2 import Request, HTTPError import pandas as pd from bs4 import BeautifulSoup url_array =[] hasil =[] judul_array = [] tanggal_array = [] gambar_array = [] caption_array = [] konten_array = [] with open('antara.json') as json_file: data = json.load(json_file) for p in data['link']: url = data['link'][p] url_array.append(url) # print url_array for i in url_array: html = i hdr = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3', 'Accept-Encoding': 'none', 'Accept-Language': 'en-US,en;q=0.8', 'Connection': 'keep-alive'} req = Request(html, headers=hdr) try: page = urlopen(req) except HTTPError, e: print e.fp.read() content = page.read() hasil.append(content) # with open('text.txt','w') as outfile: # json.dump(hasil,outfile) for i in hasil: soup = BeautifulSoup(hasil[int(i)], 'html.parser') produk = soup.find_all('article', attrs={'class': 'post-wrapper clearfix'}) for p in produk: judul = p.find('h1', 'post-title').get_text() tanggal = p.find('p', 'simple-share').get_text() gambar = p.find('source', srcset=True) caption = p.find('div', 'wp-caption') konten = p.find('div', 'post-content clearfix') judul_array.append(judul) tanggal_array.append(tanggal) gambar_array.append(gambar) caption_array.append(caption) konten_array.append(konten) produk_dictionary = {'judul' : judul_array, 'tanggal': tanggal_array, 'gambar': gambar_array , 'caption': caption_array, 'konten': konten_array} df = pd.DataFrame(produk_dictionary, columns=['judul','tanggal','gambar','caption','konten']) df.to_csv("Antara_json.json",sep=',')
34.694915
129
0.62384
cfffaa734a20a4ad6e8263ceaa6d7cf11179ace7
327
py
Python
sdwan/get_devices_by_system_ip.py
sambyers/netauto_learning
22c1049bf86e188f774f1c977823abea2bb3abfe
[ "MIT" ]
null
null
null
sdwan/get_devices_by_system_ip.py
sambyers/netauto_learning
22c1049bf86e188f774f1c977823abea2bb3abfe
[ "MIT" ]
null
null
null
sdwan/get_devices_by_system_ip.py
sambyers/netauto_learning
22c1049bf86e188f774f1c977823abea2bb3abfe
[ "MIT" ]
null
null
null
import json import sys from yaml import safe_load from sdwan import Sdwan config_file = sys.argv[1] with open(config_file) as fh: config = safe_load(fh.read()) s = Sdwan(**config, verify_tls=False) params = {'system-ip': '10.10.1.17'} response = s.get_devices(params=params) print(json.dumps(response['data'], indent=2))
23.357143
45
0.727829
af72caae65307873a000a2f3d8006f7a111169ec
2,419
py
Python
graph.py
weihengSu/testing
d9be7a56c886756ba8e607a3b6e4df6efbe4893b
[ "MIT" ]
null
null
null
graph.py
weihengSu/testing
d9be7a56c886756ba8e607a3b6e4df6efbe4893b
[ "MIT" ]
null
null
null
graph.py
weihengSu/testing
d9be7a56c886756ba8e607a3b6e4df6efbe4893b
[ "MIT" ]
null
null
null
__author__ = 'weiheng su' import sys class Graph(object): def __init__(self, dict= {}): self.data = dict def get_adjlist(self, node): if node in self.data.keys(): nodes = self.data[node] return nodes else: return None def is_adjacent(self, node1, node2): if node1 in self.data.keys(): if node2 in list(self.data[node1]): return True else: return False else: return False def num_nodes(self): return len(list(self.data.keys())) def __str__(self): result = "{" for k,v in sorted(self.data.items()): a = "'"+str(k)+"': "+str(v)+ " " result += a result +="}" return result def __iter__(self): for i in self.data: yield i def __contains__(self, node): if node in self.data.keys(): return True else: return False def __len__(self): return self.num_nodes() def add_node(self, node): if node not in self.data: self.data[node] = [] return True else: return False def link_nodes(self, node1, node2): if node1 in self.data.keys() and node2 in self.data.keys(): if node1!= node2: if self.is_adjacent(node1,node2) == False: self.data[node1].append(node2) self.data[node2].append(node1) return True else: return False else: return False else: return False def unlink_nodes(self, node1, node2): if node1 in self.data.keys() and node2 in self.data.keys(): if self.is_adjacent(node1, node2) == True: del self.data[node1][self.data[node1].index(node2)] del self.data[node2][self.data[node2].index(node1)] return True else: return False else: return False def del_node(self, node): if node in self.data.keys(): del self.data[node] for i in self.data.keys(): if node in self.data[i]: del self.data[i][self.data[i].index(node)] return True else: return False
28.127907
67
0.491112
3db79362ae2080defd337a990556b12769ec71a8
7,125
py
Python
src/azure-cli-core/azure/cli/core/azlogging.py
v-Ajnava/azure-cli
febec631d79bfca151e84267b5b409594bad598e
[ "MIT" ]
null
null
null
src/azure-cli-core/azure/cli/core/azlogging.py
v-Ajnava/azure-cli
febec631d79bfca151e84267b5b409594bad598e
[ "MIT" ]
3
2021-03-26T00:48:20.000Z
2022-03-29T22:05:39.000Z
src/azure-cli-core/azure/cli/core/azlogging.py
v-Ajnava/azure-cli
febec631d79bfca151e84267b5b409594bad598e
[ "MIT" ]
1
2017-12-28T04:51:44.000Z
2017-12-28T04:51:44.000Z
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- """ Logging for Azure CLI - Loggers: The name of the parent logger is defined in AZ_ROOT_LOGGER_NAME variable. All the loggers used in the CLI must descends from it, otherwise it won't benefit from the logger handlers, filters and level configuration. - Handlers: There are two default handlers will be added to both CLI parent logger and root logger. One is a colorized stream handler for console output and the other is a file logger handler. The file logger can be enabled or disabled through 'az configure' command. The logging file locates at path defined in AZ_LOGFILE_DIR. - Level: Based on the verbosity option given by users, the logging levels for root and CLI parent loggers are: CLI Parent Root Console File Console File omitted Warning Debug Critical Debug --verbose Info Debug Critical Debug --debug Debug Debug Debug Debug """ import os import platform import logging import logging.handlers import colorama AZ_ROOT_LOGGER_NAME = 'az' class AzLoggingLevelManager(object): # pylint: disable=too-few-public-methods CONSOLE_LOG_CONFIGS = [ # (default) { AZ_ROOT_LOGGER_NAME: logging.WARNING, 'root': logging.CRITICAL, }, # --verbose { AZ_ROOT_LOGGER_NAME: logging.INFO, 'root': logging.CRITICAL, }, # --debug { AZ_ROOT_LOGGER_NAME: logging.DEBUG, 'root': logging.DEBUG, }] def __init__(self, argv): self.user_setting_level = self.determine_verbose_level(argv) def get_user_setting_level(self, logger): logger_name = logger.name if logger.name in (AZ_ROOT_LOGGER_NAME, 'root') else 'root' return self.CONSOLE_LOG_CONFIGS[self.user_setting_level][logger_name] @classmethod def determine_verbose_level(cls, argv): # Get verbose level by reading 
the arguments. # Remove any consumed args. verbose_level = 0 i = 0 while i < len(argv): arg = argv[i] if arg in ['--verbose']: verbose_level += 1 argv.pop(i) elif arg in ['--debug']: verbose_level += 2 argv.pop(i) else: i += 1 # Use max verbose level if too much verbosity specified. return min(verbose_level, len(cls.CONSOLE_LOG_CONFIGS) - 1) class ColorizedStreamHandler(logging.StreamHandler): COLOR_MAP = { logging.CRITICAL: colorama.Fore.RED, logging.ERROR: colorama.Fore.RED, logging.WARNING: colorama.Fore.YELLOW, logging.INFO: colorama.Fore.GREEN, logging.DEBUG: colorama.Fore.CYAN, } # Formats for console logging if coloring is enabled or not. # Show the level name if coloring is disabled (e.g. INFO). # Also, Root logger should show the logger name. CONSOLE_LOG_FORMAT = { 'az': { True: '%(message)s', False: '%(levelname)s: %(message)s', }, 'root': { True: '%(name)s : %(message)s', False: '%(levelname)s: %(name)s : %(message)s', } } def __init__(self, stream, logger, level_manager): super(ColorizedStreamHandler, self).__init__(stream) if platform.system() == 'Windows': self.stream = colorama.AnsiToWin32(self.stream).stream fmt = self.CONSOLE_LOG_FORMAT[logger.name][self.enable_color] super(ColorizedStreamHandler, self).setFormatter(logging.Formatter(fmt)) super(ColorizedStreamHandler, self).setLevel(level_manager.get_user_setting_level(logger)) def format(self, record): msg = logging.StreamHandler.format(self, record) if self.enable_color: try: msg = '{}{}{}'.format(self.COLOR_MAP[record.levelno], msg, colorama.Style.RESET_ALL) except KeyError: pass return msg @property def enable_color(self): try: # Color if tty stream available if self.stream.isatty(): return True except (AttributeError, ValueError): pass return False class AzRotatingFileHandler(logging.handlers.RotatingFileHandler): from azure.cli.core._environment import get_config_dir from azure.cli.core._config import az_config ENABLED = az_config.getboolean('logging', 'enable_log_file', fallback=False) 
LOGFILE_DIR = os.path.expanduser(az_config.get('logging', 'log_dir', fallback=os.path.join(get_config_dir(), 'logs'))) def __init__(self): logging_file_path = self.get_log_file_path() super(AzRotatingFileHandler, self).__init__(logging_file_path, maxBytes=10 * 1024 * 1024, backupCount=5) self.setFormatter(logging.Formatter('%(process)d : %(asctime)s : %(levelname)s : %(name)s : %(message)s')) self.setLevel(logging.DEBUG) def get_log_file_path(self): if not os.path.isdir(self.LOGFILE_DIR): os.makedirs(self.LOGFILE_DIR) return os.path.join(self.LOGFILE_DIR, 'az.log') def configure_logging(argv, stream=None): """ Configuring the loggers and their handlers. In the production setting, the method is a single entry. However, when running in automation, the method could be entered multiple times. Therefore all the handlers will be cleared first. """ level_manager = AzLoggingLevelManager(argv) loggers = [logging.getLogger(), logging.getLogger(AZ_ROOT_LOGGER_NAME)] logging.getLogger(AZ_ROOT_LOGGER_NAME).propagate = False for logger in loggers: # Set the levels of the loggers to lowest level.Handlers can override by choosing a higher level. logger.setLevel(logging.DEBUG) # clear the handlers. the handlers are not closed as this only effect the automation scenarios. kept = [h for h in logger.handlers if not isinstance(h, (ColorizedStreamHandler, AzRotatingFileHandler))] logger.handlers = kept # add colorized console handler logger.addHandler(ColorizedStreamHandler(stream, logger, level_manager)) # add file handler if AzRotatingFileHandler.ENABLED: logger.addHandler(AzRotatingFileHandler()) if AzRotatingFileHandler.ENABLED: get_az_logger(__name__).debug("File logging enabled - Writing logs to '%s'.", AzRotatingFileHandler.LOGFILE_DIR) def get_az_logger(module_name=None): return logging.getLogger(AZ_ROOT_LOGGER_NAME).getChild(module_name) if module_name else logging.getLogger( AZ_ROOT_LOGGER_NAME)
37.698413
120
0.631579
c7068e49db76604d27dc8d0bb408eac934b2c3a9
11,904
py
Python
demo/sprites.py
alexpietrow1/demo
d484c976fc961e21f15bc5b295ae357b0ba037d7
[ "MIT" ]
null
null
null
demo/sprites.py
alexpietrow1/demo
d484c976fc961e21f15bc5b295ae357b0ba037d7
[ "MIT" ]
null
null
null
demo/sprites.py
alexpietrow1/demo
d484c976fc961e21f15bc5b295ae357b0ba037d7
[ "MIT" ]
null
null
null
# sprite classes for game # i used some ideas from CodePylet https://www.youtube.com/watch?v=osDofIdja6s&t=1038s # i also borrowed pretty much all of this from kids can code - thanks! # on acceleration https://www.khanacademy.org/science/physics/one-dimensional-motion/kinematic-formulas/v/average-velocity-for-constant-acceleration # on vectors: https://www.youtube.com/watch?v=ml4NSzCQobk import pygame as pg from pygame.sprite import Sprite import random from random import randint, randrange, choice from settings import * vec = pg.math.Vector2 class Spritesheet: # class for loading and parsing sprite sheets def __init__(self, filename): self.spritesheet = pg.image.load(filename).convert() def get_image(self, x, y, width, height): image = pg.Surface((width, height)) image.blit(self.spritesheet, (0,0), (x, y, width, height)) image = pg.transform.scale(image, (width // 2, height // 2)) return image class Player(Sprite): def __init__(self, game): # allows layering in LayeredUpdates sprite group - thanks pygame! 
self._layer = PLAYER_LAYER # add player to game groups when instantiated self.groups = game.all_sprites Sprite.__init__(self, self.groups) self.game = game self.walking = False self.jumping = False self.current_frame = 0 self.last_update = 0 self.load_images() # self.image = pg.Surface((30,40)) # self.image = self.game.spritesheet.get_image(614,1063,120,191) self.image = self.standing_frames[0] self.image.set_colorkey(BLACK) # self.image.fill(BLACK) self.rect = self.image.get_rect() self.rect.center = (WIDTH / 2, HEIGHT /2) self.pos = vec(WIDTH / 2, HEIGHT / 2) self.vel = vec(0, 0) self.acc = vec(0, 0) print("adding vecs " + str(self.vel + self.acc)) def load_images(self): self.standing_frames = [self.game.spritesheet.get_image(584, 0, 121, 201), self.game.spritesheet.get_image(581, 1265, 121,191) ] for frame in self.standing_frames: frame.set_colorkey(BLACK) self.walk_frames_r = [self.game.spritesheet.get_image(584, 203, 121, 201), self.game.spritesheet.get_image(678, 651, 121, 207) ] '''setup left frames by flipping and appending them into an empty list''' self.walk_frames_l = [] for frame in self.walk_frames_r: frame.set_colorkey(BLACK) self.walk_frames_l.append(pg.transform.flip(frame, True, False)) self.jump_frame = self.game.spritesheet.get_image(416, 1660, 150, 181) self.jump_frame.set_colorkey(BLACK) def update(self): self.animate() self.acc = vec(0, PLAYER_GRAV) # print("acc " + str(self.acc)) # print("vel " + str(self.vel)) keys = pg.key.get_pressed() if keys[pg.K_a]: self.acc.x = -PLAYER_ACC if keys[pg.K_d]: self.acc.x = PLAYER_ACC # set player friction self.acc.x += self.vel.x * PLAYER_FRICTION # equations of motion self.vel += self.acc if abs(self.vel.x) < 0.1: self.vel.x = 0 self.pos += self.vel + 0.5 * self.acc # jump to other side of screen if self.pos.x > WIDTH + self.rect.width / 2: self.pos.x = 0 - self.rect.width / 2 if self.pos.x < 0 - self.rect.width / 2: self.pos.x = WIDTH + self.rect.width / 2 self.rect.midbottom = self.pos def 
animate(self): # gets time in miliseconds now = pg.time.get_ticks() if self.vel.x != 0: self.walking = True else: self.walking = False if self.walking: if now - self.last_update > 200: self.last_update = now ''' assigns current frame based on the next frame and the remaining frames in the list. If current frame is 'two' in a list with three elements, then: 2 + 1 = 3; 3 modulus 3 is zero, setting the animation back to its first frame. If current frame is zero, then: 0 + 1 = 1; 1 modulus 3 is 1; 2 modulus 3 is 2; 3 modulus 3 is o ''' self.current_frame = (self.current_frame + 1) % len(self.walk_frames_l) bottom = self.rect.bottom if self.vel.x > 0: self.image = self.walk_frames_r[self.current_frame] else: self.image = self.walk_frames_l[self.current_frame] self.rect = self.image.get_rect() self.rect.bottom = bottom # checks state if not self.jumping and not self.walking: # gets current delta time and checks against 200 miliseconds if now - self.last_update > 200: self.last_update = now self.current_frame = (self.current_frame + 1) % len(self.standing_frames) # reset bottom for each frame of animation bottom = self.rect.bottom self.image = self.standing_frames[self.current_frame] self.rect = self.image.get_rect() self.rect.bottom = bottom # collide will find this property if it is called self.mask self.mask = pg.mask.from_surface(self.image) ''' CHANGE: SINKING FUNCTION FOR WHEN PLAYER IS IN MID AIR''' def sink(self): self.vel.y = PLAYER_SINK class Cloud(Sprite): def __init__(self, game): # allows layering in LayeredUpdates sprite group self._layer = CLOUD_LAYER # add Platforms to game groups when instantiated self.groups = game.all_sprites, game.clouds Sprite.__init__(self, self.groups) self.game = game self.image = choice(self.game.cloud_images) self.image.set_colorkey(BLACK) self.rect = self.image.get_rect() scale = randrange (50, 101) / 100 self.image = pg.transform.scale(self.image, (int(self.rect.width * scale), int(self.rect.height * scale))) self.rect.x = 
randrange(WIDTH - self.rect.width) self.rect.y = randrange(-500, -50) self.speed = randrange(1,3) def update(self): if self.rect.top > HEIGHT * 2: self.kill ''' mr cozort added animated clouds and made it so they restart on the other side of the screen''' self.rect.x += self.speed if self.rect.x > WIDTH: self.rect.x = -self.rect.width class Platform(Sprite): def __init__(self, game, x, y): # allows layering in LayeredUpdates sprite group self._layer = PLATFORM_LAYER # add Platforms to game groups when instantiated self.groups = game.all_sprites, game.platforms Sprite.__init__(self, self.groups) self.game = game self.image = self.game.spritesheet.get_image(434, 1265, 145, 110) self.image.set_colorkey(BLACK) '''leftovers from random rectangles before images''' # self.image = pg.Surface((w,h)) # self.image.fill(WHITE) self.rect = self.image.get_rect() self.rect.x = x self.rect.y = y self.ground_level = False if random.randrange(100) < POW_SPAWN_PCT: Pow(self.game, self) if random.randrange(100) < POW_SPAWN_PCT: Cactus(self.game, self) '''CHANGE: FUNCTION FOR SPRING BEING FLATTENED WHEN JUMPED ON''' def compress(self): #gets new image and sets the colorkey self.image = self.game.spritesheet.get_image(0, 1988, 145, 57) self.image.set_colorkey(BLACK) '''CHANGE: FUNCTION FOR UNCOMPRESSING SPRING However, it is not used because I couldn't figure out how to manipulate pygame time''' def uncompress(self): self.image = self.game.spritesheet.get_image(434, 1270, 145, 110) self.image.set_colorkey(BLACK) class Pow(Sprite): def __init__(self, game, plat): # allows layering in LayeredUpdates sprite group self._layer = POW_LAYER # add a groups property where we can pass all instances of this object into game groups self.groups = game.all_sprites, game.powerups Sprite.__init__(self, self.groups) self.game = game self.plat = plat self.type = random.choice(['boost']) self.image = self.game.spritesheet.get_image(826, 134, 71, 70) self.image.set_colorkey(BLACK) self.rect = 
self.image.get_rect() self.rect.centerx = self.plat.rect.centerx self.rect.bottom = self.plat.rect.top - 5 def update(self): self.rect.bottom = self.plat.rect.top - 5 # checks to see if plat is in the game's platforms group so we can kill the powerup instance if not self.game.platforms.has(self.plat): self.kill() class Mob(Sprite): def __init__(self, game): # allows layering in LayeredUpdates sprite group self._layer = MOB_LAYER # add a groups property where we can pass all instances of this object into game groups self.groups = game.all_sprites, game.mobs Sprite.__init__(self, self.groups) self.game = game self.image_up = self.game.spritesheet.get_image(534, 763, 142, 148) self.image_up.set_colorkey(BLACK) self.image_down = self.game.spritesheet.get_image(464, 1122, 148, 141) self.image_down.set_colorkey(BLACK) self.image = self.image_up self.image.set_colorkey(BLACK) self.rect = self.image.get_rect() self.rect.centerx = choice([-100, WIDTH + 100]) self.rect_top = self.rect.top self.vx = randrange(1, 4) if self.rect.centerx > WIDTH: self.vx *= -1 self.rect.y = randrange(HEIGHT//1.5) self.vy = 0 self.dy = 0.5 def update(self): self.rect.x += self.vx self.vy += self.dy self.rect_top = self.rect.top if self.vy > 3 or self.vy < -3: self.dy *= -1 center = self.rect.center if self.dy < 0: self.image = self.image_up else: self.image = self.image_down self.rect = self.image.get_rect() self.mask = pg.mask.from_surface(self.image) self.rect.center = center self.rect_top = self.rect.top self.rect.y += self.vy if self.rect.left > WIDTH + 100 or self.rect.right < -100: self.kill() class Cactus(Sprite): def __init__(self, game, plat): # allows layering in LayeredUpdates sprite group self._layer = POW_LAYER # add a groups property where we can pass all instances of this object into game groups self.groups = game.all_sprites, game.cacti Sprite.__init__(self, self.groups) self.game = game self.plat = plat self.image = self.game.spritesheet.get_image(707,134,117,160) 
self.image.set_colorkey(BLACK) self.rect = self.image.get_rect() self.rect.centerx = self.plat.rect.centerx self.rect.bottom = self.plat.rect.top - 5 def update(self): self.rect.bottom = self.plat.rect.top - 5 # checks to see if plat is in the game's platforms group so we can kill the powerup instance if not self.game.platforms.has(self.plat): self.kill()
44.58427
150
0.588542
85021967bc09d52b22afd47cdda0f89988b6b283
3,069
py
Python
test/functional/feature_logging.py
bitcoinNickel/bitcoinnickel
7980682cf13048d0177b370d272717ada4c66910
[ "MIT" ]
1
2022-02-06T18:40:22.000Z
2022-02-06T18:40:22.000Z
test/functional/feature_logging.py
bitcoinNickel/bitcoinnickel
7980682cf13048d0177b370d272717ada4c66910
[ "MIT" ]
null
null
null
test/functional/feature_logging.py
bitcoinNickel/bitcoinnickel
7980682cf13048d0177b370d272717ada4c66910
[ "MIT" ]
1
2022-01-02T16:36:33.000Z
2022-01-02T16:36:33.000Z
#!/usr/bin/env python3 # Copyright (c) 2017-2018 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test debug logging.""" import os from test_framework.test_framework import BitcoinNickelTestFramework from test_framework.test_node import ErrorMatch class LoggingTest(BitcoinNickelTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True def skip_test_if_missing_module(self): self.skip_if_no_wallet() def relative_log_path(self, name): return os.path.join(self.nodes[0].datadir, "regtest", name) def run_test(self): # test default log file name default_log_path = self.relative_log_path("debug.log") assert os.path.isfile(default_log_path) # test alternative log file name in datadir self.restart_node(0, ["-debuglogfile=foo.log"]) assert os.path.isfile(self.relative_log_path("foo.log")) # test alternative log file name outside datadir tempname = os.path.join(self.options.tmpdir, "foo.log") self.restart_node(0, ["-debuglogfile=%s" % tempname]) assert os.path.isfile(tempname) # check that invalid log (relative) will cause error invdir = self.relative_log_path("foo") invalidname = os.path.join("foo", "foo.log") self.stop_node(0) exp_stderr = "Error: Could not open debug log file \S+$" self.nodes[0].assert_start_raises_init_error(["-debuglogfile=%s" % (invalidname)], exp_stderr, match=ErrorMatch.FULL_REGEX) assert not os.path.isfile(os.path.join(invdir, "foo.log")) # check that invalid log (relative) works after path exists self.stop_node(0) os.mkdir(invdir) self.start_node(0, ["-debuglogfile=%s" % (invalidname)]) assert os.path.isfile(os.path.join(invdir, "foo.log")) # check that invalid log (absolute) will cause error self.stop_node(0) invdir = os.path.join(self.options.tmpdir, "foo") invalidname = os.path.join(invdir, "foo.log") self.nodes[0].assert_start_raises_init_error(["-debuglogfile=%s" % invalidname], exp_stderr, 
match=ErrorMatch.FULL_REGEX) assert not os.path.isfile(os.path.join(invdir, "foo.log")) # check that invalid log (absolute) works after path exists self.stop_node(0) os.mkdir(invdir) self.start_node(0, ["-debuglogfile=%s" % (invalidname)]) assert os.path.isfile(os.path.join(invdir, "foo.log")) # check that -nodebuglogfile disables logging self.stop_node(0) os.unlink(default_log_path) assert not os.path.isfile(default_log_path) self.start_node(0, ["-nodebuglogfile"]) assert not os.path.isfile(default_log_path) # just sanity check no crash here self.stop_node(0) self.start_node(0, ["-debuglogfile=%s" % os.devnull]) if __name__ == '__main__': LoggingTest().main()
38.848101
131
0.67188
2a45310d36de592dcc9c6cdaea9b2a90b485a87e
8,554
py
Python
algorithms/genetic/abstract_genetic/abstract_genetic_algorithm.py
UCLM-SIMD/MONRP
16b19ace0746365300b5d3d16f5dda8c3a196cf7
[ "MIT" ]
null
null
null
algorithms/genetic/abstract_genetic/abstract_genetic_algorithm.py
UCLM-SIMD/MONRP
16b19ace0746365300b5d3d16f5dda8c3a196cf7
[ "MIT" ]
null
null
null
algorithms/genetic/abstract_genetic/abstract_genetic_algorithm.py
UCLM-SIMD/MONRP
16b19ace0746365300b5d3d16f5dda8c3a196cf7
[ "MIT" ]
null
null
null
from abc import abstractmethod import random from typing import Any, Dict, List, Tuple import numpy as np from datasets import Dataset from models.Solution import Solution from algorithms.abstract_algorithm.abstract_algorithm import AbstractAlgorithm import evaluation.metrics as metrics from models.Hyperparameter import generate_hyperparameter class AbstractGeneticAlgorithm(AbstractAlgorithm): """Abstract class for genetic algorithms """ def __init__(self, dataset_name: str = "test", dataset: Dataset = None, random_seed: int = None, debug_mode: bool = False, tackle_dependencies: bool = False, population_length: int = 100, max_generations: int = 100, max_evaluations: int = 0, selection: str = "tournament", selection_candidates: int = 2, crossover: str = "onepoint", crossover_prob: float = 0.9, mutation: str = "flipeachbit", mutation_prob: float = 0.1, replacement: str = "elitism",): """Init method calls parent init and includes specific parameters of genetic algorithms """ super().__init__(dataset_name, dataset, random_seed, debug_mode, tackle_dependencies) self.population_length: int = population_length self.max_generations: int = max_generations self.max_evaluations: int = max_evaluations self.selection_scheme: str = selection self.selection_candidates: int = selection_candidates self.crossover_scheme: str = crossover self.crossover_prob: float = crossover_prob self.mutation_scheme: str = mutation self.mutation_prob: float = mutation_prob self.replacement_scheme: str = replacement self.hyperparameters.append(generate_hyperparameter( "population_length", population_length)) self.hyperparameters.append(generate_hyperparameter( "max_generations", max_generations)) self.hyperparameters.append(generate_hyperparameter( "max_evaluations", max_evaluations)) self.hyperparameters.append(generate_hyperparameter( "selection_scheme", selection)) self.hyperparameters.append(generate_hyperparameter( "selection_candidates", selection_candidates)) 
self.hyperparameters.append(generate_hyperparameter( "crossover_scheme", crossover)) self.hyperparameters.append(generate_hyperparameter( "crossover_prob", crossover_prob)) self.hyperparameters.append(generate_hyperparameter( "mutation_scheme", mutation)) self.hyperparameters.append(generate_hyperparameter( "mutation_prob", mutation_prob)) self.hyperparameters.append(generate_hyperparameter( "replacement_scheme", replacement)) self.population = None self.best_generation_avgValue = None self.best_generation = None self.nds = [] self.num_evaluations: int = 0 self.num_generations: int = 0 self.best_individual = None @abstractmethod def run(self) -> Dict[str, Any]: pass @abstractmethod def get_file(self) -> str: pass @abstractmethod def get_name(self) -> str: pass def df_find_data(self, df: any): return df[(df["Population Length"] == self.population_length) & (df["MaxGenerations"] == self.max_generations) & (df["Selection Candidates"] == self.selection_candidates) & (df["Selection Scheme"] == self.selection_scheme) & (df["Crossover Scheme"] == self.crossover_scheme) & (df["Crossover Probability"] == self.crossover_prob) & (df["Mutation Scheme"] == self.mutation_scheme) & (df["Mutation Probability"] == self.mutation_prob) & (df["Replacement Scheme"] == self.replacement_scheme) & (df["Algorithm"] == self.__class__.__name__) & (df["Dataset"] == self.dataset_name) & (df["MaxEvaluations"] == self.max_evaluations) ] def stop_criterion(self, num_generations, num_evaluations) -> bool: if self.max_evaluations == 0: return num_generations >= self.max_generations else: return num_evaluations >= self.max_evaluations def reset(self) -> None: super().reset() self.best_generation_avgValue = 0 self.best_generation = 0 self.num_evaluations = 0 self.num_generations = 0 self.best_individual = None self.population = None @abstractmethod def add_evaluation(self, new_population): pass def generate_starting_population(self) -> List[Solution]: """Method that generates a starting population of 
solutions """ population = [] for i in range(0, self.population_length): individual = Solution(self.dataset, None, uniform=True) population.append(individual) return population # LAST GENERATION ENHANCE------------------------------------------------------------------ def calculate_last_generation_with_enhance(self, best_generation, best_generation_avgValue, num_generation, population) -> Tuple[int, float]: bestAvgValue = metrics.calculate_bestAvgValue(population) if bestAvgValue > best_generation_avgValue: best_generation_avgValue = bestAvgValue best_generation = num_generation return best_generation, best_generation_avgValue def crossover_one_point(self, population: List[Solution]) -> List[Solution]: """Default crossover operator """ new_population = [] i = 0 while i < len(population): # if last element is alone-> add it if i == len(population) - 1: new_population.append(population[i]) else: # pair 2 parents -> crossover or add them and jump 1 index extra prob = random.random() if prob < self.crossover_prob: offsprings = self.crossover_aux_one_point( population[i], population[i+1]) new_population.extend(offsprings) else: new_population.extend( [population[i], population[i+1]]) i += 1 i += 1 return new_population def crossover_aux_one_point(self, parent1: Solution, parent2: Solution) -> Tuple[Solution, Solution]: """Crossover aux method """ chromosome_length = len(parent1.selected) # index aleatorio del punto de division para el cruce crossover_point = random.randint(1, chromosome_length - 1) offspring_genes1 = np.concatenate((parent1.selected[0:crossover_point], parent2.selected[crossover_point:])) offspring_genes2 = np.concatenate((parent2.selected[0:crossover_point], parent1.selected[crossover_point:])) offspring1 = Solution( self.dataset, None, selected=offspring_genes1) offspring2 = Solution( self.dataset, None, selected=offspring_genes2) return offspring1, offspring2 def mutation_flip1bit(self, population: List[Solution]) -> List[Solution]: """Default 
mutation operator """ new_population = [] new_population.extend(population) for individual in new_population: prob = random.random() if prob < self.mutation_prob: chromosome_length = len(individual.selected) mutation_point = random.randint(0, chromosome_length - 1) if individual.selected[mutation_point] == 0: individual.set_bit(mutation_point, 1) else: individual.set_bit(mutation_point, 0) return new_population def mutation_flipeachbit(self, population: List[Solution]) -> List[Solution]: """Default mutation operator """ new_population = [] new_population.extend(population) for individual in new_population: for gen_index in range(len(individual.selected)): prob = random.random() if prob < self.mutation_prob: if individual.selected[gen_index] == 0: individual.set_bit(gen_index, 1) else: individual.set_bit(gen_index, 0) return new_population
42.346535
161
0.629881
f3a3df5e6c7c8f5c1dd94e55cfac5454f052c1b9
14,385
py
Python
common/determined_common/experimental/checkpoint/_checkpoint.py
sean-adler/determined
5bdc0e33ef245b1168aa3835c3867b8f6532f2ae
[ "Apache-2.0" ]
1
2021-03-29T13:39:45.000Z
2021-03-29T13:39:45.000Z
common/determined_common/experimental/checkpoint/_checkpoint.py
sean-adler/determined
5bdc0e33ef245b1168aa3835c3867b8f6532f2ae
[ "Apache-2.0" ]
null
null
null
common/determined_common/experimental/checkpoint/_checkpoint.py
sean-adler/determined
5bdc0e33ef245b1168aa3835c3867b8f6532f2ae
[ "Apache-2.0" ]
null
null
null
import enum import json import pathlib import shutil from typing import Any, Dict, List, Optional, cast from determined_common import api, constants, storage from determined_common.storage import shared class ModelFramework(enum.Enum): PYTORCH = 1 TENSORFLOW = 2 class CheckpointState(enum.Enum): UNSPECIFIED = 0 ACTIVE = 1 COMPLETED = 2 ERROR = 3 DELETED = 4 class Checkpoint(object): """ A ``Checkpoint`` represents a trained model. This class provides helper functionality for downloading checkpoints to local storage and loading checkpoints into memory. The :class:`~determined.experimental.TrialReference` class contains methods that return instances of this class. Arguments: uuid (string): UUID of the checkpoint. experiment_config (dict): The configuration of the experiment that created the checkpoint. experiment_id (int): The ID of the experiment that created the checkpoint. trial_id (int): The ID of the trial that created the checkpoint. hparams (dict): Hyperparameter values for the trial that created the checkpoint. batch_number (int): Batch number during training when the checkpoint was taken. start_time (string): Timestamp when the checkpoint began being saved to persistent storage. end_time (string): Timestamp when the checkpoint completed being saved to persistent storage. resources (dict): Dictionary of file paths to file sizes (in bytes) of all files in the checkpoint. validation (dict): Dictionary of validation metric names to their values. framework (string, optional): The framework of the trial i.e., tensorflow, torch. format (string, optional): The format of the checkpoint i.e., h5, saved_model, pickle. determined_version (str, optional): The version of Determined the checkpoint was taken with. metadata (dict, optional): User defined metadata associated with the checkpoint. master (string, optional): The address of the Determined master instance. 
""" def __init__( self, uuid: str, experiment_config: Dict[str, Any], experiment_id: int, trial_id: int, hparams: Dict[str, Any], batch_number: int, start_time: str, end_time: str, resources: Dict[str, Any], validation: Dict[str, Any], metadata: Dict[str, Any], determined_version: Optional[str] = None, framework: Optional[str] = None, format: Optional[str] = None, model_version: Optional[int] = None, model_name: Optional[str] = None, master: Optional[str] = None, ): self.uuid = uuid self.experiment_config = experiment_config self.experiment_id = experiment_id self.trial_id = trial_id self.hparams = hparams self.batch_number = batch_number self.start_time = start_time self.end_time = end_time self.resources = resources self.validation = validation self.framework = framework self.format = format self.determined_version = determined_version self.model_version = model_version self.model_name = model_name self.metadata = metadata self._master = master def _find_shared_fs_path(self) -> pathlib.Path: """Attempt to find the path of the checkpoint if being configured to shared fs. This function assumes the host path of the shared fs exists. """ host_path = self.experiment_config["checkpoint_storage"]["host_path"] storage_path = self.experiment_config["checkpoint_storage"].get("storage_path") potential_paths = [ pathlib.Path(shared._full_storage_path(host_path, storage_path), self.uuid), pathlib.Path( shared._full_storage_path( host_path, storage_path, constants.SHARED_FS_CONTAINER_PATH ), self.uuid, ), ] for path in potential_paths: if path.exists(): return path raise FileNotFoundError( "Checkpoint {} not found in {}. This error could be caused by not having " "the same shared file system mounted on the local machine as the experiment " "checkpoint storage configuration.".format(self.uuid, potential_paths) ) def download(self, path: Optional[str] = None) -> str: """ Download checkpoint to local storage. 
Arguments: path (string, optional): Top level directory to place the checkpoint under. If this parameter is not set, the checkpoint will be downloaded to ``checkpoints/<checkpoint_uuid>`` relative to the current working directory. """ if path is not None: local_ckpt_dir = pathlib.Path(path) else: local_ckpt_dir = pathlib.Path("checkpoints", self.uuid) # Backward compatibility: we used MLflow's MLmodel checkpoint format for # serializing pytorch models. We now use our own format that contains a # metadata.json file. We are checking for checkpoint existence by # looking for both checkpoint formats in the output directory. potential_metadata_paths = [ local_ckpt_dir.joinpath(f) for f in ["metadata.json", "MLmodel"] ] if not any(p.exists() for p in potential_metadata_paths): # If the target directory doesn't already appear to contain a # checkpoint, attempt to fetch one. if self.experiment_config["checkpoint_storage"]["type"] == "shared_fs": src_ckpt_dir = self._find_shared_fs_path() shutil.copytree(str(src_ckpt_dir), str(local_ckpt_dir)) else: local_ckpt_dir.mkdir(parents=True, exist_ok=True) manager = storage.build( self.experiment_config["checkpoint_storage"], container_path=None, ) if not isinstance(manager, (storage.S3StorageManager, storage.GCSStorageManager)): raise AssertionError( "Downloading from S3 or GCS requires the experiment to be configured with " "S3 or GCS checkpointing, {} found instead".format( self.experiment_config["checkpoint_storage"]["type"] ) ) metadata = storage.StorageMetadata.from_json( {"uuid": self.uuid, "resources": self.resources} ) manager.download(metadata, str(local_ckpt_dir)) if not local_ckpt_dir.joinpath("metadata.json").exists(): with open(local_ckpt_dir.joinpath("metadata.json"), "w") as f: json.dump( { "determined_version": self.determined_version, "framework": self.framework, "format": self.format, "experiment_id": self.experiment_id, "trial_id": self.trial_id, "hparams": self.hparams, "experiment_config": 
self.experiment_config, "metadata": self.metadata, }, f, indent=2, ) return str(local_ckpt_dir) def load( self, path: Optional[str] = None, tags: Optional[List[str]] = None, **kwargs: Any ) -> Any: """ Loads a Determined checkpoint into memory. If the checkpoint is not present on disk it will be downloaded from persistent storage. Arguments: path (string, optional): Top level directory to load the checkpoint from. (default: ``checkpoints/<UUID>``) tags (list string, optional): Only relevant for TensorFlow SavedModel checkpoints. Specifies which tags are loaded from the TensorFlow SavedModel. See documentation for `tf.compat.v1.saved_model.load_v2 <https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/saved_model/load_v2>`_. kwargs: Only relevant for PyTorch checkpoints. The keyword arguments will be applied to ``torch.load``. See documentation for `torch.load <https://pytorch.org/docs/stable/torch.html?highlight=torch%20load#torch.load>`_. """ ckpt_path = self.download(path) return Checkpoint.load_from_path(ckpt_path, tags=tags, **kwargs) def add_metadata(self, metadata: Dict[str, Any]) -> None: """ Adds user-defined metadata to the checkpoint. The ``metadata`` argument must be a JSON-serializable dictionary. If any keys from this dictionary already appear in the checkpoint metadata, the corresponding dictionary entries in the checkpoint are replaced by the passed-in dictionary values. Arguments: metadata (dict): Dictionary of metadata to add to the checkpoint. """ for key, val in metadata.items(): self.metadata[key] = val if self._master: api.post( self._master, "/api/v1/checkpoints/{}/metadata".format(self.uuid), body={"checkpoint": {"metadata": self.metadata}}, ) def remove_metadata(self, keys: List[str]) -> None: """ Removes user-defined metadata from the checkpoint. Any top-level keys that appear in the ``keys`` list are removed from the checkpoint. Arguments: keys (List[string]): Top-level keys to remove from the checkpoint metadata. 
""" for key in keys: if key in self.metadata: del self.metadata[key] if self._master: api.post( self._master, "/api/v1/checkpoints/{}/metadata".format(self.uuid), body={"checkpoint": {"metadata": self.metadata}}, ) @staticmethod def load_from_path(path: str, tags: Optional[List[str]] = None, **kwargs: Any) -> Any: """ Loads a Determined checkpoint from a local file system path into memory. If the checkpoint is a PyTorch model, a ``torch.nn.Module`` is returned. If the checkpoint contains a TensorFlow SavedModel, a TensorFlow autotrackable object is returned. Arguments: path (string): Local path to the checkpoint directory. tags (list string, optional): Only relevant for TensorFlow SavedModel checkpoints. Specifies which tags are loaded from the TensorFlow SavedModel. See documentation for `tf.compat.v1.saved_model.load_v2 <https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/saved_model/load_v2>`_. """ checkpoint_dir = pathlib.Path(path) metadata = Checkpoint.parse_metadata(checkpoint_dir) checkpoint_type = Checkpoint.get_type(metadata) if checkpoint_type == ModelFramework.PYTORCH: import determined_common.experimental.checkpoint._torch return determined_common.experimental.checkpoint._torch.load_model( checkpoint_dir, metadata, **kwargs ) elif checkpoint_type == ModelFramework.TENSORFLOW: import determined_common.experimental.checkpoint._tf return determined_common.experimental.checkpoint._tf.load_model( checkpoint_dir, metadata, tags=tags ) raise AssertionError("Unknown checkpoint format at {}".format(checkpoint_dir)) @staticmethod def parse_metadata(directory: pathlib.Path) -> Dict[str, Any]: metadata_path = directory.joinpath("metadata.json") with metadata_path.open() as f: metadata = json.load(f) return cast(Dict[str, Any], metadata) @staticmethod def get_type(metadata: Dict[str, Any]) -> ModelFramework: if "framework" in metadata: if metadata["framework"].startswith("torch"): return ModelFramework.PYTORCH if 
metadata["framework"].startswith("tensorflow"): return ModelFramework.TENSORFLOW # Older metadata layout contained torch_version and tensorflow_version # as keys. Eventually, we should drop support for the older format. if "torch_version" in metadata: return ModelFramework.PYTORCH elif "tensorflow_version" in metadata: return ModelFramework.TENSORFLOW raise AssertionError("Unknown checkpoint format") def __repr__(self) -> str: if self.model_name is not None: return "Checkpoint(uuid={}, trial_id={}, model={}, version={})".format( self.uuid, self.trial_id, self.model_name, self.model_version ) return "Checkpoint(uuid={}, trial_id={})".format(self.uuid, self.trial_id) @staticmethod def from_json(data: Dict[str, Any], master: Optional[str] = None) -> "Checkpoint": validation = { "metrics": data.get("metrics", {}), "state": data.get("validation_state", None), } return Checkpoint( data["uuid"], data.get("experiment_config", data.get("experimentConfig")), data.get("experiment_id", data.get("experimentId")), data.get("trial_id", data.get("trialId")), data["hparams"], data.get("batch_number", data.get("batchNumber")), data.get("start_time", data.get("startTime")), data.get("end_time", data.get("endTime")), data["resources"], validation, data.get("metadata", {}), framework=data.get("framework"), format=data.get("format"), determined_version=data.get("determined_version", data.get("determinedVersion")), model_version=data.get("model_version"), model_name=data.get("model_name"), master=master, )
41.575145
101
0.610914
c3c7a67e4abc3344c9fe430f360e08a16a95b1f9
6,017
py
Python
utils.py
XiaoPanX/FastLatticeSuperpixels
9fb7d9680b07b155cb33aa53558e050a923f11c2
[ "MIT" ]
null
null
null
utils.py
XiaoPanX/FastLatticeSuperpixels
9fb7d9680b07b155cb33aa53558e050a923f11c2
[ "MIT" ]
null
null
null
utils.py
XiaoPanX/FastLatticeSuperpixels
9fb7d9680b07b155cb33aa53558e050a923f11c2
[ "MIT" ]
null
null
null
#!/usr/bin/env python """ Copyright (C) 2018 NVIDIA Corporation. All rights reserved. Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). Author: Varun Jampani """ import numpy as np from init_caffe import * import matplotlib.pyplot as plt from scipy.ndimage import zoom from scipy import interpolate from skimage.segmentation import mark_boundaries global g_rel_label global g_spix_index_init global g_new_spix_index def get_rand_scale_factor(): rand_factor = np.random.normal(1, 0.75) s_factor = np.min((3.0, rand_factor)) s_factor = np.max((0.75, s_factor)) return s_factor def initialize_net_weight(net): for param_key in net.params.keys(): # Initialize neighborhood concatenator (convolution layer) if param_key.startswith('concat_spixel_feat'): num_channels = int(param_key.rsplit('_', 1)[-1]) for j in range(num_channels): for i in range(9): net.params[param_key][0].data[9 * j + i, :, i / 3, i % 3] = 1.0 # Initialize pixel feature concatenator if param_key.startswith('img_concat_pixel_feat'): net.params[param_key][0].data[:] = 1.0 # Initialize spixel feature concatenator if param_key == 'repmat_spixel_feat': net.params['repmat_spixel_feat'][0].data[:] = 1.0 # Initialize pixel-spixel distance computation layer if param_key.startswith('pixel_spixel_dist_conv'): num_channels = int(param_key.rsplit('_', 1)[-1]) for j in range(9): for i in range(num_channels): net.params[param_key][0].data[j, 9 * i + j, 0, 0] = 1.0 # Initialize scale spixel feature computation layer if param_key.startswith('scale_spixel_feat'): num_channels = int(param_key.rsplit('_', 1)[-1]) for j in range(num_channels): for i in range(num_channels): net.params['scale_spixel_feat'][0].data[j, 5 * i + j, 0, 0] = 1.0 return net def convert_rel_to_spixel_label(rel_label, spix_index, num_spixels_h, num_spixels_w): height = rel_label.shape[0] width = rel_label.shape[1] num_spixels = num_spixels_h * num_spixels_w for i in range(height): for j in range(width): 
r_label = rel_label[i, j] r_label_h = r_label / 3 - 1 r_label_w = r_label % 3 - 1 spix_idx_h = spix_index[i, j] + r_label_h * num_spixels_w if spix_idx_h < num_spixels and spix_idx_h > -1: spix_idx_w = spix_idx_h + r_label_w else: spix_idx_w = spix_index[i,j] if spix_idx_w < num_spixels and spix_idx_w > -1: spix_index[i, j] = spix_idx_w return spix_index def compute_av_image(Oralimge, spix_index, avcolor): height = Oralimge.shape[0] width = Oralimge.shape[1] spix_label = spix_index.astype(int) sp_h = avcolor.shape[0] sp_w = avcolor.shape[1] num_label = sp_h * sp_w randC1 = np.random.rand(sp_h) randC2 = np.random.rand(sp_h) randC3 = np.random.rand(sp_h) #print(randC[1]) Oralimge1 = Oralimge / 255. for i in range(height): for j in range(width): r_label = spix_label[i, j] av_hi = r_label / sp_w av_hj = r_label % sp_w """ if(av_hi%2==0): if(av_hj%2==0): Oralimge1[i, j, 0] = 0 Oralimge1[i, j, 1] = 0 Oralimge1[i, j, 2] = 0 else: Oralimge1[i, j, 0] = 1 Oralimge1[i, j, 1] = 1 Oralimge1[i, j, 2] = 1 else: if (av_hj % 2 == 1): Oralimge1[i, j, 0] = 0 Oralimge1[i, j, 1] = 0 Oralimge1[i, j, 2] = 0 else: Oralimge1[i, j, 0] = 1 Oralimge1[i, j, 1] = 1 Oralimge1[i, j, 2] = 1 """ # print(Oralimge [i, j, :]) #Oralimge1 [i, j, :] = avcolor[av_hi, av_hj, :] Oralimge1[i, j, 0] = randC1[av_hi] Oralimge1[i, j, 1] = randC2[av_hi] Oralimge1[i, j, 2] = randC3[av_hi] return Oralimge1 def visualize_spixels(given_img, spix_index): spixel_image = mark_boundaries(given_img / 255., spix_index.astype(int), color = (1,1,1)) plt.imshow(spixel_image); plt.show(); def get_spixel_image(given_img, spix_index): #spixel_image = mark_boundaries(given_img / 255., spix_index.astype(int), color = (1,1,1)) spixel_image = mark_boundaries(given_img, spix_index.astype(int), color=(1, 1, 1)) return spixel_image def get_spixel_init(num_spixels, img_width, img_height): k = num_spixels k_w = int(np.floor(np.sqrt(k * img_width / img_height))) k_h = int(np.floor(np.sqrt(k * img_height / img_width))) spixel_height = 
img_height / (1. * k_h) spixel_width = img_width / (1. * k_w) h_coords = np.arange(-spixel_height / 2., img_height + spixel_height - 1, spixel_height) w_coords = np.arange(-spixel_width / 2., img_width + spixel_width - 1, spixel_width) spix_values = np.int32(np.arange(0, k_w * k_h).reshape((k_h, k_w))) spix_values = np.pad(spix_values, 1, 'symmetric') f = interpolate.RegularGridInterpolator((h_coords, w_coords), spix_values, method='nearest') all_h_coords = np.arange(0, img_height, 1) all_w_coords = np.arange(0, img_width, 1) all_grid = np.array(np.meshgrid(all_h_coords, all_w_coords, indexing = 'ij')) all_points = np.reshape(all_grid, (2, img_width * img_height)).transpose() spixel_initmap = f(all_points).reshape((img_height,img_width)) feat_spixel_initmap = spixel_initmap return [spixel_initmap, feat_spixel_initmap, k_w, k_h]
35.394118
105
0.591158
b665d82edd2876f6b17600919a314e4b90abfb94
2,307
py
Python
src/api-service/__app__/timer_workers/__init__.py
andrew-slutsky/onefuzz
933fe6850c902544a6296cf05b83d235137a61a5
[ "MIT" ]
1
2020-10-27T08:05:57.000Z
2020-10-27T08:05:57.000Z
src/api-service/__app__/timer_workers/__init__.py
andrew-slutsky/onefuzz
933fe6850c902544a6296cf05b83d235137a61a5
[ "MIT" ]
1
2021-02-15T00:38:32.000Z
2021-02-15T00:38:32.000Z
src/api-service/__app__/timer_workers/__init__.py
anslutsk/onefuzz
933fe6850c902544a6296cf05b83d235137a61a5
[ "MIT" ]
null
null
null
#!/usr/bin/env python # # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. import logging import azure.functions as func from onefuzztypes.enums import NodeState, PoolState from ..onefuzzlib.autoscale import autoscale_pool from ..onefuzzlib.events import get_events from ..onefuzzlib.orm import process_state_updates from ..onefuzzlib.workers.nodes import Node from ..onefuzzlib.workers.pools import Pool from ..onefuzzlib.workers.scalesets import Scaleset def process_scaleset(scaleset: Scaleset) -> None: logging.debug("checking scaleset for updates: %s", scaleset.scaleset_id) scaleset.update_configs() # if the scaleset is touched during cleanup, don't continue to process it if scaleset.cleanup_nodes(): logging.debug("scaleset needed cleanup: %s", scaleset.scaleset_id) return process_state_updates(scaleset) def main(mytimer: func.TimerRequest, dashboard: func.Out[str]) -> None: # noqa: F841 # NOTE: Update pools first, such that scalesets impacted by pool updates # (such as shutdown or resize) happen during this iteration `timer_worker` # rather than the following iteration. pools = Pool.search() for pool in pools: if pool.state in PoolState.needs_work(): logging.info("update pool: %s (%s)", pool.pool_id, pool.name) process_state_updates(pool) if pool.state in PoolState.available() and pool.autoscale: autoscale_pool(pool) # NOTE: Nodes, and Scalesets should be processed in a consistent order such # during 'pool scale down' operations. This means that pools that are # scaling down will more likely remove from the same scalesets over time. # By more likely removing from the same scalesets, we are more likely to # get to empty scalesets, which can safely be deleted. 
Node.mark_outdated_nodes() nodes = Node.search_states(states=NodeState.needs_work()) for node in sorted(nodes, key=lambda x: x.machine_id): logging.info("update node: %s", node.machine_id) process_state_updates(node) scalesets = Scaleset.search() for scaleset in sorted(scalesets, key=lambda x: x.scaleset_id): process_scaleset(scaleset) events = get_events() if events: dashboard.set(events)
35.492308
85
0.722583
7957b024f727bae66dba0ad129ba0b19840a0b32
4,051
py
Python
backend/interdisciplinaire/settings.py
jacquant/interdisciplinaire
c2155517117c00d4eee9a0b34a59cda5983d0995
[ "MIT" ]
null
null
null
backend/interdisciplinaire/settings.py
jacquant/interdisciplinaire
c2155517117c00d4eee9a0b34a59cda5983d0995
[ "MIT" ]
null
null
null
backend/interdisciplinaire/settings.py
jacquant/interdisciplinaire
c2155517117c00d4eee9a0b34a59cda5983d0995
[ "MIT" ]
null
null
null
""" Django settings for interdisciplinaire project. Generated by 'django-admin startproject' using Django 2.2.6. For more information on this file, see https://docs.djangoproject.com/en/2.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.2/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) SETTINGS_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) BASE_DIR = os.path.dirname(SETTINGS_DIR) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = os.environ.get( "SECRET_KEY", "cmbj^cdp$%#ub*4exq8r2%ns=8r*o)3!-7p9tocfla8%*vk39k" ) # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = ["*"] # Application definition INSTALLED_APPS = [ # Django's app "django.contrib.admin", "django.contrib.auth", "django.contrib.contenttypes", "django.contrib.sessions", "django.contrib.messages", "django.contrib.staticfiles", # External Django's app "import_export", "rest_framework", "drf_yasg", # Internal Django's app "backend.climate_actions.apps.ClimateActionsConfig", ] MIDDLEWARE = [ "django.middleware.security.SecurityMiddleware", "whitenoise.middleware.WhiteNoiseMiddleware", "django.contrib.sessions.middleware.SessionMiddleware", "django.middleware.common.CommonMiddleware", "django.middleware.csrf.CsrfViewMiddleware", "django.contrib.auth.middleware.AuthenticationMiddleware", "django.contrib.messages.middleware.MessageMiddleware", "django.middleware.clickjacking.XFrameOptionsMiddleware", ] ROOT_URLCONF = "backend.interdisciplinaire.urls" TEMPLATES = [ { "BACKEND": "django.template.backends.django.DjangoTemplates", "DIRS": ["dist"], "APP_DIRS": True, "OPTIONS": { "context_processors": [ "django.template.context_processors.debug", 
"django.template.context_processors.request", "django.contrib.auth.context_processors.auth", "django.contrib.messages.context_processors.messages", ], }, }, ] WSGI_APPLICATION = "backend.interdisciplinaire.wsgi.application" # Database # https://docs.djangoproject.com/en/2.2/ref/settings/#databases DATABASES = { "default": { "ENGINE": "djongo", "NAME": os.environ.get("DB_NAME", "interdisciplinaire"), "HOST": os.environ.get("DB_IP", "127.0.0.1:27017"), } } CACHES = { "default": { "BACKEND": "django_redis.cache.RedisCache", "LOCATION": "redis://redis:6379/0", "OPTIONS": {"CLIENT_CLASS": "django_redis.client.DefaultClient", }, } } SESSION_ENGINE = "django.contrib.sessions.backends.cache" SESSION_CACHE_ALIAS = "default" # Password validation # https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator", }, {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator", }, {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator", }, {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator", }, ] # Internationalization # https://docs.djangoproject.com/en/2.2/topics/i18n/ LANGUAGE_CODE = "fr" TIME_ZONE = "Europe/Brussels" USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.2/howto/static-files/ MIDDLEWARE_CLASSES = ("whitenoise.middleware.WhiteNoiseMiddleware",) STATIC_URL = "/static/" STATIC_ROOT = os.path.join(BASE_DIR, "dist", "static") STATICFILES_DIRS = [] IMPORT_EXPORT_USE_TRANSACTIONS = False STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
29.569343
91
0.709948
c9fa00bf9e2ba383dc822ce2c94b8108812f92cc
1,766
py
Python
src/urls.py
Midburn/midburn-profiles-django
e124a617986931baed0b17f5d7a1ba6c7d5528e9
[ "MIT" ]
null
null
null
src/urls.py
Midburn/midburn-profiles-django
e124a617986931baed0b17f5d7a1ba6c7d5528e9
[ "MIT" ]
20
2019-10-05T13:23:47.000Z
2022-03-11T23:37:37.000Z
src/urls.py
Midburn/midburn-profiles-django
e124a617986931baed0b17f5d7a1ba6c7d5528e9
[ "MIT" ]
1
2019-09-30T11:40:45.000Z
2019-09-30T11:40:45.000Z
from django.urls import path from django.views.generic import TemplateView from rest_auth.views import LoginView, LogoutView, UserDetailsView, PasswordChangeView from .auth.urls import users_api_router from rest_framework.documentation import include_docs_urls from django.conf.urls import include, url from django.contrib import admin from rest_framework_simplejwt.views import ( TokenObtainPairView, TokenVerifyView, ) rest_auth_url_patterns = [ # URLs that do not require a session or valid token url(r'^login/$', LoginView.as_view(), name='rest_login'), # URLs that require a user to be logged in with a valid session / token. url(r'^logout/$', LogoutView.as_view(), name='rest_logout'), url(r'^user/$', UserDetailsView.as_view(), name='rest_user_details'), url(r'^password/change/$', PasswordChangeView.as_view(), name='rest_password_change'), ] urlpatterns = [ path('tech_admin/', admin.site.urls), url(r'^api/auth/', include(rest_auth_url_patterns)), url(r'^api/auth/registration/', include('rest_auth.registration.urls')), url(r'^api/auth/password/reset/', include('django_rest_passwordreset.urls', namespace='password_reset')), path('api/auth/token/', TokenObtainPairView.as_view(), name='token_obtain_pair'), path('api/auth/token/verify/', TokenVerifyView.as_view(), name='token_verify'), url(r'^docs/', include_docs_urls(title='Midburn API', public=False)), url(r'^api/v1/', include(users_api_router.urls)), #required for allauth flow url(r'^account-email-verification-sent/$', TemplateView.as_view(), name='account_email_verification_sent'), url(r'^account-confirm-email/(?P<key>[-:\w]+)/$', TemplateView.as_view(), name='account_confirm_email'), ]
43.073171
109
0.727067
18b293751d75e5ecd346d64bdde83075f8ace295
1,533
py
Python
src/graficos/graficos_tiempo_variando_Ryb.py
ilebrero/Metodos-TP1
9b06eedbb51c6cf14b87ec84af2fa8891c637a59
[ "MIT" ]
null
null
null
src/graficos/graficos_tiempo_variando_Ryb.py
ilebrero/Metodos-TP1
9b06eedbb51c6cf14b87ec84af2fa8891c637a59
[ "MIT" ]
null
null
null
src/graficos/graficos_tiempo_variando_Ryb.py
ilebrero/Metodos-TP1
9b06eedbb51c6cf14b87ec84af2fa8891c637a59
[ "MIT" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- import numpy as np import matplotlib.pyplot as plt from math import log #numeros del eje x x = [50,100,150,200,250,300,350,400,450,500,550,600,650,700,750,800,850,900,950,1000] # valores primera funcion y_gauss = [] y_LU = [] y_cholesky = [] #valores primer funcion #f = open('../resultadosTiempos/tiemposGaussVariandoB.txt', 'r') #for i in range(0,20): # y_gauss.append(float(f.readline()[:-1])) #valores segunda funcion f = open('../resultadosTiempos/tiemposLUVariandoRyM.txt', 'r') for i in range(0,20): y_LU.append(float(f.readline()[:-1])) f = open('../resultadosTiempos/tiemposCholeskyVariandoRyM.txt', 'r') for i in range(0,20): y_cholesky.append(float(f.readline()[:-1])) #print len(y_gauss) #print len(y_LU) #print len(y_cholesky) #print len(x) #print len(y_cholesky) #plt.plot(x,y_gauss,'ro', color='blue', label="Gauss variando vector resultado") plt.plot(x,y_LU,'ro', color='green', label="LU variando vector resultado") plt.plot(x,y_cholesky,'ro', color='red', label="Cholesky variando vector resultado") yfunction = [] a = 50 for m in range(0,20): if m == 0: yfunction.append(0) else: yfunction.append(0.0000000191721*a*a*a) a += 50 #sin 'ro' lo plotea como una funcion comun, continua plt.plot(x,yfunction, color='purple', label='T(n)=k*(n^3)',linewidth=3) plt.legend(bbox_to_anchor=(0.70,1)) #nombre que va en el eje x plt.xlabel(u"cantidad de soluciones") #nombre que va en el eje y plt.ylabel("Tiempo(segundos)") plt.show()
25.131148
85
0.691455
f5bc04445619c15b34265816ffe5d08281c67d21
5,728
py
Python
flask_mpesa/tests/test_base.py
allansifuna/Flask-Mpesa
a9f1d10a8d3d9638b00e1c48254af4f032c2ea0e
[ "MIT" ]
6
2019-11-28T10:56:49.000Z
2021-03-25T19:42:33.000Z
flask_mpesa/tests/test_base.py
allansifuna/Flask-Mpesa
a9f1d10a8d3d9638b00e1c48254af4f032c2ea0e
[ "MIT" ]
1
2020-10-15T15:40:00.000Z
2020-10-18T21:42:10.000Z
flask_mpesa/tests/test_base.py
allansifuna/Flask-Mpesa
a9f1d10a8d3d9638b00e1c48254af4f032c2ea0e
[ "MIT" ]
2
2020-01-23T07:12:31.000Z
2020-11-10T09:46:50.000Z
import pytest from .base import mp @pytest.mark.usefixtures("mock_fixture_test_auth") def test_auth(auth): auth = mp.C2B.authenticate() assert auth is not None @pytest.mark.usefixtures("mock_fixture_test_b2c") def test_b2c(b2c): data = {"initiator_name": "testapi364", "security_credential": "TziD/ydlT52Fm6SOH1ebrzUFwy3cP6OGplsrWja+X/1roQy2AzMsj5QGuqu9O+IFR1E6l16Jm87tg4bhnxoIhAufCEWusQI1wJZ6YLzpN0cHZAY/8SN1JfHdgEkrmksAY14pejHyfntyLT9Sg51kBjaj6J7/2+gHl2e64klnJAhlfPJWxC18zwEzsg58zFmypcovPPB6MHkPLyHQNFbu4oXC0e2gkZrIAWXTNN7PpYt4m/w39s5txU7/6P7hTzXgYAgqk4kxfPBIBeEmKhH5tSGxMD+xnSpZIXLovFgopexq8S76pmdLMjr2CdR60GlwXnAPnKJ5U9CIxRRewuoksQ==", "amount": "1000", "command_id": "SalaryPayment", "party_a": "600977", "party_b": "254708374149", "remarks": "Just a Transaction", "queue_timeout_url": "https://testurl.com/b2b-timeout", "result_url": "https://testurl.com/b2b-result", "occassion": "Test123" } resp = mp.B2C.transact(**data) assert dict(resp).keys() == b2c.keys() @pytest.mark.usefixtures("mock_fixture_test_express_sim") def test_express_sim(express_sim): data = { "business_shortcode": 174379, "passcode": "bfb279f9aa9bdbcf158e97dd71a467cd2e0c893059b10f78e6b72ada1ed2c919", "amount": "1", "phone_number": "254713812939", "callback_url": "https://testurl.com/b2b-result", "reference_code": "DSC", "description": "first test" } resp = mp.MpesaExpress.stk_push(**data) assert dict(resp).keys() == express_sim.keys() @pytest.mark.usefixtures("mock_fixture_test_express_query") def test_express_query(express_query): data = { "business_shortcode": 174379, "passcode": "bfb279f9aa9bdbcf158e97dd71a467cd2e0c893059b10f78e6b72ada1ed2c919", "checkout_request_id": "ws_CO_021020210904344355" } resp = mp.MpesaExpress.query(**data) assert dict(resp).keys() == express_query.keys() @pytest.mark.usefixtures("mock_fixture_test_c2b_reg") def test_c2b_reg(c2b_reg): data = {"shortcode": "600364", "response_type": "Completed", "confirmation_url": "https://testurl.com/confirmation", "validation_url": 
"https://testurl.com/validation" } resp = mp.C2B.register(**data) assert dict(resp).keys() == c2b_reg.keys() @pytest.mark.usefixtures("mock_fixture_test_c2b_sim") def test_c2b_sim(c2b_sim): data = {"shortcode": "600364", "command_id": "CustomerPayBillOnline", "amount": "123", "msisdn": "254708374149", "bill_ref_number": "account" } resp = mp.C2B.simulate(**data) assert dict(resp).keys() == c2b_sim.keys() @pytest.mark.usefixtures("mock_fixture_test_bal") def test_bal(bal): data = {"initiator": "testapi364", "security_credential": "TziD/ydlT52Fm6SOH1ebrzUFwy3cP6OGplsrWja+X/1roQy2AzMsj5QGuqu9O+IFR1E6l16Jm87tg4bhnxoIhAufCEWusQI1wJZ6YLzpN0cHZAY/8SN1JfHdgEkrmksAY14pejHyfntyLT9Sg51kBjaj6J7/2+gHl2e64klnJAhlfPJWxC18zwEzsg58zFmypcovPPB6MHkPLyHQNFbu4oXC0e2gkZrIAWXTNN7PpYt4m/w39s5txU7/6P7hTzXgYAgqk4kxfPBIBeEmKhH5tSGxMD+xnSpZIXLovFgopexq8S76pmdLMjr2CdR60GlwXnAPnKJ5U9CIxRRewuoksQ==", "command_id": "AccountBalance", "party_a": "600364", "identifier_type": "4", "remarks": "Just a Transaction", "queue_timeout_url": "https://testurl.com/b2b-timeout", "result_url": "https://testurl.com/b2b-result" } resp = mp.Balance.get_balance(**data) assert dict(resp).keys() == bal.keys() @pytest.mark.usefixtures("mock_fixture_test_t_status") def test_t_status(t_status): data = {"initiator": "testapi364", "transaction_id": "PJ251HK6YH", "party_a": "600364", "security_credential": "TziD/ydlT52Fm6SOH1ebrzUFwy3cP6OGplsrWja+X/1roQy2AzMsj5QGuqu9O+IFR1E6l16Jm87tg4bhnxoIhAufCEWusQI1wJZ6YLzpN0cHZAY/8SN1JfHdgEkrmksAY14pejHyfntyLT9Sg51kBjaj6J7/2+gHl2e64klnJAhlfPJWxC18zwEzsg58zFmypcovPPB6MHkPLyHQNFbu4oXC0e2gkZrIAWXTNN7PpYt4m/w39s5txU7/6P7hTzXgYAgqk4kxfPBIBeEmKhH5tSGxMD+xnSpZIXLovFgopexq8S76pmdLMjr2CdR60GlwXnAPnKJ5U9CIxRRewuoksQ==", "identifier_type": "4", "remarks": "Enterance Fee", "queue_timeout_url": "https://3e61-197-156-137-143.ngrok.io/b2b-timeout", "result_url": "https://3e61-197-156-137-143.ngrok.io/b2b-result", "occassion": "DSC-party_a" } resp = mp.TransactionStatus.check_transaction_status(**data) 
assert dict(resp).keys() == t_status.keys() @pytest.mark.usefixtures("mock_fixture_test_b2b") def test_b2b(b2b): data = {"initiator": "testapi364", "security_credential": "TziD/ydlT52Fm6SOH1ebrzUFwy3cP6OGplsrWja+X/1roQy2AzMsj5QGuqu9O+IFR1E6l16Jm87tg4bhnxoIhAufCEWusQI1wJZ6YLzpN0cHZAY/8SN1JfHdgEkrmksAY14pejHyfntyLT9Sg51kBjaj6J7/2+gHl2e64klnJAhlfPJWxC18zwEzsg58zFmypcovPPB6MHkPLyHQNFbu4oXC0e2gkZrIAWXTNN7PpYt4m/w39s5txU7/6P7hTzXgYAgqk4kxfPBIBeEmKhH5tSGxMD+xnSpZIXLovFgopexq8S76pmdLMjr2CdR60GlwXnAPnKJ5U9CIxRRewuoksQ==", "amount": "100", "command_id": "BusinessPayBill", "sender_identifier_type": "4", "receiver_identifier_type": "4", "party_a": "600364", "party_b": "600000", "remarks": "Enterance Fee", "queue_timeout_url": "https://4ca0-197-156-137-168.ngrok.io/b2b-timeout", "result_url": "https://4ca0-197-156-137-168.ngrok.io/b2b-result", "account_reference": "DSC-party_a" } resp = mp.B2B.transact(**data) assert dict(resp).keys() == b2b.keys()
45.824
382
0.693087
8621bf8a127d84bf6379a018894102d72b5cd5b9
8,943
py
Python
demo/attn_visualize_demo.py
visaVita/mmaction2
2d7107a2d79b990b7facd3d6f7ac0b6ad974c153
[ "Apache-2.0" ]
null
null
null
demo/attn_visualize_demo.py
visaVita/mmaction2
2d7107a2d79b990b7facd3d6f7ac0b6ad974c153
[ "Apache-2.0" ]
null
null
null
demo/attn_visualize_demo.py
visaVita/mmaction2
2d7107a2d79b990b7facd3d6f7ac0b6ad974c153
[ "Apache-2.0" ]
null
null
null
# Copyright (c) OpenMMLab. All rights reserved. import argparse import json import random from collections import deque from operator import itemgetter import cv2 import mmcv import numpy as np import torch from mmcv import Config, DictAction from mmcv.parallel import collate, scatter from mmaction.apis import init_recognizer from mmaction.datasets.pipelines import Compose FONTFACE = cv2.FONT_HERSHEY_COMPLEX_SMALL FONTSCALE = 1 THICKNESS = 1 LINETYPE = 1 EXCLUED_STEPS = [ 'OpenCVInit', 'OpenCVDecode', 'DecordInit', 'DecordDecode', 'PyAVInit', 'PyAVDecode', 'RawFrameDecode' ] def parse_args(): parser = argparse.ArgumentParser( description='MMAction2 predict different labels in a long video demo') parser.add_argument('config', help='test config file path') parser.add_argument('checkpoint', help='checkpoint file/url') parser.add_argument('video_path', help='video file/url') parser.add_argument('label', help='label file') parser.add_argument('out_file', help='output result file in video/json') parser.add_argument( '--input-step', type=int, default=1, help='input step for sampling frames') parser.add_argument( '--device', type=str, default='cuda:0', help='CPU/CUDA device option') parser.add_argument( '--threshold', type=float, default=0.01, help='recognition score threshold') parser.add_argument( '--stride', type=float, default=0, help=('the prediction stride equals to stride * sample_length ' '(sample_length indicates the size of temporal window from ' 'which you sample frames, which equals to ' 'clip_len x frame_interval), if set as 0, the ' 'prediction stride is 1')) parser.add_argument( '--cfg-options', nargs='+', action=DictAction, default={}, help='override some settings in the used config, the key-value pair ' 'in xxx=yyy format will be merged into config file. 
For example, ' "'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'") parser.add_argument( '--label-color', nargs='+', type=int, default=(255, 255, 255), help='font color (B, G, R) of the labels in output video') parser.add_argument( '--msg-color', nargs='+', type=int, default=(128, 128, 128), help='font color (B, G, R) of the messages in output video') args = parser.parse_args() return args def show_results_video(result_queue, text_info, thr, msg, frame, video_writer, label_color=(255, 255, 255), msg_color=(128, 128, 128)): if len(result_queue) != 0: text_info = {} results = result_queue.popleft() for i, result in enumerate(results): selected_label, score = result if score < thr: break location = (0, 40 + i * 20) text = selected_label + ': ' + str(round(score, 2)) text_info[location] = text cv2.putText(frame, text, location, FONTFACE, FONTSCALE, label_color, THICKNESS, LINETYPE) elif len(text_info): for location, text in text_info.items(): cv2.putText(frame, text, location, FONTFACE, FONTSCALE, label_color, THICKNESS, LINETYPE) else: cv2.putText(frame, msg, (0, 40), FONTFACE, FONTSCALE, msg_color, THICKNESS, LINETYPE) video_writer.write(frame) return text_info def get_results_json(result_queue, text_info, thr, msg, ind, out_json): if len(result_queue) != 0: text_info = {} results = result_queue.popleft() for i, result in enumerate(results): selected_label, score = result if score < thr: break text_info[i + 1] = selected_label + ': ' + str(round(score, 2)) out_json[ind] = text_info elif len(text_info): out_json[ind] = text_info else: out_json[ind] = msg return text_info, out_json def show_results(model, data, label, args): frame_queue = deque(maxlen=args.sample_length) result_queue = deque(maxlen=1) cap = cv2.VideoCapture(args.video_path) num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) fps = cap.get(cv2.CAP_PROP_FPS) msg = 'Preparing 
action recognition ...' text_info = {} out_json = {} fourcc = cv2.VideoWriter_fourcc(*'mp4v') frame_size = (frame_width, frame_height) ind = 0 video_writer = None if args.out_file.endswith('.json') \ else cv2.VideoWriter(args.out_file, fourcc, fps, frame_size) prog_bar = mmcv.ProgressBar(num_frames) backup_frames = [] while ind < num_frames: ind += 1 prog_bar.update() ret, frame = cap.read() if frame is None: # drop it when encounting None continue backup_frames.append(np.array(frame)[:, :, ::-1]) if ind == args.sample_length: # provide a quick show at the beginning frame_queue.extend(backup_frames) backup_frames = [] elif ((len(backup_frames) == args.input_step and ind > args.sample_length) or ind == num_frames): # pick a frame from the backup # when the backup is full or reach the last frame chosen_frame = random.choice(backup_frames) backup_frames = [] frame_queue.append(chosen_frame) ret, scores = inference(model, data, args, frame_queue) if ret: num_selected_labels = min(len(label), 5) scores_tuples = tuple(zip(label, scores)) scores_sorted = sorted( scores_tuples, key=itemgetter(1), reverse=True) results = scores_sorted[:num_selected_labels] result_queue.append(results) text_info, out_json = get_results_json(result_queue, text_info, args.threshold, msg, ind, out_json) cap.release() cv2.destroyAllWindows() if args.out_file.endswith('.json'): with open(args.out_file, 'w') as js: json.dump(out_json, js) def inference(model, data, args, frame_queue): if len(frame_queue) != args.sample_length: # Do no inference when there is no enough frames return False, None cur_windows = list(np.array(frame_queue)) if data['img_shape'] is None: data['img_shape'] = frame_queue[0].shape[:2] cur_data = data.copy() cur_data['imgs'] = cur_windows cur_data = args.test_pipeline(cur_data) cur_data = collate([cur_data], samples_per_gpu=1) if next(model.parameters()).is_cuda: cur_data = scatter(cur_data, [args.device])[0] with torch.no_grad(): scores = model(return_loss=False, 
**cur_data)[0] if args.stride > 0: pred_stride = int(args.sample_length * args.stride) for _ in range(pred_stride): frame_queue.popleft() # for case ``args.stride=0`` # deque will automatically popleft one element return True, scores def main(): args = parse_args() args.device = torch.device(args.device) cfg = Config.fromfile(args.config) cfg.merge_from_dict(args.cfg_options) model = init_recognizer(cfg, args.checkpoint, device=args.device) data = dict(img_shape=None, modality='RGB', label=-1) with open(args.label, 'r') as f: label = [line.strip() for line in f] # prepare test pipeline from non-camera pipeline cfg = model.cfg sample_length = 0 pipeline = cfg.data.test.pipeline pipeline_ = pipeline.copy() for step in pipeline: if 'SampleFrames' in step['type']: sample_length = step['clip_len'] * step['num_clips'] data['num_clips'] = step['num_clips'] data['clip_len'] = step['clip_len'] pipeline_.remove(step) elif 'SampleCharadesFrames' in step['type']: sample_length = step['clip_len'] * step['num_clips'] data['num_clips'] = step['num_clips'] data['clip_len'] = step['clip_len'] pipeline_.remove(step) if step['type'] in EXCLUED_STEPS: # remove step to decode frames pipeline_.remove(step) test_pipeline = Compose(pipeline_) assert sample_length > 0 args.sample_length = sample_length args.test_pipeline = test_pipeline show_results(model, data, label, args) if __name__ == '__main__': main()
33.494382
78
0.60785
97af0f7771de7c26b9e1b6b57362eafe7df0b8a7
393
py
Python
Facebook/asgi.py
MkAngelo/Facebook-Retro
a863b31a55600db57661f8896ef9d7896e655bfc
[ "MIT" ]
null
null
null
Facebook/asgi.py
MkAngelo/Facebook-Retro
a863b31a55600db57661f8896ef9d7896e655bfc
[ "MIT" ]
null
null
null
Facebook/asgi.py
MkAngelo/Facebook-Retro
a863b31a55600db57661f8896ef9d7896e655bfc
[ "MIT" ]
null
null
null
""" ASGI config for Facebook project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Facebook.settings') application = get_asgi_application()
23.117647
78
0.78626
917a0259f6e76a7f8614ae3c6f249c7b83bff404
883
py
Python
utils/compilers/ConnectionCompiler/connection.py
willmexe/opuntiaOS
2d8e299627dc3431948892ad377556c918090000
[ "BSD-2-Clause" ]
241
2021-08-08T18:32:11.000Z
2022-03-27T20:16:59.000Z
utils/compilers/ConnectionCompiler/connection.py
willmexe/opuntiaOS
2d8e299627dc3431948892ad377556c918090000
[ "BSD-2-Clause" ]
19
2021-08-17T03:07:23.000Z
2022-03-29T19:37:54.000Z
utils/compilers/ConnectionCompiler/connection.py
willmexe/opuntiaOS
2d8e299627dc3431948892ad377556c918090000
[ "BSD-2-Clause" ]
11
2021-08-22T10:29:23.000Z
2022-03-12T03:25:55.000Z
# Copyright 2021 Nikita Melekhin. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. class Connection: def __init__(self, name, magic, protected=False): self.name = name self.magic = magic self.protected = protected self.messages = {} self.functions = {} def add_message(self, msg_name, params): if msg_name in self.messages: op = self.messages[msg_name] if (params != op): print("{0} has 2 different params".format(msg1_name)) exit(1) self.messages[msg_name] = params def add_function(self, msg1_name, msg2_name=None): if msg1_name in self.functions: print("{0} has 2 functions".format(msg1_name)) exit(1) self.functions[msg1_name] = msg2_name
35.32
72
0.614949
d9b14232fc59cd67734da2eae97ceedcb495e6b7
333
py
Python
local_config.sample.py
Tacten/python-hl7
acd215e192546a970e4aa99854e97ac75183b874
[ "BSD-3-Clause" ]
6
2021-02-06T11:09:14.000Z
2021-04-16T02:38:34.000Z
local_config.sample.py
Tacten/python-hl7
acd215e192546a970e4aa99854e97ac75183b874
[ "BSD-3-Clause" ]
null
null
null
local_config.sample.py
Tacten/python-hl7
acd215e192546a970e4aa99854e97ac75183b874
[ "BSD-3-Clause" ]
1
2021-05-19T08:19:03.000Z
2021-05-19T08:19:03.000Z
# ERPNext related configs ERPNEXT_API_KEY = 'd8ab58401ee92a7' ERPNEXT_API_SECRET = '3e234bbf0b74934' ERPNEXT_URL = 'https://hms.aakvaerp.com' # operational configs PULL_FREQUENCY = 60 # in minutes LOGS_DIRECTORY = 'logs' # logs of this script is stored in this directory IMPORT_START_DATE = None # format: '20190501'
25.615385
74
0.747748
8354d09e4cdaa03f16ba80121aa74da35c9a839d
99
py
Python
extensions.py
SicongCheen/battleships
6d647eb4bf2251a33b89fa9676dce3f9bdaefdd0
[ "MIT" ]
null
null
null
extensions.py
SicongCheen/battleships
6d647eb4bf2251a33b89fa9676dce3f9bdaefdd0
[ "MIT" ]
null
null
null
extensions.py
SicongCheen/battleships
6d647eb4bf2251a33b89fa9676dce3f9bdaefdd0
[ "MIT" ]
null
null
null
from battleships import main # if start from extensions.py if __name__ == '__main__': main()
14.142857
29
0.707071
5293840507245c24b79f9f85cfe6f96ef60d550e
869
py
Python
shitter/management/commands/create_random_followings.py
pablo-moreno/shitter-back
33c0eb0e0bdff370b68148308ac08ab63b9e6e54
[ "MIT" ]
null
null
null
shitter/management/commands/create_random_followings.py
pablo-moreno/shitter-back
33c0eb0e0bdff370b68148308ac08ab63b9e6e54
[ "MIT" ]
null
null
null
shitter/management/commands/create_random_followings.py
pablo-moreno/shitter-back
33c0eb0e0bdff370b68148308ac08ab63b9e6e54
[ "MIT" ]
null
null
null
from random import choice, randint, randrange from django.core.management.base import BaseCommand from django.contrib.auth.models import User from shitter.models import UserFollow class Command(BaseCommand): help = 'Creates random user followings' def handle(self, *args, **options): users = User.objects.all() total_users = User.objects.count() for user in users: for i in range(4, 16): random_user_index = randrange(0, total_users) random_user = User.objects.all()[random_user_index] try: if random_user != user: UserFollow.objects.create(from_user=user, to_user=random_user) print(f'@{user.username} is now following @{random_user.username}') except Exception: pass
36.208333
91
0.608746
3d392bdfd33f424fff8045fe8d11d2926903d55e
829
py
Python
examples/spark-function.py
Hedingber/mlrun
e2269718fcc7caa7e1aa379ac28495830b45f9da
[ "Apache-2.0" ]
1
2021-02-17T08:12:33.000Z
2021-02-17T08:12:33.000Z
examples/spark-function.py
Hedingber/mlrun
e2269718fcc7caa7e1aa379ac28495830b45f9da
[ "Apache-2.0" ]
1
2020-12-31T14:36:29.000Z
2020-12-31T14:36:29.000Z
examples/spark-function.py
Hedingber/mlrun
e2269718fcc7caa7e1aa379ac28495830b45f9da
[ "Apache-2.0" ]
1
2021-08-30T21:43:38.000Z
2021-08-30T21:43:38.000Z
# Pyspark example called by mlrun_spark_k8s.ipynb from pyspark.sql import SparkSession from mlrun import get_or_create_ctx # Acquire MLRun context mlctx = get_or_create_ctx("spark-function") # Get MLRun parameters mlctx.logger.info("!@!@!@!@!@ Getting env variables") READ_OPTIONS = mlctx.get_param("data_sources") QUERY = mlctx.get_param("query") WRITE_OPTIONS = mlctx.get_param("write_options") # Create spark session spark = SparkSession.builder.appName("Spark function").getOrCreate() # Loading data from a JDBC source for data_source in READ_OPTIONS: spark.read.load(**READ_OPTIONS[data_source]).createOrReplaceTempView(data_source) # Transform the data using SQL query spark.sql(QUERY).write.save(**WRITE_OPTIONS) # write the result datadrame to destination mlctx.logger.info("!@!@!@!@!@ Saved") spark.stop()
26.741935
85
0.772014
97a3c508236145a65b6f328e988287af6dc55ac4
10,436
py
Python
pybind/slxos/v16r_1_00b/routing_system/route_map/content/match/ipv6/address/__init__.py
shivharis/pybind
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
[ "Apache-2.0" ]
null
null
null
pybind/slxos/v16r_1_00b/routing_system/route_map/content/match/ipv6/address/__init__.py
shivharis/pybind
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
[ "Apache-2.0" ]
null
null
null
pybind/slxos/v16r_1_00b/routing_system/route_map/content/match/ipv6/address/__init__.py
shivharis/pybind
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
[ "Apache-2.0" ]
1
2021-11-05T22:15:42.000Z
2021-11-05T22:15:42.000Z
from operator import attrgetter import pyangbind.lib.xpathhelper as xpathhelper from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType from pyangbind.lib.base import PybindBase from decimal import Decimal from bitarray import bitarray import __builtin__ class address(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module brocade-common-def - based on the path /routing-system/route-map/content/match/ipv6/address. Each member element of the container is represented as a class variable - with a specific YANG type. YANG Description: Route address """ __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__ipv6_prefix_list_rmm','__ipv6_acl_rmm',) _yang_name = 'address' _rest_name = 'address' _pybind_generated_by = 'container' def __init__(self, *args, **kwargs): path_helper_ = kwargs.pop("path_helper", None) if path_helper_ is False: self._path_helper = False elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper): self._path_helper = path_helper_ elif hasattr(self, "_parent"): path_helper_ = getattr(self._parent, "_path_helper", False) self._path_helper = path_helper_ else: self._path_helper = False extmethods = kwargs.pop("extmethods", None) if extmethods is False: self._extmethods = False elif extmethods is not None and isinstance(extmethods, dict): self._extmethods = extmethods elif hasattr(self, "_parent"): extmethods = getattr(self._parent, "_extmethods", None) self._extmethods = extmethods else: self._extmethods = False self.__ipv6_acl_rmm = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z0-9]{1}([-a-zA-Z0-9_]{0,62})', 'length': [u'1..63']}), is_leaf=True, yang_name="ipv6-acl-rmm", rest_name="acl", parent=self, path_helper=self._path_helper, 
extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IPv6 ACL', u'alt-name': u'acl'}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='ipv6-access-list:ipv6-l3-acl-policy-name', is_config=True) self.__ipv6_prefix_list_rmm = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9\\.\\\\\\\\@#\\+\\*\\(\\)=\\{~\\}%<>=$_\\[\\]\\|]{0,31})'}), is_leaf=True, yang_name="ipv6-prefix-list-rmm", rest_name="prefix-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IPv6 prefix-list', u'alt-name': u'prefix-list'}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='ipv6-prefix-name-t', is_config=True) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path()+[self._yang_name] else: return [u'routing-system', u'route-map', u'content', u'match', u'ipv6', u'address'] def _rest_path(self): if hasattr(self, "_parent"): if self._rest_name: return self._parent._rest_path()+[self._rest_name] else: return self._parent._rest_path() else: return [u'route-map', u'match', u'ipv6', u'address'] def _get_ipv6_prefix_list_rmm(self): """ Getter method for ipv6_prefix_list_rmm, mapped from YANG variable 
/routing_system/route_map/content/match/ipv6/address/ipv6_prefix_list_rmm (ipv6-prefix-name-t) YANG Description: IPv6 prefix-list """ return self.__ipv6_prefix_list_rmm def _set_ipv6_prefix_list_rmm(self, v, load=False): """ Setter method for ipv6_prefix_list_rmm, mapped from YANG variable /routing_system/route_map/content/match/ipv6/address/ipv6_prefix_list_rmm (ipv6-prefix-name-t) If this variable is read-only (config: false) in the source YANG file, then _set_ipv6_prefix_list_rmm is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ipv6_prefix_list_rmm() directly. YANG Description: IPv6 prefix-list """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9\\.\\\\\\\\@#\\+\\*\\(\\)=\\{~\\}%<>=$_\\[\\]\\|]{0,31})'}), is_leaf=True, yang_name="ipv6-prefix-list-rmm", rest_name="prefix-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IPv6 prefix-list', u'alt-name': u'prefix-list'}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='ipv6-prefix-name-t', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """ipv6_prefix_list_rmm must be of a type compatible with ipv6-prefix-name-t""", 'defined-type': "brocade-ip-policy:ipv6-prefix-name-t", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9\\.\\\\\\\\@#\\+\\*\\(\\)=\\{~\\}%<>=$_\\[\\]\\|]{0,31})'}), is_leaf=True, yang_name="ipv6-prefix-list-rmm", rest_name="prefix-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IPv6 prefix-list', u'alt-name': u'prefix-list'}}, 
namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='ipv6-prefix-name-t', is_config=True)""", }) self.__ipv6_prefix_list_rmm = t if hasattr(self, '_set'): self._set() def _unset_ipv6_prefix_list_rmm(self): self.__ipv6_prefix_list_rmm = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9\\.\\\\\\\\@#\\+\\*\\(\\)=\\{~\\}%<>=$_\\[\\]\\|]{0,31})'}), is_leaf=True, yang_name="ipv6-prefix-list-rmm", rest_name="prefix-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IPv6 prefix-list', u'alt-name': u'prefix-list'}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='ipv6-prefix-name-t', is_config=True) def _get_ipv6_acl_rmm(self): """ Getter method for ipv6_acl_rmm, mapped from YANG variable /routing_system/route_map/content/match/ipv6/address/ipv6_acl_rmm (ipv6-access-list:ipv6-l3-acl-policy-name) YANG Description: IPv6 ACL """ return self.__ipv6_acl_rmm def _set_ipv6_acl_rmm(self, v, load=False): """ Setter method for ipv6_acl_rmm, mapped from YANG variable /routing_system/route_map/content/match/ipv6/address/ipv6_acl_rmm (ipv6-access-list:ipv6-l3-acl-policy-name) If this variable is read-only (config: false) in the source YANG file, then _set_ipv6_acl_rmm is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ipv6_acl_rmm() directly. 
YANG Description: IPv6 ACL """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z0-9]{1}([-a-zA-Z0-9_]{0,62})', 'length': [u'1..63']}), is_leaf=True, yang_name="ipv6-acl-rmm", rest_name="acl", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IPv6 ACL', u'alt-name': u'acl'}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='ipv6-access-list:ipv6-l3-acl-policy-name', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """ipv6_acl_rmm must be of a type compatible with ipv6-access-list:ipv6-l3-acl-policy-name""", 'defined-type': "ipv6-access-list:ipv6-l3-acl-policy-name", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z0-9]{1}([-a-zA-Z0-9_]{0,62})', 'length': [u'1..63']}), is_leaf=True, yang_name="ipv6-acl-rmm", rest_name="acl", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IPv6 ACL', u'alt-name': u'acl'}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='ipv6-access-list:ipv6-l3-acl-policy-name', is_config=True)""", }) self.__ipv6_acl_rmm = t if hasattr(self, '_set'): self._set() def _unset_ipv6_acl_rmm(self): self.__ipv6_acl_rmm = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z0-9]{1}([-a-zA-Z0-9_]{0,62})', 'length': [u'1..63']}), is_leaf=True, yang_name="ipv6-acl-rmm", rest_name="acl", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IPv6 ACL', u'alt-name': u'acl'}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', 
yang_type='ipv6-access-list:ipv6-l3-acl-policy-name', is_config=True) ipv6_prefix_list_rmm = __builtin__.property(_get_ipv6_prefix_list_rmm, _set_ipv6_prefix_list_rmm) ipv6_acl_rmm = __builtin__.property(_get_ipv6_acl_rmm, _set_ipv6_acl_rmm) _pyangbind_elements = {'ipv6_prefix_list_rmm': ipv6_prefix_list_rmm, 'ipv6_acl_rmm': ipv6_acl_rmm, }
62.119048
601
0.70688
c2bf545fe28e349bf2792ce0c24c4b3d49dfca3e
676
py
Python
mmpose/models/backbones/__init__.py
woffett/mmpose
cf8cbf49759e745896b70ce69d412518568af33b
[ "Apache-2.0" ]
null
null
null
mmpose/models/backbones/__init__.py
woffett/mmpose
cf8cbf49759e745896b70ce69d412518568af33b
[ "Apache-2.0" ]
null
null
null
mmpose/models/backbones/__init__.py
woffett/mmpose
cf8cbf49759e745896b70ce69d412518568af33b
[ "Apache-2.0" ]
null
null
null
from .alexnet import AlexNet from .hourglass import HourglassNet from .hrnet import HRNet from .mobilenet_v2 import MobileNetV2 from .mobilenet_v3 import MobileNetV3 from .regnet import RegNet from .resnet import ResNet, ResNetV1d from .resnext import ResNeXt from .scnet import SCNet from .seresnet import SEResNet from .seresnext import SEResNeXt from .shufflenet_v1 import ShuffleNetV1 from .shufflenet_v2 import ShuffleNetV2 __all__ = [ 'AlexNet', 'HourglassNet', 'HRNet', 'MobileNetV2', 'MobileNetV3', 'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SCNet', 'SEResNet', 'SEResNeXt', 'ShuffleNetV1', 'ShuffleNetV2', ]
21.806452
39
0.723373
69b357c842a5f5e311df7d659986581f6dda0447
64
py
Python
cloud_guardrails/bin/version.py
nachtwaffen/cloud-guardrails
b82ae1ee3b252de53364c534e2f0e41308d24488
[ "BSD-3-Clause" ]
null
null
null
cloud_guardrails/bin/version.py
nachtwaffen/cloud-guardrails
b82ae1ee3b252de53364c534e2f0e41308d24488
[ "BSD-3-Clause" ]
null
null
null
cloud_guardrails/bin/version.py
nachtwaffen/cloud-guardrails
b82ae1ee3b252de53364c534e2f0e41308d24488
[ "BSD-3-Clause" ]
null
null
null
# pylint: disable=missing-module-docstring __version__ = '0.2.8'
32
42
0.765625
98624311e05879576aeffdd6b141abd19e08c6cb
4,104
py
Python
test/TestDataObjects.py
vitorascorrea/pokemon-vgc-engine
3925deb408a70d25c4c9e7b53e021ea5a25d3bda
[ "MIT" ]
1
2022-01-05T10:00:46.000Z
2022-01-05T10:00:46.000Z
test/TestDataObjects.py
vitorascorrea/pokemon-vgc-engine
3925deb408a70d25c4c9e7b53e021ea5a25d3bda
[ "MIT" ]
null
null
null
test/TestDataObjects.py
vitorascorrea/pokemon-vgc-engine
3925deb408a70d25c4c9e7b53e021ea5a25d3bda
[ "MIT" ]
null
null
null
import itertools import random import unittest from copy import deepcopy from random import sample import numpy as np from framework.DataConstants import MIN_HIT_POINTS, MAX_HIT_POINTS from framework.DataObjects import PkmTemplate from framework.DataTypes import PkmType from framework.StandardPkmMoves import STANDARD_MOVE_ROSTER class TestEncodingMethods(unittest.TestCase): def test_PkmMove_eq(self): moves = sample(STANDARD_MOVE_ROSTER, 10) for move in moves: move_copy = deepcopy(move) self.assertEqual(move, move_copy) def test_PkmMove_eq_name(self): moves = sample(STANDARD_MOVE_ROSTER, 10) for move in moves: move_copy = deepcopy(move) move_copy.name = None self.assertEqual(move, move_copy) def test_PkmMove_eq_pp(self): moves = sample(STANDARD_MOVE_ROSTER, 10) for move in moves: move_copy = deepcopy(move) move_copy.pp = 0 self.assertEqual(move, move_copy) def test_PkmMove_ne(self): moves = sample(STANDARD_MOVE_ROSTER, 10) for move in moves: move_copy = deepcopy(move) move_copy.power += 1. 
self.assertNotEqual(move, move_copy) def test_PkmMove_reset(self): moves = sample(STANDARD_MOVE_ROSTER, 10) for move in moves: move_copy = deepcopy(move) move_copy.pp -= 1 self.assertNotEqual(move.pp, move_copy.pp) move_copy.reset() self.assertEqual(move.pp, move_copy.pp) def test_PkmMoveRoster_eq(self): moves = sample(STANDARD_MOVE_ROSTER, 10) move_roster = set(moves) copy = deepcopy(move_roster) self.assertEqual(move_roster, copy) def test_PkmMoveRoster_eq_name(self): moves = sample(STANDARD_MOVE_ROSTER, 10) move_roster = set(moves) copy = deepcopy(move_roster) for move in copy: move.name = None self.assertEqual(move_roster, copy) def test_PkmMoveRoster_eq_pp(self): moves = sample(STANDARD_MOVE_ROSTER, 10) move_roster = set(moves) copy = deepcopy(move_roster) for move in copy: move.pp = 0 self.assertEqual(move_roster, copy) def test_PkmMoveRoster_ne(self): moves = sample(STANDARD_MOVE_ROSTER, 10) move_roster = set(moves) move_roster_2 = set(moves[:-1]) self.assertNotEqual(move_roster, move_roster_2) move_roster_3 = deepcopy(move_roster) for move in move_roster_3: move.power += 1 self.assertNotEqual(move_roster, move_roster_3) def test_PkmMoveRoster_in(self): moves = sample(STANDARD_MOVE_ROSTER, 20) move_roster = set(moves[:10]) for move in moves[:10]: self.assertTrue(move in move_roster) for move in moves[10:]: self.assertFalse(move in move_roster) def test_PkmTemplate(self): for _ in range(10): pkm_type = random.choice(list(PkmType)) max_hp = np.random.uniform(MIN_HIT_POINTS, MAX_HIT_POINTS, 1)[0] move_roster = set(sample(deepcopy(STANDARD_MOVE_ROSTER), 10)) template = PkmTemplate(pkm_type=pkm_type, max_hp=max_hp, move_roster=move_roster) pkm_type = random.choice(list(PkmType)) max_hp = np.random.uniform(MIN_HIT_POINTS, MAX_HIT_POINTS, 1)[0] move_roster = set(sample(deepcopy(STANDARD_MOVE_ROSTER), 10)) template2 = PkmTemplate(pkm_type=pkm_type, max_hp=max_hp, move_roster=move_roster) move_combinations = itertools.combinations(range(10), 4) for idx in 
sample(list(move_combinations), 1): pkm = template.gen_pkm(moves=list(idx)) self.assertTrue(template.is_speciman(pkm)) if template == template2: self.assertTrue(template2.is_speciman(pkm)) else: self.assertFalse(template2.is_speciman(pkm)) if __name__ == '__main__': unittest.main()
35.686957
94
0.642788
8ce393624eedac2c0ddfa7df3a93b562263e13da
31
py
Python
BottleOIDC/__init__.py
Glocktober/BottleOIDC
12ba98a1f82b1fa26c5af9848beaf366be39014d
[ "MIT" ]
null
null
null
BottleOIDC/__init__.py
Glocktober/BottleOIDC
12ba98a1f82b1fa26c5af9848beaf366be39014d
[ "MIT" ]
null
null
null
BottleOIDC/__init__.py
Glocktober/BottleOIDC
12ba98a1f82b1fa26c5af9848beaf366be39014d
[ "MIT" ]
null
null
null
from .botOIDC import BottleOIDC
31
31
0.870968
36ad5a87e339151a956ce09b398b8fa8338b27a0
401
py
Python
xai_court/config.py
michaeljneely/court-of-xai
37eded49f46b3a05ad56986c1a9bb22eee3ac4b1
[ "MIT" ]
4
2021-05-07T09:40:11.000Z
2022-03-27T18:19:07.000Z
xai_court/config.py
michaeljneely/court-of-xai
37eded49f46b3a05ad56986c1a9bb22eee3ac4b1
[ "MIT" ]
1
2021-05-10T09:31:05.000Z
2021-05-10T09:31:05.000Z
xai_court/config.py
michaeljneely/court-of-xai
37eded49f46b3a05ad56986c1a9bb22eee3ac4b1
[ "MIT" ]
1
2021-06-06T18:45:39.000Z
2021-06-06T18:45:39.000Z
'''Define configuration classes''' class Config: '''Defines the parameters used across the code base which can be adjusted without modifying code directly''' logger_name = 'attention-not-explanation' package_name = 'xai_court' # serialization_base_dir = 'outputs' # seeds = [87, 2134, 5555] # mpl_style = 'seaborn-poster' # sns_palette = 'cubehelix'
21.105263
112
0.660848
b4c0374808b0aff9905d227a3c09c20b5d920906
2,139
py
Python
Testing/EpdToJson.py
StuartRiffle/corvid
21fbea9bf585f3713354f33a205e9fd8b96da555
[ "MIT" ]
6
2019-05-29T03:22:41.000Z
2021-03-02T09:08:16.000Z
Testing/EpdToJson.py
StuartRiffle/corvid
21fbea9bf585f3713354f33a205e9fd8b96da555
[ "MIT" ]
1
2019-05-29T16:15:55.000Z
2019-05-29T16:15:55.000Z
Testing/EpdToJson.py
StuartRiffle/corvid
21fbea9bf585f3713354f33a205e9fd8b96da555
[ "MIT" ]
null
null
null
import sys import json import chess import chess.engine epdfile = sys.argv[1] all_epd = [] refengine = chess.engine.SimpleEngine.popen_uci("..\\Engine\\stockfish-10") line_idx = 0 with open(epdfile + ".epd") as epd: for line in epd.readlines(): line_idx = line_idx + 1 line = line.strip() # Some epd files use 'am' instead of 'bm' line = line.replace( "am ", "bm " ) # Some epd files have no separator between the fen and best move line = line.replace( "bm ", ";bm " ) # A small number of epd files don't actually provide # a best move, which seems like it kind of defeats the point, # but fine. In these cases we fire up a strong reference engine # to get a quick opinion on the position. Deeper searches might # give us better data here. if not 'bm ' in line: board = chess.Board( line ) result = refengine.play( board, chess.engine.Limit( depth=10 ) ) line = line + ";bm " + str(result.move) # After the fen it's all key/value pairs. fields = line.split( ';' ) if len( fields ) > 0: this_test = {} fen = fields[0].strip() this_test['fen'] = fen for meta in fields[1:]: meta = meta.strip() if len( meta ) > 0: if ' ' in meta: sep = meta.index( ' ' ) key = meta[:sep].strip() val = meta[sep:].strip() if val.startswith( '"' ) and val.endswith( '"' ): val = val[1:-1] this_test[key] = val if not 'id' in this_test: this_test['id'] = epdfile.replace( '.', '-' ) + "-" + str( line_idx ) try: bmove = chess.Move.from_uci( bm ) except: # Oh, gross bmove = board.parse_san( bm ) all_epd.append( this_test ) ser = json.dumps( all_epd, sort_keys=True, indent=4 ) print( ser ) refengine.quit()
31.455882
85
0.499766
5b3e0332900ca42b785cfa26684e3ad309821e1a
2,353
py
Python
masakari/common/config.py
openstack/masakari
7c6380d63e4e39db30176306303323e16b38fa3d
[ "Apache-2.0" ]
70
2016-07-22T21:58:00.000Z
2022-01-04T06:05:32.000Z
masakari/common/config.py
openstack/masakari
7c6380d63e4e39db30176306303323e16b38fa3d
[ "Apache-2.0" ]
1
2017-08-10T05:14:00.000Z
2017-08-10T05:14:00.000Z
masakari/common/config.py
openstack/masakari
7c6380d63e4e39db30176306303323e16b38fa3d
[ "Apache-2.0" ]
33
2016-07-05T02:05:25.000Z
2021-12-20T07:40:43.000Z
# Copyright 2016 NTT DATA # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_middleware import cors from oslo_policy import opts def set_lib_defaults(): """Update default value for configuration options from other namespace. Example, oslo lib config options. This is needed for config generator tool to pick these default value changes. https://docs.openstack.org/oslo.config/latest/cli/ generator.html#modifying-defaults-from-other-namespaces """ set_middleware_defaults() # TODO(gmann): Remove setting the default value of config policy_file # once oslo_policy change the default value to 'policy.yaml'. # https://github.com/openstack/oslo.policy/blob/a626ad12fe5a3abd49d70e3e5b95589d279ab578/oslo_policy/opts.py#L49 opts.set_defaults(cfg.CONF, 'policy.yaml') def set_middleware_defaults(): """Update default configuration options for oslo.middleware.""" # CORS Defaults cfg.set_defaults(cors.CORS_OPTS, allow_headers=['X-Auth-Token', 'X-Openstack-Request-Id', 'X-Identity-Status', 'X-Roles', 'X-Service-Catalog', 'X-User-Id', 'X-Tenant-Id'], expose_headers=['X-Auth-Token', 'X-Openstack-Request-Id', 'X-Subject-Token', 'X-Service-Token'], allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'] )
40.568966
116
0.570336
f0d23078d5f4fc52244c7dbeab918a0e83f64de5
12,421
py
Python
base.py
DE-CIX/bgperf
77f1a0d1de2aedcfc8f6d64602edb2bf65f7ca66
[ "Apache-2.0" ]
9
2017-06-20T15:16:00.000Z
2017-11-20T16:41:15.000Z
base.py
DECIX/bgperf
77f1a0d1de2aedcfc8f6d64602edb2bf65f7ca66
[ "Apache-2.0" ]
1
2021-02-24T01:32:19.000Z
2021-02-24T01:32:19.000Z
base.py
DECIX/bgperf
77f1a0d1de2aedcfc8f6d64602edb2bf65f7ca66
[ "Apache-2.0" ]
2
2017-06-23T01:28:11.000Z
2017-07-17T11:44:56.000Z
# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from settings import dckr import io import os import yaml import sys import subprocess import warnings import StringIO from pyroute2 import IPRoute from itertools import chain from nsenter import Namespace from threading import Thread from threading import Event from datetime import timedelta from actions import WaitConvergentAction, SleepAction, InterruptPeersAction flatten = lambda l: chain.from_iterable(l) def ctn_exists(name): return '/{0}'.format(name) in list(flatten(n['Names'] for n in dckr.containers(all=True))) def img_exists(name): return name in [ctn['RepoTags'][0].split(':')[0] for ctn in dckr.images() if ctn['RepoTags'] != None] def getoutput(cmd, successful_status=(0,), stacklevel=1): try: p = subprocess.Popen(cmd, stdout=subprocess.PIPE) output, _ = p.communicate() status = p.returncode except EnvironmentError as e: warnings.warn(str(e), UserWarning, stacklevel=stacklevel) return False, '' if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status: return True, output return False, output # Parses output in this format of cpupower # |Mperf #PKG |CORE|CPU | C0 | Cx | Freq # 0| 0| 0| 0.00| 100.0|4400 # 2| 0| 1| 0.00| 100.0|3874 # TODO Design conflict, the measured values are only valid for global measurement interval = 1 sec. 
def get_turbo_clks(): info = [] ok, output = getoutput(['cpupower', 'monitor', '-mMperf']) output = StringIO.StringIO(output) # convert to StringIO for later line-based parsing if ok: output.readline() # skip fist line names = tuple(s.strip() for s in output.readline().split('|')) # names tuple for line in output: # read values values = (s.strip() for s in line.split('|')) info.append(dict(zip(names, values))) else: print("Failed to execute \"cpupower\" please install it on your system.") print output return info # This method is limited to obtain the maximum Base Clk but not the turbo Clk def get_cpuinfo(): info = [{}] ok, output = getoutput(['uname', '-m']) if ok: info[0]['uname_m'] = output.strip() try: fo = open('/proc/cpuinfo') except EnvironmentError as e: warnings.warn(str(e), UserWarning) else: for line in fo: name_value = [s.strip() for s in line.split(':', 1)] if len(name_value) != 2: continue name, value = name_value if not info or name in info[-1]: # next processor info.append({}) info[-1][name] = value fo.close() return info class docker_netns(object): def __init__(self, name): pid = int(dckr.inspect_container(name)['State']['Pid']) if pid == 0: raise Exception('no container named {0}'.format(name)) self.pid = pid def __enter__(self): pid = self.pid if not os.path.exists('/var/run/netns'): os.mkdir('/var/run/netns') os.symlink('/proc/{0}/ns/net'.format(pid), '/var/run/netns/{0}'.format(pid)) return str(pid) def __exit__(self, type, value, traceback): pid = self.pid os.unlink('/var/run/netns/{0}'.format(pid)) def connect_ctn_to_br(ctn, brname): print 'connecting container {0} to bridge {1}'.format(ctn, brname) with docker_netns(ctn) as pid: ip = IPRoute() br = ip.link_lookup(ifname=brname) if len(br) == 0: ip.link_create(ifname=brname, kind='bridge', mtu=1446) br = ip.link_lookup(ifname=brname) br = br[0] ip.link('set', index=br, state='up', mtu=1446) ifs = ip.link_lookup(ifname=ctn) if len(ifs) > 0: ip.link_remove(ifs[0]) ip.link_create(ifname=ctn, 
kind='veth', peer=pid, mtu=1446) host = ip.link_lookup(ifname=ctn)[0] ip.link('set', index=host, master=br) ip.link('set', index=host, state='up') guest = ip.link_lookup(ifname=pid)[0] ip.link('set', index=guest, net_ns_fd=pid) with Namespace(pid, 'net'): ip = IPRoute() ip.link('set', index=guest, ifname='eth1', mtu=1446) ip.link('set', index=guest, state='up') class Sequencer(Thread): # script: the script to execute # benchmark_start: start time of the benchmark this sequencer is part of # queue: the "main" queue of the benchmark which is responsible for logging and output of measured data to STDOUT def __init__(self, script, benchmark_start, queue): Thread.__init__(self) self.daemon = True self.name = 'sequencer' self.script = script # the script is a list of benchmark actions self.benchmark_start = benchmark_start # start time of the benchmark run self.queue = queue self.elapsed = timedelta(0) self.action = None # the currently running action def run(self): print "\033[1;32;47mstarting Sequenecer\033[1;30;47m" while len(self.script) > 0: # simple sequencer, execute actions one at a time action = self.script.pop(0)['action'] info = {} info['who'] = self.name info['message'] = "\nAction \"{0}\" started at {1}".format(action['type'], self.elapsed.total_seconds()) # DEBUG print "Action \"{0}\" started at {1}".format(action['type'], self.elapsed.total_seconds()) # FIXME Debug remove self.queue.put(info) if self.execute_action(action): info['message'] = "\033[1;32;47mAction \"{0}\" finished at {1}\033[1;30;47m".format(action['type'], self.elapsed.total_seconds()) else: info['message'] = "\033[1;31;47mAction \"{0}\" FAILED at {1}\033[1;30;47m".format(action['type'], self.elapsed.total_seconds()) self.queue.put(info) print "Sequencer: script finished!" # halts until action is finished and returns True when execution finished successfullly. 
def execute_action(self, a): finished = Event() while True: if a['type'] == 'wait_convergent': self.action = WaitConvergentAction(a['cpu_below'], a['routes'], a['confidence'], self.queue, finished) elif a['type'] == 'interrupt_peers': recovery = a['recovery'] if 'recovery' in a and a['recovery'] else None loss = a['loss'] if 'loss' in a and a['loss'] else None self.action = InterruptPeersAction(a['peers'], a['duration'], finished, recovery, loss) elif a['type'] == 'sleep': self.action = SleepAction(a['duration'], finished) elif a['type'] =='execute': self.action = ExecuteProgramAction(a['path'],finished) else: print "ERROR: unrecognized action of type {0}".format(a['type']) return False # return error here finished.wait() finished.clear() break return True def notify(self, data): # elapsed is the ammount of time since the beginning of the action elapsed, cpu, mem, recved = data self.elapsed = elapsed if self.action: self.action.notify(data) self.action.has_finished() else: print >>sys.stderr, "Call .notify() on None object" class Container(object): def __init__(self, name, image, host_dir, guest_dir): self.name = name self.image = image self.host_dir = host_dir self.guest_dir = guest_dir self.config_name = None if not os.path.exists(host_dir): os.makedirs(host_dir) os.chmod(host_dir, 0777) self.cpuset_cpus = None self.cpus = None # list of integers containing every core id @classmethod def build_image(cls, force, tag, nocache=False): def insert_after_from(dockerfile, line): lines = dockerfile.split('\n') i = -1 for idx, l in enumerate(lines): elems = [e.strip() for e in l.split()] if len(elems) > 0 and elems[0] == 'FROM': i = idx if i < 0: raise Exception('no FROM statement') lines.insert(i+1, line) return '\n'.join(lines) for env in ['http_proxy', 'https_proxy']: if env in os.environ: cls.dockerfile = insert_after_from(cls.dockerfile, 'ENV {0} {1}'.format(env, os.environ[env])) f = io.BytesIO(cls.dockerfile.encode('utf-8')) if force or not img_exists(tag): print 
'build {0}...'.format(tag) for line in dckr.build(fileobj=f, rm=True, tag=tag, decode=True, nocache=nocache): if 'stream' in line: print line['stream'].strip() def run(self, brname='', rm=True, cpus=''): if rm and ctn_exists(self.name): print 'remove container:', self.name dckr.remove_container(self.name, force=True) config = dckr.create_host_config(binds=['{0}:{1}'.format(os.path.abspath(self.host_dir), self.guest_dir)], privileged=True) ctn = dckr.create_container(image=self.image, command='bash', detach=True, name=self.name, stdin_open=True, volumes=[self.guest_dir], host_config=config) if cpus: print('running container {0} with non-default cpuset: {1}'.format(self.name, cpus)) dckr.update_container(container=self.name, cpuset_cpus=cpus) self.cpuset_cpus = cpus # parse into list of integers for later use ranges = (x.split("-") for x in cpus.split(",")) self.cpus = [i for r in ranges for i in range(int(r[0]), int(r[-1]) + 1)] dckr.start(container=self.name) if brname != '': connect_ctn_to_br(self.name, brname) self.ctn_id = ctn['Id'] return ctn def stats(self, queue): def stats(): for stat in dckr.stats(self.ctn_id, decode=True): cpu_percentage = 0.0 prev_cpu = stat['precpu_stats']['cpu_usage']['total_usage'] prev_system = stat['precpu_stats']['system_cpu_usage'] cpu = stat['cpu_stats']['cpu_usage']['total_usage'] system = stat['cpu_stats']['system_cpu_usage'] cpu_num = len(stat['cpu_stats']['cpu_usage']['percpu_usage']) cpu_delta = float(cpu) - float(prev_cpu) system_delta = float(system) - float(prev_system) if system_delta > 0.0 and cpu_delta > 0.0: cpu_percentage = (cpu_delta / system_delta) * float(cpu_num) * 100.0 # collect core speed (MHz) of cpus where the process is running (if cpuset is used) if self.cpus: cpufreqs = [] # put the current corespeeds for all cpus in cpuset in a list cpuinfo = get_turbo_clks() #cpuinfo = get_cpuinfo() for cpu in self.cpus: speed = cpuinfo[cpu]['Freq'] #speed = cpuinfo[cpu]['cpu MHz'] cpufreqs.append((cpu, speed)) # build a 
list of tuples with cpu_id, speed queue.put({'who': self.name, 'cpu': cpu_percentage, 'mem': stat['memory_stats']['usage'], 'cpufreqs': cpufreqs}) else: queue.put({'who': self.name, 'cpu': cpu_percentage, 'mem': stat['memory_stats']['usage']}) t = Thread(target=stats) t.daemon = True t.start()
40.72459
145
0.588197
31fae85722c72ada091c64a3b51130e6c372276a
6,804
py
Python
aleph/index/util.py
ATADDATALOG/test-repo
353cf7bdc89e0f0d4b79d7be00df639e595d4fbc
[ "MIT" ]
1
2019-06-18T21:35:59.000Z
2019-06-18T21:35:59.000Z
aleph/index/util.py
heartofstone/aleph
d66b6615d2bfa10c291c63754f53b468de8bebde
[ "MIT" ]
null
null
null
aleph/index/util.py
heartofstone/aleph
d66b6615d2bfa10c291c63754f53b468de8bebde
[ "MIT" ]
null
null
null
import logging
from time import time
from pprint import pprint  # noqa

from banal import ensure_list
from elasticsearch import TransportError
from elasticsearch.helpers import streaming_bulk
from servicelayer.util import backoff, service_retries

from aleph.core import es, settings

log = logging.getLogger(__name__)

# This means that text beyond the first 500 MB will not be indexed
INDEX_MAX_LEN = 1024 * 1024 * 500
BULK_PAGE = 500
# cf. https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-from-size.html # noqa
MAX_PAGE = 9999

# Per-schema shard weights: schemata that tend to hold few documents get a
# single shard, bulk schemata (rows, pages, emails) get many.
SHARDS_LIGHT = 1
SHARDS_DEFAULT = 5
SHARDS_HEAVY = 10

SHARD_WEIGHTS = {
    'Folder': SHARDS_LIGHT,
    'Package': SHARDS_LIGHT,
    'Workbook': SHARDS_LIGHT,
    'Video': SHARDS_LIGHT,
    'Audio': SHARDS_LIGHT,
    'Airplane': SHARDS_LIGHT,
    'Associate': SHARDS_LIGHT,
    'Family': SHARDS_LIGHT,
    'Passport': SHARDS_LIGHT,
    'Document': SHARDS_LIGHT,
    'Row': SHARDS_HEAVY,
    'Page': SHARDS_HEAVY,
    'Email': SHARDS_HEAVY,
    'PlainText': SHARDS_HEAVY,
    # 'HyperText': SHARDS_HEAVY,
    'Pages': SHARDS_HEAVY,
    'Table': SHARDS_HEAVY,
}


def get_shard_weight(schema):
    """Return the number of shards to use for an index of this schema.

    Always 1 under test to keep test clusters small.
    """
    if settings.TESTING:
        return 1
    return SHARD_WEIGHTS.get(schema.name, SHARDS_DEFAULT)


def refresh_sync(sync):
    """Translate a boolean `sync` flag into an ES `refresh` argument.

    Under test, always refresh so written documents are immediately visible.
    """
    if settings.TESTING:
        return True
    return True if sync else False


def unpack_result(res):
    """Turn a document hit from ES into a more traditional JSON object."""
    error = res.get('error')
    if error is not None:
        raise RuntimeError("Query error: %r" % error)
    # ES get-by-id responses carry found=False for missing docs.
    if res.get('found') is False:
        return
    data = res.get('_source', {})
    data['id'] = res.get('_id')
    # Only expose a score when the query actually ranked results.
    _score = res.get('_score')
    if _score is not None and _score != 0.0:
        data['score'] = _score
    data['_index'] = res.get('_index')
    # Flatten all highlight fragments into a single list.
    if 'highlight' in res:
        data['highlight'] = []
        for key, value in res.get('highlight', {}).items():
            data['highlight'].extend(value)
    return data


def authz_query(authz):
    """Generate a search query filter from an authz object."""
    # Hot-wire authorization entirely for admins.
    if authz.is_admin:
        return {'match_all': {}}
    collections = authz.collections(authz.READ)
    if not len(collections):
        return {'match_none': {}}
    return {'terms': {'collection_id': collections}}


def bool_query():
    """Return an empty ES boolean compound query skeleton."""
    return {
        'bool': {
            'should': [],
            'filter': [],
            'must': [],
            'must_not': []
        }
    }


def none_query(query=None):
    """Make `query` (or a fresh bool query) match no documents at all."""
    if query is None:
        query = bool_query()
    query['bool']['must'].append({'match_none': {}})
    return query


def field_filter_query(field, values):
    """Need to define work-around for full-text fields."""
    values = ensure_list(values)
    if not len(values):
        return {'match_all': {}}
    if field in ['_id', 'id']:
        return {'ids': {'values': values}}
    # Name filtering is done via the normalised fingerprints field.
    if field in ['names']:
        field = 'fingerprints'
    if len(values) == 1:
        # if field in ['addresses']:
        #     field = '%s.text' % field
        #     return {'match_phrase': {field: values[0]}}
        return {'term': {field: values[0]}}
    return {'terms': {field: values}}


def query_delete(index, query, sync=False, **kwargs):
    "Delete all documents matching the given query inside the index."
    # Retry with backoff on transport failures; version conflicts are
    # tolerated ('proceed') since concurrent writers may touch the same docs.
    for attempt in service_retries():
        try:
            es.delete_by_query(index=index,
                               body={'query': query},
                               conflicts='proceed',
                               wait_for_completion=sync,
                               refresh=refresh_sync(sync),
                               **kwargs)
            return
        except TransportError as exc:
            log.warning("Query delete failed: %s", exc)
            backoff(failures=attempt)


def bulk_actions(actions, chunk_size=BULK_PAGE, sync=False):
    """Bulk indexing with timeouts, bells and whistles."""
    start_time = time()
    # yield_ok=False / raise_on_error=False: only failures are yielded back,
    # and they are logged rather than raised.
    stream = streaming_bulk(es, actions,
                            chunk_size=chunk_size,
                            max_chunk_bytes=INDEX_MAX_LEN * 2,
                            max_retries=10,
                            initial_backoff=2,
                            yield_ok=False,
                            raise_on_error=False,
                            refresh=refresh_sync(sync))
    for _, details in stream:
        # Deleting an already-absent document is not an error worth logging.
        if details.get('delete', {}).get('status') == 404:
            continue
        log.warning("Error during index: %r", details)
    duration = (time() - start_time)
    log.debug("Bulk write: %.4fs", duration)


def index_safe(index, id, body, **kwargs):
    """Index a single document and retry until it has been stored."""
    for attempt in service_retries():
        try:
            es.index(index=index, id=id, body=body, **kwargs)
            body['id'] = str(id)
            # Strip the (potentially huge) extracted text before returning
            # the document to the caller.
            body.pop('text', None)
            return body
        except TransportError as exc:
            log.warning("Index error [%s:%s]: %s", index, id, exc)
            backoff(failures=attempt)


def configure_index(index, mapping, settings):
    """Create or update a search index with the given mapping and settings.

    This will try to make a new index, or update an existing mapping with
    new properties. Returns False when an existing mapping could not be
    updated (ES status 400), True otherwise.
    """
    if es.indices.exists(index=index):
        log.info("Configuring index: %s...", index)
        # ignore=[400]: an incompatible mapping change returns status 400
        # instead of raising, which we surface as a False return value.
        res = es.indices.put_mapping(index=index, body=mapping, ignore=[400])
        return res.get('status') != 400
    log.info("Creating index: %s...", index)
    res = es.indices.create(index, body={
        'settings': settings,
        'mappings': mapping
    }, ignore=[400])
    log.info("RES: %r", res)
    return True


def index_settings(shards=5, replicas=2):
    """Configure an index in ES with support for text transliteration."""
    return {
        "index": {
            "number_of_shards": shards,
            "number_of_replicas": replicas,
            "analysis": {
                "analyzer": {
                    # Analyzer that transliterates tokens to latin script.
                    "icu_latin": {
                        "tokenizer": "standard",
                        "filter": ["latinize"]
                    }
                },
                "normalizer": {
                    "icu_latin": {
                        "type": "custom",
                        "filter": ["latinize"]
                    }
                },
                "filter": {
                    "latinize": {
                        "type": "icu_transform",
                        "id": "Any-Latin; NFKD; Lower(); [:Nonspacing Mark:] Remove; NFKC"  # noqa
                    }
                }
            }
        }
    }
30.648649
107
0.551587