id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
5006710 | <filename>setup/client/setup.py
import os
from pyrolysis import client
from setuptools import setup, find_packages

# Resolve the README relative to this file so builds work from any directory.
this_dir = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_dir, 'README.rst'), 'r') as f:
    long_description = f.read()

# More information on properties: https://packaging.python.org/distributing
setup(name='pyrolysis-client',
      version=client.__version__,
      author='<NAME>',
      author_email='<EMAIL>',
      url="https://github.com/fmerlin/pyrolysis.git",
      description="Access REST APIs from python using functions",
      long_description=long_description,
      classifiers=[
          "Development Status :: 4 - Beta",
          # BUG FIX: a comma was missing after the next line, so Python's
          # implicit string concatenation merged it with
          # "Programming Language :: Python" into one invalid trove classifier.
          "Intended Audience :: Developers",
          "Programming Language :: Python",
          "Programming Language :: Python :: 2",
          "Programming Language :: Python :: 2.7",
          "Programming Language :: Python :: 3",
          "Programming Language :: Python :: 3.5",
          "Programming Language :: Python :: 3.6"
      ],
      keywords=[
          'openapi',
          'swagger',
          'rest',
          'service'
      ],
      packages=find_packages(exclude=['tests', 'fixture']),
      tests_require=[
          # Used to run tests
          'nose'
      ],
      install_requires=[
          # Used to communicate with services
          'requests',
          # Used to manage authentication
          'requests-auth',
          # Used to parse all date-time formats in a easy way
          'python-dateutil'
      ],
      platforms=[
          'Windows',
          'Linux'
      ]
      )
| StarcoderdataPython |
# Prefer the real Raspberry Pi GPIO backend; fall back to the test stub when
# the hardware module is unavailable (e.g. on a development machine).
try:
    from gpio_rpi import *
except ImportError:
    from gpio_test import *
| StarcoderdataPython |
1911471 | <reponame>xmunoz/uwrtourist
# -*- coding: utf-8 -*-
# ...
# available languages
# Maps ISO 639-1 language codes to their human-readable display names.
LANGUAGES = {
    'en': 'English',
    'es': 'Español'
}
| StarcoderdataPython |
31381 | <reponame>S-c-r-a-t-c-h-y/coding-projects
from arbre_binaire import AB
def parfait(ngd):
    """Build a perfect binary tree from its preorder traversal.

    ngd is the preorder (node-left-right) traversal of a perfect binary
    tree, given as a str: the first character is the root key and the rest
    splits evenly between the left and right subtrees.
    """
    tree = AB(ngd[0])
    if len(ngd) > 1:
        # Left subtree occupies the first half after the root, right the rest.
        mid = len(ngd) // 2 + 1
        tree.set_ag(parfait(ngd[1:mid]))
        tree.set_ad(parfait(ngd[mid:]))
    return tree
def dechiffrer(chaine_morse):
    """Decode a morse-code string with a perfect binary tree of letters.

    '.' walks to the left child, '-' to the right child, ' ' terminates a
    letter, and any other separator (e.g. '/') terminates a word.
    """
    # Perfect binary tree encoding the morse alphabet (preorder layout).
    morse_tree = parfait(" EISHVUF ARL WPJTNDBXKCYMGZQO ")
    # Cursor walking the tree for the letter currently being decoded.
    cursor = morse_tree
    decoded = ""
    for symbol in chaine_morse:
        if symbol == ".":
            cursor = cursor.get_ag()
        elif symbol == "-":
            cursor = cursor.get_ad()
        else:
            # End of a letter (' ') or of a word (anything else).
            decoded += cursor.get_val()
            if symbol != " ":
                decoded += " "
            cursor = morse_tree  # restart the walk at the root
    decoded += cursor.get_val()
    return decoded
# Smoke tests: known morse sentences must decode to the expected French text.
assert dechiffrer("-... --- -. -. ./.--- --- ..- .-. -. . .") == "BONNE JOURNEE"
assert dechiffrer("-... --- -. .--- --- ..- .-./-- --- -. ... .. . ..- .-.") == "BONJOUR MONSIEUR"
print("Tous les tests sont satisfaits")
| StarcoderdataPython |
8045890 | <filename>ct-tests/lib/common/bss.py
#
# MIT License
#
# (C) Copyright 2020-2022 Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
"""
BSS-related CMS test helper functions
"""
from .api import API_URL_BASE, requests_get
from .cli import int_list_to_str, run_cli_cmd
from .helpers import debug, get_bool_field_from_obj, get_int_field_from_obj, \
get_list_field_from_obj, get_str_field_from_obj, \
info, raise_test_error
BSS_URL_BASE = "%s/bss" % API_URL_BASE
BSS_BOOTPARAMETERS_URL = "%s/boot/v1/bootparameters" % BSS_URL_BASE
BSS_HOSTS_URL = "%s/boot/v1/hosts" % BSS_URL_BASE
def verify_bss_response(response_object, expected_length=None):
    """
    Verify some basic things about the bss response object:
    1) That it is a list
    2) That it has the expected length, if specified
    3) That all list entries are type dict

    Raises a test error (via raise_test_error) on any failure.
    """
    debug("Validating BSS response")
    if not isinstance(response_object, list):
        raise_test_error("We expect the bss response to be a list but it is %s" % str(type(response_object)))
    # PEP 8: compare against None with "is not", not "!=".
    elif expected_length is not None and len(response_object) != expected_length:
        raise_test_error("We expect bss response list length to be %d, but it is %d" % (expected_length, len(response_object)))
    elif not all(isinstance(x, dict) for x in response_object):
        raise_test_error("We expect the bss list items to all be dicts, but at least one is not")
def validate_bss_host_entry(host, nid=None, xname=None):
    """
    Validates the following about the specified bss host object:
    1) The nid field is an integer equal to the specified nid, or if none specified, is a positive integer
    2) The Role field is a non-empty string
    3) The xname field is a string equal to the specified xname, or if none specified, looks at least a little
    bit like an xname
    4) The Enabled field is a boolean
    5) The Type field is Node
    """
    noun = "bss host list entry"
    # PEP 8: compare against None with "is not", not "!=".
    if nid is not None:
        get_int_field_from_obj(host, "NID", noun="%s for nid %d" % (noun, nid), exact_value=nid)
    else:
        get_int_field_from_obj(host, "NID", noun=noun, min_value=1)
    get_str_field_from_obj(host, "Role", noun=noun, min_length=1)
    if xname is not None:
        get_str_field_from_obj(host, "ID", noun="%s for xname %s" % (noun, xname), exact_value=xname)
    else:
        # No expected xname: require at least the leading "x" of an xname.
        get_str_field_from_obj(host, "ID", noun=noun, min_length=2, prefix="x")
    get_str_field_from_obj(host, "Type", noun=noun, exact_value="Node")
    get_bool_field_from_obj(host, "Enabled", noun=noun, null_okay=False)
def get_bss_host_by_nid(use_api, nid, expected_xname, enabled_only=True):
    """
    Fetch the single BSS host entry for the given nid, validate that its
    xname matches the expected one and its Role field is non-empty, and
    return the host object. When enabled_only is set, a host marked not
    Enabled fails the test.
    """
    if use_api:
        query = {'mac': None, 'name': None, 'nid': nid}
        response_object = requests_get(BSS_HOSTS_URL, params=query)
    else:
        response_object = run_cli_cmd(["bss", "hosts", "list", "--nid", str(nid)])
    # Exactly one entry must come back for a single-nid query.
    verify_bss_response(response_object, 1)
    bss_host = response_object[0]
    validate_bss_host_entry(host=bss_host, nid=nid, xname=expected_xname)
    if enabled_only and not bss_host["Enabled"]:
        raise_test_error("BSS host entry for NID %d is marked not Enabled" % nid)
    return bss_host
def list_bss_hosts(use_api, enabled_only=True):
    """
    List all host entries in bss, verify that they look okay, then return the list.
    When enabled_only is set, hosts marked not Enabled are filtered out.
    """
    info("Listing all BSS hosts")
    if use_api:
        query = {'mac': None, 'name': None, 'nid': None}
        response_object = requests_get(BSS_HOSTS_URL, params=query)
    else:
        response_object = run_cli_cmd(["bss", "hosts", "list"])
    verify_bss_response(response_object)
    hosts = list()
    for entry in response_object:
        debug("Examining host: %s" % str(entry))
        validate_bss_host_entry(host=entry)
        if entry["Enabled"] or not enabled_only:
            hosts.append(entry)
    return hosts
def bss_host_nid(host):
    """
    Return the node ID ("NID" field) of the given BSS host object.
    """
    node_id = host["NID"]
    return node_id
def bss_host_xname(host):
    """
    Return the xname ("ID" field) of the given BSS host object.
    """
    xname = host["ID"]
    return xname
def bss_host_role(host):
    """
    Retrieves the Role (e.g. "Compute") from the BSS host object.

    Note: the docstring previously said "xname", a copy/paste error from
    bss_host_xname; this accessor returns the "Role" field.
    """
    return host["Role"]
def get_bss_nodes_by_role(use_api, role, enabled_only=True):
    """
    List all bss host entries, validate that they look legal, and return
    only those entries whose Role field matches the specified role.
    """
    matching = []
    for bss_host in list_bss_hosts(use_api, enabled_only=enabled_only):
        if bss_host["Role"] == role:
            matching.append(bss_host)
    return matching
def get_bss_compute_nodes(use_api, min_number=1, enabled_only=True):
    """
    Return the validated BSS host entries for compute nodes, failing the
    test when fewer than min_number are found.
    """
    compute_nodes = get_bss_nodes_by_role(use_api, role="Compute", enabled_only=enabled_only)
    if len(compute_nodes) < min_number:
        raise_test_error("We need at least %d compute node(s), but only %d found in BSS!" % (min_number, len(compute_nodes)))
    return compute_nodes
def verify_bss_bootparameters_list(response_object, xname_to_nid):
    """
    Validates that the list of bss bootparameters looks valid and has all of the
    xnames we expect to find in it.

    When xname_to_nid is empty (e.g. when validating a full bootparameters
    listing), no particular response length is enforced; otherwise the
    response must contain exactly one entry per expected xname.
    """
    # BUG FIX: an empty xname_to_nid previously forced expected length 0,
    # which made list_all_bss_bootparameters fail on any non-empty listing;
    # pass None in that case so no length is enforced.
    verify_bss_response(response_object,
                        len(xname_to_nid) if xname_to_nid else None)
    # Track which expected xnames we have not yet seen in any hosts list.
    xnames_to_find = set(xname_to_nid.keys())
    for bootparams in response_object:
        # Every entry must carry at least 'params' and 'kernel' fields.
        if "params" not in bootparams:
            raise_test_error("We expect boot parameters entry to have 'params' field, but this one does not: %s" % bootparams)
        elif "kernel" not in bootparams:
            raise_test_error("We expect boot parameters entry to have 'kernel' field, but this one does not: %s" % bootparams)
        hostlist = get_list_field_from_obj(bootparams, "hosts", noun="boot parameters list entry", member_type=str, min_length=1)
        # Cross off expected xnames found in this entry's host list.
        xnames_to_find.difference_update(hostlist)
    if xnames_to_find:
        raise_test_error("Did not find bootparameter entries for the following nids/xnames: %s" % ", ".join(
            ["%d/%s" % (xname_to_nid[xname], xname) for xname in xnames_to_find]))
def list_all_bss_bootparameters(use_api):
    """
    Returns list of all BSS bootparameters
    """
    if use_api:
        query = {'mac': None, 'name': None, 'nid': None}
        response_object = requests_get(BSS_BOOTPARAMETERS_URL, params=query)
    else:
        response_object = run_cli_cmd(["bss", "bootparameters", "list"])
    # No specific xnames expected -> pass an empty mapping.
    verify_bss_bootparameters_list(response_object, dict())
    return response_object
def list_bss_bootparameters(use_api, nid, xname):
    """
    Generate a list of bss bootparameters for the specified nid, validate the list, and return its first entry.
    """
    if use_api:
        query = {'mac': None, 'name': None, 'nid': nid}
        response_object = requests_get(BSS_BOOTPARAMETERS_URL, params=query)
    else:
        response_object = run_cli_cmd(
            ["bss", "bootparameters", "list", "--nid", str(nid)])
    # Exactly one entry is expected, covering the nid's xname.
    verify_bss_bootparameters_list(response_object, {xname: nid})
    return response_object[0]
def list_bss_bootparameters_nidlist(use_api, xname_to_nid):
    """
    List bss boot parameters for all specified nids, validate them, and return the list.
    """
    nids = list(xname_to_nid.values())
    if use_api:
        query = {'mac': None, 'name': None, 'nid': None}
        body = {"nids": nids}
        response_object = requests_get(BSS_BOOTPARAMETERS_URL, params=query, json=body)
    else:
        response_object = run_cli_cmd(
            ["bss", "bootparameters", "list", "--nids", int_list_to_str(nids)])
    verify_bss_bootparameters_list(response_object, xname_to_nid)
    return response_object
| StarcoderdataPython |
4902346 | # Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script insert items in a Google Cloud Search datasource using
the Cloud Search API.
Prerequisites:
- Google Cloud Search enable on the gSuite organization
- Created a Google Cloud Third-party data sources ID
- GCP project
- Google Cloud Search API enabled in the project
- GCS bucket (Publicly readable)
- GCP service account
To run this script, you will need Python3 packages listed in REQUIREMENTS.txt.
You can easily install them with virtualenv and pip by running these commands:
virtualenv -p python3 env
source ./env/bin/activate
pip install -r REQUIREMENTS.txt
You can than run the script as follow:
python item_create.py \
--service_account_file /PATH/TO/service.json \
--datasources YOUR_DATASOURCE_ID \
--item_json item.json \
--document_bucket GCS_BUCKET_NAME
"""
import argparse
import base64
import cloudsearch
import google.oauth2.credentials
import googleapiclient.http
import json
import logging
import time
import urllib.parse
from google.oauth2 import service_account
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from google_auth_oauthlib.flow import InstalledAppFlow
from cloudstorage import *
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger('cloudsearch.item')
# Scope grants [GCS]
GCS_SCOPES = ['https://www.googleapis.com/auth/devstorage.full_control']
GCS_API_SERVICE_NAME = 'storage'
GCS_API_VERSION = 'v1'
# Scope grants [CLOUD SEARCH]
SEARCH_SCOPES = ['https://www.googleapis.com/auth/cloud_search']
SEARCH_API_SERVICE_NAME = 'cloudsearch'
SEARCH_API_VERSION = 'v1'
def get_authenticated_service(service_account_file, scope, service_name, version):
    """Build an authenticated Google API client from a service-account key file.

    service_account_file: path to the service account JSON key file.
    scope: list of OAuth scopes to request.
    service_name / version: Google discovery API name and version
        (e.g. 'storage'/'v1' or 'cloudsearch'/'v1').
    Returns the discovery client produced by googleapiclient's build().
    """
    # Create credentials from Service Account File
    credentials = service_account.Credentials.from_service_account_file(
        service_account_file, scopes=scope)
    return build(service_name, version, credentials=credentials, cache_discovery=False)
def main(service_account_file, document_bucket,
         datasources, item_json):
    """Index every file of a GCS bucket into a Cloud Search datasource.

    service_account_file: path to the service account JSON key.
    document_bucket: name of the publicly readable GCS bucket to crawl.
    datasources: Cloud Search datasource id to insert items into.
    item_json: path to a JSON file holding the item body template.
    """
    LOGGER.info('Indexing documents - START')
    # Separate authenticated clients for Cloud Storage and Cloud Search.
    service_gcs = get_authenticated_service(service_account_file,
                                            GCS_SCOPES,
                                            GCS_API_SERVICE_NAME,
                                            GCS_API_VERSION)
    service_search = get_authenticated_service(service_account_file,
                                               SEARCH_SCOPES,
                                               SEARCH_API_SERVICE_NAME,
                                               SEARCH_API_VERSION)
    itemService = cloudsearch.ItemsService(service_search, datasources)
    gcsService = CloudStorage(service_gcs, document_bucket)
    # Retrieve item body template
    with open(item_json) as f:
        item_body_template = json.load(f)
    LOGGER.info('Retrieve GCS blob files')
    blob_files = gcsService.list_blob_file()
    # Loop blob files
    # NOTE(review): `os` is not imported at the top of this file; it appears
    # to come in via `from cloudstorage import *` — confirm.
    for blob_file in blob_files:
        # Extrapolate file information (folder doubles as the "author" below).
        file_folder = os.path.dirname(blob_file["name"])
        file_name = os.path.basename(blob_file["name"])
        # Skip folders items (blob names ending in '/')
        if file_name == '':
            continue
        LOGGER.info('Processing file: %s - START' % file_name)
        # Save the file locally
        gcsService.download_blob_file(blob_file)
        # Create item id, we can use the GCS item id.
        # Remove '/': it is not supported in item ids.
        item_id = blob_file['id'].replace('/', '')
        # Retrieve item body template from the json file
        item_to_insert = cloudsearch.ItemBody(item_body_template)
        # Set fields specific to this item.
        # NOTE(review): time.time() is a float; b'%d' % float may raise under
        # Python 3 bytes formatting — verify this runs on the target version.
        item_to_insert.set_element(
            'item/version', base64.b64encode(b'%d' % time.time()))
        item_to_insert.set_element(
            'item/metadata/title', file_name)
        item_to_insert.set_element('item/metadata/objectType', 'document')
        # Public URL of the source object in the (publicly readable) bucket.
        item_to_insert.set_element(
            'item/metadata/sourceRepositoryUrl', 'https://storage.googleapis.com/'+document_bucket+'/'+urllib.parse.quote(blob_file["name"]))
        item_to_insert.set_element('item/structuredData/object/properties', [
            {"name": "author",
             "textValues": {"values": file_folder}}
        ])
        # Insert document into the Cloud Search datasource
        itemService.insert_with_media(
            item_id, item_to_insert, file_name)
        # Remove the temporary local file
        os.remove('%s' % file_name)
        LOGGER.info('Processing file: %s - END' % file_name)
    LOGGER.info('Indexing documents - END')
    return
if __name__ == '__main__':
    # Command-line entry point: collect credentials, datasource id, item
    # template and bucket name, then run the indexing pass.
    parser = argparse.ArgumentParser(
        description='Example to parse HTML and send to CloudSearch.')
    parser.add_argument('--service_account_file', dest='service_account_file',
                        help='File name for the service account.')
    parser.add_argument('--datasources', dest='datasources',
                        help='DataSource to update.')
    parser.add_argument('--item_json', dest='item_json',
                        help='Item JSON structure.')
    parser.add_argument('--document_bucket', dest='document_bucket',
                        help='GCS document bucket.')
    args = parser.parse_args()
    main(args.service_account_file, args.document_bucket,
         args.datasources, args.item_json)
| StarcoderdataPython |
11307786 | <reponame>victorkifer/SocialMediaTopTrends<filename>data_source/__init__.py
__author__ = 'viktor'
| StarcoderdataPython |
1647736 | <filename>test_train_cifar.py
import test_utils
# Parse the shared command-line options (data directory, batch size,
# epoch count, optimizer hyper-parameters) before the heavy imports below.
FLAGS = test_utils.parse_common_options(
    datadir='../cifar-data',
    batch_size=125,
    num_epochs=254,
    momentum=0.9,
    lr=0.8)
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch_xla
import torch_xla_py.data_parallel as dp
import torch_xla_py.utils as xu
import torch_xla_py.xla_model as xm
import torchvision
import torchvision.transforms as transforms
# Import utilities and models
from torch.optim.lr_scheduler import MultiStepLR
from utilities import Cutout, RandomPixelPad, CosineAnnealingRestartsLR
from models import WRN_McDonnell
def train_cifar():
    """Train WRN_McDonnell on CIFAR-100 with torch_xla data parallelism.

    Uses FLAGS for all hyper-parameters, saves the best model to 'model.pt',
    and returns the final epoch's test accuracy as a percentage.
    """
    print('==> Preparing data..')
    # Training augmentation: random pixel padding + crop, flip, Cutout,
    # then normalization with CIFAR-100 channel statistics.
    transform_train = transforms.Compose([
        transforms.Lambda(lambda x: RandomPixelPad(x, padding=4)),
        transforms.RandomCrop(32),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        Cutout(18, random_pixel=True),  # add Cutout
        transforms.Normalize((0.5071, 0.4865, 0.4409),
                             (0.2673, 0.2564, 0.2762)),
    ])
    # Test-time transform: normalization only.
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5071, 0.4865, 0.4409),
                             (0.2673, 0.2564, 0.2762)),
    ])
    trainset = torchvision.datasets.CIFAR100(
        root=FLAGS.datadir,
        train=True,
        download=True,
        transform=transform_train)
    train_loader = torch.utils.data.DataLoader(
        trainset,
        batch_size=FLAGS.batch_size,
        shuffle=True,
        num_workers=FLAGS.num_workers)
    testset = torchvision.datasets.CIFAR100(
        root=FLAGS.datadir,
        train=False,
        download=True,
        transform=transform_test)
    test_loader = torch.utils.data.DataLoader(
        testset,
        batch_size=FLAGS.batch_size,
        shuffle=False,
        num_workers=FLAGS.num_workers)
    torch.manual_seed(42)
    # XLA devices to replicate over; empty list falls back to CPU engine.
    devices = (
        xm.get_xla_supported_devices(
            max_devices=FLAGS.num_cores) if FLAGS.num_cores !=0 else [])
    # Define model here
    model = WRN_McDonnell(20, 10, 100, binarize=True)
    # Pass [] as device_ids to run using the PyTorch/CPU engine.
    model_parallel = dp.DataParallel(model, device_ids=devices)

    def train_loop_fn(model, loader, device, context):
        # Per-device training loop; optimizer/scheduler are cached in the
        # per-device context so they persist across epochs.
        loss_fn = nn.CrossEntropyLoss()
        optimizer = context.getattr_or(
            'optimizer', lambda: optim.SGD(
                model.parameters(),
                lr=FLAGS.lr,
                momentum=FLAGS.momentum,
                weight_decay=5e-4))
        # LR scheduler
        scheduler = context.getattr_or(
            'scheduler', lambda: CosineAnnealingRestartsLR(
                optimizer, T=2, eta_min=1e-4))
        model.train()
        for x, (data, target) in loader:
            optimizer.zero_grad()
            output = model(data)
            loss = loss_fn(output, target)
            loss.backward()
            # xm.optimizer_step performs the XLA-aware gradient reduction.
            xm.optimizer_step(optimizer)
            if x % FLAGS.log_steps == 0:
                print('[{}]({}) Loss={:.5f}'.format(device, x, loss.item()))
        # Step LR scheduler (once per epoch)
        scheduler.step()

    def test_loop_fn(model, loader, device, context):
        # Per-device evaluation loop; returns the accuracy fraction.
        total_samples = 0
        correct = 0
        model.eval()
        for x, (data, target) in loader:
            output = model(data)
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()
            total_samples += data.size()[0]
        return correct / total_samples

    best_accuracy = 0.0
    for epoch in range(1, FLAGS.num_epochs + 1):
        model_parallel(train_loop_fn, train_loader)
        accuracies = model_parallel(test_loop_fn, test_loader)
        # Average accuracy across all replicated devices.
        accuracy = sum(accuracies) / len(devices)
        print('Epoch {}, Accuracy={:.2f}%'.format(epoch, 100.0 * accuracy))
        # Keep track of best model
        if accuracy > best_accuracy:
            best_accuracy = accuracy
            torch.save(model_parallel._models[0].state_dict(), 'model.pt')
        if FLAGS.metrics_debug:
            print(torch_xla._XLAC._xla_metrics_report())
    return accuracy * 100.0
# Train the model and report the final test accuracy (percentage).
torch.set_default_tensor_type('torch.FloatTensor')
acc = train_cifar()
print('Final accuracy: {}'.format(acc))
| StarcoderdataPython |
6672867 | <reponame>JayH5/django-watchman<gh_stars>0
from django.conf.urls import include, url
from django.contrib import admin
import watchman.views
# URL routing: Django admin, the watchman health-check endpoints, and a
# minimal "bare" status view exposed separately by name.
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    url(r'^watchman/', include('watchman.urls')),
    url(r'^watchman/bare/', watchman.views.bare_status, name='bare_status'),
]
| StarcoderdataPython |
8177175 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test_metapath_randomwalk"""
import time
import unittest
import json
import os
import numpy as np
from pgl.sample import metapath_randomwalk
from pgl.graph import Graph
from pgl import heter_graph
# Fix the global NumPy seed so the random-walk tests are deterministic.
np.random.seed(1)
class MetapathRandomwalkTest(unittest.TestCase):
    """metapath_randomwalk test
    """

    def setUp(self):
        # Build a small heterogeneous graph fixture: nodes 0-3 are type 'c',
        # 4-9 are type 'p', 10-14 are type 'a'. Edge types c2p/p2c and
        # p2a/a2p are mutual reverses.
        edges = {}
        # for test no successor
        edges['c2p'] = [(1, 4), (0, 5), (1, 9), (1, 8), (2, 8), (2, 5), (3, 6),
                        (3, 7), (3, 4), (3, 8)]
        edges['p2c'] = [(v, u) for u, v in edges['c2p']]
        edges['p2a'] = [(4, 10), (4, 11), (4, 12), (4, 14), (4, 13), (6, 12),
                        (6, 11), (6, 14), (7, 12), (7, 11), (8, 14), (9, 10)]
        edges['a2p'] = [(v, u) for u, v in edges['p2a']]
        # for test speed
        # edges['c2p'] = [(0, 4), (0, 5), (1, 9), (1,8), (2,8), (2,5), (3,6), (3,7), (3,4), (3,8)]
        # edges['p2c'] = [(v,u) for u, v in edges['c2p']]
        # edges['p2a'] = [(4,10), (4,11), (4,12), (4,14), (5,13), (6,13), (6,11), (6,14), (7,12), (7,11), (8,14), (9,13)]
        # edges['a2p'] = [(v,u) for u, v in edges['p2a']]
        self.node_types = ['c' for _ in range(4)] + [
            'p' for _ in range(6)
        ] + ['a' for _ in range(5)]
        node_types = [(i, t) for i, t in enumerate(self.node_types)]
        self.graph = heter_graph.HeterGraph(
            num_nodes=len(node_types), edges=edges, node_types=node_types)

    def test_metapath_randomwalk(self):
        # Walk the metapath c->p->a->p->c from each 'c' node, then verify
        # every visited node's type follows the cyclic metapath pattern.
        meta_path = 'c2p-p2a-a2p-p2c'
        path = ['c', 'p', 'a', 'p', 'c']
        start_nodes = [0, 1, 2, 3]
        walk_len = 10
        walks = metapath_randomwalk(
            graph=self.graph,
            start_nodes=start_nodes,
            metapath=meta_path,
            walk_length=walk_len)
        # One walk per start node.
        self.assertEqual(len(walks), 4)
        for walk in walks:
            for i in range(len(walk)):
                # The metapath repeats with period len(path)-1.
                idx = i % (len(path) - 1)
                self.assertEqual(self.node_types[walk[i]], path[idx])
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
10669 | # imports
from telegram.ext import (
CommandHandler,
MessageHandler,
Filters,
ConversationHandler,
)
from handler_functions.start import start
from handler_functions.bio import bio
from handler_functions.gender import gender
from handler_functions.photo import photo, skip_photo
from handler_functions.location import location, skip_location
from handler_functions.cancel import cancel
from conversation_handlers.stage_constants import *
# Adds conversation handler with the states GENDER, PHOTO, LOCATION and BIO for stage 1 of the sign up
conv_handler = ConversationHandler(
    # /start launches the sign-up conversation.
    entry_points=[CommandHandler('start', start)],
    states={
        # State constants come from conversation_handlers.stage_constants.
        # Gender must be one of the three exact button labels.
        GENDER: [MessageHandler(Filters.regex('^(Gentleman|Lady|I am a unicorn.)$'), gender)],
        # Photo and location may be skipped with /skip.
        PHOTO: [MessageHandler(Filters.photo, photo), CommandHandler('skip', skip_photo)],
        LOCATION: [
            MessageHandler(Filters.location, location),
            CommandHandler('skip', skip_location),
        ],
        # Any non-command text is accepted as the bio.
        BIO: [MessageHandler(Filters.text & ~Filters.command, bio)],
    },
    # /cancel aborts the conversation from any state.
    fallbacks=[CommandHandler('cancel', cancel)],
)
69162 | import pandas as pd
import datetime as dt
import jpholiday as jh
class Tradetime:
    """Tracks a current trading date, snapping to TSE business days."""

    def __init__(self, time=None):
        # Initialize to the given date, or today's business day.
        self.set_time(time)

    def set_time(self, time=None):
        # Use the given date when provided, otherwise default to today.
        if time:
            self.time = time
        else:
            self.time = dt.datetime.now().date()
        # Snap back to the most recent business day when the current value
        # is not one.
        # NOTE(review): the original indentation was lost in extraction; this
        # check is placed at method level so it also applies to explicitly
        # supplied dates — confirm against the original source.
        if not is_business_day(self.time):
            self.time = self.previous_date(1)

    def set_next_date(self, date_n):
        # Advance date_n business days forward.
        for _ in range(date_n):
            self.time = next_business_date(self.time)

    def previous_date(self, date_n):
        # Return the date_n-th business day before the current date.
        return previous_n_date(self.time, date_n)

    def update(self):
        # Move to the next business day.
        self.set_next_date(1)
def is_business_day(date) -> bool:
    '''
    Check if the specified date is a business day of TSE or not.
    Parameters
    ----------
    date : dt.date
        date which will be checked
    Returns
    -------
    date_is_business_day : bool
        True if it is, and vice versa
    '''
    # Accept datetimes by reducing them to their date component.
    if isinstance(date, dt.datetime):
        date = date.date()
    # Weekends (Saturday=5, Sunday=6).
    if date.weekday() >= 5:
        return False
    # Japanese national holidays.
    if jh.is_holiday(date):
        return False
    # Exchange year-end / new-year closures: Jan 2, Jan 3 and Dec 31.
    if (date.month, date.day) in ((1, 2), (1, 3), (12, 31)):
        return False
    # TSE system outage day (original comment: "東証鯖落ち日").
    if date == dt.date(2020, 10, 1):
        return False
    return True
def next_business_date(date) -> dt.date:
    '''
    Return the dt.date of the first business day strictly after the
    specified date (the date itself is never returned, even if it is a
    business day).

    Parameters
    ----------
    date : dt.date or dt.datetime
        starting date; datetimes are reduced to their date component

    Note: the return annotation previously said dt.datetime, but the
    function always returns a dt.date; the docstring also referenced a
    nonexistent ``include`` parameter.
    '''
    if isinstance(date, dt.datetime):
        date = date.date()
    while True:
        date += dt.timedelta(days=1)
        if is_business_day(date):
            return date
def previous_n_date(date, n) -> dt.date:
    '''
    Return the dt.date of the n-th business day strictly before the
    specified date.

    Parameters
    ----------
    date : dt.date or dt.datetime
        starting date; datetimes are reduced to their date component
    n : int
        number of business days to step back

    Note: the docstring previously described "the previous business day"
    and a nonexistent ``include`` parameter; this documents the actual
    n-step behavior. A datetime normalization was also added so the
    declared dt.date return type holds, matching next_business_date.
    '''
    if isinstance(date, dt.datetime):
        date = date.date()
    count = 0
    while count < n:
        date -= dt.timedelta(days=1)
        if is_business_day(date):
            count += 1
    return date
| StarcoderdataPython |
4890928 | __author__ = '<NAME>'
import sys
import pygame
from pygame.locals import *
def main(result_file):
    """Render a bin-packed board layout with pygame (Python 2 script).

    result_file: whitespace-separated placement results; each line after the
    header holds "name ? x y w h" in board units. Board dimensions are read
    from 'layout-input.json' in the current directory.
    Esc quits; numpad +/- (key codes 270/269) zoom in/out.

    NOTE(review): the original indentation was lost in extraction; the loop
    nesting below is a best-effort reconstruction — confirm against the
    original source. The two file handles are also never closed.
    """
    pygame.init()
    winSurfObj =pygame.display.set_mode((800,800))
    whiteColor = pygame.Color(255,255,255)
    blackColor = pygame.Color(0,0,0)
    greenColor = pygame.Color(128,255,128)
    pygame.display.set_caption('ChipFit -- Bin-Packed Board -- Esc to Quit, NumPad+/- to Zoom')
    i = 0
    j = 0
    x = 0
    y = 0
    # Placement results: one component per line after the header.
    fd = open(result_file, 'r')
    lines = fd.readlines()
    # Board dimensions come from the layout input file.
    fd2 = open('layout-input.json')
    lines2 = fd2.readlines()
    cols2 = lines2[1].split(' ')
    print len(cols2)
    print cols2
    print cols2[3]
    print cols2[3].split(',')[0]
    print float(cols2[3].split(',')[0])
    boardwidth = float(cols2[3].split(',')[0])
    #int(cols2[3])
    cols2 = lines2[2].split(' ')
    print len(cols2)
    print cols2
    print cols2[3].split(',')[0]
    print float(cols2[3].split(',')[0])
    boardheight = float(cols2[3].split(',')[0])
    #int(cols2[3])
    # scale = pixels per board unit; refresh forces a redraw next frame.
    scale = 10
    refresh = 1
    badcount = 0
    font = pygame.font.SysFont("monospace",8)
    clock = pygame.time.Clock()
    while True:
        if refresh == 1:
            refresh = 0
            badcount = 0
            winSurfObj.fill(whiteColor)
            # Font size tracks the zoom level.
            font = pygame.font.SysFont("monospace",scale)
            # Draw the board outline in green.
            i = boardwidth*scale + 4
            j = boardheight*scale + 4
            x = 0
            y = 0
            print boardwidth
            print boardheight
            print i
            print j
            pygame.draw.rect(winSurfObj,greenColor,(x,y,i,j))
            for l in lines[1:]:
                cols = l.split(' ')
                print cols
                x = round(float(cols[2])*scale)
                y = round(float(cols[3])*scale)
                i = round(float(cols[4])*scale)
                j = round(float(cols[5])*scale)
                if x < 0:
                    # Negative x marks a component the packer failed to place;
                    # list it in a column to the right of the board.
                    if badcount < 1 :
                        x = boardwidth*scale + 4*scale
                        y = badcount * 2 * scale
                        label = font.render('Failed To Place:', 1, (25,0,12))
                        winSurfObj.blit(label,(x+2,y+2))
                        badcount = badcount + 1
                    x = boardwidth*scale + 5*scale + 2
                    y = badcount * 1 * scale + 2
                    badcount = badcount + 1
                    label = font.render(cols[0], 1, (25,0,12))
                    winSurfObj.blit(label,(x+2,y+2))
                else :
                    # Placed component: black outline, white interior, name label.
                    pygame.draw.rect(winSurfObj,blackColor,(x,y,i,j))
                    pygame.draw.rect(winSurfObj,whiteColor,(x+2,y+2,i-4,j-4))
                    label = font.render(cols[0], 1, (25,0,12))
                    winSurfObj.blit(label,(x+2,y+j/2-scale/2))
            pygame.display.update()
        clock.tick(30)
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            elif event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    pygame.event.post(pygame.event.Event(QUIT))
                elif event.key == 269:
                    # Numpad '-': zoom out (clamped at 1).
                    scale = scale - 1
                    if scale < 1:
                        scale = 1
                    refresh = 1
                elif event.key == 270:
                    # Numpad '+': zoom in.
                    scale = scale + 1
                    refresh = 1
| StarcoderdataPython |
6706274 | #!/usr/bin/env python3
import vlc, time, random
# Set up a VLC player for the test clip.
vlc_instance = vlc.Instance()
player = vlc_instance.media_player_new()
media = vlc_instance.media_new("testSong.wav")
player.set_media(media)
# play() returns 0 on success; bail out otherwise.
playState = player.play()
if playState != 0:
    exit(1)
# Busy-wait until playback has actually started.
while player.is_playing() == 0:
    pass
# get_length() is in milliseconds; convert to seconds.
duration = player.get_length() / 1000
t = 0
# Poll the volume every ~10 ms for the clip's duration, echoing it on one
# line; the set_volume call writes back the same value (no actual change).
while t < duration:
    t+=0.01
    volume = player.audio_get_volume()
    print(volume, end="\r")
    player.audio_set_volume(volume)
    time.sleep(0.01)
| StarcoderdataPython |
# Filesystem locations for bundled assets and the log file.
IMAGES_PATH = 'write_box/images/'
FONTS_PATH = 'write_box/fonts/'
LOGFILE_PATH = 'log.txt'
# Bot API token placeholder — replace with the real token before deploying.
TOKEN = 'TOKEN'
150553 | # -*- coding: utf-8 -*-
from .context import ann
import numpy as np
from ann import core, helpers
import unittest
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn import datasets
import cv2
class NetworksTestSuiteV2(unittest.TestCase):
"""Basic test cases."""
    def test_restricted_boltzmann_machine(self):
        """Train a tiny RBM on 6-bit toy patterns, label its hidden units
        from the training labels, run one prediction, and daydream a few
        Gibbs-sampling steps."""
        num_inputs = 6
        num_hidden_units = 2
        learning_rate = 0.5
        # Two visible clusters: "left-heavy" (label 1) and "right-heavy"
        # (label 0) bit patterns.
        trainX = np.asarray([
            [1,1,1,0,0,0],
            [1,0,1,0,0,0],
            [1,1,1,0,0,0],
            [0,0,1,1,1,0],
            [0,0,1,1,0,0],
            [0,0,1,1,1,0],
        ])
        trainY = np.asarray([1,1,1,0,0,0])
        network = core.v2RBMNetwork(num_inputs, num_hidden_units, learning_rate, debug=True)
        # 500 epochs of contrastive divergence with 5 CD steps, no LR decay.
        network.train(500, trainX, num_cd_steps=5, no_decay=True)
        network.label_units(trainX, trainY)
        network.print_labelling()
        prediction = network.predict(np.asarray([[0,0,0,1,1,0]]))
        network.print_prediction(prediction)
        # Sample n Gibbs steps from the model's joint distribution.
        n = 10
        dreamed = network.daydream(n)
        print('\nDaydreaming for '+str(n)+' gibbs steps:')
        print(dreamed)
    def test_stacked_rbm_dbm(self):
        """Train a small deep belief network (stacked RBMs) on the same toy
        6-bit dataset and run a single prediction."""
        num_inputs = 6
        num_outputs = 2
        num_layers = 2
        learning_rate = 0.5
        trainX = np.asarray([
            [1,1,1,0,0,0],
            [1,0,1,0,0,0],
            [1,1,1,0,0,0],
            [0,0,1,1,1,0],
            [0,0,1,1,0,0],
            [0,0,1,1,1,0],
        ])
        trainY = np.asarray([1,1,1,0,0,0])
        network = core.DBNetwork(num_inputs, num_outputs, num_layers, learning_rate, size_hidden_layers=4)
        network.train(10, trainX)
        network.label_units(trainX, trainY)
        network.print_labelling()
        prediction = network.predict(np.asarray([[0,0,0,1,1,0]]))
        network.print_prediction(prediction)
    def test_stacked_rbm_dbm_mnist(self):
        """Train a DBN on MNIST and print a per-class precision/recall
        report (Python 2 test; downloads MNIST on first run)."""
        np.seterr(all='raise')
        print "(downloading data...)"
        dataset = datasets.fetch_mldata("MNIST Original")
        # 67/33 train/test split, pixel values scaled to [0, 1].
        (trainX, testX, trainY, testY) = train_test_split(
            dataset.data / 255.0, dataset.target.astype("int0"), test_size = 0.33, train_size=0.67, random_state=42)
        learning_rate = 0.05
        # 0 extra layers, 300 hidden units per layer, one output per digit.
        network = core.DBNetwork(trainX.shape[1], len(np.unique(dataset.target)), 0, learning_rate, size_hidden_layers=300)
        epochs=5
        network.train(epochs, trainX, trainY, num_gibbs_sampling_steps=1)
        # network.label_units(trainX, trainY)
        network.print_labelling_probs()
        network.print_labelling()
        # predict all and calculate statistics
        print('\nStatistics')
        prediction = network.predict(testX)
        # NOTE(review): bare except silently falls back to printing the
        # labelling probabilities — consider narrowing to a specific error.
        try:
            pred_labels = []
            for p in prediction:
                pred_labels.append(network.get_label(p))
            print(classification_report(testY, pred_labels))
        except:
            network.print_labelling_probs()
            print('\n')
        # (Commented-out per-digit visualization kept from the original.)
        # for i in np.random.choice(np.arange(0, len(testY)), size = (10,)):
        #     # classify the digit
        #     pred = network.predict(np.atleast_2d(testX[i]))
        #     # show the image and prediction
        #     print "Actual digit is {0}, predicted {1}".format(testY[i], network.get_label(pred[0]))
        #     image = (testX[i] * 255).reshape((28, 28)).astype("uint8")
        #     cv2.imshow("Digit", image)
        #     cv2.waitKey(0)
    def test_mlp_v2(self):
        """Train the v2 multi-layer perceptron on a small MNIST subset.

        Builds mini-batches of 100, constructs a 784-300-10 network and
        trains for 50 epochs; output is printed rather than asserted.
        NOTE(review): a commented-out second ``test_mlp_v2`` follows this
        method — if re-enabled it would shadow this definition.
        """
        #np.seterr(all='raise')
        dataset = datasets.fetch_mldata("MNIST Original")
        (trainX, testX, trainY, testY) = train_test_split(
            dataset.data / 255.0, dataset.target.astype("int0"), test_size = 0.033, train_size=0.067, random_state=42)
        batch_size = 100
        trainXbatches = helpers.util.create_minibatches(trainX, batch_size)
        trainYbatches = helpers.util.create_minibatches(trainY, batch_size)
        testXbatches = helpers.util.create_minibatches(testX, batch_size)
        testYbatches = helpers.util.create_minibatches(testY, batch_size)
        print trainXbatches[0].shape
        num_features = trainX.shape[1]
        layer_definition = [num_features,300,10]
        network = core.v2MLNetwork(layer_definition, batch_size)
        network.train(trainXbatches, trainYbatches, testXbatches, testYbatches, 50)
# def test_mlp_v2(self):
# trainX = np.asarray([ # boolean 'xor' function
# [1, 1],
# [1, 0],
# [0, 0],
# [0, 1]
# ])
# trainY = np.asarray([0,1,0,1])
# learning_rate = 0.5
# use_alpha=False
# network = core.v2MLNetwork(learning_rate, use_alpha, debug=False)
# network.construct([2,5,2], weights=None)
# # network.construct([trainX.shape[1],6,len(np.unique(trainY))], weights=None)
# epochs=1500
# network.train(epochs, trainX, trainY)
# # get a prediction
# prediction = network.predict(trainX, probabilities=False)
# print prediction
# prediction = network.predict(trainX, probabilities=True)
# print prediction
# def test_mlp_v2_mnist(self):
# np.seterr(all='raise')
# print "(downloading data...)"
# dataset = datasets.fetch_mldata("MNIST Original")
# (trainX, testX, trainY, testY) = train_test_split(
# dataset.data / 255.0, dataset.target.astype("int0"), test_size = 0.033, train_size=0.067, random_state=42)
# learning_rate = 0.05
# use_alpha=False
# network = core.v2MLNetwork(learning_rate, use_alpha, debug=False)
# # features, hid1, hid2, out
# network.construct([784, 100, 100, 10], weights=None)
# epochs=50
# network.train(epochs, trainX, trainY, batch_size=100)
# # get a prediction
# with open('/Users/paulenglert/Development/DataScience/ANN/tests/mlp.out', 'w') as file:
# for y_, y in zip(network.predict(testX, probabilities=False), testY):
# file.write(str(y_)+' (actual:'+str(y)+')\n')
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
8169848 | <filename>apps/aggregate_criteria.py<gh_stars>0
# the purpose of this script is
# when a button is clicked
# -some set of of criteria are checked
# -if they are met
# -the entire collection of stores are collected
# -
from dash import html
import dash_bootstrap_components as dbc
import dash_core_components as dcc
from dash.dependencies import Input, Output, State
from dash import callback_context
import pathlib
from app import app
PATH = pathlib.Path(__file__).parent
DATA_PATH = PATH.joinpath("../datasets").resolve()
# Page layout: a header, the "aggregate" button, a spinner area that shows
# how many filter sets have been queued, and a range slider for all-search
# filtering. IDs here are wired to the callback below.
layout=html.Div(
    children=[
        dbc.Row(
            dbc.Col(
                #html.Div(
                children=[
                    # html.Button('bullshit button', id='bullshit button', n_clicks=0),
                    html.H3('Collect all specified criteria')
                ],
                #),
                width='auto',
                align='center'
            )
        ),
        dbc.Row(
            dbc.Col(
                html.Div(
                    children=[
                        # Button that snapshots all per-filter stores into the
                        # aggregate store (see callback_compound below).
                        html.Button(
                            'Aggregate all selections into one filter',
                            id='button_aggregate',
                            n_clicks=0
                        )
                    ]
                )
                #do not use width with sliders for some reason
            )
        ),
        dbc.Row(
            dbc.Col(
                #html.Div(
                children=[
                    # html.Button('bullshit button', id='bullshit button', n_clicks=0),
                    html.H3('Display number of filter sets waiting')
                ],
                #),
                width='auto',
                align='center'
            )
        ),
        dbc.Row(
            dbc.Col(
                # One dbc.Spinner is appended here per aggregated filter set.
                html.Div(
                    id='spinners_aggregate',
                    children=[]
                )
                #do not use width with sliders for some reason
            )
        ),
        dbc.Row(
            dbc.Col(
                #html.Div(
                children=[
                    # html.Button('bullshit button', id='bullshit button', n_clicks=0),
                    html.H3('All-search filters')
                ],
                #),
                width='auto',
                align='center'
            )
        ),
        dbc.Row(
            dbc.Col(
                html.Div(
                    children=[
                        #a header
                        dcc.RangeSlider(
                            id='slider_aggregate',
                            min=0,
                            #max=max_fold_change,
                            max=10,
                            step=1,
                            #value=0,
                            #marks={i:str(i)[0:5] for i in [i*(max_fold_change/20) for i in range(1,20)]}
                            marks={i:str(i) for i in range(11)}
                        )
                    ]
                )
                #do not use width with sliders for some reason
            )
        ),
    ]
)
def check_for_errors(a,b):
    """Validate the stored filter selections before aggregating.

    TODO(review): currently a placeholder — it prints 'hi' and performs no
    checks, although the callback below calls it as if it validated the
    compound/additional stores.
    """
    print('hi')
@app.callback(
    [#Output(component_id='store_compound',component_property='data'),
    #Output(component_id='store_additional',component_property='data'),
    Output(component_id='store_aggregate',component_property='data'),
    Output(component_id='slider_aggregate',component_property='value'),
    Output(component_id='spinners_aggregate',component_property='children')],
    [Input(component_id='button_aggregate',component_property='n_clicks'),
    Input(component_id='slider_aggregate',component_property='value')],
    [State(component_id='store_compound',component_property='data'),
    State(component_id='store_additional',component_property='data'),
    State(component_id='store_from_species',component_property='data'),
    State(component_id='store_from_organ',component_property='data'),
    State(component_id='store_from_disease',component_property='data'),
    State(component_id='store_to_species',component_property='data'),
    State(component_id='store_to_organ',component_property='data'),
    State(component_id='store_to_disease',component_property='data'),
    State(component_id='store_aggregate',component_property='data'),
    State(component_id='spinners_aggregate',component_property='children')]
)
def callback_compound(
    button_aggregate_n_clicks,
    slider_aggregate_value,
    store_compound_data,
    store_additional_data,
    store_from_species_data,
    store_from_organ_data,
    store_from_disease_data,
    store_to_species_data,
    store_to_organ_data,
    store_to_disease_data,
    store_aggregate_data,
    spinners_aggregate_children
):
    """Handle page load, slider moves, and "aggregate" button clicks.

    Branches on ``callback_context.triggered``:
    * initial load, no saved aggregate  -> create an empty aggregate store
    * initial load, saved aggregate     -> restore slider value and spinners
    * slider moved                      -> persist the new slider value
    * button clicked                    -> append every per-filter store's
      current selection to the aggregate lists and add one spinner
    """
    #it was noticed that upon initial load, the callback context had length >1
    #like
    #[{'prop_id': 'slider_additional.value', 'value': 0}, {'prop_id': 'toggleswitch_additional.value', 'value': True}]
    #this only works if the number of buttons is >1, but thats the case, so be it
    #therefore, we load from store if the callback context length is >1 and the store is not none
    print('@@@@@@@@@@@@@@@@@@@@')
    print(callback_context.triggered)
    print(button_aggregate_n_clicks)
    print(slider_aggregate_value)
    print(store_compound_data)
    print(store_additional_data)
    print(store_aggregate_data)
    check_for_errors(store_compound_data,store_additional_data)
    #cases
    #on this page for first time and button is clicked and no errors
    #because we are beyond check for errors, we assume no error
    if (len(callback_context.triggered)>1) and (store_aggregate_data is None):
        # and (button_aggregate_n_clicks==0):
        # First visit: initialize the aggregate store with empty selections.
        store_aggregate_data={
            'compounds':[],
            'from_species':[],
            'from_disease':[],
            'from_organ':[],
            'to_species':[],
            'to_disease':[],
            'to_organ':[],
            'additional_slider':[],
            'additional_toggleswitch':[],
            'aggregate_on_page_rangeslider':None,
            'aggregate_on_page_spinners':0
        }
        #store_compound_data=None
        #store_additional_data=None
        return store_aggregate_data, slider_aggregate_value,spinners_aggregate_children
    elif (len(callback_context.triggered)>1) and (store_aggregate_data is not None):
        #restore the "post aggregate stuff" otherwise do nothing?
        slider_aggregate_value=store_aggregate_data['aggregate_on_page_rangeslider']
        #if store_aggregate_data['aggregate_on_page_spinners'] >0:
        # Re-create one spinner per previously aggregated filter set.
        for i in range(store_aggregate_data['aggregate_on_page_spinners']):
            spinners_aggregate_children.append(
                dbc.Spinner(
                    color='primary',
                    type='grow'
                )
            )
        return store_aggregate_data, slider_aggregate_value,spinners_aggregate_children
    elif (len(callback_context.triggered)==1) and (callback_context.triggered[0]['prop_id']=='slider_aggregate.value'):
        # Slider interaction: just remember the value for the next page load.
        store_aggregate_data['aggregate_on_page_rangeslider']=slider_aggregate_value
        return store_aggregate_data, slider_aggregate_value,spinners_aggregate_children
    elif (len(callback_context.triggered)==1) and (callback_context.triggered[0]['prop_id']=='button_aggregate.n_clicks'):
        # Button click: snapshot every per-filter store into the aggregate.
        store_aggregate_data['compounds'].append(store_compound_data['compounds'])
        store_aggregate_data['from_species'].append(store_from_species_data['species'])
        store_aggregate_data['from_disease'].append(store_from_disease_data['disease'])
        store_aggregate_data['from_organ'].append(store_from_organ_data['organ'])
        store_aggregate_data['to_species'].append(store_to_species_data['species'])
        store_aggregate_data['to_disease'].append(store_to_disease_data['disease'])
        store_aggregate_data['to_organ'].append(store_to_organ_data['organ'])
        store_aggregate_data['additional_slider'].append(store_additional_data['slider_additional'])
        store_aggregate_data['additional_toggleswitch'].append(store_additional_data['toggleswitch_additional'])
        store_aggregate_data['aggregate_on_page_spinners']+=1
        spinners_aggregate_children.append(
            dbc.Spinner(
                color='primary',
                type='grow'
            )
        )
        #store_aggregate_data['aggregate_on_page_rangeslider']=slider_aggregate_value
        return store_aggregate_data, slider_aggregate_value,spinners_aggregate_children
    # NOTE(review): if no branch matches, the callback implicitly returns
    # None — confirm Dash tolerates that for this set of Outputs.
    #return [store_aggregate_data]
8008350 | import os
import numpy as np
import nibabel as nb
import pandas as pd
import glob
from nltools.simulator import Simulator
from nltools.data import (Brain_Data,
Adjacency,
Groupby,
Design_Matrix)
from nltools.stats import threshold, align
from nltools.mask import create_sphere
from nltools.external.hrf import glover_hrf
import matplotlib
import matplotlib.pyplot as plt
import six
from nltools.prefs import MNI_Template
shape_3d = (91, 109, 91)
shape_2d = (6, 238955)
def test_load(tmpdir):
    """Exercise Brain_Data construction and a write/read round-trip.

    Simulated data is written to *tmpdir*, then loaded from lists of 4D
    image paths, from nibabel images, and from a path plus a ``Y``
    DataFrame, and finally written back out and re-read.
    """
    sim = Simulator()
    r = 10
    sigma = 1
    y = [0, 1]
    n_reps = 3
    output_dir = str(tmpdir)
    dat = sim.create_data(y, sigma, reps=n_reps, output_dir=output_dir)
    # Expected shapes depend on the active MNI template resolution.
    if MNI_Template["resolution"] == '2mm':
        shape_3d = (91, 109, 91)
        shape_2d = (6, 238955)
    elif MNI_Template["resolution"] == '3mm':
        shape_3d = (60, 72, 60)
        shape_2d = (6, 71020)
    y = pd.read_csv(os.path.join(str(tmpdir.join('y.csv'))),header=None, index_col=None)
    # NOTE(review): `holdout` (and `r`, the local shape_3d/shape_2d) are
    # unused in this test.
    holdout = pd.read_csv(os.path.join(str(tmpdir.join('rep_id.csv'))),header=None,index_col=None)
    # Test load list of 4D images
    file_list = [str(tmpdir.join('data.nii.gz')), str(tmpdir.join('data.nii.gz'))]
    dat = Brain_Data(file_list)
    dat = Brain_Data([nb.load(x) for x in file_list])
    # Test load from a single path plus a Y DataFrame
    dat = Brain_Data(data=str(tmpdir.join('data.nii.gz')), Y=y)
    # Test Write, then reload what was written
    dat.write(os.path.join(str(tmpdir.join('test_write.nii'))))
    assert Brain_Data(os.path.join(str(tmpdir.join('test_write.nii'))))
def test_shape(sim_brain_data):
    """The simulated dataset reports the expected (images, voxels) shape."""
    observed = sim_brain_data.shape()
    assert observed == shape_2d
def test_mean(sim_brain_data):
    """Averaging over images collapses to a single voxel vector."""
    n_voxels = shape_2d[1]
    assert sim_brain_data.mean().shape()[0] == n_voxels
def test_std(sim_brain_data):
    """Standard deviation over images collapses to a single voxel vector."""
    n_voxels = shape_2d[1]
    assert sim_brain_data.std().shape()[0] == n_voxels
def test_sum(sim_brain_data):
    """Summing all images yields one image shaped like a single image."""
    total = sim_brain_data.sum()
    single_image_shape = sim_brain_data[1].shape()
    assert total.shape() == single_image_shape
def test_add(sim_brain_data):
    """Adding a dataset to itself preserves its (images, voxels) shape."""
    combined = sim_brain_data + sim_brain_data
    assert combined.shape() == shape_2d
def test_subtract(sim_brain_data):
    """Subtracting a dataset from itself preserves the shape."""
    difference = sim_brain_data - sim_brain_data
    assert difference.shape() == shape_2d
def test_multiply(sim_brain_data):
    """Elementwise product of a dataset with itself preserves the shape."""
    product = sim_brain_data * sim_brain_data
    assert product.shape() == shape_2d
def test_indexing(sim_brain_data):
    """Integer lists, ranges, boolean Series/arrays, and slices index images.

    NOTE(review): a second ``def test_indexing`` later in this module
    shadows this one, so pytest never collects it; one of the two should
    be renamed.
    """
    index = [0, 3, 1]
    assert len(sim_brain_data[index]) == len(index)
    index = range(4)
    assert len(sim_brain_data[index]) == len(index)
    index = sim_brain_data.Y == 1
    # Boolean indexing works with both a raw numpy mask and the Series.
    assert len(sim_brain_data[index.values.flatten()]) == index.values.sum()
    assert len(sim_brain_data[index]) == index.values.sum()
    assert len(sim_brain_data[:3]) == 3
def test_concatenate(sim_brain_data):
    """Rebuilding from an iterator of images round-trips type and length."""
    rebuilt = Brain_Data([image for image in sim_brain_data])
    assert isinstance(rebuilt, Brain_Data)
    assert len(rebuilt) == len(sim_brain_data)
def test_append(sim_brain_data):
    """Appending a dataset to itself doubles the number of images."""
    doubled = sim_brain_data.append(sim_brain_data)
    assert doubled.shape()[0] == shape_2d[0] * 2
def test_indexing(sim_brain_data):
    """Round-trip to a nifti image and back.

    NOTE(review): misnamed — the body exercises ``to_nifti()``, and this
    definition shadows the earlier ``test_indexing`` above; it should
    likely be called ``test_to_nifti``.
    """
    d = sim_brain_data.to_nifti()
    assert d.shape[0:3] == shape_3d
    assert Brain_Data(d)
def test_ttest(sim_brain_data):
    """One-sample t-test returns a t-map with one value per voxel.

    NOTE(review): a second ``def test_ttest`` later in this module shadows
    this one, so pytest never collects it; one of the two should be
    renamed.
    """
    out = sim_brain_data.ttest()
    assert out['t'].shape()[0] == shape_2d[1]
def test_regress(sim_brain_data):
    """OLS and robust regression return beta/t/p/residual maps of the
    expected shapes, and thresholding a t-map yields a Brain_Data."""
    # Design matrix: intercept plus the (binary) Y labels as one regressor.
    sim_brain_data.X = pd.DataFrame({'Intercept':np.ones(len(sim_brain_data.Y)),
                                    'X1':np.array(sim_brain_data.Y).flatten()}, index=None)
    # OLS
    out = sim_brain_data.regress()
    assert type(out['beta'].data) == np.ndarray
    assert type(out['t'].data) == np.ndarray
    assert type(out['p'].data) == np.ndarray
    assert type(out['residual'].data) == np.ndarray
    assert out['beta'].shape() == (2, shape_2d[1])
    assert out['t'][1].shape()[0] == shape_2d[1]
    # Robust OLS
    out = sim_brain_data.regress(mode='robust')
    assert type(out['beta'].data) == np.ndarray
    assert type(out['t'].data) == np.ndarray
    assert type(out['p'].data) == np.ndarray
    assert type(out['residual'].data) == np.ndarray
    assert out['beta'].shape() == (2, shape_2d[1])
    assert out['t'][1].shape()[0] == shape_2d[1]
    # Test threshold on the X1 regressor's t/p maps
    i=1
    tt = threshold(out['t'][i], out['p'][i], .05)
    assert isinstance(tt, Brain_Data)
def test_ttest(sim_brain_data):
    """Pairwise correlation distance is a square Adjacency over images.

    NOTE(review): misnamed — the body tests ``distance()``, not
    ``ttest()``, and this definition shadows the earlier ``test_ttest``
    above; it should likely be called ``test_distance``.
    """
    distance = sim_brain_data.distance(method='correlation')
    assert isinstance(distance, Adjacency)
    assert distance.square_shape()[0] == shape_2d[0]
def test_apply_mask(sim_brain_data):
    """Masking keeps exactly the voxels inside the sphere."""
    sphere = create_sphere([12, 10, -8], radius=10)
    assert isinstance(sphere, nb.Nifti1Image)
    masked = sim_brain_data.apply_mask(sphere)
    n_nonzero_voxels = np.sum(sphere.get_data() != 0)
    assert masked.shape()[1] == n_nonzero_voxels
def test_extract_roi(sim_brain_data):
    """ROI extraction yields one summary value per image."""
    roi = create_sphere([12, 10, -8], radius=10)
    extracted = sim_brain_data.extract_roi(roi)
    assert len(extracted) == shape_2d[0]
def test_r_to_z(sim_brain_data):
    """Fisher r-to-z transform preserves the data shape."""
    transformed = sim_brain_data.r_to_z()
    assert transformed.shape() == sim_brain_data.shape()
def test_copy(sim_brain_data):
    """A copy reports the same shape as its source."""
    duplicate = sim_brain_data.copy()
    assert duplicate.shape() == sim_brain_data.shape()
def test_detrend(sim_brain_data):
    """Detrending preserves the data shape."""
    detrended = sim_brain_data.detrend()
    assert detrended.shape() == sim_brain_data.shape()
def test_standardize(sim_brain_data):
    """Standardizing (default and zscore) keeps the shape and roughly
    zero-centers the voxel means."""
    for kwargs in ({}, {"method": "zscore"}):
        standardized = sim_brain_data.standardize(**kwargs)
        assert standardized.shape() == sim_brain_data.shape()
        assert np.isclose(np.sum(standardized.mean().data), 0, atol=.1)
def test_groupby_aggregate(sim_brain_data):
    """Groupby over a two-sphere mask returns a Groupby; aggregating with
    'mean' collapses to a single Brain_Data image."""
    spheres = [create_sphere([12, 10, -8], radius=10),
               create_sphere([22, -2, -22], radius=10)]
    mask = Brain_Data(spheres)
    grouped = sim_brain_data.groupby(mask)
    assert isinstance(grouped, Groupby)
    aggregated = sim_brain_data.aggregate(mask, 'mean')
    assert isinstance(aggregated, Brain_Data)
    assert len(aggregated.shape()) == 1
def test_threshold():
    """Thresholding by value/percentile and region extraction behave
    consistently on two synthetic spheres (one scaled by 5)."""
    s1 = create_sphere([12, 10, -8], radius=10)
    s2 = create_sphere([22, -2, -22], radius=10)
    mask = Brain_Data(s1)*5
    mask = mask + Brain_Data(s2)
    m1 = mask.threshold(upper=.5)
    m2 = mask.threshold(upper=3)
    m3 = mask.threshold(upper='98%')
    # NOTE(review): this first m4 assignment is immediately overwritten by
    # the two-sided threshold on the next line.
    m4 = Brain_Data(s1)*5 + Brain_Data(s2)*-.5
    m4 = mask.threshold(upper=.5,lower=-.3)
    # Stricter upper cutoff keeps fewer voxels; '98%' matches upper=.5 here.
    assert np.sum(m1.data > 0) > np.sum(m2.data > 0)
    assert np.sum(m1.data > 0) == np.sum(m3.data > 0)
    # Two-sided threshold zeroes everything strictly inside (-.3, .5).
    assert np.sum(m4.data[(m4.data > -.3) & (m4.data <.5)]) == 0
    assert np.sum(m4.data[(m4.data < -.3) | (m4.data >.5)]) > 0
    # Test Regions: binarizing region label 1 recovers the first sphere.
    r = mask.regions(min_region_size=10)
    m1 = Brain_Data(s1)
    m2 = r.threshold(1, binarize=True)
    assert len(np.unique(r.to_nifti().get_data())) == 2
    diff = m2-m1
    assert np.sum(diff.data) == 0
def test_bootstrap(sim_brain_data):
    """Bootstrapping mean/std/predict returns Brain_Data Z maps; saving
    weights keeps one sample per bootstrap iteration."""
    masked = sim_brain_data.apply_mask(create_sphere(radius=10, coordinates=[0, 0, 0]))
    n_samples = 3
    b = masked.bootstrap('mean', n_samples=n_samples)
    assert isinstance(b['Z'], Brain_Data)
    b = masked.bootstrap('std', n_samples=n_samples)
    assert isinstance(b['Z'], Brain_Data)
    b = masked.bootstrap('predict', n_samples=n_samples, plot=False)
    assert isinstance(b['Z'], Brain_Data)
    # Prediction bootstrap also works with an explicit k-folds CV scheme.
    b = masked.bootstrap('predict', n_samples=n_samples,
                        plot=False, cv_dict={'type':'kfolds','n_folds':3})
    assert isinstance(b['Z'], Brain_Data)
    b = masked.bootstrap('predict', n_samples=n_samples,
                        save_weights=True, plot=False)
    assert len(b['samples'])==n_samples
def test_predict(sim_brain_data):
    """Smoke-test each supported prediction algorithm / CV combination."""
    # Fake subject ids: two images per subject, three subjects.
    holdout = np.array([[x]*2 for x in range(3)]).flatten()
    # SVM with 2-fold cross-validation.
    stats = sim_brain_data.predict(algorithm='svm',
                                  cv_dict={'type': 'kfolds', 'n_folds': 2},
                                  plot=False, **{'kernel':"linear"})
    # SVM without CV, with Platt scaling (probability=True) — outputs
    # probabilities of each class.
    stats = sim_brain_data.predict(algorithm='svm',
                                  cv_dict=None, plot=False,
                                  **{'kernel':'linear', 'probability':True})
    assert isinstance(stats['weight_map'], Brain_Data)
    # Logistic classification, with 2 fold cross-validation.
    stats = sim_brain_data.predict(algorithm='logistic',
                                  cv_dict={'type': 'kfolds', 'n_folds': 2},
                                  plot=False)
    assert isinstance(stats['weight_map'], Brain_Data)
    # Ridge classification, no CV.
    stats = sim_brain_data.predict(algorithm='ridgeClassifier',
                                  cv_dict=None, plot=False)
    assert isinstance(stats['weight_map'], Brain_Data)
    # Ridge regression with subject-grouped k-folds.
    stats = sim_brain_data.predict(algorithm='ridge',
                                  cv_dict={'type': 'kfolds', 'n_folds': 2,
                                  'subject_id':holdout}, plot=False, **{'alpha':.1})
    # Lasso with stratified k-folds.
    stats = sim_brain_data.predict(algorithm='lasso',
                                  cv_dict={'type': 'kfolds', 'n_folds': 2,
                                  'stratified':sim_brain_data.Y},
                                  plot=False, **{'alpha':.1})
    # Principal components regression, no CV.
    stats = sim_brain_data.predict(algorithm='pcr', cv_dict=None, plot=False)
def test_similarity(sim_brain_data):
    """Similarity against a single weight map yields one score per image;
    against a whole dataset it yields an image-by-image matrix."""
    stats = sim_brain_data.predict(algorithm='svm',
                                  cv_dict=None, plot=False, **{'kernel':'linear'})
    r = sim_brain_data.similarity(stats['weight_map'])
    assert len(r) == shape_2d[0]
    # A raw nibabel image also works as the comparison target.
    r2 = sim_brain_data.similarity(stats['weight_map'].to_nifti())
    assert len(r2) == shape_2d[0]
    r = sim_brain_data.similarity(stats['weight_map'], method='dot_product')
    assert len(r) == shape_2d[0]
    r = sim_brain_data.similarity(stats['weight_map'], method='cosine')
    assert len(r) == shape_2d[0]
    # Dataset-vs-dataset similarity returns an (images x images) matrix.
    r = sim_brain_data.similarity(sim_brain_data, method='correlation')
    assert r.shape == (sim_brain_data.shape()[0],sim_brain_data.shape()[0])
    r = sim_brain_data.similarity(sim_brain_data, method='dot_product')
    assert r.shape == (sim_brain_data.shape()[0],sim_brain_data.shape()[0])
    r = sim_brain_data.similarity(sim_brain_data, method='cosine')
    assert r.shape == (sim_brain_data.shape()[0],sim_brain_data.shape()[0])
def test_decompose(sim_brain_data):
    """Each decomposition algorithm returns the requested number of
    components and per-image weights, along both axes."""
    n_components = 3
    stats = sim_brain_data.decompose(algorithm='pca', axis='voxels',
                                    n_components=n_components)
    assert n_components == len(stats['components'])
    assert stats['weights'].shape == (len(sim_brain_data), n_components)
    stats = sim_brain_data.decompose(algorithm='ica', axis='voxels',
                                    n_components=n_components)
    assert n_components == len(stats['components'])
    assert stats['weights'].shape == (len(sim_brain_data), n_components)
    # Shift and clip so the data is non-negative, as NNMF requires.
    # NOTE(review): this mutates the shared fixture in place (and happens
    # again below), so later assertions run on shifted data.
    sim_brain_data.data = sim_brain_data.data + 2
    sim_brain_data.data[sim_brain_data.data<0] = 0
    stats = sim_brain_data.decompose(algorithm='nnmf', axis='voxels',
                                    n_components=n_components)
    assert n_components == len(stats['components'])
    assert stats['weights'].shape == (len(sim_brain_data), n_components)
    stats = sim_brain_data.decompose(algorithm='fa', axis='voxels',
                                    n_components=n_components)
    assert n_components == len(stats['components'])
    assert stats['weights'].shape == (len(sim_brain_data), n_components)
    stats = sim_brain_data.decompose(algorithm='pca', axis='images',
                                    n_components=n_components)
    assert n_components == len(stats['components'])
    assert stats['weights'].shape == (len(sim_brain_data), n_components)
    stats = sim_brain_data.decompose(algorithm='ica', axis='images',
                                    n_components=n_components)
    assert n_components == len(stats['components'])
    assert stats['weights'].shape == (len(sim_brain_data), n_components)
    # Second non-negativity shift for NNMF along the images axis.
    sim_brain_data.data = sim_brain_data.data + 2
    sim_brain_data.data[sim_brain_data.data<0] = 0
    stats = sim_brain_data.decompose(algorithm='nnmf', axis='images',
                                    n_components=n_components)
    assert n_components == len(stats['components'])
    assert stats['weights'].shape == (len(sim_brain_data), n_components)
    stats = sim_brain_data.decompose(algorithm='fa', axis='images',
                                    n_components=n_components)
    assert n_components == len(stats['components'])
    assert stats['weights'].shape == (len(sim_brain_data), n_components)
def test_hyperalignment():
    """Check procrustes and SRM alignment over voxels (axis=0) and time
    (axis=1).

    For each method the transformed data is verified to equal the original
    data pushed through the returned transformation matrix (with centering,
    normalization and scale for procrustes).
    """
    sim = Simulator()
    y = [0, 1]
    n_reps = 10
    s1 = create_sphere([0, 0, 0], radius=3)
    d1 = sim.create_data(y, 1, reps=n_reps, output_dir=None).apply_mask(s1)
    d2 = sim.create_data(y, 2, reps=n_reps, output_dir=None).apply_mask(s1)
    d3 = sim.create_data(y, 3, reps=n_reps, output_dir=None).apply_mask(s1)
    data = [d1, d2, d3]
    # Test procrustes using align
    out = align(data, method='procrustes')
    assert len(data) == len(out['transformed'])
    assert len(data) == len(out['transformation_matrix'])
    assert data[0].shape() == out['common_model'].shape()
    # NOTE(review): this first `transformed` is immediately recomputed with
    # centering/normalization on the next two lines.
    transformed = np.dot(d1.data, out['transformation_matrix'][0])
    centered = d1.data - np.mean(d1.data, 0)
    transformed = (np.dot(centered/np.linalg.norm(centered), out['transformation_matrix'][0])*out['scale'][0])
    np.testing.assert_almost_equal(0, np.sum(out['transformed'][0].data - transformed), decimal=5)
    # Test deterministic SRM via the brain_data method
    bout = d1.align(out['common_model'], method='deterministic_srm')
    assert d1.shape() == bout['transformed'].shape()
    assert d1.shape() == bout['common_model'].shape()
    assert d1.shape()[1] == bout['transformation_matrix'].shape[0]
    btransformed = np.dot(d1.data, bout['transformation_matrix'])
    np.testing.assert_almost_equal(0, np.sum(bout['transformed'].data - btransformed))
    # Test probabilistic SRM via the brain_data method
    bout = d1.align(out['common_model'], method='probabilistic_srm')
    assert d1.shape() == bout['transformed'].shape()
    assert d1.shape() == bout['common_model'].shape()
    assert d1.shape()[1] == bout['transformation_matrix'].shape[0]
    btransformed = np.dot(d1.data, bout['transformation_matrix'])
    np.testing.assert_almost_equal(0, np.sum(bout['transformed'].data-btransformed))
    # Test procrustes brain_data
    bout = d1.align(out['common_model'], method='procrustes')
    assert d1.shape() == bout['transformed'].shape()
    assert d1.shape() == bout['common_model'].shape()
    assert d1.shape()[1] == bout['transformation_matrix'].shape[0]
    centered = d1.data - np.mean(d1.data, 0)
    btransformed = (np.dot(centered/np.linalg.norm(centered), bout['transformation_matrix'])*bout['scale'])
    np.testing.assert_almost_equal(0, np.sum(bout['transformed'].data-btransformed), decimal=5)
    np.testing.assert_almost_equal(0, np.sum(out['transformed'][0].data - bout['transformed'].data))
    # Test over time (axis=1): same checks on the transposed problem.
    sim = Simulator()
    y = [0, 1]
    n_reps = 10
    s1 = create_sphere([0, 0, 0], radius=5)
    d1 = sim.create_data(y, 1, reps=n_reps, output_dir=None).apply_mask(s1)
    d2 = sim.create_data(y, 2, reps=n_reps, output_dir=None).apply_mask(s1)
    d3 = sim.create_data(y, 3, reps=n_reps, output_dir=None).apply_mask(s1)
    data = [d1, d2, d3]
    out = align(data, method='procrustes', axis=1)
    assert len(data) == len(out['transformed'])
    assert len(data) == len(out['transformation_matrix'])
    assert data[0].shape() == out['common_model'].shape()
    centered = data[0].data.T-np.mean(data[0].data.T, 0)
    transformed = (np.dot(centered/np.linalg.norm(centered), out['transformation_matrix'][0])*out['scale'][0])
    np.testing.assert_almost_equal(0,np.sum(out['transformed'][0].data-transformed.T), decimal=5)
    bout = d1.align(out['common_model'], method='deterministic_srm', axis=1)
    assert d1.shape() == bout['transformed'].shape()
    assert d1.shape() == bout['common_model'].shape()
    assert d1.shape()[0] == bout['transformation_matrix'].shape[0]
    btransformed = np.dot(d1.data.T, bout['transformation_matrix'])
    np.testing.assert_almost_equal(0, np.sum(bout['transformed'].data-btransformed.T))
    bout = d1.align(out['common_model'], method='probabilistic_srm', axis=1)
    assert d1.shape() == bout['transformed'].shape()
    assert d1.shape() == bout['common_model'].shape()
    assert d1.shape()[0] == bout['transformation_matrix'].shape[0]
    btransformed = np.dot(d1.data.T, bout['transformation_matrix'])
    np.testing.assert_almost_equal(0, np.sum(bout['transformed'].data-btransformed.T))
    bout = d1.align(out['common_model'], method='procrustes', axis=1)
    assert d1.shape() == bout['transformed'].shape()
    assert d1.shape() == bout['common_model'].shape()
    assert d1.shape()[0] == bout['transformation_matrix'].shape[0]
    centered = d1.data.T-np.mean(d1.data.T, 0)
    btransformed = (np.dot(centered/np.linalg.norm(centered), bout['transformation_matrix'])*bout['scale'])
    np.testing.assert_almost_equal(0, np.sum(bout['transformed'].data-btransformed.T), decimal=5)
    np.testing.assert_almost_equal(0, np.sum(out['transformed'][0].data-bout['transformed'].data))
| StarcoderdataPython |
6542063 | import logging as log
def f(arg=""):
    """Return the literal prefix "arg" concatenated with *arg*.

    Bug fix: the call sites below invoke ``f()`` and ``f(f(f()))`` with no
    argument, which raised TypeError; *arg* now defaults to the empty
    string, leaving all explicit calls unchanged.
    """
    return "arg" + arg
# Sample log.debug call sites in assorted shapes (plain, nested, multi-line,
# with trailing and embedded comments) — useful as fixtures for tooling that
# parses or strips logging statements.
# NOTE(review): these call f() with no argument; confirm f accepts zero args.
# Function
a = "arg_var"
log.debug("debug function", "arg1", "arg2", f())
# Nested function
a = "arg_var"
log.debug("debug nested function", "arg1", "arg2", f(f(f())))
# Multi line
a = "arg_var"
log.debug("debug multi line",
    "arg1", "arg2",
    a)
# Multiline nested function
a = "arg_var"
log.debug("debug multi line nested function",
    "arg1", "arg2",
    f(f(f())))
# Comment at the end
a = "arg_var"
log.debug("debug comment at end", # comment
    "arg1", "arg2", #comment
    a) # comment
# Comment in the middle
a = "arg_var"
log.debug("debug comment in between",
    # "arg1", "arg2",
    a)
12814423 | <filename>src/ydata_quality/utils/logger.py<gh_stars>100-1000
"Data Quality logger functions"
import logging
import os
import sys
from logging import _nameToLevel
from typing import TextIO
# Default vars for the logger
NAME = os.getenv('DQ_LOGGER_NAME', 'DQ_Logger')
def get_logger(name, stream: TextIO = sys.stdout, level: str = "INFO"):
    """Return a logger named *name* writing to *stream* at *level*.

    Will not create another logger (or attach a second handler) if one
    with the same name already exists. *level* is a logging level name
    such as "INFO" or "DEBUG"; ``None`` falls back to the INFO threshold.

    Bug fix: the default used to be the integer ``logging.INFO``, which is
    not among the ``_nameToLevel`` names, so the assertion below failed on
    every call that relied on the default; the default is now the level
    *name* "INFO".
    """
    acceptable_levels = [None] + list(_nameToLevel.keys())
    assert level in acceptable_levels, f"Valid levels for warning severity are {acceptable_levels}. \
        Defaults to info level."
    if not level:
        level = logging.INFO  # Default threshold
    handler = logging.StreamHandler(stream)
    handler.setFormatter(
        logging.Formatter("%(levelname)s | %(message)s")
    )
    # getLogger returns the existing instance for a repeated name, so the
    # handler is only attached once.
    logger = logging.getLogger(name)
    logger.setLevel(level)
    if len(logger.handlers) == 0:
        logger.addHandler(handler)
    logger.propagate = False
    return logger
| StarcoderdataPython |
#!python
import os, sys, string, time, BaseHTTPServer, getopt, re, subprocess, webbrowser
from operator import itemgetter
from utils import *
from preprocess import Preprocess
from assemble import Assemble
sys.path.append(INITIAL_UTILS)
from ruffus import *
_readlibs = []
_skipsteps = []
_settings = Settings()
_asm = None
_mapper = "bowtie"
def init(reads, skipsteps, asm, mapper):
    """Store the pipeline configuration in this module's globals.

    Bug fix: ``_mapper`` was assigned without a ``global`` declaration, so
    the assignment created a function-local name and the module-level
    ``_mapper`` was never updated; it is now declared global alongside the
    other settings.
    """
    global _readlibs
    global _asm
    global _skipsteps
    global _mapper
    _mapper = mapper
    _readlibs = reads
    _skipsteps = skipsteps
    _asm = asm
@files("%s/Assemble/out/%s.bout"%(_settings.rundir,_settings.PREFIX))
#@posttask(create_symlink,touch_file("completed.flag"))
@follows(MapReads)
def CalcDist(input,output):
    """Ruffus pipeline step run after MapReads.

    Returns 0 immediately when the step is listed in ``_skipsteps`` (either
    capitalization). NOTE(review): the body below appears truncated in this
    snapshot — only the skip check and an explanatory comment remain.
    """
    if "CalcDist" in _skipsteps or "calcdist" in _skipsteps:
        return 0
    #given read pairs mapped to contigs, calc insert length
4868378 | import pytest
from liualgotrader.common import assets
@pytest.mark.devtest
def test_round_asset() -> bool:
    """Print ``assets.round_asset`` output for a sample BTCUSD price.

    NOTE(review): this test asserts nothing and returns True; pytest
    ignores (and newer versions warn about) non-None returns from test
    functions — consider asserting on the rounded value instead.
    """
    f = 0.999349343434
    print(f, assets.round_asset("BTCUSD", f))
    return True
| StarcoderdataPython |
6535183 | import os
from httpx import AsyncClient
from canvas.files import FileModel, FolderModel
class CanvasFileManager:
    """A file manager for downloading Canvas files and folders.

    NOTE(review): several methods are unimplemented stubs, and
    ``download_file`` reads ``self.scrapers``, which is never defined in
    this class — presumably injected elsewhere or by a subclass; confirm
    before use.
    """

    # HTTP client used for all requests.
    client: AsyncClient
    # Root folder of the managed tree (never assigned in this class).
    root: FolderModel

    def __init__(self, client: AsyncClient) -> None:
        self.client = client

    def latest_update(self, t = '', count: int = 10):
        """Get the latest updated files and/or folders.

        TODO(review): stub — ``t`` and ``count`` are currently unused.
        """
        pass

    async def download_file(self, file: FileModel):
        """Scrape one file's bytes and write them under its relative path.

        Creates the destination directory if needed; does nothing when the
        scraper returns no data.
        """
        # NOTE(review): self.scrapers is not defined on this class — TODO
        # confirm where it is injected.
        s = self.scrapers['file'](self.client)
        data = await s.scrape(file.id)
        if data:
            filepath = file.get_relative_path()
            dir = os.path.dirname(filepath)
            if not os.path.exists(dir):
                os.makedirs(dir)
            with open(filepath, 'wb') as f:
                f.write(data)
            print(f'{filepath} saved ({len(data)})')

    def download_folder(self, folder: FolderModel):
        # TODO(review): stub — not yet implemented.
        pass

    def download_all(self):
        """Download all files. TODO(review): stub."""
        pass

    def get_latest_file_info(self):
        """Get the latest file and folder information from the web.

        TODO(review): stub.
        """
        pass

    def update_local(self):
        """Update local files and folders to match stored file information in memory"""

    async def download_latest(self):
        """Download the latest updated files.

        NOTE(review): awaits ``get_latest_file_info()``, which is a plain
        (non-async) method returning None — ``await None`` raises
        TypeError at runtime; it likely needs to be declared ``async``.
        """
        await self.get_latest_file_info()
        self.update_local()
| StarcoderdataPython |
4804037 | <reponame>JustinKuli/adventofcode2020
#!/usr/bin/env python3
# Read the puzzle input rules, one line per entry, newline stripped.
data = []
with open('data.txt', 'r') as file:
    for line in file:
        data.append(line[:-1]) # Strip the newline char!
# Hard-coded field values from the "your ticket" section of the input.
your_ticket = [79,193,53,97,137,179,131,73,191,139,197,181,67,71,211,199,167,61,59,127]
def get_ranges():
    """Collect every "lo-hi" range token found in the rule lines `data`.

    Returns a flat list of [lo, hi] integer pairs in encounter order.
    """
    collected = []
    for line in data:
        for token in line.split(sep=" "):
            if "-" not in token:
                continue
            parts = token.split(sep="-")
            collected.append([int(parts[0]), int(parts[1])])
    return collected
def could_be_valid(num, ranges):
    """Return True when *num* falls inside any inclusive [lo, hi] range."""
    return any(lo <= num <= hi for lo, hi in ranges)
def part_one():
    """Part 1: sum every nearby-ticket value that fits no rule's ranges
    (the "ticket scanning error rate") and print it."""
    invalid_sum = 0
    ranges = get_ranges()
    with open('nearby-tickets.txt', 'r') as file:
        for line in file:
            line = line[:-1] # strip the newline
            for num in line.split(sep=","):
                if not could_be_valid(int(num), ranges):
                    invalid_sum += int(num)
    print("part one answer:", invalid_sum)
def get_valid_tickets():
    """Return nearby tickets (as lists of number strings) in which every
    value fits at least one rule's ranges."""
    tickets = []
    ranges = get_ranges()
    with open('nearby-tickets.txt', 'r') as file:
        for line in file:
            line = line[:-1] # strip the newline
            valid = True
            for num in line.split(sep=","):
                if not could_be_valid(int(num), ranges):
                    valid = False
                    break
            if valid:
                tickets.append(line.split(sep=","))
    return tickets
def get_labelled_ranges():
    """Map each rule's label (the text before ':') to its [lo, hi] ranges.

    Every line of `data` contributes an entry; lines without any "lo-hi"
    token map to an empty list.
    """
    labelled = {}
    for line in data:
        label = line.split(sep=":")[0]
        bounds = []
        for token in line.split(sep=" "):
            if "-" not in token:
                continue
            parts = token.split(sep="-")
            bounds.append([int(parts[0]), int(parts[1])])
        labelled[label] = bounds
    return labelled
def get_potential_labels(num, l_ranges):
    """Return the set of labels whose ranges could accept *num*.

    *l_ranges* maps label -> list of inclusive [lo, hi] pairs, as produced
    by get_labelled_ranges().
    """
    return {label
            for label, ranges in l_ranges.items()
            if any(lo <= num <= hi for lo, hi in ranges)}
def part_two():
    """Part 2: deduce which field each ticket position holds, then print
    the product of your ticket's "departure" fields."""
    tickets = get_valid_tickets()
    l_ranges = get_labelled_ranges()
    # For each position, intersect the candidate labels across all valid
    # tickets (early exit once a position is pinned to one label).
    curr_labels = []
    for i, t0 in enumerate(tickets[0]):
        pot_labels = get_potential_labels(int(t0), l_ranges)
        for t in tickets[1:]:
            if len(pot_labels) == 1:
                break
            pot_labels = pot_labels.intersection(get_potential_labels(int(t[i]), l_ranges))
        curr_labels.append(pot_labels)
    # Constraint propagation: repeatedly remove every settled (singleton)
    # label from the other positions until all positions are singletons.
    while [len(x) for x in curr_labels] != [1 for _ in curr_labels]:
        for i, item in enumerate(curr_labels):
            if len(item) == 1:
                for jtem in curr_labels[:i] + curr_labels[i+1:]:
                    jtem.discard(next(iter(item)))
    departure_indexes = [i for i, x in enumerate(curr_labels) if "departure" in next(iter(x))]
    your_departures = [x for i, x in enumerate(your_ticket) if i in departure_indexes]
    prod = 1
    for d in your_departures:
        prod *= d
    print("part two answer:", prod)
# Solve and print both puzzle parts when the script runs.
part_one()
part_two()
| StarcoderdataPython |
4887959 | from django.db import models
from django.contrib import auth
class Publisher(models.Model):
    """A company that publishes books."""

    name = models.CharField(max_length=50, help_text="The name of the Publisher.")
    website = models.URLField(help_text="The publisher's website.")
    email = models.EmailField(help_text="The publisher's email address.")

    def __str__(self) -> str:
        """Human-readable representation used in the admin and templates."""
        return self.name
class Contributor(models.Model):
    """A contributor to a book, e.g. author, editor, co-author."""

    first_names = models.CharField(
        max_length=50, help_text="The contributor's first name or names."
    )
    last_names = models.CharField(
        max_length=50, help_text="The contributor's last name or names."
    )
    email = models.EmailField(help_text="The contact email for the contributor.")

    def __str__(self):
        # Display the short "Last, Initials" form by default.
        return self.initialed_name

    @property
    def initialed_name(self):
        """Name of contributor with only first names' initials. E.g. Pax, RW"""
        fn_initials = ''.join(name[0] for name in self.first_names.split(' '))
        return f"{self.last_names}, {fn_initials}"

    @property
    def full_name(self):
        """Full name of contributor. E.g. Williams, Pax"""
        return f"{self.last_names}, {self.first_names}"

    @property
    def contribution_count(self):
        """Number of BookContributor rows recorded for this contributor."""
        return self.bookcontributor_set.count()
class Book(models.Model):
    """A published book."""

    title = models.CharField(max_length=70, help_text="The title of the book.")
    # NOTE(review): verbose_name here reads like help text -- presumably
    # help_text was intended; confirm before relying on admin labels.
    publication_date = models.DateField(verbose_name="Date the book was published.")
    isbn = models.CharField(max_length=20, verbose_name="ISBN number of the book.")
    # Book has trailing slash on directories. E.g. 'book_covers/'
    # TODO We'll add the trailing slash if this doesn't work. Otherwise, remove
    # TODO and mention that it also works without a trailing slash.
    cover = models.ImageField(null=True, blank=True, upload_to='book_covers')
    sample = models.FileField(null=True, blank=True, upload_to='book_samples')
    ### RELATIONSHIPS
    # Since `publisher` is a non-nullable ForeignKey, it is mandatory to pass.
    # `contributors`, on the other hand, are not mandatory.
    # Book.objects.create(
    #     title="Cracking the Code",
    #     publication_date=date(2012, 11, 21),
    #     isbn="7537334534243",
    #     publisher=some_publisher_object
    # )
    publisher = models.ForeignKey(Publisher, on_delete=models.CASCADE)
    contributors = models.ManyToManyField(Contributor, through='BookContributor')

    def isbn13(self):
        """Format isbn with hyphens."""
        # Slices split the 13 digits into 3-1-2-6-1 groups.
        return f"{self.isbn[0:3]}-{self.isbn[3:4]}-{self.isbn[4:6]}-{self.isbn[6:12]}-{self.isbn[12:13]}"

    def __str__(self) -> str:
        return self.title
class BookContributor(models.Model):
    """Through-model linking a Book to a Contributor with a specific role."""

    class ContributionRole(models.TextChoices):
        # Pairs of (stored value, human-readable label).
        AUTHOR = "AUTHOR", "Author"
        CO_AUTHOR = "CO_AUTHOR", "Co-Author"
        EDITOR = "EDITOR", "Editor"

    book = models.ForeignKey(Book, on_delete=models.CASCADE)
    contributor = models.ForeignKey(Contributor, on_delete=models.CASCADE)
    role = models.CharField(
        verbose_name="The role this contributor had in the book.",
        choices=ContributionRole.choices,
        max_length=20,
    )
class Review(models.Model):
    """A user's review of a Book, with rating and edit tracking."""

    content = models.TextField(help_text="The review text.")
    rating = models.IntegerField(help_text="The rating the reviewer has given.")
    date_created = models.DateTimeField(
        auto_now_add=True, help_text="The date and time the review was created."
    )
    # Stays NULL until the review is edited for the first time.
    date_edited = models.DateTimeField(
        null=True, help_text="The date and time the review was last edited."
    )
    # `auth.get_user_model()` refers to the `User` model from Django's built-in
    # authentication module.
    creator = models.ForeignKey(auth.get_user_model(), on_delete=models.CASCADE)
    book = models.ForeignKey(
        Book, on_delete=models.CASCADE, help_text="The book that this review is for."
    )
| StarcoderdataPython |
9767439 | from string import Template
preamble = '''#include "arm_compute/graph.h"
#include "support/ToolchainSupport.h"
#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;
class Graph${network}Example : public Example
{
public:
Graph${network}Example()
: cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "${network}")
{
}
bool do_setup(int argc, char **argv) override
{
// Parse arguments
cmd_parser.parse(argc, argv);
// Consume common parameters
common_params = consume_common_graph_parameters(common_opts);
// Return when help menu is requested
if(common_params.help)
{
cmd_parser.print_help(argv[0]);
return false;
}
// Checks
ARM_COMPUTE_EXIT_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "QASYMM8 not supported for this graph");
// Print parameter values
std::cout << common_params << std::endl;
// Get trainable parameters data path
std::string data_path = common_params.data_path;
// Create a preprocessor object
const std::array<float, 3> mean_rgb{ { ${mean_r}, ${mean_g}, ${mean_b} } };
std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
// Create input descriptor
const TensorShape tensor_shape = permute_shape(TensorShape(${W}, ${H}, ${C}, ${N}), DataLayout::NCHW, common_params.data_layout);
TensorDescriptor input_descriptor = TensorDescriptor(tensor_shape, common_params.data_type).set_layout(common_params.data_layout);
// Set weights trained layout
const DataLayout weights_layout = DataLayout::NCHW;
graph << common_params.target
<< common_params.fast_math_hint
<< InputLayer(input_descriptor, get_input_accessor(common_params, std::move(preprocessor)))
'''
postamble = '''
// Finalize graph
GraphConfig config;
config.num_threads = common_params.threads;
config.use_tuner = common_params.enable_tuner;
config.tuner_file = common_params.tuner_file;
graph.finalize(common_params.target, config);
return true;
}
void do_run() override
{
// Run graph
graph.run();
}
private:
CommandLineParser cmd_parser;
CommonGraphOptions common_opts;
CommonGraphParams common_params;
Stream graph;
};
'''
main_code_preamble_A = ''' unsigned times[NO_OF_RUNS];
auto t1 = std::chrono::high_resolution_clock::now();
for (unsigned i = 0; i < NO_OF_RUNS; i++) {
'''
main_code_preamble_B = '''
auto t2 = std::chrono::high_resolution_clock::now();
times[i] = std::chrono::duration_cast<std::chrono::nanoseconds>(t2-t1).count();
t1 = t2;
}
for (unsigned i = 0; i < NO_OF_RUNS; i++) {
std::cout << times[i] << std::endl;
}
'''
main_code_postamble = '''
return 0;
}
'''
class ARMCLRuntime(object):
    """Emits a self-contained ARM Compute Library C++ example file."""

    def __init__(self, output):
        # `output` is any writable file-like object that receives the
        # generated C++ source.
        self.output = output

    def generate(self, code, transformer):
        """Write the full C++ program: preamble, generated graph body,
        Example epilogue and a timing main().

        `code[0]` is the generated layer-by-layer graph code; `transformer`
        supplies the network name and the input tensor shape via its
        graph's 'data' node (unpacked here in N, C, H, W order).
        """
        (in_N, in_C, in_H, in_W) = transformer.graph.get_node('data').output_shape
        # Fill network name, Caffe mean values and input dimensions into
        # the C++ preamble template.
        self.output.write(Template(preamble).substitute(network=transformer.graph.name,
                          mean_r="122.68f", mean_g="116.67f", mean_b="104.01f",
                          W="{}U".format(in_W), H="{}U".format(in_H),
                          C="{}U".format(in_C), N="{}U".format(in_N)))
        self.output.write('\n')
        self.output.write(code[0])
        self.output.write('\n')
        self.output.write(postamble)
        self.output.write('\n')
        self.output.write('int main(int argc, char **argv) {\n')
        self.output.write('\n\n')
        self.output.write(' arm_compute::CLScheduler::get().default_init();\n\n')
        self.output.write(main_code_preamble_A)
        self.output.write(main_code_preamble_B)
        self.output.write(main_code_postamble)
| StarcoderdataPython |
9722338 | <reponame>cdev-framework/cdev-sdk<gh_stars>1-10
# Generated as part of Quick Start project template
from cdev.resources.simple.xlambda import simple_function_annotation
import aurora_data_api
import pandas
from .utils import helper_function
@simple_function_annotation("hello_world_function")
def hello_world(event, context):
    """Handler that logs a greeting, runs the shared helper and returns 200."""
    print("Hello from inside your Function!")
    helper_function()
    response = {"status_code": 200, "message": "Hello Outside World!"}
    return response
@simple_function_annotation("hello_world_function2")
def hello_world2(event, context):
    """Handler that also exercises the aurora_data_api dependency."""
    print("Hello from inside your Function!")
    print(aurora_data_api)
    response = {"status_code": 200, "message": "Hello Outside World!"}
    return response
@simple_function_annotation("hello_world_function3")
def hello_world3(event, context):
    """Handler that exercises both aurora_data_api and pandas dependencies."""
    print("Hello from inside your Function!")
    print(aurora_data_api)
    print(pandas)
    response = {"status_code": 200, "message": "Hello Outside World!"}
    return response
@simple_function_annotation("hello_world_function4")
def hello_world4(event, context):
    """Minimal handler: returns a static success payload."""
    response = {"status_code": 200, "message": "Hello Outside World!"}
    return response
| StarcoderdataPython |
3228676 | <filename>bch/cli.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import logging
import click
import click_log
import simplejson as json
from datetime import datetime
import paho.mqtt.client
from paho.mqtt.client import topic_matches_sub
import bch.node
import bch.gateway
from bch.mqtt_client import MqttClient
__version__ = '@@VERSION@@'
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
logger = logging.getLogger()
handler = click_log.ClickHandler()
handler.setFormatter(click_log.ColorFormatter('%(asctime)s %(message)s'))
logger.addHandler(handler)
@click.group(context_settings=CONTEXT_SETTINGS)
@click.option('--gateway', type=click.STRING, help="Gateway name [default: usb-dongle].", default="usb-dongle")
@click.option('-H', '--mqtt-host', type=click.STRING, default="127.0.0.1", help="MQTT host to connect to [default: 127.0.0.1].")
@click.option('-P', '--mqtt-port', type=click.IntRange(0, 65535), default=1883, help="MQTT port to connect to [default: 1883].")
@click.option('--mqtt-username', type=click.STRING, help="MQTT username.")
@click.option('--mqtt-password', type=click.STRING, help="MQTT password.")
@click.option('--mqtt-cafile', type=click.Path(exists=True), help="MQTT cafile.")
@click.option('--mqtt-certfile', type=click.Path(exists=True), help="MQTT certfile.")
@click.option('--mqtt-keyfile', type=click.Path(exists=True), help="MQTT keyfile.")
@click_log.simple_verbosity_option(logger, default='WARNING')
@click.pass_context
def cli(ctx, gateway, mqtt_host, mqtt_port, mqtt_username, mqtt_password, mqtt_cafile, mqtt_certfile, mqtt_keyfile):
    # Build the shared MQTT client once and expose it, together with the
    # gateway name, to every subcommand via the click context object.
    ctx.obj['mqttc'] = MqttClient(mqtt_host, mqtt_port, mqtt_username, mqtt_password, mqtt_cafile, mqtt_certfile, mqtt_keyfile)
    ctx.obj['gateway'] = gateway
    # mqttc.reconnect()
@cli.command()
@click.option('--start', 'command', flag_value='start')
@click.option('--stop', 'command', flag_value='stop')
@click.pass_context
def pairing(ctx, command):
    # One of --start/--stop is required; otherwise show this command's help.
    if not command:
        click.echo(pairing.get_help(ctx))
        sys.exit(1)
    mqttc = ctx.obj['mqttc']
    gateway = ctx.obj['gateway']
    mqttc.loop_start()
    topic = 'gateway/' + gateway + '/pairing-mode/' + command
    published = mqttc.publish(topic, None, qos=1)
    # Block until the broker has acknowledged the publish.
    published.wait_for_publish()
@cli.command()
@click.argument('topic', type=click.STRING)
@click.argument('payload', type=click.STRING, required=False)
@click.pass_context
def pub(ctx, topic, payload):
    """Publish PAYLOAD to TOPIC, decoding JSON payloads when possible."""
    if payload:
        try:
            # Interpret the payload as JSON when it parses ...
            payload = json.loads(payload, use_decimal=True)
        except json.decoder.JSONDecodeError:
            # ... otherwise deliberately keep it as a plain string.
            # (The unused `as e` binding was removed.)
            pass
    mqttc = ctx.obj['mqttc']
    msg = mqttc.publish(topic, payload, qos=1)
    # Block until the broker has acknowledged the publish.
    msg.wait_for_publish()
@cli.command(help="Subscribe topic.", epilog="TOPIC [default: #]")
@click.argument('topic', type=click.STRING, default="#")
@click.option('-n', '--number', type=click.INT, help="Number of messages.")
@click.pass_context
def sub(ctx, topic, number):
    def on_message(client, userdata, message):
        # Timestamp trimmed to millisecond precision (22 chars).
        dt = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:22]
        click.echo(dt + ' ' + message.topic + ' ' + message.payload.decode('utf-8'))
        # A function attribute keeps the message count across callbacks.
        on_message.cnt += 1
        if number and on_message.cnt == number:
            # Stop once the requested number of messages was printed.
            sys.exit(0)
    on_message.cnt = 0
    mqttc = ctx.obj['mqttc']
    mqttc.mqttc.on_message = on_message
    mqttc.subscribe(topic)
    # Blocks forever (or until sys.exit above fires inside the callback).
    mqttc.loop_forever()
@cli.command(help="Show program's version number and exit.")
def version():
    # __version__ is substituted at build time (see module top).
    click.echo(__version__)
# Register resource subcommand groups defined in sibling modules.
cli.add_command(bch.gateway.gw)
cli.add_command(bch.node.node)


def main():
    # Console-script entry point; starts click with an empty context object.
    cli(obj={})


if __name__ == '__main__':
    main()
| StarcoderdataPython |
4977850 | <reponame>vcyrus/python-workshop<filename>module.py
def add(x, y):
    """Return x + y."""
    total = x + y
    return total
# Runs on every import of this module.
print('This is module.py')
if __name__ == '__main__':
    # Runs only when the file is executed directly, not when imported.
    print('Running module.py as main script')
| StarcoderdataPython |
8163336 | <reponame>PaloAltoNetworks/SLI<filename>sli/commands/createTemplate.py
from .base import BaseCommand
from sli.decorators import require_single_skillet, require_skillet_type
from sli.tools import format_xml_string
from lxml import etree
import re
from io import BytesIO
class CreateTemplate(BaseCommand):
    """SLI command: weave a skillet's XML snippet templates into a baseline
    PAN-OS/Panorama configuration, producing a templated config file."""

    sli_command = 'create_template'
    short_desc = 'Create an XML template from a panos or panorama skillet'
    # This command does not need a persisted SLI context.
    no_context = True
    help_text = """
Usage:
    sli create_template -n [skillet] [baseline-file] [out-file]
"""

    @staticmethod
    def insert_template_str(line, config):
        """
        Break up an xml config (str) at a specified line to inject template_string.
        """
        # `line` is a dict with keys 'line' (1-based target line number in
        # `config`) and 'template' (the text to insert there).
        i = 1
        r = ""
        # Walk the config line by line, skipping empty lines, rebuilding it
        # into `r` and splicing the template in at the target line.
        for cl in [x for x in config.split("\n") if len(x)]:
            if line["line"] == i:
                # Re-indent the template to match the anchor element.
                leading_spaces = len(cl) - len(cl.lstrip())
                closing_tag = None
                # If tag without children, expand to tag with children and insert template
                if re.search(r'<[a-zA-Z0-9-]+/>', cl):
                    ll = cl.replace("/", "")
                    r += ll + "\n"
                    closing_tag = ll.replace("<", "</") + "\n"
                # If not a tag at all, there's already a template here, just insert
                elif not cl.strip().startswith("<"):
                    pass
                # If just an opening tag is present, insert after
                else:
                    r += cl + "\n"
                for tl in line["template"].split("\n"):
                    r += " " * leading_spaces + tl + "\n"
                if closing_tag is not None:
                    r += closing_tag
            else:
                r += cl + "\n"
            i += 1
        return r

    @staticmethod
    def find_closest_entry(xml, xpath):
        """
        find the shortest xpath that returns a valid element, and determine what
        nodes are missing from the full expath. Returns a tuple containing the
        shortened xpath that matches a node, and a list of the missing nodes
        """
        xs = [x for x in xpath.split("/") if len(x)]
        # Strip one trailing node at a time until the xpath matches something.
        for i in range(len(xs)):
            xpath_short = "/" + "/".join(xs[:-1 * (i + 1)])
            if len(xpath_short) < 1:
                raise Exception(f"Could not find valid entry point for {xpath}")
            found = xml.xpath(xpath_short)
            if len(found) == 0:
                continue
            else:
                # Whatever was trimmed off the matching prefix is the list
                # of nodes that must be created.
                missing = [x for x in xpath.replace(xpath_short, "").split("/") if len(x)]
                break
        return xpath_short, missing

    @staticmethod
    def strip_var_spaces(in_str) -> str:
        """Remove spaces around variables inside of a block of text"""
        return in_str.replace("{{ ", "{{").replace(" }}", "}}")

    @require_single_skillet
    @require_skillet_type("panos", "panorama")
    def run(self):
        # Expect exactly: baseline-file and out-file.
        if not len(self.args) == 2:
            print(self.help_text)
            return
        out_file = self.args[1]
        baseline_file = self.args[0]
        with open(baseline_file, "r") as f:
            config = f.read()
            f.seek(0)
            parser = etree.XMLParser(remove_blank_text=True)
            baseline_xml = etree.parse(f, parser)
        snippets = self.sli.skillet.get_snippets()
        # Verify required parameters present in snippets
        for snippet in snippets:
            if getattr(snippet, "template_str", None) is None:
                print(f"Snippet {snippet.name} has no template_str")
                return
            if "xpath" not in snippet.metadata:
                print(f"Snippet {snippet.name} has no xpath")
                return
        # Expand any missing XML nodes
        for snippet in snippets:
            snippet_xpath = self.strip_var_spaces(snippet.metadata["xpath"])
            if len(baseline_xml.xpath(snippet_xpath)):
                continue
            short_xpath, missing = self.find_closest_entry(baseline_xml, snippet_xpath)
            ele = baseline_xml.xpath(short_xpath)[0]
            # Build each missing node in turn, handling predicates of the
            # form tag[@attr='value'] by setting the attribute on the new node.
            for missing_ele in missing:
                attr = None
                new_ele_tag = missing_ele
                if "[" in missing_ele:
                    es = missing_ele.split("[")
                    new_ele_tag = es[0]
                    attr = es[1].strip()
                    for c in "[]@'\" ":
                        attr = attr.replace(c, "")
                    attr = attr.split("=")
                new_ele = etree.Element(new_ele_tag)
                if attr is not None:
                    new_ele.set(*attr)
                ele.append(new_ele)
                ele = new_ele
        # Rewrite config var and reload xml document to ensure accurate line numbers
        temp_file = BytesIO()
        baseline_xml.write(temp_file, pretty_print=True)
        temp_file.seek(0)
        config = format_xml_string(temp_file.read().decode())
        baseline_xml = etree.fromstring(config)
        # Find the various insert points on all snippets
        lines = []
        for snippet in snippets:
            xpath = snippet.metadata["xpath"]
            found = baseline_xml.xpath(self.strip_var_spaces(xpath))
            if len(found) > 1:
                raise Exception(f"xpath {xpath} returned more than 1 result in baseline")
            elif not len(found):
                raise Exception(f" Unable to find entry point for {xpath}")
            # Insert point found
            else:
                lines.append({
                    "template": self.strip_var_spaces(snippet.template_str),
                    "line": found[0].sourceline
                })
        # Sort the keys so we're starting from the point furthest down the file
        lines = sorted(lines, key=lambda i: i["line"], reverse=True)
        # Insert snippets one at a time until complete
        for line in lines:
            config = self.insert_template_str(line, config)
        with open(out_file, "w") as f:
            f.write(config)
| StarcoderdataPython |
4931330 | #
# provide a pointer to the right settings.py file for
# programs that assume it starts in the current directory
from htsworkflow.settings import *
| StarcoderdataPython |
9664224 | <filename>src/wagtail_site_inheritance/receivers.py
from django.db import transaction
from wagtail.core.signals import page_unpublished
from wagtail_site_inheritance.models import PageInheritanceItem
def remove_copies(sender, instance, **kwargs):
    """
    Remove all published copies when unpublishing a page.

    For now we remove all non modified copies of the inherited page on unpublish for the
    modified copies we only remove the page link and make them "stand-alone".
    """
    inheritance_items = PageInheritanceItem.objects.filter(page=instance)
    if not inheritance_items.exists():
        return
    with transaction.atomic():
        for inheritance_item in inheritance_items:
            if not inheritance_item.modified:
                inheritance_item.inherited_page.delete()
            inheritance_item.delete()
def register_handlers():
    # Hook remove_copies into Wagtail's page_unpublished signal.
    page_unpublished.connect(remove_copies)
| StarcoderdataPython |
3561902 | from django.db import models
# Create your models here.
class ApplicantLoggedIn(models.Model):
    """Record of an applicant login (username/password pair)."""

    username = models.CharField(max_length=70)
    # NOTE(review): password stored as a plain CharField -- no hashing is
    # visible here; confirm how/where it is protected.
    password = models.CharField(max_length=50)
class ApplicantSignedUp(models.Model):
    """Record of an applicant sign-up (username, email, password)."""

    username = models.CharField(max_length=70)
    emailID = models.EmailField(max_length=70)
    # NOTE(review): stored as a plain CharField -- no hashing visible here.
    newPassword = models.CharField(max_length=50)
| StarcoderdataPython |
3259834 | #! /usr/bin/python3
# modules additionnels
import falcon
import psycopg2
class dgac_drone(object):
    """Falcon resource serving DGAC/SIA drone flight-restriction zones as
    GeoJSON around a lat/lon point."""

    def getDrone(self, req, resp):
        """Look up restriction zones around ?lat=&lon= within ?rayon= meters.

        Optional filters: limite (exact altitude limit), limite_min.
        """
        lat_raw = req.params.get('lat')
        lon_raw = req.params.get('lon')
        # Validate before touching the values: the previous code called
        # .replace() on None (AttributeError/HTTP 500) when a parameter was
        # missing, and rejected a legitimate 0.0 coordinate as "missing".
        if lat_raw is None or lon_raw is None:
            resp.status = falcon.HTTP_413
            resp.body = '{"erreur": "aucun critère de recherche indiqué"}'
            return
        # Accept a decimal comma as well as a decimal point.
        lat = float(lat_raw.replace(',', '.'))
        lon = float(lon_raw.replace(',', '.'))
        # Search radius in meters, capped at 50 km.
        dist = min(int(req.params.get('rayon', 1000)), 50000)
        db = psycopg2.connect("")  # connexion à la base PG locale
        try:
            cur = db.cursor()
            # Optional altitude filters, safely parameterized via mogrify.
            where = ''
            limite = req.params.get('limite', None)
            if limite:
                where = where + cur.mogrify(" AND limite = %s", (int(limite),)).decode()
            limite_min = req.params.get('limite_min', None)
            if limite_min:
                where = where + cur.mogrify(" AND limite >= %s", (int(limite_min),)).decode()
            # lat/lon/dist are numeric (already converted), so the %-format
            # below cannot inject SQL.
            query = """
select json_build_object('source', 'DGAC / SIA',
    'derniere_maj', '2021-01',
    'type','Featurecollection',
    'nb_features', count(d.*),
    'features', case when count(*)=0 then array[]::json[] else array_agg(json_build_object(
        'type','Feature',
        'properties',json_build_object(
            'limite_alti_m', limite,
            'distance_m', ST_Distance(geom::geography,st_setsrid(st_makepoint(%(lon)s, %(lat)s),4326)::geography)::int,
            'cap_deg', case when ST_Distance(geom::geography,st_setsrid(st_makepoint(%(lon)s, %(lat)s),4326)::geography)>0
                then degrees(ST_Azimuth(st_setsrid(st_makepoint(%(lon)s, %(lat)s),4326), ST_ClosestPoint(geom, st_setsrid(st_makepoint(%(lon)s, %(lat)s),4326))))::int
                else null end
        ),
        'geometry',st_asgeojson(geom,6,0)::json)
        order by ST_Distance(geom::geography,st_setsrid(st_makepoint(%(lon)s, %(lat)s),4326)::geography)
    ) end )::text
from drones d
where st_buffer(st_setsrid(st_makepoint(%(lon)s, %(lat)s),4326)::geography, %(dist)s)::geometry && geom
and ST_DWithin(st_setsrid(st_makepoint(%(lon)s, %(lat)s),4326)::geography, geom::geography, %(dist)s)
""" % {'lon': lon, 'lat': lat, 'dist': dist} + where
            print(query)  # debug trace, kept from the original
            cur.execute(query)
            drones = cur.fetchone()
            resp.status = falcon.HTTP_200
            resp.set_header('X-Powered-By', 'drone_as_api (https://github.com/cquest/drone_as_api/)')
            resp.set_header('Access-Control-Allow-Origin', '*')
            resp.set_header("Access-Control-Expose-Headers", "Access-Control-Allow-Origin")
            resp.set_header('Access-Control-Allow-Headers', 'Origin, X-Requested-With, Content-Type, Accept')
            resp.body = drones[0]
        finally:
            # Always release the connection, even if the query fails.
            db.close()

    def on_get(self, req, resp):
        self.getDrone(req, resp)
# instance WSGI et route vers notre API
# (WSGI application instance; /drone is the only route.)
app = falcon.API()
app.add_route('/drone', dgac_drone())
| StarcoderdataPython |
9662281 | #
# This file is part of MicroPython M5Stack package
# Copyright (c) 2017-2018 <NAME>
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
#
# Project home:
# https://github.com/tuupola/micropython-m5stack
#
"""
M5Stack specific constants and classes.
"""
# pylint: disable=import-error
import utime as time
import display
import machine
from input import DigitalInput
from machine import Pin, PWM
from micropython import const
# pylint: enable=import-error
BUTTON_A_PIN = const(39)
BUTTON_B_PIN = const(38)
BUTTON_C_PIN = const(37)
SPEAKER_PIN = const(25)
TFT_LED_PIN = const(32)
TFT_DC_PIN = const(27)
TFT_CS_PIN = const(14)
TFT_MOSI_PIN = const(23)
TFT_CLK_PIN = const(18)
TFT_RST_PIN = const(33)
TFT_MISO_PIN = const(19)
def tone(frequency, duration=100, pin=None, volume=1):
    """Play `frequency` Hz on the speaker pin for `duration` milliseconds."""
    speaker = Pin(SPEAKER_PIN) if pin is None else pin
    pwm = PWM(speaker, duty=volume % 50)
    pwm.freq(frequency)
    time.sleep_ms(duration)
    pwm.deinit()
class ButtonA(DigitalInput):
    """Digital input bound to the M5Stack 'A' button pin."""

    def __init__(self, callback=None, trigger=Pin.IRQ_FALLING | Pin.IRQ_RISING):
        button_pin = Pin(BUTTON_A_PIN, Pin.IN)
        super().__init__(button_pin, callback=callback, trigger=trigger)
class ButtonB(DigitalInput):
    """Digital input bound to the M5Stack 'B' button pin."""

    def __init__(self, callback=None, trigger=Pin.IRQ_FALLING | Pin.IRQ_RISING):
        button_pin = Pin(BUTTON_B_PIN, Pin.IN)
        super().__init__(button_pin, callback=callback, trigger=trigger)
class ButtonC(DigitalInput):
    """Digital input bound to the M5Stack 'C' button pin."""

    def __init__(self, callback=None, trigger=Pin.IRQ_FALLING | Pin.IRQ_RISING):
        button_pin = Pin(BUTTON_C_PIN, Pin.IN)
        super().__init__(button_pin, callback=callback, trigger=trigger)
class Display(object):
    """Thin wrapper around display.TFT, pre-configured for the M5Stack panel.

    Unknown attributes are delegated to the underlying TFT driver.
    """

    def __init__(self):
        self.tft = self.create()
        try:
            # Best effort: mirror the driver's docstring for help().
            # Some MicroPython builds do not expose __doc__.
            self.__doc__ = display.TFT.__doc__
        except Exception:
            # Narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            pass

    def __getattr__(self, name):
        # Delegate anything not defined here to the wrapped driver.
        return getattr(self.tft, name)

    def create(self):
        """Initialise the ILI9341 panel with M5Stack wiring and defaults."""
        tft = display.TFT()
        tft.init(
            tft.ILI9341,
            spihost=tft.HSPI,
            width=320,
            height=240,
            mosi=TFT_MOSI_PIN,
            miso=TFT_MISO_PIN,
            clk=TFT_CLK_PIN,
            cs=TFT_CS_PIN,
            dc=TFT_DC_PIN,
            rst_pin=TFT_RST_PIN,
            backl_pin=TFT_LED_PIN,
            backl_on=1,
            speed=2600000,
            invrot=3,
            bgr=True
        )
        tft.orient(tft.LANDSCAPE)
        tft.font(tft.FONT_Small, fixedwidth=True)
        return tft
| StarcoderdataPython |
6538586 | from django.shortcuts import render
# importing http response to create a response.
from django.http import HttpResponse
# every view method needs to have access to
# request and it will be passed using method argument.
def index(request):
    """Respond to any request with a fixed greeting."""
    greeting = "Hello World!!!"
    return HttpResponse(greeting)
294677 | # Copyright 2021 <NAME>
# Licensed under the MIT license
import asyncio
import logging
import sys
import click
from jaymap.client import JMAP
from jaymap.types.core import Request, Invocation, Capabilities
from jaymap.types.mail import Email, MailboxCondition
@click.group()
@click.pass_context
@click.option("--domain", prompt=True)
@click.option("--username", prompt=True)
@click.option("--password", prompt=True, hide_input=True)
def main(ctx: click.Context, domain: str, username: str, password: str):
    # Stash the credential triple for the subcommands and enable verbose
    # logging to stdout for the whole session.
    ctx.obj = (domain, username, password)
    logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
@main.command()
@click.pass_context
def subscribe(ctx: click.Context):
    async def callback(*args, **kwargs):
        # Debug trace of every notification received.
        print(f"callback({args=}, {kwargs=})")

    async def inner():
        # ctx.obj is the (domain, username, password) tuple set by the group.
        async with JMAP(*ctx.obj) as client:
            await client.subscribe(callback=callback)

    asyncio.run(inner())
@main.command()
@click.pass_context
def multiquery(ctx: click.Context):
    async def inner():
        async with JMAP(*ctx.obj) as client:
            click.echo(client.session)
            account_id = client.session.primary_accounts[Capabilities.MAIL]
            # Chained JMAP method calls: later calls reference earlier
            # results via "#ids" back-references (resultOf/name/path).
            methods = [
                (
                    "Mailbox/query",
                    {"accountId": account_id, "filter": {"role": "inbox"}},
                    "c1",
                ),
                (
                    "Mailbox/get",
                    {
                        "accountId": account_id,
                        "#ids": {
                            "resultOf": "c1",
                            "name": "Mailbox/query",
                            "path": "/ids",
                        },
                    },
                    "c2",
                ),
                (
                    # NOTE(review): inMailbox uses a hard-coded mailbox id
                    # rather than the "c1" result -- confirm intended.
                    "Email/query",
                    {
                        "accountId": account_id,
                        "filter": {
                            "inMailbox": "d00b1aa9-ce5c-4775-ba79-e8a8d1a042fd",
                        },
                        "limit": 2,
                    },
                    "c3",
                ),
                (
                    "Email/get",
                    {
                        "accountId": account_id,
                        "#ids": {
                            "resultOf": "c3",
                            "name": "Email/query",
                            "path": "/ids",
                        },
                    },
                    "c3",
                ),
            ]
            req = Request(
                using=client.capabilities,
                method_calls=methods,
            )
            res = await client.request(req)
            # The last method response carries the Email/get result list.
            emails = res.method_responses[-1][1]["list"]
            emails = Email.from_list(emails)
            click.secho(f"{emails!r}")

    asyncio.run(inner())
@main.command()
@click.pass_context
def mailbox(ctx: click.Context):
    async def inner():
        async with JMAP(*ctx.obj) as client:
            # Dump every mailbox ...
            result = await client.mailbox.get()
            mailboxes = result.list
            for mailbox in mailboxes:
                click.secho(f"{mailbox!r}", fg="red")
            # ... then just the inbox, fetched via a query for role=inbox.
            result = await client.mailbox.query(filter=MailboxCondition(role="inbox"))
            result = await client.mailbox.get(ids=result.ids)
            mailboxes = result.list
            for mailbox in mailboxes:
                click.secho(f"{mailbox!r}", fg="yellow")

    asyncio.run(inner())
# Allow running this CLI module directly.
if __name__ == "__main__":
    main()  # pylint: disable=all
| StarcoderdataPython |
4819125 | <reponame>Hadisalman/AirSim
import copy
import json
import threading
import numpy as np
import torch
from robustness import airsim
from .sim_object import SimObject
class AdversarialObjects(SimObject):
    """Background attacker that repositions scene objects in AirSim to
    maximise the loss of a car's pedestrian-detection callback."""

    def __init__(self, name='3DAdversary', car=None, **kwargs):
        # Required kwargs: resolution_coord_descent, num_iter, adv_config_path.
        super().__init__(name)
        assert 'resolution_coord_descent' in kwargs and 'num_iter' in kwargs and 'adv_config_path' in kwargs
        # Callback returning (prediction, correct, loss) for the current scene.
        self.ped_detection_callback = car.detection.ped_detection_callback
        # TODO: un-hardcode this.
        self.ped_object_name = 'Adv_Ped2'
        # The attack runs on its own thread; started by attack().
        self.thread = threading.Thread(target=self.coordinate_ascent_object_attack, args=(kwargs['resolution_coord_descent'], kwargs['num_iter']))
        self.is_thread_active = False
        # self.client is provided by the SimObject base class (AirSim client)
        # -- presumably; confirm in sim_object.py.
        self.scene_objs = self.client.simListSceneObjects()
        # Movable objects used as the adversarial perturbation.
        self.adv_objects = [
            'Adv_House',
            'Adv_Fence',
            'Adv_Hedge',
            'Adv_Car',
            'Adv_Tree'
        ]
        self.adv_config_path = kwargs['adv_config_path']
        for obj in self.adv_objects:
            print('{} exists? {}'.format(obj, obj in self.scene_objs))
        for obj in ['BoundLowerLeft', 'BoundUpperRight']:
            print('{} exists? {}'.format(obj, obj in self.scene_objs))
        # Axis-aligned search area defined by two marker objects in the map.
        self.BoundLowerLeft = self.client.simGetObjectPose('BoundLowerLeft')
        self.BoundUpperRight = self.client.simGetObjectPose('BoundUpperRight')
        self.x_range_adv_objects_bounds = (self.BoundLowerLeft.position.x_val, self.BoundUpperRight.position.x_val)
        self.y_range_adv_objects_bounds = (self.BoundLowerLeft.position.y_val, self.BoundUpperRight.position.y_val)

    def dump_env_config_to_json(self, path):
        """Write the vehicle, pedestrian and all adversarial object poses
        (position + Euler angles) to a JSON file at `path`."""
        def _populate_pose_dic(pose_dic, pose):
            pose_dic['X'] = pose.position.x_val
            pose_dic['Y'] = pose.position.y_val
            pose_dic['Z'] = pose.position.z_val
            euler_angles = airsim.to_eularian_angles(pose.orientation)
            pose_dic['Pitch'] = euler_angles[0]
            pose_dic['Roll'] = euler_angles[1]
            pose_dic['Yaw'] = euler_angles[2]

        with open(path, 'w') as f:
            output = {}
            output['Vehicle'] = {}
            pose = self.client.simGetVehiclePose()
            _populate_pose_dic(output['Vehicle'], pose)
            output[self.ped_object_name] = {}
            pose = self.client.simGetObjectPose(self.ped_object_name)
            _populate_pose_dic(output[self.ped_object_name], pose)
            for obj in self.adv_objects:
                output[obj] = {}
                pose = self.client.simGetObjectPose(obj)
                _populate_pose_dic(output[obj], pose)
            # print(output)
            json.dump(output, f, indent=2, sort_keys=False)

    def update_env_from_config(self, path):
        """Restore vehicle/object poses from a JSON file previously written
        by dump_env_config_to_json()."""
        with open(path, 'r') as f:
            dic = json.load(f)
            for obj_name, obj_pose in dic.items():
                pose = airsim.Pose(airsim.Vector3r(obj_pose['X'], obj_pose['Y'], obj_pose['Z']),
                                   airsim.to_quaternion(obj_pose['Pitch'], obj_pose['Roll'], obj_pose['Yaw']))
                if obj_name == 'Vehicle':
                    self.client.simSetVehiclePose(pose, ignore_collison=True)
                else:
                    assert obj_name in self.scene_objs, 'Object {} is not found in the scene'.format(obj_name)
                    self.client.simSetObjectPose(obj_name, pose)
                print('-->[Updated the position of the {}]'.format(obj_name))

    def coordinate_ascent_object_attack(self, resolution=10, num_iter=1):
        """Greedy coordinate ascent: for each object (in random order), try
        every cell of a resolution x resolution grid over the bounded area
        and keep the position that maximises the detection loss."""
        x_range = np.linspace(self.x_range_adv_objects_bounds[0], self.x_range_adv_objects_bounds[1], resolution)
        y_range = np.linspace(self.y_range_adv_objects_bounds[0], self.y_range_adv_objects_bounds[1], resolution)
        xv, yv = np.meshgrid(x_range, y_range)
        self.adv_poses = []
        best_loss = -1
        for _ in range(num_iter):
            for obj in np.random.permutation(self.adv_objects).tolist():
                pose = self.client.simGetObjectPose(obj)
                best_pose = copy.deepcopy(pose)
                grid2d_poses_list = zip(xv.flatten(), yv.flatten())
                for grid2d_pose in grid2d_poses_list:
                    pose.position.x_val = grid2d_pose[0]
                    pose.position.y_val = grid2d_pose[1]
                    self.client.simSetObjectPose(obj, pose)
                    # Cooperative shutdown: save current state and bail out.
                    if not self.is_thread_active:
                        print('-->[Saving whatever coniguration is reached]')
                        self.dump_env_config_to_json(path=self.adv_config_path)
                        return
                    _, correct, loss = self.ped_detection_callback()
                    if loss > best_loss:
                        best_loss = loss
                        best_pose = copy.deepcopy(pose)
                        print('Best loss so far {}'.format(best_loss.item()))
                self.client.simSetObjectPose(obj, best_pose)
            # dump results into a json file after each iteration
            self.dump_env_config_to_json(path=self.adv_config_path)

    def spsa_object_attack(self, resolution=10, num_iter=1):
        # NOTE(review): calc_est_grad is defined but never called, and the
        # body below duplicates coordinate_ascent_object_attack -- this
        # looks like an unfinished SPSA implementation; confirm.
        def calc_est_grad(func, x, y, rad, num_samples):
            B, *_ = x.shape
            Q = num_samples//2
            N = len(x.shape) - 1
            with torch.no_grad():
                # Q * B * C * H * W
                extender = [1]*N
                queries = x.repeat(Q, *extender)
                noise = torch.randn_like(queries)
                norm = noise.view(B*Q, -1).norm(dim=-1).view(B*Q, *extender)
                noise = noise / norm
                noise = torch.cat([-noise, noise])
                queries = torch.cat([queries, queries])
                y_shape = [1] * (len(y.shape) - 1)
                l = func(queries + rad * noise, y.repeat(2*Q, *y_shape)).view(-1, *extender)
                grad = (l.view(2*Q, B, *extender) * noise.view(2*Q, B, *noise.shape[1:])).mean(dim=0)
            return grad

        x_range = np.linspace(self.x_range_adv_objects_bounds[0], self.x_range_adv_objects_bounds[1], resolution)
        y_range = np.linspace(self.y_range_adv_objects_bounds[0], self.y_range_adv_objects_bounds[1], resolution)
        xv, yv = np.meshgrid(x_range, y_range)
        self.adv_poses = []
        best_loss = -1
        for _ in range(num_iter):
            for obj in np.random.permutation(self.adv_objects).tolist():
                pose = self.client.simGetObjectPose(obj)
                best_pose = copy.deepcopy(pose)
                grid2d_poses_list = zip(xv.flatten(), yv.flatten())
                for grid2d_pose in grid2d_poses_list:
                    pose.position.x_val = grid2d_pose[0]
                    pose.position.y_val = grid2d_pose[1]
                    self.client.simSetObjectPose(obj, pose)
                    if not self.is_thread_active:
                        print('[-->[Saving whatever coniguration is reached]')
                        self.dump_env_config_to_json(path=self.adv_config_path)
                        return
                    _, correct, loss = self.ped_detection_callback()
                    if loss > best_loss:
                        best_loss = loss
                        best_pose = copy.deepcopy(pose)
                        print('Best loss so far {}'.format(best_loss.item()))
                self.client.simSetObjectPose(obj, best_pose)
            # dump results into a json file after each iteration
            self.dump_env_config_to_json(path=self.adv_config_path)

    def attack(self):
        # Start the background attack thread exactly once.
        if not self.is_thread_active:
            self.is_thread_active = True
            self.thread.start()
            print("-->[Started adv thread]")
| StarcoderdataPython |
8169600 | # Copyright 2015 <EMAIL>
# Copyright 2015-2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import falcon
from falcon import testing
from oslo_config import cfg
from oslo_config import fixture as oo_cfg
from oslo_context import fixture as oo_ctx
from oslotest import base as oslotest_base
from monasca_api.api.core import request
from monasca_api import conf
from monasca_api import config
class MockedAPI(falcon.API):
    """MockedAPI
    Subclasses :py:class:`falcon.API` in order to overwrite
    request_type property with custom :py:class:`request.Request`
    """
    def __init__(self):
        # Only request_type deviates from falcon's behavior here; the other
        # arguments restate defaults explicitly -- presumably for clarity.
        super(MockedAPI, self).__init__(
            media_type=falcon.DEFAULT_MEDIA_TYPE,
            request_type=request.Request,
            response_type=falcon.Response,
            middleware=None,
            router=None
        )
class ConfigFixture(oo_cfg.Config):
    """Mocks configuration"""

    def __init__(self):
        super(ConfigFixture, self).__init__(config.CONF)

    def setUp(self):
        super(ConfigFixture, self).setUp()
        # Reset the module-level "config already loaded" flag after each
        # test so the next test can parse configuration again.
        self.addCleanup(self._clean_config_loaded_flag)
        conf.register_opts()
        self._set_defaults()
        config.parse_args(argv=[])  # prevent oslo from parsing test args

    @staticmethod
    def _clean_config_loaded_flag():
        config._CONF_LOADED = False

    def _set_defaults(self):
        # Test-friendly defaults for the influxdb backend section.
        self.conf.set_default('user', 'monasca', 'influxdb')
class BaseTestCase(oslotest_base.BaseTestCase):
    """Common test base: installs config and request-context fixtures."""

    def setUp(self):
        super(BaseTestCase, self).setUp()
        self.useFixture(ConfigFixture())
        self.useFixture(oo_ctx.ClearRequestContext())

    @staticmethod
    def conf_override(**kw):
        """Override flag variables for a test."""
        # Pop the option group first; remaining kwargs are option=value.
        group = kw.pop('group', None)
        for k, v in kw.items():
            cfg.CONF.set_override(k, v, group)

    @staticmethod
    def conf_default(**kw):
        """Override flag variables for a test."""
        # Same as conf_override but only changes the *default* value.
        group = kw.pop('group', None)
        for k, v in kw.items():
            cfg.CONF.set_default(k, v, group)
class BaseApiTestCase(BaseTestCase, testing.TestBase):
    """Base for API tests: uses MockedAPI so handlers get custom requests."""
    # falcon.testing.TestBase instantiates this class as the app under test.
    api_class = MockedAPI
    @staticmethod
    def create_environ(*args, **kwargs):
        """Thin pass-through to :func:`falcon.testing.create_environ`."""
        return testing.create_environ(
            *args,
            **kwargs
        )
| StarcoderdataPython |
9687088 | <gh_stars>0
# -*- encoding: utf-8 -*-
"""
generic
"""
| StarcoderdataPython |
11360510 | <reponame>kraushm/hpctools<filename>reframechecks/mpip/mpip.py
# Copyright 2019-2020 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# HPCTools Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import os
import sys
import reframe as rfm
import reframe.utility.sanity as sn
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
'../common'))) # noqa: E402
import sphexa.sanity as sphs
import sphexa.sanity_mpip as sphsmpip
# NOTE: jenkins restricted to 1 cnode
mpi_tasks = [24]
cubeside_dict = {24: 100}
steps_dict = {24: 0}
@rfm.parameterized_test(*[[mpi_task] for mpi_task in mpi_tasks])
class SphExaMpipCheck(sphsmpip.MpipBaseTest):
    # {{{
    '''
    This class runs the test code with mpiP, the light-weight MPI profiler
    (mpi only): http://llnl.github.io/mpiP

    2 parameters can be set for simulation:

    :arg mpi_task: number of mpi tasks; the size of the cube in the 3D
        square patch test is set with a dictionary depending on mpitask,
        but cubesize could also be on the list of parameters,
    :arg steps: number of simulation steps.
    '''
    # }}}
    def __init__(self, mpi_task):
        # super().__init__()
        # {{{ pe
        self.descr = 'Tool validation'
        self.valid_prog_environs = ['PrgEnv-gnu', 'PrgEnv-intel',
                                    'PrgEnv-cray', 'PrgEnv-pgi']
        # self.valid_systems = ['daint:gpu', 'dom:gpu']
        self.valid_systems = ['*']
        self.maintainers = ['JG']
        self.tags = {'sph', 'hpctools', 'cpu'}
        # }}}
        # {{{ compile
        self.testname = 'sqpatch'
        self.modules = ['mpiP']
        # unload xalt to avoid _buffer_decode error:
        self.prebuild_cmds = ['module rm xalt', 'module list -t']
        # mpiP toolchain module versions (per programming environment):
        tool_ver = '57fc864'
        tc_ver = '20.08'
        self.tool_modules = {
            'PrgEnv-gnu': [f'{self.modules[0]}/{tool_ver}-CrayGNU-{tc_ver}'],
            'PrgEnv-intel': [f'{self.modules[0]}/{tool_ver}-CrayIntel-'
                             f'{tc_ver}'],
            'PrgEnv-cray': [f'{self.modules[0]}/{tool_ver}-CrayCCE-{tc_ver}'],
            'PrgEnv-pgi': [f'{self.modules[0]}/{tool_ver}-CrayPGI-{tc_ver}'],
        }
        # Compiler flags per programming environment; applied in the
        # set_compiler_flags hook below.
        self.prgenv_flags = {
            'PrgEnv-gnu': ['-I.', '-I./include', '-std=c++14', '-g', '-O3',
                           '-DUSE_MPI', '-DNDEBUG'],
            'PrgEnv-intel': ['-I.', '-I./include', '-std=c++14', '-g', '-O3',
                             '-DUSE_MPI', '-DNDEBUG'],
            'PrgEnv-cray': ['-I.', '-I./include', '-std=c++17', '-g', '-Ofast',
                            '-DUSE_MPI', '-DNDEBUG'],
            'PrgEnv-pgi': ['-I.', '-I./include', '-std=c++14', '-g', '-O3',
                           '-DUSE_MPI', '-DNDEBUG'],
        }
        self.build_system = 'SingleSource'
        self.build_system.cxx = 'CC'
        self.sourcepath = '%s.cpp' % self.testname
        self.executable = './%s.exe' % self.testname
        # {{{ openmp:
        # 'PrgEnv-intel': ['-qopenmp'],
        # 'PrgEnv-gnu': ['-fopenmp'],
        # 'PrgEnv-pgi': ['-mp'],
        # 'PrgEnv-cray_classic': ['-homp'],
        # 'PrgEnv-cray': ['-fopenmp'],
        # # '-homp' if lang == 'F90' else '-fopenmp',
        # }}}
        # }}}
        # {{{ run
        ompthread = 1
        self.num_tasks = mpi_task
        # Cube side and step count are looked up from the module-level
        # dictionaries keyed by the mpi task count.
        self.cubeside = cubeside_dict[mpi_task]
        self.steps = steps_dict[mpi_task]
        self.name = 'sphexa_mpiP_{}_{:03d}mpi_{:03d}omp_{}n_{}steps'. \
            format(self.testname, mpi_task, ompthread, self.cubeside,
                   self.steps)
        self.num_tasks_per_node = 24
        self.num_tasks_per_core = 2
        self.use_multithreading = True
        # {{{ ht:
        # self.num_tasks_per_node = mpitasks if mpitasks < 36 else 36  # noht
        # self.use_multithreading = False # noht
        # self.num_tasks_per_core = 1 # noht
        # self.num_tasks_per_node = mpitasks if mpitasks < 72 else 72
        # self.use_multithreading = True # ht
        # self.num_tasks_per_core = 2 # ht
        # }}}
        self.num_cpus_per_task = ompthread
        self.exclusive = True
        self.time_limit = '10m'
        self.variables = {
            'CRAYPE_LINK_TYPE': 'dynamic',
            'OMP_NUM_THREADS': str(self.num_cpus_per_task),
            # 'MPIP': '"-c"',
        }
        self.executable_opts = [f'-n {self.cubeside}', f'-s {self.steps}',
                                '2>&1']
        self.prerun_cmds = [
            'module rm xalt',
        ]
        # }}}
        # {{{ sanity_patterns
        # set externally (in sanity_mpip.py)
        # }}}
        # {{{ performance
        # {{{ internal timers
        # Wall-clock bracketing of the run used for timing comparisons.
        self.prerun_cmds += ['echo starttime=`date +%s`']
        self.postrun_cmds += ['echo stoptime=`date +%s`']
        # }}}
        # }}}
    # {{{ hooks
    @rfm.run_before('compile')
    def set_compiler_flags(self):
        """Select tool modules and compiler/linker flags for the active PE."""
        self.modules = self.tool_modules[self.current_environ.name]
        self.build_system.cxxflags = \
            self.prgenv_flags[self.current_environ.name]
        # Link the mpiP static library plus its unwinding/symbol deps.
        self.build_system.ldflags = self.build_system.cxxflags + \
            ['-L$EBROOTMPIP/lib', '-Wl,--whole-archive -lmpiP',
             '-Wl,--no-whole-archive -lunwind', '-lbfd -liberty -ldl -lz']
    # }}}
| StarcoderdataPython |
252763 | <filename>Python3/0856-Score-of-Parentheses/soln-1.py<gh_stars>1-10
class Solution:
    def scoreOfParentheses(self, S):
        """Return the score of a balanced parentheses string.

        A "()" pair at nesting depth d contributes 2**(d-1), so the total
        is accumulated by scanning adjacent character pairs while tracking
        the current depth.

        :type S: str
        :rtype: int
        """
        total = 0
        depth = 0
        for idx in range(len(S) - 1):
            depth += 1 if S[idx] == '(' else -1
            # An immediately-closed "(" is a core "()" pair at this depth.
            if S[idx] == '(' and S[idx + 1] == ')':
                total += 1 << (depth - 1)
        return total
1913840 | """
Application CLI Commands.
This file defines command functions which are invokable as commands from the
command line. Additional application-specific commands are encouraged, and may
be added with the {{app_name}}.cli.command decorator.
"""
import logging
import os
import sys
from argparse import REMAINDER
from tensorflow.io import gfile
import yaml
from .cli import command, arg
from .config import get_config
from .pipelines.harness import PipelineHarness
from .util import dynamic_import_func, get_collection
@command([
    arg("pipeline",
        help=("pipeline ID from config"),
        type=str),
    arg("-o", "--out-dir", type=str, default=None,
        help="output directory (default: build.local_root variable from config)"),
])
def build_pipeline(args):
    """Build a pipeline job spec using configured settings.

    Compiles the pipeline identified by ``args.pipeline`` into a job spec
    file under the output directory and returns a small manifest mapping
    the pipeline ID to the job spec path.
    """
    pipeline_id = args.pipeline
    # load configured variables
    config = get_config()
    output_dir = args.out_dir
    if output_dir is None:
        output_dir = config['build']['local_root']
    # prepare output directory; refuse to clobber a non-directory path
    if os.path.exists(output_dir) and not os.path.isdir(output_dir):
        logging.fatal(
            "Path supplied for output directory already exists and is not a directory.")
        sys.exit("exiting")
    os.makedirs(output_dir, exist_ok=True)
    pipeline_job_path = PipelineHarness().build_pipeline(
        pipeline_id=pipeline_id,
        output_dir=output_dir,
    )
    logging.info("Wrote compiled pipeline: %s", pipeline_job_path)
    # Manifest shape: {pipeline_id: {"job_spec": <path>}}
    manifest = {
        pipeline_id: {
            "job_spec": pipeline_job_path
        },
    }
    return manifest
@command([
    arg("-o", "--out-dir", type=str, default=None,
        help="output directory (default: local.build_root from config)"),
])
def build_pipelines(args):
    """Build all pipeline job specs using configured settings.

    Iterates over every pipeline ID in the ``pipelines`` config collection
    and compiles each one; returns a manifest mapping pipeline IDs to
    their job spec paths.
    """
    # load configured variables
    config = get_config()
    output_dir = args.out_dir
    if output_dir is None:
        output_dir = config['build']['local_root']
    # prepare output directory; refuse to clobber a non-directory path
    if os.path.exists(output_dir) and not os.path.isdir(output_dir):
        logging.fatal("path supplied for output is not a directory")
        sys.exit("exiting")
    os.makedirs(output_dir, exist_ok=True)
    manifest = {}
    for pipeline_id in get_collection(config, 'pipelines'):
        logging.info("building pipeline: %s", pipeline_id)
        pipeline_job_path = PipelineHarness().build_pipeline(
            pipeline_id=pipeline_id,
            output_dir=output_dir,
        )
        manifest[pipeline_id] = {"job_spec": pipeline_job_path}
    return manifest
@command()
def deploy_pipelines(args):  # pylint: disable=unused-argument
    """Deploy all configured pipelines via the pipeline harness."""
    return PipelineHarness().deploy()
@command([
    arg("pipeline", type=str,
        help="pipeline ID from config"),
    arg("--manifest", type=str, default=None,
        help="pipeline build manifest file containing job spec path"),
    arg("--job-spec", type=str, default=None, help="job spec file path"),
    arg("--out-manifest", type=str, default=None,
        help="output manifest destination"),
])
def run_pipeline(args):
    """Run a pipeline from either a job spec or a pipeline build manifest.

    The job spec path comes from ``--job-spec`` if given, otherwise it is
    looked up under ``manifest[pipeline]['job_spec']`` in the YAML file
    given by ``--manifest``. Exits fatally if neither source is usable.
    Returns a manifest containing the harness run response.
    """
    pipeline_id = args.pipeline
    # derive job_spec_path from args (--job-spec takes precedence)
    if args.job_spec:
        job_spec_path = args.job_spec
    elif args.manifest:
        # TODO(axelmagn): replace with manifests module
        with open(args.manifest, 'r') as file:
            manifest = yaml.safe_load(file)
        if pipeline_id not in manifest:
            logging.fatal("manifest does not contain '%s'", pipeline_id)
            sys.exit("exiting")
        elif 'job_spec' not in manifest[pipeline_id]:
            logging.fatal(
                "manifest section for '%s' does not contain 'job_spec'",
                pipeline_id
            )
            sys.exit("exiting")
        job_spec_path = manifest[pipeline_id]['job_spec']
    else:
        logging.fatal(
            "Cannot derive job spec path. Neither '--job-spec' nor '--manifest' is set."
        )
        sys.exit("exiting")
    response = PipelineHarness().run_pipeline(
        pipeline_id,
        job_spec_path,
    )
    # Manifest shape: {pipeline_id: {"run_response": <response>}}
    manifest = {
        pipeline_id: {
            "run_response": response
        }
    }
    return manifest
@command([
    arg("import_path", type=str,
        help="python import string specifying task function to run"),
    arg("task_args", nargs=REMAINDER, help="task arguments"),
])
def task(args):
    """Run a task function.

    Dynamically imports the function named by ``import_path``, parses the
    remaining command-line tokens with the function's own argument parser
    (exposed as ``func.parser``), and invokes it.
    """
    func = dynamic_import_func(args.import_path)
    task_args = func.parser.parse_args(args.task_args)
    func(task_args)
@command()
def dump_config(_args):
    """Dump the current config to stdout or file."""
    config = get_config()
    # The command framework handles writing the returned string out.
    return config.dumps()
@command([
    arg("--label", type=str, default=None, help="release label"),
    arg("--build", type=str, default=None, help="build label"),
    arg("--image", type=str, default=[], action="append", dest="images",
        help="release image key/value pair of the form KEY=IMAGE_VALUE (repeatable)"),
    arg("--pipelines-manifest", type=str, default=None,
        help="path to pipeline manifest generated by `build_pipelines` invocation"),
])
def release_config(args):
    """Build a release configuration section additively.

    Merges any supplied label, build, image key/value pairs, and pipelines
    manifest into the existing ``release`` section of the config, then
    returns the serialized config. Only flags actually passed are applied.
    """
    config = get_config()
    # Parse repeated --image KEY=VALUE flags into a dict (value may contain '=').
    images = {}
    for image in args.images:
        key, value = image.split('=', 1)
        images[key] = value
    release = get_collection(config, 'release')
    if args.label is not None:
        release["label"] = args.label
    if args.build is not None:
        release["build"] = args.build
    # Merge (not replace) image entries into the existing mapping.
    release["images"] = get_collection(release, "images")
    release["images"].update(images)
    if args.pipelines_manifest is not None:
        # gfile supports GCS paths as well as local files.
        with gfile.GFile(args.pipelines_manifest) as file:
            release["pipelines"] = get_collection(release, "pipelines")
            manifest = yaml.safe_load(file)
            release["pipelines"].update(manifest)
    config.set('release', release)
    return config.dumps()
| StarcoderdataPython |
6487338 | #!/usr/bin/env python2.7
# __BEGIN_LICENSE__
#
# Copyright 2012 Stanford University. All rights reserved.
#
# __END_LICENSE__
# calibrate.py
#
# Usage: calibrate.py <calibration_dir>
#
# This script re-runs the calibration from a set of calibration images
# captured by the uScope GUI. This script is mostly useful for
# debugging the calibration routines offline (i.e. away from the
# microscope), but can be used to update old calibration files from
# early experiments as well.
import sys, os, math
from lflib.imageio import load_image, save_image
import numpy as np
import glob
def avg_images(image_files):
    """Return the pixel-wise average of a set of image files.

    All images are assumed to share the shape and dtype of the first file.
    The sum is accumulated in float64 rather than float32: the original
    float32 accumulator (flagged in the old comments as a source of
    numerical error) loses precision as the number of summed frames grows.
    The result is rounded and cast back to the first image's dtype, as
    before.

    Args:
        image_files: list of image file paths to average.

    Returns:
        The averaged image array (dtype of the first image), or None if
        image_files is empty.
    """
    if len(image_files) == 0:
        return None
    first = load_image(image_files[0])
    im_type = first.dtype
    # Accumulate in float64 so large file counts don't degrade precision.
    acc = first.astype('float64')
    for ifile in image_files[1:]:
        acc = acc + load_image(ifile, dtype='float32').astype('float64')
    return np.round(acc / len(image_files)).astype(im_type)
if __name__ == "__main__":
    # Entry point (Python 2): parse options, optionally synthesize a light
    # field, then run geometric calibration, ray-database generation, and
    # radiometric calibration, saving the result as a .lfc file.
    import lflib
    print 'LFcalibrate v%s' % (lflib.version)
    # Parse command line options
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-o", "--output-filename", dest="output_filename",
                      help="Specify the name of the calibration file.")
    parser.add_option('', "--synthetic",
                      action="store_true", dest="synthetic_lf", default=False,
                      help="Use this option to create a synthetic light field (i.e. with no calibration image")
    parser.add_option('', "--use-ray-optics",
                      action="store_true", dest="use_ray_optics", default=False,
                      help="Use the less accurate ray optics model rather than wave optics model.")
    parser.add_option('', "--voxels-as-points",
                      action="store_true", dest="voxels_as_points", default=False,
                      help="Treat each voxel as an ideal point source. This turns of numerical integration that gives the voxel spatial extent (which can be important for anti-aliasing.")
    # Calibration routine parameters
    parser.add_option("", "--dark-frame", dest="dark_frame_file", default=None,
                      help="Specify the a dark frame image to subtract from the input light-field before processing.  (This makes radiometric calibration more accurate.)")
    parser.add_option("", "--radiometry-frame", dest="radiometry_frame_file", default=None,
                      help="Specify the a radiometry frame to use for radiometric correction.  If no frame is specified, then no radiometric correction is carried out.")
    parser.add_option("", "--align-radiometry", action="store_true", dest="align_radiometry", default=False,
                      help="Align the radiometry image automatically to the geometric calibration image.  (use this option when the radiometry frame has been \"bumped\" before imaging begins.")
    # Optical parameters
    parser.add_option('', "--pitch", dest="ulens_pitch", type=float, default=None,
                      help="Specify the microlens pitch (in microns).")
    parser.add_option('', "--pixel-size", dest="pixel_size", type=float, default=None,
                      help="Specify the size of a pixel on the sensor, taking magnification due to relay optics into account (in microns).")
    parser.add_option('', "--focal-length", dest="ulens_focal_length", type=float, default=2432.72,
                      help="Specify the microlens focal length (in microns).")
    parser.add_option('', "--ulens-focal-distance", dest="ulens_focal_distance", type=float, default=None,
                      help="Specify the microlens focal distance (in microns).  If you do not specify a value, it is assumed that the focal distance is equal to the focal length.")
    parser.add_option('', "--magnification", dest="objective_magnification", type=int, default=20,
                      help="Specify the objective magnification.")
    parser.add_option('', "--na", dest="objective_na", type=float, default = 0.5,
                      help="Specify the objective numerical aperture.")
    parser.add_option('', "--tubelens-focal-length", dest="tubelens_focal_length", type=float, default = 200.0,
                      help="Tube lens focal length (in millimeters).")
    parser.add_option('', "--wavelength", dest="center_wavelength", type=float, default = 509,
                      help="Center wavelength of emission spectrum of the sample (nm).")
    parser.add_option('', "--medium-index", dest="medium_index", type=float, default = 1.33,
                      help="Set the index of refraction of the medium.")
    parser.add_option('', "--ulens-fill-factor", dest="ulens_fill_factor", type=float, default=1.0,
                      help="Specify the microlens fill factor (e.g. 1.0, 0.7, ...).")
    parser.add_option('', "--pixel-fill-factor", dest="pixel_fill_factor", type=float, default=1.0,
                      help="Specify the pixel fill factor (e.g. 1.0, 0.7, ...).")
    parser.add_option('', "--ulens-profile", dest="ulens_profile", default='rect',
                      help="Specify the shape of the microlens apertures.  Options include: ['rect', 'circ']")
    # Volume parameters
    parser.add_option('', "--num-slices", dest="num_slices", type=int, default=30,
                      help="Set the number of slices to produce in the output stacks.")
    parser.add_option('', "--um-per-slice", dest="um_per_slice", type=float, default=10.0,
                      help="Set the thickness of each slice (in um).")
    parser.add_option('', "--z-center", dest="z_center", type=float, default=0.0,
                      help="Set the offset for the central z slice (in um).")
    parser.add_option('', "--supersample", dest="supersample", type=int, default= 1,
                      help="Supersample the light field volume.  This results in a higher resolution reconstruction up to a point, and interpolation after that point.")
    # Geometric calibration Options
    parser.add_option("", "--affine-alignment", action="store_true", dest="affine_alignment", default=False,
                      help="Use affine warp for correcting geometric distortion (default is cubic).")
    parser.add_option("", "--isometry-alignment", action="store_true", dest="isometry_alignment", default=False,
                      help="Use isometry warp for correcting geometric distortion (default is cubic).")
    parser.add_option("--chief-ray", action="store_true", dest="chief_ray_image", default=False,
                      help="Use this flag to indicate that the calibration frame is a chief ray image.")
    # Synthetic parameters
    parser.add_option('', "--ns", dest="ns", type=int, default=50,
                      help="Set the lenslets in s direction.")
    parser.add_option('', "--nt", dest="nt", type=int, default=50,
                      help="Set the lenslets in t direction.")
    # Other Options
    parser.add_option('', "--crop-center-lenslets",
                      action="store_true", dest="crop_center_lenslets", default=False,
                      help="For severe aperture vignetting (high NA objectives), use only center lenslets for calibration, and extrapolate outwards.")
    parser.add_option('', "--skip-alignment",
                      action="store_true", dest="skip_alignment", default=False,
                      help="Skip the alignment step during geometric calibration (useful if you are working with an already-rectified light field or a synthetic light field.")
    parser.add_option("", "--skip-subpixel-alignment", action="store_true", dest="skip_subpixel_alignment", default=False,
                      help="Skip subpixel alignment for determining lenslet centers.")
    parser.add_option('', "--num-threads", dest="num_threads", type=int, default=10,
                      help="Set the number of CPU threads to use when generating the raydb.")
    parser.add_option("", "--pinhole", dest="pinhole_filename", default=None,
                      help="After calibrating, save the rectified light field as a rectified sub-aperture image.")
    parser.add_option("", "--lenslet", dest="lenslet_filename", default=None,
                      help="After calibrating, save the rectified light field as a rectified lenslet image.")
    parser.add_option('-d', "--debug",
                      action="store_true", dest="debug", default=False,
                      help="Save debug images.")
    (options, args) = parser.parse_args()
    # If no focal distance is supplied, then set it (by default) to be equal to the ulens focal length.
    if options.ulens_focal_distance == None:
        options.ulens_focal_distance = options.ulens_focal_length
    if not options.synthetic_lf and len(args) != 1:
        print 'You must supply exactly one calibration image.\n'
        sys.exit(1)
    calibration_filename = args[0]
    if options.pixel_size == None or options.ulens_pitch == None:
        print 'Please supply necessary pixel per lenslet information via the \'--pitch\' and \'--pixel-size\' options.'
        sys.exit(1)
    # Synthetic mode: write out an all-white light field image and skip
    # the alignment step, since there is no real calibration image.
    if options.synthetic_lf:
        ns = options.ns
        nt = options.nt
        nu = nv = int(np.ceil(float(options.ulens_pitch) / options.pixel_size))
        synthetic_lf = 65535 * np.ones((nt*nv, ns*nu), dtype=np.uint16)
        save_image(calibration_filename, synthetic_lf, dtype=np.uint16)
        options.skip_alignment = True
    # Default output filename has a -RECTIFIED suffix
    if not options.output_filename:
        fileName, fileExtension = os.path.splitext(calibration_filename)
        output_filename = fileName + '.lfc'
    else:
        output_filename = options.output_filename
    # Check if dark-frame or radiometry-frame are regular expressions referring to multiple files.
    # If so, save an average image as the dark/radiometry frame
    if options.dark_frame_file is not None and len(glob.glob(options.dark_frame_file)) > 1:
        dark_frame_files = glob.glob(options.dark_frame_file)
        avg_dark_frame = avg_images(dark_frame_files)
        options.dark_frame_file = os.path.dirname(output_filename) + os.sep + 'darkframe_avg' + os.path.splitext(dark_frame_files[0])[1]
        save_image(options.dark_frame_file, avg_dark_frame)
    if options.radiometry_frame_file is not None and len(glob.glob(options.radiometry_frame_file)) > 1:
        radiometry_frame_files = glob.glob(options.radiometry_frame_file)
        avg_radiometry_frame = avg_images(radiometry_frame_files)
        options.radiometry_frame_file = os.path.dirname(output_filename) + os.sep + 'radiometryframe_avg' + os.path.splitext(radiometry_frame_files[0])[1]
        save_image(options.radiometry_frame_file, avg_radiometry_frame)
    # Create a new calibration object
    from lflib.calibration import LightFieldCalibration
    # FOR DEBUGGING: Load a previous calibration
    #
    #lfcal = LightFieldCalibration.load(output_filename)
    from lflib.calibration.imaging import CalibrationAlignmentMethods
    # Pick the geometric alignment model; cubic is the default.
    if options.affine_alignment:
        calibration_method = CalibrationAlignmentMethods.CALIBRATION_AFFINE_ALIGNMENT
    elif options.isometry_alignment:
        calibration_method = CalibrationAlignmentMethods.CALIBRATION_ISOMETRY_ALIGNMENT
    else:
        calibration_method = CalibrationAlignmentMethods.CALIBRATION_CUBIC_ALIGNMENT
    lfcal = LightFieldCalibration(options.ulens_focal_length, options.ulens_focal_distance,
                                  options.ulens_pitch, options.pixel_size,
                                  options.objective_magnification, options.objective_na, options.medium_index,
                                  options.tubelens_focal_length,
                                  options.ulens_fill_factor, options.pixel_fill_factor,
                                  options.ulens_profile, options.center_wavelength,
                                  calibration_method)
    # STEP 1 : MEASURE THE GEOMETRIC DISTORTION
    #
    # This routine computes an affine transform that squares the
    # lenslet array to a nice, regularly space grid.
    lfcal.calibrate_geometry(calibration_filename,
                             skip_alignment = options.skip_alignment,
                             skip_subpixel_alignment = options.skip_subpixel_alignment,
                             debug_mode = options.debug,
                             chief_ray_image = options.chief_ray_image,
                             radiometry_file = options.radiometry_frame_file,
                             align_radiometry = options.align_radiometry,
                             crop_center_lenslets = options.crop_center_lenslets)
    print ('   Calibrated light field has [ %d x %d ] ray samples and [ %d x %d ] spatial samples.' %
           (lfcal.nu, lfcal.nv, lfcal.ns, lfcal.nt))
    # Optionally, create a rectified sub-aperture image
    if (options.pinhole_filename):
        from lflib.lightfield import LightField
        im = load_image(calibration_filename, dtype=np.float32, normalize = False)
        lf = lfcal.rectify_lf(im).asimage(LightField.TILED_SUBAPERTURE)
        save_image(options.pinhole_filename, lf/lf.max() * 65535, dtype=np.uint16)
    # Optionally, create a rectified lenslet image
    if (options.lenslet_filename):
        from lflib.lightfield import LightField
        im = load_image(calibration_filename, dtype=np.float32, normalize = False)
        lf = lfcal.rectify_lf(im).asimage(LightField.TILED_LENSLET)
        save_image(options.lenslet_filename, lf/lf.max() * 65535, dtype=np.uint16)
    # For debugging
    #print "DEBUG MODE ON!!!!!"
    #raise SystemExit
    # STEP 2 : Compute rayspread database
    #
    # The rayspread database is a look-up table that serves as the
    # optical model of forward and back-projection of the light field.
    print '-> Generating light field psf database.  (This may take a little while...)'
    lfcal.generate_raydb(options.num_slices, options.um_per_slice,
                         options.supersample, options.z_center, options.num_threads,
                         use_geometric_optics = options.use_ray_optics,
                         voxels_as_points = options.voxels_as_points)
    # STEP 3 : MEASURE THE APERTURE PLANE VIGNETTING FOR THIS LENSLET IMAGE
    #
    # The vignetting function can be used for deconvolution.
    # First we determine a reasonable number of pixels per lenslet to
    # use.  This must be the same scheme used in lfstack.py and
    # elsewhere.  It's a little dangerous here to be accessing
    # coefficient directly... we should veil this in some layer of
    # abstraction soon!
    print '-> Calibrating radiometry using ', options.radiometry_frame_file
    from lflib.lfexceptions import ZeroImageException
    try:
        lfcal.calibrate_radiometry(calibration_filename,
                                   radiometry_frame_file = options.radiometry_frame_file,
                                   dark_frame_file = options.dark_frame_file)
        # Save the result
        lfcal.save(output_filename);
        lfcal.print_summary()
    except ZeroImageException:
        print "ERROR: calibrating against a blank radiometric image"
| StarcoderdataPython |
6577182 | <gh_stars>1-10
# Public API of this package: the interpolate and polynomial submodules.
__all__ = ['interpolate', 'polynomial']
| StarcoderdataPython |
6453938 | <filename>aiaia_detector/aiaia_detector/evaluation.py
#!/usr/bin/env python
# coding: utf-8
#### Citations
#
# https://github.com/tensorflow/models/tree/master/research/object_detection
# "Speed/accuracy trade-offs for modern convolutional object detectors."
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, CVPR 2017
#
# https://github.com/rafaelpadilla/Object-Detection-Metrics
# @INPROCEEDINGS {padillaCITE2020,
# author = {<NAME>} and <NAME>. {Netto} and <NAME>. {<NAME>}},
# title = {A Survey on Performance Metrics for Object-Detection Algorithms},
# booktitle = {2020 International Conference on Systems, Signals and Image Processing (IWSSIP)},
# year = {2020},
# pages = {237-242},}
#
# ## Loading in TFRecords from GCS bucket
import os
from os import path as op
import tensorflow as tf # version 2
import tensorflow.compat.v1 as tf1
from object_detection.utils import dataset_util
from object_detection.utils import visualization_utils as viz_utils
import numpy as np
import pandas as pd
from PIL import Image, ImageDraw
import glob
import matplotlib.pyplot as plt
import click
import seaborn as sn
# Just disables the warning, doesn't enable AVX/FMA
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
print("Tensorflow version " + tf.__version__)
def _parse_image_function(example_proto):
    """Parse a serialized tf.train.Example into a feature dict.

    The schema covers image size/bytes plus variable-length bounding box
    coordinates and class labels (hence FixedLenSequenceFeature with
    allow_missing=True for the per-object fields).
    """
    image_feature_description = {
        "image/height": tf.io.FixedLenFeature([], tf.int64),
        "image/width": tf.io.FixedLenFeature([], tf.int64),
        "image/filename": tf.io.FixedLenFeature([], tf.string),
        "image/source_id": tf.io.FixedLenFeature([], tf.string),
        "image/encoded": tf.io.FixedLenFeature([], tf.string),
        "image/format": tf.io.FixedLenFeature([], tf.string),
        "image/object/bbox/xmin": tf.io.FixedLenSequenceFeature(
            [], tf.float32, allow_missing=True, default_value=None
        ),
        "image/object/bbox/xmax": tf.io.FixedLenSequenceFeature(
            [], tf.float32, allow_missing=True, default_value=None
        ),
        "image/object/bbox/ymin": tf.io.FixedLenSequenceFeature(
            [], tf.float32, allow_missing=True, default_value=None
        ),
        "image/object/bbox/ymax": tf.io.FixedLenSequenceFeature(
            [], tf.float32, allow_missing=True, default_value=None
        ),
        "image/object/class/text": tf.io.FixedLenSequenceFeature(
            [], tf.string, allow_missing=True, default_value=None
        ),
        "image/object/class/label": tf.io.FixedLenSequenceFeature(
            [], tf.int64, allow_missing=True, default_value=None
        ),
    }
    return tf.io.parse_single_example(example_proto, image_feature_description)
def get_example(tfrecords_path):
    """Decode a TFRecord file into a list of per-image dicts.

    Each returned dict holds the image id, the decoded image tensor, the
    integer labels, class-name strings, and the bounding boxes assembled
    as [xmin, ymin, xmax, ymax] per object.
    """
    dataset = tf.data.TFRecordDataset([tfrecords_path])
    parsed_image_dataset = dataset.map(_parse_image_function)
    items = []
    for image_features in parsed_image_dataset:
        image_id = image_features["image/filename"].numpy()
        image_raw = image_features["image/encoded"].numpy()
        img = tf.image.decode_image(image_raw)
        label = image_features["image/object/class/label"].numpy()
        cls_name = image_features["image/object/class/text"].numpy()
        ymin = image_features["image/object/bbox/ymin"].numpy()
        xmin = image_features["image/object/bbox/xmin"].numpy()
        ymax = image_features["image/object/bbox/ymax"].numpy()
        xmax = image_features["image/object/bbox/xmax"].numpy()
        # Re-pack per-coordinate arrays into one box per object.
        bboxes = [
            [xmin[i], ymin[i], xmax[i], ymax[i]] for i in range(len(label))
        ]
        labels = [label[i] for i in range(len(label))]
        cls_names = [cls_name[i] for i in range(len(cls_name))]
        items.append(
            {
                "image_id": image_id,
                "img_arr": img,
                "labels": labels,
                "class_name": cls_names,
                "bboxes": bboxes,
            }
        )
    return items
def get_example_no_img(tfrecords_path):
    """Decode a TFRecord file into per-image dicts without decoding pixels.

    Same as get_example() but skips reading/decoding the encoded image,
    which is cheaper when only the ground-truth metadata is needed.
    """
    dataset = tf.data.TFRecordDataset([tfrecords_path])
    parsed_image_dataset = dataset.map(_parse_image_function)
    items = []
    for image_features in parsed_image_dataset:
        image_id = image_features["image/filename"].numpy()
        label = image_features["image/object/class/label"].numpy()
        cls_name = image_features["image/object/class/text"].numpy()
        ymin = image_features["image/object/bbox/ymin"].numpy()
        xmin = image_features["image/object/bbox/xmin"].numpy()
        ymax = image_features["image/object/bbox/ymax"].numpy()
        xmax = image_features["image/object/bbox/xmax"].numpy()
        # Re-pack per-coordinate arrays into one box per object.
        bboxes = [
            [xmin[i], ymin[i], xmax[i], ymax[i]] for i in range(len(label))
        ]
        labels = [label[i] for i in range(len(label))]
        cls_names = [cls_name[i] for i in range(len(cls_name))]
        items.append(
            {
                "image_id": image_id,
                "labels": labels,
                "class_name": cls_names,
                "bboxes": bboxes,
            }
        )
    return items
def tf1_od_pred(test_img_dict_lst, detection_graph):
    """
    Runs inference with a frozen graph on images and returns list of dicts, with image_id.
    Args:
        test_img_dict_lst (list): list of dicts containing the image and image gt metadata, read from the tfrecord by get_example().
        detection_graph (str): the loaded frozen graph
    Returns:
        det_dicts (list): A list of dictionaries containing detection data, including bounding boxes and class scores
    """
    det_dicts = (
        []
    )  # contains img name as key and detection info as value (boxes, scores, classes, num)
    with detection_graph.as_default():
        with tf1.Session(graph=detection_graph) as sess:
            # Definite input and output Tensors for detection_graph
            image_tensor = detection_graph.get_tensor_by_name("image_tensor:0")
            # Each box represents a part of the image where a particular object was detected.
            detection_boxes = detection_graph.get_tensor_by_name(
                "detection_boxes:0"
            )
            # Each score represent how level of confidence for each of the objects.
            # Score is shown on the result image, together with the class label.
            detection_scores = detection_graph.get_tensor_by_name(
                "detection_scores:0"
            )
            detection_classes = detection_graph.get_tensor_by_name(
                "detection_classes:0"
            )
            num_detections = detection_graph.get_tensor_by_name(
                "num_detections:0"
            )
            for item in test_img_dict_lst:
                # the array based representation of the image will be used later in order to prepare the
                # result image with boxes and labels on it.
                # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
                image_np_expanded = np.expand_dims(item["img_arr"], axis=0)
                # Actual detection: one sess.run per image fetches boxes,
                # scores, classes, and the detection count together.
                (boxes, scores, classes, num) = sess.run(
                    [
                        detection_boxes,
                        detection_scores,
                        detection_classes,
                        num_detections,
                    ],
                    feed_dict={image_tensor: image_np_expanded},
                )
                det_dicts.append(
                    {
                        "image_id": item["image_id"],
                        "boxes": boxes,
                        "scores": scores,
                        "classes": classes,
                        "num": num,
                    }
                )
    return det_dicts
def filter_detections(det, conf_threshold):
    """Drop padding entries and low-confidence detections from one result dict.

    Mutates ``det`` in place: when the reported detection count is zero the
    score/box/class entries are treated as padding and cleared, then every
    remaining detection scoring at or below ``conf_threshold`` is removed.
    The filtered fields become numpy arrays. Returns ``det`` for chaining.
    """
    detection_keys = ("scores", "boxes", "classes")
    # A zero count means the stored entries are model padding, not detections.
    if det["num"][0] == 0.0:
        for key in detection_keys:
            det[key] = []
    keep = np.array(det["scores"]) > conf_threshold
    for key in detection_keys:
        det[key] = np.array(det[key])[keep]
    return det
def filter_detections_and_concat_boxes(all_gt, det_dicts, conf_threshold):
    """
    Filters detections and groundtruth by a confidence score threshold and removes empty detections.
    Args:
        all_gt (list): all groundtruth dictionaries
        det_dicts (list): all detection dictionaries
        conf_threshold (float): all values below this will not be included as valid detections that are used to generate metrics
    Returns:
        A filtered lists of dictionaries, as well as lists of bounding boxes and classes extracted from these dictionaries
    """
    all_det_scores = []
    all_det_img_names = []
    all_det_boxes = []
    all_gt_img_names = []
    all_gt_boxes = []
    all_gt_classes = []
    all_det_classes = []
    # all_gt and det_dicts are assumed to be parallel lists (same image
    # order) -- TODO confirm with callers.
    for gt, det in zip(all_gt, det_dicts):
        det = filter_detections(det, conf_threshold)
        # Only images that retain at least one detection contribute boxes,
        # but their ground truth is accumulated alongside.
        if len(det["boxes"]) > 0:
            all_det_boxes.append(det["boxes"])
            all_det_classes.extend(det["classes"])
            all_det_scores.extend(det["scores"])
            all_det_img_names.append(det["image_id"])
            all_gt_boxes.append(np.array(gt["bboxes"]))
            all_gt_classes.extend(gt["labels"])
            all_gt_img_names.append(gt["image_id"])
    # NOTE(review): np.concatenate raises if no image passed the threshold
    # (empty lists) -- verify upstream guarantees at least one detection.
    all_gt_boxes = np.concatenate(all_gt_boxes)
    all_det_boxes = np.concatenate(all_det_boxes)
    all_gt_classes = np.array(all_gt_classes).astype(int)
    all_det_classes = np.array(all_det_classes).astype(int)
    all_det_scores = np.array(all_det_scores).astype(float)
    return (
        all_gt,
        det_dicts,
        all_gt_boxes,
        all_det_boxes,
        all_gt_classes,
        all_det_classes,
        all_det_scores,
        all_det_img_names,
        all_gt_img_names,
    )
def compute_iou(groundtruth_box, detection_box):
    """
    compute IOU score by compare ground truth bbox and predicted bbox
    Args:
        groundtruth_box: ground truth bbox in [x0, y0, x1, y1]
        detection_box: predicted truth bbox in [y0, x0, y1, x1]
    Returns:
        (iou, gt_area): IOU score between the boxes, and the (pixel-inclusive)
        area of the groundtruth box.
    """
    g_xmin, g_ymin, g_xmax, g_ymax = groundtruth_box
    # Detections arrive in [y0, x0, y1, x1] order — note the swap.
    d_ymin, d_xmin, d_ymax, d_xmax = detection_box
    # +1 everywhere: areas and intersection are pixel-inclusive.
    gt_area = (g_xmax - g_xmin + 1) * (g_ymax - g_ymin + 1)
    left = max(g_xmin, d_xmin)
    top = max(g_ymin, d_ymin)
    right = min(g_xmax, d_xmax)
    bottom = min(g_ymax, d_ymax)
    if right < left or bottom < top:
        # No overlap at all.
        return 0, gt_area
    intersection = (right - left + 1) * (bottom - top + 1)
    det_area = (d_xmax - d_xmin + 1) * (d_ymax - d_ymin + 1)
    return intersection / float(gt_area + det_area - intersection), gt_area
def get_box_matches(
    groundtruth_boxes, detection_boxes, detection_scores, iou_threshold
):
    """
    Returns pred list and gt list indices for the box matches, the iou,
    and the groundtruth box area to examine size effect on accuracy.
    Args:
        groundtruth_boxes: list of ground truth bbox in [x0, y0, x1, y1]
        detection_boxes: list of predicted truth bbox in [y0, x0, y1, x1]
        detection_scores: confidence score for each detection box
        iou_threshold: threshold used to consider a detection a valid overlap and possible true positive
    Returns:
        matches: np.ndarray with rows [gt_idx, det_idx, iou, score, gt_area],
            deduplicated so each groundtruth and each detection appears at
            most once.
    """
    matches = []
    # Candidate matches: every gt/detection pair whose IOU clears the bar.
    for i in range(len(groundtruth_boxes)):
        for j in range(len(detection_boxes)):
            iou, gt_box_area = compute_iou(
                groundtruth_boxes[i], detection_boxes[j]
            )
            if iou > iou_threshold:
                matches.append([i, j, iou, detection_scores[j], gt_box_area])
    matches = np.array(matches)
    if matches.shape[0] > 0:
        # Sort list of matches by descending IOU so we can remove duplicate detections
        # while keeping the highest IOU entry.
        matches = matches[matches[:, 2].argsort()[::-1][: len(matches)]]
        # Remove duplicate detections from the list.
        # (np.unique with return_index keeps the first — i.e. highest-IOU —
        # occurrence of each detection index.)
        matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
        # Sort the list again by descending confidence score first, then IOU.
        # This deals with cases where a gt detection has multiple high IOU
        # detections of different classes, with one correct, higher confidence
        # detection. Removing duplicates doesn't preserve our previous sort.
        matches = matches[np.lexsort((matches[:, 2], matches[:, 3]))[::-1]]
        # Remove duplicate ground truths from the list.
        matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
    return matches
def compute_confusion_matrix(matches, gt_classes, det_classes, num_categories):
    """Computes a confusion matrix to count true positives, false negatives, false positives.
    This iterates over the groundtruth and detection indices representing
    bounding boxes and their classes. Matches are identified when IOU is
    above the threshold.Each match in `matches` will count as a true positive
    or misidentified positive in a confusion matrix cell. If indices are not
    in `matches` they are then counted as either false negative or false positive.
    Worth noting, many detections ascribed as false positives actually have
    substantial overlap with a groundtruth detection, but not as much overlap as the
    matching true positive. In order to not double count objects, these extra
    detections need to be set as false positives.
    Args:
        matches (np.ndarray): rows of [gt_idx, det_idx, iou, score, gt_area]
            as produced by get_box_matches (may be empty).
        gt_classes (list): 1-based class IDs for each groundtruth box.
        det_classes (list): 1-based class IDs for each detected box.
        num_categories (int, optional): The number of categories.
    Returns:
        tuple: (confusion_matrix, gt_classes_sorted, det_classes_sorted);
            the matrix is (num_categories + 1) square, the extra row/column
            holding background (false negative / false positive) counts, and
            the two lists are matched 0-based label pairs in matrix order.
    """
    gt_classes_sorted = []
    det_classes_sorted = []
    confusion_matrix = np.zeros(shape=(num_categories + 1, num_categories + 1))
    for i in range(len(gt_classes)):
        if matches.shape[0] > 0 and matches[matches[:, 0] == i].shape[0] == 1:
            # match identified, count a true positive or a misidentification
            detection_class_cm_i = int(
                det_classes[int(matches[matches[:, 0] == i, 1][0])] - 1
            )  # index along cm axis and the class label associated with the match
            groundtruth_class_cm_i = int(gt_classes[i] - 1)
        else:
            # detection is background but groundtruth is another class, false negative
            detection_class_cm_i = int(confusion_matrix.shape[1] - 1)
            groundtruth_class_cm_i = int(gt_classes[i] - 1)
        gt_classes_sorted.append(groundtruth_class_cm_i)
        det_classes_sorted.append(detection_class_cm_i)
        confusion_matrix[groundtruth_class_cm_i][detection_class_cm_i] += 1
    for i in range(len(det_classes)):
        # catches case where some detections have matches and when there are no matches but there are gt
        if (
            matches.shape[0] > 0 and matches[matches[:, 1] == i].shape[0] == 0
        ) or matches.size == 0:
            # Unmatched detection: counted as a false positive against the
            # background row.
            detection_class_cm_i = int(det_classes[i] - 1)
            groundtruth_class_cm_i = int(confusion_matrix.shape[0] - 1)
            gt_classes_sorted.append(groundtruth_class_cm_i)
            det_classes_sorted.append(detection_class_cm_i)
            confusion_matrix[groundtruth_class_cm_i][detection_class_cm_i] += 1
    assert len(gt_classes_sorted) == len(det_classes_sorted)
    return confusion_matrix, gt_classes_sorted, det_classes_sorted
def plot_cm_sn(
    cm,
    names,
    outpath,
    title,
    iou_threshold,
    conf_threshold,
    normalize,
    norm_axis="predicted",
    cmap="Blues",
):
    """Plot a confusion matrix as a seaborn heatmap and save it as a PNG.

    Args:
        cm (np.ndarray): (n+1)x(n+1) confusion matrix; the extra row/column
            holds the background (false negative / false positive) bucket.
        names (list): the n class names, without the background bucket.
        outpath (str): path of the saved .png.
        title (str): plot title; thresholds are appended on a second line.
        iou_threshold (float): annotated in the title only.
        conf_threshold (float): annotated in the title only.
        normalize (bool): if True, convert counts to proportions first.
        norm_axis (str): "predicted" normalises columns, "true" normalises rows.
        cmap (str): matplotlib colormap name.
    """
    num_classes = len(names)
    cm_copy = cm.copy()
    if normalize:
        # 1e-6 avoids division by zero on empty rows/columns.
        if norm_axis == "predicted":
            cm_copy = cm / (cm.sum(0).reshape(1, num_classes + 1) + 1e-6)
        elif norm_axis == "true":
            cm_copy = cm / (cm.sum(1).reshape(num_classes + 1, 1) + 1e-6)
        cm_copy[cm_copy < 0.005] = np.nan  # don't annotate (would appear as 0.00)
    # reorder confusion matrix by diagonal
    diag = np.diag(cm_copy)  # get diagonal
    idx = np.argsort(
        diag
    )  # get all indicies of diag ordered, but true values sorted wrong
    idx_nonnan = np.argwhere(~np.isnan(diag))  # get nonnan indices
    nonnanlen = len(idx_nonnan)
    idx[0:nonnanlen] = np.concatenate(
        idx_nonnan, axis=0
    )  # replace non nan indices with sorted
    yticklabels = np.array(names + ["False Negative, \nonly groundtruth"])
    xticklabels = np.array(names + ["False Positive, \nonly detection"])
    # Apply the same permutation to the matrix and both label axes.
    cm_copy = cm_copy[idx, :][:, idx]
    yticklabels = yticklabels[idx]
    xticklabels = xticklabels[idx]
    # plot
    fig = plt.figure(figsize=(12, 9))
    sn.set(font_scale=1.0 if num_classes < 50 else 0.8)  # for label size
    labels = (0 < len(names) < 99) and len(
        names
    ) == num_classes  # apply names to ticklabels
    sn.heatmap(
        cm_copy,
        annot=num_classes < 30,
        annot_kws={"size": 8},
        cmap=cmap,
        fmt=".2f",
        square=True,
        yticklabels=list(yticklabels)
        if labels
        else "auto",  # switched ticklabels
        xticklabels=list(xticklabels) if labels else "auto",
    ).set_facecolor((1, 1, 1))
    fig.axes[0].set_xlabel("True", fontsize=16)
    fig.axes[0].set_ylabel(
        "Predicted",
        fontsize=16,
    )
    fig.axes[0].set_title(
        title
        + f"\n {iou_threshold} IOU and {conf_threshold} Confidence Score Thresholds",
        fontsize=19,
    )
    fig.tight_layout()
    fig.savefig(outpath, dpi=300)
    # NOTE(review): this runs after savefig, so it cannot affect the saved
    # figure — probably intended to run before savefig.
    plt.yticks(rotation=0)
    plt.close()
def cm_from_img_tuple(img_detection_tuple, class_names, IOU_thresh=0.5):
    """Computes confusion matrix from a single sample of predictions and groundtruth.
    Args:
        img_detection_tuple (tuple): Tuple with two dictionaries, one for
            groundtruth and one for predictions (in that order).
        class_names (list): class names; only the count is used to size the
            confusion matrix.
        IOU_thresh (float): The intersection over union threshold used to
            determine a match.
    Returns:
        np.array: The confusion matrix in numpy array format.
    """
    gt_dict, det_dict = img_detection_tuple
    # Sanity check: both dictionaries must describe the same image.
    assert det_dict["image_id"] == gt_dict["image_id"]
    # Pair up groundtruth and predicted boxes by IOU overlap.
    box_matches = get_box_matches(
        gt_dict["bboxes"],
        det_dict["boxes"],
        det_dict["scores"],
        IOU_thresh,
    )
    # y_true/y_pred (scikit-learn format) are discarded here; only the
    # matrix itself is returned.
    cm, _y_true, _y_pred = compute_confusion_matrix(
        box_matches,
        gt_dict["labels"],
        det_dict["classes"].astype(int),
        len(class_names),
    )
    return cm
def save_image(
    img_detection_tuple,
    outdir,
    class_names,
    iou_threshold,
    conf_threshold,
    cm=None,
):
    """Saves images with bounding boxes for prediction (blue) and groundtruth (red).
    Args:
        img_detection_tuple (tuple): Tuple with two dictionaries, one for
            groundtruth and one for predictions.
        outdir (str): where to save the images
        class_names (list): list of class names. must be supplied in order that matches
            both detection and groundtruth numerical class IDs
        iou_threshold (float): used only to annotate the per-image confusion
            matrix plot.
        conf_threshold (float): used only to annotate the per-image confusion
            matrix plot.
        cm (np.array, optional): The confusion matrix corresponding to img_detection_tuple.
            Defaults to None.
    Returns:
        str or None: filename of the saved per-image confusion matrix plot,
        or None when no confusion matrix was supplied.
    """
    img_gt_dict = img_detection_tuple[0]
    det_dict = img_detection_tuple[1]
    assert det_dict["image_id"] == img_gt_dict["image_id"]
    image_np = img_gt_dict["img_arr"].numpy()
    image_id = str(img_gt_dict["image_id"].decode("utf-8")).strip("''")
    gt_bboxes = img_gt_dict["bboxes"]
    gt_class_name = img_gt_dict["class_name"]
    pred_bboxes = det_dict["boxes"]
    pred_labels = det_dict["classes"]
    pred_scores = det_dict["scores"]
    # Fix: initialise so the trailing `return cmname` cannot raise NameError
    # when no confusion matrix is supplied.
    cmname = None
    if cm is not None:
        cmname = image_id[:-4] + "_cm" + ".png"
        plot_cm_sn(
            cm,
            class_names,
            outpath=op.join(outdir, cmname),
            title=f"{image_id[:-4]} Only Confusion Matrix",
            normalize=False,
            iou_threshold=iou_threshold,
            conf_threshold=conf_threshold,
        )
    ############################
    # Draw bbox
    ############################
    img = Image.fromarray(image_np).convert("RGB")
    draw = ImageDraw.Draw(img)
    image_path = op.join(outdir, str(image_id))
    for i, gtbbox in enumerate(gt_bboxes):
        # Scale normalised [x0, y0, x1, y1] coords to pixels.
        # NOTE(review): assumes a 400x400 rendered image — confirm.
        gtbbox = [gtbbox[i] * 400 for i in range(len(gtbbox))]
        xmin, ymin, xmax, ymax = gtbbox
        draw.rectangle(gtbbox, outline="#ff0000")
        x_label = xmin + (xmax - xmin) / 2
        draw.text(
            (x_label - 15, ymax),
            text=str(gt_class_name[i].decode("utf-8")),
            fill="red",
            align="right",
        )
    for i, pred_bbox in enumerate(pred_bboxes):
        pred_bbox = [pred_bbox[i] * 400 for i in range(len(pred_bbox))]
        # Predictions arrive as [y0, x0, y1, x1]; reorder for PIL drawing.
        ymin, xmin, ymax, xmax = pred_bbox
        pred_bbox = [xmin, ymin, xmax, ymax]
        draw.rectangle(pred_bbox, outline="#0000ff")
        x_label = xmin + (xmax - xmin) / 2
        class_i = int(pred_labels[i])
        draw.text(
            (x_label - 15, ymax),
            text=f"{class_names[class_i-1]} {np.round(pred_scores[i],decimals=3).astype('|S4').decode('utf-8')}",
            fill="blue",
            align="right",
        )
    img.save(image_path, "JPEG")
    return cmname
def save_metrics(confusion_matrix, categories, output_path, iou_threshold):
    """
    Write a cvs that saves recall, precision, f1 and map score
    Args:
        confusion_matrix: computed confusion matrix for classes
        categories: classes in list;
        output_path: output path for the csv
        iou_threshold: IOU threshold, embedded in the csv column names
    Returns:
        (None): saved csv
    """
    print("\nConfusion Matrix:")
    print(confusion_matrix, "\n")
    print(f"Confusion Matrix Shape: {confusion_matrix.shape}")
    rows = []
    for idx, name in enumerate(categories):
        # Column sum = everything that truly belongs to this class;
        # row sum = everything predicted as this class.
        true_total = np.sum(confusion_matrix[:, idx])
        pred_total = np.sum(confusion_matrix[idx, :])
        recall = float(confusion_matrix[idx, idx] / true_total) if true_total else 0
        precision = (
            float(confusion_matrix[idx, idx] / pred_total) if pred_total else 0
        )
        denom = precision + recall
        f1 = 2 * precision * recall / denom if denom else 0
        # Precision sampled at each of 11 recall levels the class reaches;
        # their mean is the (interpolated) average precision.
        prec_at_rec = np.array(
            [precision for level in np.linspace(0.0, 1.0, 11) if recall >= level]
        )
        avg_prec = np.mean(prec_at_rec)
        rows.append(
            {
                "category": name,
                f"precision_@{iou_threshold}IOU": precision,
                f"recall_@{iou_threshold}IOU": recall,
                f"map_@{iou_threshold}IOU": avg_prec,
                f"f1_@{iou_threshold}IOU": f1,
            }
        )
    pd.DataFrame(rows).to_csv(output_path)
@click.command()
@click.option(
    "--tfrecords_folder",
    default="./training_data_aiaia_p400",
    help="The folder containing the subfolders that contain tfrecords.",
)
@click.option(
    "--outdir",
    default="./outputs",
    help="Where to save output images with bounding boxes drawn, metrics, and plots.",
)
@click.option(
    "--model_rel_path",
    default="./frozen_inference_graph.pb",
    help="The path to the folder containing the frozen graph .pb file.",
)
@click.option(
    "--class_names",
    "-cn",
    multiple=True,
    default=[
        "buffalo",
        "dark_coloured_large",
        "elephant",
        "giraffe",
        "hippopotamus",
        "light_coloured_large",
        "smaller_ungulates",
        "warthog",
        "zebra",
    ],
    help="The class names that match the order of the class IDs from the prediction output and groundtruth. Use like -cn buffalo -cn dark_coloured_large -cn elephant etc. Order matters and should match the order defined in the class_map.csv.",
)
@click.option(
    "--model_type",
    default="wildlife",
    type=click.Choice(
        ["human_activities", "wildlife", "livestock"], case_sensitive=True
    ),
    help="The model type used to filter tfrecords in subfolders to run evaluation on different detection problems. These include human_activities, wildlife, and livestock.",
)
@click.option(
    "--iou_threshold",
    default=0.5,
    type=float,
    help="Threshold to set boxes with low overlap as not potential true positives. Defaults to .5",
)
@click.option(
    "--conf_threshold",
    default=0.5,
    type=float,
    help="Threshold to throw away low confidence detections. Defaults to .5",
)
@click.option(
    "--save_individual_images",
    default=True,
    type=bool,
    help="Whether to save individual images with bounding boxes drawn. Useful for debugging and inspecting model results. Defaults to True",
)
def run_eval(
    tfrecords_folder,
    outdir,
    model_rel_path,
    class_names,
    model_type,
    iou_threshold,
    conf_threshold,
    save_individual_images,
):
    """Computes metrics for TFrecords containing test images and groundtruth.
    Also saves out groundtruth and prediction boxes drawn on images.
    A frozen graph model must be supplied for predictions. Computes, plots and
    saves confusion matrix and a csv with metrics, including total f1 score.
    """
    print(f"Starting evaluation for model: {model_type}")
    # Reading all images and groundtruth
    class_names = list(class_names)  # it initially gets parsed as a tuple
    all_gt = []
    all_imgs_and_gt = []
    tfrecord_regex = f"test*{model_type}*.tfrecords"
    # list tfrecord folders
    for tfrecords_subfolder in glob.glob(tfrecords_folder + "/*/"):
        for tfrecord_path in glob.glob(
            op.join(tfrecords_subfolder, tfrecord_regex)
        ):
            sub_dir = op.join(outdir, "")
            if not op.exists(sub_dir):
                os.makedirs(sub_dir)
            items = get_example(tfrecord_path)
            items_no_imgs = get_example_no_img(tfrecord_path)
            all_gt.extend(items_no_imgs)
            all_imgs_and_gt.extend(items)
    print("TFRecords opened")
    # Testing the tf model on subset of test set, all_imgs_and_gt
    model_path = op.join(os.getcwd(), model_rel_path)
    detection_graph = tf1.Graph()
    with detection_graph.as_default():
        od_graph_def = tf1.GraphDef()
        with tf1.gfile.GFile(model_path, "rb") as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf1.import_graph_def(od_graph_def, name="")
    det_dicts = tf1_od_pred(all_imgs_and_gt, detection_graph)
    print("Inference complete")
    (
        all_gt,
        det_dicts_filtered,
        all_gt_boxes,
        all_detection_boxes,
        all_gt_classes,
        all_det_classes,
        all_det_scores,
        all_detection_img_names,
        all_gt_img_names,
    ) = filter_detections_and_concat_boxes(
        all_gt, det_dicts, conf_threshold=conf_threshold
    )
    print("Detections filtered by confidence")
    # save out images with both boxes drawn for sanity check
    if save_individual_images:
        test_imgs_dir = op.join(outdir, "test_imgs_with_boxes")
        if not op.exists(test_imgs_dir):
            os.makedirs(test_imgs_dir)
    all_imgs_gt_det = list(zip(all_imgs_and_gt, det_dicts_filtered))
    cms = []
    for item in all_imgs_gt_det:
        cm = cm_from_img_tuple(item, class_names, iou_threshold)
        # Swap the last row and column (background bucket) so the plot's
        # axis orientation matches plot_cm_sn's Predicted/True labelling.
        cm_copy = cm.copy()
        cm[:, len(class_names)] = cm_copy[len(class_names), :]
        cm[len(class_names), :] = cm_copy[:, len(class_names)]
        cms.append(cm)
        if save_individual_images:
            cmname = save_image(
                item,
                test_imgs_dir,
                class_names,
                iou_threshold,
                conf_threshold,
                cm,
            )
            save_metrics(
                cm,
                class_names,
                op.join(
                    test_imgs_dir,
                    f"{cmname}_{iou_threshold}_{conf_threshold}_metrics.csv",
                ),
                iou_threshold=iou_threshold,
            )
    print("Images saved with bounding boxes drawn.")
    # Aggregate confusion matrix across every evaluated image.
    bigcm = np.sum(np.array(cms), axis=0)
    # Fix: the previous `class_names.copy().append("background")` evaluated
    # to None (list.append returns None), so the CSV lost its row/column
    # labels. Build the labelled name list explicitly instead.
    cnames_bground = class_names + ["background"]
    pd.DataFrame(bigcm, index=cnames_bground, columns=cnames_bground).to_csv(
        op.join(outdir, f"{model_type}_confusion_matrix.csv")
    )
    plot_title = model_type.replace("_", " ").title()
    print(class_names)
    plot_cm_sn(
        bigcm,
        class_names,
        outpath=op.join(
            outdir,
            f"{model_type}_{iou_threshold}_{conf_threshold}_confusion_matrix_predicted_normed.png",
        ),
        title=f"{plot_title} Confusion Matrix, Proportions",
        iou_threshold=iou_threshold,
        conf_threshold=conf_threshold,
        normalize=True,
        norm_axis="predicted",
    )
    plot_cm_sn(
        bigcm,
        class_names,
        outpath=op.join(
            outdir,
            f"{model_type}_{iou_threshold}_{conf_threshold}_confusion_matrix_true_normed.png",
        ),
        title=f"{plot_title} Confusion Matrix, Proportions",
        iou_threshold=iou_threshold,
        conf_threshold=conf_threshold,
        normalize=True,
        norm_axis="true",
    )
    plot_cm_sn(
        bigcm,
        class_names,
        outpath=op.join(
            outdir,
            f"{model_type}_{iou_threshold}_{conf_threshold}_confusion_matrix_counts.png",
        ),
        title=f"{plot_title} Confusion Matrix, Counts",
        iou_threshold=iou_threshold,
        conf_threshold=conf_threshold,
        normalize=False,
        cmap="Greens",
    )
    save_metrics(
        bigcm,
        class_names,
        op.join(
            outdir,
            f"{model_type}_{iou_threshold}_{conf_threshold}_metrics.csv",
        ),
        iou_threshold=iou_threshold,
    )
    class_str = "\n".join(class_names)
    print(
        f"Complete, Confusion matrix, images, and metrics saved in {outdir} for these classes: {class_str}"
    )
if __name__ == "__main__":
    # Entry point: click parses the CLI flags and invokes run_eval.
    run_eval()
| StarcoderdataPython |
# explorer/utils/rpc.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright Blaze 2021.
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
https://www.boost.org/LICENSE_1_0.txt)
"""
from typing import Callable, Dict, Optional, Tuple, Union, cast, List, overload
from collections import namedtuple
import time
from web3.types import FilterParams, LogReceipt
from hexbytes import HexBytes
from web3 import Web3
import psycopg
import gevent
from explorer.utils.data import BRIDGE_ABI, PSQL, SYN_DATA, LOGS_REDIS_URL, \
TOKENS_INFO, TOPICS, TOPIC_TO_EVENT, Direction, CHAINS_REVERSED, \
MISREPRESENTED_MAP
from explorer.utils.helpers import convert, retry, search_logs, \
iterate_receipt_logs
from explorer.utils.database import Transaction, LostTransaction
from explorer.utils.contract import get_pool_data
# Start blocks of the 4pool >=Nov-7th-2021.
# First block to scan per chain when no progress is stored in Redis; the
# per-key comments below record which pool deployment each height refers to.
_start_blocks = {
    'ethereum': 13566427,
    'arbitrum': 2876718,  # nUSD pool
    'avalanche': 6619002,  # nUSD pool
    'bsc': 12431591,  # nUSD pool
    'fantom': 21297076,  # nUSD Pool
    'polygon': 21071348,  # nUSD pool
    'harmony': 19163634,  # nUSD pool
    'boba': 16221,  # nUSD pool
    'moonriver': 890949,
    'optimism': 30819,  # nETH pool
    'aurora': 56092179,
    'moonbeam': 173355,
    'cronos': 1578335,
    'metis': 957508,
    'dfk': 0,
}

# Canonical mainnet WETH contract address.
WETH = HexBytes('0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2')
# Maximum block span requested per eth_getLogs call.
MAX_BLOCKS = 2048
OUT_SQL = """
INSERT into
txs (
from_tx_hash,
from_address,
to_address,
sent_value,
from_chain_id,
to_chain_id,
sent_time,
sent_token,
kappa
)
VALUES
(%s, %s, %s, %s, %s, %s, %s, %s, %s);
"""
IN_SQL = """
UPDATE
txs
SET
(
to_tx_hash,
received_value,
pending,
received_time,
received_token,
swap_success
) = (
%s,
%s,
false,
%s,
%s,
%s
)
WHERE
kappa = %s;
"""
LOST_IN_SQL = """
INSERT into
lost_txs (
to_tx_hash,
to_address,
received_value,
to_chain_id,
received_time,
received_token,
swap_success,
kappa
)
VALUES
(%s, %s, %s, %s, %s, %s, %s, %s);
"""
class Events(object):
    """Factories converting decoded bridge-event args into small namedtuples.

    The OUT section covers events emitted when assets leave a chain; the IN
    section covers the matching arrival events on the destination chain.
    """

    # ---------------- OUT events ----------------
    @classmethod
    def TokenDepositAndSwap(cls, args):
        fields = namedtuple('x', ['to', 'chain_id', 'token_idx_to'])
        return fields(HexBytes(args['to']), args['chainId'],
                      args['tokenIndexTo'])

    # Same payload layout as a deposit-and-swap.
    TokenRedeemAndSwap = TokenDepositAndSwap

    @classmethod
    def TokenDeposit(cls, args):
        fields = namedtuple('x', ['to', 'chain_id', 'sent_token',
                                  'sent_value'])
        return fields(HexBytes(args['to']), args['chainId'], args['token'],
                      args['amount'])

    @classmethod
    def TokenRedeemAndRemove(cls, args):
        fields = namedtuple('x', ['to', 'chain_id', 'token_idx_to'])
        return fields(HexBytes(args['to']), args['chainId'],
                      args['swapTokenIndex'])

    @classmethod
    def TokenRedeem(cls, args):
        fields = namedtuple('x', ['to', 'chain_id', 'token'])
        return fields(HexBytes(args['to']), args['chainId'], args['token'])

    # ---------------- IN events ----------------
    @classmethod
    def TokenWithdrawAndRemove(cls, args):
        fields = namedtuple(
            'x', ['to', 'fee', 'token_idx_to', 'swap_success', 'token'])
        return fields(HexBytes(args['to']), args['fee'],
                      args['swapTokenIndex'], args['swapSuccess'],
                      args['token'])

    @classmethod
    def TokenWithdraw(cls, args):
        fields = namedtuple('x', ['to', 'fee', 'token', 'amount'])
        return fields(HexBytes(args['to']), args['fee'], args['token'],
                      args['amount'])

    # A mint carries the same fields as a withdraw.
    TokenMint = TokenWithdraw

    @classmethod
    def TokenMintAndSwap(cls, args):
        fields = namedtuple(
            'x', ['to', 'fee', 'token_idx_to', 'swap_success', 'token'])
        return fields(HexBytes(args['to']), args['fee'],
                      args['tokenIndexTo'], args['swapSuccess'],
                      args['token'])
def check_factory(max_value: int):
    """Create a predicate accepting received amounts up to ``max_value``.

    The returned callable matches the ``(token, received)`` signature used
    by ``iterate_receipt_logs``; the token argument is intentionally unused.
    """
    def within_limit(token: HexBytes, received: int) -> bool:
        return max_value >= received

    return within_limit
# Typing-only overloads for bridge_callback; the implementation follows.
# The second overload documents that testing=True returns the parsed
# Transaction/LostTransaction instead of persisting it.
@overload
def bridge_callback(chain: str,
                    address: str,
                    log: LogReceipt,
                    abi: str = BRIDGE_ABI,
                    save_block_index: bool = True) -> None:
    ...


@overload
def bridge_callback(
        chain: str,
        address: str,
        log: LogReceipt,
        abi: str = BRIDGE_ABI,
        save_block_index: bool = True,
        testing: bool = False) -> Union[Transaction, LostTransaction]:
    ...
# REF: https://github.com/synapsecns/synapse-contracts/blob/master/contracts/bridge/SynapseBridge.sol#L63-L129
def bridge_callback(
        chain: str,
        address: str,
        log: LogReceipt,
        abi: str = BRIDGE_ABI,
        save_block_index: bool = True,
        testing: bool = False
) -> Optional[Union[Transaction, LostTransaction]]:
    """Parse one bridge event log and persist it to Postgres.

    OUT events insert a pending row keyed by kappa; IN events complete the
    matching row, or fall back to the lost_txs table when no pending row
    exists. With ``testing=True`` the parsed record is returned instead of
    being written, and with ``save_block_index`` the scan progress is
    checkpointed in Redis.
    """
    w3: Web3 = SYN_DATA[chain]['w3']
    contract = w3.eth.contract(w3.toChecksumAddress(address), abi=abi)

    tx_hash = log['transactionHash']
    timestamp = w3.eth.get_block(log['blockNumber'])
    timestamp = timestamp['timestamp']  # type: ignore
    tx_info = w3.eth.get_transaction(tx_hash)
    assert 'from' in tx_info  # Make mypy happy - look key 'from' exists!
    from_chain = CHAINS_REVERSED[chain]

    # The info before wrapping the asset can be found in the receipt.
    receipt = w3.eth.wait_for_transaction_receipt(tx_hash,
                                                  timeout=10,
                                                  poll_latency=0.5)

    topic = cast(str, convert(log['topics'][0]))
    if topic not in TOPICS:
        raise RuntimeError(f'sanity check? got invalid topic: {topic}')

    event = TOPIC_TO_EVENT[topic]
    direction = TOPICS[topic]
    args = contract.events[event]().processLog(log)['args']

    if direction == Direction.OUT:
        # kappa is derived from the source tx hash; the destination chain
        # emits the same kappa so the two legs can be joined.
        kappa = w3.keccak(text=tx_hash.hex())

        def get_sent_info(_log: LogReceipt) -> Optional[Tuple[HexBytes, int]]:
            # Returns (token, amount) for the first known-token transfer in
            # the receipt, or None when the log's token is unknown.
            if _log['address'].lower() not in TOKENS_INFO[chain]:
                return None

            sent_token_address = HexBytes(_log['address'])
            sent_token = TOKENS_INFO[chain][sent_token_address.hex()]

            # TODO: test WETH transfers on other chains.
            if sent_token['symbol'] != 'WETH' and chain == 'ethereum':
                ret = sent_token['_contract'].events.Transfer()
                ret = ret.processLog(_log)
                sent_value = ret['args']['value']
            else:
                # Deposit (index_topic_1 address dst, uint256 wad)
                sent_value = int(_log['data'], 16)

            return sent_token_address, sent_value

        sent_token_address = sent_value = None

        for _log in receipt['logs']:
            ret = get_sent_info(_log)
            if ret is not None:
                sent_token_address, sent_value = ret
                break

        if sent_token_address is None or sent_value is None:
            raise RuntimeError(
                f'did not find sent_token_address or sent_value got: ',
                sent_token_address,
                sent_value,
            )

        if event in ['TokenDepositAndSwap', 'TokenRedeemAndSwap']:
            data = Events.TokenDepositAndSwap(args)
        elif event == 'TokenDeposit':
            data = Events.TokenDeposit(args)
        elif event == 'TokenRedeemAndRemove':
            data = Events.TokenRedeemAndRemove(args)
        elif event == 'TokenRedeem':
            data = Events.TokenRedeem(args)
        else:
            raise RuntimeError(
                f'did not converge OUT event: {event} {tx_hash.hex()} {chain}'
                f' args: {args}')

        if testing:
            return Transaction(tx_hash, None, HexBytes(tx_info['from']),
                               data.to, sent_value, None, True, from_chain,
                               data.chain_id, timestamp, None, None,
                               sent_token_address, None, kappa)

        with PSQL.connection() as conn:
            with conn.cursor() as c:
                try:
                    c.execute(OUT_SQL,
                              (tx_hash, HexBytes(tx_info['from']), data.to,
                               sent_value, from_chain, data.chain_id,
                               timestamp, sent_token_address, kappa))
                except psycopg.errors.UniqueViolation:
                    # Row already recorded by a previous scan pass.
                    # TODO: stderr? rollback?
                    pass
    elif direction == Direction.IN:
        received_value = None
        kappa = args['kappa']

        if event in ['TokenWithdrawAndRemove', 'TokenMintAndSwap']:
            assert 'input' in tx_info  # IT EXISTS MYPY!
            # The pool address only exists in the calldata, not the event.
            _, inp_args = contract.decode_function_input(tx_info['input'])
            pool = get_pool_data(chain, inp_args['pool'])

            if event == 'TokenWithdrawAndRemove':
                data = Events.TokenWithdrawAndRemove(args)
            elif event == 'TokenMintAndSwap':
                data = Events.TokenMintAndSwap(args)
            else:
                # Will NEVER reach here - comprendo mypy???
                raise

            if data.swap_success:
                received_token = pool[data.token_idx_to]
            elif chain == 'ethereum':
                # nUSD (eth) - nexus assets are not in eth pools.
                received_token = '0x1b84765de8b7566e4ceaf4d0fd3c5af52d3dde4f'
            else:
                # Failed swap: the user receives the pool's base token.
                received_token = pool[0]

            received_token = HexBytes(received_token)
            swap_success = data.swap_success
        elif event in ['TokenWithdraw', 'TokenMint']:
            data = Events.TokenWithdraw(args)
            received_token = HexBytes(data.token)
            swap_success = None

            if event == 'TokenWithdraw':
                received_value = data.amount - data.fee
        else:
            raise RuntimeError(
                f'did not converge event IN: {event} {tx_hash.hex()} {chain} '
                f'args: {args}')

        if (chain in MISREPRESENTED_MAP
                and received_token in MISREPRESENTED_MAP[chain]):
            received_token = MISREPRESENTED_MAP[chain][received_token]

        if received_value is None:
            # Fall back to the token's Transfer log in the receipt.
            received_value = search_logs(chain, receipt,
                                         received_token)['value']

        if event == 'TokenMint':
            # emit TokenMint(to, token, amount.sub(fee), fee, kappa);
            if received_value != data.amount:  # type: ignore
                received_token, received_value = iterate_receipt_logs(
                    receipt,
                    check_factory(data.amount)  # type: ignore
                )

        # Must equal to False rather than eval to False since None is falsy.
        if swap_success == False:
            # The `received_value` we get earlier would be the initial bridged
            # amount without the fee excluded.
            received_value -= data.fee

        if testing:
            return LostTransaction(tx_hash, data.to, received_value,
                                   from_chain, timestamp, received_token,
                                   swap_success, kappa)

        params = (tx_hash, received_value, timestamp, received_token,
                  swap_success, kappa)

        with PSQL.connection() as conn:
            conn.autocommit = True

            with conn.cursor() as c:
                try:
                    c.execute(IN_SQL, params)

                    # rowcount 0: no pending OUT row — record as lost.
                    if c.rowcount == 0:
                        c.execute(LOST_IN_SQL,
                                  (tx_hash, data.to, received_value,
                                   from_chain, timestamp, received_token,
                                   swap_success, args['kappa']))
                    else:
                        if c.rowcount != 1:
                            # TODO: Rollback here?
                            raise RuntimeError(
                                f'`IN_SQL` with args {params}, affected {c.rowcount}'
                                f' {tx_hash.hex()} {chain}')
                except Exception as e:
                    # Best-effort fallback: try to at least persist the
                    # event into lost_txs before giving up.
                    try:
                        c.execute(LOST_IN_SQL,
                                  (tx_hash, data.to, received_value,
                                   from_chain, timestamp, received_token,
                                   swap_success, args['kappa']))
                        print(e)
                    except psycopg.errors.UniqueViolation:
                        pass

    if save_block_index:
        # Checkpoint scan progress so get_logs can resume after this log.
        LOGS_REDIS_URL.set(f'{chain}:logs:{address}:MAX_BLOCK_STORED',
                           log['blockNumber'])
        LOGS_REDIS_URL.set(f'{chain}:logs:{address}:TX_INDEX',
                           log['transactionIndex'])
def get_logs(
        chain: str,
        callback: Callable[[str, str, LogReceipt], None],
        address: str,
        start_block: int = None,
        till_block: int = None,
        max_blocks: int = MAX_BLOCKS,
        topics: List[str] = list(TOPICS),
        key_namespace: str = 'logs',
        start_blocks: Dict[str, int] = _start_blocks,
) -> None:
    """Scan a chain's logs for `address` in `max_blocks` windows, feeding each
    matching log (chronologically ordered) to `callback` via `retry`.

    Resumes from the Redis checkpoint written by the callback when
    `start_block` is not given; otherwise starts from `start_blocks[chain]`.
    """
    # NOTE(review): `topics` and `start_blocks` use mutable default
    # arguments, evaluated once at import time; both are only read here.
    w3: Web3 = SYN_DATA[chain]['w3']
    _chain = f'[{chain}]'
    chain_len = max(len(c) for c in SYN_DATA) + 2
    tx_index = -1

    if start_block is None:
        _key_block = f'{chain}:{key_namespace}:{address}:MAX_BLOCK_STORED'
        _key_index = f'{chain}:{key_namespace}:{address}:TX_INDEX'

        if (ret := LOGS_REDIS_URL.get(_key_block)) is not None:
            # Never resume from before the pool deployment height.
            start_block = max(int(ret), start_blocks[chain])

            if (ret := LOGS_REDIS_URL.get(_key_index)) is not None:
                tx_index = int(ret)
        else:
            start_block = start_blocks[chain]

    if till_block is None:
        till_block = w3.eth.block_number

    print(
        f'{key_namespace} | {_chain:{chain_len}} starting from {start_block} '
        f'with block height of {till_block}')
    # NOTE(review): `jobs` is never appended to; the gevent.joinall below is
    # effectively a no-op — confirm whether async dispatch was intended.
    jobs: List[gevent.Greenlet] = []
    _start = time.time()
    x = 0
    total_events = 0
    initial_block = start_block

    while start_block < till_block:
        to_block = min(start_block + max_blocks, till_block)

        params: FilterParams = {
            'fromBlock': start_block,
            'toBlock': to_block,
            'address': w3.toChecksumAddress(address),
            'topics': [topics],  # type: ignore
        }

        logs: List[LogReceipt] = w3.eth.get_logs(params)

        # Apparently, some RPC nodes don't bother
        # sorting events in a chronological order.
        # Let's sort them by block (from oldest to newest)
        # And by transaction index (within the same block,
        # also in ascending order)
        logs = sorted(
            logs,
            key=lambda k: (k['blockNumber'], k['transactionIndex']),
        )

        for log in logs:
            # Skip transactions from the very first block
            # that are already in the DB
            if log['blockNumber'] == initial_block \
                    and log['transactionIndex'] <= tx_index:
                continue
            retry(callback, chain, address, log)

        start_block += max_blocks + 1

        y = time.time() - _start
        total_events += len(logs)
        percent = 100 * (to_block - initial_block) \
            / (till_block - initial_block)
        print(f'{key_namespace} | {_chain:{chain_len}} elapsed {y:5.1f}s'
              f' ({y - x:5.1f}s), found {total_events:5} events,'
              f' {percent:4.1f}% done: so far at block {start_block}')
        x = y

    gevent.joinall(jobs)
    print(f'{_chain:{chain_len}} it took {time.time() - _start:.1f}s!')
| StarcoderdataPython |
print('-=-'*20)
print('Analizador de Triângulos')
print('-=-'*20)
r1 = float(input('Digite o 1º segmento: '))
r2 = float(input('Digite o 2º segmento: '))
r3 = float(input('Digite o 3º segmento: '))
# Triangle inequality: each side must be strictly shorter than the sum of
# the other two for the segments to close a triangle.
if r1 < r2 + r3 and r2 < r1 + r3 and r3 < r1 + r2:
    print(' Os segmentos {}, {} e {} formam um triangulo.'.format(r1, r2, r3))
    if r1 == r2 == r3:
        print('Equilatero.')
    elif r1 != r2 != r3 != r1:
        # All three sides pairwise different.
        print('Escaleno.')
    else:
        # Exactly two equal sides. Fix: message was misspelled "Esoceles".
        print('Isósceles.')
else:
    print(' Os segmentos {}, {} e {} não formam um triangulo.'.format(r1, r2, r3))
| StarcoderdataPython |
# coding=UTF-8
import tensorflow as tf
from google.protobuf import text_format
from utils.common_info import CommonInfo
class SimpleModel:
    """Describes one sub-model: the node names it owns plus its I/O boundary."""

    def __init__(self):
        self.model_name = ''
        # Names of every graph node assigned to this model.
        self.nodes = set()
        # Boundary nodes feeding data into / out of the model.
        self.inputs = set()
        self.outputs = set()
        # Count of nodes that are known and not "skip" ops.
        self.valid_node_nums = 0

    def add_single_node(self, node):
        """Add one node name to the model; a None argument is ignored."""
        if node is not None:
            self.nodes.add(node)

    def add_node_set(self, node_set):
        """Merge a collection of node names; a None argument is ignored."""
        if node_set is not None:
            self.nodes = self.nodes.union(node_set)

    def set_model_name(self, name):
        """Assign the model's display name."""
        self.model_name = name

    def get_inputs(self):
        """Return the input boundary node names as a list."""
        return list(self.inputs)

    def get_outputs(self):
        """Return the output boundary node names as a list."""
        return list(self.outputs)

    def get_nodes(self):
        """Return every node name in the model as a list."""
        return list(self.nodes)

    def get_valid_node_num(self):
        """Return the count cached by count_valid_node_num()."""
        return self.valid_node_nums

    def count_valid_node_num(self, valid_nodes_dict):
        """Cache how many owned nodes exist in the lookup and are not skip ops."""
        total = 0
        for name in self.nodes:
            node = valid_nodes_dict.get(name)
            if node and not CommonInfo.is_skip_tf_op(node.type):
                total += 1
        self.valid_node_nums = total

    def gen_io_nodes(self, valid_nodes_dict):
        """Recompute the model's input/output boundary.

        A node is an input when it has no parents or any parent outside
        this model; symmetrically, it is an output when it has no children
        or any child outside this model.
        """
        self.inputs.clear()
        self.outputs.clear()
        for name in self.nodes:
            node = valid_nodes_dict.get(name)
            if not node:
                continue
            if not node.parent or any(p not in self.nodes
                                      for p in node.parent):
                self.inputs.add(name)
            if not node.child or any(c not in self.nodes
                                     for c in node.child):
                self.outputs.add(name)
class Node:
    """A graph node: name, op type, and parent/child edge sets (by name)."""

    def __init__(self, name, op, input_nodes):
        self.name = name
        self.type = op
        self.parent = set()
        self.child = set()
        self.add_parents(input_nodes)

    def add_parents(self, nodes):
        """Record parent node names; a None collection is ignored."""
        if nodes is None:
            return
        self.parent.update(nodes)

    def add_child(self, node):
        """Record one child node name; a None argument is ignored."""
        if node is not None:
            self.child.add(node)
class Parser:
    ''' Parse a tensorflow graph and split it into sub-models.

    Builds two node maps: ``complete_nodes`` (every node; Const and
    weight-carrying Identity nodes are typed 'Weight') and
    ``valid_node_map`` (computational nodes only), then picks a middle
    node on the longest path as the cut point.
    '''
    def __init__(self):
        # Sentinel returned by dict lookups when a node name is missing.
        self.invalid = 'invalid'
        self.model_inputs = list()
        # Names of branch points (more than one parent or child).
        self.cross_node_map = set()
        self.valid_node_map = dict()
        self.complete_nodes = dict()
        self.models_name = ['top_model', 'bottom_model']
    @staticmethod
    def get_graph_def(pb):
        ''' get graph_def from pb file
        '''
        with open(pb, 'rb') as pb_file:
            proto = tf.compat.v1.GraphDef()
            proto.ParseFromString(pb_file.read())
        return proto
    @staticmethod
    def save_pbtxt(path, proto):
        ''' convert graph_def to txt and save it at *path*
        '''
        with open(path, 'w') as pbtxt_file:
            pbtxt_file.write(text_format.MessageToString(proto))
        print('pbtxt path: ' + path)
    @staticmethod
    def save_pb(path, proto):
        ''' save graph_def as pb file
        '''
        with open(path, 'wb') as pb_file:
            pb_file.write(proto.SerializeToString())
    def get_cross_node_map(self):
        ''' record the names of nodes that branch (multi-parent/multi-child)
        '''
        for node in self.valid_node_map.values():
            if len(node.parent) > 1 or len(node.child) > 1:
                self.cross_node_map.add(node.name)
    def graph_builder(self, graph_def):
        ''' get model info from graph_def: fill complete_nodes,
        valid_node_map and model_inputs
        '''
        self.complete_nodes.clear()
        for node in graph_def.node:
            # NOTE(review): 'type' shadows the builtin here; kept as-is.
            type = node.op
            # Const nodes, and Identity nodes carrying a '_class' colocation
            # attr, are treated as weights (excluded from the compute graph).
            if (node.op == 'Identity' and '_class' in node.attr.keys()) \
                    or node.op == 'Const':
                type = 'Weight'
            self.complete_nodes[node.name] = Node(node.name, type, node.input)
        del self.model_inputs[:]
        self.valid_node_map.clear()
        for node_name in self.complete_nodes:
            node = self.complete_nodes[node_name]
            if node.type == 'Weight':
                continue
            # Keep only non-weight, known parents as valid inputs.
            valid_inputs = []
            for pre_node in node.parent:
                parent = self.complete_nodes.get(pre_node, self.invalid)
                if parent != self.invalid and parent.type != 'Weight':
                    valid_inputs.append(pre_node)
            self.valid_node_map[node.name] = Node(node.name, node.type,
                                                  valid_inputs)
            if not node.parent:
                self.model_inputs.append(node.name)
    def update_graph_edge(self):
        ''' add child edges so the graph can be walked downward too
        '''
        for node_name in self.valid_node_map:
            node = self.valid_node_map[node_name]
            for pre_node in node.parent:
                parent = self.valid_node_map.get(pre_node, self.invalid)
                if parent != self.invalid and parent.type != 'Weight':
                    parent.add_child(node.name)
                    self.valid_node_map[parent.name] = parent
                else:
                    # NOTE(review): when the lookup returned the 'invalid'
                    # sentinel (a str), parent.name below raises
                    # AttributeError — confirm this branch is reachable.
                    print('[Error] the input of node is invalid. \
Please check graph-builder')
                    print('current node {} --> input {}'.format(
                        node_name, parent.name))
    def parse_model(self, graph_def):
        ''' parse tensorflow model: build node maps, edges, branch points
        '''
        self.graph_builder(graph_def)
        self.update_graph_edge()
        self.get_cross_node_map()
    def get_longest_path(self, node_map, root_nodes):
        ''' find the longest path in model via level-order walk from roots
        '''
        # {cur_node: parent_node} along the longest path
        longest_node_pair = dict()
        cache_nodes_name = set()
        for input_name in root_nodes:
            longest_node_pair[input_name] = self.invalid
            cache_nodes_name.add(input_name)
        while cache_nodes_name:
            tmp_cache = set()
            for name in cache_nodes_name:
                node = node_map.get(name, self.invalid)
                if node == self.invalid:
                    continue
                for child in node.child:
                    longest_node_pair[child] = node.name
                    tmp_cache.add(child)
            if not tmp_cache:
                break
            cache_nodes_name = tmp_cache
        longest_path = []
        # If there are more than 1 longest paths, choose a path randomly.
        cur_node = cache_nodes_name.pop()
        while cur_node:
            pair_node = longest_node_pair.get(cur_node, self.invalid)
            if pair_node == self.invalid:
                # Root reached (roots map to the sentinel), so stop; note
                # the root itself is therefore not appended.
                break
            longest_path.append(cur_node)
            cur_node = pair_node
        longest_path.reverse()
        return longest_path
    def get_middle_node(self):
        ''' find the middle node of the longest path to use as split point
        '''
        longest_path = self.get_longest_path(self.valid_node_map,
                                             self.model_inputs)
        if not longest_path:
            return None, None
        cross_node_path = list()
        for name in longest_path:
            if name in self.cross_node_map:
                cross_node_path.append(name)
        top_end = None
        mid_index = 0
        # search split_node from cross_node_list firstly
        # NOTE(review): this loop has no break, so the longest_path pass
        # always overwrites the cross-node pass — confirm a break after a
        # successful first pass was intended.
        for node_path in cross_node_path, longest_path:
            mid_index = int(len(node_path) / 2) - 1
            while mid_index > 0:
                node_name = node_path[mid_index]
                top_end = self.valid_node_map.get(node_name)
                if CommonInfo.is_skip_tf_op(top_end.type):
                    mid_index -= 1
                else:
                    break
        if mid_index == 0:
            # NOTE(review): assumes longest_path has at least two entries;
            # a single-node path would raise IndexError on longest_path[1].
            top_end = self.valid_node_map.get(longest_path[0])
            bottom_start = self.valid_node_map.get(longest_path[1])
        else:
            bottom_start = top_end
        return top_end, bottom_start
    def split_graph(self, top_end, bottom_start):
        ''' split model into two parts around the chosen split point
        '''
        # search from high-level to low-level, to generate top_graph
        top_model = self.gen_simple_model(top_end, self.models_name[0], True)
        # search from low-level to high-level, to generate bottom_graph
        bottom_model = self.gen_simple_model(bottom_start, self.models_name[1],
                                             False)
        # Whatever neither walk reached still has to be attached somewhere.
        remaining_node = set()
        for name in self.complete_nodes:
            remaining_node.add(name)
        remaining_node.difference_update(top_model.nodes)
        remaining_node.difference_update(bottom_model.nodes)
        self.extend_simple_model(remaining_node, top_model)
        self.extend_simple_model(remaining_node, bottom_model)
        if remaining_node:
            print('[ERROR] there are some unattached nodes. \
Please check graph builder.')
            print(remaining_node)
        return top_model, bottom_model
    def extend_simple_model(self, remaining_node, model):
        ''' attach leftover compute and weight nodes, then refresh I/O sets
        '''
        self.add_supple_node(remaining_node, model)
        model.count_valid_node_num(self.valid_node_map)
        self.add_weight_node(remaining_node, model)
        model.gen_io_nodes(self.valid_node_map)
    def gen_simple_model(self, mid_node, model_name, is_top_model):
        ''' collect all nodes reachable from mid_node, walking parents for
        the top model and children for the bottom model
        '''
        model = SimpleModel()
        model.set_model_name(model_name)
        cache_nodes = set()
        cache_nodes.add(mid_node.name)
        while cache_nodes:
            tmp_cache = set()
            for name in cache_nodes:
                model.add_single_node(name)
                node = self.valid_node_map.get(name, self.invalid)
                if is_top_model:
                    node_list = node.parent
                else:
                    node_list = node.child
                for tmp_node in node_list:
                    if tmp_node not in model.nodes:
                        tmp_cache.add(tmp_node)
            cache_nodes = tmp_cache
        return model
    def add_supple_node(self, remaining_node, model):
        ''' attach leftover nodes that touch the model, one wave at a time
        '''
        while remaining_node:
            supple_node = set()
            for name in remaining_node:
                is_added = False
                node = self.valid_node_map.get(name, self.invalid)
                if node == self.invalid:
                    continue
                for parent in node.parent:
                    if parent in model.nodes or parent in supple_node:
                        supple_node.add(name)
                        is_added = True
                        break
                if is_added:
                    # Restart the scan so the freshly added node can pull
                    # in its own neighbours on the next wave.
                    break
                for child in node.child:
                    if child in model.nodes or child in supple_node:
                        supple_node.add(name)
                        break
            if not supple_node:
                break
            remaining_node.difference_update(supple_node)
            model.add_node_set(supple_node)
    def add_weight_node(self, remaining_node, model):
        ''' pull weight nodes feeding the model's nodes into the submodel
        '''
        next_node_set = model.nodes
        while remaining_node:
            cur_node_set = next_node_set.copy()
            next_node_set = set()
            for name in cur_node_set:
                # NOTE(review): if the name is missing, this yields the
                # 'invalid' str and node.parent would raise AttributeError.
                node = self.complete_nodes.get(name, self.invalid)
                for parent in node.parent:
                    if parent in remaining_node:
                        next_node_set.add(parent)
            if not next_node_set:
                break
            remaining_node.difference_update(next_node_set)
            model.add_node_set(next_node_set)
def split_graph(proto):
    """Split the original graph into a top and a bottom sub-model.

    Returns (top, bottom), or (None, None) when no split point exists.
    """
    graph_parser = Parser()
    graph_parser.parse_model(proto)
    top_end, bottom_start = graph_parser.get_middle_node()
    if not top_end or not bottom_start:
        return None, None
    print('split node: ' + top_end.name)
    top_model, bottom_model = graph_parser.split_graph(top_end, bottom_start)
    print('split finish...')
    return top_model, bottom_model
def get_worse_model(distance_dict, top_model, bottom_model):
    """Return (model, op_name) for the output op with the largest distance.

    *distance_dict* maps sanitized op names ('/' replaced by '_') to a
    sequence whose first entry is the distance. Returns (None, None) when
    no model output matches the worst op.
    """
    worst_name = ''
    worst_distance = 0.0
    for op_name in distance_dict:
        candidate_distance = distance_dict[op_name][0]
        if candidate_distance > worst_distance:
            worst_distance = candidate_distance
            worst_name = op_name
    for candidate in (top_model, bottom_model):
        for output in candidate.outputs:
            if worst_name == output.replace('/', '_'):
                return candidate, worst_name
    print('please check output nodes')
    return None, None
def get_model_nodes(model_path):
    """Return the names of all computational nodes in a frozen .pb model.

    Const, Placeholder, and weight-carrying Identity nodes (those with a
    '_class' attr) are excluded.
    """
    with tf.gfile.FastGFile(model_path, 'rb') as pb_f:
        graph_def = tf.compat.v1.GraphDef.FromString(pb_f.read())
    names = []
    for node in graph_def.node:
        op_type = node.op
        if op_type == 'Const' or op_type == 'Placeholder' \
                or (op_type == 'Identity' and '_class' in node.attr.keys()):
            continue
        names.append(node.name)
    return names
| StarcoderdataPython |
6678301 | <filename>donor/migrations/0012_auto_20201216_1729.py
# Generated by Django 3.1.4 on 2020-12-16 11:44
from django.db import migrations, models
import donor.models
class Migration(migrations.Migration):
    """Alter TestReport.report: unique ImageField stored per donor."""
    # Applies after the 0011 merge migration of the ``donor`` app.
    dependencies = [
        ("donor", "0011_merge_20201216_1728"),
    ]
    # Re-declare ``report`` so uploads land in the donor-specific directory
    # computed by TestReport.donor_directory_path, with a uniqueness constraint.
    operations = [
        migrations.AlterField(
            model_name="testreport",
            name="report",
            field=models.ImageField(
                unique=True, upload_to=donor.models.TestReport.donor_directory_path
            ),
        ),
    ]
| StarcoderdataPython |
9624484 | <filename>python/faceblurer.py
import cv2 as cv
import numpy as np

# Blur every detected face in the live webcam feed until 'q' is pressed.
cam = cv.VideoCapture(0)
face = cv.CascadeClassifier("haarcascade_frontalface_default.xml")
while True:
    _, frame = cam.read()
    frame = cv.flip(frame, 1)  # mirror for a natural "selfie" view
    # detectMultiScale returns an empty sequence when no face is found,
    # so the loop is simply a no-op then (the original's bare
    # ``except: pass`` hid real errors and is removed).
    for (bx, by, bw, bh) in face.detectMultiScale(frame):
        # Grow the box by 10% on each side so the blur covers the whole
        # face. Clamp at the image border: a negative slice start would
        # otherwise wrap around and blur the wrong region (bug fix).
        x0 = max(0, int(bx - bw * 0.10))
        y0 = max(0, int(by - bh * 0.10))
        x1 = int(bx + bw * 1.10)
        y1 = int(by + bh * 1.10)
        roi = frame[y0:y1, x0:x1]
        frame[y0:y1, x0:x1] = cv.blur(roi, (35, 35))
    cv.imshow('output', frame)
    if cv.waitKey(1) & 0xff == ord('q'):
        break
cam.release()
cv.destroyAllWindows() | StarcoderdataPython |
1798569 | <gh_stars>1-10
from src.main.managers.game.base_game_manager import BaseGameManager
from src.main.managers.conflict.automated_conflict_manager import AutomatedConflictManager
from src.main.managers.players.base_player_manager import BasePlayerManager
class AutomatedGameManager(BaseGameManager):
    """Game manager variant that resolves conflicts automatically."""

    def _build_player_manager(self):
        # Factory hook from BaseGameManager: use the plain player manager.
        return BasePlayerManager()

    def _build_conflict_manager(self, player_manager):
        # Factory hook from BaseGameManager: automated conflict resolution.
        return AutomatedConflictManager(player_manager)
| StarcoderdataPython |
3207453 | # coding=utf-8
from flask import Flask, json, render_template
from threading import Thread
from flask_socketio import SocketIO
from graphqlclient import GraphQLClient
import serial, time, serial.tools.list_ports, datetime, socket
app = Flask(__name__)
app.config['SECRET_KEY'] = 'SECRET!'
socketio = SocketIO(app)
uuid_last = ''
data = ''
connexion_genius = GraphQLClient('https://##.###.##/')
connexion_genius.inject_token('Bearer ####','Authorization')
REMOTE_SERVER = "##.###.##"
@app.route('/')
def index():
    """Serve the badge-reader landing page."""
    return render_template('index.html')
def is_connected():
    """Best-effort reachability probe of REMOTE_SERVER on TCP port 80.

    Returns True when a connection can be established, False on any
    resolution/connection failure. Kept deliberately broad: this is a
    connectivity hint, not an error path.
    """
    try:
        host = socket.gethostbyname(REMOTE_SERVER)
        # Close the probe connection (the original leaked one socket per
        # call) and catch only OS/network errors instead of a bare except.
        socket.create_connection((host, 80), 2).close()
        return True
    except OSError:
        return False
def getprofilewithbadge(badge):
    """Fetch the first/last name of the profile owning *badge* via GraphQL.

    NOTE(review): *badge* is interpolated directly into the query string;
    a crafted badge id could inject GraphQL — confirm badge values are
    trusted or switch to query variables.
    """
    tmp = connexion_genius.execute('''{
profiles(where:{badge:"''' + badge + '''"}){
firstName
lastName
}
}
''')
    return tmp
def sethello(badge):
    """Record a badge-in ("hello") event with the current local timestamp.

    NOTE(review): *badge* and the timestamp are spliced into the mutation
    string; prefer GraphQL variables if badge ids are not trusted.
    """
    tmp = connexion_genius.execute('''mutation{terminalHello(data:{badge:"''' + badge + '''",timeOfArrival:"''' + str(datetime.datetime.now().isoformat()) + '''"}){status}}''')
    return tmp
class SerialRead(Thread):
    """Background thread: read badge UUIDs from the Arduino serial link and
    push badge events to the GraphQL backend and SocketIO clients.

    NOTE(review): ``run`` and ``init_serial`` call each other on failure,
    so persistent serial errors grow the call stack without bound
    (eventual RecursionError). Also ``__init__`` only sets ``self.serial``
    when an Arduino port is found, while ``run`` assumes it exists.
    """
    global j  # NOTE(review): no-op at class scope; j is only used inside run()
    def __init__(self):
        Thread.__init__(self)
        ports = list(serial.tools.list_ports.comports())
        for p in ports:
            # p[1] is the human-readable port description.
            if "Arduino" in p[1] or "ttyACM0" in p[1]:
                print("Arduino detecte sur le port : ", p[0])
                self.serial = serial.Serial(str(p[0]), 9600, timeout=1)
                socketio.emit('Internet', {'internet': True})
    def init_serial(self):
        """Close and reopen the serial port, then re-enter the read loop."""
        ports = list(serial.tools.list_ports.comports())
        self.serial.close()
        for p in ports:
            if "Arduino" in p[1] or "ttyACM0" in p[1]:
                print("Arduino detecte sur le port : ", p[0])
                self.serial = serial.Serial(str(p[0]), 9600, timeout=1)
                socketio.emit('Internet', {'internet': True})
        self.run()
    def run(self):
        """Main loop: poll the serial line, parse the JSON payload, and
        dispatch 'CardFound' / 'Internet' SocketIO events."""
        global uuid_last
        while True:
            try:
                if self.serial is not None:
                    data = self.serial.readline().strip(b'\n\r')
                    try:
                        if is_connected():
                            j = json.loads(data.decode('UTF-8'))
                            socketio.emit('Internet', {'internet': True})
                            # Only badges of the expected family are handled.
                            if "ESTIAM" in j['uuid']:
                                # Debounce: ignore the same badge twice in a row.
                                if uuid_last != j['uuid']:
                                    uuid_last = j['uuid']
                                    try:
                                        reponse = json.loads(sethello(uuid_last))
                                        try:
                                            # An 'errors' key means the mutation failed.
                                            if len(reponse['errors']) > 0:
                                                socketio.emit('CardFound', {'error':True,'user': None, 'late':False})
                                        except:
                                            # No 'errors' key: branch on the mutation status.
                                            if reponse['data']['terminalHello']['status'] == "OK":
                                                profile = json.loads(getprofilewithbadge(uuid_last))
                                                socketio.emit('CardFound', {'error':False,'user': {'firstName': profile['data']['profiles'][0]['firstName'],'lastName': profile['data']['profiles'][0]['lastName'],'late': None}, 'late':False})
                                            if reponse['data']['terminalHello']['status'] == "ALREADYBADGED":
                                                profile = json.loads(getprofilewithbadge(uuid_last))
                                                socketio.emit('CardFound', {'error':False,'user': {'firstName': profile['data']['profiles'][0]['firstName'],'lastName': profile['data']['profiles'][0]['lastName'],'late': None}, 'late':False})
                                            if reponse['data']['terminalHello']['status'] == "NO_DATE":
                                                profile = json.loads(getprofilewithbadge(uuid_last))
                                                socketio.emit('CardFound', {'error':False,'user': {'firstName': profile['data']['profiles'][0]['firstName'],'lastName': profile['data']['profiles'][0]['lastName'],'late': None}, 'late':False})
                                            if reponse['data']['terminalHello']['status'] == "UNKNOWN_CARD":
                                                socketio.emit('CardFound', {'error':True,'user':False,'late':False})
                                            if reponse['data']['terminalHello']['status'] == "FAILED_SYS_ERROR":
                                                socketio.emit('CardFound', {'error': True, 'user': False, 'late': False})
                                    except:
                                        continue
                        else:
                            socketio.emit('Internet', {'internet': False})
                    except:
                        continue
            except:
                socketio.emit('Internet', {'internet': False})
                print("La liaison serie ne peut etre etablie")
                time.sleep(1)
                self.init_serial()
    def first(self):
        """Synchronously enter the read loop (bypasses Thread.start)."""
        self.run()
if __name__ == '__main__':
    # Start the serial-reader thread, then serve SocketIO/Flask on :8000.
    ThreadSerial = SerialRead()
    ThreadSerial.start()
    socketio.run(app,host='0.0.0.0',port=8000)
| StarcoderdataPython |
9670455 | <reponame>mcvine/mcvine<gh_stars>1-10
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# <NAME>
# California Institute of Technology
# (C) 2007 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from . import units
# Shorthand unit aliases taken from the shared units registry.
meV = units.energy.meV
angstrom = units.length.angstrom
def sansspheremodel_kernel(*args, **kwds):
    """Factory for a SANSSphereModelKernel; arguments are forwarded verbatim.

    The kernel class is imported lazily, inside the function.
    """
    from .SANSSphereModelKernel import SANSSphereModelKernel
    return SANSSphereModelKernel(*args, **kwds)
from . import ComputationEngineRendererExtension
# make bindings available
def _import_bindings():
    """Import the bindings submodule for its side effects."""
    from . import bindings
    return
_import_bindings()
# version
__id__ = "$Id$"
# End of file
| StarcoderdataPython |
8166293 | <gh_stars>1-10
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select
from sqlalchemy.engine import Result
from src.schema import user as user_schema
from src.models import model
async def create_user(db: AsyncSession, form_data: user_schema.UserCreate) -> model.User:
    """Persist a new user built from *form_data* and return it.

    Accounts are activated immediately (no confirmation step here).
    """
    user = model.User(**form_data.dict())
    user.activated = True
    db.add(user)
    await db.commit()
    # Re-load server-generated fields (id, defaults) into the instance.
    await db.refresh(user)
    return user
async def get_user(db: AsyncSession, username: str):
    """Return the user with *username*, or None when no row matches."""
    stmt = select(model.User).where(model.User.username == username)
    result: Result = await db.execute(stmt)
    return result.scalar()
| StarcoderdataPython |
1646081 | <reponame>almoratalla/mimo-python-projects<gh_stars>0
class Employee:
    """Base employee with a name and a job title."""

    def __init__(self, name, job):
        self.name = name
        self.job = job

    def tasks(self):
        """Print a generic task header; subclasses override with specifics."""
        print("Tasks are:")
class Manager(Employee):
    """An employee who additionally oversees a list of staff."""

    def __init__(self, name, job, staff):
        super().__init__(name, job)
        self.staff = staff

    def tasks(self):
        """Print the staff this manager oversees."""
        print("Oversees:")
        print(self.staff)
class Associate(Employee):
def tasks(self):
print("Take orders from manager") | StarcoderdataPython |
107508 | <gh_stars>0
import serial
import serial.tools.list_ports as spools
ports = spools.comports()
from flask import Flask,request
from flask import render_template, send_from_directory
import atexit
import time
import requests as req
from subprocess import call
from colorthief import ColorThief
import numpy as np
import random
# creates a Flask application, named app
app = Flask(__name__)
import glob
# connect to correct serial
sp = ""
for port, desc, hwid in sorted(ports):
    # Pick the port whose description identifies the Circuit Playground board.
    if desc.find("CircuitPlayground Express") > -1:
        sp = port
ser = serial.Serial(sp,9600)
time.sleep(.1)  # give the board a moment to settle after opening the port
def read():
    """Read all buffered bytes from the board, reopening the port once on failure.

    Bug fix: without ``global ser`` the assignment in the retry path made
    ``ser`` function-local, so the first ``ser.read_all()`` raised
    UnboundLocalError on every call and the port was reopened each time.
    The bare except is narrowed to OSError (pySerial errors derive from it).
    """
    global ser
    try:
        return ser.read_all()
    except OSError:
        ser = serial.Serial(sp, 9600)
        time.sleep(.1)
        return ser.read_all()
def write(inp):
    """Send *inp* as an ASCII CRLF-terminated line to the board.

    On a serial failure the port is reopened once, the payload re-sent,
    and the board's response echoed. Bug fix: the original lacked
    ``global ser``, so the reopen assignment shadowed the module-level
    port and the first write always failed with UnboundLocalError.
    """
    global ser
    payload = bytearray(str(inp) + "\r\n", 'ascii')
    try:
        ser.write(payload)
    except OSError:
        ser = serial.Serial(sp, 9600)
        ser.write(payload)
        print(read())
# a route where we will display a welcome message via an HTML template
@app.route("/")
def hello():
    """Serve the control page straight from index.html."""
    return open("index.html").read()
# Custom static data
@app.route('/static/<path:filename>')
def custom_static(filename):
    """Serve files from the local ``static/`` directory."""
    return send_from_directory('static/', filename)
# thanks stack https://stackoverflow.com/a/14032302/565514
@app.route('/lights/',defaults={'val': None}, methods=["GET","POST"])
@app.route('/lights/<val>', methods=["GET","POST"])
def light_post(val):
    """Set the LED color.

    POST: forwards the 'light' form field to the board.
    GET:  takes "r_g_b" from the URL ("rand" picks random channels,
          missing value means off) and forwards "r,g,b".
    """
    if request.method == "POST":
        value = request.form['light'].strip() + "\n"
        print(value)
        write(value)
        print(read())
    elif request.method == "GET":
        value = "0,0,0" if val is None else val.replace("_", ",")
        if value == "rand":
            value = "%i,%i,%i" % (jj(), jj(), jj())
        write(value)
    return value
def jj():
    """Return 0 or 255: a coin flip produced by rounding random.random()."""
    return round(random.random()) * 255
@app.route("/reset/")
def reset():
    """Send the EOT control byte (0x04, Ctrl-D) to the board.

    NOTE(review): presumably triggers a soft reload on CircuitPython
    boards — confirm against the firmware in use.
    """
    write('\x04')
    return "reset"
@app.route("/match_screen/")
def screen():
    """Set the LEDs to the most saturated color of the current screen.

    Takes a macOS screenshot, downsamples it, extracts a palette, and
    sends the palette entry with the largest channel spread as "r,g,b".

    Bug fixes vs. the original: the max-search wrote ``j = pm`` (a no-op)
    instead of updating the running maximum, and it sent ``np.diff`` of a
    color (two numbers) to the board instead of the color itself (three).
    """
    call(["screencapture", '-x', "screenshot.png"])
    call(["sips", "-Z", "320", "screenshot.png"])
    print("called")
    palette = ColorThief('screenshot.png').get_palette(quality=10)
    print(palette)
    best = palette[0]
    best_spread = -1
    for color in palette:
        spread = int(np.max(color)) - int(np.min(color))
        if spread > best_spread:
            best_spread = spread
            best = color
    col = str(tuple(int(v) for v in best)).replace("(", "").replace(")", "")
    write(col)
    return col
# run the application
if __name__ == "__main__":
    # Debug server only; not suitable for production deployments.
    app.run(debug=True)
| StarcoderdataPython |
8073388 | import ast
import collections
import json
import logging
from logging.handlers import RotatingFileHandler
from os import sys, path
from flasgger import Swagger
from flask import Flask, request
from flask.json import jsonify
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from lib.wss.wss import WSS
app = Flask(__name__)
Swagger(app)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('werkzeug')
logger.setLevel(logging.DEBUG)
# In-memory store of configured operations, keyed by WaveShaper id (str).
operations = collections.OrderedDict()
@app.route('/api/wss', methods=['POST'])
def wss_configuration():
    """
    WaveShaper configuration
    ---
    post:
      description: Sets the configuration file, central wavelength, bandwidth and attenuation/phase per port of a WaveShaper.
      consumes:
        - application/json
      produces:
        - application/json
      parameters:
        - name: operations
          in: body
          description: operations to be configured on the WaveShaper
          example: {'wss_id': 1, 'operations': [
            {'port_in': 1, 'port_out': 1, 'lambda0': 1550.52, 'att': 0.0, 'phase': 0.0, 'bw': 112.5}]}
          required: true
      responses:
        200:
          description: Successful configuration
        400:
          description: Invalid input operations
    """
    if request.method == 'POST':
        params = request.json
        wss_id = params['wss_id']
        # Round-trip through JSON to normalise the payload into plain
        # Python literals before evaluating.
        ops = ast.literal_eval(json.dumps(params['operations']))
        if len(ops) != 0:
            logger.debug("WaveShaper %s configuration started" % str(wss_id))
            try:
                # Device dimensions are the highest port numbers referenced.
                n = n_max(ops, 'port_in')
                m = n_max(ops, 'port_out')
                wss = WSS(wss_id, n, m)
                wss.configuration(ops)
                # Adding new operation (append when the id is already known).
                if str(wss_id) not in operations.keys():
                    operations[str(wss_id)] = ops
                else:
                    operations[str(wss_id)] += ops
                msg = "WaveShaper %s was successfully configured" % str(wss_id)
                logger.debug(msg)
                return jsonify(msg, 200)
            except Exception as e:
                logger.error("WaveShaper {} wasn't successfully configured. Error: {}".format(str(wss_id), e))
                raise e
        else:
            return jsonify("The parameters sent are not correct", 400)
@app.route('/api/wss', methods=['GET'])
def wss_operations():
    """
    WaveShaper operations
    ---
    get:
      description: Get multiple operations configured on the WaveShapers
      produces:
        - application/json
      parameters:
        - name: operations
          in: query
          type: dict
          description: operations configured on the WaveShapers
      responses:
        200:
          description: Successful operation
          schema:
            type: dict
            example: {'1': [{'port_in': 1, 'port_out': 1, 'lambda0': 1550.52, 'att': 0.0, 'phase': 0.0, 'bw': 112.5}]}
        404:
          description: Operations not found
    """
    if request.method == 'GET':
        if len(operations) != 0:  # If exists operations
            return jsonify(operations)
        else:
            return jsonify("Not exists operations", 404)
@app.route('/api/wss/<wss_id>', methods=['GET'])
def wss_getOperationsByID(wss_id):
    """
    WaveShaper operations by ID
    ---
    get:
      description: Returns operations configured on a WaveShaper specified by id
      produces:
        - application/json
      parameters:
        - name: wss_id
          in: path
          type: integer
          description: id of the WaveShaper
          required: true
      responses:
        200:
          description: Successful operation
          schema:
            type: list
            example: ['1': [{'port_in': 1, 'port_out': 1, 'lambda0': 1550.52, 'att': 0.0, 'phase': 0.0, 'bw': 112.5}]]
        400:
          description: Invalid ID supplied
        404:
          description: Operations not found
    """
    wss_id = str(wss_id)
    msg_not_exists_operations = "Not exists operations on the WaveShaper %s." % wss_id
    msg_not_exists = "Not exists operations"
    if request.method == 'GET':
        if len(operations) != 0:  # If exists operations
            # Bug fix: indexing with an unknown id used to raise KeyError
            # (HTTP 500); use a membership test and return 400 instead.
            if wss_id in operations:
                return jsonify(wss_id, operations[wss_id])
            else:
                return jsonify(msg_not_exists_operations, 400)
        else:
            return jsonify(msg_not_exists, 404)
@app.route('/api/wss/<wss_id>', methods=['DELETE'])
def wss_deleteOperationsByID(wss_id):
    """
    WaveShaper operations by ID
    ---
    delete:
      description: Delete operations configured on a WaveShaper specified by id
      produces:
        - application/json
      parameters:
        - name: wss_id
          in: path
          type: integer
          description: id of the WaveShaper
          example: 1
          required: true
      responses:
        200:
          description: Successful operation
        400:
          description: Invalid ID supplied
        404:
          description: Operations not found
    """
    wss_id = str(wss_id)
    msg_not_exists_operations = "Not exists operations on the WaveShaper %s." % wss_id
    msg_not_exists = "Not exists operations"
    if request.method == 'DELETE':
        if len(operations) != 0:  # If exists operations
            # Bug fix: indexing with an unknown id used to raise KeyError
            # (HTTP 500); use a membership test and return 400 instead.
            if wss_id in operations:
                del operations[wss_id]
                return jsonify("Operations deleted for WaveShaper %s" % wss_id, 200)
            else:
                return jsonify(msg_not_exists_operations, 400)
        else:
            return jsonify(msg_not_exists, 404)
def n_max(ops, key_func):
    """
    Return the maximum element of ops according to key_func.

    The result never drops below 0, so an empty list (or all-negative
    values) yields 0.

    :param ops: operations to configure the WaveShaper
    :type ops: list
    :param key_func: comparison key
    :type key_func: str
    :return: maximum element
    :rtype: int
    """
    maximum = 0
    for op in ops:
        maximum = max(maximum, op[key_func])
    return maximum
def define_logger():
    """
    Attach rotating-file and console handlers, sharing one format, to the logger.
    """
    handlers = [
        RotatingFileHandler('wss_server.log', maxBytes=10000000, backupCount=5),
        logging.StreamHandler(),
    ]
    # One Formatter instance shared by both handlers.
    formatter = logging.Formatter("[%(asctime)s] %(levelname)s in %(filename)s: %(message)s")
    for handler in handlers:
        handler.setFormatter(formatter)
        logger.addHandler(handler)
if __name__ == '__main__':
    # Configure logging handlers before serving on all interfaces.
    define_logger()
    app.run(host='0.0.0.0', port=5001, debug=True, threaded=False)
| StarcoderdataPython |
8127860 | from django.core.management.base import BaseCommand
from moderation.models import SimpleRight
class Command(BaseCommand):
    """Create the set of possible moderator rights in the database."""
    help = 'Creating simple rights for moderators'

    def create_simple_rights(self):
        """Insert each known right (key -> Russian description) unless a
        row with that key already exists; report each outcome to stdout."""
        rights = {
            0: 'Полные права',
            1: 'Возможность запретить комментирование',
            2: 'Возможность запретить создание топиков',
            3: 'Возможность запретить доступ к форуму',
            4: 'Возможность редактирования топиков',
            5: 'Возможность удаления топиков',
            6: 'Возможность удаления комментариев',
        }
        for k, v in rights.items():
            if not SimpleRight.objects.filter(key=k).exists():
                SimpleRight.objects.create(key=k, description=v)
                self.stdout.write('"{}" добавлено'.format(v))
            else:
                self.stdout.write('Запись с ключом {} уже существует'.format(k))

    def handle(self, *args, **options):
        """Entry point for ``manage.py`` invocation."""
        self.create_simple_rights()
        self.stdout.write('Завершено')
| StarcoderdataPython |
8072552 | <filename>privacy_encoder/__init__.py<gh_stars>1-10
from .models import *
from . import data
from . import callbacks
| StarcoderdataPython |
1774630 | import pytest
from dataclasses import dataclass
from typing import Optional
from gemma import Course, PORT, Item, Fallback, Attr, Call, NullNameError
class TestBasicAPI:
    """Construction, str/repr, length, concatenation and replace of Course."""

    def test_init_basic(self, course_basic):
        assert str(course_basic) == "a/b/c/d"

    def test_repr(self, course_basic):
        assert repr(course_basic) == (
            "<Course: <Fallback: 'a'> / <Fallback: 'b'> / <Fallback: 'c'> "
            "/ <Fallback: 'd'>>"
        )

    def test_len(self, course_basic):
        assert len(course_basic) == 4

    def test_init_int(self):
        # Integer steps are wrapped as Item lookups.
        assert Course(1)[0] == Item(1)

    def test_concat_basic(self):
        course = PORT / "a" / "b" / "c" / "d"
        assert str(course) == "a/b/c/d"
        assert len(course) == 4

    def test_concat_multi_type(self):
        course = PORT / "a" / 1 / "c"
        assert str(course) == "a/[1]/c"
        assert isinstance(course[1], Item)

    def test_concat_courses(self):
        course = Course("a", "b") / Course("c", "d")
        assert str(course) == "a/b/c/d"
        assert len(course) == 4

    def test_concat_course_str(self):
        # A "/"-separated string is split into steps; "[b]" becomes an Item.
        course = PORT / "a/[b]/c/d"
        assert len(course) == 4
        assert isinstance(course[1], Item)

    def test_replace_single(self):
        course = PORT / 0
        assert course.replace(0, 1) == PORT / 1

    @pytest.mark.parametrize(
        "index, value, answer",
        [
            (1, "key", PORT / 0 / "key" / 2 / 3),
            (0, "key", PORT / "key" / 1 / 2 / 3),
            (3, "key", PORT / 0 / 1 / 2 / "key"),
            (-1, "key", PORT / 0 / 1 / 2 / "key"),
            (-2, "key", PORT / 0 / 1 / "key" / 3),
            (slice(1, 3), "key", PORT / 0 / "key" / 3),
            (slice(1, 3), PORT / "key" / "sub-key", PORT / 0 / "key" / "sub-key" / 3),
            (slice(1, None), PORT / "key" / "sub-key", PORT / 0 / "key" / "sub-key"),
            (
                slice(None, 2),
                PORT / "key" / "sub-key",
                PORT / "key" / "sub-key" / 2 / 3,
            ),
        ],
    )
    def test_replace(self, index, value, answer):
        # replace() accepts both single indices and slices; slices may
        # splice in multi-step courses.
        course = PORT / 0 / 1 / 2 / 3
        result = course.replace(index, value)
        print(f"resulting course {result}")
        assert result == answer
class TestEqualityContains:
    """Equality, membership, prefix/suffix checks, parent and end_point."""

    def test_course_eq_course(self):
        # Fallback compares equal to any step type with the same key.
        assert PORT / Fallback("a") / Item("b") / Call("c") == PORT / "a" / "b" / "c"

    def test_course_ne_course(self):
        assert PORT / Fallback("a") / Item("b") / Call("c") != PORT / "a" / "b()" / "c"

    def test_course_eq_str(self):
        assert Course("a", "b", "c", "d") == "a/b/c/d"

    def test_contains(self, course_basic):
        assert Fallback("a") in course_basic
        assert Fallback("b") in course_basic
        assert Fallback("c") in course_basic
        assert Fallback("d") in course_basic
        assert Item("a") in course_basic
        assert Attr("a") in course_basic
        assert Call("a") in course_basic
        assert Fallback("e") not in course_basic

    def test_contains_types(self, course_types):
        # Typed steps only match their own type (or Fallback).
        assert Fallback("a") in course_types
        assert Attr("b") in course_types
        assert Item("c") in course_types
        assert Call("d") in course_types
        assert Fallback("b") in course_types
        assert Fallback("c") in course_types
        assert Fallback("d") in course_types
        assert Item("b") not in course_types
        assert Attr("c") not in course_types
        assert Item("d") not in course_types

    def test_contains_slice(self, course_basic):
        # Sub-courses must be contiguous to count as contained.
        assert Course("a", "b") in course_basic
        assert Course("b", "c") in course_basic
        assert Course("c", "d") in course_basic
        assert Course("b", "c", "d") in course_basic
        assert Course("a", "b", "c", "d") in course_basic
        assert Course("a", "c") not in course_basic
        assert Course("d", "e") not in course_basic

    def test_ends_with(self, course_basic):
        end_course = Course("c", "d")
        end_course_single = Course("d")
        assert course_basic.ends_with(Fallback("d"))
        assert course_basic.ends_with(end_course)
        assert course_basic.ends_with(end_course_single)

    def test_ends_with_types(self, course_types):
        end_course = Course(Item("c"), Call("d"))
        end_course_single = Course(Call("d"))
        assert course_types.ends_with(Fallback("d"))
        assert course_types.ends_with(end_course)
        assert course_types.ends_with(end_course_single)

    def test_not_ends_with_values(self, course_basic):
        end_course = Course("b", "c")
        end_course_single = Course("e")
        assert not course_basic.ends_with(Fallback("e"))
        assert not course_basic.ends_with(end_course)
        assert not course_basic.ends_with(end_course_single)

    def test_not_ends_with_types(self, course_types):
        end_course = Course(Item("c"), Item("d"))
        end_course_single = Course(Item("d"))
        assert not course_types.ends_with(Item("d"))
        assert not course_types.ends_with(end_course)
        assert not course_types.ends_with(end_course_single)

    def test_starts_with(self, course_basic):
        start_course = Course("a", "b")
        start_course_single = Course("a")
        assert course_basic.starts_with(Fallback("a"))
        assert course_basic.starts_with(start_course)
        assert course_basic.starts_with(start_course_single)

    def test_starts_with_types(self):
        course_types = Course(Item("a"), Attr("b"), Item("c"), Call("d"))
        start_course = Course(Item("a"), Attr("b"))
        start_course_single = Course(Item("a"))
        assert course_types.starts_with(Item("a"))
        assert course_types.starts_with(start_course)
        assert course_types.starts_with(start_course_single)

    def test_parent(self, course_basic):
        assert course_basic.parent == Course("a", "b", "c")

    def test_end_point(self, course_basic):
        assert course_basic.end_point == Fallback("d")
class TestOperations:
    """fetch/place operations, defaults, and factory-created intermediates."""

    def test_fetch(self, data_structure_1):
        course = PORT / "list_data" / -1 / "two dict"
        assert course.fetch(data_structure_1) == 2

    def test_fetch_keyword(self, data_structure_1):
        course = PORT / "list_data" / -1 / "two dict"
        assert course.fetch(target=data_structure_1) == 2

    def test_fetch_default(self, data_structure_1):
        # Missing key raises unless an explicit default is supplied.
        course = PORT / "list_data" / -1 / "three dict"
        with pytest.raises(NullNameError):
            course.fetch(data_structure_1)
        assert course.fetch(data_structure_1, default=3) == 3

    def test_place(self, data_structure_1):
        course = PORT / "list_data" / -1 / "two dict"
        course.place(data_structure_1, "changed value")
        assert data_structure_1.list_data[-1]["two dict"] == "changed value"

    def test_place_raises(self, data_structure_1):
        # An out-of-range index along the course surfaces as NullNameError.
        with pytest.raises(NullNameError):
            course = PORT / "list_data" / 100 / "two dict"
            course.place(data_structure_1, "changed value")

    def test_place_len_1(self):
        course = PORT / "one"
        target = dict()
        course.place(target, 1)
        assert target["one"] == 1

    def test_place_default_factory_list_in_dict(self):
        # Missing intermediates are created by the step's factory.
        data = {"a": {}}
        course = PORT / "a" / Item("list", factory=list) / 0
        course.place(data, "value")
        assert data == {"a": {"list": ["value"]}}

    def test_place_default_factory_data_class_optional_list(self):
        @dataclass
        class TestData:
            data_list: Optional[list] = None
        data = TestData()
        course = PORT / Attr("data_list", factory=list) / "append()"
        course.place(data, "value")
        assert data == TestData(["value"])

    def test_place_factory_does_not_replace_existing(self):
        # The factory only fires when the target is missing.
        data = {"nested": ["zero", "one", "two"]}
        course = PORT / "nested" / Item(0, factory=list)
        course.place(data, "yay!")
        assert data == {"nested": ["yay!", "one", "two"]}
5146584 | <gh_stars>0
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmdet.models.builder import HEADS
from mmdet.models.utils import build_linear_layer
from .atr_head import AttributeHead
@HEADS.register_module()
class ConvFCAtrHead(AttributeHead):
    r"""Attribute head built from a configurable conv stack followed by fcs.

    Layout::

        atr_conv -> atr_fcs ->
    """  # noqa: W605
    def __init__(self,
                 num_atr_convs=0,
                 num_atr_fcs=0,
                 conv_out_channels=256,
                 fc_out_channels=1024,
                 conv_cfg=None,
                 norm_cfg=None,
                 init_cfg=None,
                 *args,
                 **kwargs):
        super(ConvFCAtrHead, self).__init__(
            *args, init_cfg=init_cfg, **kwargs)
        # At least one conv or fc layer must be requested.
        assert (num_atr_convs + num_atr_fcs > 0)
        if not self.with_atr:
            assert num_atr_convs == 0 and num_atr_fcs == 0
        self.num_atr_convs = num_atr_convs
        self.num_atr_fcs = num_atr_fcs
        self.conv_out_channels = conv_out_channels
        self.fc_out_channels = fc_out_channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        # add atr specific branch
        self.atr_convs, self.atr_fcs, self.atr_last_dim = \
            self._add_conv_fc_branch(
                self.num_atr_convs, self.num_atr_fcs, self.in_channels)
        if not self.with_avg_pool:
            if self.num_atr_fcs == 0:
                # No fc flattens the feature map, so the conv output keeps
                # its spatial extent and the final dim scales with it.
                self.atr_last_dim *= self.roi_feat_area
        self.relu = nn.ReLU(inplace=True)
        # reconstruct fc_cls and fc_reg since input channels are changed
        if self.with_atr:
            if self.custom_atr_channels:
                atr_channels = self.loss_atr.get_atr_channels(
                    self.num_attributes)
            else:
                # presumably +1 for a background/none entry — confirm
                # against the attribute loss definition.
                atr_channels = self.num_attributes + 1
            self.fc_atr = build_linear_layer(
                self.atr_predictor_cfg,
                in_features=self.atr_last_dim,
                out_features=atr_channels)
        if init_cfg is None:
            self.init_cfg += [
                dict(
                    type='Xavier',
                    distribution='uniform',
                    override=[
                        dict(name='atr_fcs'),
                    ])
            ]
    def _add_conv_fc_branch(self,
                            num_branch_convs,
                            num_branch_fcs,
                            in_channels,
                            is_shared=False):
        """Add shared or separable branch.

        convs -> avg pool (optional) -> fcs

        Returns (conv ModuleList, fc ModuleList, output feature dim).
        """
        last_layer_dim = in_channels
        # add branch specific conv layers
        branch_convs = nn.ModuleList()
        if num_branch_convs > 0:
            for i in range(num_branch_convs):
                conv_in_channels = (
                    last_layer_dim if i == 0 else self.conv_out_channels)
                branch_convs.append(
                    ConvModule(
                        conv_in_channels,
                        self.conv_out_channels,
                        3,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg))
            last_layer_dim = self.conv_out_channels
        # add branch specific fc layers
        branch_fcs = nn.ModuleList()
        if num_branch_fcs > 0:
            # for shared branch, only consider self.with_avg_pool
            # for separated branches, also consider self.num_shared_fcs
            if not self.with_avg_pool:
                last_layer_dim *= self.roi_feat_area
            for i in range(num_branch_fcs):
                fc_in_channels = (
                    last_layer_dim if i == 0 else self.fc_out_channels)
                branch_fcs.append(
                    nn.Linear(fc_in_channels, self.fc_out_channels))
            last_layer_dim = self.fc_out_channels
        return branch_convs, branch_fcs, last_layer_dim
    def forward(self, x):
        """Run the attribute branch on RoI features; return scores or None."""
        # shared part
        # separate branches
        x_atr = x
        for conv in self.atr_convs:
            x_atr = conv(x_atr)
        if x_atr.dim() > 2:
            if self.with_avg_pool:
                x_atr = self.avg_pool(x_atr)
            x_atr = x_atr.flatten(1)
        for fc in self.atr_fcs:
            x_atr = self.relu(fc(x_atr))
        atr_score = self.fc_atr(x_atr) if self.with_atr else None
        return atr_score
@HEADS.register_module()
class Shared2FCAtrHead(ConvFCAtrHead):
    # NOTE(review): despite the "2FC" name this preset configures
    # num_atr_convs=2 and num_atr_fcs=0 -- confirm that is intentional
    # (the analogous mmdet Shared2FCBBoxHead uses two fc layers).
    def __init__(self, fc_out_channels=1024, *args, **kwargs):
        super(Shared2FCAtrHead, self).__init__(
            num_atr_convs=2,
            num_atr_fcs=0,
            fc_out_channels=fc_out_channels,
            *args,
            **kwargs)
@HEADS.register_module()
class Shared4Conv1FCAtrHead(ConvFCAtrHead):
    # Preset: four 3x3 convs followed by one fully-connected layer.
    def __init__(self, fc_out_channels=1024, *args, **kwargs):
        super(Shared4Conv1FCAtrHead, self).__init__(
            num_atr_convs=4,
            num_atr_fcs=1,
            fc_out_channels=fc_out_channels,
            *args,
            **kwargs)
| StarcoderdataPython |
9738703 | <reponame>kuraakhilesh8230/aries-cloudagent-python
"""Multiple IndyVdrLedger Manager."""
import asyncio
import concurrent.futures
import logging
import json
from collections import OrderedDict
from typing import Optional, Tuple, Mapping
from ...cache.base import BaseCache
from ...core.profile import Profile
from ...ledger.error import LedgerError
from ...wallet.crypto import did_is_self_certified
from ..indy_vdr import IndyVdrLedger
from ..merkel_validation.domain_txn_handler import (
prepare_for_state_read,
get_proof_nodes,
)
from ..merkel_validation.trie import SubTrie
from .base_manager import BaseMultipleLedgerManager, MultipleLedgerManagerError
LOGGER = logging.getLogger(__name__)
class MultiIndyVDRLedgerManager(BaseMultipleLedgerManager):
    """Multiple Indy VDR Ledger Manager.

    Resolves which of several configured Indy ledgers a DID lives on,
    preferring production ledgers and self-certified DIDs, and caches
    the resolution result.
    """
    def __init__(
        self,
        profile: Profile,
        # NOTE(review): mutable default arguments are shared across all
        # instances -- confirm callers always pass their own mappings.
        production_ledgers: OrderedDict = OrderedDict(),
        non_production_ledgers: OrderedDict = OrderedDict(),
        write_ledger_info: Optional[Tuple[str, IndyVdrLedger]] = None,
        cache_ttl: Optional[int] = None,
    ):
        """Initialize MultiIndyLedgerManager.
        Args:
            profile: The base profile for this manager
            production_ledgers: production IndyVDRLedger mapping
            non_production_ledgers: non_production IndyVDRLedger mapping
            write_ledger_info: (ledger_id, ledger) pair used for writes
            cache_ttl: Time in sec to persist did_ledger_id_resolver cache keys
        """
        self.profile = profile
        self.production_ledgers = production_ledgers
        self.non_production_ledgers = non_production_ledgers
        self.write_ledger_info = write_ledger_info
        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=5)
        self.cache_ttl = cache_ttl
    async def get_write_ledger(self) -> Optional[Tuple[str, IndyVdrLedger]]:
        """Return the write IndyVdrLedger instance."""
        return self.write_ledger_info
    async def get_prod_ledgers(self) -> Mapping:
        """Return production ledgers mapping."""
        return self.production_ledgers
    async def get_nonprod_ledgers(self) -> Mapping:
        """Return non_production ledgers mapping."""
        return self.non_production_ledgers
    async def _get_ledger_by_did(
        self,
        ledger_id: str,
        did: str,
    ) -> Optional[Tuple[str, IndyVdrLedger, bool]]:
        """Build and submit GET_NYM request and process response.
        Successful response return tuple with ledger_id, IndyVdrLedger instance
        and is_self_certified bool flag. Unsuccessful response return None.
        Args:
            ledger_id: provided ledger_id to retrieve IndyVdrLedger instance
                from production_ledgers or non_production_ledgers
            did: provided DID
        Return:
            (str, IndyVdrLedger, bool) or None
        """
        try:
            indy_vdr_ledger = None
            # ledger_id is looked up in production first, then non-production.
            if ledger_id in self.production_ledgers:
                indy_vdr_ledger = self.production_ledgers.get(ledger_id)
            else:
                indy_vdr_ledger = self.non_production_ledgers.get(ledger_id)
            async with indy_vdr_ledger:
                request = await indy_vdr_ledger.build_and_return_get_nym_request(
                    None, did
                )
                # Cap the ledger round-trip at 10 seconds.
                response_json = await asyncio.wait_for(
                    indy_vdr_ledger.submit_get_nym_request(request), 10
                )
                if isinstance(response_json, dict):
                    response = response_json
                else:
                    response = json.loads(response_json)
                # Node responses may or may not wrap the payload in "result".
                if "result" in response.keys():
                    data = response.get("result", {}).get("data")
                else:
                    data = response.get("data")
                if not data:
                    LOGGER.warning(f"Did {did} not posted to ledger {ledger_id}")
                    return None
                if isinstance(data, str):
                    data = json.loads(data)
                # Validate the merkle state proof before trusting the reply.
                if not await SubTrie.verify_spv_proof(
                    expected_value=prepare_for_state_read(response),
                    proof_nodes=get_proof_nodes(response),
                ):
                    LOGGER.warning(
                        f"State Proof validation failed for Did {did} "
                        f"and ledger {ledger_id}"
                    )
                    return None
                if did_is_self_certified(did, data.get("verkey")):
                    return (ledger_id, indy_vdr_ledger, True)
                return (ledger_id, indy_vdr_ledger, False)
        except asyncio.TimeoutError:
            LOGGER.exception(
                f"get-nym request timedout for Did {did} and "
                f"ledger {ledger_id}, reply not received within 10 sec"
            )
            return None
        except LedgerError as err:
            LOGGER.error(
                "Exception when building and submitting get-nym request, "
                f"for Did {did} and ledger {ledger_id}, {err}"
            )
            return None
    async def lookup_did_in_configured_ledgers(
        self, did: str, cache_did: bool = True
    ) -> Tuple[str, IndyVdrLedger]:
        """Lookup given DID in configured ledgers in parallel.

        Preference order of the result: self-certified DIDs on production
        ledgers, then self-certified on non-production, then
        non-self-certified on production, then non-self-certified on
        non-production; within each bucket the configured ledger order wins.
        Raises MultipleLedgerManagerError when the DID is on no ledger.
        """
        self.cache = self.profile.inject_or(BaseCache)
        cache_key = f"did_ledger_id_resolver::{did}"
        # Fast path: a previously resolved ledger_id may be cached.
        if bool(cache_did and self.cache and await self.cache.get(cache_key)):
            cached_ledger_id = await self.cache.get(cache_key)
            if cached_ledger_id in self.production_ledgers:
                return (cached_ledger_id, self.production_ledgers.get(cached_ledger_id))
            elif cached_ledger_id in self.non_production_ledgers:
                return (
                    cached_ledger_id,
                    self.non_production_ledgers.get(cached_ledger_id),
                )
            else:
                raise MultipleLedgerManagerError(
                    f"cached ledger_id {cached_ledger_id} not found in either "
                    "production_ledgers or non_production_ledgers"
                )
        applicable_prod_ledgers = {"self_certified": {}, "non_self_certified": {}}
        applicable_non_prod_ledgers = {"self_certified": {}, "non_self_certified": {}}
        ledger_ids = list(self.production_ledgers.keys()) + list(
            self.non_production_ledgers.keys()
        )
        # NOTE(review): executor.submit on an async def returns coroutine
        # objects from the worker threads, which are then awaited serially
        # below -- confirm this actually queries the ledgers concurrently.
        coro_futures = {
            self.executor.submit(self._get_ledger_by_did, ledger_id, did): ledger_id
            for ledger_id in ledger_ids
        }
        for coro_future in concurrent.futures.as_completed(coro_futures):
            result = await coro_future.result()
            if result:
                applicable_ledger_id = result[0]
                applicable_ledger_inst = result[1]
                is_self_certified = result[2]
                # insert_key preserves the configured ledger ordering so the
                # sorted() calls below pick ledgers in configuration order.
                if applicable_ledger_id in self.production_ledgers:
                    insert_key = list(self.production_ledgers).index(
                        applicable_ledger_id
                    )
                    if is_self_certified:
                        applicable_prod_ledgers["self_certified"][insert_key] = (
                            applicable_ledger_id,
                            applicable_ledger_inst,
                        )
                    else:
                        applicable_prod_ledgers["non_self_certified"][insert_key] = (
                            applicable_ledger_id,
                            applicable_ledger_inst,
                        )
                else:
                    insert_key = list(self.non_production_ledgers).index(
                        applicable_ledger_id
                    )
                    if is_self_certified:
                        applicable_non_prod_ledgers["self_certified"][insert_key] = (
                            applicable_ledger_id,
                            applicable_ledger_inst,
                        )
                    else:
                        applicable_non_prod_ledgers["non_self_certified"][
                            insert_key
                        ] = (applicable_ledger_id, applicable_ledger_inst)
        applicable_prod_ledgers["self_certified"] = OrderedDict(
            sorted(applicable_prod_ledgers.get("self_certified").items())
        )
        applicable_prod_ledgers["non_self_certified"] = OrderedDict(
            sorted(applicable_prod_ledgers.get("non_self_certified").items())
        )
        applicable_non_prod_ledgers["self_certified"] = OrderedDict(
            sorted(applicable_non_prod_ledgers.get("self_certified").items())
        )
        applicable_non_prod_ledgers["non_self_certified"] = OrderedDict(
            sorted(applicable_non_prod_ledgers.get("non_self_certified").items())
        )
        # Pick the best bucket in preference order (see docstring).
        if len(applicable_prod_ledgers.get("self_certified")) > 0:
            successful_ledger_inst = list(
                applicable_prod_ledgers.get("self_certified").values()
            )[0]
            if cache_did and self.cache:
                await self.cache.set(
                    cache_key, successful_ledger_inst[0], self.cache_ttl
                )
            return successful_ledger_inst
        elif len(applicable_non_prod_ledgers.get("self_certified")) > 0:
            successful_ledger_inst = list(
                applicable_non_prod_ledgers.get("self_certified").values()
            )[0]
            if cache_did and self.cache:
                await self.cache.set(
                    cache_key, successful_ledger_inst[0], self.cache_ttl
                )
            return successful_ledger_inst
        elif len(applicable_prod_ledgers.get("non_self_certified")) > 0:
            successful_ledger_inst = list(
                applicable_prod_ledgers.get("non_self_certified").values()
            )[0]
            if cache_did and self.cache:
                await self.cache.set(
                    cache_key, successful_ledger_inst[0], self.cache_ttl
                )
            return successful_ledger_inst
        elif len(applicable_non_prod_ledgers.get("non_self_certified")) > 0:
            successful_ledger_inst = list(
                applicable_non_prod_ledgers.get("non_self_certified").values()
            )[0]
            if cache_did and self.cache:
                await self.cache.set(
                    cache_key, successful_ledger_inst[0], self.cache_ttl
                )
            return successful_ledger_inst
        else:
            raise MultipleLedgerManagerError(
                f"DID {did} not found in any of the ledgers total: "
                f"(production: {len(self.production_ledgers)}, "
                f"non_production: {len(self.non_production_ledgers)})"
            )
| StarcoderdataPython |
1926285 | import struct
from cStringIO import StringIO
from unicorn import *
from unicorn.arm64_const import *
import nxo64
def load_nxo_to_unicorn(uc, f, loadbase):
    """Map a parsed NXO binary (Python 2 code) into the Unicorn emulator.

    Applies AArch64 relocations against *loadbase* and writes the resulting
    image into *uc*'s memory.  *f* is an nxo64-parsed file object.
    """
    # Resolve symbol addresses: defined symbols are rebased, undefined are 0.
    for sym in f.symbols:
        if sym.shndx:
            sym.resolved = loadbase + sym.value
        else:
            sym.resolved = 0
    # Work on an in-memory copy of the raw binary so relocations can be
    # patched in before the single mem_write below.
    resultw = StringIO()
    f.binfile.seek(0)
    resultw.write(f.binfile.read_to_end())
    def write_qword(ea, val):
        # Patch a little-endian 64-bit value at virtual address ea.
        resultw.seek(ea - loadbase)
        resultw.write(struct.pack('<Q', val))
    for offset, r_type, sym, addend in f.relocations:
        ea = loadbase + offset
        if r_type == nxo64.R_AARCH64_RELATIVE:
            assert sym is None, 'R_AARCH64_RELATIVE with sym?'
            newval = (loadbase + addend)
            write_qword(ea, newval)
        elif r_type == nxo64.R_AARCH64_JUMP_SLOT or r_type == nxo64.R_AARCH64_GLOB_DAT:
            assert sym is not None
            assert addend == 0
            newval = sym.resolved
            write_qword(ea, newval)
        elif r_type == nxo64.R_AARCH64_ABS64:
            assert sym is not None
            newval = sym.resolved
            if addend != 0:
                #assert sym.shndx # huge mess if we do this on an extern
                newval += addend
            write_qword(ea, newval)
        else:
            # Unknown relocation types are reported and skipped.
            print 'TODO: r_type=0x%x sym=%r ea=%X addend=%X' % (r_type, sym, ea, addend)
            continue
    binary = resultw.getvalue()
    # Map enough pages (rounded up to 4 KiB) to cover both the image and bss.
    uc.mem_map(loadbase, (max(len(binary),f.bssend) + 0xFFF) & ~0xFFF)
    uc.mem_write(loadbase, binary)
def create_unicorn_arm64(): # enables float
    """Create an AArch64 Unicorn instance with floating point enabled.

    Runs a tiny stub at a scratch page that appears to read/modify the
    CPACR/FP control registers so FP/SIMD instructions don't trap --
    NOTE(review): confirm the exact system registers these encodings touch.
    """
    mu = Uc(UC_ARCH_ARM64, UC_MODE_ARM)
    addr = 0x1000
    mu.reg_write(UC_ARM64_REG_X0, 3 << 20)
    mu.mem_map(addr, 0x1000)
    # Pre-assembled AArch64 machine code; the final instruction is RET,
    # which is excluded from emulation (hence the -4 below).
    fpstartinstrs = '\x41\x10\x38\xd5\x00\x00\x01\xaa\x40\x10\x18\xd5\x40\x10\x38\xd5\xc0\x03\x5f\xd6'
    mu.mem_write(addr, fpstartinstrs)
    mu.emu_start(addr, addr+len(fpstartinstrs)-4)
    mu.mem_unmap(addr, 0x1000)
    return mu
| StarcoderdataPython |
1823204 | <reponame>felipebarraza6/emprende_escena
from .users import User, ProfileUser
from .courses import Course, Video, Resource, PreRequisite
from .contests import (QuestionCourse, AlternativeQuestion, ResultContest,
AnswerQuestion)
from .tests import (Test, QuestionTest, AlternativeQuestionTest,
AnswerTest, ResultTest)
| StarcoderdataPython |
178169 | <filename>apps/contents/urls.py<gh_stars>0
from rest_framework import routers
from .views import *
# Register the viewset with a DRF router; the router generates the standard
# list/detail URL patterns for the "contents" endpoint.
router = routers.DefaultRouter()
router.register('contents', ContentsViewSet, basename='contents')
urlpatterns = router.urls
| StarcoderdataPython |
3584515 | <reponame>m-wichmann/kodi_addons
# -*- coding: utf-8 -*-
from resources.lib import plugin
# Kodi add-on entry point: delegate to the plugin module's video-source handler.
plugin.run_video_source()
| StarcoderdataPython |
313070 | <gh_stars>1-10
""" add 2 number """
def add(x, y):
return x + y
""" substract y from x """
def substract(x, y):
return y - x | StarcoderdataPython |
208102 | <filename>tests/test_tools_pdf2txt.py
import os
from shutil import rmtree
from tempfile import mkdtemp
import filecmp
import tools.pdf2txt as pdf2txt
from helpers import absolute_sample_path
from tempfilepath import TemporaryFilePath
def run(sample_path, options=None):
    """Convert a sample PDF via tools.pdf2txt, writing to a throwaway file."""
    source = absolute_sample_path(sample_path)
    with TemporaryFilePath() as out_name:
        pieces = ['pdf2txt', '-o{}'.format(out_name)]
        if options:
            pieces.append(options)
        pieces.append(source)
        # Re-split on spaces and drop the program name, mirroring how the
        # command line would be tokenised.
        command = ' '.join(pieces)
        pdf2txt.main(command.split(' ')[1:])
class TestPdf2Txt():
    """Smoke tests: pdf2txt must process each sample PDF without raising."""
    def test_jo(self):
        run('jo.pdf')
    def test_simple1(self):
        run('simple1.pdf')
    def test_simple2(self):
        run('simple2.pdf')
    def test_simple3(self):
        run('simple3.pdf')
    def test_sample_one_byte_identity_encode(self):
        run('sampleOneByteIdentityEncode.pdf')
    def test_nonfree_175(self):
        """Regression test for:
        https://github.com/pdfminer/pdfminer.six/issues/65
        """
        run('nonfree/175.pdf')
    def test_nonfree_dmca(self):
        run('nonfree/dmca.pdf')
    def test_nonfree_f1040nr(self):
        run('nonfree/f1040nr.pdf')
    def test_nonfree_i1040nr(self):
        run('nonfree/i1040nr.pdf')
    def test_nonfree_kampo(self):
        run('nonfree/kampo.pdf')
    def test_nonfree_naacl06_shinyama(self):
        run('nonfree/naacl06-shinyama.pdf')
    def test_nlp2004slides(self):
        run('nonfree/nlp2004slides.pdf')
    def test_contrib_2b(self):
        run('contrib/2b.pdf', '-A -t xml')
    def test_contrib_issue_350(self):
        """Regression test for
        https://github.com/pdfminer/pdfminer.six/issues/350"""
        run('contrib/issue-00352-asw-oct96-p41.pdf')
    def test_scancode_patchelf(self):
        """Regression test for https://github.com/euske/pdfminer/issues/96"""
        run('scancode/patchelf.pdf')
    def test_contrib_hash_two_complement(self):
        """Check that unsigned integer is added correctly to encryption hash.
        See https://github.com/pdfminer/pdfminer.six/issues/186
        """
        run('contrib/issue-00352-hash-twos-complement.pdf')
    def test_contrib_excel(self):
        """Regression test for
        https://github.com/pdfminer/pdfminer.six/issues/369
        """
        run('contrib/issue-00369-excel.pdf', '-t html')
    def test_encryption_aes128(self):
        run('encryption/aes-128.pdf', '-P foo')
    def test_encryption_aes128m(self):
        run('encryption/aes-128-m.pdf', '-P foo')
    def test_encryption_aes256(self):
        run('encryption/aes-256.pdf', '-P foo')
    def test_encryption_aes256m(self):
        run('encryption/aes-256-m.pdf', '-P foo')
    def test_encryption_aes256_r6_user(self):
        run('encryption/aes-256-r6.pdf', '-P usersecret')
    def test_encryption_aes256_r6_owner(self):
        run('encryption/aes-256-r6.pdf', '-P ownersecret')
    def test_encryption_base(self):
        run('encryption/base.pdf', '-P foo')
    def test_encryption_rc4_40(self):
        run('encryption/rc4-40.pdf', '-P foo')
    def test_encryption_rc4_128(self):
        run('encryption/rc4-128.pdf', '-P foo')
class TestDumpImages:
    """Tests for the image-extraction (--output-dir) feature of pdf2txt."""
    @staticmethod
    def extract_images(input_file):
        """Run pdf2txt with --output-dir and return the extracted file names.

        The temporary output directory is removed before returning.
        """
        output_dir = mkdtemp()
        with TemporaryFilePath() as output_file_name:
            commands = ['-o', output_file_name, '--output-dir',
                        output_dir, input_file]
            pdf2txt.main(commands)
        image_files = os.listdir(output_dir)
        rmtree(output_dir)
        return image_files
    def test_nonfree_dmca(self):
        """Extract images of pdf containing bmp images
        Regression test for:
        https://github.com/pdfminer/pdfminer.six/issues/131
        """
        image_files = self.extract_images(
            absolute_sample_path('../samples/nonfree/dmca.pdf'))
        assert image_files[0].endswith('bmp')
    def test_nonfree_175(self):
        """Extract images of pdf containing jpg images"""
        self.extract_images(absolute_sample_path('../samples/nonfree/175.pdf'))
    def test_jbig2_image_export(self):
        """Extract images of pdf containing jbig2 images
        Feature test for: https://github.com/pdfminer/pdfminer.six/pull/46
        """
        input_file = absolute_sample_path(
            '../samples/contrib/pdf-with-jbig2.pdf')
        output_dir = mkdtemp()
        with TemporaryFilePath() as output_file_name:
            commands = ['-o', output_file_name, '--output-dir',
                        output_dir, input_file]
            pdf2txt.main(commands)
            image_files = os.listdir(output_dir)
            try:
                # The extracted image must be byte-identical to the fixture.
                assert image_files[0].endswith('.jb2')
                assert filecmp.cmp(output_dir + '/' + image_files[0],
                                   absolute_sample_path(
                                       '../samples/contrib/XIPLAYER0.jb2'))
            finally:
                rmtree(output_dir)
    def test_contrib_matplotlib(self):
        """Test a pdf with Type3 font"""
        run('contrib/matplotlib.pdf')
    def test_nonfree_cmp_itext_logo(self):
        """Test a pdf with Type3 font"""
        run('nonfree/cmp_itext_logo.pdf')
| StarcoderdataPython |
def notas(*num, sit=0):
    """Summarise a set of grades.

    :param num: grade values (at least one is required).
    :param sit: when truthy, include the overall situation
        ('Boa' for mean >= 7, otherwise 'Ruim') in the result.
    :return: dict with the number of grades, highest, lowest, mean and
        (optionally) the situation.
    """
    # sum()/len()/max()/min() replace the original manual accumulation loop.
    media = sum(num) / len(num)
    dicionario = {
        'numero de notas': len(num),
        'notamax': max(num),
        'notamin': min(num),
        'media': media,
    }
    # Bug fix: the original tested ``sit == True`` and ``sit == False``
    # separately, so any other truthy value (e.g. sit=1) fell through both
    # branches and returned an empty dict.  Any truthy value now works.
    if sit:
        dicionario['situação'] = 'Boa' if media >= 7 else 'Ruim'
    return dicionario
# Demo: print the summary for three sample grades.
print(notas(5,4,3))
| StarcoderdataPython |
9678068 | """This module provides convenient tools for the communication with
Mifare cards via the PN532.
Some knowledge of a Mifare card's layout and general access procedure
is needed to use this class effectively. Special care needs to be
taken when modifying trailer blocks because you may shut yourself
out of your card! Google "MF1S703x" for a good introduction to
Mifare cards.
A typical scenario would be:
card = Mifare()
card.SAMconfigure()
card.set_max_retries(MIFARE_SAFE_RETRIES)
uid = card.scan_field()
if uid:
address = card.mifare_address(0,1)
card.mifare_auth_a(address,MIFARE_FACTORY_KEY)
data = card.mifare_read(address)
card.in_deselect() # In case you want to authorize a different sector.
"""
import py532lib.i2c as i2c
from py532lib.frame import Pn532Frame as Pn532Frame
from py532lib.constants import *
import logging
import math
# Mifare Classic command bytes (sent via InDataExchange).
MIFARE_COMMAND_AUTH_A = 0x60
MIFARE_COMMAND_AUTH_B = 0x61
MIFARE_COMMAND_READ = 0x30
MIFARE_COMMAND_WRITE_16 = 0xA0  # 16-byte write (Mifare Standard/Classic)
MIFARE_COMMAND_WRITE_4 = 0xA2   # 4-byte write (Mifare Ultralight)
# NOTE(review): this literal appears dataset-redacted ("<KEY>"); the Mifare
# factory default key is six 0xFF bytes -- confirm before use.
MIFARE_FACTORY_KEY = b"\<KEY>"
MIFARE_WAIT_FOR_ENTRY = 0xFF # MxRtyPassiveActivation value: wait until card enters field.
MIFARE_SAFE_RETRIES = 5 # This number of retries seems to detect most cards properly.
class Mifare(i2c.Pn532_i2c):
    """This class allows for the communication with Mifare cards via
    the PN532.
    Compared to its superclass, this class provides a bit more
    sophisticated tools such as reading the contents of a Mifare
    card or writing to them, access restrictions, and key management.
    """
    def __init__(self):
        """Set up and configure PN532."""
        i2c.Pn532_i2c.__init__(self)
        # UID of the currently activated card, or False when none is active.
        self._uid = False
    def set_max_retries(self,mx_rty_passive_activation):
        """Configure the PN532 for the number of retries attempted
        during the InListPassiveTarget operation (set to
        MIFARE_SAFE_RETRIES for a safe one-time check, set to
        MIFARE_WAIT_FOR_ENTRY so it waits until entry of a card).
        """
        # We set MxRtyPassiveActivation to 5 because it turns out that one
        # try sometimes does not detect the card properly.
        frame = Pn532Frame(frame_type=PN532_FRAME_TYPE_DATA,
                data=bytearray([PN532_COMMAND_RFCONFIGURATION,
                        PN532_RFCONFIGURATION_CFGITEM_MAXRETRIES,
                        0xFF,0x01,mx_rty_passive_activation]))
        self.send_command_check_ack(frame)
        self.read_response()
    def scan_field(self):
        """Scans the PN532's field for a Mifare card using the
        InListPassiveTarget operation.
        Returns the card's UID (a bytearray) if a card was in the field
        or False if no card was in the field. Only one card is
        detected at a time (the PN532 can handle two but this is not
        implemented here). False is never returned if the number of
        retries (see set_max_retries()) is set to MIFARE_WAIT_FOR_ENTRY.
        """
        frame = Pn532Frame(frame_type=PN532_FRAME_TYPE_DATA,
                data=bytearray([PN532_COMMAND_INLISTPASSIVETARGET, 0x01, 0x00]))
        self.send_command_check_ack(frame)
        response = self.read_response().get_data()
        # Response layout: [cmd, NbTg, ...]; NbTg is the target count.
        target_count = response[1]
        if not target_count:
            self._uid = False
            return False
        # The UID length precedes the UID bytes in the target data block.
        uid_length = response[6]
        self._uid = response[7:7 + uid_length]
        return self._uid
    def in_data_exchange(self,data):
        """Sends a (Mifare) command to the currently active target.
        The "data" parameter contains the command data as a bytearray.
        Returns the data returned by the command (as a bytearray).
        Raises an IOError if the command failed.
        """
        logging.debug("InDataExchange sending: " + " ".join("{0:02X}".format(k) for k in data))
        logging.debug(data)
        frame = Pn532Frame(frame_type=PN532_FRAME_TYPE_DATA, data=bytearray([PN532_COMMAND_INDATAEXCHANGE, 0x01]) + data)
        self.send_command_check_ack(frame)
        response_frame = self.read_response()
        if response_frame.get_frame_type() == PN532_FRAME_TYPE_ERROR:
            raise IOError("InDataExchange failed (error frame returned)")
        response = response_frame.get_data()
        logging.debug("InDataExchange response: " + " ".join("{0:02X}".format(k) for k in response))
        # Byte 1 is the status byte; non-zero means the exchange failed.
        if response[1] != 0x00:
            # Only the status byte was returned. There was an error.
            if response[1] == 0x14:
                raise IOError("Mifare authentication failed")
            else:
                raise IOError("InDataExchange returned error status: {0:#x}".format(response[1]))
        return response[2:]
    def in_deselect(self):
        """Deselects the current target."""
        logging.debug("InDeselect sending...")
        frame = Pn532Frame(frame_type=PN532_FRAME_TYPE_DATA, data=bytearray([PN532_COMMAND_INDESELECT, 0x01]))
        self.send_command_check_ack(frame)
        response_frame = self.read_response()
        if response_frame.get_frame_type() == PN532_FRAME_TYPE_ERROR:
            raise IOError("InDeselect failed (error frame returned)")
        response = response_frame.get_data()
        logging.debug("InDeselect response: " + " ".join("{0:02X}".format(k) for k in response))
        if response[1] != 0x00:
            # Only the status byte was returned. There was an error.
            raise IOError("InDataExchange returned error status: {0:#x}".format(response[1]))
    def mifare_address(self,sector,block):
        """Returns a one byte address for the given Mifare sector and block.

        Sectors 0-31 have 4 blocks each; sectors 32-39 (Mifare 4K) have 16.
        """
        if sector < 32:
            if sector < 0 or block > 3 or block < 0:
                raise IndexError("Invalid sector / block: {0} / {1}".format(sector,block))
            return sector * 4 + block
        else:
            if sector > 39 or block < 0 or block > 15:
                raise IndexError("Invalid sector / block: {0} / {1}".format(sector,block))
            return 32 * 4 + (sector - 32) * 16 + block
    def mifare_sector_block(self,address):
        """Returns a tuple (sector,block) for the given address."""
        if address > 255 or address < 0:
            raise IndexError("Invalid Mifare block address: {0}".format(address))
        if address < 128:
            # 4-block sectors: divide/modulo by 4 via bit ops.
            return (address >> 2,address & 3)
        else:
            # 16-block sectors start at absolute address 128.
            return (32 + ((address - 128) >> 4),(address - 128) & 15)
    def mifare_auth_a(self,address,key_a):
        """Authenticate the Mifare card with key A.
        The "key_a" parameter is a bytearray that contains key A.
        You may specify an address directly or use the mifare_address()
        function to calculate it. Raises an IOError if authentication failed.
        """
        if self._uid == False:
            raise RuntimeError("No Mifare card currently activated.")
        if len(self._uid) == 4:
            uid = self._uid
        elif len(self._uid) == 7: # 10-byte UID cards don't exist yet.
            uid = self._uid[3:7] # Sequence 1, keep it simple.
        self.in_data_exchange(bytearray([MIFARE_COMMAND_AUTH_A,address]) + key_a + uid)
    def mifare_auth_b(self,address,key_b):
        """Authenticate the Mifare card with key B.
        The "key_a" parameter is a bytearray that contains key B.
        You may specify an address directly or use the mifare_address()
        function to calculate it. Raises an IOError if authentication failed.
        """
        if self._uid == False:
            raise RuntimeError("No Mifare card currently activated.")
        if len(self._uid) == 4:
            uid = self._uid
        elif len(self._uid) == 7: # 10-byte UID cards don't exist yet.
            uid = self._uid[3:7] # Sequence 1, keep it simple.
        self.in_data_exchange(bytearray([MIFARE_COMMAND_AUTH_B,address]) + key_b + uid)
    def mifare_read(self,address):
        """Read and return 16 bytes from the data block at the given address."""
        return self.in_data_exchange(bytearray([MIFARE_COMMAND_READ,address]))
    def mifare_write_standard(self,address,data):
        """Write 16 bytes to the data block on a Mifare Standard card
        at the given address.  Shorter input is zero-padded to 16 bytes."""
        if len(data) > 16:
            raise IndexError("Data cannot exceed 16 bytes (is {0} bytes)".format(len(data)))
        self.in_data_exchange(bytearray([MIFARE_COMMAND_WRITE_16,address]) + data + (b'\x00' * (16 - len(data))))
    def mifare_write_ultralight(self,address,data):
        """Write 4 bytes to the data block on a Mifare Ultralight card
        at the given address.  Shorter input is zero-padded to 4 bytes."""
        if len(data) > 4:
            raise IndexError("Data cannot exceed 4 bytes (is {0} bytes)".format(len(data)))
        self.in_data_exchange(bytearray([MIFARE_COMMAND_WRITE_4,address]) + data + (b'\x00' * (4 - len(data))))
    def mifare_read_access(self,address):
        """Returns the access conditions for the block at the given address
        in a three-tuple of booleans (C1,C2,C3)."""
        sector, index = self.mifare_sector_block(address)
        if address < 128:
            # Small sector: trailer is block 3 (address | 3).
            data = self.mifare_read(address | 3)
        else:
            # Large sector: trailer is block 15; data blocks are grouped
            # in fives, sharing one set of access bits per group.
            data = self.mifare_read(address | 15)
            index = math.floor(index / 5)
        # Access bits live in trailer bytes 6-8; see the MF1S70 datasheet.
        return (data[7] & 1 << 4 + index > 0,data[8] & 1 << index > 0,data[8] & 1 << 4 + index > 0)
    def mifare_write_access(self,address,c1,c2,c3,key_a,key_b):
        """Changes the access conditions for the block at the given address
        to the three booleans c1,c2,c3.
        YOU SHOULD REALLY KNOW WHAT YOU'RE DOING HERE! With the wrong,
        settings, you may shut yourself out of your card. The keys A
        and B must also be provided because they cannot be read and
        may therefore be overwritten by zeroes (as returned by a
        regular read on the trailer sector).
        """
        sector, index = self.mifare_sector_block(address)
        if address < 128:
            trailer_address = address | 3
        else:
            trailer_address = address | 15
            index = math.floor(index / 5)
        data = self.mifare_read(trailer_address)
        # Each access bit is stored twice: once normally and once inverted,
        # hence every set below flips a paired bit in another byte.
        if c1:
            data[7] |= 1 << 4 + index
            data[6] &= ~(1 << index)
        else:
            data[7] &= ~(1 << 4 + index)
            data[6] |= 1 << index
        if c2:
            data[8] |= 1 << index
            data[6] &= ~(1 << 4 + index)
        else:
            data[8] &= ~(1 << index)
            data[6] |= 1 << 4 + index
        if c3:
            data[8] |= 1 << 4 + index
            data[7] &= ~(1 << index)
        else:
            data[8] &= ~(1 << 4 + index)
            data[7] |= 1 << index
        # Rebuild the trailer: key A + access bytes (+GPB) + key B.
        data = key_a + data[6:10] + key_b
        self.mifare_write_standard(trailer_address,data)
    def mifare_change_keys(self,address,key_a,key_b):
        """Changes the authorization keys A and B for the block at
        the given address.
        KEYS MAY NOT BE READABLE SO MAKE SURE YOU WRITE THEM DOWN!
        If you forget a key (especially key A), you may not be able
        to authenticate a block anymore and therefore not read it
        or write to it. The factory preset for keys is always
        b'\xFF\xFF\xFF\xFF\xFF\xFF' as defined in MIFARE_FACTORY_KEY.
        """
        if address < 128:
            trailer_address = address | 3
        else:
            trailer_address = address | 15
        data = self.mifare_read(trailer_address)
        # Keep the current access bytes, replace only the two keys.
        data = key_a + data[6:10] + key_b
        self.mifare_write_standard(trailer_address,data)
1600830 | from __future__ import unicode_literals, absolute_import
from datetime import datetime, timedelta
from django import forms
from django.conf import settings
from django.utils.dates import MONTHS
from django.core.validators import validate_email
from django.core.urlresolvers import resolve, Resolver404
from uwsgiit.api import UwsgiItClient
from select2.widgets import SelectMultipleAutocomplete, SelectAutocomplete
from .models import UwsgiItApi
def email_list_validator(value):
    """Validate that every entry in *value* is a well-formed e-mail address."""
    # Required/empty handling is left to the field using this validator.
    for address in value:
        validate_email(address.strip())
class MultiEmailField(forms.CharField):
    """CharField holding a comma-separated list of e-mail addresses."""
    default_validators = [email_list_validator]
    def to_python(self, value):
        """Turn the raw comma-separated string into a list of strings."""
        return [] if value in self.empty_values else value.split(',')
    def clean(self, value):
        """Validate, then re-serialise the addresses stripped of whitespace."""
        addresses = super(MultiEmailField, self).clean(value)
        return ','.join(address.strip() for address in addresses)
class TagsForm(forms.Form):
    """Form with a multi-select "tags" field whose choices are injected
    at construction time via the ``tag_choices`` keyword argument."""
    tags = forms.MultipleChoiceField(
        widget=SelectMultipleAutocomplete(plugin_options={"width": "300px"}),
        choices=(),
        required=False)
    def __init__(self, *args, **kwargs):
        # ``tag_choices`` is popped before delegating so forms.Form never
        # sees this non-standard keyword.
        tag_choices = kwargs.pop('tag_choices')
        super(TagsForm, self).__init__(*args, **kwargs)
        self.fields['tags'].choices = tag_choices
class BootstrapForm(forms.Form):
    """Base form that tags every widget with Bootstrap's ``form-control``
    class.

    Select2 autocomplete widgets are skipped because they style themselves.
    """
    def __init__(self, *args, **kwargs):
        super(BootstrapForm, self).__init__(*args, **kwargs)
        # Iterate the field objects directly instead of iterating .keys()
        # and re-looking each field up three times (as the original did).
        for field in self.fields.values():
            if not isinstance(field.widget, (SelectAutocomplete, SelectMultipleAutocomplete)):
                field.widget.attrs['class'] = 'form-control'
class LoginForm(forms.Form):
    """Login form that verifies the credentials against the chosen API.

    NOTE: clean() performs a network round-trip to the selected API.
    """
    # Hidden marker so the view can tell this POST is a login attempt.
    action_login = forms.IntegerField(
        label='', widget=forms.HiddenInput(), initial=1)
    username = forms.CharField(label='', widget=forms.TextInput(
        attrs={'class': 'form-control', 'placeholder': 'Username'}))
    password = forms.CharField(label='', widget=forms.PasswordInput(
        attrs={'class': 'form-control', 'placeholder': 'Password'}))
    api_url = forms.ModelChoiceField(
        label='Api url :', queryset=UwsgiItApi.objects.none())
    def __init__(self, *args, **kwargs):
        super(LoginForm, self).__init__(*args, **kwargs)
        # Queryset and default are set here (not at class level) so they are
        # evaluated per-request rather than at import time.
        self.fields['api_url'].queryset = UwsgiItApi.objects.all()
        self.fields['api_url'].initial = UwsgiItApi.objects.get_or_create(
            url=settings.DEFAULT_API_URL)[0]
    def clean(self):
        """Validate the credentials by calling the API's /me endpoint."""
        cd = super(LoginForm, self).clean()
        if 'username' in cd and 'password' in cd and 'api_url' in cd:
            client = UwsgiItClient(
                cd['username'],
                cd['password'],
                cd['api_url'].url)
            me = client.me().json()
            if 'error' in me:
                raise forms.ValidationError('Wrong username or password')
        return cd
class MeForm(forms.Form):
    """Account settings form; enforces that both password fields match."""
    company = forms.CharField(label='Company', widget=forms.TextInput(
        attrs={'class': 'form-control col-xs-8'}))
    # Comma-separated list of addresses, validated by MultiEmailField.
    email = MultiEmailField(label='Email', widget=forms.TextInput(
        attrs={'class': 'form-control col-xs-8'}), required=False)
    password = forms.CharField(label='Password', widget=forms.PasswordInput(
        attrs={'class': 'form-control'}, render_value=True))
    re_password = forms.CharField(
        label='Retype password',
        widget=forms.PasswordInput(
            render_value=True, attrs={'class': 'form-control'}))
    vat = forms.CharField(label='Vat', widget=forms.TextInput(
        attrs={'class': 'form-control col-xs-8'}), required=False)
    def clean(self):
        """Cross-field validation: the two password entries must agree."""
        cd = super(MeForm, self).clean()
        if 'password' in cd and 're_password' in cd:
            p1 = cd['password']
            p2 = cd['re_password']
            if p1 != p2:
                # Attach the error to the confirmation field.
                self._errors['re_password'] = self.error_class(
                    ['Passwords do not match'])
        return cd
class SSHForm(forms.Form):
    """Form accepting a single SSH public key, with light sanity checks."""
    key = forms.CharField(label='ssh key', widget=forms.Textarea(
        attrs={'cols': 100, 'rows': 3, 'class': 'form-control'}))
    def clean(self):
        """Raise a ValidationError if the
        value is not bigger than 130 bytes
        check for ssh-rsa and ssh-dsa strings
        """
        data = super(SSHForm, self).clean()
        if 'key' in data:
            key = data['key'].strip()
            # 130 chars is a heuristic lower bound for a real public key.
            if len(key) > 130:
                if 'ssh-rsa ' not in key and 'ssh-dsa ' not in key:
                    msg = 'Inserted value is not a valid ssh key'
                    raise forms.ValidationError(msg)
                # A single key must be a single line.
                if key.count('\n') > 0:
                    msg = 'Too many newlines in the ssh key'
                    raise forms.ValidationError(msg)
            else:
                msg = 'Key too short'
                raise forms.ValidationError(msg)
        return data
class ContainerForm(TagsForm):
    """Container settings form: distro, linking, alarm/notification
    channels (Jabber, Pushover, Pushbullet, Slack) and free-form notes.

    ``distro_choices`` and ``linked_to_choices`` must be supplied as
    keyword arguments at construction time.
    """
    name = forms.CharField(label='Name', required=False)
    quota_threshold = forms.IntegerField(
        label='Quota Threshold', min_value=0, max_value=100)
    nofollow = forms.BooleanField(label='NoFollow', required=False)
    distro = forms.IntegerField(label='Distro', widget=forms.Select(choices=()))
    linked_to = forms.MultipleChoiceField(
        widget=SelectMultipleAutocomplete(plugin_options={"width": "300px"}),
        choices=(),
        required=False)
    jid = forms.CharField(label='Jabber ID', required=False)
    jid_destinations = forms.CharField(
        label='Jabber Destinations', required=False)
    jid_secret = forms.CharField(
        label='Jabber Password', widget=forms.PasswordInput(), required=False)
    pushover_user = forms.CharField(label='Pushover User', required=False)
    pushover_token = forms.CharField(label='Pushover Token', required=False)
    pushover_sound = forms.CharField(label='Pushover Sound', required=False)
    pushbullet_token = forms.CharField(label='Pushbullet Token', required=False)
    slack_webhook = forms.CharField(label='Slack Webhook', required=False)
    alarm_freq = forms.IntegerField(
        label='Alarm Frequency', required=False, min_value=60)
    note = forms.CharField(
        widget=forms.Textarea(
            attrs={'cols': 50, 'rows': 3, 'class': 'form-control'}),
        required=False)
    # Hidden flag; presumably set by the template/JS to request a reboot.
    reboot = forms.BooleanField(required=False, widget=forms.HiddenInput)
    def __init__(self, *args, **kwargs):
        # Pop the dynamic choice lists before forms.Form sees the kwargs.
        distro_choices = kwargs.pop('distro_choices')
        linked_to_choices = kwargs.pop('linked_to_choices')
        super(ContainerForm, self).__init__(*args, **kwargs)
        self.fields['distro'].widget.choices = distro_choices
        self.fields['linked_to'].choices = linked_to_choices
class TagForm(forms.Form):
    """Minimal form for creating or renaming a tag."""
    name = forms.CharField(label='Name')
class DomainForm(TagsForm):
    """Edit form for an existing domain (tag handling inherited from TagsForm)."""
    note = forms.CharField(required=False, widget=forms.Textarea(
        attrs={'cols': 50, 'rows': 3, 'class': 'form-control'}))
class NewDomainForm(forms.Form):
    """Form for registering a new domain by name."""
    name = forms.CharField(
        label='Name', widget=forms.TextInput(attrs={'size': 70}))
class CalendarForm(forms.Form):
    """Year/month/day picker used to select a metrics period.
    Month and day are optional: the chosen granularity drives the time unit
    of the rendered metric (see ``time_unit``).  Dates in the future are
    rejected by ``clean``.
    """
    year = forms.IntegerField()
    month = forms.ChoiceField(
        required=False,
        widget=SelectAutocomplete(plugin_options={"width": "200px"}),
        choices=[('', '')] + [(k, v) for k, v in MONTHS.items()])
    day = forms.IntegerField(required=False)
    def __init__(self, *args, **kwargs):
        """Default the fields to yesterday's date (the last complete day)."""
        super(CalendarForm, self).__init__(*args, **kwargs)
        today = datetime.today()
        yesterday = today - timedelta(1)
        self.fields['year'].initial = yesterday.year
        self.fields['month'].initial = yesterday.month
        self.fields['day'].initial = yesterday.day
        self.fields['day'].widget.attrs['min'] = 1
    def has_value(self, field):
        """True when ``field`` was cleaned and holds a truthy value."""
        data = self.cleaned_data
        if field in data and data[field]:
            return True
        return False
    def get_params(self):
        """Return the supplied date parts as a dict (month coerced to int)."""
        res = {}
        data = self.cleaned_data
        if self.has_value('year'):
            res['year'] = data['year']
        if self.has_value('month'):
            res['month'] = int(data['month'])
        if self.has_value('day'):
            res['day'] = data['day']
        return res
    def metric_name(self):
        """Build a 'day-month-year' style label from the parts supplied."""
        metric_name = ''
        data = self.cleaned_data
        if self.has_value('year'):
            metric_name = str(data['year'])
        if self.has_value('month'):
            metric_name = str(data['month']) + '-' + metric_name
        if self.has_value('day'):
            metric_name = str(data['day']) + '-' + metric_name
        return metric_name
    def time_unit(self):
        """Granularity implied by the most specific field provided."""
        if self.has_value('day'):
            return 'hour'
        elif self.has_value('month'):
            return 'day'
        return 'month'
    def is_in_the_future(self):
        """True when the (possibly partial) date lies after today."""
        data = self.get_params()
        today = datetime.today()
        if 'year' in data and data['year'] > today.year:
            return True
        if ('year' in data and data['year'] == today.year and
                'month' in data and data['month'] > today.month):
            return True
        if ('year' in data and data['year'] == today.year and
                'month' in data and data['month'] == today.month and
                'day' in data and data['day'] > today.day):
            return True
        return False
    def clean(self):
        """Require a month whenever a day is given; reject future dates."""
        data = super(CalendarForm, self).clean()
        if self.has_value('day') and not self.has_value('month'):
            self._errors['month'] = self.error_class(['Month is required.'])
        if self.is_in_the_future():
            raise forms.ValidationError('Set a date in the past.')
        return data
class MetricDetailForm(forms.Form):
    """Form describing a metric detail request."""
    metric_url = forms.CharField()
    metric_type = forms.CharField()
    subject = forms.CharField()
    def clean(self):
        """Reject metric_url values that do not resolve to a known view."""
        cd = super(MetricDetailForm, self).clean()
        if 'metric_url' in cd:
            try:
                resolve(cd['metric_url'])
            except Resolver404:
                raise forms.ValidationError('Invalid url')
        return cd
class NewLoopboxForm(BootstrapForm):
    """Form for attaching a new loopbox (loopback disk image) to a container."""
    # container = forms.IntegerField(label='', widget=forms.HiddenInput())
    filename = forms.CharField(label='Filename')
    mountpoint = forms.CharField(label='Mount Point')
    readonly = forms.BooleanField(label='Readonly', required=False)
class LoopboxForm(TagsForm):
    """Edit form for an existing loopbox; ``lid`` carries its id."""
    lid = forms.IntegerField(widget=forms.HiddenInput, required=False)
class AlarmForm(BootstrapForm):
    """Filter form for alarm/event listings."""
    # UI-only discriminator for which filter tab is active; stripped in clean().
    action_filter = forms.IntegerField(
        label='', widget=forms.HiddenInput(), initial=1)
    container = forms.IntegerField(required=False)
    vassal = forms.CharField(required=False)
    class_ = forms.CharField(label='Class', required=False)
    color = forms.CharField(max_length=7, required=False)
    level = forms.ChoiceField(
        required=False,
        widget=SelectAutocomplete(plugin_options={"width": "100%"}),
        choices=(
            ('', ' '), (0, 'System'), (1, 'User'),
            (2, 'Exception'), (3, 'Traceback'), (4, 'Log')
        )
    )
    line = forms.IntegerField(min_value=0, required=False)
    filename = forms.CharField(required=False)
    func = forms.CharField(label='Function', required=False)
    def clean(self):
        """Drop the UI-only ``action_filter`` key from the cleaned data.

        Uses ``pop`` with a default: the original ``del cd['action_filter']``
        raised KeyError whenever the field failed validation and was therefore
        absent from ``cleaned_data``.
        """
        cd = super(AlarmForm, self).clean()
        cd.pop('action_filter', None)
        return cd
| StarcoderdataPython |
175171 | <reponame>ozburo/babushka<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
example.handlers.py
"""
from basehandler import BaseHandler
from models import Blog, Post
import yaml
# --------------------------------------------------------------------
# Example Handler
# --------------------------------------------------------------------
class ExampleHandler(BaseHandler):
    """Render the front page with the main blog."""
    def get(self):
        # 'main' is the fixed datastore id used by the demo fixtures.
        blog = Blog.get_by_id('main')
        context = {
            'blog': blog,
        }
        return self.render_response('/index.html', **context)
# --------------------------------------------------------------------
# Debug Handler
# --------------------------------------------------------------------
class DebugHandler(BaseHandler):
    """Convenience endpoints for seeding and mutating demo data."""
    def populate(self):
        """Load fixture data from example/data.yaml into the datastore."""
        # Context manager closes the fixture file (the original leaked the
        # handle), and yaml.safe_load prevents the fixture from constructing
        # arbitrary Python objects (yaml.load without a Loader is unsafe).
        with open('example/data.yaml') as fixture:
            data = yaml.safe_load(fixture)
        blog = Blog.get_or_insert(data['blog']['name'], name=data['blog']['name'])
        for post in data['blog']['posts']:
            post = Post(blog=blog.key, **post)
            post.put()
        return self.redirect_to('index')
    def update(self):
        """Append a marker to the first post's title, if any post exists."""
        post = Post.query().get()
        if post:
            post.title = '%s *UPDATED*' % post.title
            post.put()
        return self.redirect_to('index')
    def delete(self):
        """Delete the first post returned by the query, if any."""
        post = Post.query().get()
        if post:
            post.key.delete()
        return self.redirect_to('index')
| StarcoderdataPython |
6497073 | # !/usr/bin/env python
# -*-coding:utf-8 -*-
# PROJECT : web-common-service
# Time :2020/12/4 11:09
# Warning:The Hard Way Is Easier
import json
from flask import g
from collections import namedtuple
from webAPi.extensions import db
from webAPi.models import Column
from webAPi.models import BaseModel
from webAPi.models import BaseMixin
from webAPi.utils.com import produce_id
from webAPi.constant import UPLOAD_FILE_BASE_CONF
# Lightweight record used only for seeding rows in insert_data().
APP_INFO = namedtuple('app_info', 'id name brief status conf')
class AppInfo(BaseModel, BaseMixin):
    """Registered client application plus its upload configuration (JSON)."""
    __tablename__ = 'app_info'
    __table_args__ = {'mysql_engine': 'InnoDB'}  # InnoDB: supports transactions and foreign keys
    name = Column(db.String(255), nullable=False, comment="应用名称")
    brief = Column(db.Text, nullable=False, comment="应用简介")
    status = Column(db.Integer, default=0, comment="应用状态:0 可用/1 不可用")
    conf = Column(db.Text, nullable=False, comment="使用json存储上传文件的配置信息")
    @staticmethod
    def insert_data():
        """Seed the table with the known applications (idempotent)."""
        app_infos = [
            APP_INFO('dc601e113be8a2e622f9f9a3f363eb93', 'test_project', '这是一个测试项目', 0, UPLOAD_FILE_BASE_CONF),
            APP_INFO('1b4925bfa780f5964a2de19e5322dca4', 'school_info', '学校信息网', 0, UPLOAD_FILE_BASE_CONF),
        ]
        for info in app_infos:
            app = AppInfo.query.filter_by(name=info.name).first()
            # Existing application rows are only updated through the API, never re-seeded here.
            if app is None:
                app_id = info.id if info.id else produce_id()
                app = AppInfo(id=app_id, name=info.name, brief=info.brief,
                              status=info.status, conf=json.dumps(info.conf, ensure_ascii=False))
                db.session.add(app)
            db.session.commit()
    @property
    def upload_conf(self):
        """Upload configuration decoded from the JSON ``conf`` column."""
        return json.loads(self.conf)
    @staticmethod
    def get_app_info():
        """Look up the AppInfo row for the app id bound to the request (flask.g)."""
        app_id = g.app_id
        app_info = AppInfo.query.get(app_id)
        return app_info
| StarcoderdataPython |
112422 | # create node class
class Node:
    """Singly-linked-list node: a value plus a reference to the next node."""
    def __init__(self, value=None, next=None):
        self.value = value
        self.next = next

    def __repr__(self):
        # Added for debuggability; does not change any existing behavior.
        return "Node({!r})".format(self.value)
# create stack
class Stack:
    """LIFO stack backed by a singly linked list of ``Node`` objects."""
    def __init__(self):
        self.top = None  # Node holding the most recently pushed value

    def isEmpty(self):
        """Return True when the stack holds no elements."""
        # 'is None' rather than '== None': identity is the correct test for
        # None, and the boolean can be returned directly.
        return self.top is None

    def peek(self):
        """Return the top value without removing it; raise if empty."""
        if self.isEmpty():
            raise Exception('cannot peek empty stack')
        return self.top.value

    def push(self, val):
        """Push ``val`` onto the stack.  Returns True (as the original did)."""
        self.top = Node(val, self.top)
        return True

    def pop(self):
        """Remove and return the top value; raise if empty."""
        if self.isEmpty():
            raise Exception('cannot pop empty stack')
        popped = self.top.value
        self.top = self.top.next
        return popped
8026361 | from application_form.models.application import (
Applicant,
Application,
ApplicationApartment,
)
from application_form.models.lottery import LotteryEvent, LotteryEventResult
from application_form.models.reservation import (
ApartmentQueueChangeEvent,
ApartmentReservation,
)
# Public names re-exported by the application_form.models package.
__all__ = [
    "Applicant",
    "Application",
    "ApplicationApartment",
    "LotteryEvent",
    "LotteryEventResult",
    "ApartmentReservation",
    "ApartmentQueueChangeEvent",
]
| StarcoderdataPython |
5118721 | <reponame>enuan/duktape-py
"""
This file represents how to implement an event loop for duktape using asyncio.
"""
import asyncio
from functools import partial
import duktape
class Timer:
    """One JS timer scheduled on the asyncio loop.
    Wraps a callback plus its delay (seconds); reschedules itself when not
    oneshot, and notifies the owning EventLoop after every firing so overall
    completion can be detected.
    """
    def __init__(self, event_loop, callback, delay, oneshot=True):
        self.event_loop = event_loop
        self.callback = callback
        self.delay = delay  # seconds
        self.oneshot = oneshot
        self.cancelled = False
        self.done = False
        self.schedule()
    def __call__(self, *args):
        """Fire the callback; invoked by asyncio via loop.call_later."""
        if not self.cancelled:
            try:
                self.callback(*args)
            except Exception:
                # A failing callback permanently finishes the timer.
                self.done = True
                raise
            else:
                if not self.oneshot:
                    self.schedule()
                else:
                    self.done = True
            finally:
                # Let the event loop re-check whether everything is done.
                self.event_loop.tick()
    def schedule(self):
        """Arm (or re-arm) the underlying asyncio timer handle."""
        if not self.cancelled:
            self._handle = self.event_loop.loop.call_later(self.delay, self)
    def cancel(self):
        """Cancel the pending handle and mark the timer finished."""
        self._handle.cancel()
        self.cancelled = True
        self.done = True
    def __repr__(self):
        return f"<Timer {id(self)} callback={self.callback} delay={self.delay} oneshot={self.oneshot} cancelled={self.cancelled}>"
class EventLoop:
    """Minimal timer-driven event loop bridging duktape JS timers onto asyncio."""
    loop = asyncio.get_event_loop()
    @classmethod
    def setup(cls, ctx):
        """Create an EventLoop and expose its timer API to the JS context."""
        event_loop = cls()
        ctx.load("event_loop.js")
        ctx["EventLoop"] = {
            "createTimer": duktape.PyFunc(event_loop.create_timer, 3),
            "cancelTimer": duktape.PyFunc(event_loop.cancel_timer, 1),
        }
        return event_loop
    def __init__(self):
        # Timers keyed by a stable, monotonically increasing id.  The
        # original kept a list and returned the list index as the id, so
        # cancelling one timer shifted the ids of every timer created after
        # it and made ids previously handed to JS point at the wrong timer.
        self.timers = {}
        self._next_id = 0
    def create_timer(self, callback, delay, oneshot):
        """Register a timer (delay in milliseconds) and return its stable id."""
        timer_id = self._next_id
        self._next_id += 1
        self.timers[timer_id] = Timer(self, callback, delay / 1000, oneshot)
        return timer_id
    def cancel_timer(self, idx):
        """Cancel and forget the timer with the given id."""
        timer = self.timers.pop(int(idx))
        timer.cancel()
    def tick(self):
        """Resolve the ``completed`` future once every known timer is done."""
        for timer in self.timers.values():
            if not timer.done:
                break
        else:
            self.completed.set_result(None)
    def run(self):
        """Run the asyncio loop until all timers have finished."""
        self.completed = self.loop.create_future()
        self.tick()
        self.loop.run_until_complete(self.completed)
def user_code(ctx):
    """Entry point for the demo JS program, run as the first timer callback."""
    ctx.load("demo.js")
def console_log(message):
    """Python backend for the JS ``console.log`` binding."""
    print(message)
def setup_duk_ctx():
    """Create a duktape context with ``console.log`` and the event loop bound.
    Returns (ctx, event_loop).
    """
    ctx = duktape.Context()
    ctx["console"] = {"log": duktape.PyFunc(console_log, 1)}
    event_loop = EventLoop.setup(ctx)
    return ctx, event_loop
duk_ctx, event_loop = setup_duk_ctx()
# Queue the demo script as an immediate one-shot timer, then drive the
# asyncio loop until every timer has completed.
event_loop.create_timer(partial(user_code, duk_ctx), 0, True)
event_loop.run()
| StarcoderdataPython |
1787544 | finding_target = 101
finding_numbers = [i for i in range(100)]  # 0, 1, ..., 99 — the target above (101) is NOT present
def is_existing_target_number_sequential(target, array):
    """Linear scan for ``target`` in ``array``.
    When found, prints the 1-based number of comparisons made and returns
    True; otherwise returns False without printing.
    """
    for comparisons, candidate in enumerate(array, start=1):
        if candidate == target:
            print(comparisons)
            return True
    return False
result = is_existing_target_number_sequential(finding_target, finding_numbers)
print(result)  # False — 101 is not among 0..99 (the original comment incorrectly claimed True)
8119447 | """
Tests methods for evaluating model fairness.
"""
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: BSD 3 clause
import pytest
import numpy as np
import fatf.fairness.models.measures as ffmm
# Hand-crafted confusion-matrix fixtures; the per-matrix accuracy (ACC),
# true-positive rate (TPR) and precision (PPV) worked out in these comments
# are what the expected disparity flags in the tests below are derived from.
# ACC: 10/19; TPR: (0) 5/9 (2) 1/3; PPV: (0) 5/7 (2) 1/3
CM1 = np.array([[5, 2, 0], [3, 4, 2], [1, 1, 1]])
# ACC: 6/12; TPR: (0) 3/5 (2) 1/4; PPV: (0) 3/6 (2) 1/2
CM2 = np.array([[3, 0, 3], [2, 2, 0], [0, 1, 1]])
# ACC: 26/32; TPR: (0) 3/3 (2) 20/23; PPV: (0) 3/9 (2) 20/20
CM3 = np.array([[3, 3, 3], [0, 3, 0], [0, 0, 20]])
CM_LIST = [CM1, CM2, CM3]
# Labels for three sub-populations (rows grouped per matrix above).
GROUND_TRUTH = np.array(9 * ['a'] + 5 * ['a'] + 3 * ['a']
                        + 7 * ['b'] + 3 * ['b'] + 6 * ['b']
                        + 3 * ['c'] + 4 * ['c'] + 23 * ['c'])  # yapf: disable
PREDICTIONS = np.array(5 * ['a'] + 3 * ['b'] + 1 * ['c']
                       + 3 * ['a'] + 2 * ['b'] + 0 * ['c']
                       + 3 * ['a'] + 0 * ['b'] + 0 * ['c']
                       #
                       + 2 * ['a'] + 4 * ['b'] + 1 * ['c']
                       + 0 * ['a'] + 2 * ['b'] + 1 * ['c']
                       + 3 * ['a'] + 3 * ['b'] + 0 * ['c']
                       #
                       + 0 * ['a'] + 2 * ['b'] + 1 * ['c']
                       + 3 * ['a'] + 0 * ['b'] + 1 * ['c']
                       + 3 * ['a'] + 0 * ['b'] + 20 * ['c'])  # yapf: disable
def test_validate_tolerance():
    """
    Tests :func:`fatf.fairness.models.measures._validate_tolerance` function.
    """
    value_error = 'The tolerance parameter should be within [0, 1] range.'
    type_error = 'The tolerance parameter should be a number.'
    with pytest.raises(TypeError) as exin:
        ffmm._validate_tolerance('a')
    assert str(exin.value) == type_error
    # Values just outside [0, 1] are rejected; the boundaries are inclusive.
    with pytest.raises(ValueError) as exin:
        ffmm._validate_tolerance(-0.00000001)
    assert str(exin.value) == value_error
    with pytest.raises(ValueError) as exin:
        ffmm._validate_tolerance(1.00000001)
    assert str(exin.value) == value_error
    assert ffmm._validate_tolerance(1.0000000)
def test_equal_accuracy():
    """
    Tests :func:`fatf.fairness.models.measures.equal_accuracy` function.
    """
    # Expected pairwise disparity flags derived from the ACC values noted
    # above CM1–CM3 (only CM3's accuracy differs from the others' by > tol).
    ok_array = np.array([[False, False, True], [False, False, True],
                         [True, True, False]])
    not_ok_array = np.array([[False, False, False], [False, False, False],
                             [False, False, False]])
    disparity = ffmm.equal_accuracy(CM_LIST)
    assert np.array_equal(disparity, ok_array)
    disparity = ffmm.equal_accuracy(CM_LIST, tolerance=0.35)
    assert np.array_equal(disparity, not_ok_array)
def test_equal_opportunity():
    """
    Tests :func:`fatf.fairness.models.measures.equal_opportunity` function.
    """
    # Expected flags derived from the TPR values noted above CM1–CM3.
    ok_array = np.array([[False, False, True], [False, False, True],
                         [True, True, False]])
    not_ok_array = np.array([[False, False, True], [False, False, False],
                             [True, False, False]])
    disparity = ffmm.equal_opportunity(CM_LIST)
    assert np.array_equal(disparity, ok_array)
    disparity = ffmm.equal_opportunity(CM_LIST, label_index=2)
    assert np.array_equal(disparity, ok_array)
    disparity = ffmm.equal_opportunity(CM_LIST, tolerance=0.4)
    assert np.array_equal(disparity, not_ok_array)
def test_demographic_parity():
    """
    Tests :func:`fatf.fairness.models.measures.demographic_parity` function.
    """
    # Expected flags derived from the PPV values noted above CM1–CM3.
    ok_array = np.array([[False, True, True], [True, False, False],
                         [True, False, False]])
    not_ok_array = np.array([[False, False, True], [False, False, True],
                             [True, True, False]])
    disparity = ffmm.demographic_parity(CM_LIST)
    assert np.array_equal(disparity, ok_array)
    disparity = ffmm.demographic_parity(CM_LIST, label_index=2)
    assert np.array_equal(disparity, not_ok_array)
    disparity = ffmm.demographic_parity(CM_LIST, label_index=2, tolerance=0.67)
    assert not disparity.any()
def test_disparate_impact_check():
    """
    Tests :func:`fatf.fairness.models.measures.disparate_impact_check`.
    """
    # The check is truthy as soon as any pairwise disparity flag is set.
    ok_array = np.array([[False, False], [False, False]])
    not_ok_array = np.array([[False, True], [True, False]])
    assert not ffmm.disparate_impact_check(ok_array)
    assert ffmm.disparate_impact_check(not_ok_array)
def test_disparate_impact_grid():
    """
    Tests :func:`fatf.fairness.models.measures._disparate_impact_grid`.
    """
    type_error = ('Criterion has to either be a string indicating parity '
                  'metric or None for the default parity metric (equal '
                  'accuracy).')
    value_error = ('Unrecognised criterion. The following options are '
                   "allowed: ['demographic parity', 'equal opportunity', "
                   "'equal accuracy'].")
    ok_array = np.array([[False, False, True], [False, False, True],
                         [True, True, False]])
    not_ok_array = np.array([[False, True, True], [True, False, False],
                             [True, False, False]])
    # Invalid criterion types/values are rejected before any computation.
    with pytest.raises(TypeError) as exin:
        ffmm._disparate_impact_grid(0, 42, 0, 0)
    assert str(exin.value) == type_error
    with pytest.raises(ValueError) as exin:
        ffmm._disparate_impact_grid(0, '42', 0, 0)
    assert str(exin.value) == value_error
    # None defaults to the 'equal accuracy' criterion.
    disparity = ffmm._disparate_impact_grid(CM_LIST, None, 0.2, 0)
    assert np.array_equal(disparity, ok_array)
    disparity = ffmm._disparate_impact_grid(CM_LIST, 'equal accuracy', 0.2, 0)
    assert np.array_equal(disparity, ok_array)
    disparity = ffmm._disparate_impact_grid(
        CM_LIST, 'equal opportunity', 0.2, 0)  # yapf: disable
    assert np.array_equal(disparity, ok_array)
    disparity = ffmm._disparate_impact_grid(
        CM_LIST, 'demographic parity', 0.2, 0)  # yapf: disable
    assert np.array_equal(disparity, not_ok_array)
def test_disparate_impact_indexed():
    """
    Tests :func:`fatf.fairness.models.measures.disparate_impact_indexed`.
    """
    # Index groupings reconstructing the three sub-populations encoded in
    # GROUND_TRUTH/PREDICTIONS above.
    grouping = [
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 17, 18, 19, 20, 21, 22, 23, 33, 34, 35],
        [9, 10, 11, 12, 13, 24, 25, 26, 36, 37, 38, 39],
        [14, 15, 16, 27, 28, 29, 30, 31, 32] + list(range(40, 63))
    ]  # yapf: disable
    disparity = ffmm.disparate_impact_indexed(grouping, GROUND_TRUTH,
                                              PREDICTIONS)
    ok_array = np.array([[False, False, True], [False, False, True],
                         [True, True, False]])
    assert np.array_equal(disparity, ok_array)
def test_disparate_impact():
    """
    Tests :func:`fatf.fairness.models.measures.disparate_impact` function.
    """
    # Column 1 encodes the protected attribute ('a'/'b'/'c'); row counts
    # mirror the GROUND_TRUTH fixture above.
    dataset = np.array([
        *(9 * [['a', 'a']]), *(5 * [['a', 'b']]), *(3 * [['a', 'c']]),
        *(7 * [['a', 'a']]), *(3 * [['a', 'b']]), *(6 * [['a', 'c']]),
        *(3 * [['a', 'a']]), *(4 * [['a', 'b']]), *(23 * [['a', 'c']])
    ])
    disparity, bin_names = ffmm.disparate_impact(dataset, GROUND_TRUTH,
                                                 PREDICTIONS, 1)
    assert bin_names == ["('a',)", "('b',)", "('c',)"]
    ok_array = np.array([[False, False, True], [False, False, True],
                         [True, True, False]])
    assert np.array_equal(disparity, ok_array)
| StarcoderdataPython |
169861 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-03-06 02:46
from __future__ import unicode_literals
import ckeditor.fields
from django.db import migrations, models
import django.db.models.deletion
from newsroomFramework.settings import PROJECT_ROOT
import os
import ontospy
def forwards_func(apps, schema_editor):
    """Seed Namespace and Recurso rows from the bundled OWL ontologies."""
    Namespace = apps.get_model("cms", "Namespace")
    Recurso = apps.get_model("cms", "Recurso")
    db_alias = schema_editor.connection.alias
    zika_path = os.path.join(PROJECT_ROOT, 'root-ontology.owl')
    ao_path = os.path.join(PROJECT_ROOT, 'annotation-core.owl')
    ontoZika = ontospy.Ontospy(zika_path)
    ontoAO = ontospy.Ontospy(ao_path)
    # Read the raw RDF through context managers: the original called
    # open(...).read() inline, leaking both file handles.
    with open(zika_path, 'r') as fh:
        zika_rdf = fh.read()
    with open(ao_path, 'r') as fh:
        ao_rdf = fh.read()
    nsZika = Namespace(ns_ref=str(ontoZika.namespaces[0][1]).partition(')')[0], rdf=zika_rdf)
    nsAO = Namespace(ns_ref=str(ontoAO.namespaces[0][1]).partition(')')[0], rdf=ao_rdf)
    # NOTE(review): these saves use the default database rather than
    # db_alias — confirm this migration only ever runs against the default.
    nsZika.save()
    nsAO.save()
    for i in ontoZika.classes:
        Recurso.objects.using(db_alias).bulk_create([
            Recurso(namespace=nsZika, uri=i, valor=str(i).partition('#')[-1].partition('*')[0].replace('_', ' ')),
        ])
    for i in ontoAO.properties:
        Recurso.objects.using(db_alias).bulk_create([
            Recurso(namespace=nsAO, uri=i, valor=str(i).split('/')[-1].partition('*')[0]),
        ])
def reverse_func(apps, schema_editor):
    """Undo forwards_func: drop every Recurso, then every Namespace row."""
    Namespace = apps.get_model("cms", "Namespace")
    Recurso = apps.get_model("cms", "Recurso")
    db_alias = schema_editor.connection.alias
    # Recurso rows reference Namespace, so they are deleted first.
    Recurso.objects.using(db_alias).all().delete()
    Namespace.objects.using(db_alias).all().delete()
class Migration(migrations.Migration):
    """Initial cms schema (Artigo, Creator, Editoria, Namespace, Publicado,
    Recurso, Tripla) plus the ontology seed data loaded by forwards_func.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Artigo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50)),
                ('sutian', models.CharField(max_length=50)),
                ('text', ckeditor.fields.RichTextField(default='', verbose_name='Matéria')),
            ],
        ),
        migrations.CreateModel(
            name='Creator',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('email', models.URLField()),
            ],
        ),
        migrations.CreateModel(
            name='Editoria',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('topico', models.CharField(max_length=30)),
            ],
        ),
        migrations.CreateModel(
            name='Namespace',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ns_ref', models.TextField(verbose_name='ref')),
                ('rdf', models.TextField(verbose_name='rdf')),
            ],
        ),
        migrations.CreateModel(
            name='Publicado',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('html', models.TextField(verbose_name='html')),
                ('rdf_annotation', models.TextField(verbose_name='rdf_annotation')),
                ('artigo', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cms.Artigo')),
            ],
        ),
        migrations.CreateModel(
            name='Recurso',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('uri', ckeditor.fields.RichTextField(verbose_name='uri')),
                ('valor', ckeditor.fields.RichTextField(verbose_name='valor')),
                ('namespace', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cms.Namespace')),
            ],
        ),
        migrations.CreateModel(
            name='Tripla',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('artigo', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cms.Artigo')),
                ('objeto', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='objeto', to='cms.Recurso')),
                ('predicado', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='predicado', to='cms.Recurso')),
            ],
        ),
        migrations.AddField(
            model_name='artigo',
            name='creators',
            field=models.ManyToManyField(to='cms.Creator'),
        ),
        migrations.AddField(
            model_name='artigo',
            name='editoria',
            field=models.ManyToManyField(to='cms.Editoria'),
        ),
        # Data migration: seed ontology namespaces/resources (reversible).
        migrations.RunPython(forwards_func, reverse_func),
    ]
| StarcoderdataPython |
4932561 | <reponame>f213/rumetr-client
from .item import ApptItem
from .pipeline import UploadPipeline
from .yandex import YandexFeedSpider
# __all__ must contain *strings*: with the class objects themselves (as the
# original had), ``from <package> import *`` raises TypeError on Python 3.
__all__ = [
    "ApptItem",
    "UploadPipeline",
    "YandexFeedSpider",
]
| StarcoderdataPython |
3407702 | import cv2
import numpy as np
import matplotlib.pyplot as plt
# Gray scale
def BGR2GRAY(img):
    """Convert a BGR image of shape (H, W, 3) to a uint8 grayscale image.
    Uses the BT.709 luma weights (0.2126 R + 0.7152 G + 0.0722 B).
    """
    blue, green, red = img[:, :, 0], img[:, :, 1], img[:, :, 2]
    gray = 0.2126 * red + 0.7152 * green + 0.0722 * blue
    return gray.astype(np.uint8)
# Otsu Binalization
def otsu_binarization(img, th=128):
    """Binarize a grayscale image with Otsu's method.
    Scans every candidate threshold 1..254, keeps the one maximising the
    between-class variance, prints it, and maps pixels below it to 0 and the
    rest to 255.  The ``th`` argument is only a placeholder: it is always
    overwritten by the computed threshold (as in the original).
    """
    height, width = img.shape
    out = img.copy()
    n_pixels = height * width
    best_sigma = 0
    best_t = 0
    # determine threshold
    for cand in range(1, 255):
        low = out[np.where(out < cand)]
        low_mean = np.mean(low) if len(low) > 0 else 0.
        low_weight = len(low) / n_pixels
        high = out[np.where(out >= cand)]
        high_mean = np.mean(high) if len(high) > 0 else 0.
        high_weight = len(high) / n_pixels
        sigma = low_weight * high_weight * ((low_mean - high_mean) ** 2)
        if sigma > best_sigma:
            best_sigma = sigma
            best_t = cand
    # Binarization
    print("threshold >>", best_t)
    th = best_t
    out[out < th] = 0
    out[out >= th] = 255
    return out
# Morphology Erode
def Morphology_Erode(img, Erode_time=1):
    """Erode a binary (0/255) image with a 4-connected cross kernel.

    A pixel keeps its value only if all four cross neighbours are 255;
    otherwise it is set to 0.  The pass is applied ``Erode_time`` times.
    Edges are handled by replicating the border ('edge' padding).
    """
    H, W = img.shape
    out = img.copy()
    # kernel: 4-connected cross.  Plain ``int`` replaces ``np.int``, which
    # was removed in NumPy 1.24 and made this function raise AttributeError.
    MF = np.array(((0, 1, 0),
                   (1, 0, 1),
                   (0, 1, 0)), dtype=int)
    # each erode
    for i in range(Erode_time):
        tmp = np.pad(out, (1, 1), 'edge')
        # erode: any missing neighbour (sum < 4 * 255) clears the pixel
        for y in range(1, H + 1):
            for x in range(1, W + 1):
                if np.sum(MF * tmp[y - 1:y + 2, x - 1:x + 2]) < 255 * 4:
                    out[y - 1, x - 1] = 0
    return out
# Morphology Dilate
def Morphology_Dilate(img, Dilate_time=1):
    """Dilate a binary (0/255) image with a 4-connected cross kernel.

    A pixel becomes 255 if any of its four cross neighbours is 255.  The
    pass is applied ``Dilate_time`` times, with 'edge' padding at borders.
    """
    H, W = img.shape
    # kernel: 4-connected cross.  Plain ``int`` replaces ``np.int``, which
    # was removed in NumPy 1.24 and made this function raise AttributeError.
    MF = np.array(((0, 1, 0),
                   (1, 0, 1),
                   (0, 1, 0)), dtype=int)
    # each dilate time
    out = img.copy()
    for i in range(Dilate_time):
        tmp = np.pad(out, (1, 1), 'edge')
        for y in range(1, H + 1):
            for x in range(1, W + 1):
                if np.sum(MF * tmp[y - 1:y + 2, x - 1:x + 2]) >= 255:
                    out[y - 1, x - 1] = 255
    return out
# Opening morphology
def Morphology_Opening(img, time=1):
    """Morphological opening: erosion followed by dilation (removes specks)."""
    out = Morphology_Erode(img, Erode_time=time)
    out = Morphology_Dilate(out, Dilate_time=time)
    return out
# Read image
# NOTE(review): cv2.imread returns None when the file is missing, in which
# case .astype below raises AttributeError — confirm imori.jpg is present.
img = cv2.imread("imori.jpg").astype(np.float32)
# Grayscale
gray = BGR2GRAY(img)
# Otsu's binarization
otsu = otsu_binarization(gray)
# Morphology - opening
out = Morphology_Opening(otsu, time=1)
# Save result
cv2.imwrite("out.jpg", out)
cv2.imshow("result", out)
cv2.waitKey(0)
cv2.destroyAllWindows()
| StarcoderdataPython |
6659871 | <reponame>denilau17/RepSeq
import sys
import os
import csv
import sqlite3
import clusteringcore
import numpy as np
import scipy as sp
import scipy.cluster
import datetime
import multiprocessing as mp
import itertools
def pdist(X, metric):
    """Condensed pairwise distance vector of ``X`` under ``metric``.

    Mirrors ``scipy.spatial.distance.pdist``: entry k holds
    ``metric(X[i], X[j])`` for i < j in row-major upper-triangle order.
    """
    m = len(X)
    # ``//`` keeps the length integral on Python 3 (the original's ``/``
    # yields a float there, making np.zeros raise); ``range`` replaces the
    # Python-2-only ``xrange`` so the function runs on both interpreters.
    dm = np.zeros(m * (m - 1) // 2, dtype=np.double)
    k = 0
    for i in range(m - 1):
        for j in range(i + 1, m):
            dm[k] = metric(X[i], X[j])
            k += 1
    return dm
def cluster_seqs(seqs,cutoff,linkage='single'):
    """Hierarchically cluster sequences by Levenshtein distance and return
    flat cluster labels cut at ``cutoff``.
    NOTE(review): the empty-input branch returns an (array, dict) tuple while
    every other path returns a bare label array — confirm callers handle both.
    """
    if len(seqs) == 0:
        return (np.array([]),{})
    #checks if there is only 1 unique seq
    if len(seqs) == 1:
        T = np.array([1]*len(seqs))
        return T
    #compute distance matrix
    Y = pdist(seqs, clusteringcore.levenshtein)
    #compute linkage
    Z = sp.cluster.hierarchy.linkage(Y,method=linkage)
    # determine the clusters at level cutoff
    T = sp.cluster.hierarchy.fcluster(Z,cutoff,criterion='distance')
    return T
#get list of subgroups for each pool of clonal assignments
def get_subgroups(c, subject):
    """Return the subgroup names in table ``subject``, ordered by row count.
    NOTE(review): ``subject`` is spliced into the SQL as an identifier (it
    cannot be a bound parameter) — it must come from a trusted source.
    """
    query = "SELECT subgroup, count(*) FROM " + subject + " GROUP BY subgroup ORDER BY count(*);"
    results = c.execute(query).fetchall()
    subgroup = [x[0].encode('ascii', 'ignore') for x in results]
    return subgroup
#get sequence, celltype and CDR3 len info for clustering and post-clustering analysis
def get_subgroup_seqs(c, subgroup):
    """Return [unique_seqs, celltypes, cdr3_len] for one subgroup.

    The subgroup value is now passed as a bound SQL parameter: the original
    spliced it into the query string, which broke on names containing quotes
    and allowed SQL injection.  The module-level ``subject`` table name is
    still concatenated (identifiers cannot be bound) as before.
    NOTE(review): ``seqs`` is de-duplicated but ``celltype`` is not, so the
    two lists are not index-aligned downstream — confirm intent.  Also raises
    IndexError (``results[0]``) when the subgroup has no rows.
    """
    query = "SELECT Sequence, cell_type, CDR3_len FROM " + subject + " WHERE subgroup = ?;"
    results = c.execute(query, (subgroup,)).fetchall()
    seqs = [x[0].encode('ascii', 'ignore') for x in results]
    seqs = list(set(seqs))
    celltype = [x[1].encode('ascii', 'ignore') for x in results]
    cdr3_len = results[0][2]
    return [seqs, celltype, cdr3_len]
#group sequences into clones with max edit distance of the CDR3 length
def clones(data):
    """Cluster one subgroup's sequences into clones.
    ``data`` is [seqs, celltypes, cdr3_len]; the CDR3 length is used as the
    clustering cutoff (see comment above).  Returns int labels per sequence.
    """
    results = cluster_seqs(data[0], data[2])
    t = [int(x) for x in results]
    return t
#format data to write to csv
def format_data(subgroup_list, data, results):
    """Flatten per-subgroup clustering output into CSV rows.

    Each row is [subgroup, cdr3_len, sequence, celltype, clone_id].
    Returns [] when ``data`` and ``results`` (or, per subgroup, the sequence
    and clone lists) disagree in length, matching the original's behavior.
    """
    if len(data) != len(results):
        return []
    rv = []
    for i in range(len(data)):
        subgroup = subgroup_list[i]
        seqs = data[i][0]
        celltype = data[i][1]
        cdr3_len = data[i][2]
        clone_assignments = results[i]
        # Invariant check hoisted out of the inner loop — the original
        # re-evaluated it on every row.  The ``seqs`` guard preserves the
        # original behavior of skipping the check for empty subgroups.
        if seqs and len(seqs) != len(clone_assignments):
            print("not correct order!!")
            return []
        for j in range(len(seqs)):
            rv.append([subgroup, cdr3_len, seqs[j], celltype[j], clone_assignments[j]])
    return rv
def main(db, subject, outfile):
    """Assign clones for every subgroup of ``subject`` in ``db`` and write
    the flattened rows to ``outfile`` as CSV.
    NOTE(review): this sub-file is Python 2 (bare print statements, 'wb'
    csv mode); ``out`` is never closed/flushed explicitly — confirm the
    interpreter exit is relied on to flush it.
    """
    connection = sqlite3.connect(db)
    c = connection.cursor()
    print "getting data to analyze"
    subgroup_list = get_subgroups(c, subject)
    data = []
    for subgroup in subgroup_list:
        x = get_subgroup_seqs(c, subgroup)
        data.append(x)
    # Fan the per-subgroup clustering out over 4 worker processes.
    pool = mp.Pool(processes=4)
    print "assigning clones"
    results = pool.map(clones, data)
    rv = format_data(subgroup_list, data, results)
    out = open(outfile, 'wb')
    csv_out = csv.writer(out)
    csv_out.writerows(rv)
    connection.close()
if __name__ == "__main__":
    # Hard-coded local run configuration (paths specific to one machine).
    db = '/Users/denise/Documents/RepSeq2/IMGT_parsed.sqlite'
    subject = 'IMGT_012'
    outfile = '/Users/denise/Documents/RepSeq2/clones_012_3001_4000.csv'
    main(db, subject, outfile)
| StarcoderdataPython |
4948947 | import cv2
import os
import numpy as np
# Target geometry every accepted image is normalised to.
IMG_WIDTH = 128
IMG_HEIGHT = 128
IMG_CHANNELS = 3
INPUT_PATH = 'dataset/'
# define the format types you shall have
imgFormatType2WorkWithInput = ('PNG', 'png', 'jpeg', 'jpg')
# initialize the variables
X_train = []
ImageNamesListTrain = []
Y_train = []
X_val = []
ImageNamesListValidation = []
Y_val = []
X_test = []
ImageNamesListTest = []
Y_test = []
# Expected layout on disk: INPUT_PATH/<train|test|val>/<class>/<image files>
_, subCategoryDirectoriesInputSet, _ = next(os.walk(INPUT_PATH))
for TrainValidationOrTestIdx in range(len(subCategoryDirectoriesInputSet)):
    tmpTrainValOrTestPath = INPUT_PATH + subCategoryDirectoriesInputSet[TrainValidationOrTestIdx]
    _, SubcategorySet, _ = next(os.walk(tmpTrainValOrTestPath))
    for tmpCategoryIdx in range(len(SubcategorySet)):
        _, _, SubcategoryFiles = next(os.walk(tmpTrainValOrTestPath + '/' + SubcategorySet[tmpCategoryIdx]))
        print(' . we are in directory:', subCategoryDirectoriesInputSet[TrainValidationOrTestIdx],
              '/', SubcategorySet[tmpCategoryIdx])
        print(' .. there are', str(len(SubcategoryFiles)), 'available images')
        for ImageIdx in range(len(SubcategoryFiles)):
            # first check if we have the requested image format type
            if SubcategoryFiles[ImageIdx].endswith(imgFormatType2WorkWithInput):
                print(' . Working on input image', SubcategoryFiles[ImageIdx], '(',
                      str(ImageIdx + 1), '/', str(len(SubcategoryFiles)), ')')
                tmpFullImgName = (INPUT_PATH + subCategoryDirectoriesInputSet[TrainValidationOrTestIdx]
                                  + '/' + SubcategorySet[tmpCategoryIdx]
                                  + '/' + SubcategoryFiles[ImageIdx])
                TmpImg = cv2.imread(tmpFullImgName)  # cv2 returns (height, width, channels), BGR order
                # just check that image is read correctly
                if TmpImg is not None:
                    # kill all small images.  BUG FIX: the original tested
                    # shape[0] twice, so the width was never checked and very
                    # narrow images slipped through.
                    if TmpImg.shape[0] < 50 or TmpImg.shape[1] < 50:
                        print(' . Warning: too small image size for image:', SubcategoryFiles[ImageIdx], 'Ignoring it!')
                    else:
                        # check the image size and type remember it's according to CV2 format
                        WidthSizeCheck = TmpImg.shape[1] - IMG_WIDTH
                        HeightSizeCheck = TmpImg.shape[0] - IMG_HEIGHT
                        NumOfChannelsCheck = TmpImg.shape[2] - IMG_CHANNELS
                        if WidthSizeCheck == 0 and HeightSizeCheck == 0 and NumOfChannelsCheck == 0:
                            print(' ... image was in correct shape')
                        else:
                            print(' ... reshaping image')
                            TmpImg = cv2.resize(TmpImg, (IMG_WIDTH, IMG_HEIGHT), interpolation=cv2.INTER_NEAREST)
                        # route the image into the split matching its top-level directory
                        if subCategoryDirectoriesInputSet[TrainValidationOrTestIdx] == 'train':
                            X_train.append(TmpImg)
                            Y_train.append(tmpCategoryIdx)
                            ImageNamesListTrain.append(SubcategoryFiles[ImageIdx])
                        elif subCategoryDirectoriesInputSet[TrainValidationOrTestIdx] == 'test':
                            X_test.append(TmpImg)
                            Y_test.append(tmpCategoryIdx)
                            ImageNamesListTest.append(SubcategoryFiles[ImageIdx])
                        else:
                            X_val.append(TmpImg)
                            Y_val.append(tmpCategoryIdx)
                            ImageNamesListValidation.append(SubcategoryFiles[ImageIdx])
                else:
                    print(' .. CV Warning: could not read image:', tmpFullImgName)
# For CNN, your input must be a 4-D tensor [batch_size, dimension(e.g. width), dimension (e.g. height), channels]
X_train = np.array(X_train)
Y_train = np.array(Y_train)
X_test = np.array(X_test)
Y_test = np.array(Y_test)
X_val = np.array(X_val)
Y_val = np.array(Y_val)
print('Done!')
1629451 | <reponame>ISISNeutronMuon/HLM_PV_Import
import os
import sys
# noinspection PyUnresolvedReferences
from shared.const import *
# About
# Application metadata shown in the About dialog.
VER = '1.1.4'
B_DATE = '6 Jul 2021'
ISIS_URL = "https://www.isis.stfc.ac.uk/Pages/home.aspx"
# When frozen by PyInstaller, bundled resources live under the temporary
# extraction directory exposed as sys._MEIPASS; otherwise resolve paths
# relative to this source file.
if getattr(sys, 'frozen', False):
    # BASE_PATH = os.path.dirname(sys.executable)
    # noinspection PyProtectedMember
    # noinspection PyUnresolvedReferences
    BASE_PATH = sys._MEIPASS
else:
    BASE_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)))
# region Assets & Layouts
GUI_DIR_PATH = os.path.join(BASE_PATH, 'GUI')
ASSETS_PATH = os.path.join(GUI_DIR_PATH, 'assets')
icon_path = os.path.join(ASSETS_PATH, 'icon.svg')
about_logo_path = os.path.join(ASSETS_PATH, 'isis-logo.png')
loading_animation = os.path.join(ASSETS_PATH, 'loading.gif')
main_window_ui = os.path.join(GUI_DIR_PATH, 'layouts', 'MainWindow.ui')
about_ui = os.path.join(GUI_DIR_PATH, 'layouts', 'about.ui')
db_settings_ui = os.path.join(GUI_DIR_PATH, 'layouts', 'DBSettings.ui')
general_settings_ui = os.path.join(GUI_DIR_PATH, 'layouts', 'GeneralSettings.ui')
ca_settings_ui = os.path.join(GUI_DIR_PATH, 'layouts', 'CASettings.ui')
service_path_dlg_ui = os.path.join(GUI_DIR_PATH, 'layouts', 'ServicePathDialog.ui')
config_entry_ui = os.path.join(GUI_DIR_PATH, 'layouts', 'ConfigEntry.ui')
# endregion
# Directory for storing the manager app settings and persistent data
MANAGER_SETTINGS_DIR = os.path.join(os.getenv('LOCALAPPDATA'), 'HLM Service Manager', '')
MANAGER_SETTINGS_FILE = os.path.join(MANAGER_SETTINGS_DIR, 'settings.ini')
MANAGER_LOGS_DIR = os.path.join(MANAGER_SETTINGS_DIR, 'logs')
MANAGER_LOGS_FILE = os.path.join(MANAGER_LOGS_DIR, 'HLM_Manager.log')
MANAGER_ERR_LOGS_DIR = os.path.join(MANAGER_LOGS_DIR, 'error')
MANAGER_ERR_LOGS_FILE = os.path.join(MANAGER_ERR_LOGS_DIR, 'HLM_Manager_Error.log')
SERVICE_SETTINGS_FILE_NAME = 'settings.ini'
# region Settings Files Templates
# Defaults written when a settings.ini does not yet exist; values are kept
# as strings because configparser stores everything as text.
MANAGER_SETTINGS_TEMPLATE = {
    'Service': {
        'Directory': ''
    },
    'General': {
        'AutoPVConnectionCheck': 'True',
        'AutoLoadExistingConfig': 'False'
    },
    'Defaults': {
        'MeasurementsUpdateInterval': '60'
    }
}
SERVICE_SETTINGS_TEMPLATE = {
    'ChannelAccess': {
        'EPICS_CA_ADDR_LIST': '',
        'ConnectionTimeout': '2',
        'PvStaleAfter': '7200',
        'AddStalePvs': 'False',
        'PV_PREFIX': '',
        'PV_DOMAIN': ''
    },
    'PVImport': {
        'LoopTimer': '5'
    },
    'HeRecoveryDB': {
        'Host': '',
        'Name': ''
    }
}
# endregion
| StarcoderdataPython |
9697591 | <reponame>lancethomps/lt-pylib<filename>ltpylib/dicts.py
#!/usr/bin/env python
# pylint: disable=C0111
from typing import Any, Callable, Dict, List, Optional, Union
from ltpylib import checks, strings
def convert_keys_to_snake_case(
  obj: Union[dict, list],
  recursive: bool = False,
) -> Union[dict, list]:
  """Rename the keys of a dict (or of every dict in a list) to snake_case.

  Mutates the dicts in place; a renamed key is popped and re-inserted, which
  moves it to the end of the dict.  With recursive=True, nested dicts and
  lists of dicts are converted as well.  Returns the original container.
  """
  targets = obj if isinstance(obj, list) else [obj]
  for mapping in targets:
    for key, val in list(mapping.items()):
      snake_key = strings.to_snake_case(key)
      if snake_key != key:
        mapping[snake_key] = mapping.pop(key)
      if recursive:
        if isinstance(val, dict):
          convert_keys_to_snake_case(val, recursive=recursive)
        elif isinstance(val, list) and val and isinstance(val[0], dict):
          for nested in val:
            convert_keys_to_snake_case(nested, recursive=recursive)
  return obj
def convert_string_values_to_correct_type(
  value_to_convert: Union[dict, list],
  convert_numbers: bool = True,
  convert_booleans: bool = True,
  use_decimal: bool = False,
  recursive: bool = False,
) -> Union[dict, list]:
  """Convert string values inside *value_to_convert* to numbers/booleans.

  A list whose first element is a string is treated as a list of strings and
  a NEW list of converted values is returned; otherwise dicts (optionally
  nested, when recursive=True) are mutated in place and the original
  container is returned.

  Args:
    value_to_convert: dict, list of dicts, or list of strings.
    convert_numbers: convert numeric-looking strings (commas allowed).
    convert_booleans: convert boolean-looking strings.
    use_decimal: use decimal.Decimal instead of float for numbers.
    recursive: descend into nested dicts and lists of dicts.
  """
  if isinstance(value_to_convert, list):
    # BUG FIX: guard against the empty list — the unconditional
    # value_to_convert[0] lookup used to raise IndexError here.
    if value_to_convert and isinstance(value_to_convert[0], str):
      return [convert_string_to_correct_type(val, convert_numbers=convert_numbers, convert_booleans=convert_booleans, use_decimal=use_decimal) for val in value_to_convert]
    objs = value_to_convert
  else:
    objs = [value_to_convert]
  for obj_dict in objs:
    for key, val in obj_dict.items():
      if isinstance(val, str):
        obj_dict[key] = convert_string_to_correct_type(val, convert_numbers=convert_numbers, convert_booleans=convert_booleans, use_decimal=use_decimal)
      elif recursive and isinstance(val, dict):
        convert_string_values_to_correct_type(
          val,
          convert_numbers=convert_numbers,
          convert_booleans=convert_booleans,
          use_decimal=use_decimal,
          recursive=recursive,
        )
      elif recursive and isinstance(val, list) and len(val) > 0 and isinstance(val[0], dict):
        for inner_val in val:
          convert_string_values_to_correct_type(
            inner_val,
            convert_numbers=convert_numbers,
            convert_booleans=convert_booleans,
            use_decimal=use_decimal,
            recursive=recursive,
          )
  return value_to_convert
def convert_string_to_correct_type(
  val: str,
  convert_numbers: bool = True,
  convert_booleans: bool = True,
  use_decimal: bool = False,
):
  """Convert a single string to a number or boolean when it looks like one.

  Numbers win over booleans; commas in numbers are stripped.  The original
  string is returned unchanged when no conversion applies.
  """
  if convert_numbers and strings.is_number(val, allow_comma=True):
    return strings.convert_to_number(val, use_decimal=use_decimal, remove_commas=True)
  if convert_booleans and strings.is_boolean(val):
    return strings.convert_to_bool(val)
  return val
def copy_fields(from_val: dict, to_val: dict, fields: List[str], field_converter: Callable[[str], str] = None, field_converter_map: Dict[str, str] = None) -> dict:
  """Copy selected *fields* from one dict into another, optionally renaming.

  Missing fields are silently skipped.  The destination key is produced by
  field_converter(name) if given, else field_converter_map[name] if given,
  else the original name.  Returns *to_val* (mutated in place).
  """
  for name in fields:
    if name not in from_val:
      continue
    if field_converter is not None:
      target_key = field_converter(name)
    elif field_converter_map:
      target_key = field_converter_map[name]
    else:
      target_key = name
    to_val[target_key] = from_val[name]
  return to_val
def find(key: str, obj: dict) -> List[dict]:
  """Recursively yield every value stored under *key* anywhere inside *obj*.

  Walks nested dicts and lists; once a matching key is found its value is
  yielded and NOT descended into further.  Non-dict/list leaves yield
  nothing.
  """
  if isinstance(obj, dict):
    for current_key, current_val in obj.items():
      if current_key == key:
        yield current_val
        continue
      yield from find(key, current_val)
  elif isinstance(obj, list):
    for element in obj:
      yield from find(key, element)
def create_key_getter(key: Union[str, Callable[[dict], Any]]) -> Callable[[dict], Any]:
  """Normalize *key* into a callable: a string becomes a dict.get accessor,
  while an already-callable key is returned unchanged."""
  if not isinstance(key, str):
    return key
  return lambda record: record.get(key)
def find_first_with_key_value(list_of_dicts: List[dict], key: Union[str, Callable[[dict], Any]], expected_value: Any) -> Optional[dict]:
  """Return the first dict whose *key* (name or accessor callable) equals
  *expected_value*, or None when no dict matches."""
  if isinstance(key, str):
    getter = lambda record: record.get(key)
  else:
    getter = key
  for candidate in list_of_dicts:
    if getter(candidate) == expected_value:
      return candidate
  return None
def group_by(list_of_dicts: List[dict], key: Union[str, Callable[[dict], Any]]) -> Dict[Any, List[dict]]:
  """Group the dicts by the value of *key* (a key name or accessor callable).

  Returns a mapping from each distinct key value to the list of dicts that
  produced it, preserving input order within each group.
  """
  getter = key if not isinstance(key, str) else (lambda record: record.get(key))
  grouped: Dict[Any, List[dict]] = {}
  for entry in list_of_dicts:
    grouped.setdefault(getter(entry), []).append(entry)
  return grouped
def unique_key_values(list_of_dicts: List[dict], key: Union[str, Callable[[dict], Any]], include_nulls: bool = False) -> List[Any]:
  """Return the distinct values of *key* across the dicts, in first-seen
  order.  None values are dropped unless include_nulls is True."""
  getter = key if not isinstance(key, str) else (lambda record: record.get(key))
  seen: List[Any] = []
  for entry in list_of_dicts:
    value = getter(entry)
    if value is None and not include_nulls:
      continue
    if value not in seen:
      seen.append(value)
  return seen
def remove_nulls(dict_with_nulls: dict) -> dict:
  """Return a new dict with every None-valued entry dropped (falsy non-None
  values such as 0 and '' are kept)."""
  return dict((k, v) for k, v in dict_with_nulls.items() if v is not None)
def remove_nulls_and_empty(dict_with_nulls: dict) -> dict:
  """Return a new dict keeping only entries whose value passes
  checks.is_not_empty (drops None and empty containers/strings)."""
  return dict((k, v) for k, v in dict_with_nulls.items() if checks.is_not_empty(v))
if __name__ == "__main__":
  # Tiny CLI dispatcher: `python dicts.py <function> [args...]` calls the
  # named module-level function with the remaining (string) arguments and
  # prints a non-None result.
  import sys
  result = globals()[sys.argv[1]](*sys.argv[2:])
  if result is not None:
    print(result)
| StarcoderdataPython |
5156301 | from gpt2.translation.configuration import TranslationConfig
from gpt2.translation.specification import TranslationSpec
from gpt2.translation.translation import Translator | StarcoderdataPython |
9734805 | from __future__ import print_function
import os
import numpy as np
from scipy.misc import imresize
from scipy.ndimage import imread
from pepper.framework.sensor.face_detect import OpenFace
def add_friend_from_directory(directory, name, max_size=1024):
    # type: (str, str, int) -> None
    """Build a face-embedding file for a new friend from a directory of photos.

    Every readable image in *directory* is downscaled (if its largest side
    exceeds *max_size*), run through OpenFace, and the first detected face's
    representation vector is collected.  All vectors are concatenated and
    written to friends/<name>.bin next to this script.

    Parameters
    ----------
    directory : str
        Folder containing the person's photos.
    name : str
        Friend name; determines the output file name.
    max_size : int
        Maximum allowed image side length before resizing.
    """
    openface = OpenFace()
    vectors = []
    listdir = os.listdir(directory)
    for i, item in enumerate(listdir, 1):
        print("\rDetecting Face {}/{}".format(i, len(listdir)), end="")
        # Best-effort per image: a single unreadable or faceless file must not
        # abort the whole batch.  BUG FIX: the original bare `except:` also
        # swallowed KeyboardInterrupt/SystemExit; the cause is now reported.
        try:
            image = imread(os.path.join(directory, item))
            image_size = max(image.shape[0], image.shape[1])
            if image_size > max_size:
                image = imresize(image, max_size/float(image_size))
            # Represent Face as a vector; take the first detected face only.
            representation = openface.represent(image)
            if representation:
                face, bounds = representation[0]
                vectors.append(face)
        except Exception as exc:
            print("\rWarning: Couldn't read {} ({}), skipping file...".format(item, exc))
    # Write Data to .bin file.  NOTE: np.concatenate raises ValueError when no
    # face at all was found — intentional, so an empty file is never written.
    path = os.path.join(os.path.dirname(__file__), "friends", "{}.bin".format(name))
    with open(path, 'wb') as person_file:
        person_file.write(np.concatenate(vectors))
if __name__ == '__main__':
    # Deliberately a no-op when executed directly; call the helper below
    # from a REPL with a photo directory and a friend name.
    pass
    # Use: add_friend_from_directory('<directory>', '<name>')
| StarcoderdataPython |
11295224 | from .ge_exception import GeException
class GeGeneralServerError(GeException):
    """Error raised when there is a server error (not 4xx http code)"""
    # Marker subclass: carries no extra state.  Exists so callers can catch
    # server-side (5xx-style) failures separately from client errors.
    pass
| StarcoderdataPython |
3510959 | <reponame>cv4ppl/face-checkin<gh_stars>1-10
import numpy as np
class Utils:
    """Image <-> flat-vector conversion helpers.

    The original element-by-element Python loops are replaced with
    equivalent vectorized NumPy operations: identical values and dtypes,
    but O(1) Python overhead instead of O(h*w).
    """

    @staticmethod
    def im2vec(shape, img):
        """Flatten the top-left (h, w) region of *img* row-major into a
        float64 vector.

        Args:
            shape: (h, w) region of the image to read.
            img: 2-D array-like of pixel values (e.g. a uint8 grayscale).

        Returns:
            (vector, (h, w)) where vector holds h*w float64 entries.
        """
        h, w = shape
        # np.array copies, so the result never aliases the caller's image;
        # slicing to [:h, :w] matches the original explicit index loops.
        vec = np.array(img, dtype=np.float64)[:h, :w].reshape(h * w)
        return vec, (h, w)

    @staticmethod
    def vec2im(vec, shape):
        """Reshape the first h*w entries of *vec* into an (h, w) uint8 image.

        Values are cast C-style (truncation), exactly as element assignment
        into a uint8 array did in the original loop version.
        """
        h, w = shape
        return np.asarray(vec)[:h * w].reshape(h, w).astype(np.uint8)

    def add_dim(self, vec):
        """Return *vec* as a fresh (1, n) float64 row matrix.

        NOTE(review): kept as an instance method for backward compatibility
        even though it uses no instance state (siblings are static).
        """
        return np.array(vec, dtype=np.float64, ndmin=2)
| StarcoderdataPython |
11351945 | """
All nuitka functions associated for TA
"""
import os
import sys
from setuptools import find_packages
from pkgutil import iter_modules
import dataclasses as dc
import importlib
import pathlib
import re
PACKAGES_DIRS = [
os.getcwd(),
'/opt/venvdm/lib64/python3.8/site-packages/',
'/opt/venvdm/src',
'/usr/lib/python3.8/site-packages/',
'/usr/lib64/python3.8/site-packages/',
]
def find_modules(path):
    """Collect the fully-qualified names of every module under *path*.

    Combines setuptools.find_packages() (packages) with pkgutil.iter_modules
    (plain modules) to produce a flat set of dotted names, skipping
    __main__ and setup at the top level.  Returns None when *path* is falsy.
    """
    if not path:
        return None
    modules = set()
    # NOTE(review): base_package_name is computed but never used below.
    rootdir, base_package_name = os.path.split(path)
    def add_modules4pkg(pkg):
        # Record the package itself, then its direct non-package submodules.
        modules.add(pkg)
        pkgpath = path + '/' + pkg.replace('.', '/')
        if sys.version_info.major == 2 or (sys.version_info.major == 3 and sys.version_info.minor < 6):
            # Python < 3.6: iter_modules yields (finder, name, ispkg) tuples.
            for _, name, ispkg in iter_modules([pkgpath]):
                if not ispkg:
                    modules.add(pkg + '.' + name)
        else:
            # Python >= 3.6: iter_modules yields ModuleInfo named tuples.
            for info in iter_modules([pkgpath]):
                if not info.ispkg:
                    modules.add(pkg + '.' + info.name)
        pass  # no-op left over; kept to avoid touching code in a doc pass
    # Top-level plain modules, excluding entry points and setup scripts.
    for info in iter_modules([path]):
        if not info.ispkg:
            if info.name not in ['__main__', 'setup']:
                modules.add(info.name)
    # All packages (recursively) under path, each with its submodules.
    for pkg in find_packages(path):
        add_modules4pkg(pkg)
    return modules
def dir4module(modname):
    """Return the directory containing module *modname*, or None.

    The module is imported only to locate its __file__, then evicted from
    sys.modules again (followed by a gc pass) so the probe leaves no trace.
    NOTE: for a dotted name importlib.__import__ returns the top-level
    package, so the ROOT package's directory is reported.
    """
    try:
        mod = importlib.__import__(modname)
    except Exception:
        # Import failed (missing module, broken import-time code, ...):
        # report "unknown".  Deliberately broad, but no longer the original
        # bare `except:` that also trapped SystemExit/KeyboardInterrupt.
        return None
    finally:
        if modname in sys.modules:
            del sys.modules[modname]
        import gc
        gc.collect()
    return str(pathlib.Path(mod.__file__).resolve().parent)
def dir4mnode(target_):
    """Resolve the source directory for a module build target.

    An explicit `folder` entry on *target_* wins; otherwise the directory is
    located by importing target_.module via dir4module().
    """
    module = target_.module
    if "folder" in target_:
        return target_.folder
    return dir4module(module)
def flags4module(modname, module_dir, block_modules=None):
    """Build the per-module Nuitka flag list for compiling *modname*.

    Each module discovered under *module_dir* gets an `--include-module`
    flag, unless it matches one of the *block_modules* prefixes, in which
    case it gets `--nofollow-import-to`.  Django-style `migrations`
    packages and test modules are skipped.  The final flag compiles
    *module_dir* itself with `--module`.
    """
    # modnames_ = [modname]
    mods = sorted(find_modules(module_dir))
    # Turn the block list into a single alternation regex (dots escaped) so
    # each candidate name needs only one prefix match.
    disabled_re = None
    if block_modules:
        disabled_re_str = '(' + '|'.join([s.replace('.', '\.') for s in block_modules]) + ')'
        # print(disabled_re_str)
        disabled_re = re.compile(disabled_re_str)
    flags = []
    for mod in mods:
        # NOTE(review): beforename is unused; only the final dotted component
        # (lastname) is inspected below.
        beforename, lastname = os.path.splitext(modname + '.' + mod)
        # Skip names whose last component starts with a digit — presumably
        # version-suffixed artifacts; TODO confirm the intent.
        if not lastname[1:2].isdigit():
            firstname = mod.split('.')[0]
            if 'migrations' in mod.split('.'):
                continue
            if firstname not in ['tests'] and lastname[1:] not in ['tests']:
                modname_ = mod
                if modname != firstname:
                    modname_ = modname + '.' + mod
                if disabled_re and disabled_re.match(modname_):
                    flags.append(' --nofollow-import-to ' + modname_ )
                else:
                    flags.append(' --include-module ' + modname_ )
    flags.append("--module %s" % module_dir)
    return flags
@dc.dataclass
class NuitkaFlags:
    '''
    Observable flag set for the Nuitka compiler.

    Attributes:
        force_packages: packages to always include (--include-package).
        force_modules: modules to always include (--include-module).
        block_packages: packages to exclude (--nofollow-import-to).
        std_flags: base flags emitted for every build.
    '''
    force_packages: list = None  # force packages to include
    force_modules: list = None  # force modules to include
    block_packages: list = None  # disable packages
    std_flags: list = ('show-progress', 'show-scons')  # base flags

    def get_flags(self, out_dir, target_):
        '''
        Build the full Nuitka command-line flag string for *target_*.

        out_dir: directory passed to --output-dir.
        target_: attribute-accessible mapping describing the build target;
            recognized entries (presence tested with `in`): module, folder,
            block_modules, modules, force_modules.
        Returns a single space-separated flag string, or '' when a module
        target's directory cannot be resolved.
        '''
        block_modules = None
        # BUG FIX: previously tested `block_modules in target_` (i.e.
        # `None in target_`), so the block_modules entry was never read.
        if "block_modules" in target_:
            block_modules = target_.block_modules
        # BUG FIX: std_flags defaults to a tuple, and `[''] + tuple` raised
        # TypeError; list() makes the concatenation work for any sequence.
        base_flags = " --".join([''] + list(self.std_flags)).strip()
        flags = [('%s --output-dir="%s"' % (base_flags, out_dir)).strip()]
        if self.force_packages:
            for it_ in self.force_packages:
                flags.append('--include-package=' + it_)
        if self.force_modules:
            for it_ in self.force_modules:
                flags.append('--include-module=' + it_)
        if self.block_packages:
            for it_ in self.block_packages:
                flags.append('--nofollow-import-to=' + it_)
        if "module" in target_:
            # Single-module build: expand to per-submodule include flags.
            module_dir = dir4mnode(target_)
            if not module_dir:
                return ''
            flags += flags4module(target_.module, module_dir, block_modules)
        else:
            # Standalone build.
            flags.append('--standalone')
            flags.append('--follow-imports')
            if "modules" in target_:
                for it_ in target_.modules:
                    flags.append('--nofollow-import-to=' + it_)
            if 'force_modules' in target_:
                for it_ in target_.force_modules:
                    flags.append('--include-module=' + it_)
        return " ".join(flags)
if __name__ == '__main__':
    # Ad-hoc smoke test: print the install directory of the ansible package.
    print(dir4module('ansible'))
    # flags4module
12830451 | <reponame>jeremyschlatter/vaccine-feed-ingest
#!/usr/bin/env python
import json
import pathlib
import sys
import requests
# First CLI argument is the output directory; the raw feed goes to nv.json.
output_dir = pathlib.Path(sys.argv[1])
output_file = output_dir / "nv.json"
with output_file.open("w") as fout:
    # Drupal "views" AJAX endpoint behind the Immunize Nevada vaccine
    # locator; "All"/"" field values request every location, unfiltered.
    r = requests.post(
        "https://www.immunizenevada.org/views/ajax",
        headers={
            # The endpoint expects the locator page as the referer.
            "Referer": "https://www.immunizenevada.org/covid-19-vaccine-locator",
        },
        data={
            "field_type_of_location_value": "All",
            "field_zip_code_value": "",
            "view_name": "vaccine_locator",
            "view_display_id": "block_2",
            "view_path": "/node/2668",
            "_drupal_ajax": 1,
        },
    )
    # Persist the raw JSON payload, newline-terminated.
    json.dump(r.json(), fout)
    fout.write("\n")
| StarcoderdataPython |
5043841 | <reponame>orabhan/twip
from __future__ import unicode_literals
import pandas as pd
def try_int(s):
    """Best-effort int(): return int(s), or None when s is not convertible.

    Only conversion failures (TypeError/ValueError) mean "not a number";
    the original bare `except:` also swallowed KeyboardInterrupt and
    SystemExit, which now propagate.
    """
    try:
        return int(s)
    except (TypeError, ValueError):
        return None
def try_encode(s):
    """Best-effort UTF-8 encode: return s.encode('utf-8'), or None on failure.

    Failure means s has no .encode (AttributeError) or the codec rejects it
    (UnicodeError); other exceptions now propagate instead of being
    swallowed by the original bare `except:`.
    """
    try:
        return s.encode('utf-8')
    except (AttributeError, UnicodeError):
        return None
def try_decode(s):
    """Best-effort UTF-8 decode: return s.decode('utf-8'), else s or None.

    On failure (no .decode attribute, or invalid UTF-8) the input is
    returned unchanged, with falsy inputs collapsed to None — preserving
    the original fallback, but no longer trapping KeyboardInterrupt and
    friends via a bare `except:`.
    """
    try:
        return s.decode('utf-8')
    except (AttributeError, UnicodeError):
        return s or None
def null2none(obj):
    """Collapse pandas-style nulls (None, NaN, NaT, ...) to None; every
    other value passes through unchanged."""
    return None if pd.isnull(obj) else obj
def clean_tweets(df, columns=None, text_columns=None):
    """Coerce count columns to ints, drop rows with unparseable user counts,
    and UTF-8 encode the text columns of a tweet DataFrame.

    Args:
        df: raw tweet DataFrame; must contain 'user.favourites_count' and
            'user.statuses_count' columns.
        columns: count columns to convert; defaults to every column whose
            name contains 'count' but not 'country'.
        text_columns: columns to encode; defaults to every column whose
            name ends with 'text'.

    Returns:
        The cleaned (filtered) DataFrame.  BUG FIX: the original built the
        filtered frame but never returned it, so callers only observed the
        in-place count conversions — the row filtering and text encoding
        were silently lost.
    """
    columns = columns or [c for c in df.columns if 'count' in c.lower() and 'country' not in c.lower()]
    for c in columns:
        df[c] = df[c].apply(try_int)
        print(c + ': ' + str(df[c].isnull().sum()))  # rows that failed int conversion
    # Drop rows whose user counters could not be parsed.
    df = df[~df['user.favourites_count'].isnull()]
    df = df[~df['user.statuses_count'].isnull()]
    text_columns = text_columns or [c for c in df.columns if c.lower().endswith('text')]
    for c in text_columns:
        df[c] = df[c].apply(try_encode)
        print(c + ': ' + str(df[c].isnull().sum()))  # rows that failed encoding
    return df
| StarcoderdataPython |
8058439 | <filename>testData/search/grandChildFieldWithPythonClass.py
from pydantic import BaseModel, validator
class A(BaseModel):
pass
class B(A):
pass
class D:
pass
class C(B, D):
ab<caret>c: str
A(abc='cde')
B(abc='cde')
C(abc='cde')
## count: 0 | StarcoderdataPython |
8066531 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from re import finditer
def convert(df):
    """Encode the binary survey columns of *df* as 0/1 integers, in place.

    Yes/No columns map No->0, Yes->1; gender maps Male->0, Female->1.
    Returns the mutated DataFrame.
    """
    yes_no_map = {'No': 0, 'Yes': 1}
    gender_map = {'Male': 0, 'Female': 1}
    binary_columns = ['SeniorCitizen', 'Partner', 'Dependents', 'PhoneService', 'PaperlessBilling', 'Churn']
    for column in binary_columns:
        df[column] = df[column].replace(yes_no_map)
    df['gender'] = df['gender'].replace(gender_map)
    return df
def revert(df):
    """Decode the 0/1 encoded survey columns of *df* back to their labels,
    in place (inverse of convert()).  Returns the mutated DataFrame."""
    int_to_yes_no = {0: 'No', 1: 'Yes'}
    int_to_gender = {0: 'Male', 1: 'Female'}
    binary_columns = ['SeniorCitizen', 'Partner', 'Dependents', 'PhoneService', 'PaperlessBilling', 'Churn']
    for column in binary_columns:
        df[column] = df[column].replace(int_to_yes_no)
    df['gender'] = df['gender'].replace(int_to_gender)
    return df
def plot_cat(df, nrows=8, ncols=2):
    """Return a plotgrid of all categorical columns.

    Builds an nrows x ncols grid of seaborn countplots, one per non-numeric
    column (excluding 'Churn', which is used as the hue), and annotates each
    bar with its percentage of the whole frame.  Assumes df has a 'Churn'
    column and at most nrows*ncols categorical columns — TODO confirm.
    """
    ncount = len(df)  # total rows; denominator for the percentage labels
    cat_cols = df.select_dtypes(exclude=np.number).columns.drop('Churn')
    fig, axes = plt.subplots(nrows, ncols, constrained_layout=True, figsize=(12,24))
    fig.suptitle('Churn rate/occurrence')
    for i,col in enumerate(cat_cols):
        # Fill the grid row-major: i//ncols selects the row, i%ncols the column.
        ax = sns.countplot(y=col, hue='Churn', data=df, ax=axes[i//ncols][i%ncols])
        ax.set_xlabel('')
        for p in ax.patches:
            # Bar extent: x is the bar end, y its vertical span.
            x=p.get_bbox().get_points()[1,0]
            y=p.get_bbox().get_points()[:,1]
            perc = 100.*x/ncount
            # Wide bars get the label centered inside; narrow bars get it
            # just past the bar end so it stays legible.
            if perc > 10:
                ax.annotate('{:.2f}%'.format(perc), (x/2, y.mean()),
                            ha='center', va='center') # set the alignment of the text
            else:
                ax.annotate('{:.2f}%'.format(perc), (x, y.mean()),
                            ha='left', va='center') # set the alignment of the text
    return fig
def camel_case_split(identifier):
    """Split a camelCase/PascalCase identifier into its word parts.

    Boundaries are lower->Upper transitions and the end of an acronym run
    (e.g. 'HTTPResponse' -> ['HTTP', 'Response']).
    ref -> https://stackoverflow.com/questions/29916065/how-to-do-camelcase-split-in-python
    """
    pattern = '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)'
    return [match.group(0) for match in finditer(pattern, identifier)]
def clean_str(s):
    """Turn a camelCase identifier into a capitalized, space-separated phrase
    (e.g. 'PhoneService' -> 'Phone service')."""
    words = camel_case_split(s)
    return ' '.join(words).capitalize()
def plot_num(df, nrows=2, ncols=2):
    """Return a plotgrid of all numerical columns.

    Builds an nrows x ncols grid of seaborn histograms (with KDE overlays),
    one per numeric column, colored by 'Churn'.  Assumes at most
    nrows*ncols numeric columns — TODO confirm.
    """
    num_cols = df.select_dtypes(include=np.number).columns
    fig, axes = plt.subplots(nrows, ncols, constrained_layout=True, figsize=(12,8))
    fig.suptitle('Churn occurrence')
    for i,col in enumerate(num_cols):
        # Fill the grid row-major: i//ncols selects the row, i%ncols the column.
        ax = sns.histplot(x=col, hue='Churn', data=df, kde=True, ax=axes[i//ncols][i%ncols])
        ax.set_xlabel(f'{clean_str(col)}')
    return fig
3475248 | # Generated by Django 3.1 on 2021-07-11 08:51
from django.db import models
from django.db import migrations
import api.models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Vector model with an
    # API-style string primary key, a JSON details payload, and basic
    # catalog fields.  Generated code — avoid hand-editing the operations.
    initial = True
    dependencies = []
    operations = [
        migrations.CreateModel(
            name="Vector",
            fields=[
                (
                    "id",
                    api.models.ApiIdField(
                        editable=False, max_length=34, primary_key=True, serialize=False
                    ),
                ),
                ("product_id", models.CharField(max_length=32)),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("details", models.JSONField(default=dict)),
                ("title", models.CharField(max_length=32)),
                ("description", models.TextField(blank=True, max_length=512)),
                ("image_url", models.URLField(blank=True)),
                ("active", models.BooleanField(default=True)),
            ],
        ),
    ]
| StarcoderdataPython |
3352333 | <gh_stars>1-10
from dataclasses import dataclass, field
from enum import Enum
from typing import Optional
__NAMESPACE__ = "NISTSchema-SV-IV-list-token-enumeration-1-NS"
class NistschemaSvIvListTokenEnumeration1Type(Enum):
    """Generated enumeration of the legal values for the NISTSchema
    token-list element.  Each member's value is the tuple of whitespace-
    separated tokens making up one allowed xs:list value (generated by
    xsdata from the XSD — do not hand-edit)."""
    THE_RESIDES_EARLY_OF_DATA_UNAMBIGUOUS = (
        "the",
        "resides",
        "early",
        "of",
        "data;",
        "unambiguous",
    )
    TESTING_PARTNERSHIPS_THE_SOFTWARE_AUTOMATICALLY = (
        "testing",
        "partnerships",
        "the",
        "software",
        "automatically",
    )
    G_REACH_AS_CONTROL_OF_HELPING_NIST_ITL = (
        "g",
        "reach",
        "as",
        "Control",
        "of",
        "helping",
        "NIST/ITL",
    )
    LAW_SIMPLEST_AND_ANY_ADOPTION_HELP_WORK_NUMBER_THE = (
        "law",
        "simplest",
        "and",
        "any",
        "adoption",
        "help",
        "Work",
        "number",
        "the",
    )
    AS_IS_TOOLS_AND_NEEDED = (
        "as",
        "is",
        "tools",
        "and",
        "needed",
    )
    A_PARTNERSHIP_MANIPULATE_KNOWN_FOR_PROCESS_THE = (
        "a",
        "partnership",
        "manipulate",
        "known",
        "for",
        "process",
        "the",
    )
@dataclass
class NistschemaSvIvListTokenEnumeration1:
    """Generated root-element binding for the NISTSchema token-list
    enumeration (xsdata output — do not hand-edit)."""
    class Meta:
        # XML element name and namespace used for (de)serialization.
        name = "NISTSchema-SV-IV-list-token-enumeration-1"
        namespace = "NISTSchema-SV-IV-list-token-enumeration-1-NS"
    # Element content: required by the schema, though the binding defaults
    # to None until populated by the parser.
    value: Optional[NistschemaSvIvListTokenEnumeration1Type] = field(
        default=None,
        metadata={
            "required": True,
        }
    )
| StarcoderdataPython |
12809885 | <reponame>borntyping/python-infix
#!/usr/bin/env python
from setuptools import setup
# Read the long description up front with a context manager so the file
# handle is closed promptly (the original passed a bare open(...).read()
# inline, leaking the handle until GC).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup(
    name = 'infix',
    version = '1.2',

    license = 'MIT License',
    url = 'https://github.com/borntyping/python-infix',

    author = '<NAME>',
    author_email = '<EMAIL>',

    description = 'Infix operators for Python',
    long_description = long_description,

    py_modules = ['infix'],

    classifiers = [
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Software Development :: Libraries',
        'Topic :: Utilities',
    ],
)
| StarcoderdataPython |
5192721 | import http.server
import random
import socketserver
PORT = 8000
class ServerHandler(http.server.BaseHTTPRequestHandler):
    """Mock endpoint: answers every PUT with 200 and a randomly chosen body.

    Intended as a stand-in for a submission service whose replies vary
    ('accepted' most of the time, error-ish strings occasionally).
    """

    def do_PUT(self):
        # BUG FIX: random.choices requires len(weights) == len(population);
        # the original passed 2 weights for 4 outcomes, raising ValueError
        # on every request.  'accepted' stays dominant; the three failure
        # replies share the low weight — confirm the intended odds.
        body = random.choices(
            population=['accepted\n', 'invalid\n', 'too old', 'is not available'],
            weights=(1, 0.1, 0.1, 0.1),
            k=1
        )[0]
        self.send_response(200)
        self.send_header("Content-type", "text/html; charset=utf-8")
        self.send_header("Content-Length", str(len(body)))
        self.end_headers()
        self.wfile.write(bytes(body, 'utf-8'))
# Wire the handler into a blocking TCP server on all interfaces and serve
# until interrupted (Ctrl-C).
handler = ServerHandler
httpd = socketserver.TCPServer(("", PORT), handler)
print("Serving at port", PORT)
httpd.serve_forever()
4959782 | from .annota_vo import AnnotaVO
from .dataset_entry_vo import DatasetEntryVO
from .dataset_vo import DatasetVO
from .hub_model_vo import HubModelVO
from .hub_vo import HubVO
from .label_vo import LabelVO
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.