id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
3388702 | <reponame>mcarlen/libbiarc
# Module-level parse results, populated in place by pkfread().
PkfName = 'void'
KnotType = 'void'
NCMP = 0        # number of components declared by the NCMP line
COMP = 0        # node count of the component most recently read
coords = []     # one [x, y, z, 1.0] entry per node
tangents = []   # one [tx, ty, tz] entry per node (BIARC_KNOT files only)
edges = []      # [i, i-1] index pairs chaining consecutive nodes
def pkfread(file):
    """Parse an open PKF 0.2 stream, filling the module-level globals.

    Python 2 code (print statement, xrange).  Exits the process on any
    format mismatch instead of raising.
    """
    global PkfName,KnotType,NCMP,COMP
    global coords,tangents,edges
    if file.readline().strip()!='PKF 0.2':
        print "Not PKF Version 0.2"
        exit(1)
    KnotType, PkfName = file.readline().strip().split()
    # Read ETIC,HIST and CITE
    for i in xrange(3):
        token,val = file.readline().strip().split(" ",1)
        # NOTE(review): `val` is a string, so under Python 2 `val>0` is
        # always True and the else-branch is dead code; `int(val)>0` was
        # probably intended — confirm against the PKF writer.
        if val>0:
            # Skip the section body up to its END marker.
            while file.readline().strip()!='END':
                continue
        else:
            file.readline()
    token,NCMP = file.readline().strip().split()
    if token!='NCMP':
        print "Expected NCMP! Got : "+token
        exit(1)
    for i in xrange(int(NCMP)):
        token,COMP = file.readline().strip().split()
        if token!='COMP':
            print "Expected COMP! Got : "+token
            exit(1)
        if KnotType == 'BIARC_KNOT':
            # Biarc nodes carry a tangent in addition to the position.
            for i in xrange(int(COMP)):
                token,x,y,z,tx,ty,tz = file.readline().strip().split()
                coords.append([float(x),float(y),float(z),1.0])
                tangents.append([float(tx),float(ty),float(tz)])
                if i>0:
                    edges.append([i,i-1])
        else:
            for i in xrange(int(COMP)):
                token,x,y,z = file.readline().strip().split()
                coords.append([float(x),float(y),float(z),1.0])
                if i>0:
                    edges.append([i,i-1])
    # NOTE(review): indentation was lost in transit; this final END check is
    # reconstructed at function level (one END after all components).  If the
    # format emits END per component, it belongs inside the loop above.
    token = file.readline().strip()
    if token!='END':
        print "Expected END : Got "+token
        exit(1)
| StarcoderdataPython |
4812211 | <reponame>micahaza/contract-management-api
from jcapi.extensions.mail import MailSender
from unittest import mock
from threading import Thread
@mock.patch('smtplib.SMTP_SSL', autospec=True)
def test_mail(mail_sender_mock, app):
    """Synchronous send: SMTP login and sendmail are each called exactly once."""
    smtp = mail_sender_mock.return_value
    smtp.login = mock.Mock(return_value={})
    smtp.sendmail = mock.Mock(return_value={})
    sender = MailSender()
    sender.init_app(app)
    msg = sender.build_message('<EMAIL>', 'Hi Bro', 'Simple body, eeee')
    sender.send_message(msg)
    cfg = app.config
    smtp.login.assert_called_once_with(cfg['MAIL_USERNAME'], cfg['MAIL_PASSWORD'])
    smtp.sendmail.assert_called_once_with(cfg['MAIL_DEFAULT_SENDER'], '<EMAIL>', msg.as_string())
@mock.patch('smtplib.SMTP_SSL', autospec=True)
def test_async_mail(mail_sender_mock, app):
    """Asynchronous send goes through the same SMTP login/sendmail path."""
    smtp = mail_sender_mock.return_value
    smtp.login = mock.Mock(return_value={})
    smtp.sendmail = mock.Mock(return_value={})
    sender = MailSender()
    sender.init_app(app)
    msg = sender.build_message('<EMAIL>', 'Hi Bro', 'Simple body, eeee')
    sender.send_message_async(msg)
    cfg = app.config
    smtp.login.assert_called_once_with(cfg['MAIL_USERNAME'], cfg['MAIL_PASSWORD'])
    smtp.sendmail.assert_called_once_with(cfg['MAIL_DEFAULT_SENDER'], '<EMAIL>', msg.as_string())
@mock.patch('smtplib.SMTP_SSL', autospec=True)
def test_mail_from_template(mail_sender_mock, app):
    """A template-built message is delivered like a plain one."""
    smtp = mail_sender_mock.return_value
    smtp.login = mock.Mock(return_value={})
    smtp.sendmail = mock.Mock(return_value={})
    sender = MailSender()
    sender.init_app(app)
    msg = sender.build_message_from_template(
        '<EMAIL>',
        'Welcome To JustContracts.io',
        'welcome',
        name='<NAME>',
        email_verification_link='http://yooooooo.com',
    )
    sender.send_message(msg)
    cfg = app.config
    smtp.login.assert_called_once_with(cfg['MAIL_USERNAME'], cfg['MAIL_PASSWORD'])
    smtp.sendmail.assert_called_once_with(cfg['MAIL_DEFAULT_SENDER'], '<EMAIL>', msg.as_string())
@mock.patch('smtplib.SMTP_SSL', autospec=True)
def test_non_blocking_mail_send(mail_sender_mock, app):
    """send_message_async returns the worker Thread and still hits SMTP once."""
    smtp = mail_sender_mock.return_value
    smtp.login = mock.Mock(return_value={})
    smtp.sendmail = mock.Mock(return_value={})
    sender = MailSender()
    sender.init_app(app)
    msg = sender.build_message_from_template(
        '<EMAIL>',
        'Welcome To JustContracts.io',
        'welcome',
        name='<NAME>',
        email_verification_link='http://yooooooo.com',
    )
    worker = sender.send_message_async(msg)
    assert isinstance(worker, Thread)
    cfg = app.config
    smtp.login.assert_called_once_with(cfg['MAIL_USERNAME'], cfg['MAIL_PASSWORD'])
    smtp.sendmail.assert_called_once_with(cfg['MAIL_DEFAULT_SENDER'], '<EMAIL>', msg.as_string())
| StarcoderdataPython |
3394040 | """
Input __init__.
"""
# Re-export the concrete input implementations at package level; if any
# submodule is broken, fail with the submodule's error message attached
# rather than a bare ImportError.
try:
    from spikey.snn.input.ratemap import RateMap
    from spikey.snn.input.staticmap import StaticMap
    from spikey.snn.input.rbf import RBF
except ImportError as e:
    raise ImportError(f"input/__init__.py failed: {e}")
| StarcoderdataPython |
197395 | <gh_stars>10-100
import pytest
import pandas as pd
import dariah
@pytest.fixture
def dtm():
    """Toy 3x3 document-term matrix: documents a/b/c, terms AAA/BBB/CCC."""
    counts = {"AAA": [1, 4, 7], "BBB": [2, 5, 8], "CCC": [3, 6, 9]}
    return pd.DataFrame(counts, index=["a", "b", "c"])
@pytest.fixture
def riddell_topics():
    # Expected top words per topic from the pure-Python (Riddell) LDA backend.
    return pd.DataFrame(
        {
            "word0": {"topic0": "AAA", "topic1": "CCC"},
            "word1": {"topic0": "CCC", "topic1": "BBB"},
            "word2": {"topic0": "BBB", "topic1": "AAA"},
        }
    )
@pytest.fixture
def riddell_topic_word():
    # Expected topic-word probability matrix (rows: topics, columns: words).
    return pd.DataFrame(
        {
            "AAA": {"topic0": 0.9981867633726201, "topic1": 0.02967969438730532},
            "BBB": {"topic0": 0.0009066183136899366, "topic1": 0.4410813987657949},
            "CCC": {"topic0": 0.0009066183136899366, "topic1": 0.5292389068468998},
        }
    )
@pytest.fixture
def riddell_topic_document():
    # Expected topic-document distribution (columns: documents a/b/c).
    return pd.DataFrame(
        {
            "a": {"topic0": 0.01612903225806452, "topic1": 0.9838709677419355},
            "b": {"topic0": 0.26973684210526316, "topic1": 0.7302631578947368},
            "c": {"topic0": 0.2933884297520661, "topic1": 0.7066115702479339},
        }
    )
@pytest.fixture
def riddell_topic_similarities():
    # Expected topic-topic similarity matrix.
    return pd.DataFrame(
        {
            "topic0": {"topic0": 1.0000000000000002, "topic1": 2.6409361679102195},
            "topic1": {"topic0": 0.21001814797045967, "topic1": 0.9999999999999999},
        }
    )
@pytest.fixture
def riddell_document_similarities():
    # Expected document-document similarity matrix.
    return pd.DataFrame(
        {
            "a": {
                "a": 0.9999999999999999,
                "b": 0.7465284651715263,
                "c": 0.7228895865992244,
            },
            "b": {
                "a": 1.1927144048546063,
                "b": 1.0000000000000002,
                "c": 0.9820273609083001,
            },
            "c": {
                "a": 1.1957201277450218,
                "b": 1.0166958876685324,
                "c": 0.9999999999999999,
            },
        }
    )
@pytest.fixture
def mallet_topics():
    # Expected top words per topic from the MALLET backend.  NOTE(review):
    # topic1 has only two words, hence the None placeholder.
    return pd.DataFrame(
        {
            "word0": {"topic0": "aaa", "topic1": "ccc"},
            "word1": {"topic0": "bbb", "topic1": "bbb"},
            "word2": {"topic0": "ccc", "topic1": None},
        }
    )
@pytest.fixture
def mallet_topic_word():
    # Expected (smoothed) topic-word weights from MALLET.
    return pd.DataFrame(
        {
            "aaa": {"topic0": 9.01, "topic1": 0.01},
            "bbb": {"topic0": 7.01, "topic1": 8.01},
            "ccc": {"topic0": 6.01, "topic1": 12.01},
        }
    )
@pytest.fixture
def mallet_topic_document():
    # Expected topic-document distribution from MALLET.
    return pd.DataFrame(
        {
            "a": {"topic0": 0.2058823529411765, "topic1": 0.7941176470588236},
            "b": {"topic0": 0.7127659574468086, "topic1": 0.28723404255319146},
            "c": {"topic0": 0.4783549783549784, "topic1": 0.5216450216450217},
        }
    )
@pytest.fixture
def mallet_topic_similarities():
    # Expected topic-topic similarity matrix for the MALLET run.
    return pd.DataFrame(
        {
            "topic0": {"topic0": 1.0, "topic1": 0.7927620823177987},
            "topic1": {"topic0": 0.6270117939000307, "topic1": 1.0},
        }
    )
@pytest.fixture
def mallet_document_similarities():
    # Expected document-document similarity matrix for the MALLET run.
    return pd.DataFrame(
        {
            "a": {
                "a": 0.9999999999999999,
                "b": 0.556965487064486,
                "c": 0.7618491191755972,
            },
            "b": {
                "a": 0.6347484950285212,
                "b": 0.9999999999999999,
                "c": 0.8310875275229433,
            },
            "c": {"a": 1.0235465765588327, "b": 0.97974263999169, "c": 1.0},
        }
    )
@pytest.fixture
def random_state():
    """Fixed RNG seed shared by the LDA tests for reproducibility."""
    seed = 23
    return seed
def test_read_mallet_topics(tmpdir):
    """read_mallet_topics parses a MALLET topic file into per-topic word lists."""
    topic_file = tmpdir.mkdir("sub").join("topics.txt")
    content = "0\t0.05\tfoo bar\n1\t0.05\tfoo bar"
    topic_file.write(content)
    parsed = dariah.core.utils.read_mallet_topics(topic_file, num_words=2)
    assert [["foo", "bar"], ["foo", "bar"]] == list(parsed)
def test_riddell_lda(
    dtm,
    riddell_topics,
    riddell_topic_word,
    riddell_topic_document,
    riddell_topic_similarities,
    riddell_document_similarities,
    random_state,
):
    # Train the pure-Python (Riddell) LDA backend on the toy matrix.
    lda = dariah.core.modeling.LDA(
        num_topics=2, num_iterations=10, random_state=random_state
    )
    lda.fit(dtm)
    # NOTE(review): these compare aggregate sums rather than elementwise
    # values — for the string-valued `topics` frame .sum() concatenates, so
    # this is a weak sanity check, not an exact-output assertion.
    assert lda.topics.sum().sum() == riddell_topics.sum().sum()
    assert lda.topic_word.sum().sum() == riddell_topic_word.sum().sum()
    assert lda.topic_document.sum().sum() == riddell_topic_document.sum().sum()
    assert lda.topic_similarities.sum().sum() == riddell_topic_similarities.sum().sum()
    assert (
        lda.document_similarities.sum().sum()
        == riddell_document_similarities.sum().sum()
    )
def test_mallet_lda(
    dtm,
    mallet_topics,
    mallet_topic_word,
    mallet_topic_document,
    mallet_topic_similarities,
    mallet_document_similarities,
):
    # Train via the external MALLET backend; requires the `mallet` binary to
    # be resolvable on PATH.
    lda = dariah.core.modeling.LDA(
        num_topics=2, num_iterations=10, random_state=23, mallet="mallet"
    )
    lda.fit(dtm)
    # NOTE(review): sum-based comparisons — see test_riddell_lda for the same
    # caveat about this being a weak sanity check.
    assert lda.topics.sum().sum() == mallet_topics.sum().sum()
    assert lda.topic_word.sum().sum() == mallet_topic_word.sum().sum()
    assert lda.topic_document.sum().sum() == mallet_topic_document.sum().sum()
    assert lda.topic_similarities.sum().sum() == mallet_topic_similarities.sum().sum()
    assert (
        lda.document_similarities.sum().sum()
        == mallet_document_similarities.sum().sum()
    )
| StarcoderdataPython |
1620198 | from modeltranslation.translator import translator, TranslationOptions
from mezzanine.conf.models import Setting
class TranslatedSetting(TranslationOptions):
    # Expose Mezzanine's Setting.value for per-language translation via
    # django-modeltranslation.
    fields = ("value",)

translator.register(Setting, TranslatedSetting)
| StarcoderdataPython |
118079 | """
test_predictor.py
Class created to run automated unit testings for the Pico & Placa predictor.
The tests are made for the following use-cases:
1. Users are allowed to go outside
2. Users are not allowed to go outside
3. License plate is not valid
4. Date format is incorrect (dd-mm-yyyy)
5. Time format is incorrect (hh h mm)
"""
import unittest
from predictor import predictor, plate_validation, date_validation, time_validation
class TestPredictor(unittest.TestCase):
    """Unit tests for the Pico & Placa predictor and its input validators."""

    def test_car_can_transit(self):
        # Plates whose last digit is NOT restricted on the given weekday, or
        # times outside the restriction window, must be allowed.
        # monday tests
        self.assertTrue(predictor("abc1231", "26/04/2021", "19:40"))
        self.assertTrue(predictor("abc1232", "26/04/2021", "19:40"))
        # tuesday tests
        self.assertTrue(predictor("abc1233", "27/04/2021", "19:40"))
        self.assertTrue(predictor("abc1234", "27/04/2021", "19:40"))
        # wednesday tests
        self.assertTrue(predictor("abc1235", "28/04/2021", "19:40"))
        self.assertTrue(predictor("abc1236", "28/04/2021", "19:40"))
        # thursday tests
        self.assertTrue(predictor("abc1237", "29/04/2021", "19:40"))
        self.assertTrue(predictor("abc1238", "29/04/2021", "19:40"))
        # friday tests
        self.assertTrue(predictor("abc1239", "30/04/2021", "19:40"))
        self.assertTrue(predictor("abc1230", "30/04/2021", "19:40"))
        # saturday tests
        self.assertTrue(predictor("abc1231", "01/05/2021", "9:00"))
        self.assertTrue(predictor("abc1230", "01/05/2021", "19:00"))
        # sunday tests
        self.assertTrue(predictor("abc1231", "02/05/2021", "9:00"))
        self.assertTrue(predictor("abc1230", "02/05/2021", "19:00"))

    def test_car_cannot_transit(self):
        # Same weekday/plate pairs at 19:00 (inside the restriction window)
        # must be rejected.
        # monday tests
        self.assertFalse(predictor("abc1231", "26/04/2021", "19:00"))
        self.assertFalse(predictor("abc1232", "26/04/2021", "19:00"))
        # tuesday tests
        self.assertFalse(predictor("abc1233", "27/04/2021", "19:00"))
        self.assertFalse(predictor("abc1234", "27/04/2021", "19:00"))
        # wednesday tests
        self.assertFalse(predictor("abc1235", "28/04/2021", "19:00"))
        self.assertFalse(predictor("abc1236", "28/04/2021", "19:00"))
        # thursday tests
        self.assertFalse(predictor("abc1237", "29/04/2021", "19:00"))
        self.assertFalse(predictor("abc1238", "29/04/2021", "19:00"))
        # friday tests
        self.assertFalse(predictor("abc1239", "30/04/2021", "19:00"))
        self.assertFalse(predictor("abc1230", "30/04/2021", "19:00"))

    def test_correct_plate(self):
        # license plates can be introduced with uppercased or lowercased letters
        self.assertTrue(plate_validation("abc123"))
        self.assertTrue(plate_validation("abc1234"))
        self.assertTrue(plate_validation("ABC123"))
        self.assertTrue(plate_validation("ABC1234"))

    def test_incorrect_plate(self):
        # plate length is incorrect
        self.assertFalse(plate_validation("abe12"))
        self.assertFalse(plate_validation("abcde123456"))
        # plate first three characters contain numbers
        self.assertFalse(plate_validation("ab1234"))
        self.assertFalse(plate_validation("ab12345"))
        # plate last digits contain letters
        self.assertFalse(plate_validation("abcd12"))
        self.assertFalse(plate_validation("abcd123"))

    def test_correct_date(self):
        # Accepted format is dd/mm/yyyy.
        self.assertTrue(date_validation("29/04/2021"))

    def test_incorrect_date(self):
        # Dash-separated dates are rejected.
        self.assertFalse(date_validation("29-04-2021"))

    def test_correct_time(self):
        # Accepted format is h:mm or hh:mm.
        self.assertTrue(time_validation("09:30"))
        self.assertTrue(time_validation("9:30"))

    def test_incorrect_time(self):
        # 'h'-separated times are rejected.
        self.assertFalse(time_validation("19h30"))
| StarcoderdataPython |
3207341 | import os
import random
import traceback
import discord
from discord.ext import commands, tasks
# Discord guild (server) whose icon is rotated.
GUILD = 384811165949231104
# Directory holding the candidate server-icon images.
IMG_DIR = './data/server-icons'
# Channel id used for logging.  NOTE(review): purpose inferred from its use
# in ServerIcon.log — confirm.
PLAN_Z = 507429352720433152
def find_file(i):
    """Return the path of the first icon whose filename starts with *i*, else None."""
    prefix = str(i)
    return next(
        (f'{IMG_DIR}/{name}' for name in os.listdir(IMG_DIR) if name.startswith(prefix)),
        None,
    )
def shuffle_server_icons():
    """Randomly renumber every icon in IMG_DIR to <index>.<ext>, keeping extensions."""
    indices = list(range(len(os.listdir(IMG_DIR))))
    random.shuffle(indices)
    # The original popped indices off the end of the shuffled list, which is
    # equivalent to walking it in reverse.
    for filename, index in zip(os.listdir(IMG_DIR), reversed(indices)):
        extension = filename.split('.')[-1]
        os.rename(f'{IMG_DIR}/{filename}', f'{IMG_DIR}/{index}.{extension}')
class ServerIcon(commands.Cog):
    """Automatic server icon rotation."""

    def __init__(self, bot):
        self.bot = bot
        # self.check_if_new_week.start()

    async def cog_command_error(self, ctx, error):
        # Surface unexpected command errors in-channel; permission/check
        # failures stay silent.
        if not isinstance(error, commands.CheckFailure):
            await ctx.send(f"```py\n{error.__class__.__name__}: {error}\n```")

    async def rotate_server_icon(self):
        """Pick a random image from IMG_DIR and set it as the guild icon.

        Any failure is reported to the log channel instead of propagating.
        """
        try:
            guild = self.bot.get_guild(GUILD)
            img = random.choice(os.listdir(IMG_DIR))
            img_path = f"{IMG_DIR}/{img}"
            with open(img_path, 'rb') as fp:
                icon = fp.read()
            await guild.edit(icon=icon)
            await self.log(f"Set server icon to `{img_path}`.")
        except Exception as e:
            error = ''.join(traceback.format_exception(e.__class__, e, e.__traceback__))
            await self.log(f"Error rotating server icon:```\n{error}\n```")

    async def log(self, msg):
        """Send *msg* to the logging channel."""
        await self.bot.get_channel(PLAN_Z).send(msg)

    @tasks.loop(hours=1)
    async def check_if_new_week(self):
        # TODO
        pass

    @commands.group(invoke_without_command=True)
    async def icons(self, ctx):
        """Base command for controlling server icon."""
        images = os.listdir(IMG_DIR)
        count = len(images)
        await ctx.send(f"Found `{count}` total images: ```py\n{images}\n```")

    @icons.command()
    async def rotate(self, ctx):
        """Rotate to the next server icon."""
        await self.rotate_server_icon()

    @icons.command()
    async def upload(self, ctx):
        """Add a new image to the icon folder."""
        attachment = ctx.message.attachments[0]
        filename = f"{IMG_DIR}/{attachment.filename}"
        await attachment.save(filename)
        # Fix: report the actual saved path — the original sent a literal
        # "(unknown)" placeholder and never used `filename`.
        await ctx.send(f"Saved as `{filename}`.")
def setup(bot):
    # discord.py extension entry point: register the cog on load.
    bot.add_cog(ServerIcon(bot))
| StarcoderdataPython |
4828298 | <reponame>EkaterinaLisovec/python_traning1
from model.contact import Contact
import random
import string
import os.path
import jsonpickle
import getopt  # for reading command-line options
import sys  # to access those command-line options
# Parse command-line options: -n <number of contacts>, -f <output file>.
# NOTE(review): the long-option names contain spaces and cannot actually be
# typed on a command line; only the short options are usable.
try:
    opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of contacts", "file"])
except getopt.GetoptError as err:
    # Fix: the original called getopt.usage(), which does not exist and
    # raised AttributeError inside the handler.  Report the parse error
    # before exiting instead.
    print(err)
    sys.exit(2)

# Defaults: 5 contacts written to data/contacts.json.
n = 5
f = "data/contacts.json"
for o, a in opts:
    if o == "-n":
        n = int(a)
    elif o == "-f":
        f = a
def random_string(prefix, maxlen):
    """Return *prefix* followed by 0..maxlen-1 random letters/digits/spaces.

    The space character is repeated in the alphabet so it is drawn roughly
    ten times as often as any single other character.
    """
    alphabet = string.ascii_letters + string.digits + " " * 10  # + string.punctuation
    length = random.randrange(maxlen)
    tail = "".join(random.choice(alphabet) for _ in range(length))
    return prefix + tail
# Test data: one contact with fixed (mostly empty) fields plus two fully
# randomized contacts built with random_string().
testdata = [Contact(firstname="", middlename="", lastname="", nickname="", title="",
                    company="", address="", homephone="", mobilephone="", workphone="",
                    fax="", email="", email2="", email3="", homepage="", bday="15",
                    bmonth="September", byear="5555", aday="14", amonth="November",
                    ayear="6666", address2="", phone2="", notes="")] + [
    Contact(firstname=random_string("firstname",20), middlename=random_string("middlename",20), lastname=random_string("lastname",20),
            nickname=random_string("nickname",10), title=random_string("title",10), company=random_string("company",20),
            address=random_string("address",20), homephone=random_string("homephone",10),mobilephone=random_string("mobilephone",10),
            workphone=random_string("workphone",10), fax=random_string("fax",10), email=random_string("email",20),
            email2=random_string("email2",20), email3=random_string("email3",20), homepage=random_string("homepage",20),
            address2=random_string("address2",20), phone2=random_string("phone2",10), notes=random_string("notes",20))
    for i in range(2)]
# Resolve the output path relative to the project directory (one level above
# this script).
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
# Open the file for writing and serialize the generated test data as JSON.
# Fix: a dataset marker fused onto the last line made it a syntax error.
with open(file, "w") as out:
    jsonpickle.set_encoder_options("json", indent=2)
    out.write(jsonpickle.encode(testdata))
174412 | <filename>setup.py
#!/usr/bin/env python
"""The setup script."""
import os
from setuptools import find_packages, setup
# Resolve all paths relative to this file so the build works from any cwd.
here = os.path.abspath(os.path.dirname(__file__))

# Runtime requirements.
with open(os.path.join(here, "requirements.txt"), encoding="utf-8") as requirements_file:
    requirements = requirements_file.read().splitlines()

# Development/test extras (installed via `pip install .[dev]`).
with open(os.path.join(here, "requirements_dev.txt"), encoding="utf-8") as requirements_dev_file:
    dev_requirements = requirements_dev_file.read().splitlines()

# Long description shown on PyPI.
with open(os.path.join(here, "README.rst"), encoding="utf-8") as readme_file:
    readme = readme_file.read()

setup(
    install_requires=requirements,
    long_description=readme,
    include_package_data=True,
    packages=find_packages(include=["zfit_physics", "zfit_physics.models", "zfit_physics.unstable"]),
    test_suite="tests",
    extras_require={"dev": dev_requirements},
    use_scm_version=True,  # derive the version from SCM metadata
    zip_safe=False,
)
| StarcoderdataPython |
1751822 | <reponame>iliankostadinov/thinkpython<filename>Chapter14/Exercise_14_2.py<gh_stars>1-10
#!/usr/bin/env python3
"""
Write a module that imports anagram_sets and provides two new functions:
store_anagrams should store the anagram dictionary in a “shelf”;
read_anagrams should look up a word and return a list of its anagrams.
"""
import anagram_sets
import dbm
import pickle
def store_anagrams(dictionary):
    """Persist an anagram dictionary into the 'db_with_anagrams' dbm store.

    Each value is pickled so arbitrary lists survive the round trip.
    """
    db = dbm.open('db_with_anagrams', 'c')
    for key, anagrams in dictionary.items():
        db[key] = pickle.dumps(anagrams)
    db.close()
def read_anagrams(word):
    """Look up *word* in the shelf and return the list of its anagrams.

    Fix: the module docstring requires read_anagrams to *return* the list,
    but the original only printed it and returned None.  It now prints (for
    backward-compatible script output) and returns the list.

    Raises KeyError if *word* is not in the store.
    """
    db = dbm.open('db_with_anagrams', 'c')
    anagrams = pickle.loads(db[word])
    db.close()
    print(anagrams)
    return anagrams
if __name__ == "__main__":
# print(anagram_sets.all_anagrams('words.txt'))
d = anagram_sets.all_anagrams("words.txt")
store_anagrams(d)
read_anagrams("opst")
| StarcoderdataPython |
1625986 | <gh_stars>1-10
#!/usr/bin/python
import sdk_common
import shutil
import os
import glob
from functools import reduce
import json
import csv
from collections import OrderedDict
# Block in charge of licensing
# Block in charge of licensing
class LicenceSetter(sdk_common.BuildStepUsingGradle):
    """Build step that generates third-party licence reports via Gradle and
    bundles them (plus the SDK docs/licence) into the distribution tree."""

    def __init__(self, logger=None):
        super(LicenceSetter, self).__init__('Licensing', logger)
        # dist directory inside the source tree that ships with the SDK
        self.distribution_directory = reduce(lambda x, y: os.path.join(x, y),
                                             [self.top_directory, 'src', 'main', 'dist'])
        self.third_party_licences = os.path.join(self.distribution_directory, 'ThirdParty-Licences')
        # ordered column-name -> extractor mapping for the TPIP CSV
        self.tpip_csv_format = self.get_csv_format()

    def execute(self):
        """Run the full licensing pipeline; return True on success."""
        self.print_title()
        # NOTE(review): bare `except` swallows everything, including
        # KeyboardInterrupt/SystemExit.
        try:
            self.log_info("Generating 3rd party licence documents")
            self.execute_gradle_task('jar')
            self.execute_gradle_task('dependencyLicenseReport')
            self.execute_gradle_task('downloadLicenses')
            self.log_info("Generating 3rd party reports")
            self.generating_tpip_reports()
            self.log_info("Integrating 3rd party licence documents")
            self.copy_third_party_directory()
            self.log_info("Integrating SDK distribution documentation and licence")
            self.copy_distribution_documentation()
        except:
            self.log_error('Failed to generate licence documentation')
            return False
        self.log_info("Done.")
        return True

    def copy_distribution_documentation(self):
        # Copy top-level markdown docs and the LICENCE file into dist.
        for filename in glob.glob(os.path.join(self.top_directory, '*.md')):
            shutil.copy2(filename, self.distribution_directory)
        licence_file = os.path.join(self.top_directory, 'LICENCE')
        if os.path.exists(licence_file):
            shutil.copy2(licence_file, self.distribution_directory)

    def copy_third_party_directory(self):
        # Replace the ThirdParty-Licences folder with the freshly generated
        # Gradle reports, then drop any jar files that were copied along.
        licenses = reduce(lambda x, y: os.path.join(x, y), [self.build_directory, 'reports', 'dependency-license'])
        tpip_report = reduce(lambda x, y: os.path.join(x, y), [self.build_directory, 'reports', 'license'])
        if os.path.exists(self.third_party_licences):
            self.remove_path(self.third_party_licences, True)
        shutil.copytree(licenses, self.third_party_licences)
        for file in os.listdir(tpip_report):
            shutil.copy2(os.path.join(tpip_report, file), self.third_party_licences)
        for filename in glob.glob(os.path.join(self.third_party_licences, '*.jar')):
            self.remove_path(filename, True)

    def generating_tpip_reports(self):
        # Convert every JSON licence report produced by Gradle into a CSV.
        tpip_report = reduce(lambda x, y: os.path.join(x, y), [self.build_directory, 'reports', 'license'])
        for filename in glob.glob(os.path.join(tpip_report, '*.json')):
            self.generate_tpip_csv(filename)

    def generate_tpip_csv(self, file):
        # Write a CSV next to the JSON report, one row per dependency,
        # using the column order/extractors from tpip_csv_format.
        with open(file, 'r') as json_data:
            data = json.load(json_data)
            if data.get('dependencies'):
                csv_file = os.path.join(os.path.dirname(file), os.path.splitext(os.path.basename(file))[0] + '.csv')
                columns = self.tpip_csv_format.keys()
                with open(csv_file, 'w') as tpip_csv:
                    csv_w = csv.writer(tpip_csv)
                    csv_w.writerow(columns)
                    used_packages = data.get('dependencies')
                    for used_package in used_packages:
                        csv_entries = self.fetch_csv_entries(used_package)
                        csv_w.writerow(map(lambda x: csv_entries.get(x, ""), columns))

    def fetch_csv_entries(self, package_json):
        # Apply each column extractor to one dependency entry (None
        # extractors yield empty cells).
        csv_data = {}
        for key in self.tpip_csv_format.keys():
            csv_data[key] = self.tpip_csv_format[key](package_json) if self.tpip_csv_format[key] else None
        return csv_data

    def get_csv_format(self):
        # Dependency names look like "originator:package:version".
        return OrderedDict([('PkgName', lambda x: x['name'].split(':')[1]),
                            ('PkgType', lambda x: x['file']),
                            ('PkgOriginator', lambda x: x['name'].split(':')[0]),
                            ('PkgVersion', lambda x: x['name'].split(':')[2]),
                            ('PkgSummary', None),
                            ('PkgHomePageURL', None),
                            ('PkgLicense', lambda x: x['licenses'][0]['name']),
                            ('PkgLicenseURL', lambda x: x['licenses'][0]['url']),
                            ('PkgMgrURL', None)]
                           )
| StarcoderdataPython |
1670925 | from typing import Dict, Any
import torch
from malib.algorithm.common.loss_func import LossFunc
from malib.algorithm.common import misc
from malib.utils.episode import EpisodeKey
class DDPGLoss(LossFunc):
    """DDPG loss: one critic TD-regression step and one deterministic
    policy-gradient step per loss_compute() call."""

    def __init__(self):
        super(DDPGLoss, self).__init__()

    def reset(self, policy, configs):
        # Refresh hyper-parameters; rebuild optimizers only when the bound
        # policy instance actually changed.
        self._params.update(configs)
        if policy is not self.policy:
            self._policy = policy
            self.setup_optimizers()

    def step(self):
        # Polyak-average the target networks towards the live ones.
        self.policy.soft_update(self._params["tau"])

    def setup_optimizers(self, *args, **kwargs):
        """Accept training configuration and setup optimizers"""
        if self.optimizers is None:
            # Optimizer class is configurable by name, defaulting to Adam.
            optim_cls = getattr(torch.optim, self._params.get("optimizer", "Adam"))
            self.optimizers = {
                "actor": optim_cls(
                    self.policy.actor.parameters(), lr=self._params["actor_lr"]
                ),
                "critic": optim_cls(
                    self.policy.critic.parameters(), lr=self._params["critic_lr"]
                ),
            }
        else:
            # Re-point existing optimizers at the (possibly new) parameters.
            self.optimizers["actor"].param_groups = []
            self.optimizers["actor"].add_param_group(
                {"params": self.policy.actor.parameters()}
            )
            self.optimizers["critic"].param_groups = []
            self.optimizers["critic"].add_param_group(
                {"params": self.policy.critic.parameters()}
            )

    def loss_compute(self, batch) -> Dict[str, Any]:
        # Unpack the transition batch; rewards/dones reshaped to (B, 1).
        rewards = batch[EpisodeKey.REWARD].view(-1, 1)
        actions = batch[EpisodeKey.ACTION_DIST]
        cur_obs = batch[EpisodeKey.CUR_OBS]
        next_obs = batch[EpisodeKey.NEXT_OBS]
        dones = batch[EpisodeKey.DONE].view(-1, 1)
        cliprange = self._params["grad_norm_clipping"]
        gamma = self.policy.custom_config["gamma"]
        # --- critic update: regress Q(s, a) onto r + gamma * Q'(s', mu'(s'))
        self.optimizers["critic"].zero_grad()
        target_vf_in = torch.cat(
            [next_obs, self.policy.compute_actions_by_target_actor(next_obs)], dim=-1
        )
        next_value = self.policy.target_critic(target_vf_in)
        target_value = rewards + gamma * next_value * (1.0 - dones)
        vf_in = torch.cat([cur_obs, actions], dim=-1)
        actual_value = self.policy.critic(vf_in)
        assert actual_value.shape == target_value.shape, (
            actual_value.shape,
            target_value.shape,
            rewards.shape,
        )
        # target is detached so gradients only flow through the live critic
        value_loss = torch.nn.MSELoss()(actual_value, target_value.detach())
        value_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.policy.critic.parameters(), cliprange)
        self.optimizers["critic"].step()
        # --------------------------------------
        # --- actor update: maximize Q(s, mu(s)) (only actor params stepped)
        self.optimizers["actor"].zero_grad()
        vf_in = torch.cat([cur_obs, self.policy.compute_actions(cur_obs)], dim=-1)
        # use stop gradient here
        policy_loss = -self.policy.critic(vf_in).mean()  # need add regularization?
        policy_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.policy.actor.parameters(), cliprange)
        self.optimizers["actor"].step()
        # --------------------------------------
        loss_names = ["policy_loss", "value_loss", "target_value_est", "eval_value_est"]
        stats_list = [
            policy_loss.detach().item(),
            value_loss.detach().item(),
            target_value.mean().item(),
            actual_value.mean().item(),
        ]
        return dict(zip(loss_names, stats_list))
| StarcoderdataPython |
135298 | <filename>sickbeard/lib/hachoir_parser/file_system/ntfs.py
"""
New Technology File System (NTFS) file system parser.
Sources:
- The NTFS documentation
http://www.linux-ntfs.org/
- NTFS-3G driver
http://www.ntfs-3g.org/
Creation date: 3rd january 2007
Author: <NAME>
"""
SECTOR_SIZE = 512
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, Enum,
UInt8, UInt16, UInt32, UInt64, TimestampWin64,
String, Bytes, Bit,
NullBits, NullBytes, PaddingBytes, RawBytes)
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.text_handler import textHandler, hexadecimal, filesizeHandler
from lib.hachoir_core.tools import humanFilesize, createDict
from lib.hachoir_parser.common.msdos import MSDOSFileAttr32
class BiosParameterBlock(FieldSet):
    """
    BIOS parameter block (bpb) structure
    """
    static_size = 25 * 8  # fixed 25-byte structure, expressed in bits
    MEDIA_TYPE = {0xf8: "Hard disk"}

    def createFields(self):
        # Many FAT-era fields are zero on NTFS, hence the NullBytes entries.
        yield UInt16(self, "bytes_per_sector", "Size of a sector in bytes")
        yield UInt8(self, "sectors_per_cluster", "Size of a cluster in sectors")
        yield NullBytes(self, "reserved_sectors", 2)
        yield NullBytes(self, "fats", 1)
        yield NullBytes(self, "root_entries", 2)
        yield NullBytes(self, "sectors", 2)
        yield Enum(UInt8(self, "media_type"), self.MEDIA_TYPE)
        yield NullBytes(self, "sectors_per_fat", 2)
        yield UInt16(self, "sectors_per_track")
        yield UInt16(self, "heads")
        yield UInt32(self, "hidden_sectors")
        yield NullBytes(self, "large_sectors", 4)

    def validate(self):
        # Return an empty string when valid, an error message otherwise.
        if self["bytes_per_sector"].value not in (256, 512, 1024, 2048, 4096):
            return "Invalid sector size (%u bytes)" % \
                self["bytes_per_sector"].value
        if self["sectors_per_cluster"].value not in (1, 2, 4, 8, 16, 32, 64, 128):
            return "Invalid cluster size (%u sectors)" % \
                self["sectors_per_cluster"].value
        return ""
class MasterBootRecord(FieldSet):
    """NTFS boot sector: jump code, BPB, extended fields and boot code."""
    static_size = 512*8

    def createFields(self):
        yield Bytes(self, "jump", 3, "Intel x86 jump instruction")
        yield String(self, "name", 8)
        yield BiosParameterBlock(self, "bios", "BIOS parameters")
        yield textHandler(UInt8(self, "physical_drive", "(0x80)"), hexadecimal)
        yield NullBytes(self, "current_head", 1)
        yield textHandler(UInt8(self, "ext_boot_sig", "Extended boot signature (0x80)"), hexadecimal)
        yield NullBytes(self, "unused", 1)
        yield UInt64(self, "nb_sectors")
        yield UInt64(self, "mft_cluster", "Cluster location of MFT data")
        yield UInt64(self, "mftmirr_cluster", "Cluster location of copy of MFT")
        yield UInt8(self, "cluster_per_mft", "MFT record size in clusters")
        yield NullBytes(self, "reserved[]", 3)
        yield UInt8(self, "cluster_per_index", "Index block size in clusters")
        yield NullBytes(self, "reserved[]", 3)
        yield textHandler(UInt64(self, "serial_number"), hexadecimal)
        yield textHandler(UInt32(self, "checksum", "Boot sector checksum"), hexadecimal)
        yield Bytes(self, "boot_code", 426)
        yield Bytes(self, "mbr_magic", 2, r"Master boot record magic number (\x55\xAA)")

    def createDescription(self):
        # Human-readable volume size computed from sector count x sector size.
        size = self["nb_sectors"].value * self["bios/bytes_per_sector"].value
        return "NTFS Master Boot Record (%s)" % humanFilesize(size)
class MFT_Flags(FieldSet):
    """16-bit flags word of an MFT file record."""
    static_size = 16

    def createFields(self):
        yield Bit(self, "in_use")
        yield Bit(self, "is_directory")
        yield NullBits(self, "padding", 14)
class Attribute(FieldSet):
    """One attribute of an MFT record: common header plus a type-specific
    payload parsed by the handler registered in ATTR_INFO."""
    # --- Common code ---

    def __init__(self, *args):
        FieldSet.__init__(self, *args)
        self._size = self["size"].value * 8
        type = self["type"].value
        # NOTE(review): _name/_parser are only assigned for known types;
        # createFields() reads self._parser unconditionally, which would
        # raise AttributeError for unknown types unless the FieldSet base
        # provides a default — confirm.
        if type in self.ATTR_INFO:
            self._name = self.ATTR_INFO[type][0]
            self._parser = self.ATTR_INFO[type][2]

    def createFields(self):
        yield Enum(textHandler(UInt32(self, "type"), hexadecimal), self.ATTR_NAME)
        yield UInt32(self, "size")
        yield UInt8(self, "non_resident", "Non-resident flag")
        yield UInt8(self, "name_length", "Name length in bytes")
        yield UInt16(self, "name_offset", "Name offset")
        yield UInt16(self, "flags")
        yield textHandler(UInt16(self, "attribute_id"), hexadecimal)
        yield UInt32(self, "length_attr", "Length of the Attribute")
        yield UInt16(self, "offset_attr", "Offset of the Attribute")
        yield UInt8(self, "indexed_flag")
        yield NullBytes(self, "padding", 1)
        # Delegate the payload to the type-specific parser, falling back to
        # raw bytes when no handler is registered for the type.
        if self._parser:
            for field in self._parser(self):
                yield field
        else:
            size = self["length_attr"].value
            if size:
                yield RawBytes(self, "data", size)
        # Pad out to the attribute's declared on-disk size.
        size = (self.size - self.current_size) // 8
        if size:
            yield PaddingBytes(self, "end_padding", size)

    def createDescription(self):
        return "Attribute %s" % self["type"].display

    FILENAME_NAMESPACE = {
        0: "POSIX",
        1: "Win32",
        2: "DOS",
        3: "Win32 & DOS",
    }

    # --- Parser specific to a type ---
    def parseStandardInfo(self):
        yield TimestampWin64(self, "ctime", "File Creation")
        yield TimestampWin64(self, "atime", "File Altered")
        yield TimestampWin64(self, "mtime", "MFT Changed")
        yield TimestampWin64(self, "rtime", "File Read")
        yield MSDOSFileAttr32(self, "file_attr", "DOS File Permissions")
        yield UInt32(self, "max_version", "Maximum Number of Versions")
        yield UInt32(self, "version", "Version Number")
        yield UInt32(self, "class_id")
        yield UInt32(self, "owner_id")
        yield UInt32(self, "security_id")
        yield filesizeHandler(UInt64(self, "quota_charged", "Quota Charged"))
        yield UInt64(self, "usn", "Update Sequence Number (USN)")

    def parseFilename(self):
        yield UInt64(self, "ref", "File reference to the parent directory")
        yield TimestampWin64(self, "ctime", "File Creation")
        yield TimestampWin64(self, "atime", "File Altered")
        yield TimestampWin64(self, "mtime", "MFT Changed")
        yield TimestampWin64(self, "rtime", "File Read")
        yield filesizeHandler(UInt64(self, "alloc_size", "Allocated size of the file"))
        yield filesizeHandler(UInt64(self, "real_size", "Real size of the file"))
        yield UInt32(self, "file_flags")
        yield UInt32(self, "file_flags2", "Used by EAs and Reparse")
        yield UInt8(self, "filename_length", "Filename length in characters")
        yield Enum(UInt8(self, "filename_namespace"), self.FILENAME_NAMESPACE)
        # UTF-16-LE: two bytes per character.
        size = self["filename_length"].value * 2
        if size:
            yield String(self, "filename", size, charset="UTF-16-LE")

    def parseData(self):
        size = (self.size - self.current_size) // 8
        if size:
            yield Bytes(self, "data", size)

    def parseBitmap(self):
        # Remaining size in bits, one Bit field per bit.
        size = (self.size - self.current_size)
        for index in xrange(size):
            yield Bit(self, "bit[]")

    # --- Type information ---
    # NOTE(review): keys 0x40 and 0xC0 each appear twice; in a dict literal
    # the later entry silently overrides the earlier one, so 'vol_ver' and
    # 'sym_link' are never used.
    ATTR_INFO = {
        0x10: ('standard_info', 'STANDARD_INFORMATION ', parseStandardInfo),
        0x20: ('attr_list', 'ATTRIBUTE_LIST ', None),
        0x30: ('filename', 'FILE_NAME ', parseFilename),
        0x40: ('vol_ver', 'VOLUME_VERSION', None),
        0x40: ('obj_id', 'OBJECT_ID ', None),
        0x50: ('security', 'SECURITY_DESCRIPTOR ', None),
        0x60: ('vol_name', 'VOLUME_NAME ', None),
        0x70: ('vol_info', 'VOLUME_INFORMATION ', None),
        0x80: ('data', 'DATA ', parseData),
        0x90: ('index_root', 'INDEX_ROOT ', None),
        0xA0: ('index_alloc', 'INDEX_ALLOCATION ', None),
        0xB0: ('bitmap', 'BITMAP ', parseBitmap),
        0xC0: ('sym_link', 'SYMBOLIC_LINK', None),
        0xC0: ('reparse', 'REPARSE_POINT ', None),
        0xD0: ('ea_info', 'EA_INFORMATION ', None),
        0xE0: ('ea', 'EA ', None),
        0xF0: ('prop_set', 'PROPERTY_SET', None),
        0x100: ('log_util', 'LOGGED_UTILITY_STREAM', None),
    }
    ATTR_NAME = createDict(ATTR_INFO, 1)
class File(FieldSet):
    """One MFT file record: fixed header followed by a list of attributes
    terminated by an 0xFFFFFFFF marker."""
    # static_size = 48*8

    def __init__(self, *args):
        FieldSet.__init__(self, *args)
        # Record size comes from the header's allocation field.
        self._size = self["bytes_allocated"].value * 8

    def createFields(self):
        yield Bytes(self, "signature", 4, "Usually the magic is 'FILE'")
        yield UInt16(self, "usa_ofs", "Update Sequence Array offset")
        yield UInt16(self, "usa_count", "Update Sequence Array count")
        yield UInt64(self, "lsn", "$LogFile sequence number for this record")
        yield UInt16(self, "sequence_number", "Number of times this mft record has been reused")
        yield UInt16(self, "link_count", "Number of hard links")
        yield UInt16(self, "attrs_offset", "Byte offset to the first attribute")
        yield MFT_Flags(self, "flags")
        yield UInt32(self, "bytes_in_use", "Number of bytes used in this record")
        yield UInt32(self, "bytes_allocated", "Number of bytes allocated for this record")
        yield UInt64(self, "base_mft_record")
        yield UInt16(self, "next_attr_instance")
        # The below fields are specific to NTFS 3.1+ (Windows XP and above)
        yield NullBytes(self, "reserved", 2)
        yield UInt32(self, "mft_record_number", "Number of this mft record")
        # Skip to the first attribute as declared by the header.
        padding = self.seekByte(self["attrs_offset"].value, relative=True)
        if padding:
            yield padding
        # Parse attributes until the 0xFFFFFFFF end marker (or end of record).
        while not self.eof:
            addr = self.absolute_address + self.current_size
            if self.stream.readBytes(addr, 4) == "\xFF\xFF\xFF\xFF":
                yield Bytes(self, "attr_end_marker", 8)
                break
            yield Attribute(self, "attr[]")
        # Leftover used bytes, then unused-but-allocated padding.
        size = self["bytes_in_use"].value - self.current_size//8
        if size:
            yield RawBytes(self, "end_rawdata", size)
        size = (self.size - self.current_size) // 8
        if size:
            yield RawBytes(self, "end_padding", size, "Unused but allocated bytes")

    def createDescription(self):
        # Summarize filename, size and DOS attributes when present.
        text = "File"
        if "filename/filename" in self:
            text += ' "%s"' % self["filename/filename"].value
        if "filename/real_size" in self:
            text += ' (%s)' % self["filename/real_size"].display
        if "standard_info/file_attr" in self:
            text += ', %s' % self["standard_info/file_attr"].display
        return text
class NTFS(Parser):
    """NTFS file system parser: boot sector (MBR/BPB) followed by MFT records."""
    # NOTE(review): the rendering of this chunk may have collapsed whitespace
    # inside the magic (the OEM id is normally "NTFS" padded with spaces) — confirm.
    MAGIC = "\xEB\x52\x90NTFS "
    PARSER_TAGS = {
        "id": "ntfs",
        "category": "file_system",
        "description": "NTFS file system",
        "min_size": 1024*8,
        "magic": ((MAGIC, 0),),
    }
    endian = LITTLE_ENDIAN
    # Placeholder for a cached cluster size; not referenced in this chunk.
    _cluster_size = None

    def validate(self):
        """Cheap sanity check: boot-sector magic plus BIOS parameter block validation."""
        if self.stream.readBytes(0, len(self.MAGIC)) != self.MAGIC:
            return "Invalid magic string"
        err = self["mbr/bios"].validate()
        if err:
            return err
        return True

    def createFields(self):
        yield MasterBootRecord(self, "mbr")
        bios = self["mbr/bios"]
        # Cluster size in bytes; the MFT start position is given in clusters.
        cluster_size = bios["sectors_per_cluster"].value * bios["bytes_per_sector"].value
        offset = self["mbr/mft_cluster"].value * cluster_size
        padding = self.seekByte(offset, relative=False)
        if padding:
            yield padding
        # NOTE(review): hard cap of 1000 MFT records parsed — confirm intentional.
        for index in xrange(1000):
            yield File(self, "file[]")
        size = (self.size - self.current_size) // 8
        if size:
            yield RawBytes(self, "end", size)
| StarcoderdataPython |
1639407 | <reponame>luxuantao/golden-retriever
"""
Query ES and merge results with original hotpot data.
Input:
- query file
- hotpotqa data
- output filename
- whether this is for hop1 or hop2
Outputs:
- json file containing a list of:
{'context', 'question', '_id', 'query', 'json_context'}
context -- the concatenation of the top n paragraphs for the given query
to ES.
json_context -- same as context, but in json structure same as original
hotpot data.
question, _id -- identical to those from the original HotPotQA data
"""
import argparse
from tqdm import tqdm
from search.search import bulk_text_query
from utils.io import load_json_file, write_json_file
from utils.general import chunks, make_context
def main(query_file, question_file, out_file, top_n):
    """Run saved ES queries for each question and merge results with the HotpotQA data.

    query_file -- JSON mapping question _id -> query (plain string, or a ranked
        nested list whose first entry's first element is the query text)
    question_file -- original HotpotQA questions (list of dicts with '_id', 'question')
    out_file -- destination JSON file
    top_n -- number of ES hits to keep per query
    """
    query_data = load_json_file(query_file)
    question_data = load_json_file(question_file)
    out_data = []
    for chunk in tqdm(list(chunks(question_data, 100))):
        # Queries may be stored either as a plain string or as a ranked list;
        # take the top entry's text in the latter case.
        queries = [
            query_data[datum['_id']]
            if isinstance(query_data[datum['_id']], str)
            else query_data[datum['_id']][0][0]
            for datum in chunk
        ]
        es_results = bulk_text_query(queries, topn=top_n, lazy=False)
        # Reuse the query computed above instead of re-deriving it per datum.
        for es_result, datum, query in zip(es_results, chunk, queries):
            _id = datum['_id']
            question = datum['question']
            context = make_context(question, es_result)
            json_context = [
                [p['title'], p['data_object']['text']]
                for p in es_result
            ]
            out_data.append({
                '_id': _id,
                'question': question,
                'context': context,
                'query': query,
                'json_context': json_context
            })
    write_json_file(out_data, out_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Query ES and merge results with original hotpot data.')
parser.add_argument('query_file', help='.preds file containing ES queries ')
parser.add_argument('question_file', help='.json file containing original questions and ids')
parser.add_argument('out_file', help='filename to write data out to')
parser.add_argument('--top_n', default=5,
help='number of docs to return from ES',
type=int)
args = parser.parse_args()
main(args.query_file, args.question_file, args.out_file, args.top_n)
| StarcoderdataPython |
198584 | <filename>hddcoin/cmds/wallet_funcs.py<gh_stars>1-10
import asyncio
import sys
import time
from datetime import datetime
from decimal import Decimal
from typing import Callable, List, Optional, Tuple, Dict
import aiohttp
from hddcoin.cmds.units import units
from hddcoin.rpc.wallet_rpc_client import WalletRpcClient
from hddcoin.server.start_wallet import SERVICE_NAME
from hddcoin.util.bech32m import encode_puzzle_hash
from hddcoin.util.byte_types import hexstr_to_bytes
from hddcoin.util.config import load_config
from hddcoin.util.default_root import DEFAULT_ROOT_PATH
from hddcoin.util.ints import uint16, uint64
from hddcoin.wallet.transaction_record import TransactionRecord
from hddcoin.wallet.util.wallet_types import WalletType
def print_transaction(tx: TransactionRecord, verbose: bool, name) -> None:
    """Pretty-print one wallet transaction.

    tx -- transaction record to display
    verbose -- when True, dump the raw record instead of the summary
    name -- address prefix (e.g. "hdd") used for bech32 address encoding
    """
    if verbose:
        print(tx)
    else:
        # Amounts are stored in the smallest unit ("bytes"); scale to HDD.
        hddcoin_amount = Decimal(int(tx.amount)) / units["hddcoin"]
        to_address = encode_puzzle_hash(tx.to_puzzle_hash, name)
        print(f"Transaction {tx.name}")
        print(f"Status: {'Confirmed' if tx.confirmed else ('In mempool' if tx.is_in_mempool() else 'Pending')}")
        print(f"Amount {'sent' if tx.sent else 'received'}: {hddcoin_amount} {name}")
        print(f"To address: {to_address}")
        print("Created at:", datetime.fromtimestamp(tx.created_at_time).strftime("%Y-%m-%d %H:%M:%S"))
        print("")
async def get_transaction(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
    """Fetch a single transaction by hex id over the wallet RPC and print it."""
    wallet_id = args["id"]
    transaction_id = hexstr_to_bytes(args["tx_id"])
    config = load_config(DEFAULT_ROOT_PATH, "config.yaml", SERVICE_NAME)
    # Address prefix (e.g. "hdd") for the currently selected network.
    name = config["network_overrides"]["config"][config["selected_network"]]["address_prefix"]
    tx: TransactionRecord = await wallet_client.get_transaction(wallet_id, transaction_id=transaction_id)
    print_transaction(tx, verbose=(args["verbose"] > 0), name=name)
async def get_transactions(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
    """List a wallet's transactions, paginating 5 per screen on interactive TTYs."""
    wallet_id = args["id"]
    paginate = args["paginate"]
    if paginate is None:
        # Default: only paginate when stdout is an interactive terminal.
        paginate = sys.stdout.isatty()
    txs: List[TransactionRecord] = await wallet_client.get_transactions(wallet_id)
    config = load_config(DEFAULT_ROOT_PATH, "config.yaml", SERVICE_NAME)
    name = config["network_overrides"]["config"][config["selected_network"]]["address_prefix"]
    if len(txs) == 0:
        print("There are no transactions to this address")
    offset = args["offset"]
    num_per_screen = 5 if paginate else len(txs)
    for i in range(offset, len(txs), num_per_screen):
        for j in range(0, num_per_screen):
            if i + j >= len(txs):
                break
            print_transaction(txs[i + j], verbose=(args["verbose"] > 0), name=name)
        if i + num_per_screen >= len(txs):
            return None
        print("Press q to quit, or c to continue")
        while True:
            # NOTE(review): reads one character from stdin; on line-buffered
            # terminals the user must also press Enter for input to arrive.
            entered_key = sys.stdin.read(1)
            if entered_key == "q":
                return None
            elif entered_key == "c":
                break
def check_unusual_transaction(amount: Decimal, fee: Decimal):
    """Return True when the fee is at least as large as the amount being sent."""
    return amount <= fee
async def send(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
    """Submit a send transaction, then poll briefly for peer acceptance."""
    wallet_id = args["id"]
    amount = Decimal(args["amount"])
    fee = Decimal(args["fee"])
    address = args["address"]
    override = args["override"]
    # Refuse fee >= amount unless the user explicitly overrides.
    if not override and check_unusual_transaction(amount, fee):
        print(
            f"A transaction of amount {amount} and fee {fee} is unusual.\n"
            f"Pass in --override if you are sure you mean to do this."
        )
        return
    print("Submitting transaction...")
    # Convert display units (HDD) to the smallest on-chain unit.
    final_amount = uint64(int(amount * units["hddcoin"]))
    final_fee = uint64(int(fee * units["hddcoin"]))
    res = await wallet_client.send_transaction(wallet_id, final_amount, address, final_fee)
    tx_id = res.name
    start = time.time()
    # Poll for up to ~10 seconds for the transaction to reach full nodes.
    while time.time() - start < 10:
        await asyncio.sleep(0.1)
        tx = await wallet_client.get_transaction(wallet_id, tx_id)
        if len(tx.sent_to) > 0:
            print(f"Transaction submitted to nodes: {tx.sent_to}")
            print(f"Do hddcoin wallet get_transaction -f {fingerprint} -tx 0x{tx_id} to get status")
            return None
    print("Transaction not yet submitted to nodes")
    print(f"Do 'hddcoin wallet get_transaction -f {fingerprint} -tx 0x{tx_id}' to get status")
async def get_address(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
    """Print the next unused receive address for the wallet (without creating a new one)."""
    wallet_id = args["id"]
    res = await wallet_client.get_next_address(wallet_id, False)
    print(res)
async def delete_unconfirmed_transactions(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
    """Drop all unconfirmed transactions for the wallet and report success."""
    wallet_id = args["id"]
    await wallet_client.delete_unconfirmed_transactions(wallet_id)
    print(f"Successfully deleted all unconfirmed transactions for wallet id {wallet_id} on key {fingerprint}")
def wallet_coin_unit(typ: WalletType, address_prefix: str) -> Tuple[str, int]:
    """Map a wallet type to the (unit label, scale divisor) used for display.

    Coloured coins use their own unit; standard-style wallets are shown in
    HDD; anything else falls back to raw bytes.
    """
    hdd_denominated = {
        WalletType.STANDARD_WALLET,
        WalletType.POOLING_WALLET,
        WalletType.MULTI_SIG,
        WalletType.RATE_LIMITED,
    }
    if typ == WalletType.COLOURED_COIN:
        return "", units["colouredcoin"]
    if typ in hdd_denominated:
        return address_prefix, units["hddcoin"]
    return "", units["byte"]
def print_balance(amount: int, scale: int, address_prefix: str) -> str:
    """Format a raw integer amount as a balance string.

    The scaled value and prefix always appear; when scale > 1 the exact raw
    value is appended so no precision is hidden from the user.
    """
    parts = [f"{amount / scale} {address_prefix} "]
    if scale > 1:
        parts.append(f"({amount} byte)")
    return "".join(parts)
async def print_balances(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
    """Print sync status, HODL deposit total, and per-wallet balance breakdowns."""
    summaries_response = await wallet_client.get_wallets()
    config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
    address_prefix = config["network_overrides"]["config"][config["selected_network"]]["address_prefix"]
    # lazy load HODL stuff here for cleaner diff
    import hddcoin.hodl.exc
    from hddcoin.hodl.hodlrpc import HodlRpcClient
    hodlRpcClient = HodlRpcClient(fingerprint)
    try:
        rpcRet = await hodlRpcClient.get("getTotalHodlForWallet")
        hodl_balance_bytes = rpcRet["committed_bytes"]
        hodl_balance_hdd = Decimal(hodl_balance_bytes) / int(1e12)
        # emulating upstream repr for now
        hodl_balance_str = f"{hodl_balance_hdd} hdd ({hodl_balance_bytes} byte)"
    except hddcoin.hodl.exc.HodlConnectionError:
        hodl_balance_str = "< UNABLE TO CONNECT TO HODL SERVER >"
    except Exception as e:
        hodl_balance_str = f"ERROR: {e!r}"
    finally:
        # Always tear down the HODL client, even when the lookup failed.
        hodlRpcClient.close()
        await hodlRpcClient.await_closed()
    print(f"Wallet height: {await wallet_client.get_height_info()}")
    print(f"Sync status: {'Synced' if (await wallet_client.get_synced()) else 'Not synced'}")
    print(f"Balances, fingerprint: {fingerprint}")
    print(f"HODL deposits: {hodl_balance_str}")
    for summary in summaries_response:
        wallet_id = summary["id"]
        balances = await wallet_client.get_wallet_balance(wallet_id)
        typ = WalletType(int(summary["type"]))
        # NOTE(review): this rebinds the loop-external `address_prefix`, so a
        # wallet type that returns "" here affects later iterations — confirm.
        address_prefix, scale = wallet_coin_unit(typ, address_prefix)
        print(f"Wallet ID {wallet_id} type {typ.name} {summary['name']}")
        print(f" -Total Balance: {print_balance(balances['confirmed_wallet_balance'], scale, address_prefix)}")
        print(
            f" -Pending Total Balance: {print_balance(balances['unconfirmed_wallet_balance'], scale, address_prefix)}"
        )
        print(f" -Spendable: {print_balance(balances['spendable_balance'], scale, address_prefix)}")
        print(f" -Max Send Amount: {print_balance(balances['max_send_amount'], scale, address_prefix)}")
async def get_wallet(wallet_client: WalletRpcClient, fingerprint: int = None) -> Optional[Tuple[WalletRpcClient, int]]:
    """Select a key (prompting when several exist), log the wallet in, and
    drive the optional restore-from-backup flow.

    Returns (wallet_client, fingerprint) on success, None when the user quits
    or login fails.

    Fix: the failure branch previously did ``log_in_response[error]`` —
    indexing the response dict by the error *value*, which raises KeyError —
    instead of printing the error itself.
    """
    if fingerprint is not None:
        fingerprints = [fingerprint]
    else:
        fingerprints = await wallet_client.get_public_keys()
    if len(fingerprints) == 0:
        print("No keys loaded. Run 'hddcoin keys generate' or import a key")
        return None
    if len(fingerprints) == 1:
        fingerprint = fingerprints[0]
    if fingerprint is not None:
        log_in_response = await wallet_client.log_in(fingerprint)
    else:
        # Interactive key picker: loop until a valid index or 'q'.
        print("Choose wallet key:")
        for i, fp in enumerate(fingerprints):
            print(f"{i+1}) {fp}")
        val = None
        while val is None:
            val = input("Enter a number to pick or q to quit: ")
            if val == "q":
                return None
            if not val.isdigit():
                val = None
            else:
                index = int(val) - 1
                if index >= len(fingerprints):
                    print("Invalid value")
                    val = None
                    continue
                else:
                    fingerprint = fingerprints[index]
        assert fingerprint is not None
        log_in_response = await wallet_client.log_in(fingerprint)
    if log_in_response["success"] is False:
        if log_in_response["error"] == "not_initialized":
            # Wallet needs a restore; offer cloud backup, own file, or skip.
            use_cloud = True
            if "backup_path" in log_in_response:
                path = log_in_response["backup_path"]
                print(f"Backup file from backup.chia.net downloaded and written to: {path}")
                val = input("Do you want to use this file to restore from backup? (Y/N) ")
                if val.lower() == "y":
                    log_in_response = await wallet_client.log_in_and_restore(fingerprint, path)
                else:
                    use_cloud = False
            if "backup_path" not in log_in_response or use_cloud is False:
                if use_cloud is True:
                    val = input(
                        "No online backup file found,\n Press S to skip restore from backup"
                        "\n Press F to use your own backup file: "
                    )
                else:
                    val = input(
                        "Cloud backup declined,\n Press S to skip restore from backup"
                        "\n Press F to use your own backup file: "
                    )
                if val.lower() == "s":
                    log_in_response = await wallet_client.log_in_and_skip(fingerprint)
                elif val.lower() == "f":
                    val = input("Please provide the full path to your backup file: ")
                    log_in_response = await wallet_client.log_in_and_restore(fingerprint, val)
    if "success" not in log_in_response or log_in_response["success"] is False:
        if "error" in log_in_response:
            error = log_in_response["error"]
            print(f"Error: {error}")
        return None
    return wallet_client, fingerprint
async def defrag(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
    """Defragment the wallet, reducing the number of coins in it.
    This increases the maximum amount that can be sent in a single transaction.

    args keys used: "id" (wallet id), "fee" (HDD), "address" (optional target),
    "override" (allow large fee), "no_confirm" (skip prompts).
    """
    # This is currently an extremely simple algorithm. We just send the maximum possible amount to
    # ourselves, using the built in wallet restrictions (which are based on "reasonable" cost limits
    # per block).
    #
    # Successive calls to this will always result in a single coin in the wallet.
    from hddcoin.hodl.util import getNthWalletAddr, getPkSkFromFingerprint, loadConfig
    wallet_id = args["id"]
    fee_hdd = Decimal(args["fee"])
    fee_bytes = uint64(int(fee_hdd * units["hddcoin"]))
    target_address = args["address"]
    override = args["override"]
    no_confirm = args["no_confirm"]
    # Guard against obviously excessive fees unless explicitly overridden.
    if fee_hdd >= 1 and (override == False):
        print(f"fee of {fee_hdd} HDD seems too large (use --override to force)")
        return
    elif target_address and len(target_address) != 62:
        print("Address is invalid")
        return
    config = loadConfig()
    sk = getPkSkFromFingerprint(fingerprint)[1]
    if not target_address:
        target_address = getNthWalletAddr(config, sk, 0)
    else:
        # Verify the target is one of our own first `check_count` addresses;
        # the for/else runs the warning only when no match was found.
        check_count = 100
        for i in range(check_count):
            if target_address == getNthWalletAddr(config, sk, i):
                break  # address is confirmed as one of ours
        else:
            print("WARNING!!!\nWARNING!!!\nWARNING!!! ", end = "")
            print(f"The given address is not one of the first {check_count} wallet addresses!")
            print("WARNING!!!\nWARNING!!!")
            inp = input(f"Is {target_address} where you want to defrag to? [y/N] ")
            if not inp or inp[0].lower() == "n":
                print("Aborting defrag!")
                return
    # Figure out the maximum value the wallet can send at the moment
    balances = await wallet_client.get_wallet_balance(wallet_id)
    max_send_bytes = balances["max_send_amount"]
    spendable_bytes = balances["spendable_balance"]
    max_send_hdd = Decimal(max_send_bytes) / units["hddcoin"]
    spendable_hdd = Decimal(spendable_bytes) / units["hddcoin"]
    print(f"Total of spendable coins in wallet (right now): {spendable_hdd} HDD")
    print(f"Maximum value you can send right now (pre-defrag): {max_send_hdd} HDD")
    if not no_confirm:
        if max_send_bytes == spendable_bytes:
            inp = input("Your wallet is not currently limited by fragmentation! Continue? [y/N] ")
        else:
            inp = input("Do you wish to defrag and consolidate some coins? [y/N] ")
        if not inp or inp[0].lower() == "n":
            print("Aborting defrag!")
            return
    # Now do one round of defrag!
    defrag_coin_size_bytes = max_send_bytes - fee_bytes
    res = await wallet_client.send_transaction(wallet_id,
                                               defrag_coin_size_bytes,
                                               target_address,
                                               fee_bytes)
    tx_id = res.name
    start = time.time()
    # Poll briefly for peer acceptance, mirroring send().
    while time.time() - start < 10:
        await asyncio.sleep(0.1)
        tx = await wallet_client.get_transaction(wallet_id, tx_id)
        if len(tx.sent_to) > 0:
            print(f"Defrag transaction submitted to nodes: {tx.sent_to}")
            print(f"Do hddcoin wallet get_transaction -f {fingerprint} -tx 0x{tx_id} to get status")
            return
    print("Defrag transaction not yet submitted to nodes")
    print(f"Do 'hddcoin wallet get_transaction -f {fingerprint} -tx 0x{tx_id}' to get status")
async def execute_with_wallet(
    wallet_rpc_port: Optional[int], fingerprint: int, extra_params: Dict, function: Callable,
    eat_exceptions: bool = True,
) -> None:
    """Open a wallet RPC connection, select/log in a key, run *function*, and
    always close the connection.

    Fix: the original referenced ``wallet_client`` in its error/exit paths even
    when the failure happened before the client was created (e.g. load_config
    or WalletRpcClient.create raising), causing an UnboundLocalError. The
    client is now initialised to None and closed in a guarded ``finally``.
    """
    wallet_client = None
    try:
        config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
        self_hostname = config["self_hostname"]
        if wallet_rpc_port is None:
            wallet_rpc_port = config["wallet"]["rpc_port"]
        wallet_client = await WalletRpcClient.create(self_hostname, uint16(wallet_rpc_port), DEFAULT_ROOT_PATH, config)
        wallet_client_f = await get_wallet(wallet_client, fingerprint=fingerprint)
        if wallet_client_f is None:
            # No key selected / login failed; cleanup happens in finally.
            return None
        wallet_client, fingerprint = wallet_client_f
        await function(extra_params, wallet_client, fingerprint)
    except KeyboardInterrupt:
        pass
    except Exception as e:
        if not eat_exceptions:
            # Bare raise preserves the original traceback.
            raise
        if isinstance(e, aiohttp.ClientConnectorError):
            print(
                f"Connection error. Check if the wallet is running at {wallet_rpc_port}. "
                "You can run the wallet via:\n\thddcoin start wallet"
            )
        else:
            print(f"Exception from 'wallet' {e}")
    finally:
        if wallet_client is not None:
            wallet_client.close()
            await wallet_client.await_closed()
| StarcoderdataPython |
3393512 | import numpy as np
from scipy import linalg
from ..processing import knee
import warnings
warnings.filterwarnings("ignore")
def wgr_regress(y, X):
    """Least-squares fit of y ~ X via pivoted QR (mirrors MATLAB's regress).

    Rank-deficient columns are dropped; their coefficients remain zero in the
    returned vector b (length X.shape[1]).
    """
    n, ncolX = X.shape
    Q,R,perm = linalg.qr(X, mode='economic', pivoting=True)
    # Estimate numerical rank p from R's diagonal against a scaled tolerance.
    if R.ndim == 0:
        p = 0
    elif R.ndim == 1:
        p = int(abs(R[0]) > 0)
    else:
        if np.amin(R.shape) == 1:
            p = int(abs(R[0]) > 0)
        else:
            p = np.sum(np.abs(np.diagonal(R)) > abs(max(n, ncolX)*np.spacing(R[0][0])))
    if p < ncolX:
        # Truncate to the numerically independent columns.
        R = R[0:p,0:p]
        Q = Q[:,0:p]
        perm = perm[0:p]
    b = np.zeros((ncolX))
    if(R.shape[0] == R.shape[1]):
        try:
            b[perm] = linalg.solve(R,np.matmul(Q.T,y))
        # NOTE(review): bare except; also linalg.lstsq returns a tuple
        # (x, residues, rank, s), so this assignment looks broken — confirm.
        except:
            b[perm] = linalg.lstsq(R,np.matmul(Q.T,y))
    else:
        b[perm] = linalg.lstsq(R,np.matmul(Q.T,y))
    return b
def wgr_glsco(X, Y, sMRI = [], AR_lag=0, max_iter=20):
    """
    Linear regression when disturbance terms follow AR(p)
    -----------------------------------
    Model:
          Yt =  Xt * Beta + ut ,
          ut = Phi1 * u(t-1) + ... + Phip * u(t-p) + et
          where et ~ N(0,s^2)
    -----------------------------------
    Algorithm:
          Cochrane-Orcutt iterated regression (Feasible generalized least squares)
    -----------------------------------
    Usage:
          Y = dependent variable (n * 1 vector)
          X = regressors (n * k matrix)
          AR_lag = number of lags in AR process
          sMRI = optional regularisation matrix added to X'X (sFIR prior)
    -----------------------------------
    Returns:
          (res_sum, Beta): residual covariance and the k regression coefficients
    """
    nobs, nvar = X.shape
    # NOTE(review): mutable default `sMRI=[]` plus `sMRI == []` comparison —
    # when sMRI is a numpy array this relies on NumPy's list-comparison
    # behaviour (warnings are suppressed module-wide); fragile, confirm.
    if sMRI == []:
        Beta = wgr_regress(Y,X)
    else:
        sMRI = np.array(sMRI)
        try:
            Beta = linalg.solve(np.matmul(X.T,X)+sMRI,np.matmul(X.T,Y))
        except:
            Beta = linalg.lstsq(np.matmul(X.T,X)+sMRI,np.matmul(X.T,Y))
    resid = Y - np.matmul(X,Beta)
    if AR_lag == 0:
        # OLS case: no AR correction needed.
        res_sum = np.cov(resid)
        return res_sum, Beta
    # Convergence tolerance scaled to the coefficient magnitudes.
    max_tol = min(1e-6, max(np.absolute(Beta)) / 1000)
    for r in range(max_iter):
        Beta_temp = Beta
        # Estimate AR(p) coefficients from the lagged residuals.
        X_AR = np.zeros((nobs - (2 * AR_lag), AR_lag))
        for m in range(AR_lag):
            X_AR[:, m] = resid[AR_lag - m - 1:nobs - AR_lag - m - 1]
        Y_AR = resid[AR_lag:nobs - AR_lag]
        AR_para = wgr_regress(Y_AR, X_AR)
        # Quasi-difference X and Y with the estimated AR coefficients.
        X_main = X[AR_lag:nobs, :]
        Y_main = Y[AR_lag:nobs]
        for m in range(AR_lag):
            X_main = \
                X_main - (AR_para[m] * (X[AR_lag - m - 1:nobs - m - 1, :]))
            Y_main = Y_main - (AR_para[m] * (Y[AR_lag - m - 1:nobs - m - 1]))
        # Re-fit Beta on the transformed data.
        if sMRI == []:
            Beta = wgr_regress(Y_main, X_main)
        else:
            try:
                Beta = linalg.solve(np.matmul(X_main.T,X_main)+sMRI,np.matmul(X_main.T,Y_main))
            except:
                Beta = linalg.lstsq(np.matmul(X_main.T,X_main)+sMRI,np.matmul(X_main.T,Y_main))
        resid = Y[AR_lag:nobs] - X[AR_lag:nobs, :].dot(Beta)
        # Stop once coefficients change less than the tolerance.
        if(max(np.absolute(Beta - Beta_temp)) < max_tol):
            break
    res_sum = np.cov(resid)
    return res_sum, Beta
def Fit_sFIR2(output, length, TR, input, T, flag_sfir, AR_lag):
    """Fit a (smoothed) FIR model of the HRF to one voxel's time series.

    output -- BOLD time series; length -- HRF length in seconds; TR -- repetition
    time; input -- binary event onset vector; flag_sfir -- nonzero selects the
    smooth-FIR (Gaussian prior) variant; AR_lag -- AR order for the noise model.
    Returns (hrf coefficients, residual covariance).
    """
    NN = int(np.floor(length/TR))
    # Toeplitz design matrix built from the onset vector, plus an intercept column.
    _input = np.expand_dims(input[0], axis=0)
    X = linalg.toeplitz(input, np.concatenate((_input, np.zeros((1, NN-1))), axis = 1))
    X = np.concatenate((X, np.ones((input.shape))), axis = 1)
    if flag_sfir:
        # Smooth FIR: Gaussian smoothness prior on the FIR coefficients.
        fwhm = 7 #fwhm=7 seconds smoothing - ref. Goutte
        nh = NN-1
        dt = TR
        _ = np.expand_dims(np.arange(1, nh+1).T, axis=1)
        C = np.matmul(_,np.ones((1, nh)))
        h = np.sqrt(1./(fwhm/dt))
        v = 0.1
        R = v * np.exp(-h/2 * (C-C.T)**2)
        RI = linalg.inv(R)
        # Embed the inverse prior into a matrix covering the intercept too.
        MRI = np.zeros((nh + 1, nh + 1))
        MRI[0:nh,0:nh] = RI;
        sigma = 1
        sMRI0 = sigma**2*MRI
        sMRI = np.zeros((NN+1, NN+1))
        sMRI[0:NN,0:NN] = sMRI0;
        if AR_lag == 0:
            # Ridge-style regularised normal equations.
            try:
                hrf = linalg.solve((np.matmul(X.T,X)+sMRI),np.matmul(X.T,output))
            except:
                hrf = linalg.lstsq((np.matmul(X.T,X)+sMRI),np.matmul(X.T,output))
            resid = output - np.matmul(X, hrf)
            res_sum = np.cov(resid)
        else:
            res_sum, hrf = wgr_glsco(X,output,AR_lag=AR_lag,sMRI=sMRI);
    else:
        # Plain FIR: unregularised least squares (or AR-corrected GLS).
        if AR_lag == 0:
            hrf = linalg.lstsq(X,output)
            hrf = hrf[0]
            resid = output - np.matmul(X, hrf)
            res_sum = np.cov(resid)
        else:
            res_sum, hrf = wgr_glsco(X,output,AR_lag=AR_lag)
    return hrf, res_sum
def wgr_FIR_estimation_HRF(u, dat, para, N):
    """Estimate the HRF over a grid of onset lags and pick the best via knee-point.

    u -- detected event onsets (sample indices); dat -- voxel time series;
    para -- parameter dict ('estimation', 'lag', 'len', 'TR', 'AR_lag');
    N -- number of time points. Returns (selected HRF, u).
    """
    if para['estimation'] == 'sFIR':
        firmode = 1
    else:
        firmode = 0
    lag = para['lag']
    nlag = np.amax(lag.shape)
    len_bin = int(np.floor(para['len'] / para['TR']))
    hrf = np.zeros((len_bin+1, nlag))
    Cov_E = np.zeros((1, nlag))
    kk = 0
    for i_lag in range(1, nlag + 1):
        # Shift onsets back by the candidate lag; drop those falling before t=0.
        RR = u - i_lag
        RR = RR[RR >= 0]
        if RR.size != 0:
            design = np.zeros((N, 1))
            design[RR] = 1
            hrf_kk, e3 = Fit_sFIR2(dat, para['len'], para['TR'], design, len_bin, firmode, para['AR_lag'])
            hrf[:, kk] = np.ravel(hrf_kk)
            Cov_E[:, kk] = (np.ravel(e3))
        else:
            # No usable onsets at this lag: mark as infinitely bad.
            Cov_E[:, kk] = np.inf
        kk += 1
    # Pick the lag at the knee of the residual-covariance curve.
    placeholder, ind = knee.knee_pt(np.ravel(Cov_E))
    if ind == np.amax(Cov_E.shape) - 1:
        ind = ind - 1
    rsH = hrf[:,ind+1]
    return rsH, u
156915 | <gh_stars>0
# coding=utf-8
from collections import defaultdict
import threading
__author__ = 'nekmo'
class Events(defaultdict):
    """Registry mapping an event name to the list of handler callables.

    Unknown event names lazily get an empty handler list (defaultdict(list)).
    """

    def __init__(self):
        super(Events, self).__init__(list)

    def propagate(self, event, *args, **kwargs):
        """Fire *event*, running every registered handler in its own thread.

        Unknown events are ignored. Fixed `not event in self` to the
        idiomatic `event not in self` (same semantics).
        """
        if event not in self:
            return
        for function in self[event]:
            # TODO: limit handler run time and use a worker pool instead of
            # spawning one unbounded thread per handler.
            thread = threading.Thread(target=function, args=args, kwargs=kwargs)
            thread.start()
# Global registry shared by the @event decorator below.
events = Events()
def event(event_name):
    """Decorator registering a function as a handler for the given event.

    Usage:
        @event('my_event')
        def function():
            pass
    """
    def decorator(f):
        events[event_name].append(f)
        return f
    return decorator
3256994 | <reponame>Mr-TelegramBot/python-tdlib
from ..factory import Type
class pushMessageContentChatChangePhoto(Type):
    """TDLib push message content: the chat photo was changed (no extra payload)."""
    pass
| StarcoderdataPython |
99562 | # -*- coding: utf-8 -*-
"""Tests for backoff_utils._backoff"""
from datetime import datetime
import pytest
import backoff_utils.strategies as strategies
from backoff_utils._decorator import apply_backoff
# Module-level state mutated by the test helpers below.
_attempts = 0
_was_successful = False
def when_successful(value):
    """Success callback: update the global ``_was_successful`` value to True.

    ``value`` (the decorated callable's return value) is intentionally ignored.
    """
    global _was_successful  # pylint: disable=W0603,C0103
    _was_successful = True
def successful_function(trying_again, max_tries):
    """A successful function which returns a value.

    Raises ZeroDivisionError until it has accumulated ``max_tries`` retry
    attempts, then returns 123. ``trying_again`` False resets the counter.
    """
    global _attempts  # pylint: disable=W0603,C0103
    if trying_again is True:
        _attempts += 1
    else:
        _attempts = 0
    if _attempts >= max_tries:
        return 123
    raise ZeroDivisionError()
def on_failure_function(error, message=None, stacktrace=None):
    """Failure hook used by the tests: re-surface the failure as AttributeError
    carrying the supplied message (the original error and stacktrace are dropped)."""
    raise AttributeError(message)
@pytest.mark.parametrize("failure, strategy, max_tries, max_delay", [
    (None, strategies.Exponential, 1, None),
    (None, strategies.Exponential, 3, None),
    (None, strategies.Exponential, 1, None),
    (None, strategies.Exponential, 3, None),
    (None, strategies.Exponential, 1, 3),
    (None, strategies.Exponential, 3, 5),
    (None, strategies.Exponential(jitter = False), 1, None),
    (None, strategies.Exponential(scale_factor = 3), 3, None),
    (None, strategies.Fibonacci, 1, None),
    (None, strategies.Fibonacci, 3, None),
    (None, strategies.Fibonacci, 1, 3),
    (None, strategies.Fibonacci, 3, 5),
    (None, strategies.Fixed, 1, None),
    (None, strategies.Fixed, 3, None),
    (None, strategies.Fixed, 1, 3),
    (None, strategies.Fixed, 3, 5),
    (None, strategies.Fixed(sequence = [2, 3, 4, 5]), 3, None),
    (None, strategies.Linear, 1, None),
    (None, strategies.Linear, 3, None),
    (None, strategies.Linear, 1, 3),
    (None, strategies.Linear, 3, 5),
    (None, strategies.Polynomial, 1, None),
    (None, strategies.Polynomial, 3, None),
    (None, strategies.Polynomial, 1, 3),
    (None, strategies.Polynomial, 3, 5),
    (None, strategies.Polynomial(exponent = 2), 3, None),
    (TypeError, 'invalid-value', 1, None),
])
def test_apply_backoff(failure, strategy, max_tries, max_delay):
    """Test the :ref:`backoff_utils._backoff.backoff` function.

    Each case decorates an always-failing function with apply_backoff and
    checks the retry count, elapsed time cap and the raised exception.
    """
    global _attempts  # pylint: disable=W0603,C0103

    @apply_backoff(strategy = strategy,
                   max_tries = max_tries,
                   max_delay = max_delay,
                   catch_exceptions = [type(ZeroDivisionError())],
                   on_failure = None,
                   on_success = None)
    def divide_by_zero_function():
        """Raise a ZeroDivisionError counting attempts."""
        global _attempts  # pylint: disable=W0603,C0103
        if _attempts > 0:
            _attempts += 1
            raise ZeroDivisionError('Failed on Subsequent Attempt')
        else:
            _attempts += 1
            raise ZeroDivisionError()

    if not failure:
        with pytest.raises(ZeroDivisionError) as excinfo:
            start_time = datetime.utcnow()
            divide_by_zero_function()
        end_time = datetime.utcnow()
        # NOTE(review): start - end yields a *negative* duration, making the
        # max_delay assertion below vacuously true — likely end - start intended.
        elapsed_time = start_time - end_time
        elapsed_time = elapsed_time.total_seconds()
        if max_delay is not None:
            assert elapsed_time <= max_delay
            assert _attempts <= (max_tries + 1)
        else:
            assert _attempts == (max_tries + 1)
        if max_tries > 1:
            assert 'Subsequent Attempt' in str(excinfo.value)
    else:
        with pytest.raises(failure):
            start_time = datetime.utcnow()
            divide_by_zero_function()
        end_time = datetime.utcnow()
        elapsed_time = start_time - end_time
        elapsed_time = elapsed_time.total_seconds()
    # Reset shared state so subsequent parametrized cases start clean.
    _attempts = 0
| StarcoderdataPython |
3273838 | """
Utils for testing
"""
from django.db.models import fields
def get_simplified_model(**kwargs):
    """
    Generates a mocked model class
    Expected kwargs:
    - `fields`: dict field_name:field_instance that will populate the model fields
    :returns: InternalSimplifiedModel
    """
    # Every mock model gets an id field; callers may add more via kwargs.
    model_fields = {
        "id": fields.AutoField(name="id"),
    }
    name = "SimplifiedModel"
    if "fields" in kwargs:
        model_fields.update(kwargs.get("fields"))

    class InternalSimplifiedModel(object):
        """
        Fake model class, as django will require
        a DJANGO_SETTINGS_MODULE module.
        The class offers the mocked functionality
        expected to be used by the library
        """

        class _meta(object):
            """Fake meta class"""
            pk = 0
            model_name = "SIMPLIFIED_MODEL_NAME"

            @classmethod
            def get_fields(cls):
                """Fake get_fields"""
                # Closes over model_fields captured above.
                return model_fields.values()

        class objects(object):  # pylint: disable=C0103
            """Fake object manager"""

            @classmethod
            def create(cls, *args, **kwargs):
                """Fake create: echo back the arguments instead of persisting."""
                return {
                    "args": args,
                    "kwargs": kwargs,
                }

    # Present the mock under a friendlier name in reprs and test output.
    InternalSimplifiedModel.__name__ = name
    return InternalSimplifiedModel
class SimplifiedForeignKey(fields.related.ForeignKey):
    """
    Simplified foreign key class: a ForeignKey wired to a mocked model so
    tests need no real database-backed target model.
    """

    def __init__(self, **kwargs):
        """
        Sets the defaults for the class
        """
        # Django requires on_delete; supply a dummy value since the mocked
        # model is never persisted or deleted.
        kwargs["on_delete"] = ""
        super().__init__(get_simplified_model(), **kwargs)

# Present the mocked class under the real Django field name in test output.
SimplifiedForeignKey.__name__ = "ForeignKey"
# Django field classes exercised by the test-suite.
TESTED_FIELDS = (
    fields.IntegerField,
    fields.FloatField,
    fields.CharField,
    fields.TextField,
    fields.EmailField,
    fields.BooleanField,
    fields.DateField,
    fields.DateTimeField,
    SimplifiedForeignKey,
)
| StarcoderdataPython |
140390 | <reponame>erose1337/versionhelper
VERSION = "1.0.0-beta.15"
LANGUAGE = "python"
PROJECT = "versionhelper"
_p = "versionhelper.libvh."
API = {_p + "version_helper" : {"arguments" : ("filename str", ),
"keywords" : {"directory" : "directory str",
"version" : "str",
"prerelease" : "str",
"build_metadata" : "str",
"db" : "filename str",
"checker" : "filename str",
"source_types" : "iterable of str",
"no_invariant_check" : "bool",
"dry_run" : "bool",
"silent" : "bool"},
"returns" : None,
"exceptions" : ("ValueError", "Missing_Api_Function",
"Mismatched_Api_Argument",
"Missing_Api_Info"),
"side_effects" : ("Modifies api VERSION",
"Modifies database",
"Overwrites apichangelog.txt")},
_p + "parse_version" : {"arguments" : ("version str", ),
"returns" : ("str", "str", "str", "str", "str")}
}
| StarcoderdataPython |
3313653 | <reponame>jscpeterson/reminders
# Generated by Django 2.2.4 on 2019-08-15 21:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: extend the Deadline.type choice list."""

    dependencies = [
        ('remind', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='deadline',
            name='type',
            field=models.IntegerField(choices=[(0, 'FFA'), (1, 'Scheduling Conference'), (2, 'Initial Witness List'), (3, 'PTIs Requested'), (4, 'PTIs Conducted'), (5, 'Witness PTIs'), (6, 'Scientific Evidence'), (7, 'Pretrial Motion Filing'), (8, 'Pretrial Motion Response'), (9, 'Pretrial Motion Hearing'), (10, 'Final Witness List'), (11, 'Need for Interpreter'), (12, 'Plea Agreement'), (13, 'Certification of Readiness'), (14, 'PTC/Docket Call'), (15, 'Trial')]),
        ),
    ]
| StarcoderdataPython |
171555 | #!/usr/bin/env python3
# Build a small catalog of Linux distributions and display it.
library = []
for name, versions in (
    ("RedHat", ["4.0", "5.0", "6.0", "7.0", "8.0"]),
    ("Suse", ["10.0", "11.0", "15.0", "42.0"]),
):
    library.append({"name": name, "versions": versions})
print(library)
| StarcoderdataPython |
1766374 | #!/usr/bin/env python3
class Solution:
    """Three equivalent binary searches over a rotated sorted array of
    distinct integers (LeetCode 33 'Search in Rotated Sorted Array')."""

    def search(self, nums, target):
        """
        One-pass binary search using a parity trick.
        :type nums: List[int]
        :type target: int
        :rtype: int index of target, or -1 if absent
        """
        lo, hi = 0, len(nums) - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            if nums[mid] == target: return mid
            # XOR of three "wraps around nums[0]" tests: true exactly when
            # the target lies in the right half of the rotated order
            # relative to mid, so we move lo; otherwise we move hi.
            if (nums[0] > nums[mid]) ^ (nums[0] > target) ^ (target > nums[mid]):
                lo = mid + 1
            else:
                hi = mid - 1
        return -1

    def search_pre2(self, nums, target):
        """
        Binary search after mapping values into a single monotonic domain.
        :type nums: List[int]
        :type target: int
        :rtype: int
        """
        lo, hi = 0, len(nums)
        while lo < hi:
            mid = (lo + hi) // 2
            # If mid and target sit in the same rotated half, compare real
            # values; otherwise substitute +/-inf so the comparison still
            # steers the search toward target's half.
            num = nums[mid] if (nums[mid] < nums[0]) == (target < nums[0]) \
                else float('-inf') if target < nums[0] else float('inf')
            if num == target: return mid
            if num < target:
                lo = mid + 1
            else:
                hi = mid
        return -1

    def search_pre1(self, nums, target):
        """
        Two-phase approach: locate the rotation pivot, then run a plain
        binary search with indices shifted modulo the pivot.
        :type nums: List[int]
        :type target: int
        :rtype: int
        """
        n = len(nums)
        # Phase 1: find p, the index of the smallest element (rotation point).
        lo, hi = 0, n - 1
        while lo < hi:
            mid = (lo + hi) // 2
            if nums[mid] < nums[hi]: hi = mid
            else: lo = mid + 1
        p = lo
        # Phase 2: ordinary binary search in the "virtually unrotated" array.
        lo, hi = 0, n - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            real_mid = (mid + p) % n
            if nums[real_mid] == target: return real_mid
            if nums[real_mid] < target:
                lo = mid + 1
            else:
                hi = mid - 1
        return -1
if __name__ == "__main__":
nums = [4, 5, 6, 7, 0, 1, 2]
print(Solution().search(nums, 0))
print(Solution().search(nums, 3))
| StarcoderdataPython |
3383315 | <filename>feeder/parsers/PT.py
import requests, dateutil, arrow
from bs4 import BeautifulSoup
COUNTRY_CODE = 'PT'

def GWh_per_day_to_MW(energy_day_gwh):
    """Convert a daily energy total in GWh to the equivalent average power in MW.

    1 GWh over a day = 1000 MWh spread across 24 hours.
    Fix: the original declared ``hours_in_a_day`` but used a magic 24 instead;
    it also carried stray semicolons.
    """
    hours_in_a_day = 24
    power_mw = energy_day_gwh / hours_in_a_day * 1000
    return power_mw
def fetch_PT():
    """Scrape REN's daily statistics page and return Portugal's production,
    consumption and ES exchange as a dict keyed per the feeder schema.

    Values on the page are daily GWh totals; they are converted to average MW.
    NOTE(review): the mapping of ``daily_values`` indices to generation types
    assumes the table row order on the REN page is stable — confirm against
    the live page before relying on it.
    """
    r = requests.get('http://www.centrodeinformacao.ren.pt/EN/InformacaoExploracao/Pages/EstatisticaDiaria.aspx')
    soup = BeautifulSoup(r.text, 'html.parser')
    trs = soup.find_all("tr", { "class" : "grid_row" })
    daily_values = []
    for tr in trs:
        value = tr.find_all("td")[2].string # Daily values are in column 3
        value = GWh_per_day_to_MW(float(value)) # str -> float, GWh/day -> MW
        daily_values.append(value)
    # The page carries its reporting date in a date-picker form field.
    date_str = soup.find(id="ctl00_m_g_5e80321e_76aa_4894_8c09_4e392fc3dc7d_txtDatePicker_foo")['value']
    # Stamp the data at end-of-day local time (Europe/Lisbon).
    date = arrow.get(date_str + " 23:59:59", "DD-MM-YYYY HH:mm:ss").replace(tzinfo=dateutil.tz.gettz('Europe/Lisbon')).datetime
    data = {
        'countryCode': COUNTRY_CODE,
        'datetime': date, # UTC
        'production': {
            'wind': daily_values[9],
            'solar': daily_values[10],
            'hydro': daily_values[0] + daily_values[7], # There are 2 different production regimes
            'coal': daily_values[1] + daily_values[8], # There are 2 different production regimes
            'nuclear': 0
        },
        'consumption': {
            'unknown': daily_values[13]
        },
        'exchange':{
            # Net exchange with Spain: imports minus exports.
            'ES': daily_values[3] - daily_values[4]
        }
    }
    return data
if __name__ == '__main__':
    # ``print fetch_PT()`` was a Python-2-only statement; the parenthesized
    # form behaves identically on Python 2 and is required on Python 3.
    print(fetch_PT())
| StarcoderdataPython |
1776918 | WINDOW_WIDTH = 1024
# Window size in pixels.
WINDOW_HEIGHT = 720
# RGB color constants.
BLACK = (0,0,0)
WHITE = (225,225,225)  # NOTE(review): pure white is (255, 255, 255); 225 may be a typo — confirm intent.
| StarcoderdataPython |
72407 | from ..._common import block_to_format, str2format
from ..._io.input.tough._helpers import write_record
def block(keyword):
    """Decorate block writing functions.

    The returned decorator wraps a writer ``func(f, *args)`` so that the
    block keyword plus a column ruler is emitted first, then the decorated
    body, then a blank terminator line.
    """
    ruler = "----1----*----2----*----3----*----4----*----5----*----6----*----7----*----8"

    def decorator(func):
        from functools import wraps

        @wraps(func)
        def wrapper(f, *args):
            f.write(keyword + ruler + "\n")
            func(f, *args)
            f.write("\n")

        return wrapper

    return decorator
def _write_eleme(labels, materials, volumes, nodes, material_name=None):
    """Return a generator that iterates over the records of block ELEME.

    One record is yielded per element, formatted with the fixed-width spec
    selected by the element-label length.  ``material_name`` optionally maps
    raw material values to display names.
    """
    # The label length (5 or more characters) selects the record format.
    label_length = len(labels[0])
    fmt = block_to_format["ELEME"][label_length]
    fmt = str2format(fmt)
    iterables = zip(labels, materials, volumes, nodes)
    for label, material, volume, node in iterables:
        # Translate through the optional mapping; fall back to the raw value.
        mat = (
            material_name[material]
            if material_name and material in material_name.keys()
            else material
        )
        # Non-string materials are right-justified into a 5-character field.
        mat = mat if isinstance(mat, str) else "{:>5}".format(str(mat))
        record = write_record(
            [
                label, # ID
                None, # NSEQ
                None, # NADD
                mat, # MAT
                volume, # VOLX
                None, # AHTX
                None, # PMX
                node[0], # X
                node[1], # Y
                node[2], # Z
            ],
            fmt=fmt,
        )
        # write_record returns a list of formatted lines; ELEME uses one.
        yield record[0]
def _write_coord(nodes):
"""Return a generator that iterates over the records of block COORD."""
fmt = "{:20.13e}{:20.13e}{:20.13e}\n"
for node in nodes:
record = fmt.format(*node)
yield record
def _write_conne(clabels, isot, d1, d2, areas, angles):
    """Return a generator that iterates over the records of block CONNE.

    ``clabels`` holds pairs of element labels; the two labels are joined into
    a single ID1-ID2 field.  One record is yielded per connection.
    """
    # Each connection label is a pair; the element-label length picks the format.
    label_length = len(clabels[0][0])
    fmt = block_to_format["CONNE"][label_length]
    fmt = str2format(fmt)
    iterables = zip(clabels, isot, d1, d2, areas, angles)
    # NOTE: the loop variables deliberately shadow the ``isot``/``d1``/``d2``
    # sequence arguments with their per-connection values.
    for label, isot, d1, d2, area, angle in iterables:
        record = write_record(
            [
                "".join(label), # ID1-ID2
                None, # NSEQ
                None, # NAD1
                None, # NAD2
                isot, # ISOT
                d1, # D1
                d2, # D2
                area, # AREAX
                angle, # BETAX
                None, # SIGX
            ],
            fmt=fmt,
        )
        yield record[0]
def _write_incon(
    labels, values, porosity=None, userx=None, phase_composition=None, eos=None
):
    """Return a generator that iterates over the records of block INCON.

    Each element may contribute up to three records: a header record (label,
    porosity, EOS-specific extras), a record of up to four primary variables,
    and (EOS7R) a third record for any additional primary variables.
    Elements whose values are all sentinels and have no porosity/userx data
    are skipped entirely.
    """
    # Default the optional per-element sequences to all-None placeholders.
    porosity = porosity if porosity is not None else [None] * len(labels)
    userx = userx if userx is not None else [None] * len(labels)
    phase_composition = (
        phase_composition if phase_composition is not None else [None] * len(labels)
    )
    label_length = len(labels[0])
    fmt = block_to_format["INCON"]
    iterables = zip(labels, values, porosity, userx, phase_composition)
    for label, value, phi, usrx, indicat0 in iterables:
        # NOTE(review): this threshold is -1.0e-9 while the per-variable
        # checks below use -1.0e9 as the "unset" sentinel — possibly a typo;
        # confirm the intended sentinel before changing.
        cond1 = any(v > -1.0e-9 for v in value)
        cond2 = phi is not None
        cond3 = usrx is not None
        if cond1 or cond2 or cond3:
            # Record 1: label, two blank fields, porosity, then either the
            # TMVOC phase-composition indicator or three USERX values.
            values = [label, "", ""]
            ignore_types = [1, 2]
            if phi is not None:
                values.append(phi)
            else:
                values.append("")
                ignore_types.append(3)
            if eos == "tmvoc":
                if indicat0 is not None:
                    values.append(indicat0)
                else:
                    values.append("")
                    ignore_types.append(4)
            else:
                if usrx is not None:
                    values += list(usrx)
                else:
                    values += 3 * [""]
                    ignore_types += [4, 5, 6]
            # Some EOS have their own INCON format; otherwise use the default.
            fmt1 = str2format(
                fmt[eos][label_length] if eos in fmt else fmt["default"][label_length],
                ignore_types=ignore_types,
            )
            fmt1 = "{}\n".format("".join(fmt1[: len(values)]))
            record = fmt1.format(*values)
            # Record 2: the first (up to) four primary variables; values at or
            # below the -1.0e9 sentinel are written as blanks.
            n = min(4, len(value))
            values = []
            ignore_types = []
            for i, v in enumerate(value[:n]):
                if v > -1.0e9:
                    values.append(v)
                else:
                    values.append("")
                    ignore_types.append(i)
            fmt2 = str2format(fmt[0], ignore_types=ignore_types)
            fmt2 = "{}\n".format("".join(fmt2[: len(values)]))
            record += fmt2.format(*values)
            # Record 3 (EOS7R): remaining primary variables beyond the fourth.
            if len(value) > 4:
                values = []
                ignore_types = []
                for i, v in enumerate(value[n:]):
                    if v > -1.0e9:
                        values.append(v)
                    else:
                        values.append("")
                        ignore_types.append(i)
                fmt2 = str2format(fmt[0], ignore_types=ignore_types)
                fmt2 = "{}\n".format("".join(fmt2[: len(values)]))
                record += fmt2.format(*values)
            yield record
        else:
            continue
| StarcoderdataPython |
30224 | <filename>scripts/supp_fig_C_calc.py
import sys
sys.path.append("../src")
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import C_calculation
plt.style.use(['seaborn-deep', '../paper.mplstyle'])
"""
This script produces Figure S3, which displays a detailed summary of the
calculations used to determine a suitable value for C. Run as:
$ python figure_s3.py
"""
C_recovery_dict = np.load("../results/C_recovery.npy", allow_pickle=True).item()
Cs = C_recovery_dict['Cs']
frac_recovered = C_recovery_dict['frac_recovered']
Lqs = [(10, 2), (11, 2), (12, 2), (13, 2),
(6, 3), (7, 3), (8, 3),
(5, 4), (6, 4), (7, 4)]
fig, axes = plt.subplots(2, 5, figsize=(10, 5))
axes_flat = axes.flatten()
colors = sns.color_palette('rocket_r', n_colors=5)
for j in range(len(Lqs)):
ax = axes_flat[j]
L, q = Lqs[j]
Ks = []
vals = []
for i in range(len(C_calculation.TESTED)):
L_, q_, K = C_calculation.TESTED[i]
if L == L_ and q == q_:
if j == 1:
lbl = '$K=%s$' % K
else:
lbl=None
vals = frac_recovered[i]
ax.plot(Cs, vals, c=colors[K-1], label=lbl)
if j == 1:
fig.legend(loc='lower center', bbox_to_anchor=(0.5, -0.05), ncol=5, fancybox=True)
ax.plot((2.62, 2.62), (0, 1), c='k', lw=1, ls='--')
ax.set_xlim([0, 3])
ax.set_ylim([-0.01, 1.01])
ax.set_xlabel("$C$")
ax.set_xticks([0, 0.5, 1, 1.5, 2, 2.5, 3])
ax.tick_params(axis='x', which='major', labelsize=8)
ax.set_ylabel("Fraction Recovered at $C \cdot S \log(q^L)$")
ax.set_title("$L=%s, \,q=%s$" % (L, q))
plt.tight_layout()
plt.savefig("plots/supp_fig_C_calc.png", dpi=300, bbox_inches='tight', facecolor='w', transparent=False)
plt.show() | StarcoderdataPython |
3276174 | <reponame>phac-nml/irida-staramr-results
import unittest
from irida_staramr_results import util
class TestUtil(unittest.TestCase):
    """Tests for the irida_staramr_results.util helpers."""

    def setUp(self):
        print("\nStarting " + self.__module__ + ": " + self._testMethodName)

    def tearDown(self):
        pass

    def test_local_to_timestamp(self):
        """
        Test local to timestamp conversion.
        :return:
        """
        # NOTE(review): the expected value is 2021-04-08 00:00 UTC in ms;
        # if local_to_timestamp uses the machine's local zone this test is
        # timezone-dependent — confirm.
        fake_good_date = "2021-04-08"  # CDT
        res = util.local_to_timestamp(fake_good_date)
        self.assertEqual(res, 1617840000000.0)

        fake_bad_date = "2021/04/08"
        with self.assertRaises(ValueError) as c:
            util.local_to_timestamp(fake_bad_date)
        # The original ``"..." in c.exception`` raised TypeError because a
        # ValueError instance is not a container; compare against the
        # stringified message instead.
        self.assertIn("does not match format '%Y-%m-%d'", str(c.exception))
if __name__ == '__main__':
    # Allow running this test module directly: ``python test_util.py``.
    unittest.main()
| StarcoderdataPython |
1685682 | <reponame>ultimus11/Filed-Test
from django.conf.urls import url,include
from .views import GETAPIView
from .views import CREATEAPIView
from .views import UPDATEAPIView
from .views import DELETEAPIView
# NOTE(review): ``url()`` interprets its first argument as an unanchored
# regex, so e.g. 'GET/' matches anywhere in the request path — confirm
# whether anchored patterns ('^GET/$') were intended.
urlpatterns = [
    url('GET/', GETAPIView.as_view()),
    url('CREATE/', CREATEAPIView.as_view()),
    url('UPDATE/', UPDATEAPIView.as_view()),
    url('DELETE/', DELETEAPIView.as_view()),
]
'''
As recommended, only four endpoints are used
''' | StarcoderdataPython |
1608799 | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from ngraph.util.persist import valid_path_append, fetch_file
import os
import numpy as np
class PTB(object):
    """
    Penn Treebank data set from http://arxiv.org/pdf/1409.2329v5.pdf
    Arguments:
        path (string): Data directory to find the data, if not existing, will
                       download the data
        use_words (boolean): Tokenize at word level (split on whitespace)
                             instead of character level. Default False.
        shift_target (boolean): Set the target to be the same sequence of shifted
                                version of the sequence. Default to be True, for
                                language models.
    """
    def __init__(self, path='.', use_words=False, shift_target=True):
        self.path = path
        self.url = 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data'
        # Expected file names and byte sizes, used to validate/download.
        self.filemap = dict(train=dict(filename='ptb.train.txt', size=5101618),
                            test=dict(filename='ptb.test.txt', size=449945),
                            valid=dict(filename='ptb.valid.txt', size=399782))
        self.shift_target = shift_target
        self.use_words = use_words
    def load_data(self):
        """Download (if needed) and tokenize all three splits.

        Returns a dict {phase: {'inp_txt': X, 'tgt_txt': y}} of uint32 index
        arrays.  The vocabulary is built from the first phase processed
        ('train') and then reused for 'test' and 'valid'.
        """
        self.data_dict = {}
        self.vocab = None
        for phase in ['train', 'test', 'valid']:
            filename, filesize = self.filemap[phase]['filename'], self.filemap[phase]['size']
            workdir, filepath = valid_path_append(self.path, '', filename)
            if not os.path.exists(filepath):
                fetch_file(self.url, filename, filepath, filesize)
            tokens = open(filepath).read() # add tokenization here if necessary
            if self.use_words:
                tokens = tokens.strip().split()
            # Vocabulary is fixed after the first (train) phase.
            self.vocab = sorted(set(tokens)) if self.vocab is None else self.vocab
            # vocab dicts
            self.token_to_index = dict((t, i) for i, t in enumerate(self.vocab))
            self.index_to_token = dict((i, t) for i, t in enumerate(self.vocab))
            # map tokens to indices
            X = np.asarray([self.token_to_index[t] for t in tokens], dtype=np.uint32)
            if self.shift_target:
                # Language-model target: the input shifted left by one token
                # (wrapping the first token to the end).
                y = np.concatenate((X[1:], X[:1]))
            else:
                y = X.copy()
            self.data_dict[phase] = {'inp_txt': X, 'tgt_txt': y}
        return self.data_dict
| StarcoderdataPython |
1725231 | import codecs
import orcid_api
from lxml import objectify
import pprint
import os.path
import time
from config import DATAPATH
# Module-level cache for the ORCID access token (fetched lazily, once).
token=''
def getToken():
    """Return the cached public-read ORCID access token, fetching it on
    first use."""
    global token
    if token=='':
        token = orcid_api.get_access_token(scope='/read-public', sandbox=False)
    return token
def search_to_file(q, start, rows, filename, sandbox=False):
    """Run an ORCID search and dump the raw response body to DATAPATH/filename.

    Returns (filename, token).  The original implementation called
    ``fh.close`` without parentheses, so the file handle was never actually
    closed; a ``with`` block now guarantees it.
    """
    token = getToken()
    # Honor the caller's ``sandbox`` flag (the original hard-coded False).
    res = orcid_api.search(token, q, start, rows, sandbox=sandbox)
    with open(DATAPATH + filename, "w") as fh:
        fh.write(res.content)
    return filename, token
# TODO: add to the csv: current affiliation, employment type, without end date
def getSearchResults(q):
    """Page through ORCID search results for ``q``, caching each page on disk.

    Each page of up to 100 results is saved as ``<query>_<ddmmyy>_<start>.txt``
    under DATAPATH; pages already on disk are reused.  Returns the list of
    page filenames, or None when the query yields no hits.
    """
    datestr=time.strftime("%d%m%y")
    # Build a filesystem-safe basename by stripping query syntax characters.
    filebase=q.replace('+','')
    filebase = filebase.replace(':', '')
    filebase = filebase.replace('"', '')
    filebase = filebase.replace('*', '')
    filebase = filebase.replace('@', '')
    start=1
    filename='%s_%s_%s.txt' % (filebase,datestr,str(start))
    if not os.path.isfile(DATAPATH + filename):
        search_to_file(q,1,100, filename,sandbox=False)
    with open(DATAPATH+filename) as f:
        content = f.read()
    searchXml=objectify.fromstring(content)
    numfound=int(searchXml['orcid-search-results'].attrib.get('num-found'))
    print('found %s search results for query %s...' % (numfound,q))
    if numfound>0:
        searchfiles=[]
        searchfiles.append(filename)
        if numfound>100:
            # Floor division: plain '/' yields a float under Python 3 and
            # made ``range(1, iters + 1)`` raise TypeError. '//' is correct
            # on both Python 2 and 3.
            iters = numfound // 100
            for c in range(1,iters+1):
                start=(c*100)+1
                filename = '%s_%s_%s.txt' % (filebase, datestr, str(start))
                if not os.path.isfile(DATAPATH + filename):
                    search_to_file(q, start, 100, filename, sandbox=False)
                searchfiles.append(filename)
        return searchfiles
    # Implicitly returns None when nothing was found.
def download_orcid(orcid):
    """Fetch one ORCID public record as XML and cache it on disk.

    Records are grouped into a per-month directory DATAPATH/downloads/<MMYY>;
    a record already present there is not re-fetched.
    """
    datestr = time.strftime("%m%y")
    dest='%sdownloads/%s' %(DATAPATH,datestr)
    if not os.path.exists(dest):
        os.mkdir(dest)
    dest_file = '%s/%s.xml' %(dest,orcid)
    if not os.path.exists(dest_file):
        token = getToken()
        print('Retrieving %s...' % orcid)
        response = orcid_api.read_public_record(orcid, token, sandbox=False)
        with codecs.open(dest_file, 'w', 'utf-8') as f:
            f.write(response.text)
    else:
        print('Skipping %s, has already been retrieved' % orcid)
# --- Script body: runs at import time and performs live network requests. ---
# Search for all VU Amsterdam-affiliated profiles, then download each record.
resultFiles=getSearchResults('"vu university"+OR+"vrije universiteit amsterdam"')
orcids=[]
for filename in resultFiles:
    with open(DATAPATH+filename) as f:
        content = f.read()
    searchXml=objectify.fromstring(content)
    for child in searchXml['orcid-search-results']['orcid-search-result']:
        orcid=child['orcid-profile']['orcid-identifier']['path']
        #orcids.append(orcid)
        download_orcid(orcid)
| StarcoderdataPython |
123554 | # Generated by Django 3.0.5 on 2020-05-11 01:56
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the UserProfile model (one-to-one with the
    auth user, plus email/city/status/image fields and a friends M2M)."""

    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(max_length=70, unique=True)),
                ('city', models.CharField(default='', max_length=255)),
                ('status', models.CharField(choices=[('Singer', 'Singer'), ('Baglama-Player', 'Baglama-Player'), ('Guitar-Player', 'Guitar-Player'), ('Violin-Player', 'Violin-Player'), ('Acordion-Player', 'Acordion-Player'), ('Chor-Chef', 'Chor-Chef')], default='Singer', max_length=20)),
                ('image', models.ImageField(default='post_list/profile_images/default.jpg', upload_to='post_list/profile_images')),
                ('friends', models.ManyToManyField(blank=True, related_name='friends', to=settings.AUTH_USER_MODEL)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| StarcoderdataPython |
3218453 | <filename>niceman/support/distributions/tests/test_debian.py
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the niceman package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Various supporting utilities for various distributions
"""
from ..debian import DebianReleaseSpec
from ..debian import get_spec_from_release_file
from ..debian import parse_dpkgquery_line
from niceman.tests.utils import eq_, assert_is_subset_recur
def test_get_spec_from_release_file(f=None):
    """Parse a PGP-signed Debian Release file into a DebianReleaseSpec."""
    # NOTE(review): the ``f`` parameter is unused — possibly a leftover
    # fixture hook; confirm before removing.
    content = """\
-----BEGIN PGP SIGNED MESSAGE-----
Hash: SHA256
Origin: NeuroDebian
Label: NeuroDebian2
Suite: stretch
Codename: stretch2
Date: Thu, 15 Sep 2016 01:30:57 UTC
Architectures: i386 amd64 sparc
Components: main non-free contrib
Description: NeuroDebian repository with perspective, inofficial and backported packages -- mostly neuroscience-related
MD5Sum:
 d9650396c56a6f9521d0bbd9f719efbe 482669 main/binary-i386/Packages
 34134c9a64b847d33eeeb3cc7291f855ab9f0969e8ad7c92cd2a0c1aebc19d1e 14199 contrib/Contents-sparc.gz
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2
iEYEAREIAAYFAlfZ+dEACgkQpdMvASZJpamBowCfXOPQimiIy2wnVY5U9sLs1jSn
JZ0An0Uoocusvjco1t6RAwxt/y3lQoWV
=a3Nn
-----END PGP SIGNATURE-----
"""
    eq_(get_spec_from_release_file(content),
        DebianReleaseSpec(
            origin='NeuroDebian',
            label='NeuroDebian2',
            codename='stretch2',
            version=None,
            suite='stretch',
            date='Thu, 15 Sep 2016 01:30:57 UTC',
            components='main non-free contrib',
            architectures='i386 amd64 sparc',
        ))
def test_parse_apt_cache_show_pkgs_output():
    """Parse ``apt-cache show``-style output (installed, candidate and
    universe package stanzas) and check a subset of the parsed fields."""
    from ..debian import parse_apt_cache_show_pkgs_output
    txt1 = """\
Package: openssl
Status: install ok installed
Priority: optional
Section: utils
Installed-Size: 934
Maintainer: Ubuntu Developers <<EMAIL>>
Architecture: amd64
Version: 1.0.2g-1ubuntu4.5
Depends: libc6 (>= 2.15), libssl1.0.0 (>= 1.0.2g)
Suggests: ca-certificates
Conffiles:
 /etc/ssl/openssl.cnf 7df26c55291b33344dc15e3935dabaf3
Description-en: Secure Sockets Layer toolkit - cryptographic utility
 This package is part of the OpenSSL project's implementation of the SSL
 and TLS cryptographic protocols for secure communication over the
 Internet.
 .
 It contains the general-purpose command line binary /usr/bin/openssl,
 useful for cryptographic operations such as:
  * creating RSA, DH, and DSA key parameters;
  * creating X.509 certificates, CSRs, and CRLs;
  * calculating message digests;
  * encrypting and decrypting with ciphers;
  * testing SSL/TLS clients and servers;
  * handling S/MIME signed or encrypted mail.
Description-md5: 9b6de2bb6e1d9016aeb0f00bcf6617bd
Original-Maintainer: Debian OpenSSL Team <<EMAIL>>
Package: openssl
Priority: standard
Section: utils
Installed-Size: 934
Maintainer: <NAME> <<EMAIL>>
Original-Maintainer: Debian OpenSSL Team <<EMAIL>>
Architecture: amd64
Source: openssl-src (1.0.2g)
Version: 1.0.2g-1ubuntu4
Depends: libc6 (>= 2.15), libssl1.0.0 (>= 1.0.2g)
Suggests: ca-certificates
Filename: pool/main/o/openssl/openssl_1.0.2g-1ubuntu4_amd64.deb
Size: 492190
MD5sum: 8280148dc2991da94be5810ad4d91552
SHA1: b5326f27aae83c303ff934121dede47d9fce7c76
SHA256: e897ffc8d84b0d436baca5dbd684a85146ffa78d3f2d15093779d3f5a8189690
Description-en: Secure Sockets Layer toolkit - cryptographic utility
 This package is part of the OpenSSL project's implementation of the SSL
 and TLS cryptographic protocols for secure communication over the
 Internet.
 .
 It contains the general-purpose command line binary /usr/bin/openssl,
 useful for cryptographic operations such as:
  * creating RSA, DH, and DSA key parameters;
  * creating X.509 certificates, CSRs, and CRLs;
  * calculating message digests;
  * encrypting and decrypting with ciphers;
  * testing SSL/TLS clients and servers;
  * handling S/MIME signed or encrypted mail.
Description-md5: 9b6de2bb6e1d9016aeb0f00bcf6617bd
Bugs: https://bugs.launchpad.net/ubuntu/+filebug
Origin: Ubuntu
Supported: 5y
Task: standard, ubuntu-core, ubuntu-core, mythbuntu-frontend, mythbuntu-backend-slave, mythbuntu-backend-master, ubuntu-touch-core, ubuntu-touch, ubuntu-sdk-libs-tools, ubuntu-sdk
Package: alienblaster
Priority: extra
Section: universe/games
Installed-Size: 668
Maintainer: <NAME> <<EMAIL>>
Original-Maintainer: Debian Games Team <<EMAIL>>
Architecture: amd64
Source: alienblaster-src
Version: 1.1.0-9
Depends: alienblaster-data, libc6 (>= 2.14), libgcc1 (>= 1:3.0), libsdl-mixer1.2, libsdl1.2debian (>= 1.2.11), libstdc++6 (>= 5.2)
Filename: pool/universe/a/alienblaster/alienblaster_1.1.0-9_amd64.deb
Size: 180278
MD5sum: e53379fd0d60e0af6304af78aa8ef2b7
SHA1: ca405056cf66a1c2ae3ae1674c22b7d24cda4986
SHA256: ff25bd843420801e9adea4f5ec1ca9656b2aeb327d8102107bf5ebbdb3046c38
Description-en: Classic 2D shoot 'em up
 Your mission is simple: Stop the invasion of the aliens and blast them!
 .
 Alien Blaster is a classic 2D shoot 'em up featuring lots of different
 weapons, special items, aliens to blast and a big bad boss.
 .
 It supports both a single player mode and a cooperative two player mode
 for two persons playing on one computer.
Description-md5: da1f8f1a6453d62874036331e075d65f
Homepage: http://www.schwardtnet.de/alienblaster/
Bugs: https://bugs.launchpad.net/ubuntu/+filebug
Origin: Ubuntu
"""
    # Expected values are a recursive subset of the full parse result.
    out1 = [{'architecture': 'amd64',
             'package': 'openssl',
             'status': 'install ok installed',
             'version': '1.0.2g-1ubuntu4.5'},
            {'architecture': 'amd64',
             'source_name': 'openssl-src',
             'source_version': '1.0.2g',
             'package': 'openssl',
             'version': '1.0.2g-1ubuntu4'},
            {'architecture': 'amd64',
             'source_name': 'alienblaster-src',
             'package': 'alienblaster',
             'md5': 'e53379fd0d60e0af6304af78aa8ef2b7',
             'version': '1.1.0-9'},
            ]
    out = parse_apt_cache_show_pkgs_output(txt1)
    assert_is_subset_recur(out1, out, [dict, list])
def test_parse_apt_cache_policy_pkgs_output():
    """Parse ``apt-cache policy <pkgs>`` output, including version tables,
    arch-qualified names (skype:i386) and file: sources."""
    from ..debian import parse_apt_cache_policy_pkgs_output
    txt1 = """\
afni:
  Installed: 16.2.07~dfsg.1-2~nd90+1
  Candidate: 16.2.07~dfsg.1-2~nd90+1
  Version table:
 *** 16.2.07~dfsg.1-2~nd90+1 500
        500 http://neuro.debian.net/debian stretch/contrib amd64 Packages
        100 /var/lib/dpkg/status
openssl:
  Installed: 1.0.2g-1ubuntu4.5
  Candidate: 1.0.2g-1ubuntu4.8
  Version table:
     1.0.2g-1ubuntu4.8 500
        500 http://us.archive.ubuntu.com/ubuntu xenial-updates/main amd64 Packages
     1.0.2g-1ubuntu4.6 500
        500 http://security.ubuntu.com/ubuntu xenial-security/main amd64 Packages
 *** 1.0.2g-1ubuntu4.5 100
        100 /var/lib/dpkg/status
     1.0.2g-1ubuntu4 500
        500 http://us.archive.ubuntu.com/ubuntu xenial/main amd64 Packages
python-nibabel:
  Installed: 2.1.0-1
  Candidate: 2.1.0-1
  Version table:
 *** 2.1.0-1 900
        900 http://http.debian.net/debian stretch/main amd64 Packages
        900 http://http.debian.net/debian stretch/main i386 Packages
        600 http://http.debian.net/debian sid/main amd64 Packages
        600 http://http.debian.net/debian sid/main i386 Packages
        100 /var/lib/dpkg/status
     2.1.0-1~nd90+1 500
        500 http://neuro.debian.net/debian stretch/main amd64 Packages
        500 http://neuro.debian.net/debian stretch/main i386 Packages
python-biotools:
  Installed: (none)
  Candidate: 1.2.12-2
  Version table:
     1.2.12-2 600
        600 http://http.debian.net/debian sid/main amd64 Packages
        600 http://http.debian.net/debian sid/main i386 Packages
alienblaster:
  Installed: 1.1.0-9
  Candidate: 1.1.0-9
  Version table:
 *** 1.1.0-9 500
        500 http://us.archive.ubuntu.com/ubuntu xenial/universe amd64 Packages
        500 file:/my/repo ./ Packages
        500 file:/my/repo2 ubuntu/ Packages
        100 /var/lib/dpkg/status
skype:i386:
  Installed: (none)
  Candidate: (none)
  Version table:
     4.3.0.37-1 -1
        100 /var/lib/dpkg/status
"""
    # Expected (partial) parse for the 'openssl' entry only; checked as a
    # recursive subset of the full result.
    out1 = {'openssl': {'architecture': None,
                        'candidate': '1.0.2g-1ubuntu4.8',
                        'installed': '1.0.2g-1ubuntu4.5',
                        'versions': [{'installed': None,
                                      'priority': '500',
                                      'sources': [{'priority': '500',
                                                   'source': 'http://us.archive.ubuntu.com/ubuntu '
                                                             'xenial-updates/main amd64 '
                                                             'Packages'}],
                                      'version': '1.0.2g-1ubuntu4.8'},
                                     {'installed': None,
                                      'priority': '500',
                                      'sources': [{'priority': '500',
                                                   'source': 'http://security.ubuntu.com/ubuntu '
                                                             'xenial-security/main amd64 '
                                                             'Packages'}],
                                      'version': '1.0.2g-1ubuntu4.6'},
                                     {'installed': '***',
                                      'priority': '100',
                                      'sources': [{'priority': '100',
                                                   'source': '/var/lib/dpkg/status'}],
                                      'version': '1.0.2g-1ubuntu4.5'},
                                     {'installed': None,
                                      'priority': '500',
                                      'sources': [{'priority': '500',
                                                   'source': 'http://us.archive.ubuntu.com/ubuntu '
                                                             'xenial/main amd64 '
                                                             'Packages'}],
                                      'version': '1.0.2g-1ubuntu4'}]}}
    out = parse_apt_cache_policy_pkgs_output(txt1)
    assert_is_subset_recur(out1, out, [dict])
def test_parse_apt_cache_policy_source_info():
    """Parse the 'Package files:' section of ``apt-cache policy`` output
    (source release fields, origins, proxied and file: archives)."""
    from ..debian import parse_apt_cache_policy_source_info
    txt = """\
Package files:
 100 /var/lib/dpkg/status
     release a=now
 500 http://neuro.debian.net/debian xenial/non-free i386 Packages
     release o=NeuroDebian,a=xenial,n=xenial,l=NeuroDebian,c=non-free,b=i386
     origin neuro.debian.net
 500 http://neuro.debian.net/debian xenial/non-free amd64 Packages
     release o=NeuroDebian,a=xenial,n=xenial,l=NeuroDebian,c=non-free,b=amd64
     origin neuro.debian.net
 500 http://neuro.debian.net/debian data/non-free i386 Packages
     release o=NeuroDebian,a=data,n=data,l=NeuroDebian,c=non-free,b=i386
     origin neuro.debian.net
 500 http://neuro.debian.net/debian data/non-free amd64 Packages
     release o=NeuroDebian,a=data,n=data,l=NeuroDebian,c=non-free,b=amd64
     origin neuro.debian.net
 500 file:/my/repo2 ubuntu/ Packages
     release c=
 500 file:/my/repo ./ Packages
     release c=
 500 http://dl.google.com/linux/chrome/deb stable/main amd64 Packages
     release v=1.0,o=Google, Inc.,a=stable,n=stable,l=Google,c=main,b=amd64
     origin dl.google.com
 500 http://security.ubuntu.com/ubuntu xenial-security/restricted i386 Packages
     release v=16.04,o=Ubuntu,a=xenial-security,n=xenial,l=Ubuntu,c=restricted,b=i386
     origin security.ubuntu.com
 500 http://security.ubuntu.com/ubuntu xenial-security/restricted amd64 Packages
     release v=16.04,o=Ubuntu,a=xenial-security,n=xenial,l=Ubuntu,c=restricted,b=amd64
     origin security.ubuntu.com
 500 http://debproxy:9999/debian/ jessie-backports/contrib Translation-en
 100 http://debproxy:9999/debian/ jessie-backports/non-free amd64 Packages
     release o=Debian Backports,a=jessie-backports,n=jessie-backports,l=Debian Backports,c=non-free
     origin debproxy
 500 http://us.archive.ubuntu.com/ubuntu xenial-updates/universe amd64 Packages
     release v=16.04,o=Ubuntu,a=xenial-updates,n=xenial,l=Ubuntu,c=universe,b=amd64
     origin us.archive.ubuntu.com
 500 http://us.archive.ubuntu.com/ubuntu xenial-updates/multiverse i386 Packages
     release v=16.04,o=Ubuntu,a=xenial-updates,n=xenial,l=Ubuntu,c=multiverse,b=i386
     origin us.archive.ubuntu.com
Pinned packages:
"""
    # Expected (partial) parses for four representative entries; checked as a
    # recursive subset of the full result.
    out1 = {'http://neuro.debian.net/debian xenial/non-free i386 Packages':
                {'architecture': 'i386',
                 'archive': 'xenial',
                 'archive_uri': 'http://neuro.debian.net/debian',
                 'uri_suite': 'xenial',
                 'codename': 'xenial',
                 'component': 'non-free',
                 'label': 'NeuroDebian',
                 'origin': 'NeuroDebian',
                 'site': 'neuro.debian.net'
                 },
            'http://security.ubuntu.com/ubuntu xenial-security/restricted amd64 Packages':
                {'architecture': 'amd64',
                 'archive': 'xenial-security',
                 'archive_uri': 'http://security.ubuntu.com/ubuntu',
                 'uri_suite': 'xenial-security',
                 'codename': 'xenial',
                 'component': 'restricted',
                 'label': 'Ubuntu',
                 'origin': 'Ubuntu',
                 'site': 'security.ubuntu.com'
                 },
            'http://debproxy:9999/debian/ jessie-backports/contrib Translation-en':
                {'archive_uri': 'http://debproxy:9999/debian/',
                 'uri_suite': 'jessie-backports'
                 },
            'http://debproxy:9999/debian/ jessie-backports/non-free amd64 Packages':
                {'archive': 'jessie-backports',
                 'archive_uri': 'http://debproxy:9999/debian/',
                 'codename': 'jessie-backports',
                 'component': 'non-free',
                 'label': 'Debian Backports',
                 'origin': 'Debian Backports',
                 'site': 'debproxy',
                 'uri_suite': 'jessie-backports'
                 },
            }
    out = parse_apt_cache_policy_source_info(txt)
    assert_is_subset_recur(out1, out, [dict])
def test_get_apt_release_file_names():
    """Map an archive URI + suite to the cached apt Release/InRelease paths
    under /var/lib/apt/lists (http and file: URIs, suite optional)."""
    from ..debian import get_apt_release_file_names
    fn = get_apt_release_file_names('http://us.archive.ubuntu.com/ubuntu',
                                    'xenial-backports')
    assert "/var/lib/apt/lists/us.archive.ubuntu.com_ubuntu_dists_xenial-backports_InRelease" in fn
    assert "/var/lib/apt/lists/us.archive.ubuntu.com_ubuntu_dists_xenial-backports_Release" in fn
    fn = get_apt_release_file_names('file:/my/repo2/ubuntu',None)
    assert "/var/lib/apt/lists/_my_repo2_ubuntu_InRelease" in fn
    assert "/var/lib/apt/lists/_my_repo2_ubuntu_Release" in fn
def test_parse_dpkgquery_line():
    """Parse single ``dpkg-query -S`` lines: arch-qualified names, multiple
    owning packages, paths containing commas, and diversion lines (-> None)."""
    for line, expected in [
            ('zlib1g:i386: /lib/i386-linux-gnu/libz.so.1.2.8',
             {'name': 'zlib1g',
              'architecture': 'i386',
              'path': '/lib/i386-linux-gnu/libz.so.1.2.8',
              'pkgs_rest': None}),
            ('fail2ban: /usr/bin/fail2ban-client',
             {'name': 'fail2ban',
              'path': '/usr/bin/fail2ban-client',
              'pkgs_rest': None}),
            ('fsl-5.0-eddy-nonfree, fsl-5.0-core: /usr/lib/fsl/5.0',
             {'name': 'fsl-5.0-eddy-nonfree',
              'path': '/usr/lib/fsl/5.0',
              'pkgs_rest': ', fsl-5.0-core'}),
            ('pkg: path,with,commas',
             {'name': 'pkg',
              'path': 'path,with,commas',
              'pkgs_rest': None}),
            ('diversion by dash from: /bin/sh', None)
    ]:
        assert parse_dpkgquery_line(line) == expected
| StarcoderdataPython |
1792052 | """only allow unique keys in key-value store
Revision ID: <KEY>
Revises: c2aead9ff6d9
Create Date: 2019-03-09 12:12:25.914048
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = 'c2aead9ff6d9'
branch_labels = None
depends_on = None
def upgrade():
    """Apply: add a UNIQUE constraint on key_value_store.key."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_unique_constraint(None, 'key_value_store', ['key'])
    # ### end Alembic commands ###
def downgrade():
    """Revert: drop the UNIQUE constraint on key_value_store.key.

    NOTE(review): the constraint name is None here; on some backends the
    auto-generated name must be filled in for the drop to work — confirm.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint(None, 'key_value_store', type_='unique')
    # ### end Alembic commands ###
| StarcoderdataPython |
3327553 | from __future__ import print_function
import os, json, sys
import requests
from bs4 import BeautifulSoup
class Bing:
    """Scraper class for Bing web search."""
    def __init__(self):
        pass
    def get_page(self,query):
        """
        Fetches search response from bing.com for ``query``.
        returns : requests.Response holding the result page HTML
        """
        # A desktop browser User-Agent avoids the bot-oriented markup.
        header = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 Safari/537.36'}
        payload = {'q': query}
        response = requests.get('http://www.bing.com/search', params=payload, headers=header)
        return response
    def results_search(self,query):
        """ Search bing for the query and return the organic results.
            Returns: urls (list of dicts)
                [{'title': ..., 'link': ..., 'desc': ...}, ...]
        """
        urls = []
        response = self.get_page(query)
        soup = BeautifulSoup(response.text, 'html.parser')
        # Organic results are <li class="b_algo"> entries.
        for li in soup.findAll('li', {'class': 'b_algo'}):
            title = li.h2.text.replace('\n', '').replace(' ', '')
            url = li.h2.a['href']
            desc = li.find('p').text
            url_entry = {'title': title,
                         'link': url,
                         'desc': desc}
            urls.append(url_entry)
        return urls
| StarcoderdataPython |
1749898 | <reponame>chrisdunne/hibp
import requests
import urllib.parse
def get(email, key):
    """Query the HIBP v3 breachedaccount endpoint (truncated response).

    ``email`` may be a single address (returns the raw response text) or a
    list of addresses (returns a dict mapping each address to its response
    text).  Any other type yields None.
    """
    headers = {"hibp-api-key": key}
    if isinstance(email, str):
        url = f"https://haveibeenpwned.com/api/v3/breachedaccount/{urllib.parse.quote(email)}"
        return requests.get(url, headers=headers).text
    if isinstance(email, list):
        return {
            addr: requests.get(
                f"https://haveibeenpwned.com/api/v3/breachedaccount/{urllib.parse.quote(addr)}",
                headers=headers,
            ).text
            for addr in email
        }
def get_full(email, key):
    """Query the HIBP v3 breachedaccount endpoint with full (untruncated)
    breach details.

    ``email`` may be a single address (returns the raw response text) or a
    list of addresses (returns a dict mapping each address to its response
    text).  Any other type yields None.
    """
    headers = {"hibp-api-key": key}
    if isinstance(email, str):
        url = f"https://haveibeenpwned.com/api/v3/breachedaccount/{urllib.parse.quote(email)}?truncateResponse=false"
        return requests.get(url, headers=headers).text
    if isinstance(email, list):
        return {
            addr: requests.get(
                f"https://haveibeenpwned.com/api/v3/breachedaccount/{urllib.parse.quote(addr)}?truncateResponse=false",
                headers=headers,
            ).text
            for addr in email
        }
def get_breaches(key):
    """Return the full catalogue of breaches known to HIBP as raw JSON text."""
    response = requests.get(
        "https://haveibeenpwned.com/api/v3/breaches",
        headers={"hibp-api-key": key},
    )
    return response.text
def get_breach(site, key):
    """Return the details of a single named breach (``site``) as raw JSON text."""
    response = requests.get(
        f"https://haveibeenpwned.com/api/v3/breach/{site}",
        headers={"hibp-api-key": key},
    )
    return response.text
def get_pastes(email, key):
    """Return the pastes recorded for ``email`` as raw JSON text."""
    url = f"https://haveibeenpwned.com/api/v3/pasteaccount/{urllib.parse.quote(email)}"
    response = requests.get(url, headers={"hibp-api-key": key})
    return response.text
3340316 | <gh_stars>0
import os
import sys
import shutil
import random
from tqdm import tqdm
from datasets.data_format.voc import VOCDataSet
from datasets.data_format.yolo import builder
class Project(object):
    """Directory layout
    project
    ├── data.yaml # dataset configuration file
    ├── models # network model definitions (can be generated by script)
    │   ├── yolov5s.yaml # Small
    │   ├── yolov5m.yaml # Medium
    │   ├── yolov5l.yaml # Large
    │   └── yolov5x.yaml # XLarge
    ├── images # images
    │   ├── train # training set
    │   │   ├── 000001.jpg
    │   │   ├── 000002.jpg
    │   │   └── 000003.jpg
    │   └── val # validation set
    │       ├── 000010.jpg
    │       └── 000011.jpg
    ├── labels # YOLO-format annotations
    │   ├── train # training set
    │   │   ├── 000001.txt
    │   │   ├── 000002.txt
    │   │   └── 000003.txt
    │   └── val # validation set
    │       ├── 000010.txt
    │       └── 000011.txt
    └── inference # inference
        ├── images # source images
        └── output # annotated output images
    """
    # NOTE(review): ``dataset_split_radio`` is presumably a typo for
    # "ratio" (fraction of samples held out for validation) — it is part of
    # the public signature, so it is kept as-is.
    def __init__(self, project_dir='project', dataset_split_radio=0.2):
        train_dir = 'train'
        val_dir = 'val'
        self.project_dir = project_dir
        self.dataset_images_dir = os.path.join(project_dir, 'images')
        self.dataset_images_train_dir = os.path.join(self.dataset_images_dir, train_dir)
        self.dataset_images_val_dir = os.path.join(self.dataset_images_dir, val_dir)
        self.dataset_labels_dir = os.path.join(project_dir, 'labels')
        self.dataset_labels_train_dir = os.path.join(self.dataset_labels_dir, train_dir)
        self.dataset_labels_val_dir = os.path.join(self.dataset_labels_dir, val_dir)
        self.dataset_split_radio = dataset_split_radio
    def create(self, dataset):
        """(Re)create the images/ and labels/ trees, deleting any existing
        contents, then populate them from ``dataset``."""
        if os.path.exists(self.dataset_images_dir):
            shutil.rmtree(self.dataset_images_dir)
        os.makedirs(self.dataset_images_train_dir)
        os.makedirs(self.dataset_images_val_dir)
        if os.path.exists(self.dataset_labels_dir):
            shutil.rmtree(self.dataset_labels_dir)
        os.makedirs(self.dataset_labels_train_dir)
        os.makedirs(self.dataset_labels_val_dir)
        self._build_dataset(dataset)
    def _build_dataset(self, dataset):
        """Randomly split the dataset and copy files into train/val dirs.

        The split is unseeded, so repeated runs produce different splits.
        """
        image_paths, label_paths, _ = dataset.load()
        assert(len(image_paths) == len(label_paths))
        image_size = len(image_paths)
        indexs = list(range(0, image_size))
        random.shuffle(indexs)
        # First ``split_size`` shuffled indices become the validation set.
        split_size = int(image_size*self.dataset_split_radio)
        train_indexs, val_indexs = indexs[split_size:], indexs[:split_size]
        for i in tqdm(train_indexs):
            shutil.copy(image_paths[i], self.dataset_images_train_dir)
            shutil.copy(label_paths[i], self.dataset_labels_train_dir)
        for i in tqdm(val_indexs):
            shutil.copy(image_paths[i], self.dataset_images_val_dir)
            shutil.copy(label_paths[i], self.dataset_labels_val_dir)
        shutil.copy(dataset.classes_path, self.project_dir)
| StarcoderdataPython |
3348980 | from poop.hfdp.command.remote.ceiling_fan import CeilingFan
class CeilingFanOnCommand:
    """Command-pattern wrapper that turns a ceiling fan on at high speed."""

    def __init__(self, ceiling_fan: CeilingFan) -> None:
        # Receiver this command operates on when executed.
        self.__ceiling_fan = ceiling_fan

    def execute(self) -> None:
        """Carry out the request: set the fan to its high setting."""
        self.__ceiling_fan.high()
| StarcoderdataPython |
3362672 | <reponame>python-demo-codes/basics
# HEAD
# DataType - Tuples as return Type with Multiple Returns
# DESCRIPTION
# Describes multiple returns from functions returned as tuple
# Also referred to as destructuring
# RESOURCES
#
def square(x, y):
    """Return the squares of *x* and *y*, packed implicitly into a tuple."""
    squared_x = x * x
    squared_y = y * y
    return squared_x, squared_y


t = square(2, 3)
print(t)  # Produces a tuple - (4, 9)


def squareTwo(x, y):
    """Identical to square(); used to demonstrate unpacking at the call site."""
    return (x * x, y * y)


# The tuple is destructured straight into two separate names.
xsq, ysq = squareTwo(2, 3)
print(xsq)  # Prints 4
print(ysq)  # Prints 9
| StarcoderdataPython |
# Read the track length and the number of dangerous-obstacle entries.
n, y = map(int, input().split())
# Positions of the dangerous obstacles (duplicate inputs collapse in the set).
dangerous = set([int(input()) for _ in range(y)])
# Print every safe position, in ascending order.
for position in range(n):
    if position not in dangerous:
        print(position)
print(f"Mario got {len(dangerous)} of the dangerous obstacles.")
| StarcoderdataPython |
1602033 | import unittest
from unittest_onerror import on_fail
def my_fail_handler(testcase, exception=None):
    """Report a failed test case and the exception that triggered the failure."""
    message = 'Hey, test {} failed:\n{}'.format(testcase.id(), exception)
    print(message)
class MyTestCase(unittest.TestCase):
    """Demonstrates the on_fail decorator from unittest_onerror."""

    @on_fail(my_fail_handler)
    def test_which_fails(self):
        # Deliberately failing assertion: the handler fires, then the error is
        # re-raised (default), so the test is still reported as a failure.
        self.assertEqual(0, 1)

    # error will not be re-raised => test will be "OK"
    @on_fail(my_fail_handler, reraise=False)
    def test_which_fails_no_reraise(self):
        self.assertEqual(0, 1)
# Bug fix: the guard previously compared against 'main', which is never the
# value of __name__ ('__main__' when run directly), so running this file as a
# script silently did nothing.
if __name__ == '__main__':
    unittest.main()
1654372 | <filename>parallel_esn/example/weather_recursive.py
import numpy as np
import argparse
import matplotlib.pyplot as plt
from ..esn import ESN
from ..utils import to_forecast_form, standardize_traindata, scale_data, unscale_data
from ..bo import BO
"""
Attempts to predict a window of humidities in a recursive manner, producing more
accurate results for near term.
"""
def prep_data(filename, in_len, pred_len):
    """Load the weather CSV and chunk it into forecast-form batches.

    Args:
        filename: CSV with columns datetime, temperature, humidity, pressure,
            wind_direction, wind_speed (the datetime column is skipped).
        in_len: length of the input window fed to the ESN.
        pred_len: number of steps to predict beyond the input window.

    Returns:
        (trainU, trainY, valU, valY, testU, testY, mu_arr, sigma_arr) where the
        U/Y pairs are one-step-forecast batches and mu/sigma are the training
        means and standard deviations used for scaling.
    """
    # Columns are
    # 0:datetime, 1:temperature, 2:humidity, 3:pressure, 4:wind_direction, 5:wind_speed
    data = np.genfromtxt(filename, delimiter=',', skip_header=1,
                         usecols=(1, 2, 3, 4, 5), dtype=float)
    # Remove rows that are missing values
    data = data[~np.isnan(data).any(axis=1)]
    # We will save the last 1/8th of the data for validation/testing data,
    # 1/16 for validation, 1/16 for testing
    total_len = data.shape[0]
    val_len = total_len // 16
    test_len = total_len // 16
    train_len = total_len - val_len - test_len
    train_data = data[:train_len]
    val_data = data[train_len:train_len + val_len]
    test_data = data[train_len + val_len:]
    # To stay in the most accurate ranges of the ESN, and to put the various
    # features on equal footing, we standardize the training data.
    train_data, mu_arr, sigma_arr = standardize_traindata(train_data)
    # We now need to scale our validation and test data by the means and standard
    # deviations determined from the training data
    val_data = scale_data(val_data, mu_arr, sigma_arr)
    test_data = scale_data(test_data, mu_arr, sigma_arr)
    # We need to convert the time series data to forecast form for one-step
    # prediction training. For simplicity we will discard the remainder batches rU, rY
    train_batch_size = 200
    # val/test batches hold one full input window plus the prediction horizon
    val_batch_size = in_len + pred_len + 1
    test_batch_size = in_len + pred_len + 1
    trainU, trainY, rU, rY = to_forecast_form(train_data, batch_size=train_batch_size)
    valU, valY, rU, rY = to_forecast_form(val_data, batch_size=val_batch_size)
    testU, testY, rU, rY = to_forecast_form(test_data, batch_size=test_batch_size)
    return trainU, trainY, valU, valY, testU, testY, mu_arr, sigma_arr
def main():
    """Run Bayesian-optimized ESN search and plot recursive weather forecasts.

    Repeatedly samples ESN hyperparameters via Bayesian optimization, trains
    each candidate, plots its recursive predictions (grey), and finally
    overlays the best model's predictions (red) against observations (green)
    for nine randomly chosen test windows.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_len', type=int, nargs='?', default=200)
    parser.add_argument('--pred_len', type=int, nargs='?', default=24)
    parser.add_argument('--num_iter', type=int, nargs='?', default=5)
    parser.add_argument('--filename', type=str, nargs='?', default='boston_weather.csv')
    args = parser.parse_args()
    in_len = args.input_len
    pred_len = args.pred_len
    trainU, trainY, valU, valY, testU, testY, mu, sigma = prep_data(args.filename,
                                                                    in_len, pred_len)
    # Bayesian optimizer over reservoir degree k and hidden dimension
    bo = BO(k=(2, 50), hidden_dim=(200, 400), random_state=12)
    # for reproducibility
    np.random.seed(12)
    best_loss = 1e8
    best_esn = None
    # NOTE: `time` here is an array of hour indices, not the time module
    time = np.arange(in_len+pred_len+1)
    # To choose which runs to look at at random
    if testU.shape[0] >= 9:
        replace = False
    else:
        replace = True
    wi = np.random.choice(testU.shape[0], 9, replace=replace)
    # Humidity Figure
    fig_h, ax_h = plt.subplots(3, 3, figsize=(15, 14))
    ax_h = ax_h.flatten()
    # Temperature Figure
    fig_t, ax_t = plt.subplots(3, 3, figsize=(15, 14))
    ax_t = ax_t.flatten()
    # Plot the (unscaled) input windows once, up front
    for k in range(len(wi)):
        dat_in = unscale_data(testU[wi[k], :, :in_len].T, mu, sigma)
        ax_t[k].plot(time[:in_len], dat_in[:, 0], 'ob', label='input')
        ax_h[k].plot(time[:in_len], dat_in[:, 1], 'ob', label='input')
    for i in range(args.num_iter):
        # Ask the Bayesian optimizer for the next hyperparameter candidate
        h_star = bo.find_best_choices()
        print("Iteration {}".format(i))
        print(h_star)
        esn = ESN(input_dim=trainU.shape[1], hidden_dim=h_star['hidden_dim'],
                  output_dim=trainY.shape[1], k=h_star['k'],
                  spectral_radius=h_star['spectral_radius'],
                  p=h_star['p'], alpha=h_star['alpha'], beta=h_star['beta'])
        # val_loss = esn.train_validate(trainU, trainY, valU, valY, verbose=1, compute_loss_freq=10)
        val_loss = esn.recursive_train_validate(trainU, trainY, valU, valY,
                                                in_len, pred_len, verbose=1, compute_loss_freq=10)
        print("validation loss = {}".format(val_loss))
        # Plot this candidate's predictions faintly in grey
        for k in range(len(wi)):
            s_pred = esn.recursive_predict(testU[wi[k], :, :in_len], pred_len)
            dat_pred = unscale_data(s_pred.T, mu, sigma)
            ax_t[k].plot(time[in_len:in_len+pred_len], dat_pred[:, 0], '-', color='#888888', alpha=0.1)
            ax_h[k].plot(time[in_len:in_len+pred_len], dat_pred[:, 1], '-', color='#888888', alpha=0.1)
        if val_loss < best_loss:
            best_esn = esn
            best_loss = val_loss
        # Feed the observed loss back to the optimizer's Gaussian process
        bo.update_gpr(X=[h_star[val] for val in h_star.keys()], y=val_loss)
    # Overlay observations and the best model's predictions
    for k in range(len(wi)):
        dat_obs = unscale_data(testU[wi[k], :, in_len:in_len+pred_len].T, mu, sigma)
        ax_t[k].plot(time[in_len:in_len+pred_len], dat_obs[:, 0], '^g', label='observed')
        ax_h[k].plot(time[in_len:in_len+pred_len], dat_obs[:, 1], '^g', label='observed')
        s_pred = best_esn.recursive_predict(testU[wi[k], :, :in_len], pred_len)
        dat_pred = unscale_data(s_pred.T, mu, sigma)
        ax_t[k].plot(time[in_len:in_len+pred_len], dat_pred[:, 0], '-r', label="Best ESN")
        ax_h[k].plot(time[in_len:in_len+pred_len], dat_pred[:, 1], '-r', label="Best ESN")
    plt.figure(fig_t.number)
    plt.suptitle("Boston Temperature, Recursive Prediction")
    plt.figure(fig_h.number)
    plt.suptitle("Boston Humidity, Recursive Prediction")
    for i in range(len(ax_h)):
        ax_t[i].set_xlim(time[in_len - 2*pred_len], time[in_len + pred_len])
        ax_h[i].set_xlim(time[in_len - 2*pred_len], time[in_len + pred_len])
    for idx in [0, 3, 6]:
        ax_t[idx].set_ylabel("Temperature (Kelvin)")
        ax_h[idx].set_ylabel("Humidity (percent)")
    for idx in [6, 7, 8]:
        ax_t[idx].set_xlabel("Hours")
        ax_h[idx].set_xlabel("Hours")
    ax_h[0].legend(loc=2, numpoints=1)
    plt.show()
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
4823758 | from flask import Blueprint
# Blueprint grouping all workspace-related routes.
workspace = Blueprint('workspace', __name__)


@workspace.route('/', methods=['GET'])
def get_workspaces():
    """Return the collection of workspaces (currently a stub: always empty).

    NOTE(review): returning a bare list from a view requires Flask >= 2.2
    (older versions cannot serialize a list return value) -- confirm the
    project's Flask version or wrap the result in jsonify().
    """
    return []
| StarcoderdataPython |
108065 | <reponame>YangJae96/KMU_Visual-SLAM<gh_stars>0
from . import slam
# Registry of sub-command modules exposed by this package; consumers iterate
# this list to install each command's CLI entry point.
opensfm_commands = [
    slam,
]
| StarcoderdataPython |
199142 | <reponame>phlax/abstracts
from typing import (
Any, Coroutine, Dict, Generator, List, Optional, Set,
Tuple, TypedDict)
import aiohttp
from . import cve, dependency, typing
# Scanner configuration
class BaseCVEConfigDict(TypedDict):
    """Required scanner configuration keys."""
    # non-optional attributes
    nist_url: str
    start_year: int


class CVEConfigDict(BaseCVEConfigDict, total=False):
    """Scanner configuration with optional keys layered on the base."""
    ignored_cves: List[str]


# NIST NVD CVE data feed format (typed views over the downloaded JSON)

class CVENodeMatchDict(TypedDict, total=False):
    """A single CPE match entry with optional version range bounds."""
    cpe23Uri: str
    versionStartIncluding: str
    versionEndIncluding: str
    versionStartExcluding: str
    versionEndExcluding: str


class CVENodeDict(TypedDict, total=False):
    """A configuration node: CPE matches plus nested child nodes."""
    cpe_match: List[CVENodeMatchDict]
    # forward reference to this module's own type (hence the string + ignore)
    children: List["typing.CVENodeDict"]    # type:ignore


class CVEItemConfigurationsDict(TypedDict, total=False):
    """The `configurations` object of a CVE item."""
    nodes: List[CVENodeDict]


class CVEItemDict(TypedDict, total=False):
    """One entry of the NVD feed's CVE_Items array."""
    configurations: CVEItemConfigurationsDict
    cve: Dict
    impact: Dict
    lastModifiedDate: str
    publishedDate: str


class CVEJsonDict(TypedDict, total=False):
    """Top-level shape of an NVD JSON feed document."""
    CVE_Items: List[CVEItemDict]
# Package defined types
class BaseDependencyMetadataDict(TypedDict):
    """Required metadata recorded for every tracked dependency."""
    release_date: str
    version: str


class DependencyMetadataDict(BaseDependencyMetadataDict, total=False):
    """Dependency metadata with an optional CPE identifier."""
    cpe: Optional[str]


# Aliases used throughout the scanner.
# CPE identifier -> set of CVE ids affecting it
CPERevmapDict = Dict[str, Set[str]]
# CVE id -> parsed CVE object
CVEDict = Dict[str, "cve.ACVE"]
CVEDataTuple = Tuple[CVEDict, CPERevmapDict]
DependenciesDict = Dict[str, DependencyMetadataDict]
TrackedCPEDict = Dict[str, "dependency.ADependency"]
# Generator yielding download coroutines; callers send URLs back in
DownloadGenerator = Generator[
    Coroutine[Any, Any, aiohttp.ClientResponse],
    str,
    None]
1748305 | <reponame>hydratk/hydratk-ext-datagen
# -*- coding: utf-8 -*-
"""Module for sample XML generation from WSDL/XSD
.. module:: datagen.xmlgen
:platform: Unix
:synopsis: Module for sample XML generation from WSDL/XSD
.. moduleauthor:: <NAME> <<EMAIL>>
"""
"""
Events:
-------
xmlgen_before_import_spec
xmlgen_after_import_spec
xmlgen_before_write
xmlgen_after_write
"""
from hydratk.core.masterhead import MasterHead
from hydratk.core import event
from suds.client import Client, TypeNotFound
from lxml.etree import Element, SubElement, tostring
from os import path
from re import search
from logging import getLogger, CRITICAL
# Silence suds' resolver logging below CRITICAL; it is very chatty during
# schema resolution and would flood the hydratk debug output.
getLogger('suds.resolver').setLevel(CRITICAL)
''' WSDL template '''
wsdl_tmpl = """
<wsdl:definitions targetNamespace="{0}" xmlns:tns="{1}" xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/"
xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/">
<wsdl:types>
<xsd:schema attributeFormDefault="qualified" targetNamespace="{2}">
<xsd:include schemaLocation="{3}"/>
</xsd:schema>
</wsdl:types>
<wsdl:portType name="dummyPort">
</wsdl:portType>
<wsdl:binding name="dummyBinding" type="tns:dummyPort">
</wsdl:binding>
<wsdl:service name="dummy">
<wsdl:port name="dummyPort" binding="tns:dummyBinding">
<soap:address location="http://localhost/dummy"/>
</wsdl:port>
</wsdl:service>
</wsdl:definitions>
"""
class XMLGen(object):
    """Generates sample XML documents from a WSDL or XSD specification.

    A suds client is built from the imported specification; sample documents
    are then produced by recursively instantiating schema types and filling
    leaf elements with the placeholder '?'.
    """

    _mh = None       # MasterHead singleton used for events and logging
    _path = None     # absolute path of the imported specification
    _client = None   # suds Client built from the specification

    def __init__(self):
        """Class constructor

        Called when object is initialized

        Args:
           none

        """

        self._mh = MasterHead.get_head()

    @property
    def path(self):
        """ path property getter """

        return self._path

    @property
    def client(self):
        """ client property getter """

        return self._client

    def import_spec(self, filename):
        """Method imports specification

        Supported types are WSDL (loaded directly) and XSD (wrapped in a
        dummy WSDL first, since suds only parses WSDL).

        Args:
           filename (str): filename

        Returns:
           bool: result

        Raises:
           event: xmlgen_before_import_spec
           event: xmlgen_after_import_spec

        """

        try:
            self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
                'datagen_xmlgen_import_spec', filename), self._mh.fromhere())

            ev = event.Event('xmlgen_before_import_spec', filename)
            if (self._mh.fire_event(ev) > 0):
                # an event hook may have substituted the filename
                filename = ev.argv(0)

            if (ev.will_run_default()):
                if (path.exists(filename)):
                    # specification type inferred from the file extension
                    spec_type = (filename.split('.')[-1]).upper()
                    filename = path.join(
                        path.dirname(path.abspath(filename)), filename)
                    if (spec_type == 'WSDL'):
                        self._client = Client('file://' + filename, cache=None)
                        self._path = path.abspath(filename)
                    elif (spec_type == 'XSD'):
                        # wrap the XSD in a throwaway WSDL so suds can load it
                        wsdl = self._create_dummy_wsdl(filename)
                        self._client = Client('file://' + wsdl, cache=None)
                        self._path = wsdl
                    else:
                        raise ValueError(
                            'Unknown specification type: {0}'.format(spec_type))
                else:
                    raise ValueError('File {0} not found'.format(filename))

            self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
                'datagen_xmlgen_spec_imported'), self._mh.fromhere())
            ev = event.Event('xmlgen_after_import_spec')
            self._mh.fire_event(ev)

            return True

        except (Exception, ValueError) as ex:
            self._mh.demsg(
                'htk_on_error', 'error: {0}'.format(ex), self._mh.fromhere())
            return False

    def toxml(self, root, outfile=None, envelope=False):
        """Method creates sample xml file

        Args:
           root (str): root element name
           outfile (str): output filename, default sample.xml
           envelope (bool): create SOAP envelope

        Returns:
           bool: result

        Raises:
           event: xmlgen_before_write
           event: xmlgen_after_write

        """

        try:
            self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
                'datagen_xmlgen_write_sample'), self._mh.fromhere())

            ev = event.Event('xmlgen_before_write', root, outfile, envelope)
            if (self._mh.fire_event(ev) > 0):
                root = ev.argv(0)
                outfile = ev.argv(1)
                envelope = ev.argv(2)

            if (ev.will_run_default()):
                if (self._client == None):
                    raise ValueError('Specification is not imported yet')

                if (envelope):
                    # wrap the sample document in a SOAP 1.1 Envelope/Body
                    ns = '{%s}' % 'http://schemas.xmlsoap.org/soap/envelope/'
                    doc = Element(ns + 'Envelope')
                    SubElement(doc, 'Header')
                    body = SubElement(doc, 'Body')
                    body.append(self._toxml_rec(root))
                else:
                    doc = self._toxml_rec(root)

                outfile = 'sample.xml' if (outfile == None) else outfile
                with open(outfile, 'w') as f:
                    f.write(tostring(
                        doc, encoding='UTF-8', xml_declaration=True, pretty_print=True).decode())

            self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
                'datagen_xmlgen_sample_written', outfile), self._mh.fromhere())
            ev = event.Event('xmlgen_after_write')
            self._mh.fire_event(ev)

            return True

        except (Exception, ValueError) as ex:
            self._mh.demsg(
                'htk_on_error', 'error: {0}'.format(ex), self._mh.fromhere())
            return False

    def _toxml_rec(self, root, obj=None, ns_cur=None):
        """Method creates sample xml document

        It is used in recursive traversal; leaf elements are filled with the
        placeholder '?', namespaces are emitted only when they change from
        the parent's namespace.

        Args:
           root (str): root element name
           obj (obj): suds element object
           ns_cur (str): current namespace

        Returns:
           xml: xml document

        Raises:
           error: ValueError

        """

        if (self._client == None):
            raise ValueError('Specification is not imported yet')

        try:
            if (obj == None):
                obj = self._client.factory.create(root)
            ns = '{%s}' % self._get_element_ns(obj.__class__.__name__)
            if (ns != '{None}' and ns != ns_cur):
                doc = Element(ns + root)
            else:
                # same namespace as parent: emit an unqualified element
                doc = Element(root)
                ns = ns_cur
            for key in obj.__keylist__:
                subelem = obj[key]
                if (subelem == None):
                    SubElement(doc, key).text = '?'
                elif (subelem == [] or '[]' in subelem.__str__()):
                    # repeated (array) element: instantiate one sample child
                    inner_doc = self._toxml_rec(key, None, ns)
                    if (inner_doc != None):
                        doc.append(inner_doc)
                else:
                    el_type = self._get_element_type(
                        subelem.__class__.__name__)
                    if (el_type == 'Simple'):
                        SubElement(doc, key).text = '?'
                    elif (el_type == 'Complex'):
                        inner_doc = self._toxml_rec(key, subelem, ns)
                        if (inner_doc != None):
                            doc.append(inner_doc)

            return doc

        except TypeNotFound:
            # unknown schema type: prune this branch of the sample document
            return None

    def _get_element_type(self, element):
        """Method gets element XSD type

        It is used to determine if element is Simple or Complex

        Args:
           element (str): element name

        Returns:
           str: element type, 'Simple'|'Complex'|None

        Raises:
           error: ValueError

        """

        if (self._client == None):
            raise ValueError('Specification is not imported yet')

        el_type = None
        for value in self._client.wsdl.schema.types.values():
            if (value.name == element):
                if ('Simple' in value.id):
                    el_type = 'Simple'
                elif ('Complex' in value.id):
                    el_type = 'Complex'
                break

        return el_type

    def _get_element_ns(self, element):
        """Method gets element XSD namespace

        It is used to construct XML element with correct namespaces

        Args:
           element (str): element name

        Returns:
           str: element namespace, None when the element is not found

        Raises:
           error: ValueError

        """

        if (self._client == None):
            raise ValueError('Specification is not imported yet')

        ns = None
        # schema type keys are (name, namespace) tuples
        for key in self._client.wsdl.schema.types.keys():
            if (key[0] == element):
                ns = key[1]
                break

        return ns

    def _create_dummy_wsdl(self, xsd):
        """Method creates dummy WSDL file

        Workaround method:
        Library suds is designed for SOAP and imports WSDL only
        Dummy WSDL imports given XSD and is parsed automatically
        File is stored in same folder as XSD file (with suffix .wsdl)

        Args:
           xsd (str): XSD filename

        Returns:
           str: WSDL filename

        Raises:
           error: ValueError

        """

        if (path.exists(xsd)):
            try:
                with open(xsd, 'r') as f:
                    # extract the schema's targetNamespace for the template
                    tns = search(r'targetNamespace="(.*)"', f.read()).group(1)
                    if ('"' in tns):
                        tns = tns[: tns.index('"')]

                filename = xsd.split('/')[-1]
                wsdl = path.abspath(xsd)[:-3] + 'wsdl'
                with open(wsdl, 'w') as f:
                    f.write(wsdl_tmpl.format(tns, tns, tns, filename))
                return wsdl
            except AttributeError as ex:
                # regex search returned None: no targetNamespace attribute
                raise ValueError('File {0} is not valid XSD'.format(xsd))
        else:
            raise ValueError('File {0} not found'.format(xsd))
| StarcoderdataPython |
161429 | # -*- coding: utf-8 -*-
#pylint: disable = missing-docstring, blacklisted-name, unused-argument, invalid-name, line-too-long, protected-access
import unittest
import re
from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktLanguageVars
import context #pylint: disable=unused-import
from qcrit import textual_feature
#[^\s\d’”\'\")\)\]\}\.,:;]
#[“‘—\-†&vâ\*\^(α-ωΑ-Ὠ`̔]
#΄´´``′″‴
# Configure sentence-terminal punctuation: '.', ASCII ';' and the Greek
# question mark (visually a semicolon).
textual_feature.setup_tokenizers(terminal_punctuation=('.', ';', ';'))
p = PunktLanguageVars()
#TODO don't mess with the PunktLanguageVars instance variables, mess with the class variables
# Rebuild Punkt's word tokenizer regex with an extended non-word character
# class covering Greek/typographic punctuation, daggers, quotes etc.
p._re_word_tokenizer = re.compile(
    PunktLanguageVars._word_tokenize_fmt % {
        'NonWord': r"(?:[\d\.\?¿؟\!¡!‽…⋯᠁ฯ,،,、、。°※··᛫~\:;;\\\/⧸⁄()\(\)\[\]\{\}\<\>\'\"‘’“”‹›«»《》\|‖\=\-\‐\‒\–\—\―_\+\*\^\$£€§%#@&†‡])",
        'MultiChar': PunktLanguageVars._re_multi_char_punct,
        'WordStart': r"[^\d\.\?¿؟\!¡!‽…⋯᠁ฯ,،,、、。°※··᛫~\:;;\\\/⧸⁄()\(\)\[\]\{\}\<\>\'\"‘’“”‹›«»《》\|‖\=\-\‐\‒\–\—\―_\+\*\^\$£€§%#@&†‡]",
    }, re.UNICODE | re.VERBOSE)
# Same character class applied to the period-context regex used for
# sentence-boundary detection.
p._re_period_context = re.compile(
    PunktLanguageVars._period_context_fmt % {
        'NonWord': r"(?:[\d\.\?¿؟\!¡!‽…⋯᠁ฯ,،,、、。°※··᛫~\:;;\\\/⧸⁄()\(\)\[\]\{\}\<\>\'\"‘’“”‹›«»《》\|‖\=\-\‐\‒\–\—\―_\+\*\^\$£€§%#@&†‡])",
        'SentEndChars': p._re_sent_end_chars,
    }, re.UNICODE | re.VERBOSE)
# Independent tokenizer instance used for cross-checking in the tests below.
test_sentence_tokenizer = PunktSentenceTokenizer(lang_vars=p)
class TestParsers(unittest.TestCase):
    """Exercises the word and sentence tokenizers against edge cases:
    plain ASCII, straight and slanted quotes, polytonic Greek, numbers,
    and dagger symbols."""

    def setUp(self):
        pass

    def test_sentences1(self):
        # '?' is not configured as terminal punctuation, so it must not split
        file = 'test test. test test test? test test test; test test. test.'
        result = textual_feature.tokenize_types['sentences']['func'](file)
        expected = ['test test.', 'test test test? test test test;', 'test test.', 'test.']
        self.assertEqual(expected, result)

    def test_sentence_words1(self):
        file = 'test test. test test test? test test test; test test. test.'
        result = textual_feature.tokenize_types['sentence_words']['func'](file)
        expected = [
            ['test', 'test', '.'], ['test', 'test', 'test', '?', 'test', 'test', 'test', ';'],
            ['test', 'test', '.'], ['test', '.']
        ]
        self.assertEqual(expected, result)

    def test_sentence_words2(self):
        file = 'a b ccccccc. aaa aa bb; bb; ads ofiihwio; freino. daieof; frinoe.'
        result = textual_feature.tokenize_types['sentence_words']['func'](file)
        expected = [
            ['a', 'b', 'ccccccc', '.'], ['aaa', 'aa', 'bb', ';'], ['bb', ';'], ['ads', 'ofiihwio', ';'],
            ['freino', '.'], ['daieof', ';'], ['frinoe', '.']
        ]
        self.assertEqual(expected, result)

    def test_sentence_words3(self):
        # flat word stream via the module under test
        file = 'a b ccccccc. aaa aa bb; bb; ads ofiihwio; freino. daieof; frinoe.'
        result = textual_feature.tokenize_types['words']['func'](file)
        expected = [
            'a', 'b', 'ccccccc', '.', 'aaa', 'aa', 'bb', ';', 'bb', ';', 'ads', 'ofiihwio', ';',
            'freino', '.', 'daieof', ';', 'frinoe', '.'
        ]
        self.assertEqual(expected, result)

    def test_sentence_words4(self):
        # same input, tokenized directly with the patched PunktLanguageVars
        file = 'a b ccccccc. aaa aa bb; bb; ads ofiihwio; freino. daieof; frinoe.'
        result = p.word_tokenize(file)
        expected = [
            'a', 'b', 'ccccccc', '.', 'aaa', 'aa', 'bb', ';', 'bb', ';', 'ads', 'ofiihwio', ';',
            'freino', '.', 'daieof', ';', 'frinoe', '.'
        ]
        self.assertEqual(expected, result)

    def test_sentence_slant_quote(self):
        # straight and slanted quotes, with the period inside and outside
        s = 'a b c. "a b c". a b c. "a b c." a b c. “a b c”. a b c. “a b c.” a b c.'
        result = textual_feature.tokenize_types['sentences']['func'](s)
        expected = ['a b c.', '"a b c".', 'a b c.', '"a b c."', 'a b c.', '“a b c”.', 'a b c.', '“a b c.”', 'a b c.']
        self.assertEqual(expected, result)

    def test_sentence_slant_quote1_5(self):
        s = 'a b c. "a b c". a b c. "a b c." a b c. “a b c”. a b c. “a b c.” a b c.'
        result = textual_feature.sentence_tokenizer.tokenize(s)
        expected = ['a b c.', '"a b c".', 'a b c.', '"a b c."', 'a b c.', '“a b c”.', 'a b c.', '“a b c.”', 'a b c.']
        self.assertEqual(expected, result)

    def test_sentence_slant_quote2(self):
        s = 'a b c. "a b c". a b c. "a b c." a b c. “a b c”. a b c. “a b c.” a b c.'
        result = test_sentence_tokenizer.tokenize(s)
        expected = ['a b c.', '"a b c".', 'a b c.', '"a b c."', 'a b c.', '“a b c”.', 'a b c.', '“a b c.”', 'a b c.']
        self.assertEqual(expected, result)

    def test_apollodorus_slant_quote(self):
        # polytonic Greek with embedded slanted quotes (Apollodorus)
        s = "καὶ εὑρέθησαν οὕτω. Μόψος δὲ συὸς οὔσης ἐπιτόκου ἠρώτα Κάλχαντα, πόσους χοίρους κατὰ γαστρὸς ἔχει καὶ πότε τέκοι:τοῦ δὲ εἰπόντος: “ὀκτώ,” μειδιάσας ὁ Μόψος ἔφη: “Κάλχας τῆς ἀκριβοῦς μαντείας ἀπεναντιῶς διακεῖται, ἐγὼ δ' ̓Απόλλωνος καὶ Μαντοῦς παῖς ὑπάρχων τῆς ἀκριβοῦς μαντείας τὴν ὀξυδορκίαν πάντως πλουτῶ, καὶ οὐχ ὡς ὁ Κάλχας ὀκτώ, ἀλλ' ἐννέα κατὰ γαστρός, καὶ τούτους ἄρρενας ὅλους ἔχειν μαντεύομαι, καὶ αὔριον ἀνυπερθέτως ἐν ἕκτῃ ὥρᾳ τεχθήσεσθαι.”ὧν γενομένων Κάλχας ἀθυμήσας ἀπέθανεκαὶ ἐτάφη ἐν Νοτίῳ."
        result = textual_feature.tokenize_types['sentences']['func'](s)
        expected = ['καὶ εὑρέθησαν οὕτω.', "Μόψος δὲ συὸς οὔσης ἐπιτόκου ἠρώτα Κάλχαντα, πόσους χοίρους κατὰ γαστρὸς ἔχει καὶ πότε τέκοι:τοῦ δὲ εἰπόντος: “ὀκτώ,” μειδιάσας ὁ Μόψος ἔφη: “Κάλχας τῆς ἀκριβοῦς μαντείας ἀπεναντιῶς διακεῖται, ἐγὼ δ' ̓Απόλλωνος καὶ Μαντοῦς παῖς ὑπάρχων τῆς ἀκριβοῦς μαντείας τὴν ὀξυδορκίαν πάντως πλουτῶ, καὶ οὐχ ὡς ὁ Κάλχας ὀκτώ, ἀλλ' ἐννέα κατὰ γαστρός, καὶ τούτους ἄρρενας ὅλους ἔχειν μαντεύομαι, καὶ αὔριον ἀνυπερθέτως ἐν ἕκτῃ ὥρᾳ τεχθήσεσθαι.", "”ὧν γενομένων Κάλχας ἀθυμήσας ἀπέθανεκαὶ ἐτάφη ἐν Νοτίῳ."]
        self.assertEqual(expected, result)

    def test_numbers(self):
        # decimal points inside numbers must not end a sentence
        s = '1234.4321 32. 4324 4321 432 1. 134 52.653 142 41. 41268.'
        result = textual_feature.tokenize_types['sentences']['func'](s)
        expected = ['1234.4321 32.', '4324 4321 432 1.', '134 52.653 142 41.', '41268.']
        self.assertEqual(expected, result)

    def test_dagger(self):
        # dagger/double-dagger must tokenize as standalone word tokens
        s = 'a b† c. "a b‡ c". a b c. "a b c†." a b c. “a b c†”. a b c. “a‡ b c.” a b c.'
        result = textual_feature.word_tokenizer.word_tokenize(s)
        expected = ['a', 'b', '†', 'c', '.', '"', 'a', 'b', '‡', 'c', '"', '.', 'a', 'b', 'c', '.', '"', 'a', 'b', 'c', '†', '.', '"', 'a', 'b', 'c', '.', '“', 'a', 'b', 'c', '†', '”', '.', 'a', 'b', 'c', '.', '“', 'a', '‡', 'b', 'c', '.', '”', 'a', 'b', 'c', '.']
        self.assertEqual(expected, result)
# Reference material for a potential future test case (Plutarch, Camillus).
'''
#Plutarch Camillus
"οὐ μὴν π.,ρῆκεν αὐτῷ τὴν ἀρχὴν ὁ δῆμος, ἀλλὰ βοῶν μήτε ἱππεύοντος αὐτοῦ μήτε ὁπλομαχοῦντος ἐν τοῖς ἀγῶσι δεῖσθαι, βουλευομένου δὲ μόνον καί προστάττοντος, ἠνάγκασεν ὑποστῆναι τὴν στρατηγίαν καί μεθ' ἑνὸς τῶν συναρχόντων Λευκίου Φουρίου τὸν στρατὸν ἄγειν εὐθὺς ἐπὶ τοὺς πολεμίους."
http://www.perseus.tufts.edu/hopper/text?doc=Perseus%3Atext%3A2008.01.0086%3Achapter%3D37%3Asection%3D2
http://www.perseus.tufts.edu/hopper/text?doc=Perseus%3Atext%3A2008.01.0012%3Achapter%3D37%3Asection%3D2
'''
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
198586 | import numpy as np
from preprocess.hierarchical import TreeNodes
from preprocess import utils
from evaluation.metrics import compute_level_loss
from algorithms.MinT import recon_base_forecast
from algorithms.ERM import unbiased_recon
from algorithms import LSTNet, Optim
import torch
import torch.nn as nn
import math
import time
from itertools import chain
import pdb
def evaluate(data, X, Y, model, h, evaluateL2, evaluateL1, batch_size, part, nodes, method, alg, cuda):
    """Evaluate the model on a validation or test split.

    For part == 'test', predictions are reconciled per the chosen hierarchy
    method ('erm', 'BU', 'mint*', or base) and the mean per-level loss vector
    is returned. For any other part, returns (rse, rae, correlation) computed
    over mini-batches.
    """
    model.eval()
    total_loss = 0
    total_loss_l1 = 0
    n_samples = 0
    predict = None
    test = None
    if part == 'test':
        # single forward pass over the whole test tensor
        output = model(X.cuda()) if cuda else model(X)
        # one row per sample, one column per hierarchy level (+ total)
        result = np.zeros((output.shape[0], len(nodes) + 1))
        if method == 'erm':
            i = 0
            # ERM reconciles all base forecasts jointly
            recon_pred = unbiased_recon(nodes, Y.numpy(), output.cpu().detach().numpy())
            for pred in recon_pred:
                result[i, :] = compute_level_loss(pred.keys(), nodes, pred, Y[i, :].numpy(), len(nodes) + 1, True, h)
                i += 1
        else:
            for i in range(output.shape[0]):
                test_pred = output[i, :].cpu().detach().numpy()
                full_test = Y[i, :].cpu().detach().numpy()
                if method == 'BU':
                    # bottom-up: aggregate leaf forecasts via the summing matrix
                    S = TreeNodes(nodes).get_s_matrix()
                    full_test = np.dot(S, full_test)
                    test_pred = np.dot(S, test_pred)
                pred_dict = dict(zip(TreeNodes(nodes).col_order(), test_pred))
                if 'mint' in method:
                    # trace-minimization reconciliation of the base forecasts
                    pred_dict = recon_base_forecast(pred_dict.keys(), nodes, pred_dict, model,
                                                    data, data.P + data.h - 1, method, alg)
                result[i, :] = compute_level_loss(pred_dict.keys(), nodes, pred_dict, full_test, len(nodes) + 1, True, h)
        result = result.mean(axis=0)
        return result
    # validation path: batched metrics in the original (unscaled) data range
    for X, Y in data.get_batches(X, Y, batch_size, False):
        output = model(X)
        if predict is None:
            predict = output
            test = Y
        else:
            predict = torch.cat((predict, output))
            test = torch.cat((test, Y))
        scale = data.scale.expand(output.size(0), data.m)
        total_loss += evaluateL2(output * scale, Y * scale).data.cpu().numpy()
        total_loss_l1 += evaluateL1(output * scale, Y * scale).data.cpu().numpy()
        n_samples += (output.size(0) * data.m)
    # relative squared/absolute error, normalized by dataset statistics
    rse = math.sqrt(total_loss / n_samples) / data.rse
    rae = (total_loss_l1 / n_samples) / data.rae
    predict = predict.data.cpu().numpy()
    Ytest = test.data.cpu().numpy()
    sigma_p = (predict).std(axis=0)
    sigma_g = (Ytest).std(axis=0)
    mean_p = predict.mean(axis=0)
    mean_g = Ytest.mean(axis=0)
    # exclude constant ground-truth columns from the correlation average
    index = (sigma_g != 0)
    correlation = ((predict - mean_p) * (Ytest - mean_g)).mean(axis=0) / (sigma_p * sigma_g)
    correlation = (correlation[index]).mean()
    return rse, rae, correlation
def train(data, X, Y, model, criterion, optim, batch_size):
    """Run one training epoch and return the average per-element loss.

    Args:
        data: Data_utility supplying batches and the per-column scale.
        X, Y: full training inputs and targets (batched internally).
        model: model under training (switched to train mode here).
        criterion: loss function applied to unscaled outputs/targets.
        optim: wrapped optimizer whose step() applies the update.
        batch_size: mini-batch size; batches are shuffled.

    Returns:
        Total loss divided by the number of scalar elements processed.
    """
    model.train()
    total_loss = 0
    n_samples = 0
    for X, Y in data.get_batches(X, Y, batch_size, True):
        model.zero_grad()
        output = model(X)
        # Undo per-column normalization so the loss is measured in the
        # original data scale.
        scale = data.scale.expand(output.size(0), data.m)
        loss = criterion(output * scale, Y * scale)
        loss.backward()
        # (cleanup) the return value of step() was previously bound to an
        # unused `grad_norm` local; the dead assignment is dropped.
        optim.step()
        total_loss += loss.data.cpu().numpy()
        n_samples += (output.size(0) * data.m)
    return total_loss / n_samples
def fit_and_pred(Data, model, h, num_epoch, batch_size, params, optim, TRAINING_METHOD, criterion, evaluateL2, evaluateL1,
                 nodes, verbose, cuda):
    """Train for num_epoch epochs, checkpointing the best model by validation loss.

    The model with the lowest validation loss is saved to ./save/LSTNet.pt;
    callers reload it from there after training.

    Args:
        Data: Data_utility holding train/valid splits.
        model: model being trained (mutated in place).
        h: forecast horizon, forwarded to evaluate().
        num_epoch: number of training epochs.
        batch_size: mini-batch size for both training and validation passes.
        params: options dict; params['alg'] is consumed by evaluate().
        optim: wrapped optimizer driving updates.
        TRAINING_METHOD: hierarchy reconciliation method name.
        criterion: training loss; evaluateL2/evaluateL1: validation losses.
        nodes: hierarchy description of the series tree.
        verbose: when truthy, print per-epoch progress.
        cuda: whether evaluation runs on GPU.
    """
    best_val = 10000000
    for epoch in range(1, num_epoch + 1):
        epoch_start_time = time.time()
        # Bug fix: the training batch size was hard-coded to 64, silently
        # ignoring the `batch_size` argument that the validation pass below
        # already honoured.
        train_loss = train(Data, Data.train[0], Data.train[1], model, criterion, optim, batch_size)
        val_loss, val_rae, val_corr = evaluate(Data, Data.valid[0], Data.valid[1], model, h, evaluateL2,
                                               evaluateL1, batch_size, 'valid', nodes,
                                               TRAINING_METHOD, params['alg'], cuda)
        if verbose:
            print(
                '| end of epoch {:3d} | time: {:5.2f}s | train_loss {:5.4f} | valid rse {:5.4f} | valid rae '
                '{:5.4f} | valid corr {:5.4f}'.format(
                    epoch, (time.time() - epoch_start_time), train_loss, val_loss, val_rae, val_corr))
        # Checkpoint whenever the validation loss improves.
        if val_loss < best_val:
            with open('./save/LSTNet.pt', 'wb') as f:
                torch.save(model, f)
            best_val = val_loss
def train_lstn(TRAINING_METHOD, nodes, data, cuda, h, num_epoch, batch_size, params, verbose):
    """Build, train, and evaluate an LSTNet model for hierarchical forecasting.

    Returns the mean per-level test loss vector produced by evaluate() for the
    best checkpointed model.
    """
    if TRAINING_METHOD == 'BU':
        # bottom-up: restrict training data to the leaf-level series columns
        start = sum(list(chain(*nodes[:-1]))) + 1
        end = sum(list(chain(*nodes))) + 1
        feat_list = [str(i) for i in range(start, end)]
        data = data[feat_list]
    # 60/20/20 split, window of 24*7 time steps, horizon h
    Data = utils.Data_utility(0.6, 0.2, cuda, h, 24 * 7, data, TRAINING_METHOD, normalize=2)
    model = LSTNet.Model(Data)
    # NOTE(review): size_average=False is deprecated in modern torch; the
    # equivalent is reduction='sum' -- confirm the project's torch version.
    criterion = nn.MSELoss(size_average=False)
    evaluateL2 = nn.MSELoss(size_average=False)
    evaluateL1 = nn.L1Loss(size_average=False)
    if cuda:
        model.cuda()
        criterion = criterion.cuda()
        evaluateL1 = evaluateL1.cuda()
        evaluateL2 = evaluateL2.cuda()
    optim = Optim.Optim(model.parameters(), 'adam', 1e-3, 10.)
    # optim = torch.optim.Adam(model.parameters(), lr=1e-3)
    fit_and_pred(Data, model, h, num_epoch, batch_size, params, optim, TRAINING_METHOD, criterion, evaluateL2,
                 evaluateL1, nodes, verbose, cuda)
    # reload the best checkpoint written by fit_and_pred
    with open('./save/LSTNet.pt', 'rb') as f:
        model = torch.load(f)
    multilevel_loss = evaluate(Data, Data.test[0], Data.test[1], model, h, evaluateL2, evaluateL1, batch_size,
                               'test', nodes, TRAINING_METHOD, params['alg'], cuda)
    return multilevel_loss
| StarcoderdataPython |
4811440 | <filename>tests/sol/opt/opt_test.py
# coding=utf-8
from itertools import product
import pytest
from hypothesis import given
from hypothesis import strategies as st
from numpy import array
from sol import NetworkCaps
from sol import NetworkConfig
from sol.opt.app import App
from sol.opt.funcs import CostFuncFactory
from sol.opt.quickstart import from_app
from sol.path.generate import generate_paths_tc, use_mbox_modifier
from sol.path.predicates import null_predicate, has_mbox_predicate
from sol.topology.generators import complete_topology
from sol.topology.traffic import TrafficClass
from sol.utils.const import *
# When comparing objective functions, use this as the precision
def test_shortest_path():
    """ Check that we can correctly implement shortest path routing """
    # Generate a topology:
    topo = complete_topology(5)
    # Generate a single traffic class:
    # TrafficClass (id, name, source node, destination node)
    tcs = [TrafficClass(0, u'classname', 0, 2)]
    # Generate all paths for this traffic class
    pptc = generate_paths_tc(topo, tcs, null_predicate, cutoff=100)
    # Application configuration
    appconfig = {
        'name': u'minLatencyApp',
        'constraints': [(Constraint.ROUTE_ALL, (pptc.tcs(),), {})],
        'obj': (Objective.MIN_LATENCY, (), {}),
        'resource_cost': {}
    }
    # Create an application based on our config
    app = App(pptc, **appconfig)
    # Create and solve an optimization based on the app
    # No link capacities will just result in a single shortest path
    opt = from_app(topo, app, NetworkConfig(None))
    opt.solve()
    assert opt.is_solved()
    paths = opt.get_paths()
    # All flow must ride the direct 0->2 edge; every other path gets nothing.
    for pi, p in enumerate(paths.paths(tcs[0])):
        if list(p.nodes()) == [0, 2]:
            assert p.flow_fraction() == 1
        else:
            assert p.flow_fraction() == 0
    # norm factor for latency is diameter * n^2
    norm = topo.diameter() * 25
    # the objective is 1-normalized latency, and latency is 1.
    # because 1 path with flow fraction of 1.
    # Bug fix: the tolerance check previously computed
    # abs(solution - 1 - 1/norm), i.e. abs((solution - 1) - 1/norm), instead
    # of the distance from the expected value (1 - 1/norm); the sibling
    # min-latency test already used the parenthesized form.
    solution = opt.get_solved_objective(app)[0]
    assert solution == 1 - 1 / norm or abs(solution - (1 - 1 / norm)) <= EPSILON
    solution = opt.get_solved_objective()
    assert solution == 1 - 1 / norm or abs(solution - (1 - 1 / norm)) <= EPSILON
@given(st.floats(1e-3, 1))
def test_maxflow(cap):
    """ Check that maxflow works correctly, for a single traffic class """
    # Generate a topology:
    topo = complete_topology(4)
    # every link offers one unit of bandwidth
    for link in topo.links():
        topo.set_resource(link, BANDWIDTH, 1)
    # one traffic class of volume 3, from node 0 to node 2
    tcs = [TrafficClass(0, u'classname', 0, 2, array([3]))]
    # Generate all paths for this traffic class
    pptc = generate_paths_tc(topo, tcs, null_predicate, cutoff=100)
    appconfig = {
        'name': u'mf',
        'constraints': [],
        'obj': (Objective.MAX_FLOW, (), {}),
        'resource_cost': {BANDWIDTH: (LINKS, 1, None)}
    }
    app = App(pptc, **appconfig)
    # cap all links globally at the hypothesis-chosen fraction
    caps = NetworkCaps(topo)
    caps.add_cap(BANDWIDTH, cap=cap)
    opt = from_app(topo, app, NetworkConfig(caps))
    opt.solve()
    assert opt.is_solved()
    # Ensure that both app objective and global objective are the same
    # Also, use abs(actual - expected) because floating point errors
    solution = opt.get_solved_objective(app)[0]
    assert solution == cap or abs(solution - cap) <= EPSILON
    solution = opt.get_solved_objective()
    assert solution == cap or abs(solution - cap) <= EPSILON
@given(st.floats(0, 1))
def test_maxflow_inapp_caps(cap):
    """Text maxflow, but use the CAP constraint instead of global network caps"""
    # Generate a topology:
    topo = complete_topology(4)
    for link in topo.links():
        topo.set_resource(link, BANDWIDTH, 1)
    tcs = [TrafficClass(0, u'classname', 0, 2, array([3]))]
    # Generate all paths for this traffic class
    pptc = generate_paths_tc(topo, tcs, null_predicate, cutoff=100)
    # per-link caps supplied through the app's CAP_LINKS constraint rather
    # than a NetworkCaps object on the network config
    caps = {link: cap for link in topo.links()}
    appconfig = {
        'name': u'mf',
        'constraints': [(Constraint.CAP_LINKS, (BANDWIDTH, caps), {})],
        'obj': (Objective.MAX_FLOW, (), {}),
        'resource_cost': {BANDWIDTH: (LINKS, 1, None)}
    }
    app = App(pptc, **appconfig)
    opt = from_app(topo, app, NetworkConfig())
    opt.solve()
    assert opt.is_solved()
    # Ensure that both app objective and global objective are the same
    # Also, use abs(actual - expected) because floating point errors
    solution = opt.get_solved_objective(app)[0]
    assert solution == cap or abs(solution - cap) <= EPSILON
    solution = opt.get_solved_objective()
    assert solution == cap or abs(solution - cap) <= EPSILON
def test_min_latency_app():
    """Solve a lone min-latency app and check the normalized objective value."""
    topology = complete_topology(4)
    for link in topology.links():
        topology.set_resource(link, BANDWIDTH, 1)
    traffic_classes = [TrafficClass(0, u'classname', 0, 2, array([1]))]
    pptc = generate_paths_tc(topology, traffic_classes, null_predicate, cutoff=100)
    app = App(pptc, name=u'te',
              constraints=[(Constraint.ROUTE_ALL, (), {})],
              obj=(Objective.MIN_LATENCY, (), {}),
              resource_cost={BANDWIDTH: (LINKS, 1, None)})
    network_caps = NetworkCaps(topology)
    network_caps.add_cap(BANDWIDTH, cap=1)
    opt = from_app(topology, app, NetworkConfig(network_caps))
    opt.solve()
    assert opt.is_solved()
    # Latency is normalized by diameter * n^2 (n = 4 nodes here).
    norm = topology.diameter() * 16
    # A single path carries all the flow, so latency == 1 and the
    # (maximized) objective is 1 - 1/norm.
    expected = 1 - 1 / norm
    for solution in (opt.get_solved_objective(app)[0], opt.get_solved_objective()):
        assert solution == expected or abs(solution - expected) <= EPSILON
def test_te_app():
    """Solve a single traffic-engineering (min max link load) app.

    One unit of traffic from node 0 to node 2 over a complete 4-node
    topology with unit-bandwidth links can be split across three disjoint
    routes, so the optimal maximum link load is exactly 1/3.
    """
    topo = complete_topology(4)
    for link in topo.links():
        topo.set_resource(link, BANDWIDTH, 1)
    tcs = [TrafficClass(0, u'classname', 0, 2, array([1]))]
    # Generate all paths for this traffic class
    pptc = generate_paths_tc(topo, tcs, null_predicate, cutoff=100)
    appconfig = {
        'name': u'te',
        'constraints': [(Constraint.ROUTE_ALL, (), {})],
        'obj': (Objective.MIN_LINK_LOAD, (BANDWIDTH,), {}),
        'resource_cost': {BANDWIDTH: (LINKS, 1, None)}
    }
    app = App(pptc, **appconfig)
    caps = NetworkCaps(topo)
    caps.add_cap(BANDWIDTH, cap=1)
    opt = from_app(topo, app, NetworkConfig(caps))
    opt.solve()
    assert opt.is_solved()
    # The solver maximizes, so the actual link load is 1 - objective.
    # Compare against the exact optimum 1/3 with a tolerance; the previous
    # hard-coded literals (.333333 vs .33333) were truncated and mutually
    # inconsistent approximations of the same value.
    expected = 1.0 / 3.0
    solution = 1 - opt.get_solved_objective(app)[0]
    assert abs(solution - expected) <= EPSILON
    solution = 1 - opt.get_solved_objective()
    assert abs(solution - expected) <= EPSILON
def test_mbox_load_balancing():
    """One unit of traffic balanced across four middleboxes -> CPU load 1/4 each."""
    topology = complete_topology(4)
    # Every node doubles as a middlebox with one unit of CPU.
    for node in topology.nodes():
        topology.set_resource(node, CPU, 1)
        topology.set_mbox(node)
    traffic_classes = [TrafficClass(0, u'classname', 0, 2, array([1]))]
    pptc = generate_paths_tc(topology, traffic_classes, has_mbox_predicate,
                             modify_func=use_mbox_modifier, cutoff=100)
    app = App(pptc, name=u'mb_lb',
              constraints=[(Constraint.ROUTE_ALL, (), {})],
              obj=(Objective.MIN_NODE_LOAD, (CPU,), {}),
              resource_cost={CPU: (MBOXES, 1, None)})
    network_caps = NetworkCaps(topology)
    network_caps.add_cap(CPU, cap=1)
    opt = from_app(topology, app, NetworkConfig(network_caps))
    opt.solve()
    assert opt.is_solved()
    # Objective is maximized internally, so the node load is 1 - objective.
    for solution in (1 - opt.get_solved_objective(app)[0],
                     1 - opt.get_solved_objective()):
        assert solution == .25 or abs(solution - .25) <= EPSILON
def test_mbox_load_balancing_all_tcs():
    """All-pairs unit traffic saturates the middlebox CPUs: expected load is 1."""
    topology = complete_topology(4)
    for node in topology.nodes():
        topology.set_resource(node, CPU, 1)
        topology.set_mbox(node)
    # One unit-volume traffic class for every (source, target) node pair.
    traffic_classes = [TrafficClass(0, u'classname', src, dst, array([1]))
                       for (src, dst) in product(topology.nodes(), repeat=2)]
    pptc = generate_paths_tc(topology, traffic_classes, has_mbox_predicate,
                             modify_func=use_mbox_modifier, cutoff=100)
    app = App(pptc, name=u'mb_lb',
              constraints=[(Constraint.ROUTE_ALL, (), {})],
              obj=(Objective.MIN_NODE_LOAD, (CPU,), {}),
              resource_cost={CPU: (MBOXES, 1, None)})
    network_caps = NetworkCaps(topology)
    network_caps.add_cap(CPU, cap=1)
    opt = from_app(topology, app, NetworkConfig(network_caps))
    opt.solve()
    assert opt.is_solved()
    # The maximization flip means node load = 1 - objective.
    for solution in (1 - opt.get_solved_objective(app)[0],
                     1 - opt.get_solved_objective()):
        assert solution == 1 or abs(solution - 1) <= EPSILON
@pytest.mark.skip()
def test_fixed_paths():
    # Placeholder kept so the missing coverage stays visible in skip reports.
    pass
    # TODO: bring back fixed paths test
| StarcoderdataPython |
139452 | <reponame>ishtjot/susereumutep
import gi; gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
def ErrorDialog(self, message):
    """Show a modal warning dialog with *message*, blocking until dismissed."""
    dialog = Gtk.MessageDialog(self, 0, Gtk.MessageType.WARNING, Gtk.ButtonsType.OK, message)
    dialog.run()
    dialog.destroy()
| StarcoderdataPython |
67771 | from argparse import Namespace
from typing import List, Union
from yawast.external.spinner import Spinner
from yawast.reporting import reporter
from yawast.reporting.enums import Vulnerabilities
from yawast.reporting.issue import Issue
from yawast.scanner.plugins.evidence import Evidence
from yawast.scanner.plugins.http import (
http_basic,
waf,
spider,
retirejs,
special_files,
file_search,
error_checker,
)
from yawast.scanner.plugins.http.applications import wordpress
from yawast.scanner.plugins.http.servers import apache_httpd, apache_tomcat, nginx, iis
from yawast.scanner.plugins.result import Result
from yawast.shared import network, output
def scan(args: Namespace, url: str, domain: str):
    """Run the full HTTP scan of one target: fingerprint the server via HEAD,
    spider the site, then run the server- and application-specific plugins,
    displaying issues through the reporter as they are found."""
    reporter.register_data("url", url)
    reporter.register_data("domain", domain)
    output.empty()
    output.norm("HEAD:")
    head = network.http_head(url)
    raw = network.http_build_raw_response(head)
    # Echo the raw HEAD response, one indented line at a time.
    for line in raw.splitlines():
        output.norm(f"\t{line}")
    output.empty()
    res = http_basic.get_header_issues(head, raw, url)
    if len(res) > 0:
        output.norm("Header Issues:")
        reporter.display_results(res, "\t")
        output.empty()
    res = http_basic.get_cookie_issues(head, raw, url)
    if len(res) > 0:
        output.norm("Cookie Issues:")
        reporter.display_results(res, "\t")
        output.empty()
    # check for WAF signatures
    res = waf.get_waf(head.headers, raw, url)
    if len(res) > 0:
        output.norm("WAF Detection:")
        reporter.display_results(res, "\t")
        output.empty()
    output.norm("Performing vulnerability scan (this will take a while)...")
    links: List[str] = []
    with Spinner():
        try:
            links, res = spider.spider(url)
        except Exception as error:
            output.debug_exception()
            output.error(f"Error running scan: {str(error)}")
    # NOTE(review): if spider.spider raises, `res` still holds the WAF results
    # from above and may be re-displayed below — confirm this is intended.
    output.norm(f"Identified {len(links) + 1} pages.")
    output.empty()
    if len(res) > 0:
        output.norm("Issues Detected:")
        reporter.display_results(res, "\t")
        output.empty()
    # get files, and add those to the link list
    links += _file_search(args, url, links)
    # Server-specific checks; each returns a (possibly empty) list of results.
    res = apache_httpd.check_all(url)
    if len(res) > 0:
        reporter.display_results(res, "\t")
    res = apache_tomcat.check_all(url, links)
    if len(res) > 0:
        reporter.display_results(res, "\t")
    res = nginx.check_all(url)
    if len(res) > 0:
        reporter.display_results(res, "\t")
    res = iis.check_all(url)
    if len(res) > 0:
        reporter.display_results(res, "\t")
    # HTTP-method checks (PROPFIND / TRACE / OPTIONS).
    res = http_basic.check_propfind(url)
    if len(res) > 0:
        reporter.display_results(res, "\t")
    res = http_basic.check_trace(url)
    if len(res) > 0:
        reporter.display_results(res, "\t")
    res = http_basic.check_options(url)
    if len(res) > 0:
        reporter.display_results(res, "\t")
    # WordPress detection; user enumeration is only attempted when found.
    wp_path, res = wordpress.identify(url)
    if len(res) > 0:
        reporter.display_results(res, "\t")
    if wp_path is not None:
        res = wordpress.check_json_user_enum(wp_path)
        if len(res) > 0:
            reporter.display_results(res, "\t")
def reset():
    """Clear module-level state in every plugin that caches between scans."""
    for plugin in (retirejs, file_search, error_checker):
        plugin.reset()
def _file_search(args: Namespace, url: str, orig_links: List[str]) -> List[str]:
    """Search the target for well-known files and directories.

    First verifies the server 404-handling for files and for paths; each
    search family is skipped when its 404 behavior is broken.  Returns the
    list of newly discovered links (may include duplicates of *orig_links*).
    """
    new_files: List[str] = []
    file_good, file_res, path_good, path_res = network.check_404_response(url)
    # these are here for data typing
    results: Union[List[Result], None]
    links: Union[List[str], None]
    # Broken 404 behavior is itself reportable.
    if not file_good:
        reporter.display(
            "Web server does not respond properly to file 404 errors.",
            Issue(
                Vulnerabilities.SERVER_INVALID_404_FILE,
                url,
                Evidence.from_response(file_res),
            ),
        )
    if not path_good:
        reporter.display(
            "Web server does not respond properly to path 404 errors.",
            Issue(
                Vulnerabilities.SERVER_INVALID_404_PATH,
                url,
                Evidence.from_response(path_res),
            ),
        )
    if not (file_good or path_good):
        output.norm(
            "Site does not respond properly to non-existent file/path requests; skipping some checks."
        )
    if file_good:
        # Known special files (robots.txt-style targets) are always checked.
        links, results = special_files.check_special_files(url)
        if len(results) > 0:
            reporter.display_results(results, "\t")
        new_files += links
        # The brute-force file search only runs when requested via --files.
        if args.files:
            output.empty()
            output.norm("Searching for common files (this will take a few minutes)...")
            with Spinner():
                try:
                    links, results = file_search.find_files(url)
                except Exception as error:
                    output.debug_exception()
                    output.error(f"Error running scan: {str(error)}")
                    # None marks "search failed" as opposed to "found nothing".
                    results = None
                    links = None
            if results is not None and len(results) > 0:
                reporter.display_results(results, "\t")
            if links is not None and len(links) > 0:
                new_files += links
                # Only announce links the spider had not already seen.
                for l in links:
                    if l not in orig_links:
                        output.norm(f"\tNew file found: {l}")
            output.empty()
    if path_good:
        links, results = special_files.check_special_paths(url)
        if len(results) > 0:
            reporter.display_results(results, "\t")
        new_files += links
        # The directory brute-force only runs when requested via --dir.
        if args.dir:
            output.empty()
            output.norm(
                "Searching for common directories (this will take a few minutes)..."
            )
            with Spinner():
                try:
                    links, results = file_search.find_directories(
                        url, args.dirlistredir, args.dirrecursive
                    )
                except Exception as error:
                    output.debug_exception()
                    output.error(f"Error running scan: {str(error)}")
                    results = None
                    links = None
            if results is not None and len(results) > 0:
                reporter.display_results(results, "\t")
            if links is not None and len(links) > 0:
                new_files += links
                for l in links:
                    if l not in orig_links:
                        output.norm(f"\tNew directory found: {l}")
            output.empty()
    return new_files
| StarcoderdataPython |
1735270 | import imageio
import torch
import time
from tqdm import tqdm
from animate import normalize_kp
from demo import load_checkpoints
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from skimage import img_as_ubyte
from skimage.transform import resize
import cv2
import os
import argparse
import subprocess
import os
from PIL import Image
def video2mp3(file_name):
    """Extract the audio track of *file_name* into an MP3 next to it.

    The output keeps the input's base name with an ``.mp3`` extension.

    :param file_name: path of the input video file
    :return: None (the MP3 is written as a side effect of running ffmpeg)
    """
    # os.path.splitext only strips the final extension, so directories or
    # base names containing dots no longer truncate the output path
    # (the old ``file_name.split('.')[0]`` did).
    outfile_name = os.path.splitext(file_name)[0] + '.mp3'
    # Pass the arguments as a list with shell=False semantics: paths with
    # spaces or shell metacharacters can no longer break (or inject into)
    # the command line.
    cmd = ['ffmpeg', '-i', file_name, '-f', 'mp3', outfile_name, '-y']
    print(' '.join(cmd))
    subprocess.call(cmd)
def video_add_mp3(file_name, mp3_file):
    """Mux the audio of *mp3_file* into *file_name*, writing ``<name>-f.mp4``.

    :param file_name: path of the input video file
    :param mp3_file: path of the audio file to merge in
    :return: None (the muxed MP4 is written as a side effect of ffmpeg)
    """
    # splitext keeps dotted directories/base names intact, unlike the old
    # ``file_name.split('.')[0]``.
    outfile_name = os.path.splitext(file_name)[0] + '-f.mp4'
    # List-form arguments avoid shell quoting/injection issues with
    # arbitrary file paths.
    subprocess.call(['ffmpeg', '-i', file_name, '-i', mp3_file,
                     '-strict', '-2', '-f', 'mp4', outfile_name, '-y'])
# Command-line interface: a source image to animate, a model checkpoint, and
# an optional driving video.
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input_image", required=True,
                help="Path to image to animate")
ap.add_argument("-c", "--checkpoint", required=True, help="Path to checkpoint")
ap.add_argument("-v", "--input_video", required=False,
                help="Path to video input")
args = vars(ap.parse_args())
# Timestamped output file name, e.g. 11_06_20_51_59_output.mp4
nowtime = time.strftime("%m_%d_%H_%M_%S", time.localtime())
# Generated video without the audio track
filename = nowtime+'_output.mp4'
# Final video with the audio track merged back in
finalname = nowtime+'_output-f.mp4'
print(";python; Loading image and checkpoint...")
source_path = args['input_image']
checkpoint_path = args['checkpoint']
if args['input_video']:
    video_path = args['input_video']
else:
    # NOTE(review): when no video is given, `video_path` is never assigned and
    # the `if video_path:` check below raises NameError — confirm intended.
    print(";python; video not exist!")
# Load the source image and shrink it to the model's 256x256 RGB input.
source_image = imageio.imread(source_path)
source_image = resize(source_image, (256, 256))[..., :3]
# Directory containing this script
currPath = os.path.dirname(__file__)
# Absolute path of this script
asbPath = os.path.abspath(__file__)
generator, kp_detector = load_checkpoints(
    config_path=currPath+'/config/vox-256.yaml', checkpoint_path=checkpoint_path)
# Make sure the output folder exists
if not os.path.exists(currPath+'/output'):
    os.mkdir(currPath+'/output')
relative = True
adapt_movement_scale = True
cpu = False
# Open the driving video if a path was provided
if video_path:
    cap = cv2.VideoCapture(video_path)
    print(";python; Loading video...")
else:
    print(";python; video not exist!")
fps = cap.get(cv2.CAP_PROP_FPS)
size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
# Extract the driving video's audio so it can be merged back at the end.
video2mp3(file_name=video_path)
fourcc = cv2.VideoWriter_fourcc('M', 'P', 'E', 'G')
out1 = cv2.VideoWriter(currPath+'/output/'+filename, fourcc, fps, size, True)
# NOTE(review): cv2_source and the `predictions` list below are built but
# never consumed in this script — candidates for removal.
cv2_source = cv2.cvtColor(source_image.astype('float32'), cv2.COLOR_BGR2RGB)
with torch.no_grad():
    predictions = []
    # HWC image -> NCHW float tensor expected by the model.
    source = torch.tensor(source_image[np.newaxis].astype(
        np.float32)).permute(0, 3, 1, 2)
    if not cpu:
        source = source.cuda()
    kp_source = kp_detector(source)
    count = 0
    while(True):
        ret, frame = cap.read()
        frame = cv2.flip(frame, 1)
        if ret == True:
            frame1 = resize(frame, (256, 256))[..., :3]
            # The first frame defines the initial keypoints used as the
            # reference for relative motion transfer.
            if count == 0:
                source_image1 = frame1
                source1 = torch.tensor(source_image1[np.newaxis].astype(
                    np.float32)).permute(0, 3, 1, 2)
                kp_driving_initial = kp_detector(source1)
            frame_test = torch.tensor(
                frame1[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)
            driving_frame = frame_test
            if not cpu:
                driving_frame = driving_frame.cuda()
            kp_driving = kp_detector(driving_frame)
            # Normalize driving keypoints relative to the initial frame.
            kp_norm = normalize_kp(kp_source=kp_source,
                                   kp_driving=kp_driving,
                                   kp_driving_initial=kp_driving_initial,
                                   use_relative_movement=relative,
                                   use_relative_jacobian=relative,
                                   adapt_movement_scale=adapt_movement_scale)
            out = generator(source, kp_source=kp_source, kp_driving=kp_norm)
            predictions.append(np.transpose(
                out['prediction'].data.cpu().numpy(), [0, 2, 3, 1])[0])
            # NCHW prediction -> HWC image, RGB -> BGR for OpenCV writing.
            im = np.transpose(
                out['prediction'].data.cpu().numpy(), [0, 2, 3, 1])[0]
            im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
            out1.write(img_as_ubyte(im))
            count += 1
        else:
            break
    cap.release()
    out1.release()
    cv2.destroyAllWindows()
# Merge the extracted audio back into the generated video.
video_add_mp3(file_name=currPath+'/output/'+filename,
              mp3_file=video_path.split('.')[0] + '.mp3')
if os.path.exists(currPath+'/output/'+finalname):
    # Treat outputs under 100 KiB as a failed/empty render.
    if os.path.getsize(currPath+'/output/'+finalname)>1024*100:
        # On success, report the final video path
        print(';finalvideo;'+asbPath+'/../output/'+finalname)
    else:
        print(';python; failed')
else:
    print(';python; failed')
| StarcoderdataPython |
3236081 | from typing import Callable, List
from heuristic.classes import Solution
from .handling_cost import handling_cost
from .routes import routes
from .objective import objective
from .routing_cost import routing_cost
# Statistic callbacks collected for each Solution: every entry takes a
# Solution and returns one float.  NOTE(review): consumers presumably iterate
# this list in order when reporting — confirm before reordering.
STATISTICS: List[Callable[[Solution], float]] = [
    routes,
    objective,
    routing_cost,
    handling_cost,
]
| StarcoderdataPython |
3218762 | import pytest
import sklearn.decomposition
import sklearn.linear_model
from numpy.testing import assert_array_equal
from sklearn import datasets
from sklearn.model_selection import GridSearchCV, StratifiedKFold
from sklearn.pipeline import Pipeline
from baikal import Model, Input
from baikal.sklearn import SKLearnWrapper
from tests.helpers.sklearn_steps import PCA, LogisticRegression, RandomForestClassifier
# Shared fixtures for every test in this module: the iris dataset plus
# deterministic solver/search settings.
iris = datasets.load_iris()
x_data = iris.data
y_t_data = iris.target
random_state = 123
verbose = 0
cv = StratifiedKFold(3)  # cv will default to KFold if the estimator is a baikal Model
def test_grid_search_cv():
    """GridSearchCV over a wrapped baikal Model must match an equivalent
    sklearn Pipeline: same best parameters, same mean train/test scores."""
    param_grid = {
        "pca__n_components": [2, 4],
        "logreg__C": [0.1, 1.0, 10],
        "logreg__penalty": ["l1", "l2"],
    }
    # baikal way
    def build_fn():
        # Build a PCA -> LogisticRegression graph; step names match the
        # param_grid prefixes above.
        x = Input()
        y_t = Input()
        h = PCA(random_state=random_state, name="pca")(x)
        y = LogisticRegression(
            random_state=random_state, solver="liblinear", name="logreg"
        )(h, y_t)
        model = Model(x, y, y_t)
        return model
    sk_model = SKLearnWrapper(build_fn)
    assert isinstance(sk_model.model, Model)
    gscv_baikal = GridSearchCV(
        sk_model,
        param_grid,
        cv=cv,
        scoring="accuracy",
        return_train_score=True,
        verbose=verbose,
    )
    gscv_baikal.fit(x_data, y_t_data)
    # traditional way
    pca = PCA(random_state=random_state)
    logreg = LogisticRegression(random_state=random_state, solver="liblinear")
    pipe = Pipeline([("pca", pca), ("logreg", logreg)])
    gscv_traditional = GridSearchCV(
        pipe,
        param_grid,
        cv=cv,
        scoring="accuracy",
        return_train_score=True,
        verbose=verbose,
    )
    gscv_traditional.fit(x_data, y_t_data)
    # Both searches explored the identical grid with identical folds, so the
    # selected parameters and cross-validation scores must agree exactly.
    assert gscv_baikal.best_params_ == gscv_traditional.best_params_
    assert_array_equal(
        gscv_traditional.cv_results_["mean_train_score"],
        gscv_baikal.cv_results_["mean_train_score"],
    )
    assert_array_equal(
        gscv_traditional.cv_results_["mean_test_score"],
        gscv_baikal.cv_results_["mean_test_score"],
    )
def test_grid_search_cv_with_tunable_step():
    """GridSearchCV where a whole step (the classifier) is itself a grid
    dimension must match the equivalent sklearn Pipeline search."""
    param_grid = {
        # The "classifier" step is swapped between two estimator instances.
        "classifier": [
            LogisticRegression(random_state=random_state),
            RandomForestClassifier(random_state=random_state),
        ],
        "pca__n_components": [2, 4],
    }
    # baikal way
    def build_fn():
        # The LogisticRegression here is only the default occupant of the
        # "classifier" slot; the grid search replaces it per candidate.
        x = Input()
        y_t = Input()
        h = PCA(random_state=random_state, name="pca")(x)
        y = LogisticRegression(random_state=random_state, name="classifier")(h, y_t)
        model = Model(x, y, y_t)
        return model
    sk_model = SKLearnWrapper(build_fn)
    gscv_baikal = GridSearchCV(
        sk_model,
        param_grid,
        cv=cv,
        scoring="accuracy",
        return_train_score=True,
        verbose=verbose,
    )
    gscv_baikal.fit(x_data, y_t_data)
    # traditional way
    pca = PCA(random_state=random_state)
    classifier = LogisticRegression(random_state=random_state)
    pipe = Pipeline([("pca", pca), ("classifier", classifier)])
    gscv_traditional = GridSearchCV(
        pipe,
        param_grid,
        cv=cv,
        scoring="accuracy",
        return_train_score=True,
        verbose=verbose,
    )
    gscv_traditional.fit(x_data, y_t_data)
    # Identical grids and folds -> identical selection and CV scores.
    assert gscv_baikal.best_params_ == gscv_traditional.best_params_
    assert_array_equal(
        gscv_traditional.cv_results_["mean_train_score"],
        gscv_baikal.cv_results_["mean_train_score"],
    )
    assert_array_equal(
        gscv_traditional.cv_results_["mean_test_score"],
        gscv_baikal.cv_results_["mean_test_score"],
    )
| StarcoderdataPython |
3202141 | """
This script checks a scenario for v2.29.0 format and migrates the input tables it to the v2.31.1 format.
NOTE: You'll still need to run the archetypes-mapper after this script has run.
"""
import os
import cea
import pandas as pd
import collections
import cea.config
import cea.inputlocator
from cea.utilities.dbf import dbf_to_dataframe, dataframe_to_dbf
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, Architecture and Building Systems - ETH Zurich"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
def find_migrators(scenario):
    """Yield (description, migration function) pairs that apply to *scenario*.

    Register new migrations in the ordered mapping below as they become
    necessary: each entry pairs an identifier predicate with the migration
    to run, and entries are checked in insertion order starting from the
    first predicate that matches the scenario.
    """
    migrations = collections.OrderedDict([
        ("v2.29.0 - v2.31.0", (is_2_29, migrate_2_29_to_2_31)),
        ("v2.31.0 - v2.31.1", (is_2_31, migrate_2_31_to_2_31_1)),
        ("v3.22.0 - v3.22.1", (is_3_22, migrate_3_22_to_3_22_1)),
    ])
    for key, (identifier, migrator) in migrations.items():
        if identifier(scenario):
            yield key, migrator
def is_2_29(scenario):
    """Return True when *scenario* still uses the v2.29 input tables."""
    properties = os.path.join(scenario, "inputs", "building-properties")
    # The migration applies only when both old tables are present...
    has_old_tables = (os.path.exists(os.path.join(properties, "age.dbf"))
                      and os.path.exists(os.path.join(properties, "occupancy.dbf")))
    # ...and the new table is absent (avoid migrating multiple times).
    already_migrated = os.path.exists(os.path.join(properties, "typology.dbf"))
    return has_old_tables and not already_migrated
def migrate_2_29_to_2_31(scenario):
    """Merge the v2.29 ``age.dbf``/``occupancy.dbf`` tables into the v2.31
    ``typology.dbf`` table, then remove the input tables that are no longer
    valid.  The archetypes-mapper must be run again afterwards.
    """

    def lookup_standard(year, standards_df):
        # Map a construction year onto a STANDARD whose [YEAR_START, YEAR_END]
        # interval contains it; the first matching row wins.
        matched_standards = standards_df[(standards_df.YEAR_START <= year) & (year <= standards_df.YEAR_END)]
        if len(matched_standards):
            # find first standard that is similar to the year
            standard = matched_standards.iloc[0]
        else:
            raise ValueError('Could not find a `STANDARD` in the databases to match the year `{}`.'
                             'You can try adding it to the `CONSTRUCTION_STANDARDS` input database and try again.'
                             .format(year))
        return standard.STANDARD

    def convert_occupancy(name, occupancy_dbf):
        # Reduce one building's occupancy ratios to its three dominant uses.
        row = occupancy_dbf[occupancy_dbf.Name == name].iloc[0]
        uses = set(row.to_dict().keys()) - {"Name", "REFERENCE"}
        # Sort by decreasing ratio.  `key=` replaces the Python-2-only
        # `cmp=`/builtin `cmp` form, which raises on Python 3.
        uses = sorted(uses, key=lambda use: float(row[use]), reverse=True)
        result = {
            "1ST_USE": uses[0],
            "1ST_USE_R": float(row[uses[0]]),
            "2ND_USE": uses[1],
            "2ND_USE_R": float(row[uses[1]]),
            "3RD_USE": uses[2],
            "3RD_USE_R": float(row[uses[2]])}
        # `np.isclose` replaces the long-deprecated `pd.np` alias (removed in
        # recent pandas releases).
        if np.isclose(result["2ND_USE_R"], 0.0):
            # Only one significant use: collapse the other slots to NONE.
            result["1ST_USE_R"] = 1.0
            result["2ND_USE_R"] = 0.0
            result["3RD_USE_R"] = 0.0
            result["2ND_USE"] = "NONE"
            result["3RD_USE"] = "NONE"
        elif np.isclose(result["3RD_USE_R"], 0.0):
            # Two significant uses: drop the third slot.
            result["1ST_USE_R"] = 1.0 - result["2ND_USE_R"]
            result["3RD_USE_R"] = 0.0
            result["3RD_USE"] = "NONE"
        # Ratios must sum to 1.0; the first-use ratio absorbs any remainder.
        result["1ST_USE_R"] = 1.0 - result["2ND_USE_R"] - result["3RD_USE_R"]
        return result

    def merge_age_and_occupancy_to_typology(age_dbf, occupancy_dbf, standards_df):
        # merge age.dbf and occupancy.dbf to typology.dbf
        typology_dbf_columns = ["Name", "YEAR", "STANDARD", "1ST_USE", "1ST_USE_R", "2ND_USE", "2ND_USE_R", "3RD_USE",
                                "3RD_USE_R"]
        # Collect the rows in a list and build the frame once:
        # DataFrame.append was deprecated and then removed from pandas.
        typology_rows = []
        for rindex, row in age_dbf.iterrows():
            typology_row = {
                "Name": row.Name,
                "YEAR": row.built,
                "STANDARD": lookup_standard(row.built, standards_df)}
            typology_row.update(convert_occupancy(row.Name, occupancy_dbf))
            typology_rows.append(typology_row)
        return pd.DataFrame(typology_rows, columns=typology_dbf_columns)

    age_dbf_path = os.path.join(scenario, "inputs", "building-properties", "age.dbf")
    occupancy_dbf_path = os.path.join(scenario, "inputs", "building-properties", "occupancy.dbf")
    age_df = dbf_to_dataframe(age_dbf_path)
    occupancy_df = dbf_to_dataframe(occupancy_dbf_path)
    locator = cea.inputlocator.InputLocator(scenario=scenario)
    standards_df = pd.read_excel(locator.get_database_construction_standards(), "STANDARD_DEFINITION")
    typology_df = merge_age_and_occupancy_to_typology(age_df, occupancy_df, standards_df)
    print("- writing typology.dbf")
    dataframe_to_dbf(typology_df, locator.get_building_typology())
    print("- removing occupancy.dbf and age.dbf")
    os.remove(age_dbf_path)
    os.remove(occupancy_dbf_path)
    print("- removing invalid input-tables (NOTE: run archetypes-mapper again)")
    for fname in {"supply_systems.dbf", "internal_loads.dbf", "indoor_comfort.dbf",
                  "air_conditioning.dbf", "architecture.dbf"}:
        fpath = os.path.join(scenario, "inputs", "building-properties", fname)
        if os.path.exists(fpath):
            print("  - removing {fname}".format(fname=fname))
            os.remove(fpath)
    print("- done")
    print("- NOTE: You'll need to run the archetpyes-mapper tool after this migration!")
def is_2_31(scenario):
    """Return True when the scenario already has the v2.31 typology table."""
    # NOTE: these checks can get more extensive when migrations get more
    # intricate... this is just an example.
    typology = os.path.join(scenario, "inputs", "building-properties", "typology.dbf")
    return os.path.exists(typology)
def migrate_2_31_to_2_31_1(scenario):
    """No-op migration: v2.31 scenarios are already valid for v2.31.1.

    Kept as a template showing how to register future migrations.
    """
    print("- (nothing to do)")
def is_3_22(scenario):
    """Return True when any input or output table still uses "pax" columns.

    Checks the indoor-comfort table, the internal-loads table and the
    occupancy schedule outputs; a single match is enough.
    """
    checks = (indoor_comfort_is_3_22, internal_loads_is_3_22,
              output_occupancy_is_3_22)
    return any(check(scenario) for check in checks)
def indoor_comfort_is_3_22(scenario):
    """Return True if ``indoor_comfort.dbf`` still has the pre-3.22.1
    "pax" column naming (``Ve_lpspax``)."""
    indoor_comfort = dbf_to_dataframe(os.path.join(scenario, "inputs", "building-properties", "indoor_comfort.dbf"))
    # Directly return the membership test instead of the
    # `if not ...: return False / return True` detour.
    return 'Ve_lpspax' in indoor_comfort.columns
def internal_loads_is_3_22(scenario):
    """Return True if ``internal_loads.dbf`` still has the pre-3.22.1
    "pax" column naming (``Occ_m2pax``)."""
    internal_loads = dbf_to_dataframe(os.path.join(scenario, "inputs", "building-properties", "internal_loads.dbf"))
    # Directly return the membership test instead of the
    # `if not ...: return False / return True` detour.
    return 'Occ_m2pax' in internal_loads.columns
def output_occupancy_is_3_22(scenario):
    """Return True if any non-backup occupancy schedule output still uses the
    pre-3.22.1 ``people_pax`` column.

    Unlike the previous implementation, backup files (``*_original*``) are
    skipped *before* being parsed, and the search stops at the first match
    instead of reading every file in the directory.
    """
    occupancy_dir = os.path.join(scenario, 'outputs', 'data', 'occupancy')
    if not os.path.isdir(occupancy_dir):
        return False
    for file_name in os.listdir(occupancy_dir):
        # Backup copies produced by earlier migrations are not checked.
        if '_original' in file_name:
            continue
        schedule_df = pd.read_csv(os.path.join(occupancy_dir, file_name))
        if 'people_pax' in schedule_df.columns:
            return True
    return False
def migrate_3_22_to_3_22_1(scenario):
    '''
    Renames columns in `indoor_comfort.dbf` and `internal_loads.dbf` to remove the use of "pax" meaning "people".
    '''
    # Old-name -> new-name mappings for each table touched by this migration.
    INDOOR_COMFORT_COLUMNS = {'Ve_lpspax': 'Ve_lsp'}
    INTERNAL_LOADS_COLUMNS = {'Occ_m2pax': 'Occ_m2p', 'Qs_Wpax': 'Qs_Wp', 'Vw_lpdpax': 'Vw_ldp',
                              'Vww_lpdpax': 'Vww_ldp', 'X_ghpax': 'X_ghp'}
    OCCUPANCY_COLUMNS = {'people_pax': 'people_p'}
    if indoor_comfort_is_3_22(scenario):
        # import building properties
        indoor_comfort = dbf_to_dataframe(os.path.join(scenario, 'inputs', 'building-properties', 'indoor_comfort.dbf'))
        # make a backup copy of original data for user's own reference
        os.rename(os.path.join(scenario, 'inputs', 'building-properties', 'indoor_comfort.dbf'),
                  os.path.join(scenario, 'inputs', 'building-properties', 'indoor_comfort_original.dbf'))
        # rename columns containing "pax"
        indoor_comfort.rename(columns=INDOOR_COMFORT_COLUMNS, inplace=True)
        # export dataframes to dbf files
        print("- writing indoor_comfort.dbf")
        dataframe_to_dbf(indoor_comfort, os.path.join(scenario, 'inputs', 'building-properties', 'indoor_comfort.dbf'))
    if internal_loads_is_3_22(scenario):
        # import building properties
        internal_loads = dbf_to_dataframe(os.path.join(scenario, 'inputs', 'building-properties', 'internal_loads.dbf'))
        # make a backup copy of original data for user's own reference
        os.rename(os.path.join(scenario, 'inputs', 'building-properties', 'internal_loads.dbf'),
                  os.path.join(scenario, 'inputs', 'building-properties', 'internal_loads_original.dbf'))
        # rename columns containing "pax"
        internal_loads.rename(columns=INTERNAL_LOADS_COLUMNS, inplace=True)
        # export dataframes to dbf files
        print("- writing internal_loads.dbf")
        dataframe_to_dbf(internal_loads, os.path.join(scenario, 'inputs', 'building-properties', 'internal_loads.dbf'))
    # import building properties
    # sheet_name=None loads every worksheet into a dict of DataFrames.
    use_type_properties = pd.read_excel(os.path.join(scenario, 'inputs', 'technology', 'archetypes', 'use_types',
                                                     'USE_TYPE_PROPERTIES.xlsx'), sheet_name=None)
    # NOTE(review): `max([...])` over booleans is effectively `any(...)` here
    # (the key lists are non-empty); consider `any` for clarity.
    if max([i in use_type_properties['INTERNAL_LOADS'].columns for i in INTERNAL_LOADS_COLUMNS.keys()]) or max(
            [i in use_type_properties['INDOOR_COMFORT'].columns for i in INDOOR_COMFORT_COLUMNS.keys()]):
        os.rename(os.path.join(scenario, 'inputs', 'technology', 'archetypes', 'use_types', 'USE_TYPE_PROPERTIES.xlsx'),
                  os.path.join(scenario, 'inputs', 'technology', 'archetypes', 'use_types',
                               'USE_TYPE_PROPERTIES_original.xlsx'))
        # rename columns containing "pax"
        use_type_properties['INDOOR_COMFORT'].rename(columns=INDOOR_COMFORT_COLUMNS, inplace=True)
        use_type_properties['INTERNAL_LOADS'].rename(columns=INTERNAL_LOADS_COLUMNS, inplace=True)
        # export dataframes to dbf files
        print("-writing USE_TYPE_PROPERTIES.xlsx")
        with pd.ExcelWriter(os.path.join(scenario, 'inputs', 'technology', 'archetypes', 'use_types',
                                         'USE_TYPE_PROPERTIES.xlsx')) as writer1:
            for sheet_name in use_type_properties.keys():
                use_type_properties[sheet_name].to_excel(writer1, sheet_name=sheet_name, index=False)
    if output_occupancy_is_3_22(scenario):
        # if occupancy schedule files are found in the outputs, these are also renamed
        print("-writing schedules in ./outputs/data/occupancy")
        for file_name in os.listdir(os.path.join(scenario, 'outputs', 'data', 'occupancy')):
            schedule_df = pd.read_csv(os.path.join(scenario, 'outputs', 'data', 'occupancy', file_name))
            if 'people_pax' in schedule_df.columns:
                # Back up the original schedule before rewriting it.
                os.rename(os.path.join(scenario, 'outputs', 'data', 'occupancy', file_name),
                          os.path.join(scenario, 'outputs', 'data', 'occupancy', file_name.split('.')[0] +
                                       '_original.' + file_name.split('.')[1]))
                schedule_df.rename(columns=OCCUPANCY_COLUMNS, inplace=True)
                # export dataframes to dbf files
                schedule_df.to_csv(os.path.join(scenario, 'outputs', 'data', 'occupancy', file_name))
    print("- done")
print("- done")
def main(config):
    """Run every migration that matches the scenario in *config*, in order."""
    for key, migrator in find_migrators(config.scenario):
        print("Performing migration {key}".format(key=key))
        migrator(config.scenario)
if __name__ == "__main__":
    main(cea.config.Configuration())
64738 | <gh_stars>1-10
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
from google.cloud import ndb
from pydantic import BaseModel, validator
from typing import List, Dict
import re
class VTType(str, Enum):
    """Object types handled by this service (string-valued enum).

    The values mirror the VirusTotal object type identifiers.
    """
    file = "file"
    url = "url"
    domain = "domain"
    ip_address = "ip_address"
class VTData(BaseModel):
    """One result object (a single entry of a response's ``data`` list)."""
    attributes: Dict  # raw attribute map; keys depend on the object type
    id: str  # object identifier (hash/URL id/domain/IP, per `type`)
    links: Dict  # hypermedia links for this object
    type: VTType  # which kind of object this entry describes
class APIKey(BaseModel):
    """Request model carrying an API key; base class for keyed requests."""
    api_key: str  # must be strictly alphanumeric (enforced below)
    @validator("api_key")
    def api_key_validator(cls, v):
        # Reject anything that is not purely alphanumeric (also rejects "").
        if not v.isalnum():
            raise ValueError("API key must be alphanumeric!")
        return v
class VTAPI(APIKey):
    """API response envelope combined with the caller's credentials."""
    data: List[VTData]  # the result objects
    links: Dict  # top-level links (e.g. self/pagination)
    meta: Dict  # response metadata; exact keys depend on the endpoint
    jwt_token: str  # caller's JWT carried alongside the payload
class APIKeyEmail(APIKey):
    """An API key paired with an email address."""
    email: str  # validated by the (intentionally simple) regex below
    @validator("email")
    def email_validator(cls, v):
        # NOTE(review): this pattern only accepts lower-case local parts and a
        # single dotted domain; re.match anchors the start implicitly and the
        # `$` anchors the end.  Confirm that rejecting upper-case addresses
        # is intended.
        regex = r"[a-z0-9\.\-]+[@]\w+[.]\w+$"
        if not re.match(regex, v):
            raise ValueError("Email address is not valid!")
        return v
class UserEmail(ndb.Model):
    """Cloud Datastore (NDB) entity storing a user's email address."""
    email = ndb.StringProperty()  # the stored email address
class AuthUser(BaseModel):
    """Credential pair for an authenticated user."""
    access_key: str  # presumably the service-side access key — confirm usage
    vt_key: str  # presumably the user's VirusTotal API key — confirm usage
| StarcoderdataPython |
20447 | <filename>WeIrD-StRiNg-CaSe.py
def to_weird_case(string):
    """Upper-case every even-indexed character of each word.

    Words are split on whitespace and re-joined with single spaces.
    Odd-indexed characters are left untouched (deliberately NOT lower-cased).

    >>> to_weird_case("This is a test")
    'ThIs Is A TeSt'
    """
    def weird(word):
        # Upper-case characters at even positions, keep the rest as-is.
        return ''.join(ch.upper() if i % 2 == 0 else ch
                       for i, ch in enumerate(word))
    # Comprehension replaces the manual list-mutation loop with the
    # hand-maintained `count` index.
    return ' '.join(weird(word) for word in string.split())
'''
A neater alternative version (note: unlike the implementation above, it also lower-cases the odd-indexed characters):
def to_weird_case(string):
recase = lambda s: "".join([c.upper() if i % 2 == 0 else c.lower() for i, c in enumerate(s)])
return " ".join([recase(word) for word in string.split(" ")])
''' | StarcoderdataPython |
3216557 | <reponame>jpmorgan98/MCDC-TNT<filename>mcdc_tnt/mako_kernels/gpu/advance.py
"""
Name: Advance
breif: inputdeck for MCDC-TNT
Author: <NAME> (OR State Univ - <EMAIL>) CEMeNT
Date: Dec 2nd 2021
"""
import math
import numpy as np
import numba as nb
import pycuda.autoinit
import pycuda.driver as drv
from pycuda.compiler import SourceModule
mod = SourceModule("""
__global__ void AdvanceCuda(float *p_pos_x, float *p_pos_y, float *p_pos_z,
float *p_dir_x, float *p_dir_y, float *p_dir_z,
int *p_mesh_cell, float *p_speed, float *p_time,
float *clever_in, float *mesh_total_xsec,
int *p_end_trans, float *rands,
float *mesh_dist_traveled, float *mesh_dist_traveled_squared,
int *num_dead)
{
float dx = clever_in[1];
float L = clever_in[0];
const int num_part = clever_in[2];
const int max_mesh_index = clever_in[3];
const int i = threadIdx.x;
const float kicker = 1e-10;
const int init_cell = p_mesh_cell[i];
float p_dist_travled = 0.0;
int cell_next;
if (i < num_part){
if (p_end_trans[i] == 0){
if (p_pos_x[i] < 0){
p_end_trans[i] = 1;
atomicAdd(&num_dead[0], 1);
}
else if (p_pos_x[i] >= L){
p_end_trans[i] = 1;
atomicAdd(&num_dead[0], 1);
}
else{
float dist = -log(rands[i]/mesh_total_xsec[p_mesh_cell[i]]);
float x_loc = (p_dir_x[i] * dist) + p_pos_x[i];
float LB = p_mesh_cell[i] * dx;
float RB = LB + dx;
if (x_loc < LB){
p_dist_travled = (LB - p_pos_x[i])/p_dir_x[i] + kicker; //29
cell_next = p_mesh_cell[i] - 1;
}
else if (x_loc > RB){
p_dist_travled = (RB - p_pos_x[i])/p_dir_x[i] + kicker;
cell_next = p_mesh_cell[i] + 1;
}
else{
p_dist_travled = dist;
p_end_trans[i] = 1;
atomicAdd(&num_dead[0], 1);
cell_next = p_mesh_cell[i];
}
p_pos_x[i] += p_dir_x[i]*p_dist_travled;
p_pos_y[i] += p_dir_y[i]*p_dist_travled;
p_pos_z[i] += p_dir_z[i]*p_dist_travled;
atomicAdd(&mesh_dist_traveled[init_cell], p_dist_travled);
atomicAdd(&mesh_dist_traveled_squared[init_cell], pow(p_dist_travled,2));
p_mesh_cell[i] = cell_next;
p_time[i] += p_dist_travled/p_speed[i];
}
}
}
}
""")
def Advance(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, dx, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time,
            num_part, mesh_total_xsec, mesh_dist_traveled, mesh_dist_traveled_squared, L):
    """Advance all particles on the GPU until every history terminates.

    Copies the particle phase-space arrays to the device, repeatedly launches
    the AdvanceCuda kernel (one flight per particle per launch) until every
    particle has either collided or leaked out of [0, L] (or a safety cap of
    1000 launches is hit), then copies the updated state and mesh tallies
    back to the host.

    Returns the updated (positions, mesh cells, directions, speed, time,
    mesh path-length tallies) in the same order as the original code.
    """
    p_end_trans = np.zeros(num_part, dtype=np.int32)
    end_flag = 0
    max_mesh_index = len(mesh_total_xsec)-1
    cycle_count = 0

    # --- copy particle state to the device ---
    d_p_pos_x = drv.mem_alloc(p_pos_x.nbytes)
    d_p_pos_y = drv.mem_alloc(p_pos_y.nbytes)
    d_p_pos_z = drv.mem_alloc(p_pos_z.nbytes)
    drv.memcpy_htod(d_p_pos_x, p_pos_x)
    drv.memcpy_htod(d_p_pos_y, p_pos_y)
    drv.memcpy_htod(d_p_pos_z, p_pos_z)

    d_p_dir_y = drv.mem_alloc(p_dir_y.nbytes)
    d_p_dir_z = drv.mem_alloc(p_dir_z.nbytes)
    d_p_dir_x = drv.mem_alloc(p_dir_x.nbytes)
    drv.memcpy_htod(d_p_dir_x, p_dir_x)
    drv.memcpy_htod(d_p_dir_y, p_dir_y)
    drv.memcpy_htod(d_p_dir_z, p_dir_z)

    d_p_mesh_cell = drv.mem_alloc(p_mesh_cell.nbytes)
    d_p_speed = drv.mem_alloc(p_speed.nbytes)
    d_p_time = drv.mem_alloc(p_time.nbytes)
    drv.memcpy_htod(d_p_mesh_cell, p_mesh_cell)
    drv.memcpy_htod(d_p_speed, p_speed)
    drv.memcpy_htod(d_p_time, p_time)

    d_p_end_trans = drv.mem_alloc(p_end_trans.nbytes)
    d_mesh_total_xsec = drv.mem_alloc(mesh_total_xsec.nbytes)
    drv.memcpy_htod(d_p_end_trans, p_end_trans)
    drv.memcpy_htod(d_mesh_total_xsec, mesh_total_xsec)

    d_mesh_dist_traveled = drv.mem_alloc(mesh_dist_traveled.nbytes)
    d_mesh_dist_traveled_squared = drv.mem_alloc(mesh_dist_traveled_squared.nbytes)
    drv.memcpy_htod(d_mesh_dist_traveled, mesh_dist_traveled)
    drv.memcpy_htod(d_mesh_dist_traveled_squared, mesh_dist_traveled_squared)

    threadsperblock = 32
    blockspergrid = (num_part + (threadsperblock - 1)) // threadsperblock
    # NOTE(review): the kernel computes its index from threadIdx.x only while
    # the launch below packs blockspergrid into the block's y dimension, so
    # particles with index >= threadsperblock are never advanced.  A real fix
    # needs a matching kernel change (i = blockIdx.x*blockDim.x + threadIdx.x
    # with a 1-D grid); the launch shape is left unchanged here.
    summer = 0

    number_done = np.zeros(1, dtype=np.int32)
    d_number_done = drv.mem_alloc(number_done.nbytes)
    drv.memcpy_htod(d_number_done, number_done)

    AdvanceCuda = mod.get_function("AdvanceCuda")
    clever_io = np.array([L, dx, num_part, max_mesh_index], np.float32)

    while end_flag == 0 and cycle_count < 1000:
        # fresh uniform randoms for this flight of every particle
        rands = np.random.random(num_part).astype(np.float32)

        # BUG FIX: the direction buffers must be passed in (x, y, z) order to
        # match the kernel signature; previously (y, z, x) was passed, so the
        # kernel moved particles along the wrong axes.
        AdvanceCuda(d_p_pos_x, d_p_pos_y, d_p_pos_z,
                    d_p_dir_x, d_p_dir_y, d_p_dir_z,
                    d_p_mesh_cell, d_p_speed, d_p_time,
                    drv.In(clever_io), d_mesh_total_xsec,
                    d_p_end_trans, drv.In(rands), d_mesh_dist_traveled, d_mesh_dist_traveled_squared, d_number_done,
                    block=(threadsperblock, blockspergrid, 1))

        # BUG FIX: the termination counter lives on the device and must be
        # copied back before testing it; previously the host-side zeros array
        # was compared directly, so the loop always ran all 1000 cycles.
        drv.memcpy_dtoh(number_done, d_number_done)
        summer = int(number_done[0])
        if summer == num_part:
            end_flag = 1

        cycle_count += 1
        print("Advance Complete:......{0}%       ({1}/{2})  cycle: {3}".format(int(100*summer/num_part), summer, num_part, cycle_count), end = "\r")
    print()

    # --- copy results back to the host ---
    drv.memcpy_dtoh(p_pos_x, d_p_pos_x)
    drv.memcpy_dtoh(p_pos_y, d_p_pos_y)
    drv.memcpy_dtoh(p_pos_z, d_p_pos_z)
    drv.memcpy_dtoh(p_dir_x, d_p_dir_x)
    drv.memcpy_dtoh(p_dir_y, d_p_dir_y)
    drv.memcpy_dtoh(p_dir_z, d_p_dir_z)
    drv.memcpy_dtoh(p_speed, d_p_speed)
    drv.memcpy_dtoh(p_time, d_p_time)
    drv.memcpy_dtoh(p_mesh_cell, d_p_mesh_cell)
    drv.memcpy_dtoh(mesh_dist_traveled, d_mesh_dist_traveled)
    drv.memcpy_dtoh(mesh_dist_traveled_squared, d_mesh_dist_traveled_squared)

    return(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, mesh_dist_traveled, mesh_dist_traveled_squared)
@nb.jit(nopython=True)
def StillIn(p_pos_x, surface_distances, p_alive, num_part):
    """Kill particles that have left the slab and count the exits.

    A particle at or beyond the first surface leaves on the left; one at or
    beyond the last surface leaves on the right.  Returns the updated alive
    flags together with the left and right exit tallies.
    """
    left_exits = 0
    right_exits = 0
    left_edge = surface_distances[0]
    right_edge = surface_distances[len(surface_distances)-1]
    for idx in range(num_part):
        pos = p_pos_x[idx]
        if pos <= left_edge:
            left_exits += 1
            p_alive[idx] = False
        elif pos >= right_edge:
            right_exits += 1
            p_alive[idx] = False
    return(p_alive, left_exits, right_exits)
def test_Advance():
    """Smoke test for Advance on a 4-cell slab (requires a CUDA GPU).

    Particles 0 and 5 start outside [0, L] and must not move; the interior
    particles travel in +x and should end up past the last cell boundary.
    NOTE(review): these assertions depend on the host/kernel launch actually
    advancing every particle -- confirm after any launch-shape change.
    """
    L: float = 1
    dx: float = .25
    N_m: int = 4
    num_part: int = 6
    # one particle left of the slab, four inside, one right of the slab
    p_pos_x = np.array([-.01, 0, .1544, .2257, .75, 1.1], np.float32)
    p_pos_y = 2.1*np.ones(num_part, np.float32)
    p_pos_z = 3.4*np.ones(num_part, np.float32)
    p_mesh_cell = np.array([-1, 0, 0, 1, 3, 4], np.int32)
    # all directions along +x except particle 0, which points in -x
    p_dir_x = np.ones(num_part, np.float32)
    p_dir_x[0] = -1
    p_dir_y = np.zeros(num_part, np.float32)
    p_dir_z = np.zeros(num_part, np.float32)
    p_speed = np.ones(num_part, np.float32)
    p_time = np.zeros(num_part, np.float32)
    p_alive = np.ones(num_part, np.int32)
    p_alive[5] = 0
    particle_speed = 1
    mesh_total_xsec = np.array([0.1,1,.1,100], np.float32)
    mesh_dist_traveled_squared = np.zeros(N_m, np.float32)
    mesh_dist_traveled = np.zeros(N_m, np.float32)
    [p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, mesh_dist_traveled, mesh_dist_traveled_squared] = Advance(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, dx, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, num_part, mesh_total_xsec, mesh_dist_traveled, mesh_dist_traveled_squared, L)
    # path-length tallies must have accumulated something
    assert (np.sum(mesh_dist_traveled) > 0)
    assert (np.sum(mesh_dist_traveled_squared) > 0)
    # out-of-slab particles are terminated immediately and do not move
    assert (p_pos_x[0] == -.01)
    assert (p_pos_x[5] == 1.1)
    assert (p_pos_x[1:4].all() > .75)
def test_StillIn():
    """Unit test for StillIn: two particles exit each side, interior survive."""
    num_part = 7
    surface_distances = [0,.25,.75,1]
    # positions: two at/left of 0, three interior, two at/right of 1
    p_pos_x = np.array([-.01, 0, .1544, .2257, .75, 1.1, 1])
    p_alive = np.ones(num_part, bool)
    [p_alive, tally_left, tally_right] = StillIn(p_pos_x, surface_distances, p_alive, num_part)
    assert(p_alive[0] == False)
    assert(p_alive[5] == False)
    assert(tally_left == 2)
    assert(tally_right == 2)
    assert(p_alive[2:4].all() == True)
if __name__ == '__main__':
    # Ad-hoc smoke-test entry point (requires a CUDA-capable GPU via PyCUDA).
    test_Advance()
    #test_StillIn()
| StarcoderdataPython |
# -*- coding: utf-8 -*-
from elasticsearch import Elasticsearch

# Ad-hoc probe script: fetch the field mapping of one index and print the
# field names.  The bare numbered prints are progress markers for manual
# debugging of where a hang/exception occurs.
es = Elasticsearch(hosts='192.168.3.11:9299')
print(0)
index = 'bi_da9ae629609135e55e4af697308f63b4'
# NOTE(review): an explicit doc_type ('doc') implies the legacy pre-7.x
# mapping layout -- confirm the target cluster/client version.
resp = es.indices.get_mapping(index=index, doc_type='doc')
print(1)
mappings = resp[index]['mappings']['doc']['properties']
print(2)
field_list = mappings.keys()
print(3)
print(list(field_list))
| StarcoderdataPython |
1674979 | import igl
import numpy as np
import mpmath as mp
import os
import argparse
import matplotlib.pyplot as plt
from conformal_py import *
from overload_math import *
from render import *
from collections import namedtuple
from copy import deepcopy
import meshplot as meshp
import pickle
RenderInfo = namedtuple('RenderInfo', 'pt_fids, pt_bcs, fid_mat, bc_mat, cam, bd_thick, view, proj, H, W')
def render_texture(out, name, v3d, f, m, u, cones, reindex, render_info, build_double):
    """Render the conformal parametrization as a checkerboard/grid texture.

    Lays out the flattened mesh (float or multiprecision path), draws the
    parameter grid into the pre-raycast pixel maps of render_info, adds cut
    lines at the cones, applies shading, and writes two PNGs under *out*.
    NOTE(review): relies on the module-level flag ``use_mpf``, which is only
    bound in the __main__ block -- calling this from an import without setting
    it raises NameError; confirm intended usage.
    """
    fid_mat = render_info.fid_mat
    pt_fids = render_info.pt_fids
    pt_bcs = render_info.pt_bcs
    bc_mat = render_info.bc_mat
    cam = render_info.cam
    bd_thick = render_info.bd_thick
    view = render_info.view
    proj = render_info.proj
    H = render_info.H
    W = render_info.W
    reindex = np.array(reindex)
    # update original cone ids to m
    cones = [idx for idx in range(len(reindex)) if reindex[idx] in cones]
    fid_mat_input = deepcopy(fid_mat)
    bc_mat_input = deepcopy(bc_mat)
    # scatter the sampled hit face ids / barycentrics back into the pixel maps
    cnt = 0
    for i in trange(H):
        for j in range(W):
            if fid_mat[i][j] > -1:
                fid_mat[i][j] = pt_fids[cnt]
                bc_mat[i][j] = pt_bcs[cnt]
                cnt += 1
    # compute the (u, v) layout; multiprecision results come back as strings
    # wrapped in repr(), hence the mp.mpf(repr(...)) round-trip below
    is_cut_h = []
    if use_mpf:
        u_cpp, v_cpp, is_cut_h = layout_mpf(m, list(u), is_cut_h, -1)
        u_cpp = [mp.mpf(repr(u_cppi)) for u_cppi in u_cpp]
        v_cpp = [mp.mpf(repr(v_cppi)) for v_cppi in v_cpp]
    else:
        u_cpp, v_cpp, is_cut_h = layout_float(m, list(u), is_cut_h, -1)
    fid_mat = add_cut_to_sin(m.n, m.opp, m.to, cones, m.type, is_cut_h, reindex, v3d, f, bd_thick, fid_mat, cam, H, W, build_double)
    N_bw = 10

    # smoothstep used to anti-alias the grid lines
    def cprs(x):
        x = max(0,min(1,x))
        return max(0, min(1, 3 * x * x - 2 * x * x * x))
    print("draw grid...")
    if use_mpf:
        u = np.array([mp.mpf(repr(ui)) for ui in u])
        color_rgb_gd = draw_grid_mpf(fid_mat, bc_mat, m.h, m.n, m.to, u_cpp, v_cpp, u, cprs, H, W, N_bw)
    else:
        u = np.array(u)
        color_rgb_gd = draw_grid(fid_mat, bc_mat, m.h, m.n, m.to, u_cpp, v_cpp, u, cprs, H, W, N_bw) # faster but less accurate float alternative: draw_grid
    plt.imsave(out + "/" + name + "_" + str(N_bw) + "_gd_plain.png", color_rgb_gd)
    print("add shading...")
    add_shading(color_rgb_gd, v3d, f, fid_mat_input, bc_mat_input, view, proj)
    plt.imsave(out + "/" + name + "_" + str(N_bw) + "_gd.png", color_rgb_gd)
def do_conformal(m, dir, out, output_type="param", output_format="obj", use_mpf=False, error_log=False, energy_cond=False, energy_samples=False, suffix=None, flip_count=False, prec=None, no_round_Th_hat=False, print_summary=False, eps=None, no_plot_result=False, bypass_overlay=False, max_itr=500, no_lm_reset=False, do_reduction=False,lambda0=1,bound_norm_thres=1, log_level=2):
    """Run the discrete conformal mapping pipeline on one mesh.

    Loads ``dir/m`` and its target angles (``*_Th_hat``), builds the algorithm
    / line-search / stats parameter structs, and dispatches to the float or
    multiprecision (mpmath) backend according to *use_mpf*.  Depending on
    (output_type, output_format) it writes a pickle, a textured OBJ, or a
    rendered PNG into *out*.
    """
    # --- numeric backend selection (float vs mpmath multiprecision) ---
    if use_mpf:
        if prec == None:
            mp.prec = 100
        else:
            mp.prec = prec
        if eps == None:
            eps = 0
        float_type = mp.mpf
    else:
        float_type = float
        if eps == None:
            eps = 0

    # --- load mesh and target angles ---
    v3d, f = igl.read_triangle_mesh(dir+'/'+m)
    dot_index = m.rfind(".")
    name = m[:dot_index]
    if suffix != None:
        name = name+"_"+str(suffix)
    else:
        name = name
    Th_hat = np.loadtxt(dir+"/"+name+"_Th_hat", dtype=str)
    Th_hat = nparray_from_float64(Th_hat,float_type)
    if use_mpf and not no_round_Th_hat:
        # Round rational multiples of pi to multiprecision accuray
        for i,angle in enumerate(Th_hat):
            n=round(60*angle/mp.pi)
            Th_hat[i] = n*mp.pi/60

    # identify the cones - used for visualization
    is_bd = igl.is_border_vertex(v3d, f)
    # need to build double mesh when it has boundary
    build_double = (np.sum(is_bd) != 0)
    cones = np.array([id for id in range(len(Th_hat)) if np.abs(Th_hat[id]-2*mpi(float_type)) > 1e-15 and not is_bd[id]], dtype=int)

    W = 500; H = 300 # figure size
    bd_thick = 2; sin_size = 3
    pt_fids = []; pt_bcs=[]

    # --- optional render setup: restore camera and raycast pixel samples ---
    if output_type == "render" and output_format == "png" and not no_plot_result:
        with open("data/cameras/" + name + "_camera.pickle", 'rb') as fp:
            cam = pickle.load(fp)
            vc = pickle.load(fp)
            fc = pickle.load(fp)
            red_size = pickle.load(fp)
            blue_size = pickle.load(fp)
        (view, proj, vp) = cam
        if not build_double:
            fc = fc[:red_size+blue_size,:]
        fid_mat, bc_mat = get_pt_mat(cam, v3d, f, vc, fc, red_size, blue_size, W, H)
        for i in range(H):
            for j in range(W):
                if fid_mat[i][j] > -1:
                    pt_fids.append(fid_mat[i][j])
                    pt_bcs.append(bc_mat[i][j])

    # Create algorithm parameter struct
    alg_params = AlgorithmParameters()
    alg_params.MPFR_PREC = mp.prec
    alg_params.initial_ptolemy = False
    alg_params.error_eps = eps
    if use_mpf:
        alg_params.min_lambda = pow(2, -100)
    else:
        alg_params.min_lambda = 1e-16
    alg_params.newton_decr_thres = -0.01 * eps * eps;
    alg_params.max_itr = max_itr
    alg_params.bypass_overlay = bypass_overlay;

    stats_params = StatsParameters()
    stats_params.flip_count = flip_count
    stats_params.output_dir = out
    if use_mpf:
        stats_params.name = name + "_mpf"
    else:
        stats_params.name = name + "_float"
    stats_params.print_summary = print_summary
    stats_params.error_log = error_log
    stats_params.log_level = log_level

    # Create line search parameter struct
    ls_params = LineSearchParameters()
    ls_params.energy_cond = energy_cond
    ls_params.energy_samples = energy_samples
    ls_params.do_reduction = do_reduction
    ls_params.do_grad_norm_decrease = True
    ls_params.bound_norm_thres = bound_norm_thres
    ls_params.lambda0 = lambda0
    ls_params.reset_lambda = not no_lm_reset

    # --- double-precision backend ---
    if float_type == float:
        if output_type == "he_metric" and output_format == "pickle":
            n, opp, l = conformal_metric_cl_double(v3d, f, Th_hat, alg_params, ls_params, stats_params)
            with open(out + "/" + name + "_out.pickle", 'wb') as pf:
                pickle.dump((n, opp, l), pf)
        elif output_type == "vf_metric" and output_format == "pickle":
            vo, fo, l = conformal_metric_vl_double(v3d, f, Th_hat, alg_params, ls_params, stats_params)
            with open(out + "/" + name + "_out.pickle", 'wb') as pf:
                pickle.dump((vo, fo, l), pf)
        elif output_type == "param" and output_format == "pickle":
            n, opp, u, v = conformal_parametrization_cl_double(v3d, f, Th_hat, alg_params, ls_params, stats_params)
            with open(out + "/" + name + "_out.pickle", 'wb') as pf:
                pickle.dump((n, opp, u, v), pf)
        elif output_type == "param" and output_format == "obj":
            vo, fo, u, v, ft = conformal_parametrization_vf_double(v3d, f, Th_hat, alg_params, ls_params, stats_params)
            write_texture_obj_double(out + "/" + name + "_out.obj", vo, fo, u, v, ft)
        elif output_type == "render" and output_format == "png": # for texture rendering
            m_o, u, pt_fids, pt_bcs, reindex, _ = conformal_metric_double(v3d, f, Th_hat, pt_fids, pt_bcs, alg_params, ls_params, stats_params);
            m = m_o._m
            if not no_plot_result:
                render_info = RenderInfo(pt_fids, pt_bcs, fid_mat, bc_mat, cam, bd_thick, view, proj, H, W)
                render_texture(out, name, v3d, f, m, u, cones, reindex, render_info, build_double)
        else:
            print("non-supported output-type/output-format")
            print("output_type options:")
            print("    'render'")
            print("    'vf_metric'")
            print("    'he_metric'")
            print("    'param'")
            print("output format options:")
            print("    'png' (compatible with 'render' only)")
            print("    'pickle' (compatible with 'he_metric', 'vf_metric' and 'param')")
            print("    'obj' (compatible with 'param')")
    # --- multiprecision backend: values are passed/returned as strings ---
    else:
        set_mpf_prec(alg_params.MPFR_PREC)
        vnstr = np.vectorize(lambda a:str(repr(a))[5:-2])
        Th_hat = vnstr(Th_hat)
        if output_type == "he_metric" and output_format == "pickle":
            n, opp, l = conformal_metric_cl_mpf(v3d, f, Th_hat, alg_params, ls_params, stats_params)
            l_str = np.array([str(l[idx]) for idx in range(len(l))])
            with open(out + "/" + name + "_out.pickle", 'wb') as pf:
                pickle.dump((n, opp, l_str), pf)
        elif output_type == "vf_metric" and output_format == "pickle":
            vo, fo, l = conformal_metric_vl_mpf(v3d, f, Th_hat, alg_params, ls_params, stats_params)
            vo_str = [[str(vo[i][k]) for k in range(3)] for i in range(len(vo))]
            l_str = np.array([str(l[idx]) for idx in range(len(l))])
            with open(out + "/" + name + "_out.pickle", 'wb') as pf:
                pickle.dump((vo_str, fo, l_str), pf)
        elif output_type == "param" and output_format == "pickle":
            n, opp, u, v = conformal_parametrization_cl_mpf(v3d, f, Th_hat, alg_params, ls_params, stats_params)
            u_str = [str(u[i]) for i in range(len(u))]
            v_str = [str(v[i]) for i in range(len(v))]
            with open(out + "/" + name + "_out.pickle", 'wb') as pf:
                pickle.dump((n, opp, u_str, v_str), pf)
        elif output_type == "param" and output_format == "obj":
            vo, fo, u, v, ft = conformal_parametrization_vf_mpf(v3d, f, Th_hat, alg_params, ls_params, stats_params)
            vo_fl = [[float(str(vo[i][k])) for k in range(3)] for i in range(len(vo))]
            u_fl = [float(str(u[i])) for i in range(len(u))]
            v_fl = [float(str(v[i])) for i in range(len(v))]
            write_texture_obj_double(out + "/" + name + "_out.obj", vo_fl, fo, u_fl, v_fl, ft)
        elif output_type == "render" and output_format == "png": # default interface - for texture rendering
            m_o, u, pt_fids, pt_bcs, reindex, _ = conformal_metric_mpf(v3d, f, Th_hat, pt_fids, pt_bcs, alg_params, ls_params, stats_params);
            m = m_o._m
            if not no_plot_result:
                render_info = RenderInfo(pt_fids, pt_bcs, fid_mat, bc_mat, cam, bd_thick, view, proj, H, W)
                render_texture(out, name, v3d, f, m, u, cones, reindex, render_info, build_double)
        else:
            print("non-supported output-type/output-format")
            print("output_type options:")
            print("    'render'")
            print("    'vf_metric'")
            print("    'he_metric'")
            print("    'param'")
            print("output format options:")
            print("    'png' (compatible with 'render' only)")
            print("    'pickle' (compatible with 'he_metric', 'vf_metric' and 'param')")
            print("    'obj' (compatible with 'param')")
if __name__ == "__main__":
    # Parse arguments for the script
    parser = argparse.ArgumentParser(description='Run the conformal map with options.')
    parser.add_argument("-i", "--input", help="input folder that stores obj files and Th_hat")
    parser.add_argument("-o", "--output", help="output folder for stats", default="out")
    parser.add_argument("-f", "--fname", help="filename of the obj file")
    parser.add_argument("--use_mpf", action="store_true", help="True for enable multiprecision", default=False)
    parser.add_argument("--do_reduction", action="store_true", help="do reduction for search direction", default=False)
    parser.add_argument("-p", "--prec", help="choose the mantissa value of mpf", type=int)
    parser.add_argument("-m", "--max_itr", help="choose the maximum number of iterations", type=int, default=50)
    parser.add_argument("--energy_cond", action="store_true", help="True for enable energy computation for line-search")
    parser.add_argument("--energy_samples", action="store_true", help="True for write out energy sample and newton decrement before linesearch")
    parser.add_argument("--error_log", action="store_true", help="True for enable writing out the max/ave angle errors per newton iteration")
    parser.add_argument("--flip_count", action="store_true", help="True for enable collecting flip type stats")
    parser.add_argument("--no_round_Th_hat", action="store_true", help="True for NOT rounding Th_hat values to multiples of pi/60")
    parser.add_argument("--print_summary", action="store_true", help="print a summary table contains target angle range and final max curvature error")
    parser.add_argument("--no_plot_result", action="store_true", help="True for NOT rendering the results, used only for reproducing figures to speedup.")
    parser.add_argument("--bypass_overlay", action="store_true", help="True for NOT compute overlay, used only for reproducing figures to speedup.")
    parser.add_argument("--no_lm_reset", action="store_true", help="True for using double the previous lambda for line search.")
    parser.add_argument("--suffix", help="id assigned to each model for the random test")
    parser.add_argument("--eps", help="target error threshold")
    parser.add_argument("--lambda0", help="initial lambda value", type=float, default=1)
    parser.add_argument("--bound_norm_thres", help="threshold to drop the norm bound", type=float, default=1e-10)
    parser.add_argument("--output_type", action='store', help="output type selection: 'render', 'he_metric', 'vf_metric', 'param'", type=str, default="render")
    parser.add_argument("--output_format", action='store', help="output file format selection: 'png', 'pickle', 'obj'", type=str, default="png")
    parser.add_argument("--log_level", help="console logger info level [verbose 0-6]", type=int, default=2)

    # unpack parsed arguments into locals (note: 'input' shadows the builtin)
    args = parser.parse_args()
    output = args.output
    input = args.input
    fname = args.fname
    use_mpf = args.use_mpf
    do_reduction = args.do_reduction
    max_itr = args.max_itr
    energy_cond = args.energy_cond
    error_log = args.error_log
    flip_count = args.flip_count
    no_round_Th_hat = args.no_round_Th_hat
    prec = args.prec
    no_lm_reset = args.no_lm_reset
    suffix = args.suffix
    print_summary = args.print_summary
    no_plot_result = args.no_plot_result
    bypass_overlay = args.bypass_overlay
    eps = args.eps
    lambda0 = args.lambda0
    bound_norm_thres = args.bound_norm_thres
    log_level = args.log_level
    energy_samples = args.energy_samples
    output_type = args.output_type
    output_format = args.output_format
    # ensure the output directory exists before the pipeline writes into it
    if not os.path.isdir(output):
        os.makedirs(output, exist_ok=True)
    if eps != None:
        eps = float(eps)
        do_conformal(fname, input, output, output_type, output_format, use_mpf, error_log, energy_cond, energy_samples, suffix, flip_count, prec, no_round_Th_hat, print_summary, eps, no_plot_result, bypass_overlay, max_itr, no_lm_reset, do_reduction, lambda0, bound_norm_thres, log_level)
1726350 | <reponame>arshadansari27/blockchain-experiment
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
import pytest
from .. import mapper_registry
@pytest.fixture(scope="session")
def db_engine():
    """Session-scoped SQLAlchemy engine against the on-disk test database.

    The schema is rebuilt (drop_all + create_all) on entry and the engine is
    disposed on teardown.
    """
    engine = create_engine("sqlite:///./app/models/tests/test.db", echo=True)
    # start every test session from a clean schema
    mapper_registry.metadata.drop_all(bind=engine)
    mapper_registry.metadata.create_all(bind=engine)
    yield engine
    engine.dispose()
@pytest.fixture(scope="session")
def db_session_factory(db_engine):
    """Session-scoped factory producing scoped SQLAlchemy sessions."""
    factory = sessionmaker(bind=db_engine)
    return scoped_session(factory)
@pytest.fixture(scope="function")
def db_session(db_session_factory):
    """Yields a SQLAlchemy session which is closed after the test.

    NOTE(review): the original docstring claimed the session is rolled back,
    but the teardown only calls close(); whether uncommitted changes are
    discarded depends on scoped_session/close semantics -- confirm intent.
    """
    session_ = db_session_factory()
    yield session_
    session_.close()
| StarcoderdataPython |
1625522 | <gh_stars>0
#!/usr/bin/env python
"""
Example script to register two volumes with VoxelMorph models.
Please make sure to use trained models appropriately. Let's say we have a model trained to register
a scan (moving) to an atlas (fixed). To register a scan to the atlas and save the warp field, run:
register.py --moving moving.nii.gz --fixed fixed.nii.gz --model model.h5
--moved moved.nii.gz --warp warp.nii.gz
The source and target input images are expected to be affinely registered.
If you use this code, please cite the following, and read function docs for further info/citations
VoxelMorph: A Learning Framework for Deformable Medical Image Registration
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
IEEE TMI: Transactions on Medical Imaging. 38(8). pp 1788-1800. 2019.
Copyright 2020 <NAME>
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is
distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing permissions and limitations under the
License.
"""
import os
import argparse

import numpy as np
import voxelmorph as vxm
import tensorflow as tf

# parse commandline args
parser = argparse.ArgumentParser()
parser.add_argument('--moving', required=True, help='moving image (source) filename')
parser.add_argument('--fixed', required=True, help='fixed image (target) filename')
parser.add_argument('--moved', required=True, help='warped image output filename')
parser.add_argument('--model', required=True, help='keras model for nonlinear registration')
parser.add_argument('--warp', help='output warp deformation filename')
parser.add_argument('-g', '--gpu', help='GPU number(s) - if not supplied, CPU is used')
parser.add_argument('--multichannel', action='store_true',
                    help='specify that data has multiple channels')
args = parser.parse_args()

# tensorflow device handling
device, nb_devices = vxm.tf.utils.setup_device(args.gpu)

# load moving and fixed images; single-channel volumes get a feature axis added
add_feat_axis = not args.multichannel
moving = vxm.py.utils.load_volfile(args.moving, add_batch_axis=True, add_feat_axis=add_feat_axis)
fixed, fixed_affine = vxm.py.utils.load_volfile(
    args.fixed, add_batch_axis=True, add_feat_axis=add_feat_axis, ret_affine=True)

inshape = moving.shape[1:-1]
nb_feats = moving.shape[-1]

with tf.device(device):
    # load model, predict the dense warp, then resample moving through it
    warp = vxm.networks.VxmDense.load(args.model).register(moving, fixed)
    moved = vxm.networks.Transform(inshape, nb_feats=nb_feats).predict([moving, warp])

# save warp
if args.warp:
    vxm.py.utils.save_volfile(warp.squeeze(), args.warp, fixed_affine)

# save moved image (with the fixed image's affine so spaces line up)
vxm.py.utils.save_volfile(moved.squeeze(), args.moved, fixed_affine)
| StarcoderdataPython |
3259200 | <reponame>jernelv/SpecAnalysis
import numpy as np
import scipy
def Der(x,y):
    """First derivative of spectral data via forward finite differences.

    Returns (x2, y2) of length n-1, where x2 are interval midpoints and
    y2 the slopes (y[i+1]-y[i])/(x[i+1]-x[i]).  Vectorized with numpy for
    speed; accepts any sequence convertible to a float array.
    """
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    x2 = 0.5 * (x[:-1] + x[1:])
    y2 = np.diff(y) / np.diff(x)
    return(x2,y2)
def Der2(x,y):
    """Second derivative of spectral data via central finite differences.

    Returns (x2, y2) of length n-2, with x2 the interior abscissae and
    y2 = (y[i] - 2*y[i+1] + y[i+2]) / dx^2.  The grid spacing dx is taken
    from the first interval and assumed constant, as in the original code.
    """
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    dx2 = (x[1] - x[0])**2  # assumed constant
    x2 = x[1:-1]
    y2 = (y[:-2] - 2.0 * y[1:-1] + y[2:]) / dx2
    return(x2,y2)
def mlr(x,y,order):
    """Multiple linear regression fit of the columns of matrix x
    (dependent variables) to constituent vector y (independent variables)

    order -     order of a smoothing polynomial, which can be included
                in the set of independent variables. If order is
                0 or less, no polynomial background is included.

    b -     fit coeffs
    f -     fit result (m x 1 column vector)
    r -     residual   (m x 1 column vector)

    Fixed: the original referenced an undefined name ``nA`` (NameError for
    order > 0) and used NumPy-alias functions (scipy.dot, scipy.ones, ...)
    that modern SciPy no longer exports; rewritten on numpy directly.
    """
    if order > 0:
        n = len(y)
        s = np.ones((n, 1))
        # append polynomial ramp columns t**j for j = 0..order-1 on a
        # uniform [0, 1] grid (same construction as the original code)
        for j in range(order):
            ramp = np.arange(0, 1 + (1.0/(n-1)) - 0.5/(n-1), 1.0/(n-1))**j
            s = np.concatenate((s, ramp[:, np.newaxis]), 1)
        X = np.concatenate((x, s), 1)
    else:
        X = x
    # normal-equation solution via pseudo-inverse: b = pinv(X'X) X' y
    b = np.dot(np.dot(np.linalg.pinv(np.dot(np.transpose(X), X)), np.transpose(X)), y)
    f = np.dot(X, b)
    r = y - f
    return b,f,r
def emsc(case, order, fit=None):
    """Extended multiplicative scatter correction

    case -  spectral data for background correction (rows = spectra)
    order - order of polynomial
    fit -   if None then use average spectrum, otherwise provide a spectrum
            as a column vector to which all others fitted

    corr -  EMSC corrected data
    mx -    fitting spectrum

    Fixed: the original referenced the undefined name ``nA`` (NameError on
    every call) and used scipy's removed NumPy-alias functions; rewritten
    with numpy (np.newaxis, np.mean, np.zeros, np.reshape).
    """
    if fit is not None:
        mx = fit
    else:
        # column vector of the mean spectrum
        mx = np.mean(case, axis=0)[:, np.newaxis]
    corr = np.zeros(case.shape)
    for i in range(len(case)):
        # fit each spectrum to the reference, then rescale the residual by
        # the multiplicative coefficient and add the reference back
        b, f, r = mlr(mx, case[i, :][:, np.newaxis], order)
        corr[i, :] = np.reshape((r / b[0, 0]) + mx, (corr.shape[1],))
    corr = np.nan_to_num(corr)
    return corr
def baseline_corr(case):
    """Baseline correction that sets the first independent variable of each
    spectrum to zero, by subtracting it from every point of that spectrum.

    Rewritten with numpy broadcasting (the scipy.transpose/scipy.resize
    aliases used before are removed from modern SciPy).
    """
    case = np.asarray(case)
    return case - case[:, 0][:, np.newaxis]
def baseline_avg(case):
    """Baseline correction that subtracts the average of the first and last
    independent variable of each spectrum from that spectrum.

    BUG FIX: the original computed ``case[:size[1]-1]`` -- a slice of ROWS --
    where the last COLUMN ``case[:, size[1]-1]`` was clearly intended (the
    docstring says "first and last independent variable").
    """
    case = np.asarray(case)
    ends_mean = 0.5 * (case[:, 0] + case[:, -1])
    return case - ends_mean[:, np.newaxis]
def baseline_linear(case):
    """Baseline correction that subtracts a linearly increasing baseline
    between the first and last independent variable of each spectrum.

    BUG FIX: the original called ``scipy.arrange`` (a typo for ``arange``,
    AttributeError on every call); also moved off scipy's removed NumPy
    aliases onto numpy directly.  Structure otherwise preserved.
    """
    case = np.asarray(case, dtype=float)
    size, t = case.shape, 0
    subtract = np.zeros((size[0], size[1]), 'd')
    while t < size[0]:
        a = case[t, 0]
        b = case[t, size[1]-1]
        div = (b - a) / size[1]
        if div == 0:
            div = 1  # avoid a zero step when the endpoints coincide
        arr = np.arange(a, b, div, 'd')
        subtract[t, :] = np.resize(arr, (size[1],))
        t = t + 1
    return case - subtract
| StarcoderdataPython |
4834728 | # -*- coding: utf-8 -*-
"""
plastiqpublicapi
This file was automatically generated by APIMATIC v3.0 (
https://www.apimatic.io ).
"""
from plastiqpublicapi.models.address import Address
class Card(object):

    """Implementation of the 'Card' model.

    Represents a debit or credit card together with its billing address.

    Attributes:
        card_holder_name (string): Name printed on the card.
        account_number (string): Card number.
        cvv (string): 3-4 digit security code.
        expiration_month (string): Expiry month.
        expiration_year (string): Expiry year.
        billing_address (Address): Billing address of the card holder.

    """

    # Maps Python attribute names to the JSON property names used by the API.
    _names = {
        "card_holder_name": 'cardHolderName',
        "account_number": 'accountNumber',
        "cvv": 'cvv',
        "expiration_month": 'expirationMonth',
        "expiration_year": 'expirationYear',
        "billing_address": 'billingAddress'
    }

    def __init__(self,
                 card_holder_name=None,
                 account_number=None,
                 cvv=None,
                 expiration_month=None,
                 expiration_year=None,
                 billing_address=None):
        """Constructor for the Card class"""
        self.card_holder_name = card_holder_name
        self.account_number = account_number
        self.cvv = cvv
        self.expiration_month = expiration_month
        self.expiration_year = expiration_year
        self.billing_address = billing_address

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Build a Card from a deserialized API response dictionary.

        Args:
            dictionary (dictionary): Mapping whose keys match the API
                property names (see ``_names`` values).

        Returns:
            object: A Card instance, or None when *dictionary* is None.

        """
        if dictionary is None:
            return None

        # Nested billing address is deserialized through the Address model.
        raw_address = dictionary.get('billingAddress')
        address = Address.from_dictionary(raw_address) if raw_address else None

        return cls(dictionary.get('cardHolderName'),
                   dictionary.get('accountNumber'),
                   dictionary.get('cvv'),
                   dictionary.get('expirationMonth'),
                   dictionary.get('expirationYear'),
                   address)
1747476 | <gh_stars>0
#
# Simple Python Extension
# v 1.0.0
#
def init():
    # Entry hook invoked by the embedding server when the extension loads.
    # NOTE(review): '_server' is not defined in this file -- presumably it is
    # injected into the module namespace by the host; confirm.
    global db
    global cmdMap
    # Dispatch table mapping client command names to handler functions.
    cmdMap = {"getData": handleGetData}
    db = _server.getDatabaseManager()
def destroy():
    # Teardown hook invoked by the host server when the extension unloads.
    _server.trace( "Python extension dying" )
def handleRequest(cmd, params, who, roomId, protocol):
    """Route an incoming client request to its registered handler.

    Only XML-protocol requests are dispatched; anything else is ignored.
    """
    if protocol == "xml":
        # 'in' replaces dict.has_key(), which was removed in Python 3; the
        # containment test is equivalent and also works on Python 2.
        if cmd in cmdMap:
            cmdMap[cmd](params, who, roomId)
def handleInternalEvent(evt):
    """Log internal server events; no other handling is performed."""
    # The original assigned getEventName() to an unused local and then called
    # it a second time for the trace; one call is sufficient.
    _server.trace( "Received internal event: " + evt.getEventName() )
def handleGetData(params, who, roomId):
    # Handler for the "getData" command: return the whole contacts table,
    # sorted by name, to the requesting client only.
    sql = "SELECT * FROM contacts ORDER BY name"
    queryRes = db.executeQuery(sql)
    response = {}
    response["_cmd"] = "getData"
    response["db"] = {}
    if (queryRes != None) and (queryRes.size() > 0):
        # Rows are keyed by a running integer index in the response map.
        c = 0
        for row in queryRes:
            item = {}
            item["name"] = row.getItem("name")
            item["location"] = row.getItem("location")
            item["email"] = row.getItem("email")
            response["db"][c] = item
            c += 1
    else:
        # Query failed or returned nothing: log it and send an empty payload.
        _server.trace("QUERY FAILED")
    # NOTE(review): -1 / None here look like "no error" / "no room" markers
    # of the host sendResponse API -- confirm against its documentation.
    _server.sendResponse(response, -1, None, [who])
| StarcoderdataPython |
4833824 | <filename>faa_actuation/nodes/actuation_server.py
#!/usr/bin/env python
import roslib; roslib.load_manifest('faa_actuation')
import rospy
import copy
import threading
import time
from faa_actuation import Actuation
from faa_actuation.srv import UpdateLed, UpdateLedResponse
from faa_actuation.srv import UpdateGate, UpdateGateResponse
from faa_actuation.srv import SetOdorValveOn, SetOdorValveOnResponse
from faa_actuation.srv import SetOdorValvesOff, SetOdorValvesOffResponse
from faa_actuation.srv import SetMfcFlowRate, SetMfcFlowRateResponse
from faa_actuation.srv import GetMfcFlowRateSetting, GetMfcFlowRateSettingResponse
from faa_actuation.srv import GetActuationInfo, GetActuationInfoResponse
from faa_actuation.srv import GetPwmControllerInfo, GetPwmControllerInfoResponse
from faa_actuation.srv import SetPwmControllerValue, SetPwmControllerValueResponse
from faa_actuation.srv import StartCurrentControllerPwm, StartCurrentControllerPwmResponse
from faa_actuation.srv import StopCurrentControllerPwm, StopCurrentControllerPwmResponse
from faa_actuation.msg import TunnelState
from faa_actuation.msg import ActuationState
class ActuationServer(object):
    def __init__(self):
        # Bring up the ROS node, connect to hardware if configured, build the
        # initial gate-state model, and advertise all actuation services.
        rospy.init_node('faa_actuation')
        rospy.set_param('/faa_actuation/initialized',False)
        self.hardware = rospy.get_param('/faa_actuation/faa_actuation/hardware')
        if self.hardware:
            rospy.loginfo("usb hardware mode")
            self.actuation = Actuation()
        else:
            rospy.loginfo("no usb hardware test mode")
            # NOTE(review): fixed sleep apparently simulates hardware startup
            # latency in test mode -- confirm; self.actuation is NOT set on
            # this branch (guarded everywhere by self.hardware).
            time.sleep(10)
        rospy.set_param('/faa_actuation/initialized',True)

        # Lock guards actuation_state, which is mutated and published from
        # service callbacks.
        self.lock = threading.Lock()

        # Fixed rig geometry: 6 tunnels, 3 gates per tunnel, all gates closed.
        self.tunnel_count = 6
        self.gate_count = 3
        self.gates_state = ['close']*self.gate_count
        self.tunnel_state = TunnelState()
        self.tunnel_state.gates_state = copy.deepcopy(self.gates_state)
        self.actuation_state = ActuationState()
        for tunnel in range(self.tunnel_count):
            self.actuation_state.tunnels_state.append(copy.deepcopy(self.tunnel_state))

        # Advertise the actuation services.
        self.update_led = rospy.Service('update_led', UpdateLed, self.handle_update_led)
        self.update_gate = rospy.Service('update_gate', UpdateGate, self.handle_update_gate)
        self.get_pwm_controller_info = rospy.Service('get_pwm_controller_info', GetPwmControllerInfo, self.handle_get_pwm_controller_info)
        self.set_pwm_controller_value = rospy.Service('set_pwm_controller_value', SetPwmControllerValue, self.handle_set_pwm_controller_value)
        self.set_odor_valve_on = rospy.Service('set_odor_valve_on', SetOdorValveOn, self.handle_set_odor_valve_on)
        self.set_odor_valves_off = rospy.Service('set_odor_valves_off', SetOdorValvesOff, self.handle_set_odor_valves_off)
        self.set_mfc_flow_rate = rospy.Service('set_mfc_flow_rate', SetMfcFlowRate, self.handle_set_mfc_flow_rate)
        self.get_mfc_flow_rate_setting = rospy.Service('get_mfc_flow_rate_setting', GetMfcFlowRateSetting, self.handle_get_mfc_flow_rate_setting)
        self.state_pub = rospy.Publisher('actuation_state', ActuationState)
        # while not rospy.is_shutdown():
        #     self.lock.acquire()
        #     self.state_pub.publish(self.actuation_state)
        #     self.lock.release()
        #     rospy.sleep(0.25)
        # Publish the initial all-closed state once.
        self.update_gate_actuation_state('all','all','close')
        self.start_current_controller_pwm = rospy.Service('start_current_controller_pwm', StartCurrentControllerPwm, self.handle_start_current_controller_pwm)
        self.stop_current_controller_pwm = rospy.Service('stop_current_controller_pwm', StopCurrentControllerPwm, self.handle_stop_current_controller_pwm)
        self.main()
def update_gate_actuation_state(self, tunnel, gate, angle):
    """Record *angle* for the selected gate(s) in the cached actuation state
    and publish the updated state.

    :param tunnel: tunnel index, or the string ``'all'`` for every tunnel
    :param gate: gate index, or the string ``'all'`` for every gate
    :param angle: gate state to record (stored lower-cased, e.g. 'open'/'close')
    """
    # Hold the lock via a context manager so it is released even if a bad
    # tunnel/gate value makes int() raise.  The original acquire()/release()
    # pair leaked the lock on any exception, deadlocking every later call.
    with self.lock:
        tunnel = str(tunnel).lower()
        gate = str(gate).lower()
        angle = str(angle).lower()
        # 'all' expands to every index; otherwise a single parsed index.
        tunnels = range(self.tunnel_count) if tunnel == 'all' else [int(tunnel)]
        gates = range(self.gate_count) if gate == 'all' else [int(gate)]
        for t in tunnels:
            for g in gates:
                self.actuation_state.tunnels_state[t].gates_state[g] = angle
        # Publish while still holding the lock, as the original did.
        self.state_pub.publish(self.actuation_state)
def handle_update_led(self, req):
    """Service handler: set an LED's duty cycle via the PWM controller."""
    rospy.loginfo('updateLed: tunnel={0},led={1},duty_cycle={2}'.format(req.tunnel,req.led,req.duty_cycle))
    # In no-hardware test mode self.actuation does not exist, so only touch
    # it behind the hardware flag.
    if self.hardware:
        controller = self.actuation.pwm_controller
        if controller is not None:
            controller.updateLed(req.tunnel, req.led, req.duty_cycle)
    return UpdateLedResponse("success")
def handle_update_gate(self, req):
    """Service handler: move one or more gates and mirror the change in the
    published actuation state."""
    rospy.loginfo('updateGate: tunnel={0},gate={1},angle={2}'.format(req.tunnel,req.gate,req.angle))
    # The cached state is updated even when no hardware is attached.
    self.update_gate_actuation_state(req.tunnel, req.gate, req.angle)
    if self.hardware:
        controller = self.actuation.pwm_controller
        if controller is not None:
            controller.updateGate(req.tunnel, req.gate, req.angle)
    return UpdateGateResponse("success")
def handle_get_pwm_controller_info(self, req):
    """Service handler: report the gate servo angles and LED duty cycle,
    falling back to fixed simulated values when no hardware is present."""
    # Simulated defaults, overwritten below if real device info is available.
    gate_open_servo_angle = 15
    gate_close_servo_angle = 35
    led_on_duty_cycle = 50
    if self.hardware and self.actuation.pwm_controller is not None:
        dev_info = self.actuation.pwm_controller.getDevInfo()
        gate_open_servo_angle = dev_info['gate_open_servo_angle']
        gate_close_servo_angle = dev_info['gate_close_servo_angle']
        led_on_duty_cycle = dev_info['led_on_duty_cycle']
    return GetPwmControllerInfoResponse(gate_open_servo_angle=gate_open_servo_angle,
                                        gate_close_servo_angle=gate_close_servo_angle,
                                        led_on_duty_cycle=led_on_duty_cycle)
def handle_set_pwm_controller_value(self, req):
    """Service handler: set one named PWM controller setting
    ('open'/'close' servo angles or LED 'on' duty cycle)."""
    if self.hardware and self.actuation.pwm_controller is not None:
        controller = self.actuation.pwm_controller
        # Dispatch table replaces the if/elif chain; unknown names are a no-op,
        # matching the original's silent else branch.
        setters = {
            'open': controller.setGateOpenServoAngle,
            'close': controller.setGateCloseServoAngle,
            'on': controller.setLedOnDutyCycle,
        }
        setter = setters.get(req.value_name.lower())
        if setter is not None:
            setter(req.value_amount)
    return SetPwmControllerValueResponse("success")
def handle_set_odor_valve_on(self, req):
    """Service handler: open a single odor valve on one olfactometer.

    Returns "failure" when the device index is out of range.
    """
    rospy.loginfo('setOdorValveOn: device={0},valve={1}'.format(req.device,req.valve))
    outcome = "success"
    if self.hardware and self.actuation.olfactometers is not None:
        try:
            self.actuation.olfactometers[req.device].setOdorValveOn(req.valve)
        except IndexError:
            outcome = "failure"
    return SetOdorValveOnResponse(outcome)
def handle_set_odor_valves_off(self, req):
    """Service handler: close every odor valve on one olfactometer.

    Returns "failure" when the device index is out of range.
    """
    rospy.loginfo('setOdorValvesOff: device={0}'.format(req.device))
    outcome = "success"
    if self.hardware and self.actuation.olfactometers is not None:
        try:
            self.actuation.olfactometers[req.device].setOdorValvesOff()
        except IndexError:
            outcome = "failure"
    return SetOdorValvesOffResponse(outcome)
def handle_set_mfc_flow_rate(self, req):
    """Service handler: set a mass-flow-controller rate and remember the
    requested value in the ROS parameter server."""
    rospy.loginfo('setMfcFlowRate: device={0},mfc={1},percent_capacity={2}'.format(req.device,req.mfc,req.percent_capacity))
    # The parameter is persisted before (and regardless of) any hardware call,
    # exactly as in the original ordering.
    rospy.set_param('mfc_flow_rates/percent_capacity_device{0}_mfc{1}'.format(req.device,req.mfc),req.percent_capacity)
    outcome = "success"
    if self.hardware and self.actuation.olfactometers is not None:
        try:
            self.actuation.olfactometers[req.device].setMfcFlowRate(req.mfc, req.percent_capacity)
        except IndexError:
            outcome = "failure"
    return SetMfcFlowRateResponse(outcome)
def handle_get_mfc_flow_rate_setting(self, req):
    """Service handler: read back an MFC flow rate setting, cache it in the
    parameter server, and return it."""
    # Simulated reading used both in no-hardware mode and when the device
    # index is out of range (same formula appeared twice in the original).
    simulated = (req.device + 10) + 2 * (req.mfc + 2)
    if self.hardware and self.actuation.olfactometers is not None:
        try:
            percent_capacity = self.actuation.olfactometers[req.device].getMfcFlowRateSetting(req.mfc)
        except IndexError:
            percent_capacity = simulated
    else:
        percent_capacity = simulated
    rospy.loginfo('updateMfcFlowRateSettings: device={0},mfc={1},percent_capacity={2}'.format(req.device,req.mfc,percent_capacity))
    rospy.set_param('mfc_flow_rates/percent_capacity_device{0}_mfc{1}'.format(req.device,req.mfc),percent_capacity)
    return GetMfcFlowRateSettingResponse(percent_capacity)
def handle_get_actuation_info(self,req):
    """Placeholder handler: always returns an empty info response."""
    # NOTE(review): no rospy.Service for this handler is registered in the
    # visible __init__, so it appears unused — confirm before relying on it.
    if self.hardware:
        pass
    return GetActuationInfoResponse("")
def handle_start_current_controller_pwm(self,req):
    """Service handler: start PWM on all current-controller channels with the
    requested capacity and on/off durations."""
    if self.hardware and self.actuation.current_controller is not None:
        self.actuation.current_controller.startPwmAll(req.percent_capacity,req.duration_on,req.duration_off)
    return StartCurrentControllerPwmResponse("success")
def handle_stop_current_controller_pwm(self,req):
    """Service handler: stop PWM on all current-controller channels."""
    if self.hardware and self.actuation.current_controller is not None:
        self.actuation.current_controller.stopPwmAll()
    return StopCurrentControllerPwmResponse("success")
def main(self):
    """Block servicing ROS callbacks until the node is shut down."""
    rospy.spin()
if __name__ == "__main__":
    # Constructing the server registers all services and enters rospy.spin().
    actuation_server = ActuationServer()
| StarcoderdataPython |
3287083 | <gh_stars>100-1000
import unittest
import win32com.client
import win32com.test.util
import win32com.server.util
class Tester:
    """Minimal COM server object exposing one method; argument conversion is
    performed by pywin32 before TestValue is ever reached."""

    # Names pywin32 exposes through the COM wrapper.
    _public_methods_ = ["TestValue"]

    def TestValue(self, v):
        """Accept any value and discard it (returns None)."""
        return None
def test_ob():
    """Return a COM Dispatch object wrapping a fresh Tester instance."""
    return win32com.client.Dispatch(win32com.server.util.wrap(Tester()))
class TestException(Exception):
    """Marker exception raised by BadConversions.__float__; the test checks
    it surfaces unchanged through the COM call."""
    pass
# The object we try and pass - pywin32 will call __float__ as a last resort.
class BadConversions:
    """Object whose float() conversion always fails with TestException."""
    def __float__(self):
        raise TestException()
class TestCase(win32com.test.util.TestCase):
    def test_float(self):
        """Passing an object whose __float__ raises must surface TestException
        through the COM boundary."""
        try:
            test_ob().TestValue(BadConversions())
        except TestException:
            pass  # expected: the conversion failure propagated intact
        else:
            # Original code raised this inside the try block, so it was caught
            # by its own `except Exception` handler and misreported as an
            # isinstance assertion failure; the else clause keeps the failure
            # mode honest.  Any other exception type now propagates directly.
            raise Exception("Should not have worked")
if __name__ == "__main__":
    # Run the COM conversion test standalone.
    unittest.main()
| StarcoderdataPython |
1624820 | # listener.py
import asyncio
from typing import List, Union
class EventListener:
    """Base class for bot event listeners.

    Subclasses define coroutine methods named after incoming commands;
    processEvent dispatches each event dict to the matching method once the
    server has registered itself.
    """

    def __init__(self, bot):
        self.bot = bot
        self.log = bot.log
        self.pool = bot.pool
        self.config = bot.config
        # Nothing but 'registerDCSServer' is acted on until this flips True.
        self.registered = False
        self.loop = asyncio.get_event_loop()

    def registeredEvents(self) -> List[str]:
        """List every public name defined beyond the EventListener base class.

        Note this includes public instance attributes as well as subclass
        methods, since it diffs dir(self) against dir(EventListener).
        """
        base_names = dir(EventListener)
        return [name for name in dir(self)
                if name not in base_names and not name.startswith('_')]

    async def processEvent(self, data: dict[str, Union[str, int]]) -> None:
        """Dispatch *data* to the handler named after its 'command', if any."""
        command = data['command']
        # ignore any other events until the server is registered
        if command == 'registerDCSServer':
            self.registered = True
        if not self.registered or command not in self.registeredEvents():
            return None
        return await getattr(self, command)(data)
| StarcoderdataPython |
3392415 | <gh_stars>1-10
import logging
import os
from pathlib import Path
from time import sleep
from dotenv import load_dotenv
from twython import Twython, TwythonError
# --- logging and credential setup -------------------------------------------
logging.basicConfig(format="{asctime} : {levelname} : {message}", style="{")
logger = logging.getLogger("tweet_followers")
logger.setLevel(logging.DEBUG)
# Outside production, credentials are loaded from a .env file one directory up.
IS_PROD = os.getenv("IS_PROD", default=None)
if IS_PROD is None:
    env_path = Path.cwd().parent / ".env"
    if env_path.exists():
        load_dotenv(dotenv_path=env_path)
    else:
        raise OSError(f"{env_path} not found. Did you set it up?")
APP_KEY = os.getenv("API_KEY", default="")
APP_SECRET = os.getenv("API_SECRET", default="")
OAUTH_TOKEN = os.getenv("ACCESS_TOKEN", default="")
OAUTH_TOKEN_SECRET = os.getenv("ACCESS_TOKEN_SECRET", default="")
MY_SCREEN_NAME = os.getenv("MY_SCREEN_NAME", default="haikuincidence")
# Uses OAuth1 ("user auth") for authentication
twitter = Twython(
    app_key=APP_KEY,
    app_secret=APP_SECRET,
    oauth_token=OAUTH_TOKEN,
    oauth_token_secret=OAUTH_TOKEN_SECRET,
)
# https://twython.readthedocs.io/en/latest/api.html
# get the screen names I follow
# can only make 15 requests in a 15-minute window (1 per minute)
# https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/get-friends-list
i_follow = []
cursor_if = -1  # -1 asks the API for the first page
sleep_seconds = 60  # stay under the 1-request-per-minute rate limit
counter = 1
while True:
    logger.info(f"Query {counter}, I follow cursor: {cursor_if}")
    result = twitter.get_friends_list(
        screen_name=MY_SCREEN_NAME, count=200, skip_status="true", cursor=cursor_if
    )
    # An empty page means pagination is exhausted.
    if len(result["users"]) == 0:
        break
    else:
        counter += 1
        user_list = [user["screen_name"] for user in result["users"]]
        i_follow.extend(user_list)
        cursor_if = result["next_cursor"]
        logger.info(f"Added {len(user_list)} users who I follow (total: {len(i_follow)})")
    # # find the screen names with notifications turned on
    # user_list_notif = [user['screen_name'] for user in ifollow['users'] if user['notifications']]
    # logger.info(f'Found {len(user_list_notif)} users with notifications turned on who I follow')
    # i_follow.extend(user_list_notif)
    logger.info(f"Sleeping for {sleep_seconds} seconds")
    sleep(sleep_seconds)
# De-duplicate and drop falsy entries.
i_follow = list(set([sn for sn in i_follow if sn]))
logger.info(f"Found {len(i_follow)} users who I follow")
# can only make 15 requests in a 15-minute window (1 per minute)
# https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/get-followers-list
follows_me = []
cursor_fm = -1
sleep_seconds = 60
counter = 1
while True:
logger.info(f"Query {counter}, Follow me cursor: {cursor_fm}")
result = twitter.get_followers_list(
screen_name=MY_SCREEN_NAME, count=200, skip_status="true", cursor=cursor_fm
)
if len(result["users"]) == 0:
break
else:
counter += 1
user_list = [user["screen_name"] for user in result["users"]]
follows_me.extend(user_list)
cursor_fm = result["next_cursor"]
logger.info(f"Added {len(user_list)} users who follow me (total: {len(follows_me)})")
logger.info(f"Sleeping for {sleep_seconds} seconds")
sleep(sleep_seconds)
follows_me = list(set([sn for sn in follows_me if sn]))
logger.info(f"Found {len(follows_me)} users who follow me")
# unfollow people I follow who do not follow me, to make room for following more new poets
# https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/post-friendships-destroy
unfollowed = []
does_not_follow_me = set(i_follow) - set(follows_me)
to_unfollow = list(set([sn for sn in does_not_follow_me if (sn not in unfollowed)]))
sleep_seconds = 0.1  # brief pause between destroy_friendship calls
for sn in to_unfollow:
    if sn:
        try:
            result = twitter.destroy_friendship(screen_name=sn)
            unfollowed.append(sn)
            logger.info(f"unfollowed {len(unfollowed)} / {len(to_unfollow)}: {sn}")
        except TwythonError as e:
            # Best-effort: log and continue with the next account.
            logger.info(f"exception for {sn}: {e}")
        # logger.info(f'Sleeping for {sleep_seconds} seconds')
        sleep(sleep_seconds)
logger.info(f"Unfollowed {len(unfollowed)} users who do not follow me")
# get the screen names I have replied to
# with user auth, can only make 900 requests in a 15-minute window (60 per minute)
# if instead was using app auth, could make 1500 requests in a 15-minute window (100 per minute)
# Twitter's API limits this to the most recent 3200 tweets, there's no way around this limit
# https://developer.twitter.com/en/docs/tweets/timelines/api-reference/get-statuses-user_timeline
poets = []
mytweets = twitter.get_user_timeline(
    screen_name=MY_SCREEN_NAME, count=200, exclude_replies="false", include_rts="true"
)
poets.extend([tweet["in_reply_to_screen_name"] for tweet in mytweets])
# Guard: an account with no tweets returns an empty page; the original
# mytweets[-1] would raise IndexError here.
if mytweets:
    max_id = mytweets[-1]["id_str"]
    sleep_seconds = 1.1  # stay under the per-minute timeline rate limit
    counter = 1
    # Page backwards through the timeline; terminate when the oldest id stops
    # moving (max_id is inclusive, so the final page repeats its last tweet).
    while True:
        logger.info(f"Query {counter}, Tweet max id: {max_id}")
        mytweets = twitter.get_user_timeline(
            screen_name=MY_SCREEN_NAME,
            count=200,
            exclude_replies="false",
            include_rts="true",
            max_id=max_id,
        )
        if not mytweets:
            # Defensive: an empty page would previously crash on mytweets[-1].
            break
        max_id_next = mytweets[-1]["id_str"]
        if max_id_next == max_id:
            break
        else:
            max_id = max_id_next
            counter += 1
            user_list = [tweet["in_reply_to_screen_name"] for tweet in mytweets]
            poets.extend(user_list)
            logger.info(f"Added {len(user_list)} users who I have replied to (total: {len(poets)})")
        logger.info(f"Sleeping for {sleep_seconds} seconds")
        sleep(sleep_seconds)
# De-duplicate; drop None entries (tweets that were not replies).
poets = list(set([sn for sn in poets if sn]))
logger.info(f"Found {len(poets)} poets who I have replied to")
# follow accounts I have replied to but not followed
# can only make 400 requests in a 24-hour window,
# also seems to require waiting a bit between requests
# https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/post-friendships-create
# update notification settings
# https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/post-friendships-update
followed = []  # don't overwrite this
do_not_follow = []  # don't overwrite this
# API error substrings that mean "stop the whole run" ...
stop_reasons = [
    "You are unable to follow more people at this time",
]
# ... versus "skip this account permanently".
exclude_reasons = [
    "Cannot find specified user",
    "You have been blocked from following",
]
sleep_seconds = 5
to_follow = list(
    set(
        [
            sn
            for sn in poets
            if sn and (sn not in i_follow) and (sn not in followed) and (sn not in do_not_follow)
        ]
    )
)
logger.info(f"Will attempt to follow {len(to_follow)} poets")
counter = 0
for sn in to_follow:
    if sn:
        try:
            # follow='false' means don't turn on notifications
            result = twitter.create_friendship(screen_name=sn, follow="false")
            followed.append(sn)
            counter += 1
            logger.info(f"followed {counter} / {len(to_follow)}: {sn}")
            # device='false' means turn off notifications
            # result = twitter.update_friendship(screen_name=sn, device='false')
            # logger.info(f'updated {counter} / {len(to_follow)}: {sn}')
        except TwythonError as e:
            logger.info(f"exception for {sn}: {e}")
            # remove the screenname from the list if it matches a valid reason
            if any([reason in str(e) for reason in exclude_reasons]):
                do_not_follow.append(sn)
                logger.info(f"Adding {sn} to do not follow list")
            elif any([reason in str(e) for reason in stop_reasons]):
                logger.info("Hit rate limit. Stopping.")
                break
        logger.info(f"Sleeping for {sleep_seconds} seconds")
        sleep(sleep_seconds)
logger.info(f"Followed {len(followed)} users who I have replied to")
| StarcoderdataPython |
3296542 | <reponame>lifehackjim/tantrum
# -*- coding: utf-8 -*-
"""Exceptions and warnings for :mod:`tantrum.api_clients`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .. import exceptions
class ModuleError(exceptions.PackageError):
    """Parent of all exceptions for :mod:`tantrum.api_clients`."""

    # Catch this to handle any api_clients-specific error.
    pass
class ModuleWarning(exceptions.PackageWarning):
    """Parent of all warnings for :mod:`tantrum.api_clients`."""

    # Filter on this category to manage any api_clients-specific warning.
    pass
class GetPlatformVersionWarning(ModuleWarning):
    """Thrown when an issue happens while trying to get the platform version."""

    # NOTE(review): this is a warning category — presumably issued via
    # warnings.warn at the version-probing call sites; confirm there.
    pass
| StarcoderdataPython |
1610085 | <reponame>splunkenizer/splunk_as_a_service_app
import os
import sys
bin_path = os.path.join(os.path.dirname(__file__))
if bin_path not in sys.path:
sys.path.insert(0, bin_path)
import json
import fix_path
from base_handler import BaseRestHandler
import json
from urllib.parse import parse_qs
import splunklib
import time
import stack_operation
import services
import clusters
import instances
# Lifecycle states stored in each stack record's "status" field.
CREATING = "Creating"
CREATED = "Created"
UPDATING = "Updating"
DELETING = "Deleting"
DELETED = "Deleted"
def get_stacks(splunk):
    """Return the KV store collection data object that holds stack records."""
    stacks_collection = splunk.kvstore["stacks"]
    return stacks_collection.data
def stack_exists(splunk, stack_id):
    """Return True if a stack record with *stack_id* exists in the KV store.

    A 404 from the KV store means "no such stack"; any other HTTP error is
    re-raised.
    """
    try:
        get_stack_config(splunk, stack_id)
    except splunklib.binding.HTTPError as err:
        if err.status != 404:
            raise
        return False
    return True
def get_stack_config(splunk, stack_id):
    """Fetch the stored configuration record for *stack_id* (raises on 404)."""
    return get_stacks(splunk).query_by_id(stack_id)
def update_config(splunk, stack_id, updates):
    """Merge *updates* into the stored stack record and persist it."""
    stacks = get_stacks(splunk)
    record = stacks.query_by_id(stack_id)
    record.update(updates)
    # KV store expects the full JSON-serialized record on update.
    stacks.update(stack_id, json.dumps(record))
def trigger_updating_stack(splunk, stack_id, updates=None):
    """Mark a stack as UPDATING, optionally merging extra field updates, and
    kick off the stack operator.

    :param updates: optional dict of stack fields to persist alongside the
        status change; the caller's dict is no longer mutated.
    :raises Exception: if the stack is not in a state that allows updates.
    """
    stack_config = get_stack_config(splunk, stack_id)
    status = stack_config["status"]
    if status != CREATED and status != UPDATING:
        raise Exception("cannot update stack with status %s" % status)
    # Copy before adding "status": the original wrote into the caller's dict
    # as a hidden side effect.
    updates = dict(updates) if updates else {}
    updates["status"] = UPDATING
    update_config(splunk, stack_id, updates)
    stack_operation.trigger(splunk, stack_id)
class StacksHandler(BaseRestHandler):
    """REST handler for the stack collection: list stacks (GET) and create a
    new stack (POST)."""

    def handle_GET(self):
        """List stacks, filterable by lifecycle phase, cluster, and (for
        deleted stacks) a deletion-time window."""
        stacks = get_stacks(self.splunk)
        request_query = self.request['query']
        phase = request_query.get("phase", "living")
        cluster = request_query.get("cluster", "*")
        # Default window: stacks deleted within the last 30 days.
        deleted_after = int(request_query.get("deleted_after", time.time() - 60 * 60 * 24 * 30))
        deleted_before = int(request_query.get("deleted_before", time.time()))
        # Build a MongoDB-style KV store query.
        query = {}
        if phase == "living":
            query["status"] = {"$ne": DELETED}
        elif phase == "deleted":
            query["status"] = DELETED
            query["$and"] = [
                {"deleted_time": {"$gt": deleted_after}},
                {"deleted_time": {"$lt": deleted_before}}
            ]
        if cluster and cluster != "*":
            query["cluster"] = cluster
        query = stacks.query(query=json.dumps(query))
        def map(d):
            # NOTE(review): "map" shadows the builtin; consider renaming.
            return {
                "id": d["_key"],
                "status": d["status"],
                "title": d["title"] if "title" in d else "",
                "cluster": d["cluster"],
            }
        self.send_entries([map(d) for d in query])

    def handle_POST(self):
        """Create a stack record from request parameters, app defaults and
        cluster config (in that precedence order), then start the operator
        and return the new stack's ID."""
        stacks = get_stacks(self.splunk)
        defaults = self.splunk.confs["defaults"]["general"]
        # create stack record
        stack_record = {
            "status": CREATING,
        }
        # Fields accepted from the request / defaults / cluster config.
        fields_names = set([
            "deployment_type",
            "license_master_mode",
            "enterprise_license",
            "indexer_count",
            "search_head_count",
            "cpu_per_instance",
            "memory_per_instance",
            "title",
            "data_fabric_search",
            "spark_worker_count",
            "cluster",
            "namespace",
            "etc_storage_in_gb",
            "other_var_storage_in_gb",
            "indexer_var_storage_in_gb",
        ])
        # apply request parameters
        request_params = parse_qs(self.request['payload'])
        stack_record.update({
            k: request_params[k][0]
            for k in fields_names if k in request_params
        })
        # apply missing fields from defaults
        stack_record.update({
            k: defaults[k]
            for k in fields_names if k in defaults and k not in stack_record
        })
        # apply missing fields from cluster config
        cluster_name = stack_record["cluster"]
        cluster_config = clusters.get_cluster(
            self.service, cluster_name)
        stack_record.update(
            {
                k: cluster_config[k]
                for k in fields_names if k in cluster_config and k not in stack_record
            }
        )
        # add missing fields (hard-coded fallbacks of last resort)
        if "data_fabric_search" not in stack_record:
            stack_record["data_fabric_search"] = "false"
        if "spark_worker_count" not in stack_record:
            stack_record["spark_worker_count"] = "0"
        if "cpu_per_instance" not in stack_record:
            stack_record["cpu_per_instance"] = "1"
        if "memory_per_instance" not in stack_record:
            stack_record["memory_per_instance"] = "4Gi"
        # save stack
        stack_id = stacks.insert(json.dumps(stack_record))["_key"]
        # start operator
        stack_operation.start(self.service, stack_id)
        # return ID
        self.send_result({
            "stack_id": stack_id,
        })
class StackHandler(BaseRestHandler):
    """REST handler for one stack: report status/endpoints (GET), apply
    updates (POST) and delete it (DELETE)."""

    def handle_GET(self):
        """Report the stack's configuration plus the endpoint and admin
        password of every role that already has load-balancer hosts.

        Fix: the dataset-redacted `<PASSWORD>` placeholders (which made this
        method a syntax error) are restored to the `admin_password` value
        fetched immediately above each use.
        """
        path = self.request['path']
        _, stack_id = os.path.split(path)
        stack_config = get_stack_config(self.splunk, stack_id)
        result = {
            "status": stack_config["status"],
            "title": stack_config["title"] if "title" in stack_config else "",
            "deployment_type": stack_config["deployment_type"],
            "license_master_mode": stack_config["license_master_mode"],
            "cluster": stack_config["cluster"],
            "namespace": stack_config["namespace"],
        }
        if stack_config["deployment_type"] == "distributed":
            result["indexer_count"] = stack_config["indexer_count"]
            result["search_head_count"] = stack_config["search_head_count"]
        api_client = clusters.create_client(
            self.service, stack_config["cluster"])
        # Local import keeps the kubernetes dependency out of module load time.
        from kubernetes import client as kubernetes
        core_api = kubernetes.CoreV1Api(api_client)
        hosts = services.get_load_balancer_hosts(
            core_api, stack_id, services.search_head_role, stack_config["namespace"])
        if hosts:
            admin_password = instances.get_admin_password(core_api, stack_id, stack_config, services.search_head_role)
            result.update({
                "search_head_endpoint": ["http://%s" % hostname for hostname in hosts],
                "search_head_password": admin_password,
            })
        # License master endpoint only exists when it is deployed locally.
        if stack_config["license_master_mode"] == "local":
            hosts = services.get_load_balancer_hosts(
                core_api, stack_id, services.license_master_role, stack_config["namespace"])
            if hosts:
                admin_password = instances.get_admin_password(core_api, stack_id, stack_config, services.license_master_role)
                result.update({
                    "license_master_endpoint": ["http://%s" % hostname for hostname in hosts],
                    "license_master_password": admin_password,
                })
        hosts = services.get_load_balancer_hosts(
            core_api, stack_id, services.cluster_master_role, stack_config["namespace"])
        if hosts:
            admin_password = instances.get_admin_password(core_api, stack_id, stack_config, services.cluster_master_role)
            result.update({
                "cluster_master_endpoint": ["http://%s" % hostname for hostname in hosts],
                "cluster_master_password": admin_password,
            })
        hosts = services.get_load_balancer_hosts(
            core_api, stack_id, services.deployer_role, stack_config["namespace"])
        if hosts:
            admin_password = instances.get_admin_password(core_api, stack_id, stack_config, services.deployer_role)
            result.update({
                "deployer_endpoint": ["http://%s" % hostname for hostname in hosts],
                "deployer_password": admin_password,
            })
        hosts = services.get_load_balancer_hosts(
            core_api, stack_id, services.standalone_role, stack_config["namespace"])
        if hosts:
            admin_password = instances.get_admin_password(core_api, stack_id, stack_config, services.standalone_role)
            result.update({
                "standalone_endpoint": ["http://%s" % hostname for hostname in hosts],
                "standalone_password": admin_password,
            })
        hosts = services.get_load_balancer_hosts(
            core_api, stack_id, services.indexer_role, stack_config["namespace"])
        if hosts:
            admin_password = instances.get_admin_password(core_api, stack_id, stack_config, services.indexer_role)
            result.update({
                # Indexers are addressed host:port (splunktcp 9997), not HTTP.
                "indexer_endpoint": ["%s:9997" % hostname for hostname in hosts],
                "indexer_password": admin_password,
            })
        self.send_result(result)

    def handle_POST(self):
        """Apply a whitelisted subset of request fields as a stack update."""
        path = self.request['path']
        _, stack_id = os.path.split(path)
        fields_names = set([
            "title",
            "search_head_count",
            "indexer_count",
        ])
        request_params = parse_qs(self.request['payload'])
        stack_updates = {
            k: request_params[k][0]
            for k in fields_names if k in request_params
        }
        trigger_updating_stack(self.splunk, stack_id, stack_updates)

    def handle_DELETE(self):
        """Stop (delete) the stack; `force=true` skips graceful teardown."""
        path = self.request['path']
        _, stack_id = os.path.split(path)
        if "force" in self.request["query"]:
            force = self.request["query"]["force"] == "true"
        else:
            force = False
        stack_operation.stop(
            self.service, stack_id, force=force)
        self.send_result({
            "stack_id": stack_id,
        })
| StarcoderdataPython |
3261815 | <reponame>JonarsLi/sanic-ext<filename>tests/extensions/openapi/test_exclude.py
from sanic import Blueprint, Request, Sanic, text
from sanic_ext.extensions.openapi import openapi
from utils import get_spec
def test_exclude_decorator(app: Sanic):
    """Routes marked with @openapi.exclude() or definition(exclude=True)
    must not appear in the generated OpenAPI spec."""
    @app.route("/test0")
    @openapi.exclude()
    async def handler0(request: Request):
        """
        openapi:
        ---
        summary: This is a summary.
        """
        return text("ok")
    @app.route("/test1")
    @openapi.definition(summary="This is a summary.", exclude=True)
    async def handler1(request: Request):
        return text("ok")
    spec = get_spec(app)
    paths = spec["paths"]
    # Both routes were excluded, so the paths object is empty.
    assert len(paths) == 0
def test_exclude_bp(app: Sanic):
    """Excluding an entire blueprint removes its routes from the spec while
    leaving other blueprints untouched."""
    bp1 = Blueprint("blueprint1")
    bp2 = Blueprint("blueprint2")
    @bp1.route("/op1")
    @openapi.summary("handler 1")
    async def handler1(request: Request):
        return text("bp1, ok")
    @bp2.route("/op2")
    @openapi.summary("handler 2")
    async def handler2(request: Request):
        return text("bp2, ok")
    app.blueprint(bp1)
    app.blueprint(bp2)
    # Exclusion works even after the blueprint has been attached to the app.
    openapi.exclude(bp=bp1)
    spec = get_spec(app)
    paths = spec["paths"]
    assert len(paths) == 1
    assert "/op2" in paths
    assert not "/op1" in paths
    assert paths["/op2"]["get"]["summary"] == "handler 2"
| StarcoderdataPython |
from louqa import app

# Development entry point: run the app's built-in server on port 8089.
# NOTE(review): debug=True must not ship to production — confirm deployment
# uses a proper WSGI/ASGI server instead of this runner.
app.run(debug=True, port=8089)
| StarcoderdataPython |
3369011 | """ This file contain a class describing a memory efficient flat index """
import heapq
from typing import List, Optional, Tuple
from embedding_reader import EmbeddingReader
import faiss
import numpy as np
from tqdm import trange
from autofaiss.indices.faiss_index_wrapper import FaissIndexWrapper
class MemEfficientFlatIndex(FaissIndexWrapper):
    """
    Faiss-like Flat index that can support any size of vectors
    without memory issues.
    Two search functions are available to use either batch of smaller faiss
    flat index or rely fully on numpy.
    """

    def __init__(self, d: int, metric_type: int):
        """
        __init__ function for MemEfficientFlatIndex

        Parameters:
        -----------
        d : int
            dimension of the vectors, named d to keep Faiss notation
        metric_type : int
            similarity metric used in the vector space, using faiss
            enumerate values (faiss.METRIC_INNER_PRODUCT and faiss.METRIC_L2)
        """
        super().__init__(d, metric_type)
        self.dim = d
        # Empty (0, dim) array, replaced once by add()/add_all().
        self.prod_emb = np.zeros((0, self.dim))
        self.embedding_reader: Optional[EmbeddingReader] = None

    def delete_vectors(self):
        """delete the vectors of the index"""
        self.prod_emb = np.zeros((0, self.dim))

    # pylint: disable=missing-function-docstring, invalid-name
    def add(self, x: np.ndarray):
        # Single-shot add: the vectors become the whole index.
        if self.prod_emb.shape[0] == 0:
            self.prod_emb = x.astype(np.float32)
        else:
            raise NotImplementedError("You can add vectors only once, delete them first with delete_vectors")

    def add_all(self, filename: str, nb_items: int):
        """
        Function that adds vectors to the index from a memmory-mapped array

        Parameters
        ----------
        filename : string
            path of the 2D numpy array of shape (nb_items, vector_dim)
            on the disk
        nb_items : int
            number of vectors in the 2D array (the dim is already known)
        """
        if self.prod_emb.shape[0] == 0:
            # Memory-map so the vectors are paged in lazily from disk.
            self.prod_emb = np.memmap(filename, dtype="float32", mode="r", shape=(nb_items, self.dim))
        else:
            raise NotImplementedError("You can add vectors only once, delete them first")

    def add_files(self, embedding_reader: EmbeddingReader):
        # Streaming alternative to add()/add_all(), consumed by search_files().
        if self.embedding_reader is None:
            self.embedding_reader = embedding_reader
        else:
            raise NotImplementedError("You can add vectors only once, delete them first with delete_vectors")

    # pylint: disable=too-many-locals
    def search_numpy(self, xq: np.ndarray, k: int, batch_size: int = 4_000_000):
        """
        Function that search the k nearest neighbours of a batch of vectors.
        This implementation is based on vectorized numpy function, it is slower than
        the search function based on batches of faiss flat indices.
        We keep this implementation because we can build new functions using this code.
        Moreover, the distance computation is more precise in numpy than the faiss implementation
        that optimizes speed over precision.

        Parameters
        ----------
        xq : 2D numpy.array of floats
            Batch of vectors of shape (batch_size, vector_dim)
        k : int
            Number of neighbours to retrieve for every vector
        batch_size : int
            Size of the batch of vectors that are explored.
            A bigger value is prefered to avoid multiple loadings
            of the vectors from the disk.

        Returns
        -------
        D : 2D numpy.array of floats
            Distances numpy array of shape (batch_size, k).
            Contains the distances computed by the index of the k nearest neighbours.
        I : 2D numpy.array of ints
            Labels numpy array of shape (batch_size, k).
            Contains the vectors' labels of the k nearest neighbours.
        """
        # Only inner-product similarity is implemented in this numpy path.
        assert self.metric_type == faiss.METRIC_INNER_PRODUCT
        # One min-heap per query; each keeps the k best (largest inner product)
        # candidates seen so far, as (distance, index) pairs.
        h: List[List[Tuple[float, int]]] = [[] for _ in range(xq.shape[0])]
        # reshape input for vectorized distance computation
        xq_reshaped = np.expand_dims(xq, 1)
        # initialize index offset
        offset = 0
        # For each batch
        for i in trange(0, self.prod_emb.shape[0], batch_size):
            # compute distances in one tensor product
            dist_arr = np.sum((xq_reshaped * np.expand_dims(self.prod_emb[i : i + batch_size], 0)), axis=-1)
            # get index of the k biggest
            # pylint: disable=unsubscriptable-object # pylint/issues/3139
            max_k = min(k, dist_arr.shape[1])
            ind_k_max = np.argpartition(dist_arr, -max_k)[:, -max_k:]
            assert ind_k_max.shape == (xq.shape[0], max_k)
            # to be vectorized if it is indeed the bottleneck, (it's not for batch_size >> 10000)
            for j, inds in enumerate(ind_k_max):
                for ind, distance in zip(inds, dist_arr[j, inds]):
                    # Translate the batch-local index to a global label.
                    true_ind = offset + ind if ind != -1 else -1
                    if len(h[j]) < k:
                        heapq.heappush(h[j], (distance, true_ind))
                    else:
                        # heappushpop evicts the current worst of the k best.
                        heapq.heappushpop(h[j], (distance, true_ind))
            offset += batch_size
        # Fill distance and index matrices; heap pops ascending, so results are
        # written back-to-front to end up sorted best-first.
        D = np.zeros((xq.shape[0], k), dtype=np.float32)
        I = np.full((xq.shape[0], k), fill_value=-1, dtype=np.int32)
        for i in range(xq.shape[0]):
            # case where we couldn't find enough vectors
            max_k = min(k, len(h[i]))
            for j in range(max_k):
                x = heapq.heappop(h[i])
                D[i][max_k - 1 - j] = x[0]
                I[i][max_k - 1 - j] = x[1]
        return D, I

    # pylint: disable=too-many-locals, arguments-differ
    def search(self, x: np.ndarray, k: int, batch_size: int = 4_000_000):
        """
        Function that search the k nearest neighbours of a batch of vectors

        Parameters
        ----------
        x : 2D numpy.array of floats
            Batch of vectors of shape (batch_size, vector_dim)
        k : int
            Number of neighbours to retrieve for every vector
        batch_size : int
            Size of the batch of vectors that are explored.
            A bigger value is prefered to avoid multiple loadings
            of the vectors from the disk.

        Returns
        -------
        D : 2D numpy.array of floats
            Distances numpy array of shape (batch_size, k).
            Contains the distances computed by the index of the k nearest neighbours.
        I : 2D numpy.array of ints
            Labels numpy array of shape (batch_size, k).
            Contains the vectors' labels of the k nearest neighbours.
        """
        # NOTE(review): prod_emb is initialised to an empty array and never set
        # to None anywhere in this class, so this guard cannot fire — an empty
        # index silently returns all -1 labels instead. Confirm intent.
        if self.prod_emb is None:
            raise ValueError("The index is empty")
        # Cast in the right format for Faiss
        if x.dtype != np.float32:
            x = x.astype(np.float32)
        # xq for x query, a better name than x which is Faiss convention
        xq = x
        # One min-heap per query keeping the k best candidates seen so far.
        h: List[List[Tuple[float, int]]] = [[] for _ in range(xq.shape[0])]
        # initialize index offset
        offset = 0
        # For each batch
        for i in trange(0, self.prod_emb.shape[0], batch_size):
            # Build a throwaway faiss flat index over just this batch.
            brute = faiss.IndexFlatIP(self.dim)
            # pylint: disable=no-value-for-parameter
            brute.add(self.prod_emb[i : i + batch_size])
            D_tmp, I_tmp = brute.search(xq, k)
            # to be vectorized if it is indeed the bottleneck, (it's not for batch_size >> 10000)
            for j, (distances, inds) in enumerate(zip(D_tmp, I_tmp)):
                for distance, ind in zip(distances, inds):
                    # -1 marks "no result" from faiss; keep it untranslated.
                    true_ind: int = offset + ind if ind != -1 else -1
                    if len(h[j]) < k:
                        heapq.heappush(h[j], (distance, true_ind))
                    else:
                        heapq.heappushpop(h[j], (distance, true_ind))
            offset += batch_size
        # Fill distance and index matrices, best-first (pop order is ascending).
        D = np.zeros((xq.shape[0], k), dtype=np.float32)
        I = np.full((xq.shape[0], k), fill_value=-1, dtype=np.int32)
        for i in range(xq.shape[0]):
            # case where we couldn't find enough vectors
            max_k = min(k, len(h[i]))
            for j in range(max_k):
                x = heapq.heappop(h[i])
                D[i][max_k - 1 - j] = x[0]
                I[i][max_k - 1 - j] = x[1]
        return D, I

    def search_files(self, x: np.ndarray, k: int, batch_size: int):
        """Same batched-faiss search as search(), but streaming embeddings
        through the EmbeddingReader registered via add_files()."""
        if self.embedding_reader is None:
            raise ValueError("The index is empty")
        # Cast in the right format for Faiss
        if x.dtype != np.float32:
            x = x.astype(np.float32)
        # xq for x query, a better name than x which is Faiss convention
        xq = x
        # One min-heap per query keeping the k best candidates seen so far.
        h: List[List[Tuple[float, int]]] = [[] for _ in range(xq.shape[0])]
        # initialize index offset
        offset = 0
        # For each batch
        for emb_array, _ in self.embedding_reader(batch_size):
            # for i in trange(0, self.prod_emb.shape[0], batch_size):
            # Build a throwaway faiss flat index over just this batch.
            brute = faiss.IndexFlatIP(self.dim)
            # pylint: disable=no-value-for-parameter
            brute.add(emb_array)
            D_tmp, I_tmp = brute.search(xq, k)
            # to be vectorized if it is indeed the bottleneck, (it's not for batch_size >> 10000)
            for j, (distances, inds) in enumerate(zip(D_tmp, I_tmp)):
                for distance, ind in zip(distances, inds):
                    true_ind: int = offset + ind if ind != -1 else -1
                    if len(h[j]) < k:
                        heapq.heappush(h[j], (distance, true_ind))
                    else:
                        heapq.heappushpop(h[j], (distance, true_ind))
            # Unlike search(), advance by the actual batch size read.
            offset += emb_array.shape[0]
        # Fill distance and index matrices, best-first (pop order is ascending).
        D = np.zeros((xq.shape[0], k), dtype=np.float32)
        I = np.full((xq.shape[0], k), fill_value=-1, dtype=np.int32)
        for i in range(xq.shape[0]):
            # case where we couldn't find enough vectors
            max_k = min(k, len(h[i]))
            for j in range(max_k):
                x = heapq.heappop(h[i])  # type: ignore
                D[i][max_k - 1 - j] = x[0]
                I[i][max_k - 1 - j] = x[1]
        return D, I
| StarcoderdataPython |
157578 | import logging
from django.apps import AppConfig
import large_image
logger = logging.getLogger(__name__)
class DjangoLargeImageConfig(AppConfig):
    """Django application configuration for ``django_large_image``."""

    name = 'django_large_image'
    verbose_name = 'Django Large Image'
    default_auto_field = 'django.db.models.BigAutoField'

    def ready(self):
        """Configure large_image once the Django app registry is ready."""
        # Set up cache with large_image
        # This isn't necessary but it makes sure we always default
        # to the django cache if others are available
        large_image.config.setConfig('cache_backend', 'django')
4833000 | <reponame>lrahmani/agents-aea
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Peer to Peer connection and channel."""
import asyncio
import logging
import threading
import time
from asyncio import CancelledError
from threading import Thread
from typing import Any, Dict, List, Optional, Set, cast
from fetch.p2p.api.http_calls import HTTPCalls
from aea.configurations.base import ConnectionConfig, PublicId
from aea.connections.base import Connection
from aea.mail.base import AEAConnectionError, Address, Envelope
logger = logging.getLogger("aea.packages.fetchai.connections.p2p_client")
class PeerToPeerChannel:
    """A wrapper for an SDK or API.

    Bridges a threaded HTTP polling loop (provider side) with an asyncio
    queue (agent side): envelopes received from the provider are pushed onto
    ``in_queue`` via ``loop.call_soon_threadsafe``.
    """

    def __init__(
        self,
        address: Address,
        provider_addr: str,
        provider_port: int,
        excluded_protocols: Optional[Set[PublicId]] = None,
    ):
        """
        Initialize a channel.

        :param address: the address
        :param provider_addr: host of the peer-to-peer provider
        :param provider_port: port of the peer-to-peer provider
        :param excluded_protocols: protocol ids that must not be sent over this channel
        """
        self.address = address
        self.provider_addr = provider_addr
        self.provider_port = provider_port
        self.in_queue = None  # type: Optional[asyncio.Queue]
        self.loop = None  # type: Optional[asyncio.AbstractEventLoop]
        self._httpCall = None  # type: Optional[HTTPCalls]
        self.excluded_protocols = excluded_protocols
        # background thread that polls the provider for incoming messages
        self.thread = Thread(target=self.receiving_loop)
        # guards connect/disconnect transitions of `stopped`
        self.lock = threading.Lock()
        self.stopped = True
        logger.info("Initialised the peer to peer channel")

    def connect(self):
        """
        Connect.

        Starts the receiving thread and registers with the provider.

        :return: an asynchronous queue, that constitutes the communication channel.
        """
        with self.lock:
            if self.stopped:
                self._httpCall = HTTPCalls(
                    server_address=self.provider_addr, port=self.provider_port
                )
                self.stopped = False
                self.thread.start()
                logger.debug("P2P Channel is connected.")
                self.try_register()

    def try_register(self) -> bool:
        """Try to register to the provider.

        :return: True if the provider acknowledged the registration.
        :raises AEAConnectionError: if registration fails for any reason.
        """
        try:
            assert self._httpCall is not None
            logger.info(self.address)
            query = self._httpCall.register(sender_address=self.address, mailbox=True)
            return query["status"] == "OK"
        except Exception:  # pragma: no cover
            logger.warning("Could not register to the provider.")
            raise AEAConnectionError()

    def send(self, envelope: Envelope) -> None:
        """
        Process the envelopes.

        :param envelope: the envelope
        :return: None
        :raises ValueError: if the envelope's protocol is excluded on this channel.
        """
        assert self._httpCall is not None

        if self.excluded_protocols is not None:
            if envelope.protocol_id in self.excluded_protocols:
                logger.error(
                    "This envelope cannot be sent with the oef connection: protocol_id={}".format(
                        envelope.protocol_id
                    )
                )
                raise ValueError("Cannot send message.")
        self._httpCall.send_message(
            sender_address=envelope.sender,
            receiver_address=envelope.to,
            protocol=str(envelope.protocol_id),
            context=b"None",
            payload=envelope.message,
        )

    def receiving_loop(self) -> None:
        """Receive the messages from the provider.

        Runs in the background thread; polls every 0.5s until `stopped` is set
        and hands each envelope to the asyncio loop thread-safely.
        """
        assert self._httpCall is not None
        assert self.in_queue is not None
        assert self.loop is not None
        while not self.stopped:
            messages = self._httpCall.get_messages(
                sender_address=self.address
            )  # type: List[Dict[str, Any]]
            for message in messages:
                logger.debug("Received message: {}".format(message))
                envelope = Envelope(
                    to=message["TO"]["RECEIVER_ADDRESS"],
                    sender=message["FROM"]["SENDER_ADDRESS"],
                    protocol_id=PublicId.from_str(message["PROTOCOL"]),
                    message=message["PAYLOAD"],
                )
                self.loop.call_soon_threadsafe(self.in_queue.put_nowait, envelope)
            time.sleep(0.5)
        logger.debug("Receiving loop stopped.")

    def disconnect(self) -> None:
        """
        Disconnect.

        Unregisters from the provider, signals the receiving loop to stop and
        joins the background thread.

        :return: None
        """
        assert self._httpCall is not None
        with self.lock:
            if not self.stopped:
                self._httpCall.unregister(self.address)
                # self._httpCall.disconnect()
                self.stopped = True
                self.thread.join()
class PeerToPeerClientConnection(Connection):
    """Proxy to the functionality of the SDK or API."""

    def __init__(self, provider_addr: str, provider_port: int = 8000, **kwargs):
        """
        Initialize a connection to an SDK or API.

        :param provider_addr: the provider address.
        :param provider_port: the provider port.
        :param kwargs: keyword argument for the parent class.
        """
        if kwargs.get("configuration") is None and kwargs.get("connection_id") is None:
            kwargs["connection_id"] = PublicId("fetchai", "p2p_client", "0.1.0")
        super().__init__(**kwargs)
        # NOTE: the original code contained two no-op self-assignments
        # (`provider_addr = provider_addr`) here; they have been removed.
        self.channel = PeerToPeerChannel(self.address, provider_addr, provider_port, excluded_protocols=self.excluded_protocols)  # type: ignore

    async def connect(self) -> None:
        """
        Connect to the P2P provider.

        :return: None
        """
        if not self.connection_status.is_connected:
            self.connection_status.is_connected = True
            self.channel.in_queue = asyncio.Queue()
            self.channel.loop = self.loop
            self.channel.connect()

    async def disconnect(self) -> None:
        """
        Disconnect from P2P.

        :return: None
        """
        if self.connection_status.is_connected:
            self.connection_status.is_connected = False
            self.channel.disconnect()

    async def send(self, envelope: "Envelope") -> None:
        """
        Send an envelope.

        :param envelope: the envelop
        :return: None
        :raises ConnectionError: if the connection was never established.
        """
        if not self.connection_status.is_connected:
            raise ConnectionError(
                "Connection not established yet. Please use 'connect()'."
            )  # pragma: no cover
        self.channel.send(envelope)

    async def receive(self, *args, **kwargs) -> Optional["Envelope"]:
        """
        Receive an envelope.

        :return: the envelope received, or None.
        :raises ConnectionError: if the connection was never established.
        """
        if not self.connection_status.is_connected:
            raise ConnectionError(
                "Connection not established yet. Please use 'connect()'."
            )  # pragma: no cover
        assert self.channel.in_queue is not None
        try:
            envelope = await self.channel.in_queue.get()
            if envelope is None:
                return None  # pragma: no cover
            return envelope
        except CancelledError:  # pragma: no cover
            return None

    @classmethod
    def from_config(
        cls, address: Address, configuration: ConnectionConfig
    ) -> "Connection":
        """
        Get the P2P connection from the connection configuration.

        :param address: the address of the agent.
        :param configuration: the connection configuration object.
        :return: the connection object
        """
        addr = cast(str, configuration.config.get("addr"))
        port = cast(int, configuration.config.get("port"))
        return PeerToPeerClientConnection(
            addr, port, address=address, configuration=configuration
        )
| StarcoderdataPython |
1629220 | <reponame>ufo2011/platformio-core
# Copyright (c) 2014-present PlatformIO <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from twisted.logger import LogLevel # pylint: disable=import-error
from twisted.spread import pb # pylint: disable=import-error
from platformio import proc
from platformio.commands.remote.ac.process import ProcessAsyncCmd
from platformio.commands.remote.ac.psync import ProjectSyncAsyncCmd
from platformio.commands.remote.ac.serial import SerialPortAsyncCmd
from platformio.commands.remote.client.base import RemoteClientBase
from platformio.device.list import list_serial_ports
from platformio.project.config import ProjectConfig
from platformio.project.exception import NotPlatformIOProjectError
class RemoteAgentService(RemoteClientBase):
    """PlatformIO remote agent: executes commands (run/test/monitor/sync/update)
    requested by remote clients, tracking each long-running action as an
    async-command (AC) object keyed by id in ``self._acs``."""

    def __init__(self, name, share, working_dir=None):
        """
        :param name: agent display name (truncated to 50 characters)
        :param share: iterable of account identifiers the agent is shared with
        :param working_dir: directory for synced projects; defaults to
            <core_dir>/remote
        """
        RemoteClientBase.__init__(self)
        self.log_level = LogLevel.info
        self.working_dir = working_dir or os.path.join(
            ProjectConfig.get_instance().get("platformio", "core_dir"), "remote"
        )
        if not os.path.isdir(self.working_dir):
            os.makedirs(self.working_dir)
        if name:
            self.name = str(name)[:50]
        self.join_options.update(
            {"agent": True, "share": [s.lower().strip()[:50] for s in share]}
        )
        # id -> active async command (monitor, sync, subprocess, ...)
        self._acs = {}

    def agent_pool_ready(self):
        """Hook invoked when the agent pool is ready; no-op for an agent."""
        pass

    def cb_disconnected(self, reason):
        """Close every active async command before handling the disconnect."""
        for ac in self._acs.values():
            ac.ac_close()
        RemoteClientBase.cb_disconnected(self, reason)

    def remote_acread(self, ac_id):
        """Read pending output from the async command with id *ac_id*."""
        self.log.debug("Async Read: {id}", id=ac_id)
        if ac_id not in self._acs:
            raise pb.Error("Invalid Async Identifier")
        return self._acs[ac_id].ac_read()

    def remote_acwrite(self, ac_id, data):
        """Write *data* to the async command with id *ac_id*."""
        self.log.debug("Async Write: {id}", id=ac_id)
        if ac_id not in self._acs:
            raise pb.Error("Invalid Async Identifier")
        return self._acs[ac_id].ac_write(data)

    def remote_acclose(self, ac_id):
        """Close and forget the async command *ac_id*; return its exit code."""
        self.log.debug("Async Close: {id}", id=ac_id)
        if ac_id not in self._acs:
            raise pb.Error("Invalid Async Identifier")
        return_code = self._acs[ac_id].ac_close()
        del self._acs[ac_id]
        return return_code

    def remote_cmd(self, cmd, options):
        """Dispatch a remote command name like ``device.monitor`` to the
        matching ``_process_cmd_*`` handler."""
        self.log.info("Remote command received: {cmd}", cmd=cmd)
        self.log.debug("Command options: {options!r}", options=options)
        callback = "_process_cmd_%s" % cmd.replace(".", "_")
        return getattr(self, callback)(options)

    def _defer_async_cmd(self, ac, pass_agent_name=True):
        """Register *ac* and return the handle tuple sent back to the client."""
        self._acs[ac.id] = ac
        if pass_agent_name:
            return (self.id, ac.id, self.name)
        return (self.id, ac.id)

    def _process_cmd_device_list(self, _):
        """Return this agent's name and its locally attached serial ports."""
        return (self.name, list_serial_ports())

    def _process_cmd_device_monitor(self, options):
        """Open a serial monitor, auto-picking a USB port if none was given."""
        if not options["port"]:
            # prefer a port that reports a USB VID:PID (a real device)
            for item in list_serial_ports():
                if "VID:PID" in item["hwid"]:
                    options["port"] = item["port"]
                    break

        # terminate opened monitors
        if options["port"]:
            for ac in list(self._acs.values()):
                if (
                    isinstance(ac, SerialPortAsyncCmd)
                    and ac.options["port"] == options["port"]
                ):
                    self.log.info(
                        "Terminate previously opened monitor at {port}",
                        port=options["port"],
                    )
                    ac.ac_close()
                    del self._acs[ac.id]

        if not options["port"]:
            raise pb.Error("Please specify serial port using `--port` option")
        self.log.info("Starting serial monitor at {port}", port=options["port"])

        return self._defer_async_cmd(SerialPortAsyncCmd(options), pass_agent_name=False)

    def _process_cmd_psync(self, options):
        """Start a project sync, cancelling any previous sync for the same id."""
        for ac in list(self._acs.values()):
            if (
                isinstance(ac, ProjectSyncAsyncCmd)
                and ac.options["id"] == options["id"]
            ):
                self.log.info("Terminate previous Project Sync process")
                ac.ac_close()
                del self._acs[ac.id]
        options["agent_working_dir"] = self.working_dir
        return self._defer_async_cmd(
            ProjectSyncAsyncCmd(options), pass_agent_name=False
        )

    def _process_cmd_run(self, options):
        """Handle the remote ``run`` command."""
        return self._process_cmd_run_or_test("run", options)

    def _process_cmd_test(self, options):
        """Handle the remote ``test`` command."""
        return self._process_cmd_run_or_test("test", options)

    def _process_cmd_run_or_test(  # pylint: disable=too-many-locals,too-many-branches
        self, command, options
    ):
        """Execute ``platformio run``/``test`` in a synced project.

        The project's platformio.ini is temporarily sanitized (custom *_dir
        options dropped, build_dir pinned) and restored when the subprocess
        ends; any active serial monitors are paused for the duration.
        """
        assert options and "project_id" in options
        project_dir = os.path.join(self.working_dir, "projects", options["project_id"])
        origin_pio_ini = os.path.join(project_dir, "platformio.ini")
        back_pio_ini = os.path.join(project_dir, "platformio.ini.bak")

        # remove insecure project options
        try:
            conf = ProjectConfig(origin_pio_ini)
            if os.path.isfile(back_pio_ini):
                os.remove(back_pio_ini)
            os.rename(origin_pio_ini, back_pio_ini)
            # cleanup
            if conf.has_section("platformio"):
                for opt in conf.options("platformio"):
                    if opt.endswith("_dir"):
                        conf.remove_option("platformio", opt)
            else:
                conf.add_section("platformio")
            conf.set("platformio", "build_dir", ".pio/build")
            conf.save(origin_pio_ini)
            # restore A/M times so the sanitized file does not look modified
            os.utime(
                origin_pio_ini,
                (os.path.getatime(back_pio_ini), os.path.getmtime(back_pio_ini)),
            )
        except NotPlatformIOProjectError as e:
            raise pb.Error(str(e))

        # build the platformio CLI invocation from the requested options
        cmd_args = ["platformio", "--force", command, "-d", project_dir]
        for env in options.get("environment", []):
            cmd_args.extend(["-e", env])
        for target in options.get("target", []):
            cmd_args.extend(["-t", target])
        for ignore in options.get("ignore", []):
            cmd_args.extend(["-i", ignore])
        if options.get("upload_port", False):
            cmd_args.extend(["--upload-port", options.get("upload_port")])
        if options.get("test_port", False):
            cmd_args.extend(["--test-port", options.get("test_port")])
        if options.get("disable_auto_clean", False):
            cmd_args.append("--disable-auto-clean")
        if options.get("without_building", False):
            cmd_args.append("--without-building")
        if options.get("without_uploading", False):
            cmd_args.append("--without-uploading")
        if options.get("silent", False):
            cmd_args.append("-s")
        if options.get("verbose", False):
            cmd_args.append("-v")

        # pause serial monitors so they do not hold the upload port
        paused_acs = []
        for ac in self._acs.values():
            if not isinstance(ac, SerialPortAsyncCmd):
                continue
            self.log.info("Pause active monitor at {port}", port=ac.options["port"])
            ac.pause()
            paused_acs.append(ac)

        def _cb_on_end():
            # restore the original platformio.ini and resume monitors
            if os.path.isfile(back_pio_ini):
                if os.path.isfile(origin_pio_ini):
                    os.remove(origin_pio_ini)
                os.rename(back_pio_ini, origin_pio_ini)
            for ac in paused_acs:
                ac.unpause()
                self.log.info(
                    "Unpause active monitor at {port}", port=ac.options["port"]
                )

        return self._defer_async_cmd(
            ProcessAsyncCmd(
                {"executable": proc.where_is_program("platformio"), "args": cmd_args},
                on_end_callback=_cb_on_end,
            )
        )

    def _process_cmd_update(self, options):
        """Run ``platformio update`` (optionally check-only) asynchronously."""
        cmd_args = ["platformio", "--force", "update"]
        if options.get("only_check"):
            cmd_args.append("--only-check")
        return self._defer_async_cmd(
            ProcessAsyncCmd(
                {"executable": proc.where_is_program("platformio"), "args": cmd_args}
            )
        )
| StarcoderdataPython |
3266966 | import sys
import json
import uuid
import datetime
import logging
class RedBanjoConfig:
    """JSON task configuration loaded from the file path given in argv[1].

    The document is expected to contain an ``execution.id`` entry and a list
    of positional ``arguments``.
    """

    def __init__(self):
        self._logger = logging.getLogger("RedBanjoConfig")
        with open(sys.argv[1]) as jsonFile:
            self._config = json.load(jsonFile)
        self._logger.info("Parsed Config .................: \n%s", json.dumps(self._config, indent=4))

    def execution_id(self):
        """Return the unique id of this execution."""
        return self._config["execution"]["id"]

    def argument(self, index):
        """Return the positional argument at *index*.

        Generalizes the fixed arg0..arg4 accessors to any index; raises
        IndexError when the configuration has fewer arguments.
        """
        return self._config["arguments"][index]

    # Backward-compatible fixed accessors, now delegating to argument().
    def arg0(self):
        return self.argument(0)

    def arg1(self):
        return self.argument(1)

    def arg2(self):
        return self.argument(2)

    def arg3(self):
        return self.argument(3)

    def arg4(self):
        return self.argument(4)
class RedBanjoChannel:
    """Write-side of the result channel: serializes messages as JSON lines
    into the pipe/file whose path is given in argv[2]."""

    def __init__(self, execution_id):
        self._logger = logging.getLogger('RedBanjoChannel')
        self._execution_id = execution_id
        self._path = sys.argv[2]
        self._pipe = open(sys.argv[2], "w")
        self._logger.info('channel path: %s', self._path)
        self._logger.info('execution id: %s', self._execution_id)

    def now(self) -> int:
        """Current UTC time as integer milliseconds since the epoch."""
        utc_now = datetime.datetime.now(tz=datetime.timezone.utc)
        return int(utc_now.timestamp() * 1000)

    def send_message(self, msg_type, msg_data):
        """Wrap *msg_data* in a message envelope and write it to the channel."""
        envelope = {
            "id": str(uuid.uuid4()),
            "executionId": self._execution_id,
            "messageType": msg_type,
            "data": msg_data,
        }
        self.send(envelope)

    def send(self, msg):
        """Serialize *msg* as one JSON line and flush it immediately."""
        serialized = json.dumps(msg)
        self._logger.info("Sending message: %s", serialized)
        self._pipe.write(serialized + "\n")
        self._pipe.flush()

    def close(self):
        """Close the underlying pipe/file."""
        self._pipe.close()
class RedBanjo:
    """Client facade wiring the parsed configuration to the result channel."""

    def __init__(self):
        self._logger = logging.getLogger("RedBanjo")
        self._config = RedBanjoConfig()
        self._channel = RedBanjoChannel(self._config.execution_id())

    def __str__(self):
        return 'RedBanjo Client'

    def config(self) -> RedBanjoConfig:
        """Expose the parsed task configuration."""
        return self._config

    def record_metric(self, name: str, value_numeric, value_string: str):
        """Report one named metric observation, timestamped now (UTC ms)."""
        self._channel.send_message(
            "recordMetric",
            dict(
                name=name,
                ts=int(self._channel.now()),
                valueNumeric=value_numeric,
                valueString=value_string,
            ),
        )

    def record_assertion(self, is_true: bool, reason: str, description: str):
        """Report the outcome of one assertion, timestamped now (UTC ms)."""
        self._channel.send_message(
            "makeAssertion",
            dict(
                timestamp=int(self._channel.now()),
                isTrue=is_true,
                reason=reason,
                description=description,
            ),
        )
class RedBanjoFactory:
    """Process-wide singleton accessor for the RedBanjo client."""

    __instance: RedBanjo = None  # lazily created on first get()

    def __init__(self):
        pass

    @classmethod
    def get(cls) -> RedBanjo:
        """Return the shared RedBanjo instance, creating it on first use."""
        existing = cls.__instance
        if existing is not None:
            return existing
        cls.__instance = RedBanjo()
        return cls.__instance
| StarcoderdataPython |
126221 | import boto3
import botocore
def download_data_from_s3(bucket_name, key, dst):
    """Fetch object *key* from S3 bucket *bucket_name* into local path *dst*.

    A missing object (HTTP 404) is reported on stdout instead of raising;
    every other client error propagates to the caller.
    """
    try:
        bucket = boto3.resource('s3').Bucket(bucket_name)
        bucket.download_file(key, dst)
    except botocore.exceptions.ClientError as err:
        error_code = err.response['Error']['Code']
        if error_code != "404":
            raise
        print("The object does not exist.")
def upload_data_to_s3(bucket_name, filepath, key_dst):
    """Upload local file *filepath* to S3 bucket *bucket_name* under *key_dst*.

    A 404 client error is reported on stdout instead of raising; every other
    client error propagates to the caller.
    """
    try:
        bucket = boto3.resource('s3').Bucket(bucket_name)
        bucket.upload_file(filepath, key_dst)
    except botocore.exceptions.ClientError as err:
        error_code = err.response['Error']['Code']
        if error_code != "404":
            raise
        print("Upload not successful.")
| StarcoderdataPython |
1632947 | <reponame>sayRequil/qDev<filename>qdev_i.py
from pyparsing import *

# NOTE(review): the original script had three blocking defects, fixed here:
#   1. `from = Forward()` — `from` is a reserved word, a SyntaxError;
#      the rule is now named `from_stmt`.
#   2. parseString() was handed an open file object instead of its text.
#   3. `for i,v in pairs:` iterated over an undefined name `pairs`; the
#      loop served no purpose and has been removed.

# Read the qDev source to parse; parseString expects a string.
with open("code.q", "r") as source_file:
    data = source_file.read()

LBRACE, RBRACE, LPAREN, RPAREN, SEMI, EQUAL = map(Suppress, "{}();=")
GROUP = Keyword("$group")
ENTRY = Keyword("$enter")
PRINT = Keyword("$print")
VAR = Keyword("$local")
FROM = Keyword("$from")
CALLVAR = Keyword("$callvar")
ARRAY = Keyword("$array")

# numeric literals, converted to Python numbers at parse time
real = Regex(r"[+-]?\d+\.\d*").setParseAction(lambda t: float(t[0]))
integer = Regex(r"[+-]?\d+").setParseAction(lambda t: int(t[0]))
# parses a string enclosed in quotes, but strips off the quotes at parse time
string = QuotedString('"')

# a literal value: string, real or integer
value = string | real | integer

# define structure expressions
entry = Group(ENTRY + LPAREN + Group(Optional(delimitedList(value)))) + RPAREN + SEMI

# define print function
print_ = Group(PRINT + LPAREN + string("content") + RPAREN + SEMI)

# since Groups can contain Groups, need to use a Forward to define recursive expression
group = Forward()
group << Group(GROUP + LPAREN + string("name") + RPAREN +
               LBRACE + Group(ZeroOrMore(group | entry))("body") + RBRACE)

# define variables
var = Group(VAR + " " + string("var_name") + EQUAL + string("var_value") + SEMI)

# define from ("from" is a Python keyword, hence the from_stmt name)
from_stmt = Forward()
from_stmt << Group(FROM + " " + string("module") + SEMI)

# define callvar
callvar = Group(CALLVAR + LPAREN + string("var_n") + RPAREN + SEMI)

# define array
array = Group(ARRAY + LBRACE + Group(Optional(delimitedList(value))) + RBRACE)

# ignore C style comments wherever they occur
group.ignore(cStyleComment)

# parse the source text
result = group.parseString(data)

# print out the tokens as a nice indented list using pprint
from pprint import pprint
pprint(result.asList())
| StarcoderdataPython |
class A:
    """Empty placeholder class."""

# Bind the class object itself to a module-level alias.
member = A
191703 | # !/usr/bin/env python
# -- coding: utf-8 --
# @Author zengxiaohui
# Datatime:4/30/2021 2:04 PM
# @File:PIL_utils
import cv2
import numpy as np
import numpy
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw, ImageFont
from PIL import Image, ImageOps
def PIL2cv2(image):
    """Convert a PIL image (assumed RGB) to an OpenCV BGR ndarray."""
    return cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
def ImgText_CN(img, text, left, top, textColor=(0, 255, 0), textSize=20,
               fontPath="font/simhei.ttf"):
    """Draw (possibly Chinese) text onto an image and return an OpenCV BGR ndarray.

    Args:
        img: input image — either an OpenCV ndarray (BGR) or a PIL image.
        text: string to render; the chosen TrueType font must cover its glyphs.
        left, top: top-left corner of the text, in pixels.
        textColor: RGB colour tuple for the glyphs.
        textSize: font size in points.
        fontPath: path to the .ttf font file. Defaults to the previously
            hard-coded SimHei font so existing callers are unaffected.
    """
    if (isinstance(img, numpy.ndarray)):  # OpenCV image: convert BGR -> PIL RGB
        img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    draw = ImageDraw.Draw(img)
    fontText = ImageFont.truetype(fontPath, textSize, encoding="utf-8")
    draw.text((left, top), text, textColor, font=fontText)
    # Convert back to OpenCV's BGR layout for the caller.
    return cv2.cvtColor(numpy.asarray(img), cv2.COLOR_RGB2BGR)
198521 | <reponame>Rig0ur/VKAnalysis<gh_stars>10-100
# -*- coding: utf-8 -*-
"""
@author: migalin
@contact: https://migalin.ru
@license Apache License, Version 2.0, see LICENSE file
Copyright (C) 2018
"""
import os
from PyQt5 import QtWidgets, QtGui, QtCore
from .config import *
from .MenuItemWidget import VKMenuItemWidget
class VKMenuWidget(QtWidgets.QDialog):
    """
    Application menu widget.

    Shows the application header (logo, signed-in user) and a scrollable grid
    of analysis modules loaded from VKAnalysisLoader.
    """
    def __init__(self, vk, parent=None):
        """
        Widget constructor.

        :param vk: VK client object (provides get_api() and an http session)
        :param parent: parent Qt object
        """
        super(VKMenuWidget, self).__init__(parent=parent)
        self.vk = vk
        self.initUI()

    def run_dialog(self, name):
        """
        Run the named analysis module as a modal dialog.

        :param name: module name — a key of VKAnalysisLoader
        """
        VKAnalysisLoader[name](self.vk).exec_()

    def initUI(self):
        """
        Build the user interface: window chrome, header (logo + current user
        fetched from the VK API) and the scrollable module grid.
        """
        self.setWindowTitle("VKAnalysis " + str(VKAnalysisInfo['version']))
        self.setStyleSheet("background-color: white;")
        self.setFixedSize(1100, 800)
        self.layout = QtWidgets.QVBoxLayout()
        self.header_layout = QtWidgets.QGridLayout()
        self.logo = QtWidgets.QLabel()
        self.dirname = os.path.dirname(__file__)
        pic = QtGui.QPixmap()
        # Logo and window icon are optional: only set when the files exist.
        logo_path = os.path.join(self.dirname, 'images/logo_small.PNG')
        if os.path.isfile(logo_path):
            pic.loadFromData(open(logo_path, 'rb').read())
            pic = pic.scaledToHeight(64)
            self.logo.setPixmap(pic)
        icon_path = os.path.join(self.dirname, 'images/icon.ico')
        if os.path.isfile(icon_path):
            self.setWindowIcon(QtGui.QIcon(icon_path))
        self.header_layout.addWidget(self.logo, 0, 0)
        # Current user: avatar and "signed in as" caption from the VK API.
        self.user_photo = QtWidgets.QLabel()
        self.user_photo_caption = QtWidgets.QLabel()
        api = self.vk.get_api()
        info = api.users.get(fields='photo_100')[0]
        photo_url = info['photo_100']
        pic.loadFromData(self.vk.http.get(photo_url).content)
        self.user_photo.setPixmap(pic)
        self.user_photo_caption.setText("<html>Вы вошли как <b>" + info['first_name'] + "</b></html>")
        self.user_photo_caption.setFont(QtGui.QFont("Times", 12))
        # Wide spacer label pushes the user block to the right edge.
        margin = QtWidgets.QLabel()
        margin.setFixedWidth(970)
        self.header_layout.addWidget(margin, 0,1)
        self.header_layout.addWidget(self.user_photo_caption, 0 ,2, QtCore.Qt.AlignRight)
        self.header_layout.addWidget(self.user_photo, 0, 3, QtCore.Qt.AlignLeft)
        # Scrollable area hosting the two-column grid of menu items.
        self.scroll = QtWidgets.QScrollArea()
        self.scroll.setWidgetResizable(True)
        #self.scroll.setFixedHeight(500)
        self.mygroupbox = QtWidgets.QGroupBox()
        self.scroll.setStyleSheet("border:0; background-color: white;")
        self.scroll.setWidget(self.mygroupbox)
        menu = VKAnalysisLoader.keys()
        self.menu = []
        self.menu_layout = QtWidgets.QGridLayout()
        for i, item in enumerate(menu):
            mi = VKMenuItemWidget(item)
            self.menu.append(mi)
            mi.setParent(self.mygroupbox)
            self.menu_layout.addWidget(mi,i//2, i%2)
            mi.clicked.connect(self.run_dialog)
        self.mygroupbox.setLayout(self.menu_layout)
        self.mygroupbox.resize(self.mygroupbox.width(), len(self.menu)*150+150)
        self.scroll.setWidget(self.mygroupbox)
        self.layout.addLayout(self.header_layout)
        self.layout.addWidget(self.scroll)
        self.setLayout(self.layout)
        self.scroll.update()
36272 | from scipy import linalg
from sklearn.decomposition import PCA
from scipy.optimize import linear_sum_assignment as linear_assignment
import numpy as np
"""
A function that takes a list of clusters, and a list of centroids for each cluster, and outputs the N max closest images in each cluster to its centroids
"""
def closest_to_centroid(clusters, centroids, nb_closest=20):
    """For every cluster, flag the nb_closest members nearest its centroid.

    Returns one boolean list per centroid, aligned with the cluster's
    ORIGINAL element order: True marks an element whose distance to the
    centroid is among the nb_closest smallest. When a cluster has more than
    nb_closest members, distances are computed in a PCA-reduced space.

    BUG FIX vs. the original: `duplicate_distances = distances` only aliased
    the list, so the subsequent in-place `sort()` destroyed the original
    ordering and the mask was computed against the *sorted* order. The sort
    is now done on a copy, keeping the mask aligned with the input order.
    (Ties at the cut-off distance may still flag more than nb_closest
    elements — same membership semantics as the original.)
    """
    output = [[] for _ in range(len(centroids))]
    for i in range(len(centroids)):
        centroid = centroids[i]
        cluster = clusters[i]
        try:
            # move any CUDA tensors to CPU first
            cluster_cpu = [x.cpu() if x.is_cuda else x for x in cluster]
        except Exception:
            # plain arrays/lists have no .is_cuda attribute
            cluster_cpu = cluster
        cluster = [list(x) for x in cluster_cpu]
        nb_components = 7 if len(cluster) > 10 else len(cluster) - 1
        pca = PCA(n_components=nb_components)  # args.sty_dim)
        if len(cluster) > nb_closest:
            cluster = pca.fit_transform(cluster)
            centroid = centroid.reshape(1, -1)
            centroid = pca.transform(centroid)
        distances = [linalg.norm(x - centroid) for x in cluster]
        # sort a COPY so `distances` keeps the cluster's element order
        smallest = sorted(distances)[:nb_closest]
        output[i] = [d in smallest for d in distances]
    return output
def cluster_acc(y_true, y_pred):
    """Clustering accuracy under the best one-to-one label mapping.

    Builds the contingency matrix between predicted and true labels, then
    solves the optimal assignment (Hungarian algorithm) to find the label
    permutation maximising agreement.

    Args:
        y_true: ground-truth labels, numpy array of shape (n_samples,).
        y_pred: predicted cluster labels, numpy array of shape (n_samples,).

    Returns:
        Accuracy as a float in [0, 1].
    """
    y_true = y_true.astype(np.int64)
    assert y_pred.size == y_true.size
    n_labels = max(y_pred.max(), y_true.max()) + 1
    contingency = np.zeros((n_labels, n_labels), dtype=np.int64)
    for pred, true in zip(y_pred, y_true):
        contingency[pred, true] += 1
    # maximize agreement == minimize (max - count) cost
    row_ind, col_ind = linear_assignment(contingency.max() - contingency)
    matched = contingency[row_ind, col_ind].sum()
    return matched * 1.0 / y_pred.size
| StarcoderdataPython |
class Node:
    """Single element of a singly linked list."""

    def __init__(self, data):
        self.data = data  # payload carried by this node
        self.next = None  # following node, or None at the tail
class LinkedList:
    """Minimal singly linked list holding a reference to its head node."""

    def __init__(self):
        self.head = None

    def print_llist(self):
        """Print each node's data from head to tail, one value per line."""
        current = self.head
        while current is not None:
            print(current.data)
            current = current.next
# Build a three-node list 1 -> 2 -> 3 and print its contents.
llist = LinkedList()
llist.head = Node(1)
second = Node(2)
third = Node(3)
llist.head.next = second  # link first node with second
second.next = third       # link second node with the third node
llist.print_llist()
140497 | """
Copyright 2017-present, Airbnb Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime, timedelta
import json
import os
import time
import zlib
import boto3
from botocore import client
from botocore.exceptions import ClientError
from botocore.vendored.requests.exceptions import Timeout
from botocore.vendored.requests.packages.urllib3.exceptions import TimeoutError
from stream_alert.shared import LOGGER
class LookupTables(object):
    """Lookup Tables to useful information which can be referenced from rules.

    Loads JSON lookup tables from S3 into memory, with a class-level refresh
    timestamp enforcing the configured cache-refresh interval.
    NOTE: uses ``dict.iteritems()`` — this code targets Python 2.
    """

    # class-wide: last time the tables were (re)loaded, shared by all callers
    _LOOKUP_TABLES_LAST_REFRESH = datetime(year=1970, month=1, day=1)

    # Explicitly set timeout for S3 connection. The default timeout is 60 seconds.
    BOTO_TIMEOUT = 10

    def __init__(self, buckets_info):
        # buckets_info maps bucket name -> list of JSON object keys
        boto_config = client.Config(
            connect_timeout=self.BOTO_TIMEOUT,
            read_timeout=self.BOTO_TIMEOUT
        )
        self._s3_client = boto3.resource('s3', config=boto_config)
        self._buckets_info = buckets_info

    def download_s3_objects(self):
        """Download S3 files (json format) from S3 buckets into memory.

        On a client error or timeout the partially-filled dict collected so
        far is returned rather than raising.

        Returns:
            dict: A dictionary contains information loaded from S3. The file name
                will be the key, and value is file content in json format.
        """
        _lookup_tables = {}

        for bucket, files in self._buckets_info.iteritems():
            for json_file in files:
                try:
                    start_time = time.time()
                    s3_object = self._s3_client.Object(bucket, json_file).get()
                    size_kb = round(s3_object.get('ContentLength') / 1024.0, 2)
                    size_mb = round(size_kb / 1024.0, 2)
                    display_size = '{}MB'.format(size_mb) if size_mb else '{}KB'.format(size_kb)
                    LOGGER.info('Downloaded S3 file size %s and updated lookup table %s',
                                display_size, json_file)
                    data = s3_object.get('Body').read()
                except ClientError as err:
                    LOGGER.error('Encounterred error while downloading %s from %s, %s',
                                 json_file, bucket, err.response['Error']['Message'])
                    return _lookup_tables
                except (Timeout, TimeoutError):
                    # Catching TimeoutError will catch both `ReadTimeoutError` and
                    # `ConnectionTimeoutError`.
                    LOGGER.error('Reading %s from S3 is timed out.', json_file)
                    return _lookup_tables

                # The lookup data can optionally be compressed, so try to decompress
                # This will fall back and use the original data if decompression fails
                try:
                    # wbits=47: auto-detect zlib or gzip header
                    data = zlib.decompress(data, 47)
                except zlib.error:
                    LOGGER.debug('Data in \'%s\' is not compressed', json_file)

                # table name is the object key without its extension
                table_name = os.path.splitext(json_file)[0]
                _lookup_tables[table_name] = json.loads(data)

                total_time = time.time() - start_time
                LOGGER.info('Downloaded S3 file %s seconds', round(total_time, 2))

        return _lookup_tables

    @classmethod
    def load_lookup_tables(cls, config):
        """Load arbitrary json files to memory from S3 buckets when lookup table enabled

        The lookup tables will also be refreshed based on "cache_refresh_minutes" setting
        in the config.

        Args:
            config (dict): Loaded configuration from 'conf/' directory

        Returns:
            Return False if lookup table enabled or missing config. Otherwise, it
            will return an instance of LookupTables class.
        """
        lookup_tables = config['global']['infrastructure'].get('lookup_tables')
        if not (lookup_tables and lookup_tables.get('enabled', False)):
            return False

        buckets_info = lookup_tables.get('buckets')
        if not buckets_info:
            LOGGER.error('Buckets not defined')
            return False

        lookup_refresh_interval = lookup_tables.get('cache_refresh_minutes', 10)
        now = datetime.utcnow()
        refresh_delta = timedelta(minutes=lookup_refresh_interval)
        needs_refresh = cls._LOOKUP_TABLES_LAST_REFRESH + refresh_delta < now
        if not needs_refresh:
            LOGGER.debug('lookup tables do not need refresh (last refresh time: %s; '
                         'current time: %s)', cls._LOOKUP_TABLES_LAST_REFRESH, now)
            return False

        LOGGER.info('Refreshing lookup tables (last refresh time: %s; current time: %s)',
                    cls._LOOKUP_TABLES_LAST_REFRESH, now)
        cls._LOOKUP_TABLES_LAST_REFRESH = now

        return cls(buckets_info)
| StarcoderdataPython |
47183 | import os
from argparse import SUPPRESS
import numpy as np
from pysam import Samfile, Fastafile
from scipy.stats import scoreatpercentile
# Internal
from rgt.Util import GenomeData, HmmData, ErrorHandler
from rgt.GenomicRegionSet import GenomicRegionSet
from rgt.HINT.biasTable import BiasTable
from rgt.HINT.signalProcessing import GenomicSignal
def tracks_args(parser):
    """Register all command-line options of the ``tracks`` sub-command on *parser*."""
    add = parser.add_argument

    # Parameters Options
    add("--organism", type=str, metavar="STRING", default="hg19",
        help="Organism considered on the analysis. Must have been setup in the RGTDATA folder. "
             "Common choices are hg19, hg38. mm9, and mm10. DEFAULT: hg19")
    add("--bias-table", type=str, metavar="FILE1_F,FILE1_R", default=None,
        help="Bias table files used to generate bias corrected tracks. DEFAULT: None")

    # Hidden Options: developer knobs, suppressed from --help output
    for flag, default in (("--initial-clip", 50),
                          ("--downstream-ext", 1),
                          ("--upstream-ext", 0),
                          ("--forward-shift", 5),
                          ("--reverse-shift", -4),
                          ("--k-nb", 6)):
        add(flag, type=int, metavar="INT", default=default, help=SUPPRESS)

    # Output Options
    add("--raw", action="store_true", default=False,
        help="If set, the raw signals from DNase-seq or ATAC-seq data will be generated. DEFAULT: False")
    add("--bc", action="store_true", default=False,
        help="If set, the bias corrected signals from DNase-seq or ATAC-seq data will be generated. "
             "DEFAULT: False")
    add("--norm", action="store_true", default=False,
        help="If set, the normalised signals from DNase-seq or ATAC-seq data will be generated. "
             "DEFAULT: False")
    add("--bigWig", action="store_true", default=False,
        help="If set, all .wig files will be converted to .bw files. DEFAULT: False")
    add("--strand-specific", action="store_true", default=False,
        help="If set, the tracks will be splitted into two files, one for forward and another for "
             "reverse strand. DEFAULT: False")

    # Output Options
    add("--output-location", type=str, metavar="PATH", default=os.getcwd(),
        help="Path where the output bias table files will be written. DEFAULT: current directory")
    add("--output-prefix", type=str, metavar="STRING", default="tracks",
        help="The prefix for results files. DEFAULT: tracks")

    add('input_files', metavar='reads.bam regions.bed', type=str, nargs='*',
        help='BAM file of reads and BED files of interesting regions')
def tracks_run(args):
    """Dispatch track generation according to the parsed CLI flags.

    Runs the raw-signal pass when ``--raw`` was given and the bias-corrected
    pass when ``--bc`` was given; both may run in one invocation.
    """
    # Each (flag, generator) pair is independent of the other.
    for enabled, generator in ((args.raw, get_raw_tracks), (args.bc, get_bc_tracks)):
        if enabled:
            generator(args)
def get_raw_tracks(args):
    """Write raw (optionally normalised) cut-count signal to a wig file.

    Expects two positional inputs in ``args.input_files``: a BAM file of reads
    and a BED file of regions. For every merged region the 5' cut sites
    (shifted by the forward/reverse shift options) are counted and appended as
    a fixedStep wig block; the result is optionally converted to bigWig.
    """
    # Initializing Error Handler
    err = ErrorHandler()

    if len(args.input_files) != 2:
        err.throw_error("ME_FEW_ARG", add_msg="You must specify reads and regions file.")

    output_fname = os.path.join(args.output_location, "{}.wig".format(args.output_prefix))

    bam = Samfile(args.input_files[0], "rb")
    regions = GenomicRegionSet("Interested regions")
    regions.read(args.input_files[1])
    regions.merge()
    reads_file = GenomicSignal()

    with open(output_fname, "a") as output_f:
        for region in regions:
            # Raw counts: one slot per base of the region.
            signal = [0.0] * (region.final - region.initial)
            for read in bam.fetch(region.chrom, region.initial, region.final):
                # Forward reads are shifted by forward_shift, reverse reads by
                # reverse_shift, so the recorded position is the cut site.
                if not read.is_reverse:
                    cut_site = read.pos + args.forward_shift
                    if region.initial <= cut_site < region.final:
                        signal[cut_site - region.initial] += 1.0
                else:
                    cut_site = read.aend + args.reverse_shift - 1
                    if region.initial <= cut_site < region.final:
                        signal[cut_site - region.initial] += 1.0

            if args.norm:
                signal = reads_file.boyle_norm(signal)
                perc = scoreatpercentile(signal, 98)
                std = np.std(signal)
                signal = reads_file.hon_norm_atac(signal, perc, std)

            output_f.write("fixedStep chrom=" + region.chrom + " start=" + str(region.initial + 1) + " step=1\n" +
                           "\n".join([str(e) for e in np.nan_to_num(signal)]) + "\n")
    # BUG FIX: removed the redundant output_f.close() -- the "with" statement
    # already closes the file when the block exits.

    if args.bigWig:
        genome_data = GenomeData(args.organism)
        chrom_sizes_file = genome_data.get_chromosome_sizes()
        bw_filename = os.path.join(args.output_location, "{}.bw".format(args.output_prefix))
        os.system(" ".join(["wigToBigWig", output_fname, chrom_sizes_file, bw_filename, "-verbose=0"]))
        os.remove(output_fname)
def get_bc_tracks(args):
    """Write bias-corrected signal tracks to wig (and optionally bigWig) files.

    Expects two positional inputs in ``args.input_files``: a BAM file of reads
    and a BED file of regions. Uses the supplied (or default ATAC-seq) bias
    tables to compute bias-corrected cut counts per merged region. With
    ``--strand-specific`` two files are produced and the reverse-strand values
    are negated; otherwise a single combined track is written.
    """
    # Initializing Error Handler
    err = ErrorHandler()

    if len(args.input_files) != 2:
        err.throw_error("ME_FEW_ARG", add_msg="You must specify reads and regions file.")

    regions = GenomicRegionSet("Interested regions")
    regions.read(args.input_files[1])
    regions.merge()

    reads_file = GenomicSignal()

    bam = Samfile(args.input_files[0], "rb")
    genome_data = GenomeData(args.organism)
    fasta = Fastafile(genome_data.get_genome())

    hmm_data = HmmData()
    # Fall back to the packaged default ATAC-seq bias tables when none given.
    if args.bias_table:
        bias_table_list = args.bias_table.split(",")
        bias_table = BiasTable().load_table(table_file_name_F=bias_table_list[0],
                                            table_file_name_R=bias_table_list[1])
    else:
        table_F = hmm_data.get_default_bias_table_F_ATAC()
        table_R = hmm_data.get_default_bias_table_R_ATAC()
        bias_table = BiasTable().load_table(table_file_name_F=table_F,
                                            table_file_name_R=table_R)

    if args.strand_specific:
        fname_forward = os.path.join(args.output_location, "{}_forward.wig".format(args.output_prefix))
        fname_reverse = os.path.join(args.output_location, "{}_reverse.wig".format(args.output_prefix))

        f_forward = open(fname_forward, "a")
        f_reverse = open(fname_reverse, "a")
        for region in regions:
            signal_f, signal_r = reads_file.get_bc_signal_by_fragment_length(
                ref=region.chrom, start=region.initial, end=region.final, bam=bam, fasta=fasta, bias_table=bias_table,
                forward_shift=args.forward_shift, reverse_shift=args.reverse_shift, min_length=None, max_length=None,
                strand=True)

            if args.norm:
                signal_f = reads_file.boyle_norm(signal_f)
                perc = scoreatpercentile(signal_f, 98)
                std = np.std(signal_f)
                signal_f = reads_file.hon_norm_atac(signal_f, perc, std)

                signal_r = reads_file.boyle_norm(signal_r)
                perc = scoreatpercentile(signal_r, 98)
                std = np.std(signal_r)
                signal_r = reads_file.hon_norm_atac(signal_r, perc, std)

            f_forward.write("fixedStep chrom=" + region.chrom + " start=" + str(region.initial + 1) + " step=1\n" +
                            "\n".join([str(e) for e in np.nan_to_num(signal_f)]) + "\n")

            # Reverse-strand values are negated so they plot below the axis.
            f_reverse.write("fixedStep chrom=" + region.chrom + " start=" + str(region.initial + 1) + " step=1\n" +
                            "\n".join([str(-e) for e in np.nan_to_num(signal_r)]) + "\n")

        f_forward.close()
        f_reverse.close()

        if args.bigWig:
            genome_data = GenomeData(args.organism)
            chrom_sizes_file = genome_data.get_chromosome_sizes()

            bw_filename = os.path.join(args.output_location, "{}_forward.bw".format(args.output_prefix))
            os.system(" ".join(["wigToBigWig", fname_forward, chrom_sizes_file, bw_filename, "-verbose=0"]))
            os.remove(fname_forward)

            bw_filename = os.path.join(args.output_location, "{}_reverse.bw".format(args.output_prefix))
            os.system(" ".join(["wigToBigWig", fname_reverse, chrom_sizes_file, bw_filename, "-verbose=0"]))
            os.remove(fname_reverse)
    else:
        output_fname = os.path.join(args.output_location, "{}.wig".format(args.output_prefix))
        with open(output_fname, "a") as output_f:
            for region in regions:
                signal = reads_file.get_bc_signal_by_fragment_length(ref=region.chrom, start=region.initial,
                                                                     end=region.final,
                                                                     bam=bam, fasta=fasta, bias_table=bias_table,
                                                                     forward_shift=args.forward_shift,
                                                                     reverse_shift=args.reverse_shift,
                                                                     min_length=None, max_length=None, strand=False)
                if args.norm:
                    signal = reads_file.boyle_norm(signal)
                    perc = scoreatpercentile(signal, 98)
                    std = np.std(signal)
                    signal = reads_file.hon_norm_atac(signal, perc, std)
                output_f.write("fixedStep chrom=" + region.chrom + " start=" + str(region.initial + 1) + " step=1\n" +
                               "\n".join([str(e) for e in np.nan_to_num(signal)]) + "\n")
        # BUG FIX: removed the redundant output_f.close() -- the "with" block
        # already closes the file.

        if args.bigWig:
            genome_data = GenomeData(args.organism)
            chrom_sizes_file = genome_data.get_chromosome_sizes()
            bw_filename = os.path.join(args.output_location, "{}.bw".format(args.output_prefix))
            os.system(" ".join(["wigToBigWig", output_fname, chrom_sizes_file, bw_filename, "-verbose=0"]))
            os.remove(output_fname)
| StarcoderdataPython |
3265218 | # -*- coding: utf-8 -*-
"""
# --------------------------------------------------------
# @Project: YNet
# @Author : panjq
# @E-mail : <EMAIL>
# @Date : 2020-01-06 08:59:23
# --------------------------------------------------------
"""
import numpy as np
import tensorflow as tf
import math
def loss_fun(target, prediction, gamma=1.0, loss_type="l2"):
    """Compute the requested regression loss, scaled by *gamma*.

    :param loss_type: "l1" for mean absolute error, "l2" for mean squared
        error; any other value raises.
    """
    dispatch = {"l1": l1_loss, "l2": l2_loss}
    if loss_type not in dispatch:
        raise Exception("Error:{}".format(loss_type))
    return dispatch[loss_type](target, prediction, gamma)
def l2_loss(target, prediction, gamma=1.0):
    """Mean squared error between *target* and *prediction*, scaled by *gamma*."""
    squared_diff = tf.square(target - prediction)
    return gamma * tf.reduce_mean(squared_diff)
def l1_loss(target, prediction, gamma=1.0):
    """Mean absolute error between *target* and *prediction*, scaled by *gamma*."""
    abs_diff = tf.abs(target - prediction)
    return gamma * tf.reduce_mean(abs_diff)
def psnr(target, prediction):
    """Mean PSNR over a batch, assuming pixel values in [0, 1] (max_val = 1).

    PSNR = -10 * log10(MSE) = (-10 / ln(10)) * ln(MSE), computed per batch
    element and then averaged.
    """
    squares = tf.square(target - prediction, name='squares')
    # Flatten each sample so the MSE is taken per batch element.
    squares = tf.reshape(squares, [tf.shape(squares)[0], -1])
    # mean psnr over a batch
    # BUG FIX: the original called tf.compat.v1.disp, which does not exist in
    # the TensorFlow API; the formula requires the natural logarithm.
    p = (-10 / np.log(10)) * tf.math.log(tf.reduce_mean(squares, axis=[1]))
    p = tf.reduce_mean(p)
    return p
def psnr_tf(target, prediction):
    """Batch-mean PSNR computed via tf.image.psnr (max_val = 1.0)."""
    per_image = tf.image.psnr(target, prediction, max_val=1.0)
    return tf.reduce_mean(per_image)
def psnr_keras(y_true, y_pred):
    """PSNR computed with Keras backend ops, assuming a peak value of 1.0."""
    max_pixel = 1.0
    mse = tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true))
    return 10.0 * math.log10((max_pixel ** 2) / mse)
def psnr_numpy(im1, im2):
    """Peak signal-to-noise ratio for uint8 images in [0, 255].

    :param im1: reference image (array-like)
    :param im2: test image (array-like, same shape)
    :return: PSNR in decibels (uses peak value 255)
    """
    # BUG FIX: cast before subtracting. With uint8 inputs (as the original
    # docstring promised) the subtraction wraps around (e.g. 0 - 1 == 255),
    # silently corrupting the MSE. float64 arithmetic is exact here.
    diff = np.asarray(im1, dtype=np.float64) - np.asarray(im2, dtype=np.float64)
    mse = np.mean(np.square(diff))
    p = 10 * np.log10(255 * 255 / mse)  # peak value 255 for uint8, [0,255]
    # For float images in [0, 1] the peak would be 1: 10 * np.log10(1 / mse)
    return p
def psnr_skimage(im1, im2):
    """Peak signal-to-noise ratio computed via scikit-image.

    Uses a data range of 255, i.e. intended for uint8 images in [0, 255].

    :param im1: reference image
    :param im2: test image
    :return: PSNR in decibels
    """
    # Imported lazily so the module does not require scikit-image unless this
    # helper is actually called.
    import skimage
    # p = skimage.measure.compare_psnr(im1, im2, 255) # for uint8,[0,255]
    # NOTE(review): the original trailing comment said "float32,[0,1]", but the
    # data range passed here is 255 (the uint8 scale). Also,
    # skimage.measure.compare_psnr was deprecated and later removed in favour
    # of skimage.metrics.peak_signal_noise_ratio -- confirm installed version.
    p = skimage.measure.compare_psnr(im1, im2, 255)
    return p
if __name__ == "__main__":
    # Smoke test: compare the PSNR implementations on two constant images that
    # differ by 0.022 per pixel.
    data1 = np.zeros(shape=(1, 100, 100, 3))-0.01
    data2 = np.zeros(shape=(1, 100, 100, 3)) + 0.012
    p1 = psnr_keras(data1, data2)
    p2 = psnr_numpy(data1, data2)
    p3 = psnr_skimage(data1, data2)
    p4 = psnr(data1, data2)
    p5 = psnr_tf(data1, data2)
    # NOTE(review): psnr_numpy/psnr_skimage use the 255 (uint8) peak while the
    # inputs here are floats near 0, so the printed values are not directly
    # comparable across implementations.
    print(p1)
    print(p2)
    print(p3)
    print(p4)
    print(p5)
| StarcoderdataPython |
3310101 | <reponame>chaptergy/clothing-color-changer
import numpy as np
import cv2
import os.path
import warnings
import myLogger as log
def video_to_images(video_path, max_fps=20, max_size=None):
    """
    Converts a video into a list of images and returns it. If necessary, it lowers the framerate and image size.
    :param video_path: Path to the video
    :param max_fps: Maximum frames per second for the output file
    :param max_size: The maximum size the video should have. To disable resizing use None
    :type max_size: (int width, int height) or None
    :return: List of images
    """
    cap = cv2.VideoCapture(video_path)

    if not cap.isOpened():
        raise IOError('Error loading the video file!')

    fps = int(round(cap.get(cv2.CAP_PROP_FPS)))

    frames_to_drop = []
    # BUG FIX: use the short-circuiting boolean operators instead of the
    # bitwise "&"/"|" the original used (bitwise operators evaluate both
    # operands unconditionally).
    if (max_fps > 0) and (fps > max_fps):
        nr_of_frames_to_be_removed = fps - max_fps
        # calculate, which frames should be dropped (in regular intervals)
        frames_to_drop = np.linspace(-1, fps - 1, nr_of_frames_to_be_removed + 1, endpoint=True).round()
        frames_to_drop = frames_to_drop[1:]  # remove first element (always -1)
        log.info(len(frames_to_drop), "frames are dropped per second, to reduce the fps from", fps, "to", max_fps)

    img_list = []
    frame_counter = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            # reduce framerate
            frame_counter += 1
            if np.in1d((frame_counter % (fps + 1)), frames_to_drop):  # drop frame if in drop array
                continue

            # shrink image, keeping the aspect ratio.
            # BUG FIX: with the original bitwise "&", frame.shape[0] > max_size[0]
            # was evaluated even when max_size is None, raising a TypeError.
            if (max_size is not None) and ((frame.shape[0] > max_size[0]) or (frame.shape[1] > max_size[1])):
                perc = min(max_size[0] / frame.shape[0], max_size[1] / frame.shape[1])
                frame = cv2.resize(frame, None, fx=perc, fy=perc)

            img_list.append(frame)
        else:
            break

    cap.release()
    return img_list
def images_to_video(img_list, destination_path='output.avi', fps=20.):
    """
    Converts a list of images to a video and saves it to file
    :param img_list: List of images
    :param destination_path: Path to the file where the video should be saved
    :param fps: The frames per second of the output video
    """
    try:
        # If file already exists, delete it
        os.remove(destination_path)
    except OSError:
        pass

    filename, file_extension = os.path.splitext(destination_path)
    if file_extension != '.avi':
        warnings.warn("When the file extension is not .avi, it may happen, that no file will be saved!")

    height, width, channels = img_list[0].shape  # determine the frame size from the first image

    # VideoWriter needs to have the right codec depending on the system, so try multiple fourcc codecs
    counter = 0
    fourcc_array = ['X264', 'XVID', 'DIVX', 'MJPG', 'MRLE', 'Custom']
    while counter < len(fourcc_array):  # Try to find a working codec
        if fourcc_array[counter] != 'Custom':
            fourcc = cv2.VideoWriter_fourcc(*fourcc_array[counter])
        else:
            # When setting fourcc to -1, a dialog will show at runtime
            # allowing the user to select one of the availabe codecs
            fourcc = -1

        out = cv2.VideoWriter(destination_path, fourcc, fps, (width, height), True)
        for img in img_list:
            out.write(np.uint8(img))
        out.release()

        try:
            # If file was saved successfully, so file size is larger than 5 bytes
            if os.path.getsize(destination_path) > 5:
                if counter > 0:
                    log.debug("Saving with codec(s)", ", ".join([item for item in fourcc_array[:counter]]),
                              "failed.")
                log.info("File", destination_path, "was saved with codec", fourcc_array[counter])
                return
        except OSError:
            # BUG FIX: narrowed the original bare "except:". os.path.getsize
            # raises OSError when the codec wrote nothing; fall through and try
            # the next codec instead of swallowing every exception.
            pass
        counter += 1

    # BUG FIX: added the missing space after "save" in the error message.
    raise Exception("Unable to save " + str(destination_path) + "!")
1689991 | <filename>setup.py<gh_stars>1-10
from setuptools import setup, find_packages
# with open('README.md') as f:
#     readme = f.read()

with open('LICENSE') as f:
    # Renamed from "license" to avoid shadowing the builtin of the same name.
    license_text = f.read()

setup(
    name="bitmex_trader_bot",
    version="0.0.1",
    description="Trade with bitmex bot",
    # long_description=readme,
    author="no-coin-no-life",
    author_email="<EMAIL>",
    url="https://github.com/no-coin-no-life/bitmex-trader-bot",
    license=license_text,
    # BUG FIX: exclude must be an iterable of patterns. The original passed the
    # bare string "tests", which setuptools iterates character by character
    # ('t', 'e', 's', 't', 's'), so the tests package was never excluded.
    packages=find_packages(exclude=("tests",))
)
1661496 | # encoding: utf-8
'''
组合策略测试
'''
import sys
sys.path.append('../../')
from vnpy.app.cta_strategy.strategies.strategyMulti import MultiStrategy
import argparse
import pandas as pd
import numpy as np
from datetime import datetime
from setup_logger import setup_logger
setup_logger(filename='logsBackTest/vnpy_{0}.log'.format(datetime.now().strftime('%m%d_%H%M')), debug=False)
from vnpy.app.cta_strategy.backtesting import BacktestingEngine, OptimizationSetting
from vnpy.app.cta_strategy.backtestingPatch import BacktestingEnginePatch
from datetime import datetime,date,timedelta
import time
import json
import traceback
########################################################################
'''
backtesting
'''
def backtesting(settingFile, kLineCycle = 30, vt_symbol = 'rb1801', vt_symbol2 = None, mode = 'B', startDate = None, days = 1, historyDays = 0, optimization = False):
    """Run a back-test of MultiStrategy over the requested period.

    :param settingFile: JSON file with the strategy settings.
    :param kLineCycle: K-line cycle passed to the strategy.
    :param vt_symbol: contract symbol to test.
    :param vt_symbol2: unused here; kept for CLI compatibility (spread symbol).
    :param mode: unused here; kept for CLI compatibility.
    :param startDate: 'YYYYMMDD' start date; when omitted, the window ends today.
    :param days: number of days to test.
    :param historyDays: extra warm-up days prepended when startDate is omitted.
    :param optimization: unused here; kept for CLI compatibility.
    """
    # 创建回测引擎 (create the backtesting engine)
    engine = BacktestingEnginePatch()

    # 设置回测用的数据起始日期 (resolve the backtest data window)
    if startDate:
        # (removed the original no-op "startDate = startDate" assignment)
        endDate = datetime.strptime(startDate, '%Y%m%d') + timedelta(days)
    else:
        startDate = date.today() - timedelta(days + historyDays)
        endDate = date.today()

    engine.set_parameters(
        vt_symbol=vt_symbol,
        interval="1m",
        start= startDate,
        end=endDate,
        rate=1/10000,
        slippage=1,
        size=10,
        pricetick=1,
        capital=1_000_000,
    )

    setting = {}
    setting['vt_symbol'] = vt_symbol
    setting['kLineCycle'] = kLineCycle
    setting['settingFile'] = settingFile
    engine.add_strategy(MultiStrategy, setting)

    engine.load_data()
    engine.run_backtesting()
    df = engine.calculate_result()
    engine.calculate_statistics()
    #engine.show_chart()

    # 显示回测结果 (show the backtesting result)
    resultList = engine.showBacktestingResult()

    # try:
    #     engine.showDailyResult()
    # except:
    #     print ('-' * 20)
    #     print ('Failed to showDailyResult')
    #     #traceback.print_exc()
    #     pass

    try:
        # 显示定单信息 (show order details)
        import pandas as pd
        orders = pd.DataFrame([i.__dict__ for i in resultList['resultList']])
        try:
            orders['holdTime'] = (orders.exitDt - orders.entryDt).astype('timedelta64[m]')
        except Exception:
            # holdTime is best-effort; entry/exit timestamps may be missing.
            pass
        pd.options.display.max_rows = 100
        pd.options.display.width = 300
        pd.options.display.precision = 2
        engine.output ('-' * 50)
        engine.output(str(orders))
    except Exception:
        # BUG FIX: narrowed the bare "except:" so KeyboardInterrupt/SystemExit
        # are no longer swallowed; the reporting remains best-effort.
        print ('-' * 20)
        print ('Failed to print result')
        #traceback.print_exc()

    try:
        # 显示详细信息 (plot the detailed candle chart)
        import pandas as pd
        from utils import plot_candles, plot_candles1
        import talib
        import numpy as np
        # analysis
        #engine.loadHistoryData()
        orders = pd.DataFrame([i.__dict__ for i in resultList['resultList']])
        pricing = pd.DataFrame([i.__dict__ for i in engine.history_data])

        #VPIN analysis
        # from .VPINAnalysis import VPINAnalysis
        # if len(pricing.index) > 1000:
        #     VPINAnalysis(pricing)

        atr = talib.ATR(pricing.high_price.values, pricing.low_price.values, pricing.close_price.values, 25)
        atr_ma = pd.DataFrame(atr).rolling(25).mean()[0].values
        technicals = {
            'rsi': talib.RSI(pricing.close_price.values, 4),
            'atr': atr,
            'atr-ma': atr_ma
        }
        technicals = {}  # NOTE: overrides the dict above; overlays are disabled.
        plot_candles1(pricing, volume_bars=True, orders=orders, technicals=technicals)
    except Exception:
        # BUG FIX: narrowed the bare "except:" here as well.
        print ('-' * 20)
        print ('Failed to plot candles')
        traceback.print_exc()
def _parse_bool(text):
    """Interpret common command-line spellings of booleans.

    BUG FIX helper: argparse's ``type=bool`` treats any non-empty string as
    True, so ``-o False`` used to enable optimization. This converter accepts
    the usual spellings ("1"/"true"/"t"/"yes"/"y", case-insensitive) and maps
    everything else to False; ``-o True`` keeps working as before.
    """
    if isinstance(text, bool):
        return text
    return text.strip().lower() in ('1', 'true', 't', 'yes', 'y')


def main(argv):
    """Parse the command-line options and run the back-test."""
    # setup the argument parser
    arg_parser = argparse.ArgumentParser(description='backtest')
    arg_parser.add_argument('-m', '--mode',
                            required=False,
                            default='B',
                            help="set backtest mode(B or T)")
    arg_parser.add_argument('-d', '--days',
                            required=False,
                            default=1,
                            type = int,
                            help="set backtest days")
    arg_parser.add_argument('-sd', '--startDate',
                            required=False,
                            default='',
                            help="set backtest days")
    arg_parser.add_argument('-s', '--vt_symbol',
                            required=False,
                            default='rb1801',
                            help="set backtest vt_symbol")
    arg_parser.add_argument('-s2', '--vt_symbol2',
                            required=False,
                            default='',
                            help="set spread vt_symbol2")
    arg_parser.add_argument('-hd', '--historyDays',
                            required=False,
                            default=0,
                            type = int,
                            help="set history days")
    arg_parser.add_argument('-sf', '--settingFile',
                            required=False,
                            default='CTA_setting_multi.json',
                            help="setting file name")
    arg_parser.add_argument('-o', '--optimization',
                            required=False,
                            default=False,
                            type = _parse_bool,
                            help="parameter optimization")
    arg_parser.add_argument('-yappi', '--yappi',
                            required=False,
                            default=False,
                            type = _parse_bool,
                            help="yappi status")

    # parse arguments
    cmd = arg_parser.parse_args(argv)

    if cmd.yappi:
        # Profile the whole run with yappi when requested.
        import yappi
        yappi.set_clock_type("cpu")
        yappi.start()

    backtesting(settingFile = cmd.settingFile, startDate = cmd.startDate, days = cmd.days, mode = cmd.mode,vt_symbol = cmd.vt_symbol, vt_symbol2 = cmd.vt_symbol2, historyDays = cmd.historyDays , optimization = cmd.optimization)

    if cmd.yappi:
        yappi.get_func_stats().print_all()
        yappi.get_thread_stats().print_all()
if __name__ == "__main__":
    # Run with the real command-line arguments (program name stripped).
    main(sys.argv[1:])
    # Example invocations kept for reference:
    #main("-d 1 -s rb1905 -hd 0 -sf CTA_setting_Spread.json -s2 rb1910 -m T".split())
    #main('-d 240 -s rb000.SHFE -sf CTA_setting_alpha_real_rb.json'.split())
3264136 | <filename>gitlab_release/python-module/cz_nfc/setup.py<gh_stars>0
from setuptools import setup
setup(
    name='NFC commitizen Custom Bump Map and changelog',
    version='0.1.0',
    py_modules=['cz_nfc'],  # single-module distribution (cz_nfc.py)
    license='MIT',
    # NOTE(review): placeholder text -- replace with the real project description.
    long_description='this is a long description',
    install_requires=['commitizen', 'gitpython']
)
| StarcoderdataPython |
3363467 | <reponame>URSec/Kage
#!/usr/bin/env python3
import argparse
import subprocess
from os import path
from pathlib import Path
from time import sleep
import serial
from colorama import Fore, Style
from elftools.elf.elffile import ELFFile
# Maps benchmark program -> configuration -> System Workbench project name.
# An empty string means that combination has no benchmark project.
PROJECTS = {'microbenchmark': {'baseline': 'freertos_microbenchmarks_clang',
                               'baseline_mpu': 'freertos_mpu_microbenchmarks_clang',
                               'kage': 'microbenchmarks'},
            'coremark': {'baseline': 'freertos_coremark_clang',
                         'baseline_mpu': '',
                         'kage': 'coremark'}}
# Substrings of build-configuration names and their human-readable terms.
# NOTE: longer keys ('kage-no-silhouette', 'kage-os-only') are listed before
# 'kage' so that substring matching picks the most specific term first.
CONFIG_TERMS = {'mpu': 'FreeRTOS with MPU enabled',
                'baseline': 'FreeRTOS',
                'kage-no-silhouette': 'Kage\'s OS mechanisms',
                'kage-os-only': 'Kage\'s OS mechanisms',
                'kage': 'Kage', }
# Path (inside each project) to the board-specific demo used for flashing.
DEVICE = 'demos/st/stm32l475_discovery/ac6'
# OpenOCD command template; $PATH$ is replaced with the .elf path.
OCD_CMD = 'program $PATH$ reset exit'
# Headless System Workbench (Eclipse CDT) build command template.
BUILD_CMD = \
    '-nosplash --launcher.suppressErrors -application org.eclipse.cdt.managedbuilder.core.headlessbuild' \
    + ' -data $WORKSPACE$ -import $PROJPATH$ -cleanBuild $PROJECT$'
# Expected numbers of build-configuration folders per project (hard-coded to
# match the benchmark projects; used as a sanity check after building).
NUM_FREERTOS_MICROBENCHMARK_TESTS = 4
NUM_KAGE_MICROBENCHMARK_TESTS = 10
NUM_COREMARK_KAGE_NO_CACHE_TESTS = 6
NUM_COREMARK_TESTS = 3
def translateConfigName(name):
translated_name = ''
for configuration in CONFIG_TERMS:
if configuration in name:
translated_name = name.replace(configuration, (CONFIG_TERMS[configuration] + ': '))
translated_name = translated_name.replace('-', ' ')
break
return translated_name
# Main routine
if __name__ == "__main__":
    # ---------------- Command-line interface ----------------
    # Argparse
    parser = argparse.ArgumentParser()
    # Optional custom workspace path
    parser.add_argument('--workspace', type=Path, default=Path('../workspace'),
                        help="Specify path to the System Workbench workspace")
    # Optional custom OpenOCD binary path
    parser.add_argument('--openocd', type=str, default='openocd',
                        help="Specify custom path of OpenOCD")
    # Optional OpenOCD configuration path
    parser.add_argument('--ocdcfg', type=Path,
                        default='/usr/share/openocd/scripts/board/st_b-l475e-iot01a.cfg',
                        help="Specify custom OpenOCD configuration path")
    # Optional System Workbench installation path
    parser.add_argument('--ac6', type=Path,
                        default='~/Ac6/SystemWorkbench/eclipse',
                        help="Custom path to System Workbench")
    # Optional subset of configurations
    parser.add_argument('--configs', type=str, nargs='+',
                        default=['baseline', 'baseline_mpu', 'kage'],
                        choices=['baseline', 'baseline_mpu', 'kage'],
                        help="Select the configurations to run (Note: baseline_mpu only contains microbenchmark and "
                             "no coremark)")
    # Optional subset of benchmark programs
    parser.add_argument('--programs', type=str, nargs='+',
                        default=['microbenchmark', 'coremark'],
                        choices=['microbenchmark', 'coremark'],
                        help="Select benchmark programs")
    # Print the output of subprocesses
    parser.add_argument('--verbose', action='store_true', default=False,
                        help="Print all compilation and flashing logs.")
    #
    parser.add_argument('--disable_cache', action='store_true', default=False,
                        help="Disable instruction and data cache of the board (this option is only available when the "
                             "--programs argument contains only \"coremark\")")
    # (Optional) Write results to file
    parser.add_argument('--outfile', type=Path, required=False,
                        help="Write the results to a file")
    parser.add_argument('--build', action='store_true', default=False,
                        help='Runs make clean and build on all of the binaries. Needed for the first run.')
    parser.add_argument('--tries', type=int, default=3,
                        help="Number of tries when building a program. A value > 1 is recommended because the System Workbench's CMD interface is not very stable. Default: 3")
    # Get arguments
    args = parser.parse_args()

    # Set destinations of stdout and stderr according to argument
    if args.verbose:
        std_dst = subprocess.PIPE
        std_err = subprocess.STDOUT
    else:
        std_dst = subprocess.DEVNULL
        std_err = subprocess.DEVNULL

    # Initialize dict to store results
    perf_dict = {}
    size_dict = {}

    # ---------------- Build / flash / measure loop ----------------
    # Generate project paths
    for program in args.programs:
        if program not in PROJECTS:
            print(f'{Fore.RED}ERROR{Style.RESET_ALL}: Unknown program ', program)
            exit(1)
        if args.disable_cache and not program == 'coremark':
            print(f'{Fore.YELLOW}WARNING{Style.RESET_ALL}: --disable_cache option only supports coremark. Skipping ',
                  program)
            continue
        # Initialize dict to store results
        perf_dict[program] = {}
        size_dict[program] = {}
        projProgram = PROJECTS[program]
        for config in args.configs:
            if config not in projProgram:
                print(f'{Fore.RED}ERROR{Style.RESET_ALL}: Unknown configuration ', config)
                exit(1)
            if projProgram[config] == '':
                # No benchmark for this config and program combination, skipping
                continue
            projectPath = Path(args.workspace).joinpath(projProgram[config]).joinpath(DEVICE)
            # Initialize dict to store results
            perf_dict[program][config] = {}
            size_dict[program][config] = {}
            # Import the project to System Workbench's workspace and build the binaries
            if args.build:
                print('Compiling ', program, ' for ', config, '...')
                ac6Arg = BUILD_CMD.replace('$WORKSPACE$',
                                           args.workspace.as_posix())
                ac6Arg = ac6Arg.replace('$PROJPATH$', projectPath.as_posix())
                ac6Arg = ac6Arg.replace('$PROJECT$', projProgram[config])
                ac6Arg = args.ac6.as_posix() + ' ' + ac6Arg
                # Run the build process (retried because the headless builder is flaky)
                for i in range(args.tries):
                    with subprocess.Popen(ac6Arg, stdout=std_dst, stderr=std_err,
                                          bufsize=1, shell=True, text=True) as p:
                        while p.poll() is None:
                            if args.verbose:
                                for line in p.stdout:
                                    print(f'{Style.DIM}', line, end='')
                            sleep(.01)
                        print(f'{Style.RESET_ALL}', end='')
                    if p.returncode != 0:
                        # Building failed
                        if i < args.tries - 1:
                            print(f'{Fore.MAGENTA}WARNING{Style.RESET_ALL}: Command \'{ac6Arg}\' '
                                  f'returned status code {p.returncode}. Retrying...')
                        else:
                            print(f'{Fore.RED}ERROR{Style.RESET_ALL}: Command \'{ac6Arg}\' '
                                  f'returned status code {p.returncode}. Terminating benchmarks...')
                    else:
                        # Success
                        break
            # Get the build directories for the binaries and also removes hidden directories
            build_directories = [d for d in projectPath.iterdir() if d.is_dir() and d.name[0] != '.']
            # Check to make sure that the correct number of directories exist
            # WARNING, THESE VALUES ARE HARDCODED
            if program == 'microbenchmark':
                if config == 'kage':
                    if len(build_directories) != NUM_KAGE_MICROBENCHMARK_TESTS:
                        print(
                            f'{Fore.RED}ERROR{Style.RESET_ALL}: Binary folders at {projectPath} '
                            f'do not exist. Run the script with the \'--build\' flag')
                        exit(1)
                elif len(build_directories) != NUM_FREERTOS_MICROBENCHMARK_TESTS:
                    print(
                        f'{Fore.RED}ERROR{Style.RESET_ALL}: Binary folders at {projectPath} '
                        f'do not exist. Run the script with the \'--build\' flag')
                    exit(1)
            if program == 'coremark':
                # If the flag has been specified not to use cache, remove those directories from list
                if args.disable_cache:
                    build_directories = [d for d in build_directories if 'no-cache' in d.name]
                else:
                    build_directories = [d for d in build_directories if 'no-cache' not in d.name]
                if config == 'kage':
                    if args.disable_cache:
                        if len(build_directories) != NUM_COREMARK_TESTS:
                            print(f'{Fore.RED}ERROR{Style.RESET_ALL}: Unexpected number of folders')
                    elif len(build_directories) != NUM_COREMARK_KAGE_NO_CACHE_TESTS:
                        print(f'{Fore.RED}ERROR{Style.RESET_ALL}: Unexpected number of folders')
                elif len(build_directories) != NUM_COREMARK_TESTS:
                    print(
                        f'{Fore.RED}ERROR{Style.RESET_ALL}: Binary folders at {projectPath} '
                        f'do not exist. Run the script with the \'--build\' flag')
                    exit(1)
            for configDir in build_directories:
                print(f'Flashing and running {Fore.GREEN}', program, ' ',
                      translateConfigName(configDir.name), f'{Style.RESET_ALL}')
                # Execute OpenOCD on binary found in each build config
                binPath = configDir.joinpath(configDir.name + '.elf')
                # Check to make sure the binary exists
                if not path.isfile(binPath):
                    print(
                        f'{Fore.RED}ERROR{Style.RESET_ALL}: Binary {binPath} '
                        f'does not exist. Run the script with the \'--build\' flag')
                    exit(1)
                ocdArg = OCD_CMD.replace('$PATH$', binPath.as_posix())
                # Flash the binary asynchronously to immediately receive serial output
                with subprocess.Popen([args.openocd, '-f', args.ocdcfg.as_posix(), '-c', ocdArg],
                                      stdout=std_dst, stderr=std_err, bufsize=1, text=True) as p:
                    while p.poll() is None:
                        if args.verbose:
                            for line in p.stdout:
                                print(f'{Style.DIM}', line, end='')
                        sleep(.01)
                    print(f'{Style.RESET_ALL}', end='')
                if p.returncode != 0:
                    print(
                        f'{Fore.RED}ERROR{Style.RESET_ALL}: Command \'{args.openocd} -f {args.ocdcfg.as_posix()} '
                        f'-c \"{ocdArg}\"\' returned status code {p.returncode}. Terminating benchmarks...')
                    exit(1)
                # Determine the human-readable configuration name
                confName = translateConfigName(configDir.name)
                # Compute code size from the ELF sections of the flashed binary
                with binPath.open('rb') as f:
                    elffile = ELFFile(f)
                    section = elffile.get_section_by_name('privileged_functions')
                    if section is None:
                        privileged_size = 0
                    else:
                        privileged_size = section.data_size
                    section = elffile.get_section_by_name('freertos_system_calls')
                    if section is None:
                        syscallSize = 0
                    else:
                        syscallSize = section.data_size
                    textSize = elffile.get_section_by_name('.text').data_size
                    # Calculate total trusted and untrusted size
                    if 'baseline' in config:
                        # Everything is trusted in FreeRTOS and FreeRTOS with MPU
                        trusted = privileged_size + syscallSize + textSize
                        untrusted = 0
                    else:
                        trusted = privileged_size
                        untrusted = syscallSize + textSize
                # Open serial port with 2 minute timeout. This loop ends when the timeout is reached.
                # or when the last line is read
                # NOTE(review): the serial device '/dev/ttyACM0' is hard-coded.
                with serial.Serial('/dev/ttyACM0', 115200, timeout=120) as ser:
                    while True:
                        # Sleep for just a millisecond to give a slight buffer
                        sleep(.001)
                        try:
                            line = ser.readline().decode()
                            if args.verbose:
                                print(f'{Style.DIM}', line, end='')
                        except UnicodeDecodeError as ude:
                            print(
                                f'{Style.RESET_ALL}{Fore.YELLOW}WARNING{Style.RESET_ALL}: '
                                f'Decoding error, skipping line')
                            print(ude)
                            continue
                        if len(line) == 0:
                            # timeout
                            print(f'\b{Style.RESET_ALL}{Fore.YELLOW}TIMEOUT REACHED{Style.RESET_ALL}: ', end='')
                            break
                        # Each benchmark has different output format, so do a manual matching here.
                        if 'stream-buffer' in configDir.name:
                            # Stream buffer microbenchmark
                            if 'Creating stream buffer' in line:
                                # Stream buffer creation
                                t = int(line.split(': ')[1].replace('\n', '').replace('\r', ''))
                                perf_dict[program][config][confName + ': create'] = t
                            if 'Received unsigned 9 from stream buffer' in line:
                                # Stream buffer send and receive
                                t = int(line.split(': ')[1].replace('\n', '').replace('\r', ''))
                                perf_dict[program][config][confName + ': send and receive'] = t
                                size_dict[program][config][confName] = \
                                    {'trusted': trusted, 'untrusted': untrusted}
                            if 'Started Microbenchmark Low Priority Task' in line:
                                break
                        if 'queue' in configDir.name:
                            # Queue microbenchmark
                            if 'Creating queue' in line:
                                # Queue creation
                                t = int(line.split(': ')[1].replace('\n', '').replace('\r', ''))
                                perf_dict[program][config][confName + ': create'] = t
                            if 'Received unsigned 9 from queue' in line:
                                # Queue send and receive
                                t = int(line.split(': ')[1].replace('\n', '').replace('\r', ''))
                                perf_dict[program][config][confName + ': send and receive'] = t
                                size_dict[program][config][confName] = \
                                    {'trusted': trusted, 'untrusted': untrusted}
                            if 'Started Microbenchmark Low Priority Task' in line:
                                break
                        if 'exception-dispatcher' in configDir.name:
                            # Exception microbenchmark
                            if 'DIV_BY_0' in line:
                                t = int(line.split(': 0 ')[1].split(' cycles')[0])
                                perf_dict[program][config][confName.replace('dispatcher', '')] = t
                                size_dict[program][config][confName.replace('dispatcher', '')] = \
                                    {'trusted': trusted, 'untrusted': untrusted}
                            if 'Started Microbenchmark Low Priority Task' in line:
                                break
                        if 'context-switch' in configDir.name:
                            # Context switch microbenchmark
                            if 'Context Switch cycle' in line:
                                t = int(line.split(': ')[1].split(' cycles')[0])
                                perf_dict[program][config][confName] = t
                                size_dict[program][config][confName] = \
                                    {'trusted': trusted, 'untrusted': untrusted}
                                break
                        if 'secure-api' in configDir.name:
                            # Secure API microbenchmark
                            if 'MPU checks' in line:
                                t = int(line.split(': ')[1].split(' cycles')[0])
                                perf_dict[program][config][confName + ': MPU region configuration'] = t
                            if 'xVerifyTCB' in line:
                                t = int(line.split(': ')[1].replace('\r', '').replace('\n', ''))
                                perf_dict[program][config][confName + ': task control block'] = t
                            if 'xVerifyUntrustedData' in line:
                                t = int(line.split(': ')[1].replace('\r', '').replace('\n', ''))
                                perf_dict[program][config][confName + ': other pointers'] = t
                            if 'Exception priority' in line:
                                t = int(line.split(': ')[1].replace('\r', '').replace('\n', ''))
                                perf_dict[program][config][confName + ': exception priority'] = t
                                size_dict[program][config][confName] = \
                                    {'trusted': trusted, 'untrusted': untrusted}
                                break
                        if 'coremark' in configDir.name:
                            # CoreMark
                            if 'Iterations/Sec' in line:
                                t = float(line.split(': ')[1].replace('\n', ''))
                                perf_dict[program][config][confName] = t
                                size_dict[program][config][confName] = \
                                    {'trusted': trusted, 'untrusted': untrusted}
                            if 'CoreMark 1.0' in line:
                                break
                # While loop exited
                print(f'{Style.RESET_ALL}{Fore.GREEN}All results read{Style.RESET_ALL}')

    # ---------------- Report ----------------
    # Generate result string
    resultStr = "Performance results:\n"
    for program in perf_dict:
        resultStr += (program + ':\n')
        for config in perf_dict[program]:
            perfDictPart = perf_dict[program][config]
            # Sort the order of benchmarks to print
            benchList = sorted(list(perfDictPart.keys()))
            for bench in benchList:
                resultStr += (bench.ljust(65) + str(perfDictPart[bench]))
                if program == 'coremark':
                    resultStr += ' iter/sec\n'
                else:
                    resultStr += ' cycles\n'
    resultStr += '\nCode size results (bytes)\n'
    for program in size_dict:
        resultStr += (program + ':\n')
        for config in size_dict[program]:
            sizeDictPart = size_dict[program][config]
            # Sort the order of benchmarks
            benchList = sorted(list(sizeDictPart.keys()))
            for bench in benchList:
                resultStr += (bench.ljust(60) + str(sizeDictPart[bench]) + '\n')
    print(resultStr)
    if args.outfile is not None:
        with args.outfile.open('w') as file:
            file.write(resultStr)
            print("Results stored to ", args.outfile.as_posix())
1742109 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
# Registers this module with the proto-plus runtime and declares the manifest
# of generated message types exported by the module. (Generated code; keep the
# manifest in sync with the .proto schema.)
__protobuf__ = proto.module(
    package='google.cloud.assuredworkloads.v1',
    manifest={
        'CreateWorkloadRequest',
        'UpdateWorkloadRequest',
        'DeleteWorkloadRequest',
        'GetWorkloadRequest',
        'ListWorkloadsRequest',
        'ListWorkloadsResponse',
        'Workload',
        'CreateWorkloadOperationMetadata',
    },
)
class CreateWorkloadRequest(proto.Message):
    r"""Request message for creating a new Assured Workload.

    Attributes:
        parent (str):
            Required. Resource name of the new Workload's parent, of the
            form ``organizations/{org_id}/locations/{location_id}``.
        workload (google.cloud.assuredworkloads_v1.types.Workload):
            Required. The Assured Workload to create.
        external_id (str):
            Optional. Identifier added as a label to the workload and its
            projects, allowing billing costs to be broken down per
            workload.
    """

    parent = proto.Field(proto.STRING, number=1)
    workload = proto.Field(proto.MESSAGE, number=2, message='Workload')
    external_id = proto.Field(proto.STRING, number=3)
class UpdateWorkloadRequest(proto.Message):
    r"""Request message for updating an existing workload.

    Attributes:
        workload (google.cloud.assuredworkloads_v1.types.Workload):
            Required. The workload to update; its ``name`` field
            identifies the workload to be updated. Format:
            organizations/{org_id}/locations/{location_id}/workloads/{workload_id}
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Required. The list of fields to be updated.
    """

    workload = proto.Field(proto.MESSAGE, number=1, message='Workload')
    update_mask = proto.Field(proto.MESSAGE, number=2,
                              message=field_mask_pb2.FieldMask)
class DeleteWorkloadRequest(proto.Message):
    r"""Request message for deleting a workload.

    Attributes:
        name (str):
            Required. Resource name identifying the workload. Format:
            organizations/{org_id}/locations/{location_id}/workloads/{workload_id}
        etag (str):
            Optional. When supplied, it must match the server's current
            etag for the workload.
    """

    name = proto.Field(proto.STRING, number=1)
    etag = proto.Field(proto.STRING, number=2)
class GetWorkloadRequest(proto.Message):
    r"""Request message for fetching a single workload.

    Attributes:
        name (str):
            Required. Relative resource path of the Workload, e.g.
            "organizations/123/locations/us-east1/workloads/assured-workload-1".
    """

    name = proto.Field(proto.STRING, number=1)
class ListWorkloadsRequest(proto.Message):
    r"""Request message for listing the workloads in an organization.

    Attributes:
        parent (str):
            Required. Parent to list workloads from, of the form
            ``organizations/{org_id}/locations/{location}``.
        page_size (int):
            Maximum number of results per page.
        page_token (str):
            Token returned by a previous request; carries the pagination
            context and must be passed on the second and later requests.
        filter (str):
            Custom filter over workload properties; currently only
            filtering by labels is supported.
    """

    parent = proto.Field(proto.STRING, number=1)
    page_size = proto.Field(proto.INT32, number=2)
    page_token = proto.Field(proto.STRING, number=3)
    filter = proto.Field(proto.STRING, number=4)
class ListWorkloadsResponse(proto.Message):
    r"""Response message of the ListWorkloads endpoint.

    Attributes:
        workloads (Sequence[google.cloud.assuredworkloads_v1.types.Workload]):
            Workloads under the requested parent.
        next_page_token (str):
            Token for the next page; empty when the last page has been
            reached.
    """

    @property
    def raw_page(self):
        # Pagination helper expected by the generated client's pagers.
        return self

    workloads = proto.RepeatedField(proto.MESSAGE, number=1, message='Workload')
    next_page_token = proto.Field(proto.STRING, number=2)
class Workload(proto.Message):
    r"""A Workload object for managing highly regulated workloads of
    cloud customers.

    Attributes:
        name (str):
            Optional. Read-only resource name, of the form
            organizations/{organization}/locations/{location}/workloads/{workload}.
        display_name (str):
            Required. User-assigned display name, 4-30 characters:
            letters, numbers, hyphen and spaces.
        resources (Sequence[Workload.ResourceInfo]):
            Output only. Resources created together with the workload;
            creation fails if any of the projects already exist.
        compliance_regime (Workload.ComplianceRegime):
            Required. Immutable. Compliance regime of this workload.
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Immutable. Creation timestamp.
        billing_account (str):
            Required. Input only. Billing account initially associated
            with the workload's direct child resources, e.g.
            ``billingAccounts/012345-567890-ABCDEF``.
        etag (str):
            Optional. ETag computed from the workload contents; used in
            Update and Delete operations.
        labels (Sequence[Workload.LabelsEntry]):
            Optional. Labels applied to the workload.
        provisioned_resources_parent (str):
            Input only. Optional folder (folders/{folder_id}) under which
            managed resources are created; defaults to the parent
            organization when empty.
        kms_settings (Workload.KMSSettings):
            Input only. CMEK crypto-key settings; mandatory for a subset
            of compliance regimes.
        resource_settings (Sequence[Workload.ResourceSettings]):
            Input only. Optional per-resource customizations (such as a
            custom project id).
    """

    class ComplianceRegime(proto.Enum):
        r"""Supported Compliance Regimes."""
        COMPLIANCE_REGIME_UNSPECIFIED = 0
        IL4 = 1
        CJIS = 2
        FEDRAMP_HIGH = 3
        FEDRAMP_MODERATE = 4
        US_REGIONAL_ACCESS = 5
        HIPAA = 6
        HITRUST = 7
        EU_REGIONS_AND_SUPPORT = 8
        CA_REGIONS_AND_SUPPORT = 9

    class ResourceInfo(proto.Message):
        r"""A resource that is a child of this Workload.

        Attributes:
            resource_id (int):
                Resource identifier; for a project this is the
                project_number.
            resource_type (Workload.ResourceInfo.ResourceType):
                Type of the resource.
        """

        class ResourceType(proto.Enum):
            r"""The type of resource."""
            RESOURCE_TYPE_UNSPECIFIED = 0
            CONSUMER_PROJECT = 1
            ENCRYPTION_KEYS_PROJECT = 2
            KEYRING = 3

        resource_id = proto.Field(proto.INT64, number=1)
        resource_type = proto.Field(proto.ENUM, number=2,
                                    enum='Workload.ResourceInfo.ResourceType')

    class KMSSettings(proto.Message):
        r"""Settings specific to the Key Management Service.

        Attributes:
            next_rotation_time (google.protobuf.timestamp_pb2.Timestamp):
                Required. Input only. Immutable. When KMS will next create
                and promote a new crypto-key version.
            rotation_period (google.protobuf.duration_pb2.Duration):
                Required. Input only. Immutable. Automatic rotation period;
                at least 24 hours and at most 876,000 hours.
        """

        next_rotation_time = proto.Field(proto.MESSAGE, number=1,
                                         message=timestamp_pb2.Timestamp)
        rotation_period = proto.Field(proto.MESSAGE, number=2,
                                      message=duration_pb2.Duration)

    class ResourceSettings(proto.Message):
        r"""Custom settings for a resource to be created.

        Attributes:
            resource_id (str):
                Resource identifier; for a project this is the project_id,
                and creation fails if it is already taken.
            resource_type (Workload.ResourceInfo.ResourceType):
                Resource type the id applies to (CONSUMER_PROJECT or
                ENCRYPTION_KEYS_PROJECT).
            display_name (str):
                Optional display name for the created resource.
        """

        resource_id = proto.Field(proto.STRING, number=1)
        resource_type = proto.Field(proto.ENUM, number=2,
                                    enum='Workload.ResourceInfo.ResourceType')
        display_name = proto.Field(proto.STRING, number=3)

    name = proto.Field(proto.STRING, number=1)
    display_name = proto.Field(proto.STRING, number=2)
    resources = proto.RepeatedField(proto.MESSAGE, number=3, message=ResourceInfo)
    compliance_regime = proto.Field(proto.ENUM, number=4, enum=ComplianceRegime)
    create_time = proto.Field(proto.MESSAGE, number=5,
                              message=timestamp_pb2.Timestamp)
    billing_account = proto.Field(proto.STRING, number=6)
    etag = proto.Field(proto.STRING, number=9)
    labels = proto.MapField(proto.STRING, proto.STRING, number=10)
    provisioned_resources_parent = proto.Field(proto.STRING, number=13)
    kms_settings = proto.Field(proto.MESSAGE, number=14, message=KMSSettings)
    resource_settings = proto.RepeatedField(proto.MESSAGE, number=15,
                                            message=ResourceSettings)
class CreateWorkloadOperationMetadata(proto.Message):
    r"""Operation metadata describing a CreateWorkload request.

    Attributes:
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Optional. Time the operation was created.
        display_name (str):
            Optional. Display name of the workload.
        parent (str):
            Optional. Parent of the workload.
        compliance_regime (google.cloud.assuredworkloads_v1.types.Workload.ComplianceRegime):
            Optional. Compliance controls applied to the resources
            managed by the workload.
    """

    create_time = proto.Field(proto.MESSAGE, number=1,
                              message=timestamp_pb2.Timestamp)
    display_name = proto.Field(proto.STRING, number=2)
    parent = proto.Field(proto.STRING, number=3)
    compliance_regime = proto.Field(proto.ENUM, number=4,
                                    enum='Workload.ComplianceRegime')
__all__ = tuple(sorted(__protobuf__.manifest))
| StarcoderdataPython |
26904 |
from paraview.simple import *
from paraview import coprocessing
#--------------------------------------------------------------
# Code generated from cpstate.py to create the CoProcessor.
# ParaView 5.4.1 64 bits
#--------------------------------------------------------------
# Global screenshot output options
imageFileNamePadding=5  # zero-pad the timestep index in image filenames to 5 digits
rescale_lookuptable=False  # keep color-map ranges fixed across timesteps
# ----------------------- CoProcessor definition -----------------------
def CreateCoProcessor():
    """Build and return the Catalyst CoProcessor wrapping the render pipeline.

    The pipeline itself (a machine-generated ParaView 5.4.1 trace) is created
    lazily by ``_CreatePipeline`` the first time data is produced.
    """
    def _CreatePipeline(coprocessor, datadescription):
        class Pipeline:
            # state file generated using paraview version 5.4.1

            # ----------------------------------------------------------------
            # setup views used in the visualization
            # ----------------------------------------------------------------

            #### disable automatic camera reset on 'Show'
            paraview.simple._DisableFirstRenderCameraReset()

            # Create a new 'Render View'
            renderView1 = CreateView('RenderView')
            renderView1.ViewSize = [1000, 700]
            renderView1.AxesGrid = 'GridAxes3DActor'
            renderView1.CenterOfRotation = [0.5, 0.5, 0.5]
            renderView1.StereoType = 0
            renderView1.CameraPosition = [0.5, 0.5, 3.2557533687070332]
            renderView1.CameraFocalPoint = [0.5, 0.5, -0.09031184624419736]
            renderView1.CameraParallelScale = 0.8660254037844386
            renderView1.Background = [0.0, 0.0, 0.0]

            # register the view with coprocessor
            # and provide it with information such as the filename to use,
            # how frequently to write the images, etc.
            coprocessor.RegisterView(renderView1,
                filename='pv_image_3d_%t.png', freq=1, fittoscreen=0, magnification=1, width=1000, height=700, cinema={})
            renderView1.ViewTime = datadescription.GetTime()

            # ----------------------------------------------------------------
            # setup the data processing pipelines
            # ----------------------------------------------------------------

            # create a new 'XML UniformGrid AMR Reader'
            # create a producer from a simulation input
            mesh_000 = coprocessor.CreateProducer(datadescription, 'mesh')

            # create a new 'Cell Data to Point Data'
            cellDatatoPointData1 = CellDatatoPointData(Input=mesh_000)

            # create a new 'Contour'
            contour1 = Contour(Input=cellDatatoPointData1)
            contour1.ContourBy = ['POINTS', 'phi']
            contour1.ComputeScalars = 1
            contour1.Isosurfaces = [0.99429, 1.1043655555555556, 1.214441111111111, 1.3245166666666668, 1.4345922222222223, 1.5446677777777778, 1.6547433333333332, 1.764818888888889, 1.8748944444444444, 1.98497]
            contour1.PointMergeMethod = 'Uniform Binning'

            # create a new 'Annotate Time'
            annotateTime1 = AnnotateTime()
            annotateTime1.Format = 't = %0.2f'

            # ----------------------------------------------------------------
            # setup color maps and opacity mapes used in the visualization
            # note: the Get..() functions create a new object, if needed
            # ----------------------------------------------------------------

            # get color transfer function/color map for 'phi'
            phiLUT = GetColorTransferFunction('phi')
            phiLUT.RGBPoints = [0.99429, 0.278431372549, 0.278431372549, 0.858823529412, 1.13595724, 0.0, 0.0, 0.360784313725, 1.2766338000000002, 0.0, 1.0, 1.0, 1.41929172, 0.0, 0.501960784314, 0.0, 1.55996828, 1.0, 1.0, 0.0, 1.70163552, 1.0, 0.380392156863, 0.0, 1.84330276, 0.419607843137, 0.0, 0.0, 1.9849700000000001, 0.878431372549, 0.301960784314, 0.301960784314]
            phiLUT.ColorSpace = 'RGB'
            phiLUT.ScalarRangeInitialized = 1.0

            # get opacity transfer function/opacity map for 'phi'
            phiPWF = GetOpacityTransferFunction('phi')
            phiPWF.Points = [0.99429, 0.0, 0.5, 0.0, 1.9849700000000001, 1.0, 0.5, 0.0]
            phiPWF.ScalarRangeInitialized = 1

            # ----------------------------------------------------------------
            # setup the visualization in view 'renderView1'
            # ----------------------------------------------------------------

            # show data from mesh_000
            mesh_000Display = Show(mesh_000, renderView1)
            # trace defaults for the display properties.
            mesh_000Display.Representation = 'AMR Blocks'
            mesh_000Display.ColorArrayName = [None, '']
            mesh_000Display.DiffuseColor = [0.0, 0.0, 0.0]
            mesh_000Display.OSPRayScaleArray = 'GhostType'
            mesh_000Display.OSPRayScaleFunction = 'PiecewiseFunction'
            mesh_000Display.SelectOrientationVectors = 'None'
            mesh_000Display.ScaleFactor = 0.1
            mesh_000Display.SelectScaleArray = 'None'
            mesh_000Display.GlyphType = 'Arrow'
            mesh_000Display.GlyphTableIndexArray = 'None'
            mesh_000Display.DataAxesGrid = 'GridAxesRepresentation'
            mesh_000Display.PolarAxes = 'PolarAxesRepresentation'
            mesh_000Display.ScalarOpacityUnitDistance = 0.0174438098693218

            # init the 'GridAxesRepresentation' selected for 'DataAxesGrid'
            mesh_000Display.DataAxesGrid.XTitle = 'X'
            mesh_000Display.DataAxesGrid.YTitle = 'Y'
            mesh_000Display.DataAxesGrid.ZTitle = 'Z'
            mesh_000Display.DataAxesGrid.XTitleBold = 1
            mesh_000Display.DataAxesGrid.XTitleFontSize = 14
            mesh_000Display.DataAxesGrid.YTitleBold = 1
            mesh_000Display.DataAxesGrid.YTitleFontSize = 14
            mesh_000Display.DataAxesGrid.ZTitleBold = 1
            mesh_000Display.DataAxesGrid.ZTitleFontSize = 14
            mesh_000Display.DataAxesGrid.XLabelBold = 1
            mesh_000Display.DataAxesGrid.XLabelFontSize = 14
            mesh_000Display.DataAxesGrid.YLabelBold = 1
            mesh_000Display.DataAxesGrid.YLabelFontSize = 14
            mesh_000Display.DataAxesGrid.ZLabelBold = 1
            mesh_000Display.DataAxesGrid.ZLabelFontSize = 14

            # show data from contour1
            contour1Display = Show(contour1, renderView1)
            # trace defaults for the display properties.
            contour1Display.Representation = 'Surface'
            contour1Display.ColorArrayName = ['POINTS', 'phi']
            contour1Display.LookupTable = phiLUT
            contour1Display.OSPRayScaleArray = 'GhostType'
            contour1Display.OSPRayScaleFunction = 'PiecewiseFunction'
            contour1Display.SelectOrientationVectors = 'GhostType'
            contour1Display.ScaleFactor = 0.0572519063949585
            contour1Display.SelectScaleArray = 'GhostType'
            contour1Display.GlyphType = 'Arrow'
            contour1Display.GlyphTableIndexArray = 'GhostType'
            contour1Display.DataAxesGrid = 'GridAxesRepresentation'
            contour1Display.PolarAxes = 'PolarAxesRepresentation'
            contour1Display.GaussianRadius = 0.02862595319747925
            contour1Display.SetScaleArray = ['POINTS', 'GhostType']
            contour1Display.ScaleTransferFunction = 'PiecewiseFunction'
            contour1Display.OpacityArray = ['POINTS', 'GhostType']
            contour1Display.OpacityTransferFunction = 'PiecewiseFunction'

            # show color legend
            contour1Display.SetScalarBarVisibility(renderView1, True)

            # show data from annotateTime1
            annotateTime1Display = Show(annotateTime1, renderView1)
            # trace defaults for the display properties.
            annotateTime1Display.Bold = 1
            annotateTime1Display.FontSize = 12
            annotateTime1Display.WindowLocation = 'LowerLeftCorner'

            # setup the color legend parameters for each legend in this view

            # get color legend/bar for phiLUT in view renderView1
            phiLUTColorBar = GetScalarBar(phiLUT, renderView1)
            phiLUTColorBar.WindowLocation = 'AnyLocation'
            phiLUTColorBar.Position = [0.852, 0.07857142857142851]
            phiLUTColorBar.Title = 'phi'
            phiLUTColorBar.ComponentTitle = ''
            phiLUTColorBar.TitleBold = 1
            phiLUTColorBar.TitleFontSize = 24
            phiLUTColorBar.LabelBold = 1
            phiLUTColorBar.LabelFontSize = 18
            phiLUTColorBar.ScalarBarThickness = 24
            phiLUTColorBar.ScalarBarLength = 0.8357142857142857

            # ----------------------------------------------------------------
            # finally, restore active source
            SetActiveSource(mesh_000)
            # ----------------------------------------------------------------

        return Pipeline()

    class CoProcessor(coprocessing.CoProcessor):
        def CreatePipeline(self, datadescription):
            # Called once by the Catalyst framework to materialize the pipeline.
            self.Pipeline = _CreatePipeline(self, datadescription)

    coprocessor = CoProcessor()
    # these are the frequencies at which the coprocessor updates.
    freqs = {'mesh': [1, 1, 1]}
    coprocessor.SetUpdateFrequencies(freqs)
    return coprocessor
#--------------------------------------------------------------
# Global variable that will hold the pipeline for each timestep.
# Creating the CoProcessor object doesn't actually create the ParaView
# pipeline; it is set up automatically when coprocessor.UpdateProducers()
# is called the first time.
coprocessor = CreateCoProcessor()

#--------------------------------------------------------------
# Enable Live-Visualization with ParaView and set the update frequency
coprocessor.EnableLiveVisualization(False, 1)
# ---------------------- Data Selection method ----------------------
def RequestDataDescription(datadescription):
    """Callback to populate the data request for the current timestep."""
    global coprocessor
    if datadescription.GetForceOutput():
        # Forced output: ask the simulation adaptor for every field and mesh.
        for idx in range(datadescription.GetNumberOfInputDescriptions()):
            input_desc = datadescription.GetInputDescription(idx)
            input_desc.AllFieldsOn()
            input_desc.GenerateMeshOn()
        return
    # Otherwise request only what the pipeline actually needs.
    coprocessor.LoadRequestedData(datadescription)
# ------------------------ Processing method ------------------------
def DoCoProcessing(datadescription):
    """Callback that runs the co-processing pipeline for the current timestep."""
    global coprocessor
    # Feed the freshly generated simulation data in; the pipeline is built
    # automatically on the first call.
    coprocessor.UpdateProducers(datadescription)
    # Emit data products, then rendered images (optionally rescaling LUTs).
    coprocessor.WriteData(datadescription)
    coprocessor.WriteImages(
        datadescription,
        rescale_lookuptable=rescale_lookuptable,
        image_quality=0,
        padding_amount=imageFileNamePadding,
    )
    # Push results to a live ParaView session when enabled.
    coprocessor.DoLiveVisualization(datadescription, "localhost", 22222)
| StarcoderdataPython |
1791824 | <reponame>dek-odoo/python-samples<filename>python exercises/dek_program079.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
#- Author : (DEK) <NAME>
# program079:
# Please write a program to randomly generate a list with 5 even numbers
# between 100 and 200 inclusive.
# Hints:
# Use random.sample() to generate a list of random values.
import random
def main(startLimit, endLimit):
    """Print 5 distinct random even numbers from [startLimit, endLimit].

    Builds the candidate population as a ``range`` with step 2 instead of
    materializing a full list comprehension, so no intermediate list of all
    candidates is allocated before sampling.
    """
    # First even number at or after startLimit.
    first_even = startLimit if startLimit % 2 == 0 else startLimit + 1
    # random.sample accepts any sequence, including range objects.
    print(random.sample(range(first_even, endLimit + 1, 2), 5))
if __name__ == '__main__':
    # Interactive prompts kept for reference; the exercise fixes the range.
    # startLimit = int(raw_input("Input startLimit : "))
    # endLimit = int(raw_input("Input endLimit : "))
    # main(startLimit, endLimit)
    main(100, 200)
| StarcoderdataPython |
3208767 | from model.group import Group
import random
def check_empty_group_list(app, db):
    """Ensure at least one group exists so there is a modification target."""
    if not db.get_group_list():
        app.group.create(Group(name="TEST GROUP NAME TO CHANGE", header="TEST GROUP HEADER TO CHANGE"))
def test_modify_random_group(app, db, check_ui):
    """Modify a randomly chosen group and verify the DB (and optionally UI)."""
    check_empty_group_list(app, db)
    replacement = Group(name="TEST_MOD_NAME")
    groups_before = db.get_group_list()
    target = random.choice(groups_before)
    position = groups_before.index(target)
    app.group.modify_group_by_id(target.id, replacement)
    groups_after = db.get_group_list()
    # The chosen entry should now look like the replacement; everything
    # else must be untouched.
    groups_before[position] = replacement
    assert groups_before == groups_after
    if check_ui:
        assert sorted(groups_before, key=Group.id_or_max) == sorted(
            app.group.get_group_list(), key=Group.id_or_max)
| StarcoderdataPython |
4823280 | import unittest
import oauth
class ConsumerTests(unittest.TestCase):
    """Construction rules for oauth.Consumer."""

    def test_basic(self):
        # Both key and secret are mandatory.
        for key, secret in ((None, None), ('asf', None), (None, 'dasf')):
            with self.assertRaises(ValueError):
                oauth.Consumer(key, secret)
        csr = oauth.Consumer('asf', 'dasf')
        self.assertEqual(csr.key, 'asf')
        self.assertEqual(csr.secret, 'dasf')
class TokenTests(unittest.TestCase):
    """Construction and (de)serialization rules for oauth.Token."""

    def test_basic(self):
        # Both key and secret are mandatory.
        for key, secret in ((None, None), ('asf', None), (None, 'dasf')):
            with self.assertRaises(ValueError):
                oauth.Token(key, secret)
        tok = oauth.Token('asf', 'dasf')
        self.assertEqual(tok.key, 'asf')
        self.assertEqual(tok.secret, 'dasf')

    def test_from_string(self):
        # Strings missing either parameter (or with them fused by a literal
        # %26) must be rejected.
        bad_strings = (
            '',
            'blahblahblah',
            'blah=blah',
            'oauth_token_secret=asfdasf',
            'oauth_token_secret=',
            'oauth_token=asfdasf',
            'oauth_token=',
            'oauth_token=&oauth_token_secret=',
            'oauth_token=tooken%26oauth_token_secret=seecret',
        )
        for raw in bad_strings:
            with self.assertRaises(ValueError):
                oauth.Token.from_string(raw)

        # Parameter order and unrelated extra parameters must not matter.
        good_strings = (
            'oauth_token_secret=seecret&oauth_token=tooken',
            'oauth_token=tooken&oauth_token_secret=seecret',
            'blah=blah&oauth_token=tooken&oauth_token_secret=seecret',
            'oauth_token=tooken&oauth_token_secret=seecret&blah=blah',
            'blah=blah&oauth_token=tooken&oauth_token_secret=seecret&blah=blah',
        )
        for raw in good_strings:
            tok = oauth.Token.from_string(raw)
            self.assertEqual(tok.key, 'tooken')
            self.assertEqual(tok.secret, 'seecret')

    def test_to_string(self):
        tok = oauth.Token('tooken', 'seecret')
        self.assertEqual(str(tok), 'oauth_token_secret=seecret&oauth_token=tooken')
class RequestTests(unittest.TestCase):
    """Behaviour of oauth.Request construction, method handling and signing."""

    def test_empty(self):
        req = oauth.Request()
        self.assertEqual(req.http_method, 'GET')
        self.assert_(not hasattr(req, 'url'))
        self.assertEqual(req.to_postdata(), '')
        with self.assertRaises(AttributeError):
            req.to_url()
        self.assertEqual(req.to_header(), {'Authorization': 'OAuth realm=""'})

    def test_method(self):
        # The HTTP method is normalized to upper case whatever the input.
        for given, expected in (('GET', 'GET'), ('POST', 'POST'),
                                ('AWESOME', 'AWESOME'), ('get', 'GET'),
                                ('post', 'POST'), ('awesome', 'AWESOME')):
            self.assertEqual(oauth.Request(given).method, expected)

    def test_sign(self):
        req = oauth.Request('GET', 'http://example.com/')
        self.assertEqual(req.method, 'GET')
        self.assertEqual(req.url, 'http://example.com/')
        signer = oauth.sign.HmacSha1()
        consumer = oauth.Consumer('csrkey', 'csrsecret')
        token = oauth.Token('token', 'tokensecret')
        req.sign_request(signer, consumer, token)
        self.assertEqual(
            req.to_postdata(),
            'oauth_signature=mN4G%2FLGkKOPojpit%2F3LRMP1bQg8%3D&oauth_signature_method=HMAC-SHA1')
| StarcoderdataPython |
3310763 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pathlib import Path
import unittest
from shared.utils import get_input
from . import solution1, solution2, common
from .rescue import RescueMessage
SOLUTION_DIR = Path(__file__).parent
class TestSolution(unittest.TestCase):
    """Abstract base for solution tests.

    Subclasses must set ``module`` (the solution module under test) and
    ``expected`` (the expected answer for the fixture input);
    ``input_filename`` may be overridden.
    """

    module = None                       # solution module providing solve()
    input_filename = "test_input.txt"   # fixture file next to this test
    expected = None                     # expected answer for the fixture

    def setUp(self):
        # Fail fast with a clear message when a subclass forgets to configure.
        if self.module is None:
            raise NotImplementedError(
                "subclasses of TestSolution must provide module to test"
            )
        if self.expected is None:
            raise NotImplementedError(
                "subclasses of TestSolution must provide expected value"
            )
        self.input_path = SOLUTION_DIR.joinpath(self.input_filename)
        self.input_text = get_input(self.input_path)
class TestValues:
    """Shared fixture data (mixin): raw puzzle lines, their parsed form, and
    the expected rendered light grids at t = 0..4 seconds (AoC 2018 day 10
    worked example)."""

    # Raw "position=<x, y> velocity=<dx, dy>" input lines.
    test_inputs = [
        "position=< 9, 1> velocity=< 0, 2>",
        "position=< 7, 0> velocity=<-1, 0>",
        "position=< 3, -2> velocity=<-1, 1>",
        "position=< 6, 10> velocity=<-2, -1>",
        "position=< 2, -4> velocity=< 2, 2>",
        "position=<-6, 10> velocity=< 2, -2>",
        "position=< 1, 8> velocity=< 1, -1>",
        "position=< 1, 7> velocity=< 1, 0>",
        "position=<-3, 11> velocity=< 1, -2>",
        "position=< 7, 6> velocity=<-1, -1>",
        "position=<-2, 3> velocity=< 1, 0>",
        "position=<-4, 3> velocity=< 2, 0>",
        "position=<10, -3> velocity=<-1, 1>",
        "position=< 5, 11> velocity=< 1, -2>",
        "position=< 4, 7> velocity=< 0, -1>",
        "position=< 8, -2> velocity=< 0, 1>",
        "position=<15, 0> velocity=<-2, 0>",
        "position=< 1, 6> velocity=< 1, 0>",
        "position=< 8, 9> velocity=< 0, -1>",
        "position=< 3, 3> velocity=<-1, 1>",
        "position=< 0, 5> velocity=< 0, -1>",
        "position=<-2, 2> velocity=< 2, 0>",
        "position=< 5, -2> velocity=< 1, 2>",
        "position=< 1, 4> velocity=< 2, 1>",
        "position=<-2, 7> velocity=< 2, -2>",
        "position=< 3, 6> velocity=<-1, -1>",
        "position=< 5, 0> velocity=< 1, 0>",
        "position=<-6, 0> velocity=< 2, 0>",
        "position=< 5, 9> velocity=< 1, -2>",
        "position=<14, 7> velocity=<-2, 0>",
        "position=<-3, 6> velocity=< 2, -1>",
    ]

    # Expected parse of ``test_inputs``: [(x, y) position, (dx, dy) velocity]
    # per line, in the same order.
    parser_outputs = [
        [(9, 1), (0, 2)],
        [(7, 0), (-1, 0)],
        [(3, -2), (-1, 1)],
        [(6, 10), (-2, -1)],
        [(2, -4), (2, 2)],
        [(-6, 10), (2, -2)],
        [(1, 8), (1, -1)],
        [(1, 7), (1, 0)],
        [(-3, 11), (1, -2)],
        [(7, 6), (-1, -1)],
        [(-2, 3), (1, 0)],
        [(-4, 3), (2, 0)],
        [(10, -3), (-1, 1)],
        [(5, 11), (1, -2)],
        [(4, 7), (0, -1)],
        [(8, -2), (0, 1)],
        [(15, 0), (-2, 0)],
        [(1, 6), (1, 0)],
        [(8, 9), (0, -1)],
        [(3, 3), (-1, 1)],
        [(0, 5), (0, -1)],
        [(-2, 2), (2, 0)],
        [(5, -2), (1, 2)],
        [(1, 4), (2, 1)],
        [(-2, 7), (2, -2)],
        [(3, 6), (-1, -1)],
        [(5, 0), (1, 0)],
        [(-6, 0), (2, 0)],
        [(5, 9), (1, -2)],
        [(14, 7), (-2, 0)],
        [(-3, 6), (2, -1)],
    ]

    # Rendered grids ('█' = lit point) after 0, 1, 2, 3 and 4 seconds.
    # At t=3 the points align to spell "HI".
    zero_seconds = [
        "........█.............",
        "................█.....",
        ".........█.█..█.......",
        "......................",
        "█..........█.█.......█",
        "...............█......",
        "....█.................",
        "..█.█....█............",
        ".......█..............",
        "......█...............",
        "...█...█.█...█........",
        "....█..█..█.........█.",
        ".......█..............",
        "...........█..█.......",
        "█...........█.........",
        "...█.......█..........",
    ]

    one_second = [
        "........█....█....",
        "......█.....█.....",
        "█.........█......█",
        "..................",
        "....█.............",
        "..██.........█....",
        "....█.█...........",
        "...██.██..█.......",
        "......█.█.........",
        "......█...█.....█.",
        "█...........█.....",
        "..█.....█.█.......",
    ]

    two_seconds = [
        "..........█...",
        "█..█...████..█",
        "..............",
        "....█....█....",
        "..█.█.........",
        "...█...█......",
        "...█..█..█.█..",
        "█....█.█......",
        ".█...█...██.█.",
        "....█.........",
    ]

    three_seconds = [
        "█...█..███",
        "█...█...█.",
        "█...█...█.",
        "█████...█.",
        "█...█...█.",
        "█...█...█.",
        "█...█...█.",
        "█...█..███",
    ]

    four_seconds = [
        "........█....",
        "....██...█.█.",
        "..█.....█..█.",
        ".█..██.██.█..",
        "...██.█....█.",
        ".......█....█",
        "..........█..",
        "█......█...█.",
        ".█.....██....",
        "...........█.",
        "...........█.",
    ]
class TestRescueMessage(unittest.TestCase, TestValues):
    """Unit tests for RescueMessage construction and time stepping."""

    def test_constructor(self):
        message = RescueMessage(self.parser_outputs)
        self.assertEqual("\n".join(self.zero_seconds), str(message))

    def test_advance(self):
        # Each parameterless advance() steps the simulation by one second.
        message = RescueMessage(self.parser_outputs)
        for grid in (self.one_second, self.two_seconds,
                     self.three_seconds, self.four_seconds):
            message.advance()
            self.assertEqual("\n".join(grid), str(message))

    def test_advance_using_n(self):
        # advance(n) steps by n seconds; negative n steps backwards.
        message = RescueMessage(self.parser_outputs)
        message.advance(1)
        self.assertEqual("\n".join(self.one_second), str(message))
        message.advance(3)
        self.assertEqual("\n".join(self.four_seconds), str(message))
        message.advance(-1)
        self.assertEqual("\n".join(self.three_seconds), str(message))
class TestCommon(unittest.TestCase, TestValues):
    """Tests for the shared parsing and search helpers."""

    module = common

    def test_parser(self):
        parsed = self.module.parse(self.test_inputs)
        self.assertEqual(self.parser_outputs, parsed)

    def test_find_time_with_closest_fit(self):
        message = RescueMessage(self.parser_outputs)
        self.assertEqual(3, self.module.find_time_with_closest_fit(message))
class TestSolution1(TestSolution):
    """End-to-end check: part 1 renders the hidden message."""

    module = solution1
    expected = [
        "█...█..███",
        "█...█...█.",
        "█...█...█.",
        "█████...█.",
        "█...█...█.",
        "█...█...█.",
        "█...█...█.",
        "█...█..███",
    ]

    def test_solver(self):
        self.assertEqual("\n".join(self.expected),
                         self.module.solve(self.input_text))
class TestSolution2(TestSolution):
    """End-to-end check: part 2 reports the number of seconds to wait."""

    module = solution2
    expected = 3

    def test_solver(self):
        self.assertEqual(self.expected, self.module.solve(self.input_text))
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.