id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
3532316 | # import os
import time
import xml.dom.minidom
import pathlib
DAC_CONTEST = pathlib.Path('/home/xilinx/jupyter_notebooks/dac_sdc_2021/')
IMG_DIR = DAC_CONTEST / 'images'
RESULT_DIR = DAC_CONTEST / 'result'
# Return a batch of image dir when `send` is called
class Team:
    """Contest helper for one DAC SDC 2021 team.

    Creates the team's working/result directories, serves the contest
    images in fixed-size batches, and writes detection results to an
    XML results file.
    """

    def __init__(self, teamname, batch_size):
        """Set up directories for *teamname* and index the contest images.

        Args:
            teamname: Team name, used as a sub-directory name.
            batch_size: Number of image paths returned per batch.
        """
        self._result_path = RESULT_DIR / teamname
        self.team_dir = DAC_CONTEST / teamname
        self.batch_size = batch_size
        for folder in (self.team_dir, self._result_path):
            # exist_ok avoids the is_dir()/mkdir() race of the original code.
            folder.mkdir(parents=True, exist_ok=True)
        self.img_list = self.get_image_paths()
        self.batch_count = 0

    def get_image_paths(self):
        """Return every ``*.jpg`` in IMG_DIR, sorted numerically by stem."""
        images = [f for f in IMG_DIR.iterdir() if f.suffix == '.jpg']
        images.sort(key=lambda p: int(p.stem))
        return images

    def get_next_batch(self):
        """Return the next batch of image paths, or None when exhausted.

        The final batch may be shorter than ``batch_size``.
        """
        start_idx = self.batch_count * self.batch_size
        self.batch_count += 1
        end_idx = self.batch_count * self.batch_size
        if start_idx >= len(self.img_list):
            return None
        # Slicing clamps past-the-end indices, so a short final batch needs
        # no special case.
        return self.img_list[start_idx:end_idx]

    def get_bitstream_path(self):
        """Return the team's FPGA bitstream path as a string."""
        return str(self.team_dir / "dac_sdc.bit")

    def reset_batch_count(self):
        """Restart batch iteration from the first image."""
        self.batch_count = 0

    def save_results_xml(self, result_rectangle, runtime, energy):
        """Write per-image bounding boxes plus runtime/energy to results.xml.

        Args:
            result_rectangle: Sequence of (xmin, xmax, ymin, ymax) tuples,
                one per image, in the same order as ``self.img_list``.
            runtime: Total inference runtime to record.
            energy: Total energy consumption to record.

        Raises:
            ValueError: If ``result_rectangle`` length differs from the
                number of images.
        """
        if len(result_rectangle) != len(self.img_list):
            raise ValueError("Result length not equal to number of images.")
        doc = xml.dom.minidom.Document()
        root = doc.createElement('results')
        # Attach the root exactly once.  The original code called
        # doc.appendChild(root) inside the per-image loop, which fails on the
        # second iteration because a document may have only one root element.
        doc.appendChild(root)

        # Runtime and energy are direct children of <results>, matching the
        # elements the original code effectively produced (its <performance>
        # wrapper was never attached to the document).
        runtime_e = doc.createElement('runtime')
        runtime_e.appendChild(doc.createTextNode(str(runtime)))
        root.appendChild(runtime_e)
        energy_e = doc.createElement('energy')
        energy_e.appendChild(doc.createTextNode(str(energy)))
        root.appendChild(energy_e)

        for img, rect in zip(self.img_list, result_rectangle):
            image_e = root.appendChild(doc.createElement("image"))

            name_e = doc.createElement('filename')
            name_e.appendChild(doc.createTextNode(img.name))
            image_e.appendChild(name_e)

            # Contest images are a fixed 640x360.
            size_e = doc.createElement('size')
            node_width = doc.createElement('width')
            node_width.appendChild(doc.createTextNode("640"))
            node_length = doc.createElement('length')
            node_length.appendChild(doc.createTextNode("360"))
            size_e.appendChild(node_width)
            size_e.appendChild(node_length)
            image_e.appendChild(size_e)

            object_node = doc.createElement('object')
            node_bnd_box = doc.createElement('bndbox')
            # rect is ordered (xmin, xmax, ymin, ymax).
            for tag, value in zip(('xmin', 'xmax', 'ymin', 'ymax'), rect):
                coord_e = doc.createElement(tag)
                coord_e.appendChild(doc.createTextNode(str(value)))
                node_bnd_box.appendChild(coord_e)
            object_node.appendChild(node_bnd_box)
            image_e.appendChild(object_node)

        file_name = self._result_path / "results.xml"
        with open(file_name, 'w') as fp:
            doc.writexml(fp, indent='\t', addindent='\t', newl='\n',
                         encoding="utf-8")
| StarcoderdataPython |
9702725 | <gh_stars>0
from django import forms
from osc_bge.student import models as student_models
class AccountingForm(forms.ModelForm):
    """ModelForm for a student's accounting record.

    Exposes only the ``invoice`` field of ``StudentAccounting``.
    """

    class Meta:
        model = student_models.StudentAccounting
        fields = ('invoice',)
| StarcoderdataPython |
9746345 | <gh_stars>1-10
import open3d as o3d
import numpy as np
# Load the scan and compute its axis-aligned bounding box (drawn in red).
pcd = o3d.io.read_point_cloud("/home/ros2-foxy/uneven_2.pcd")
aabb = pcd.get_axis_aligned_bounding_box()
aabb.color = (1, 0, 0)
box_corners = np.asarray(aabb.get_box_points())
# NOTE(review): assumes Open3D's get_box_points() ordering places the minimum
# corner at index 0 and the diagonally opposite (maximum) corner at index 4
# -- confirm against the Open3D version in use.
min_corner = box_corners[0]
max_corner = box_corners[4]
print(box_corners)
# Horizontal extents of the cloud; the grid below tiles the XY footprint.
x_dist = abs(max_corner[0] - min_corner[0])
y_dist = abs(max_corner[1] - min_corner[1])
# Edge length of each grid cell (same units as the point cloud).
step_size = 20
geometries = []
#geometries.append(aabb)
# Tile the footprint into step_size x step_size columns (full height in Z)
# and crop the cloud to each cell.
for x in range(0, int(x_dist / step_size + 1)):
    for y in range(0, int(y_dist / step_size + 1)):
        current_min_corner = [
            min_corner[0] + step_size * x,
            min_corner[1] + step_size * y,
            min_corner[2],
        ]
        current_max_corner = [
            current_min_corner[0] + step_size,
            current_min_corner[1] + step_size,
            max_corner[2],
        ]
        this_box = o3d.geometry.AxisAlignedBoundingBox(
            current_min_corner, current_max_corner
        )
        cropped_pcd = pcd.crop(this_box)
        print(cropped_pcd)
        geometries.append(cropped_pcd)
        geometries.append(this_box)
        if not cropped_pcd.has_points():
            print("PCL with no points !")
# Show every cell's cropped cloud together with its bounding box.
o3d.visualization.draw_geometries(geometries)
| StarcoderdataPython |
5092024 | <reponame>mcmk3/qiita
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from . import base
from . import util
from . import sql_connection
from . import metadata_template
from . import analysis
from . import artifact
from . import archive
from . import commands
from . import environment_manager
from . import exceptions
from . import investigation
from . import logger
from . import meta_util
from . import ontology
from . import portal
from . import reference
from . import software
from . import study
from . import user
from . import processing_job
__version__ = "052020"
__all__ = ["analysis", "artifact", "archive", "base", "commands",
"environment_manager", "exceptions", "investigation", "logger",
"meta_util", "ontology", "portal", "reference",
"software", "sql_connection", "study", "user", "util",
"metadata_template", "processing_job"]
| StarcoderdataPython |
4806348 | <reponame>Hyrschall/Woz-U-Python-Class<filename>lesson_seven/main/lesson_seven_pages.py
# L7 Standard Library Testing
# Page 6
# import math, random
#
# my_random = random.random()*100
#
# square_root = math.sqrt(my_random)
# Page 8
# file = open('example.txt', 'w')
#
# print('File Name:', file.name)
# print('File Open Mode:', file.mode)
#
#
# def status(x):
# if x.closed:
# return 'Closed'
# else:
# return 'Open'
#
#
# print('File Status:', status(file))
#
# file.close()
#
# print('File Status:', status(file))
# Page 9
# story = "Once upon a time there was\n"
# story += "a dog who loved to play ball.\n"
# story += "This dog could run as fast as the wind.\n"
#
# file = open('story.txt', 'w')
# file.write(story)
# file.close()
#
# file = open('story.txt', 'r')
#
# contents = file.read()
# print(contents)
# file.close()
# Page 10
# new_text = 'Python was conceived in the late 1990s by <NAME>.'
# with open('updating.txt', 'w') as file:
# file.write(new_text)
# print('\nFile Now Closed?:', file.closed)
# print('File Now Closed?:', file.closed)
#
# with open('updating.txt', 'r+') as file:
# new_text = file.read()
# print('\nString:', new_text)
# print('\nPosition In File Now:', file.tell())
# position = file.seek(33)
# print('Position In File Now:', file.tell())
# file.write('1980s')
# file.seek(0)
# new_text = file.read()
# print('\nString:', new_text)
| StarcoderdataPython |
8155961 | <filename>salter.py
#!/usr/bin/python
import os
import shutil
import subprocess
import time
from argparse import ArgumentParser
from paramiko import SSHClient, AutoAddPolicy
__version__ = 'SaltPY 0.1 Alpha'
__author__ = 'Riley - <EMAIL>'
# This is work in-progress.
# TODO: Finish SSH classes and start testing basic saltstack setups with SaltPY.
class Setup(object):
    '''
    Setup salt master, minions,
    and download formulas from Riley's github.
    Mostly using this to set a bunch of stuff to use later.
    '''

    def __init__(self, host, repo='git clone https://github.com/sadminriley/saltstack.git'):
        # Target host for minion setup and the git command used to fetch
        # the formula repository.
        self.host = host
        self.repo = repo

    def master_setup(self):
        '''
        Using subprocess.call to execute shell commands via subprocess.call() to
        setup Salt master and download formulas from Riley's git
        '''
        # NOTE(review): self.installer, self.install_master, self.formula_repo
        # and self.formula_dir are never assigned anywhere in this class, so
        # this method raises AttributeError as written (the file is marked
        # work-in-progress) -- the attributes still need to be defined.
        subprocess.call(self.installer)
        subprocess.call(self.install_master)
        subprocess.call(self.formula_repo, cwd=self.formula_dir)
        # NOTE(review): shutil.move() does not expand shell wildcards, so the
        # '*' here is taken literally and this call will fail -- confirm
        # intent (likely needs glob.glob() over the directory contents).
        shutil.move('/srv/salt/saltstack/*', self.formula_dir)

    def minion_setup(self):
        '''
        Setup Salt minions.
        Executing commands to SSH class via paramiko.
        '''
        ssh = SSH(self.host)
        commands = ['curl -L https://bootstrap.saltstack.com -o install_salt.sh','sh install_salt.sh -P -M','systemctl enable salt-minion','systemctl start salt-minion']
        print("Running salt install....this may take a few minutes!")
        ssh.connect(commands)
        # Give the bootstrap time to start the minion before accepting its key.
        time.sleep(35)
        accept_key = ['salt-key', '-A']
        subprocess.call(accept_key)
class SSH(object):
    '''
    Object to establish ssh connection.
    Example usage of ssh object-
    >>> from salter import SSH
    >>> host = 'riley.science'
    >>> ssh = SSH(host)
    >>> ssh.connect()
    '''
    # Class-level paramiko client shared by all instances.
    client = SSHClient()

    def __init__(self, host, user='root', port=22, ssh_key='~/.ssh/id_rsa.<EMAIL>', password=<PASSWORD>):
        # NOTE(review): "<PASSWORD>" (and the "<EMAIL>" in the key path) are
        # redacted placeholders from the dataset; as written this signature is
        # a syntax error and needs a real default (e.g. password=None).
        self.user = user
        self.host = host
        self.port = port
        self.sshkey = ssh_key
        self.password = password

    def connect(self, commands):
        # Auto-accept unknown host keys, run each command in turn and print
        # its stdout, then close the connection.
        self.client.set_missing_host_key_policy(AutoAddPolicy())
        self.client.connect(self.host, port=self.port, username=self.user, password=self.password)
        for command in commands:
            stdin, stdout, stderr = self.client.exec_command(command)
            for line in stdout.readlines():
                # NOTE(review): Python 2 print statement -- this module
                # targets Python 2 (see also main() below).
                print line
        self.client.close()
class Cloud(object):
    '''
    Thin wrapper around the ``salt-cloud`` listing commands for a provider.
    '''

    def __init__(self):
        provider = 'digital_ocean'
        # Command argument lists are kept on private names so they do not
        # shadow the methods below.  The original stored them as
        # ``self.list_sizes`` / ``self.list_images``, which made the bound
        # methods unreachable: calling instance.list_sizes() tried to call a
        # list and raised TypeError.
        self._profiles_cmd = ['salt-cloud',
                              '--list-profiles',
                              provider]
        self._sizes_cmd = ['salt-cloud',
                           '--list-sizes',
                           provider]
        self._images_cmd = ['salt-cloud',
                            '--list-images',
                            provider]

    def list_pro(self):
        '''Print the configured salt-cloud profiles for the provider.'''
        subprocess.call(self._profiles_cmd)

    def list_sizes(self):
        '''Print the instance sizes available from the provider.'''
        subprocess.call(self._sizes_cmd)

    def list_images(self):
        '''Print the images available from the provider.'''
        subprocess.call(self._images_cmd)
class Master(object):
    '''
    Controls minions from the Salt master.
    '''

    def __init__(self, minions='', ping='test.ping'):
        # Default target list and health-check function name.
        self.minions = minions
        self.ping = ping

    def test_ping(self, targets):
        '''Run ``salt <targets> test.ping`` for the given target expression.

        Uses an argument list instead of the original
        ``os.system('salt ' + targets + ' test.ping')`` shell string, so
        *targets* is passed as one literal argument: no shell injection, and
        target patterns such as ``'*'`` are no longer globbed by the shell.
        '''
        subprocess.call(['salt', targets, 'test.ping'])
def main():
    '''
    Print the version banner and parse command-line arguments.

    Returns the parsed argparse namespace.
    '''
    # NOTE(review): Python 2 print statement -- this script targets Python 2.
    print __version__
    parser = ArgumentParser(description='A Saltstack utility' +
                            ' to make Saltstack easier to' +
                            ' use and setup')
    parser.add_argument('--profiles',
                        help='List provider profiles',
                        dest='profiles',
                        action='store_true')
    parser.add_argument('--sizes',
                        help='List provider sizes',
                        dest='sizes',
                        action='store_true')
    parser.add_argument('--setupmaster',
                        help='Setup a Salt master',
                        dest='setupmaster',
                        action='store_true')
    args = parser.parse_args()
    # NOTE(review): the parsed flags are returned but never acted upon by the
    # module-level main() call -- argument handling is still TODO.
    return args
main()
| StarcoderdataPython |
3371003 | <filename>sdks/apigw-manager/tests/apigw_manager/apigw/test_authentication.py<gh_stars>10-100
# -*- coding: utf-8 -*-
"""
* TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-蓝鲸 PaaS 平台(BlueKing-PaaS) available.
* Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
"""
import pytest
from django.contrib.auth.models import AnonymousUser
from django.core.cache import caches
from django.core.cache.backends.dummy import DummyCache
from apigw_manager.apigw import authentication
@pytest.fixture()
def mock_response(mocker):
    """MagicMock standing in for the downstream get_response callable."""
    return mocker.MagicMock()
@pytest.fixture()
def mock_request(rf):
    """Bare GET request built with Django's RequestFactory."""
    return rf.get("/")
@pytest.fixture()
def request_user(mock_request, mocker):
    """Attach a mock authenticated user to mock_request and return it."""
    mock_request.user = mocker.MagicMock()
    return mock_request.user
@pytest.fixture()
def django_jwt_cache_name():
    """Cache alias under which the mock JWT public-key cache is registered."""
    return "test-jwt"
@pytest.fixture()
def django_jwt_cache(settings, django_jwt_cache_name, mocker):
    """Install a mock cache under the JWT cache alias and configure settings.

    The mock is registered directly in Django's cache handler and removed on
    teardown so other tests see an untouched cache registry.
    """
    settings.APIGW_JWT_PUBLIC_KEY_CACHE_MINUTES = 1
    settings.APIGW_JWT_PUBLIC_KEY_CACHE_VERSION = "0"
    settings.APIGW_JWT_PUBLIC_KEY_CACHE_NAME = django_jwt_cache_name
    cache = mocker.MagicMock()
    caches._caches.caches[django_jwt_cache_name] = cache
    try:
        yield cache
    finally:
        del caches._caches.caches[django_jwt_cache_name]
@pytest.fixture()
def apigw_request(jwt_encoded, mock_request):
    """mock_request carrying an encoded JWT in the X-Bkapi-JWT header."""
    mock_request.META["HTTP_X_BKAPI_JWT"] = jwt_encoded
    return mock_request
@pytest.fixture()
def jwt_request(api_name, jwt_decoded, mock_request):
    """mock_request with an already-decoded JWT attached as request.jwt."""
    mock_request.jwt = authentication.ApiGatewayJWTMiddleware.JWT(
        api_name=api_name,
        payload=jwt_decoded,
    )
    return mock_request
class TestApiGatewayJWTMiddleware:
    """Tests for ApiGatewayJWTMiddleware: JWT decoding and request tagging."""

    @pytest.fixture(autouse=True)
    def setup_middleware(self, mock_response, api_name):
        self.middleware = authentication.ApiGatewayJWTMiddleware(mock_response)

    def test_default_config(self, api_name, jwt_algorithm):
        assert self.middleware.default_api_name == api_name
        assert self.middleware.algorithm == jwt_algorithm

    def test_get_public_key_not_set(self, settings, api_name):
        assert not hasattr(settings, 'APIGW_PUBLIC_KEY')
        assert self.middleware.get_public_key(api_name) is None

    def test_get_public_key(self, settings, api_name, public_key):
        settings.APIGW_PUBLIC_KEY = public_key
        assert self.middleware.get_public_key(api_name) == public_key

    def test_decode_jwt_header(self, jwt_header, jwt_encoded):
        assert self.middleware.decode_jwt_header(jwt_encoded) == jwt_header

    def test_decode_jwt(self, jwt_encoded, public_key, jwt_algorithm, jwt_decoded):
        decoded = self.middleware.decode_jwt(jwt_encoded, public_key, jwt_algorithm)
        assert decoded == jwt_decoded

    def test_call_without_jwt(self, mock_response, mock_request):
        # Without an X-Bkapi-JWT header the request passes through untouched.
        assert self.middleware.JWT_KEY_NAME not in mock_request.META
        self.middleware(mock_request)
        mock_response.assert_called_with(mock_request)
        assert not hasattr(mock_request, 'jwt')

    def test_call_without_public_key(self, settings, apigw_request, mock_response):
        assert not hasattr(settings, 'APIGW_PUBLIC_KEY')
        self.middleware(apigw_request)
        mock_response.assert_called_with(apigw_request)
        # Bug fix: the original asserted hasattr(mock_request, 'jwt'), but in
        # this scope ``mock_request`` is the module-level fixture *function*,
        # so the assertion was vacuously true. Check the actual request.
        assert not hasattr(apigw_request, 'jwt')

    def test_call(self, settings, public_key, apigw_request, mock_response, api_name):
        assert self.middleware.JWT_KEY_NAME in apigw_request.META
        settings.APIGW_PUBLIC_KEY = public_key
        self.middleware(apigw_request)
        mock_response.assert_called_with(apigw_request)
        assert apigw_request.jwt.api_name == api_name
        # CSRF checks are skipped for verified gateway traffic.
        assert apigw_request._dont_enforce_csrf_checks
class TestApiGatewayJWTGenericMiddleware:
    """Tests for the cache-backed public-key lookup middleware."""

    def test_init_with_settings(self, settings, mock_response, django_jwt_cache):
        middleware = authentication.ApiGatewayJWTGenericMiddleware(mock_response)
        assert middleware.cache_expires == settings.APIGW_JWT_PUBLIC_KEY_CACHE_MINUTES * 60
        assert middleware.cache_version == settings.APIGW_JWT_PUBLIC_KEY_CACHE_VERSION
        assert middleware.cache == django_jwt_cache

    def test_init_without_settings(self, mock_response):
        # Without cache settings the middleware falls back to a no-op cache.
        middleware = authentication.ApiGatewayJWTGenericMiddleware(mock_response)
        assert middleware.cache_expires == 0
        assert isinstance(middleware.cache, DummyCache)

    def test_get_public_key_from_cache(self, mock_response, api_name, django_jwt_cache):
        django_jwt_cache.get.return_value = "testing"
        middleware = authentication.ApiGatewayJWTGenericMiddleware(mock_response)
        public_key = middleware.get_public_key(api_name)
        assert public_key == "testing"

    def test_get_public_key_cache_missed(self, mock_response, api_name, django_jwt_cache, public_key_in_db):
        # A cache miss falls through to the DB value and repopulates the cache.
        django_jwt_cache.get.return_value = None
        middleware = authentication.ApiGatewayJWTGenericMiddleware(mock_response)
        public_key = middleware.get_public_key(api_name)
        assert public_key == public_key_in_db
        django_jwt_cache.set.assert_called_with(
            "apigw:public_key:%s" % api_name,
            public_key_in_db,
            middleware.cache_expires,
            middleware.cache_version,
        )
class TestApiGatewayJWTAppMiddleware:
    """Tests for the middleware that attaches ``request.app`` from the JWT."""

    @pytest.fixture(autouse=True)
    def setup_middleware(self, mock_response):
        self.middleware = authentication.ApiGatewayJWTAppMiddleware(mock_response)

    def test_make_app(self):
        app = self.middleware.make_app(bk_app_code="testing", verified=False)
        assert app.bk_app_code == "testing"
        assert app.verified is False

    def test_call_without_jwt(self, mock_request, mock_response):
        assert not hasattr(mock_request, 'jwt')
        self.middleware(mock_request)
        assert not hasattr(mock_request, 'app')
        mock_response.assert_called_with(mock_request)

    @pytest.mark.parametrize("field", ["app_code", "bk_app_code"])
    def test_call_with_jwt(self, jwt_request, mock_response, jwt_app, field):
        # Both the legacy ("app_code") and current ("bk_app_code") payload
        # keys must be recognized; rewrite the payload to use one of them.
        # NOTE(review): the jwt_app fixture argument is immediately shadowed
        # by this local rebinding, so it is requested only for side effects.
        jwt_app = jwt_request.jwt.payload["app"]
        app_code = jwt_app.pop("app_code", None) or jwt_app.pop("bk_app_code", None)
        jwt_app[field] = app_code
        self.middleware(jwt_request)
        assert jwt_request.app.bk_app_code == app_code
        assert jwt_request.app.verified == jwt_app["verified"]
        mock_response.assert_called_with(jwt_request)
class TestApiGatewayJWTUserMiddleware:
    """Tests for the middleware that resolves ``request.user`` from the JWT."""

    @pytest.fixture(autouse=True)
    def setup_middleware(self, mock_response):
        self.middleware = authentication.ApiGatewayJWTUserMiddleware(mock_response)

    @pytest.fixture(autouse=True)
    def patch_authenticate(self, mocker):
        # Patch django.contrib.auth.authenticate for every test in the class.
        self.authenticate_function = mocker.patch("django.contrib.auth.authenticate")

    @pytest.fixture(autouse=True)
    def setup_user(self, mocker):
        self.user = mocker.MagicMock()

    def test_get_user(self, jwt_request):
        self.authenticate_function.return_value = self.user
        assert self.middleware.get_user(jwt_request) == self.user

    def test_get_user_not_found(self, jwt_request):
        self.authenticate_function.return_value = None
        assert self.middleware.get_user(jwt_request) is None

    def test_call_without_jwt(self, mock_request, mock_response):
        assert not hasattr(mock_request, 'jwt')
        self.middleware(mock_request)
        assert not hasattr(mock_request, 'user')
        mock_response.assert_called_with(mock_request)

    def test_call_with_authenticated_user(self, mock_request, mock_response, request_user):
        # A request that already carries a user passes through unchanged.
        self.middleware(mock_request)
        assert mock_request.user == request_user
        mock_response.assert_called_with(mock_request)

    @pytest.mark.parametrize("field", ["username", "bk_username"])
    def test_call_with_jwt(self, jwt_request, mock_response, field):
        # Both legacy ("username") and current ("bk_username") keys are accepted.
        self.authenticate_function.return_value = self.user
        jwt_user = jwt_request.jwt.payload["user"]
        username = jwt_user.pop("username", None) or jwt_user.pop("bk_username", None)
        jwt_user[field] = username
        self.middleware(jwt_request)
        assert jwt_request.user == self.user
        mock_response.assert_called_with(jwt_request)
class TestUserModelBackend:
    """Tests for UserModelBackend's verified / anonymous user resolution."""

    @pytest.fixture(autouse=True)
    def setup_backend(self, mocker):
        self.user_maker = mocker.MagicMock()
        self.backend = authentication.UserModelBackend()
        self.backend.user_maker = self.user_maker

    def test_authenticate_user(self, mock_request):
        # Verified credentials resolve to a real user via user_maker.
        user = self.backend.authenticate(mock_request, api_name="test", bk_username="admin", verified=True)
        assert not isinstance(user, AnonymousUser)
        self.user_maker.assert_called_with("admin")

    def test_authenticate_anonymou_user(self, mock_request):
        # Unverified credentials yield an AnonymousUser and never hit user_maker.
        # NOTE(review): method name has a typo ("anonymou") -- harmless, but
        # worth renaming to test_authenticate_anonymous_user.
        user = self.backend.authenticate(mock_request, api_name="test", bk_username="admin", verified=False)
        assert isinstance(user, AnonymousUser)
        self.user_maker.assert_not_called()
| StarcoderdataPython |
6525078 | <reponame>pedaling/critics<gh_stars>10-100
# coding: utf-8
from collections import namedtuple
import json
import datetime
import logging
import os
import re
from time import mktime
import feedparser
import requests
from lxml import html, etree
from .compat import python_2_unicode_compatible
logger = logging.getLogger('critics')
timeout = os.environ.get('CRITICS_TIMEOUT', 5)
utf8_parser = html.HTMLParser(encoding='utf-8')
@python_2_unicode_compatible
class Review(namedtuple('Review',
                        ['id', 'platform', 'title', 'rating', 'summary', 'url',
                         'author', 'date', 'language', 'version'])):
    """Immutable record of a single app-store review (iOS or Android)."""

    # namedtuple subclass: keep instances free of a per-instance __dict__.
    __slots__ = ()

    def __str__(self):
        # Multi-line human-readable dump of every field.
        return (u'Review (%s):\ntitle=%s\nrating=%s\nsummary=%s\nurl=%s\n'
                u'author=%s\ndate=%s\nlanguage=%s\nversion=%s' % (
                    self.id,
                    self.title,
                    self.rating,
                    self.summary,
                    self.url,
                    self.author,
                    self.date,
                    self.language,
                    self.version
                ))
def get_ios_reviews(app_id, language, limit=100):
    """Fetch up to *limit* most recent App Store reviews for *app_id*.

    Args:
        app_id: iTunes application id.
        language: Store country code inserted into the RSS URL (falsy for
            the default store).
        limit: Maximum number of reviews to return.

    Returns:
        List of Review namedtuples with platform='ios'.
    """
    url = 'https://itunes.apple.com/%(language)srss/customerreviews/id=%(app_id)s/sortBy=mostRecent/xml' % {
        'language': '%s/' % language if language else '', 'app_id': app_id}
    response = requests.get(url, headers={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1)'},
                            timeout=timeout)
    response.encoding = 'utf-8'  # avoid chardet not guessing correctly
    feed = feedparser.parse(response.text)
    # The first feed entry is app metadata, not a review -- skip it.
    reviews = [Review(
        id=entry.id,
        platform='ios',
        title=entry.title,
        rating=int(entry.im_rating),
        summary=entry.summary,
        url=entry.href,
        author=entry.author,  # author url: entry.href
        date=datetime.datetime.fromtimestamp(mktime(entry.updated_parsed)),
        language=language,
        version=entry.im_version
    ) for entry in feed['entries'][1:1 + limit]]
    return reviews
def get_android_reviews(app_id, language, limit=100):
    """Fetch up to *limit* most recent Google Play reviews for *app_id*.

    Posts to the (undocumented) Play Store ``getreviews`` endpoint, strips
    the anti-XSSI prefix from the response, then scrapes the embedded HTML.

    Args:
        app_id: Play Store package name.
        language: 'hl' store language code, or falsy for the default.
        limit: Maximum number of reviews to return.

    Returns:
        List of Review namedtuples (platform='android'); an empty list when
        there are no reviews or the response cannot be parsed.
    """
    url = 'https://play.google.com/store/getreviews'
    payload = {'xhr': 1, 'id': app_id, 'reviewSortOrder': 0, 'pageNum': 0, 'reviewType': 0}
    if language:
        payload['hl'] = language
    response = requests.post(url, data=payload, timeout=timeout)
    # The endpoint prefixes the JSON body with junk; cut from the first '['.
    json_source = response.text[response.text.find('['):]
    response_as_json = json.loads(json_source)
    try:
        response_as_html = response_as_json[0][2]
    except IndexError:
        logger.error('Unexpected json for app_id=%s', app_id, exc_info=True)
        return []
    # no reviews
    if not response_as_html:
        return []
    try:
        doc = html.fromstring(response_as_html.encode('utf-8'), parser=utf8_parser)
        reviews_html = doc.cssselect('.single-review')
    except etree.XMLSyntaxError:
        logger.error('Unparsable html', exc_info=True)
        return []

    def get_rating_from_html(review_html):
        # Star rating is only exposed as a CSS width (20% per star),
        # e.g. 'width: 20%'.  The pattern is a raw string now: the original
        # '(\d+)%' contained an invalid '\d' escape sequence.
        star_style = review_html.cssselect('.current-rating')[0].get('style')
        return int(re.search(r'(\d+)%', star_style).group(1)) / 20

    reviews = [Review(
        id=review_html.cssselect('.review-header')[0].get('data-reviewid'),
        platform='android',
        title=review_html.cssselect('.review-body .review-title')[0].text_content().strip(),
        rating=get_rating_from_html(review_html),
        summary=review_html.cssselect('.review-body .review-title')[0].tail.strip(),
        url='https://play.google.com' + review_html.cssselect('.reviews-permalink')[0].get('href'),
        author=review_html.cssselect('.review-header .author-name')[0].text_content().strip(),
        date=review_html.cssselect('.review-header .review-date')[0].text_content().strip(),
        language=language,
        version=None
    ) for review_html in reviews_html[:limit]]
    return reviews
| StarcoderdataPython |
4821261 | from django.conf import settings
from storages.backends.s3boto import S3BotoStorage
class MediaStorage(S3BotoStorage):
    """S3 storage backend for user-uploaded media files."""
    bucket_name = settings.AWS_MEDIA_STORAGE_BUCKET_NAME
class StaticStorage(S3BotoStorage):
    """S3 storage backend for collected static assets."""
    bucket_name = settings.AWS_STATIC_STORAGE_BUCKET_NAME
| StarcoderdataPython |
5111871 | <filename>assignment_01/src/q1.py
#!/usr/bin/python
"""
author: <NAME>
author: 18998712
module: Applied Mathematics(Numerical Analysis) TW324
task : computer assignment 01
since : Friday-09-02-2018
"""
def question1a():
    """Tabulate two algebraically equal forms of (1 - cos x) / sin^2 x.

    E1 = (1 - cos x) / sin^2 x suffers catastrophic cancellation as x -> 0,
    while the equivalent E2 = 1 / (1 + cos x) is numerically stable; the
    printed table for x = 10^0 .. 10^-9 shows the two columns diverging.
    (Python 2 module: print statements and xrange.)
    """
    step, x = 1./10,1.0
    print "=" * 64
    for i in xrange(0,10):
        E1 = (1-cos(x)) / pow(sin(x),2)
        E2 = 1 / ( 1 + cos(x))
        print( format(x, ".14f") + "\t" + format(E1, ".14f") + "\t" + format(E2, ".14f"))
        # Shrink x by a factor of 10 each pass.
        x = x * step
    print "=" * 64
def question1b():
    """Tabulate two algebraically equal forms of (1 - sec x) / tan^2 x.

    F1 = (1 - 1/cos x) / (sin x / cos x)^2 loses accuracy as x -> 0 through
    cancellation, while the equivalent F2 = -cos x / (1 + cos x) is stable;
    the printed table for x = 10^0 .. 10^-9 makes the divergence visible.
    (Python 2 module: print statements and xrange.)
    """
    print "=" * 64
    step, x = 1./10,1.0
    for i in xrange(0,10):
        F1 = ((1-1/cos(x))/ pow(sin(x)/cos(x),2))
        F2 = -1*(cos(x) / ( 1 + cos(x)))
        print( format(x, ".14f") + "\t" + format(F1, ".14f") + "\t" + format(F2, ".14f"))
        # Shrink x by a factor of 10 each pass.
        x = x * step
    print "=" * 64
from math import (cos, sin)
question1a() #call code for question 1a
question1b() #call code for question 1b
| StarcoderdataPython |
1652730 | # Copyright 2019 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The main running pipeline for the rewriter.
"""
import sys
from examples.Cliner.Cliner import ClinicalNER
from examples.Cliner.Cliner_train import CliTrain
from forte.data.data_pack import DataPack
from forte.data.readers import RawDataDeserializeReader
from forte.pipeline import Pipeline
from forte.processors.writers import PackNameJsonPackWriter
# Let's create a pipeline first.
pipeline = Pipeline[DataPack]()
def do_process(input_pack_str: str) -> None:
    """Run the configured pipeline on one serialized DataPack string."""
    pipeline.process([input_pack_str])
    # data_json = datapack.serialize()
    # with open('generation.json', 'w') as fo:
    #     fo.write(data_json)
if __name__ == '__main__':
    if sys.argv[1] == 'train':  # train mode
        model = CliTrain()
        model.train()
    else:  # inference mode
        # Deserialize raw packs, run clinical NER with the configs given on
        # the command line (argv[3]=output config, argv[4]=data config), and
        # write the resulting packs as indented JSON under ./output.
        pipeline.set_reader(RawDataDeserializeReader())
        pipeline.add(ClinicalNER(), config={
            'config_output': sys.argv[3],
            'config_data': sys.argv[4]
        })
        pipeline.add(
            PackNameJsonPackWriter(),
            {
                'output_dir': 'output',
                'indent': 2,
                'overwrite': True,
            }
        )
        pipeline.initialize()
        with open('Cliner_input.json') as fi:
            test_str = fi.read()
        do_process(test_str)
| StarcoderdataPython |
6500589 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# By: <NAME>
# Standard libraries
# External libraries
# Internal libraries
from .pnet import PNet
from .rnet import RNet
from .onet import ONet
| StarcoderdataPython |
# NOTE(review): AStudio, RollingPolygon, AImage and IPython are not imported
# in this snippet -- presumably supplied by the hosting notebook environment.
studio = AStudio()
shape = RollingPolygon(width=200, N=10)
studio.append(shape)
studio.append(AImage(width=100, image='cat_exotic_shorthair.png'))
# Render 200 frames, then display the assembled animation inline.
for i in range(200):
    studio.render()
IPython.display.Image(studio.create_anime())
| StarcoderdataPython |
1662870 | <reponame>efargas/PyBBIO
# gpio.py
# Part of PyBBIO
# github.com/alexanderhiam/PyBBIO
# MIT License
#
# Beaglebone GPIO driver
import os, math, sysfs
from bbio.util import addToCleanup
from config import GET_USR_LED_DIRECTORY, GPIO, GPIO_FILE_BASE, INPUT,\
CONF_PULLUP, CONF_PULLDOWN, CONF_PULL_DISABLE,\
CONF_GPIO_INPUT, CONF_GPIO_OUTPUT, FALLING, HIGH, LOW,\
MSBFIRST
from bbio.platform.platform import detect_platform
_platform = detect_platform()
if "3.8" in _platform:
from bone_3_8 import pinmux
elif "3.2" in _platform:
from bone_3_2 import pinmux
del _platform
def getGPIODirectory(gpio_pin):
    """ Returns the sysfs kernel driver base directory for the given pin. """
    if 'USR' in gpio_pin:
        # USR LEDs use a different driver
        return GET_USR_LED_DIRECTORY(gpio_pin)
    # GPIO[pin][2] holds the kernel GPIO number used in the sysfs path.
    gpio_num = GPIO[gpio_pin][2]
    return '%s/gpio%i' % (GPIO_FILE_BASE, gpio_num)
def getGPIODirectionFile(gpio_pin):
    """ Returns the absolute path to the direction control file for the given
        pin, or '' for USR LEDs (whose driver has no direction file). """
    if 'USR' in gpio_pin:
        # USR LED driver doesn't have a direction file
        return ''
    d = getGPIODirectory(gpio_pin)
    return '%s/direction' % d
def getGPIOStateFile(gpio_pin):
    """ Returns the absolute path to the state control file for the given pin. """
    d = getGPIODirectory(gpio_pin)
    if 'USR' in gpio_pin:
        # USR LEDs use a different driver
        return '%s/brightness' % d
    return '%s/value' % d
def pinMode(gpio_pin, direction, pull=0, preserve_mode_on_exit=False):
    """ Sets given digital pin to input if direction=1, output otherwise.
        'pull' will set the pull up/down resistor if setting as an input:
        pull=-1 for pull-down, pull=1 for pull up, pull=0 for none.
        If preserve_mode_on_exit=True, the DT overlay and will remain
        loaded, the pin will remain exported to user-space control, and
        the INPUT/OUTPUT mode will be preserved when the program exits. """
    if 'USR' in gpio_pin:
        # On-board USR LEDs are output-only.
        if direction == INPUT:
            print 'warning: cannot set USR LEDs to INPUT'
        return
    assert (gpio_pin in GPIO), "*Invalid GPIO pin: '%s'" % gpio_pin
    # Export the pin to sysfs user-space control; schedule an unexport at
    # program exit unless the caller asked to preserve the mode.
    exported = pinmux.export(gpio_pin)
    if not exported:
        print "warning: could not export pin '%s', skipping pinMode()" % gpio_pin
        return
    elif not preserve_mode_on_exit:
        addToCleanup(lambda: pinmux.unexport(gpio_pin))
    direction_file = getGPIODirectionFile(gpio_pin)
    if (direction == INPUT):
        # Pinmux: translate the pull argument into the mux config bits.
        if (pull > 0): pull = CONF_PULLUP
        elif (pull < 0): pull = CONF_PULLDOWN
        else: pull = CONF_PULL_DISABLE
        pinmux.pinMux(gpio_pin, CONF_GPIO_INPUT | pull, preserve_mode_on_exit)
        # Set input:
        with open(direction_file, 'wb') as f:
            f.write('in')
        return
    # Pinmux:
    pinmux.pinMux(gpio_pin, CONF_GPIO_OUTPUT, preserve_mode_on_exit)
    # Set output:
    with open(direction_file, 'wb') as f:
        f.write('out')
def digitalWrite(gpio_pin, state):
    """ Writes given digital pin low if state=0, high otherwise. """
    assert (gpio_pin in GPIO), "*Invalid GPIO pin: '%s'" % gpio_pin
    gpio_file = getGPIOStateFile(gpio_pin)
    # The sysfs value file only exists once the pin has been exported.
    if not os.path.exists(gpio_file):
        print "warning: digitalWrite() failed, pin '%s' not exported." % gpio_pin +\
              " Did you call pinMode()?"
        return
    if (state):
        sysfs.kernelFilenameIO(gpio_file, '1')
    else:
        sysfs.kernelFilenameIO(gpio_file, '0')
def digitalRead(gpio_pin):
    """ Returns input pin state as 1 or 0. """
    assert (gpio_pin in GPIO), "*Invalid GPIO pin: '%s'" % gpio_pin
    gpio_file = getGPIOStateFile(gpio_pin)
    # The sysfs value file holds '0' or '1' as text.
    return int(sysfs.kernelFilenameIO(gpio_file))
def toggle(gpio_pin):
    """ Toggles the state of the given digital pin. """
    current_state = digitalRead(gpio_pin)
    digitalWrite(gpio_pin, current_state ^ 1)
def pinState(gpio_pin):
    """ Returns the state of a digital pin if it is configured as
        an output. Returns None if it is configuredas an input. """
    # With sysfs driver this is identical to digitalRead()
    return digitalRead(gpio_pin)
def shiftIn(data_pin, clk_pin, bit_order, n_bits=8, edge=FALLING):
    """ Implements software SPI on the given pins to receive given number
        of bits from a slave device. edge is the edge which triggers the
        device to write data. Returns the received bits as an integer. """
    # Ensure clock is in idle state:
    digitalWrite(clk_pin, HIGH if (edge==FALLING) else LOW)
    # MSB first counts the shift index down; LSB first counts it up.
    if (bit_order == MSBFIRST): loop_range = (n_bits-1, -1, -1)
    else: loop_range = (n_bits,)
    data = 0
    for i in range(*loop_range):
        # Pulse the clock through the active edge, then sample the data pin.
        digitalWrite(clk_pin, LOW if (edge==FALLING) else HIGH)
        digitalWrite(clk_pin, HIGH if (edge==FALLING) else LOW)
        data |= digitalRead(data_pin) << i
    return data
def shiftOut(data_pin, clk_pin, bit_order, data, edge=FALLING):
    """ Implements software SPI on the given pins to shift out data.
        data can be list, string, or integer, and if more than one byte
        each byte will be shifted out with the same endianness as the
        bits. """
    assert (type(data) != dict), "*shiftOut() does not support dictionaries"
    assert (type(data) != float), "*shiftOut() does not support floats"
    # Multi-element strings/lists recurse element-by-element.
    if ((type(data) != int) and ((len(data) > 1) or (type(data) == list))):
        # Test for type list here to handle lists of length 1
        for i in data if (bit_order == MSBFIRST) else data[::-1]:
            # Loop through forward if MSB first, otherwise in reverse
            shiftOut(data_pin, clk_pin, bit_order, i, edge)
    else:
        if (type(data) == str):
            # Data is a single character here, get ascii value:
            data = ord(data)
            n_bytes = 1
        else:
            # Value is a number, calculate number of bytes:
            if (data == 0):
                # int.bit_length(0) returns 0:
                n_bytes = 1
            else:
                n_bytes = int(math.ceil(data.bit_length()/8.0))
        # Ensure clock is in idle state:
        digitalWrite(clk_pin, HIGH if (edge==FALLING) else LOW)
        # Byte and bit indices count down for MSB first, up for LSB first.
        byte_range = (n_bytes-1, -1, -1) if (bit_order == MSBFIRST) else (n_bytes,)
        bit_range = (7, -1, -1)if (bit_order == MSBFIRST) else (8,)
        # Shift out the data:
        for i in range(*byte_range):
            byte = data >> (8*i)
            for j in range(*bit_range):
                # Set the data line, then pulse the clock through the
                # active edge so the slave latches the bit.
                digitalWrite(data_pin, (byte>>j) & 0x01)
                digitalWrite(clk_pin, LOW if (edge==FALLING) else HIGH)
                digitalWrite(clk_pin, HIGH if (edge==FALLING) else LOW)
9626158 |
class Node:
    """One element of a singly linked list."""

    def __init__(self, value, next_=None):
        self.value = value  # payload carried by this node
        self.next = next_   # successor node, or None at the tail
class Queue:
    """First-in-first-out queue built from singly linked Node objects."""

    def __init__(self, front=None, back=None):
        self.front = front  # next node to be dequeued
        self.back = back    # most recently enqueued node

    def enqueue(self, value):
        """Add a value at the back of the queue."""
        node = Node(value)
        if not self.front:
            # Empty queue: the new node is both ends at once.
            self.front = node
            self.back = node
        else:
            # Link behind the current back, then advance the back pointer.
            self.back.next = node
            self.back = node

    def peek(self):
        """Return the front value without removing it.

        Raises InvalidOperationError on an empty queue.
        """
        if not self.front:
            raise InvalidOperationError(
                "Method not allowed on empty collection")
        return self.front.value

    def is_empty(self):
        """Return True when the queue holds no nodes."""
        return not self.front

    def dequeue(self):
        """Remove and return the value at the front of the queue.

        Raises InvalidOperationError on an empty queue.
        """
        if not self.front:
            raise InvalidOperationError(
                "Method not allowed on empty collection")
        removed = self.front
        self.front = removed.next
        return removed.value
class AnimalShelter:
    """Shelter that adopts out cats and dogs in FIFO order per species."""

    def __init__(self):
        # One FIFO queue per supported species.
        self.dog_q = Queue()
        self.cat_q = Queue()

    def enqueue(self, value):
        """Add a "dog" or "cat" to its queue; return "Null" for anything else."""
        if value == "dog":
            self.dog_q.enqueue(value)
            return
        elif value == "cat":
            self.cat_q.enqueue(value)
            return
        return "Null"

    def dequeue(self, preference):
        """Return the longest-waiting animal of the preferred species.

        `preference` is matched case-insensitively; returns "Null" when it
        is neither "dog" nor "cat".
        """
        # BUG FIX: the original compared the bound method `preference.lower`
        # (never called) to a string, which is always False, so every
        # dequeue returned "Null".  The method must be invoked.
        pref = preference.lower()
        if pref == "dog":
            return self.dog_q.dequeue()
        elif pref == "cat":
            return self.cat_q.dequeue()
        return "Null"
| StarcoderdataPython |
1746726 | """
This file registers pre-defined datasets at hard-coded paths, and their metadata.
We hard-code metadata for common datasets. This will enable:
1. Consistency check when loading the datasets
2. Use models on these standard datasets directly and run demos,
without having to download the dataset annotations
We hard-code some paths to the dataset that's assumed to
exist in "./datasets/".
Here we only register the few-shot datasets and complete COCO, PascalVOC and
LVIS have been handled by the builtin datasets in detectron2.
"""
import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets.lvis import (
get_lvis_instances_meta,
register_lvis_instances,
)
from detectron2.data.datasets.pascal_voc import register_pascal_voc
from detectron2.data.datasets.register_coco import register_coco_instances
from .builtin_meta import _get_builtin_metadata
from .meta_coco import register_meta_coco
from .meta_lvis import register_meta_lvis
from .meta_pascal_voc import register_meta_pascal_voc
# ==== Predefined datasets and splits for COCO ==========
# Maps dataset family -> {split name: (image root, annotation json)}.
# Paths are relative to the dataset root directory (default "./datasets").
_PREDEFINED_SPLITS_COCO = {}
_PREDEFINED_SPLITS_COCO["coco"] = {
    "coco_2014_train": (
        "coco/train2014",
        "coco/annotations/instances_train2014.json",
    ),
    "coco_2014_val": (
        "coco/val2014",
        "coco/annotations/instances_val2014.json",
    ),
    "coco_2014_minival": (
        "coco/val2014",
        "coco/annotations/instances_minival2014.json",
    ),
    "coco_2014_minival_100": (
        "coco/val2014",
        "coco/annotations/instances_minival2014_100.json",
    ),
    "coco_2014_valminusminival": (
        "coco/val2014",
        "coco/annotations/instances_valminusminival2014.json",
    ),
    "coco_2017_train": (
        "coco/train2017",
        "coco/annotations/instances_train2017.json",
    ),
    "coco_2017_val": (
        "coco/val2017",
        "coco/annotations/instances_val2017.json",
    ),
    "coco_2017_test": (
        "coco/test2017",
        "coco/annotations/image_info_test2017.json",
    ),
    "coco_2017_test-dev": (
        "coco/test2017",
        "coco/annotations/image_info_test-dev2017.json",
    ),
    "coco_2017_val_100": (
        "coco/val2017",
        "coco/annotations/instances_val2017_100.json",
    ),
}
def register_all_coco(root="datasets"):
    """Register the few-shot COCO meta-datasets found under `root`.

    Registration of the standard COCO splits is intentionally disabled here;
    detectron2's builtin datasets already provide them.
    """
    # Fixed meta splits: (dataset name, image directory, annotation file).
    meta_splits = [
        (
            "coco_trainval_all",
            "coco/trainval2014",
            "cocosplit/datasplit/trainvalno5k.json",
        ),
        (
            "coco_trainval_base",
            "coco/trainval2014",
            "cocosplit/datasplit/trainvalno5k.json",
        ),
        ("coco_test_all", "coco/val2014", "cocosplit/datasplit/5k.json"),
        ("coco_test_base", "coco/val2014", "cocosplit/datasplit/5k.json"),
        ("coco_test_novel", "coco/val2014", "cocosplit/datasplit/5k.json"),
    ]
    # Small few-shot splits used during the fine-tuning stage; the empty
    # annotation path is resolved internally by register_meta_coco.
    for prefix in ("all", "novel"):
        for shot in (1, 2, 3, 5, 10, 30):
            for seed in range(10):
                suffix = "" if seed == 0 else "_seed{}".format(seed)
                name = "coco_trainval_{}_{}shot{}".format(prefix, shot, suffix)
                meta_splits.append((name, "coco/trainval2014", ""))
    for name, imgdir, annofile in meta_splits:
        register_meta_coco(
            name,
            _get_builtin_metadata("coco_fewshot"),
            os.path.join(root, imgdir),
            os.path.join(root, annofile),
        )
# ==== Predefined datasets and splits for LVIS ==========
# Maps dataset family -> {split name: (image root, annotation json)}.
# LVIS v0.5 reuses the COCO 2017 images; only frequency-based training
# subsets are registered here (the rest stay commented out).
_PREDEFINED_SPLITS_LVIS = {
    "lvis_v0.5": {
        # "lvis_v0.5_train": ("coco/train2017", "lvis/lvis_v0.5_train.json"),
        "lvis_v0.5_train_freq": (
            "coco/train2017",
            "lvis/lvis_v0.5_train_freq.json",
        ),
        "lvis_v0.5_train_common": (
            "coco/train2017",
            "lvis/lvis_v0.5_train_common.json",
        ),
        "lvis_v0.5_train_rare": (
            "coco/train2017",
            "lvis/lvis_v0.5_train_rare.json",
        ),
        # "lvis_v0.5_val": ("coco/val2017", "lvis/lvis_v0.5_val.json"),
        # "lvis_v0.5_val_rand_100": (
        #     "coco/val2017",
        #     "lvis/lvis_v0.5_val_rand_100.json",
        # ),
        # "lvis_v0.5_test": (
        #     "coco/test2017",
        #     "lvis/lvis_v0.5_image_info_test.json",
        # ),
    },
}
def register_all_lvis(root="datasets"):
    """Register the LVIS v0.5 base splits plus the few-shot meta splits."""
    for dataset_name, splits in _PREDEFINED_SPLITS_LVIS.items():
        for key, (image_root, json_file) in splits.items():
            # Pre-defined datasets are assumed to live under `root`;
            # URI-style annotation paths are used verbatim.
            anno = json_file if "://" in json_file else os.path.join(root, json_file)
            register_lvis_instances(
                key,
                _get_builtin_metadata(dataset_name),
                anno,
                os.path.join(root, image_root),
            )
    # Meta splits: (dataset name, image root, annotation json).
    meta_splits = [
        (
            "lvis_v0.5_train_shots",
            "coco/train2017",
            "lvissplit/lvis_shots.json",
        ),
        (
            "lvis_v0.5_train_rare_novel",
            "coco/train2017",
            "lvis/lvis_v0.5_train_rare.json",
        ),
        ("lvis_v0.5_val_novel", "coco/val2017", "lvis/lvis_v0.5_val.json"),
    ]
    for name, image_root, json_file in meta_splits:
        # Splits containing novel classes use the few-shot metadata variant.
        meta_name = "lvis_v0.5_fewshot" if "novel" in name else "lvis_v0.5"
        anno = json_file if "://" in json_file else os.path.join(root, json_file)
        register_meta_lvis(
            name,
            _get_builtin_metadata(meta_name),
            anno,
            os.path.join(root, image_root),
        )
# ==== Predefined splits for PASCAL VOC ===========
def register_all_pascal_voc(root="datasets"):
    """Register the few-shot PASCAL VOC meta-datasets under `root`.

    Registration of the standard VOC splits is disabled (commented out);
    detectron2's builtin datasets already provide them.
    """
    # SPLITS = [
    #     ("voc_2007_trainval", "VOC2007", "trainval"),
    #     ("voc_2007_train", "VOC2007", "train"),
    #     ("voc_2007_val", "VOC2007", "val"),
    #     ("voc_2007_test", "VOC2007", "test"),
    #     ("voc_2012_trainval", "VOC2012", "trainval"),
    #     ("voc_2012_train", "VOC2012", "train"),
    #     ("voc_2012_val", "VOC2012", "val"),
    # ]
    # for name, dirname, split in SPLITS:
    #     year = 2007 if "2007" in name else 2012
    #     register_pascal_voc(name, os.path.join(root, dirname), split, year)
    #     MetadataCatalog.get(name).evaluator_type = "pascal_voc"
    # register meta datasets
    # Fixed splits: (name, VOC dir, split file, kept classes, split id).
    METASPLITS = [
        ("voc_2007_trainval_base1", "VOC2007", "trainval", "base1", 1),
        ("voc_2007_trainval_base2", "VOC2007", "trainval", "base2", 2),
        ("voc_2007_trainval_base3", "VOC2007", "trainval", "base3", 3),
        ("voc_2012_trainval_base1", "VOC2012", "trainval", "base1", 1),
        ("voc_2012_trainval_base2", "VOC2012", "trainval", "base2", 2),
        ("voc_2012_trainval_base3", "VOC2012", "trainval", "base3", 3),
        ("voc_2007_trainval_all1", "VOC2007", "trainval", "base_novel_1", 1),
        ("voc_2007_trainval_all2", "VOC2007", "trainval", "base_novel_2", 2),
        ("voc_2007_trainval_all3", "VOC2007", "trainval", "base_novel_3", 3),
        ("voc_2012_trainval_all1", "VOC2012", "trainval", "base_novel_1", 1),
        ("voc_2012_trainval_all2", "VOC2012", "trainval", "base_novel_2", 2),
        ("voc_2012_trainval_all3", "VOC2012", "trainval", "base_novel_3", 3),
        ("voc_2007_test_base1", "VOC2007", "test", "base1", 1),
        ("voc_2007_test_base2", "VOC2007", "test", "base2", 2),
        ("voc_2007_test_base3", "VOC2007", "test", "base3", 3),
        ("voc_2007_test_novel1", "VOC2007", "test", "novel1", 1),
        ("voc_2007_test_novel2", "VOC2007", "test", "novel2", 2),
        ("voc_2007_test_novel3", "VOC2007", "test", "novel3", 3),
        ("voc_2007_test_all1", "VOC2007", "test", "base_novel_1", 1),
        ("voc_2007_test_all2", "VOC2007", "test", "base_novel_2", 2),
        ("voc_2007_test_all3", "VOC2007", "test", "base_novel_3", 3),
    ]
    # register small meta datasets for fine-tuning stage
    for prefix in ["all", "novel"]:
        for sid in range(1, 4):
            for shot in [1, 2, 3, 5, 10]:
                for year in [2007, 2012]:
                    for seed in range(100):
                        # NOTE: `seed` is deliberately rebound from int to a
                        # string suffix; the range() loop re-binds it on the
                        # next iteration, so this shadowing is safe.
                        seed = "" if seed == 0 else "_seed{}".format(seed)
                        name = "voc_{}_trainval_{}{}_{}shot{}".format(
                            year, prefix, sid, shot, seed
                        )
                        dirname = "VOC{}".format(year)
                        img_file = "{}_{}shot_split_{}_trainval".format(
                            prefix, shot, sid
                        )
                        keepclasses = (
                            "base_novel_{}".format(sid)
                            if prefix == "all"
                            else "novel{}".format(sid)
                        )
                        METASPLITS.append(
                            (name, dirname, img_file, keepclasses, sid)
                        )
    for name, dirname, split, keepclasses, sid in METASPLITS:
        year = 2007 if "2007" in name else 2012
        register_meta_pascal_voc(
            name,
            _get_builtin_metadata("pascal_voc_fewshot"),
            os.path.join(root, dirname),
            split,
            year,
            keepclasses,
            sid,
        )
        MetadataCatalog.get(name).evaluator_type = "pascal_voc"
# Register them all under "./datasets"
# NOTE: these run at import time, populating detectron2's DatasetCatalog.
register_all_coco()
register_all_lvis()
register_all_pascal_voc() | StarcoderdataPython |
6537878 | from . import TransformFunction, string_to_tfarg_function, mime_type_based_transform
import htmlmth.mods.html
# Transform that strips HTML comments from 'text/html' payloads; any other
# MIME type passes through the transform untouched.
remove_html_comments = TransformFunction("",
        "remove html comments",
        mime_type_based_transform({
            'text/html': string_to_tfarg_function(lambda x: htmlmth.mods.html.remove_html_comments(x))
        }))
| StarcoderdataPython |
3541441 | <filename>Python/09. Errors and Exceptions/002. Incorrect Regex.py<gh_stars>1-10
# Problem: https://www.hackerrank.com/challenges/incorrect-regex/problem
# Score: 20
import re

# For each query line, print True when it is a valid regular expression,
# otherwise print False.
for _ in range(int(input())):
    pattern = input()
    try:
        re.compile(pattern)  # re.error signals an invalid pattern
        print(True)
    except re.error:
        print('False')
| StarcoderdataPython |
4937100 | from selenium import webdriver
import pandas as pd
import datetime
import time
import bs4
'''
This script requires an auto-switching VPN to work properly. If you run without regularly switching IP addresses
Angel List detects the scraper and throws up a captcha. Recommend Hide My Ass v2 or similar VPN.
'''
# Open the given URL in the WebDriver, reveal the full description text,
# and return the resulting page HTML.
def FFwebdriver(driver, url):
    driver.get(url)
    time.sleep(15)  # give the page (and its scripts) time to load
    # One company-page layout hides the description behind a 'Read more'
    # button; the other layout keeps the text visible in the HTML, so a
    # failed click is simply ignored.
    try:
        driver.find_element_by_xpath("//*[text()='Read more']").click()
    except:
        pass
    time.sleep(5)
    return driver.page_source
def csv_scrape(csv):
    """Scrape AngelList product descriptions for every company in `csv`.

    `csv` is a path/URL readable by pandas; the frame is expected to have
    'tagline' and 'AngelList_website' columns.  Returns the frame with a
    new 'descriptions' column appended.
    NOTE(review): the geckodriver path and the scraped CSS class names are
    machine- and site-version specific — confirm before reuse.
    """
    descriptions = list()
    # Initialize the webdriver
    driver=webdriver.Firefox(executable_path='C:/Users/Zero/Downloads/geckodriver-v0.24.0-win64/geckodriver.exe')
    # Turn raw csv from github into dataframe for ease
    df = pd.read_csv(csv)
    start = datetime.datetime.now()
    # Conditionally searches angellist for products with taglines and extracts the html
    for i in range(len(df)):
        desc = 'Empty'
        if df['tagline'][i] != "Empty section for this company":
            url = df.AngelList_website[i]
            html = FFwebdriver(driver, url)
            soup = bs4.BeautifulSoup(html, 'html.parser')
            # Attempt to parse the html, pages have two formats for description plus a no description format
            try:
                desc = soup.find(class_="component_bc35d").get_text().strip()
            except:
                try:
                    desc = soup.find("div", class_="product_desc").get_text().strip()
                except:
                    desc = 'Empty'
        descriptions.append(desc)
    # Create a new column in the DataFrame
    df['descriptions'] = descriptions
    return df
# Run the scraper and then export to CSV
# Change the CSV name below to 'angel-co-desc-' + funding range of the seed csv + '.csv'
# NOTE: this runs at import time and drives a real browser session.
df = csv_scrape('https://raw.githubusercontent.com/veritaem/DS-Sprint-01-Dealing-With-Data/master/angel-co-1000000-100000000.csv')
df.to_csv('angel-co-desc-1000000-100000000.csv') | StarcoderdataPython |
1808016 | <gh_stars>1-10
import re
class Reader:
    """Normalizes a source string: collapses whitespace and strips comments.

    `codigo` keeps the raw input text; `dados` holds the cleaned-up version.
    """

    def __init__(self, string):
        # Runs of spaces/tabs/newlines collapse to a single space.
        self.regex_separacao = re.compile(r'[ \t\n]+')
        # Brace-delimited {...} and C-style /* ... */ comments are removed.
        self.regex_excluir = re.compile(r'({[^}]+})|(/\*([\s\S]+?)\*/)')
        self.dados = string
        self.codigo = self.dados
        self.__trata_texto()

    def __trata_texto(self):
        """Apply the whitespace pass, then the comment-stripping pass."""
        collapsed = re.sub(self.regex_separacao, ' ', self.dados)
        self.dados = re.sub(self.regex_excluir, '', collapsed)

    def get_programa_formatted(self):
        """Return (cleaned text, original text)."""
        return self.dados, self.codigo
| StarcoderdataPython |
141779 | from endochrone.classification.binary_tree import BinaryDecisionTree
from endochrone.classification.naive_knn import KNearest
from endochrone.classification.naive_bayes import NaiveBayes
# Public API of the classification subpackage.
__all__ = ['BinaryDecisionTree', 'KNearest', 'NaiveBayes']
| StarcoderdataPython |
8042801 | <reponame>brookisme/tfbox
from pprint import pprint
import tensorflow.keras as keras
from . import load
from . import blocks
from . import addons
import tfbox.utils.helpers as h
#
# Model: Parent class for TBox models/blocks
#
# a simple wrapper of keras.Model with the following additions:
#
# - an optional classifier
# - is_skip property
# - standardized naming for tfbox models/blocks
#
class Model(keras.Model):
    """Parent class for TFBox models/blocks.

    A simple wrapper of keras.Model with the following additions:

    - an optional classifier head (with optional class grouping)
    - an `is_skip` flag
    - standardized naming for tfbox models/blocks
    """
    #
    # CONSTANTS
    #
    NAME='TFBoxModel'
    DEFAULT_KEY=NAME
    # classifier types understood by `_get_classifier`
    SEGMENT='segment'
    GLOBAL_POOLING='global_pooling'
    DEFAULT_CLASSIFIER=SEGMENT


    #
    # PUBLIC
    #
    def __init__(self,
            is_skip=False,
            name=NAME,
            named_layers=True,
            noisy=True):
        """
        Args:
            is_skip: flag read by callers to treat the block as a skip branch
            name: model/block name used when building layer names
            named_layers: when False, `layer_name` produces unnamed layers
            noisy: accepted for interface compatibility (currently unused)
        """
        super(Model, self).__init__()
        self.classifier=None
        # BUG FIX: default the grouping attributes here so `output` cannot
        # raise AttributeError when `set_classifier` was never called, or
        # was called with a falsy `nb_classes`.
        self.grouping=False
        self.group_classifier=None
        self.is_skip=is_skip
        self.model_name=name
        self.named_layers=named_layers


    def set_classifier(self,
            nb_classes,
            config,
            group_maps=None,
            group_nb_classes=None,
            file_name='classifier',
            folder=load.TFBOX,
            from_logits=None):
        """Attach the classifier head (and optional group classifier).

        Args:
            nb_classes: number of output classes; falsy leaves the model
                without a classifier head
            config: classifier config (True/str/dict/config-file reference)
            group_maps: optional class-grouping maps; enables a second,
                grouped classifier head
            group_nb_classes: class count for the group head
                (defaults to len(group_maps))
            file_name/folder: where to load a config file from
            from_logits: when True/False, overrides the output activation
        """
        if nb_classes:
            self.classifier=self._get_classifier(
                nb_classes,
                config,
                file_name=file_name,
                folder=folder,
                from_logits=from_logits)
            if group_maps:
                self.grouping=addons.Groups(group_maps)
                self.group_classifier=self._get_classifier(
                    group_nb_classes or len(group_maps),
                    config.get('group_classifier'),
                    file_name=file_name,
                    folder=folder,
                    from_logits=from_logits)
            else:
                self.grouping=False


    def output(self,x):
        """Run `x` through the classifier head(s).

        Returns `[class_output, group_output]` when grouping is enabled,
        otherwise a single tensor.
        """
        if self.classifier:
            x=self.classifier(x)
        if self.grouping:
            gx=self.grouping(x)
            if self.group_classifier:
                gx=self.group_classifier(gx)
            x=[x, gx]
        return x


    def layer_name(self,group=None,index=None):
        """Return a standardized tfbox layer name for this model."""
        return blocks.layer_name(self.model_name,group,index=index,named=self.named_layers)


    #
    # INTERNAL
    #
    def _get_classifier(self,
            nb_classes,
            config,
            file_name='classifier',
            folder=load.TFBOX,
            from_logits=None):
        """Build a classifier block from `config`, or return False.

        `config` may be True (defaults), a classifier-type string, a dict,
        or a config-file reference resolvable by `load.config`.
        """
        if config:
            if from_logits in [True,False]:
                config=self._update_activation(config,from_logits)
        if nb_classes and config:
            if config is True:
                config={}
            elif isinstance(config,str):
                config={ 'classifier_type': config }
            else:
                config=load.config(config,file_name,folder)
            classifier_type=config.pop( 'classifier_type', self.DEFAULT_CLASSIFIER )
            if classifier_type==Model.SEGMENT:
                classifier=blocks.SegmentClassifier(
                    nb_classes=nb_classes,
                    **config)
            elif classifier_type==Model.GLOBAL_POOLING:
                raise NotImplementedError('TODO: GAPClassifier')
            else:
                raise NotImplementedError(f'{classifier_type} is not a valid classifier')
        else:
            classifier=False
        return classifier


    def _update_activation(self,config,from_logits):
        """Force the output activation on/off according to `from_logits`."""
        if config is True:
            config={}
        if config is False:
            config={
                'filters': False
            }
        config['output_act']=not from_logits
        return config
| StarcoderdataPython |
1725471 | <gh_stars>0
import logging
import rethinkdb as r
from datetime import datetime as dt
import emoji
import time
from tornado.gen import coroutine, Return
from relay_config import cfg
# Settings pulled from the relay configuration file.
DB_NAME = cfg.db_name
WALKER_TABLE = cfg.walker_table
TEAM_TABLE = cfg.team_table
MIN_LAP_TIME = cfg.min_lap_time  # seconds a walker must wait between counted laps
MICROS_PER_SEC = 1000000.0  # microseconds-to-seconds conversion factor
class RelayDB(object):
    """Data-access layer for the walkathon relay app, backed by RethinkDB.

    All public methods (except the small helpers) are Tornado coroutines
    and must be awaited/yielded on the IOLoop.
    """

    def __init__(self, *args, **kwargs):
        # Connection is established lazily in open().
        self.conn = None

    @coroutine
    def open(self):
        """Connect to RethinkDB, creating the database/tables/indexes if missing."""
        r.set_loop_type('tornado')
        self.conn = yield r.connect(db=DB_NAME)
        db_names = yield r.db_list().run(self.conn)
        if DB_NAME not in db_names:
            logging.debug('creating database')
            yield r.db_create(DB_NAME).run(self.conn)
        table_names = yield r.table_list().run(self.conn)
        if WALKER_TABLE not in table_names:
            # Soft durability: acknowledged before flushed to disk (see close()).
            yield r.db(DB_NAME).table_create(WALKER_TABLE, durability='soft').run(self.conn)
            yield r.table(WALKER_TABLE).index_create('team_id').run(self.conn)
            yield r.table(WALKER_TABLE).index_wait().run(self.conn)
        if TEAM_TABLE not in table_names:
            yield r.db(DB_NAME).table_create(TEAM_TABLE, durability='soft').run(self.conn)
            yield r.table(TEAM_TABLE).index_create('laps').run(self.conn)
            yield r.table(TEAM_TABLE).index_create('avg_laps').run(self.conn)
            yield r.table(TEAM_TABLE).index_wait().run(self.conn)

    @coroutine
    def close(self):
        """Flush soft-durability writes to disk, then close the connection."""
        then = dt.now()
        for table in [WALKER_TABLE, TEAM_TABLE]:
            result = yield r.table(table).sync().run(self.conn)
            if result is None or result.get('synced') != 1:
                # BUG FIX: the original called `log.error`, but no `log`
                # name exists in this module — it raised NameError instead
                # of reporting the failed sync.
                logging.error('sync %s' % table)
        self.conn.close()
        delta = dt.now() - then
        duration = delta.seconds + (delta.microseconds/MICROS_PER_SEC)
        logging.debug('closed in %f secs' % duration)

    def emojize(self, emoji_name):
        """Translate a human-readable emoji name into the emoji character.

        Falls back to the grinning face when the name is unknown or empty.
        """
        emoji_name = emoji_name or 'grinning face'
        cldr_name = ':' + emoji_name.replace(' ', '_') + ':'
        em = emoji.emojize(cldr_name)
        if em == cldr_name:
            # emojize returns its input unchanged when the name is unknown.
            logging.warn('no emoji for %s' % emoji_name)
            em = emoji.emojize(':grinning_face:')
        return em

    @coroutine
    def update_emoji(self, team_id, short_name):
        """Set a team's emoji from its human-readable short name."""
        yield r.table(TEAM_TABLE).get(team_id).update({
            'emoji': self.emojize(short_name)
        }).run(self.conn)

    @coroutine
    def insert_teams(self, teams):
        """Insert team records, filling in lap counters, ids and emoji."""
        for team in teams:
            team['laps'] = 0
            team['avg_laps'] = 0.0
            if 'id' not in team:
                team['id'] = yield self.get_next_team_id()
            team['emoji'] = self.emojize(team['emoji'])
        result = yield r.table(TEAM_TABLE).insert(teams).run(self.conn)
        if result is None or result.get('errors') != 0:
            logging.error('insert_teams %s ' % result)

    @coroutine
    def get_teams(self):
        """Return all team records as a list."""
        teams = []
        cur = yield r.table(TEAM_TABLE).run(self.conn)
        while (yield cur.fetch_next()):
            team = yield cur.next()
            teams.append(team)
        raise Return(teams)

    def append_rank_suffix(self, n):
        """Return `n` with its English ordinal suffix (1st, 2nd, 3rd, ...)."""
        # https://stackoverflow.com/questions/3644417/
        return str(n) + ('th' if 4 <= n % 100 <= 20 else {1:'st', 2:'nd', 3:'rd'}.get(n % 10, 'th'))

    @coroutine
    def get_team_rank(self, team_id):
        """Return the team's lap-count rank as an ordinal string (e.g. '3rd')."""
        # Get the offset of the team_id in the team table ordered by lap count
        # TODO: offsets_of doesn't handle ties. Could scan the team table linearly? Ugh.
        offsets = yield r.table(TEAM_TABLE).order_by(r.desc('laps')).offsets_of(
            r.row['id'].eq(team_id)
        ).run(self.conn)
        if offsets is None or len(offsets) != 1:
            logging.error('unexpected offsets: %s' % offsets)
        rank = self.append_rank_suffix(offsets[0] + 1)  # show rank as one-based
        raise Return(rank)

    @coroutine
    def get_next_team_id(self):
        """Return the next free team id (max existing id + 1, or 0)."""
        team_with_max_id = yield r.table(TEAM_TABLE).max('id').run(self.conn)
        if team_with_max_id:
            raise Return(team_with_max_id['id'] + 1)
        else:
            raise Return(0)

    @coroutine
    def get_walker(self, walker_id):
        """Return a single walker record by id (RFID tag)."""
        walker = yield r.table(WALKER_TABLE).get(walker_id).run(self.conn)
        raise Return(walker)

    @coroutine
    def get_walkers(self, team_id):
        """Return all walker records belonging to the given team."""
        walkers = []
        cur = yield r.table(WALKER_TABLE).get_all(team_id, index='team_id').run(self.conn)
        while (yield cur.fetch_next()):
            walker = yield cur.next()
            walkers.append(walker)
        raise Return(walkers)

    @coroutine
    def get_tags(self):
        """Return the list of all known walker ids (tags)."""
        tags = []
        cur = yield r.table(WALKER_TABLE).with_fields('id').run(self.conn)
        while (yield cur.fetch_next()):
            item = yield cur.next()
            tags.append(item['id'])
        raise Return(tags)

    @coroutine
    def insert_walkers(self, walkers):
        """Insert walker records with zeroed lap counters."""
        for walker in walkers:
            walker['laps'] = 0
            walker['last_updated_time'] = 0.0
        result = yield r.table(WALKER_TABLE).insert(walkers).run(self.conn)
        if result is None or result.get('errors') != 0:
            logging.error('insert_walkers %s' % result)

    @coroutine
    def increment_laps(self, tags):
        """Record one lap for each scanned tag, debounced by MIN_LAP_TIME.

        Also refreshes the owning team's lap total, average and rank.
        Unknown tags are logged as a warning.
        """
        cur = yield r.table(WALKER_TABLE).get_all(r.args(tags)).run(self.conn)
        if len(tags) > 1:
            logging.debug('updating %d records' % len(tags))
        while (yield cur.fetch_next()):
            walker = yield cur.next()
            tags.remove(walker['id'])  # remove the ones we've seen to find unassigned below
            now = time.time()
            if now - walker['last_updated_time'] > MIN_LAP_TIME:
                # Increment lap totals
                yield r.table(WALKER_TABLE).get(walker['id']).update({
                    'laps': r.row['laps'] + 1,
                    'last_updated_time': now
                }).run(self.conn)
                avg_laps = yield r.table(WALKER_TABLE).get_all(
                    walker['team_id'], index='team_id'
                ).avg('laps').run(self.conn)
                rank = yield self.get_team_rank(walker['team_id'])
                yield r.table(TEAM_TABLE).get(walker['team_id']).update({
                    'avg_laps': avg_laps,
                    'laps': r.row['laps'] + 1,
                    'last_updated_time': now,
                    'rank': rank
                }).run(self.conn)
            else:
                # Scanned again too soon after the last counted lap.
                d = dt.fromtimestamp(walker['last_updated_time'])
                logging.warn('too soon: %d last lap: %s' % (walker['id'], d.strftime('%x %X')))
        if len(tags) > 0:
            # Shouldn't happen
            logging.warn('unassigned tags: %s' % tags)

    @coroutine
    def zero_all_laps(self):
        """Reset every walker's and team's lap counters to zero."""
        yield r.table(WALKER_TABLE).update({
            'laps': 0,
            'last_updated_time': 0.0,
        }).run(self.conn)
        yield r.table(TEAM_TABLE).update({
            'laps': 0,
            'avg_laps': 0.0
        }).run(self.conn)
| StarcoderdataPython |
9624467 | import argparse
import glob
import os
import sys
import subprocess
import configparser
import re
def clean_classes(directory):
    """Delete every compiled .class file found in `directory`."""
    pattern = os.path.join(directory, "*.class")
    for path in glob.glob(pattern):
        print(f"Cleaning class file {path}")
        os.remove(path)
def clean_tests(directory):
    """Delete every recorded .txt test file found in `directory`."""
    pattern = os.path.join(directory, "*.txt")
    for path in glob.glob(pattern):
        print(f"Cleaning test file {path}")
        os.remove(path)
def main():
    """CLI entry point: compile each .wtr file and diff its output.

    In record mode (-o) the combined stdout/"$stderr:" output of every test
    is written to `<file>.wtr.txt`; otherwise each run is compared against
    that recorded file and pass/fail totals are reported.
    NOTE(review): the compiler module path and cwd below are hard-coded to
    one Windows machine — confirm before running elsewhere.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("directory", help="The directory containing test files")
    parser.add_argument("-o", "--output-record", help="Record the outputs of files into .txt files", action="store_true")
    parser.add_argument("-s", "--separate-output", help="Outputs stdout and stderr separately on failure", action="store_true")
    parser.add_argument("-c", "--clean", help="Remove .class files from test folder", action="store_true")
    parser.add_argument("-ct", "--clean-tests", help="Remove .txt files from test folder", action="store_true")
    parser.add_argument("-ca", "--clean-all", help="Remove .txt and .class files from test folder", action="store_true")
    parser.add_argument("-r", "--run", help="Runs tests even if clean is set", action="store_true")
    args = parser.parse_args()
    # Test directory is resolved relative to the script location.
    directory = os.path.join(sys.path[0], args.directory)
    if args.clean_all:
        clean_classes(directory)
        clean_tests(directory)
        if not args.run: return
    if args.clean:
        clean_classes(directory)
    if args.clean_tests:
        clean_tests(directory)
    if (args.clean or args.clean_tests) and not args.run:
        return
    # Library jar locations come from the local .env config file.
    config = configparser.RawConfigParser()
    config.read(".env")
    test_files = glob.glob(os.path.join(directory, "*.wtr"))
    total_tests = len(test_files)
    passing_tests = 0
    failing_tests = 0
    for wtr_file in test_files:
        if args.output_record:
            print(f"Generating tests for {wtr_file}...")
        else:
            print(f"Running tests for {wtr_file}...")
        # Compile the .wtr source with the Water compiler (Java module).
        compile_process = subprocess.run(["java",
            "-p",
            f"{config.get('Libraries', 'jcommander')};{config.get('Libraries', 'asm')};{config.get('Libraries', 'runtime')};{config.get('Libraries', 'compiler')}",
            "-m",
            "water.compiler/water.compiler.Main",
            wtr_file
            ],
            cwd="D:/Programming/Java/Water/out/production/Compiler",
            capture_output=True,
            text=True
            )
        p_stdout = compile_process.stdout
        p_stderr = compile_process.stderr
        if compile_process.returncode == 0:
            # Compilation succeeded: run the produced <Name>Wtr class.
            className = os.path.basename(wtr_file).replace(".wtr", "") + "Wtr"
            run_process = subprocess.run([
                "java",
                className
                ],
                cwd=directory,
                capture_output=True,
                text=True
                )
            p_stdout += run_process.stdout
            p_stderr += run_process.stderr
        # Normalize a leading source-location tag so recordings are stable.
        p_stderr = re.sub(
            r"^\[.*\]",
            "[LOC]",
            p_stderr
        )
        if args.output_record:
            with open(wtr_file + ".txt", "w") as f:
                f.write(p_stdout)
                f.write("$stderr:\n")
                f.write(p_stderr)
        else:
            with open(wtr_file + ".txt", "r") as f:
                file = "".join(f.readlines())
                t_stdout, t_stderr = file.split("$stderr:\n")
            if t_stdout != p_stdout or t_stderr != p_stderr:
                if args.separate_output:
                    if t_stdout != p_stdout:
                        print(f"Test in file {wtr_file} failed:\nStdout Expected:\n{t_stdout}\nGot:{p_stdout}", file=sys.stderr)
                    if t_stderr != p_stderr:
                        print(f"Test in file {wtr_file} failed:\nStderr Expected:\n{t_stderr}\nGot:{p_stderr}", file=sys.stderr)
                else:
                    t_output = (t_stdout + '\n' + t_stderr).strip()
                    p_output = (p_stdout + '\n' + p_stderr).strip()
                    print(f"Test in file {wtr_file} failed:\nExpected Output:\n{t_output}\nGot:\n{p_output}", file=sys.stderr)
                    print("(Output is stripped)")
                failing_tests += 1
            else:
                passing_tests += 1
    if not args.output_record:
        print(f"\nRan all tests [{passing_tests} of {total_tests} succeeded] [{failing_tests} of {total_tests} failed]")
327422 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
"""
A simple tool to manipulate data from/to ascii files.
"""
import copy as cp
import numpy as np
import fnmatch as fnm
#-----------------------------------------------------------------------------------------
class AsciiTable():
    """Tabular data container backed by a header list and a list of
    per-row dicts; supports import/export from/to delimited ascii files.
    NOTE(review): `header=[]`, `data=[]`, `empty=[]` mutable defaults below
    are never mutated in place, so they are benign — but fragile.
    """
    def __init__(self, header=[]):
        if header:
            self.header = header
        else:
            self.header = []
        self.data = []
    #---------------------------------------------------------------------------------------
    def AddElement(self, data=[]):
        """
        Add an element (with header's format) to the data structure.
        Element can be empty or filled with data.
        """
        newitem = {}
        for i, key in enumerate(self.header):
            if not data:
                newitem[key] = []
            else:
                newitem[key] = data[i]
        self.data.append(newitem)
    #---------------------------------------------------------------------------------------
    def AddKey(self, key, data=[], index=-1):
        """
        Add header key at given position (default is last element).
        Data structure can optionally be inflated.
        """
        # Default index simply appends
        if index == -1:
            index = len(self.header)
        self.header.insert(index, key)
        # Loop over data
        for i, item in enumerate(self.data):
            # Check value types: a scalar is broadcast to all rows,
            # a list supplies one value per row.
            if not data or type(data) != list:
                element = data
            else:
                element = data[i]
            # Add element at corresponding key
            self.data[i][key] = element
    #---------------------------------------------------------------------------------------
    def RemoveKey(self, key):
        """
        Remove a given key from header and data structure.
        """
        # Remove from header
        i = self.header.index(key)
        self.header.pop(i)
        # Remove from data
        for i, item in enumerate(self.data):
            self.data[i].pop(key)
    #---------------------------------------------------------------------------------------
    def RenameKey(self, old_key, new_key):
        """
        Rename a given key in the header and the data structure.
        """
        # Rename header's key
        i = self.header.index(old_key)
        self.header[i] = new_key
        # Rename key in data structure
        for i, item in enumerate(self.data):
            self.data[i][new_key] = self.data[i].pop(old_key)
    #---------------------------------------------------------------------------------------
    def Replace(self, key, old_value, new_value):
        """
        Replace occurrences of a given key's value.
        If old_value is '*' it replaces all values.
        """
        # Loop over data
        for i, item in enumerate(self.data):
            # Replace all keys
            if old_value == '*':
                self.data[i][key] = new_value
            # Replace matching values only
            else:
                if self.data[i][key] == old_value:
                    self.data[i][key] = new_value
    #---------------------------------------------------------------------------------------
    def Size(self):
        """
        Method to return size of the data matrix as [rows, columns].
        """
        enum = len(self.data)
        hnum = len(self.header)
        return [enum, hnum]
    #---------------------------------------------------------------------------------------
    def Import(self, ascii_file,
               header=[],
               dtype='float',
               delimiter=',',
               skipline=0,
               comment='#',
               empty=[]):
        """
        Method to import data from ascii file (tabular).
        When `header` is not given, the first non-comment line is used.
        `dtype` may be a single type key or a per-column list.
        """
        self.header = []
        self.data = []
        # Open input ascii file
        with open(ascii_file, 'r') as f:
            # Ignore initial line(s) if necessary
            for i in range(0, skipline):
                f.readline()
            # Import header (skip comments)
            if not header:
                while 1:
                    line = f.readline()
                    if line[0] != comment: break
                header = line.strip().split(delimiter)
            # Removing empty fields from header
            for h in header:
                if h != '':
                    self.header.append(h)
            # Loop over lines
            for line in f:
                # Skip comments, if any
                if line[0] != comment:
                    value = line.strip().split(delimiter)
                    # Loop over data values
                    data = []
                    for i, h in enumerate(header):
                        # Skip empty header fields
                        if h != '':
                            # Data type(s) switch
                            if type(dtype) == list:
                                dtp = dtype[i]
                            else:
                                dtp = dtype
                            # Check for empty elements
                            if not value[i]:
                                value[i] = empty
                            data.append(_CastValue(value[i],dtp))
                    self.AddElement(data)
            # NOTE(review): close() is redundant inside the `with` block.
            f.close()
            return
        # Warn user if model file does not exist
        # NOTE(review): unreachable — open() failures raise instead of
        # falling through to this print.
        print('File not found.')
    #---------------------------------------------------------------------------------------
    def ExportEQT(self, ascii_file,
                  write_header='no',
                  delimiter=' '):
        """
        Method to export data object into an ascii file
        (EQT variant: space-delimited, header off by default).
        """
        try:
            with open(ascii_file, 'w') as f:
                # Write header
                if write_header == 'yes':
                    header = delimiter.join(self.header)
                    f.write(header + '\n')
                # Write data (loop over rows)
                for i, item in enumerate(self.data):
                    data = [_CastValue(item[j],'s') for j in self.header]
                    data = delimiter.join(data)
                    if i < (self.Size()[0]-1):
                        f.write(data + '\n')
                    else:
                        f.write(data)
                f.close()
        except:
            # Warn user if model file does not exist
            # NOTE(review): bare except reports *any* failure as a missing
            # file — consider narrowing to OSError.
            print('File not found.')
    #---------------------------------------------------------------------------------------
    #---------------------------------------------------------------------------------------
    def Export(self, ascii_file,
               write_header='yes',
               delimiter=','):
        """
        Method to export data object into an ascii file
        (comma-delimited, header on by default).
        """
        try:
            with open(ascii_file, 'w') as f:
                # Write header
                if write_header == 'yes':
                    header = delimiter.join(self.header)
                    f.write(header + '\n')
                # Write data (loop over rows)
                for i, item in enumerate(self.data):
                    data = [_CastValue(item[j],'s') for j in self.header]
                    data = delimiter.join(data)
                    if i < (self.Size()[0]-1):
                        f.write(data + '\n')
                    else:
                        f.write(data)
                f.close()
        except:
            # Warn user if model file does not exist
            # NOTE(review): bare except reports *any* failure as a missing
            # file — consider narrowing to OSError.
            print('File not found.')
    #---------------------------------------------------------------------------------------
    def Append(self, new_table):
        """
        Method to merge two data structures consecutively.
        (Header structure must be identical).
        """
        if self.header == new_table.header:
            for i in range(0,new_table.Size()[0]):
                self.data.append(new_table.data[i])
        else:
            print('Error: headers do not match...')
    #---------------------------------------------------------------------------------------
    def Extract(self, key, dtype='float'):
        """
        Method to extract data values by key.
        Data type can be specified.
        """
        values = []
        for item in self.data:
            value = _CastValue(item[key], dtype)
            values.append(value)
        return values
    #---------------------------------------------------------------------------------------
    def Filter(self, key, filter_key):
        """
        Method to filter the data table by key value.
        Value can be a string to match (* and ? allowed)
        or a numerical range (as a list of floats).
        In output it is returned a new table.
        """
        NewTab = AsciiTable(self.header)
        # String matching
        if type(filter_key) is str:
            for item in self.data:
                if fnm.fnmatch(item[key],filter_key):
                    NewTab.data.append(item)
        # Filter by value
        if type(filter_key) is list:
            for item in self.data:
                if not _isNaN(item[key]):
                    ik = _CastValue(item[key])
                    if ik >= filter_key[0] and ik <= filter_key[1]:
                        NewTab.data.append(item)
        return NewTab
#-----------------------------------------------------------------------------------------
def _CastValue(value, dtype='float'):
    """
    Recast *value* according to *dtype* ('int'/'float'/'string' spellings).

    Empty or NaN values are passed through unchanged, except for string
    casts, where they become '' and 'nan' respectively.  Unknown dtypes
    leave the value untouched.
    """
    int_names = ('Int', 'int', 'I', 'i')
    float_names = ('Float', 'float', 'F', 'f')
    str_names = ('String', 'string', 'S', 's')
    if _isEmpty(value) or _isNaN(value):
        # Only string casts substitute defaults for empty/NaN fields.
        if dtype in str_names:
            value = '' if _isEmpty(value) else 'nan'
        return value
    if dtype in int_names:
        return int(value)
    if dtype in float_names:
        return float(value)
    if dtype in str_names:
        return str(value)
    return value
#-----------------------------------------------------------------------------------------
def _isNaN(number):
"""
Simple private method to check if a number is NaN.
It returns a boolean evaluation.
"""
return number != number
def _isEmpty(number):
    """
    Simple private method to check if a variable (list) is empty.
    It returns a boolean evaluation.
    """
    # True only for an empty list or an empty string; any other value
    # (including 0 and None) is considered non-empty.
    return (number == [] or number == '') | StarcoderdataPython |
4891809 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2020, XLAB Steampunk <<EMAIL>>
#
# Apache License v2.0 (see https://www.apache.org/licenses/LICENSE-2.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["stableinterface"],
"supported_by": "community",
}
DOCUMENTATION = """
module: marker
author:
- <NAME> (@tadeboro)
short_description: Manage marker files
description:
- Manage marker files with user-defined content.
version_added: "1.0"
options:
path:
description:
- Location of the marker file.
type: str
required: true
state:
description:
- Target state of the marker file.
type: str
choices: [ present, absent ]
default: present
content:
description:
- The marker file content.
type: str
default: MARKER
"""
EXAMPLES = """
- name: Make sure marker file with default content is present
steampunk.demo.marker:
path: /tmp/marker
- name: Make sure marker file with custom content is present
steampunk.demo.marker:
path: /tmp/marker
content: >
A long marker line that is split here because we can do that in YAML
documtnts, so we did it.
- name: Delete marker file if it is present on the system
steampunk.demo.marker:
path: /tmp/marker
state: absent
"""
RETURN = """ # """
import os
from ansible.module_utils.basic import AnsibleModule
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError # Python2 compatibility
def main():
    """Entry point of the steampunk.demo.marker Ansible module.

    Ensures the marker file at ``path`` is present with the requested
    ``content`` (state=present) or absent (state=absent), honoring
    check mode, and reports whether anything changed.
    """
    module = AnsibleModule(
        supports_check_mode=True,
        argument_spec=dict(
            path=dict(
                required=True,
            ),
            state=dict(
                choices=["present", "absent"],
                default="present",
            ),
            content=dict(
                default="MARKER",
            ),
        ),
    )

    path = module.params["path"]
    if module.params["state"] == "present":
        desired = module.params["content"]
        # A missing file counts as empty content, so it always differs.
        try:
            with open(path) as handle:
                current = handle.read()
        except FileNotFoundError:
            current = ""
        changed = current != desired
        if changed and not module.check_mode:
            with open(path, "w") as handle:
                handle.write(desired)
    else:
        changed = os.path.isfile(path)
        if changed and not module.check_mode:
            os.remove(path)

    module.exit_json(changed=changed)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
8025478 | <reponame>quintenroets/sysetup<gh_stars>0
import argparse
import cli
from . import env, files, git, installer
def setup():
    """Run the full OS setup sequence, then reboot the machine."""
    steps = (env.setup, files.setup, installer.setup, git.setup)
    for step in steps:
        step()
    cli.run("reboot now", root=True)
def main():
    """Parse the requested setup action from the command line and run it."""
    parser = argparse.ArgumentParser(description="Setup OS")
    parser.add_argument(
        "action",
        nargs="?",
        help="The setup action to do: [all(default), files, install, git, env]",
        default="all",
    )
    args = parser.parse_args()

    # Dispatch table mapping the action name to its setup routine.
    actions = {
        "all": setup,
        "files": files.setup,
        "install": installer.setup,
        "git": git.setup,
        "env": env.setup,
    }
    actions[args.action]()


if __name__ == "__main__":
    main()
| StarcoderdataPython |
9759584 | """
Чтение навигационных файлов в формате RINEX
"""
import datetime
import logging
from abc import ABC, abstractmethod
from coordinates.exceptions import RinexNavFileError
LOGGER = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
def validate_epoch(epoch):
    """Convert an epoch field list into a datetime.datetime.

    Handles two quirks of RINEX epochs before construction: two-digit
    years (89..99 -> 19xx, 00..88 -> 20xx, since datetime treats 92 and
    1992 differently) and seconds/minutes values that may reach 60..120,
    which are carried over as a timedelta.

    Parameters
    ----------
    epoch : list
        [year, month, day, hour, min, sec, microsec]

    Returns
    -------
    datetime.datetime
    """
    fields = list(epoch)
    # YY -> YYYY
    if fields[0] < 100:
        fields[0] += 1900 if fields[0] >= 89 else 2000
    carry = datetime.timedelta(0)
    # fields[-2] holds seconds (x1 second), fields[-3] minutes (x60 seconds).
    for index, seconds_per_unit in ((-2, 1), (-3, 60)):
        if 60 <= fields[index] <= 120:
            overflow = (fields[index] - 59) * seconds_per_unit
            carry += datetime.timedelta(seconds=overflow)
            fields[index] = 59
    return datetime.datetime(*fields) + carry
def sec2sec_ms(sec):
    """Split float seconds into whole seconds and microseconds.

    Parameters
    ----------
    sec : float

    Returns
    -------
    (int, int)
        Whole seconds and microseconds.
    """
    whole = int(sec)
    fractional = (sec - whole) * 1e+6
    # Round to one decimal first to suppress binary float noise.
    fractional = float("%.1f" % fractional)
    return whole, int(fractional)
class RinexNavFile(ABC):
    """Abstract base for RINEX navigation file readers.

    Holds the fixed-width layout constants and the parsing helpers shared
    by the concrete version-2 and version-3 readers.
    """
    # Width (in characters) of one value field on a 'broadcast orbit' line.
    item_len = 19
    # Column offsets of the orbit value area; set by the concrete subclasses.
    orbit_start = None
    orbit_end = None
    # Number of values expected on each 'broadcast orbit' line, per system.
    values_per_orbit = dict(
        G=(4, 4, 4, 4, 4, 4, 2),  # GPS
        R=(4, 4, 4),  # GLONASS
        E=(4, 4, 4, 4, 4, 4, 2),  # Galileo
        S=(4, 4, 4),  # SBAS (GEO)
        J=(4, 4, 4, 4, 4, 4, 2),  # QZSS
        C=(4, 4, 4, 4, 4, 4, 2),  # BDS
        I=(4, 4, 4, 4, 4, 4, 2),  # IRNSS
    )
    @abstractmethod
    def __init__(self, filename):
        pass
    @staticmethod
    @abstractmethod
    def retrieve_ver_type(filename):
        pass
    @staticmethod
    @abstractmethod
    def parse_epoch(file_object):
        pass
    @abstractmethod
    def __iter__(self):
        # Unreachable body that still marks this abstract method as a generator.
        while False:
            yield None
    @staticmethod
    def skip_header(file_object):
        """
        Skip the header of the file.
        Parameters
        ----------
        file_object : file-like object
        Returns
        -------
        None
        Raises
        ------
        RinexNavFileError
            on unexpected end of the file.
        """
        line = ''
        # The 'END OF HEADER' label occupies columns 61-73 of the header line.
        while line[60:73] != 'END OF HEADER':
            try:
                line = next(file_object)
            except StopIteration:
                msg = 'Unexpected end of the file.'
                raise RinexNavFileError(msg)
    @staticmethod
    def read_orbits(file_object, num_of_orbits):
        """Return list of orbits read from the file.
        Parameters
        ----------
        file_object : iterable
            file-like object
        num_of_orbits : int
        Returns
        -------
        orbits : list
        Raises
        ------
        RinexNavFileError
            on unexpected end of the file.
        """
        orbits = [None] * num_of_orbits
        for i in range(num_of_orbits):
            try:
                line = next(file_object)
            except StopIteration:
                msg = 'Unexpected end of the file.'
                raise RinexNavFileError(msg)
            orbits[i] = line.rstrip()
        return orbits
    def parse_orbits(self, orbits, values_per_orbit):
        """Return navigation message. Message parsed from the orbits list
        according to values_per_orbit.
        Parameters
        ----------
        orbits : list
            list of 'broadcast orbit' records from coordinates file
        values_per_orbit : list
        Returns
        -------
        message : tuple
        Raises
        ------
        RinexNavFileError
            Can't parse the orbit record.
        """
        message = []
        # shortcuts
        ln = self.item_len
        start = self.orbit_start
        end = self.orbit_end
        for num, orbit in enumerate(orbits):
            values = []
            for i in range(start, end, ln):
                # RINEX uses Fortran 'D' exponents; convert to 'e' for float().
                value = orbit[i:i + ln].rstrip().lower().replace('d', 'e')
                values.append(value)
            try:
                values = values[:values_per_orbit[num]]
                # Blank fields become 0.0.
                values = [s and float(s) or 0. for s in values]
            except ValueError:
                msg = "Can't parse the orbit: {}".format(orbit)
                raise RinexNavFileError(msg)
            message += values
        return tuple(message)
class RinexNavFileV2(RinexNavFile):
    """Reader for RINEX version 2.x navigation files."""
    # Orbit value area columns for the v2 fixed-width layout.
    orbit_start = 3
    orbit_end = 75
    # Maps the v2 header file-type letter to the constellation identifier.
    system = dict(
        N='G',
        G='R',
        H='S',
    )
    def __init__(self, filename):
        super().__init__(filename)
        self.filename = filename
        self.version, self.file_type = self.retrieve_ver_type(filename)
    @staticmethod
    def retrieve_ver_type(filename):
        """Returns RINEX version and type
        Returns
        -------
        tuple
            (float, str) -- RINEX version and type.
        Raises
        ------
        RinexNavFileError
            on empty file.
        """
        with open(filename) as rinex:
            try:
                header_line = next(rinex)
            except StopIteration:
                raise RinexNavFileError('Unexpected end of the file.')
            else:
                # Version occupies columns 1-9, the type letter column 21.
                version = float(header_line[:9])
                file_type = header_line[20]
        return version, file_type
    @staticmethod
    def parse_epoch(file_object):
        """Return satellite number, epoch and sv_clock
        Raises
        ------
        EOFError
            on the end of the file.
        RinexNavFileError
            when it can't parse the epoch record.
        """
        try:
            line = next(file_object)
        except StopIteration:
            raise EOFError
        number = line[0:2]
        # year, month, day, hour, min; +sec
        epoch = [line[i:i + 3] for i in range(2, 17, 3)]
        sec = line[17:22]
        # Three 19-char SV clock fields (bias, drift, drift rate).
        sv_clock = [line[i:i + 19] for i in (22, 41, 60)]
        sv_clock = [i.lower().replace('d', 'e') for i in sv_clock]
        try:
            number = int(number)
            sec = sec2sec_ms(float(sec))
            epoch = [int(i) for i in epoch] + list(sec)
            epoch = validate_epoch(epoch)
            sv_clock = [float(f) for f in sv_clock]
        except ValueError:
            msg = "Can't read epoch: {}.".format(line)
            raise RinexNavFileError(msg)
        return number, epoch, tuple(sv_clock)
    def __iter__(self):
        # Yields (system, satellite, epoch, sv_clock, message) per record.
        system = self.system[self.file_type]
        values_per_orbit = self.values_per_orbit[system]
        num_of_orbits = len(values_per_orbit)
        with open(self.filename, 'r') as file_object:
            self.skip_header(file_object)
            while True:
                try:
                    satellite, epoch, sv_clock = self.parse_epoch(file_object)
                except EOFError:
                    break
                orbits = self.read_orbits(file_object, num_of_orbits)
                message = self.parse_orbits(orbits, values_per_orbit)
                yield system, satellite, epoch, sv_clock, message
class RinexNavFileV3(RinexNavFile):
    """Reader for RINEX version 3.x navigation files."""
    # Orbit value area columns for the v3 fixed-width layout.
    orbit_start = 4
    orbit_end = 76
    def __init__(self, filename):
        super().__init__(filename)
        self.filename = filename
        version, file_type, system = self.retrieve_ver_type(filename)
        self.version = version
        self.file_type = file_type
        self.system = system
    @staticmethod
    def retrieve_ver_type(filename):
        """Return the RINEX version, file type and satellite system.
        """
        with open(filename) as rinex:
            header_line = next(rinex)
            # Version in columns 1-9, type letter in column 21, system in 41.
            version = float(header_line[:9])
            file_type = header_line[20]
            system = header_line[40]
        return version, file_type, system
    @staticmethod
    def parse_epoch(file_object):
        """Returns epoch components
        Parameters
        ----------
        file_object : file-like object
        Returns
        -------
        epoch_components : tuple
            (satellite_system, satellite_number, epoch, sv_clock)
        Raises
        ------
        EOFError
            on the end of the file.
        RinexNavFileError
            when it can't parse the epoch record.
        """
        try:
            line = next(file_object)
        except StopIteration:
            raise EOFError
        system = line[0]
        number = line[1:3]
        # month, day, hour, min, sec; year + ...
        epoch = [line[i:i + 3] for i in range(8, 23, 3)]
        epoch = [line[4:9]] + epoch
        sv_clock = [line[i:i + 19] for i in (23, 42, 61)]
        sv_clock = [i.lower().replace('d', 'e') for i in sv_clock]
        try:
            number = int(number)
            # NOTE(review): this epoch list has 6 fields (no microseconds), so
            # validate_epoch's overflow check at indices -2/-3 then targets
            # minutes/hours rather than seconds/minutes — confirm intended.
            epoch = [int(i) for i in epoch]
            epoch = validate_epoch(epoch)
            sv_clock = [float(f) for f in sv_clock]
        except ValueError:
            msg = "Can't read epoch: {}.".format(line)
            raise RinexNavFileError(msg)
        return system, number, epoch, tuple(sv_clock)
    def __iter__(self):
        # Yields (system, satellite, epoch, sv_clock, message) per record;
        # in v3 the system letter comes from each epoch record, not the header.
        with open(self.filename, 'r') as file_object:
            self.skip_header(file_object)
            while True:
                try:
                    (system, satellite,
                     epoch, sv_clock) = self.parse_epoch(file_object)
                except EOFError:
                    break
                values_per_orbit = self.values_per_orbit[system]
                num_of_orbits = len(values_per_orbit)
                orbits = self.read_orbits(file_object, num_of_orbits)
                message = self.parse_orbits(orbits, values_per_orbit)
                yield system, satellite, epoch, sv_clock, message
def rnx_nav(filename):
    """Return the RinexNavFile reader matching the file's RINEX version.

    Raises RinexNavFileError for unsupported versions.
    """
    version, file_type = RinexNavFileV2.retrieve_ver_type(filename)
    readers = (
        (RinexNavFileV2, {2.0, 2.01, 2.1, 2.11}),
        (RinexNavFileV3, {3.0, 3.01, 3.02, 3.03, 3.04}),
    )
    for reader_cls, supported in readers:
        if version in supported:
            return reader_cls(filename)
    msg = 'Version {} is not supported.'.format(version)
    raise RinexNavFileError(msg)
| StarcoderdataPython |
1633129 | print('\033[32m-=\033[m' * 30)
print('Analisador de Triângulos ')
print('\033[32m-=\033[m' * 30)
# Read the three segment lengths from the user.
a = float(input('Primeiro segmento: '))
b = float(input('Segundo segmento: '))
c = float(input('Terceiro segmento: '))
# Triangle inequality: each side must lie strictly between the difference
# and the sum of the other two sides.
if (b-c) < a < (b+c) and (a-c) < b < (a+c) and (a-b) < c < (a+b):
    print('Os segmentos acima PODEM FORMAR triângulo !')
else:
    print('Os segmentos acima NÃO PODEM FORMAR triângulo !')
| StarcoderdataPython |
204380 | <gh_stars>1-10
# Define a simple dictionary where the key "A" maps to a list of words.
myDict = {"A": ["apple", "animal"]}
# Look up and print the list stored under key "A".
print(myDict["A"]) | StarcoderdataPython |
8105623 | <gh_stars>10-100
#!/usr/bin/env python
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License is
# located at
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
import shutil
import errno
import zipfile
import requests
# Download target and destination directories for the Greengrass devices.
dl_name = "dxl_v3_4_3.zip"
url = "https://github.com/ROBOTIS-GIT/DynamixelSDK/archive/3.4.3.zip"
arm_dir = "arm/ggd/servo"
master_dir = "master/ggd/servo"
# Stream the Dynamixel SDK archive to disk in small chunks.
print("[begin] Downloading Dynamixel SDK: {0}".format(url))
r = requests.get(url, stream=True)
with open(dl_name, 'wb') as f:
    for chunk in r.iter_content(chunk_size=512):
        if chunk: # filter out keep-alive new chunks
            f.write(chunk)
r.close()
print("[end] Download complete.")
def make_sure_path_exists(path):
    """Create *path* (including parents) if needed.

    An already-existing directory is fine; any other OS error is re-raised.
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        # Only "already exists" is acceptable.
        if exc.errno != errno.EEXIST:
            raise
# Ensure both device directories exist before anything is copied into them.
make_sure_path_exists(arm_dir)
make_sure_path_exists(master_dir)
print("[begin] Unzipping download {0}".format(dl_name))
with zipfile.ZipFile(dl_name, 'r') as z:
    z.extractall(os.curdir)
    print("...extracted to: {0}".format(os.curdir))
print("[end] Unzipped download")
print("[begin] Configure Dynamixel for Raspbian.")
# Rewrite dynamixel_functions.py so it loads the Linux SBC shared library
# (relative to the file's own directory) instead of the Windows DLL.
dxl_python_file = os.curdir + '/DynamixelSDK-3.4.3/python/dynamixel_functions_py/dynamixel_functions.py'
win_match = r'dxl_lib = cdll.LoadLibrary("../../c/build/win32/output/dxl_x86_c.dll")'
win_after = r'# dxl_lib = cdll.LoadLibrary("../../c/build/win32/output/dxl_x86_c.dll")'
sbc_match = r'# dxl_lib = cdll.LoadLibrary("../../c/build/linux_sbc/libdxl_sbc_c.so")'
sbc_imp = 'import os\n'
sbc_path = 'dir_path = os.path.dirname(os.path.realpath(__file__))\n'
sbc_after = 'dxl_lib = cdll.LoadLibrary(dir_path + "/DynamixelSDK-3.4.3/c/build/linux_sbc/libdxl_sbc_c.so")'
out_fname = "dxl_funcs_py.tmp"
print("...finding lib strings...")
with open(dxl_python_file) as f:
    with open(out_fname, "w") as out:
        for line in f:
            if line.find(win_match) >= 0:
                # Comment out the Windows library load.
                out.write(line.replace(win_match, win_after))
            elif line.find(sbc_match) >= 0:
                # Un-comment the SBC load and make the path file-relative.
                out.write(sbc_imp)
                out.write(sbc_path)
                out.write(line.replace(sbc_match, sbc_after))
            else:
                out.write(line)
print("...replaced lib strings in temp dynamixel_functions.py.")
print("...copying Dynamixel directory...")
# Copy the SDK tree and the patched module into both device directories.
try:
    shutil.copytree(os.curdir + '/DynamixelSDK-3.4.3',
                    os.curdir + '/' + master_dir + '/DynamixelSDK-3.4.3')
    shutil.copytree(os.curdir + '/DynamixelSDK-3.4.3',
                    os.curdir + '/' + arm_dir + '/DynamixelSDK-3.4.3')
except OSError as ose:
    print("...ERROR copying Dynamixel directory:{0}".format(ose))
    exit(1)
print("...copied Dynamixel directory into ggd directories.")
print("...copying modified dynamixel_functions.py")
try:
    shutil.copy(out_fname,
                os.curdir + '/' + master_dir + '/dynamixel_functions.py')
    shutil.copy(out_fname,
                os.curdir + '/' + arm_dir + '/dynamixel_functions.py')
except OSError as ose:
    print("...ERROR copying modified dynamixel_functions.py:{0}".format(ose))
    exit(1)
print("...copied modified dynamixel_functions.py into ggd directories.")
print("[end] Configured Dynamixel for Raspbian.")
print("[begin] Cleaning up.")
# Remove the temp file, the extracted tree, and the downloaded archive.
os.remove(out_fname)
print("...removed temp dynamixel_functions.py.")
shutil.rmtree(os.curdir + '/DynamixelSDK-3.4.3')
print("...removed expanded Dynamixel SDK")
os.remove(dl_name)
print("...removed downloaded Dynamixel SDK")
print("[end] Cleaned up")
| StarcoderdataPython |
3590246 | <gh_stars>1-10
# Flat tuple alternating product names (even positions) and prices (odd).
produtos = ('Lapis', 1.75,
            'Borracha', 2,
            'Caderno', 15.98,
            'Estojo', 25,
            'Transferidor', 4.20,
            'Compasso', 9.99,
            'Mochila', 120.32,
            'Canetas', 22.30,
            'Livros', 34.90)

print('-'*43)
print('{:^43}'.format('LISTAGEM DE PREÇOS'))
print('-'*43)
for pos, valor in enumerate(produtos):
    if pos % 2 == 0:
        # Product name, dot-padded to 30 columns, no newline yet.
        print(f'{valor:.<30}', end='')
    else:
        # Price, right-aligned with two decimals, closes the line.
        print(f'R${valor:>10.2f}')
print('-'*43)
| StarcoderdataPython |
11284960 | import os
import signal
class SignalHandler(object):
    """Class to detect OS signals
    e.g. detect when CTRL+C is pressed and issue a callback

    Used as a context manager: on entry it installs a temporary handler
    for ``sig``; the handler runs ``callback`` once, then restores the
    original handler. On exit the signal can optionally be re-raised.
    """
    def __init__(self, sig=signal.SIGINT, callback=None, resignal_on_exit=False):
        # sig: the signal to intercept; callback: invoked on first delivery;
        # resignal_on_exit: re-send the signal when leaving the with-block.
        self.sig = sig
        self.interrupted = False
        self.released = False
        self.original_handler = None
        self.resignal_on_exit = resignal_on_exit
        self.callback = callback
    def __enter__(self):
        self.interrupted = False
        self.released = False
        self.original_handler = signal.getsignal(self.sig)
        def _handler(signum, frame):
            forward_signal = False
            if not self.interrupted:
                if self.callback:
                    # NOTE(review): the bare except silently swallows any
                    # callback failure — confirm this is intended.
                    try:
                        # A callback returning 'forward-signal' asks for the
                        # original handler to be invoked afterwards.
                        forward_signal = self.callback() == 'forward-signal'
                    except:
                        pass
                self.interrupted = True
                self.release()
                if forward_signal:
                    # NOTE(review): original_handler may be SIG_DFL/SIG_IGN
                    # (ints) or a callable expecting (signum, frame); calling
                    # it with no arguments can raise — confirm intended.
                    self.original_handler()
        signal.signal(self.sig, _handler)
        return self
    def __exit__(self, t, value, tb):
        self.release()
        if self.interrupted and self.resignal_on_exit:
            # Re-deliver the signal to this process with the original handler
            # back in place.
            os.kill(os.getpid(), self.sig)
    def release(self):
        # Restore the original handler; returns False if already restored.
        if self.released:
            return False
        signal.signal(self.sig, self.original_handler)
        self.released = True
        return True | StarcoderdataPython |
8161674 | import random
banner = '''\nWelcome to connect, I hope you enjoy your dots.\n- <NAME>[ka-ne-ct]\n'''
# All ASCII letters available to generate_str(). The original literal was
# missing commas ('b' 'c' 'd' concatenated into a single 'bcd' element),
# which dropped 'b', 'c' and 'd' as individual choices.
chars = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
data = [
'''When you bring your best to the table, no matter where you are or what you are doing, you bring out the best in others. And soon, you start to realize, that, in turn, helps them bring out the best in you. That’s the upward spiral. You find each other and form an elite group of go-to people in an otherwise ordinary context. I see that happen everywhere I go: circles or networks of go-to people who help each other and go out of their way to be mutually reliable.''',
'''“My mother always wanted to live near the water," she said. "She said it's the one thing that brings us all together. That I can have my toe in the ocean off the coast of Maine, and a girl my age can have her toe in the ocean off the coast of Africa, and we would be touching. On opposite sides of the world.”''',
'''“Reconnect to what makes you happy and brings you Joy. If there is something that used to make you happy which you have stopped doing, do it again. Seek to find deeper meaning and significance rather than living on the surface.”'''
]
headers = [
'''The connection flows through us all.''',
'''Connection of us flows through you.''',
'''Enjoyment is the best connection of all.'''
]
ids = []  # ids handed out so far; generate_id() checks against it for uniqueness
ssl = False  # when True, server_context() reports the 'https' scheme
strs = []  # strings handed out so far; generate_str() checks against it
titles = ['The connections you build over time.', 'Everything is connected.', 'Unlock the mind to connect to the universe of thought.']
version = '0.0'
# One randomly chosen (title, header, body) triple, fixed at import time.
random_data=(titles[random.randint(0,len(titles)-1)], headers[random.randint(0,len(headers)-1)], data[random.randint(0,len(data)-1)])
def generate_id():
    """Return a fresh 10-digit numeric id and record it in the global `ids`."""
    digits = [str(random.randint(0, 9)) for _ in range(0, 10)]
    fresh = ''.join(digits)
    if fresh in ids:
        fresh = generate_id()
    # NOTE(review): on a collision the recursive call has already appended
    # the replacement id, so it ends up in `ids` twice — confirm intended.
    ids.append(fresh)
    return fresh
def generate_str():
    """Return a fresh 10-letter string from `chars`, recorded in `strs`."""
    letters = [str(chars[random.randint(0, len(chars) - 1)]) for _ in range(0, 10)]
    fresh = ''.join(letters)
    if fresh in strs:
        fresh = generate_str()
    # NOTE(review): as in generate_id, a collision appends the replacement
    # string twice — confirm intended.
    strs.append(fresh)
    return fresh
def server_context():
    """Return the URL scheme implied by the module-level `ssl` flag."""
    return 'https' if ssl else 'http'
| StarcoderdataPython |
6449868 | <filename>src/pathmagic.py
import os
import sys
def setup():
    """Make this file's directory the CWD and the first sys.path entry.

    Returns the absolute directory path.
    """
    here = os.path.abspath(__file__)
    app_dir = os.path.dirname(here)
    os.chdir(app_dir)
    sys.path.insert(0, app_dir)
    return app_dir
| StarcoderdataPython |
219334 | # Copyright (c) IMToolkit Development Team
# This toolkit is released under the MIT License, see LICENSE.txt
import os
import sys
import glob
import re
import time
import shutil
from scipy import special
import numpy as np
import itertools
from imtoolkit import *
def getHammingDistanceTable(MCK, indsdec):
    """Return the symmetric MCK x MCK table of pairwise Hamming distances.

    Parameters
    ----------
    MCK : int
        Number of active-index combinations.
    indsdec : sequence
        Decimal (bitmask) representation of each combination.
    """
    # This method is not dependent on Q.
    # np.int was removed in NumPy 1.24; the builtin int is the exact equivalent.
    hds = np.zeros((MCK, MCK), dtype=int)
    imax = MCK * (MCK - 1) / 2
    i = 0
    for y in range(MCK):
        for x in range(y + 1, MCK):
            hammingdis = countErrorBits(indsdec[y], indsdec[x])
            # Fill both halves of the symmetric table.
            hds[y][x] = hds[x][y] = hammingdis
            i += 1
            print("%.3f percent completed." % (i / imax * 100.0))
    return hds
def getGoodDecsTable(M, K):
    """Group active-index sets (as decimal codes) by minimum Hamming distance.

    Starting from minHT = 4 and increasing in steps of 2, greedily keeps a
    subset of the M-choose-K activation patterns whose pairwise Hamming
    distance is at least minHT.

    Returns
    -------
    dict
        minimum Hamming distance -> list of decimal activation codes.
    """
    inds = list(itertools.combinations(range(M), K))
    indsv = convertIndsToVector(inds, M)
    indsdec = convertIndsToIndsDec(inds, M)
    MCK = len(inds)
    # np.int was removed in NumPy 1.24; the builtin int is equivalent.
    hds = np.zeros((MCK, MCK), dtype=int)  # This is not dependent on Q
    minHT = 4
    newdecs = {}
    # Distances from the first pattern to all others, computed up front.
    for x in range(1, MCK):
        hds[0][x] = np.sum(np.logical_xor(indsv[0].reshape(M), indsv[x].reshape(M)))
    while True:
        # `where` is provided by the star import (numpy) — TODO confirm.
        gooddecs = where(hds[0] >= minHT)[0].tolist()
        if len(gooddecs) == 0:
            break
        print("minHT = %d" % (minHT))
        newdecs[minHT] = [0]
        newdecs[minHT].extend(gooddecs)
        lennd = len(newdecs[minHT])
        deletepos = []
        for y in range(1, lennd):
            if y in deletepos:
                continue
            yp = newdecs[minHT][y]
            for x in range(y + 1, lennd):
                if x in deletepos:
                    continue
                xp = newdecs[minHT][x]
                # Distances are computed lazily and cached in hds.
                if hds[yp][xp] == 0:
                    hds[yp][xp] = np.sum(np.logical_xor(indsv[yp].reshape(M), indsv[xp].reshape(M)))
                if hds[yp][xp] < minHT:
                    deletepos.append(x)
            print("%.2f percent" % (100.0 * y / lennd))
        newdecs[minHT] = np.delete(newdecs[minHT], deletepos, axis = 0)
        if len(newdecs[minHT]) <= 1:
            del newdecs[minHT]
            break
        newdecs[minHT] = np.take(indsdec, newdecs[minHT]).tolist()
        if len(newdecs[minHT]) == 0:
            break
        minHT += 2
    return newdecs
def getGoodDecsTableSmallMemory(M, K):
    """Memory-lean variant of getGoodDecsTable.

    Instead of a full MCK x MCK distance table, it pre-filters patterns
    against the first one and computes pairwise distances in vectorized
    batches per row.

    Returns
    -------
    dict
        minimum Hamming distance -> list of decimal activation codes.
    """
    minHT = 4
    indsiter = itertools.combinations(range(M), K)
    # np.int was removed in NumPy 1.24; the builtin int is equivalent.
    firstivec = np.zeros(M, dtype=int)
    firstind = np.array(next(indsiter))
    firstivec[firstind] = 1
    firstdec = np.sum(np.power(2, firstind))
    # Extracts the active indices having minHT >= 4 w.r.t. the first pattern.
    indsvec = [firstivec]
    indsdec = [firstdec]
    for ind in indsiter:
        ivec = np.zeros(M, dtype=int)
        npind = np.array(ind)
        ivec[npind] = 1
        hd = getHammingDistance(firstivec, ivec)
        if hd < minHT:
            continue
        indsvec.append(ivec)
        indsdec.append(np.sum(np.power(2, npind)))
    indsvec = np.array(indsvec)
    MCK = len(indsvec)
    newdecs = {}
    while True:
        print("minHT = %d" % (minHT))
        newdecs[minHT] = indsdec
        lennd = len(newdecs[minHT])
        lstart = 0
        if minHT == 4:
            # Index 0 is the reference pattern; it was already filtered against.
            lstart = 1
        deletepos = []
        ys = np.array(list(range(lstart, lennd)))
        yi = 0
        y = ys[yi]
        while True:
            xs = np.array(list(range(y + 1, lennd)))
            xs = np.setdiff1d(xs, deletepos)
            if len(xs) > 0:
                # Vectorized Hamming distances from pattern y to all xs.
                vxs = np.take(indsvec, xs, axis = 0)
                vys = np.tile(indsvec[y], len(xs)).reshape(-1, M)
                hds = np.sum(np.logical_xor(vxs, vys), axis = 1)
                newdel = list(xs[np.where(hds < minHT)[0]])
                deletepos.extend(newdel)
                ys = np.setdiff1d(ys, newdel)
            print("%.2f percent" % (100.0 * y / lennd))
            yi += 1
            if yi >= len(ys):
                break
            y = ys[yi]
        newdecs[minHT] = list(np.delete(newdecs[minHT], deletepos, axis = 0))
        if len(newdecs[minHT]) <= 1:
            del newdecs[minHT]
            break
        if len(newdecs[minHT]) == 0:
            break
        minHT += 2
    return newdecs
def getAllIndsBasedOnDecFile(M, K, Q):
    """Load precomputed active-index sets for (M, K) from the dec file.

    Returns the index set with the largest minimum Hamming distance that
    still contains at least Q entries, converted via convertIndsDecToInds.
    Returns None when no dec file exists, and [] when the file exists but
    no stored set is large enough.
    """
    base_path = os.path.dirname(os.path.abspath(__file__))
    dec_filename = base_path + "/decs/M=%d_K=%d.txt" % (M, K)
    if not os.path.exists(dec_filename):
        return None
    with open(dec_filename, mode = 'r') as f:
        # NOTE: the dec file is trusted local data; eval() would be unsafe
        # on untrusted input.
        decs = eval(f.read())
    print("Read " + dec_filename)
    best_minh = 0
    # Keys iterate in insertion order; the last qualifying key wins.
    for minh in decs.keys():
        if Q <= len(decs[minh]):
            best_minh = minh
    print(best_minh)
    if best_minh > 0:
        return convertIndsDecToInds(decs[best_minh], M)
    return []
def outputCPLEXModelFile(M, K, Q):
    """Write an OPL/CPLEX model file that selects Q activation patterns.

    Uses the precomputed dec file when available, otherwise all M-choose-K
    combinations. Returns the written file name, or None when the required
    dec file is missing.
    """
    decallinds = getAllIndsBasedOnDecFile(M, K, Q)
    if K > 1 and K < M-1 and decallinds == None:
        print("The dec file for (%d,%d) does not exist." % (M,K))
        return None
    if decallinds != None and len(decallinds) > 0:
        allinds = decallinds
        #print(getMinimumHamming(allinds, M))
        allindsvec = convertIndsToVector(allinds, M)
        allindsmat = np.hstack(allindsvec).T.tolist() # MCK \times M
        MCK = len(allindsmat)
    else:
        # Fall back to every M-choose-K activation pattern.
        allinds = list(itertools.combinations(range(M), K))
        allindsvec = convertIndsToVector(allinds, M)
        allindsmat = np.hstack(allindsvec).T.tolist() # MCK \times M
        MCK = len(allindsmat)
    constraints = [" a[1] == 1;\n"]
    #
    basePath = os.path.dirname(os.path.abspath(__file__))
    fname = basePath + "/inds-raw/M=%d_K=%d_Q=%d.mod" % (M, K, Q)
    with open(fname, mode = 'w') as f:
        # Model data: sizes and the full activation matrix.
        f.write("int M=%d; int K=%d; int Q=%d; int MCK=%d;\n" % (M, K, Q, MCK))
        f.write("int allinds[1..MCK][1..M] = " + str(allindsmat) + ";\n\n")
        f.write("dvar boolean a[1..MCK];\n\n")
        #
        f.write("execute PARAMS {\n")
        f.write(" cplex.mipemphasis = 0;\n")
        f.write(" cplex.tilim = 60 * 60;\n")
        f.write(" cplex.mipdisplay = 3;\n")
        f.write("}\n\n")
        # Objective: balance how often each of the M indices is activated.
        f.write("minimize sum(m in 1..M) (abs(sum(q in 1..MCK)(a[q] * allinds[q][m]) - (Q * K / M)));\n\n")
        #
        f.write("subject to{\n")
        # add constraints
        f.writelines(constraints)
        f.write(" sum(q in 1..MCK)(a[q]) == Q;\n")
        f.write("}\n\n")
        # Post-solve: dump the selection vector to a result file.
        f.write("execute{\n")
        f.write(" var f = new IloOplOutputFile(\"M=\" + M + \"_K=\"+ K + \"_Q=\" + Q + \"_obj=\" + cplex.getObjValue() + \".txt\");\n")
        f.write(" f.write(a);\n")
        f.write(" f.close();\n")
        f.write("}\n")
    print("Saved to " + fname)
    return fname
def convertCPLEXOutputToInds(fname, M, K, Q):
    """Parse a CPLEX result file back into the selected active-index sets.

    The file contains the boolean selection vector; its nonzero positions
    pick rows out of the candidate index list.
    """
    allinds = np.array(list(itertools.combinations(range(M), K)))
    decallinds = getAllIndsBasedOnDecFile(M, K, Q)
    if decallinds != None and len(decallinds) > 0:
        allinds = decallinds
    with open(fname, mode='r') as f:
        content = f.read()
        # Normalize whitespace, then comma-separate so the text evaluates
        # as a Python sequence literal.
        content = re.sub(r'\s+', ' ', content)
        content = re.sub(r'^\s+', '', content)
        content = re.sub(r'\n', '', content)
        content = content.replace(" ", ",")
        #print(content)
        # NOTE(review): eval() on file content — acceptable only because the
        # file is generated locally by CPLEX; unsafe on untrusted input.
        inds = np.array(eval(content))
        #print(inds)
        #print(np.nonzero(inds)[0].tolist())
        inds = np.take(allinds, np.nonzero(inds)[0], axis = 0)
    return inds
def numpyToPythonStr(numpystr):
    """Turn numpy's printed array text into a Python-literal-like string."""
    # Drop decimal points, squeeze whitespace/newlines, and insert commas
    # before the 0/1 entries so the text can be embedded in generated docs.
    return numpystr.replace(".", "").replace("  ", " ").replace("\n\n", "\n").replace("\n ", ", ").replace(" 0", ", 0").replace(" 1", ", 1")
def convertIndsToRST(basePath, M, K, Q):
    """Generate the Sphinx .rst page for the (M, K, Q) index set, if stale.

    Copies the raw index file into the built docs tree and (re)writes the
    .rst source when the index file is newer than the existing outputs.
    """
    titlestr = "M = %d, K = %d, Q = %d" % (M, K, Q)
    filepat = basePath + "/inds/M=%d_K=%d_Q=%d_*.txt" % (M, K, Q)
    files = glob.glob(filepat)
    if len(files) == 0:
        print(filepat + " does not exist.")
        return
    fninds = files[0]
    # copy the original file to the build/html/ tree (only when stale)
    bpath = basePath + "/../docs/build/html/db/M=%d/M=%d_K=%d_Q=%d.txt" % (M, M, K, Q)
    if not os.path.exists(os.path.dirname(bpath)):
        os.mkdir(os.path.dirname(bpath))
    if not os.path.exists(bpath) or (os.path.exists(bpath) and os.stat(bpath).st_mtime < os.stat(fninds).st_mtime):
        shutil.copyfile(fninds, bpath)
    mpath = basePath + "/../docs/source/db/M=%d/" % (M)
    if not os.path.exists(mpath):
        os.mkdir(mpath)
    fname = mpath + "M=%d_K=%d_Q=%d.rst" % (M, K, Q)
    if not os.path.exists(fname) or (os.path.exists(fname) and os.stat(fname).st_mtime < os.stat(fninds).st_mtime):
        with open(fname, mode = 'w') as f:
            f.write("\n")
            f.write("=" * len(titlestr) + "\n")
            f.write(titlestr + "\n")
            f.write("=" * len(titlestr) + "\n")
            f.write("\n")
            fn = os.path.basename(fninds)
            iurl = "https://github.com/imtoolkit/imtoolkit/blob/master/imtoolkit/inds/" + fn.replace("=", "%3D")
            f.write("`" + fn + " is available here. <" + iurl + ">`_\n\n")
            fn = fn.replace(".txt", "")
            p = Parameters(fn)
            if p.Q <= 1024:
                # np.int was removed in NumPy 1.24; builtin int is equivalent.
                inds = np.loadtxt(fninds, dtype=int)
                inds = inds.reshape(p.Q, p.K).tolist()
                if p.Q <= 128:
                    at = np.array(convertIndsToMatrix(inds, M))
                    ats = numpyToPythonStr(str(at))
                    vrstr = numpyToPythonStr(str(np.array(convertIndsToVector(inds, M)).reshape(-1, p.M)))
                    vrstr = vrstr.replace("], ", "],\n ")
            f.write(".. code-block:: python\n\n")
            f.write(" # minimum Hamming distance = %d\n" % p["minh"])
            f.write(" # activation inequality = %d\n" % p["ineq"])
            if p.Q <= 1024:
                f.write(" # active indices\n")
                f.write(" a = " + str(inds) + "\n")
                if p.Q <= 128:
                    f.write(" # activation tensor\n")
                    f.write(" A = " + ats + "\n")
                    f.write(" # vector representation\n")
                    f.write(" " + vrstr + "\n")
                else:
                    f.write(" # activation tensor and its vector representation are omitted.\n")
            else:
                f.write(" # active indices, activation tensor and its vector representation are omitted.\n")
            f.write("\n")
        print("The generated rst was saved to " + fname)
def main():
    """Command-line dispatcher for the index-modulation search tool.

    With no arguments, print an example invocation for every supported mode
    string and exit.  Otherwise, each argument is parsed into a ``Parameters``
    object and the requested action (parameter-coverage report, CPLEX search,
    decoder-table search, evaluation, documentation generation, ...) is
    executed.  Wall-clock elapsed time is reported per argument.
    """
    if len(sys.argv) <= 1:
        # No arguments given: list one example mode string per supported mode.
        # NOTE(review): "MINH" is listed here but no branch below handles it --
        # presumably handled elsewhere or dead; confirm.
        print("DECPARAMS_M=32")
        print("DECPARAMSDO_M=32")
        print("SEARCHPARAMS_M=32_P=2")
        print("SEARCHPARAMSDO_M=32")
        print("SEARCH_M=2_K=1_Q=2")
        print("SEARCH_M=4_K=1_Q=2")
        print("EVAL_dm=dic_M=2_K=1_Q=2")
        print("EVAL_dm=dic_M=16_K=8_Q=16")
        print("EVAL_dm=mes_M=16_K=8_Q=16")
        print("EVAL_dm=wen_M=16_K=8_Q=16")
        print("DECSEARCH_M=4_K=2")
        print("DECEVAL_M=4_K=2")
        print("DECSEARCH_M=8_K=4")
        print("DECEVAL_M=8_K=4")
        print("DECSEARCH_M=16_K=8")
        print("DECEVAL_M=16_K=8")
        print("MINH")
        print("DOCMUPDATE_M=32")
        print("DOCMINDEX")
        print("COVERAGE")
        sys.exit()  # idiomatic replacement for quit(); same SystemExit behavior
    args = sys.argv[1:]
    for arg in args:
        print("-" * 50)
        print("arg = " + arg)
        params = Parameters(arg)
        basePath = os.path.dirname(os.path.abspath(__file__))
        start_time = time.time()
        if params.mode == "COVERAGE":
            # Report, for each even M (capped at 32), what fraction of the
            # possible (M, K, Q) parameter sets already have a search result
            # saved under inds/.
            allpossibleparams = 0
            hitcount = 0
            M = 2
            while True:
                for K in range(1, M):
                    ps = getIMParameters(M, K)
                    for p in ps:
                        allpossibleparams += 1
                        M, K, Q = p[0], p[1], p[2]
                        fpy = glob.glob(basePath + "/inds/M=%d_K=%d_Q=%d_*.txt" % (M, K, Q))
                        if len(fpy) > 0:
                            hitcount += 1
                M += 2
                print("M <= %d, %d / %d = %2.2f" % (M, hitcount, allpossibleparams, 100.0 * hitcount / allpossibleparams))
                if M == 32:
                    break
        elif params.mode == "SEARCHPARAMS" or params.mode == "SEARCHPARAMSDO":
            # Collect the (M, K, Q) parameter sets with no saved result yet,
            # then either print the search commands split round-robin into
            # params.P sets (SEARCHPARAMS) or run them all (SEARCHPARAMSDO).
            imparams = []
            allpossibleparams = 0
            M = 2
            while True:
                for K in range(1, M):
                    ps = getIMParameters(M, K)
                    for p in ps:
                        allpossibleparams += 1
                        M, K, Q = p[0], p[1], p[2]
                        fpy = glob.glob(basePath + "/inds/M=%d_K=%d_Q=%d_*.txt" % (M, K, Q))
                        if len(fpy) == 0:
                            imparams.append(p)
                M += 2
                if M > params.M:
                    break
            print("Possible IM parameters = %d" % allpossibleparams)
            print("Supported IM parameters = %d" % (allpossibleparams - len(imparams)))
            print("Search coverage = %.2f percent" % (100.0 - 100.0 * len(imparams) / allpossibleparams))
            # Cheapest searches (smallest Q) first.
            imparams.sort(key=lambda x: x[2])
            cmds = []
            for p in imparams:
                cmd = "imsearch SEARCH_M=%d_K=%d_Q=%d" % (p[0], p[1], p[2])
                cmds.append(cmd)
            if params.mode == "SEARCHPARAMS":
                for p in range(params.P):
                    print("Set %d ====================================" % p)
                    i = 0
                    for cmd in cmds:
                        if i % params.P == p:
                            print(cmd)
                        i += 1
                    print("")
            elif params.mode == "SEARCHPARAMSDO":
                for cmd in cmds:
                    os.system(cmd)
        elif params.mode == "DECPARAMS" or params.mode == "DECPARAMSDO":
            # Collect the (M, K) decoder-table parameters missing under decs/
            # and emit (or run) a parallel xargs command to search for them.
            decparams = []
            allpossibleparams = 0
            M = 2
            while True:
                for K in range(2, M - 1):  # excludes K = 1 and K = M - 1
                    decfilename = basePath + "/decs/M=%d_K=%d.txt" % (M, K)
                    if not os.path.exists(decfilename):
                        decparams.append([M, K])
                    allpossibleparams += 1
                M += 2
                if M > params.M:
                    break
            print("Search coverage = %.2f percent" % (100.0 - 100.0 * len(decparams) / allpossibleparams))
            # Cheapest searches (smallest binomial coefficient C(M, K)) first.
            decparams.sort(key=lambda x: special.binom(x[0], x[1]))
            cmd = "echo " + " ".join(["DECSEARCH_M=%d_K=%d" % (p[0], p[1]) for p in decparams]) + " | xargs -n1 -P15 imsearch"
            if params.mode == "DECPARAMSDO":
                os.system(cmd)
            else:
                print(cmd)
        elif params.mode == "DECSEARCH":
            # Search for a decoding table for (M, K) and save it under decs/.
            dectable = getGoodDecsTableSmallMemory(params.M, params.K)
            decfilename = basePath + "/decs/M=%d_K=%d_option=low.txt" % (params.M, params.K)
            with open(decfilename, mode='w') as f:
                f.write(str(dectable))
            print("Saved to " + decfilename)
        elif params.mode == "DECEVAL":
            # Load a saved decoding table and re-verify the minimum Hamming
            # distance of the index sets it encodes.
            decfilename = basePath + "/decs/M=%d_K=%d.txt" % (params.M, params.K)
            with open(decfilename, mode='r') as f:
                print("Read " + decfilename)
                # NOTE(review): eval() on file contents -- only safe because the
                # file is produced locally by DECSEARCH; do not point this at
                # untrusted input.
                dectable = eval(f.read())
                print(dectable)
            for minh in dectable.keys():
                print("minh = %d" % (minh))
                allinds = convertIndsDecToInds(dectable[minh], params.M)
                print(allinds)
                print("actual minh = %d" % (getMinimumHammingDistance(allinds, params.M)))
        elif params.mode == "SEARCH":
            if params.Q == 2 or params.Q * params.K <= params.M:
                # Evenly spaced index blocks are already optimal in this case,
                # so no solver run is needed.
                print("A self-evident solution is available for this setup.")
                qstarts = np.floor(np.arange(params.Q) * (params.M - params.K) / (params.Q - 1) + 0.5)
                # np.int was removed in NumPy 1.24; it was an alias of the
                # builtin int, which is used here instead.
                inds = np.zeros((params.Q, params.K), dtype=int)
                for q in range(params.Q):
                    inds[q] = qstarts[q] + np.arange(params.K)
                outputIndsToFile(inds, params.M)
            else:
                # Otherwise generate a CPLEX model file and run the solver.
                fname = outputCPLEXModelFile(params.M, params.K, params.Q)
                if fname is not None and ".mod" in fname:
                    os.system("oplrun " + fname)
                    # Convert the obtained solution to a numpy file
                    fcout = glob.glob(basePath + "/inds-raw/M=%d_K=%d_Q=%d_*.txt" % (params.M, params.K, params.Q))
                    if len(fcout) > 0:
                        fcout.sort()
                        fname = fcout[0]
                        obj = 0
                        if "obj=" in fname:
                            res = re.match(r'.*_obj=(\d+)', fname)
                            if res:
                                obj = int(res.group(1))
                        inds = convertCPLEXOutputToInds(fname, params.M, params.K, params.Q)
                        outputIndsToFile(inds, params.M)
        elif params.mode == "EVAL":
            # Print the vector representation and quality metrics of a
            # designed (or dictionary) index set.
            inds = getIndexes(params.dm, params.M, params.K, params.Q)
            print(np.array(convertIndsToVector(inds, params.M)).reshape(-1, params.Q))
            print("Minimum Hamming distance = %d" % getMinimumHammingDistance(inds, params.M))
            print("Inequality L1 = %d" % getInequalityL1(inds, params.M))
        elif params.mode == "DOCMUPDATE":
            # Regenerate the rst documentation pages for every supported
            # (M, K, Q) up to params.M.
            np.set_printoptions(threshold=np.inf)
            M = 2
            while True:
                for K in range(1, M):
                    ps = getIMParameters(M, K)
                    for p in ps:
                        M, K, Q = p[0], p[1], p[2]
                        convertIndsToRST(basePath, M, K, Q)
                M += 2
                if M > params.M:
                    break
        elif params.mode == "DOCMINDEX":
            # Rebuild the per-M index.rst files linking the generated pages.
            np.set_printoptions(threshold=np.inf)

            def lfs(f):
                # Sort key: order pages by (M, K) first, then Q.
                fn = os.path.basename(f).replace(".rst", "")
                p = Parameters(fn)
                return (p.M * 32 + p.K) * 10000000 + p.Q

            dirs = glob.glob(basePath + "/../docs/source/db/M=*")
            for mdir in dirs:
                print(mdir)
                M = int(os.path.basename(mdir).replace("M=", ""))
                titlestr = "M = %d" % M
                files = glob.glob(mdir + "/M=*.rst")
                files.sort(key=lfs)
                fout = mdir + "/index.rst"
                with open(fout, mode='w') as f:
                    f.write("\n")
                    f.write("=" * len(titlestr) + "\n")
                    f.write(titlestr + "\n")
                    f.write("=" * len(titlestr) + "\n")
                    f.write("\n\n")
                    f.write("This webpage provides the designed active indices in the :math:`M = %d` case.\n\n" % M)
                    frsts = [os.path.basename(frst).replace(".rst", "") for frst in files]
                    f.write(".. toctree::\n")
                    f.write(" :maxdepth: 2\n")
                    f.write(" :hidden:\n")
                    f.write(" \n")
                    for frst in frsts:
                        f.write(" " + frst + "\n")
                    f.write("\n")
                    for frst in frsts:
                        f.write("- :doc:`" + frst + "`\n")
                print("The index.rst file was saved to " + fout)
        elapsed_time = time.time() - start_time
        print("Elapsed time = %.10f seconds" % (elapsed_time))
| StarcoderdataPython |
3487872 | import pyimgur
from unsplash.api import Api
from unsplash.auth import Auth
# Pre-configured API clients and credentials used by the rest of the
# application.  NOTE(review): the "123" values are placeholders scrubbed from
# the published source -- real credentials must be supplied before use.
IMGUR = pyimgur.Imgur("123")
DARK_SKY_API_KEY = "123"
UNSPLASH = Api(Auth("123", "123", ""))
# Directory where log files are written (relative to the working directory).
LOG_DIR = "log"
# Sender account for outgoing mail; the password was scrubbed on publication.
SENDER_EMAIL = "123@123.123"
SENDER_PASSWORD = "<PASSWORD>"
| StarcoderdataPython |
6533590 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Module laying out basic functionality for reading and writing NITF files. This is **intended**
to represent base functionality to be extended for SICD, CPHD, and SIDD capability.
"""
import logging
from typing import List, Tuple
import re
import numpy
from .base import BaseChipper, BaseReader, AbstractWriter, int_func
from .bip import BIPChipper, BIPWriter
from ..nitf.nitf_head import NITFDetails, NITFHeader, ImageSegmentsType, DataExtensionsType
from ..nitf.security import NITFSecurityTags
from ..nitf.image import ImageSegmentHeader
from ..nitf.des import DataExtensionHeader
from .sicd_elements.blocks import LatLonType
from sarpy.geometry.geocoords import ecf_to_geodetic, geodetic_to_ecf
class MultiSegmentChipper(BaseChipper):
    """
    Required chipping object to allow for the fact that a single image in a
    NITF file will often be broken up into a collection of image segments.

    Each image segment gets its own `BIPChipper`; reads are dispatched to the
    child chippers whose bounds overlap the requested index ranges, and the
    pieces are assembled into a single output array.
    """

    __slots__ = ('_file_name', '_data_size', '_dtype', '_complex_out',
                 '_symmetry', '_bounds', '_bands_ip', '_child_chippers')

    def __init__(self, file_name, bounds, data_offsets, data_type,
                 symmetry=None, complex_type=False, bands_ip=1):
        """
        Parameters
        ----------
        file_name : str
            The name of the file from which to read
        bounds : numpy.ndarray
            Two-dimensional array of [row start, row end, column start, column end]
        data_offsets : numpy.ndarray
            Offset for each image segment from the start of the file
        data_type : numpy.dtype
            The data type of the underlying file
        symmetry : tuple
            See `BaseChipper` for description of 3 element tuple of booleans.
        complex_type : callable|bool
            See `BaseChipper` for description of `complex_type`
        bands_ip : int
            number of bands - this will always be one for sicd.
        """
        if not isinstance(bounds, numpy.ndarray):
            raise ValueError('bounds must be an numpy.ndarray, not {}'.format(type(bounds)))
        if not (bounds.ndim == 2 and bounds.shape[1] == 4):
            raise ValueError('bounds must be an Nx4 numpy.ndarray, not shape {}'.format(bounds.shape))
        data_sizes = numpy.zeros((bounds.shape[0], 2), dtype=numpy.int64)
        p_row_start, p_row_end, p_col_start, p_col_end = None, None, None, None
        for i, entry in enumerate(bounds):
            # Are the order of the entries in bounds sensible?
            if not (0 <= entry[0] < entry[1] and 0 <= entry[2] < entry[3]):
                raise ValueError('entry {} of bounds is {}, and cannot be of the form '
                                 '[row start, row end, column start, column end]'.format(i, entry))
            # Are the elements of bounds sensible in relative terms?
            # we must traverse by a specific block of columns until we reach the row limit,
            # and then moving on the next segment of columns - note that this will almost
            # always be a single block of columns only broken down in row order
            if i > 0:
                if not ((p_row_end == entry[0] and p_col_start == entry[2] and p_col_end == entry[3]) or
                        (p_col_end == entry[2] and entry[0] == 0)):
                    raise ValueError('The relative order for the chipper elements cannot be determined.')
            p_row_start, p_row_end, p_col_start, p_col_end = entry
            # define the data_sizes entry
            data_sizes[i, :] = (entry[1] - entry[0], entry[3] - entry[2])
        if not isinstance(data_offsets, numpy.ndarray):
            raise ValueError('data_offsets must be an numpy.ndarray, not {}'.format(type(data_offsets)))
        if not (len(data_offsets.shape) == 1):
            raise ValueError(
                'data_sizes must be an one-dimensional numpy.ndarray, '
                'not shape {}'.format(data_offsets.shape))
        if data_sizes.shape[0] != data_offsets.size:
            raise ValueError(
                'data_sizes and data_offsets arguments must have compatible '
                'shape {} - {}'.format(data_sizes.shape, data_sizes.size))
        self._file_name = file_name
        # all of the actual reading and reorienting work will be done by these
        # child chippers, which will read from their respective image segments
        self._child_chippers = tuple(
            BIPChipper(file_name, data_type, img_siz, symmetry=symmetry,
                       complex_type=complex_type, data_offset=img_off,
                       bands_ip=bands_ip)
            for img_siz, img_off in zip(data_sizes, data_offsets))
        self._bounds = bounds
        self._bands_ip = int_func(bands_ip)
        # overall size comes from the last bounds entry (largest row/col ends,
        # given the ordering validated above)
        data_size = (self._bounds[-1, 1], self._bounds[-1, 3])
        # all of the actual reading and reorienting done by child chippers,
        # so do not reorient or change type at this level
        super(MultiSegmentChipper, self).__init__(data_size, symmetry=(False, False, False), complex_type=False)

    def _read_raw_fun(self, range1, range2):
        # Read the requested (possibly strided) row/column ranges by
        # delegating to each child chipper whose bounds overlap the request.
        def subset(rng, start_ind, stop_ind):
            # find our rectangular overlap between the desired indices and chipper bounds
            # Returns (sub-range relative to this chipper, (output start, output end)),
            # or (None, None) when there is no overlap.
            if rng[2] > 0:
                if rng[1] < start_ind or rng[0] >= stop_ind:
                    return None, None
                # find smallest element rng[0] + mult*rng[2] which is >= start_ind
                mult1 = 0 if start_ind <= rng[0] else int_func(numpy.ceil((start_ind - rng[0])/rng[2]))
                ind1 = rng[0] + mult1*rng[2]
                # find largest element rng[0] + mult*rng[2] which is <= min(stop_ind, rng[1])
                max_ind = min(rng[1], stop_ind)
                mult2 = int_func(numpy.floor((max_ind - rng[0])/rng[2]))
                ind2 = rng[0] + mult2*rng[2]
            else:
                # negative stride case
                # NOTE(review): the disjointness test here mirrors the positive
                # branch with reversed roles; confirm edge behavior when
                # rng[1] == start_ind.
                if rng[0] < start_ind or rng[1] >= stop_ind:
                    return None, None
                # find largest element rng[0] + mult*rng[2] which is <= stop_ind-1
                mult1 = 0 if rng[0] < stop_ind else int_func(numpy.floor((stop_ind - 1 - rng[0])/rng[2]))
                ind1 = rng[0] + mult1*rng[2]
                # find smallest element rng[0] + mult*rng[2] which is >= max(start_ind, rng[1]+1)
                mult2 = int_func(numpy.floor((start_ind - rng[0])/rng[2])) if rng[1] < start_ind \
                    else int_func(numpy.floor((rng[1] - 1 - rng[0])/rng[2]))
                ind2 = rng[0] + mult2*rng[2]
            return (ind1, ind2, rng[2]), (mult1, mult2)

        range1, range2 = self._reorder_arguments(range1, range2)
        rows_size = int_func((range1[1]-range1[0])/range1[2])
        cols_size = int_func((range2[1]-range2[0])/range2[2])
        if self._bands_ip == 1:
            out = numpy.empty((rows_size, cols_size), dtype=numpy.complex64)
        else:
            out = numpy.empty((rows_size, cols_size, self._bands_ip), dtype=numpy.complex64)
        for entry, child_chipper in zip(self._bounds, self._child_chippers):
            row_start, row_end, col_start, col_end = entry
            # find row overlap for chipper - it's rectangular
            crange1, cinds1 = subset(range1, row_start, row_end)
            if crange1 is None:
                continue  # there is no row overlap for this chipper
            # find column overlap for chipper - it's rectangular
            crange2, cinds2 = subset(range2, col_start, col_end)
            if crange2 is None:
                continue  # there is no column overlap for this chipper
            if self._bands_ip == 1:
                out[cinds1[0]:cinds1[1], cinds2[0]:cinds2[1]] = child_chipper(crange1, crange2)
            else:
                out[cinds1[0]:cinds1[1], cinds2[0]:cinds2[1], :] = child_chipper(crange1, crange2)
        return out
class NITFReader(BaseReader):
    """
    A reader object for **something** in a NITF 2.10 container.

    Subclasses must implement :func:`_find_segments` and
    :func:`_construct_chipper` to define how image segments are grouped and
    read.
    """

    __slots__ = ('_nitf_details', )

    def __init__(self, nitf_details):
        """
        Parameters
        ----------
        nitf_details : NITFDetails
            The NITFDetails object
        """
        if not isinstance(nitf_details, NITFDetails):
            raise TypeError('The input argument for NITFReader must be a NITFDetails object.')
        self._nitf_details = nitf_details
        # get sicd structure - only present on SICD-aware NITFDetails subclasses
        if hasattr(nitf_details, 'sicd_meta'):
            sicd_meta = nitf_details.sicd_meta
        else:
            sicd_meta = None
        self._sicd_meta = sicd_meta
        # determine image segmentation from image headers
        segments = self._find_segments()
        # construct the chippers, one per segment group
        chippers = tuple(self._construct_chipper(segment, i) for i, segment in enumerate(segments))
        # construct regularly
        super(NITFReader, self).__init__(sicd_meta, chippers)

    def _find_segments(self):
        """
        Determine the image segment collections.

        Returns
        -------
        List[List[int]]
        """
        raise NotImplementedError

    def _construct_chipper(self, segment, index):
        """
        Construct the appropriate multi-segment chipper given the list of image
        segment indices.

        Parameters
        ----------
        segment : List[int]
        index : int

        Returns
        -------
        MultiSegmentChipper
        """
        raise NotImplementedError
class ImageDetails(object):
    """
    Helper class for managing the details about a given NITF segment.
    """

    __slots__ = (
        '_bands', '_dtype', '_complex_type', '_parent_index_range',
        '_subheader', '_subheader_offset', '_item_offset',
        '_subheader_written', '_pixels_written')

    def __init__(self, bands, dtype, complex_type, parent_index_range, subheader):
        """
        Parameters
        ----------
        bands : int
            The number of bands.
        dtype : str|numpy.dtype
            The dtype for the associated chipper.
        complex_type : bool|callable
            The complex_type for the associated chipper.
        parent_index_range : Tuple[int]
            Indicates `(start row, end row, start column, end column)` relative to
            the parent image.
        subheader : ImageSegmentHeader
            The image subheader.

        Raises
        ------
        ValueError
            If `bands` is not positive, or `parent_index_range` is malformed.
        TypeError
            If `subheader` is not an `ImageSegmentHeader`.
        """
        self._subheader_offset = None
        self._item_offset = None
        self._pixels_written = int_func(0)
        self._subheader_written = False
        self._bands = int_func(bands)
        if self._bands <= 0:
            raise ValueError('bands must be positive.')
        self._dtype = dtype
        self._complex_type = complex_type
        if len(parent_index_range) != 4:
            raise ValueError('parent_index_range must have length 4.')
        self._parent_index_range = (
            int_func(parent_index_range[0]), int_func(parent_index_range[1]),
            int_func(parent_index_range[2]), int_func(parent_index_range[3]))
        if self._parent_index_range[0] < 0 or self._parent_index_range[1] <= self._parent_index_range[0]:
            raise ValueError(
                'Invalid parent row start/end ({}, {})'.format(self._parent_index_range[0],
                                                               self._parent_index_range[1]))
        if self._parent_index_range[2] < 0 or self._parent_index_range[3] <= self._parent_index_range[2]:
            # fixed copy-paste: this message previously said "row" for the column check
            raise ValueError(
                'Invalid parent column start/end ({}, {})'.format(self._parent_index_range[2],
                                                                  self._parent_index_range[3]))
        if not isinstance(subheader, ImageSegmentHeader):
            raise TypeError(
                'subheader must be an instance of ImageSegmentHeader, got '
                'type {}'.format(type(subheader)))
        self._subheader = subheader

    @property
    def subheader(self):
        """
        ImageSegmentHeader: The image segment subheader.
        """
        return self._subheader

    @property
    def rows(self):
        """
        int: The number of rows.
        """
        return self._parent_index_range[1] - self._parent_index_range[0]

    @property
    def cols(self):
        """
        int: The number of columns.
        """
        return self._parent_index_range[3] - self._parent_index_range[2]

    @property
    def subheader_offset(self):
        """
        int: The subheader offset. Write-once; subsequent assignments are
        ignored with a warning.
        """
        return self._subheader_offset

    @subheader_offset.setter
    def subheader_offset(self, value):
        if self._subheader_offset is not None:
            logging.warning("subheader_offset is read only after being initially defined.")
            return
        self._subheader_offset = int_func(value)
        # the image data starts immediately after the subheader
        self._item_offset = self._subheader_offset + self._subheader.get_bytes_length()

    @property
    def item_offset(self):
        """
        int: The image offset.
        """
        return self._item_offset

    @property
    def end_of_item(self):
        """
        int: The position of the end of the image.
        """
        return self.item_offset + self.image_size

    @property
    def total_pixels(self):
        """
        int: The total number of pixels.
        """
        return self.rows*self.cols

    @property
    def image_size(self):
        """
        int: The size of the image in bytes.
        """
        return int_func(self.total_pixels*self.subheader.ABPP*len(self.subheader.Bands)/8)

    @property
    def pixels_written(self):
        """
        int: The number of pixels written
        """
        return self._pixels_written

    @property
    def subheader_written(self):
        """
        bool: The status of writing the subheader. Can only transition
        False -> True.
        """
        return self._subheader_written

    @subheader_written.setter
    def subheader_written(self, value):
        if self._subheader_written:
            return
        elif value:
            self._subheader_written = True

    @property
    def image_written(self):
        """
        bool: The status of whether the image segment is fully written. This
        naively checks assuming that no pixels have been written redundantly.
        """
        return self._pixels_written >= self.total_pixels

    def count_written(self, index_tuple):
        """
        Count the overlap that we have written in a given step.

        Parameters
        ----------
        index_tuple : Tuple[int]
            Tuple of the form `(row start, row end, column start, column end)`

        Returns
        -------
        None
        """
        new_pixels = (index_tuple[1] - index_tuple[0])*(index_tuple[3] - index_tuple[2])
        self._pixels_written += new_pixels
        if self._pixels_written > self.total_pixels:
            logging.error('A total of {} pixels have been written for an image that '
                          'should only have {} pixels.'.format(self._pixels_written, self.total_pixels))

    def get_overlap(self, index_range):
        """
        Determines overlap for the given image segment.

        Parameters
        ----------
        index_range : Tuple[int]
            Indicates `(start row, end row, start column, end column)` for prospective incoming data.

        Returns
        -------
        Union[Tuple[None, None], Tuple[Tuple[int, ...], Tuple[int, ...]]]
            `(None, None)` if there is no overlap. Otherwise, tuple of the form
            `((start row, end row, start column, end column),
              (parent start row, parent end row, parent start column, parent end column))`
            indicating the overlap portion with respect to this image, and the parent image.
        """
        def element_overlap(this_start, this_end, parent_start, parent_end):
            # one-dimensional interval intersection; (None, None) when disjoint
            st, ed = None, None
            if this_start <= parent_start <= this_end:
                st = parent_start
                ed = min(int_func(this_end), parent_end)
            elif parent_start <= this_start <= parent_end:
                st = int_func(this_start)
                ed = min(int_func(this_end), parent_end)
            return st, ed

        # do the rows overlap?
        row_s, row_e = element_overlap(index_range[0], index_range[1],
                                       self._parent_index_range[0], self._parent_index_range[1])
        if row_s is None:
            return None, None
        # do the columns overlap?
        col_s, col_e = element_overlap(index_range[2], index_range[3],
                                       self._parent_index_range[2], self._parent_index_range[3])
        if col_s is None:
            return None, None
        return (row_s-self._parent_index_range[0], row_e-self._parent_index_range[0],
                col_s-self._parent_index_range[2], col_e-self._parent_index_range[2]), \
               (row_s, row_e, col_s, col_e)

    def create_writer(self, file_name):
        """
        Creates the BIP writer for this image segment.

        Parameters
        ----------
        file_name : str
            The parent file name.

        Returns
        -------
        BIPWriter
        """
        if self._item_offset is None:
            raise ValueError('The image segment subheader_offset must be defined '
                             'before a writer can be defined.')
        return BIPWriter(
            file_name, (self.rows, self.cols), self._dtype,
            self._complex_type, data_offset=self.item_offset)
class DESDetails(object):
    """
    Helper class for managing the details about a given NITF Data Extension Segment.
    """

    __slots__ = (
        '_subheader', '_subheader_offset', '_item_offset', '_des_bytes',
        '_subheader_written', '_des_written')

    def __init__(self, subheader, des_bytes):
        """
        Parameters
        ----------
        subheader : DataExtensionHeader
            The data extension subheader.
        des_bytes : bytes
            The raw data extension segment payload to be written after the
            subheader.

        Raises
        ------
        TypeError
            If `subheader` or `des_bytes` have the wrong type.
        """
        self._subheader_offset = None
        self._item_offset = None
        self._subheader_written = False
        self._des_written = False
        if not isinstance(subheader, DataExtensionHeader):
            raise TypeError(
                'subheader must be an instance of DataExtensionHeader, got '
                'type {}'.format(type(subheader)))
        self._subheader = subheader
        if not isinstance(des_bytes, bytes):
            raise TypeError('des_bytes must be an instance of bytes, got '
                            'type {}'.format(type(des_bytes)))
        self._des_bytes = des_bytes

    @property
    def subheader(self):
        """
        DataExtensionHeader: The data extension subheader.
        """
        return self._subheader

    @property
    def des_bytes(self):
        """
        bytes: The data extension bytes.
        """
        return self._des_bytes

    @property
    def subheader_offset(self):
        """
        int: The subheader offset. Write-once; subsequent assignments are
        ignored with a warning.
        """
        return self._subheader_offset

    @subheader_offset.setter
    def subheader_offset(self, value):
        if self._subheader_offset is not None:
            logging.warning("subheader_offset is read only after being initially defined.")
            return
        self._subheader_offset = int_func(value)
        # the payload starts immediately after the subheader
        self._item_offset = self._subheader_offset + self._subheader.get_bytes_length()

    @property
    def item_offset(self):
        """
        int: The offset of the data extension payload.
        """
        return self._item_offset

    @property
    def end_of_item(self):
        """
        int: The position of the end of the data extension.
        """
        return self.item_offset + len(self._des_bytes)

    @property
    def subheader_written(self):
        """
        bool: The status of writing the subheader. Can only transition
        False -> True.
        """
        return self._subheader_written

    @subheader_written.setter
    def subheader_written(self, value):
        if self._subheader_written:
            return
        elif value:
            self._subheader_written = True

    @property
    def des_written(self):
        """
        bool: The status of writing the data extension payload. Can only
        transition False -> True.
        """
        return self._des_written

    @des_written.setter
    def des_written(self, value):
        if self._des_written:
            return
        elif value:
            self._des_written = True
def get_npp_block(value):
    """
    Determine the number of pixels per block value.

    Parameters
    ----------
    value : int
        The candidate dimension size.

    Returns
    -------
    int
        `value` unchanged when it is at most 8192, otherwise 0.
    """
    if value > 8192:
        return 0
    return value
def image_segmentation(rows, cols, pixel_size):
    """
    Determine the appropriate segmentation for the image.

    The image is tiled column-band by column-band: within one band of columns,
    rows are consumed top-to-bottom in chunks limited both by the 5-digit
    dimension field and the 10-digit segment-size field of the NITF headers.

    Parameters
    ----------
    rows : int
        Number of rows in the full image.
    cols : int
        Number of columns in the full image.
    pixel_size : int
        Number of bytes per pixel.

    Returns
    -------
    tuple
        Of the form `((row start, row end, column start, column end), ...)`,
        one entry per image segment.
    """
    im_seg_limit = 10**10 - 2  # as big as can be stored in 10 digits, given at least 2 bytes per pixel
    dim_limit = 10**5 - 1  # as big as can be stored in 5 digits
    im_segments = []
    row_offset = 0
    col_offset = 0
    col_limit = min(dim_limit, cols)
    while (row_offset < rows) and (col_offset < cols):
        # determine row count, given row_offset, col_offset, and col_limit
        # how many bytes per row for this column section
        row_memory_size = (col_limit - col_offset) * pixel_size
        # how many rows can we use
        row_count = min(dim_limit, rows - row_offset, int_func(im_seg_limit / row_memory_size))
        im_segments.append((row_offset, row_offset + row_count, col_offset, col_limit))
        row_offset += row_count  # move the next row offset
        if row_offset == rows:
            # move over to the next column section
            col_offset = col_limit
            col_limit = min(col_offset + dim_limit, cols)
            row_offset = 0
    return tuple(im_segments)
def interpolate_corner_points_string(entry, rows, cols, icp):
    """
    Interpolate the corner points for the given subsection from
    the given corner points. This supplies entries for the NITF headers.

    Parameters
    ----------
    entry : numpy.ndarray
        The corner pints of the form `(row_start, row_stop, col_start, col_stop)`
    rows : int
        The number of rows in the parent image.
    cols : int
        The number of cols in the parent image.
    icp : the parent image corner points in geodetic coordinates.

    Returns
    -------
    str
        Concatenated DMS-formatted lat/lon strings for the four corners, or
        the empty string when `icp` is None.
    """
    if icp is None:
        return ''
    if icp.shape[1] == 2:
        # pad (lat, lon) points with a zero height component
        icp_new = numpy.zeros((icp.shape[0], 3), dtype=numpy.float64)
        icp_new[:, :2] = icp
        icp = icp_new
    icp_ecf = geodetic_to_ecf(icp)
    const = 1. / (rows * cols)
    # corner order: (row_start, col_start), (row_stop, col_start),
    # (row_stop, col_stop), (row_start, col_stop)
    pattern = entry[numpy.array([(0, 2), (1, 2), (1, 3), (0, 3)], dtype=numpy.int64)]
    out = []
    for row, col in pattern:
        # bilinear interpolation of the four parent corners, carried out in
        # ECF coordinates to avoid lat/lon wrap-around issues
        pt_array = const * numpy.sum(icp_ecf *
                                     (numpy.array([rows - row, row, row, rows - row]) *
                                      numpy.array([cols - col, cols - col, col, col]))[:, numpy.newaxis], axis=0)
        pt = LatLonType.from_array(ecf_to_geodetic(pt_array)[:2])
        dms = pt.dms_format(frac_secs=False)
        out.append('{0:02d}{1:02d}{2:02d}{3:s}'.format(*dms[0]) + '{0:03d}{1:02d}{2:02d}{3:s}'.format(*dms[1]))
    return ''.join(out)
class NITFWriter(AbstractWriter):
__slots__ = (
'_file_name', '_security_tags', '_nitf_header', '_nitf_header_written',
'_img_groups', '_shapes', '_img_details', '_writing_chippers', '_des_details',
'_closed')
def __init__(self, file_name):
self._writing_chippers = None
self._nitf_header_written = False
self._closed = False
super(NITFWriter, self).__init__(file_name)
self._create_security_tags()
self._create_image_segment_details()
self._create_data_extension_details()
self._create_nitf_header()
@property
def nitf_header_written(self): # type: () -> bool
"""
bool: The status of whether of not we have written the NITF header.
"""
return self._nitf_header_written
@property
def security_tags(self): # type: () -> NITFSecurityTags
"""
NITFSecurityTags: The NITF security tags, which will be constructed initially using
the :func:`default_security_tags` method. This object will be populated **by reference**
upon construction as the `SecurityTags` property for `nitf_header`, each entry of
`image_segment_headers`, and `data_extension_header`.
.. Note:: required edits should be made before adding any data via :func:`write_chip`.
"""
return self._security_tags
@property
def nitf_header(self): # type: () -> NITFHeader
"""
NITFHeader: The NITF header object. The `SecurityTags` property is populated
using `security_tags` **by reference** upon construction.
.. Note:: required edits should be made before adding any data via :func:`write_chip`.
"""
return self._nitf_header
@property
def image_details(self): # type: () -> Tuple[ImageDetails]
"""
Tuple[ImageDetails]: The individual image segment details.
"""
return self._img_details
@property
def des_details(self): # type: () -> Tuple[DESDetails]
"""
Tuple[DESDetails]: The individual data extension details.
"""
return self._des_details
def _set_offsets(self):
"""
Sets the offsets for the ImageDetail and DESDetail objects.
Returns
-------
None
"""
if self.nitf_header is None:
raise ValueError("The _set_offsets method must be called AFTER the "
"_create_nitf_header, _create_image_segment_headers, "
"and _create_data_extension_headers methods.")
if self._img_details is not None and \
(self.nitf_header.ImageSegments.subhead_sizes.size != len(self._img_details)):
raise ValueError('The length of _img_details and the defined ImageSegments '
'in the NITF header do not match.')
elif self._img_details is None and \
self.nitf_header.ImageSegments.subhead_sizes.size != 0:
raise ValueError('There are no _img_details defined, while there are ImageSegments '
'defined in the NITF header.')
if self._des_details is not None and \
(self.nitf_header.DataExtensions.subhead_sizes.size != len(self._des_details)):
raise ValueError('The length of _des_details and the defined DataExtensions '
'in the NITF header do not match.')
elif self._des_details is None and \
self.nitf_header.DataExtensions.subhead_sizes.size != 0:
raise ValueError('There are no _des_details defined, while there are DataExtensions '
'defined in the NITF header.')
offset = self.nitf_header.get_bytes_length()
# set the offsets for the image details
if self._img_details is not None:
for details in self._img_details:
details.subheader_offset = offset
offset = details.end_of_item
# set the offsets for the data extensions
if self._des_details is not None:
for details in self._des_details:
details.subheader_offset = offset
offset = details.end_of_item
# set the file size in the nitf header
self.nitf_header.FL = offset
self.nitf_header.CLEVEL = self._get_clevel(offset)
def _write_file_header(self):
"""
Write the file header.
Returns
-------
None
"""
if self._nitf_header_written:
return
logging.info('Writing NITF header.')
with open(self._file_name, mode='r+b') as fi:
fi.write(self.nitf_header.to_bytes())
self._nitf_header_written = True
def prepare_for_writing(self):
"""
The NITF file header makes specific reference of the locations/sizes of
various components, specifically the image segment subheader lengths and
the data extension subheader and item lengths. These items must be locked
down BEFORE we can allocate the required file writing specifics from the OS.
Any desired header modifications (i.e. security tags or any other issues) must be
finalized, before the final steps to actually begin writing data. Calling
this method prepares the final versions of the headers, and prepares for actual file
writing. Any modifications to any header information made AFTER calling this method
will not be reflected in the produced NITF file.
.. Note:: This will be implicitly called at first attempted chip writing
if it has not be explicitly called before.
Returns
-------
None
"""
if self._nitf_header_written:
return
# set the offsets for the images and data extensions,
# and the file size in the NITF header
self._set_offsets()
self._write_file_header()
logging.info(
'Setting up the image segments in virtual memory. '
'This may require a large physical memory allocation, '
'and be time consuming.')
self._writing_chippers = tuple(
details.create_writer(self._file_name) for details in self.image_details)
def _write_image_header(self, index):
"""
Write the image subheader at `index`, if necessary.
Parameters
----------
index : int
Returns
-------
None
"""
details = self.image_details[index]
if details.subheader_written:
return
if details.subheader_offset is None:
raise ValueError('DESDetails.subheader_offset must be defined for index {}.'.format(index))
logging.info(
'Writing image segment {} header. Depending on OS details, this '
'may require a large physical memory allocation, '
'and be time consuming.'.format(index))
with open(self._file_name, mode='r+b') as fi:
fi.seek(details.subheader_offset)
fi.write(details.subheader.to_bytes())
details.subheader_written = True
def _write_des_header(self, index):
"""
Write the des subheader at `index`, if necessary.
Parameters
----------
index : int
Returns
-------
None
"""
details = self.des_details[index]
if details.subheader_written:
return
if details.subheader_offset is None:
raise ValueError('DESDetails.subheader_offset must be defined for index {}.'.format(index))
logging.info(
'Writing data extension {} header.'.format(index))
with open(self._file_name, mode='r+b') as fi:
fi.seek(details.subheader_offset)
fi.write(details.subheader.to_bytes())
details.subheader_written = True
def _write_des_bytes(self, index):
"""
Write the des bytes at `index`, if necessary.
Parameters
----------
index : int
Returns
-------
None
"""
details = self.des_details[index]
assert isinstance(details, DESDetails)
if details.des_written:
return
if not details.subheader_written:
self._write_des_header(index)
logging.info(
'Writing data extension {}.'.format(index))
with open(self._file_name, mode='r+b') as fi:
fi.seek(details.item_offset)
fi.write(details.des_bytes)
details.des_written = True
def _get_ftitle(self):
"""
Define the FTITLE for the NITF header.
Returns
-------
str
"""
raise NotImplementedError
def _get_fdt(self):
"""
Gets the NITF header FDT field value.
Returns
-------
str
"""
return re.sub(r'[^0-9]', '', str(numpy.datetime64('now', 's')))
def _get_ostaid(self):
"""
Gets the NITF header OSTAID field value.
Returns
-------
str
"""
return 'Unknown'
def _get_clevel(self, file_size):
"""
Gets the NITF complexity level of the file. This is likely always
dominated by the memory constraint.
Parameters
----------
file_size : int
The file size in bytes
Returns
-------
int
"""
def memory_level():
if file_size < 50*(1024**2):
return 3
elif file_size < (1024**3):
return 5
elif file_size < 2*(int_func(1024)**3):
return 6
elif file_size < 10*(int_func(1024)**3):
return 7
else:
return 9
def index_level(ind):
if ind <= 2048:
return 3
elif ind <= 8192:
return 5
elif ind <= 65536:
return 6
else:
return 7
row_max = max(entry[0] for entry in self._shapes)
col_max = max(entry[1] for entry in self._shapes)
return max(memory_level(), index_level(row_max), index_level(col_max))
def write_chip(self, data, start_indices=(0, 0), index=0):
"""
Write the data to the file(s). This is an alias to :code:`writer(data, start_indices)`.
Parameters
----------
data : numpy.ndarray
the complex data
start_indices : tuple[int, int]
the starting index for the data.
index : int
the chipper index to which to write
Returns
-------
None
"""
self.__call__(data, start_indices=start_indices, index=index)
def __call__(self, data, start_indices=(0, 0), index=0):
    """
    Write the data to the file(s).

    Parameters
    ----------
    data : numpy.ndarray
        the complex data
    start_indices : Tuple[int, int]
        the starting index for the data.
    index : int
        the main image index to which to write - parent group of NITF image segments.

    Returns
    -------
    None

    Raises
    ------
    IndexError
        If `index` is not a valid image group index.
    ValueError
        If `start_indices` has a negative entry, or the data placed at
        `start_indices` would extend beyond the total image shape.
    """

    if index >= len(self._img_groups):
        raise IndexError('There are only {} image groups, got index {}'.format(len(self._img_groups), index))
    self.prepare_for_writing()  # no effect if already called

    # validate the index and data arguments
    start_indices = (int_func(start_indices[0]), int_func(start_indices[1]))
    shape = self._shapes[index]

    # NOTE(review): the message says "positive", but the check actually permits 0
    if (start_indices[0] < 0) or (start_indices[1] < 0):
        raise ValueError('start_indices must have positive entries. Got {}'.format(start_indices))
    if (start_indices[0] >= shape[0]) or \
            (start_indices[1] >= shape[1]):
        raise ValueError(
            'start_indices must be bounded from above by {}. Got {}'.format(shape, start_indices))

    # the (row start, row end, col start, col end) footprint of `data`
    # in overall image coordinates
    index_range = (start_indices[0], start_indices[0] + data.shape[0],
                   start_indices[1], start_indices[1] + data.shape[1])
    if (index_range[1] > shape[0]) or (index_range[3] > shape[1]):
        raise ValueError(
            'Got start_indices = {} and data of shape {}. '
            'This is incompatible with total data shape {}.'.format(start_indices, data.shape, shape))

    # iterate over the image segments for this group, and write as appropriate
    for img_index in self._img_groups[index]:
        details = self._img_details[img_index]

        # overall_inds - overlap in overall image coordinates;
        # this_inds - same overlap in this segment's local coordinates
        overall_inds, this_inds = details.get_overlap(index_range)
        if overall_inds is None:
            # there is no overlap here, so skip
            continue

        self._write_image_header(img_index)  # no effect if already called
        # what are the relevant indices into data?
        data_indices = (overall_inds[0] - start_indices[0], overall_inds[1] - start_indices[0],
                        overall_inds[2] - start_indices[1], overall_inds[3] - start_indices[1])
        # write the data
        self._writing_chippers[img_index](data[data_indices[0]:data_indices[1], data_indices[2]: data_indices[3]],
                                          (this_inds[0], this_inds[2]))
        # count the written pixels
        details.count_written(this_inds)
def close(self):
    """
    Completes any necessary final steps.

    Warns about image segments that were not completely written, flushes
    any unwritten data extension segments, and closes the writing
    chippers. Safe to call more than once.

    Returns
    -------
    None
    """

    if self._closed:
        return

    # set this status first, in the event of some kind of error
    self._closed = True
    # ensure that all images are fully written
    if self.image_details is not None:
        for i, img_details in enumerate(self.image_details):
            if not img_details.image_written:
                # BUG FIX: message previously read "in not ... written {} or {}"
                logging.critical("This NITF file is not completely written and will be corrupt. "
                                 "Image segment {} has only written {} "
                                 "of {}".format(i, img_details.pixels_written, img_details.total_pixels))
    # ensure that all data extensions are fully written
    if self.des_details is not None:
        for i, des_detail in enumerate(self.des_details):
            if not des_detail.des_written:
                self._write_des_bytes(i)
    # close all the chippers
    if self._writing_chippers is not None:
        for entry in self._writing_chippers:
            entry.close()
# require specific implementations
def _create_security_tags(self):
    """
    Creates the main NITF security tags object with `CLAS` and `CODE`
    attributes set sensibly.

    It is expected that output from this will be modified as appropriate
    and used to set ONLY specific security tags in `data_extension_headers` or
    elements of `image_segment_headers`.

    If simultaneous modification of all security tags attributes for the entire
    NITF is the goal, then directly modify the value(s) using `security_tags`.

    Returns
    -------
    None

    Raises
    ------
    NotImplementedError
        Always - subclasses must populate ``self._security_tags``.
    """

    # subclasses do: self._security_tags = <something>
    raise NotImplementedError
def _create_image_segment_details(self):
    """
    Create the image segment headers.

    Expects ``self._security_tags`` to be populated first; subclasses
    are responsible for defining ``_img_groups``, ``_shapes`` and
    ``self._img_details``.

    Returns
    -------
    None

    Raises
    ------
    ValueError
        If the security tags have not been created yet.
    """

    if self._security_tags is None:
        # BUG FIX: the message previously misspelled the method name
        # as "_create_secrity_tags".
        raise ValueError(
            "This NITF has no previously defined security tags, so this method "
            "is being called before the _create_security_tags method.")
    # _img_groups, _shapes should be defined here or previously.
    # self._img_details = <something>
def _create_data_extension_details(self):
    """
    Create the data extension headers.

    Expects ``self._security_tags`` to be populated first; subclasses
    are responsible for defining ``self._des_details``.

    Returns
    -------
    None

    Raises
    ------
    ValueError
        If the security tags have not been created yet.
    """

    if self._security_tags is None:
        # BUG FIX: the message previously misspelled the method name
        # as "_create_secrity_tags".
        raise ValueError(
            "This NITF has no previously defined security tags, so this method "
            "is being called before the _create_security_tags method.")
    # self._des_details = <something>
def _get_nitf_image_segments(self):
    """
    Get the ImageSegments component for the NITF header.

    Returns
    -------
    ImageSegmentsType
    """

    if self._img_details is None:
        return ImageSegmentsType(subhead_sizes=None, item_sizes=None)

    count = len(self._img_details)
    subhead_sizes = numpy.zeros((count, ), dtype=numpy.int64)
    im_sizes = numpy.zeros((count, ), dtype=numpy.int64)
    for i, details in enumerate(self._img_details):
        subhead_sizes[i] = details.subheader.get_bytes_length()
        im_sizes[i] = details.image_size
    return ImageSegmentsType(subhead_sizes=subhead_sizes, item_sizes=im_sizes)
def _get_nitf_data_extensions(self):
    """
    Get the DataExtensions component for the NITF header.

    Returns
    -------
    DataExtensionsType
    """

    if self._des_details is None:
        return DataExtensionsType(subhead_sizes=None, item_sizes=None)

    count = len(self._des_details)
    subhead_sizes = numpy.zeros((count, ), dtype=numpy.int64)
    des_sizes = numpy.zeros((count, ), dtype=numpy.int64)
    for i, details in enumerate(self._des_details):
        subhead_sizes[i] = details.subheader.get_bytes_length()
        des_sizes[i] = len(details.des_bytes)
    return DataExtensionsType(subhead_sizes=subhead_sizes, item_sizes=des_sizes)
def _create_nitf_header(self):
    """
    Create the main NITF header and store it on ``self._nitf_header``.

    Returns
    -------
    None
    """

    # warn when the expected preparation steps appear not to have run yet
    for details, noun, method in (
            (self._img_details, 'image segments',
             '_create_image_segment_headers'),
            (self._des_details, 'data extensions',
             '_create_data_extension_headers')):
        if details is None:
            logging.warning(
                "This NITF has no previously defined {}, or the "
                "_create_nitf_header method has been called BEFORE the "
                "{} method.".format(noun, method))
    # NB: CLEVEL and FL will be corrected in prepare_for_writing method
    self._nitf_header = NITFHeader(
        Security=self.security_tags,
        CLEVEL=3,
        OSTAID=self._get_ostaid(),
        FDT=self._get_fdt(),
        FTITLE=self._get_ftitle(),
        FL=0,
        ImageSegments=self._get_nitf_image_segments(),
        DataExtensions=self._get_nitf_data_extensions())
| StarcoderdataPython |
5016258 | import asyncio
import logging
from centimani.headers import Headers
# Module-level logger; Connection wraps it with per-peer context.
_LOGGER = logging.getLogger(__name__)

# The HTTP request methods recognized by this framework.
REQUEST_METHODS = frozenset(
    {"GET", "HEAD", "POST", "OPTIONS", "PUT", "PATCH", "DELETE"}
)
#======================================#
# HTTP Response and Request Structures #
#======================================#
class Request:
    """Structure used to store server requests."""

    __slots__ = ("method", "path", "query", "headers")

    def __init__(self, method="GET", path="/", query=None, headers=None):
        self.method = method
        self.path = path
        self.query = query or {}
        self.headers = headers or Headers()

    def __repr__(self):
        described = ", ".join(
            "{0}: {1!r}".format(slot, getattr(self, slot))
            for slot in self.__slots__
        )
        return "Request({0})".format(described)
class Response:
    """Structure used to store server responses."""

    __slots__ = ("status", "headers")

    def __init__(self, status, headers=None):
        self.status = status
        self.headers = Headers()
        if headers:
            # Guard against headers=None: a dict-like Headers.update(None)
            # would raise TypeError. This also mirrors the default handling
            # used for the headers argument elsewhere in this module.
            self.headers.update(headers)

    def __repr__(self):
        fields = (
            "{0}: {1!r}".format(name, getattr(self, name))
            for name in self.__slots__
        )
        return "".join(("Response(", ", ".join(fields), ")"))
#======================================#
# Connection and Protocol base classes #
#======================================#
class ConnectionLogger(logging.LoggerAdapter):
    """A logging adapter that prefixes every message with the connection's
    peer (host, port) address.
    """

    def __init__(self, logger, peername):
        super().__init__(logger, {"peername": peername})

    def process(self, msg, kwargs):
        peername = self.extra["peername"]
        prefixed = "@{0[0]}:{0[1]}\n{1}".format(peername, msg)
        return prefixed, kwargs
class Connection:
    """Interface for connections initiated in ``Server``.

    Attributes:
    :server: The server that created this connection.
    :reader: The stream for reading data from the remote client.
    :writer: The stream for sending data to the remote client.
    :peername: A (host, port) tuple associated to the client, as returned
        by ``Socket.getpeername``.
    """

    def __init__(self, server, reader, writer, peername, logger=_LOGGER):
        """Initialize the connection, wrapping `logger` with peer context."""
        self._server = server
        self._reader = reader
        self._writer = writer
        self._peername = peername
        self._logger = ConnectionLogger(logger, self.peername)

    @property
    def server(self):
        return self._server

    @property
    def reader(self):
        return self._reader

    @property
    def writer(self):
        return self._writer

    @property
    def peername(self):
        return self._peername

    @property
    def logger(self):
        return self._logger

    @property
    def _loop(self):
        """Shortcut property that returns the server's loop."""
        return self._server._loop

    async def listen(self):
        """Run the connection.

        Executed when the client connects to the server; returns when the
        connection should be closed. Subclasses must implement this.
        """
        raise NotImplementedError

    def close(self):
        """Close the connection's writer unless it is already closing."""
        writer = self._writer
        if not writer.is_closing():
            writer.close()
class ProtocolHandler:
    """This class defines the layer between ``RequestHandler`` and
    ``Connection``. It will handle the reception of the request, the
    delegation to a request handler, receiving the payload of the
    request and sending the response.

    Attributes:
    :connection: The connection that created this transport.
    :request: The current request.
    :body_reader: The current body reader, used to read the request
        payload.
    :handler: The current handler, chosen from the current request.
    :response: The response sent, may be sent by the handler, or an
        error sent by the transport.
    :error: The current HTTP error, is None if there is no error.
    """

    def __init__(self, connection):
        """Initialize the transport with no request in flight."""
        self._connection = connection
        # per-request state, populated while a request is being processed
        self._request = None
        self._body_reader = None
        self._handler = None
        self._response = None
        self._error = None

    # Read-only accessors over the per-request state.

    @property
    def request(self):
        return self._request

    @property
    def body_reader(self):
        return self._body_reader

    @property
    def handler(self):
        return self._handler

    @property
    def response(self):
        return self._response

    @property
    def error(self):
        return self._error

    #------------------------------------#
    # Shortcuts to connection attributes #
    #------------------------------------#

    @property
    def _loop(self):
        return self._connection._loop

    @property
    def _server(self):
        return self._connection._server

    @property
    def _logger(self):
        return self._connection._logger

    @property
    def _reader(self):
        return self._connection._reader

    @property
    def _writer(self):
        return self._connection._writer

    @property
    def _peername(self):
        return self._connection._peername

    #------------------#
    # Abstract methods #
    #------------------#

    async def send_response(self, status, headers=None, body=None):
        """Send an HTTP response to the remote client.

        Arguments:
        :status: The HTTP status of the response.
        :headers: A collection of header fields sent in the response.
        :body: the response payload body.
        """
        raise NotImplementedError

    async def send_error(self, code, headers=None, **kwargs):
        """Shortcut used to send HTTP errors (4xx/5xx) to the client."""
        # only client-error and server-error statuses are valid here
        assert 400 <= code < 600
        await self.send_response(code, headers)

    async def process_request(self):
        """Process a single request, then returns."""
        raise NotImplementedError
#=================#
# Request handler #
#=================#
class MetaRequestHandler(type):
    """
    Metaclass for all user-defined request handlers.

    Populates the ``methods`` attribute of each handler class with the
    lower-cased names of the HTTP methods it implements as coroutines,
    for easy access to the handler's allowed methods.
    """

    def __init__(cls, name, bases, namespace):
        # iscoroutinefunction(None) is False, so missing attributes are
        # filtered out without a separate existence check
        cls.methods = frozenset(
            method.lower() for method in REQUEST_METHODS
            if asyncio.iscoroutinefunction(getattr(cls, method.lower(), None))
        )
class RequestHandler:
    """Base class for all user defined request handlers.

    Any method defined in a subclass whose name matches an HTTP method
    (``get``, ``post``, ...) is called to handle that HTTP method.
    """

    @classmethod
    def allowed_methods(cls):
        """Return the HTTP methods this handler class implements."""
        supported = (
            method for method in REQUEST_METHODS
            if hasattr(cls, method.lower())
        )
        return frozenset(supported)

    def __init__(self, protocol):
        self._protocol = protocol
        self.request = protocol.request
        self.body_reader = protocol.body_reader

    def send_response(self, status, headers=None, body=None):
        """A shortcut to the protocol ``send_response`` method."""
        assert self.request is self._protocol.request
        payload = body.encode("utf-8") if isinstance(body, str) else body
        return self._protocol.send_response(status, headers, payload)

    def send_error(self, status, headers=None, **kwargs):
        """A shortcut to the protocol ``send_error`` method."""
        assert self.request is self._protocol.request
        return self._protocol.send_error(status, headers, **kwargs)

    async def can_continue(self):
        """Checks if the validity of the request may be asserted before
        reading the payload body.

        This function should return True if the request is fine or if
        this handler requires the payload body.

        When this function returns False, it should have sent an error
        prior to returning, or the default error 417 will be sent to the
        client.
        """
        return True
| StarcoderdataPython |
4879726 | """
BUFR - SYNOP Map
"""
# (C) Copyright 2017- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
#
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import metview as mv
# read bufr file
filename = "synop.bufr"
if mv.exist(filename):
bd = mv.read(filename)
else:
bd = mv.gallery.load_dataset(filename)
# define observation plotting
obsp = mv.mobs(obs_distance_apart=1.5, obs_size=0.25, obs_ring_size=0.2)
# define land-sea shading
coast = mv.mcoast(
map_coastline_land_shade="on",
map_coastline_land_shade_colour="grey",
map_coastline_sea_shade="on",
map_coastline_sea_shade_colour="RGB(0.7903,0.8438,0.943)",
map_grid_colour="charcoal",
map_grid_longitude_increment=10,
)
# define map
view = mv.geoview(
map_area_definition="corners", area=[30, -16, 75, 45], coastlines=coast
)
mv.setoutput(mv.pdf_output(output_name="synop_map"))
# generate plot
mv.plot(view, bd, obsp)
| StarcoderdataPython |
# NOTE: this script is Python 2 (it uses the `print` statement below).
import time
import json
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTShadowClient

#awshost you got from `aws iot describe-endpoint`
awshost = "a134g88szk3vbi.iot.us-east-1.amazonaws.com"

# Edit this to be your device name in the AWS IoT console
thing = "raspberry_pi2"
awsport = 8883  # standard MQTT-over-TLS port

# TLS credentials: root CA certificate, device certificate and private key
caPath = "/home/levon/iot_keys/root-CA.crt"
certPath = "/home/levon/iot_keys/raspberry_pi.cert.pem"
keyPath = "/home/levon/iot_keys/raspberry_pi.private.key"

def parse_payload(payload, responseStatus, token):
    # Shadow-get callback: pull the reported temperature and the epoch
    # timestamp of when it was reported out of the shadow JSON document,
    # then print both.
    parsed_json = json.loads(payload)
    temp_num = parsed_json['state']['reported']['temp']
    temp_time_epoch = parsed_json['metadata']['reported']['temp']['timestamp']
    print temp_num,temp_time_epoch

# Set up the shadow client
myShadowClient = AWSIoTMQTTShadowClient(thing)
myShadowClient.configureEndpoint(awshost, awsport)
# SDK argument order is (CAFilePath, KeyPath, CertificatePath)
myShadowClient.configureCredentials(caPath, keyPath, certPath)
myShadowClient.configureAutoReconnectBackoffTime(1, 32, 20)  # base/max/stable seconds
myShadowClient.configureConnectDisconnectTimeout(10)  # seconds
myShadowClient.configureMQTTOperationTimeout(5)  # seconds
myShadowClient.connect()
myDeviceShadow = myShadowClient.createShadowHandlerWithName("raspberry_pi", True)

# You can implement a custom callback function if you like, but once working I didn't require one. We still need to define it though.
customCallback = ""

# Poll the device shadow once a minute, forever.
while True:
    myDeviceShadow.shadowGet(parse_payload,5)
    time.sleep(60)
| StarcoderdataPython |
3426648 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains a default plist plugin in Plaso."""
from plaso.events import plist_event
from plaso.parsers import plist
from plaso.parsers.plist_plugins import interface
class BluetoothPlugin(interface.PlistPlugin):
    """Basic plugin to extract interesting Bluetooth related keys."""

    NAME = 'plist_bluetooth'
    DESCRIPTION = u'Parser for Bluetooth plist files.'

    PLIST_PATH = 'com.apple.bluetooth.plist'
    PLIST_KEYS = frozenset(['DeviceCache', 'PairedDevices'])

    # LastInquiryUpdate = Device connected via Bluetooth Discovery. Updated
    # when a device is detected in discovery mode. E.g. BT headphone power
    # on. Pairing is not required for a device to be discovered and cached.
    #
    # LastNameUpdate = When the human name was last set. Usually done only once
    # during initial setup.
    #
    # LastServicesUpdate = Time set when device was polled to determine what it
    # is. Usually done at setup or manually requested via advanced menu.

    def GetEntries(self, parser_context, match=None, **unused_kwargs):
        """Extracts relevant BT entries.

        Args:
        parser_context: A parser context object (instance of ParserContext).
        match: Optional dictionary containing extracted keys from PLIST_KEYS.
            The default is None.
        """
        root = '/DeviceCache'

        for device, value in match['DeviceCache'].items():
            name = value.get('Name', '')
            if name:
                name = u''.join(('Name:', name))

            # a paired device with a discovery timestamp gets an extra event
            # recording the pairing
            if device in match['PairedDevices']:
                desc = 'Paired:True {0:s}'.format(name)
                key = device
                if 'LastInquiryUpdate' in value:
                    event_object = plist_event.PlistEvent(
                        root, key, value['LastInquiryUpdate'], desc)
                    parser_context.ProduceEvent(event_object, plugin_name=self.NAME)

            if value.get('LastInquiryUpdate'):
                desc = u' '.join(filter(None, ('Bluetooth Discovery', name)))
                key = u''.join((device, '/LastInquiryUpdate'))
                event_object = plist_event.PlistEvent(
                    root, key, value['LastInquiryUpdate'], desc)
                parser_context.ProduceEvent(event_object, plugin_name=self.NAME)

            if value.get('LastNameUpdate'):
                desc = u' '.join(filter(None, ('Device Name Set', name)))
                key = u''.join((device, '/LastNameUpdate'))
                event_object = plist_event.PlistEvent(
                    root, key, value['LastNameUpdate'], desc)
                parser_context.ProduceEvent(event_object, plugin_name=self.NAME)

            if value.get('LastServicesUpdate'):
                # BUG FIX: previously written as `desc = desc = ...`
                desc = u' '.join(filter(None, ('Services Updated', name)))
                key = ''.join((device, '/LastServicesUpdate'))
                event_object = plist_event.PlistEvent(
                    root, key, value['LastServicesUpdate'], desc)
                parser_context.ProduceEvent(event_object, plugin_name=self.NAME)
# Register the plugin with the plist parser so it is discovered at runtime.
plist.PlistParser.RegisterPlugin(BluetoothPlugin)
| StarcoderdataPython |
8037563 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import gtk
import time
import appindicator
import httplib
import re
# How often (in minutes) to refresh the traffic-jam level.
UPDATE_FREQ_IN_MINUTES = 15

APP_VERSION = "0.1.0"
APP_ID = "yandex-probki-indicator"
# HTTP User-Agent sent to yandex.ru, identifying this app and its homepage.
USER_AGENT = "{0}/{1} ({2})".format(
    APP_ID, APP_VERSION,
    "{0}/{1}".format("https://github.com/ekalinin", APP_ID))
class YaJamsIndicator:
    # Panel indicator showing the current Yandex traffic-jam ("probki")
    # level. NOTE: relies on the legacy Python 2 PyGTK and appindicator
    # bindings.

    def __init__(self):
        self.ind = appindicator.Indicator(
            APP_ID, gtk.STOCK_INFO,
            appindicator.CATEGORY_APPLICATION_STATUS)
        self.ind.set_status(appindicator.STATUS_ACTIVE)
        self.menu_setup()

    def menu_setup(self):
        # Build the indicator menu with a single "Exit" item
        # (the label is Russian for "Exit").
        self.menu = gtk.Menu()
        self.quit_item = gtk.MenuItem("Выход")
        self.quit_item.connect("activate", self.quit)
        self.quit_item.show()
        self.menu.append(self.quit_item)
        self.ind.set_menu(self.menu)

    def quit(self, widget):
        sys.exit(0)

    def main(self):
        # Initial refresh, then poll on a timer and enter the GTK main loop.
        self.update_jams()
        gtk.timeout_add(UPDATE_FREQ_IN_MINUTES * 60 * 1000, self.update_jams)
        gtk.main()

    def get_icon_path(self, icon_filename):
        # Icons are resolved relative to the current working directory.
        return os.path.abspath(os.path.join('icons', icon_filename))

    def update_jams(self):
        # Timer callback: fetch the jam level and refresh the label/icon.
        # Returning True keeps the gtk.timeout_add timer alive.
        print ("{}: Updating jams ...".format(time.strftime("%d/%m %H:%M:%S")))
        lvl, lvl_txt = self.update_jams_level_helper() if False else self.get_jam_velel()
        self.ind.set_label(lvl_txt)
        if lvl > 0 and lvl <= 3:
            self.ind.set_icon(self.get_icon_path('green.svg'))
        elif lvl > 3 and lvl < 7:
            self.ind.set_icon(self.get_icon_path('yellow.svg'))
        else:
            self.ind.set_icon(self.get_icon_path('red.svg'))
        return True

    def get_jam_velel(self):
        # (sic: method name typo kept for compatibility)
        # Scrape www.yandex.ru for the jam level text, e.g. "7 баллов".
        # Returns (level, label-text), or (-1, "") when the level was not
        # found - note that -1 falls into the "red" branch above.
        conn = httplib.HTTPSConnection("www.yandex.ru")
        conn.request("GET", "/", None, {"User-Agent": USER_AGENT})
        resp = conn.getresponse().read()
        conn.close()
        p = re.compile('\d{1,2} бал[\S]*</a')
        res = p.findall(resp)
        if len(res) == 0:
            return (-1, "")
        else:
            txt = res[0].replace('</a', '')
            lvl = int(txt[:2])
            return (lvl, txt)
if __name__ == "__main__":
indicator = YaJamsIndicator()
indicator.main()
| StarcoderdataPython |
1776558 | <reponame>hp-storage/horizon-ssmc-link
# (c) Copyright [2015] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
try:
import json
except ImportError:
import simplejson as json
from horizon_hpe_storage.api.common import exceptions
from horizon_hpe_storage.api.common import http
LOG = logging.getLogger(__name__)
class HTTPJSONRESTClient(http.HTTPJSONRESTClient):
    """
    HTTP/REST client to access the SSMC backend service.
    """

    def authenticateSSMC(self, user, password, token, optional=None):
        """
        Create an authenticated session with the HPE3PAR SSMC service.

        A previously issued `token` is validated first and reused when it
        is still live; otherwise a new session token is requested and
        stored on ``self.session_key`` (None on failure).

        :param user: The username
        :type user: str
        :param password: Password
        :type password: str
        :param token: A previously issued session token, or None.
        :param optional: Optional extra fields merged into the login request.
        """
        try:
            # this prevents re-auth attempt if auth fails
            self.auth_try = 1
            self.session_key = None

            # first check if old token is still valid
            if token is not None:
                LOG.info("####### 1-check if SSMC Token is valid: %s\n", token)
                header = {'Authorization': token}
                try:
                    resp, body = self.get(
                        '/foundation/REST/sessionservice/sessions/' +
                        token + '/context',
                        headers=header)
                    LOG.info("####### 2-SSMC Token is valid: %s\n", token)
                    self.auth_try = 0
                    self.user = user
                    self.password = password
                    self.session_key = token
                    return
                except Exception:
                    # token has expired - fall through and request a new one
                    token = None

            info = {'username': user,
                    'password': password,
                    'adminLogin': False,
                    'authLoginDomain': 'LOCAL'}
            self._auth_optional = None

            if optional:
                self._auth_optional = optional
                info.update(optional)

            LOG.info("####### 3-request new token\n")
            resp, body = self.post('/foundation/REST/sessionservice/sessions',
                                   body=info)
            # avoid shadowing the `object` builtin while unpacking the reply
            session_object = None
            if body and 'object' in body:
                session_object = body['object']
            if session_object and 'Authorization' in session_object:
                self.session_key = session_object['Authorization']

            if self.session_key:
                LOG.info("####### 4-our new token: %s\n", self.session_key)
            else:
                LOG.info("####### 4-our new token: NONE\n")

            self.auth_try = 0
            self.user = user
            self.password = password
        except Exception as ex:
            # BUG FIX: previously logged `body`, which is unbound when the
            # POST itself raised, turning the handler into a NameError.
            LOG.error("Unable to create SSMC Authorization Token: %s\n", ex)
            self.session_key = None

    def getVolumeLink(self, name):
        """Look up the SSMC volume named `name` and cache its self link,
        system WWN, CPG and domain identifiers on this client."""
        self.auth_try = 1
        info = {'Authorization': self.session_key}
        nn = "'%s'" % name
        path = \
            '/provisioning/REST/volumeviewservice/volumes?query=name+eq+' + nn
        resp, body = self.get(path, headers=info)
        if body and 'count' in body:
            count = body['count']
            if count > 0:
                if 'members' in body:
                    members = body['members']
                    member = members[0]
                    if member:
                        if 'links' in member:
                            # store off link to this volume
                            links = member['links']
                            self_link = links[0]
                            if self_link and 'href' in self_link:
                                self.href = self_link['href']
                        if 'systemWWN' in member:
                            # store off link to array WWN for this volume
                            self.systemWWN = member['systemWWN']
                        if 'userCpgUid' in member:
                            # store off link to CPG for this volume
                            self.cpg = member['userCpgUid']
                        if 'domainUID' in member:
                            # store off link to Domain for this volume
                            self.domain = member['domainUID']

    def getCGroupLink(self, name):
        """Look up the volume set (consistency group) named `name` and
        cache its "self" link on this client."""
        self.auth_try = 1
        info = {'Authorization': self.session_key}
        nn = "'%s'" % name
        path = \
            '/provisioning/REST/volumesetviewservice/sets?query=name+eq+' + nn
        resp, body = self.get(path, headers=info)
        if body and 'count' in body:
            count = body['count']
            if count > 0:
                if 'members' in body:
                    members = body['members']
                    member = members[0]
                    if member:
                        if 'links' in member:
                            # store off the "self" link for this set
                            links = member['links']
                            for link in links:
                                if link['rel'] == "self":
                                    self.href = link['href']
                                    break

    # NOT NEEDED???
    def getVolumeDetails(self):
        """Fetch the previously cached volume link and store its uid and
        system WWN on this client."""
        self.auth_try = 1
        info = {'Authorization': self.session_key}
        cnt = self.href.find('/provisioning')
        ref = self.href[cnt:]
        resp, body = self.get(ref, headers=info)
        if body:
            if 'uid' in body:
                self.uid = body['uid']
            if 'systemWWN' in body:
                self.systemWWN = body['systemWWN']

    def getSessionKey(self):
        return self.session_key

    def getVolumeRef(self):
        return self.href

    def getVolumeCPG(self):
        return self.cpg

    def getVolumeDomain(self):
        return self.domain

    def getVolumeID(self):
        return self.uid

    def getSystemWWN(self):
        return self.systemWWN

    def _reauth(self):
        # BUG FIX: previously `self._auth_optional` was passed positionally
        # into the `token` parameter of authenticateSSMC, and the optional
        # login fields were never forwarded.
        self.authenticateSSMC(self.user, self.password, None,
                              self._auth_optional)

    def unauthenticateSSMC(self):
        """
        This clears the authenticated session with the 3PAR server.
        """
        # delete the session on the 3Par
        try:
            self.delete(
                '/foundation/REST/sessionservice/sessions/%s' %
                self.session_key)
            self.session_key = None
        except Exception:
            # NOTE(review): `self.request` is never set on this client;
            # this error path likely fails with AttributeError - confirm
            # against callers before relying on it.
            exceptions.handle(self.request,
                              ('Unable to log-off SSMC.'))
| StarcoderdataPython |
1755377 | <reponame>jimit105/leetcode-submissions
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def diameterOfBinaryTree(self, root: Optional[TreeNode]) -> int:
        """Return the number of edges on the longest path between any
        two nodes of the tree rooted at `root`."""
        best = [0]  # mutable cell so the nested helper can update it

        def height(node):
            # Post-order DFS: return the subtree height while tracking the
            # best left-height + right-height sum seen at any node.
            if node is None:
                return 0
            left = height(node.left)
            right = height(node.right)
            best[0] = max(best[0], left + right)
            return 1 + max(left, right)

        height(root)
        return best[0]
| StarcoderdataPython |
1966906 | <reponame>jafingerhut/dotfiles<filename>templates/python-3-template.py<gh_stars>1-10
#! /usr/bin/env python3
import os, sys
import re
#import argparse
#import collections
#import fileinput
#import glob
# To enable logging in a standalone Python program (as opposed to one
# that is part of pyATS, which seems to configure the logger by
# default in its own specific way):
#import logging
# Can replace __name__ with any string you want to appear in the log
# messages.
#log = logging.getLogger(__name__)
#log.setLevel(logging.DEBUG)
#logging.basicConfig(stream=sys.stdout)
# sys.exit(exit_status)
# Search for regex anywhere within string
# match = re.search(r"re pattern here", string)
# if match:
# Search for regex starting at beginning of string
# match = re.match(r"re pattern here", string)
# if match:
# For debugging
# import traceback
# traceback.print_stack() # to stderr
# traceback.print_stack(file=sys.stdout)
# Cmd line args in sys.argv
# sys.argv[0] is command name
# sys.argv[1] is 1st arg after command name
# Find all files matching a shell glob pattern like "foo*.txt":
#import glob
#matching_filename_list = glob.glob("foo*.txt")
# Dictionaries with something similar to Perl's auto-vivification
#import collections
#dict_default0 = collections.defaultdict(int)
#dict_default0['a'] += 1 # Makes value for key 'a' equal to 1 even if key 'a' did not already have a value
#dict_default_emptylist = collections.defaultdict(list)
#dict_default_emptydict = collections.defaultdict(dict)
# This is a trickier example of a dict that auto-vivifies 2 levels of
# keys deep, and at that level the default value is 0.
#fancydict = collections.defaultdict(lambda : collections.defaultdict(int))
# This is a trickier example of a dict that auto-vivifies 3 levels of
# keys deep, and at that level the default value is 0.
#fancydict = collections.defaultdict(lambda : collections.defaultdict(lambda : collections.defaultdict(int)))
# Run a command '/path/to/res' with arg 'avail', and return its output as a
# string.
#import subprocess
# output = subprocess.check_output(['/path/to/res', 'avail'])
######################################################################
# Parsing optional command line arguments
######################################################################
import argparse
parser = argparse.ArgumentParser(description="""
Text describing what this program does and how to use it.""")
parser.add_argument('--testbed', dest='testbed', type=topology.loader.load)
parser.add_argument('--R1', dest='R1', type=str,
help="""The name of the device in the testbed
file to test.""")
parser.add_argument('--module-num', dest='module_num', type=int,
help="""The slot number of the LC module that
is the focus of the performance test. Routes
may be installed on other modules as well, but
this is the only one where various show
commands will be run to collect measurements.""")
parser.add_argument('--intfs', dest='intfs', nargs='+',
help="""One or more interfaces to bring up and
assign IP addresses to before adding routes.
This helps control which ASIC instances will
have routes installed in them.""")
parser.add_argument('--trace', dest='trace',
choices=['enable', 'disable', 'leave-unchanged'],
help="""If 'enable', then configure 'hardware
forwarding unicast trace'. If 'disable', then
configure 'no hardware forwarding unicast
trace'. If 'leave-unchanged' (the default if
this option is not specified), then do not
configure either command, but leave it as it
is on the device.""")
args = parser.parse_known_args()[0]
######################################################################
# Example of regex split across multiple lines, and using named
# instead of numbered fields.
######################################################################
# Template snippet: a verbose (?x) regex with named groups matching an
# `ls -l`-style listing line. NOTE(review): `line` must be defined by the
# surrounding code - this is illustrative, not runnable as-is.
match = re.match(r"""(?x)
                 ^\s* (?P<size_bytes>\d+)
                 \s+ (?P<month>Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)
                 \s+ (?P<day>\d+)
                 \s+ (?P<hour>\d+):(?P<minute>\d+):(?P<second>\d+)
                 \s+ (?P<year>\d+)
                 \s+ (?P<filename>.*)$""", line)
if match:
    filename = match.group('filename')
    # a trailing '/' marks a directory entry; strip it from the name
    is_directory = False
    if filename[-1] == '/':
        is_directory = True
        filename = filename[:-1]
    file_info = {'is_directory': is_directory,
                 'size_bytes': match.group('size_bytes'),
                 'year': int(match.group('year')),
                 'month': match.group('month'),
                 'day': int(match.group('day')),
                 'hour': int(match.group('hour')),
                 'minute': int(match.group('minute')),
                 'second': int(match.group('second'))}
#for line in fileinput.input(files=['infile1', 'infile2']):
for line in fileinput.input():
# do something to line here
# Current file name: fileinput.filename()
# Line number within current file: fileinput.filelineno()
# Cumulative line number across all files: fileinput.lineno()
match = re.search(r"re pattern here", line)
if match:
| StarcoderdataPython |
6646820 | <reponame>imyhacker/yukabsen
#!bin/python
import os
#os.system('ifconfig');
def my_function():
    # Prints the interactive menu banner (Python 2 `print` statement
    # with a triple-quoted string).
    print """
====================================
Laravel Helper With Python
Coded By AriKUN | IndoSec
====================================
1. Start Server | 6. Make:Midd
2. Make:Contrll | 7. Make:Seed
3. Make:Mod | 8. Make:Prov
4. Make:MIg | 9. Make:Fact
5. Make:Mod + Mig | 10. Make:Req
====================================
"""
my_function()
masukan = raw_input("Kamu Memilih : ")
if masukan == '1':
print "Start Server"
os.system('php artisan serv')
elif masukan == '2':
a = raw_input('New Controller : ')
os.system('php artisan make:controller '+a)
elif masukan == '3':
a = raw_input('New Model : ')
os.system('php artisan make:model '+a)
elif masukan == '4':
a = raw_input('New Migration : ')
os.system('php artisan make:migration '+a)
elif masukan == '5':
a = raw_input('New Model + Mig : ')
os.system('php artisan make:model '+a+"-m")
pass
| StarcoderdataPython |
12839689 | #!/usr/bin/env python
"""
Example of using the hierarchical classifier to classify (a subset of) the digits data set.
Demonstrated some of the capabilities, e.g using a Pipeline as the base estimator,
defining a non-trivial class hierarchy, etc.
"""
from sklearn import svm
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn_hierarchical_classification.classifier import HierarchicalClassifier
from sklearn_hierarchical_classification.constants import ROOT
from sklearn_hierarchical_classification.metrics import h_fbeta_score, multi_labeled
from sklearn_hierarchical_classification.tests.fixtures import make_digits_dataset
# Used for seeding random state
RANDOM_STATE = 42
def classify_digits():
    r"""Classify a subset of the digits dataset with a hierarchical classifier.

    The class hierarchy used (all labels are strings)::

            <ROOT>
            /      \
           A        B
          / \      / \
         1   7    C   9
                 / \
                3   8
    """
    hierarchy = {
        ROOT: ["A", "B"],
        "A": ["1", "7"],
        "B": ["C", "9"],
        "C": ["3", "8"],
    }
    # Dimensionality reduction followed by a probabilistic SVM; the
    # hierarchical classifier accepts any estimator, including a Pipeline.
    pipeline = make_pipeline(
        TruncatedSVD(n_components=24),
        svm.SVC(
            gamma=0.001,
            kernel="rbf",
            probability=True,
        ),
    )
    clf = HierarchicalClassifier(
        base_estimator=pipeline,
        class_hierarchy=hierarchy,
    )
    features, labels = make_digits_dataset(
        targets=[1, 7, 3, 8, 9],
        as_str=False,
    )
    # Cast the targets to strings so label typing is consistent across the
    # whole hierarchy (internal nodes are strings too).
    labels = labels.astype(str)
    X_train, X_test, y_train, y_test = train_test_split(
        features,
        labels,
        test_size=0.2,
        random_state=RANDOM_STATE,
    )
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    print("Classification Report:\n", classification_report(y_test, y_pred))
    # Wrap the targets with the multi-label binarizer so the hierarchical
    # F-beta metric can be computed over the class graph.
    with multi_labeled(y_test, y_pred, clf.graph_) as (y_test_, y_pred_, graph_):
        print("h_fbeta_score: ", h_fbeta_score(y_test_, y_pred_, graph_))


if __name__ == "__main__":
    classify_digits()
| StarcoderdataPython |
112694 | # -*- coding: utf-8 -*-
import os
from icon.utils.in_memory_zip import InMemoryZip
def test_in_memory_zip():
    """Zip the 'data_to_zip' fixture directory and dump the archive to disk."""
    source_dir: str = os.path.join(os.path.dirname(__file__), "data_to_zip")
    zipper = InMemoryZip()
    zipper.run(source_dir)
    # Persist the in-memory archive so the result can be inspected manually.
    with open("data.zip", "wb") as out_file:
        out_file.write(zipper.data)
| StarcoderdataPython |
5186888 | """
This is the sqlalchemy class for communicating with the database.
"""
from datetime import datetime
from sqlalchemy import Integer, Unicode, DateTime, Boolean
from sqlalchemy import Column as col
from sqlalchemy.orm import relationship
import models.base as base
from models.dependent import Dependent
class CustomerFamily(base.Base):
    """Sqlalchemy deals model"""
    __tablename__ = "customerfamily"

    id = col(Integer, primary_key=True)
    email = col('email', Unicode, default='')
    phone = col('phone', Unicode, default='')
    address = col('address', Unicode, default='')
    city = col('city', Unicode, default='', nullable=False)
    state = col('state', Unicode, default='', nullable=False)
    zip = col('zip', Unicode, default='', nullable=False)
    datecreated = col('datecreated', DateTime, nullable=False)
    comments = col('comments', Unicode, default='')
    checkoutComments = col('checkoutcomments', Unicode, default='')
    adminComments = col('admincomments', Unicode, default='')
    isCustomer = col('is_customer', Boolean, default=True, nullable=False)
    isVolunteer = col('is_volunteer', Boolean, default=False, nullable=False)

    # Dependents are ordered with the primary contact first.
    depOrder = 'Dependent.isPrimary.desc()'
    dependents = relationship("Dependent", backref="family", order_by=depOrder)
    visits = relationship("Visit", backref="family", lazy="dynamic")
    vTable = "VolunteerVisit"
    volunteerVisits = relationship(vTable, backref="family", lazy="dynamic")

    def __checkFirstName__(self, formDependent, form):
        """Validate the dependent's first name; return True when invalid."""
        hasError = False
        if formDependent['firstName'].data == '':
            formError = 'First name is required'
            formDependent['firstName'].errors.append(formError)
            form.errors['dependent_firstname'] = 'required'
            hasError = True
        return hasError

    def __checkLastName__(self, formDependent, form):
        """Validate the dependent's last name; return True when invalid."""
        hasError = False
        if formDependent['lastName'].data == '':
            formErr = 'Last name is required'
            formDependent['lastName'].errors.append(formErr)
            form.errors['dependent_lastname'] = 'required'
            hasError = True
        return hasError

    def __checkBirthDate__(self, formDependent, form):
        """Validate the dependent's birth date; return True when invalid."""
        hasError = False
        # Only customers need a birthdate
        if not form.isCustomer.data:
            pass
        elif formDependent['birthdate'].data is None:
            formError = 'Birthday is required'
            formDependent['birthdate'].errors.append(formError)
            form.errors['dependent_birthdate'] = 'required'
            hasError = True
        elif formDependent['birthdate'].data < datetime(1900, 1, 1):
            # Implausible date: flag it and clear the field so it is re-entered.
            formError = 'Birthday must be after 1900'
            formDependent['birthdate'].errors.append(formError)
            form.errors['dependent_birthdate'] = 'required'
            formDependent['birthdate'].data = None
            hasError = True
        return hasError

    def __checkRelationship__(self, formDependent, form):
        """Validate the optional relationship code (1..5); return True when invalid."""
        hasError = False
        # Is optional
        if not formDependent['relationship'].data:
            pass
        elif formDependent['relationship'].data < 1 or \
                formDependent['relationship'].data > 5:
            formError = 'Relationship is invalid'
            formDependent['relationship'].errors.append(formError)
            form.errors['dependent_relationship'] = 'required'
            hasError = True
        return hasError

    def updatedFromRegistration(self, form):
        # Intentionally a no-op here -- presumably a hook meant to be filled
        # in for registration updates; confirm before relying on it.
        pass

    def fromForm(self, id, form):
        """Populate this family and its dependents from a submitted form.

        Raises Exception('Dependent data needed') as soon as one dependent
        row fails validation (errors are also recorded on the form).
        """
        if id is not None:
            self.id = id
            self.datecreated = form.datecreated.data
        else:
            # New record: stamp the creation time now.
            self.datecreated = datetime.now()
        self.email = form.email.data
        self.phone = form.phone.data
        self.address = form.address.data
        self.city = form.city.data
        self.state = form.state.data
        self.zip = form.zip.data
        self.comments = form.comments.data
        self.adminComments = form.adminComments.data
        self.isVolunteer = form.isVolunteer.data
        self.isCustomer = form.isCustomer.data
        for formDependent in form.dependents:
            # Skip completely empty non-primary rows.
            if not formDependent['isPrimary'].data and \
                    (formDependent['firstName'].data == '' and
                     formDependent['lastName'].data == ''):
                continue
            dependent = Dependent()
            dependent.id = formDependent['id'].data
            dependent.isPrimary = formDependent['isPrimary'].data
            hasError = self.__checkFirstName__(formDependent, form)
            dependent.firstName = formDependent['firstName'].data
            if self.__checkLastName__(formDependent, form):
                hasError = True
            dependent.lastName = formDependent['lastName'].data
            if self.__checkBirthDate__(formDependent, form):
                hasError = True
            dependent.birthdate = formDependent['birthdate'].data
            if self.__checkRelationship__(formDependent, form):
                hasError = True
            dependent.relationship = formDependent['relationship'].data
            if hasError:
                raise Exception('Dependent data needed')
            self.dependents.append(dependent)

    def findMatch(self, form, db):
        """Return an existing family matching the form's primary contact, or None."""
        matchedFam = None
        # A match is when the first name, last name, zip, and city all match
        for formDependent in form.dependents:
            if not formDependent['isPrimary'].data:
                continue
            deps = db.query(Dependent).filter(Dependent.isPrimary)\
                .filter(Dependent.firstName==formDependent.firstName.data)\
                .filter(Dependent.lastName==formDependent.lastName.data)
            for dep in deps:
                fam = dep.family
                if fam is not None and fam.zip == form.zip.data and fam.city == form.city.data:
                    matchedFam = fam
                    break
        return matchedFam
| StarcoderdataPython |
4866523 | import dataset
import matplotlib.pyplot as plt
from datetime import datetime
import pytz
# UTC-aware sentinel datetimes used as default query bounds in extract_data.
date_min = datetime.min.replace(tzinfo=pytz.UTC)
date_max = datetime.max.replace(tzinfo=pytz.UTC)
def generate_plot(process_data):
    """Plot one temperature curve per day and save the figure to out.png.

    :param process_data: list of days, each a list of measurement dicts
        carrying at least ``temperature`` and a ``date`` datetime.
    """
    hours, temps, labels = extract_temperature_hour(process_data)
    for day_hours, day_temps, day_label in zip(hours, temps, labels):
        plt.plot(day_hours, day_temps, label=day_label)
    plt.legend()
    plt.xlabel("Date")
    plt.ylabel("Temperature °C")
    plt.title("Temperature evolution")
    # The former ``verbose=False`` keyword was dropped: it was deprecated in
    # matplotlib 3.1 and removed in 3.4, where passing it raises TypeError.
    # The filename also lost its pointless f-string prefix.
    plt.savefig("out.png", dpi=300)
    # plt.show()
def parse_data(input_data):
    """Group chronologically ordered measurement rows into per-day lists.

    Fixes over the original implementation:

    * the first measurement of each new day is kept (it used to be dropped
      whenever the day rolled over);
    * any iterable is accepted, not only an iterator (``next`` was called
      directly on the argument, so plain lists raised TypeError);
    * an empty input returns ``[]`` instead of raising StopIteration.

    :param input_data: iterable of dicts with a ``date`` datetime key.
    :return: list of lists, one inner list per calendar day.
    """
    iterator = iter(input_data)
    try:
        first = next(iterator)
    except StopIteration:
        return []
    out_data = []
    current_day = first["date"].day
    current_data = [first]
    for row in iterator:
        day = row["date"].day
        if day != current_day:
            out_data.append(current_data)
            # Start the new day's bucket WITH this row (was lost before).
            current_data = [row]
        else:
            current_data.append(row)
        current_day = day
    if current_data:
        out_data.append(current_data)
    return out_data
def extract_data(db, _date_min=date_min, _date_max=date_max):
    """Fetch weather rows whose date lies within [_date_min, _date_max]."""
    # ``dataset`` turns the mapping below into a SQL BETWEEN clause.
    return db["weather"].find(date={"between": [_date_min, _date_max]})
def extract_temperature_hour(_data):
    """Split per-day measurement groups into plottable series.

    :param _data: list of days, each a non-empty list of dicts carrying
        ``temperature`` and a ``date`` datetime.
    :return: (hours, temperatures, labels) -- one entry per day; hours are
        fractional (hour + minute/60) and labels are "d/m/yyyy" strings.
    """
    out_hours = []
    out_temp = []
    out_labels = []
    for day in _data:
        out_temp.append([row["temperature"] for row in day])
        out_hours.append([row["date"].hour + row["date"].minute / 60 for row in day])
        # The series label comes from the day's last measurement date.
        last_date = day[-1]["date"]
        out_labels.append(f"{last_date.day}/{last_date.month}/{last_date.year}")
    return out_hours, out_temp, out_labels
if __name__ == "__main__":
    # Read all stored measurements, group them per day, render the chart.
    with dataset.connect("sqlite:///weather.db") as db:
        data = extract_data(db)
        data_parsed = parse_data(data)
        generate_plot(data_parsed)
| StarcoderdataPython |
3200112 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.forms import widgets
from django.forms.fields import ChoiceField
from django.forms.models import ModelForm
from django.utils.html import format_html
from django.utils.encoding import force_text
from django.utils.translation import ungettext_lazy, ugettext_lazy as _
from cms.plugin_pool import plugin_pool
from cmsplugin_cascade import app_settings
from cmsplugin_cascade.forms import ManageChildrenFormMixin
from cmsplugin_cascade.fields import GlossaryField
from .plugin_base import BootstrapPluginBase
from . import grid
def get_widget_choices():
    """Build (name, label) choice tuples for every configured breakpoint.

    The first breakpoint is labelled with an upper bound only, the last one
    with a lower bound only, and every breakpoint in between with both.
    """
    breakpoints = app_settings.CMSPLUGIN_CASCADE['bootstrap4']['fluid_bounds']
    last_index = len(breakpoints) - 1
    choices = []
    for index, (bp, bound) in enumerate(breakpoints.items()):
        if index == 0:
            label = "{} (<{:.1f}px)".format(bp.label, bound.max)
        elif index == last_index:
            label = "{} (≥{:.1f}px)".format(bp.label, bound.min)
        else:
            label = "{} (≥{:.1f}px and <{:.1f}px)".format(bp.label, bound.min, bound.max)
        choices.append((bp.name, label))
    return choices
class ContainerBreakpointsWidget(widgets.CheckboxSelectMultiple):
    """Checkbox group rendered through a Cascade-specific template."""
    template_name = 'cascade/forms/widgets/container_breakpoints.html'

    def render(self, name, value, attrs=None, renderer=None):
        # ``version`` lets the template adapt its markup to Bootstrap 4.
        attrs = dict(attrs, version=4)
        return super(ContainerBreakpointsWidget, self).render(name, value, attrs, renderer)
class BootstrapContainerForm(ModelForm):
    """
    Form class to validate the container.
    """
    def clean_glossary(self):
        # A container is meaningless without at least one active breakpoint.
        if len(self.cleaned_data['glossary']['breakpoints']) == 0:
            raise ValidationError(_("At least one breakpoint must be selected."))
        return self.cleaned_data['glossary']
class ContainerGridMixin(object):
    """Mixin that materializes a grid.Bootstrap4Container from the glossary."""

    def get_grid_instance(self):
        """Return a Bootstrap4Container bounded by the selected breakpoints."""
        try:
            breakpoints = [getattr(grid.Breakpoint, bp) for bp in self.glossary['breakpoints']]
        except KeyError:
            # No explicit selection stored: fall back to every known breakpoint.
            breakpoints = [bp for bp in grid.Breakpoint]
        # Fluid containers stretch to the viewport; fixed ones use defaults.
        source = grid.fluid_bounds if self.glossary.get('fluid', False) else grid.default_bounds
        bounds = {bp: source[bp] for bp in breakpoints}
        return grid.Bootstrap4Container(bounds=bounds)
class BootstrapContainerPlugin(BootstrapPluginBase):
    """Cascade plugin for the outermost Bootstrap '.container' element."""
    name = _("Container")
    parent_classes = None
    require_parent = False
    form = BootstrapContainerForm
    glossary_variables = ['container_max_widths', 'media_queries']
    glossary_field_order = ['breakpoints', 'fluid']
    model_mixins = (ContainerGridMixin,)

    breakpoints = GlossaryField(
        ContainerBreakpointsWidget(choices=get_widget_choices()),
        label=_('Available Breakpoints'),
        initial=[bp.name for bp in app_settings.CMSPLUGIN_CASCADE['bootstrap4']['fluid_bounds'].keys()],
        help_text=_("Supported display widths for Bootstrap's grid system."),
    )

    fluid = GlossaryField(
        widgets.CheckboxInput(),
        label=_('Fluid Container'), initial=False,
        help_text=_("Changing your outermost '.container' to '.container-fluid'.")
    )

    @classmethod
    def get_identifier(cls, obj):
        """Human-readable description shown in the CMS structure tree."""
        identifier = super(BootstrapContainerPlugin, cls).get_identifier(obj)
        breakpoints = obj.glossary.get('breakpoints')
        content = obj.glossary.get('fluid') and '(fluid) ' or ''
        if breakpoints:
            # NOTE(review): the stored selection is overwritten here with the
            # full configured bounds, so the description always lists every
            # configured device rather than the selected ones -- confirm
            # whether that is intentional.
            breakpoints = app_settings.CMSPLUGIN_CASCADE['bootstrap4']['fluid_bounds']
            devices = ', '.join([force_text(bp.label) for bp in breakpoints])
            content = _("{0}for {1}").format(content, devices)
        return format_html('{0}{1}', identifier, content)

    @classmethod
    def get_css_classes(cls, obj):
        # Emit 'container-fluid' or 'container' depending on the glossary flag.
        css_classes = cls.super(BootstrapContainerPlugin, cls).get_css_classes(obj)
        if obj.glossary.get('fluid'):
            css_classes.append('container-fluid')
        else:
            css_classes.append('container')
        return css_classes

    def save_model(self, request, obj, form, change):
        super(BootstrapContainerPlugin, self).save_model(request, obj, form, change)
        # Changing breakpoints may invalidate descendants; re-validate them.
        obj.sanitize_children()

plugin_pool.register_plugin(BootstrapContainerPlugin)
class BootstrapRowForm(ManageChildrenFormMixin, ModelForm):
    """
    Form class to add non-materialized field to count the number of children.
    """
    # Column counts that divide a 12-unit Bootstrap row evenly.
    ROW_NUM_COLUMNS = [1, 2, 3, 4, 6, 12]
    num_children = ChoiceField(
        choices=[(i, ungettext_lazy('{0} column', '{0} columns', i).format(i)) for i in ROW_NUM_COLUMNS],
        initial=3, label=_('Columns'),
        help_text=_('Number of columns to be created with this row.'))
class RowGridMixin(object):
    """Mixin that materializes a grid.Bootstrap4Row attached to its container."""

    def get_grid_instance(self):
        """Create a Bootstrap4Row and register it on the nearest ancestor grid."""
        row = grid.Bootstrap4Row()
        # The nearest grid owner can be a container, a column or a jumbotron.
        query = Q(plugin_type='BootstrapContainerPlugin') | Q(plugin_type='BootstrapColumnPlugin') \
            | Q(plugin_type='BootstrapJumbotronPlugin')
        container = self.get_ancestors().order_by('depth').filter(query).last().get_bound_plugin().get_grid_instance()
        container.add_row(row)
        return row
class BootstrapRowPlugin(BootstrapPluginBase):
    """Cascade plugin for a Bootstrap '.row' element."""
    name = _("Row")
    default_css_class = 'row'
    parent_classes = ['BootstrapContainerPlugin', 'BootstrapColumnPlugin', 'BootstrapJumbotronPlugin']
    form = BootstrapRowForm
    fields = ['num_children', 'glossary']
    model_mixins = (RowGridMixin,)

    @classmethod
    def get_identifier(cls, obj):
        """Describe the row by its column count in the structure tree."""
        identifier = super(BootstrapRowPlugin, cls).get_identifier(obj)
        num_cols = obj.get_num_children()
        content = ungettext_lazy("with {0} column", "with {0} columns", num_cols).format(num_cols)
        return format_html('{0}{1}', identifier, content)

    def save_model(self, request, obj, form, change):
        # Create/remove column children so their number matches the form value.
        wanted_children = int(form.cleaned_data.get('num_children'))
        super(BootstrapRowPlugin, self).save_model(request, obj, form, change)
        child_glossary = {'xs-column-width': 'col'}
        self.extend_children(obj, wanted_children, BootstrapColumnPlugin, child_glossary=child_glossary)

plugin_pool.register_plugin(BootstrapRowPlugin)
class ColumnGridMixin(object):
    # Glossary keys whose values contribute CSS classes to the grid math.
    # NOTE(review): 'xs-column-width' and 'xs-column-offset' each appear
    # twice; the second occurrence was probably meant to be 'xl-...' --
    # confirm against the Breakpoint enum before changing.
    valid_keys = ['xs-column-width', 'sm-column-width', 'md-column-width', 'lg-column-width', 'xs-column-width',
                  'xs-column-offset', 'sm-column-offset', 'md-column-offset', 'lg-column-offset', 'xs-column-offset']

    def get_grid_instance(self):
        """Rebuild the parent row's grid and return this plugin's own column."""
        column = None
        query = Q(plugin_type='BootstrapRowPlugin')
        row_obj = self.get_ancestors().order_by('depth').filter(query).last().get_bound_plugin()
        # column_siblings = row_obj.get_descendants().order_by('depth').filter(plugin_type='BootstrapColumnPlugin')
        row = row_obj.get_grid_instance()
        # Register every sibling column on the row so widths are computed in
        # context; remember the column matching this plugin.
        for column_sibling in self.get_siblings():
            classes = [val for key, val in column_sibling.get_bound_plugin().glossary.items()
                       if key in self.valid_keys and val]
            if column_sibling.pk == self.pk:
                column = grid.Bootstrap4Column(classes)
                row.add_column(column)
            else:
                row.add_column(grid.Bootstrap4Column(classes))
        return column
class BootstrapColumnPlugin(BootstrapPluginBase):
    """Cascade plugin for a Bootstrap column.

    The glossary form is built dynamically in get_form(): one group of
    width/offset/ordering/visibility fields per breakpoint offered by the
    ancestor container (or jumbotron).
    """
    name = _("Column")
    parent_classes = ('BootstrapRowPlugin',)
    child_classes = ('BootstrapJumbotronPlugin',)
    alien_child_classes = True
    # One width/offset/ordering/visibility attribute per breakpoint.
    default_css_attributes = [fmt.format(bp.name) for bp in grid.Breakpoint
                              for fmt in ('{}-column-width', '{}-column-offset', '{}-column-ordering', '{}-responsive-utils')]
    glossary_variables = ['container_max_widths']
    model_mixins = (ColumnGridMixin,)

    def get_form(self, request, obj=None, **kwargs):
        """Assemble the glossary fields for every available breakpoint."""
        def choose_help_text(*phrases):
            # Pick the help-text variant matching the breakpoint's position:
            # phrases = (has next-wider bound, lower bound only, single bp).
            bounds = 'fluid_bounds' if container.glossary.get('fluid') else 'default_bounds'
            bs4_breakpoints = app_settings.CMSPLUGIN_CASCADE['bootstrap4'][bounds]
            if last:
                return phrases[0].format(bs4_breakpoints[last].max)
            elif len(breakpoints) > 1:
                return phrases[1].format(bs4_breakpoints[first].min)
            else:
                return phrases[2]

        # Locate the ancestor defining the available breakpoints: the initial
        # parent while the plugin is being created, otherwise the nearest
        # container ancestor, falling back to a jumbotron ancestor.
        if 'parent' in self._cms_initial_attributes:
            container = self._cms_initial_attributes['parent'].get_ancestors().order_by('depth').last().get_bound_plugin()
        else:
            containers = obj.get_ancestors().filter(plugin_type='BootstrapContainerPlugin')
            if containers:
                container = containers.order_by('depth').last().get_bound_plugin()
            else:
                jumbotrons = obj.get_ancestors().filter(plugin_type='BootstrapJumbotronPlugin')
                container = jumbotrons.order_by('depth').last().get_bound_plugin()
        breakpoints = container.glossary['breakpoints']
        glossary_fields = []
        units = [ungettext_lazy("{} unit", "{} units", i).format(i) for i in range(0, 13)]
        for bp in breakpoints:
            try:
                # ``last`` is the next-wider breakpoint, None for the widest.
                last = getattr(grid.Breakpoint, breakpoints[breakpoints.index(bp) + 1])
            except IndexError:
                last = None
            finally:
                first = getattr(grid.Breakpoint, bp)
                devices = ', '.join([force_text(b.label) for b in grid.Breakpoint.range(first, last)])
            # Column width choices: flex, 1..12 fixed units, or auto; the
            # 'xs' breakpoint uses the unsuffixed Bootstrap class names.
            if bp == 'xs':
                choices = [('col', _("Flex column"))]
                choices.extend(('col-{}'.format(i), _("{} fixed column").format(units[i])) for i in range(1, 13))
                choices.append(('col-auto', _("Auto column")))
            else:
                choices = [('col-{}'.format(bp), _("Flex column"))]
                choices.extend(('col-{}-{}'.format(bp, i), _("{} fixed column").format(units[i])) for i in range(1, 13))
                choices.append(('col-{}-auto'.format(bp), _("Auto column")))
            if breakpoints.index(bp) == 0:
                # first breakpoint
                glossary_fields.append(GlossaryField(
                    widgets.Select(choices=choices),
                    label=_("Column width for {}").format(devices),
                    name='{}-column-width'.format(bp),
                    initial='col-{}-12'.format(bp),
                    help_text=choose_help_text(
                        _("Column width for devices narrower than {:.1f} pixels."),
                        _("Column width for devices wider than {:.1f} pixels."),
                        _("Column width for all devices."),
                    )
                ))
            else:
                # wider breakpoints may inherit from next narrower ones
                choices.insert(0, ('', _("Inherit from above")))
                glossary_fields.append(GlossaryField(
                    widgets.Select(choices=choices),
                    label=_("Column width for {}").format(devices),
                    name='{}-column-width'.format(bp),
                    initial='',
                    help_text=choose_help_text(
                        _("Override column width for devices narrower than {:.1f} pixels."),
                        _("Override column width for devices wider than {:.1f} pixels."),
                        _("Override column width for all devices."),
                    )
                ))
            # handle offset
            if breakpoints.index(bp) == 0:
                choices = [('', _("No offset"))]
                offset_range = range(1, 13)
            else:
                choices = [('', _("Inherit from above"))]
                offset_range = range(0, 13)
            if bp == 'xs':
                choices.extend(('offset-{}'.format(i), units[i]) for i in offset_range)
            else:
                choices.extend(('offset-{}-{}'.format(bp, i), units[i]) for i in offset_range)
            label = _("Offset for {}").format(devices)
            help_text = choose_help_text(
                _("Offset width for devices narrower than {:.1f} pixels."),
                _("Offset width for devices wider than {:.1f} pixels."),
                _("Offset width for all devices.")
            )
            glossary_fields.append(GlossaryField(
                widgets.Select(choices=choices),
                label=label,
                name='{}-column-offset'.format(bp),
                help_text=help_text))
            # handle column reordering
            choices = [('', _("No reordering"))]
            if bp == 'xs':
                choices.extend(('order-{}'.format(i), _("Reorder by {}").format(units[i])) for i in range(1, 13))
            else:
                choices.extend(('order-{}-{}'.format(bp, i), _("Reorder by {}").format(units[i])) for i in range(1, 13))
            label = _("Reordering for {}").format(devices)
            help_text = choose_help_text(
                _("Reordering for devices narrower than {:.1f} pixels."),
                _("Reordering for devices wider than {:.1f} pixels."),
                _("Reordering for all devices.")
            )
            glossary_fields.append(GlossaryField(
                widgets.Select(choices=choices),
                label=label,
                name='{}-column-ordering'.format(bp),
                help_text=help_text))
            # handle responsive utilities
            choices = [('', _("Default")), ('visible-{}'.format(bp), _("Visible")), ('hidden-{}'.format(bp), _("Hidden"))]
            label = _("Responsive utilities for {}").format(devices)
            help_text = choose_help_text(
                _("Utility classes for showing and hiding content by devices narrower than {:.1f} pixels."),
                _("Utility classes for showing and hiding content by devices wider than {:.1f} pixels."),
                _("Utility classes for showing and hiding content for all devices.")
            )
            glossary_fields.append(GlossaryField(
                widgets.RadioSelect(choices=choices),
                label=label,
                name='{}-responsive-utils'.format(bp),
                initial='',
                help_text=help_text))
        # Interleave the fields so the form groups them by attribute kind
        # (all widths, then all offsets, ...) across breakpoints.
        glossary_fields = [
            glossary_fields[i + len(glossary_fields) // len(breakpoints) * j]
            for i in range(0, len(glossary_fields) // len(breakpoints))
            for j in range(0, len(breakpoints))
        ]
        kwargs.update(glossary_fields=glossary_fields)
        return super(BootstrapColumnPlugin, self).get_form(request, obj, **kwargs)

    def save_model(self, request, obj, form, change):
        super(BootstrapColumnPlugin, self).save_model(request, obj, form, change)
        # Width changes may invalidate descendants; re-validate them.
        obj.sanitize_children()

    @classmethod
    def sanitize_model(cls, obj):
        # Delegates entirely to the base implementation.
        sanitized = super(BootstrapColumnPlugin, cls).sanitize_model(obj)
        return sanitized

    @classmethod
    def get_identifier(cls, obj):
        """Summarize the configured widths in the CMS structure tree."""
        identifier = super(BootstrapColumnPlugin, cls).get_identifier(obj)
        glossary = obj.get_complete_glossary()
        widths = []
        for bp in glossary.get('breakpoints', []):
            width = obj.glossary.get('{0}-column-width'.format(bp), '').replace('col-{0}-'.format(bp), '')
            if width:
                widths.append(width)
        if len(widths) > 1:
            content = _('widths: {0} units').format(' / '.join(widths))
        elif len(widths) == 1:
            width = widths[0]
            content = ungettext_lazy('default width: {0} unit', 'default width: {0} units', width).format(width)
        else:
            content = _('unknown width')
        return format_html('{0}{1}', identifier, content)

plugin_pool.register_plugin(BootstrapColumnPlugin)
| StarcoderdataPython |
3270355 | """
Antelope Interface Definitions
The abstract classes in this sub-package define what information is made available via a stateless query to an Antelope
resource of some kind. The interfaces must be instantiated in order to be used. In the core package
"""
from .interfaces.abstract_query import PrivateArchive, EntityNotFound, NoAccessToEntity
from .interfaces.iconfigure import ConfigureInterface
from .interfaces.iexchange import ExchangeInterface, ExchangeRequired
from .interfaces.iindex import IndexInterface, IndexRequired, directions, comp_dir, num_dir, check_direction, valid_sense, comp_sense
from .interfaces.ibackground import BackgroundInterface, BackgroundRequired
from .interfaces.iquantity import QuantityInterface, QuantityRequired, NoFactorsFound, ConversionReferenceMismatch, FlowableMismatch
from .interfaces.iforeground import ForegroundInterface
from .flows import BaseEntity, FlowInterface, Flow
from .refs.process_ref import MultipleReferences, NoReference
from .refs.catalog_ref import CatalogRef, QuantityRef, UnknownOrigin
from .refs.quantity_ref import convert, NoUnitConversionTable
from .refs.base import NoCatalog, EntityRefMergeError
from .refs.exchange_ref import ExchangeRef, RxRef
import re
from os.path import splitext
from collections import namedtuple
class PropertyExists(Exception):
    # Presumably raised when a property is defined twice -- no raise site is
    # visible in this module; confirm usage before documenting further.
    pass


'''
Query classes
'''
class BasicQuery(IndexInterface, ExchangeInterface, QuantityInterface):
    """Stateless query facade dispatching to an archive's interfaces."""

    def __init__(self, archive, debug=False):
        self._archive = archive
        self._dbg = debug

    def _perform_query(self, itype, attrname, exc, *args, strict=False, **kwargs):
        """Dispatch ``attrname`` to the archive's ``itype`` interface.

        Raises ``exc`` when the interface method yields nothing, since a
        successful query must return something.
        """
        if itype is None:
            itype = 'basic'
        handler = getattr(self._archive.make_interface(itype), attrname)
        result = handler(*args, **kwargs)
        if result is None:
            raise exc(itype, attrname, *args)
        return result

    @property
    def origin(self):
        """Semantic reference of the underlying archive."""
        return self._archive.ref

    def make_ref(self, entity):
        """
        Query subclasses can return abstracted versions of query results.

        :param entity:
        :return: an entity that could have a reference to a grounded query
        """
        if entity is None:
            return None
        # Plain entities get wrapped in a ref bound to this query; objects
        # that are already refs pass through unchanged.
        return entity.make_ref(self) if entity.is_entity else entity
'''
I think that's all I need to do!
'''


class LcQuery(BasicQuery, BackgroundInterface, ConfigureInterface):
    # Full-featured query: adds background and configure capabilities.
    pass


'''
Utilities
'''
def local_ref(source, prefix=None):
    """
    Create a semantic ref for a local filename: the full path with separators
    turned into dots, ':' and '~' removed, well-known archive/data extensions
    stripped, and surrounding dots trimmed.

    Local semantic references are not supposed to be distributed.

    :param source: path to a local file
    :param prefix: [None] default 'local'
    :return: dotted semantic reference, e.g. 'local.data.foo'
    """
    if prefix is None:
        prefix = 'local'
    xf = source.translate(str.maketrans('/\\', '..', ':~'))
    # Peel off any stack of recognized extensions (e.g. '.json.gz').
    while splitext(xf)[1] in {'.gz', '.json', '.zip', '.txt', '.spold', '.7z'}:
        xf = splitext(xf)[0]
    # strip('.') replaces the original leading/trailing while-loops, which
    # raised IndexError when the translated name reduced to an empty string.
    xf = xf.strip('.')
    return '.'.join([prefix, xf])
def q_node_activity(fg):
    """
    A reference quantity for dimensionless node activity. This should be part
    of Qdb reference quantities (but isn't).

    :param fg: foreground able to resolve / register canonical quantities
    :return: the canonical 'node activity' quantity, created on first use
    """
    try:
        return fg.get_canonical('node activity')
    except EntityNotFound:
        # First use: register the quantity, then resolve it canonically.
        fg.new_quantity('Node Activity', ref_unit='activity', external_ref='node activity', comment='MFA metric')
    return fg.get_canonical('node activity')
def enum(iterable, filt=None, invert=True):
    """
    Enumerate an iterable for interactive use. return it as a list. Optional
    negative filter supplied as regex.

    :param iterable:
    :param filt: optional regex matched case-insensitively against str(item)
    :param invert: [True] sense of filter. note default is negative i.e. to
        screen *out* matches (the thinking is that the input is already
        positive-filtered)
    :return: the surviving items, in order, as a list
    """
    if filt is None:
        candidates = iterable
    else:
        pattern = re.compile(filt, flags=re.I)
        if invert:
            candidates = (item for item in iterable if not pattern.search(str(item)))
        else:
            candidates = (item for item in iterable if pattern.search(str(item)))
    ret = []
    for k, v in enumerate(candidates):
        print(' [%02d] %s' % (k, v))
        ret.append(v)
    return ret
"""
In most LCA software, including the current operational version of lca-tools, a 'flow' is a composite entity
that is made up of a 'flowable' (substance, product, intervention, or service) and a 'context', which is
synonymous with an environmental compartment.
The US EPA review of elementary flows recommended managing the names of flowables and contexts separately, and that
is the approach that is done here.
The exchange model consists of : parent | flow(able), direction | [exch value] | [terminal node]
If the terminal node is a context, the exchange is elementary. if it's a process, then intermediate.
If none, then cutoff.
The new Flat Background already implements context-as-termination, but the main code has had to transition and we are
still technically debugging the CalRecycle project. So we introduce this flag CONTEXT_STATUS_ to express to client code
which one to do. It should take either of the two values: 'compat' means "old style" (flows have Compartments) and
'new' means use the new data model (exchange terminations are contexts)
"""
CONTEXT_STATUS_ = 'new' # 'compat': context = flow['Compartment']; 'new': context = exch.termination
# Containers of information about linked exchanges. Direction is given with respect to the termination.
ExteriorFlow = namedtuple('ExteriorFlow', ('origin', 'flow', 'direction', 'termination'))
# ProductFlow = namedtuple('ProductFlow', ('origin', 'flow', 'direction', 'termination', 'component_id'))
EntitySpec = namedtuple('EntitySpec', ('link', 'ref', 'name', 'group'))
# packages that contain 'providers'
antelope_herd = [
'antelope_background',
'antelope_foreground'
]
| StarcoderdataPython |
3587801 | <reponame>ygingras/mtlpyweb<gh_stars>1-10
from django.test import TestCase
from mtlpy import views
from mtlpy.models import Sponsor
class IntegrationTestCase(TestCase):
    """Smoke tests hitting the public site URLs via the Django test client."""

    def setUp(self):
        # One sponsor fixture so the sponsor detail page has data to render.
        self.sponsor = Sponsor.objects.create(
            name='test', slug='test', url='http://testserver.com', logo=None)

    def test_home_page(self):
        # The bare root redirects to the default-language home page.
        resp = self.client.get('/')
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(resp.url, '/en/')

    def test_home_page_redirected(self):
        resp = self.client.get('/en/')
        self.assertEqual(resp.status_code, 200)

    def test_styleguide(self):
        resp = self.client.get('/en/styleguide/')
        self.assertEqual(resp.status_code, 200)

    def test_videos(self):
        # Stub out the remote video listing so the view renders offline.
        views.get_all_videos = lambda x: []
        resp = self.client.get('/en/videos/')
        self.assertEqual(resp.status_code, 200)

    def test_sponsor_details(self):
        # Unknown slug -> 404; the fixture slug -> 200.
        resp = self.client.get('/en/sponsor/invalid/')
        self.assertEqual(resp.status_code, 404)
        resp = self.client.get('/en/sponsor/test/')
        self.assertEqual(resp.status_code, 200)

    def test_sponsorship(self):
        resp = self.client.get('/en/sponsorship/')
        self.assertEqual(resp.status_code, 200)

    def test_robotstxt(self):
        resp = self.client.get('/robots.txt')
        self.assertContains(resp, "User-agent: AhrefsBot")
        self.assertContains(resp, "Disallow: /")
| StarcoderdataPython |
3529497 | <filename>pf_constants.py
# Constants for use in the pynk_floyd project

# File path for training data
# TRAINING_DATA_PATH = "C:\\Users\\daeur\\PycharmProjects\\pynk_floyd\\Training Data\\darkside.txt"
TRAINING_DATA_PATH = "C:\\Users\\daeur\\PycharmProjects\\pynk_floyd\\Training Data\\pinkfloyd.txt"

# Number of training passes over the corpus.
EPOCHS = 20

# Lower = more predictable, higher = more surprising.
# This is a good number to tweak
TEMPERATURE = 0.85

# List of initial inputs for batch generation
# This will change after the proof of concept stage
# OUTPUT_SEEDS = ['D', 'A', 'R', 'K', 'S', 'I', 'D', 'E']
# OUTPUT_SEEDS = ['D', 'A', 'D']
OUTPUT_SEEDS = ['P', 'I', 'N', 'K', 'F', 'L', 'O', 'Y', 'D']

# Name to use in folder creation
PROJECT_NAME = 'pinkfloyd'

#####################################
# Some settings that have worked well
# EPOCHS 20 TEMPERATURE 0.85
#
#
#
| StarcoderdataPython |
6472597 | <reponame>bobwol/subscribely<gh_stars>10-100
"""
Module to provide plug-and-play authentication support for Google App Engine
using flask-auth.
"""
from google.appengine.ext import db
from flaskext.auth import AuthUser
class User(db.Model, AuthUser):
    """
    Implementation of User for persistence in Google's App Engine datastore.
    """
    username = db.EmailProperty()
    name = db.StringProperty()
    password = db.StringProperty()
    stripe_customer_id = db.StringProperty()
    salt = db.StringProperty()
    role = db.StringProperty()
    created = db.DateTimeProperty(auto_now_add=True)
    modified = db.DateTimeProperty(auto_now=True)

    def __init__(self, *args, **kwargs):
        # The username doubles as the datastore key name, enforcing uniqueness.
        kwargs['key_name'] = kwargs.get('username')
        super(User, self).__init__(*args, **kwargs)
        # NOTE(review): the published source was corrupted here (the lookup
        # was replaced by a '<PASSWORD>' redaction placeholder); restored to
        # read the plaintext password from the constructor kwargs, matching
        # the guard and comment below.
        password = kwargs.get('password')
        if password is not None and not self.has_key():
            # Initialize and encrypt password before first save.
            self.set_and_encrypt_password(password)

    @classmethod
    def get_by_username(cls, username):
        """Look up a user by username (which is the entity's key name)."""
        return cls.get_by_key_name(username)
6602838 | <reponame>TurtleSquad007/low-pressure<filename>ratecourse/api/migrations/0007_comments_user.py<gh_stars>0
# Generated by Django 3.2.9 on 2022-01-15 22:54
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds an optional free-text ``user`` field to the Comments model.

    dependencies = [
        ('api', '0006_auto_20220115_1511'),
    ]

    operations = [
        migrations.AddField(
            model_name='comments',
            name='user',
            field=models.CharField(blank=True, max_length=50),
        ),
    ]
| StarcoderdataPython |
18445 | <reponame>cptanalatriste/AttnGAN<gh_stars>0
from datetime import datetime
from typing import List
import dateutil
from datasets import TextDataset
from miscc.config import cfg_from_file, cfg
from torchvision.transforms import transforms
from attnganw.train import GanTrainerWrapper, BirdGenerationFromCaption
def get_text_dataset(tree_base_size: int, tree_branch_number: int, dataset_split: str,
                     data_directory: str) -> TextDataset:
    """Build the caption TextDataset with the standard AttnGAN augmentation.

    The target image size doubles with every extra branch of the GAN tree.
    """
    image_size = tree_base_size * (2 ** (tree_branch_number - 1))
    # Upscale slightly (76/64) before the random crop, then flip to augment.
    augmentation = transforms.Compose([
        transforms.Scale(int(image_size * 76 / 64)),
        transforms.RandomCrop(image_size),
        transforms.RandomHorizontalFlip(),
    ])
    return TextDataset(data_directory, dataset_split,
                       base_size=tree_base_size,
                       transform=augmentation)
def get_output_directory(dataset_name: str, config_name: str) -> str:
    """Return a timestamped output directory path for one run.

    The result has the form ``../output/<dataset>_<config>_<YYYY_MM_DD_HH_MM_SS>``.

    Bug fix: the original called ``dateutil.tz.tzlocal()`` while the file only
    does ``import dateutil`` — plain ``import dateutil`` does not import the
    ``dateutil.tz`` submodule, so that call can raise AttributeError unless
    some other module imported it first.  ``datetime.now().astimezone()``
    produces an aware datetime in the local timezone using only the stdlib.
    """
    now = datetime.now().astimezone()
    timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
    return '../output/%s_%s_%s' % (dataset_name, config_name, timestamp)
def generate_images(config_file: str, gpu_id: int, identifier: str,
                    caption_list: List[str]) -> List[BirdGenerationFromCaption]:
    """Generate one bird image per caption using a trained AttnGAN.

    Loads *config_file* into the module-global ``cfg`` (this mutates shared
    state, so it must happen before anything reads ``cfg``), selects the GPU,
    builds the test-split dataset and delegates generation to
    ``GanTrainerWrapper.generate_from_caption_list``.
    """
    cfg_from_file(config_file)
    cfg.GPU_ID = gpu_id  # must be set after cfg_from_file, which resets cfg
    dataset_split: str = 'test'
    shuffle_data_loader: bool = True
    output_directory: str = get_output_directory(dataset_name=cfg.DATASET_NAME, config_name=cfg.CONFIG_NAME)
    text_dataset: TextDataset = get_text_dataset(tree_base_size=cfg.TREE.BASE_SIZE,
                                                 tree_branch_number=cfg.TREE.BRANCH_NUM,
                                                 dataset_split=dataset_split, data_directory=cfg.DATA_DIR)
    gan_trainer_wrapper: GanTrainerWrapper = GanTrainerWrapper(output_directory=output_directory,
                                                               text_data_set=text_dataset,
                                                               batch_size=cfg.TRAIN.BATCH_SIZE,
                                                               shuffle_data_loader=shuffle_data_loader,
                                                               data_loader_workers=int(cfg.WORKERS),
                                                               split_directory=dataset_split)
    return gan_trainer_wrapper.generate_from_caption_list(identifier=identifier, caption_list=caption_list)
| StarcoderdataPython |
33370 | from rest_framework import serializers
from ..models import Faculties, Departaments, StudyGroups, Auditories, Disciplines
class FacultiesSerializers(serializers.ModelSerializer):
    """Faculties API: serializes every field of the Faculties model."""
    class Meta:
        fields = '__all__'
        model = Faculties
class DepartamentsSerializers(serializers.ModelSerializer):
    """Departaments API: serializes every field of the Departaments model."""
    class Meta:
        fields = '__all__'
        model = Departaments
class StudyGroupsSerializers(serializers.ModelSerializer):
    """StudyGroups API: serializes every field of the StudyGroups model."""
    class Meta:
        fields = '__all__'
        model = StudyGroups
class AuditoriesSerializers(serializers.ModelSerializer):
    """Auditories API: serializes every field of the Auditories model."""
    class Meta:
        fields = '__all__'
        model = Auditories
class DisciplinesSerializers(serializers.ModelSerializer):
    """Disciplines API: serializes every field of the Disciplines model."""
    class Meta:
        fields = '__all__'
        model = Disciplines
| StarcoderdataPython |
5029880 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf.urls import *
from .views import like_set
urlpatterns = [
    # Route /like/ to the like_set view.
    url(r'^like/$', like_set, name="like_set"),
]
# Bug fix: stray dataset-residue tokens ("| StarcoderdataPython") were fused
# onto the closing bracket, turning the list into an invalid expression.
11344864 | import os
from pathlib import Path
from urllib.parse import urljoin
from datetime import datetime
from django.http import Http404
from django.core.exceptions import PermissionDenied
from django.utils.translation import ugettext_lazy as _
from django.http import JsonResponse
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from .forms import UploadFileForm
from PIL import Image
from django.conf import settings
suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
def humansize(nbytes):
    """Format a byte count as a human-readable string, e.g. 1536 -> '1.5 KB'."""
    size = nbytes
    unit = 0
    # Scale down by 1024 until the value fits the current unit (capped at PB).
    while size >= 1024 and unit < len(suffixes) - 1:
        size /= 1024.
        unit += 1
    # Trim trailing zeros and a dangling decimal point: '1.50' -> '1.5'.
    text = ('%.2f' % size).rstrip('0').rstrip('.')
    return f'{text} {suffixes[unit]}'
class NoImageException(Exception):
    """Raised when an uploaded file cannot be parsed as an image by PIL."""
    pass
class ImageTooLargeException(Exception):
    """Raised when an upload exceeds the configured size limit."""
    def __init__(self, fileSize):
        # Human-readable size of the rejected file, e.g. '4.2 MB'.
        self.fileSize = fileSize
def image_verify(f):
    """Raise NoImageException when *f* is not a readable image file.

    Uses PIL's ``verify()``, which parses the file without decoding pixels.
    NOTE(review): ``verify()`` consumes the file object — callers that reuse
    *f* afterwards rely on the upload stream being rewound/reopened; confirm.
    """
    try:
        Image.open(f).verify()
    except IOError:
        raise NoImageException
# Check the image file size and raise exception if too big
def check_image_size(f):
    """Raise ImageTooLargeException (carrying the human-readable size) when
    *f* exceeds ``settings.CKEDITOR5_MAX_FILE_SIZE`` bytes."""
    if f.size > settings.CKEDITOR5_MAX_FILE_SIZE:
        image_size = humansize(f.size)
        raise ImageTooLargeException(image_size)
def handle_uploaded_file(f):
    """Save an uploaded file under MEDIA_ROOT/<folder>/<YYYY-M>/ and return its URL.

    The storage backend deduplicates filenames, so the returned URL uses the
    actual saved name.  NOTE: the month is not zero-padded ("2022-1"), but the
    same string is used for both the directory and the URL, so they agree.
    """
    folder = getattr(settings, 'CKEDITOR_5_UPLOADS_FOLDER', 'django_ckeditor_5')
    year_and_month = "%s-%s" % (datetime.today().year, datetime.today().month)
    uploads_path = Path(settings.MEDIA_ROOT, folder, year_and_month)
    fs = FileSystemStorage(location=uploads_path)
    filename = fs.save(f.name, f)
    # Bug fix: the original concatenated the folder URL and
    # "<year-month>/<filename>" with ''.join(...), producing
    # ".../django_ckeditor_52022-1/..." (no separator) whenever the folder
    # name has no trailing slash.  Join every segment with exactly one '/'.
    base_url = urljoin(fs.base_url, folder).rstrip('/')
    return '/'.join([base_url, year_and_month, filename])
def upload_file(request):
    """Handle a CKEditor image-upload POST and return JSON with the file URL.

    Permission: ``CKEDITOR_5_UPLOAD_PERMISSION`` when configured, otherwise
    staff status.  Validation errors are reported as CKEditor-style
    ``{"error": {"message": ...}}`` JSON; anything else 404s.

    NOTE(review): assumes the multipart field is named 'upload' (CKEditor's
    default) — a missing field raises KeyError before validation.
    NOTE(review): the size-error message hardcodes "3MB" while the actual
    limit comes from ``settings.CKEDITOR5_MAX_FILE_SIZE``.
    """
    has_perm = request.user.has_perm(settings.CKEDITOR_5_UPLOAD_PERMISSION) if hasattr(settings, 'CKEDITOR_5_UPLOAD_PERMISSION') else request.user.is_staff
    if request.method == 'POST' and has_perm: #request.user.is_staff:
        form = UploadFileForm(request.POST, request.FILES)
        # Reject anything PIL cannot parse as an image.
        try:
            image_verify(request.FILES['upload'])
        except NoImageException as ex:
            return JsonResponse({
                "error": {
                    "message": "{}".format(str(ex))
                }
            })
        # Enforce the configured maximum upload size.
        try:
            check_image_size(request.FILES['upload'])
        except ImageTooLargeException as ex:
            return JsonResponse({
                "error": {
                    "message": "Image must be under 3MB, it is currently {}".format(ex.fileSize)
                }
            })
        if form.is_valid():
            url = handle_uploaded_file(request.FILES['upload'])
            return JsonResponse({'url': url,})
    else:
        return JsonResponse({
            "error": {
                "message": "You do not have the permission to upload files."
            }
        })
    # Reached when the form is invalid (and for non-POST requests with permission).
    raise Http404(_('Page not found.'))
def delete_file(request, path):
    """Delete an uploaded file from disk (permission holders / staff only).

    :param path: filesystem path of the file to remove, taken from the URL.
    :raises Http404: when no file exists at *path*.
    :raises PermissionDenied: when the caller lacks permission or *path*
        resolves outside ``MEDIA_ROOT``.
    """
    has_perm = request.user.has_perm(settings.CKEDITOR_5_UPLOAD_PERMISSION) if hasattr(settings, 'CKEDITOR_5_UPLOAD_PERMISSION') else request.user.is_staff
    if request.method == 'POST' and has_perm:
        # Security fix: ``path`` is attacker-controlled (it comes straight
        # from the URL), so an unchecked os.remove() allowed deleting
        # arbitrary files via path traversal (e.g. "../../etc/passwd").
        # Resolve symlinks and require the target to live inside MEDIA_ROOT.
        media_root = os.path.realpath(settings.MEDIA_ROOT)
        real_path = os.path.realpath(path)
        try:
            inside_media = os.path.commonpath([media_root, real_path]) == media_root
        except ValueError:
            # Different drives / mixed absolute-relative paths (Windows).
            inside_media = False
        if not inside_media:
            raise PermissionDenied("You have no permission to access this site.")
        if os.path.isfile(real_path):
            os.remove(real_path)
            return JsonResponse({
                "image_delete": {
                    "success": True,
                }
            })
        else:
            raise Http404(_('No file matching the path found on server.'))
    else:
        raise PermissionDenied("You have no permission to access this site.")
| StarcoderdataPython |
def binary_search(user_list, item):
    """Binary search for *item* in a sorted list.

    The list must be sorted in ascending order.  Each iteration halves the
    remaining search interval, so the lookup runs in O(log n).

    :param user_list: sorted list to search
    :param item: value to locate
    :return: index of *item* in *user_list*, or ``None`` when it is absent
    """
    lo = 0                       # lower bound of the current interval
    hi = len(user_list) - 1      # upper bound of the current interval
    while lo <= hi:              # until the interval is exhausted...
        middle = (lo + hi) // 2  # probe the midpoint
        candidate = user_list[middle]
        if candidate == item:
            return middle        # found it
        # Discard the half that cannot contain the item.
        if candidate > item:
            hi = middle - 1      # too big: keep the left half
        else:
            lo = middle + 1      # too small: keep the right half
    return None                  # not present
if __name__ == '__main__':
    # Demo: search a sorted list and print the resulting index.
    my_list = [1, 3, 5, 7, 9, 17, 19]
    print(binary_search(my_list, 9))  # -> 4
    print(binary_search(my_list, 2))  # -> None, i.e. the element was not found
| StarcoderdataPython |
6496135 | import sys
import importlib
from .backend import backend
__all__ = ["get_registry", "load_models"]
def _gen_missing_model(model, backend):
def _missing_model(*args, **kwargs):
raise ImportError(f"model {model} is not supported by '{backend}'."
" You can switch to other backends by setting"
" the 'graphgallery.set_backend()'.")
return _missing_model
def get_registry(mapping, backend_name=None):
    """Return the model registry for a backend.

    :param mapping: dict of backend abbreviation -> model registry.
    :param backend_name: backend to resolve; ``None`` means the current one.
    :raises RuntimeError: when *mapping* has no entry for that backend.
    """
    _backend = backend(backend_name)
    registry = mapping.get(_backend.abbr, None)
    if registry is None:
        raise RuntimeError(f"Currently {_backend} is not supported for this module.")
    return registry
def load_models(package, mapping, backend_name=None, sub_module=None):
    """Import the backend-specific submodule and re-export its models.

    Resolves the registry for the active backend, imports the relative
    module ``.<sub_module>.<backend-abbr>`` (or ``.<backend-abbr>``) inside
    *package*, then attaches every registered model class directly onto the
    *package* module so callers can import models from the parent package.
    """
    _backend = backend(backend_name)
    thismod = sys.modules[package]
    registry = get_registry(mapping, _backend)
    if sub_module:
        module_path = f".{sub_module}.{_backend.abbr}"
    else:
        module_path = f".{_backend.abbr}"
    # module_path is relative to *package*, e.g. importing
    # ".nodeclas.torch" relative to "graphgallery.gallery".
    importlib.import_module(module_path, package)
    # Make each backend model importable straight from the parent package.
    for model, model_class in registry.items():
        setattr(thismod, model, model_class)
| StarcoderdataPython |
4978902 | <filename>est8/backend/definitions.py
from dataclasses import dataclass
from enum import Enum, auto
from random import choice, shuffle
from typing import Tuple, Dict, Iterable, Optional, Generator
class ActionEnum(Enum):
    """Enum of possible card actions.

    NOTE(review): names appear to mirror the Welcome To... actions (bis,
    fence, park, real-estate investment, pool, temp agency) — confirm.
    """
    bis = auto()
    fence = auto()
    park = auto()
    invest = auto()
    pool = auto()
    temp = auto()
@dataclass(frozen=True)
class CardDefinition:
    """Immutable description of a single card: a house number plus an action."""
    number: int
    action: ActionEnum
@dataclass
class CardPair:
    """A face-up pair drawn each turn: one number card and one action card."""
    number_card: Optional[CardDefinition] = None
    action_card: Optional[CardDefinition] = None
@dataclass(frozen=True)
class DeckDefinition:
    """The card numbers printed for each action type; together, the full deck."""
    bis_numbers: Tuple[int, ...]
    fence_numbers: Tuple[int, ...]
    park_numbers: Tuple[int, ...]
    invest_numbers: Tuple[int, ...]
    pool_numbers: Tuple[int, ...]
    temp_agency_numbers: Tuple[int, ...]

    @classmethod
    def default(cls) -> "DeckDefinition":
        """The standard deck composition."""
        return cls(
            bis_numbers=(3, 4, 6, 7, 8, 9, 10, 12, 13),
            fence_numbers=(1, 2, 3, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 11, 13, 14, 15),
            park_numbers=(1, 2, 4, 5, 5, 6, 7, 7, 8, 8, 9, 9, 10, 11, 11, 12, 14, 15),
            invest_numbers=(1, 2, 4, 5, 5, 6, 7, 7, 8, 8, 9, 9, 10, 11, 11, 12, 14, 15),
            pool_numbers=(3, 4, 6, 7, 8, 9, 10, 12, 13),
            temp_agency_numbers=(3, 4, 6, 7, 8, 8, 9, 10, 12, 13),
        )

    @property
    def deck_size(self) -> int:
        """Total number of cards across all action types."""
        return sum(
            (
                len(self.bis_numbers),
                len(self.fence_numbers),
                len(self.park_numbers),
                len(self.invest_numbers),
                len(self.pool_numbers),
                len(self.temp_agency_numbers),
            )
        )

    def ordered_card_generator(self) -> Generator[CardDefinition, None, None]:
        """Yield every card of the deck once, grouped by action type."""
        for number in self.bis_numbers:
            yield CardDefinition(number=number, action=ActionEnum.bis)
        for number in self.fence_numbers:
            yield CardDefinition(number=number, action=ActionEnum.fence)
        for number in self.park_numbers:
            yield CardDefinition(number=number, action=ActionEnum.park)
        for number in self.pool_numbers:
            yield CardDefinition(number=number, action=ActionEnum.pool)
        for number in self.invest_numbers:
            yield CardDefinition(number=number, action=ActionEnum.invest)
        for number in self.temp_agency_numbers:
            yield CardDefinition(number=number, action=ActionEnum.temp)

    def random_card_generator(
        self, no_reshuffle_last_n: int = 0
    ) -> Generator[CardDefinition, None, None]:
        """
        A generator that returns each defined card in a random order.
        When trying to draw more cards than there are in the deck, all of the cards are
        shuffled again and then more are picked.
        :param no_reshuffle_last_n: Number of cards that were last drawn to not re-shuffle into
            the deck. This simulates behaviour of leaving cards on the table while reshuffling
            the rest.
        """
        all_cards = list(self.ordered_card_generator())
        last_n_cards = []
        while True:
            # Deal out the current deck in a random order.
            shuffle(all_cards)
            for card in all_cards:
                yield card
            # Return the previously held-back cards to the front of the deck.
            all_cards = last_n_cards + all_cards
            # Bug fix: when no_reshuffle_last_n == 0 (the default) the
            # original computed all_cards[-0:], i.e. a copy of the ENTIRE
            # deck, so every reshuffle re-added a full deck copy and the deck
            # doubled in size each cycle.  Only hold cards back when n > 0.
            if no_reshuffle_last_n > 0:
                last_n_cards = all_cards[-no_reshuffle_last_n:]
                all_cards = all_cards[:len(all_cards) - no_reshuffle_last_n]
            else:
                last_n_cards = []
@dataclass(frozen=True)
class StreetDefinition:
    """Layout of one street: house count, pool-capable plots, park score table."""
    num_houses: int
    pool_locations: Tuple[int, ...]  # plot indexes that may hold a pool
    park_scoring: Tuple[int, ...]    # score indexed by number of parks built
    def can_have_pool_at(self, plot_no: int) -> bool:
        """True when the plot at *plot_no* is allowed to build a pool."""
        return plot_no in self.pool_locations
    def park_score(self, num_parks_built: int) -> int:
        """Score for *num_parks_built* parks, clamped to the last table entry."""
        last_entry = len(self.park_scoring) - 1
        index = num_parks_built if num_parks_built < last_entry else last_entry
        return self.park_scoring[index]
@dataclass(frozen=True)
class NeighbourhoodDefinition:
    """The player board: a fixed tuple of streets."""
    streets: Tuple[StreetDefinition, ...]
    @classmethod
    def default(cls) -> "NeighbourhoodDefinition":
        """Standard three-street board with 10/11/12 house plots."""
        return cls(
            streets=(
                StreetDefinition(
                    num_houses=10, pool_locations=(2, 6, 7), park_scoring=(0, 2, 4, 10),
                ),
                StreetDefinition(
                    num_houses=11,
                    pool_locations=(0, 3, 7),
                    park_scoring=(0, 2, 4, 6, 14),
                ),
                StreetDefinition(
                    num_houses=12,
                    pool_locations=(1, 6, 10),
                    park_scoring=(0, 2, 4, 6, 8, 18),
                ),
            )
        )
    def can_have_pool_at(self, street_no: int, plot_no: int) -> bool:
        """True when street *street_no* allows a pool at *plot_no*.

        Out-of-range street numbers return False rather than raising.
        """
        if street_no >= len(self.streets) or street_no < 0:
            return False
        return self.streets[street_no].can_have_pool_at(plot_no)
@dataclass(frozen=True)
class InvestDefinition:
    """Estate value table: estate size -> value for each investment level."""
    map: Dict[int, Tuple[int, ...]]
    @classmethod
    def default(cls) -> "InvestDefinition":
        """Standard value table for estates of size 1-6."""
        return cls(
            map={
                1: (1, 3),
                2: (2, 3, 4),
                3: (3, 4, 5, 6),
                4: (4, 5, 6, 7, 8),
                5: (5, 6, 7, 8, 10),
                6: (6, 7, 8, 10, 12),
            }
        )
    def get_estate_value(self, estate_size: int, investment_level: int) -> int:
        """Value of an estate of *estate_size*, clamping *investment_level* to the table."""
        values = self.map[estate_size]
        level = min(investment_level, len(values) - 1)
        return values[level]
@dataclass(frozen=True)
class ScoringDefinition:
    """
    Definition of global scoring mechanisms.
    NB: per-street scoring handled by the street definition.
    """
    bis: Tuple[int, ...]             # penalty indexed by number of bis cards used
    invest: InvestDefinition
    permit_refusal: Tuple[int, ...]  # penalty indexed by number of permit refusals
    pool: Tuple[int, ...]            # bonus indexed by number of pools built
    roundabout: Tuple[int, ...]      # penalty indexed by number of roundabouts
    temp_agency: Tuple[int, ...]     # podium bonuses for the most temps used
    @classmethod
    def default(cls) -> "ScoringDefinition":
        """Standard scoring tables."""
        return ScoringDefinition(
            bis=(0, -1, -3, -6, -9, -12, -16, -20, -24, -28),
            invest=InvestDefinition.default(),
            permit_refusal=(0, 0, -3, -5),
            pool=(0, 3, 6, 9, 13, 17, 21, 26, 31, 36),
            roundabout=(0, -3, -8),
            temp_agency=(7, 4, 1),
        )
    def bis_score(self, num_biss: int) -> int:
        """Penalty for *num_biss* bis cards (clamped to the table)."""
        return self.bis[min(num_biss, len(self.bis) - 1)]
    def permit_refusal_score(self, num_permit_refusals: int) -> int:
        """Penalty for refused permits (clamped to the table)."""
        return self.permit_refusal[
            min(num_permit_refusals, len(self.permit_refusal) - 1)
        ]
    def pool_score(self, num_pools: int) -> int:
        """Bonus for built pools (clamped to the table)."""
        return self.pool[min(num_pools, len(self.pool) - 1)]
    def roundabouts_score(self, num_roundabouts: int) -> int:
        """Penalty for roundabouts (clamped to the table)."""
        return self.roundabout[min(num_roundabouts, len(self.roundabout) - 1)]
    def investment_score(
        self, estates: Iterable[int], investments: Dict[int, int]
    ) -> int:
        """Total value of all estates given per-size investment levels.

        *estates* lists the size of each completed estate; *investments* maps
        estate size -> investment level (sizes missing there default to 0).
        """
        estate_values: Dict[int, int] = {}
        # Build up map of estate size worth
        for estate_size in self.invest.map.keys():
            estate_values[estate_size] = self.invest.get_estate_value(
                estate_size, investments.get(estate_size, 0)
            )
        # Now sum up estate values.
        total = 0
        for estate in estates:
            total += estate_values[estate]
        return total
    def temp_agency_score(
        self, all_players_temps: Tuple[int, ...], player_temps: int
    ) -> int:
        """Podium bonus for temp-agency usage, with friendly ties.

        Distinct temp counts are ranked descending; tied players share the
        same podium position.  Assumes *player_temps* appears in
        *all_players_temps* — otherwise ``.index`` raises ValueError.
        """
        # Have to use at least one temp to score anything.
        if player_temps == 0:
            return 0
        # Make list of num temps for each podium position, allowing friendly ties.
        reduced_sorted_all_players_temps = sorted(set(all_players_temps), reverse=True)
        podium_position = reduced_sorted_all_players_temps.index(player_temps)
        if podium_position < len(self.temp_agency):
            return self.temp_agency[podium_position]
        return 0
@dataclass(frozen=True)
class PlanDefinition:
    """A single city-plan card.

    NOTE(review): ``points`` appears to be the (early, late) completion
    scores, judging by the default decks (6,2)/(8,3)/(11,5) — confirm.
    """
    points: Tuple[int, int]
@dataclass(frozen=True)
class PlanDeckDefinition:
    """The three tiers of city-plan cards to draw from."""
    no_1: Tuple[PlanDefinition, ...]
    no_2: Tuple[PlanDefinition, ...]
    no_3: Tuple[PlanDefinition, ...]
    @classmethod
    def default(cls) -> "PlanDeckDefinition":
        """Standard plan decks (one card per tier)."""
        return cls(
            no_1=(PlanDefinition((6, 2)),),
            no_2=(PlanDefinition((8, 3)),),
            no_3=(PlanDefinition((11, 5)),),
        )
    def pick_3(self) -> Tuple[PlanDefinition, PlanDefinition, PlanDefinition]:
        """Draw one random plan from each tier."""
        return choice(self.no_1), choice(self.no_2), choice(self.no_3)
@dataclass(frozen=True)
class GameDefinition:
    """Top-level bundle of every rule definition for one game."""
    neighbourhood: NeighbourhoodDefinition
    scoring: ScoringDefinition
    deck: DeckDefinition
    plans: Tuple[PlanDefinition, PlanDefinition, PlanDefinition]
    num_cards_drawn_at_once: int = 3  # number of face-up card pairs per turn
    @classmethod
    def default(cls) -> "GameDefinition":
        """Standard rules with three randomly chosen plans."""
        return cls(
            neighbourhood=NeighbourhoodDefinition.default(),
            scoring=ScoringDefinition.default(),
            deck=DeckDefinition.default(),
            plans=PlanDeckDefinition.default().pick_3(),
        )
    def can_have_pool_at(self, street_no: int, plot_no: int) -> bool:
        """Delegate pool-placement checks to the neighbourhood definition."""
        return self.neighbourhood.can_have_pool_at(street_no, plot_no)
    @property
    def max_roundabouts(self) -> int:
        """Largest roundabout count the scoring table covers."""
        return len(self.scoring.roundabout) - 1
    def max_investments_in_estate_size(self, estate_size: int) -> int:
        """Highest investment level defined for estates of *estate_size*."""
        return len(self.scoring.invest.map[estate_size]) - 1
    def generate_card_pairs(self) -> Generator[Tuple[CardPair, ...], None, None]:
        """
        Generate tuples of CardPairs representing the deck being drawn from.
        The number card of the pair is used as the action card in the next pair.
        """
        random_card_gen = self.deck.random_card_generator()
        def next_n_cards() -> Tuple[CardDefinition, ...]:
            # Draw one batch of num_cards_drawn_at_once cards.
            return tuple(
                (next(random_card_gen) for _ in range(self.num_cards_drawn_at_once))
            )
        action_cards = next_n_cards()
        while True:
            number_cards = next_n_cards()
            yield tuple(
                (
                    CardPair(number_card=number_cards[i], action_card=action_cards[i])
                    for i in range(self.num_cards_drawn_at_once)
                )
            )
            # This turn's number cards become next turn's action cards.
            action_cards = number_cards
| StarcoderdataPython |
1935627 | from copy import deepcopy
from pystac.item import (Item, Asset)
from pystac import STACError
class EOItem(Item):
    """EOItem represents a snapshot of the earth for a single date and time.
    Args:
        id (str): Provider identifier. Must be unique within the STAC.
        geometry (dict): Defines the full footprint of the asset represented by this item,
            formatted according to `RFC 7946, section 3.1 (GeoJSON)
            <https://tools.ietf.org/html/rfc7946>`_.
        bbox (List[float]): Bounding Box of the asset represented by this item using
            either 2D or 3D geometries. The length of the array must be 2*n where n is the
            number of dimensions.
        datetime (Datetime): Datetime associated with this item.
        properties (dict): A dictionary of additional metadata for the item.
        gsd (float): Ground Sample Distance at the sensor.
        platform (str): Unique name of the specific platform to which the instrument is attached.
        instrument (str): Name of instrument or sensor used (e.g., MODIS, ASTER, OLI, Canon F-1).
        bands (List[Band]): This is a list of :class:`~pystac.Band` objects that represent
            the available bands.
        constellation (str): Optional name of the constellation to which the platform belongs.
        epsg (int): Optional `EPSG code <http://www.epsg-registry.org/>`_.
        cloud_cover (float): Optional estimate of cloud cover as a percentage (0-100) of the
            entire scene. If not available the field should not be provided.
        off_nadir (float): Optional viewing angle. The angle from the sensor between
            nadir (straight down) and the scene center. Measured in degrees (0-90).
        azimuth (float): Optional viewing azimuth angle. The angle measured from the
            sub-satellite point (point on the ground below the platform) between the
            scene center and true north. Measured clockwise from north in degrees (0-360).
        sun_azimuth (float): Optional sun azimuth angle. From the scene center point on
            the ground, this is the angle between truth north and the sun. Measured clockwise
            in degrees (0-360).
        sun_elevation (float): Optional sun elevation angle. The angle from the tangent of
            the scene center point to the sun. Measured from the horizon in degrees (0-90).
        stac_extensions (List[str]): Optional list of extensions the Item implements.
        href (str or None): Optional HREF for this item, which be set as the item's
            self link's HREF.
        collection (Collection or str): The Collection or Collection ID that this item
            belongs to.
    Attributes:
        id (str): Provider identifier. Unique within the STAC.
        geometry (dict): Defines the full footprint of the asset represented by this item,
            formatted according to `RFC 7946, section 3.1 (GeoJSON)
            <https://tools.ietf.org/html/rfc7946>`_.
        bbox (List[float]): Bounding Box of the asset represented by this item using
            either 2D or 3D geometries. The length of the array is 2*n where n is the
            number of dimensions.
        datetime (Datetime): Datetime associated with this item.
        properties (dict): A dictionary of additional metadata for the item.
        stac_extensions (List[str] or None): Optional list of extensions the Item implements.
        collection (Collection or None): Collection that this item is a part of.
        gsd (float): Ground Sample Distance at the sensor.
        platform (str): Unique name of the specific platform to which the instrument is attached.
        instrument (str): Name of instrument or sensor used (e.g., MODIS, ASTER, OLI, Canon F-1).
        bands (List[Band]): This is a list of :class:`~pystac.Band` objects that represent
            the available bands.
        constellation (str or None): Name of the constellation to which the platform belongs.
        epsg (int or None): `EPSG code <http://www.epsg-registry.org/>`_.
        cloud_cover (float or None): Estimate of cloud cover as a percentage (0-100) of the
            entire scene. If not available the field should not be provided.
        off_nadir (float or None): Viewing angle. The angle from the sensor between
            nadir (straight down) and the scene center. Measured in degrees (0-90).
        azimuth (float or None): Viewing azimuth angle. The angle measured from the
            sub-satellite point (point on the ground below the platform) between the
            scene center and true north. Measured clockwise from north in degrees (0-360).
        sun_azimuth (float or None): Sun azimuth angle. From the scene center point on
            the ground, this is the angle between truth north and the sun. Measured clockwise
            in degrees (0-360).
        sun_elevation (float or None): Sun elevation angle. The angle from the tangent of
            the scene center point to the sun. Measured from the horizon in degrees (0-90).
        links (List[Link]): A list of :class:`~pystac.Link` objects representing
            all links associated with this STACObject.
        assets (Dict[str, Asset]): Dictionary of asset objects that can be downloaded,
            each with a unique key.
        collection_id (str or None): The Collection ID that this item belongs to, if any.
    """
    # Attribute names that the EO extension serializes under the "eo:" prefix.
    _EO_FIELDS = [
        'gsd', 'platform', 'instrument', 'bands', 'constellation', 'epsg',
        'cloud_cover', 'off_nadir', 'azimuth', 'sun_azimuth', 'sun_elevation'
    ]
    @staticmethod
    def _eo_key(key):
        """Prefix *key* with the 'eo:' extension namespace."""
        return 'eo:{}'.format(key)
    def __init__(self,
                 id,
                 geometry,
                 bbox,
                 datetime,
                 properties,
                 gsd,
                 platform,
                 instrument,
                 bands,
                 constellation=None,
                 epsg=None,
                 cloud_cover=None,
                 off_nadir=None,
                 azimuth=None,
                 sun_azimuth=None,
                 sun_elevation=None,
                 stac_extensions=None,
                 href=None,
                 collection=None):
        if stac_extensions is None:
            stac_extensions = []
        # Ensure the EO extension is always declared on the item.
        if 'eo' not in stac_extensions:
            stac_extensions.append('eo')
        super().__init__(id, geometry, bbox, datetime, properties,
                         stac_extensions, href, collection)
        self.gsd = gsd
        self.platform = platform
        self.instrument = instrument
        self.bands = bands
        self.constellation = constellation
        self.epsg = epsg
        self.cloud_cover = cloud_cover
        self.off_nadir = off_nadir
        self.azimuth = azimuth
        self.sun_azimuth = sun_azimuth
        self.sun_elevation = sun_elevation
    def __repr__(self):
        return '<EOItem id={}>'.format(self.id)
    @classmethod
    def from_dict(cls, d, href=None, root=None):
        """Constructs an EOItem from a JSON dict by deserializing a plain
        Item first and then promoting it via :meth:`from_item`."""
        item = Item.from_dict(d, href=href, root=root)
        return cls.from_item(item)
    @classmethod
    def from_item(cls, item):
        """Creates an EOItem from an Item.
        Args:
            item (Item): The Item to create an EOItem from.
        Returns:
            EOItem: A new EOItem from item. If the item
                item is already an EOItem, simply returns a clone of item.
        """
        if isinstance(item, EOItem):
            return item.clone()
        eo_params = {}
        # Move the eo:-prefixed fields OUT of the generic properties dict and
        # into constructor kwargs.  Note this mutates item.properties (pop).
        for eof in EOItem._EO_FIELDS:
            eo_key = EOItem._eo_key(eof)
            if eo_key in item.properties.keys():
                if eof == 'bands':
                    # Band dicts become Band objects.
                    eo_params[eof] = [
                        Band.from_dict(b) for b in item.properties.pop(eo_key)
                    ]
                else:
                    eo_params[eof] = item.properties.pop(eo_key)
            elif eof in ('gsd', 'platform', 'instrument', 'bands'):
                # These four fields are required by the EO extension.
                raise STACError(
                    "Missing required field '{}' in properties".format(eo_key))
        # Normalize an emptied properties dict to None.
        if not any(item.properties):
            item.properties = None
        e = cls(id=item.id,
                geometry=item.geometry,
                bbox=item.bbox,
                datetime=item.datetime,
                properties=item.properties,
                stac_extensions=item.stac_extensions,
                collection=item.collection_id,
                **eo_params)
        e.links = item.links
        e.assets = item.assets
        # Promote any asset carrying eo:bands info to an EOAsset.
        for k, v in item.assets.items():
            if EOAsset.is_eo_asset(v):
                e.assets[k] = EOAsset.from_asset(v)
                e.assets[k].set_owner(e)
        return e
    def get_eo_assets(self):
        """Gets the assets of this item that are :class:`~pystac.EOAsset` s.
        Returns:
            Dict[EOAsset]: This item's assets, subsetted to only include EOAssets.
        """
        return {k: v for k, v in self.assets.items() if isinstance(v, EOAsset)}
    def add_asset(self, key, asset):
        """Adds an Asset to this item. If this Asset contains band information
        in it's properties, converts the Asset to an :class:`~pystac.EOAsset`.
        Args:
            key (str): The unique key of this asset.
            asset (Asset): The Asset to add.
        """
        if asset.properties is not None and 'eo:bands' in asset.properties:
            asset = EOAsset.from_asset(asset)
        return super().add_asset(key, asset)
    # @staticmethod
    # def from_file(href):
    #     """Reads an EOItem from a file.
    #     Args:
    #         href (str): The HREF to read the item from.
    #     Returns:
    #         EOItem: EOItem that was read from the given file.
    #     """
    #     return EOItem.from_item(Item.from_file(href))
    def clone(self):
        """Clone this EOItem.

        Round-trips the EO fields: they are written back into the plain
        clone's properties dict and re-extracted by ``from_item``.
        """
        c = super(EOItem, self).clone()
        self._add_eo_fields_to_dict(c.properties)
        return EOItem.from_item(c)
    def to_dict(self, include_self_link=True):
        """Serialize this EOItem to a JSON dict, re-inserting the EO fields
        under 'eo:'-prefixed keys in ``properties``."""
        d = super().to_dict(include_self_link=include_self_link)
        if 'properties' not in d.keys():
            d['properties'] = {}
        self._add_eo_fields_to_dict(d['properties'])
        return deepcopy(d)
    def _add_eo_fields_to_dict(self, d):
        """Write every non-None EO attribute into *d* under its 'eo:' key
        (bands are serialized to dicts); missing attributes are skipped."""
        for eof in EOItem._EO_FIELDS:
            try:
                a = getattr(self, eof)
                if a is not None:
                    d[EOItem._eo_key(eof)] = a
                    if eof == 'bands':
                        d['eo:bands'] = [b.to_dict() for b in d['eo:bands']]
            except AttributeError:
                # Tolerate partially-initialized items.
                pass
class EOAsset(Asset):
    """An Asset that contains band information via a bands property that is an array of 0
    based indexes to the correct band object on the owning EOItem.
    Args:
        href (str): Link to the asset object. Relative and absolute links are both allowed.
        bands (List[int]): Lists the band names available in the asset.
        title (str): Optional displayed title for clients and users.
        media_type (str): Optional description of the media type. Registered Media Types
            are preferred. See :class:`~pystac.MediaType` for common media types.
        properties (dict): Optional, additional properties for this asset.
    Attributes:
        href (str): Link to the asset object. Relative and absolute links are both allowed.
        bands (List[int]): Lists the band names available in the asset.
        title (str): Optional displayed title for clients and users.
        media_type (str): Optional description of the media type. Registered Media Types
            are preferred. See :class:`~pystac.MediaType` for common media types.
        properties (dict): Optional, additional properties for this asset. This is used by
            extensions as a way to serialize and deserialize properties on asset
            object JSON.
        item (Item or None): The Item this asset belongs to.
    """
    def __init__(self,
                 href,
                 bands,
                 title=None,
                 media_type=None,
                 properties=None):
        super().__init__(href, title, media_type, properties)
        self.bands = bands
    @staticmethod
    def is_eo_asset(asset):
        """Method for checking if an Asset represents an EOAsset.
        Args:
            asset (Asset): The asset to check.
        Returns:
            bool: True if the asset is an instance of EOAsset, or if
                the asset contains eo:band information in it's properties.
        """
        if isinstance(asset, EOAsset):
            return True
        return asset.properties is not None and \
            'eo:bands' in asset.properties
    @staticmethod
    def from_dict(d):
        """Constructs an EOAsset from a dict.
        Returns:
            EOAsset: The EOAsset deserialized from the JSON dict.
        """
        asset = Asset.from_dict(d)
        return EOAsset.from_asset(asset)
    @classmethod
    def from_asset(cls, asset):
        """Constructs an EOAsset from an Asset.
        Returns:
            EOAsset: The EOAsset created from this asset. If the asset is
                already an EOAsset, will return a clone.
        Raises:
            :class:`~pystac.STACError`: Raised if no band information is in the properties
                of asset.
        """
        a = asset.clone()
        if isinstance(a, EOAsset):
            return a
        if not a.properties or 'eo:bands' not in a.properties.keys():
            raise STACError('Missing eo:bands property in asset.')
        # Band indexes move from the generic properties dict onto the asset.
        bands = a.properties.pop('eo:bands')
        properties = None
        if any(a.properties):
            properties = a.properties
        return cls(a.href, bands, a.title, a.media_type, properties)
    def to_dict(self):
        """Generate a dictionary representing the JSON of this EOAsset.
        Returns:
            dict: A serialization of the EOAsset that can be written out as JSON.
        """
        d = super().to_dict()
        d['eo:bands'] = self.bands
        return d
    def clone(self):
        # NOTE: the bands list object is shared with the clone (shallow copy).
        return EOAsset(href=self.href,
                       title=self.title,
                       media_type=self.media_type,
                       bands=self.bands,
                       properties=self.properties)
    def __repr__(self):
        return '<EOAsset href={}>'.format(self.href)
    def get_bands(self):
        """Returns the band information from the owning item for the bands referenced
        by this EOAsset.
        Returns:
            List[Band]: The band information from the owning item for each band that
                is represented by this EOAsset's :attr:`~pystac.EOAsset.bands`.
        Raises:
            :class:`~pystac.STACError`: Raised if this asset is not currently
                associated with an item.
        """
        if not self.item:
            raise STACError('Asset is currently not associated with an item.')
        return [self.item.bands[i] for i in self.bands]
class Band:
    """Band metadata attached to an EOItem (STAC EO extension).

    Every attribute is optional and mirrors one key of the serialized band
    object; only truthy attributes are written back out by :meth:`to_dict`.

    Attributes:
        name (str): The name of the band (e.g., "B01", "B1", "QA").
        common_name (str): Interoperable common name used to search for bands
            across instruments (see the STAC EO list of accepted common names).
        description (str): Description to fully explain the band.
        gsd (float): Ground Sample Distance in meters.
        accuracy (float): Expected geolocation error in meters on the ground.
        center_wavelength (float): Center wavelength of the band, in micrometers.
        full_width_half_max (float): Band width measured at half the maximum
            transmission (FWHM), in micrometers.
    """
    def __init__(self,
                 name=None,
                 common_name=None,
                 description=None,
                 gsd=None,
                 accuracy=None,
                 center_wavelength=None,
                 full_width_half_max=None):
        self.name = name
        self.common_name = common_name
        self.description = description
        self.gsd = gsd
        self.accuracy = accuracy
        self.center_wavelength = center_wavelength
        self.full_width_half_max = full_width_half_max
    def __repr__(self):
        return '<Band name={}>'.format(self.name)
    @staticmethod
    def band_range(common_name):
        """Return the wavelength range for a recognized common band name.

        Args:
            common_name (str): One of the STAC EO accepted common band names.
        Returns:
            Tuple[float, float] or None: The (min, max) range in micrometers,
                or None when the name is not recognized.
        """
        ranges = {
            'coastal': (0.40, 0.45),
            'blue': (0.45, 0.50),
            'green': (0.50, 0.60),
            'red': (0.60, 0.70),
            'yellow': (0.58, 0.62),
            'pan': (0.50, 0.70),
            'rededge': (0.70, 0.75),
            'nir': (0.75, 1.00),
            'nir08': (0.75, 0.90),
            'nir09': (0.85, 1.05),
            'cirrus': (1.35, 1.40),
            'swir16': (1.55, 1.75),
            'swir22': (2.10, 2.30),
            'lwir': (10.5, 12.5),
            'lwir11': (10.5, 11.5),
            'lwir12': (11.5, 12.5)
        }
        return ranges.get(common_name)
    @staticmethod
    def band_description(common_name):
        """Return a human-readable description for a recognized common band name.

        Args:
            common_name (str): One of the STAC EO accepted common band names.
        Returns:
            str or None: A description including the wavelength range, or
                None when the name is not recognized.
        """
        wavelengths = Band.band_range(common_name)
        if wavelengths is None:
            return None
        return "Common name: {}, Range: {} to {}".format(
            common_name, wavelengths[0], wavelengths[1])
    @staticmethod
    def from_dict(d):
        """Constructs a Band from a dict.
        Returns:
            Band: The Band deserialized from the JSON dict.
        """
        return Band(name=d.get('name'),
                    common_name=d.get('common_name'),
                    description=d.get('description'),
                    gsd=d.get('gsd'),
                    accuracy=d.get('accuracy'),
                    center_wavelength=d.get('center_wavelength'),
                    full_width_half_max=d.get('full_width_half_max'))
    def to_dict(self):
        """Generate a dictionary representing the JSON of this Band.
        Returns:
            dict: A serialization of the Band that can be written out as JSON.
        """
        serialized = {}
        # Insert keys in the canonical order; falsy values are omitted.
        for key, value in (
            ('name', self.name),
            ('common_name', self.common_name),
            ('gsd', self.gsd),
            ('center_wavelength', self.center_wavelength),
            ('full_width_half_max', self.full_width_half_max),
            ('description', self.description),
            ('accuracy', self.accuracy),
        ):
            if value:
                serialized[key] = value
        return deepcopy(serialized)
| StarcoderdataPython |
3440039 | <filename>src/pyrin/database/bulk/component.py
# -*- coding: utf-8 -*-
"""
database bulk component module.
"""
from pyrin.application.decorators import component
from pyrin.application.structs import Component
from pyrin.database.bulk import DatabaseBulkPackage
from pyrin.database.bulk.manager import DatabaseBulkManager
@component(DatabaseBulkPackage.COMPONENT_NAME)
class DatabaseBulkComponent(Component, DatabaseBulkManager):
    """
    database bulk component class.

    Exposes `DatabaseBulkManager` as an application component registered under
    ``DatabaseBulkPackage.COMPONENT_NAME`` via the ``@component`` decorator.
    All behavior comes from the manager base class; nothing is overridden here.
    """
    pass
| StarcoderdataPython |
4947293 | <reponame>EliahKagan/old-practice-snapshot
#!/usr/bin/env python3
from itertools import chain
import re
EMPLOYEE_REGEX = re.compile(r'(?P<name>\w+)\W+(?P<salary>\d+)')


def read_employees():
    """Yield ``(name, salary)`` pairs parsed from the next two stdin lines."""
    input()  # first line carries the employee count, which we never need
    line = input()
    for m in EMPLOYEE_REGEX.finditer(line):
        yield m.group('name'), int(m.group('salary'))
def get_name(employee):
    """Sort key: extract the name from a ``(name, salary)`` pair."""
    name, _salary = employee
    return name
def get_salary(employee):
    """Sort key: extract the salary from a ``(name, salary)`` pair."""
    _name, salary = employee
    return salary
def print_nested(nested):
    """Print all elements of a nested iterable on one space-separated line."""
    flat = (str(item) for group in nested for item in group)
    print(' '.join(flat))
# One test case per iteration: order employees by salary, breaking ties by
# name (equivalent to the original pair of stable sorts: name, then salary).
for _case in range(int(input())):
    records = sorted(read_employees(),
                     key=lambda emp: (get_salary(emp), get_name(emp)))
    print_nested(records)
| StarcoderdataPython |
3458171 | <reponame>vishalbelsare/cameo
# Copyright 2014 Novo Nordisk Foundation Center for Biosustainability, DTU.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Multiprocess heuristic optimization.
Implementation of the islands model. The islands model consists of multiple processes running in parallel, where
each process runs an isolated heuristic optimization and is therefore called an island.
At certain time points, individuals in an island can migrate through a Queue. A migrating individual arrives in
another island and introduces variability into that island's population.
For information on how to run Gene Knockout or Reaction Knockout optimizations refer to
cameo.strain_design.heuristic.optimization
The result object is the same as in the single objective optimization. The knockouts solutions resulting from all
processes are merged.
Examples
--------
>>> from cameo import models
>>> from cameo.strain_design.heuristic.evolutionary.multiprocess import MultiprocessGeneKnockoutOptimization
>>> from cameo.strain_design.heuristic.evolutionary.objective_functions import biomass_product_coupled_yield
>>> import inspyred
>>>
>>> model = models.bigg.iJO1366
>>> objective_function = biomass_product_coupled_yield('BIOMASS_iJO1366_core_53p95M', 'EX_succ_e', 'EX_glc__D_e')
>>> opt = MultiprocessGeneKnockoutOptimization(model=model,
>>> objective_function=objective_function,
>>> heuristic_method=inspyred.ec.GA,
>>> max_migrants=1)
>>> result = opt.run()
"""
from .optimization import MultiprocessGeneKnockoutOptimization, MultiprocessReactionKnockoutOptimization
| StarcoderdataPython |
1887676 | """
Unit tests for vmc_distributed_firewall_rules execution module
"""
from unittest.mock import patch
import pytest
import saltext.vmware.modules.vmc_distributed_firewall_rules as vmc_distributed_firewall_rules
from saltext.vmware.utils import vmc_constants
@pytest.fixture
def distributed_firewall_rules_data_by_id(mock_vmc_request_call_api):
    """Fixture: one distributed-firewall-rule payload as returned by the VMC API.

    Wires the payload into the mocked ``vmc_request.call_api`` return value and
    yields it so tests can compare module output against it.
    """
    data = {
        "action": "DROP",
        "resource_type": "Rule",
        "id": "rule-id",
        "display_name": "DFR_001",
        "description": " comm entry",
        "path": "/infra/domains/cgw/security-policies/SP_1/rules/DFR_001",
        "relative_path": "DFR_001",
        "parent_path": "/infra/domains/cgw/security-policies/SP_1",
        "unique_id": "1026",
        "marked_for_delete": False,
        "overridden": False,
        "rule_id": 1026,
        "sequence_number": 1,
        "sources_excluded": False,
        "destinations_excluded": False,
        "source_groups": ["/infra/domains/cgw/groups/SG_CGW_001"],
        "destination_groups": ["ANY"],
        "services": ["ANY"],
        "profiles": ["ANY"],
        "logged": False,
        "scope": ["ANY"],
        "disabled": False,
        "direction": "IN_OUT",
        "ip_protocol": "IPV4_IPV6",
        "is_default": False,
        "_create_time": 1619101829635,
        "_create_user": "<EMAIL>",
        "_last_modified_time": 1619101829641,
        "_last_modified_user": "<EMAIL>",
        "_system_owned": False,
        "_protection": "NOT_PROTECTED",
        "_revision": 0,
    }
    mock_vmc_request_call_api.return_value = data
    yield data
@pytest.fixture
def distributed_firewall_rules_data(
    mock_vmc_request_call_api, distributed_firewall_rules_data_by_id
):
    """Fixture: a list-style API response wrapping the single-rule payload."""
    response = {
        "result_count": 1,
        "results": [distributed_firewall_rules_data_by_id],
    }
    mock_vmc_request_call_api.return_value = response
    yield response
def test_get_distributed_firewall_rules_should_return_api_response(distributed_firewall_rules_data):
    """get() should return the mocked API response unchanged."""
    result = vmc_distributed_firewall_rules.get(
        hostname="hostname",
        refresh_key="refresh_key",
        authorization_host="authorization_host",
        org_id="org_id",
        sddc_id="sddc_id",
        domain_id="domain_id",
        security_policy_id="security_policy_id",
        verify_ssl=False,
    )
    assert result == distributed_firewall_rules_data
def test_get_distributed_firewall_rules_called_with_url():
    """get() should issue a GET against the expected policy rules URL."""
    expected_url = (
        "https://hostname/vmc/reverse-proxy/api/orgs/org_id/sddcs/sddc_id/policy/api/"
        "v1/infra/domains/domain_id/security-policies/security_policy_id/rules"
    )
    with patch("saltext.vmware.utils.vmc_request.call_api", autospec=True) as api_mock:
        vmc_distributed_firewall_rules.get(
            hostname="hostname",
            refresh_key="refresh_key",
            authorization_host="authorization_host",
            org_id="org_id",
            sddc_id="sddc_id",
            domain_id="domain_id",
            security_policy_id="security_policy_id",
            verify_ssl=False,
        )
    # Inspect the keyword arguments of the first recorded call.
    call_kwargs = api_mock.mock_calls[0][-1]
    assert call_kwargs["url"] == expected_url
    assert call_kwargs["method"] == vmc_constants.GET_REQUEST_METHOD
@pytest.mark.parametrize(
    "actual_args, expected_params",
    [
        # all actual args are None
        (
            {},
            {},
        ),
        # all actual args have few param values
        (
            {"page_size": 2},
            {"page_size": 2},
        ),
        # all actual args have all possible params
        (
            {"cursor": "00012", "page_size": 2, "sort_by": ["action"], "sort_ascending": True},
            {"cursor": "00012", "page_size": 2, "sort_by": ["action"], "sort_ascending": True},
        ),
    ],
)
def test_assert_get_distributed_firewall_rules_should_correctly_filter_args(
    actual_args, expected_params
):
    """get() should forward only the optional pagination/sort args that were set."""
    # Required connection arguments, identical across all parametrized cases.
    common_actual_args = {
        "hostname": "hostname",
        "refresh_key": "refresh_key",
        "authorization_host": "authorization_host",
        "org_id": "org_id",
        "sddc_id": "sddc_id",
        "domain_id": "domain_id",
        "security_policy_id": "security_policy_id",
        "verify_ssl": False,
    }
    with patch("saltext.vmware.utils.vmc_request.call_api", autospec=True) as vmc_call_api:
        actual_args.update(common_actual_args)
        vmc_distributed_firewall_rules.get(**actual_args)
        # The optional args should surface in call_api's ``params`` kwarg only.
        call_kwargs = vmc_call_api.mock_calls[0][-1]
        assert call_kwargs["params"] == expected_params
| StarcoderdataPython |
94811 | <filename>validate_no_label.py<gh_stars>0
#!/usr/bin/env python
""" ImageNet Validation Script
This is intended to be a lean and easily modifiable ImageNet validation script for evaluating pretrained
models or training checkpoints against ImageNet or similarly organized image datasets. It prioritizes
canonical PyTorch, standard Python style, and good performance. Repurpose as you see fit.
Hacked together by <NAME> (https://github.com/rwightman)
"""
import sys
import argparse
import os
import csv
import glob
import time
import yaml
import logging
import torch
import torch.nn as nn
import torch.nn.parallel
from collections import OrderedDict
import numpy as np
import re, pickle, random
# apex is optional; it is only required when --amp mixed precision is requested.
try:
    from apex import amp
    has_apex = True
except ImportError:
    has_apex = False
from timm.models import create_model, apply_test_time_pool, load_checkpoint, is_model, list_models
from timm.models.layers.classifier import create_classifier_layerfc
from timm.data import Dataset, DatasetTar, create_loader, resolve_data_config, RealLabelsImagenet
from timm.utils import accuracy, AverageMeter, natural_key, setup_default_logging
# Let cuDNN autotune convolution kernels; beneficial for fixed input shapes.
torch.backends.cudnn.benchmark = True
# Module-level logger for this validation script.
_logger = logging.getLogger('validate')
# The first arg parser parses out only the --config argument, this argument is used to
# load a yaml file containing key-values that override the defaults for the main parser below
config_parser = parser = argparse.ArgumentParser(description='Training Config, we will use them for evaluation', add_help=False)
parser.add_argument('-c', '--config', default='', type=str, metavar='FILE',
                    help='YAML config file specifying default arguments')
# Main parser; ``parser`` is rebound here, leaving config_parser pointing at the first one.
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# Dataset / Model parameters
parser.add_argument('data', metavar='DIR',
                    help='path to dataset')
parser.add_argument('--model', '-m', metavar='MODEL', default='dpn92',
                    help='model architecture (default: dpn92)')
# NOTE(review): help text says "(default: 2)" but the actual default is 4 — confirm intent.
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 2)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
                    metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--img-size', default=None, type=int,
                    metavar='N', help='Input image dimension, uses model default if empty')
parser.add_argument('--crop-pct', default=None, type=float,
                    metavar='N', help='Input image center crop pct')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
                    help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
                    help='Override std deviation of of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
                    help='Image resize interpolation type (overrides model)')
parser.add_argument('--num-classes', type=int, default=1000,
                    help='Number classes in dataset')
parser.add_argument('--class-map', default='', type=str, metavar='FILENAME',
                    help='path to class to idx mapping file (default: "")')
parser.add_argument('--log-freq', default=10, type=int,
                    metavar='N', help='batch logging frequency (default: 10)')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    help='use pre-trained model')
parser.add_argument('--num-gpu', type=int, default=1,
                    help='Number of GPUS to use')
parser.add_argument('--no-test-pool', dest='no_test_pool', action='store_true',
                    help='disable test time pool')
parser.add_argument('--no-prefetcher', action='store_true', default=False,
                    help='disable fast prefetcher')
parser.add_argument('--pin-mem', action='store_true', default=False,
                    help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--amp', action='store_true', default=False,
                    help='Use AMP mixed precision')
parser.add_argument('--tf-preprocessing', action='store_true', default=False,
                    help='Use Tensorflow preprocessing pipeline (require CPU TF installed')
parser.add_argument('--use-ema', dest='use_ema', action='store_true',
                    help='use ema version of weights if present')
parser.add_argument('--torchscript', dest='torchscript', action='store_true',
                    help='convert model torchscript for inference')
parser.add_argument('--legacy-jit', dest='legacy_jit', action='store_true',
                    help='use legacy jit mode for pytorch 1.5/1.5.1/1.6 to get back fusion performance')
parser.add_argument('--results-file', default='', type=str, metavar='FILENAME',
                    help='Output csv file for validation results (summary)')
parser.add_argument('--real-labels', default='', type=str, metavar='FILENAME',
                    help='Real labels JSON file for imagenet evaluation')
parser.add_argument('--valid-labels', default='', type=str, metavar='FILENAME',
                    help='Valid label indices txt file for validation of partial label space')
# ! my own args
# NOTE(review): this help string appears copied from an augmentation flag — confirm.
parser.add_argument("--has_eval_label", action='store_true', default=False,
                    help='on-the-fly aug of eval data')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed') # in case we apply data augmentation on test set
def set_jit_legacy():
    """ Set JIT executor to legacy w/ support for op fusion
    This is hopefully a temporary need in 1.5/1.5.1/1.6 to restore performance due to changes
    in the JIT executor. These API are not supported so could change.
    """
    #
    # Fail loudly if this torch build no longer exposes the legacy toggles.
    assert hasattr(torch._C, '_jit_set_profiling_executor'), "Old JIT behavior doesn't exist!"
    # Disable the profiling executor/mode so the legacy fuser path is taken.
    torch._C._jit_set_profiling_executor(False)
    torch._C._jit_set_profiling_mode(False)
    # Re-enable GPU op fusion under the legacy executor.
    torch._C._jit_override_can_fuse_on_gpu(True)
    #torch._C._jit_set_texpr_fuser_enabled(True)
def _parse_args():
    """Parse CLI args, letting an optional YAML config file override parser defaults.

    Returns:
        (argparse.Namespace, str): the parsed args, plus their YAML dump so
        callers can archive the exact configuration used for this run.
    """
    config_args, remaining_argv = config_parser.parse_known_args()
    if config_args.config:
        with open(config_args.config, 'r') as cfg_file:
            parser.set_defaults(**yaml.safe_load(cfg_file))
    # Everything not consumed by the config parser goes through the main parser;
    # YAML-provided values act as defaults and can still be overridden on the CLI.
    args = parser.parse_args(remaining_argv)
    args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
    return args, args_text
def validate(args):
    """Evaluate a model over ``args.data`` and collect raw predictions.

    Builds the model (optionally from a checkpoint), wraps it for GPU /
    torchscript / AMP as requested, streams the dataset through it, and
    accumulates the raw output logits for every image.

    Returns:
        (OrderedDict, np.ndarray, np.ndarray or None): summary metrics,
        per-image prediction matrix, and the true labels (None when
        ``args.has_eval_label`` is False).

    NOTE(review): several attributes read here (``fp16``, ``topk``, ``aa``,
    ``scale``, ``ratio``, ``hflip``, ``vflip``, ``color_jitter``,
    ``create_classifier_layerfc``) are not declared by this file's argparse
    setup — presumably they arrive via the YAML config file; confirm.
    """
    # might as well try to validate something
    args.pretrained = args.pretrained or not args.checkpoint
    args.prefetcher = not args.no_prefetcher
    if args.legacy_jit:
        set_jit_legacy()

    # create model
    if 'inception' in args.model:
        model = create_model(
            args.model,
            pretrained=args.pretrained,
            num_classes=args.num_classes,
            aux_logits=True,  # ! add aux loss
            in_chans=3,
            scriptable=args.torchscript)
    else:
        model = create_model(
            args.model,
            pretrained=args.pretrained,
            num_classes=args.num_classes,
            in_chans=3,
            scriptable=args.torchscript)

    # ! add more layer to classifier layer
    if args.create_classifier_layerfc:
        model.global_pool, model.classifier = create_classifier_layerfc(model.num_features, model.num_classes)

    if args.checkpoint:
        load_checkpoint(model, args.checkpoint, args.use_ema)

    param_count = sum([m.numel() for m in model.parameters()])
    _logger.info('Model %s created, param count: %d' % (args.model, param_count))

    data_config = resolve_data_config(vars(args), model=model)
    model, test_time_pool = apply_test_time_pool(model, data_config, args)

    if args.torchscript:
        torch.jit.optimized_execution(True)
        model = torch.jit.script(model)

    if args.amp:
        model = amp.initialize(model.cuda(), opt_level='O1')
    else:
        model = model.cuda()
    if args.num_gpu > 1:
        model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu)))

    if args.has_eval_label:
        criterion = nn.CrossEntropyLoss().cuda()  # ! don't have gold label

    # A .tar file is treated as a packed dataset; anything else as a directory.
    if os.path.splitext(args.data)[1] == '.tar' and os.path.isfile(args.data):
        dataset = DatasetTar(args.data, load_bytes=args.tf_preprocessing, class_map=args.class_map)
    else:
        dataset = Dataset(args.data, load_bytes=args.tf_preprocessing, class_map=args.class_map, args=args)

    if args.valid_labels:
        with open(args.valid_labels, 'r') as f:  # @valid_labels is index numbering
            valid_labels = {int(line.rstrip()) for line in f}
            # Boolean mask over the full class range for column selection below.
            valid_labels = [i in valid_labels for i in range(args.num_classes)]
    else:
        valid_labels = None

    if args.real_labels:
        real_labels = RealLabelsImagenet(dataset.filenames(basename=True), real_json=args.real_labels)
    else:
        real_labels = None

    crop_pct = 1.0 if test_time_pool else data_config['crop_pct']
    loader = create_loader(
        dataset,
        input_size=data_config['input_size'],
        batch_size=args.batch_size,
        use_prefetcher=args.prefetcher,
        interpolation=data_config['interpolation'],  # 'blank' is default Image.BILINEAR https://github.com/rwightman/pytorch-image-models/blob/470220b1f4c61ad7deb16dbfb8917089e842cd2a/timm/data/transforms.py#L43
        mean=data_config['mean'],
        std=data_config['std'],
        num_workers=args.workers,
        crop_pct=crop_pct,
        pin_memory=args.pin_mem,
        tf_preprocessing=args.tf_preprocessing,
        auto_augment=args.aa,
        scale=args.scale,
        ratio=args.ratio,
        hflip=args.hflip,
        vflip=args.vflip,
        color_jitter=args.color_jitter,
        args=args
    )

    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    topk = AverageMeter()

    prediction = None  # ! need to save output
    true_label = None
    model.eval()
    with torch.no_grad():
        # warmup, reduce variability of first batch time, especially for comparing torchscript vs non
        input = torch.randn((args.batch_size,) + data_config['input_size']).cuda()
        model(input)
        end = time.time()
        for batch_idx, (input, target) in enumerate(loader):  # ! not have real label
            if args.has_eval_label:  # ! just save true labels anyway... why not
                if true_label is None: true_label = target.cpu().data.numpy()
                else: true_label = np.concatenate ( ( true_label,target.cpu().data.numpy() ) , axis=0 )
            if args.no_prefetcher:
                # Prefetcher normally moves tensors to GPU; do it manually here.
                target = target.cuda()
                input = input.cuda()
            if args.fp16:
                input = input.half()

            # compute output
            output = model(input)
            if isinstance(output, (tuple, list)):
                output = output[0]  # ! some model returns both loss + aux loss
            if valid_labels is not None:
                output = output[:, valid_labels]  # ! keep only valid labels ? good to eval by class.
            # ! save prediction, don't append too slow ... whatever ?
            # ! are names of files also sorted ?
            if prediction is None:
                prediction = output.cpu().data.numpy()  # batchsize x label
            else:  # stack
                prediction = np.concatenate ( (prediction, output.cpu().data.numpy() ) , axis=0 )

            if real_labels is not None:
                real_labels.add_result(output)

            if args.has_eval_label:
                # measure accuracy and record loss
                loss = criterion(output, target)  # ! don't have gold standard on testset
                acc1, acc5 = accuracy(output.data, target, topk=(1, args.topk))
                losses.update(loss.item(), input.size(0))
                top1.update(acc1.item(), input.size(0))
                topk.update(acc5.item(), input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if args.has_eval_label and (batch_idx % args.log_freq == 0):
                _logger.info(
                    'Test: [{0:>4d}/{1}] '
                    'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
                    'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '
                    'Acc@1: {top1.val:>7.3f} ({top1.avg:>7.3f}) '
                    'Acc@topk: {topk.val:>7.3f} ({topk.avg:>7.3f})'.format(
                        batch_idx, len(loader), batch_time=batch_time,
                        rate_avg=input.size(0) / batch_time.avg,
                        loss=losses, top1=top1, topk=topk))

    if not args.has_eval_label:
        top1a, topka = 0, 0  # just dummy, because we don't know ground labels
    else:
        if real_labels is not None:
            # real labels mode replaces topk values at the end
            top1a, topka = real_labels.get_accuracy(k=1), real_labels.get_accuracy(k=args.topk)
        else:
            top1a, topka = top1.avg, topk.avg
    results = OrderedDict(
        top1=round(top1a, 4), top1_err=round(100 - top1a, 4),
        topk=round(topka, 4), topk_err=round(100 - topka, 4),
        param_count=round(param_count / 1e6, 2),
        img_size=data_config['input_size'][-1],
        cropt_pct=crop_pct,
        interpolation=data_config['interpolation'])

    _logger.info(' * Acc@1 {:.3f} ({:.3f}) Acc@topk {:.3f} ({:.3f})'.format(
        results['top1'], results['top1_err'], results['topk'], results['topk_err']))

    return results, prediction, true_label
def main():
    """Entry point: validate one model, or sweep many checkpoints/models.

    When ``--checkpoint`` is a directory (or ``--model`` is ``all``/a wildcard),
    runs bulk validation with automatic batch-size backoff on CUDA OOM and
    writes a sorted summary CSV. Otherwise validates a single model and writes
    per-image softmax probabilities via the HAM10000 helper.
    """
    setup_default_logging()
    args, args_text = _parse_args()
    # args = parser.parse_args()
    # Seed all RNGs so any test-time augmentation is reproducible.
    torch.manual_seed(args.seed)  # ! mostly for aug on test
    random.seed(args.seed)
    np.random.seed(args.seed)

    model_cfgs = []
    model_names = []
    if os.path.isdir(args.checkpoint):  # ! can pass in a directory of checkpoints
        # validate all checkpoints in a path with same model
        checkpoints = glob.glob(args.checkpoint + '/*.pth.tar')
        checkpoints += glob.glob(args.checkpoint + '/*.pth')
        model_names = list_models(args.model)
        model_cfgs = [(args.model, c) for c in sorted(checkpoints, key=natural_key)]
    else:
        if args.model == 'all':
            # validate all models in a list of names with pretrained checkpoints
            args.pretrained = True
            model_names = list_models(pretrained=True)
            model_cfgs = [(n, '') for n in model_names]
        elif not is_model(args.model):
            # model name doesn't exist, try as wildcard filter
            model_names = list_models(args.model)
            model_cfgs = [(n, '') for n in model_names]

    results_file = args.results_file or './results-all.csv'
    if len(model_cfgs):
        _logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names)))
        results = []
        try:
            start_batch_size = args.batch_size
            for m, c in model_cfgs:
                batch_size = start_batch_size
                args.model = m
                args.checkpoint = c
                result = OrderedDict(model=args.model)
                r = {}
                # Retry with halved batch size until validation succeeds or we
                # cannot go below one sample per GPU.
                while not r and batch_size >= args.num_gpu:
                    torch.cuda.empty_cache()
                    try:
                        args.batch_size = batch_size
                        print('Validating with batch size: %d' % args.batch_size)
                        r, prediction, true_label = validate(args)
                    except RuntimeError as e:
                        if batch_size <= args.num_gpu:
                            print("Validation failed with no ability to reduce batch size. Exiting.")
                            raise e
                        batch_size = max(batch_size // 2, args.num_gpu)
                        print("Validation failed, reducing batch size by 50%")
                result.update(r)
                if args.checkpoint:
                    result['checkpoint'] = args.checkpoint
                results.append(result)
        except KeyboardInterrupt as e:
            # Allow Ctrl-C to stop the sweep but still write partial results.
            pass
        results = sorted(results, key=lambda x: x['top1'], reverse=True)
        if len(results):
            write_results(results_file, results)
    else:
        # ! eval one single model
        results_file = re.sub (r'\.csv', '', results_file ) + '-standard.csv'
        _, prediction, true_label = validate(args)
        from HAM10000 import helper
        prediction = helper.softmax(prediction)  # softmax convert to range 0-1, sum to 1
        if args.has_eval_label:
            from sklearn.metrics import accuracy_score, balanced_accuracy_score
            true_label_onehot = np.identity(args.num_classes)[true_label]  # array into 1 hot
            # https://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html
            _logger.info ( ' * sklearn multilabel probabilities accuracy_score {:.3f} '.format ( accuracy_score(true_label_onehot, np.round(prediction)) ) )
            _logger.info ( ' * sklearn max probabilities balanced_accuracy_score {:.3f} '.format ( balanced_accuracy_score(true_label, helper.convert_max_1_other_0(prediction).argmax(axis=1) ) ) )
        # output csv, need to reorder columns
        helper.save_output_csv(prediction, obs_name=[], output_name=results_file)  # no name for each observation, use []
def write_results(results_file, results):
    """Write a list of result dicts to ``results_file`` as CSV.

    The header row comes from the keys of the first result, so every dict in
    ``results`` is expected to share the same keys.
    """
    # newline='' is required by the csv module; without it csv.writer emits
    # doubled line endings (\r\r\n) on Windows.
    with open(results_file, mode='w', newline='') as cf:
        dw = csv.DictWriter(cf, fieldnames=results[0].keys())
        dw.writeheader()
        for r in results:
            dw.writerow(r)
        cf.flush()
# Script entry point: parse arguments and run validation.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
333773 | <filename>src/tests/test_merge.py
from unittest import TestCase
from merge import Merge3, merge3_lists
class TestThreeWayListMerge(TestCase):
    """Three-way merges of flat module-name lists against a common base."""

    def test_added_deleted(self):
        base = ['Module1']
        ours = ['Module1', 'Module2']
        theirs = ['Module1', 'Module3']
        added, deleted, maybe_modified = merge3_lists(a=ours, b=theirs, x=base)
        self.assertEqual(['Module3'], added)
        self.assertEqual([], deleted)
        self.assertEqual(['Module1'], maybe_modified)

    def test_deleted_modified(self):
        base = ['Module1', 'Module2']
        ours = ['Module1']
        theirs = ['Module1', 'Module2']
        added, deleted, maybe_modified = merge3_lists(a=ours, b=theirs, x=base)
        self.assertEqual([], added)
        self.assertEqual([], deleted)
        self.assertEqual(['Module1', 'Module2'], maybe_modified)
class TestThreeWayMerge(TestCase):
    """Full three-way text merge, including conflict-marker output."""

    def test_conflict(self):
        ours = ['Option Explicit', "'test1", 'Sub test()', ' Debug.Print "hello1"', 'End Sub']
        theirs = ['Option Explicit', 'Function test()', ' Debug.Print "hello1"', ' test = "test"', 'End Function']
        base = ['Option Explicit', 'Sub test()', ' Debug.Print "hello1"', 'End Sub']
        merger = Merge3(a=ours, b=theirs, base=base)
        self.assertTrue(merger.is_conflicted())
        expected = [
            'Option Explicit',
            '<<<<<<< ours\n',
            "'test1",
            'Sub test()',
            '=======\n',
            'Function test()',
            '>>>>>>> theirs\n',
            ' Debug.Print "hello1"',
            ' test = "test"',
            'End Function',
        ]
        self.assertEqual(expected, list(merger.merge_lines(name_a='ours', name_b='theirs')))
| StarcoderdataPython |
11375130 | <filename>test/conftest.py
# Copyright 2021 Open Rise Robotics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import time
from unittest.mock import MagicMock
import pytest
import rclpy
from rclpy.node import Node
from watchdog.events import FileSystemEventHandler
from xacro_live import XacroObserver
class RobotDescriptionServer(Node):
    """Minimal ROS 2 node named ``robot_state_publisher`` exposing a parameter.

    Declares an (initially empty) string parameter ``robot_description`` so
    tests have a parameter server to push URDF/xacro output to.
    """

    def __init__(self):
        super().__init__('robot_state_publisher')
        # Declared empty; tests are expected to set the value later.
        self.declare_parameter('robot_description', str())
class AsyncTimeout:
    """Non-blocking countdown: reports whether ``duration`` seconds have elapsed."""

    def __init__(self, duration: float, auto_start=True):
        self.duration = duration
        if auto_start:
            self.start()

    def start(self):
        """(Re)start the countdown from now, using the monotonic clock."""
        self.start_time = time.clock_gettime(time.CLOCK_MONOTONIC)

    def finished(self) -> bool:
        """True once at least ``duration`` seconds have passed since start()."""
        elapsed = time.clock_gettime(time.CLOCK_MONOTONIC) - self.start_time
        return elapsed >= self.duration

    def running(self) -> bool:
        """True while the countdown has not yet expired."""
        return not self.finished()
@pytest.fixture(scope='session')
def async_timeout():
    """Session fixture: factory that builds AsyncTimeout instances on demand."""
    def _make(duration, auto_start=True):
        return AsyncTimeout(duration, auto_start)
    return _make
@pytest.fixture
def xacro_dir(tmp_path):
    # Work on a throwaway copy of the xacro sources so tests may modify files.
    shutil.copytree('test/urdf', tmp_path, dirs_exist_ok=True)
    return tmp_path
@pytest.fixture
def xacro_file(xacro_dir):
    # Path (str) to the main robot.xacro inside the temporary copy.
    return os.path.join(xacro_dir, 'robot.xacro')
@pytest.fixture
def xacro_observer(xacro_file):
    # Observer watching the temporary xacro file for changes.
    return XacroObserver(xacro_file)
@pytest.fixture
def event_handler_mock():
    """Watchdog handler whose ``on_modified`` callback is a MagicMock spy."""
    handler = FileSystemEventHandler()
    handler.on_modified = MagicMock()
    return handler
@pytest.fixture(scope='session')
def canonicalize_xml():
    """Session fixture: stdlib XML canonicalization function (for XML equality checks)."""
    from xml.etree.ElementTree import canonicalize
    return canonicalize
@pytest.fixture
def init_node():
    # Initialize the ROS 2 context for the test, and tear it down afterwards.
    yield rclpy.init()
    rclpy.shutdown()
@pytest.fixture
def robot_description_server(init_node):
    # Node exposing the robot_description parameter; requires an initialized context.
    return RobotDescriptionServer()
| StarcoderdataPython |
class MondayCollection:
    """Container of Monday items keyed by their string id.

    Supports iteration over the items, ``len()``, and id lookup (int or str).
    """

    def __init__(self, client):
        self.client = client
        # Item id (str) -> item. Populated externally.
        self.collection = {}

    def __iter__(self):
        # Iterate values directly instead of a per-key .get() lookup.
        yield from self.collection.values()

    def __len__(self):
        return len(self.collection)

    def __getitem__(self, item_id):
        """Look up an item by id; int ids are coerced to their str form.

        Raises:
            KeyError: if no item with that id exists.
        """
        item = self.collection.get(str(item_id))
        if item is None:
            raise KeyError(f'ID {item_id} not in collection')
        return item

    @property
    def values(self):
        """List of each contained item's ``values`` payload."""
        return [item.values for item in self.collection.values()]
class MondayItem:
    """Wrapper around one Monday item's raw field data."""

    def __init__(self, data, client):
        self.client = client
        # Raw field values for this item.
        self.values = data
        # NOTE(review): stored concrete type; purpose unclear from this file —
        # retained for backward compatibility.
        self.type = type(self)
import os
import sys


def drop_leading_lines(path, keep_from=15):
    """Rewrite *path* in place, keeping only lines ``keep_from`` onward (1-based).

    Replaces the shell one-liner ``tail -n +15 file > file`` that was pasted
    here: it was not valid Python, and the shell redirection would have
    truncated the input file before ``tail`` could read it. Reading the whole
    file first avoids that self-truncation hazard.
    """
    with open(path, 'r') as handle:
        lines = handle.readlines()
    with open(path, 'w') as handle:
        handle.writelines(lines[keep_from - 1:])


if __name__ == '__main__':
    drop_leading_lines(sys.argv[1])
| StarcoderdataPython |
11345126 | <gh_stars>1000+
#!/usr/bin/env python3
#
# alloc_instrumentation.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2019 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import fileinput
import argparse
from collections import defaultdict
# Processes the stdout produced by defining ALLOC_INSTRUMENTATION_STDOUT in FastAlloc.h
allocs = {}
class Allocation:
    """One live allocation: its size in bytes and the backtrace that made it."""

    def __init__(self, size, backtrace):
        self.size = size            # allocation size in bytes
        self.backtrace = backtrace  # backtrace string identifying the call site

    def __repr__(self):
        # Added for debuggability when inspecting the global allocs table.
        return 'Allocation(size={!r}, backtrace={!r})'.format(self.size, self.backtrace)
def print_stacks(stack_count, sort_by_count):
    """Print per-backtrace totals for all live allocations.

    Args:
        stack_count: truncate output to this many stacks (falsy means show all).
        sort_by_count: sort by allocation count instead of total bytes.
    """
    counts = defaultdict(int)
    sizes = defaultdict(int)
    for allocation in allocs.values():
        counts[allocation.backtrace] += 1
        sizes[allocation.backtrace] += allocation.size

    key_dict = counts if sort_by_count else sizes
    ranked = sorted(((total, bt) for bt, total in key_dict.items()), reverse=True)
    if stack_count:
        ranked = ranked[:stack_count]

    for _total, bt in ranked:
        print('bytes={0:<10} count={1:<8} {2}'.format(sizes[bt], counts[bt], bt))
    print('-' * 80)
def process_line(line, quiet):
    """Apply one tab-separated instrumentation record to the global ``allocs`` map.

    ``Alloc`` lines register a new allocation, ``Dealloc`` lines retire one
    (unknown ids are ignored), and anything else is echoed unless ``quiet``.
    """
    fields = line.split('\t')
    tag = fields[0]
    if tag == 'Alloc':
        allocs[fields[1]] = Allocation(size=int(fields[2]), backtrace=fields[3])
    elif tag == 'Dealloc':
        allocs.pop(fields[1], None)
    elif not quiet:
        print(line)
def non_negative_int(value_str):
    """argparse ``type=`` helper: parse an int and reject negative values."""
    parsed = int(value_str)
    if parsed < 0:
        raise argparse.ArgumentTypeError("%s is negative" % parsed)
    return parsed
# CLI entry point: parse options, stream the instrumentation log, and report
# the top allocation stacks periodically plus once at the end.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Parses the output from enabling ALLOC_INSTRUMENTATION in FoundationDB and reports information about the top memory users.')
    parser.add_argument('input_file', type=str, help='Path to file(s) containing the output from a run of FoundationDB with ALLOC_INSTRUMENTATION enabled. If not specified, stdin will be used.', default='-', nargs='*')
    parser.add_argument('-f', '--logging-frequency', type=non_negative_int, help='How frequently the top stacks will be logged, measured in lines of output processed. A value of 0 disables periodic logging. Defaults to 1,000,000.', default=1000000)
    parser.add_argument('-p', '--periodic-stack-count', type=non_negative_int, help='How many stack traces to log when periodically logging output. A value of 0 results in all stacks being logged. Defaults to 15.', default=15)
    parser.add_argument('-s', '--final-stack-count', type=non_negative_int, help='How many stack traces to log when finished processing output. A value of 0 results in all stacks being logged. Defaults to 0.', default=0)
    parser.add_argument('-c', '--sort-by-count', action='store_true', default=False, help='If specified, stacks will be sorted by largest count rather than largest number of bytes.')
    parser.add_argument('-q', '--quiet', action='store_true', default=False, help='If specified, lines from the input file that are not parsable by this tool will not be printed.')
    args = parser.parse_args()
    # Process each line, periodically reporting the top stacks by size
    for line_num, line in enumerate(fileinput.input(args.input_file)):
        process_line(line.rstrip(), args.quiet)
        # Skip line_num 0 so the very first line never triggers a report.
        if args.logging_frequency and line_num and line_num % args.logging_frequency == 0:
            print_stacks(args.periodic_stack_count, args.sort_by_count)
    # Print all stacks
    print_stacks(args.final_stack_count, args.sort_by_count)
| StarcoderdataPython |
4882216 | <reponame>qiwihui/django-event-system
from .TestDispatch import *
from .TestEvent import TestEventAndListener
from .TestDispatcherMock import TestDispatcherMock
from .TestUtils import TestUtils | StarcoderdataPython |
3301560 | import Block as bl
import time
class Blockchain:
DIFFICULTY = 4
    def __init__(self):
        # Confirmed blocks, oldest first; empty until create_genesis_block().
        self.chain = []
        self.unconfirmed_transactions = []  # data not yet validated
def create_genesis_block(self):
genesis_block = bl.Block(0, [], 0, "0")
genesis_block.hash = genesis_block.compute_hash()
self.chain.append(genesis_block)
    @property
    def last_block(self):
        """
        The last block in the chain, ie. the most recent block added
        """
        # Assumes the chain is non-empty (genesis block created first).
        return self.chain[-1]
@staticmethod
def proof_of_work(block):
"""
A proof of work is the process of adding a constraint to a block's
hash. By adding the constraint, it makes it difficult for a valid
hash to be computed.
"""
block.nonce = 0
computed_hash = block.compute_hash()
while (not computed_hash.startswith('0' * Blockchain.DIFFICULTY)):
block.nonce += 1
computed_hash = block.compute_hash()
return computed_hash
def add_block(self, block, proof):
"""
To add a block into the blockchain, we must determine if the block
to be added is in the correct chronological order (no adding
transactions that occured before the last block),
and we must determine if the data has not been tampered with.
"""
previous_hash = self.last_block.hash
# is the block in the right chronological order?
if (previous_hash != block.previous_hash):
return False
# has the block been tampered with
if (not Blockchain.is_valid_proof(block, proof)):
return False
# if the above constraints are satisfied, add the block
block.hash = proof
self.chain.append(block)
return True
@classmethod
def is_valid_proof(self, block, block_hash):
# does the hash satisfy the contraints?
# does the hash of the block match the proof provided?
return (block_hash.startswith('0' * Blockchain.DIFFICULTY) and
block_hash == block.compute_hash())
def add_transaction(self, transaction):
# Add a transaction to the list
self.unconfirmed_transactions.append(transaction)
def mine(self):
# is the list of unconfirmed transactions empty?
if (not self.unconfirmed_transactions):
return False
# get the last block to determine the index and previous_hash of
# the new block
last_block = self.last_block
new_block = bl.Block(last_block.index + 1,
self.unconfirmed_transactions,
time.time(),
last_block.hash)
# do work to find a valid hash
proof = self.proof_of_work(new_block)
self.add_block(new_block, proof)
# reset the transactions
self.unconfirmed_transactions = []
return True
@classmethod
def check_chain_validity(cls, chain):
result = True
previous_hash = "0"
# Iterate through every block
for block in chain:
block_hash = block.hash
# remove the hash field in order to compute it again
delattr(block, "hash")
if not cls.is_valid_proof(block, block.hash) or \
previous_hash != block.previous_hash:
result = False
break
block.hash, previous_hash = block_hash, block_hash
return result
| StarcoderdataPython |
101861 | from pathlib import Path
import pytest
import sys
import ssh2net
from ssh2net import SSH2Net
from ssh2net.exceptions import ValidationError, SetupTimeout
NET2_DIR = ssh2net.__file__
UNIT_TEST_DIR = f"{Path(NET2_DIR).parents[1]}/tests/unit/"
# --- SSH2Net constructor tests: shell flag, host/port/timeout validation ----
# Pattern throughout this module: build a kwargs dict, instantiate SSH2Net,
# then assert either the normalized attribute or that __init__ raises.
def test_init__shell():
    # NOTE(review): trailing space in setup_host is incidental here; the
    # stripping behavior itself is covered by test_init_host_strip below.
    test_host = {"setup_host": "my_device ", "auth_user": "username", "auth_password": "password"}
    conn = SSH2Net(**test_host)
    assert conn._shell is False
def test_init_host_strip():
    test_host = {"setup_host": "my_device ", "auth_user": "username", "auth_password": "password"}
    conn = SSH2Net(**test_host)
    # surrounding whitespace is stripped from the hostname on init
    assert conn.host == "my_device"
def test_init_validate_host():
    # setup_validate_host=True triggers IP/DNS validation; 8.8.8.8 is a
    # well-known valid IP so construction must succeed.
    test_host = {
        "setup_host": "8.8.8.8",
        "setup_validate_host": True,
        "auth_user": "username",
        "auth_password": "password",
    }
    conn = SSH2Net(**test_host)
    assert conn.host == "8.8.8.8"
def test_init_valid_port():
    test_host = {
        "setup_host": "my_device ",
        "setup_port": 123,
        "auth_user": "username",
        "auth_password": "password",
    }
    conn = SSH2Net(**test_host)
    assert conn.port == 123
def test_init_invalid_port():
    # a non-numeric port is expected to raise ValueError (int coercion)
    test_host = {
        "setup_host": "my_device ",
        "setup_port": "notanint",
        "auth_user": "username",
        "auth_password": "password",
    }
    with pytest.raises(ValueError):
        SSH2Net(**test_host)
def test_init_valid_setup_timeout():
    test_host = {
        "setup_host": "my_device ",
        "setup_timeout": 10,
        "auth_user": "username",
        "auth_password": "password",
    }
    conn = SSH2Net(**test_host)
    assert conn.setup_timeout == 10
def test_init_invalid_setup_timeout():
    test_host = {
        "setup_host": "my_device ",
        "setup_timeout": "notanint",
        "auth_user": "username",
        "auth_password": "password",
    }
    with pytest.raises(ValueError):
        SSH2Net(**test_host)
def test_init_valid_session_timeout():
    test_host = {
        "setup_host": "my_device ",
        "auth_user": "username",
        "auth_password": "password",
        "session_timeout": 10,
    }
    conn = SSH2Net(**test_host)
    assert conn.session_timeout == 10
def test_init_invalid_session_timeout():
    test_host = {
        "setup_host": "my_device ",
        "auth_user": "username",
        "auth_password": "password",
        "session_timeout": "notanint",
    }
    with pytest.raises(ValueError):
        SSH2Net(**test_host)
# --- SSH2Net constructor tests: keepalive options and credential stripping --
def test_init_valid_session_keepalive():
    test_host = {
        "setup_host": "my_device ",
        "auth_user": "username",
        "auth_password": "password",
        "session_keepalive": True,
    }
    conn = SSH2Net(**test_host)
    assert conn.session_keepalive is True
def test_init_invalid_session_keepalive():
    # keepalive flag must be a real bool; a string raises TypeError
    test_host = {
        "setup_host": "my_device ",
        "auth_user": "username",
        "auth_password": "password",
        "session_keepalive": "notabool",
    }
    with pytest.raises(TypeError):
        SSH2Net(**test_host)
def test_init_valid_session_keepalive_interval():
    test_host = {
        "setup_host": "my_device ",
        "auth_user": "username",
        "auth_password": "password",
        "session_keepalive_interval": 10,
    }
    conn = SSH2Net(**test_host)
    assert conn.session_keepalive_interval == 10
def test_init_invalid_session_keepalive_interval():
    test_host = {
        "setup_host": "my_device ",
        "auth_user": "username",
        "auth_password": "password",
        "session_keepalive_interval": "notanint",
    }
    with pytest.raises(ValueError):
        SSH2Net(**test_host)
def test_init_valid_session_keepalive_type():
    # "standard" and (per the repr test below) "network" are the accepted
    # keepalive types; anything else is rejected.
    test_host = {
        "setup_host": "my_device ",
        "auth_user": "username",
        "auth_password": "password",
        "session_keepalive_type": "standard",
    }
    conn = SSH2Net(**test_host)
    assert conn.session_keepalive_type == "standard"
def test_init_invalid_session_keepalive_type():
    test_host = {
        "setup_host": "my_device ",
        "auth_user": "username",
        "auth_password": "password",
        "session_keepalive_type": "notvalid",
    }
    with pytest.raises(ValueError):
        SSH2Net(**test_host)
def test_init_valid_session_keepalive_pattern():
    # "\007" (octal escape) and "\x07" are the same BEL character
    test_host = {
        "setup_host": "my_device ",
        "auth_user": "username",
        "auth_password": "password",
        "session_keepalive_pattern": "\007",
    }
    conn = SSH2Net(**test_host)
    assert conn.session_keepalive_pattern == "\x07"
def test_init_username_strip():
    test_host = {"setup_host": "my_device", "auth_user": "username ", "auth_password": "password"}
    conn = SSH2Net(**test_host)
    assert conn.auth_user == "username"
def test_init_password_strip():
    test_host = {"setup_host": "my_device", "auth_user": "username", "auth_password": "password "}
    conn = SSH2Net(**test_host)
    assert conn.auth_password == "password"
def test_init_ssh_key_strip():
    # key path is both stripped and stored as bytes
    test_host = {
        "setup_host": "my_device",
        "auth_user": "username",
        "auth_public_key": "/some/public/key ",
    }
    conn = SSH2Net(**test_host)
    assert conn.auth_public_key == b"/some/public/key"
def test_init_valid_comms_strip_ansi():
    test_host = {
        "setup_host": "my_device",
        "auth_user": "username",
        "auth_password": "password",
        "comms_strip_ansi": True,
    }
    conn = SSH2Net(**test_host)
    assert conn.comms_strip_ansi is True
def test_init_invalid_comms_strip_ansi():
    test_host = {
        "setup_host": "my_device",
        "auth_user": "username",
        "auth_password": "password",
        "comms_strip_ansi": 123,
    }
    with pytest.raises(TypeError):
        SSH2Net(**test_host)
# --- SSH2Net constructor tests: comms options and pre-login handler ---------
def test_init_valid_comms_prompt_regex():
    test_host = {
        "setup_host": "my_device",
        "auth_user": "username",
        "auth_password": "password",
        "comms_prompt_regex": "somestr",
    }
    conn = SSH2Net(**test_host)
    assert conn.comms_prompt_regex == "somestr"
def test_init_invalid_comms_prompt_regex():
    test_host = {
        "setup_host": "my_device",
        "auth_user": "username",
        "auth_password": "password",
        "comms_prompt_regex": 123,
    }
    with pytest.raises(TypeError):
        SSH2Net(**test_host)
# NOTE(review): the two "comms_prompt_timeout" tests below actually exercise
# the comms_operation_timeout option; consider renaming for clarity.
def test_init_valid_comms_prompt_timeout():
    test_host = {
        "setup_host": "my_device ",
        "auth_user": "username",
        "auth_password": "password",
        "comms_operation_timeout": 10,
    }
    conn = SSH2Net(**test_host)
    assert conn.comms_operation_timeout == 10
def test_init_invalid_comms_prompt_timeout():
    test_host = {
        "setup_host": "my_device ",
        "auth_user": "username",
        "auth_password": "password",
        "comms_operation_timeout": "notanint",
    }
    with pytest.raises(ValueError):
        SSH2Net(**test_host)
def test_init_valid_comms_return_char():
    test_host = {
        "setup_host": "my_device",
        "auth_user": "username",
        "auth_password": "password",
        "comms_return_char": "\rn",
    }
    conn = SSH2Net(**test_host)
    assert conn.comms_return_char == "\rn"
def test_init_invalid_comms_return_char():
    test_host = {
        "setup_host": "my_device",
        "auth_user": "username",
        "auth_password": "password",
        "comms_return_char": False,
    }
    with pytest.raises(TypeError) as e:
        SSH2Net(**test_host)
    # asserts the exact library error message -- brittle against upstream
    # wording changes, but intentional here
    assert str(e.value) == "'comms_return_char' must be <class 'str'>, got: <class 'bool'>'"
def test_init_valid_comms_pre_login_handler_func():
    # handler may be passed as a callable...
    def pre_login_handler_func():
        pass
    login_handler = pre_login_handler_func
    test_host = {
        "setup_host": "my_device",
        "auth_user": "username",
        "auth_password": "password",
        "comms_pre_login_handler": login_handler,
    }
    conn = SSH2Net(**test_host)
    assert callable(conn.comms_pre_login_handler)
def test_init_valid_comms_pre_login_handler_ext_func():
    # ...or as a dotted import path that the library resolves to a callable
    test_host = {
        "setup_host": "my_device",
        "auth_user": "username",
        "auth_password": "password",
        "comms_pre_login_handler": "tests.unit.ext_test_funcs.some_pre_login_handler_func",
    }
    conn = SSH2Net(**test_host)
    assert callable(conn.comms_pre_login_handler)
def test_init_invalid_comms_pre_login_handler():
    test_host = {
        "setup_host": "my_device",
        "auth_user": "username",
        "auth_password": "password",
        "comms_pre_login_handler": "not.a.valid.ext.function",
    }
    with pytest.raises(ValueError) as e:
        SSH2Net(**test_host)
    assert (
        str(e.value)
        == f"{test_host['comms_pre_login_handler']} is an invalid comms_pre_login_handler function or path to a function."
    )
# Removed: a stale duplicate of test_init_valid_comms_disable_paging_default.
# It asserted the old default ("term length 0") and was silently shadowed by
# the identically named test defined later in this module, which asserts the
# current default ("terminal length 0") -- consistent with the expected value
# embedded in test_repr.
# --- SSH2Net constructor tests: comms_disable_paging variants ---------------
# comms_disable_paging accepts a callable, a dotted import path, or a plain
# command string.
def test_init_valid_comms_disable_paging_func():
    def disable_paging_func():
        pass
    disable_paging = disable_paging_func
    test_host = {
        "setup_host": "my_device",
        "auth_user": "username",
        "auth_password": "password",
        "comms_disable_paging": disable_paging,
    }
    conn = SSH2Net(**test_host)
    assert callable(conn.comms_disable_paging)
def test_init_valid_comms_disable_paging_ext_func():
    test_host = {
        "setup_host": "my_device",
        "auth_user": "username",
        "auth_password": "password",
        "comms_disable_paging": "tests.unit.ext_test_funcs.some_disable_paging_func",
    }
    conn = SSH2Net(**test_host)
    assert callable(conn.comms_disable_paging)
def test_init_valid_comms_disable_paging_str():
    test_host = {
        "setup_host": "my_device",
        "auth_user": "username",
        "auth_password": "password",
        "comms_disable_paging": "do some paging stuff",
    }
    conn = SSH2Net(**test_host)
    assert conn.comms_disable_paging == "do some paging stuff"
def test_init_invalid_comms_disable_paging_ext_func():
    # a dotted path that does not resolve raises AttributeError on import
    test_host = {
        "setup_host": "my_device",
        "auth_user": "username",
        "auth_password": "password",
        "comms_disable_paging": "tests.unit.ext_test_funcs.some_disable_paging_func_BAD",
    }
    with pytest.raises(AttributeError):
        SSH2Net(**test_host)
def test_init_valid_comms_disable_paging_default():
    test_host = {"setup_host": "my_device", "auth_user": "username", "auth_password": "password"}
    conn = SSH2Net(**test_host)
    assert conn.comms_disable_paging == "terminal length 0"
def test_init_invalid_comms_disable_paging_str():
    test_host = {
        "setup_host": "my_device",
        "auth_user": "username",
        "auth_password": "password",
        "comms_disable_paging": 1234,
    }
    with pytest.raises(ValueError) as e:
        SSH2Net(**test_host)
    assert (
        str(e.value)
        == f"{test_host['comms_disable_paging']} is an invalid comms_disable_paging function, path to a function, or is not a string."
    )
# --- SSH2Net tests: ssh config file, dunders, host validation, raw sockets --
def test_init_ssh_config_file():
    # reads the bundled _ssh_config fixture; the user for "someswitch1"
    # defined there is expected to be picked up
    test_host = {
        "setup_host": "someswitch1",
        "setup_ssh_config_file": f"{UNIT_TEST_DIR}_ssh_config",
    }
    conn = SSH2Net(**test_host)
    assert conn.auth_user == "carl"
# will fail without mocking or a real host
# def test_enter_exit():
#    test_host = {"setup_host": "1.2.3.4", "auth_user": "username", "auth_password": "password"}
#    with SSH2Net(**test_host) as conn:
#        assert bool(conn) is True
#    assert bool(conn) is False
def test_str():
    test_host = {"setup_host": "1.2.3.4", "auth_user": "username", "auth_password": "password"}
    conn = SSH2Net(**test_host)
    assert str(conn) == f"SSH2Net Connection Object for host {test_host['setup_host']}"
def test_repr():
    test_host = {"setup_host": "1.2.3.4", "auth_user": "username", "auth_password": "password"}
    conn = SSH2Net(**test_host)
    # NOTE(review): asserts the full default repr verbatim -- any new option
    # or default change in SSH2Net will break this test.
    assert repr(conn) == (
        "SSH2Net {'_shell': False, 'host': '1.2.3.4', 'port': 22, 'setup_timeout': 5, "
        "'setup_use_paramiko': False, 'session_timeout': 5000, 'session_keepalive': False, "
        "'session_keepalive_interval': 10, 'session_keepalive_type': 'network', "
        "'session_keepalive_pattern': '\\x05', 'auth_user': 'username', 'auth_public_key': None, "
        "'auth_password': '********', 'comms_strip_ansi': False, 'comms_prompt_regex': "
        "'^[a-z0-9.\\\\-@()/:]{1,32}[#>$]$', 'comms_operation_timeout': 10, 'comms_return_char': "
        "'\\n', 'comms_pre_login_handler': '', 'comms_disable_paging': 'terminal length 0'}"
    )
def test_bool():
    # truthiness reflects whether a session is established; no connection yet
    test_host = {"setup_host": "my_device ", "auth_user": "username", "auth_password": "password"}
    conn = SSH2Net(**test_host)
    assert bool(conn) is False
def test__validate_host_valid_ip():
    test_host = {"setup_host": "8.8.8.8", "auth_user": "username", "auth_password": "password"}
    conn = SSH2Net(**test_host)
    r = conn._validate_host()
    assert r is None
def test__validate_host_valid_dns():
    # NOTE(review): requires working DNS resolution of google.com at test time
    test_host = {"setup_host": "google.com", "auth_user": "username", "auth_password": "password"}
    conn = SSH2Net(**test_host)
    r = conn._validate_host()
    assert r is None
def test__validate_host_invalid_ip():
    test_host = {
        "setup_host": "255.255.255.256",
        "auth_user": "username",
        "auth_password": "password",
    }
    conn = SSH2Net(**test_host)
    with pytest.raises(ValidationError) as e:
        conn._validate_host()
    assert str(e.value) == f"Host {test_host['setup_host']} is not an IP or resolvable DNS name."
def test__validate_host_invalid_dns():
    test_host = {
        "setup_host": "notresolvablename",
        "auth_user": "username",
        "auth_password": "password",
    }
    conn = SSH2Net(**test_host)
    with pytest.raises(ValidationError) as e:
        conn._validate_host()
    assert str(e.value) == f"Host {test_host['setup_host']} is not an IP or resolvable DNS name."
def test__socket_alive_false():
    test_host = {"setup_host": "127.0.0.1", "auth_user": "username", "auth_password": "password"}
    conn = SSH2Net(**test_host)
    assert conn._socket_alive() is False
# The tests below need an SSH daemon listening on localhost, hence skipped
# on Windows.
@pytest.mark.skipif(sys.platform.startswith("win"), reason="no ssh server for windows")
def test__socket_alive_true():
    test_host = {"setup_host": "127.0.0.1", "auth_user": "username", "auth_password": "password"}
    conn = SSH2Net(**test_host)
    conn._socket_open()
    assert conn._socket_alive() is True
@pytest.mark.skipif(sys.platform.startswith("win"), reason="no ssh server for windows")
def test__socket_close():
    test_host = {"setup_host": "127.0.0.1", "auth_user": "username", "auth_password": "password"}
    conn = SSH2Net(**test_host)
    conn._socket_open()
    assert conn._socket_alive() is True
    conn._socket_close()
    assert conn._socket_alive() is False
@pytest.mark.skipif(sys.platform.startswith("win"), reason="no ssh server for windows")
def test__socket_open_timeout():
    # 240.0.0.1 is reserved/unroutable, so the connect attempt must time out
    test_host = {
        "setup_host": "240.0.0.1",
        "setup_timeout": 1,
        "auth_user": "username",
        "auth_password": "password",
    }
    conn = SSH2Net(**test_host)
    with pytest.raises(SetupTimeout):
        conn._socket_open()
| StarcoderdataPython |
6442854 | <gh_stars>1-10
# Created by <NAME>, <NAME> Scientific
# import modules
from .lockable import Lockable
class EnumDataType(Lockable):
    """
    The pyeds.EnumDataType class is used to hold all the information about a
    specific enum type. It provide access to details of all defined elements as
    well as conversion function between int and the element itself.
    
    Attributes:
        
        ID: int
            Unique ID of the enum.
        
        TypeName: str
            Magellan type name.
        
        IsFlagsEnum: bool
            Specifies whether the enum is flags enum.
        
        CVReference: str
            Controlled vocabulary reference.
        
        CVTermId: str
            Controlled vocabulary term ID.
        
        CVTermName: str
            Controlled vocabulary term name.
        
        CVTermDefinition: str
            Controlled vocabulary term definition.
        
        Name: str
            Short class name derived from TypeName (set by FromDBData).
        
        Elements: (pyeds.EnumElement,)
            Defined elements.
    """
    def __init__(self):
        """Initializes a new instance of EnumDataType."""
        super().__init__()
        self.ID = None
        self.TypeName = None
        self.IsFlagsEnum = None
        self.CVReference = None
        self.CVTermId = None
        self.CVTermName = None
        self.CVTermDefinition = None
        # CONSISTENCY FIX: declare Name here -- it was previously only ever
        # assigned inside FromDBData, leaving manually created instances
        # without the attribute.
        self.Name = None
        self._elements = {}  # for .Value
    def __str__(self):
        """Gets standard string representation."""
        names = self.TypeName.split(",")
        names = names[0].split(".")
        return names[-1]
    def __repr__(self):
        """Gets debug string representation."""
        return "%s(%s)" % (self.__class__.__name__, self.__str__())
    @property
    def Elements(self):
        """
        Gets all available elements of current enum type.
        
        Returns:
            (pyeds.EnumElement,)
                Available elements.
        """
        return tuple(self._elements.values())
    def AddElement(self, element):
        """
        Adds enum element to current enum type.
        
        This method is not intended to be used by user. It is used automatically
        by the library itself.
        
        Args:
            element: pyeds.EnumElement
                Element to be added.
        """
        self._elements[element.Value] = element
    def GetElement(self, value):
        """
        Gets enum element for given value.
        
        Args:
            value: int
                Enum value for which to get the element definition.
        
        Returns:
            pyeds.EnumElement
                Enum element definition.
        """
        if value not in self._elements:
            message = "'%s' doesn't contain value '%s'!" % (self.TypeName, value)
            raise KeyError(message)
        return self._elements[value]
    def GetElements(self, value):
        """
        Gets enum elements for given value. This is mainly used for flags enums
        to get all elements included in given value.
        
        Args:
            value: int
                Enum value for which to get the element definitions.
        
        Returns:
            (pyeds.EnumElement,)
                Included enum element definitions.
        """
        # init elements
        elements = []
        # direct match
        if value in self._elements:
            elements.append(self._elements[value])
        # zero with no direct match never decomposes into flags
        elif value == 0:
            pass
        # non-flags enums have no partial matches
        elif not self.IsFlagsEnum:
            pass
        # collect every element whose bits are fully contained in value
        else:
            for el in self._elements:
                if el != 0 and (value & el) == el:
                    elements.append(self._elements[el])
        return tuple(elements)
    def Convert(self, value):
        """
        Parses raw DB value into enum element.
        
        Args:
            value: int
                Raw value as stored in DB.
        
        Returns:
            pyeds.EnumValue
                Parsed value. None if the raw value is None.
        """
        # check value
        if value is None:
            return None
        # create value (locked to prevent further modification)
        val = EnumValue(self, value)
        val.Lock()
        return val
    @staticmethod
    def FromDBData(data):
        """
        Creates instance from database data.
        
        This method is not intended to be used by user. It is used automatically
        by the library itself.
        
        Args:
            data: dict
                Database data.
        
        Returns:
            pyeds.EnumDataType
                Enum data type instance.
        """
        enum_data_type = EnumDataType()
        enum_data_type.ID = data['EnumID']
        enum_data_type.TypeName = data['EnumType']
        enum_data_type.IsFlagsEnum = data['IsFlagsEnum']
        enum_data_type.CVReference = data['CVReference']
        enum_data_type.CVTermId = data['CVTermId']
        enum_data_type.CVTermName = data['CVTermName']
        enum_data_type.CVTermDefinition = data['CVTermDefinition']
        # short class name, e.g. "My.Name.Space.MyEnum, Assembly" -> "MyEnum"
        enum_data_type.Name = data['EnumType'].split(',')[0].split('.')[-1]
        return enum_data_type
class EnumElement(Lockable):
    """
    The pyeds.EnumElement class describes one element (named value) of an
    enum data type.
    
    Attributes:
        
        EnumID: int
            Unique ID of parent enum.
        
        Value: int
            Actual value.
        
        DisplayName: str
            Human-readable display name.
        
        Abbreviation:
            Human-readable abbreviation.
        
        CVReference:
            Controlled vocabulary term reference.
        
        CVTermId:
            Controlled vocabulary term ID.
        
        CVTermName:
            Controlled vocabulary term name.
        
        CVTermDefinition:
            Controlled vocabulary term definition.
    """
    def __init__(self):
        """Initializes a new instance of EnumElement."""
        super().__init__()
        # every definition field starts empty; FromDBData fills them in
        for attr_name in ("EnumID", "Value", "DisplayName", "Abbreviation",
                          "CVReference", "CVTermId", "CVTermName",
                          "CVTermDefinition"):
            setattr(self, attr_name, None)
    def __str__(self):
        """Gets standard string representation."""
        return self.DisplayName
    def __repr__(self):
        """Gets debug string representation."""
        return "{0}({1})".format(self.__class__.__name__, self.__str__())
    def __hash__(self):
        """Gets value hash."""
        return hash(self.Value)
    def __eq__(self, other):
        """Equal operator (supports comparison with plain ints)."""
        if self is other:
            return True
        if isinstance(other, int):
            return self.Value == other
        if isinstance(other, EnumElement):
            return self.EnumID == other.EnumID and self.Value == other.Value
        return False
    def __ne__(self, other):
        """Not equal operator."""
        return not self.__eq__(other)
    @staticmethod
    def FromDBData(data):
        """
        Creates instance from database data.
        
        This method is not intended to be used by user. It is used automatically
        by the library itself.
        
        Args:
            data: dict
                Database data.
        
        Returns:
            pyeds.EnumElement
                Enum element instance.
        """
        element = EnumElement()
        # attribute names match the DB column names one-to-one
        for attr_name in ("EnumID", "Value", "DisplayName", "Abbreviation",
                          "CVReference", "CVTermId", "CVTermName",
                          "CVTermDefinition"):
            setattr(element, attr_name, data[attr_name])
        return element
class EnumValue(Lockable):
    """
    The pyeds.EnumValue class wraps a concrete int value of an enum property
    together with its enum definition.
    
    Attributes:
        
        Type: pyeds.EnumDataType
            Enum definition.
        
        Value: int
            Actual value.
        
        IsFlagsEnum: bool
            Specifies whether the enum is flags enum.
        
        Elements: (pyeds.EnumElement,)
            Parsed value.
        
        DisplayName: str
            Human-readable display name.
    """
    def __init__(self, enum_type, value):
        """
        Initializes a new instance of EnumValue.
        
        Args:
            enum_type: pyeds.EnumDataType
                Enum definition.
            
            value: int
                Actual value.
        """
        super().__init__()
        self._type = enum_type
        self._value = int(value)
    def __str__(self):
        """Gets standard string representation."""
        return self.DisplayName
    def __repr__(self):
        """Gets debug string representation."""
        return "{0}({1})".format(self.__class__.__name__, self.__str__())
    def __hash__(self):
        """Gets value hash."""
        return hash(self._value)
    def __eq__(self, other):
        """Equal operator (supports ints, elements and other values)."""
        if self is other:
            return True
        if isinstance(other, int):
            return self._value == other
        if isinstance(other, (EnumElement, EnumValue)):
            return self._value == other.Value
        return False
    def __ne__(self, other):
        """Not equal operator."""
        return not self.__eq__(other)
    def __contains__(self, value):
        """Checks if current value equals or contains (for flags) given value."""
        return self.Contains(value)
    @property
    def Type(self):
        """
        Gets enum definition.
        
        Returns:
            pyeds.EnumDataType
                Enum definition.
        """
        return self._type
    @property
    def Value(self):
        """
        Gets current int value.
        
        Returns:
            int
                Actual value.
        """
        return self._value
    @property
    def IsFlagsEnum(self):
        """Checks whether this value is flags enum."""
        return self._type.IsFlagsEnum
    @property
    def Elements(self):
        """
        Gets current value as pyeds.EnumElements.
        
        Returns:
            (pyeds.EnumElement,)
                Current elements.
        """
        return self._type.GetElements(self._value)
    @property
    def DisplayName(self):
        """Gets current value display name (flag names joined by '|')."""
        return "|".join(e.DisplayName or str(e) for e in self.Elements)
    def Contains(self, value):
        """
        Checks whether current value equals or contains (for flags) given value.
        
        Args:
            value: int, pyeds.EnumValue or pyeds.EnumElement
                Value to check.
        
        Returns:
            bool
                Returns True if current value equals or contains (for flags)
                given value.
        """
        # exact match works for flags and non-flags alike
        if self == value:
            return True
        # partial matches only make sense for flags enums
        if not self.IsFlagsEnum:
            return False
        # normalize to a plain int
        other = value.Value if isinstance(value, (EnumValue, EnumElement)) else value
        # zero never "contains" nor "is contained"
        if 0 in (self._value, other):
            return False
        # all bits of the candidate must be set in the current value
        return self._value & other == other
| StarcoderdataPython |
320346 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Registry of downloadable PANNs audio-classification inference models.
def _panns_entry(url, md5):
    """Build one registry entry; every model shares the same archive layout."""
    return {
        'url': url,
        'md5': md5,
        'cfg_path': 'panns.yaml',
        'model_path': 'inference.pdmodel',
        'params_path': 'inference.pdiparams',
        'label_file': 'audioset_labels.txt',
    }


pretrained_models = {
    "panns_cnn6-32k": _panns_entry(
        'https://paddlespeech.bj.bcebos.com/cls/inference_model/panns_cnn6_static.tar.gz',
        'da087c31046d23281d8ec5188c1967da'),
    "panns_cnn10-32k": _panns_entry(
        'https://paddlespeech.bj.bcebos.com/cls/inference_model/panns_cnn10_static.tar.gz',
        '5460cc6eafbfaf0f261cc75b90284ae1'),
    "panns_cnn14-32k": _panns_entry(
        'https://paddlespeech.bj.bcebos.com/cls/inference_model/panns_cnn14_static.tar.gz',
        'ccc80b194821274da79466862b2ab00f'),
}
| StarcoderdataPython |
3470428 | # Modified version of Transformers compute metrics script
# Source: https://github.com/huggingface/transformers/blob/v2.7.0/src/transformers/data/metrics/__init__.py
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, accuracy_score, jaccard_score
    _has_sklearn = True
except (AttributeError, ImportError):
    # scipy/sklearn are optional; metric helpers are only defined when both
    # import successfully
    _has_sklearn = False


def is_sklearn_available():
    """Return True when the optional scipy/sklearn metric deps imported."""
    return _has_sklearn


if _has_sklearn:

    def acc_and_f1(preds, labels, average="binary"):
        """Return accuracy and F1 score for the given predictions."""
        return {
            "f1": f1_score(y_true=labels, y_pred=preds, average=average),
            "acc": accuracy_score(y_true=labels, y_pred=preds),
        }

    def pearson_and_spearman(preds, labels):
        """Return Pearson and Spearman correlation coefficients."""
        return {
            "pearson": pearsonr(preds, labels)[0],
            "spearman": spearmanr(preds, labels)[0],
        }

    def jaccard_and_f1(preds, labels):
        """Return sample-wise Jaccard plus macro/micro F1 (multi-label)."""
        return {
            "jaccard": jaccard_score(y_true=labels, y_pred=preds, average="samples"),
            "f1-macro": f1_score(y_true=labels, y_pred=preds, average="macro"),
            "f1-micro": f1_score(y_true=labels, y_pred=preds, average="micro"),
        }

    def alue_compute_metrics(task_name, preds, labels):
        """Compute the metric(s) for one ALUE benchmark task.

        Args:
            task_name: one of the ALUE task keys below.
            preds: sequence of predictions, same length as ``labels``.
            labels: sequence of gold labels.

        Returns:
            dict mapping metric names to values.

        Raises:
            ValueError: when preds and labels differ in length.
            KeyError: for an unknown ``task_name``.
        """
        # Raise instead of assert: asserts are stripped under ``python -O``.
        if len(preds) != len(labels):
            raise ValueError(
                "preds and labels must have the same length: %d != %d"
                % (len(preds), len(labels)))
        # Task -> metric function dispatch table (replaces a long elif chain).
        dispatch = {
            "mq2q": acc_and_f1,
            "fid": acc_and_f1,
            "oold": acc_and_f1,
            "ohsd": acc_and_f1,
            "mdd": lambda p, l: acc_and_f1(p, l, average="macro"),
            "xnli": lambda p, l: acc_and_f1(p, l, average="macro"),
            "svreg": pearson_and_spearman,
            "sec": jaccard_and_f1,
        }
        # Unknown tasks raise KeyError(task_name), matching the old behavior.
        return dispatch[task_name](preds, labels)
| StarcoderdataPython |
227499 | <reponame>SELO77/django_template<gh_stars>0
# from core.urls import urlpatterns
# __all__ = [
# 'urlpatterns', ''
# ] | StarcoderdataPython |
385154 | <gh_stars>0
import numpy as np
import os.path, sys
import h5py
from scipy.interpolate import splev, splrep
from randoms import make_random_catalogue,make_Nrandom_catalogue
#############################
#
# Input ARGUMENTS
#
# NOTE: this is a Python 2 script (print statements). It generates random
# galaxy catalogues matching an N(z) distribution, either as one combined
# catalogue or as N_rand separate ones, and mirrors each HDF5 output to ASCII.
narg = len(sys.argv)
# sys.argv includes the script name, hence 7 entries for 6 user arguments
if(narg == 7):
    mag_lim = float(sys.argv[1])   # magnitude limit of the survey selection
    N_rand = int(sys.argv[2])      # multiplier / number of random catalogues
    version = sys.argv[3]
    singlef = sys.argv[4]          # "True"/"true" -> one combined catalogue
    Nz_path = sys.argv[5]          # presumably the N(z) input -- TODO confirm
    outdir = sys.argv[6]
else:
    sys.exit('6 arguments to be passed')
singlefile = False
if(singlef=='True' or singlef=='true'):
    singlefile = True
if (singlefile):
    # Generate ONE hdf5 random catalogue with N_rand*Ngals
    root = "randoms_r%.1f_N%i" %(mag_lim, N_rand)
    file_hdf5 = outdir+root+"_singlefile.hdf5"
    file_ascii = outdir+root+"_singlefile.txt"
    make_random_catalogue(mag_lim, N_rand, version, Nz_path, file_hdf5)
    # Transform into ASCII
    if (os.path.isfile(file_hdf5)):
        f = h5py.File(file_hdf5,'r')
        ra = f["ra"].value
        dec = f["dec"].value
        # NOTE(review): single-file output reads dataset "z_obs" while the
        # multi-file branch below reads "z" -- verify both names are right.
        z = f["z_obs"].value
        f.close()
        #Write the information into an ascii file
        tofile = zip(ra,dec,z)
        with open(file_ascii, 'w') as outf:
            outf.write('# ra,dec,z \n')
            np.savetxt(outf,tofile,fmt=('%.8f'))
        outf.closed
        print 'Output: ',file_ascii
    else:
        print 'NOT found:', file_hdf5
else:
    # Generate N_rand hdf5 random catalogues
    file_name = "randoms_r%.1f_N%i_" %(mag_lim, N_rand)
    root = outdir+file_name
    make_Nrandom_catalogue(mag_lim, N_rand, version, Nz_path, root)
    for i in range(N_rand):
        file_name = root+str(i+1)+".hdf5"
        # Transform into ASCII
        file_ascii = root+str(i+1)+".txt"
        if (os.path.isfile(file_name)):
            f = h5py.File(file_name,'r')
            ra = f["ra"].value
            dec = f["dec"].value
            z = f["z"].value
            f.close()
            #Write the information into an ascii file
            tofile = zip(ra,dec,z)
            with open(file_ascii, 'w') as outf:
                outf.write('# ra,dec,z \n')
                np.savetxt(outf,tofile,fmt=('%.8f'))
            outf.closed
            print 'Output: ',file_ascii
        else:
            print 'NOT found:', file_name
| StarcoderdataPython |
6559432 | <reponame>ritchieyu/NeuroTechX-McGill-2021<filename>software/data_collection_platform/backend/dcp/models/data.py
from dcp.models.utils import auto_str
from dcp.models.collection import CollectionInstance
from dcp import db
@auto_str
class CollectedData(db.Model):
    """One multi-channel sample recorded during a data-collection session.

    Each row stores eight float channel readings -- presumably one per
    OpenBCI/EEG electrode (TODO confirm against the acquisition code) --
    plus the subject's anxiety label and the sample's position within its
    parent collection instance.
    """
    __tablename__ = "collected_data"
    id = db.Column(db.Integer, primary_key=True)
    # eight raw channel readings for this sample
    channel_1 = db.Column(db.Float, nullable=False)
    channel_2 = db.Column(db.Float, nullable=False)
    channel_3 = db.Column(db.Float, nullable=False)
    channel_4 = db.Column(db.Float, nullable=False)
    channel_5 = db.Column(db.Float, nullable=False)
    channel_6 = db.Column(db.Float, nullable=False)
    channel_7 = db.Column(db.Float, nullable=False)
    channel_8 = db.Column(db.Float, nullable=False)
    # label attached to the sample at collection time
    is_subject_anxious = db.Column(db.Boolean, nullable=False)
    # parent recording session this sample belongs to
    collection_instance_id = db.Column(
        db.Integer, db.ForeignKey(CollectionInstance.id), nullable=False)
    # ordinal position of the sample within its collection instance
    order = db.Column(db.Integer, nullable=False)
    def __repr__(self):
        # NOTE(review): dumps the raw instance __dict__, which also exposes
        # SQLAlchemy internals (_sa_instance_state); fine for debugging only.
        return str(self.__dict__)
| StarcoderdataPython |
270427 | import os
import json
import yaml
import copy
from collections import OrderedDict
from catalyst.utils.misc import merge_dicts
def load_ordered_yaml(
    stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict
):
    """
    Loads `yaml` config into OrderedDict

    SECURITY NOTE: the default ``yaml.Loader`` can instantiate arbitrary
    Python objects via YAML tags -- only use this on trusted config files,
    or pass ``Loader=yaml.SafeLoader`` for untrusted input.

    Args:
        stream: opened file with yaml
        Loader: base class for yaml Loader
        object_pairs_hook: type of mapping

    Returns:
        dict: configuration
    """
    # Subclass the given Loader so the mapping constructor below does not
    # mutate the shared Loader class.
    class OrderedLoader(Loader):
        pass

    def construct_mapping(loader, node):
        loader.flatten_mapping(node)
        return object_pairs_hook(loader.construct_pairs(node))

    # Route every plain YAML mapping through object_pairs_hook so key order
    # from the file is preserved.
    OrderedLoader.add_constructor(
        yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, construct_mapping
    )
    return yaml.load(stream, OrderedLoader)
def save_config(config, logdir: str) -> None:
    """
    Saves config into JSON in logdir

    The directory is created if it does not exist; the file is always
    named ``config.json``.

    Args:
        config: dictionary with config
        logdir (str): path to directory to save JSON
    """
    os.makedirs(logdir, exist_ok=True)
    # os.path.join handles trailing separators correctly, unlike the old
    # "{}/config.json".format(logdir)
    with open(os.path.join(logdir, "config.json"), "w") as fout:
        json.dump(config, fout, indent=2)
def parse_config_args(*, config, args, unknown_args):
    """Apply unrecognized CLI arguments of the form ``--name=value:type``.

    Slash-separated names (``--a/b=1:int``) are written into nested dicts
    inside ``config``; plain names are set as attributes on ``args``.

    Returns:
        tuple: (config, args) with the overrides applied in place.
    """
    def _convert(value):
        # "VALUE:TYPE" -> VALUE converted to TYPE ("str" passes through).
        value_content, value_type = value.rsplit(":", 1)
        if value_type == "str":
            return value_content
        # WARNING: eval of user-supplied text; kept for backward
        # compatibility — only run on trusted command lines.
        return eval("%s(%s)" % (value_type, value_content))

    for arg in unknown_args:
        arg_name, value = arg.split("=")
        arg_name = arg_name[2:]  # strip the leading "--"
        arg_value = _convert(value)
        if "/" in arg_name:
            # Nested config key: create intermediate dicts as needed.
            arg_names = arg_name.split("/")
            config_ = config
            for arg_name in arg_names[:-1]:
                if arg_name not in config_:
                    config_[arg_name] = {}
                config_ = config_[arg_name]
            config_[arg_names[-1]] = arg_value
        else:
            args.__setattr__(arg_name, arg_value)
    return config, args
def parse_args_uargs(args, unknown_args, dump_config=False):
    """
    Function for parsing configuration files

    Args:
        args: recognized arguments
        unknown_args: unrecognized arguments
        dump_config: if True, saves config to args.logdir

    Returns:
        tuple: updated arguments, dict with config
    """
    args_ = copy.deepcopy(args)  # keep the caller's namespace untouched
    # load params: later files override earlier ones via merge_dicts
    config = {}
    for config_path in args_.config.split(","):
        with open(config_path, "r") as fin:
            if config_path.endswith("json"):
                config_ = json.load(fin, object_pairs_hook=OrderedDict)
            elif config_path.endswith("yml"):
                config_ = load_ordered_yaml(fin)
            else:
                raise Exception("Unknown file format")
        config = merge_dicts(config, config_)
    # Overlay ``--key=value:type`` overrides from the command line.
    config, args_ = parse_config_args(
        config=config, args=args_, unknown_args=unknown_args
    )
    # hack with argparse in config: the "args" section of the config file
    # supplies values for CLI options the user did not pass explicitly.
    config_args = config.get("args", None)
    if config_args is not None:
        for key, value in config_args.items():
            arg_value = getattr(args_, key, None)
            if arg_value is None:
                arg_value = value
            setattr(args_, key, arg_value)
    if dump_config and getattr(args_, "logdir", None) is not None:
        save_config(config=config, logdir=args_.logdir)
    return args_, config
| StarcoderdataPython |
220806 | import click
from flask import Flask
from dadd import server
from dadd.master.utils import update_config
# Set up the app object before importing the handlers to avoid a
# circular import
app = Flask(__name__)
# Default settings; may be overridden by ctx.obj in run() below.
app.config.from_object('dadd.master.settings')
import dadd.master.handlers # noqa
import dadd.master.api.procs # noqa
import dadd.master.api.hosts # noqa
from dadd.master.admin import admin
@click.command(name='master')
@click.pass_context
def run(ctx):
    """Start the dadd master web service."""
    # ctx.obj carries config overrides injected by the parent CLI group.
    if ctx.obj:
        app.config.update(ctx.obj)
    update_config(app)
    admin(app)  # mount the admin UI onto the app
    server.mount(app, '/')
    server.run(app.config)
| StarcoderdataPython |
9797213 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 8 10:50:50 2017
@author: atholon
"""
import time
import calendar
import string
import operator
import json
import csv
import tweepy
from Storage import Storage
from FrenchStemmer import FrenchStemmer
from creds import get_tweepy_api
# Tweepy documentation
# http://docs.tweepy.org/en/v3.5.0/
# https://media.readthedocs.org/pdf/tweepy/latest/tweepy.pdf
def search_tweet_backward_single_request(api, output_storage, max_id=None):
    """issue a single status search request from a given tweet id going backward.
    results are appended to the output file.

    Return values :
    - number of status found
    - id of the oldest status
    """
    search_params = dict()
    search_params['lang'] = 'fr'
    search_params['count'] = 100  # maximum page size for the search API
    # Geographic filter "lat,lon,radius" (an area in southern France).
    search_params['geocode'] = '43.834686,6.126635,200km'
    if max_id is not None:
        search_params['max_id'] = max_id  # as string
    search_results = api.search(**search_params)
    print(len(search_results), search_results.max_id, search_results.since_id)
    if len(search_results) > 0:
        print(search_results[0])
    output_data_file, remaining_size = output_storage.get_current_data_file()
    # Append one raw-JSON status per line to the current storage file.
    with output_data_file.open('a') as tweet_file:
        for tweet in search_results:
            s = json.dumps(tweet._json)
            print(s, file=tweet_file)
    return len(search_results), search_results.max_id
def search_tweet_backward_all_requests(api, output_storage, max_id=None):
    """Consume the current rate-limit window with backward search requests.

    Returns the ``max_id`` to resume from and the number of seconds left
    until the rate-limit window resets.
    """
    rate_limit_status = api.rate_limit_status()
    search_limits = rate_limit_status['resources']['search']['/search/tweets']
    # reset time is epoch time
    reset_time = search_limits['reset']
    # Keep a safety margin of 5 requests below the advertised quota.
    remaining_searches = search_limits['remaining'] - 5
    next_max_id = ''
    while remaining_searches > 0 and next_max_id is not None:
        nb_results, next_max_id = search_tweet_backward_single_request(api, output_storage, max_id)
        max_id = next_max_id
        now = calendar.timegm(time.gmtime())
        if now < reset_time:
            # Spread the remaining requests evenly over the window.
            remaining_time = reset_time - now
            delay = remaining_time / remaining_searches
            time.sleep(delay)
        remaining_searches -= 1
    now = calendar.timegm(time.gmtime())
    if now < reset_time:
        remaining_time = reset_time - now
    else:
        remaining_time = 0
    return max_id, remaining_time
def search_tweet_backward_rate_limited(api, output_storage, duration, max_id=None):
    """Search backward repeatedly for ``duration`` seconds, sleeping through
    each rate-limit reset between bursts of requests."""
    next_max_id = ''
    stop_timegm = calendar.timegm(time.gmtime()) + duration
    while calendar.timegm(time.gmtime()) < stop_timegm and next_max_id is not None:
        next_max_id, remaining_time = search_tweet_backward_all_requests(api, output_storage, max_id)
        max_id = next_max_id
        time.sleep(remaining_time + 1)  # +1s to be safely past the reset
def drop_retweet(api, input_storage, output_storage):
    """Copy statuses from input to output storage, skipping retweets.

    Output files are rotated once the size budget reported by
    ``get_current_data_file`` is used up.
    """
    remaining_size = 0
    output_data_file = None
    for input_data_file in input_storage.get_all_data_files():
        with input_data_file.open() as input_tweet_file:
            for line in input_tweet_file:
                obj = json.loads(line)
                status = tweepy.ModelFactory.status.parse(api, obj)
                # drop retweet: retweets carry a 'retweeted_status' attribute
                if hasattr(status, 'retweeted_status'):
                    pass
                else:
                    if output_data_file is None:
                        # Lazily open the next output file.
                        # NOTE(review): mode='w' truncates if the storage
                        # hands back an existing file — confirm intended.
                        output_data_file, remaining_size = output_storage.get_current_data_file()
                        output_file = output_data_file.open(mode='w')
                    remaining_size -= output_file.write(line)
                    if remaining_size < 0:
                        output_file.close()
                        output_data_file = None
    if output_data_file is not None:
        output_file.close()
def get_translate_table():
    """Build a str.translate table that normalizes Latin-1 text.

    Uppercase ASCII and accented Latin-1 letters (0xC0-0xDF) map to their
    lowercase forms; the multiplication/division signs and every
    non-letter, non-digit character become a space.  Digits and lowercase
    letters pass through unchanged; the characters at 0xD0, 0xDE and 0xDF
    are intentionally left unmapped.
    """
    mapping = {}
    # ASCII letters: A-Z -> a-z.
    mapping.update(zip(string.ascii_uppercase, string.ascii_lowercase))
    # Accented uppercase block of Latin-1.
    for code in range(0xC0, 0xE0):
        if code in (0xD0, 0xDE, 0xDF):
            continue  # no mapping: keep these characters as-is
        if code == 0xD7:
            mapping[chr(code)] = ' '  # multiplication sign
        else:
            mapping[chr(code)] = chr(code + 0x20)  # lowercase variant
    # Everything that is neither a letter nor a digit becomes a space.
    non_letters = [
        *range(0x00, 0x30), *range(0x3A, 0x41),
        *range(0x5B, 0x61), *range(0x7B, 0xC0),
        0xF7,  # division sign
    ]
    for code in non_letters:
        mapping[chr(code)] = ' '
    return str.maketrans(mapping)
def get_core_text(status):
    """Return the status text with media, urls and user mentions blanked
    out, then collapsed to single spaces.

    Hashtags, symbols and polls are kept verbatim; an unexpected entity
    type is reported on stdout and otherwise ignored.
    """
    chars = list(status.text)
    kept = ('hashtags', 'symbols', 'polls')
    blanked = ('media', 'urls', 'user_mentions')
    for entity, values in status.entities.items():
        if entity in kept:
            continue
        if entity in blanked:
            # Overwrite each entity's span with spaces of equal length so
            # the remaining indices stay valid.
            for value in values:
                start, end = value['indices'][0], value['indices'][1]
                chars[start:end] = ' ' * (end - start)
        else:
            print("warning: unexpected entity {} = {}".format(entity, values))
    # Rebuild the string and squeeze runs of whitespace.
    return ' '.join(''.join(chars).split())
def extract_core_text(api, input_storage, output_storage):
    """For every stored status, write a {'id_str', 'core_text'} JSON line,
    where core_text is the user-written text (entities stripped)."""
    remaining_size = 0
    output_data_file = None
    core_status = dict()  # reused buffer for the record being written
    for input_data_file in input_storage.get_all_data_files():
        with input_data_file.open() as input_tweet_file:
            for line in input_tweet_file:
                obj = json.loads(line)
                status = tweepy.ModelFactory.status.parse(api, obj)
                # get the text written by the user (excluding user mentions and urls)
                core_text = get_core_text(status)
                core_status['id_str'] = status.id_str
                core_status['core_text'] = core_text
                if output_data_file is None:
                    # Lazily open the next output file once the previous
                    # one exhausted its size budget.
                    output_data_file, remaining_size = output_storage.get_current_data_file()
                    output_file = output_data_file.open(mode='w')
                remaining_size -= output_file.write(json.dumps(core_status))
                remaining_size -= output_file.write('\n')
                if remaining_size < 0:
                    output_file.close()
                    output_data_file = None
    if output_data_file is not None:
        output_file.close()
def extract_user(api, input_storage, output_storage):
    """Write the raw JSON of each stored status' author, one user per line."""
    remaining_size = 0
    output_data_file = None
    for input_data_file in input_storage.get_all_data_files():
        with input_data_file.open() as input_tweet_file:
            for line in input_tweet_file:
                obj = json.loads(line)
                status = tweepy.ModelFactory.status.parse(api, obj)
                user = status.user
                if output_data_file is None:
                    # Lazily open the next output file once the previous
                    # one exhausted its size budget.
                    output_data_file, remaining_size = output_storage.get_current_data_file()
                    output_file = output_data_file.open(mode='w')
                remaining_size -= output_file.write(json.dumps(user._json))
                remaining_size -= output_file.write('\n')
                if remaining_size < 0:
                    output_file.close()
                    output_data_file = None
    if output_data_file is not None:
        output_file.close()
def extract_user_urls(api, input_storage, output_storage):
    """Condense stored user records to their identity and public URLs.

    For each input line (a raw Twitter user JSON object) a compact record
    is written with: id_str, name, screen_name, expanded_url, and the
    profile image / banner URLs ('' when the user has none).
    """
    remaining_size = 0
    output_data_file = None
    user = dict()  # reused buffer for the record being written
    for input_data_file in input_storage.get_all_data_files():
        with input_data_file.open() as input_tweet_file:
            for line in input_tweet_file:
                obj = json.loads(line)
                user.clear()
                user['id_str'] = obj['id_str']
                user['name'] = obj['name']
                user['screen_name'] = obj['screen_name']
                if obj['url'] is not None:
                    # The user's website, un-shortened.
                    user['expanded_url'] = obj['entities']['url']['urls'][0]['expanded_url']
                else:
                    user['expanded_url'] = ''
                if obj['default_profile_image']:
                    user['profile_image_url'] = ''
                else:
                    user['profile_image_url'] = obj['profile_image_url']
                if 'profile_banner_url' in obj and obj['profile_banner_url'] is not None:
                    user['profile_banner_url'] = obj['profile_banner_url']
                else:
                    # BUG FIX: previously cleared profile_image_url here,
                    # leaving profile_banner_url unset for users without one.
                    user['profile_banner_url'] = ''
                if output_data_file is None:
                    output_data_file, remaining_size = output_storage.get_current_data_file()
                    output_file = output_data_file.open(mode='w')
                remaining_size -= output_file.write(json.dumps(user))
                remaining_size -= output_file.write('\n')
                if remaining_size < 0:
                    output_file.close()
                    output_data_file = None
    if output_data_file is not None:
        output_file.close()
def get_vocabulary(input_storage):
    """Count word occurrences over every stored core-text record.

    Returns:
        list of (word, count) tuples sorted by count, descending.
    """
    vocabulary = dict()
    # translate table to normalize text (only keeps numbers and characters; lowering the later)
    translate_table = get_translate_table()
    # french stemmer (stemming currently disabled)
    # french_stemmer = FrenchStemmer()
    for input_data_file in input_storage.get_all_data_files():
        with input_data_file.open() as input_tweet_file:
            for line in input_tweet_file:
                core_status = json.loads(line)
                core_text = core_status['core_text']
                # get rid of all non latin-1 characters
                core_text = core_text.encode("latin_1", "replace").decode("latin_1")
                # normalize text
                core_text = core_text.translate(translate_table)
                words = core_text.split()
                # words = french_stemmer.get_stems(core_text)
                for word in words:
                    if word in vocabulary:
                        vocabulary[word] += 1
                    else:
                        vocabulary[word] = 1
    vocabulary_sorted = sorted(vocabulary.items(), key=operator.itemgetter(1), reverse=True)
    return vocabulary_sorted
def export_to_cvs(input_storage):
    """Export all core-text records to 'tweets.csv' as (id, text) rows.

    NOTE(review): the function name has a typo (cvs/csv); kept as-is to
    avoid breaking callers.
    """
    csv_encoding = "latin_1"
    with open('tweets.csv', 'w', newline='', encoding=csv_encoding) as csv_file:
        csv_writer = csv.writer(csv_file)
        for input_data_file in input_storage.get_all_data_files():
            with input_data_file.open() as input_tweet_file:
                for line in input_tweet_file:
                    core_status = json.loads(line)
                    # Replace characters that cannot be encoded in latin-1.
                    tweet_text = core_status['core_text'].encode(csv_encoding, "replace").decode(csv_encoding)
                    csv_writer.writerow([core_status['id_str'], tweet_text])
def main():
    """Manual pipeline driver.

    Each ``if`` flag below enables one processing stage; flip the flags
    and re-run to execute a different stage.
    """
    api = get_tweepy_api()
    if False:
        # Stage: drop retweets from the raw capture.
        input_storage = Storage('dataset-01-01', 'tweet-01-01')
        output_storage = Storage('dataset-01-02', 'tweet-01-02')
        # process the entire data set, make sure output is empty first
        if output_storage.is_empty():
            drop_retweet(api, input_storage, output_storage)
        else:
            print("ERROR output data set not empty {}".format(output_storage.data_set_dir))
    if False:
        # Stage: extract full user objects from the raw capture.
        input_storage = Storage('dataset-01-01', 'tweet-01-01')
        output_storage = Storage('user-01-01', 'user-01-01')
        # process the entire data set, make sure output is empty first
        if output_storage.is_empty():
            extract_user(api, input_storage, output_storage)
        else:
            print("ERROR output data set not empty {}".format(output_storage.data_set_dir))
    if True:
        # Stage (currently enabled): condense users to their URLs.
        input_storage = Storage('user-01-01', 'user-01-01')
        output_storage = Storage('user-01-02', 'user-01-02')
        # process the entire data set, make sure output is empty first
        if output_storage.is_empty():
            extract_user_urls(api, input_storage, output_storage)
        else:
            print("ERROR output data set not empty {}".format(output_storage.data_set_dir))
    if False:
        # Stage: strip entities to obtain the user-written core text.
        input_storage = Storage('dataset-01-02', 'tweet-01-02')
        output_storage = Storage('dataset-01-03', 'tweet-01-03')
        # process the entire data set, make sure output is empty first
        if output_storage.is_empty():
            extract_core_text(api, input_storage, output_storage)
        else:
            print("ERROR output data set not empty {}".format(output_storage.data_set_dir))
    if False:
        # Stage: build and dump a word-frequency vocabulary.
        input_storage = Storage('dataset-01-03', 'tweet-01-03')
        vocabulary_sorted = get_vocabulary(input_storage)
        with open('vocabulary.txt', 'w', encoding="latin_1") as output_file:
            for word, nb in vocabulary_sorted:
                print('{},{}'.format(word, nb), file=output_file)
    if False:
        # Stage: export the core texts to CSV.
        input_storage = Storage('dataset-01-03', 'tweet-01-03')
        export_to_cvs(input_storage)
    if False:
        # Stage: collect tweets backward from a known tweet id.
        output_storage = Storage('test-set', 'test')
        duration = 60
        max_id = 944653855807156223
        # output_storage = Storage('dataset-01-01', 'tweet-01-01')
        # duration = 9 * 4 * 900
        search_tweet_backward_rate_limited(api, output_storage, duration=duration, max_id=max_id)
    if False:
        pass
        # post retrieval ingest:
        storage = Storage('dataset-01-01', 'tweet-01-01')
        storage.add_file_to_storage('C:\\DSTI\\CA Project Technical\\data\\twitter\\dataset-01-01\\tweet-big-010.txt')
| StarcoderdataPython |
1739260 | <filename>fake_cifar10.py<gh_stars>100-1000
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script to generate random data of the same format as CIFAR-10.
Creates TFRecord files with the same fields as
tensorflow/models/slim/datasets/downlod_and_convert_cifar10.py
for use in unit tests of the code that handles this data.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import StringIO
import numpy as np
from PIL import Image
import tensorflow as tf
from tensorflow_models.slim.datasets import dataset_utils
tf.app.flags.DEFINE_string('out_directory', 'testdata/cifar10',
'Output directory for the test data.')
FLAGS = tf.app.flags.FLAGS
_IMAGE_SIZE = 32
def create_fake_data(split_name, num_examples=4):
    """Writes the fake TFRecords for one split of the dataset.

    Args:
        split_name: One of 'train' or 'test'.
        num_examples: The number of random examples to generate and write to the
            output TFRecord file.
    """
    output_file = os.path.join(FLAGS.out_directory,
                               'cifar10_%s.tfrecord' % split_name)
    writer = tf.python_io.TFRecordWriter(output_file)
    for _ in range(num_examples):
        # Random 32x32 RGB image.
        image = np.random.randint(256, size=(_IMAGE_SIZE, _IMAGE_SIZE, 3),
                                  dtype=np.uint8)
        image = Image.fromarray(image)
        # Encode the image as PNG in memory.
        # NOTE(review): StringIO is Python 2 only; io.BytesIO would be
        # needed to run this under Python 3.
        image_buffer = StringIO.StringIO()
        image.save(image_buffer, format='png')
        image_buffer = image_buffer.getvalue()
        label = 0  # all fake examples share the same class
        example = dataset_utils.image_to_tfexample(
            image_buffer, 'png', _IMAGE_SIZE, _IMAGE_SIZE, label)
        writer.write(example.SerializeToString())
    writer.close()
def main(_):
    """Generate fake records for both dataset splits."""
    for split in ('train', 'test'):
        create_fake_data(split)
if __name__ == '__main__':
tf.app.run()
| StarcoderdataPython |
5072881 | from flask import Flask, jsonify, render_template, request
from flask_mongoengine import MongoEngine, MongoEngineSessionInterface, DoesNotExist
from flask_debugtoolbar import DebugToolbarExtension
app = Flask(__name__)
# MongoEngine connection settings (database name only; default host/port).
app.config['MONGODB_SETTINGS'] = {
    'db': 'house-hunt',
}
# Debug-toolbar panels, including the MongoDB query panel.
# NOTE(review): declared as a set literal, so panel order is arbitrary;
# the toolbar normally takes an ordered sequence — confirm intended.
app.config['DEBUG_TB_PANELS'] = {
    "flask_debugtoolbar.panels.versions.VersionDebugPanel",
    "flask_debugtoolbar.panels.timer.TimerDebugPanel",
    "flask_debugtoolbar.panels.headers.HeaderDebugPanel",
    "flask_debugtoolbar.panels.request_vars.RequestVarsDebugPanel",
    "flask_debugtoolbar.panels.template.TemplateDebugPanel",
    "flask_debugtoolbar.panels.logger.LoggingPanel",
    "flask_mongoengine.panels.MongoDebugPanel",
}
db = MongoEngine(app)
# Store Flask sessions in MongoDB as well.
app.session_interface = MongoEngineSessionInterface(db)
toolbar = DebugToolbarExtension(app)
class ForSale(db.Document):
    """A real-estate listing stored as a MongoDB document."""
    baths = db.FloatField()  # may be fractional (e.g. 2.5)
    beds = db.IntField()
    city = db.StringField()
    created = db.DateField()
    garage = db.FloatField()
    href = db.StringField()  # link to the listing page
    last_update_date = db.DateField()
    # NOTE(review): LongField for lat/lon drops the fractional part of
    # coordinates — confirm against the ingestion code.
    lat = db.LongField()
    line = db.StringField()  # presumably the street-address line
    list_date = db.DateField()
    listing_id = db.StringField()
    list_price = db.LongField()
    lon = db.LongField()
    name = db.StringField()
    permalink = db.StringField()
    postal_code = db.StringField()
    property_id = db.StringField()
    sold_price = db.LongField()
    sqft = db.IntField()
    state_code = db.StringField()
    status = db.StringField()
    stories = db.IntField()
    sub_type = db.StringField()
    type = db.StringField()  # shadows the builtin name within the class body
    year_built = db.IntField()
class Coding(db.Document):
    """A manual classification ("coding") attached to one listing,
    keyed by listing_id (see the /api/v1.0/codings routes)."""
    listing_id = db.StringField()
    value = db.StringField()
# NOTE(review): the view name shadows the builtin `list`; harmless here
# since Flask only uses it as the endpoint name.
@app.route('/', methods=['GET'])
def list():
    """Render the main listing page."""
    return render_template('list.html')
@app.route('/api/v1.0/listings/', methods=['GET'])
def listings():
    """Return every ForSale listing in the standard JSON envelope."""
    listings = ForSale.objects().all()
    return jsonify({
        "status": 200,
        "data": listings,
        "errors": [],
    }), 200
@app.route('/api/v1.0/codings/', methods=['GET'])
def codings():
    """Return every Coding in the standard JSON envelope."""
    codings = Coding.objects().all()
    return jsonify({
        "status": 200,
        "data": codings,
        "errors": [],
    }), 200
@app.route('/api/v1.0/codings/<string:listing_id>', methods=['POST'])
def coding_create(listing_id):
    """Create a coding for a listing; errors if one already exists."""
    content = request.get_json(silent=True)
    # Guard against a missing or malformed JSON body: previously this
    # raised TypeError/KeyError on content['value'] (an opaque 500).
    if not content or 'value' not in content:
        return jsonify({
            "status": 400,
            "data": None,
            "errors": ["Request body must be JSON with a 'value' field"],
        }), 400
    value = content['value']
    try:
        # Reject duplicates: only one coding per listing is allowed.
        Coding.objects.get(listing_id=listing_id)
        return jsonify({
            "status": 500,
            "data": None,
            "errors": ["Coding already exists for listing"],
        }), 500
    except DoesNotExist:
        pass
    coding = Coding(listing_id=listing_id, value=value)
    coding.save()
    return jsonify({
        "status": 200,
        "data": {
            "listing_id": listing_id,
            "value": value,
        },
        "errors": [],
    }), 200
@app.route('/api/v1.0/codings/<string:listing_id>', methods=['GET'])
def coding_read(listing_id):
    """Fetch the coding for a listing, or 404 if none exists."""
    try:
        coding = Coding.objects.get(listing_id=listing_id)
        return jsonify({
            "status": 200,
            "data": coding,
            "errors": [],
        }), 200
    except DoesNotExist:
        return jsonify({
            "status": 404,
            "data": None,
            "errors": ["Coding not found for listing"],
        }), 404
@app.route('/api/v1.0/codings/<string:listing_id>', methods=['PUT'])
def coding_update(listing_id):
    """Update the value of an existing coding, or 404 if none exists."""
    content = request.get_json(silent=True)
    # Guard against a missing or malformed JSON body: previously this
    # raised TypeError/KeyError on content['value'] (an opaque 500).
    if not content or 'value' not in content:
        return jsonify({
            "status": 400,
            "data": None,
            "errors": ["Request body must be JSON with a 'value' field"],
        }), 400
    value = content['value']
    try:
        coding = Coding.objects.get(listing_id=listing_id)
        coding.update(
            set__value=value,
        )
        # NOTE(review): `coding` is not reloaded after update(), so the
        # response may echo the pre-update value — confirm intended.
        return jsonify({
            "status": 200,
            "data": coding,
            "errors": [],
        }), 200
    except DoesNotExist:
        return jsonify({
            "status": 404,
            "data": None,
            "errors": ["Coding not found for listing"],
        }), 404
if __name__ == '__main__':
app.run(debug=True)
| StarcoderdataPython |
5177279 | <reponame>mchoi8739/incubator-mxnet<gh_stars>100-1000
# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""AdaGrad optimizer"""
from __future__ import absolute_import
from ..ndarray import (zeros, clip, sqrt, square)
from ..ndarray import sparse
from .optimizer import Optimizer, register
__all__ = ['AdaGrad']
@register
class AdaGrad(Optimizer):
    """AdaGrad optimizer.

    This class implements the AdaGrad optimizer described in *Adaptive Subgradient
    Methods for Online Learning and Stochastic Optimization*, and available at
    http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf.

    This optimizer updates each weight by::

        grad = clip(grad * rescale_grad, clip_gradient) + wd * weight
        history += square(grad)
        weight -= learning_rate * grad / (sqrt(history) + epsilon)

    This optimizer accepts the following parameters in addition to those accepted
    by :class:`.Optimizer`.

    See Also
    ----------
    :meth:`mxnet.ndarray.sparse.adagrad_update`.

    Parameters
    ----------
    learning_rate : float, default 0.01
        The initial learning rate. If None, the optimization will use the
        learning rate from ``lr_scheduler``. If not None, it will overwrite
        the learning rate in ``lr_scheduler``. If None and ``lr_scheduler``
        is also None, then it will be set to 0.01 by default.
    epsilon : float, default 1e-6
        Small value to avoid division by 0.
    use_fused_step : bool, default True
        Whether or not to use fused kernels for optimizer.
        When use_fused_step=False or grad is not sparse, step is called,
        otherwise, fused_step is called.
    """
    def __init__(self, learning_rate=0.01, epsilon=1e-6, use_fused_step=True, **kwargs):
        super(AdaGrad, self).__init__(learning_rate=learning_rate,
                                      use_fused_step=use_fused_step,
                                      **kwargs)
        self.epsilon = epsilon

    def create_state(self, index, weight):
        # Accumulated sum of squared gradients; same storage type as the
        # weight so sparse weights get a sparse history.
        return zeros(weight.shape, weight.context, stype=weight.stype)  # history

    def step(self, indices, weights, grads, states):
        """Perform an optimization step using gradients and states.

        Parameters
        ----------
        indices : list of int
            List of unique indices of the parameters into the individual learning rates
            and weight decays. Learning rates and weight decay may be set via `set_lr_mult()`
            and `set_wd_mult()`, respectively.
        weights : list of NDArray
            List of parameters to be updated.
        grads : list of NDArray
            List of gradients of the objective with respect to this parameter.
        states : List of any obj
            List of state returned by `create_state()`.
        """
        for index, weight, grad, state in zip(indices, weights, grads, states):
            self._update_count(index)
            lr = self._get_lr(index)
            wd = self._get_wd(index)
            # preprocess grad: rescale, optionally clip, add weight decay
            grad *= self.rescale_grad
            if self.clip_gradient is not None:
                grad = clip(grad, - self.clip_gradient, self.clip_gradient)
            grad += wd * weight
            # update history (sum of squared gradients)
            history = state
            history[:] += square(grad)
            d = grad / (sqrt(history) + self.epsilon)
            # update weight
            weight[:] -= lr * d

    def fused_step(self, indices, weights, grads, states):
        """Perform a fused optimization step using gradients and states.
        Fused kernel is used for update.

        Parameters
        ----------
        indices : list of int
            List of unique indices of the parameters into the individual learning rates
            and weight decays. Learning rates and weight decay may be set via `set_lr_mult()`
            and `set_wd_mult()`, respectively.
        weights : list of NDArray
            List of parameters to be updated.
        grads : list of NDArray
            List of gradients of the objective with respect to this parameter.
        states : List of any obj
            List of state returned by `create_state()`.
        """
        for index, weight, grad, state in zip(indices, weights, grads, states):
            is_sparse = grad.stype == 'row_sparse'
            if is_sparse:
                self._update_count(index)
                lr = self._get_lr(index)
                wd = self._get_wd(index)
                kwargs = {'epsilon': self.epsilon, 'rescale_grad': self.rescale_grad}
                if self.clip_gradient:
                    kwargs['clip_gradient'] = self.clip_gradient
                history = state
                # When grad is sparse, update weight with fused kernel
                sparse.adagrad_update(weight, grad, history, out=weight, lr=lr, wd=wd, **kwargs)
            else:
                # When the grad is not sparse, the func step is called to update weight and state
                self.step([index], [weight], [grad], [state])
6640019 | <gh_stars>10-100
# Created by MechAviv
# Quest ID :: 17602
# [Commerci Republic] Neinheart's Request
# Flat quest script: `sm` (the script manager) is injected by the server.
sm.setNpcOverrideBoxChat(1540451)  # speak through NPC 1540451 (Neinheart)
sm.sendNext("According to intelligence reports, the people of Commerci are fiercely independent. The Empress means well, but in their eyes, any outreach might be thought an attempt to draw them under our influence. This would destroy any possibility of a relationship with Commerci, and I can't allow that.")
sm.setNpcOverrideBoxChat(1540451)
sm.sendSay("To ensure there are no misunderstandings, we must approach this matter with the utmost care. We should gain their trust before we relay the Empress' proposal. This is part of why the Empress has chosen you rather than a royal messenger. Please do not fail the Empress.")
sm.setNpcOverrideBoxChat(1540451)
# Accepting starts the quest and warps the player to Lith Harbor.
if sm.sendAskYesNo("I've arranged your passage to Commerci. Seek out an explorer named #b'Parbell'#k in Lith Harbor. Let me know when you are ready to depart for Lith Harbor.\r\n#b(You will be moved to Lith Harbor if you accept.)"):
    sm.startQuest(17602)
    sm.warp(104000000, 5)  # map 104000000 (Lith Harbor), portal 5
else:
    sm.setNpcOverrideBoxChat(1540451)
    sm.sendSayOkay("We haven't got all day. Tell me when you're ready.")
11342190 | <reponame>VitorNoVictor/Hebrew-Tokenizer
#!/usr/bin/env python
# encoding: utf-8
import sys
import os
# Make the package importable when this file is run directly from the
# repository root; the print is a debug aid showing the path being added.
print(os.path.abspath(os.getcwd()))
sys.path.append(os.path.abspath(os.getcwd()))
from hebrew_tokenizer.tokenizer import tokenizer
def tokenize(text, with_whitespaces=False):
    """Tokenize Hebrew text with the shared module-level tokenizer.

    Args:
        text: string to split into tokens.
        with_whitespaces: if True, whitespace tokens are emitted as well.

    Returns:
        whatever the underlying tokenizer's ``tokenize`` yields.
    """
    # NOTE(review): mutates shared tokenizer state — not thread-safe.
    tokenizer.with_whitespaces = with_whitespaces
    return tokenizer.tokenize(text)
if __name__ == '__main__':
    # Quick manual smoke test of the tokenizer.
    sent = 'aspirin aaaaaa aaaaaaaaaaa —– dipyridamole'
    sent_tokens = tokenize(sent)
    for st in sent_tokens:
        print(st)
| StarcoderdataPython |
5054539 | # Import the abstract class that is the blueprint for implementing AUV dynamics
from .abstract_auv_dynamics import AbstractAUVDynamics
# Import the class that can implement a neutral buoyancy vehicle in an efficient manner
from .neutral_buoyancy_auv_dynamics import NeutralBuoyancyAUVDynamics
# Import the class where the vehicle is approximated by a sphere (for buoyancy purposes)
from .sphere_auv_dynamics import SphereAUVDynamics
# Import the dynamics of a quadrotor
from .quadrotor_dynamics import QuadrotorDynamics
| StarcoderdataPython |
6487415 | <gh_stars>0
import os
import uuid
import hashlib
import boto3
from typing import List
def event_body(event: dict) -> dict:
    """Return the event's 'body' payload when present and non-empty;
    otherwise return the event itself unchanged."""
    payload = event.get('body', {})
    return payload if payload else event
def create_conversation_id(users: List[str]) -> str:
    """Derive a deterministic, order-independent UUID string for a group
    of users: the MD5 digest of the sorted, space-joined user ids.

    Returns the string 'Failed' if the digest cannot be turned into a
    UUID (an MD5 hex digest is always a valid seed, so this is defensive).
    """
    print('users for conversation_id:')
    print(users)
    digest = hashlib.md5(' '.join(sorted(users)).encode('utf-8')).hexdigest()
    conversation_id = 'Failed'
    try:
        conversation_id = str(uuid.UUID(digest))
    except Exception as err:
        print('Error:')
        print(err)
    return conversation_id
# AWS clients are created at import time so warm Lambda invocations reuse
# the same connections.
table_name = os.environ.get('DYNAMO_TABLE')
dynamo_client = boto3.client('dynamodb', region_name='us-east-2')
dynamo_resource = boto3.resource('dynamodb', region_name='us-east-2')
dynamo_table = dynamo_resource.Table(table_name)
dynamo_paginator = dynamo_client.get_paginator('scan')
1663808 | from django import forms
from .models import Student
from django.core.validators import MaxValueValidator, MinValueValidator
class StudentForm(forms.ModelForm):
    """Form for creating/editing a Student (year restricted to 1-7)."""
    first_name = forms.CharField(max_length=100)
    last_name = forms.CharField(max_length=100)
    house = forms.CharField(
        max_length=2,
        widget=forms.Select(choices=Student.HOUSE_CHOICES)
    )
    # BUG FIX: Django form fields take ``initial``, not ``default``
    # (passing default= raises TypeError when the class is defined).
    year = forms.IntegerField(
        initial=1,
        validators=[
            MaxValueValidator(7),
            MinValueValidator(1),
        ]
    )

    class Meta:
        model = Student
        fields = ['first_name', 'last_name', 'house', 'year']
3265907 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os
import sys
import django

# env: DJANGO_SETTINGS_MODULE must be configured BEFORE django.setup()
# and before importing any models, otherwise setup/imports fail with
# ImproperlyConfigured.  (Previously setup and the model imports ran first.)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djpersonnel.settings.shell')
django.setup()

from djpersonnel.requisition.models import Operation as Requisition
from djpersonnel.transaction.models import Operation as Transaction
# set up command-line options
desc = """
Accepts as input Transaction or Requistion.
"""
# NOTE(review): "Requistion" above is a typo in the user-visible help text.
# RawTextHelpFormatter method allows for new lines in help text
parser = argparse.ArgumentParser(
    description=desc, formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
    '-m',
    '--model',
    required=True,
    help="Transaction or Requisition",
    dest='model',
)
parser.add_argument(
    '--test',
    action='store_true',
    help="Dry run?",
    dest='test',
)
def main():
    """Various reports on database activity."""
    # `model` and `test` are module globals set in the __main__ block below.
    if test:
        print("Model = {0}".format(model))
    if model == 'transaction':
        actions = Transaction.objects.all()
    elif model == 'requisition':
        actions = Requisition.objects.all()
    # NOTE(review): `actions` is unbound if model is neither value;
    # argparse does not currently restrict --model to these choices.
    count = actions.count()
    print(count)
if __name__ == '__main__':
    args = parser.parse_args()
    model = args.model.lower()  # normalize for the comparisons in main()
    test = args.test
    if test:
        print(args)
    sys.exit(main())  # main() returns None, so the exit status is 0
| StarcoderdataPython |
6521691 | <reponame>T-Mac/Handle<filename>lib/network/__init__.py
from server import Server
from client import Client
from loader import loadNetwork | StarcoderdataPython |
# Index of the camera device to open (0 is usually the built-in camera).
CAMERA_INDEX = 1
# change this value according to segmented mode
COLOR_BINARIZATION_THRESHOLD = 70
# Expected item size (presumably pixels — confirm against the detector).
ITEM_DIMENSION = 80
# Reference items with their mean (R, G, B) colors used for matching.
# Entries commented out below are disabled reference items.
ITEMS = [#{'name': 'carton',
         # 'color': (20.0, 19.0, 16.0)},
         #{'name': 'blue',
         # 'color': (69.0, 108.0, 149.0)},
         #{'name': 'orange',
         # 'color': (163.0, 97.0, 59.0)},
         {'name': 'pencil',
          'color': (160.0, 126.0, 8.0)},
         {'name': 'stickypad',
          'color': (33.0, 113.0, 178.0)},
         {'name': '<NAME>',
          'color': (140.0, 40.0, 37.0)},
         {'name': 'tape dispenser',
          'color': (28.0, 28.0, 28.0)}
         ]
1654747 | import os
from musurgia.pdf.text import PageText
from musurgia.pdf.positioned import Positioned
from prettytable import PrettyTable
from quicktions import Fraction
from musurgia.fractaltree.fractalmusicsquare import Module
from musurgia.timeline.timeline import TimeLine
path = os.path.abspath(__file__).split('.')[0]
class ModuleTimeLine(object):
    """Placement of a fractal-music Module on a timeline.

    ``start_time``, ``module`` and ``instruments`` are write-once
    properties: assigning any of them a second (truthy) time raises an
    Exception.
    """

    def __init__(self, start_time, module, instruments, text=None, number=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._start_time = None
        self.start_time = start_time
        self._instruments = None
        self.instruments = instruments
        self._module = None
        self.module = module
        self.text = text      # optional label
        self.number = number  # optional ordinal shown with the module

    @property
    def module(self):
        """The Module placed on the timeline (write-once)."""
        return self._module

    @module.setter
    def module(self, val):
        if self._module:
            raise Exception('module can only be set once')
        if not isinstance(val, Module):
            # BUG FIX: message previously read "not<type>" (missing space).
            raise TypeError('module.value must be of type Module not {}'.format(type(val)))
        self._module = val

    @property
    def start_time(self):
        """Timeline position where the module starts (write-once)."""
        return self._start_time

    @start_time.setter
    def start_time(self, val):
        # NOTE(review): truthiness check means a stored value of 0 could
        # be overwritten; kept as-is to preserve existing behavior.
        if self._start_time:
            raise Exception('start_time can only be set once')
        self._start_time = val

    @property
    def instruments(self):
        """The instruments playing this module (write-once)."""
        return self._instruments

    @instruments.setter
    def instruments(self, val):
        if self._instruments:
            raise Exception('instruments can only be set once')
        self._instruments = val

    def get_end_time(self):
        """Return start_time plus the module's rounded duration, as an int."""
        return int(round(self.start_time + round(self.module.duration)))
class Vertical(Positioned):
    """A vertical marker line drawn across a timeline at a ruler position,
    with an optional number label above it."""

    def __init__(self, parent, position, number=None, line_type='dashed', thickness=1, mode='start', *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.position = position    # index into parent.ruler.line_segments
        self.parent = parent
        # 'start': left edge of segment `position`;
        # 'end': right edge of segment `position - 1`.
        self.mode = mode
        self.number = number        # label drawn above the line
        self._line_type = None
        self.line_type = line_type
        self.thickness = thickness  # number of parallel strokes drawn

    @property
    def line_type(self):
        """Either 'dashed' or 'normal'."""
        return self._line_type

    @line_type.setter
    def line_type(self, val):
        permitted = ['dashed', 'normal']
        if val not in permitted:
            raise ValueError('line_type.value {} must be in {}'.format(val, permitted))
        self._line_type = val

    def draw(self, pdf):
        """Draw the vertical line (and its number label) on the pdf."""
        if self.mode == 'start':
            line = self.parent.ruler.line_segments[self.position]
            x = line.x1
        elif self.mode == 'end':
            line = self.parent.ruler.line_segments[self.position - 1]
            x = line.x2
        else:
            raise ValueError()
        y = line.y1
        p = line.page
        # Move the pdf cursor to the anchor point on the correct page.
        pdf.x = x
        pdf.y = y
        pdf.page = p
        page_text = PageText(self.number, relative_y=-7, font_size=10, font_weight='bold')
        page_text.draw(pdf)
        # Emulate thickness by drawing parallel strokes 0.1 units apart.
        for i in range(self.thickness):
            grid = 0.1
            y1 = y + self.relative_y
            y2 = y
            if self.line_type == 'dashed':
                pdf.dashed_line(x1=x + i * grid, x2=x + i * grid, y1=y1, y2=y2 + self.parent.get_height() - 7,
                                space_length=3)
            else:
                pdf.line(x1=x + i * grid, x2=x + i * grid, y1=y1, y2=y2 + self.parent.get_height() - 7)
class ScoreTimeLine(TimeLine):
    """A TimeLine that renders a square's modules for a set of instruments.

    ModuleTimeLines are registered first; ``draw`` then creates one voice per
    instrument, places each module's segment with its name/tempo/duration
    labels, and draws numbered vertical markers at the module boundaries.
    """

    def __init__(self, instruments, units_per_line=30, show_interval=10, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._instruments = None
        self.instruments = instruments
        self._module_time_lines = []
        self.units_per_line = units_per_line
        self._verticals = []
        self.show_interval = show_interval

    @property
    def instruments(self):
        # The instruments that may appear on this score, in voice order.
        return self._instruments

    @instruments.setter
    def instruments(self, val):
        self._instruments = val

    @property
    def module_time_lines(self):
        # ModuleTimeLines registered via add_module_time_line (read-only view).
        return self._module_time_lines

    def add_module_time_line(self, module_time_line):
        """Register a ModuleTimeLine after checking its instruments are known.

        Raises TypeError for a wrong type and ValueError for an instrument
        that is not part of this score.
        """
        if not isinstance(module_time_line, ModuleTimeLine):
            raise TypeError()
        for instrument in module_time_line.instruments:
            if instrument not in self.instruments:
                raise ValueError('instrument {} not in {}'.format(instrument, self.instruments))
        # module_time_line.number = len(self._module_time_lines) + 1
        self._module_time_lines.append(module_time_line)

    def get_duration(self):
        """Return the total duration: the latest end time over all module time lines."""
        return int(max([mtl.get_end_time() for mtl in self.module_time_lines]))

    def add_vertical(self, position, mode='start'):
        """Create, store and return a Vertical marker at ruler ``position``."""
        v = Vertical(parent=self, position=position, number=len(self._verticals) + 1, mode=mode)
        self._verticals.append(v)
        return v

    def apply_module_time_lines(self):
        """Create voices and labelled voice segments from the module time lines."""
        self.length = self.get_duration()
        for instrument in self.instruments:
            voice = self.add_voice(name=instrument.abbreviation)
            voice.instrument = instrument
        for module_time_line in self.module_time_lines:
            voices = [v for v in self.voices if v.instrument in module_time_line.instruments]
            for voice in voices:
                module = module_time_line.module
                segment = voice.add_voice_segment(module_time_line.start_time, module_time_line.get_end_time())
                segment.lines[0].add_text_label(module.name, font_size=8, font_weight='bold', relative_x=1)
                # tempo/duration labels prefer their own line; short segments
                # without enough lines fall back to small offset labels on line 0
                try:
                    segment.lines[2].add_text_label('t=' + str(module.tempo), font_size=8)
                except IndexError:
                    segment.lines[0].add_text_label('t=' + str(module.tempo), font_size=4, y_offset=-4.5,
                                                    x_offset=4)
                try:
                    segment.lines[4].add_text_label('d=' + str(round(module.duration)) + '"', font_size=8)
                except IndexError:
                    segment.lines[0].add_text_label('d=' + str(round(module.duration)) + '"', font_size=4,
                                                    x_offset=8, y_offset=-4.5)
                if module_time_line.text:
                    segment.lines[0].add_text_label(module_time_line.text, font_size=4, y_offset=-4.5, x_offset=1)
            self.add_vertical(position=module_time_line.start_time)
            self.add_vertical(position=module_time_line.get_end_time(), mode='end')

    def apply_verticals(self, pdf):
        """Draw the collected verticals in position order, numbering unique positions.

        Verticals sharing a position are drawn once; the very last vertical is
        rendered thicker and solid to mark the end of the piece.
        """
        self._verticals.sort(key=lambda v: v.position)
        number = 1
        position = None
        for vertical in self._verticals:
            if vertical.position != position:
                if vertical == self._verticals[-1]:
                    vertical.thickness = 4
                    vertical.relative_y = -3
                    vertical.line_type = 'normal'
                vertical.number = number
                vertical.draw(pdf)
                position = vertical.position
                number += 1

    def draw(self, pdf):
        """Lay out the modules, scale units to the printable width and draw everything."""
        self.apply_module_time_lines()
        self.ruler.show_interval = self.show_interval
        printable = (pdf.w - pdf.r_margin - pdf.l_margin)
        self.unit = Fraction(Fraction(printable), Fraction(self.units_per_line))
        super().draw(pdf)
        self.apply_verticals(pdf)

    def draw_square(self, square_path, square):
        """Write a plain-text table of module numbers per instrument/row/column
        of ``square`` to ``square_path``."""
        x = PrettyTable(hrules=1)
        x.set_style(11)
        column_numbers = [str(number) for number in range(1, square.side_size + 1)]
        x.field_names = ["instrument", 'row', *column_numbers]
        for instrument in self.instruments:
            mtls = [mtl for mtl in self.module_time_lines if instrument in mtl.instruments]
            for row_number in range(1, square.side_size + 1):
                row_mtls = [mtl for mtl in mtls if mtl.module.row_number == row_number]
                dict_modules = {}
                for mtl in row_mtls:
                    module = mtl.module
                    # renumbered squares expose new_column_number; fall back to
                    # the original column_number otherwise
                    try:
                        column_number = module.new_column_number
                    except AttributeError:
                        column_number = module.column_number
                    try:
                        dict_modules[column_number].append(mtl.number)
                    except KeyError:
                        dict_modules[column_number] = [mtl.number]
                row_infos = square.side_size * ['']
                for key in dict_modules.keys():
                    row_infos[key - 1] = str(dict_modules[key]).strip('[]')
                x.add_row([instrument.abbreviation, row_number, *row_infos])
        # open(..., 'w') creates the file itself, so the former
        # os.system('touch ' + square_path) shell call was redundant (and unsafe
        # for paths containing shell metacharacters); 'with' also guarantees
        # the handle is closed even if get_string() raises.
        with open(square_path, 'w') as file:
            file.write(x.get_string())
| StarcoderdataPython |
3562203 | import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from cmfsapy.dimension.fsa import fsa
from cmfsapy.data import gen_ncube
import os
# FSA calibration: median intrinsic-dimension estimates over n-cube samples.
save_path = "./"
ns = [2500]
colors = ['tab:blue', 'tab:orange', 'tab:green']
realiz_id = 100
my_d = np.arange(2, 81)
myk = 20
box = None

for l, n in enumerate(ns):
    # one row per intrinsic dimension d, one entry per random realization
    per_dimension = []
    for d in tqdm(my_d):
        estimates = [fsa(gen_ncube(n, d, seed), myk, boxsize=box)[0]
                     for seed in range(realiz_id)]
        per_dimension.append(estimates)
    # median over the realization axis -> shape (len(my_d), myk + 1)
    dim_range = np.nanmedian(np.array(per_dimension), axis=-2)
    out_name = "calibration_data_krange{}_n{}_d{}".format(myk, n, d)
    np.savez(save_path + out_name,
             d=my_d.reshape([-1, 1, 1]),
             k=np.arange(myk + 1),
             dims=dim_range)
    print(dim_range.shape)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.