| id (string, lengths 1-7) | text (string, lengths 6-1.03M) | dataset_id (string, 1 class) |
|---|---|---|
1727431 | <gh_stars>0
import os
import hashlib
from PIL import Image
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from resizeimage import resizeimage
class Size(models.Model):
CROP = 'CRP'
COVER = 'CVR'
CONTAIN = 'CNT'
HEIGHT = 'HGT'
WIDTH = 'WDT'
THUMBNAIL = 'TMB'
METHODS = (
(CROP, _(u'Crop')),
(COVER, _(u'Cover')),
(CONTAIN, _('Contain')),
(HEIGHT, _(u'Height')),
(WIDTH, _(u'Width')),
(THUMBNAIL, _(u'Thumbnail')),
)
name = models.CharField(max_length=255)
method = models.CharField(max_length=3, choices=METHODS, default=WIDTH, verbose_name=_(u'Method'))
height = models.IntegerField(default=0, verbose_name=_(u'Height'))
width = models.IntegerField(default=0, verbose_name=_(u'Width'))
def __unicode__(self):
return self.name
class Picture(models.Model):
hash = models.CharField(max_length=32, primary_key=True)
created_at = models.DateField(auto_now=True, verbose_name=_(u'Created at'))
name = models.CharField(max_length=255, verbose_name=_(u'Name'))
owner = models.ForeignKey(User, verbose_name=_(u'Owner'))
original_file = models.FileField(verbose_name=_(u'File'), upload_to=settings.UPLOADS_DIR)
@property
def filename(self):
return os.path.splitext(os.path.basename(self.original_file.path))[0]
@property
def extension(self):
return os.path.splitext(os.path.basename(self.original_file.path))[1]
def save(self, *args, **kwargs):
self.name = os.path.basename(self.original_file.path)
self.hash = hashlib.md5(self.original_file.read()).hexdigest()
super().save(*args, **kwargs)
@staticmethod
def get_file_path():
try:
return settings.UPLOADS_DIR
except AttributeError as e:
pass
try:
return settings.BASE_DIR
except AttributeError as e:
pass
return ''
@staticmethod
def get_file_and_extension(path):
basename = os.path.basename(path)
name = os.path.splitext(basename)[0]
extension = os.path.splitext(basename)[1]
return {'name': name, 'extension': extension}
@staticmethod
def create_picture_by_image(image, user):
name = os.path.basename(image.fp.name)
pictures = Picture.objects.filter(name__startswith=os.path.splitext(name)[0])
if pictures.count() > 0:
file_attrs = Picture.get_file_and_extension(name)
name = file_attrs['name'] + '_' + str(pictures.count() + 1) + file_attrs['extension']
image_path = os.path.join(Picture.get_file_path(), name)
image.save(image_path)
picture = Picture.objects.create(name=name,
owner=user,
original_file=image_path)
picture.resize_image()
return picture
def resize_image(self):
image = Image.open(self.original_file)
sizes = Size.objects.all()
for size in sizes:
try:
if size.method == Size.WIDTH:
resized_image = resizeimage.resize_width(image=image, size=size.width)
else:
continue
except Exception:
continue
name = os.path.basename(self.original_file.path)
file_attrs = Picture.get_file_and_extension(name)
resized_path = os.path.join(Picture.get_file_path(), file_attrs['name'] + '_' +
size.name + file_attrs['extension'])
resized_image.save(resized_path)
PictureFile.objects.create(picture=self, file=resized_path, size=size)
@staticmethod
def resize_all_images():
for picture in Picture.objects.all():
picture.remove_picture_files()
picture.resize_image()
def remove_picture_files(self):
for picture in PictureFile.objects.filter(picture=self):
os.remove(picture.file.path)
picture.delete()
class PictureFile(models.Model):
picture = models.ForeignKey(Picture, related_name='picturefiles', verbose_name=_(u'Picture'))
file = models.FileField(verbose_name=_(u'File'))
size = models.ForeignKey(Size, verbose_name=_(u'Size'))
@property
def size_name(self):
return self.size.name
@property
def filename(self):
return os.path.splitext(os.path.basename(self.file.path))[0]
@property
def extension(self):
return os.path.splitext(os.path.basename(self.file.path))[1]
@property
def width(self):
return self.size.width
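# --- Hedged usage sketch (not part of the original module) ---
# Illustrates the intended flow pieced together from the models above: open an
# image with PIL and let create_picture_by_image store it and build the
# configured sizes. Requires a configured Django project (settings.UPLOADS_DIR,
# Size rows, a User); `some_user` and the file path are placeholders.
#
#   from PIL import Image
#   image = Image.open('/path/to/photo.jpg')
#   picture = Picture.create_picture_by_image(image, some_user)
#   for pf in picture.picturefiles.all():
#       print(pf.size_name, pf.file.path)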
| StarcoderdataPython |
3289417 | <reponame>theGloves/ContractTool
import app.CNF.LiteralPackage as lp
class Clause:
def __init__(self, s):
self.literalSet = []
LiteralStr = s.split("||")
if len(LiteralStr) == 0:
return
for i in range(len(LiteralStr)):
if "Term" not in LiteralStr[i]:
continue
literal = lp.Literal(LiteralStr[i])
self.literalSet.append(literal)
def isExist(self, literalStr):
for l in self.literalSet:
if literalStr == l.content:
return True
return False
def deleteLiteral(self, literalStr):
for l in self.literalSet:
if literalStr == l.content:
self.literalSet.remove(l)
def getValue(self, valueMap):
if len(self.literalSet) == 0:
return True
for l in self.literalSet:
if valueMap[l.content] == True:
return True
return False
def print(self):
s = "("
for l in self.literalSet:
s = s+l.content+" || "
s = s[:-4]
s += ")"
return s
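# --- Hedged usage sketch (not part of the original module) ---
# Assumes app.CNF.LiteralPackage is importable and that Literal keeps the raw
# literal string in its `content` attribute; both are assumptions from context.
if __name__ == '__main__':
    clause = Clause("Term_a||Term_b")
    print(clause.print())                                       # "(Term_a || Term_b)"
    # getValue returns True if any literal in the clause is True in the map
    print(clause.getValue({"Term_a": False, "Term_b": True}))   # True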
| StarcoderdataPython |
3347076 | <filename>the_rack/core.py
__all__ = ['Rack']
class CheckMixin:
def check_key(self, key):
if not isinstance(key, str):
raise TypeError('Key must be a string.')
def check_func_callable(self, func):
if not callable(func):
raise Exception('Func must be a callable.')
def check_func_exists(self, key):
if not self.exists(key):
raise IndexError('Func does not exist.')
def check_last(self, last):
if not isinstance(last, str):
raise Exception('No key given and no function added yet.')
class Rack(CheckMixin):
def __init__(self):
self.funcs = {}
self.cache = {}
self.factories = []
self.last = []
def factory(self, key=None):
# if the given key isn't a string
if not isinstance(key, str):
# and last isn't one either, exception
self.check_last(self.last)
# if last is a string, use it as the key
key = self.last
self.factories.append(key)
def set(self, key, func, factory=False):
self.check_key(key)
self.check_func_callable(func)
self.funcs[key] = func
self.last = key
if factory == True:
self.factory()
return self
def exists(self, key):
return key in self.funcs
def delete(self, key):
try:
del self.funcs[key]
# if the funcs del fail, it can't be in factories
self.factories.remove(key)
# if the factories del fail, it can't be in cache
del self.cache[key]
except KeyError as e:
pass
except ValueError as e:
pass
return self
def extend(self, key, func):
self.check_func_exists(key)
self.check_func_callable(func)
previous = self.funcs[key]
# remove the need to pass get in the extension lambda
prev = lambda: previous(self.get)
# call the callable with the get and the previous
self.funcs[key] = lambda get: func(get, prev)
# delete the cache
if key in self.cache:
del self.cache[key]
def get(self, key):
self.check_key(key)
self.check_func_exists(key)
# if cached, use it
if key in self.cache:
return self.cache[key]
res = self.funcs[key](self.get)
# if not factory, store the result in cache
if key not in self.factories:
self.cache[key] = res
return res
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
if self.exists(key):
return self.extend(key, value)
return self.set(key, value)
def __delitem__(self, key):
return self.delete(key)
def __contains__(self, item):
return self.exists(item)
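# --- Hedged usage sketch (not part of the original module) ---
# Minimal demonstration of Rack as a tiny service container; the 'config' and
# 'db' keys below are illustrative names only.
if __name__ == '__main__':
    rack = Rack()
    rack['config'] = lambda get: {'dsn': 'sqlite://'}            # cached after first get
    rack['db'] = lambda get: 'connection to ' + get('config')['dsn']
    rack.factory('db')                                           # rebuilt on every get
    print(rack['db'])        # connection to sqlite://
    print('db' in rack)      # True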
| StarcoderdataPython |
104645 | import binascii
import os
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.core.mail import send_mail as django_send_mail
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from project.apps.user.managers import UserManager, ActionTokenManager
from rest_framework.authtoken.models import Token
from django.utils import timezone
from django.conf import settings
class User(AbstractUser):
"""Abstraction of the base User model. Needed to extend in the future."""
username = None
email = models.EmailField(_('email address'), unique=True)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
objects = UserManager()
def __str__(self):
return self.email
@staticmethod
def create(email, password, validated_data):
user, created = User.objects.get_or_create(
email=email, defaults=validated_data)
# Hash the user's password
user.set_password(password)
# Put user inactive by default
user.is_active = False
user.save()
# Create an ActivationToken to activate user in the future
ActionToken.objects.create(
user=user,
type='account_activation',
)
return user
def send_confirm_signup_email(self):
if settings.LOCAL_SETTINGS['EMAIL_SERVICE'] is True:
activation_url = ActionToken.generate_activation_url(self)
merge_data = {
"ACTIVATION_URL": activation_url,
"FIRST_NAME": self.first_name,
"LAST_NAME": self.last_name,
}
plain_msg = render_to_string(
"activation.txt",
merge_data
)
msg_html = render_to_string(
"activation.html",
merge_data
)
django_send_mail(
"Confirmation de la création de votre compte",
plain_msg,
settings.DEFAULT_FROM_EMAIL,
[self.email],
html_message=msg_html,
)
def send_reset_password(self):
forgot_password_url = ActionToken.generate_reset_password_url(self)
merge_data = {
"RESET_PASSWORD_URL": forgot_password_url
}
plain_msg = render_to_string(
"reset_password.txt",
merge_data
)
msg_html = render_to_string(
"reset_password.html",
merge_data
)
django_send_mail(
"Reset password",
plain_msg,
settings.DEFAULT_FROM_EMAIL,
[self.email],
html_message=msg_html,
)
def get_temporary_token(self):
token, _created = TemporaryToken.objects.get_or_create(
user=self
)
if token.expired:
# If the token is expired, generate a new one.
token.delete()
token = TemporaryToken.objects.create(
user=self)
return token
class TemporaryToken(Token):
"""Subclass of Token to add an expiration time."""
class Meta:
verbose_name = _("Temporary token")
verbose_name_plural = _("Temporary tokens")
expires = models.DateTimeField(
verbose_name=_("Expiration date"),
blank=True,
)
def save(self, *args, **kwargs):
if not self.pk:
self.expires = timezone.now() + timezone.timedelta(
minutes=settings.REST_FRAMEWORK_TEMPORARY_TOKENS['MINUTES']
)
super(TemporaryToken, self).save(*args, **kwargs)
@property
def expired(self):
"""Returns a boolean indicating token expiration."""
return self.expires <= timezone.now()
def expire(self):
"""Expires a token by setting its expiration date to now."""
self.expires = timezone.now()
self.save()
class ActionToken(models.Model):
"""
Class of Token to allow User to do some action.
Generally, the token is sent by email and serves
as a "right" to do a specific action.
"""
ACTIONS_TYPE = [
('account_activation', _('Account activation')),
('password_change', _('Password change')),
]
key = models.CharField(
verbose_name="Key",
max_length=40,
primary_key=True
)
type = models.CharField(
verbose_name='Type of action',
max_length=100,
choices=ACTIONS_TYPE,
null=False,
)
user = models.ForeignKey(
User,
related_name='activation_token',
on_delete=models.CASCADE,
verbose_name="User"
)
created = models.DateTimeField(
verbose_name="Creation date",
auto_now_add=True
)
expires = models.DateTimeField(
verbose_name="Expiration date",
blank=True,
)
objects = ActionTokenManager()
def save(self, *args, **kwargs):
if not self.key:
self.key = self.generate_key()
self.expires = timezone.now() + timezone.timedelta(
minutes=settings.ACTIVATION_TOKENS['MINUTES']
)
return super(ActionToken, self).save(*args, **kwargs)
@staticmethod
def generate_key():
"""Generate a new key"""
return binascii.hexlify(os.urandom(20)).decode()
@property
def expired(self):
"""Returns a boolean indicating token expiration."""
return self.expires <= timezone.now()
def expire(self):
"""Expires a token by setting its expiration date to now."""
self.expires = timezone.now()
self.save()
def __str__(self):
return self.key
@staticmethod
def generate_activation_url(user: User):
# Get the token of the saved user and send it with an email
activate_token = ActionToken.objects.get(
user=user,
type='account_activation',
)
return activate_token.get_url()
@staticmethod
def generate_reset_password_url(user: User):
# remove old tokens to change password
tokens = ActionToken.objects.filter(
type='password_change',
user=user,
)
for token in tokens:
token.expire()
# Get the token of the saved user and send it with an email
activate_token = ActionToken.objects.get(
user=user,
type='password_change',
)
return activate_token.get_url()
def get_url(self):
FRONTEND_SETTINGS = settings.LOCAL_SETTINGS[
'FRONTEND_INTEGRATION'
]
# Setup the url for the activation button in the email
activation_url = FRONTEND_SETTINGS['ACTIVATION_URL'].replace(
"{{token}}",
self.key
)
return activation_url
@staticmethod
def get_password_change_token(token_key):
return ActionToken.objects.get(
key=token_key,
type='password_change',
expires__gt=timezone.now(),
)
class Address(models.Model):
"""Abstract model for address"""
country = models.CharField(
max_length=45,
blank=False,
verbose_name=_("Country"),
)
state_province = models.CharField(
max_length=55,
blank=False,
verbose_name=_("State/Province"),
)
city = models.CharField(
max_length=50,
blank=False,
verbose_name=_("City"),
)
address_line1 = models.CharField(
max_length=45,
verbose_name=_("Address line 1"),
)
address_line2 = models.CharField(
max_length=45,
blank=True,
default='',
verbose_name=_("Address line 2"),
)
postal_code = models.CharField(
max_length=10,
verbose_name=_("Postal code"),
)
latitude = models.FloatField(
blank=True,
null=True,
verbose_name=_("Latitude"),
)
longitude = models.FloatField(
blank=True,
null=True,
verbose_name=_("Longitude"),
)
timezone = models.CharField(
blank=True,
null=True,
max_length=100,
verbose_name=_("Timezone"),
)
class Meta:
abstract = True
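# --- Hedged usage sketch (not part of the original module) ---
# The account-activation flow implied by the models above; values are
# placeholders and this is not meant to run outside a configured project.
#
#   user = User.create('alice@example.com', 's3cret', {'first_name': 'Alice'})
#   user.send_confirm_signup_email()                  # emails the activation URL
#   token = ActionToken.objects.get(user=user, type='account_activation')
#   if not token.expired:
#       user.is_active = True
#       user.save()
#       token.expire()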
| StarcoderdataPython |
1640778 | <gh_stars>1-10
import datetime
import csv
import six
from six import StringIO
from icalendar import Calendar, Event
from pyramid.renderers import JSON
from ode.models import icalendar_to_model_keys
from ode.deserializers import data_list_to_dict
from ode.models import Event as EventModel, Location
class IcalRenderer(object):
def __init__(self, info):
pass
def __call__(self, value, system):
request = system.get('request')
if request is not None:
response = request.response
response.content_type = 'text/calendar'
calendar = Calendar()
for item in value['collection']['items']:
self.add_event(calendar, data_list_to_dict(item['data']))
return calendar.to_ical()
@staticmethod
def add_event(calendar, event_data):
event = Event()
for icalendar_key, model_attribute in icalendar_to_model_keys.items():
if model_attribute in event_data:
if event_data[model_attribute] is not None:
event.add(icalendar_key, event_data[model_attribute])
calendar.add_component(event)
class NoContentRenderer(object):
def __init__(self, info):
pass
def __call__(self, value, system):
request = system.get('request')
if request is not None:
response = request.response
response.content_type = None
response.status_code = 204
return None
class CsvRenderer(object):
MEDIA_ATTRIBUTES = ['images', 'sounds', 'videos']
def __init__(self, info):
pass
@classmethod
def format_media(cls, items):
parts = [
u'{} ({})'.format(item['url'], item['license'])
for item in items
]
return cls.format_list(parts)
@staticmethod
def format_list(parts):
result = u', '.join(parts)
if six.PY2:
result = result.encode('utf-8')
return result
@classmethod
def format_value(cls, key, value):
if six.PY2 and isinstance(value, six.string_types):
return value.encode('utf-8')
elif isinstance(value, list):
if key in cls.MEDIA_ATTRIBUTES:
return cls.format_media(value)
else:
return cls.format_list(value)
elif isinstance(value, datetime.datetime):
return value.isoformat()
else:
return value
@classmethod
def build_csv(cls, items):
fieldnames = [column.name for column in EventModel.__mapper__.columns]
fieldnames += ['location_' + column.name
for column in Location.__mapper__.columns
if column.name != 'event_id']
fieldnames += ['tags', 'categories'] + cls.MEDIA_ATTRIBUTES
output = StringIO()
writer = csv.DictWriter(output, fieldnames=fieldnames)
writer.writeheader()
for item in items:
data_dict = data_list_to_dict(item['data'])
for key, value in data_dict.items():
data_dict[key] = cls.format_value(key, value)
writer.writerow(data_dict)
return output.getvalue()
def __call__(self, value, system):
request = system.get('request')
if request is not None:
response = request.response
response.content_type = 'text/csv'
items = value['collection']['items']
if items:
return self.build_csv(items)
else:
return u''
def datetime_adapter(obj, request):
return obj.isoformat()
JsonRenderer = JSON()
JsonRenderer.add_adapter(datetime.datetime, datetime_adapter)
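# --- Hedged wiring sketch (not part of the original module) ---
# How these renderers would typically be registered with Pyramid; the renderer
# names and the includeme hook are assumptions, not taken from this project.
def includeme(config):
    config.add_renderer('ical', IcalRenderer)
    config.add_renderer('csv', CsvRenderer)
    config.add_renderer('no_content', NoContentRenderer)
    config.add_renderer('json', JsonRenderer)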
| StarcoderdataPython |
107740 | <filename>vtune/run_th.py<gh_stars>1-10
import demo
import time
import threading
def slow_encode(input):
return demo.Encoder(input).process_slow()
def fast_encode(input):
return demo.Encoder(input).process_fast()
if __name__ == '__main__':
input = 'a' * 10000000 # 10 million 'a' characters
th1 = threading.Thread(target=slow_encode, args=(input,))
th2 = threading.Thread(target=fast_encode, args=(input,))
th1.start()
th2.start()
th1.join()
th2.join()
| StarcoderdataPython |
3250104 | <reponame>henriktao/pulumi-azure
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['LoadBalancerArgs', 'LoadBalancer']
@pulumi.input_type
class LoadBalancerArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
frontend_ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input['LoadBalancerFrontendIpConfigurationArgs']]]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[str]] = None,
sku_tier: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a LoadBalancer resource.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group in which to create the Load Balancer.
:param pulumi.Input[Sequence[pulumi.Input['LoadBalancerFrontendIpConfigurationArgs']]] frontend_ip_configurations: One or multiple `frontend_ip_configuration` blocks as documented below.
:param pulumi.Input[str] location: Specifies the supported Azure Region where the Load Balancer should be created.
:param pulumi.Input[str] name: Specifies the name of the Load Balancer.
:param pulumi.Input[str] sku: The SKU of the Azure Load Balancer. Accepted values are `Basic`, `Standard` and `Gateway`. Defaults to `Basic`.
:param pulumi.Input[str] sku_tier: `sku_tier` - (Optional) The Sku Tier of this Load Balancer. Possible values are `Global` and `Regional`. Defaults to `Regional`. Changing this forces a new resource to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if frontend_ip_configurations is not None:
pulumi.set(__self__, "frontend_ip_configurations", frontend_ip_configurations)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if sku is not None:
pulumi.set(__self__, "sku", sku)
if sku_tier is not None:
pulumi.set(__self__, "sku_tier", sku_tier)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the Resource Group in which to create the Load Balancer.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="frontendIpConfigurations")
def frontend_ip_configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LoadBalancerFrontendIpConfigurationArgs']]]]:
"""
One or multiple `frontend_ip_configuration` blocks as documented below.
"""
return pulumi.get(self, "frontend_ip_configurations")
@frontend_ip_configurations.setter
def frontend_ip_configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LoadBalancerFrontendIpConfigurationArgs']]]]):
pulumi.set(self, "frontend_ip_configurations", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the supported Azure Region where the Load Balancer should be created.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the Load Balancer.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def sku(self) -> Optional[pulumi.Input[str]]:
"""
The SKU of the Azure Load Balancer. Accepted values are `Basic`, `Standard` and `Gateway`. Defaults to `Basic`.
"""
return pulumi.get(self, "sku")
@sku.setter
def sku(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sku", value)
@property
@pulumi.getter(name="skuTier")
def sku_tier(self) -> Optional[pulumi.Input[str]]:
"""
`sku_tier` - (Optional) The Sku Tier of this Load Balancer. Possible values are `Global` and `Regional`. Defaults to `Regional`. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "sku_tier")
@sku_tier.setter
def sku_tier(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sku_tier", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@pulumi.input_type
class _LoadBalancerState:
def __init__(__self__, *,
frontend_ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input['LoadBalancerFrontendIpConfigurationArgs']]]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
private_ip_address: Optional[pulumi.Input[str]] = None,
private_ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[str]] = None,
sku_tier: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering LoadBalancer resources.
:param pulumi.Input[Sequence[pulumi.Input['LoadBalancerFrontendIpConfigurationArgs']]] frontend_ip_configurations: One or multiple `frontend_ip_configuration` blocks as documented below.
:param pulumi.Input[str] location: Specifies the supported Azure Region where the Load Balancer should be created.
:param pulumi.Input[str] name: Specifies the name of the Load Balancer.
:param pulumi.Input[str] private_ip_address: Private IP Address to assign to the Load Balancer. The last one and first four IPs in any range are reserved and cannot be manually assigned.
:param pulumi.Input[Sequence[pulumi.Input[str]]] private_ip_addresses: The list of private IP addresses assigned to the load balancer in `frontend_ip_configuration` blocks, if any.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group in which to create the Load Balancer.
:param pulumi.Input[str] sku: The SKU of the Azure Load Balancer. Accepted values are `Basic`, `Standard` and `Gateway`. Defaults to `Basic`.
:param pulumi.Input[str] sku_tier: `sku_tier` - (Optional) The Sku Tier of this Load Balancer. Possible values are `Global` and `Regional`. Defaults to `Regional`. Changing this forces a new resource to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
if frontend_ip_configurations is not None:
pulumi.set(__self__, "frontend_ip_configurations", frontend_ip_configurations)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if private_ip_address is not None:
pulumi.set(__self__, "private_ip_address", private_ip_address)
if private_ip_addresses is not None:
pulumi.set(__self__, "private_ip_addresses", private_ip_addresses)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if sku is not None:
pulumi.set(__self__, "sku", sku)
if sku_tier is not None:
pulumi.set(__self__, "sku_tier", sku_tier)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="frontendIpConfigurations")
def frontend_ip_configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LoadBalancerFrontendIpConfigurationArgs']]]]:
"""
One or multiple `frontend_ip_configuration` blocks as documented below.
"""
return pulumi.get(self, "frontend_ip_configurations")
@frontend_ip_configurations.setter
def frontend_ip_configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LoadBalancerFrontendIpConfigurationArgs']]]]):
pulumi.set(self, "frontend_ip_configurations", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the supported Azure Region where the Load Balancer should be created.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the Load Balancer.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="privateIpAddress")
def private_ip_address(self) -> Optional[pulumi.Input[str]]:
"""
Private IP Address to assign to the Load Balancer. The last one and first four IPs in any range are reserved and cannot be manually assigned.
"""
return pulumi.get(self, "private_ip_address")
@private_ip_address.setter
def private_ip_address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_ip_address", value)
@property
@pulumi.getter(name="privateIpAddresses")
def private_ip_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The list of private IP addresses assigned to the load balancer in `frontend_ip_configuration` blocks, if any.
"""
return pulumi.get(self, "private_ip_addresses")
@private_ip_addresses.setter
def private_ip_addresses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "private_ip_addresses", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Resource Group in which to create the Load Balancer.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def sku(self) -> Optional[pulumi.Input[str]]:
"""
The SKU of the Azure Load Balancer. Accepted values are `Basic`, `Standard` and `Gateway`. Defaults to `Basic`.
"""
return pulumi.get(self, "sku")
@sku.setter
def sku(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sku", value)
@property
@pulumi.getter(name="skuTier")
def sku_tier(self) -> Optional[pulumi.Input[str]]:
"""
`sku_tier` - (Optional) The Sku Tier of this Load Balancer. Possible values are `Global` and `Regional`. Defaults to `Regional`. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "sku_tier")
@sku_tier.setter
def sku_tier(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sku_tier", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class LoadBalancer(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
frontend_ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LoadBalancerFrontendIpConfigurationArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[str]] = None,
sku_tier: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Manages a Load Balancer Resource.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_public_ip = azure.network.PublicIp("examplePublicIp",
location="West US",
resource_group_name=example_resource_group.name,
allocation_method="Static")
example_load_balancer = azure.lb.LoadBalancer("exampleLoadBalancer",
location="West US",
resource_group_name=example_resource_group.name,
frontend_ip_configurations=[azure.lb.LoadBalancerFrontendIpConfigurationArgs(
name="PublicIPAddress",
public_ip_address_id=example_public_ip.id,
)])
```
## Import
Load Balancers can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:lb/loadBalancer:LoadBalancer example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Network/loadBalancers/lb1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LoadBalancerFrontendIpConfigurationArgs']]]] frontend_ip_configurations: One or multiple `frontend_ip_configuration` blocks as documented below.
:param pulumi.Input[str] location: Specifies the supported Azure Region where the Load Balancer should be created.
:param pulumi.Input[str] name: Specifies the name of the Load Balancer.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group in which to create the Load Balancer.
:param pulumi.Input[str] sku: The SKU of the Azure Load Balancer. Accepted values are `Basic`, `Standard` and `Gateway`. Defaults to `Basic`.
:param pulumi.Input[str] sku_tier: `sku_tier` - (Optional) The Sku Tier of this Load Balancer. Possible values are `Global` and `Regional`. Defaults to `Regional`. Changing this forces a new resource to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: LoadBalancerArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a Load Balancer Resource.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_public_ip = azure.network.PublicIp("examplePublicIp",
location="West US",
resource_group_name=example_resource_group.name,
allocation_method="Static")
example_load_balancer = azure.lb.LoadBalancer("exampleLoadBalancer",
location="West US",
resource_group_name=example_resource_group.name,
frontend_ip_configurations=[azure.lb.LoadBalancerFrontendIpConfigurationArgs(
name="PublicIPAddress",
public_ip_address_id=example_public_ip.id,
)])
```
## Import
Load Balancers can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:lb/loadBalancer:LoadBalancer example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Network/loadBalancers/lb1
```
:param str resource_name: The name of the resource.
:param LoadBalancerArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(LoadBalancerArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
frontend_ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LoadBalancerFrontendIpConfigurationArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[str]] = None,
sku_tier: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = LoadBalancerArgs.__new__(LoadBalancerArgs)
__props__.__dict__["frontend_ip_configurations"] = frontend_ip_configurations
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["sku"] = sku
__props__.__dict__["sku_tier"] = sku_tier
__props__.__dict__["tags"] = tags
__props__.__dict__["private_ip_address"] = None
__props__.__dict__["private_ip_addresses"] = None
super(LoadBalancer, __self__).__init__(
'azure:lb/loadBalancer:LoadBalancer',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
frontend_ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LoadBalancerFrontendIpConfigurationArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
private_ip_address: Optional[pulumi.Input[str]] = None,
private_ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[str]] = None,
sku_tier: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'LoadBalancer':
"""
Get an existing LoadBalancer resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LoadBalancerFrontendIpConfigurationArgs']]]] frontend_ip_configurations: One or multiple `frontend_ip_configuration` blocks as documented below.
:param pulumi.Input[str] location: Specifies the supported Azure Region where the Load Balancer should be created.
:param pulumi.Input[str] name: Specifies the name of the Load Balancer.
:param pulumi.Input[str] private_ip_address: Private IP Address to assign to the Load Balancer. The last one and first four IPs in any range are reserved and cannot be manually assigned.
:param pulumi.Input[Sequence[pulumi.Input[str]]] private_ip_addresses: The list of private IP addresses assigned to the load balancer in `frontend_ip_configuration` blocks, if any.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group in which to create the Load Balancer.
:param pulumi.Input[str] sku: The SKU of the Azure Load Balancer. Accepted values are `Basic`, `Standard` and `Gateway`. Defaults to `Basic`.
:param pulumi.Input[str] sku_tier: `sku_tier` - (Optional) The Sku Tier of this Load Balancer. Possible values are `Global` and `Regional`. Defaults to `Regional`. Changing this forces a new resource to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _LoadBalancerState.__new__(_LoadBalancerState)
__props__.__dict__["frontend_ip_configurations"] = frontend_ip_configurations
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["private_ip_address"] = private_ip_address
__props__.__dict__["private_ip_addresses"] = private_ip_addresses
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["sku"] = sku
__props__.__dict__["sku_tier"] = sku_tier
__props__.__dict__["tags"] = tags
return LoadBalancer(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="frontendIpConfigurations")
def frontend_ip_configurations(self) -> pulumi.Output[Optional[Sequence['outputs.LoadBalancerFrontendIpConfiguration']]]:
"""
One or multiple `frontend_ip_configuration` blocks as documented below.
"""
return pulumi.get(self, "frontend_ip_configurations")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Specifies the supported Azure Region where the Load Balancer should be created.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Specifies the name of the Load Balancer.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateIpAddress")
def private_ip_address(self) -> pulumi.Output[str]:
"""
Private IP Address to assign to the Load Balancer. The last one and first four IPs in any range are reserved and cannot be manually assigned.
"""
return pulumi.get(self, "private_ip_address")
@property
@pulumi.getter(name="privateIpAddresses")
def private_ip_addresses(self) -> pulumi.Output[Sequence[str]]:
"""
The list of private IP addresses assigned to the load balancer in `frontend_ip_configuration` blocks, if any.
"""
return pulumi.get(self, "private_ip_addresses")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the Resource Group in which to create the Load Balancer.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def sku(self) -> pulumi.Output[Optional[str]]:
"""
The SKU of the Azure Load Balancer. Accepted values are `Basic`, `Standard` and `Gateway`. Defaults to `Basic`.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter(name="skuTier")
def sku_tier(self) -> pulumi.Output[Optional[str]]:
"""
`sku_tier` - (Optional) The Sku Tier of this Load Balancer. Possible values are `Global` and `Regional`. Defaults to `Regional`. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "sku_tier")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
| StarcoderdataPython |
82004 | # Generated by Django 2.2.13 on 2021-02-04 10:09
import directory_validators.string
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
('exportplan', '0027_companyexportplan_getting_paid'),
]
operations = [
migrations.AddField(
model_name='companyexportplan',
name='travel_business_policies',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=dict, null=True),
),
migrations.CreateModel(
name='BusinessTrips',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, null=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, null=True, verbose_name='modified')),
('note', models.TextField(blank=True, default='', validators=[directory_validators.string.no_html])),
('companyexportplan', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='business_trips', to='exportplan.CompanyExportPlan')),
],
options={
'verbose_name_plural': 'Business Trips',
},
),
]
| StarcoderdataPython |
3274554 | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
# ...
SECRET_KEY = '<KEY>'
SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'app.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
UPLOAD_FOLDER = os.path.join(basedir, 'upload/')
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
MAX_FILE_SIZE = 4*1024 * 1024 + 1
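# --- Hedged usage sketch (not part of the original module) ---
# Typical way this Config object is wired into a Flask application factory;
# `create_app` is an assumed name, not defined elsewhere in this project.
def create_app(config_class=Config):
    from flask import Flask
    app = Flask(__name__)
    app.config.from_object(config_class)
    # make sure the upload directory exists before accepting uploads
    os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)
    return app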
| StarcoderdataPython |
25823 | <reponame>BradB111/galaxy_blizzard_plugin
import asyncio
import json
import os
import sys
import multiprocessing
import webbrowser
from collections import defaultdict
import requests
import requests.cookies
import logging as log
import subprocess
import time
import re
from typing import Union, Dict
from galaxy.api.consts import LocalGameState, Platform
from galaxy.api.plugin import Plugin, create_and_run_plugin
from galaxy.api.types import Achievement, Game, LicenseInfo, LocalGame, GameTime, LicenseType
from galaxy.api.errors import (
AuthenticationRequired, BackendTimeout, BackendNotAvailable, BackendError,
NetworkError, UnknownError, InvalidCredentials, UnknownBackendResponse
)
from version import __version__ as version
from process import ProcessProvider
from local_client_base import ClientNotInstalledError
from local_client import LocalClient
from osutils import get_directory_size
from backend import BackendClient, AccessTokenExpired
from definitions import Blizzard, DataclassJSONEncoder, BlizzardGame, ClassicGame
from consts import SYSTEM
from consts import Platform as pf
from http_client import AuthenticatedHttpClient
class BNetPlugin(Plugin):
def __init__(self, reader, writer, token):
super().__init__(Platform.Battlenet, version, reader, writer, token)
self.local_client = LocalClient(self._update_statuses)
self.authentication_client = AuthenticatedHttpClient(self)
self.backend_client = BackendClient(self, self.authentication_client)
self.watched_running_games = set()
def handshake_complete(self):
self.create_task(self.__delayed_handshake(), 'delayed handshake')
async def __delayed_handshake(self):
"""
Adds some minimal delay on Galaxy start before registering local data watchers.
Apparently Galaxy may be not ready to receive notifications even after handshake_complete.
"""
await asyncio.sleep(1)
self.create_task(self.local_client.register_local_data_watcher(), 'local data watcher')
self.create_task(self.local_client.register_classic_games_updater(), 'classic games updater')
async def _notify_about_game_stop(self, game, starting_timeout):
id_to_watch = game.info.uid
if id_to_watch in self.watched_running_games:
log.debug(f'Game {id_to_watch} is already watched. Skipping')
return
try:
self.watched_running_games.add(id_to_watch)
await asyncio.sleep(starting_timeout)
ProcessProvider().update_games_processes([game])
log.info(f'Setting up process watcher for {game._processes}')
loop = asyncio.get_event_loop()
await loop.run_in_executor(None, game.wait_until_game_stops)
finally:
self.update_local_game_status(LocalGame(id_to_watch, LocalGameState.Installed))
self.watched_running_games.remove(id_to_watch)
def _update_statuses(self, refreshed_games, previous_games):
for blizz_id, refr in refreshed_games.items():
prev = previous_games.get(blizz_id, None)
if prev is None:
if refr.has_galaxy_installed_state:
log.debug('Detected playable game')
state = LocalGameState.Installed
else:
log.debug('Detected not-fully installed game')
continue
elif refr.has_galaxy_installed_state and not prev.has_galaxy_installed_state:
log.debug('Detected playable game')
state = LocalGameState.Installed
elif refr.last_played != prev.last_played:
log.debug('Detected launched game')
state = LocalGameState.Installed | LocalGameState.Running
self.create_task(self._notify_about_game_stop(refr, 5), 'game stop waiter')
else:
continue
log.info(f'Changing game {blizz_id} state to {state}')
self.update_local_game_status(LocalGame(blizz_id, state))
for blizz_id, prev in previous_games.items():
refr = refreshed_games.get(blizz_id, None)
if refr is None:
log.debug('Detected uninstalled game')
state = LocalGameState.None_
self.update_local_game_status(LocalGame(blizz_id, state))
def log_out(self):
if self.backend_client:
asyncio.create_task(self.authentication_client.shutdown())
self.authentication_client.user_details = None
async def open_battlenet_browser(self):
url = self.authentication_client.blizzard_battlenet_download_url
log.info(f'Opening battle.net website: {url}')
loop = asyncio.get_running_loop()
await loop.run_in_executor(None, lambda x: webbrowser.open(x, autoraise=True), url)
async def install_game(self, game_id):
if not self.authentication_client.is_authenticated():
raise AuthenticationRequired()
installed_game = self.local_client.get_installed_games().get(game_id, None)
if installed_game and os.access(installed_game.install_path, os.F_OK):
log.warning("Received install command on an already installed game")
return await self.launch_game(game_id)
if game_id in [classic.uid for classic in Blizzard.CLASSIC_GAMES]:
if SYSTEM == pf.WINDOWS:
platform = 'windows'
elif SYSTEM == pf.MACOS:
platform = 'macos'
webbrowser.open(f"https://www.blizzard.com/download/confirmation?platform={platform}&locale=enUS&version=LIVE&id={game_id}")
return
try:
self.local_client.refresh()
log.info(f'Installing game of id {game_id}')
self.local_client.install_game(game_id)
except ClientNotInstalledError as e:
log.warning(e)
await self.open_battlenet_browser()
except Exception as e:
log.exception(f"Installing game {game_id} failed: {e}")
def _open_battlenet_at_id(self, game_id):
try:
self.local_client.refresh()
self.local_client.open_battlenet(game_id)
except Exception as e:
log.exception(f"Opening battlenet client on specific game_id {game_id} failed {e}")
try:
self.local_client.open_battlenet()
except Exception as e:
log.exception(f"Opening battlenet client failed {e}")
async def uninstall_game(self, game_id):
if not self.authentication_client.is_authenticated():
raise AuthenticationRequired()
if game_id == 'wow_classic':
# attempting to uninstall classic wow through protocol gives you a message that the game cannot
# be uninstalled through protocol and you should use battle.net
return self._open_battlenet_at_id(game_id)
if SYSTEM == pf.MACOS:
self._open_battlenet_at_id(game_id)
else:
try:
installed_game = self.local_client.get_installed_games().get(game_id, None)
if installed_game is None or not os.access(installed_game.install_path, os.F_OK):
log.error(f'Cannot uninstall {game_id}')
self.update_local_game_status(LocalGame(game_id, LocalGameState.None_))
return
if not isinstance(installed_game.info, ClassicGame):
if self.local_client.uninstaller is None:
raise FileNotFoundError('Uninstaller not found')
uninstall_tag = installed_game.uninstall_tag
client_lang = self.local_client.config_parser.locale_language
self.local_client.uninstaller.uninstall_game(installed_game, uninstall_tag, client_lang)
except Exception as e:
log.exception(f'Uninstalling game {game_id} failed: {e}')
async def launch_game(self, game_id):
try:
game = self.local_client.get_installed_games().get(game_id, None)
if game is None:
log.error(f'Launching game that is not installed: {game_id}')
return await self.install_game(game_id)
if isinstance(game.info, ClassicGame):
log.info(f'Launching game of id: {game_id}, {game} at path {os.path.join(game.install_path, game.info.exe)}')
if SYSTEM == pf.WINDOWS:
subprocess.Popen(os.path.join(game.install_path, game.info.exe))
elif SYSTEM == pf.MACOS:
if not game.info.bundle_id:
log.warning(f"{game.name} has no bundle id, help by providing us the bundle id of this game")
subprocess.Popen(['open', '-b', game.info.bundle_id])
self.update_local_game_status(LocalGame(game_id, LocalGameState.Installed | LocalGameState.Running))
asyncio.create_task(self._notify_about_game_stop(game, 6))
return
self.local_client.refresh()
log.info(f'Launching game of id: {game_id}, {game}')
await self.local_client.launch_game(game, wait_sec=60)
self.update_local_game_status(LocalGame(game_id, LocalGameState.Installed | LocalGameState.Running))
self.local_client.close_window()
asyncio.create_task(self._notify_about_game_stop(game, 3))
except ClientNotInstalledError as e:
log.warning(e)
await self.open_battlenet_browser()
except TimeoutError as e:
log.warning(str(e))
except Exception as e:
log.exception(f"Launching game {game_id} failed: {e}")
async def authenticate(self, stored_credentials=None):
try:
if stored_credentials:
auth_data = self.authentication_client.process_stored_credentials(stored_credentials)
try:
await self.authentication_client.create_session()
await self.backend_client.refresh_cookies()
auth_status = await self.backend_client.validate_access_token(auth_data.access_token)
except (BackendNotAvailable, BackendError, NetworkError, UnknownError, BackendTimeout) as e:
raise e
except Exception:
raise InvalidCredentials()
if self.authentication_client.validate_auth_status(auth_status):
self.authentication_client.user_details = await self.backend_client.get_user_info()
return self.authentication_client.parse_user_details()
else:
return self.authentication_client.authenticate_using_login()
except Exception as e:
raise e
async def pass_login_credentials(self, step, credentials, cookies):
if "logout&app=oauth" in credentials['end_uri']:
# 2fa expired, repeat authentication
return self.authentication_client.authenticate_using_login()
if self.authentication_client.attempted_to_set_battle_tag:
self.authentication_client.user_details = await self.backend_client.get_user_info()
return self.authentication_client.parse_auth_after_setting_battletag()
cookie_jar = self.authentication_client.parse_cookies(cookies)
auth_data = await self.authentication_client.get_auth_data_login(cookie_jar, credentials)
try:
await self.authentication_client.create_session()
await self.backend_client.refresh_cookies()
except (BackendNotAvailable, BackendError, NetworkError, UnknownError, BackendTimeout) as e:
raise e
except Exception:
raise InvalidCredentials()
auth_status = await self.backend_client.validate_access_token(auth_data.access_token)
if not ("authorities" in auth_status and "IS_AUTHENTICATED_FULLY" in auth_status["authorities"]):
raise InvalidCredentials()
self.authentication_client.user_details = await self.backend_client.get_user_info()
self.authentication_client.set_credentials()
return self.authentication_client.parse_battletag()
async def get_owned_games(self):
if not self.authentication_client.is_authenticated():
raise AuthenticationRequired()
def _parse_battlenet_games(standard_games: dict, cn: bool) -> Dict[BlizzardGame, LicenseType]:
licenses = defaultdict(lambda: LicenseType.Unknown, {
"Trial": LicenseType.OtherUserLicense,
"Good": LicenseType.SinglePurchase,
"Inactive": LicenseType.SinglePurchase,
"Banned": LicenseType.SinglePurchase,
"Free": LicenseType.FreeToPlay,
"Suspended": LicenseType.SinglePurchase,
"AccountLock": LicenseType.SinglePurchase
})
games = {}
for standard_game in standard_games["gameAccounts"]:
title_id = standard_game['titleId']
try:
game = Blizzard.game_by_title_id(title_id, cn)
except KeyError:
log.warning(f"Skipping unknown game with titleId: {title_id}")
else:
games[game] = licenses[standard_game.get("gameAccountStatus")]
# Add wow classic if retail wow is present in owned games
wow_license = games.get(Blizzard['wow'])
if wow_license is not None:
games[Blizzard['wow_classic']] = wow_license
return games
def _parse_classic_games(classic_games: dict) -> Dict[ClassicGame, LicenseType]:
games = {}
for classic_game in classic_games["classicGames"]:
sanitized_name = classic_game["localizedGameName"].replace(u'\xa0', ' ')
for cg in Blizzard.CLASSIC_GAMES:
if cg.name == sanitized_name:
games[cg] = LicenseType.SinglePurchase
break
else:
log.warning(f"Skipping unknown classic game with name: {sanitized_name}")
return games
cn = self.authentication_client.region == 'cn'
battlenet_games = _parse_battlenet_games(await self.backend_client.get_owned_games(), cn)
classic_games = _parse_classic_games(await self.backend_client.get_owned_classic_games())
owned_games: Dict[BlizzardGame, LicenseType] = {**battlenet_games, **classic_games}
for game in Blizzard.try_for_free_games(cn):
if game not in owned_games:
owned_games[game] = LicenseType.FreeToPlay
return [
Game(game.uid, game.name, None, LicenseInfo(license_type))
for game, license_type in owned_games.items()
]
async def get_local_games(self):
timeout = time.time() + 2
try:
translated_installed_games = []
while not self.local_client.games_finished_parsing():
await asyncio.sleep(0.1)
if time.time() >= timeout:
break
running_games = self.local_client.get_running_games()
installed_games = self.local_client.get_installed_games()
log.info(f"Installed games {installed_games.items()}")
log.info(f"Running games {running_games}")
for uid, game in installed_games.items():
if game.has_galaxy_installed_state:
state = LocalGameState.Installed
if uid in running_games:
state |= LocalGameState.Running
translated_installed_games.append(LocalGame(uid, state))
self.local_client.installed_games_cache = installed_games
return translated_installed_games
except Exception as e:
log.exception(f"failed to get local games: {str(e)}")
raise
async def get_local_size(self, game_id: str, context) -> int:
install_path = self.local_client.installed_games_cache[game_id].install_path
loop = asyncio.get_event_loop()
return await loop.run_in_executor(None, get_directory_size, install_path)
async def get_game_time(self, game_id, context):
total_time = None
last_played_time = None
blizzard_game = Blizzard[game_id]
if blizzard_game.name == "Overwatch":
total_time = await self._get_overwatch_time()
log.debug(f"Gametime for Overwatch is {total_time} minutes.")
for config_info in self.local_client.config_parser.games:
if config_info.uid == blizzard_game.uid:
if config_info.last_played is not None:
last_played_time = int(config_info.last_played)
break
return GameTime(game_id, total_time, last_played_time)
async def _get_overwatch_time(self) -> Union[None, int]:
log.debug("Fetching playtime for Overwatch...")
player_data = await self.backend_client.get_ow_player_data()
if 'message' in player_data: # user not found
log.error('No Overwatch profile found.')
return None
if player_data['private'] == True:
log.info('Unable to get data as Overwatch profile is private.')
return None
qp_time = player_data['playtime'].get('quickplay')
if qp_time is None: # user has not played quick play
return 0
if qp_time.count(':') == 1: # minutes and seconds
match = re.search('(?:(?P<m>\\d+):)(?P<s>\\d+)', qp_time)
if match:
return int(match.group('m'))
elif qp_time.count(':') == 2: # hours, minutes and seconds
match = re.search('(?:(?P<h>\\d+):)(?P<m>\\d+)', qp_time)
if match:
return int(match.group('h')) * 60 + int(match.group('m'))
raise UnknownBackendResponse(f'Unknown Overwatch API playtime format: {qp_time}')
async def _get_wow_achievements(self):
achievements = []
try:
characters_data = await self.backend_client.get_wow_character_data()
characters_data = characters_data["characters"]
wow_character_data = await asyncio.gather(
*[
self.backend_client.get_wow_character_achievements(character["realm"], character["name"])
for character in characters_data
],
return_exceptions=True,
)
for data in wow_character_data:
if isinstance(data, requests.Timeout) or isinstance(data, requests.ConnectionError):
raise data
wow_achievement_data = [
list(
zip(
data["achievements"]["achievementsCompleted"],
data["achievements"]["achievementsCompletedTimestamp"],
)
)
for data in wow_character_data
if type(data) is dict
]
already_in = set()
for char_ach in wow_achievement_data:
for ach in char_ach:
if ach[0] not in already_in:
achievements.append(Achievement(achievement_id=ach[0], unlock_time=int(ach[1] / 1000)))
already_in.add(ach[0])
except (AccessTokenExpired, BackendError) as e:
log.exception(str(e))
with open('wow.json', 'w') as f:
f.write(json.dumps(achievements, cls=DataclassJSONEncoder))
return achievements
async def _get_sc2_achievements(self):
account_data = await self.backend_client.get_sc2_player_data(self.authentication_client.user_details["id"])
# TODO what if more sc2 accounts?
assert len(account_data) == 1
account_data = account_data[0]
profile_data = await self.backend_client.get_sc2_profile_data(
account_data["regionId"], account_data["realmId"],
account_data["profileId"]
)
sc2_achievement_data = [
Achievement(achievement_id=achievement["achievementId"], unlock_time=achievement["completionDate"])
for achievement in profile_data["earnedAchievements"]
if achievement["isComplete"]
]
with open('sc2.json', 'w') as f:
f.write(json.dumps(sc2_achievement_data, cls=DataclassJSONEncoder))
return sc2_achievement_data
# async def get_unlocked_achievements(self, game_id):
# if not self.website_client.is_authenticated():
# raise AuthenticationRequired()
# try:
# if game_id == "21298":
# return await self._get_sc2_achievements()
# elif game_id == "5730135":
# return await self._get_wow_achievements()
# else:
# return []
# except requests.Timeout:
# raise BackendTimeout()
# except requests.ConnectionError:
# raise NetworkError()
# except Exception as e:
# log.exception(str(e))
# return []
async def launch_platform_client(self):
if self.local_client.is_running():
log.info("Launch platform client called but client is already running")
return
self.local_client.open_battlenet()
await self.local_client.prevent_battlenet_from_showing()
async def shutdown_platform_client(self):
await self.local_client.shutdown_platform_client()
async def shutdown(self):
log.info("Plugin shutdown.")
await self.authentication_client.shutdown()
def main():
multiprocessing.freeze_support()
create_and_run_plugin(BNetPlugin, sys.argv)
if __name__ == "__main__":
main()
| StarcoderdataPython |
175925 | <filename>inspector/checks/engine/executors/python_executor.py
from . import CheckExecutor
from ...constants import CHECK_TYPES
class PythonExecutor(CheckExecutor):
supported_check_types = \
(CHECK_TYPES.NUMBER,
CHECK_TYPES.STRING,
CHECK_TYPES.DATE)
    def execute(self, check_logic):
        if self.check_type == CHECK_TYPES.STRING:
            return str(check_logic)
        if self.check_type == CHECK_TYPES.NUMBER:
            return float(check_logic)
        # CHECK_TYPES.DATE is listed as supported but has no conversion here,
        # so date checks currently fall through and return None.
| StarcoderdataPython |
1716115 | <filename>qrCodeGenerator.py
# pip install PyQRCode
import pyqrcode
import PIL
from pyqrcode import QRCode
generateData = input("enter text to convert :")
imageName = input("enter image name to save :")
imageNameResult = imageName + ".png"
url = pyqrcode.create(generateData)
url.png(imageNameResult, scale = 6)
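# Note: pyqrcode's png() output relies on the pypng package being installed;
# the PIL import above is not actually used by this script.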
| StarcoderdataPython |
3216521 | <reponame>prz3m37/TightBinding<gh_stars>1-10
import pickle
import numpy as np
from typing import TextIO
from tight_binding_base_connection import DataBaseConnection
class DataManager(object):
def __init__(self, helpers, settings):
self.__helpers = helpers
self.__settings = settings
self.__dbconnection = DataBaseConnection(settings, helpers)
def __save_results_into_database(self, data):
table = self.__settings["db table"]
data_identification = self.__settings["data_identification"]
save_query = """INSERT INTO {} (Lattice identification,
Eigen energy,
Wave function,
Slater-Koster matrix,
DOS,
Projected DOS,
Configuration)
VALUES ({}, %s, %s, %s, %s, %s, %s) """.format(table, data_identification)
query = (save_query, data)
return query
def __download_required_data(self):
select = self.__settings["select"]
table = self.__settings["db table"]
identifier = self.__helpers["identifier"]
if identifier is None:
load_query = """SELECT {} from {}""".format(select, table)
else:
load_query = """SELECT {} from {} WHERE {}""".format(select, table, identifier)
return load_query
    @staticmethod
    def __save_as_pickle(matrix):
        # pickle.dump() returns None, so serialize explicitly to get the bytes
        # that are both written to disk and returned to the caller.
        pickled_matrix = pickle.dumps(matrix, pickle.HIGHEST_PROTOCOL)
        with open("interaction_matrix", 'wb') as outfile:
            outfile.write(pickled_matrix)
        return pickled_matrix
@staticmethod
def __load_as_pickle(pickled_matrix):
with open(pickled_matrix, 'rb') as infile:
matrix = pickle.load(infile)
return matrix
def __save_numerical_results(self, title, eigen_energies: np.array) -> TextIO:
"""
Method saves numerical results - eigen energies - into txt file
Args:
title: name of file
eigen_energies: array of eigen energies calculated by diagonalization of interaction matrix.
Returns: None
"""
saving_key = self.__helpers.generate_id_key(title)
with open(self.__helpers.__directory + "/" + saving_key + '_eigenvalues.txt', "w") as file:
for eigen_energy in eigen_energies:
file.write(str(eigen_energy) + "\n")
return file
def __save_data_locally(self, energies, wave_functions, interaction_matrix, density_of_states,
projected_density_of_states, configuration):
energy_file = self.__save_numerical_results("eigen_energies", energies)
wave_functions_file = self.__save_as_pickle(wave_functions)
interaction_matrix_file = self.__save_as_pickle(interaction_matrix)
dos_file = self.__save_numerical_results("DOS", density_of_states)
p_dos_file = self.__save_numerical_results("PDOS", projected_density_of_states)
configuration_file = self.__helpers.save_all_params_to_file("parametrization_file", configuration)
data_to_save = (energy_file, wave_functions_file,
interaction_matrix_file, dos_file, p_dos_file, configuration_file)
return data_to_save
def save_data(self, energies, wave_functions, interaction_matrix,
density_of_states, projected_density_of_states, configuration):
data_source = self.__settings["data_source"]
self.__helpers.save_log('[INFO]: Saving results locally \n')
data_to_save = self.__save_data_locally(energies,
wave_functions,
interaction_matrix,
density_of_states,
projected_density_of_states,
configuration)
if data_source == "data base":
self.__helpers.save_log('[INFO]: Saving results into data base\n')
save_data_query = self.__save_results_into_database(data_to_save)
self.__dbconnection.execute_query(save_data_query, "save")
else:
pass
return
def load_data(self):
self.__helpers.save_log('[INFO]: Loading results from data base\n')
data_source = self.__settings["data_source"]
if data_source == "data base":
data = self.__download_required_data()
required_data = self.__dbconnection.execute_query(data, "load")
else:
seeking_file = self.__settings["data_identification"]
required_data = self.__helpers.search_data_on_disc(seeking_file)
return required_data
| StarcoderdataPython |
55683 | from typing import Union
import flask_restx
import flask
from keepachangelog._changelog import to_dict
def add_changelog_endpoint(
namespace: Union[flask_restx.Namespace, flask_restx.Api], changelog_path: str
):
"""
Create /changelog: Changelog endpoint parsing https://keepachangelog.com/en/1.0.0/
:param namespace: The Flask-RestX namespace.
:param changelog_path: Path to CHANGELOG.md.
"""
@namespace.route("/changelog")
@namespace.doc(
responses={
200: (
"Service changelog.",
[
namespace.model(
"ChangelogReleaseModel",
{
"metadata": namespace.model(
"ChangelogReleaseMetaDataModel",
{
"version": flask_restx.fields.String(
description="Release version following semantic versioning.",
required=True,
example="3.12.5",
),
"release_date": flask_restx.fields.Date(
description="Release date.",
required=True,
example="2019-12-31",
),
},
),
"added": flask_restx.fields.List(
flask_restx.fields.String(description="New features.")
),
"changed": flask_restx.fields.List(
flask_restx.fields.String(
description="Changes in existing functionaliy."
)
),
"deprecated": flask_restx.fields.List(
flask_restx.fields.String(
description="Soon-to-be removed features."
)
),
"removed": flask_restx.fields.List(
flask_restx.fields.String(
description="Removed features."
)
),
"fixed": flask_restx.fields.List(
flask_restx.fields.String(description="Any bug fixes.")
),
"security": flask_restx.fields.List(
flask_restx.fields.String(
description="Vulnerabilities."
)
),
},
)
],
)
}
)
class Changelog(flask_restx.Resource):
def get(self):
"""
Retrieve service changelog.
"""
try:
return flask.jsonify(to_dict(changelog_path))
except FileNotFoundError:
return flask.jsonify({})
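# A minimal usage sketch (the app/api names and the CHANGELOG.md path below are
# illustrative assumptions, not part of this module):
#
#   app = flask.Flask(__name__)
#   api = flask_restx.Api(app)
#   add_changelog_endpoint(api, "CHANGELOG.md")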
| StarcoderdataPython |
3255811 | <reponame>ludechu/DJevn
from django.db import models
# Create your models here.
class apiManger(models.Manager):
    # Custom model manager; it is shared, so other model classes can reference it as well
def get_queryset(self):
return super(apiManger,self).get_queryset().filter(status=True,is_deleted=False)
def create_api(self,Name):
        # Define a method used to add data
new_api = self.model(name=Name)
return new_api
class t_api(models.Model):
    # One model class defines one database table
    # Custom model managers: later database operations will use student.stuObj.all instead of student.objects.all
apiQuery = apiManger()
addapi = apiManger()
apiQuery1 = models.Manager()
name = models.CharField(max_length=20, verbose_name='接口名称')
project_id = models.IntegerField(null=True,verbose_name='项目ID')
tree = models.IntegerField(null=True,verbose_name='树节点id')
des = models.TextField(null=True, verbose_name='描述')
method = models.IntegerField(null=True,verbose_name='请求方式')
evn_id = models.IntegerField(null=True,verbose_name='环境id')
url = models.CharField(null=True,max_length=60, verbose_name='接口url')
para_list = models.TextField(null=True, verbose_name='参数列表')
creater = models.CharField(null=True,max_length=10, verbose_name='创建人')
create_time = models.DateField(auto_now_add=True, verbose_name='创建时间')
last_modify_time = models.DateField(auto_now=True, verbose_name='最近修改时间')
status = models.BooleanField(default=True, verbose_name='是否启用')
is_deleted = models.BooleanField(default=False, verbose_name='是否删除')
def __str__(self):
return self.name
class Meta:
        # Define meta options
        # Define the table name explicitly; the auto-generated name would include the app name prefix, which is inconvenient
db_table = "t_api"
        # Default ordering field used when fetching object lists; ascending by default. Ordering adds database overhead.
ordering = ['id']
        # ordering = ['-id']  # descending
| StarcoderdataPython |
12425 | <filename>sizer.py<gh_stars>0
#!/usr/bin/python3
# Fetch torrent sizes
# TODO: Report number of files before we go etc
import os
from torrentool.api import Torrent
from fnmatch import fnmatch
root = '/opt/radio/collections'
pattern = "*.torrent"
alltorrentsize = 0
print("Thanks for using The Librarian.")
for path, subdirs, files in os.walk(root):
for name in files:
if fnmatch(name, pattern):
torrentstats = Torrent.from_file(os.path.join(path, name))
alltorrentsize += torrentstats.total_size
print('Torrent size ' + str(torrentstats.total_size) + ' for a total so far of ' + str(alltorrentsize))
print('DEBUG' + os.path.join(path, name))
# Reading filesize
my_torrent = Torrent.from_file('/opt/radio/collections/arienscompanymanuals/archive.org/download/collection_01_ariens_manuals/collection_01_ariens_manuals_archive.torrent')
size = my_torrent.total_size # Total files size in bytes.
print(size) | StarcoderdataPython |
53348 | from boa3.builtin.interop.runtime import invocation_counter
def Main(example: int) -> int:
invocation_counter = example
return invocation_counter
| StarcoderdataPython |
1755812 | # Uses python3
def edit_distance(s, t):
"""Edit distance between two strings.
The edit distance between two strings is the minimum number of insertions,
deletions, and mismatches in an alignment of two strings.
Samples:
>>> edit_distance("ab", "ab")
0
>>> edit_distance("short", "ports")
3
>>> # Explanation: s h o r t −
>>> # − p o r t s
>>> edit_distance("editing", "distance")
5
>>> # Explanation: e d i − t i n g −
>>> # − d i s t a n c e
"""
len_s = len(s) + 1
len_t = len(t) + 1
# Create a distance matrix and write in initial values.
d = [[x] + [0] * (len_t - 1) for x in range(len_s)]
d[0] = [x for x in range(len_t)]
for i in range(1, len_s):
for j in range(1, len_t):
# Levenshtein distance calculation.
if s[i - 1] == t[j - 1]:
d[i][j] = d[i - 1][j - 1]
else:
d[i][j] = min(d[i][j - 1], d[i - 1][j], d[i - 1][j - 1]) + 1
# The last element of the matrix is edit distance metric.
return d[-1][-1]
if __name__ == "__main__":
print(edit_distance(input(), input()))
| StarcoderdataPython |
1649300 | <reponame>RakaAndriawan/PortfolioAnalysis
## Package for Web App
import streamlit as st
import joblib, os
import datetime
import string
## Import Custom Functions
from port_script import *
## Set Page Config
st.set_page_config(page_title="Portfolio Anda", page_icon="🧊", layout="wide", initial_sidebar_state="expanded")
## Get Options for Tickers
tickers = get_ticker()
ticker_list = tickers['Kode'] + ' - ' + tickers['Nama Perusahaan']
def main():
## Base Input: Tickers, Start_Date, Sections
## Organize Sidebar
st.title('Analisa dan Simulasi Portfolio Investasi Saham')
sh1, sh2 = st.beta_columns(2)
with sh1:
st.info('**Susun Portfolio Anda di Sidebar**')
st.sidebar.header('Susun Portfolio Anda')
section = st.sidebar.radio('Pilih Halaman:', ('Performa Portfolio', 'Backtesting Portfolio'), index = 0)
myPicks = st.sidebar.multiselect(label = 'Pilih Saham (Maks. 5)', options = ticker_list)
start_date = st.sidebar.date_input(label = 'Tanggal Mulai', value = datetime.date.today() - datetime.timedelta(365))
st.sidebar.header('Kontribusi')
st.sidebar.info('''Ini adalah project **open source** yang dapat anda **bantu kembangkan** dengan memberikan **feedback** melalui email **<EMAIL>** atau github **RakaAndriawan**''')
st.sidebar.header('Tentang Saya')
st.sidebar.info('''Aplikasi ini dibuat oleh Raka Andriawan, **penggemar Data Science**. Anda dapat mengetahui lebih banyak tentang saya pada **akun Linkedn saya**''')
## Check Base Input
num_stocks = len(myPicks)
if num_stocks > 5 or num_stocks < 2:
with sh2:
st.warning('**Portfolio diisi 2 hingga 5 saham**')
return None
## Collect Datasets
with sh2:
with st.spinner('Tunggu Proses Download Data Ya!'):
myPicks = [x.split(' - ')[0] for x in myPicks]
recent_data = get_data(myPicks, start_date)
## Rendering First Page
if section == 'Performa Portfolio':
st.markdown('''<p style="text-align:center;">
<em><b>Halaman ini bertujuan</b> untuk melihat <b>performa portfolio</b> yang anda susun
<b>berdasarkan performa di masa lalu</b><br>
Ingat bahwa performa masa lalu <b>tidak mencerminkan hasil yang menjanjikan di masa depan</b><br>
Selalu <b>berinvestasi</b> dengan <b>penuh pertimbangan</b></em></p>
''', unsafe_allow_html = True)
## Organize Layout
L0 = st.beta_container()
inp_weight = st.beta_columns(num_stocks)
L1A, L1B = st.beta_columns(2)
L2A, L2B = st.beta_columns([1,5])
st.header('**Resiko Portfolio Anda**')
L3A, L3B = st.beta_columns(2)
## Ask for Custom Weights
L0.subheader('**Tentukan Komposisi Portfolio Anda (%)**')
weights = np.zeros(num_stocks)
for i in range(0,num_stocks):
weights[i] = inp_weight[i].number_input(myPicks[i], min_value = 0.0, max_value = 100.0, value = 100/num_stocks, step = 0.1)
if sum(weights) != 100:
warn = 'Pastikan Total Komposisi 100%! Komposisi Saat Ini : {}%'.format(sum(weights))
st.text(warn)
return None
## Calculation Process for First Page
with sh2:
with st.spinner('Tunggu Proses Kalkulasi Ya!'):
weights = [x/100 for x in weights]
result = core_plot_data(recent_data, weights)
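                # core_plot_data (from port_script) appears to return, in order: KPI summary,
                # cumulative returns, daily returns, drawdown data, the correlation matrix
                # and the list of asset names; the indices used below rely on that layout.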
## Visualize DataFrame
with L1A:
st.subheader('**Data Returns Portfolio**')
display_data = result[2]
display_data.index = pd.to_datetime(display_data.index, format = '%m/%d/%Y').strftime('%Y-%m-%d')
L1A.dataframe(display_data.style.applymap(negative_red))
display_data.index = pd.DatetimeIndex(display_data.index)
## Create Download Link
if st.button('Download Data Return', key = 'first_df'):
tmp_download_link = download_link(display_data, 'portfolio_returns.csv', 'DOWNLOAD!')
st.markdown(tmp_download_link, unsafe_allow_html=True)
## Highlights Key Values
with L1B:
st.subheader('**Summary Performa Portfolio**')
kpi = result[0]
keys = list(kpi.keys())
## Returns
if kpi[keys[0]] < 0 and kpi[keys[1]] < 0:
st.error('**{}** : {}% || **{}** : {}%'.format(keys[0], kpi[keys[0]], keys[1], kpi[keys[1]]))
elif kpi[keys[0]] < 0 or kpi[keys[1]] < 0:
st.warning('**{}** : {}% || **{}** : {}%'.format(keys[0], kpi[keys[0]], keys[1], kpi[keys[1]]))
elif kpi[keys[0]] >= 0 and kpi[keys[1]] >= 0:
st.success('**{}** : {}% || **{}** : {}%'.format(keys[0], kpi[keys[0]], keys[1], kpi[keys[1]]))
## Volatility and Sharpe Ratio
if kpi[keys[3]] < 0:
st.error('**{}** : {}% || **{}** : {}'.format(keys[2], kpi[keys[2]], keys[3], kpi[keys[3]]))
elif kpi[keys[3]] < 1:
st.warning('**{}** : {}% || **{}** : {}'.format(keys[2], kpi[keys[2]], keys[3], kpi[keys[3]]))
elif kpi[keys[3]] >= 1:
st.success('**{}** : {}% || **{}** : {}'.format(keys[2], kpi[keys[2]], keys[3], kpi[keys[3]]))
## Risk Metrics
st.info('**{}** (95) : {}% || **{}** (95) : {}% || **{}** : {}%'.format(keys[4], kpi[keys[4]], keys[5], kpi[keys[5]], keys[6], kpi[keys[6]]))
## Checkbox Assets to Plots
with L2A:
st.subheader('**Pilih Variabel untuk Divisualisasi**')
var = np.zeros(num_stocks + 1)
var[0] = st.checkbox('Portfolio', value = True)
for i in range(1,(num_stocks + 1)):
var[i] = st.checkbox(myPicks[i-1])
st.markdown('Gunakan slider dibawah plot untuk mengubah range waktu dari plot')
## Create Download Link
if st.button('Download Data Return Kumulatif', key = 'fifth_df'):
tmp_download_link = download_link(result[1], 'cumulative_returns.csv', 'DOWNLOAD!')
st.markdown(tmp_download_link, unsafe_allow_html=True)
## Cumulative Returns Plot
with L2B:
my_var = ['Portfolio'] + result[5]
my_key_var = [my_var[i] for i in range(0,len(my_var)) if var[i] == 1]
if len(my_key_var) > 0:
plot_cum_return = asset_cumulative_return(result[1], my_key_var)
st.plotly_chart(plot_cum_return, use_container_width = True)
## Correlation Plot
with L3A:
st.markdown('''<p style="text-align:justify;">
Hubungan antara satu saham individual dengan yang lainya juga dapat memainkan peran dalam menyusun portfolio anda.
Anda dapat memilih saham yang berhubungan negatif untuk meminimalisir penurunan nilai portfolio anda.
Anda juga dapat memilih saham yang berhubungan secara positif untuk memaksimalkan return yang anda bisa dapatkan.
Tidak jarang juga, saham yang dipilih tidak memiliki hubungan apapun untuk meminimalkan resiko<br>
<b>Strategi Ada Di Tangan Anda</b></p>''', unsafe_allow_html = True)
plot_corr = asset_corr_plot(result[4], result[5])
st.plotly_chart(plot_corr)
with L3B:
risk_plot = st.selectbox('Pilih Grafik untuk Menggambarkan Resiko Portfolio Anda',
['Rolling Volatilitas Annual', 'VaR dan CVaR', 'Drawdown'], index = 1)
## Rolling Volatilitas Annual
if risk_plot == 'Rolling Volatilitas Annual':
st.markdown('''<p style="text-align:justify;">
Rolling Volatilitas Annual merupakan suatu teknik untuk mengestimasi nilai volatilitas annual berdasarkan jangka waktu tertentu.
Volatilias Annual sendiri merupakan ukuran yang digunakan untuk mengestimasi fluktuasi nilai return selama setahun.
Semakin besar nilai volatilitas annual maka dapat dikatakan performa portfolio anda semakin tidak stabil.
</p>''', unsafe_allow_html = True)
window = st.slider('Pilih Rentang Waktu Rolling Volatilitas Annual', min_value = 2, max_value = 30, value = 5)
plot_rolling = rolling_volatility(result[2], window)
st.plotly_chart(plot_rolling, use_container_width = True)
## Histogram VaR dan CVaR
if risk_plot == 'VaR dan CVaR':
alpha = st.slider('Pilih Level Kepercayaan Anda (%)', min_value = 90, max_value = 99, value = 95)
plot_hist, risk = var_cvar(result[2], alpha)
st.markdown('''<p style="text-align:justify;">
Nilai VaR dan CVaR merupakan ukuran yang digunakan untuk mengestimasi kemungkinan kerugian berdasarkan level kepercayaan tertentu. Sebagai contoh pada plot anda, nilai VaR pada level kepercayaan {}% menyatakan bahwa terdapat {}% kemungkinan nilai investasi anda turun lebih besar dari {}% dalam satu hari. Sedangkan nilai CVaR pada level kepercayaan yang sama menyatakan bahwa pada {}% kondisi terburuk, rata-rata kerugian anda sebesar {}% dalam satu hari.
</p>'''.format(alpha, (100-alpha), round(-(risk[0]*100), 3), (100-alpha), round(-(risk[1]*100), 3)),
unsafe_allow_html = True)
st.plotly_chart(plot_hist, use_container_width = True)
## Max Drawdown
if risk_plot == 'Drawdown':
st.markdown('''<p style="text-align:justify;">
Drawdown adalah penurunan nilai return kumulatif anda, atau penurunan nilai portfolio anda. Sedangkan Max Drawdown adalah penurunan maksimum nilai portfolio anda dari titik tertinggi ke titik terendak, sebelum bisa ke titik tertinggi lagi. Nilai ini dapat digunakan untuk mengetahui seberapa besar kerugian yang dapat kita terima dalam kondisi terbaik portfolio kita.
</p>''', unsafe_allow_html = True)
plot_drawdown = drawdown_vis(result[3])
st.plotly_chart(plot_drawdown, use_container_width = True)
## Render Process for Second Page
if section == 'Backtesting Portfolio':
st.markdown('''<p style="text-align:center;">
<em><b>Halaman ini bertujuan</b> untuk menentukan <b>komposisi portfolio</b> yang <b>tepat</b><br>
Sesuai dengan <b>return</b> dan <b>resiko</b> yang diharapkan <b>berdasarkan performa di masa lalu</b><br>
Ingat bahwa performa masa lalu <b>tidak mencerminkan hasil yang menjanjikan di masa depan</b><br>
Selalu <b>berinvestasi</b> dengan <b>penuh pertimbangan</b></em></p>
''', unsafe_allow_html = True)
## Organize Layout
L1A, L1B = st.beta_columns([1.5,1])
L2 = st.beta_container()
with L1B:
## Ask for Input: Expected Return and Risk Free Rate
st.subheader('**Masukkan Input Disini**')
exp_value = st.number_input('Ekspektasi Nilai Return Annual', min_value = 0.0, max_value = 100.0, value = 20.0, step = 0.1)
risk_free = st.number_input('Nilai Risk Free Return', min_value = 0.0, max_value = 100.0, value = 0.0, step = 0.1)
## Explain Strategy
st.subheader('**Penjelasan Strategi Portfolio**')
st.markdown('''<p style="text-align:justify;">
Portfolio yang terdiri dari beberapa saham dapat memiliki performa yang berbeda berdasarkan komposisi persentaset setiap saham individual dibandingkan total kepemilikan saham. Maka dari itu, komposisi portfolio perlu disesuaikan dengan target investasi kita. Beberapa strategi yang biasanya digunakan yaitu:<ul>
<li><b>Equal Weight (EW):</b> Setiap saham individual memiliki proporsi sama rata</li>
<li><b>Market Cap Weight (MCap):</b> Setiap saham individual memiliki proporsi sebanding dengan nilai market keseluruhan mereka</li>
<li><b>Max Sharpe Ratio (MSR):</b> Komposisi portfolio dioptimalkan dengan tujuan memaksimalkan nilai sharpe ratio</li>
<li><b>Global Min Volatility (GMV):</b> Komposisi portfolio dioptimalkan dengan tujuan meminimalkan nilai volatilitas annual</li>
<li><b>Efficient Frontier:</b> Kumpulan alternatif komposisi portfolio yang meminimalkan resiko untuk setiap return yang diinginkan</li>
</ul></p>''', unsafe_allow_html = True)
## Calculation for Second Page
if 'Portfolio' in recent_data.columns:
recent_data = recent_data.drop(columns=['Portfolio'])
with sh2:
with st.spinner('Tunggu Proses Kalkulasi Ya!'):
compiled_port = markowitz_portfolio(recent_data, max_exp = exp_value, rf = risk_free)
ef_plot = visualize_ef(compiled_port)
## Displaying DataFrame
with L1A:
st.subheader('**Penjelasan Ukuran yang Digunakan**')
st.markdown('''<p style="text-align:justify;">
Dalam melakukan optimalisasi komposisi portfolio, tentunya perlu memiliki ukuran yang digunakan untuk menentukan apakah portfolio terebut bagus atau tidak. Berikut merupakan ukuran yang digunakan:<ul>
<li><b>Return Annual:</b> Estimasi nilai rata-rata return dalam satu tahun</li>
<li><b>Volatilitas Annual:</b> Estimasi nilai rata-rata volatilitas dalam satu tahun</li>
<li><b>Sharpe Ratio:</b> Rasio perbandingan nilai risk adjusted return dengan volatilitas. Suatu portfolio dinyatakan baik jika nilai Sharpe Ratio lebih dari 1</li>
</ul></p>''', unsafe_allow_html = True)
st.subheader('**Strategi Portfolio Umum**')
st.dataframe(compiled_port[1])
if st.button('Download Data Strategi Portfolio', key = 'second_df'):
tmp_download_link = download_link(compiled_port[1], 'portfolio_strategy.csv', 'DOWNLOAD!')
st.markdown(tmp_download_link, unsafe_allow_html=True)
st.subheader('**Portfolio Efficient Frontier**')
st.dataframe(compiled_port[2])
if st.button('Download Data Efficient Portfolio', key = 'third_df'):
tmp_download_link = download_link(compiled_port[2], 'portfolio_strategy.csv', 'DOWNLOAD!')
st.markdown(tmp_download_link, unsafe_allow_html=True)
## Place the Charts
L2.subheader('**Visualisasi Performa Portfolio**')
L2.markdown('''<p style="text-align:left;">
Pada chart dibawah ini, anda dapat melihat <b>perbandingan return dari volatilitas</b> setiap strategi dan asset individual<br>
Anda dapat memilih strategi dengan <b>return yang anda inginkan</b> namun tetap dengan <b>resiko yang bisa anda tolerir</b></p>
''', unsafe_allow_html = True)
L2.plotly_chart(ef_plot, use_container_width = True)
## Strategy Comparation
st.header('**Bandingkan Histori Performa Strategi Portfolio Anda**')
st.markdown('''
Gunakan komposisi portfolio anda sendiri untuk membuat strategi custom<br>
Melalui plot dibawah, anda dapat membandingkan performa beberapa strategi sekaligus seiring waktu<br>
Silahkan gunakan fitur interaktif seperti memilih strategi yang dimunculkan dengan klik legenda plot di sebelah kanan plot<br>
Anda juga dapat mengamati rentang waktu yang anda inginkan melalui slider di bawah plot<br>
atau rentang waktu tertentu pada tombol di kiri atas
''', unsafe_allow_html = True)
## Ask for Custom Weights
st.subheader('**Tentukan Komposisi Portfolio Anda (%)**')
max_ef = compiled_port[2].mul(100).iloc[-1,3:].tolist()
inp_weight = st.beta_columns(num_stocks)
custom_weight = np.zeros(num_stocks)
for i in range(0,num_stocks):
custom_weight[i] = inp_weight[i].number_input(myPicks[i], min_value = 0.0, max_value = 100.0, value = max_ef[i], step = 0.1)
if sum(custom_weight) != 100:
warn = 'Pastikan Total Komposisi 100%! Komposisi Saat Ini : {}%'.format(sum(custom_weight))
st.text(warn)
return None
## Calculation for Comparation
with sh2:
with st.spinner('Tunggu Proses Kalkulasi Ya!'):
custom_weight = [x/100 for x in custom_weight]
str_df, str_fig = cumulative_performance(recent_data, compiled_port[1], custom_weight)
## Place the Charts
L3A, L3B = st.beta_columns([2,1])
L3A.plotly_chart(str_fig, use_container_width = True)
with L3B:
st.subheader('**Data Performa Strategi Portfolio**')
## Visualize DataFrame
str_df.index = pd.to_datetime(str_df.index, format = '%m/%d/%Y').strftime('%Y-%m-%d')
L3B.dataframe(str_df.style.applymap(negative_red))
str_df.index = pd.DatetimeIndex(str_df.index)
## Create Download Link
if st.button('Download Data Return Kumulatif', key = 'fourth_df'):
tmp_download_link = download_link(str_df, 'portfolio_cumulative_returns.csv', 'DOWNLOAD!')
st.markdown(tmp_download_link, unsafe_allow_html=True)
if __name__ == '__main__':
main()
| StarcoderdataPython |
3216451 | import esphome.codegen as cg
import esphome.config_validation as cv
from esphome import pins
from esphome.const import (CONF_BIT_DEPTH, CONF_CLOCK_PIN, CONF_DATA_PIN, CONF_ID,
CONF_NUM_CHANNELS, CONF_NUM_CHIPS)
AUTO_LOAD = ['output']
my9231_ns = cg.esphome_ns.namespace('my9231')
MY9231OutputComponent = my9231_ns.class_('MY9231OutputComponent', cg.Component)
MULTI_CONF = True
CONFIG_SCHEMA = cv.Schema({
cv.GenerateID(): cv.declare_id(MY9231OutputComponent),
cv.Required(CONF_DATA_PIN): pins.gpio_output_pin_schema,
cv.Required(CONF_CLOCK_PIN): pins.gpio_output_pin_schema,
cv.Optional(CONF_NUM_CHANNELS, default=6): cv.int_range(min=3, max=1020),
cv.Optional(CONF_NUM_CHIPS, default=2): cv.int_range(min=1, max=255),
cv.Optional(CONF_BIT_DEPTH, default=16): cv.one_of(8, 12, 14, 16, int=True),
}).extend(cv.COMPONENT_SCHEMA)
def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
yield cg.register_component(var, config)
di = yield cg.gpio_pin_expression(config[CONF_DATA_PIN])
cg.add(var.set_pin_di(di))
dcki = yield cg.gpio_pin_expression(config[CONF_CLOCK_PIN])
cg.add(var.set_pin_dcki(dcki))
cg.add(var.set_num_channels(config[CONF_NUM_CHANNELS]))
cg.add(var.set_num_chips(config[CONF_NUM_CHIPS]))
cg.add(var.set_bit_depth(config[CONF_BIT_DEPTH]))
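# A typical ESPHome YAML entry for this component might look like the sketch
# below (pin numbers are illustrative):
#
# my9231:
#   data_pin: GPIO13
#   clock_pin: GPIO15
#   num_channels: 6
#   num_chips: 2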
| StarcoderdataPython |
1603489 | from kingfisher_scrapy.spiders.digiwhist_base import DigiwhistBase
class GreeceDigiwhist(DigiwhistBase):
name = 'greece_digiwhist'
start_urls = ['https://opentender.eu/data/files/GR_ocds_data.json.tar.gz']
| StarcoderdataPython |
4841806 | <filename>repository/ChartRepository.py
import sys
import requests
from .Connection import Connection
from .Repository import Repository
class ChartRepository(Repository):
def __init__(self) -> None:
pass
def getRemoteData(self) -> str:
response = requests.get(Connection.FETCH_CHART_URL)
return response.content
def getRemoteSymbols(self) -> str:
response = requests.get(Connection.SYMBOLS_URL)
return response.content
| StarcoderdataPython |
9765 | <filename>features/cpp/simple/test.py<gh_stars>1-10
from regression_tests import *
class TestBase(Test):
def test_for_main(self):
assert self.out_c.has_funcs('main') or self.out_c.has_funcs('entry_point')
def test_check_main_is_not_ctor_or_dtor(self):
for c in self.out_config.classes:
assert "main" not in c.constructors
assert "main" not in c.destructors
class TestAll(TestBase):
settings = TestSettings(
input=files_in_dir('inputs/symbols'),
args='-k'
)
def test_for_string(self):
# printf() is used -> '\n' at the end of the string
# puts() is used -> no '\n' at the end of the string
assert self.out_c.has_string_literal_matching( r'ClassA::ClassA(\\n)?' )
assert self.out_c.has_string_literal_matching( r'%i %i(\\n)?' )
assert self.out_c.has_string_literal_matching( r'~ClassA::ClassA(\\n)?' )
def test_for_vtables(self):
assert self.out_config.vtable_count == 1
vtable = self.out_config.vtables[0]
assert vtable.item_count == 1
assert "doSomething" in vtable.items[0].target_name
def test_for_classes(self):
assert self.out_config.classes_count == 1
c = self.out_config.classes[0]
assert len(c.constructors) == 2
assert len(c.destructors) == 2
assert len(c.virtualMethods) == 1
class TestAllStripped(TestBase):
settings = TestSettings(
input=files_in_dir('inputs/stripped'),
args='-k'
)
def test_for_vtables(self):
assert self.out_config.vtable_count == 1
vtable = self.out_config.vtables[0]
assert vtable.item_count == 1
assert vtable.items[0].target_name # there is some (!empty) function name
def test_for_classes(self):
assert self.out_config.classes_count == 1
c = self.out_config.classes[0]
assert len(c.virtualMethods) == 1
assert len(c.constructors) == 2
assert len(c.destructors) == 2
class TestMsvc(TestBase):
settings = TestSettings(
input='inputs/msvc/simple-msvc-release.ex',
args='-k'
)
settings_d = TestSettings(
input='inputs/msvc/simple-msvc-debug.ex',
args='-k'
)
def test_for_string(self):
assert self.out_c.has_string_literal( 'ClassA::ClassA\\n' )
assert self.out_c.has_string_literal( '~ClassA::ClassA\\n' )
assert self.out_c.has_string_literal( '%i %i\\n' )
def test_for_vtables(self):
assert self.out_config.vtable_count == 2
vtable1 = self.out_config.vtables[0]
assert vtable1.item_count == 1
        vtable2 = self.out_config.vtables[1]
assert vtable2.item_count == 1
| StarcoderdataPython |
161532 | import vcr
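# Used as a bare decorator, vcr records/replays HTTP traffic to a cassette
# named after the decorated test function.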
@vcr.use_cassette
def test_esearchresult(client):
r = client.esearch(db="pubmed", term="hart rk[author]")
assert 5 < r.count
assert 0 == r.retstart
assert isinstance(r.ids, list)
assert 27814769 in r.ids
| StarcoderdataPython |
3269119 |
import unittest
import exceptions
from spidy.language import *
from spidy.common import *
class ScriptTest(unittest.TestCase):
def test_script1(self):
context = Context()
sn = parse_file('tests/scripts/script1.sp', context)
script_text = str(sn)
self.assertEqual(script_text,
'''l = []
if True:
traverse t in & breadthfirst (10 / 2):
l << &t
l >>
''')
def test_script2(self):
context = Context()
sn = parse_file('tests/scripts/script2.sp', context)
script_text = str(sn)
self.assertEqual(script_text,
'''get ('http://www.google' + '.com') as XML
part = 'blabla'
skip &('/root/div[1]/span' + part) forward
x = 0
y = 1
if (x or y):
i = 0
for c in chars:
i = (i + 1)
if y:
break
else:
continue
if x:
skip &'/div/' reverse
x = 0
else:
skip &'/div/div/' forward
x = 1
else:
get 'www.something.com' as HTML:
X-Name: 'test'
X-Date: ('today' + '-tomorrow')
X-State: x
return i
''')
def test_script3(self):
context = Context()
sn = parse_inline(
'''
if (True):
// standard header comment
for c in [1,2,3]: // -- inline comments are OK
x = 1
lst = [1,
//2, -- comments inside of multiline statements
'parenthesis(in string[ are ignored',
3]
''', context)
script_text = str(sn)
self.assertEqual(script_text,
'''if True:
for c in [1, 2, 3]:
x = 1
lst = [1, 'parenthesis(in string[ are ignored', 3]
''') | StarcoderdataPython |
1617580 | <reponame>markv58/UDI-Ping
#!/usr/bin/env python3
"""
This is a NodeServer created using the template for Polyglot v2 written in Python2/3
by Einstein.42 (<NAME>) <EMAIL>
v1.0.15
"""
import polyinterface
import sys
import time
import os
import struct
import array
import fcntl
import subprocess
import logging
LOGGER = polyinterface.LOGGER
logging.getLogger('urllib3').setLevel(logging.ERROR)
debugLog = 0
class Controller(polyinterface.Controller):
def __init__(self, polyglot):
super(Controller, self).__init__(polyglot)
self.name = 'Ping'
self.firstCycle = True
def start(self):
LOGGER.info('Started Ping')
self.discover()
self.check_params()
def shortPoll(self):
result = self.checkwlan0()
for node in self.nodes:
self.nodes[node].update()
def longPoll(self):
pass
def query(self):
self.reportDrivers()
for node in self.nodes:
self.nodes[node].reportDrivers()
def discover(self, *args, **kwargs):
global debugLog
for key,val in self.polyConfig['customParams'].items():
if key == "debug":
if val == "True":
debugLog = 1
LOGGER.info("Debug logging enabled %s" ,debugLog)
pass
else:
_netip = val.replace('.','')
if _netip[:3] == "www":
netip = _netip[3:17]
else:
netip = _netip[:14]
_key = key[:20]
self.addNode(hostnode(self, self.address, netip, val, _key))
def checkwlan0(self):
response,result = subprocess.getstatusoutput("ifconfig wlan0 | grep UP")
if debugLog == 1: LOGGER.debug("checkwlan0 %s" ,response)
return response
def update(self):
pass
def delete(self):
LOGGER.info('Deleting Ping NodeServer.')
def stop(self):
LOGGER.debug('NodeServer stopped.')
def check_params(self):
pass
def remove_notices_all(self,command):
LOGGER.info('remove_notices_all:')
# Remove all existing notices
self.removeNoticesAll()
def update_profile(self,command):
LOGGER.info('update_profile:')
st = self.poly.installprofile()
return st
id = 'controller'
commands = {
'DISCOVER': discover,
'UPDATE_PROFILE': update_profile,
'REMOVE_NOTICES_ALL': remove_notices_all,
'QUERY': query
}
drivers = [{'driver': 'ST', 'value': 1, 'uom': 2}]
class Ping(object):
def __init__(self, ip, timeout):
self.ip = ip
self.timeout = timeout
def ping(self):
response = 0
try:
response,result = subprocess.getstatusoutput("ping -c1 -W " + str(self.timeout-1) + " " + self.ip)
if debugLog == 1: LOGGER.debug("RPi %s " ,response)
if response == 0:
return response
except Exception as e:
LOGGER.error('Error %s ',e)
return None
if response == 127:
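            # Exit status 127 means the shell could not find the ping command;
            # fall back to the absolute /sbin/ping path (as used on Polisy).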
try:
response = subprocess.call(['/sbin/ping','-c1','-t' + str(self.timeout-1), self.ip], shell=False)
if debugLog == 1: LOGGER.debug("Polisy %s " ,response)
if response == 0:
return response
except Exception as e:
LOGGER.error('Error %s ',e)
return None
else:
return None
class hostnode(polyinterface.Node):
def __init__(self, controller, primary, address, ipaddress, name):
super(hostnode, self).__init__(controller, primary, address, name)
self.ip = ipaddress
self.scan = 1
self.missed = 0
def start(self):
self.setOn('DON')
self.reportDrivers()
def update(self):
if (self.scan):
netstat = Ping(ip=self.ip,timeout=self.parent.polyConfig['shortPoll'])
result = netstat.ping()
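            # Five consecutive misses mark the host as off-network; the miss
            # counter is capped at 1440.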
if (result != None):
self.missed = 0
self.setOnNetwork(0)
if debugLog == 1: LOGGER.debug(self.ip + ': On Network')
elif (self.missed >= 5):
self.setOffNetwork()
if self.missed < 1440: self.missed += 1
if debugLog ==1: LOGGER.debug(self.ip + ': Off Network')
elif self.missed >= 0 and self.missed < 5:
self.missed += 1
self.setInFault(self.missed)
if debugLog ==1: LOGGER.debug(self.ip + ': In Fault')
def setOnNetwork(self,missed):
self.setDriver('ST', 0)
self.setDriver('GV0', self.missed)
def setInFault(self, missed):
self.setDriver('ST', 1)
self.setDriver('GV0', self.missed)
def setOffNetwork(self):
self.setDriver('ST', 2)
self.setDriver('GV0', self.missed)
def setOn(self, command):
self.missed = 0
self.setOnNetwork(self.missed)
self.setDriver('GV1',1)
self.scan = 1
def setOff(self, command):
self.missed = 0
self.setOffNetwork()
self.setDriver('GV0', 0)
self.setDriver('GV1', 0)
self.scan = 0
def query(self):
self.reportDrivers()
drivers = [
{'driver': 'ST', 'value': 0, 'uom': 25},
{'driver': 'GV0', 'value': 0, 'uom': 56},
{'driver': 'GV1', 'value': 1, 'uom': 2}
]
id = 'hostnode'
commands = {
'DON': setOn, 'DOF': setOff, 'QUERY': query
}
if __name__ == "__main__":
try:
polyglot = polyinterface.Interface('PingNodeServer')
polyglot.start()
control = Controller(polyglot)
control.runForever()
except (KeyboardInterrupt, SystemExit):
polyglot.stop()
sys.exit(0)
| StarcoderdataPython |
195837 | <reponame>StuartIanNaylor/simple_audio-tensorflow<filename>Fft_sizes/simple_audio_mfcc_frame_length2048_frame_step512_fft_length2048.py<gh_stars>1-10
import os
import pathlib
#import matplotlib.pyplot as plt
import numpy as np
#import seaborn as sns
import tensorflow as tf
import time
from tensorflow.keras.layers.experimental import preprocessing
from tensorflow.keras import layers
from tensorflow.keras import models
#from IPython import display
# Set seed for experiment reproducibility
seed = 42
tf.random.set_seed(seed)
np.random.seed(seed)
time_start = time.perf_counter()
data_dir = pathlib.Path('data/mini_speech_commands')
if not data_dir.exists():
tf.keras.utils.get_file(
'mini_speech_commands.zip',
origin="http://storage.googleapis.com/download.tensorflow.org/data/mini_speech_commands.zip",
extract=True,
cache_dir='.', cache_subdir='data')
commands = np.array(tf.io.gfile.listdir(str(data_dir)))
commands = commands[commands != 'README.md']
print('Commands:', commands)
filenames = tf.io.gfile.glob(str(data_dir) + '/*/*')
filenames = tf.random.shuffle(filenames)
num_samples = len(filenames)
print('Number of total examples:', num_samples)
print('Number of examples per label:',
len(tf.io.gfile.listdir(str(data_dir/commands[0]))))
print('Example file tensor:', filenames[0])
train_files = filenames[:6400]
val_files = filenames[6400: 6400 + 800]
test_files = filenames[-800:]
print('Training set size', len(train_files))
print('Validation set size', len(val_files))
print('Test set size', len(test_files))
def decode_audio(audio_binary):
audio, _ = tf.audio.decode_wav(audio_binary)
return tf.squeeze(audio, axis=-1)
def get_label(file_path):
parts = tf.strings.split(file_path, os.path.sep)
# Note: You'll use indexing here instead of tuple unpacking to enable this
# to work in a TensorFlow graph.
return parts[-2]
def get_waveform_and_label(file_path):
label = get_label(file_path)
audio_binary = tf.io.read_file(file_path)
waveform = decode_audio(audio_binary)
return waveform, label
AUTOTUNE = tf.data.AUTOTUNE
files_ds = tf.data.Dataset.from_tensor_slices(train_files)
waveform_ds = files_ds.map(get_waveform_and_label, num_parallel_calls=AUTOTUNE)
def get_spectrogram(waveform):
sample_rate = 16000.0
# Padding for files with less than 16000 samples
zero_padding = tf.zeros([16000] - tf.shape(waveform), dtype=tf.float32)
# Concatenate audio with padding so that all audio clips will be of the
# same length
waveform = tf.cast(waveform, tf.float32)
equal_length = tf.concat([waveform, zero_padding], 0)
spectrogram = tf.signal.stft(
equal_length, frame_length=2048, frame_step=512, fft_length=2048)
spectrogram = tf.abs(spectrogram)
# Warp the linear scale spectrograms into the mel-scale.
num_spectrogram_bins = spectrogram.shape[-1]
lower_edge_hertz, upper_edge_hertz, num_mel_bins = 80.0, 7600.0, 80
linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hertz, upper_edge_hertz)
mel_spectrogram = tf.tensordot(spectrogram, linear_to_mel_weight_matrix, 1)
mel_spectrogram.set_shape(spectrogram.shape[:-1].concatenate(linear_to_mel_weight_matrix.shape[-1:]))
# Compute a stabilized log to get log-magnitude mel-scale spectrograms.
log_mel_spectrogram = tf.math.log(mel_spectrogram + 1e-6)
# Compute MFCCs from log_mel_spectrograms and take the first 13.
spectrogram = tf.signal.mfccs_from_log_mel_spectrograms(log_mel_spectrogram)[..., :13]
return spectrogram
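# With frame_length=2048, frame_step=512 and 16000 input samples, the STFT yields
# roughly 28 frames, so each example becomes an approximately (28, 13) MFCC matrix
# before the channel dimension is added below.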
def get_spectrogram_and_label_id(audio, label):
spectrogram = get_spectrogram(audio)
spectrogram = tf.expand_dims(spectrogram, -1)
label_id = tf.argmax(label == commands)
return spectrogram, label_id
spectrogram_ds = waveform_ds.map(
get_spectrogram_and_label_id, num_parallel_calls=AUTOTUNE)
def preprocess_dataset(files):
files_ds = tf.data.Dataset.from_tensor_slices(files)
output_ds = files_ds.map(get_waveform_and_label, num_parallel_calls=AUTOTUNE)
output_ds = output_ds.map(
get_spectrogram_and_label_id, num_parallel_calls=AUTOTUNE)
return output_ds
train_ds = spectrogram_ds
val_ds = preprocess_dataset(val_files)
test_ds = preprocess_dataset(test_files)
batch_size = 64
train_ds = train_ds.batch(batch_size)
val_ds = val_ds.batch(batch_size)
train_ds = train_ds.cache().prefetch(AUTOTUNE)
val_ds = val_ds.cache().prefetch(AUTOTUNE)
for spectrogram, _ in spectrogram_ds.take(1):
input_shape = spectrogram.shape
print('Input shape:', input_shape)
num_labels = len(commands)
norm_layer = preprocessing.Normalization()
norm_layer.adapt(spectrogram_ds.map(lambda x, _: x))
model = models.Sequential([
layers.Input(shape=input_shape),
preprocessing.Resizing(32, 32),
norm_layer,
layers.Conv2D(32, 3, activation='relu'),
layers.Conv2D(64, 3, activation='relu'),
layers.MaxPooling2D(),
layers.Dropout(0.25),
layers.Flatten(),
layers.Dense(128, activation='relu'),
layers.Dropout(0.5),
layers.Dense(num_labels),
])
model.summary()
model.compile(
optimizer=tf.keras.optimizers.Adam(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'],
)
EPOCHS = 50
history = model.fit(
train_ds,
validation_data=val_ds,
epochs=EPOCHS,
callbacks=tf.keras.callbacks.EarlyStopping(verbose=1, patience=3),
)
test_audio = []
test_labels = []
for audio, label in test_ds:
test_audio.append(audio.numpy())
test_labels.append(label.numpy())
test_audio = np.array(test_audio)
test_labels = np.array(test_labels)
y_pred = np.argmax(model.predict(test_audio), axis=1)
y_true = test_labels
test_acc = sum(y_pred == y_true) / len(y_true)
print(f'Test set accuracy: {test_acc:.0%}')
sample_file = data_dir/'no/01bb6a2a_nohash_0.wav'
sample_ds = preprocess_dataset([str(sample_file)])
for spectrogram, label in sample_ds.batch(1):
prediction = model(spectrogram)
print(f'Predictions for "{commands[label[0]]}"')
print(commands, tf.nn.softmax(prediction[0]))
sample_file = data_dir/'right/3a789a0d_nohash_0.wav'
sample_ds = preprocess_dataset([str(sample_file)])
for spectrogram, label in sample_ds.batch(1):
prediction = model(spectrogram)
print(f'Predictions for "{commands[label[0]]}"')
print(commands, tf.nn.softmax(prediction[0]))
time_end=time.perf_counter()
print(f'Run time {time_end - time_start}')
| StarcoderdataPython |
54950 | #!/usr/bin/env python3
import sys
input = sys.stdin.readline
############ ---- Input Functions, courtesy of 'thekushalghosh' ---- ############
def inp():
return(int(input()))
def inlt():
return(list(map(int, input().split())))
def insr():
s = input()
return(list(s[:len(s) - 1]))
def invr():
return(map(int, input().split()))
if __name__ == "__main__":
s = insr()
countl = 0
countu = 0
countl = sum(1 for c in s if c.islower())
countu = len(s) - countl
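    # Uppercase wins only on a strict majority; an even split keeps the string lowercase.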
if countu > countl:
s = [i.upper() for i in s]
print(''.join(s))
else:
s = [i.lower() for i in s]
print(''.join(s))
| StarcoderdataPython |
3366263 |
"""Central location for shared arparse convention definitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import codecs
import functools
from absl import app as absl_app
from absl import flags
# This codifies help string conventions and makes it easy to update them if
# necessary. Currently the only major effect is that help bodies start on the
# line after flags are listed. All flag definitions should wrap the text bodies
# with help wrap when calling DEFINE_*.
_help_wrap = functools.partial(flags.text_wrap, length=80, indent="",
firstline_indent="\n")
# Pretty formatting causes issues when utf-8 is not installed on a system.
def _stdout_utf8():
try:
codecs.lookup("utf-8")
except LookupError:
return False
return sys.stdout.encoding == "UTF-8"
if _stdout_utf8():
help_wrap = _help_wrap
else:
def help_wrap(text, *args, **kwargs):
return _help_wrap(text, *args, **kwargs).replace(u"\ufeff", u"")
# Replace None with h to also allow -h
absl_app.HelpshortFlag.SHORT_NAME = "h"
| StarcoderdataPython |
1740806 | #!/usr/bin/env python
"""generate_yml.py.py - auto-generated by softnanotools"""
from softnanotools.logger import Logger
import yaml
logger = Logger('GENERATE YML.PY')
A = {
1: {'enzymes': 10},
2: {'enzymes': 20},
3: {'enzymes': 30},
4: {'enzymes': 40},
5: {'enzymes': 50}
}
B = {
1: {'spring_length': 0.25},
2: {'spring_length': 0.50},
3: {'spring_length': 1.00},
4: {'spring_length': 1.50},
5: {'spring_length': 2.00},
}
C = {
1: {'reaction_rate': 0.01},
2: {'reaction_rate': 0.1},
3: {'reaction_rate': 1.0},
4: {'reaction_rate': 10.0},
}
D = {
1: {
'diffusion_dictionary': {
'A': 0.01,
'B': 0.01,
'E': 0.01,
}
},
2: {
'diffusion_dictionary': {
'A': 0.1,
'B': 0.1,
'E': 0.1,
}
},
3: {
'diffusion_dictionary': {
'A': 1.0,
'B': 1.0,
'E': 1.0,
}
},
4: {
'diffusion_dictionary': {
'A': 10.0,
'B': 10.0,
'E': 10.0,
}
},
}
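# Each schema (A-D) sweeps a single parameter over the base settings.yml;
# main() writes one file per entry, e.g. yml/A1.yml ... yml/D4.yml.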
def main(**kwargs):
logger.info('Running generate_yml.py...')
# insert code here
with open('settings.yml', 'r') as f:
template = yaml.safe_load(f)
for schema, name in zip(
[A, B, C, D],
['A', 'B', 'C', 'D']
):
for i, parameters in schema.items():
data = template.copy()
for key, value in parameters.items():
data[key] = value
with open(f'yml/{name}{i}.yml', 'w') as f:
yaml.dump(data, f)
logger.info('Done!')
return
if __name__ == '__main__':
import argparse
    parser = argparse.ArgumentParser(description='generate_yml.py - auto-generated by softnanotools')
main(**vars(parser.parse_args()))
| StarcoderdataPython |
1604852 | <reponame>elotgamu/libraryproject
# from django.shortcuts import render
from django.views.generic import ListView, TemplateView
from .models import Book, Genre, Author
# Create your views here.
class Home(TemplateView):
template_name = "pages/home.html"
def get_context_data(self, **kwargs):
context = super(Home, self).get_context_data(**kwargs)
context['recentbooks'] = Book.objects.order_by('-added_at')[:3]
context['genres'] = Genre.objects.order_by('-added_at')[:3]
context['authors'] = Author.objects.order_by('-added_at')[:3]
return context
class BooksList(ListView):
model = Book
template_name = "books/book_list.html"
context_object_name = 'books'
paginate_by = 3
def get_queryset(self):
books = super(BooksList, self).get_queryset()
get_search = self.request.GET.get('search-field')
filter_criteria = self.request.GET.get('filter-type')
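        # filter-type '2' means "search by author" (first or last name);
        # any other value searches by book name.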
if get_search is None:
books = Book.objects.all()
else:
if filter_criteria == '2':
''' Here the user wants to show book of the authors '''
authors_name = Author.objects.filter(
first_name__icontains=get_search)
if authors_name.exists():
authors = authors_name
else:
authors_last_name = Author.objects.filter(
last_name__istartswith=get_search)
authors = authors_last_name
books = Book.objects.filter(author__in=authors)
else:
books = Book.objects.filter(name__icontains=get_search)
return books
class GenreList(ListView):
model = Genre
template_name = "books/genres.html"
context_object_name = 'genres'
class GenreBooksList(ListView):
model = Book
template_name = "books/per_genre_books.html"
context_object_name = 'books'
def get_queryset(self):
return Book.objects.filter(genre=self.kwargs['id'])
class AuthorList(ListView):
model = Author
template_name = "books/authors.html"
context_object_name = 'authors'
class AuthorBooksList(ListView):
model = Book
template_name = "books/per_author_books.html"
context_object_name = 'books'
def get_queryset(self):
return Book.objects.filter(author=self.kwargs['id'])
| StarcoderdataPython |
68366 | # MIT License
#
# Copyright (c) 2019 SSL-Roots
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# coding: UTF-8
import rospy
import math
import copy
from python_qt_binding.QtCore import Qt, QPointF, QRectF
from python_qt_binding.QtGui import QPainter, QPen ,QColor, QPolygonF
from python_qt_binding.QtGui import QMouseEvent
from python_qt_binding.QtWidgets import QWidget
from geometry_msgs.msg import Pose2D
from consai2_msgs.msg import VisionGeometry, BallInfo, RobotInfo
from consai2_msgs.msg import Replacements, ReplaceBall, ReplaceRobot
from consai2_msgs.msg import ControlTarget
from consai2_msgs.msg import DecodedReferee
import tool
class PaintWidget(QWidget):
def __init__(self, parent=None):
super(PaintWidget, self).__init__(parent)
        self._WHITE_LINE_THICKNESS = 2 # thickness of the white field lines
        self._ZOOM_RATE = 0.1 # zoom step per mouse-wheel tick
        self._SCALE_LIMIT = 0.2 # lower limit of the zoom-out scale
        self._ALPHA_DETECTED = 255 # alpha for robots/ball that were detected
        self._ALPHA_NOT_DETECTED = 127 # alpha for robots/ball that were not detected
        self._COLOR_BALL = QColor(Qt.red)
        self._COLOR_ROBOT = {'blue':QColor(Qt.cyan), 'yellow':QColor(Qt.yellow)}
        self._ID_POS = (0.15, 0.15) # offset from the robot center at which to draw the ID
        self._FLAG_POS = (0.15, 0) # offset from the robot center at which to draw control_target flags
# Replace
self._REPLACE_CLICK_POS_THRESHOLD = 0.1
self._REPLACE_CLICK_VEL_ANGLE_THRESHOLD = self._REPLACE_CLICK_POS_THRESHOLD + 0.1
self._REPLACE_BALL_VELOCITY_GAIN = 3.0
self._REPLACE_MAX_BALL_VELOCITY = 8.0
self._BALL_RADIUS = rospy.get_param('consai2_description/ball_radius', 0.0215)
self._ROBOT_RADIUS = rospy.get_param('consai2_description/robot_radius', 0.09)
self._MAX_ID = rospy.get_param('consai2_description/max_id', 15)
self._SIDE = rospy.get_param('consai2_description/our_side', 'left')
        # Invert the team side
self._invert_side = False
if self._SIDE != 'left':
self._invert_side = True
        # GUI parameters
        self._trans = QPointF(0.0, 0.0) # translation in x and y
        self._mouse_trans = QPointF(0.0, 0.0) # translation caused by mouse dragging
        self._scale = QPointF(1.0, 1.0) # zoom in / zoom out
        self._do_rotate_view = False # whether the view is rotated by 90 degrees
        self._view_height = self.height() # drawing size (height)
        self._view_width = self.width() # drawing size (width)
        self._scale_field_to_view = 1.0 # scale from field coordinates down to the drawing area
        self._click_point = QPointF(0.0, 0.0) # drawing coordinate clicked with the mouse
        self._current_mouse_pos = QPointF(0.0, 0.0) # mouse cursor position
self._replace_func = None
self._replace_id = 0
self._replace_is_yellow = False
self._do_replacement = False
self._replacement_target = {'ball_pos':False, 'ball_vel':False,
'robot_pos':False, 'robot_angle':False}
        # Field geometry
        # These values are updated from raw_vision_geometry
self._field_length = 9.0
self._field_width = 6.0
self._field_goal_width = 1.0
self._field_goal_depth = 0.2
self._field_boundary_width = 0.3
self._field_lines = []
self._field_arcs = []
        # Joystick information
        self._joy_target = ControlTarget()
        # Robot and ball information
        self._ball_info = BallInfo()
        self._robot_info = {'blue':[],'yellow':[]}
        # Referee information
self._decoded_referee = None
# Publisher
self._pub_replace = rospy.Publisher('sim_sender/replacements',
Replacements, queue_size=1)
# Subscribers
self._sub_decoded_referee = rospy.Subscriber(
'referee_wrapper/decoded_referee', DecodedReferee,
self._callback_referee, queue_size=1)
self._sub_geometry = rospy.Subscriber(
'vision_receiver/raw_vision_geometry', VisionGeometry,
self._callback_geometry, queue_size=1)
self._sub_ball_info = rospy.Subscriber(
'vision_wrapper/ball_info', BallInfo,
self._callback_ball_info, queue_size=1)
self._sub_joy_target = rospy.Subscriber(
'consai2_examples/joy_target', ControlTarget,
self._callback_joy_target, queue_size=1)
self._subs_robot_info = {'blue':[], 'yellow':[]}
self._control_targets = {'blue':[], 'yellow':[]}
self._subs_control_target = {'blue':[], 'yellow':[]}
for robot_id in range(self._MAX_ID +1):
self._robot_info['blue'].append(RobotInfo())
self._robot_info['yellow'].append(RobotInfo())
self._control_targets['blue'].append(ControlTarget())
self._control_targets['yellow'].append(ControlTarget())
            # Append the robot ID as a hexadecimal string suffix
topic_id = hex(robot_id)[2:]
topic_name = 'vision_wrapper/robot_info_blue_' + topic_id
self._subs_robot_info['blue'].append(
rospy.Subscriber(topic_name, RobotInfo,
self._callback_blue_info, callback_args=robot_id))
topic_name = 'vision_wrapper/robot_info_yellow_' + topic_id
self._subs_robot_info['yellow'].append(
rospy.Subscriber(topic_name, RobotInfo,
self._callback_yellow_info, callback_args=robot_id))
topic_name = 'consai2_game/control_target_blue_' + topic_id
self._subs_control_target['blue'].append(
rospy.Subscriber(topic_name, ControlTarget,
self._callback_blue_target, callback_args=robot_id))
topic_name = 'consai2_game/control_target_blue_' + topic_id
self._subs_control_target['blue'].append(
rospy.Subscriber(topic_name, ControlTarget,
self._callback_blue_target, callback_args=robot_id))
topic_name = 'consai2_game/control_target_yellow_' + topic_id
self._subs_control_target['yellow'].append(
rospy.Subscriber(topic_name, ControlTarget,
self._callback_yellow_target, callback_args=robot_id))
# Configs
# This function enables mouse tracking without pressing mouse button
self.setMouseTracking(True)
def _callback_geometry(self, msg):
        # Update the field geometry
if msg.field_length:
self._field_length = msg.field_length
if msg.field_width:
self._field_width = msg.field_width
if msg.goal_width:
self._field_goal_width = msg.goal_width
if msg.goal_depth:
self._field_goal_depth = msg.goal_depth
if msg.boundary_width:
self._field_boundary_width = msg.boundary_width
if msg.field_lines:
self._field_lines = []
for line in msg.field_lines:
self._field_lines.append(
{"name":line.name, "p1_x":line.p1_x, "p1_y":line.p1_y,
"p2_x":line.p2_x, "p2_y":line.p2_y, "thickness":line.thickness})
if msg.field_arcs:
self._field_arcs = []
for arc in msg.field_arcs:
self._field_arcs.append(
{"name":arc.name, "center_x":arc.center_x, "center_y":arc.center_y,
"radius":arc.radius, "a1":arc.a1, "a2":arc.a2, "thickness":arc.thickness})
self._resize_draw_world()
def _callback_referee(self, msg):
self._decoded_referee = msg
def _callback_ball_info(self, msg):
self._ball_info = msg
def _callback_blue_info(self, msg, robot_id):
self._robot_info['blue'][robot_id] = msg
def _callback_yellow_info(self, msg, robot_id):
self._robot_info['yellow'][robot_id] = msg
def _callback_blue_target(self, msg, robot_id):
self._control_targets['blue'][robot_id] = msg
def _callback_yellow_target(self, msg, robot_id):
self._control_targets['yellow'][robot_id] = msg
def _callback_joy_target(self, msg):
self._joy_target = msg
def mousePressEvent(self, event):
        # Drag the mouse to move the drawing area
        # Right click resets the translation and zoom
if event.buttons() == Qt.LeftButton:
self._click_point = event.localPos()
self._do_replacement = self._is_replacement_click(self._click_point)
elif event.buttons() == Qt.RightButton:
self._reset_painter_status()
self.update()
def mouseMoveEvent(self, event):
        # Drag the mouse to move the drawing area
        # Do not move the drawing area while a replacement is in progress
self._current_mouse_pos = event.localPos()
if self._do_replacement:
pass
elif event.buttons() == Qt.LeftButton:
self._mouse_trans = (
self._current_mouse_pos - self._click_point) / self._scale.x()
self.update()
def mouseReleaseEvent(self, event):
        # Drag the mouse to move the drawing area
        # Do not move the drawing area while a replacement is in progress
if self._do_replacement:
self._do_replacement = False
self._replace_func(event.localPos())
else:
self._trans += self._mouse_trans
self._mouse_trans = QPointF(0.0, 0.0)
self.update()
def wheelEvent(self, event):
        # Zoom the drawing area in and out with the mouse wheel
s = self._scale.x()
if event.angleDelta().y() > 0:
self._scale.setX(s + self._ZOOM_RATE)
self._scale.setY(s + self._ZOOM_RATE)
else:
if s > self._SCALE_LIMIT:
self._scale.setX(s - self._ZOOM_RATE)
self._scale.setY(s - self._ZOOM_RATE)
self.update()
def paintEvent(self, event):
painter = QPainter(self)
        # Bring the drawing origin to the center of the widget
cx = float(self.width()) * 0.5
cy = float(self.height()) * 0.5
painter.translate(cx,cy)
painter.scale(self._scale.x(), self._scale.y())
painter.translate(self._trans + self._mouse_trans)
if self._do_rotate_view is True:
painter.rotate(-90)
        # Everything that should appear on top is drawn from here on
        self._draw_field(painter)
        # Referee information
        self._draw_referee(painter)
        self._draw_ball(painter)
        self._draw_ball_velocity(painter)
        # Joystick related
        if len(self._joy_target.path) > 0:
            self._draw_joy_target(painter)
        self._draw_robots(painter)
        # grSim replacement related
if self._replacement_target['ball_pos'] or self._replacement_target['robot_pos']:
self._draw_pos_replacement(painter)
self._draw_cursor_coordinate(painter)
elif self._replacement_target['ball_vel']:
self._draw_vel_replacement(painter)
elif self._replacement_target['robot_angle']:
self._draw_angle_replacement(painter)
else:
self._draw_cursor_coordinate(painter)
def resizeEvent(self, event):
self._resize_draw_world()
def _resize_draw_world(self):
        # Resize the drawn field to fit the widget size
        # This also decides whether the drawing should be rotated
        # Compute the aspect ratio of the widget
widget_height = float(self.height())
widget_width = float(self.width())
widget_w_per_h = widget_width / widget_height
        # Compute the aspect ratio of the field
field_width = self._field_length + self._field_boundary_width * 2.0
field_height = self._field_width + self._field_boundary_width * 2.0
field_w_per_h = field_width / field_height
field_h_per_w = 1.0 / field_w_per_h
if widget_w_per_h >= field_w_per_h:
            # The widget is wider than the field
self._view_height = widget_height
self._view_width = widget_height * field_w_per_h
self._do_rotate_view = False
elif widget_w_per_h <= field_h_per_w:
            # The widget is taller than the field
self._view_height = widget_width
self._view_width = widget_width * field_w_per_h
self._do_rotate_view = True
else:
            # Give the rotation decision some hysteresis
if self._do_rotate_view is True:
self._view_height = widget_height * field_h_per_w
self._view_width = widget_height
else:
self._view_height = widget_width * field_h_per_w
self._view_width = widget_width
self._scale_field_to_view = self._view_width / field_width
def _convert_to_view(self, x, y):
        # Convert field coordinates to view (drawing) coordinates
view_x = x * self._scale_field_to_view
view_y = -y * self._scale_field_to_view
point = QPointF(view_x, view_y)
return point
def _convert_to_field(self, x, y):
        # Convert view (drawing) coordinates to field coordinates
x /= self._scale.x()
y /= self._scale.y()
x -= (self._trans.x() + self._mouse_trans.x())
y -= (self._trans.y() + self._mouse_trans.y())
x -= self.width() * 0.5 / self._scale.x()
y -= self.height() * 0.5 / self._scale.y()
if self._do_rotate_view:
x, y = -y, x
field_x = x / self._scale_field_to_view
field_y = -y / self._scale_field_to_view
point = QPointF(field_x, field_y)
return point
def _reset_painter_status(self):
        # Reset the translation and zoom of the drawing area
self._trans = QPointF(0.0, 0.0)
self._mouse_trans = QPointF(0.0, 0.0)
self._scale = QPointF(1.0, 1.0)
def _is_replacement_click(self, mouse_pos):
        # Treat the click as a replacement if an object exists at the clicked position
        # If the ball and a robot are close together, ball replacement takes priority
field_point = self._convert_to_field(mouse_pos.x(), mouse_pos.y())
is_clicked = True
result = self._is_ball_clicked(field_point)
if result == 'pos':
self._replacement_target['ball_pos'] = True
self._replace_func = self._replace_ball_pos
elif result == 'vel_angle':
self._replacement_target['ball_vel'] = True
self._replace_func = self._replace_ball_vel
else:
result, robot_id, is_yellow = self._is_robot_clicked(field_point)
self._replace_id = robot_id
self._replace_is_yellow = is_yellow
if result == 'pos':
self._replacement_target['robot_pos'] = True
self._replace_func = self._replace_robot_pos
elif result == 'vel_angle':
self._replacement_target['robot_angle'] = True
self._replace_func = self._replace_robot_angle
else:
is_clicked = False
return is_clicked
def _is_clicked(self, field_point1, field_point2):
        # Determine whether an object on the field was clicked
diff_point = field_point1 - field_point2
diff_norm = math.hypot(diff_point.x(), diff_point.y())
if diff_norm < self._REPLACE_CLICK_POS_THRESHOLD:
return 'pos'
elif diff_norm < self._REPLACE_CLICK_VEL_ANGLE_THRESHOLD:
return 'vel_angle'
return False
def _is_ball_clicked(self, field_point):
        # Determine whether the ball was clicked
        # Skip the check if the ball has disappeared
if self._ball_info.disappeared:
return False
pos_x = self._ball_info.pose.x
pos_y = self._ball_info.pose.y
ball_pos = QPointF(pos_x, pos_y)
return self._is_clicked(field_point, ball_pos)
def _is_robot_clicked(self, field_point):
        # Determine whether a robot was clicked
        # Disappeared robots are excluded
is_clicked = False
replace_id = 0
is_yellow = False
for robot in self._robot_info['blue']:
if robot.disappeared:
continue
robot_point = QPointF(robot.pose.x, robot.pose.y)
is_clicked = self._is_clicked(field_point, robot_point)
if is_clicked:
is_yellow = False
return is_clicked, robot.robot_id, is_yellow
for robot in self._robot_info['yellow']:
if robot.disappeared:
continue
robot_point = QPointF(robot.pose.x, robot.pose.y)
is_clicked = self._is_clicked(field_point, robot_point)
if is_clicked:
is_yellow = True
return is_clicked, robot.robot_id, is_yellow
return is_clicked, replace_id, is_yellow
def _replace_ball_pos(self, mouse_pos):
        # Replacement of the ball position
field_point = self._convert_to_field(mouse_pos.x(), mouse_pos.y())
ball = ReplaceBall()
ball.x = field_point.x()
ball.y = field_point.y()
ball.is_enabled = True
if self._invert_side:
ball.x *= -1.0
ball.y *= -1.0
replacements = Replacements()
replacements.ball = ball
self._pub_replace.publish(replacements)
self._replacement_target['ball_pos'] = False
def _replace_ball_vel(self, mouse_pos):
        # Replacement of the ball velocity
ball_point = QPointF(self._ball_info.pose.x, self._ball_info.pose.y)
field_point = self._convert_to_field(mouse_pos.x(), mouse_pos.y())
velocity = self._replacement_velocity(ball_point, field_point)
ball = ReplaceBall()
ball.x = ball_point.x()
ball.y = ball_point.y()
ball.vx = velocity.x()
ball.vy = velocity.y()
ball.is_enabled = True
if self._invert_side:
ball.x *= -1.0
ball.y *= -1.0
ball.vx *= -1.0
ball.vy *= -1.0
replacements = Replacements()
replacements.ball = ball
self._pub_replace.publish(replacements)
self._replacement_target['ball_vel'] = False
def _replacement_velocity(self, from_point, to_point):
        # Compute the ball velocity for the replacement
diff_point = to_point - from_point
diff_norm = math.hypot(diff_point.x(), diff_point.y())
velocity_norm = diff_norm * self._REPLACE_BALL_VELOCITY_GAIN
if velocity_norm > self._REPLACE_MAX_BALL_VELOCITY:
velocity_norm = self._REPLACE_MAX_BALL_VELOCITY
angle = math.atan2(diff_point.y(), diff_point.x())
velocity = QPointF(
velocity_norm * math.cos(angle),
velocity_norm * math.sin(angle))
return velocity
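    # Worked example for _replacement_velocity above (illustrative numbers only; the real
    # _REPLACE_BALL_VELOCITY_GAIN and _REPLACE_MAX_BALL_VELOCITY constants are defined
    # elsewhere in this class): with a gain of 3.0 and a limit of 8.0 m/s, dragging 2.0 m
    # away from the ball requests 6.0 m/s along the drag direction, while a 4.0 m drag
    # would be clamped to 8.0 m/s.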
def _replace_robot_pos(self, mouse_pos):
        # Replacement of the robot position
field_point = self._convert_to_field(mouse_pos.x(), mouse_pos.y())
        # Convert the robot angle from radians to degrees
direction = 0
if self._replace_is_yellow:
direction = math.degrees(self._robot_info['yellow'][self._replace_id].pose.theta)
else:
direction = math.degrees(self._robot_info['blue'][self._replace_id].pose.theta)
robot = ReplaceRobot()
robot.x = field_point.x()
robot.y = field_point.y()
robot.dir = direction
robot.id = self._replace_id
robot.yellowteam = self._replace_is_yellow
robot.turnon = True
if self._invert_side:
robot.x *= -1.0
robot.y *= -1.0
robot.dir += 180
replacements = Replacements()
replacements.robots.append(robot)
self._pub_replace.publish(replacements)
self._replacement_target['robot_pos'] = False
def _replace_robot_angle(self, mouse_pos):
        # Replacement of the robot angle
field_point = self._convert_to_field(mouse_pos.x(), mouse_pos.y())
        # Convert the robot angle from radians to degrees
robot_point = QPointF()
if self._replace_is_yellow:
robot_point = QPointF(
self._robot_info['yellow'][self._replace_id].pose.x,
self._robot_info['yellow'][self._replace_id].pose.y)
else:
robot_point = QPointF(
self._robot_info['blue'][self._replace_id].pose.x,
self._robot_info['blue'][self._replace_id].pose.y)
robot = ReplaceRobot()
robot.x = robot_point.x()
robot.y = robot_point.y()
robot.dir = math.degrees(self._to_angle(robot_point, field_point))
robot.id = self._replace_id
robot.yellowteam = self._replace_is_yellow
robot.turnon = True
if self._invert_side:
robot.x *= -1.0
robot.y *= -1.0
robot.dir += 180
replacements = Replacements()
replacements.robots.append(robot)
self._pub_replace.publish(replacements)
self._replacement_target['robot_angle'] = False
def _draw_field(self, painter):
        # Draw the green field and the white lines
        # Draw the green carpet
painter.setPen(Qt.black)
painter.setBrush(Qt.green)
rect = QRectF(-self._view_width*0.5, -self._view_height*0.5,
self._view_width, self._view_height)
painter.drawRect(rect)
        # Draw the white lines
painter.setPen(QPen(Qt.white, self._WHITE_LINE_THICKNESS))
for line in self._field_lines:
p1 = self._convert_to_view(line["p1_x"], line["p1_y"])
p2 = self._convert_to_view(line["p2_x"], line["p2_y"])
painter.drawLine(p1, p2)
for arc in self._field_arcs:
top_left = self._convert_to_view(
arc["center_x"] - arc["radius"],
arc["center_y"] + arc["radius"] )
size = arc["radius"] * 2.0 * self._scale_field_to_view
# angle must be 1/16 degrees order
start_angle = math.degrees(arc["a1"]) * 16
end_angle = math.degrees(arc["a2"]) * 16
span_angle = end_angle - start_angle
painter.drawArc(top_left.x(), top_left.y(), size, size, start_angle, span_angle)
def _draw_ball(self, painter):
        # Draw the ball
if self._ball_info.disappeared is False:
point = self._convert_to_view(
self._ball_info.pose.x, self._ball_info.pose.y)
size = self._BALL_RADIUS * self._scale_field_to_view
ball_color = copy.deepcopy(self._COLOR_BALL)
if self._ball_info.detected is False:
            # Change the transparency when the ball is not detected
ball_color.setAlpha(self._ALPHA_NOT_DETECTED)
painter.setPen(Qt.black)
painter.setBrush(ball_color)
painter.drawEllipse(point, size, size)
def _draw_ball_velocity(self, painter):
        # Draw the direction of the ball velocity
VELOCITY_THRESH = 1.0
PAINT_DIST = 10.0 # meters
ball_pos = self._ball_info.pose
ball_vel = self._ball_info.velocity
        # Do not draw anything if the velocity is small
if math.hypot(ball_vel.x, ball_vel.y) < VELOCITY_THRESH:
return
direction = math.atan2(ball_vel.y, ball_vel.x)
vel_pos_x = PAINT_DIST * math.cos(direction) + ball_pos.x
vel_pos_y = PAINT_DIST * math.sin(direction) + ball_pos.y
point1 = self._convert_to_view(ball_pos.x, ball_pos.y)
point2 = self._convert_to_view(vel_pos_x, vel_pos_y)
painter.setPen(QPen(QColor(102, 0, 255), 2))
painter.drawLine(point1, point2)
def _draw_robots(self, painter):
        # Draw all robots
for blue in self._robot_info['blue']:
self._draw_robot(painter, blue, 'blue')
for yellow in self._robot_info['yellow']:
self._draw_robot(painter, yellow, 'yellow')
def _draw_robot(self, painter, robot, color):
        # Draw a single robot
if robot.disappeared is False:
point = self._convert_to_view(robot.pose.x, robot.pose.y)
size = self._ROBOT_RADIUS * self._scale_field_to_view
robot_color = copy.deepcopy(self._COLOR_ROBOT[color])
if robot.detected is False:
            # Change the transparency when the robot is not detected
robot_color.setAlpha(self._ALPHA_NOT_DETECTED)
painter.setPen(Qt.black)
painter.setBrush(robot_color)
painter.drawEllipse(point, size, size)
            # Robot orientation
line_x = self._ROBOT_RADIUS * math.cos(robot.pose.theta)
line_y = self._ROBOT_RADIUS * math.sin(robot.pose.theta)
line_point = point + self._convert_to_view(line_x, line_y)
painter.drawLine(point, line_point)
            # Robot ID
text_point = point + self._convert_to_view(self._ID_POS[0], self._ID_POS[1])
painter.drawText(text_point, str(robot.robot_id))
# ControlTarget
self._draw_control_target(painter, color, robot)
def _draw_cursor_coordinate(self, painter):
        # Draw the field coordinates of the mouse cursor
current_pos = self._convert_to_field(
self._current_mouse_pos.x(), self._current_mouse_pos.y())
current_point = self._convert_to_view(current_pos.x(), current_pos.y())
text = "(" + str(round(current_pos.x(),2)) + ", " + str(round(current_pos.y(),2)) + ")"
painter.setPen(Qt.black)
painter.drawText(current_point, text)
def _draw_pos_replacement(self, painter):
        # Draw the position replacement
start_pos = self._convert_to_field(
self._click_point.x(), self._click_point.y())
current_pos = self._convert_to_field(
self._current_mouse_pos.x(), self._current_mouse_pos.y())
start_point = self._convert_to_view(start_pos.x(), start_pos.y())
current_point = self._convert_to_view(current_pos.x(), current_pos.y())
painter.setPen(QPen(Qt.red, 2))
painter.drawLine(start_point, current_point)
def _draw_vel_replacement(self, painter):
        # Draw the ball velocity replacement
current_pos = self._convert_to_field(
self._current_mouse_pos.x(), self._current_mouse_pos.y())
current_point = self._convert_to_view(current_pos.x(), current_pos.y())
ball_pos = QPointF(
self._ball_info.pose.x, self._ball_info.pose.y)
ball_point = self._convert_to_view(ball_pos.x(), ball_pos.y())
painter.setPen(QPen(Qt.blue, 2))
painter.drawLine(ball_point, current_point)
        # Draw the numeric velocity value
velocity = self._replacement_velocity(ball_pos, current_pos)
text = "[" + str(round(velocity.x(),2)) + ", " + str(round(velocity.y(),2)) + "]"
painter.setPen(Qt.black)
painter.drawText(current_point, text)
def _draw_angle_replacement(self, painter):
        # Draw the robot angle replacement
robot_pos = QPointF()
if self._replace_is_yellow:
robot_pos.setX(self._robot_info['yellow'][self._replace_id].pose.x)
robot_pos.setY(self._robot_info['yellow'][self._replace_id].pose.y)
else:
robot_pos.setX(self._robot_info['blue'][self._replace_id].pose.x)
robot_pos.setY(self._robot_info['blue'][self._replace_id].pose.y)
robot_point = self._convert_to_view(robot_pos.x(), robot_pos.y())
current_pos = self._convert_to_field(
self._current_mouse_pos.x(), self._current_mouse_pos.y())
current_point = self._convert_to_view(current_pos.x(), current_pos.y())
painter.setPen(QPen(Qt.blue, 2))
painter.drawLine(robot_point, current_point)
        # Draw the numeric angle value
angle = math.degrees(self._to_angle(robot_pos, current_pos))
text = "[" + str(round(angle,2)) + "]"
painter.setPen(Qt.black)
painter.drawText(current_point, text)
def _draw_control_target(self, painter, color, robot):
        # Draw the control target values of the robot
        # Variable used for drawing the path lines
prev_point = None
robot_id = robot.robot_id
target = self._control_targets[color][robot_id]
        # Draw the path
for pose in target.path:
point = self._convert_to_view(pose.x, pose.y)
size = self._ROBOT_RADIUS * self._scale_field_to_view
target_color = QColor(Qt.magenta)
target_color.setAlphaF(0.5)
painter.setPen(Qt.black)
painter.setBrush(target_color)
painter.drawEllipse(point, size, size)
            # Orientation
line_x = self._ROBOT_RADIUS * math.cos(pose.theta)
line_y = self._ROBOT_RADIUS * math.sin(pose.theta)
line_point = point + self._convert_to_view(line_x, line_y)
painter.drawLine(point, line_point)
            # Attach the ID
text_point = point + self._convert_to_view(self._ID_POS[0], self._ID_POS[1])
painter.drawText(text_point, str(robot_id))
            # Draw the path lines
if prev_point is None:
prev_point = point
else:
painter.setPen(QPen(QColor(0,0,255, 127), 4))
painter.drawLine(prev_point, point)
prev_point = point
        # Draw the kick/dribble flags near the robot's current position
text_flag = ""
if target.kick_power > 0.0:
text_flag += "K:" + str(target.kick_power)
if target.chip_enable is True:
text_flag += "\nC:ON"
if target.dribble_power > 0.0:
text_flag += "\nD:" + str(target.dribble_power)
point = self._convert_to_view(robot.pose.x, robot.pose.y)
text_point = point + self._convert_to_view(self._FLAG_POS[0], self._FLAG_POS[1])
        # Set width and height so the text can be drawn over multiple lines
text_width = 50
text_height = 100
painter.setPen(QPen(Qt.red, 2))
painter.drawText(
text_point.x(), text_point.y(),
text_width, text_height, 0, text_flag)
def _to_angle(self, from_point, to_point):
diff_point = to_point - from_point
return math.atan2(diff_point.y(), diff_point.x())
def _draw_joy_target(self, painter):
        # Draw the joystick ControlTarget
        # Variable used for drawing the path lines
prev_point = None
for i, pose in enumerate(self._joy_target.path):
point = self._convert_to_view(pose.x, pose.y)
size = self._ROBOT_RADIUS * self._scale_field_to_view
joy_color = QColor(Qt.magenta)
painter.setPen(Qt.black)
painter.setBrush(joy_color)
painter.drawEllipse(point, size, size)
            # Orientation
line_x = self._ROBOT_RADIUS * math.cos(pose.theta)
line_y = self._ROBOT_RADIUS * math.sin(pose.theta)
line_point = point + self._convert_to_view(line_x, line_y)
painter.drawLine(point, line_point)
            # Index
text_point = point + self._convert_to_view(self._ID_POS[0], self._ID_POS[1])
painter.drawText(text_point, str(i))
            # Draw the path lines
if prev_point is None:
prev_point = point
else:
painter.setPen(QPen(QColor(0,0,255, 127), 4))
painter.drawLine(prev_point, point)
prev_point = point
def _draw_referee(self, painter):
        # Draw the referee information
PLACE_RADIUS = 0.15 # meters
AVOID_LENGTH = 0.5 # meter
if self._decoded_referee is None:
return
ball_pose = self._ball_info.pose
        # Draw the ball-placement keep-out area and the placement position
if self._decoded_referee.referee_text == "OUR_BALL_PLACEMENT" \
or self._decoded_referee.referee_text == "THEIR_BALL_PLACEMENT":
replacement_pose = self._decoded_referee.placement_position
            # Draw the keep-out area
# Reference: Rule 8.2.3
angle_ball_to_target = tool.get_angle(ball_pose, replacement_pose)
dist_ball_to_target = tool.distance_2_poses(ball_pose, replacement_pose)
trans_BtoT = tool.Trans(ball_pose, angle_ball_to_target)
            # Get the corner coordinates of the keep-out rectangle
avoid_upper_left = trans_BtoT.inverted_transform(Pose2D(0, AVOID_LENGTH, 0))
avoid_lower_left = trans_BtoT.inverted_transform(Pose2D(0, -AVOID_LENGTH, 0))
avoid_upper_right = trans_BtoT.inverted_transform(
Pose2D(dist_ball_to_target, AVOID_LENGTH, 0))
avoid_lower_right = trans_BtoT.inverted_transform(
Pose2D(dist_ball_to_target, -AVOID_LENGTH, 0))
            # Convert each coordinate into view coordinates
upper_left_point = self._convert_to_view(
avoid_upper_left.x, avoid_upper_left.y)
lower_left_point = self._convert_to_view(
avoid_lower_left.x, avoid_lower_left.y)
upper_right_point = self._convert_to_view(
avoid_upper_right.x, avoid_upper_right.y)
lower_right_point = self._convert_to_view(
avoid_lower_right.x, avoid_lower_right.y)
            # Add the points to the polygon
polygon = QPolygonF()
polygon.append(upper_left_point)
polygon.append(upper_right_point)
polygon.append(lower_right_point)
polygon.append(lower_left_point)
avoid_color = QColor(Qt.red)
avoid_color.setAlphaF(0.3)
painter.setPen(QPen(Qt.black,1))
painter.setBrush(avoid_color)
painter.drawPolygon(polygon)
replace_point = self._convert_to_view(
replacement_pose.x, replacement_pose.y)
ball_point = self._convert_to_view(
ball_pose.x, ball_pose.y)
size = AVOID_LENGTH * self._scale_field_to_view
painter.drawEllipse(replace_point, size, size)
painter.drawEllipse(ball_point, size, size)
            # Draw the ball placement position
size = PLACE_RADIUS * self._scale_field_to_view
place_color = QColor(Qt.white)
place_color.setAlphaF(0.6)
painter.setPen(QPen(Qt.black,2))
painter.setBrush(place_color)
painter.drawEllipse(replace_point, size, size)
        # Draw the keep-out area around the ball
if self._decoded_referee.keep_out_radius_from_ball != -1:
point = self._convert_to_view(
ball_pose.x, ball_pose.y)
size = self._decoded_referee.keep_out_radius_from_ball * self._scale_field_to_view
ball_color = copy.deepcopy(self._COLOR_BALL)
keepout_color = QColor(Qt.red)
keepout_color.setAlphaF(0.3)
painter.setPen(Qt.black)
painter.setBrush(keepout_color)
painter.drawEllipse(point, size, size)
        # Display the referee text near the cursor
if self._decoded_referee.referee_text:
            # Get the cursor coordinates
current_pos = self._convert_to_field(
self._current_mouse_pos.x(), self._current_mouse_pos.y())
            # Slightly adjust the position so it does not overlap other text
current_point = self._convert_to_view(current_pos.x() + 0.1, current_pos.y() - 0.15)
text = self._decoded_referee.referee_text
painter.setPen(Qt.red)
painter.drawText(current_point, text)
| StarcoderdataPython |
1639664 | h1 = '''
<!Doctype html>
<html>
<head>
        <title>Unit Test Report</title>
<style>
body {
width: 80%;
margin: 40px auto;
font-weight: bold;
font-family: 'Trebuchet MS', 'Lucida Sans Unicode', 'Lucida Grande', 'Lucida Sans', Arial, sans-serif;
font-size: 18px;
color: #000;
}
table {
*border-collapse: collapse;
border-spacing: 0;
width: 100%;
}
.tableStyle {
/* border: solid #ggg 1px; */
border-style: outset;
border-width: 2px;
/* border: 2px; */
border-color: blue;
}
.tableStyle tr:hover {
background: rgb(173, 216, 230);
}
.tableStyle td,
.tableStyle th {
border-left: solid 1px rgb(146, 208, 80);
border-top: 1px solid rgb(146, 208, 80);
padding: 15px;
text-align: center;
}
.tableStyle th {
padding: 15px;
background-color: rgb(146, 208, 80);
background-image: -webkit-gradient(linear, left top, left bottom, from(#92D050), to(#A2D668));
/* rgb(146, 208, 80) */
}
</style>
</head>
<body>
<center>
        <h1>Test Report</h1>
</center>
<br/>
<table class="tableStyle">
<thead>
<tr>
<th>Search Words</th>
<th>Assert Words</th>
<th>Start Time</th>
<th>Waste Time</th>
<th>Status</th>
</tr>
</thead>
'''
h2 = '''
</table>
</body>
</html>
'''
def generate_html(tr_data):
html = h1 + tr_data + h2
with open('report.html', 'w', encoding='utf-8') as fp:
fp.write(html) | StarcoderdataPython |
3223170 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CertificateBundle(Model):
"""A certificate bundle consists of a certificate (X509) plus its attributes.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: The certificate id.
:vartype id: str
:ivar kid: The key id.
:vartype kid: str
:ivar sid: The secret id.
:vartype sid: str
:ivar x509_thumbprint: Thumbprint of the certificate.
:vartype x509_thumbprint: bytes
:ivar policy: The management policy.
:vartype policy: :class:`CertificatePolicy
<azure.keyvault.generated.models.CertificatePolicy>`
:param cer: CER contents of x509 certificate.
:type cer: bytearray
:param content_type: The content type of the secret.
:type content_type: str
:param attributes: The certificate attributes.
:type attributes: :class:`CertificateAttributes
<azure.keyvault.generated.models.CertificateAttributes>`
:param tags: Application specific metadata in the form of key-value pairs
:type tags: dict
"""
_validation = {
'id': {'readonly': True},
'kid': {'readonly': True},
'sid': {'readonly': True},
'x509_thumbprint': {'readonly': True},
'policy': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'kid': {'key': 'kid', 'type': 'str'},
'sid': {'key': 'sid', 'type': 'str'},
'x509_thumbprint': {'key': 'x5t', 'type': 'base64'},
'policy': {'key': 'policy', 'type': 'CertificatePolicy'},
'cer': {'key': 'cer', 'type': 'bytearray'},
'content_type': {'key': 'contentType', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'CertificateAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, cer=None, content_type=None, attributes=None, tags=None):
self.id = None
self.kid = None
self.sid = None
self.x509_thumbprint = None
self.policy = None
self.cer = cer
self.content_type = content_type
self.attributes = attributes
self.tags = tags
| StarcoderdataPython |
197378 | # -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.api.errors import SynapseError
from synapse.types import StreamToken
import logging
logger = logging.getLogger(__name__)
class SourcePaginationConfig(object):
"""A configuration object which stores pagination parameters for a
specific event source."""
def __init__(self, from_key=None, to_key=None, direction='f',
limit=None):
self.from_key = from_key
self.to_key = to_key
self.direction = 'f' if direction == 'f' else 'b'
self.limit = int(limit) if limit is not None else None
def __repr__(self):
return (
"StreamConfig(from_key=%r, to_key=%r, direction=%r, limit=%r)"
) % (self.from_key, self.to_key, self.direction, self.limit)
class PaginationConfig(object):
"""A configuration object which stores pagination parameters."""
def __init__(self, from_token=None, to_token=None, direction='f',
limit=None):
self.from_token = from_token
self.to_token = to_token
self.direction = 'f' if direction == 'f' else 'b'
self.limit = int(limit) if limit is not None else None
@classmethod
def from_request(cls, request, raise_invalid_params=True,
default_limit=None):
def get_param(name, default=None):
lst = request.args.get(name, [])
if len(lst) > 1:
raise SynapseError(
400, "%s must be specified only once" % (name,)
)
elif len(lst) == 1:
return lst[0]
else:
return default
direction = get_param("dir", 'f')
if direction not in ['f', 'b']:
raise SynapseError(400, "'dir' parameter is invalid.")
from_tok = get_param("from")
to_tok = get_param("to")
try:
if from_tok == "END":
from_tok = None # For backwards compat.
elif from_tok:
from_tok = StreamToken.from_string(from_tok)
except:
raise SynapseError(400, "'from' paramater is invalid")
try:
if to_tok:
to_tok = StreamToken.from_string(to_tok)
except:
raise SynapseError(400, "'to' paramater is invalid")
limit = get_param("limit", None)
if limit is not None and not limit.isdigit():
raise SynapseError(400, "'limit' parameter must be an integer.")
if limit is None:
limit = default_limit
try:
return PaginationConfig(from_tok, to_tok, direction, limit)
except:
logger.exception("Failed to create pagination config")
raise SynapseError(400, "Invalid request.")
def __repr__(self):
return (
"PaginationConfig(from_tok=%r, to_tok=%r,"
" direction=%r, limit=%r)"
) % (self.from_token, self.to_token, self.direction, self.limit)
def get_source_config(self, source_name):
keyname = "%s_key" % source_name
return SourcePaginationConfig(
from_key=getattr(self.from_token, keyname),
to_key=getattr(self.to_token, keyname) if self.to_token else None,
direction=self.direction,
limit=self.limit,
)
| StarcoderdataPython |
1601134 | <reponame>leguiart/Machine-Learning<gh_stars>0
import numpy as np
import pandas as pd
from abc import ABCMeta, abstractmethod
from hklearn.model import Model
class _BaseModel(Model):
def __init__(self, estimator = 'ML', alpha = 1.0):
if isinstance(estimator, str) and estimator.lower() in ['ml', 'map']:
self.estimator = estimator.lower()
else:
raise Exception("Not valid estimator option (either ML or MAP)")
self.alpha = alpha
def fit(self, X, y):
if isinstance(X, (pd.core.frame.DataFrame, pd.core.series.Series)):
X_np = X.to_numpy()
else:
X_np = X
if isinstance(y, pd.core.frame.DataFrame):
y_np = y.to_numpy()
else:
y_np = y
self.classes = np.unique(y)
self.class_estimators, self.attribute_estimators = self.estimate(X_np, y_np, self.classes)
@abstractmethod
def estimate(self, X, y, classes):
pass
@abstractmethod
def predict(self, X):
pass
class BernoulliNB(_BaseModel):
def estimate(self, X, y, classes):
m = classes.shape[0]
n = X.shape[1]
beta = n
attribute_estimators = np.zeros((m,n))
class_estimators = np.zeros((m))
for i, c in enumerate(classes):
X_c = X[np.where(y == c)]
if self.estimator == 'ml':
class_estimators[i] = X_c.shape[0]/X.shape[0]
attribute_estimators[i, :] = np.count_nonzero(X_c, axis = 0) / X_c.shape[0]
elif self.estimator == 'map':
class_estimators[i] = (X_c.shape[0] + self.alpha - 1)/(X.shape[0] + beta + self.alpha - 2)
attribute_estimators[i, :] = (np.count_nonzero(X_c, axis = 0) + self.alpha - 1) / (X_c.shape[0] + beta + self.alpha - 2)
return class_estimators, attribute_estimators
def predict(self, X):
if isinstance(X, (pd.core.frame.DataFrame, pd.core.series.Series)):
X_np = X.to_numpy()
else:
X_np = X
X_np = np.where(X_np > 0.5, 1., 0.)
pcc = np.zeros((self.classes.shape[0], X_np.shape[0]))
a0log = (1 - X_np) @ np.log(1 - self.attribute_estimators + 0.000000001).T
a1log = X_np @ np.log(self.attribute_estimators + 0.000000001).T
pcc = a0log + a1log + np.log(self.class_estimators)
return np.argmax(pcc, axis = 1)
class MultinomialNB(_BaseModel):
def estimate(self, X, y, classes):
m = classes.shape[0]
n = X.shape[1]
beta = n
attribute_estimators = np.zeros((m,n))
class_estimators = np.zeros((m))
for i, c in enumerate(classes):
X_c = X[np.where(y == c)]
if self.estimator == 'ml':
class_estimators[i] = X_c.shape[0]/X.shape[0]
n_w = X_c.sum(axis = 0)
attribute_estimators[i, :] = n_w / n_w.sum()
elif self.estimator == 'map':
n_w = X_c.sum(axis = 0)
class_estimators[i] = (X_c.shape[0] + self.alpha - 1)/(X.shape[0] + m * self.alpha - m)
attribute_estimators[i, :] = (n_w + self.alpha - 1) / (n_w.sum() + n * self.alpha - n)
return class_estimators, attribute_estimators
def predict(self, X):
if isinstance(X, (pd.core.frame.DataFrame, pd.core.series.Series)):
X_np = X.to_numpy()
else:
X_np = X
pcc = np.zeros((self.classes.shape[0], X_np.shape[0]))
a1log = X_np @ np.log(self.attribute_estimators + 0.000000001).T
pcc = a1log + np.log(self.class_estimators)
return np.argmax(pcc, axis = 1)
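# Minimal usage sketch (commented out; synthetic data, and it assumes the abstract
# hklearn Model base class adds no extra constructor requirements beyond _BaseModel):
#
#   X = np.random.randint(0, 2, size=(100, 20))   # binary bag-of-words style features
#   y = np.random.randint(0, 2, size=100)
#   clf = BernoulliNB(estimator='map', alpha=2.0)
#   clf.fit(X, y)
#   predictions = clf.predict(X[:5])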
| StarcoderdataPython |
3345180 | from typing import Tuple
import gdsfactory as gf
from gdsfactory.cell import cell
from gdsfactory.component import Component
from gdsfactory.components.rectangle import rectangle
from gdsfactory.types import LayerSpec
@cell
def litho_calipers(
notch_size: Tuple[float, float] = (2.0, 5.0),
notch_spacing: float = 2.0,
num_notches: int = 11,
offset_per_notch: float = 0.1,
row_spacing: float = 0.0,
layer1: LayerSpec = "WG",
layer2: LayerSpec = "SLAB150",
) -> Component:
"""Vernier caliper structure to test lithography alignment
Only the middle finger is aligned and the rest are offset.
adapted from phidl
Args:
notch_size: [xwidth, yheight].
notch_spacing: in um.
num_notches: number of notches.
offset_per_notch: in um.
        row_spacing: vertical spacing between the two rows of notches in um.
layer1: layer.
layer2: layer.
"""
D = gf.Component()
num_notches_total = num_notches * 2 + 1
centre_notch = num_notches
R1 = rectangle(size=notch_size, layer=layer1)
R2 = rectangle(size=notch_size, layer=layer2)
for i in range(num_notches_total):
if i == centre_notch:
D.add_ref(R1).movex(i * (notch_size[0] + notch_spacing)).movey(
notch_size[1]
)
D.add_ref(R2).movex(
i * (notch_size[0] + notch_spacing)
+ offset_per_notch * (centre_notch - i)
).movey(-2 * notch_size[1] - row_spacing)
D.add_ref(R1).movex(i * (notch_size[0] + notch_spacing))
D.add_ref(R2).movex(
i * (notch_size[0] + notch_spacing) + offset_per_notch * (centre_notch - i)
).movey(-notch_size[1] - row_spacing)
return D
if __name__ == "__main__":
c = litho_calipers()
c.show()
| StarcoderdataPython |
148841 | <filename>python/random_weighted.py
"""
Implements a random weighted choice function
Functions: weight(collection)
"""
import random
def weight(collection):
"""Choose an element from a dict based on its weight and return its key.
Parameters:
- collection (dict): dict of elements with weights as values.
Returns:
string: key of the chosen element.
"""
# 1. Get sum of weights
weight_sum = sum([value for value in collection.values()])
# 2. Generate random number between 1 and sum of weights
random_value = random.randint(1, weight_sum)
# 3. Iterate through items
for key, value in collection.items():
# 4. Subtract weight of each item from random number
random_value -= value
# 5. Compare with 0, if <= 0, that item has been chosen
if random_value <= 0:
return key
# 6. Else continue subtracting
# Should not reach here.
raise ValueError("Invalid argument value.")
if __name__ == '__main__':
LIMIT = 100
# 0. Decide on weights for each item
# Mapping item to its weight
WEIGHTS = {
'Vanilla': 15,
'Strawberry': 25,
'Chocolate': 55,
'Cookies and Cream': 5
}
# Initializing the counter for storing results
counter = {
'Vanilla': 0,
'Strawberry': 0,
'Chocolate': 0,
'Cookies and Cream': 0
}
# Running example LIMIT times
for i in range(LIMIT):
counter[weight(WEIGHTS)] += 1
# Display results
print(counter)
| StarcoderdataPython |
3207699 | # https://workat.tech/problem-solving/practice/implement-stack-array/
class Stack:
arr = []
def __init__(self, capacity=0):
self.t = -1
self.capacity = capacity
self.arr = [-1]*self.capacity
def isEmpty(self) -> bool:
if(self.t==-1):
return True
return False
def size(self) -> int:
return self.t+1
def top(self) -> int:
if(self.t==-1):
return self.t
i = self.t
return self.arr[i]
def push(self, element: int) -> None:
self.t+=1
i = self.t
self.arr[i] = element
    def pop(self) -> None:
        # Removing an element only moves the top index down; the fixed-size
        # backing list is left untouched so the capacity stays constant.
        if self.t > -1:
            self.t -= 1
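# Small usage sketch (not part of the original exercise statement):
if __name__ == "__main__":
    s = Stack(capacity=3)
    s.push(4)
    s.push(7)
    s.pop()              # removes 7
    print(s.top())       # -> 4
    print(s.size())      # -> 1
    print(s.isEmpty())   # -> False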
| StarcoderdataPython |
1749755 | <filename>__init__.py
from flask import Flask,render_template
from flask_restful import Api
from flask_sqlalchemy import SQLAlchemy
from config import app_config
import os
app = Flask(__name__)
db = SQLAlchemy()
def create_app(config_name):
app = Flask(__name__, instance_relative_config=True)
app.config.from_object(app_config[config_name])
app.config.from_pyfile('config.py')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.init_app(app)
return app
app = create_app(os.getenv('APP_SETTING'))
api = Api(app=app, prefix='/api/v1') | StarcoderdataPython |
170751 | <gh_stars>0
"""
Contains classes and functions regarding Lectio Assignments.
"""
from urllib.parse import urlparse, parse_qs
from dateutil import parser as dt_parser
from .config import DEFAULT_TZ, DECIMAL_SEPARATOR
from .types import AssignmentWaitingFor, AssignmentStatuses, LectioType
from .utilities import percent2float
from .exceptions import ScrapingError
class Assignment(LectioType):
"""
Represents an Assignment assigned to a Lectio student.
"""
WAITING_FOR_LOOKUP = {
"Elev": AssignmentWaitingFor.STUDENT,
"Lærer": AssignmentWaitingFor.TEACHER
}
STATUS_LOOKUP = {
"Afleveret": AssignmentStatuses.HANDED_IN,
"Venter": AssignmentStatuses.WAITING,
"Mangler": AssignmentStatuses.MISSING
}
ATTRIBUTES = ["week", "group", "title", "deadline", "student_hours",
"status", "absence", "waiting_for", "note", "grade",
"student_note", "id"]
def __init__(self, raw_tag, tz=DEFAULT_TZ):
self.week = None
self.group = None
self.title = None
self.deadline = None
self.student_hours = None
self.status = None
self.absence = None
self.waiting_for = None
self.note = None
self.grade = None
self.student_note = None
self.id = None
self.tz = tz
# Initial unused element
raw_tag.contents.pop(0)
# Week
week_tag = raw_tag.contents.pop(0)
self.week = int(week_tag.span.text)
# Group
group_tag = raw_tag.contents.pop(0)
self.group = group_tag.span.text
# Title
title_tag = raw_tag.contents.pop(0)
self.title = title_tag.span.text
# Id - based on title_tag
parsed_url = urlparse(title_tag.span.a["href"])
parsed_qs = parse_qs(parsed_url.query)
self.id = "e" + parsed_qs["exerciseid"][0]
# Deadline
deadline_tag = raw_tag.contents.pop(0)
self.deadline = dt_parser.parse(deadline_tag.span.text, dayfirst=True,
yearfirst=False)
# pylint thinks deadline is a tuple.
# pylint: disable=no-member
self.deadline = self.deadline.replace(tzinfo=self.tz)
# pylint: enable=no-member
# Student Hours
student_hours_tag = raw_tag.contents.pop(0)
number_text = student_hours_tag.span.text.replace(DECIMAL_SEPARATOR,
".")
self.student_hours = float(number_text)
# Status
status_tag = raw_tag.contents.pop(0)
status = status_tag.span.text
try:
self.status = self.STATUS_LOOKUP[status]
except KeyError:
raise ScrapingError("Unknown value in 'status' field: " + status)
# Absence
absence_tag = raw_tag.contents.pop(0)
if absence_tag.text:
self.absence = percent2float(absence_tag.text)
# Waiting For
waiting_for_tag = raw_tag.contents.pop(0)
who = waiting_for_tag.span.text
try:
self.waiting_for = self.WAITING_FOR_LOOKUP[who]
except KeyError:
raise ScrapingError("Unknown value in 'waiting for' field: " + who)
# Note
note_tag = raw_tag.contents.pop(0)
self.note = note_tag.text
# Grade
grade_tag = raw_tag.contents.pop(0)
if grade_tag.text:
self.grade = int(grade_tag.text)
# Student Note
student_note_tag = raw_tag.contents.pop(0)
self.student_note = student_note_tag.text
| StarcoderdataPython |
1740519 | <reponame>thmolena/Shapes_Assistant
import math
from PIL import Image, ImageDraw, ImageColor, ImagePath
import random
def draw_a_shape(number):
img = Image.new("RGB", (128,128), "white")
image = ImageDraw.Draw(img)
a = random.randint(0,129)
#The center is at (64,64)
side = 3
angle = ((2*math.pi)/side)
length = abs(64-a)
if a > 64:
point_1 = (a,64)
else:
point_1 = (64+length, 64)
xy = [point_1]
for i in range (side-1):
x_coor = (length*math.cos((i+1)*angle)) +64
y_coor = (length*math.sin((i+1)*angle)) +64
x_coor = round(x_coor,5)
y_coor = round(y_coor,5)
point_2 = (x_coor, y_coor)
xy.append(point_2)
e = 64 - length
f = 64 + length
if a > 64:
distance_x = abs(a-e)
distance_y = abs(e-f)
else:
distance_x = abs(a-f)
distance_y = abs(e-f)
image.polygon(xy, fill =None, outline ="black")
img.save("a.png")
imm = Image.open("a.png")
if a > 64:
im_1 = imm.crop((e,e,a,f))
else:
im_1 = imm.crop((a,e,f,f))
im_1.save("a.png")
imgg = Image.open("a.png")
angle = random.randint(0,361)
out = im_1.rotate(angle, fillcolor = "white")
out.save("a.png")
img_2 = Image.new("RGB", (128,128), "white")
image_2 = ImageDraw.Draw(img_2)
coo = random.randint(0,128-distance_x)
doo = random.randint(0,128-distance_y)
back_im = img_2.copy()
back_im.paste(out, (coo,doo))
back_im.save("3_sides_"+str(number)+".png")
# pix_val = list(back_im.getdata())
# print(pix_val)
def main():
for i in range(10000):
draw_a_shape(i+4543)
main()
| StarcoderdataPython |
3209224 | <reponame>sprout42/StarStruct<filename>starstruct/elementescaped.py
"""
The escaped NamedStruct element class.
Can be used in multiple ways ways:
1: Variable Lengths, in terms of namedstruct elements
.. code-block:: python
ExampleMessage = Message('VarTest', [('x', 'B'), ('y', 'B')])
TestStruct = Message('TestStruct', [
('escaped_data', ExampleMessage, {
'escape': {
'start': b'\xff\x00\xff\x11',
'separator': b'\x12\x34',
'end': b'\x11\xff\x00\xff',
},
}),
])
`start` is the starting escape sequence
`separator` is a separating sequence
`end` is the ending escape sequence
"""
# pylint: disable=line-too-long
from typing import Optional
import starstruct
from starstruct.element import register, Element
from starstruct.modes import Mode
class Escapor:
def __init__(self, start=None, separator=None, end=None, opts=None):
self._start = start
self._separator = separator
self._end = end
self._opts = opts
@property
def start(self):
if self._start is not None:
return self._start
else:
return b''
@property
def separator(self):
if self._separator is not None:
return self._separator
else:
return b''
@property
def end(self):
if self._end is not None:
return self._end
else:
return b''
@register
class ElementEscaped(Element):
"""
Initialize a StarStruct element object.
:param field: The fields passed into the constructor of the element
:param mode: The mode in which to pack the bytes
:param alignment: Number of bytes to align to
"""
def __init__(self, field: list, mode: Optional[Mode]=Mode.Native, alignment: Optional[int]=1):
# All of the type checks have already been performed by the class
# factory
self.name = field[0]
# Escaped elements don't use the normal struct format, the format is
# a StarStruct.Message object, but change the mode to match the
# current mode.
self.format = field[1]
self.escapor = Escapor(**field[2]['escape'])
self._mode = mode
self._alignment = alignment
self.update(mode, alignment)
@staticmethod
def valid(field: tuple) -> bool:
"""
See :py:func:`starstruct.element.Element.valid`
:param field: The items to determine the structure of the element
"""
if len(field) == 3:
return isinstance(field[1], starstruct.message.Message) \
and isinstance(field[2], dict) \
and 'escape' in field[2].keys()
else:
return False
def validate(self, msg):
"""
Ensure that the supplied message contains the required information for
this element object to operate.
All elements that are Variable must reference valid Length elements.
"""
# TODO: Any validation needed here?
pass
def update(self, mode=None, alignment=None):
"""change the mode of the struct format"""
if self._mode is not None:
self._mode = mode
if self._alignment is not None:
self._alignment = alignment
self.format.update(self._mode, self._alignment)
def pack(self, msg):
"""Pack the provided values into the supplied buffer."""
# When packing use the length of the current element to determine
# how many elements to pack, not the length element of the message
# (which should not be specified manually).
iterator = msg[self.name]
if not isinstance(iterator, list):
iterator = [iterator]
ret = self.escapor.start
for item in iterator:
ret += self.format.pack(item)
ret += self.escapor.separator
ret += self.escapor.end
# There is no need to make sure that the packed data is properly
# aligned, because that should already be done by the individual
# messages that have been packed.
return ret
def unpack(self, msg, buf):
"""Unpack data from the supplied buffer using the initialized format."""
# When unpacking a variable element, reference the already unpacked
# length field to determine how many elements need unpacked.
ret = []
# Check the starting value
if buf[:len(self.escapor.start)] == self.escapor.start:
buf = buf[len(self.escapor.start):]
else:
raise ValueError('Buf did not start with expected start sequence: {0}'.format(
self.escapor.start.decode()))
unused = buf
while True:
(val, unused) = self.format.unpack_partial(unused)
ret.append(val)
if unused[:len(self.escapor.separator)] == self.escapor.separator:
unused = unused[len(self.escapor.separator):]
else:
raise ValueError('Buf did not separate with expected separate sequence: {0}'.format(
self.escapor.separator.decode()))
if unused[:len(self.escapor.end)] == self.escapor.end:
unused = unused[len(self.escapor.end):]
break
# There is no need to make sure that the unpacked data consumes a
# properly aligned number of bytes because that should already be done
# by the individual messages that have been unpacked.
return (ret, unused)
def make(self, msg):
"""Return the expected "made" value"""
ret = []
for val in msg[self.name]:
ret.append(self.format.make(val))
return ret
| StarcoderdataPython |
1761798 | <reponame>igor-morawski/MFIR-AP<filename>testing/test_d00_paths.py
import unittest
import MFIRAP.d00_utils.paths as tested_module
# functions are trivial, testing if import is succsesful is enough
class TestDummy(unittest.TestCase):
def test_Dummy(self):
self.assertTrue(True)
| StarcoderdataPython |
4820100 | <reponame>ajayiagbebaku/NFL-Model
import pytest
import altair as alt
from altair.utils import AltairDeprecationWarning
from altair.utils.deprecation import _deprecate, deprecated
def test_deprecated_class():
OldChart = _deprecate(alt.Chart, "OldChart")
with pytest.warns(AltairDeprecationWarning) as record:
OldChart()
assert "alt.OldChart" in record[0].message.args[0]
assert "alt.Chart" in record[0].message.args[0]
def test_deprecation_decorator():
@deprecated(message="func is deprecated")
def func(x):
return x + 1
with pytest.warns(AltairDeprecationWarning) as record:
y = func(1)
assert y == 2
assert record[0].message.args[0] == "func is deprecated"
| StarcoderdataPython |
4811205 | import pandas as pd
import datetime
import matplotlib.pyplot as plt
from Bio import SeqIO
import argparse
import os
def process_dat(dat, groupby_var, max_date, datetime_fmt):
# groups and sums
dat = dat.groupby(groupby_var).agg('sum')
# converts columns to datetime
dat.columns = pd.to_datetime(dat.columns, format=datetime_fmt)
# gets only through march 31st
dat = \
dat.loc[:,dat.columns <=
datetime.datetime.strptime(max_date, datetime_fmt)].\
sort_values(max_date, ascending=False)
top_dat = dat.loc[dat.index[0:9],:]
summarized_dat = pd.concat([top_dat,
pd.DataFrame(dat.sum() - top_dat.sum(),
columns=['Other']).transpose()])
return(dat, summarized_dat)
def run():
parser = argparse.ArgumentParser()
# input files
parser.add_argument('--globalDat',
help='csv file with country cases by day (from hopkins CCSE)',
default=None)
parser.add_argument('--usDat',
help='csv file with US cases by day (from hopkins CCSE)',
default=None)
parser.add_argument('--dateMax',
help='max date',
default=None)
parser.add_argument('--dateFmt',
help='date format',
default='%m/%d/%y')
parser.add_argument('--focalStates',
help='focal states, exlcuded from USA weight',
nargs='+')
args = parser.parse_args()
# /// PROCESS GLOBAL DAT ///
#args.dateMax='03/31/20'
#args.globalDat = 'data/time_series_covid19_confirmed_global.csv'
#args.usDat = 'data/time_series_covid19_confirmed_US.csv'
#args.focalStates = ['Georgia']
global_dat = pd.read_csv(args.globalDat, sep=',')
# to make sure hopkins definitions line up with gisaid
# some hopkins provinces are broken out as GISAID countries
province_country_mappings = {'Curacao': 'Curacao',
'Faroe Islands': 'Faroe Islands',
'Aruba': 'Aruba',
'St Martin': 'Saint Martin',
'Hong Kong': 'Hong Kong',
'Saint Barthelemy': 'Saint Barthélemy',
'Guadeloupe': 'Guadeloupe',
'Reunion': 'Reunion',
'Bermuda': 'Bermuda',
'St Martin': 'St Martin',
'Sint Maarten': 'Sint Maarten'}
global_dat.loc[global_dat['Province/State'].isin(province_country_mappings.keys()),
'Country/Region'] = \
global_dat.loc[global_dat['Province/State'].isin(province_country_mappings.keys()),
'Province/State'].map(province_country_mappings)
# some countries are spelled different in GISAID
country_mappings = {'Korea, South': 'South Korea',
'Congo (Brazzaville)': 'Republic of the Congo',
'Congo (Kinshasa)': 'Democratic Republic of the Congo',
'Czechia': 'Czech Republic',
}
global_dat.loc[global_dat['Country/Region'].isin(country_mappings),
'Country/Region'] = \
global_dat.loc[global_dat['Country/Region'].isin(country_mappings),
'Country/Region'].map(country_mappings)
# removes unneeded columns
global_dat = global_dat[global_dat.columns[0:2].tolist() +
global_dat.columns[4:].tolist()]
all_country_dat, summarized_country_dat = \
process_dat(global_dat, 'Country/Region', args.dateMax, args.dateFmt)
# finally, add to dictionary
country_case_dict = {idx: i[-1] for idx, i in all_country_dat.iterrows()}
# some countries are spelled different in GISAID
# and hopkins
# will not double count because these are all
# strict 1-1 mappings
# (confirmed by hand)
collapse_countries = {
"Côte d'Ivoire": "Cote d'Ivoire",
'Myanmar': 'Burma',
'<NAME>': '<NAME>'}
for key, value in collapse_countries.items():
country_case_dict[key] = all_country_dat.loc[value,:][-1]
# //// PROCESS US STATE DAT ////
# gets US data
us_dat = pd.read_csv(args.usDat, sep=',')
# removes unneeded columns
us_dat = us_dat[['Province_State'] + us_dat.columns[11:].tolist()]
all_state_dat, summarized_state_dat = \
process_dat(us_dat, 'Province_State', args.dateMax, args.dateFmt)
# //// PROCESS SEQUENCE DAT ////
# some US territories are listed as part of
# the US in the hopkins data but broken out as country
# in GISAID
# so we want to dictionary and remove from us state data
for country in ['Guam']:
# already filtered by max date so can take -1 index
country_dat = all_state_dat.loc[country,:].iloc[-1]
country_case_dict[country] = country_dat
all_state_dat = all_state_dat[all_state_dat.index != country]
# remove focal states -- likely these are force included
# and thus do not want to count their cases in the weights
for state in args.focalStates:
all_state_dat = all_state_dat[all_state_dat.index != state]
# then, sum over remaining US states to get USA weights
country_case_dict['USA'] = all_state_dat.sum()[-1]
# finally, we convert to DF, calc relative weights, and save
country_case_dat = pd.DataFrame(zip(country_case_dict.keys(), country_case_dict.values()))
country_case_dat[2] = country_case_dat[1]/country_case_dat[1].sum()
country_case_dat.to_csv('data/country_case_weights.tsv', header=None, index=None, sep='\t')
if __name__ == "__main__":
run()
'''
all_state_dat_summed = all_state_dat.sum()
# finally, calcualte sequence weights
country_case_dict = {idx: i[-1] for idx, i in all_country_dat.iterrows()}
state_case_dict = {'USA_'+idx: i[-1] for idx, i in all_state_dat.iterrows()}
region_case_dict = {**country_case_dict, **state_case_dict}
sequence_cases = \
pd.DataFrame.from_dict({key: region_case_dict[key] for key in sequence_locs if key in region_case_dict.keys()},
orient='index').sort_values(by=0, ascending=False)
sequence_cases['weight'] = \
sequence_cases[0]/sum(sequence_cases[0])
#sequence_cases['weight_log'] = \
# np.log10(sequence_cases[0]+1)/sum(np.log10(sequence_cases[0]+1))
sequence_cases.to_csv(os.path.splitext(args.sequences)[0]+'_weights.csv', header=None)
# get
# plots country dat
fig, axs = plt.subplots(1, 2, figsize=(6.4*2,4.8), constrained_layout=True)
summarized_country_dat.transpose().plot.area(ax=axs[0],
colormap=sns.color_palette("tab20", as_cmap=True), alpha=0.5, lw=1)
axs[0].set_ylabel('cumulative cases')
axs[0].set_title('global')
summarized_state_dat.transpose().plot.area(ax=axs[1],
colormap=sns.color_palette("tab20b", as_cmap=True), alpha=0.5, lw=1)
axs[1].set_title('usa')
fig.savefig('cumulative_cases.pdf')
plt.close()
# //// NOT OVERSAMPLING US SEQUENCES ////
args.globalDat = 'data/time_series_covid19_confirmed_global.csv'
args.metadata = 'data/gisaid_hcov-19_2021_01_26_prop_date_location_coarse.csv'
args.dateFmt = '%m/%d/%y'
args.dateMax = '03/31/20'
global_dat = pd.read_csv(args.globalDat, sep=',')
# to make sure hopkins definitions line up with gisaid
# some provinces are broken out as GISAID countries
province_country_mappings = {'Curacao': 'Curacao',
'Faroe Islands': 'Faroe Islands',
'Aruba': 'Aruba',
'St Martin': 'Saint Martin',
'Hong Kong': 'Hong Kong',
'Saint Barthelemy': 'Saint Barthélemy',
'Guadeloupe': 'Guadeloupe',
'Reunion': 'Reunion'}
global_dat.loc[global_dat['Province/State'].isin(province_country_mappings.keys()),
'Country/Region'] = \
global_dat.loc[global_dat['Province/State'].isin(province_country_mappings.keys()),
'Province/State'].map(province_country_mappings)
# some countries are spelled different in GISAID
country_mappings = {'Korea, South': 'South Korea',
'Congo (Brazzaville)': 'Republic of Congo',
'Congo (Kinshasa)': 'Democratic Republic of the Congo',
'Czechia': 'Czech Republic'}
global_dat.loc[global_dat['Country/Region'].isin(country_mappings),
'Country/Region'] = \
global_dat.loc[global_dat['Country/Region'].isin(country_mappings),
'Country/Region'].map(country_mappings)
# removes unneeded columns
global_dat = global_dat[global_dat.columns[0:2].tolist() +
global_dat.columns[4:].tolist()]
all_country_dat, summarized_country_dat = \
process_dat(global_dat, 'Country/Region', args.dateMax, args.dateFmt)
# //// PROCESS US STATE DAT ////
args.usDat = 'data/time_series_covid19_confirmed_US.csv'
# gets US data
us_dat = pd.read_csv(args.usDat, sep=',')
# removes unneeded columns
us_dat = us_dat[['Province_State'] + us_dat.columns[11:].tolist()]
all_state_dat, summarized_state_dat = \
process_dat(us_dat, 'Province_State', args.dateMax, args.dateFmt)
non_ga_dat = all_state_dat.sum() - all_state_dat.loc['Georgia']
country_case_dict = {idx: i[-1] for idx, i in all_country_dat.iterrows()}
# adds US data
country_case_dict.update({'Other(USA)': non_ga_dat[-1],
'Georgia(USA)': all_state_dat.loc['Georgia'][-1]})
metadata = pd.read_csv(args.metadata, sep=',', header=None)
collapse_countries = {'Palestine': 'Israel',
'Viet Nam': 'Vietnam',
'Czech Republic': 'Czech republic',
'Crimea': 'Russia'}
sequence_locs = \
[collapse_countries[i] if i in collapse_countries.keys() else i for i in metadata[2].unique()]
sequence_cases = \
pd.DataFrame.from_dict({key: country_case_dict[key] for key in sequence_locs if key in country_case_dict.keys()},
orient='index').sort_values(by=0, ascending=False)
sequence_cases['weight'] = \
sequence_cases[0]/sum(sequence_cases[0])
sequence_cases.to_csv(os.path.splitext(args.metadata)[0]+'_coarse_weights.csv', header=None)
# plots sampled sequences against number of cases
sequences = list(SeqIO.parse('data/ga_focused_prop_aligned_ref_filtered_masked.fasta', 'fasta'))
metadata = pd.read_csv('data/ga_focused_prop_date_location_name.csv', sep=',', header=None)
case_counts = pd.read_csv('data/gisaid_hcov-19_2021_01_26_weights.csv', sep=',', header=None)
seq_names = pd.DataFrame([i.description.split('|')[1] for i in sequences])
seq_counts = \
seq_names.merge(metadata, left_on=0, right_on=0).groupby(2).apply(lambda x: len(x)).reset_index()
seq_counts['prop_seqs'] = seq_counts[0]/seq_counts[0].sum()
seq_data = seq_counts.merge(case_counts, left_on=2, right_on=0)
fig, axs = plt.subplots(1,1, figsize=(6.4, 4.8), constrained_layout=True)
axs.scatter(seq_data[1], seq_data['0_x'], color='#333333', alpha=0.85)
axs.set_xlabel('cumulative cases')
axs.set_ylabel('number of down sampled sequences')
fig.savefig('figures/sampled_sequences.pdf')
seq_dates = \
seq_names.merge(metadata, left_on=0, right_on=0).groupby(1).apply(lambda x: len(x)).reset_index()
seq_dates[1] = pd.to_datetime(seq_dates[1])
fig, axs = plt.subplots(constrained_layout=True)
axs.scatter(seq_dates[1], seq_dates[0],
color='#333333', alpha=0.85)
axs.set_xlabel('date')
axs.set_ylabel('numer of down sampled sequences')
axs.tick_params(labelrotation=45, axis='x')
fig.savefig('figures/sampled_sequence_dates.pdf')
'''
| StarcoderdataPython |
3240930 | <filename>src/gausskernel/dbmind/sqldiag/src/sql_template.py
"""
Copyright (c) 2020 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
import re
# split flag in SQL
split_flag = ['!=', '<=', '>=', '==', '<', '>', '=', ',', '(', ')', '*', ';', '%', '+', ',']
# filter like (insert into aa (c1, c2) values (v1, v2) => insert into aa * values *)
BRACKET_FILTER = r'\(.*?\)'
# filter (123, 123.123)
PURE_DIGIT_FILTER = r'[\s]+\d+(\.\d+)?'
# filter ('123', '123.123')
QUOTE_DIGIT_FILTER = r'\'\d+(\.\d+)?\''
# filter ("123", "123.123")
DOUBLE_QUOTE_DIGIT_FILTER = r'"\d+(\.\d+)?"'
# filter ('123', 123, '123,123', 123.123) not filter(table1, column1, table_2, column_2)
DIGIT_FILTER = r'([^a-zA-Z])_?\d+(\.\d+)?'
# filter date in sql ('1999-09-09', '1999/09/09', "1999-09-09 20:10:10", '1999/09/09 20:10:10.12345')
PURE_TIME_FILTER = r'[0-9]{4}[-/][0-9]{1,2}[-/][0-9]{1,2}\s*([0-9]{1,2}[:][0-9]{1,2}[:][0-9]{1,2})?(\.\d+)?'
SINGLE_QUOTE_TIME_FILTER = r'\'[0-9]{4}[-/][0-9]{1,2}[-/][0-9]{1,2}\s*([0-9]{1,2}[:][0-9]{1,2}[:][0-9]{1,' \
r'2})?(\.\d+)?\' '
DOUBLE_QUOTE_TIME_FILTER = r'"[0-9]{4}[-/][0-9]{1,2}[-/][0-9]{1,2}\s*([0-9]{1,2}[:][0-9]{1,2}[:][0-9]{1,2})?(\.\d+)?"'
# filter like "where id='abcd" => "where id=#"
SINGLE_QUOTE_FILTER = r'\'.*?\''
# filter like 'where id="abcd" => 'where id=#'
DOUBLE_QUOTE_FILTER = r'".*?"'
# filter annotation like "/* XXX */"
ANNOTATION_FILTER_1 = r'/\s*\*[\w\W]*?\*\s*/\s*'
ANNOTATION_FILTER_2 = r'^--.*\s?'
# filter NULL character '\n \t' in sql
NULL_CHARACTER_FILTER = r'\s+'
def replace_module(sql):
"""
function: replace the message which is not important in sql
"""
sql = unify_sql(sql)
sql = sql.strip()
sql = re.sub(PURE_TIME_FILTER, r'?', sql)
sql = re.sub(SINGLE_QUOTE_TIME_FILTER, r'?', sql)
sql = re.sub(DOUBLE_QUOTE_TIME_FILTER, r'?', sql)
sql = re.sub(QUOTE_DIGIT_FILTER, r'?', sql)
sql = re.sub(DOUBLE_QUOTE_DIGIT_FILTER, r'?', sql)
sql = re.sub(PURE_DIGIT_FILTER, r' ?', sql)
sql = re.sub(SINGLE_QUOTE_FILTER, r'?', sql)
sql = re.sub(DOUBLE_QUOTE_FILTER, r'?', sql)
sql = re.sub(ANNOTATION_FILTER_1, r'', sql)
sql = re.sub(ANNOTATION_FILTER_2, r'', sql)
sql = re.sub(NULL_CHARACTER_FILTER, r' ', sql)
return sql
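# Illustrative example (not part of the original module): the filters above are meant to
# turn a literal-laden statement into a template, e.g.
#   derive_template_of_sql("select * from t1 where id = 123 and name = 'bob'")
# is expected to yield
#   "SELECT * FROM T1 WHERE ID = ? AND NAME = ?"
# (derive_template_of_sql is defined at the bottom of this module).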
def unify_sql(sql):
index = 0
while index < len(sql):
if sql[index] in split_flag:
if sql[index:index + 2] in split_flag:
sql = sql[:index].strip() + ' ' + sql[index:index + 2] + ' ' + sql[index + 2:].strip()
index = index + 3
else:
sql = sql[:index].strip() + ' ' + sql[index] + ' ' + sql[index + 1:].strip()
index = index + 2
else:
index = index + 1
new_sql = list()
for word in sql.split():
new_sql.append(word.upper())
return ' '.join(new_sql)
def derive_template_of_sql(sql):
"""
function: derive skeleton of sql
"""
sql = replace_module(sql)
return sql | StarcoderdataPython |
192895 | <filename>amromics/utils/command.py
import os
import logging
logger = logging.getLogger(__name__)
def run_command(cmd, timing_log=None):
"""
    Run a command line and return the return code of the command
    :param cmd: shell command line to execute
    :param timing_log: optional file to which '/usr/bin/time -v' output is appended
    :return: exit status returned by os.system
"""
if timing_log is not None:
cmd = '/usr/bin/time --append -v -o {} bash -c "{}"'.format(timing_log, cmd)
logger.info('Running "{}'.format(cmd))
ret = os.system(cmd)
return ret
| StarcoderdataPython |
3320333 | import glob
import os
import pandas as pd
from app.proto import MpsReceiveMsg
def find_msgdata_in_directory(directory: str):
matchings = glob.glob1(directory, 'msgData_*')
if matchings:
return os.path.join(directory, matchings[0])
else:
raise FileNotFoundError(f'No msgdata file found in {repr(directory)}.')
def msgdata_trans(src_file: str) -> pd.DataFrame:
mpa = MpsReceiveMsg()
data = {
'time': [],
'receive_id': [],
'send_id': [],
'type': [],
}
with open(src_file, 'rb') as f:
con = f.read()
index = 1
while index < len(con):
curindex = index
lbytes = []
while con[curindex] & 0x80:
lbytes.append(con[curindex] & ~0x80)
curindex += 1
if len(lbytes) < 5:
lbytes.append(con[curindex])
final_length = 0x00
for length in lbytes[::-1]:
final_length = (final_length << 7) | length
content = con[curindex + 1:curindex + 1 + final_length]
mpa.ParseFromString(content)
for msg in mpa.msginfo:
data['time'].append(mpa.time)
data['receive_id'].append(mpa.receiveID)
data['send_id'].append(msg.sendID)
data['type'].append(msg.msgtype)
index = curindex + 1 + final_length + 1
return pd.DataFrame(data)
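# Usage sketch (commented out; the directory path is hypothetical):
#
#   if __name__ == '__main__':
#       df = msgdata_trans(find_msgdata_in_directory('./logs'))
#       print(df.groupby('type').size())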
| StarcoderdataPython |
4815795 | <gh_stars>0
import torch
import torch.nn as nn
import numpy as np
from torch.nn import init
from bfutils import viz_datachunk
import cv2
import flow_utils
import matplotlib.pyplot as plt
class UpsampleZero(nn.Module):
def __init__(self):
super().__init__()
def forward(self, img, scale_factor):
"""
IMPORTANT: we only support integer scaling factors for now!!
"""
        # input shape is: batch x channels x height x width
        # output shape is: batch x channels x (height * scale_factor[0]) x (width * scale_factor[1])
device = img.device
input_size = torch.tensor(img.size(), dtype=torch.int)
input_image_size = input_size[2:] # input_image_size[0]-height, input_image_size[1]-width
data_size = input_size[:2]
# Get the last two dimensions -> height x width
# compare to given scale factor
b_ = np.asarray(scale_factor)
b = torch.tensor(b_)
# check that the dimensions of the tuples match.
if len(input_image_size) != len(b):
raise ValueError("scale_factor should match input size!")
output_image_size = (input_image_size * b).type(torch.int) # element-wise product
scale_factor = torch.tensor(np.asarray(scale_factor), dtype=torch.int)
##
output_size = torch.cat((data_size, output_image_size))
output = torch.zeros(tuple(output_size.tolist()))
##
# todo: use output.view(...) instead.
output[:, :, ::scale_factor[0], ::scale_factor[1]] = img
return output.to(device=device)
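# Illustrative behaviour of UpsampleZero (comment added for clarity): with scale_factor=(2, 2),
# a 1x1x2x2 input
#   [[1, 2],
#    [3, 4]]
# becomes the 1x1x4x4 output
#   [[1, 0, 2, 0],
#    [0, 0, 0, 0],
#    [3, 0, 4, 0],
#    [0, 0, 0, 0]]
# i.e. the original samples are kept on a strided grid and all new positions are zero-filled.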
class BackwardWarper(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x_image, x_motion, debug_goal=None):
""" Stolen from github nsrr-reimplementation """
# this is diff for some reason
x_motion = -x_motion
index_batch, _, height, width = x_image.size()
grid_x = torch.arange(width).view(1, -1).repeat(height, 1)
grid_y = torch.arange(height).view(-1, 1).repeat(1, width)
grid_x = grid_x.view(1, 1, height, width).repeat(index_batch, 1, 1, 1)
grid_y = grid_y.view(1, 1, height, width).repeat(index_batch, 1, 1, 1)
##
grid = torch.cat((grid_x, grid_y), 1).float().to(device=x_motion.device)
# grid is: [batch, channel (2), height, width]
vgrid = grid + x_motion
# Grid values must be normalised positions in [-1, 1]
vgrid_x = vgrid[:, 0, :, :]
vgrid_y = vgrid[:, 1, :, :]
vgrid[:, 0, :, :] = (vgrid_x / width) * 2.0 - 1.0
vgrid[:, 1, :, :] = (vgrid_y / height) * 2.0 - 1.0
# swapping grid dimensions in order to match the input of grid_sample.
# that is: [batch, output_height, output_width, grid_pos (2)]
vgrid = vgrid.permute((0, 2, 3, 1)).to(device=x_image.device)
output = torch.nn.functional.grid_sample(x_image, vgrid, mode='nearest', align_corners=False)
return output
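# Note on BackwardWarper (added): grid_sample expects sampling positions normalised to [-1, 1],
# where -1 refers to the left/top edge and +1 to the right/bottom edge. The normalisation above,
#   x_norm = (x / width) * 2 - 1,
# is the common approximation that ignores the half-pixel offset implied by align_corners=False,
# e.g. a position at x = width / 2 maps to 0 in normalised coordinates.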
class FeatureExtractionNet(nn.Module):
def __init__(self, kernel_size=3, padding='same'):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(in_channels=4, out_channels=32, kernel_size=kernel_size, padding=padding),
nn.ReLU(),
nn.Conv2d(in_channels=32, out_channels=32, kernel_size=kernel_size, padding=padding),
nn.ReLU(),
nn.Conv2d(in_channels=32, out_channels=8, kernel_size=kernel_size, padding=padding),
nn.ReLU()
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
if m.bias is not None:
init.uniform_(m.bias)
init.xavier_uniform_(m.weight)
def forward(self, rgbd_tensor):
# rgbd_tensor: (N, C, H, W) where C = R, G, B, D channels
full_features = self.net(rgbd_tensor)
channel_dim = len(rgbd_tensor.shape)-1-2
return torch.concat((rgbd_tensor, full_features), dim=channel_dim)
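        # Note (added): the returned tensor concatenates the 4 RGB-D input channels with the
        # 8 learned feature channels, giving the 12-channel-per-frame representation assumed
        # by the reweighting and reconstruction networks below.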
class FeatureReweightingNet(nn.Module):
'''Adopted from nsrr github'''
def __init__(self, kernel_size=3, padding='same', scale=10):
super().__init__()
self.scale = scale
self.net = nn.Sequential(
            # The input is the concatenation of the current frame's RGB-D (4 channels) with the
            # RGB-D part of each of the 4 warped previous frames (4 channels each),
            # so `in_channels = 4 * 5 = 20`.
            # To save memory, only the RGB-D channels are used to compute the per-frame weights,
            # which are then applied to all 12 feature channels of each previous frame.
nn.Conv2d(in_channels=4*5, out_channels=32, kernel_size=kernel_size, padding=padding),
nn.ReLU(),
nn.Conv2d(in_channels=32, out_channels=32, kernel_size=kernel_size, padding=padding),
nn.ReLU(),
nn.Conv2d(in_channels=32, out_channels=4, kernel_size=kernel_size, padding=padding),
nn.Tanh()
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
if m.bias is not None:
init.uniform_(m.bias)
init.xavier_uniform_(m.weight)
def forward(self, current_rgbd_scaled, previous_features):
stacked_reweighted_rgbds = torch.cat([
current_rgbd_scaled.unsqueeze(1)[:, :, 0:4, ...],
previous_features[:, :, 0:4, ...]
], dim=1)
stacked_reweighted_rgbds = stacked_reweighted_rgbds.flatten(1, 2)
w = self.net(stacked_reweighted_rgbds)
w = (w-(-1))/2*self.scale # Scale
weighted_previous_features = [w[:,i,:,:].unsqueeze(1)*previous_features[:, i, ...] for i in range(4)] # Reweighting
return weighted_previous_features
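        # Shape walk-through (added comment): with batch size N and upsampled size HxW,
        #   current_rgbd_scaled:  N x 4 x H x W        (RGB-D part of the current features)
        #   previous_features:    N x 4 x 12 x H x W   (4 warped previous frames)
        #   stacked input to net: N x 20 x H x W       (current RGB-D + 4 previous RGB-Ds)
        #   w:                    N x 4 x H x W        (one weight map per previous frame, in [0, scale])
        # and the output is a list of 4 reweighted 12-channel feature maps.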
class ReconstructionNet(nn.Module):
'''Adopted from NSRR github repo'''
def __init__(self, kernel_size=3, padding='same'):
super().__init__()
self.pooling = nn.MaxPool2d(2)
# Split the network into 5 groups of 2 layers to apply concat operation at each stage
# todo: the first layer of the model would take
# the concatenated features of all previous frames,
# so the input number of channels of the first 2D convolution
# would be 12 * self.number_previous_frames
self.encoder_1 = nn.Sequential(
nn.Conv2d(12*4+12, 64, kernel_size=kernel_size, padding=padding),
nn.ReLU(),
nn.Conv2d(64, 32, kernel_size=kernel_size, padding=padding),
nn.ReLU()
)
self.encoder_2 = nn.Sequential(
nn.Conv2d(32, 64, kernel_size=kernel_size, padding=padding),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=kernel_size, padding=padding),
nn.ReLU()
)
self.center = nn.Sequential(
nn.Conv2d(64, 128, kernel_size=kernel_size, padding=padding),
nn.ReLU(),
nn.Conv2d(128, 128, kernel_size=kernel_size, padding=padding),
nn.ReLU(),
)
self.decoder_1 = nn.Sequential(
nn.Conv2d(128+64, 64, kernel_size=kernel_size, padding=padding),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=kernel_size, padding=padding),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
)
self.decoder_2 = nn.Sequential(
nn.Conv2d(32+64, 32, kernel_size=kernel_size, padding=padding),
nn.ReLU(),
nn.Conv2d(32, 3, kernel_size=kernel_size, padding=padding),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
if m.bias is not None:
init.uniform_(m.bias)
init.xavier_uniform_(m.weight)
def forward(self, current_features, reweighted_previous_features):
# Features of the current frame and the reweighted features
# of previous frames are concatenated
reconstruction_inp = torch.cat([
current_features.unsqueeze(1),
reweighted_previous_features
], dim=1)
reconstruction_inp = reconstruction_inp.flatten(1, 2)
channel_dim = 1
out_encoder_1 = self.pooling(self.encoder_1(reconstruction_inp))
out_encoder_2 = self.pooling(self.encoder_2(out_encoder_1))
out_center = self.center(out_encoder_2)
out_decoder_1 = self.decoder_1(torch.concat((out_center, out_encoder_2), dim=channel_dim))
out_decoder_2 = self.decoder_2(torch.concat((out_encoder_1, out_decoder_1), dim=channel_dim))
return out_decoder_2
class NSRR(nn.Module):
def __init__(self, dev=0, debug=False):
super().__init__()
self.past_fextractor = FeatureExtractionNet()
self.current_fextractor = FeatureExtractionNet()
self.upsample_zero = UpsampleZero()
self.upsample_factor = 2
self.backward_warp = BackwardWarper()
self.reweighter = FeatureReweightingNet()
self.reconstructer = ReconstructionNet()
self.past_fextractor.cuda(dev)
self.reweighter.cuda(dev)
self.dev = dev
self.debug = debug
for m in self.modules():
if isinstance(m, nn.Conv2d):
if m.bias is not None:
init.uniform_(m.bias)
init.xavier_uniform_(m.weight)
def forward(self, rgbds, flows):
_batch_size = rgbds.shape[0]
zero_upsample = False
upsample_mode = 'bilinear'
if self.debug:
print('visualizing input batch to ./debug/batch_d_input.png')
for batch_i in range(rgbds.shape[0]):
viz_datachunk(rgbds/255.0, flows, batch_idx=batch_i, \
imname=('./debug/a_batch_%d_input.png' % batch_i))
current_rgbd = rgbds[:, -1, ...]
current_features = self.current_fextractor(current_rgbd)
if zero_upsample:
current_features = self.upsample_zero(current_features, [self.upsample_factor]*2)
else:
current_features = torch.nn.functional.interpolate(current_features,
scale_factor=self.upsample_factor,
mode=upsample_mode)
input_shape = torch.tensor(current_rgbd.shape[-2:])
past_rgbds = rgbds[:, 0:-1, ...]
past_features = torch.empty([_batch_size, 4, 12] + (input_shape*self.upsample_factor).tolist())
for i in range(4):
past = past_rgbds[:, i, ...]
past_feature = self.past_fextractor(past)
if zero_upsample:
past_feature = self.upsample_zero(past_feature, [self.upsample_factor]*2)
else:
past_feature = torch.nn.functional.interpolate(past_feature,
scale_factor=self.upsample_factor,
mode=upsample_mode)
past_features[:, i, ...] = past_feature
# past_features[0] -> frame i-4
# past_features[1] -> frame i-3
# past_features[2] -> frame i-2
# past_features[3] -> frame i-1
# current_features -> frame i
if self.debug:
inp = rgbds[:, 3, 0:3, ...]
goal = rgbds[:, 4, 0:3, ...]
res = self.backward_warp(inp, flows[:, 3, ...])
res = res[0, ...].transpose(0, 1).transpose(1, 2)
plt.imsave('./debug/x_warped1.png', res.cpu().detach().numpy()/255.0)
x = inp[0, ...].transpose(0, 1).transpose(1, 2)
plt.imsave('./debug/y_input_warp.png', x.cpu().detach().numpy()/255.0)
x = goal[0, ...].transpose(0, 1).transpose(1, 2)
plt.imsave('./debug/z_goal_warp.png', x.cpu().detach().numpy()/255.0)
f = flow_utils.flow2img(flows[0, -1, ...].cpu().detach().numpy().transpose(1, 2, 0))
plt.imsave('./debug/w_flow.png', f)
flows = torch.nn.functional.interpolate(flows.flatten(0, 1),
scale_factor=self.upsample_factor,
mode='bilinear')
flows = flows.unflatten(0, (_batch_size, 4))
# Do Accumulative backward warping in batches
# Frame i-1 gets warped into frame i using flow(i-1 --> i)
# Frame i-2 gets warped into frame i using flow(i-2 --> i-1) and then flow(i-1 --> i)
# Frame i-3 gets warped into frame i using flow(i-3 --> i-2), flow(i-2 --> i-1), flow(i-1 --> i)
# Frame i-4 gets warped into frame i using flow(i-4 --> i-3), ..., flow(i-1 --> i)
done_warped = [None] * 4
warped = past_features
for i in range(4):
to_warp = warped[:, 0:4-i, ...].flatten(0, 1)
warp_flows = flows[:, i:4, ...].flatten(0, 1)
warped = self.backward_warp(to_warp, warp_flows)
warped = warped.unflatten(0, (_batch_size, 4-i))
done_warped[3-i] = warped[:, 3-i, ...]
# done_warped[0] --> zero upscaled, acc. warped frame_minus_4
# done_warped[1] --> zero upscaled, acc. warped frame_minus_3
# done_warped[2] --> zero upscaled, acc. warped frame_minus_2
# done_warped[3] --> zero upscaled, warped frame_minus_1
# Create input to Feature Reweighting network
done_warped = torch.stack(done_warped, dim=1).cuda(self.dev)
if self.debug:
print('visualizing warped batch to ./debug/batch_d_warped.png')
for batch_i in range(rgbds.shape[0]):
viz_datachunk(done_warped/255.0, flows, batch_idx=batch_i, \
imname=('./debug/b_batch_%d_warped.png' % batch_i))
current_rgbd_scaled = current_features[:, 0:4, ...]
reweighted = self.reweighter(current_rgbd_scaled, done_warped)
reweighted = torch.stack(reweighted, dim=1).cuda(self.dev)
reconstructed = self.reconstructer(current_features, reweighted)
        return reconstructed
| StarcoderdataPython |
1681064 | import asyncio
import pytest
import starkware.starknet.testing.objects
from starkware.starknet.testing.starknet import Starknet
from starkware.starkware_utils.error_handling import StarkException
from tests.utils import Signer, str_to_felt, to_uint, uint
FALSE, TRUE = 0, 1
signer = Signer(123456789987654321)
NONEXISTENT_TOKEN = to_uint(999)
# random token IDs
TOKENS = [to_uint(5042), to_uint(793)]
# test token
TOKEN = TOKENS[0]
INITIAL_PRICE = 1000
DECAY_CONSTANT = 1
SCALE_FACTOR = 10
@pytest.fixture(scope="module")
def event_loop():
return asyncio.new_event_loop()
@pytest.fixture(scope="module")
async def contract_factory():
# Deploy the contracts
starknet = await Starknet.empty()
account1 = await starknet.deploy(
"openzeppelin/account/Account.cairo",
constructor_calldata=[signer.public_key],
)
contract = await starknet.deploy(
"contracts/discreteGDA.cairo",
constructor_calldata=[
str_to_felt("Non Fungible Token"), # name
str_to_felt("NFT"), # ticker
account1.contract_address, # owner
INITIAL_PRICE,
SCALE_FACTOR,
DECAY_CONSTANT,
],
)
account2 = await starknet.deploy(
"openzeppelin/account/Account.cairo",
constructor_calldata=[signer.public_key],
)
# ERC20 type accepted as payment
erc20 = await starknet.deploy(
"openzeppelin/token/erc20/ERC20_Mintable.cairo",
constructor_calldata=[
str_to_felt("Mintable Token"),
str_to_felt("MTKN"),
18,
*to_uint(INITIAL_PRICE + 1002),
account2.contract_address,
account2.contract_address,
],
)
# Approve the tokens for the auction contract
await signer.send_transaction(
account=account2,
to=erc20.contract_address,
selector_name="approve",
calldata=[contract.contract_address, *to_uint(INITIAL_PRICE)],
)
return starknet, contract, account1, account2, erc20
@pytest.mark.asyncio
async def test_initial_price(contract_factory):
"""Initial price should be price scale"""
starknet, contract, account1, account2, erc20 = contract_factory
observed = await contract.purchase_price(1).call()
assert assertApproxEqual(INITIAL_PRICE, observed.result[0][0], 1)
@pytest.mark.asyncio
async def test_insufficient_payment(contract_factory):
starknet, contract, account1, account2, erc20 = contract_factory
observed = await contract.purchase_price(1).call()
with pytest.raises(StarkException):
await signer.send_transaction(
account=account2,
to=contract.contract_address,
selector_name="purchaseTokens",
calldata=[
1,
account2.contract_address,
*to_uint(observed.result[0][0] - 1),
],
)
@pytest.mark.asyncio
async def test_mint_correctly(contract_factory):
starknet, contract, account1, account2, erc20 = contract_factory
# Checks token has no owner
with pytest.raises(StarkException):
await contract.ownerOf(to_uint(0)).call()
price = await contract.purchase_price(1).call()
await signer.send_transaction(
account=account2,
to=contract.contract_address,
selector_name="purchaseTokens",
calldata=[
1,
account2.contract_address,
*to_uint(1001),
],
)
# Checks token has new owner
new_owner = await contract.ownerOf(to_uint(0)).call()
assert new_owner.result == (account2.contract_address,)
# @pytest.mark.asyncio
# async def test_refund(contract_factory):
# starknet, contract, account1, account2, erc20 = contract_factory
# price = await contract.purchase_price(1).call()
# await signer.send_transaction(
# account=account2,
# to=contract.contract_address,
# selector_name="purchaseTokens",
# calldata=[
# 1,
# account2.contract_address,
# *to_uint(2002),
# ],
# )
# observed = await erc20.balanceOf(account2.contract_address).call()
# assert observed.result == price.result
def assertApproxEqual(expected, actual, tolerance):
print(expected, actual, tolerance)
leftBound = (expected * (1000 - tolerance)) / 1000
rightBound = (expected * (1000 + tolerance)) / 1000
return leftBound <= actual and actual <= rightBound
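# Note (added): `tolerance` is expressed in tenths of a percent, e.g. tolerance=1 accepts any
# value within +/-0.1% of `expected`, so assertApproxEqual(1000, 999, 1) is True while
# assertApproxEqual(1000, 998, 1) is False.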
| StarcoderdataPython |
3253068 | import os
from aiohttp import web
from .db import init_db, setup_tables
from .views import request_handler
app = web.Application()
app['config'] = {
'gino': {
'host': os.getenv('POSTGRES_HOST', 'localhost'),
'user': os.getenv('POSTGRES_USER', 'postgres'),
'password': os.getenv('<PASSWORD>'),
'database': os.getenv('POSTGRES_DB', 'fbk_task'),
}
}
init_db(app)
app.add_routes([
# only POST requests are allowed, see deploy/nginx/nginx.conf:13
web.post('/', request_handler),
])
app.on_startup.append(setup_tables)
if __name__ == '__main__':
web.run_app(app)
| StarcoderdataPython |
1659872 | <filename>src/publish_conda_stack/__init__.py
__version__ = "0.3.0dev7"
| StarcoderdataPython |
1823 | <gh_stars>0
# urls.py
from django.urls import path, register_converter
from fiesta import converters
from fiesta.views import views
from rest_framework.urlpatterns import format_suffix_patterns
# "http://django-sdmx.org/wsrest/"
# "http://django-sdmx.org/ws/"
register_converter(converters.ResourceConverter, 'res')
register_converter(converters.AgencyConverter, 'age')
register_converter(converters.ContextConverter, 'con')
urlpatterns = [
path('wsreg/SubmitStructure/', views.SubmitStructureRequestView.as_view()),
path('wsrest/schema/<con:context>/<age:agencyID>/<str:resourceID>', views.SDMXRESTfulSchemaView.as_view()),
path('wsrest/schema/<con:context>/<age:agencyID>/<str:resourceID>/<str:version>', views.SDMXRESTfulSchemaView.as_view()),
path('wsrest/<res:resource>/', views.SDMXRESTfulStructureView.as_view()),
path('wsrest/<res:resource>/<age:agencyID>/',
views.SDMXRESTfulStructureView.as_view()),
path('wsrest/<res:resource>/<age:agencyID>/<str:resourceID>/',
views.SDMXRESTfulStructureView.as_view()),
path('wsrest/<res:resource>/<age:agencyID>/<str:resourceID>/'
'<str:version>/',
views.SDMXRESTfulStructureView.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns)
| StarcoderdataPython |
1776403 | import os
import torch, torchaudio
import itertools
from shutil import copyfile
class BasePreprocessor:
def __init__(self, data_dir, output_dir, overwrite_files, bulk_process, extensions = None):
# organization
self.data_dir = data_dir
self.output_dir = output_dir
self.overwrite_files = overwrite_files
self.extensions = extensions or ['.ogg']
# if(bulk_process):
# self.bulk_processor(data_dir, extensions)
def folder_crawl(self, data_dir, extensions):
count = 0
for root, dirs, files in os.walk(data_dir):
count += 1
for name in files:
fp = os.path.join(root, name)
new_root = root.replace('download', 'processed')
new_fp = os.path.join(new_root, name)
new_fp, extension = os.path.splitext(new_fp)
if not os.path.exists(new_root):
os.makedirs(new_root)
if (extension in extensions): # process these files
new_fp = new_fp + ".pickle"
if self.overwrite_files or not os.path.exists(new_fp): # do not overwrite existing processed files
print("Processing: ", fp, " to ", new_fp, " file ", count)
new_obj = self.process(fp)
torch.save(new_obj, new_fp)
else:
print("Skipping: ", fp, " to ", new_fp, " file ", count)
else: # copy these files
print("Copying: ", fp, " to ", new_fp, " file ", count)
copyfile(fp, new_fp + extension)
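    # Illustrative path mapping for folder_crawl (added comment; the file names are made up):
    # a source file such as <data_dir>/download/speciesA/xc12345.ogg is processed and saved as
    # <data_dir>/processed/speciesA/xc12345.pickle, while files with other extensions are
    # copied over unchanged.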
def bulk_processor(self, data_dir = '/', extensions = '.ogg'):
self.folder_crawl(data_dir, extensions)
        return self.output_dir
# default processes; override
    def process(self, fp):
        waveform, sample_rate = torchaudio.load(fp, normalization=True)
        if sample_rate != 32000:
            # resample the waveform to the 32 kHz target rate before returning it
            resampler = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=32000,
                                                       resampling_method='sinc_interpolation')
            waveform = resampler(waveform)
        return waveform | StarcoderdataPython |
20233 | import unittest
import os.path
import requests_mock
import tableauserverclient as TSC
TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), 'assets')
SIGN_IN_XML = os.path.join(TEST_ASSET_DIR, 'auth_sign_in.xml')
SIGN_IN_IMPERSONATE_XML = os.path.join(TEST_ASSET_DIR, 'auth_sign_in_impersonate.xml')
SIGN_IN_ERROR_XML = os.path.join(TEST_ASSET_DIR, 'auth_sign_in_error.xml')
class AuthTests(unittest.TestCase):
def setUp(self):
self.server = TSC.Server('http://test')
self.baseurl = self.server.auth.baseurl
def test_sign_in(self):
with open(SIGN_IN_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/signin', text=response_xml)
tableau_auth = TSC.TableauAuth('testuser', 'password', site_id='Samples')
self.server.auth.sign_in(tableau_auth)
self.assertEqual('eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l', self.server.auth_token)
self.assertEqual('6b7179ba-b82b-4f0f-91ed-812074ac5da6', self.server.site_id)
self.assertEqual('1a96d216-e9b8-497b-a82a-0b899a965e01', self.server.user_id)
def test_sign_in_with_personal_access_tokens(self):
with open(SIGN_IN_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/signin', text=response_xml)
tableau_auth = TSC.PersonalAccessTokenAuth(token_name='mytoken',
personal_access_token='<PASSWORD>', site_id='Samples')
self.server.auth.sign_in(tableau_auth)
self.assertEqual('eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l', self.server.auth_token)
self.assertEqual('6b7179ba-b82b-4f0f-91ed-812074ac5da6', self.server.site_id)
self.assertEqual('1a96d216-e9b8-497b-a82a-0b899a965e01', self.server.user_id)
def test_sign_in_impersonate(self):
with open(SIGN_IN_IMPERSONATE_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/signin', text=response_xml)
tableau_auth = TSC.TableauAuth('testuser', 'password',
user_id_to_impersonate='dd2239f6-ddf1-4107-981a-4cf94e415794')
self.server.auth.sign_in(tableau_auth)
self.assertEqual('MJonFA6HDyy2C3oqR13fRGqE6cmgz<PASSWORD>', self.server.auth_token)
self.assertEqual('dad65087-b08b-4603-af4e-2887b8aafc67', self.server.site_id)
self.assertEqual('dd2239f6-ddf1-4107-981a-4cf94e415794', self.server.user_id)
def test_sign_in_error(self):
with open(SIGN_IN_ERROR_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/signin', text=response_xml, status_code=401)
tableau_auth = TSC.TableauAuth('testuser', '<PASSWORD>')
self.assertRaises(TSC.ServerResponseError, self.server.auth.sign_in, tableau_auth)
def test_sign_in_invalid_token(self):
with open(SIGN_IN_ERROR_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/signin', text=response_xml, status_code=401)
tableau_auth = TSC.PersonalAccessTokenAuth(token_name='mytoken', personal_access_token='invalid')
self.assertRaises(TSC.ServerResponseError, self.server.auth.sign_in, tableau_auth)
def test_sign_in_without_auth(self):
with open(SIGN_IN_ERROR_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/signin', text=response_xml, status_code=401)
tableau_auth = TSC.TableauAuth('', '')
self.assertRaises(TSC.ServerResponseError, self.server.auth.sign_in, tableau_auth)
def test_sign_out(self):
with open(SIGN_IN_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/signin', text=response_xml)
m.post(self.baseurl + '/signout', text='')
tableau_auth = TSC.TableauAuth('testuser', 'password')
self.server.auth.sign_in(tableau_auth)
self.server.auth.sign_out()
self.assertIsNone(self.server._auth_token)
self.assertIsNone(self.server._site_id)
self.assertIsNone(self.server._user_id)
def test_switch_site(self):
self.server.version = '2.6'
baseurl = self.server.auth.baseurl
site_id, user_id, auth_token = list('<PASSWORD>')
self.server._set_auth(site_id, user_id, auth_token)
with open(SIGN_IN_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(baseurl + '/switchSite', text=response_xml)
site = TSC.SiteItem('Samples', 'Samples')
self.server.auth.switch_site(site)
self.assertEqual('eIX6mvFsq<PASSWORD>4KqEI1UwOpS8ggRs2l', self.server.auth_token)
self.assertEqual('<PASSWORD>-8120<PASSWORD>', self.server.site_id)
self.assertEqual('1a96d216-e9b8-497b-a82a-0b899a965e01', self.server.user_id)
def test_revoke_all_server_admin_tokens(self):
self.server.version = "3.10"
baseurl = self.server.auth.baseurl
with open(SIGN_IN_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(baseurl + '/signin', text=response_xml)
m.post(baseurl + '/revokeAllServerAdminTokens', text='')
tableau_auth = TSC.TableauAuth('testuser', 'password')
self.server.auth.sign_in(tableau_auth)
self.server.auth.revoke_all_server_admin_tokens()
self.assertEqual('eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l', self.server.auth_token)
self.assertEqual('<PASSWORD>ba-b82b-4f0f-91ed-812074ac5da6', self.server.site_id)
self.assertEqual('1a96d216-e9b8-497b-a82a-0b899a965e01', self.server.user_id)
| StarcoderdataPython |
1748802 | <reponame>PmagPy/PmagPy-notebooks
#!/usr/bin/env python
from __future__ import print_function
from builtins import range
mylist = [42, 'spam', 'ocelot']
Indices = list(range(0, len(mylist), 1))
for i in Indices:
print(mylist[i])
print('All done')
| StarcoderdataPython |
1768038 | """Rectangular and polar coordinates representation utility."""
from math import *
from taurapoint import *
def main():
test = ""
##### CREATING #####
# create passing x and y
    test = "create passing x and y"
print("test:", test)
p1 = Point2(0, 1)
try:
assert p1.x == 0
assert p1.y == 1
assert p1.r == 1
assert p1.a == round(pi/2,16)
print("passed!")
except Exception as e:
print("ERROR",e, p1)
print("test:", test)
p1 = Point2(1, 0)
try:
assert p1.x == 1
assert p1.y == 0
assert p1.r == 1
assert p1.a == 0
print("passed!")
except Exception as e:
print("ERROR",e, p1)
# create with x and y as a tuple
test = "create with x and y as a tuple"
print("test:", test)
p1 = Point2((0, 1))
try:
assert p1.x == 0
assert p1.y == 1
assert p1.r == 1
assert p1.a == round(pi/2,16)
print("passed!")
except Exception as e:
print("ERROR",e, p1)
# create from another point
test = "create from another point"
print("test:", test)
p2 = Point2(p1)
try:
assert p2.x == p1.x
assert p2.y == p1.y
assert p2.r == p1.r
assert p2.a == p1.a
print("passed!")
except Exception as e:
print("ERROR",e, p2)
# create with no arguments
test = "create with no arguments"
print("test:", test)
p1 = Point2()
try:
assert p1.x == 0
assert p1.y == 0
assert p1.r == 0
assert p1.a == 0
print("passed!")
except Exception as e:
print("ERROR",e, p1)
##### SETTING #####
# set rectangular coordinates x and y
test = "set rectangular coordinates x and y"
print("test:", test)
p1 = Point2().rect(0, 1)
try:
assert p1.x == 0
assert p1.y == 1
assert p1.r == 1
assert p1.a == round(pi/2,16)
print("passed!")
except Exception as e:
print("ERROR",e, p1)
# set rectangular coordinates x and y as a tuple
test = "set rectangular coordinates x and y as a tuple"
print("test:", test)
p1 = Point2().rect((0, 1))
try:
assert p1.x == 0
assert p1.y == 1
assert p1.r == 1
assert p1.a == round(pi/2,16)
print("passed!")
except Exception as e:
print("ERROR",e, p1)
# set polar coordinates r and a
test = "set polar coordinates r and a"
print("test:", test)
p1 = Point2().polar(1, round(pi/2,16))
try:
assert p1.x == 0
assert p1.y == 1
assert p1.r == 1
assert p1.a == round(pi/2,16)
print("passed!")
except Exception as e:
print("ERROR",e, p1)
# set polar coordinates r and a as a tuple
test = "set polar coordinates r and a as a tuple"
print("test:", test)
p1 = Point2().polar((1, round(pi/2,16)))
try:
assert p1.x == 0
assert p1.y == 1
assert p1.r == 1
assert p1.a == round(pi/2,16)
print("passed!")
except Exception as e:
print("ERROR",e, p1)
test = "set attribute x"
print("test:", test)
p1 = Point2()
p1.x = 1
try:
assert p1.polar() == (1, 0)
assert p1.rect() == (1, 0)
print("passed!")
except Exception as e:
print("ERROR",e, p1)
test = "set attribute y"
print("test:", test)
p1 = Point2()
p1.y = 1
try:
assert p1.polar() == (1, round(pi/2,16))
assert p1.rect() == (0, 1)
print("passed!")
except Exception as e:
print("ERROR",e, p1)
test = "set attribute r"
print("test:", test)
p1 = Point2()
p1.r = 1
try:
assert p1.polar() == (1, 0)
assert p1.rect() == (1, 0)
print("passed!")
except Exception as e:
print("ERROR",e, p1)
test = "set attribute a"
print("test:", test)
p1 = Point2()
p1.r = 1
p1.a = round(pi/2,16)
try:
assert p1.polar() == (1, round(pi/2,16))
assert p1.rect() == (0, 1)
print("passed!")
except Exception as e:
print("ERROR",e, p1)
##### GETTING #####
test = "### GETTING #####"
print("test:", test)
try:
assert p1.polar() == (1, round(pi/2,16))
assert p1.rect() == (0, 1)
print("passed!")
except Exception as e:
print("ERROR",e, p1)
##### OPERATIONS ####
test = "vector + vector"
print("test:", test)
p1 = Point2(1, 0)
p2 = Point2(0, 1)
try:
assert (p1 + p2).rect() == (1, 1)
print("passed!")
except Exception as e:
print("ERROR",e, p1)
test = "vector - vector"
print("test:", test)
p1 = Point2(1, 0)
p2 = Point2(0, 1)
try:
assert (p1 - p2).rect() == (1, -1)
print("passed!")
except Exception as e:
print("ERROR",e, p1)
test = "vector += vector"
print("test:", test)
p1 = Point2(1, 0)
p2 = Point2(0, 1)
p1 += p2
try:
assert p1.rect() == (1, 1)
print("passed!")
except Exception as e:
print("ERROR",e, p1)
test = "vector -= vector"
print("test:", test)
p1 = Point2(1, 0)
p2 = Point2(0, 1)
p1 -= p2
try:
assert p1.rect() == (1, -1)
print("passed!")
except Exception as e:
print("ERROR",e, p1)
test = "vector * scalar"
print("test:", test)
p1 = Point2(1, 0)
n = 2
try:
assert (p1 * n).rect() == (2, 0)
print("passed!")
except Exception as e:
print("ERROR",e, p1)
if __name__ == '__main__':
main() | StarcoderdataPython |
130730 | import os
# Late setting of env so pywtk loads correctly
pywtk_cache = os.path.join(os.environ['HOME'], 'dropbox', 'pywtk-data')
os.environ['PYWTK_CACHE_DIR'] = pywtk_cache
import pandas
from pywtk.site_lookup import get_3tiersites_from_wkt
from pywtk.wtk_api import get_nc_data, WIND_FCST_DIR, WIND_MET_NC_DIR
if __name__ == '__main__':
pywtk_cache = os.path.join(
os.environ['HOME'], 'dropbox', 'pywtk-data')
os.environ['PYWTK_CACHE_DIR'] = pywtk_cache
site_id = '0'
start = pandas.Timestamp('2013-01-01', tz='utc')
end = pandas.Timestamp('2013-01-07', tz='utc')
utc = True
attributes = ["power", "wind_direction", "wind_speed", "temperature", "pressure", "density"]
met_data = get_nc_data(
site_id, start, end, attributes, utc=utc, nc_dir=WIND_MET_NC_DIR)
attributes_hour = [
"hour_ahead_power", "hour_ahead_power_p90",
"hour_ahead_power_p10"]
attributes_4_hour = [
"4_hour_ahead_power", "4_hour_ahead_power_p90",
"4_hour_ahead_power_p10"]
attributes_6_hour = [
"6_hour_ahead_power", "6_hour_ahead_power_p90",
"6_hour_ahead_power_p10"]
attributes_day = [
"day_ahead_power", "day_ahead_power_p90", "day_ahead_power_p10"]
fcst_attributes = attributes_hour + attributes_4_hour + attributes_6_hour + attributes_day
fcst_data = get_nc_data(
site_id, start, end, fcst_attributes, utc=utc, nc_dir=WIND_FCST_DIR)
# Longitude first
sites = get_3tiersites_from_wkt('POINT(23.8000 -68.33000)')
assert 0
| StarcoderdataPython |
1758259 | print('{:=^100}'.format(' DESAFIO 23-A '))
n = str(input('\nEnter a number from 0 to 9999: '))
n = '{:0>4}'.format(n[:4])
unidade = n[3]
dezena = n[2]
centena = n[1]
milhar = n[0]
print('\nUnits: {}\nTens: {}\nHundreds: {}\nThousands: {}\n'.format(unidade, dezena, centena, milhar))
print('{:=^100}'.format(' FIM DESAFIO 23-A ')) | StarcoderdataPython |
3357337 | """This module includes a series of test cases to confirm the validity of tool_box.py
UnitTests(unittest.TestCase) -- this class is a unit test which includes 5 different tests for tool_box.py
"""
import unittest
import pandas as pd
from covid_wa import tool_box
import sys
sys.path.append('..')
data = pd.read_csv("data/Unemployed/Unemployment.csv")
plot_data = pd.read_csv("data/COVID19/COVID19-Rate and Unemployment.csv")
class UnitTests(unittest.TestCase):
""""This class is a unit test which includes 8 different test for tool_box.py."""
#Smoke test
def test_smoke(self):
""""This function is a smoke test for tool_box.py.
Args:
self: Instance object.
Returns:
bool: True for pass the test, False otherwise.
"""
county = "King"
month = "June"
text = tool_box.unemploy_text_parser(county, month, data)
def test_oneshot1(self):
""""This function is a smoke test for tool_box.py.
Args:
self: Instance object.
Returns:
bool: True for pass the test, False otherwise.
"""
county = "King"
month = "June"
text = tool_box.unemploy_text_parser(county, month, data)
test_text = 'County Name: King County Period: June Unemployment Rate: 9.6'
self.assertEqual(text, test_text)
def test_oneshot2(self):
""""This function is a smoke test for tool_box.py.
Args:
self: Instance object.
Returns:
bool: True for pass the test, False otherwise.
"""
county = "Pacific"
month = "September"
text = tool_box.unemploy_text_parser(county,month, data)
test_text = 'County Name: Pacific County Period: September Unemployment Rate: 9.5'
self.assertEqual(text, test_text)
def test_edge1(self):
""""This function is a smoke test for tool_box.py.
Args:
self: Instance object.
Returns:
bool: True for pass the test, False otherwise.
"""
with self.assertRaises(ValueError):
county = "San Francisco"
month = "June"
text = tool_box.unemploy_text_parser(county, month, data)
def test_edge2(self):
""""This function is a smoke test for tool_box.py.
Args:
self: Instance object.
Returns:
bool: True for pass the test, False otherwise.
"""
with self.assertRaises(ValueError):
county = "King"
month = "November"
text = tool_box.unemploy_text_parser(county, month, data)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
1765094 | <reponame>kripken/intensityengine
# Copyright 2010 <NAME> ('kripken'). All rights reserved.
# This file is part of Syntensity/the Intensity Engine, an open source project. See COPYING.txt for licensing.
from django import template
from django.utils.safestring import SafeString
from django.utils.html import escape
register = template.Library()
def enum_key(enum, value):
for key in enum.__dict__:
if value == getattr(enum, key):
return key
return '?'
@register.filter
def instance_status(value):
return enum_key(ServerInstance.STATUS, value)
@register.filter
def instance_mode(value):
return enum_key(ServerInstance.MODE, value)
@register.filter
def hl_activity(value):
"""Renders a small 'headline' version of an activity, including brief summary, link, etc."""
if value is None:
return ''
return SafeString('<a href="/tracker/activity/view/' + value.uuid + '/">' + value.name + '</a>')
@register.filter
def hl_account(value, my_account=None):
if value is None or value == '':
return ''
nickname = escape(value.nickname) if my_account is None or value.uuid != my_account.uuid else "<b>Me (%s)</b>" % value.nickname
return SafeString('<a href="/tracker/account/view/' + escape(value.uuid) + '/">' + nickname + '</a>')
@register.filter
def hl_instance(value):
if value is None or value == '':
return ''
def zero_if_none(value): return value if value is not None else 0
return SafeString('<a href="/tracker/instance/view/%s/">%s</a> [%s/%s : %s]' % tuple(map(
lambda val: escape(str(val)),
(value.uuid, value.user_interface, zero_if_none(value.players), zero_if_none(value.max_players), value.activity)
)))
@register.filter
def hl_account_list(accounts, my_account=None):
if accounts is None or accounts == '' or len(accounts) == 0:
return ''
return reduce(lambda x,y: x + SafeString(', ') + y, map(lambda account: hl_account(account, my_account), accounts))
@register.filter
def hl_asset(value):
if value is None or value == '':
return ''
return SafeString('<a href="/tracker/asset/view/' + escape(value.uuid) + '/">' + value.location + '</a>')
@register.filter
def mul_1000(value):
if value is None or value == '':
return ''
return value*1000
# Filtrations
def check_only_mine(request):
if not request.user.is_authenticated(): return False
if 'filtration' in request.GET:
return request.GET.get('only_mine') == 'on'
else:
return True # By default, only mine
def filtration(glob, only_mine):
return glob + '|' + str(int(only_mine))
@register.filter
def filtrationer(filtration, examples=None):
glob, only_mine = filtration.split('|')
if examples is not None:
examples = ' (e.g., %s)' % examples
else:
examples = ''
return SafeString('''
<form method="GET" action="">
Pattern%s: <input type="text" name="glob" value="%s">
<input type="checkbox" name="only_mine" %s>Show only mine
<input type="hidden" name="filtration" value="1">
<input type="submit" value="Apply">
</form>
''' % (examples, glob, 'checked' if only_mine == '1' else ''))
# Other
@register.filter
def timesizeformat(total_seconds):
'''
>>> timesizeformat(0)
'0 seconds'
>>> timesizeformat(1)
'1 seconds'
>>> timesizeformat(9)
'9 seconds'
>>> timesizeformat(11)
'11 seconds'
>>> timesizeformat(59)
'59 seconds'
>>> timesizeformat(60)
'1:00 minutes'
>>> timesizeformat(70) # Show seconds, not proportion! (Not seconds/60)
'1:10 minutes'
>>> timesizeformat(119)
'1:59 minutes'
>>> timesizeformat(60*3+42)
'3:42 minutes'
>>> timesizeformat(60*59+57)
'59:57 minutes'
>>> timesizeformat(60*60)
'1:00:00 hours'
>>> timesizeformat(60*60+53)
'1:00:53 hours'
>>> timesizeformat(60*60+42*60)
'1:42:00 hours'
>>> timesizeformat(60*60+51*60+37)
'1:51:37 hours'
>>> timesizeformat(60*60*85+24*60+16) # Don't do days - just hours
'85:24:16 hours'
'''
try:
total_seconds = int(total_seconds)
except TypeError:
return u"0 seconds"
seconds = total_seconds % 60
total_seconds = total_seconds/60
minutes = total_seconds % 60
hours = total_seconds/60
time = '%02d' % seconds
text = 'seconds'
if minutes > 0 or hours > 0:
time = ('%02d:' % minutes) + time
text = 'minutes'
if hours > 0:
time = str(hours) + ':' + time
text = 'hours'
if time[0] == '0' and len(time) > 0: time = time[1:]
return time + ' ' + text
if __name__ == '__main__':
import doctest
doctest.testmod()
else:
from intensity.models import ServerInstance, UserAccount
| StarcoderdataPython |
3320677 | from datasets.census_dataloader import get_income_census_dataloaders
from models.experiment_adaptation_pretrain_learner import FederatedDAANLearner
from models.experiment_finetune_target_learner import FederatedTargetLearner
from utils import get_timestamp, get_current_date, create_id_from_hyperparameters, test_classifier
def pretrain_census(data_tag,
census_dann_root_dir,
learner_hyperparameters,
data_hyperparameters,
model,
apply_feature_group=True):
# hyper-parameters
using_interaction = learner_hyperparameters['using_interaction']
momentum = learner_hyperparameters['momentum']
weight_decay = learner_hyperparameters['weight_decay']
batch_size = learner_hyperparameters['batch_size']
lr = learner_hyperparameters['lr']
epoch_patience = learner_hyperparameters['epoch_patience']
max_epochs = learner_hyperparameters['max_epochs']
valid_metric = learner_hyperparameters['valid_metric']
date = get_current_date()
timestamp = get_timestamp()
optimizer_param_dict = {"src": {"lr": lr, "momentum": momentum, "weight_decay": weight_decay}}
# create pre-train task id
hyperparameter_dict = {"lr": lr, "bs": batch_size, "me": max_epochs, "ts": timestamp}
if apply_feature_group:
using_intr_tag = "intr" + str(True) if using_interaction else "intr" + str(False)
task_id = date + "_census_fg_adapt_" + data_tag + "_" + using_intr_tag + "_" + create_id_from_hyperparameters(
hyperparameter_dict)
else:
task_id = date + "_census_no_fg_adapt_" + data_tag + "_" + create_id_from_hyperparameters(hyperparameter_dict)
print("[INFO] perform task:{0}".format(task_id))
# load data
source_train_file_name = data_hyperparameters['source_ad_train_file_name']
source_valid_file_name = data_hyperparameters['source_ad_valid_file_name']
target_train_file_name = data_hyperparameters['target_ad_train_file_name']
target_valid_file_name = data_hyperparameters['target_ft_valid_file_name']
print(f"[INFO] load source train from: {source_train_file_name}.")
print(f"[INFO] load source valid from: {source_valid_file_name}.")
print(f"[INFO] load target train from: {target_train_file_name}.")
print(f"[INFO] load target valid from: {target_valid_file_name}.")
print("[INFO] Load train data.")
source_da_census_train_loader, _ = get_income_census_dataloaders(
ds_file_name=source_train_file_name, batch_size=batch_size, split_ratio=1.0)
target_da_census_train_loader, _ = get_income_census_dataloaders(
ds_file_name=target_train_file_name, batch_size=batch_size, split_ratio=1.0)
print("[INFO] Load valid data.")
source_census_valid_loader, _ = get_income_census_dataloaders(
ds_file_name=source_valid_file_name, batch_size=batch_size * 4, split_ratio=1.0)
target_census_valid_loader, _ = get_income_census_dataloaders(
ds_file_name=target_valid_file_name, batch_size=batch_size * 4, split_ratio=1.0)
plat = FederatedDAANLearner(model=model,
source_da_train_loader=source_da_census_train_loader,
source_val_loader=source_census_valid_loader,
target_da_train_loader=target_da_census_train_loader,
target_val_loader=target_census_valid_loader,
max_epochs=max_epochs,
epoch_patience=epoch_patience)
plat.set_model_save_info(census_dann_root_dir)
plat.train_dann(epochs=200,
task_id=task_id,
metric=valid_metric,
optimizer_param_dict=optimizer_param_dict)
return task_id
def finetune_census(pretrain_task_id,
census_pretain_model_root_dir,
census_finetune_target_root_dir,
learner_hyperparameters,
data_hyperparameters,
model):
# hyper-parameters
load_global_classifier = learner_hyperparameters['load_global_classifier']
momentum = learner_hyperparameters['momentum']
weight_decay = learner_hyperparameters['weight_decay']
batch_size = learner_hyperparameters['batch_size']
lr = learner_hyperparameters['lr']
valid_metric = learner_hyperparameters['valid_metric']
date = get_current_date()
timestamp = get_timestamp()
# create fine-tune task id
hyperparameter_dict = {"lr": lr, "bs": batch_size, "ts": timestamp}
appendix = create_id_from_hyperparameters(hyperparameter_dict)
glr = "ft_glr" if load_global_classifier else "rt_glr"
target_task_id = pretrain_task_id + "@target_" + date + "_" + glr + "_" + appendix
print("[INFO] perform task:{0}".format(target_task_id))
# load pre-trained model
model.load_model(root=census_pretain_model_root_dir,
task_id=pretrain_task_id,
load_global_classifier=load_global_classifier,
timestamp=None)
print("[DEBUG] Global classifier Model Parameter Before train:")
model.print_parameters()
# Load data
target_ft_train_file_name = data_hyperparameters['target_ft_train_file_name']
target_ft_valid_file_name = data_hyperparameters['target_ft_valid_file_name']
target_ft_test_file_name = data_hyperparameters['target_ft_test_file_name']
print(f"[INFO] load target ft train data from {target_ft_train_file_name}.")
print(f"[INFO] load target ft valid data from {target_ft_valid_file_name}.")
print(f"[INFO] load target ft test data from {target_ft_test_file_name}.")
print("[INFO] Load train data")
target_train_loader, _ = get_income_census_dataloaders(
ds_file_name=target_ft_train_file_name, batch_size=batch_size, split_ratio=1.0)
print("[INFO] Load test data")
target_valid_loader, _ = get_income_census_dataloaders(
ds_file_name=target_ft_valid_file_name, batch_size=batch_size, split_ratio=1.0)
print("[INFO] Load test data")
target_test_loader, _ = get_income_census_dataloaders(
ds_file_name=target_ft_test_file_name, batch_size=batch_size, split_ratio=1.0)
# perform target training
plat_target = FederatedTargetLearner(model=model,
target_train_loader=target_train_loader,
target_val_loader=target_valid_loader,
patience=800,
max_global_epochs=500)
plat_target.set_model_save_info(census_finetune_target_root_dir)
plat_target.train_target_with_alternating(global_epochs=500,
top_epochs=1,
bottom_epochs=1,
lr=lr,
task_id=target_task_id,
metric=valid_metric,
momentum=momentum,
weight_decay=weight_decay)
# load best model
model.load_model(root=census_finetune_target_root_dir,
task_id=target_task_id,
load_global_classifier=True,
timestamp=None)
print("[DEBUG] Global classifier Model Parameter After train:")
model.print_parameters()
acc, auc, ks = test_classifier(model, target_test_loader, 'test')
print(f"acc:{acc}, auc:{auc}, ks:{ks}")
return target_task_id
def train_no_adaptation(data_tag,
census_no_ad_root_dir,
learner_hyperparameters,
data_hyperparameters,
model):
# hyper-parameters
apply_feature_group = learner_hyperparameters['apply_feature_group']
train_data_tag = learner_hyperparameters['train_data_tag']
momentum = learner_hyperparameters['momentum']
weight_decay = learner_hyperparameters['weight_decay']
batch_size = learner_hyperparameters['batch_size']
lr = learner_hyperparameters['lr']
epoch_patience = learner_hyperparameters['epoch_patience']
max_epochs = learner_hyperparameters['max_epochs']
valid_metric = learner_hyperparameters['valid_metric']
src_tgt_train_file_name = data_hyperparameters['src_tgt_train_file_name']
source_valid_file_name = data_hyperparameters['source_ad_valid_file_name']
target_ft_train_file_name = data_hyperparameters['target_ft_train_file_name']
target_ft_valid_file_name = data_hyperparameters['target_ft_valid_file_name']
target_ft_test_file_name = data_hyperparameters['target_ft_test_file_name']
data_file_name_dict = {"tgt": target_ft_train_file_name,
"all": src_tgt_train_file_name}
# print(f"[INFO] load source train from: {source_train_file_name}.")
print(f"[INFO] load source valid from: {source_valid_file_name}.")
print(f"[INFO] load target train from: {target_ft_train_file_name}.")
print(f"[INFO] load target valid from: {target_ft_valid_file_name}.")
print(f"[INFO] load target test from: {target_ft_test_file_name}.")
print(f"[INFO] load src+tgt test from: {src_tgt_train_file_name}.")
timestamp = get_timestamp()
fg_tag = "census_no_ad_w_fg" if apply_feature_group else "census_no_ad_wo_fg"
date = get_current_date() + "_" + data_tag + "_" + fg_tag
tries = 1
task_id_list = list()
for version in range(tries):
hyperparameter_dict = {"lr": lr, "bs": batch_size, "ts": timestamp, "ve": version}
task_id = date + "_" + train_data_tag + "_" + create_id_from_hyperparameters(hyperparameter_dict)
task_id_list.append(task_id)
print("[INFO] perform task:{0}".format(task_id))
print("[INFO] model created.")
src_train_loader, _ = get_income_census_dataloaders(
ds_file_name=data_file_name_dict[train_data_tag], batch_size=batch_size, split_ratio=1.0)
src_valid_loader, _ = get_income_census_dataloaders(
ds_file_name=source_valid_file_name, batch_size=batch_size * 4, split_ratio=1.0)
tgt_train_loader, _ = get_income_census_dataloaders(
ds_file_name=target_ft_train_file_name, batch_size=batch_size, split_ratio=1.0)
tgt_valid_loader, _ = get_income_census_dataloaders(
ds_file_name=target_ft_valid_file_name, batch_size=batch_size * 4, split_ratio=1.0)
tgt_test_loader, _ = get_income_census_dataloaders(
ds_file_name=target_ft_test_file_name, batch_size=batch_size * 4, split_ratio=1.0)
plat = FederatedDAANLearner(model=model,
source_da_train_loader=src_train_loader,
source_val_loader=src_valid_loader,
target_da_train_loader=tgt_train_loader,
target_val_loader=tgt_valid_loader,
epoch_patience=epoch_patience,
validation_batch_interval=5)
plat.set_model_save_info(census_no_ad_root_dir)
plat.train_wo_adaption(epochs=max_epochs,
lr=lr,
train_source=True,
metric=valid_metric,
task_id=task_id,
momentum=momentum,
weight_decay=weight_decay)
# load best model
model.load_model(root=census_no_ad_root_dir,
task_id=task_id,
load_global_classifier=True,
timestamp=None)
print("[DEBUG] Global classifier Model Parameter After train:")
model.print_parameters()
acc, auc, ks = test_classifier(model, tgt_test_loader, 'test')
print(f"acc:{acc}, auc:{auc}, ks:{ks}")
return task_id_list
| StarcoderdataPython |
3211755 | <reponame>hawk-earnin/quickstatus
from expiration import TimeFormatError
from status import (
Expiration,
print_statuses_list,
clear_status,
set_status,
)
def add_default(config, status, time):
try:
s = config.statuses[status]
except KeyError:
        print(f'{status} is not a valid status. Valid statuses are:')
print_statuses_list(config.statuses)
exit(1)
try:
t = Expiration.from_timestamp(time)
except TimeFormatError as err:
print(err)
exit(1)
s.status_expiration = t
config.default_statuses.append(s)
config.write_config()
set_status(config.slack, s)
def pop_default(config):
try:
config.default_statuses.pop()
except IndexError:
pass
config.write_config()
if len(config.default_statuses) > 0:
set_status(config.slack, config.default_statuses[-1])
else:
clear_status(config.slack)
| StarcoderdataPython |
1632245 | <filename>run_pretrained.py
import argparse
import gym
import matplotlib.pyplot as plt
import torch
from matplotlib import animation
import algorithms
import algorithms.dqn.trainer
from algorithms.dqn.model import dqn_vanilla
from utils import gym_utils
def parse_args():
parser = argparse.ArgumentParser(description='Inference config.')
parser.add_argument('--ckpt_path', type=str, required=False,
default='./assets/checkpoint.pt',
help='Path to checkpoint file to run')
parser.add_argument('--save_gif', type=lambda x: (
str(x).lower() not in ['false', '0', 'no']),
required=False, default=True,
help='True to save gif in /results')
return parser.parse_args()
def run_pretrained(ckpt_path, save_gif):
env = gym_utils.EnvWrapper(gym.make('CartPole-v0').unwrapped, num_frames=4)
env.reset()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if not torch.cuda.is_available():
print("Running on CPU!!!")
# Get screen size so that we can initialize layers correctly based on shape
# returned from AI gym. Typical dimensions at this point are close to 3x40x90
# which is the result of a clamped and down-scaled render buffer in get_screen()
init_state = env.get_state().to(device)
_, _, screen_height, screen_width = init_state.shape
# Get number of actions from gym action space
n_actions = env.action_space.n
policy_net = dqn_vanilla.DQN(screen_height, screen_width, n_actions)
policy_net.to(device).eval()
agent = algorithms.dqn.trainer.DQNAgent(policy_net, n_actions, device, env)
runner = CartpoleRunner(agent, env, policy_net, device, ckpt_path)
runner.run(save_gif)
env.close()
print('\n Session ended')
class CartpoleRunner(object):
def __init__(self, agent, env, policy_net, device, ckpt_path):
self.agent = agent
self.env = env
self.policy_net = policy_net
self.device = device
self.steps_done = 0
self.ckpt_path = ckpt_path
self.init_episode = 0
self._load_ckpt()
def run(self, save_gif):
frames = []
self.agent.state = self.env.get_state().to(self.device)
while True:
action = self.agent.select_action(eps_threshold=0)
self.steps_done += 1
_, _, done, _ = self.env.step(action.item())
frames.append(self.env.render(mode="rgb_array"))
# Observe new state
if not done:
next_state = self.env.get_state().to(self.device)
self.agent.state = next_state
if done:
if save_gif:
save_frames_as_gif(frames)
return None
# Move to the next state
def _load_ckpt(self):
print('Loading {}'.format(self.ckpt_path))
checkpoint = torch.load(self.ckpt_path)
self.policy_net.load_state_dict(checkpoint['model'])
def save_frames_as_gif(frames, path='./',
filename='results/cartpole_results.gif'):
# Mess with this to change frame size
plt.figure(figsize=(frames[0].shape[1] / 72.0, frames[0].shape[0] / 72.0),
dpi=72)
patch = plt.imshow(frames[0])
plt.axis('off')
def animate(i):
patch.set_data(frames[i])
anim = animation.FuncAnimation(plt.gcf(), animate, frames=len(frames),
interval=50)
try:
anim.save(path + filename, writer='imagemagick', fps=60)
print(f'Saved .gif file to {path + filename}')
except:
        print('.gif save was unsuccessful, consider installing imagemagick '
              '(with $sudo apt install -y imagemagick)')
if __name__ == '__main__':
args = parse_args()
run_pretrained(args.ckpt_path, args.save_gif)
| StarcoderdataPython |
143620 | import filecmp
import os
import pathlib
from typing import Optional
from approvaltests.core.namer import Namer
from approvaltests.core.reporter import Reporter
from approvaltests.core.writer import Writer
def exists(path: str) -> bool:
return os.path.isfile(path)
class ReporterNotWorkingException(Exception):
def __init__(self, reporter: Reporter):
super().__init__(f"Reporter {reporter} failed to work!")
class FileApprover(object):
def verify(
self,
namer: Namer,
writer: Writer,
reporter: Reporter,
) -> Optional[str]:
base = namer.get_basename()
approved = namer.get_approved_filename(base)
received = namer.get_received_filename(base)
# The writer has the ability to change the name of the received file
received = writer.write_received_file(received)
ok = self.verify_files(approved, received, reporter)
if not ok:
return (
f"Approval Mismatch, received != approved\n"
f"\tApproved: {approved}\n"
f"\tReceived: {received} "
)
return None
def verify_files(
self, approved_file: str, received_file: str, reporter: Reporter
) -> bool:
if self.are_files_the_same(approved_file, received_file):
os.remove(received_file)
return True
worked = reporter.report(received_file, approved_file)
if not worked:
raise ReporterNotWorkingException(reporter)
return False
@staticmethod
def are_files_the_same(approved_file: str, received_file: str) -> bool:
if not exists(approved_file) or not exists(received_file):
return False
if filecmp.cmp(approved_file, received_file):
return True
try:
approved_raw = pathlib.Path(approved_file).read_text()
approved_text = approved_raw.replace("\r\n", "\n")
received_raw = pathlib.Path(received_file).read_text()
received_text = received_raw.replace("\r\n", "\n")
return approved_text == received_text
except:
return False
| StarcoderdataPython |
159223 | # -*- coding: utf-8 -*-
"""Controllers for the comcenter application."""
| StarcoderdataPython |
4820105 | <reponame>KwatME/ccal<filename>kwat/constant/__init__.py
from .DATA_DIRECTORY_PATH import DATA_DIRECTORY_PATH
from .FLOAT_RESOLUTION import FLOAT_RESOLUTION
from .GOLDEN_RATIO import GOLDEN_RATIO
from .NUMBER_OF_CATEGORY import NUMBER_OF_CATEGORY
from .RANDOM_SEED import RANDOM_SEED
from .SAMPLE_FRACTION import SAMPLE_FRACTION
| StarcoderdataPython |
4828734 | <gh_stars>1-10
from zipfile import ZipFile
from time import sleep
from glob import glob
import shutil
import gee
import os
def find(directory):
'''
It returns the paths of files contained in a directory
'''
names = []
for folder in glob(directory):
names.append(folder)
# It removes unnecessary file from the names list
for i in range(len(names)):
if names[i].endswith('ini'):
names.pop(i)
break
return names
def get_zone_names(points):
'''
It creates a symbolic name for a downloaded file using the longitude and latitude values
'''
lat = points[0,:]
lon = points[1,:]
zone_names = []
for i in range(len(lat)):
zone_names.append('lat_'+str(lat[i])+'_lon_'+str(lon[i]))
return zone_names
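# Example (added; the coordinates are made up): for a points array whose first row holds a
# latitude of 45.0 and whose second row holds a longitude of 7.5, the generated name is
# 'lat_45.0_lon_7.5', which is later used as the per-site folder name for the downloaded imagery.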
def data_extractor(path, zone_name, date, downloads_folder_path, windows):
names = find(downloads_folder_path)
for i in range(len(names)):
if not ('.zip' in names[i]):
shutil.rmtree(names[i])
names = find(downloads_folder_path)
# It tries to extract all the downloaded image into a folder
for i in range(len(names)):
if windows == True:
#print(names)
f = names[i].split('.')[0].split('\\')
#print(f)
fn = f[len(f)-1]
pos = path+zone_name+'\\'+date+'\\'+fn
#print(pos)
else:
f = names[i].split('.')[0].split('/')
fn = f[len(f)-1]
pos = path+zone_name+'/'+date+'/'+fn
os.makedirs(pos, exist_ok=True)
try:
with ZipFile(names[i], 'r') as ZipObj:
ZipObj.extractall(pos)
print(' + Extracting file %d of %d' % (i+1, len(names)))
except Exception:
print(' !!! Extraction exception !!!')
pass
# After the extraction it removes all the zip files
for i in range(len(names)):
os.remove(names[i])
def download(points, patch_size, start_date, end_date, date_names, s2_selectors, s1_selectors, n_of_regions, n_imgs, downloads_folder_path, download_path, sen2_images_base_path, sen1_images_base_path, windows):
regions, rectangles = gee.get_region_and_rectangle(points[0,:], points[1,:], size = patch_size)
zone_names = get_zone_names(points)
for scene in range(0, n_of_regions):
#--------------------------------------------- SENTINEL-2 ---------------------------------------------
print(' # Sentinel-2 data downloading')
print(' > Sentinel-2 region %d of %d download started' % (scene+1, n_of_regions))
for period in range(len(start_date)):
s2data = gee.get_s2_data_from_gge(rectangles[scene], start_date[period], end_date[period])
gee.download_s2_data(s2data, regions[scene], zone_names[scene], date_names[period], download_path, n_imgs=n_imgs, selectors=s2_selectors)
#sleep(1)
data_extractor(sen2_images_base_path, zone_names[scene], date_names[period], downloads_folder_path, windows = windows)
print(' > Sentinel-2 region %d of %d download completed' % (scene+1, n_of_regions))
#--------------------------------------------- SENTINEL-2 ---------------------------------------------
print(' # Sentinel-1 data downloading')
print(' > Sentinel-1 region %d of %d download started' % (scene+1, n_of_regions))
for period in range(len(start_date)):
s1data = gee.get_s1_data_from_gge(rectangles[scene], start_date[period], end_date[period])
            gee.download_s1_data(s1data, regions[scene], zone_names[scene], date_names[period], download_path, n_imgs=n_imgs, selectors=s1_selectors)
#sleep(1)
data_extractor(sen1_images_base_path, zone_names[scene], date_names[period], downloads_folder_path, windows = windows)
#sleep(3)
print(' > Sentinel-1 region %d of %d download completed' % (scene+1, n_of_regions))
print(' #Download completed')
| StarcoderdataPython |
1724290 | #!/usr/bin/env python
import os
from migrate.versioning.shell import main
if __name__ == '__main__':
db_url = os.environ.get('DATABASE_URL', 'postgresql://localhost:5432/beercalc')
db_url = db_url.replace('postgres:', 'postgresql:', 1)
main(url=db_url, debug='False', repository='db_repository')
| StarcoderdataPython |
3354542 | import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sympy.solvers import solve
from sympy import Symbol
from matplotlib import patches
import matplotlib.patches as mpatches
import scipy.io as sio
from numpy import linalg as LA
# plotting configuration
ratio = 1.5
figure_len, figure_width = 15*ratio, 12*ratio
font_size_1, font_size_2 = 36*ratio, 36*ratio
legend_size = 18*ratio
line_width, tick_len = 3*ratio, 10*ratio
marker_size = 15*ratio
plot_line_width = 5*ratio
hfont = {'fontname': 'Arial'}
pal = sns.color_palette("deep")
# simulation setup
dt = 0.0001
T = int(16/dt)
# neuronal parameters
tau_e, tau_i = 0.020, 0.010
# network parameters
U, U_max = 1, 6
tau_x = 0.20
alpha_e, alpha_i = 2, 2
# network connectivity
l_l = [1]
k = 0.1
for l in l_l:
Jee = 1.4
Jie = (2-l) * 0.6
Jei = (2-l) * 1.0
Jii = (2-l) * 0.6
Jee_2 = k * Jee
Jie_2 = l * 0.6
Jei_2 = l * 1.0
Jii_2 = l * 0.6
r_e_1, r_e_2, r_i_1, r_i_2 = 0, 0, 0, 0
r_e = np.zeros(2)
z_e_1, z_e_2, z_i_1, z_i_2 = 0, 0, 0, 0
x = np.ones(2)
l_r_e_1, l_r_e_2, l_r_i_1, l_r_i_2 = [], [], [], []
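    # Forward-Euler integration of the rate dynamics and of the synaptic variable x
    # used in the loop below (summary of the update rules, for reference):
    #   tau_e * dr_e/dt = -r_e + [z_e]_+ ** alpha_e
    #   tau_i * dr_i/dt = -r_i + [z_i]_+ ** alpha_i
    #   dx/dt = (U - x) / tau_x + U * (U_max - x) * r_e,  with x clipped to [0, U_max]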
for i in range(T):
if 50000 <= i < 70000:
g_e_1, g_e_2, g_i_1, g_i_2 = 2.2, 3.0, 2, 2
elif 120000 <= i < 140000:
g_e_1, g_e_2, g_i_1, g_i_2 = 3.0, 2.2, 2, 2
else:
g_e_1, g_e_2, g_i_1, g_i_2 = 2.2, 2.2, 2, 2
g_e_1 = g_e_1 * (g_e_1 > 0)
g_e_2 = g_e_2 * (g_e_2 > 0)
g_i_1 = g_i_1 * (g_i_1 > 0)
g_i_2 = g_i_2 * (g_i_2 > 0)
# SSN part
z_e_1 = Jee * r_e_1 + Jee_2 * r_e_2 - Jei * r_i_1 - Jei_2 * r_i_2 + g_e_1
z_e_2 = Jee_2 * r_e_1 + Jee * r_e_2 - Jei_2 * r_i_1 - Jei * r_i_2 + g_e_2
z_i_1 = Jie * x[0] * r_e_1 + Jie_2 * x[1] * r_e_2 - Jii * r_i_1 - Jii_2 * r_i_2 + g_i_1
z_i_2 = Jie_2 * x[0] * r_e_1 + Jie * x[1] * r_e_2 - Jii_2 * r_i_1 - Jii * r_i_2 + g_i_2
z_e_1 = z_e_1 * (z_e_1 > 0)
z_e_2 = z_e_2 * (z_e_2 > 0)
z_i_1 = z_i_1 * (z_i_1 > 0)
z_i_2 = z_i_2 * (z_i_2 > 0)
r_e_1 = r_e_1 + (-r_e_1 + np.power(z_e_1, alpha_e)) / tau_e * dt
r_e_2 = r_e_2 + (-r_e_2 + np.power(z_e_2, alpha_e)) / tau_e * dt
r_i_1 = r_i_1 + (-r_i_1 + np.power(z_i_1, alpha_i)) / tau_i * dt
r_i_2 = r_i_2 + (-r_i_2 + np.power(z_i_2, alpha_i)) / tau_i * dt
r_e_1 = r_e_1 * (r_e_1 > 0)
r_e_2 = r_e_2 * (r_e_2 > 0)
r_i_1 = r_i_1 * (r_i_1 > 0)
r_i_2 = r_i_2 * (r_i_2 > 0)
r_e[0] = np.copy(r_e_1)
r_e[1] = np.copy(r_e_2)
x = x + ((U - x) / tau_x + U * (U_max - x) * r_e) * dt
x = x * (x > 0)
x[x > U_max] = U_max
l_r_e_1.append(r_e_1)
l_r_e_2.append(r_e_2)
l_r_i_1.append(r_i_1)
l_r_i_2.append(r_i_2)
l_r_e_1 = np.asarray(l_r_e_1)
l_r_e_2 = np.asarray(l_r_e_2)
l_r_i_1 = np.asarray(l_r_i_1)
l_r_i_2 = np.asarray(l_r_i_2)
if l == 1:
print(np.mean(l_r_e_1[100000:110000]))
print(np.mean(l_r_e_2[100000:110000]))
print(np.mean(l_r_i_1[100000:110000]))
print(np.mean(l_r_i_2[100000:110000]))
plt.figure(figsize=(figure_len, figure_width))
ax = plt.gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(True)
ax.spines['left'].set_visible(True)
for axis in ['top', 'bottom', 'left', 'right']:
ax.spines[axis].set_linewidth(line_width)
plt.tick_params(width=line_width, length=tick_len)
plt.yscale('symlog', linthreshy=0.1)
plt.plot(l_r_e_1, linewidth=plot_line_width)
plt.plot(l_r_e_2, linewidth=plot_line_width)
plt.plot(l_r_i_1, linewidth=plot_line_width)
# plt.plot(l_r_i_2, linewidth=plot_line_width)
plt.xticks(np.arange(100000, 160000 + 5000, 20000), [0, 2, 4, 6], fontsize=font_size_1, **hfont)
plt.yticks([0, 0.1, 1, 10, 100], fontsize=font_size_1, **hfont)
plt.xlabel('Time (s)', fontsize=font_size_1, **hfont)
plt.ylabel('Firing rate (Hz)', fontsize=font_size_1, **hfont)
plt.xlim([100000, 160000])
plt.ylim([0, 100])
plt.legend(['Exc 1', 'Exc 2', 'Inh'], prop={"family": "Arial", 'size': font_size_1})
if l == 1:
plt.savefig('paper_figures/png/Fig_4_Supralinear_network_global_inhibition_multistable.png')
plt.savefig('paper_figures/pdf/Fig_4_Supralinear_network_global_inhibition_multistable.pdf')
else:
plt.savefig('paper_figures/png/Fig_4_Supralinear_network_cotuned_inhibition_multistable.png')
plt.savefig('paper_figures/pdf/Fig_4_Supralinear_network_cotuned_inhibition_multistable.pdf') | StarcoderdataPython |
1708469 | <reponame>Ruide/ACES<gh_stars>10-100
import os
import sys
# Add this directory to the path in GDB so import will work
path, filename = os.path.split(__file__)
if not path in sys.path:
sys.path.append(path)
import gdb
import gdb_helpers
import gdb_measure_stacks as stack_measure
import hexbox_app_support
RESULTS_DIR = "timing_results"
filename_w_ext = gdb_helpers.get_filename()
filename, ext = os.path.splitext(filename_w_ext)
r_file = os.path.join(RESULTS_DIR, filename + '--timing.csv')
summary_file = os.path.join(RESULTS_DIR, filename + '--timing_summary.csv')
if not os.path.exists(RESULTS_DIR):
os.mkdir(RESULTS_DIR)
brkpt = hexbox_app_support.get_breakpoint()
gdb_helpers.cl()
hexbox = 'hexbox' in filename
stack_measure.setup_stacks_for_measuring(hexbox)
bp = gdb_helpers.EndBreakpoint(brkpt, type=gdb.BP_BREAKPOINT)
bp.set_params(r_file, summary_file, stack_measure.measure_stacks, limit=3)
print "BP Set, Running"
gdb.execute('c')
| StarcoderdataPython |
1690756 | import sys
import pytest
import logging
import warnings
from multiprocessing.util import log_to_stderr
def pytest_addoption(parser):
parser.addoption("--loky-verbosity", type=int, default=logging.DEBUG,
help="log-level: integer, SUBDEBUG(5) - INFO(20)")
parser.addoption("--skip-high-memory", action="store_true",
help="skip high-memory test to avoid conflict on CI.")
@pytest.fixture
def log_lvl(request):
"""Choose logging level for multiprocessing"""
return request.config.getoption("--loky-verbosity")
def pytest_configure(config):
"""Setup multiprocessing logging for loky testing"""
if sys.version_info >= (3, 4):
logging._levelToName[5] = "SUBDEBUG"
log = log_to_stderr(config.getoption("--loky-verbosity"))
log.handlers[0].setFormatter(logging.Formatter(
'[%(levelname)s:%(processName)s:%(threadName)s] %(message)s'))
warnings.simplefilter('always')
def pytest_collection_modifyitems(config, items):
if not config.getoption("--skip-high-memory"):
        # --skip-high-memory not given in cli: do not skip high-memory tests
return
skip_high_memory = pytest.mark.skip(
reason="--skip-high-memory option was provided")
for item in items:
if "high_memory" in item.keywords:
item.add_marker(skip_high_memory)
| StarcoderdataPython |
3330436 | def test():
#test
\
x=1
for x in range(1,5):
# \
#test
\
\
print x
print 3
#test
# print 2
# | StarcoderdataPython |
3248140 | # -*- coding: utf-8 -*-
## \package globals.threads
#
# WARNING: Standard Python module 'threads' cannot be imported here
# MIT licensing
# See: docs/LICENSE.txt
import threading
from dbr.log import Logger
thr = threading
## Standard thread class with renamed methods
class Thread(thr.Thread):
def __init__(self, function, *args):
thr.Thread.__init__(self, target=function, args=args)
self.Active = False
# TODO: Retrieve target exit value
self.ExitVal = None
def __del__(self):
Logger.Debug(__name__, u'Destroying Thread instance; Thread is active: {}'.format(self.IsActive()))
## Exits the thread & sets inactive
#
# Alias of globals.threads.Thread.Join
def Exit(self):
return self.Join()
## Retrieves the thread identifier
def GetId(self):
return self.ident
## Tests if thread is active
def IsActive(self):
return self.Active
## Exits the thread & sets inactive
def join(self):
if self.IsActive():
Logger.Debug(__name__, u'Joining thread ...')
thr.Thread.join(self)
self.Active = False
## Exits the thread & sets inactive
#
# Alias of globals.threads.Thread.join
def Join(self):
return self.join()
## Executes target under new thread
def start(self):
try:
thr.Thread.start(self)
self.Active = True
# Do not try to restart thread if already started
except RuntimeError:
Logger.Debug(__name__, u'ThreadStart: Thread is active, cannot restart')
# In case active state has been changed
self.Active = True
pass
return self.IsActive()
## Alias for start method
def Start(self):
return self.start()
active_threads = []
## Creates a new thread for processing
#
# \return
# \b \e Integer thread ID if successfully activated
def CreateThread(function, *args):
global active_threads
    new_thread = Thread(function, *args)
    new_thread.Start()
thread_id = new_thread.GetId()
if new_thread.IsActive() and thread_id not in active_threads:
active_threads.append(thread_id)
return thread_id
return None
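## Example (illustrative):
#   thread_id = CreateThread(worker_function, arg1, arg2)
#  returns the identifier of the new thread, or None if it was not activated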
## Ends an active thread
#
# TODO: Define
# \param thread_id
# \b \e Integer ID of the thread to kill
# \return
# \b \e True if thread was successfully killed
def KillThread(thread_id):
global active_threads
if thread_id not in active_threads:
return False
# REMOVEME:
return False
| StarcoderdataPython |
1691478 | from typing import List
class Solution:
def beautifulArray(self, N: int) -> List[int]:
result = [1]
while len(result) < N:
result = [i * 2 - 1 for i in result] + [i * 2 for i in result]
return [i for i in result if i <= N]
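        # Illustrative trace for N = 5:
        # [1] -> [1, 2] -> [1, 3, 2, 4] -> [1, 5, 3, 7, 2, 6, 4, 8] -> keep <= 5 -> [1, 5, 3, 2, 4]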
| StarcoderdataPython |
3251181 | <gh_stars>10-100
from .es_utils import *
from tqdm import tqdm
from config import *
from dataloader.utils import *
import argparse
import json
import random
import ipdb
'''Generate the BM25 gray candidates:
Make sure the q-q BM25 index has been built
'''
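# Each line of the output file train_bm25_gray.txt is a JSON object of the form
# {"q": [context utterances], "r": ground-truth response, "nr": [top-k gray candidates]}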
def parser_args():
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='douban', type=str)
parser.add_argument('--pool_size', default=1000, type=int)
parser.add_argument('--batch_size', default=128, type=int)
parser.add_argument('--recall_mode', default='q-q', type=str)
parser.add_argument('--topk', default=10, type=int)
return parser.parse_args()
if __name__ == '__main__':
args = vars(parser_args())
bsz = args['batch_size']
args['mode'] = 'test'
args['model'] = 'dual-bert' # useless
config = load_config(args)
args.update(config)
args['batch_size'] = bsz
searcher = ESSearcher(
f'{args["dataset"]}_{args["recall_mode"]}',
q_q=True if args['recall_mode']=='q-q' else False
)
# load train dataset
read_path = f'{args["root_dir"]}/data/{args["dataset"]}/train.txt'
write_path = f'{args["root_dir"]}/data/{args["dataset"]}/train_bm25_gray.txt'
# dataset = read_text_data_utterances_full(read_path, lang=args['lang'], turn_length=5)
dataset = read_text_data_utterances(read_path, lang=args['lang'])
data = [(utterances[:-1], utterances[-1]) for label, utterances in dataset if label == 1]
responses = [utterances[-1] for label, utterances in dataset]
collector = []
pbar = tqdm(range(0, len(data), args['batch_size']))
for idx in pbar:
        # use each conversation context to search for topic-related responses
context = [i[0] for i in data[idx:idx+args['batch_size']]]
response = [i[1] for i in data[idx:idx+args['batch_size']]]
context_str = [' '.join(i[0]) for i in data[idx:idx+args['batch_size']]]
rest_ = searcher.msearch(context_str, topk=args['pool_size'])
rest = []
for gt_ctx, gt_res, i in zip(context, response, rest_):
i = list(set(i))
if gt_res in i:
i.remove(gt_res)
if len(i) < args['topk']:
rest.append(i + random.sample(responses, args['topk']-len(i)))
else:
rest.append(i[:args['topk']])
for q, r, nr in zip(context, response, rest):
collector.append({'q': q, 'r': r, 'nr': nr})
with open(write_path, 'w', encoding='utf-8') as f:
for data in collector:
string = json.dumps(data)
f.write(f'{string}\n')
| StarcoderdataPython |
1759699 | import json, os, inspect, shutil
from phi.fluidformat import *
class ControlScene:
def __init__(self, path, mode="r", index=None):
self.path = path
self.index = index
if mode.lower() == "r":
with open(os.path.join(path, "description.json"), "r") as file:
self.infodict = json.load(file)
elif mode.lower() == "w":
self.infodict = {}
else:
raise ValueError("Illegal mode: %s " %mode)
def get_final_loss(self, include_reg_loss=True):
final_loss = self.infodict["final_loss"]
if not include_reg_loss and "regloss" in self.infodict:
final_loss -= self.infodict["regloss"]
return final_loss
def improvement(self):
final_loss = self.get_final_loss(include_reg_loss=False)
initial_loss = self.infodict["initial_loss"]
return initial_loss / final_loss
@property
def scenetype(self):
return self.infodict["scenetype"]
def control_frames(self):
return range(self.infodict["n_frames"])
def target_density(self):
return read_sim_frames(self.path, ["target density"])[0]
def get_state(self, index):
return read_sim_frame(self.path, ["density", "velocity", "force"], index, set_missing_to_none=False)
def time_to_keyframe(self, index):
return self.infodict["n_frames"] - index
def put(self, dict, save=True):
self.infodict.update(dict)
if save:
with open(os.path.join(self.path, "description.json"), "w") as out:
json.dump(self.infodict, out, indent=2)
def file(self, name):
return os.path.join(self.path, name)
def __getitem__(self, key):
return self.infodict[key]
def __getattr__(self, item):
return self.infodict[item]
def __str__(self):
return self.path
def copy_calling_script(self):
script_path = inspect.stack()[1][1]
script_name = os.path.basename(script_path)
src_path = os.path.join(self.path, "src")
os.path.isdir(src_path) or os.mkdir(src_path)
target = os.path.join(self.path, "src", script_name)
shutil.copy(script_path, target)
try:
shutil.copystat(script_path, target)
except:
pass # print("Could not copy file metadata to %s"%target)
def list_scenes(directory, category, min=None, max=None):
scenes = []
if min is None:
i = 1
else:
i = int(min)
while True:
path = os.path.join(directory, category, "sim_%06d/"%i)
if not os.path.isdir(path): break
scenes.append(ControlScene(path, "r", i))
if max is not None and i == max: break
i += 1
return scenes
def new_scene(directory, category):
scenedir = os.path.join(directory, category)
if not os.path.isdir(scenedir):
os.makedirs(scenedir)
next_index = 1
else:
indices = [int(name[4:]) for name in os.listdir(scenedir) if name.startswith("sim_")]
if not indices:
next_index = 1
else:
next_index = max(indices) + 1
path = os.path.join(scenedir, "sim_%06d"%next_index)
os.mkdir(path)
return ControlScene(path, "w", next_index)
def load_scene_data(scenes):
densities = []
velocities = []
forces = []
targets = []
remaining_times = []
for scene in scenes:
target = scene.target_density()
for i in scene.control_frames():
density, velocity, force = scene.get_state(i)
remaining_time = scene.time_to_keyframe(i)
densities.append(density)
velocities.append(velocity)
forces.append(force)
remaining_times.append(remaining_time)
targets.append(target)
return densities, velocities, forces, targets, remaining_times | StarcoderdataPython |
3291968 | <reponame>freelancing-solutions/Open-Sentencing-Model
from server import app
from flask import Response, request
from prometheus_client import generate_latest, Counter
from functools import wraps
# route to display configured Prometheus metrics
# note that you will need to set up custom metric observers for your app
@app.route('/metrics')
def prometheus_metrics():
MIMETYPE = 'text/plain; version=0.0.4; charset=utf-8'
return Response(generate_latest(), mimetype=MIMETYPE)
# creates a Prometheus Counter to track requests for specified routes
# usage:
# @app.route('/example')
# @prometheus.track_requests
# def example():
# pass
route_counter = Counter('requests_for_routes', 'Number of requests for specified routes', ['method', 'endpoint'])
def track_requests(route):
@wraps(route)
def wrapper(*args, **kwargs):
route_labels = {
"method": request.method,
"endpoint": str(request.path)
}
route_counter.labels(**route_labels).inc()
return route(*args, **kwargs)
return wrapper
| StarcoderdataPython |
3232858 | <reponame>qqqqqie/bk-log
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-LOG 蓝鲸日志平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-LOG 蓝鲸日志平台 is licensed under the MIT License.
License for BK-LOG 蓝鲸日志平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
import logging
import sys
from unittest.mock import patch
import arrow
from django.conf import settings
from django.test import TestCase, override_settings
from django.utils.translation import ugettext_lazy as _
from apps.log_databus.models import CollectorConfig
from apps.log_databus.views import collector_views
from apps.iam.handlers import permission
logging.basicConfig(level=logging.NOTSET)
logger = logging.getLogger("root")
BK_APP_CODE = "bk_log_search"
BK_BIZ_ID = 2
COLLECTOR_CONFIG_ID = 231
COLLECTOR_SCENARIO_ID_ROW = "row"
COLLECTOR_SCENARIO_ID_SECTION = "section"
SUCCESS_STATUS_CODE = 200
PARAMS = {"bk_biz_id": BK_BIZ_ID, "page": 1, "pagesize": 2, "keyword": ""}
OVERRIDE_MIDDLEWARE = "apps.tests.middlewares.OverrideMiddleware"
CLUSTER_INFOS = {"2_bklog.test3333": {"cluster_config": {"cluster_id": 1, "cluster_name": ""}}}
BATCH_IS_ALLOWED = {"231": {"search_log": True}}
SCENARIOS = [
{
"collector_scenario_id": COLLECTOR_SCENARIO_ID_ROW,
"collector_scenario_name": _("行日志"),
"is_active": True,
"config": {
"paths": {
"field_type": "list",
"field_name": "paths",
"field_alias": _("日志路径"),
"required": True,
"option": {},
},
"conditions": {
"field_type": "dict",
"field_name": "conditions",
"field_alias": _("过滤方式"),
"required": False,
"option": {"choices": ["match", "separator"]},
},
},
},
{
"collector_scenario_id": COLLECTOR_SCENARIO_ID_SECTION,
"collector_scenario_name": _("段日志"),
"is_active": False,
"config": {
"paths": {
"field_type": "list",
"field_name": "paths",
"field_alias": _("日志路径"),
"required": True,
"option": {},
},
"conditions": {
"field_type": "dict",
"field_name": "conditions",
"field_alias": _("过滤方式"),
"required": False,
"option": {"choices": ["match"]},
},
},
},
]
COLLECTORS_LIST = {
"result": True,
"data": {
"total": 1,
"list": [
{
"collector_config_id": 231,
"collector_scenario_name": "行日志文件",
"category_name": "操作系统",
"target_nodes": [{"bk_inst_id": 52, "bk_obj_id": "module"}],
"task_id_list": ["1331697"],
"target_subscription_diff": {},
"created_at": "2021-06-26 15:55:08+0800",
"created_by": "",
"updated_at": "2021-06-26 15:55:08+0800",
"updated_by": "",
"is_deleted": False,
"deleted_at": None,
"deleted_by": None,
"custom_type": "log",
"custom_name": "容器日志上报",
"collector_config_name": "test3333",
"bk_app_code": "bk_log_search",
"collector_scenario_id": "row",
"bk_biz_id": 2,
"category_id": "os",
"target_object_type": "HOST",
"target_node_type": "TOPO",
"description": "test3333",
"is_active": True,
"data_link_id": None,
"bk_data_id": 1500586,
"bk_data_name": None,
"table_id": "test3333",
"etl_config": None,
"subscription_id": 2103,
"bkdata_data_id": None,
"index_set_id": None,
"data_encoding": None,
"params": "{}",
"itsm_ticket_sn": None,
"itsm_ticket_status": "not_apply",
"can_use_independent_es_cluster": True,
"collector_package_count": 10,
"collector_output_format": None,
"collector_config_overlay": None,
"storage_shards_nums": None,
"storage_shards_size": None,
"storage_replies": 1,
"bkdata_data_id_sync_times": 0,
"collector_config_name_en": "",
"storage_cluster_id": 1,
"storage_cluster_name": "",
"table_id_prefix": "2_bklog_",
"is_search": False,
"permission": {"search_log": True},
"create_clean_able": True,
"bkdata_index_set_ids": [],
}
],
},
"code": 0,
"message": "",
}
class TestCollectorViewSetAPI(TestCase):
"""
    Test the endpoints in CollectorViewSet
"""
@patch("apps.api.TransferApi.get_result_table_storage", lambda _: CLUSTER_INFOS)
@patch("apps.log_databus.views.collector_views.CollectorViewSet.get_permissions", lambda _: [])
@patch.object(collector_views.CollectorViewSet, "get_permissions", lambda _: [])
@patch.object(permission.Permission, "batch_is_allowed", lambda _, actions, resources: BATCH_IS_ALLOWED)
@override_settings(MIDDLEWARE=(OVERRIDE_MIDDLEWARE,))
def test_list_collector(self):
"""
        Test api.v1.databus.collectors
"""
        # Add a CollectorConfig record to the test database
self.maxDiff = 500000
CollectorConfig.objects.create(
collector_config_id=COLLECTOR_CONFIG_ID,
collector_config_name="test3333",
bk_app_code=BK_APP_CODE,
collector_scenario_id=COLLECTOR_SCENARIO_ID_ROW,
bk_biz_id=BK_BIZ_ID,
category_id="os",
target_object_type="HOST",
target_node_type="TOPO",
target_nodes=[{"bk_inst_id": 52, "bk_obj_id": "module"}],
target_subscription_diff={},
description="test3333",
is_active=True,
bk_data_id=1500586,
table_id="2_bklog.test3333",
subscription_id=2103,
task_id_list=["1331697"],
)
        # Fetch created_at and updated_at
collector_config = CollectorConfig.objects.get(collector_config_id=COLLECTOR_CONFIG_ID)
created_at = (
arrow.get(collector_config.created_at).to(settings.TIME_ZONE).strftime(settings.BKDATA_DATETIME_FORMAT)
)
updated_at = (
arrow.get(collector_config.updated_at).to(settings.TIME_ZONE).strftime(settings.BKDATA_DATETIME_FORMAT)
)
COLLECTORS_LIST["data"]["list"][0].update({"created_at": created_at})
COLLECTORS_LIST["data"]["list"][0].update({"updated_at": updated_at})
path = "/api/v1/databus/collectors/"
data = PARAMS
response = self.client.get(path=path, data=data)
content = json.loads(response.content)
logger.info(" {func_name}:{content}".format(func_name=sys._getframe().f_code.co_name, content=content))
self.assertEqual(response.status_code, SUCCESS_STATUS_CODE)
self.assertEqual(content, COLLECTORS_LIST)
@override_settings(MIDDLEWARE=(OVERRIDE_MIDDLEWARE,))
def test_list_scenarios(self):
"""
        Test the api.v1.databus.collectors.scenarios endpoint
"""
path = "/api/v1/databus/collectors/scenarios/"
response = self.client.get(path=path)
content = json.loads(response.content)
logger.info(" {func_name}:{content}".format(func_name=sys._getframe().f_code.co_name, content=content))
self.assertEqual(response.status_code, SUCCESS_STATUS_CODE)
self.assertEqual(content["data"], SCENARIOS)
| StarcoderdataPython |
1692681 | #!/usr/local/bin/python3.5 -u
factor0 = 6
factor1 = 7
print(factor0 * factor1)
| StarcoderdataPython |
3339866 | <gh_stars>0
"""Tests the plotting function developed as part of 2E"""
# Imports
from floodsystem.plot import plot_water_levels
from floodsystem.datafetcher import fetch_measure_levels
import datetime
from floodsystem.station import MonitoringStation
fictional_station = MonitoringStation("station_id", "measure_id",
"Line at y=1 and y=9, and a line that goes diagonally from 0 to 10 across 11 days",
"coord", [1, 9], "made up river", "New Madeupville")
dates = []
for i in range(11):
date = datetime.date(2022, 1, 11-i)
dates.append(date)
# remember that the actual dates go backwards!
levels = [10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
plot_water_levels(fictional_station, dates, levels)
#print("Check that this forms a Z shape, that all three lines are plotted, and that the graph has a legend and title") | StarcoderdataPython |
1712242 | def NumToCharList(num):
return [i for i in str(num)]
def solution(s):
target = []
count = 1
for i,ch in enumerate(s):
if i==len(s)-1:
if count!=1:
target+=ch
target+=NumToCharList(count)
break
else:
target+=ch
break
if s[i]==s[i+1]:
count+=1
else:
target+=ch
if count!=1:
target+=NumToCharList(count)
count=1
return target
def compress(chars):
"""
:type chars: List[str]
:rtype: int
"""
count = 1
write = 0
for i, ch in enumerate(chars):
if i == len(chars) - 1 or chars[i] != chars[i + 1]:
if count != 1:
chars[write] = ch
write += 1
for j in str(count):
chars[write] = j
write += 1
count = 1
else:
chars[write] = ch
write += 1
else:
count += 1
# print(write)
return chars
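# Note: compress mutates chars in place; only the first `write` entries hold the
# compressed sequence. Illustrative example: ['a', 'a', 'a', 'b'] becomes
# ['a', '3', 'b', 'b'], where the trailing 'b' is leftover from the input list.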
s = ['a','c','c','c','c','c','c','c','c','c','c','c','c']
s = ['a']
s = ['a','s','a','a','b','b','b','c','c','c','b','b']
print(compress(s)) | StarcoderdataPython |
199943 | import torch
from torch.utils.tensorboard import SummaryWriter
from torch.nn.utils.clip_grad import clip_grad_norm_
from torch import distributions
from boltzmann import protein
from boltzmann.generative import transforms
from boltzmann import nn
from boltzmann import utils
from boltzmann import training
from sklearn.neighbors.kde import KernelDensity
from sklearn.model_selection import GridSearchCV
from simtk import openmm as mm
from simtk.openmm import app
import numpy as np
import mdtraj as md
import os
import shutil
import argparse
from tqdm import tqdm
#
# Command line and logging
#
def parse_args():
parser = argparse.ArgumentParser(
prog="train.py", description="Train generative model of molecular conformation."
)
subparsers = parser.add_subparsers(dest="action")
# Common io arguments
io_parent_parser = argparse.ArgumentParser(add_help=False)
io_parent_parser.add_argument("--save", required=True, help="basename for output")
io_parent_parser.add_argument(
"--overwrite", action="store_true", help="overwrite previous run"
)
io_parent_parser.set_defaults(overwrite=False)
io_parent_parser.add_argument("--pdb-path", required=True, help="path to pdb file")
io_parent_parser.add_argument(
"--validation", required=True, help="validation dataset name"
)
#
# Init parameters
#
init_parser = subparsers.add_parser(
"init", help="initialize a new network", parents=[io_parent_parser]
)
# Init paths and filenames
init_parser.add_argument("--dcd-path", required=True, help="path to dcd file")
init_parser.add_argument(
"--validation-fraction",
default=0.05,
type=float,
help="fraction of dataset to use for validation (default: %(default)g)",
)
# Network parameters
network_group = init_parser.add_argument_group("network parameters")
network_group.add_argument(
"--model-type",
default="nsf-coupling",
choices=[
"affine-coupling",
"affine-made",
"nsf-unconditional",
"nsf-coupling",
"nsf-made",
],
help="type of model (default: %(default)s)",
)
network_group.add_argument(
"--coupling-layers",
type=int,
default=8,
help="number of coupling layers (default: %(default)d)",
)
network_group.add_argument(
"--hidden-features",
type=int,
default=128,
help="number of hidden features in each layer (default: %(default)d)",
)
network_group.add_argument(
"--hidden-layers",
type=int,
default=2,
help="number of hidden layers (default: %(default)d)",
)
network_group.add_argument(
"--spline-points",
type=int,
default=8,
help="number of spline points in NSF layers (default: %(default)d)",
)
network_group.add_argument(
"--dropout-fraction",
type=float,
default=0.0,
help="strength of dropout (default: %(default)g)",
)
network_group.add_argument(
"--ensemble-size",
type=int,
default=100_000,
help="size of configuration ensemble (default: %(default)d)",
)
    # Pre-transformation parameters
pretrans_group = init_parser.add_argument_group("pretransformation parameters")
pretrans_group.add_argument(
"--pretrans-type",
default="quad-cdf",
choices=["quad-cdf", "none"],
help="pre-transform inputs before neural network (default: %(default)s)",
)
pretrans_group.add_argument(
"--pretrans-epochs",
type=int,
default=500,
help="number of training epochs for pre-transformation layer (default: %(default)d)",
)
pretrans_group.add_argument(
"--pretrans-lr",
type=float,
default=1e-2,
help="learning rate for pretransform training (default: %(default)g)",
)
pretrans_group.add_argument(
"--pretrans-batch-size",
type=int,
default=1024,
help="batch size for pretransformation training (default: %(default)g)",
)
# Noise parameters
noise_group = init_parser.add_argument_group("noise parameters")
noise_group.add_argument(
"--training-noise",
default=None,
type=float,
help="amount of noise to add to training examples (default: automatic)",
)
noise_group.add_argument(
"--min-noise",
default=0.1,
type=float,
help="minimum example noise level for automatic training noise (default: %(default)g)",
)
noise_group.add_argument(
"--max-noise",
default=0.7,
type=float,
help="maximum example noise level for automatic training noise (default: %(default)g)",
)
noise_group.add_argument(
"--n-noise",
default=10,
type=int,
help="number of trial values for automatic training noise (default: %(default)g)",
)
#
# Training Parameters
#
train_parser = subparsers.add_parser(
"train", help="train a network", parents=[io_parent_parser]
)
# Training paths
train_parser.add_argument(
"--load", required=True, help="basename of network to load"
)
# Loss Function parameters
loss_group = train_parser.add_argument_group("loss function parameters")
loss_group.add_argument(
"--example-weight",
type=float,
default=1.0,
help="weight for training by example (default: %(default)g)",
)
loss_group.add_argument(
"--energy-weight",
type=float,
default=0.0,
help="weight for training by energy (default: %(default)g)",
)
# Energy evaluation parameters
energy_group = train_parser.add_argument_group("parameters for energy function")
energy_group.add_argument(
"--temperature",
type=float,
default=298.0,
help="temperature (default: %(default)g)",
)
energy_group.add_argument(
"--energy-max",
type=float,
default=1e20,
help="maximum energy (default: %(default)g)",
)
energy_group.add_argument(
"--energy-high",
type=float,
default=1e10,
help="log transform energies above this value (default: %(default)g)",
)
# Optimization parameters
optimizer_group = train_parser.add_argument_group("optimization parameters")
optimizer_group.add_argument(
"--epochs",
type=int,
default=1000,
help="number of training iterations (default: %(default)d)",
)
optimizer_group.add_argument(
"--batch-size",
type=int,
default=1024,
help="size of training batch (default: %(default)d)",
)
optimizer_group.add_argument(
"--warmup-epochs",
type=int,
default=10,
help="gradually raise learning rate over first WARMUP_EPOCHS (default: %(default)d)",
)
optimizer_group.add_argument(
"--warmup-factor",
type=float,
default=1000,
help="learning rate starts WARMUP_FACTOR below init-lr (default: %(default)d)",
)
optimizer_group.add_argument(
"--init-lr",
type=float,
default=1e-3,
help="initial learning rate (default: %(default)g)",
)
optimizer_group.add_argument(
"--final-lr",
type=float,
default=1e-4,
help="final learning rate (default: %(default)g)",
)
optimizer_group.add_argument(
"--weight-decay",
type=float,
default=1e-3,
help="strength of weight decay (default: %(default)g)",
)
optimizer_group.add_argument(
"--max-gradient",
type=float,
default=1000.0,
help="maximum allowed gradient (default: %(default)g)",
)
optimizer_group.add_argument(
"--log-freq",
type=int,
default=10,
help="how often to update tensorboard (default: %(default)d)",
)
args = parser.parse_args()
return args
def setup_writer(args):
writer = SummaryWriter(log_dir=f"runs/{args.save}", purge_step=0, flush_secs=30)
setup_custom_scalars(args, writer)
return writer
def setup_custom_scalars(args, writer):
writer.add_custom_scalars(
{
"Sampling": {
"acceptance rate": ["Multiline", ["acceptance_rate"]],
"step size": ["Multiline", ["step_size"]],
"gradient norm": ["Multiline", ["gradient_norm"]],
},
"Total Losses (weighted)": {
"total loss": ["Multiline", ["total_loss"]],
"energy loss": ["Multiline", ["weighted_energy_total_loss"]],
"example loss": ["Multiline", ["weighted_example_total_loss"]],
},
"Example Losses (unweighted)": {
"total": [
"Multiline",
["example_total_loss", "val_example_total_loss"],
],
"ml": ["Multiline", ["example_ml_loss", "val_example_ml_loss"]],
"jac": ["Multiline", ["example_jac_loss", "val_example_jac_loss"]],
},
"Energy Losses (unweighted)": {
"total": ["Multiline", ["energy_total_loss"]],
"ml": ["Multiline", ["energy_kl_loss"]],
"jac": ["Multiline", ["energy_jac_loss"]],
},
"Generative Energies": {
"minimum": ["Multiline", ["minimum_energy"]],
"mean": ["Multiline", ["mean_energy"]],
"median": ["Multiline", ["median_energy"]],
},
}
)
#
# File input / output
#
def delete_run(name):
if os.path.exists(f"models/{name}.pkl"):
os.remove(f"models/{name}.pkl")
if os.path.exists(f"gen_samples/{name}.pdb"):
os.remove(f"gen_samples/{name}.pdb")
if os.path.exists(f"ensembles/{name}.dcd"):
os.remove(f"ensembles/{name}.dcd")
if os.path.exists(f"runs/{name}"):
shutil.rmtree(f"runs/{name}")
def create_dirs():
os.makedirs("models", exist_ok=True)
os.makedirs("gen_samples", exist_ok=True)
os.makedirs("ensembles", exist_ok=True)
os.makedirs("validation", exist_ok=True)
def load_trajectory(pdb_path, dcd_path, align=False):
t = md.load(dcd_path, top=pdb_path)
if align:
ind = t.topology.select("backbone")
t.superpose(t, frame=0, atom_indices=ind)
return t
def load_network(path, device):
net = torch.load(path).to(device)
print(net)
print_number_trainable_params(net)
return net
#
# Build network
#
def build_affine_coupling(
n_dim, n_coupling, hidden_layers, hidden_features, dropout_fraction
):
layers = []
for _ in range(n_coupling):
p = transforms.RandomPermutation(n_dim, 1)
mask_even = utils.create_alternating_binary_mask(features=n_dim, even=True)
mask_odd = utils.create_alternating_binary_mask(features=n_dim, even=False)
t1 = transforms.AffineCouplingTransform(
mask=mask_even,
transform_net_create_fn=lambda in_features, out_features: nn.ResidualNet(
in_features=in_features,
out_features=out_features,
hidden_features=hidden_features,
num_blocks=hidden_layers,
dropout_probability=dropout_fraction,
use_batch_norm=False,
),
)
t2 = transforms.AffineCouplingTransform(
mask=mask_odd,
transform_net_create_fn=lambda in_features, out_features: nn.ResidualNet(
in_features=in_features,
out_features=out_features,
hidden_features=hidden_features,
num_blocks=hidden_layers,
dropout_probability=dropout_fraction,
use_batch_norm=False,
),
)
layers.append(p)
layers.append(t1)
layers.append(t2)
return layers
def build_affine_made(n_dim, hidden_layers, hidden_features, dropout_fraction):
made = transforms.MaskedAffineAutoregressiveTransform(
n_dim,
hidden_features=hidden_features,
num_blocks=hidden_layers,
dropout_probability=dropout_fraction,
use_batch_norm=False,
)
return [made]
def build_nsf_unconditional(n_dim, spline_points):
nsf = transforms.PiecewiseRationalQuadraticCDF(
[n_dim],
num_bins=spline_points,
tails="linear",
tail_bound=15,
identity_init=True,
)
return [nsf]
def build_nsf_coupling(
n_dim, n_coupling, spline_points, hidden_layers, hidden_features, dropout_fraction
):
layers = []
for _ in range(n_coupling):
p = transforms.RandomPermutation(n_dim, 1)
mask_even = utils.create_alternating_binary_mask(features=n_dim, even=True)
mask_odd = utils.create_alternating_binary_mask(features=n_dim, even=False)
t1 = transforms.PiecewiseRationalQuadraticCouplingTransform(
mask=mask_even,
transform_net_create_fn=lambda in_features, out_features: nn.ResidualNet(
in_features=in_features,
out_features=out_features,
hidden_features=hidden_features,
num_blocks=hidden_layers,
dropout_probability=dropout_fraction,
use_batch_norm=False,
),
tails="linear",
tail_bound=15,
num_bins=spline_points,
apply_unconditional_transform=False,
)
t2 = transforms.PiecewiseRationalQuadraticCouplingTransform(
mask=mask_odd,
transform_net_create_fn=lambda in_features, out_features: nn.ResidualNet(
in_features=in_features,
out_features=out_features,
hidden_features=hidden_features,
num_blocks=hidden_layers,
dropout_probability=dropout_fraction,
use_batch_norm=False,
),
tails="linear",
tail_bound=15,
num_bins=spline_points,
apply_unconditional_transform=False,
)
layers.append(p)
layers.append(t1)
layers.append(t2)
return layers
def build_nsf_made(
n_dim, spline_points, hidden_layers, hidden_features, dropout_fraction
):
made = transforms.MaskedPiecewiseRationalQuadraticAutoregressiveTransform(
features=n_dim,
hidden_features=hidden_features,
num_blocks=hidden_layers,
dropout_probability=dropout_fraction,
use_batch_norm=False,
num_bins=spline_points,
tails="linear",
tail_bound=15,
)
return [made]
def build_network(
model_type,
n_dim,
topology,
training_data,
n_coupling,
spline_points,
hidden_features,
hidden_layers,
dropout_fraction,
pretrans_type,
pretrans_epochs,
pretrans_lr,
pretrans_batch_size,
device,
):
training_data = training_data.to(device)
print("Creating network")
stage1_layers = []
# Create the mixed transofrm layer
pca_block = protein.PCABlock("backbone", True)
mixed = protein.MixedTransform(n_dim, topology, [pca_block], training_data)
stage1_layers.append(mixed)
if pretrans_type == "quad-cdf":
print()
print("Pre-training unconditional NSF layer")
print()
unconditional = build_nsf_unconditional(n_dim - 6, spline_points)[0]
stage1_layers.append(unconditional)
unconditional_net = transforms.CompositeTransform(stage1_layers).to(device)
pre_train_unconditional_nsf(
unconditional_net,
device,
training_data,
pretrans_batch_size,
pretrans_epochs,
pretrans_lr,
10,
)
print()
print("Pretraining completed. Freezing weights")
unconditional.unnormalized_heights.requires_grad_(False)
unconditional.unnormalized_widths.requires_grad_(False)
unconditional.unnormalized_derivatives.requires_grad_(False)
stage1 = unconditional_net
else:
        stage1 = transforms.CompositeTransform(stage1_layers).to(device)
if model_type == "affine-coupling":
stage2_layers = build_affine_coupling(
n_dim - 6, n_coupling, hidden_layers, hidden_features, dropout_fraction
)
elif model_type == "affine-made":
stage2_layers = build_affine_made(
n_dim - 6, hidden_layers, hidden_features, dropout_fraction
)
elif model_type == "nsf-unconditional":
stage2_layers = build_nsf_unconditional(n_dim - 6, spline_points)
elif model_type == "nsf-coupling":
stage2_layers = build_nsf_coupling(
n_dim - 6,
n_coupling,
spline_points,
hidden_layers,
hidden_features,
dropout_fraction,
)
elif model_type == "nsf-made":
stage2_layers = build_nsf_made(
n_dim - 6, spline_points, hidden_layers, hidden_features, dropout_fraction
)
else:
raise RuntimeError()
stage2 = transforms.CompositeTransform(stage2_layers).to(device)
net = transforms.TwoStageComposite(stage1, stage2)
print()
print("Network constructed.")
print(net)
print_number_trainable_params(net)
print()
return net
def print_number_trainable_params(net):
total_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
print()
print(f"Network has {total_params} trainable parameters")
print()
#
# Energy function
#
def get_openmm_context(pdb_path):
pdb = app.PDBFile(pdb_path)
ff = app.ForceField("amber99sbildn.xml", "amber99_obc.xml")
system = ff.createSystem(
pdb.topology,
nonbondedMethod=app.CutoffNonPeriodic,
nonbondedCutoff=1.0,
constraints=None,
)
integrator = mm.LangevinIntegrator(298, 1.0, 0.002)
simulation = app.Simulation(pdb.topology, system, integrator)
context = simulation.context
return context
def get_energy_evaluator(openmm_context, temperature, energy_high, energy_max, device):
energy_high = torch.tensor(
energy_high, dtype=torch.float32, device=device, requires_grad=False
)
energy_max = torch.tensor(
energy_max, dtype=torch.float32, device=device, requires_grad=False
)
def eval_energy(x):
return protein.regularize_energy(
protein.openmm_energy(x, openmm_context, temperature),
energy_high,
energy_max,
)
return eval_energy
#
# Optimizer
#
def setup_optimizer(net, init_lr, weight_decay):
optimizer = torch.optim.AdamW(
net.parameters(), lr=init_lr, weight_decay=weight_decay
)
return optimizer
def setup_scheduler(optimizer, init_lr, final_lr, epochs, warmup_epochs, warmup_factor):
anneal = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, epochs, final_lr)
warmup = utils.GradualWarmupScheduler(
optimizer, warmup_factor, warmup_epochs, after_scheduler=anneal
)
return warmup
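# As configured via the command-line options, the learning rate starts at
# init_lr / warmup_factor, is raised to init_lr over the first warmup_epochs,
# and then follows cosine annealing down to final_lr over the remaining epochs.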
#
# Loss functions
#
def get_ml_loss(net, x_batch, example_weight, dist):
z, z_jac = net.forward(x_batch)
example_ml_loss = -torch.mean(dist.log_prob(z)) * example_weight
example_jac_loss = -torch.mean(z_jac) * example_weight
example_loss = example_ml_loss + example_jac_loss
return example_loss, example_ml_loss, example_jac_loss
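# The loss above is the standard change-of-variables maximum-likelihood objective,
#   -E[ log p_z(f(x)) + log|det J_f(x)| ] * example_weight,
# assuming net.forward returns the mapped latent z together with log|det J_f(x)|.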
#
# Training
#
def get_device():
if torch.cuda.is_available():
print("Using cuda")
device = torch.device("cuda")
else:
print("Using CPU")
device = torch.device("cpu")
return device
def pre_train_unconditional_nsf(
net, device, training_data, batch_size, epochs, lr, out_freq
):
mu = torch.zeros(training_data.shape[-1] - 6, device=device)
cov = torch.eye(training_data.shape[-1] - 6, device=device)
dist = distributions.MultivariateNormal(mu, covariance_matrix=cov).expand(
(batch_size,)
)
indices = np.arange(training_data.shape[0])
optimizer = setup_optimizer(net, lr, 0.0)
with tqdm(range(epochs)) as progress:
for epoch in progress:
net.train()
index_batch = np.random.choice(
                indices, batch_size, replace=True
)
x_batch = training_data[index_batch, :]
loss, _, _ = get_ml_loss(net, x_batch, 1.0, dist)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch % out_freq == 0:
progress.set_postfix(loss=f"{loss.item():8.3f}")
def train_network(args, device):
writer = setup_writer(args)
dcd_path = f"ensembles/{args.load}.dcd"
traj = load_trajectory(args.pdb_path, dcd_path, align=False)
traj.unitcell_lengths = None
traj.unitcell_angles = None
n_dim = traj.xyz.shape[1] * 3
ensemble = traj.xyz.reshape(-1, n_dim)
ensemble = torch.from_numpy(ensemble.astype("float32"))
print(f"Ensemble has size {ensemble.shape[0]} x {ensemble.shape[1]}.\n")
validation_dcd_path = f"validation/{args.validation}.dcd"
valid_traj = load_trajectory(args.pdb_path, validation_dcd_path, align=False)
n_valid_dim = valid_traj.xyz.shape[1] * 3
validation_data = valid_traj.xyz.reshape(-1, n_valid_dim)
validation_data = torch.from_numpy(validation_data.astype("float32")).to(device)
print(
f"Validation has size {validation_data.shape[0]} x {validation_data.shape[1]}.\n"
)
net = load_network(f"models/{args.load}.pkl", device=device)
optimizer = setup_optimizer(
net=net,
init_lr=args.init_lr / args.warmup_factor,
weight_decay=args.weight_decay,
)
scheduler = setup_scheduler(
optimizer,
init_lr=args.init_lr,
final_lr=args.final_lr,
epochs=args.epochs,
warmup_epochs=args.warmup_epochs,
warmup_factor=args.warmup_factor,
)
openmm_context = get_openmm_context(args.pdb_path)
energy_evaluator = get_energy_evaluator(
openmm_context=openmm_context,
temperature=args.temperature,
energy_high=args.energy_high,
energy_max=args.energy_max,
device=device,
)
trainer = MixedLossTrainer(
net, device, ensemble, validation_data, args.batch_size, energy_evaluator
)
with tqdm(range(args.epochs)) as progress:
for epoch in progress:
net.train()
trainer.compute_training_losses()
loss = (
trainer.forward_loss * args.example_weight
+ trainer.inverse_loss * args.energy_weight
)
optimizer.zero_grad()
loss.backward()
gradient_norm = clip_grad_norm_(net.parameters(), args.max_gradient)
optimizer.step()
scheduler.step(epoch)
validation_step = epoch % args.log_freq == 0
if validation_step:
net.eval()
trainer.compute_validation_losses()
writer.add_scalar(
"val_example_ml_loss", trainer.val_forward_ml.item(), epoch
)
writer.add_scalar(
"val_example_jac_loss", trainer.val_forward_jac.item(), epoch
)
writer.add_scalar(
"val_example_total_loss", trainer.val_forward_loss.item(), epoch
)
# Output our training losses
# writer.add_scalar(
# "acceptance_rate", trainer.acceptance_probs[-1], epoch
# )
# writer.add_scalar("step_size", trainer.step_size, epoch)
writer.add_scalar("total_loss", loss.item(), epoch)
writer.add_scalar("gradient_norm", gradient_norm, epoch)
writer.add_scalar("example_ml_loss", trainer.forward_ml, epoch)
writer.add_scalar("example_jac_loss", trainer.forward_jac, epoch)
writer.add_scalar("example_total_loss", trainer.forward_loss, epoch)
writer.add_scalar(
"weighted_example_total_loss",
trainer.forward_loss * args.example_weight,
epoch,
)
writer.add_scalar("energy_kl_loss", trainer.inverse_kl, epoch)
writer.add_scalar("energy_jac_loss", trainer.inverse_jac, epoch)
writer.add_scalar("energy_total_loss", trainer.inverse_loss, epoch)
writer.add_scalar(
"weighted_energy_total_loss",
trainer.inverse_loss * args.energy_weight,
epoch,
)
writer.add_scalar("minimum_energy", trainer.min_energy.item(), epoch)
writer.add_scalar("median_energy", trainer.median_energy.item(), epoch)
writer.add_scalar("mean_energy", trainer.mean_energy.item(), epoch)
progress.set_postfix(loss=f"{loss.item():8.3f}")
# Save our final model
torch.save(net, f"models/{args.save}.pkl")
# Save our reservoir
x = trainer.reservoir.cpu().detach().numpy()
x = x.reshape(trainer.res_size, -1, 3)
traj.xyz = x
traj.save(f"ensembles/{args.save}.dcd")
# Generate examples and write trajectory
net.eval()
z = torch.normal(0, 1, size=(args.batch_size, n_dim - 6), device=device)
x, _ = net.inverse(z)
x = x.cpu().detach().numpy()
x = x.reshape(args.batch_size, -1, 3)
traj.xyz = x
traj.save(f"gen_samples/{args.save}.dcd")
def calculate_example_noise(net, training_data, min_noise, max_noise, n_noise):
# Run all training data through the pretransformation stage of the network
transformed_data, _ = net.stage1_forward(training_data)
transformed_data = transformed_data.cpu().detach().numpy()
np.random.shuffle(transformed_data)
params = {"bandwidth": np.linspace(min_noise, max_noise, n_noise)}
grid = GridSearchCV(
KernelDensity(kernel="gaussian", atol=1e-4, rtol=1e-4),
params,
cv=3,
return_train_score=False,
)
grid.fit(transformed_data)
# Use cross-validation to identify the optimal noise bandwidth.
return grid.best_params_["bandwidth"]
def init_ensemble(ensemble_size, data):
if data.shape[0] != ensemble_size:
print(
f"Generating ensemble by sampling from {data.shape[0]} to {ensemble_size}.\n"
)
sampled = np.random.choice(
np.arange(data.shape[0]), ensemble_size, replace=True
)
ensemble = data[sampled, :]
else:
ensemble = data
return ensemble
def init_network(args, device):
traj = load_trajectory(args.pdb_path, args.dcd_path, align=True)
traj.unitcell_lengths = None
traj.unitcell_angles = None
n_dim = traj.xyz.shape[1] * 3
training_data_npy = traj.xyz.reshape(-1, n_dim)
# Shuffle the training data for later training / test split
np.random.shuffle(training_data_npy)
training_data = torch.from_numpy(training_data_npy.astype("float32"))
print(
f"Trajectory loaded with size {training_data.shape[0]} x {training_data.shape[1]}"
)
net = build_network(
n_dim=n_dim,
model_type=args.model_type,
topology=traj.topology,
training_data=training_data,
n_coupling=args.coupling_layers,
spline_points=args.spline_points,
hidden_features=args.hidden_features,
hidden_layers=args.hidden_layers,
dropout_fraction=args.dropout_fraction,
pretrans_type=args.pretrans_type,
pretrans_epochs=args.pretrans_epochs,
pretrans_lr=args.pretrans_lr,
pretrans_batch_size=args.pretrans_batch_size,
device=device,
)
if args.training_noise is None:
print(
f"Using automatic noise level detection with {args.n_noise} trials from {args.min_noise} to {args.max_noise}."
)
net.example_noise = calculate_example_noise(
net, training_data.to(device), args.min_noise, args.max_noise, args.n_noise
)
print(f"Using automatically determined noise level {net.example_noise}.\n")
else:
net.example_noise = args.training_noise
print(f"Using noise level {net.example_noise} specified on command line.\n")
# We do this just to test if we can.
openmm_context_ = get_openmm_context(args.pdb_path)
# Set aside our validation dataset and create our initial ensemble.
n_valid = int(training_data.shape[0] * args.validation_fraction)
n_train = training_data.shape[0] - n_valid
print(
f"Splitting data into training ({n_train} points) and validation ({n_valid} points) sets.\n"
)
validation_data = training_data[:n_valid, :]
training_data = training_data[n_valid:, :]
ensemble = init_ensemble(args.ensemble_size, training_data)
# Save everything
torch.save(net, f"models/{args.save}.pkl")
x = ensemble.cpu().detach().numpy()
x = x.reshape(args.ensemble_size, -1, 3)
traj.xyz = x
traj.save(f"ensembles/{args.save}.dcd")
y = validation_data.cpu().detach().numpy()
y = y.reshape(n_valid, -1, 3)
traj.xyz = y
traj.save(f"validation/{args.validation}.dcd")
class MixedLossTrainer:
def __init__(
self, net, device, training_data, validation_data, batch_size, energy_evaluator
):
self.net = net
self.device = device
self.training_data = training_data
self.validation_data = validation_data
self.batch_size = batch_size
self.energy_evaluator = energy_evaluator
self.training_indices = np.arange(self.training_data.shape[0])
self.validation_indices = np.arange(self.validation_data.shape[0])
# Setup latent gaussian distribution
mu = torch.zeros(self.training_data.shape[-1] - 6, device=device)
cov = torch.eye(self.training_data.shape[-1] - 6, device=device)
self.latent_distribution = distributions.MultivariateNormal(
mu, covariance_matrix=cov
).expand((self.batch_size,))
# These statistics are updated during training.
self.forward_loss = None
self.forward_ml = None
self.forward_jac = None
self.val_forward_loss = None
self.val_forward_ml = None
self.val_forward_jac = None
self.inverse_loss = None
self.inverse_kl = None
self.inverse_jac = None
self.mean_energy = None
self.median_energy = None
self.min_energy = None
self.acceptance_probs = []
def compute_training_losses(self):
with torch.no_grad():
# choose random examples
example_ind = np.random.choice(
self.training_indices, size=self.batch_size, replace=True
)
x = self.training_data[example_ind, :].to(self.device)
# transform through stage1
z_pretrans, _ = self.net.stage1_forward(x)
# add noise
z_pretrans = z_pretrans + torch.normal(
0,
self.net.example_noise,
size=z_pretrans.shape,
device=z_pretrans.device,
)
# transform back to x
x, _ = self.net.stage1_inverse(z_pretrans)
# transform through full network
z, z_jac = self.net.forward(x)
# compute loss
self.forward_ml = -torch.mean(self.latent_distribution.log_prob(z))
self.forward_jac = -torch.mean(z_jac)
self.forward_loss = self.forward_ml + self.forward_jac
# choose random latent and compute losses
z_prime = self.latent_distribution.sample().to(self.device)
x_prime, x_jac_prime = self.net.inverse(z_prime)
energies = self.energy_evaluator(x_prime)
self.min_energy = torch.min(energies)
self.median_energy = torch.median(energies)
self.mean_energy = torch.mean(energies)
self.inverse_kl = torch.mean(energies)
self.inverse_jac = -torch.mean(x_jac_prime)
self.inverse_loss = self.inverse_kl + self.inverse_jac
def compute_validation_losses(self):
with torch.no_grad():
valid_ind = np.random.choice(
self.validation_indices, size=self.batch_size, replace=True
)
x_valid = self.validation_data[valid_ind, :].to(self.device)
z_valid, z_jac_valid = self.net.forward(x_valid)
self.val_forward_ml = -torch.mean(
self.latent_distribution.log_prob(z_valid)
)
self.val_forward_jac = -torch.mean(z_jac_valid)
self.val_forward_loss = self.val_forward_ml + self.val_forward_jac
if __name__ == "__main__":
args = parse_args()
model_path = f"models/{args.save}.pkl"
if os.path.exists(model_path):
if args.overwrite:
print(f"Warning: output `{model_path}' already exists. Overwriting anyway.")
else:
raise RuntimeError(
f"Output '{model_path}' already exists. If you're sure use --overwrite."
)
delete_run(args.save)
create_dirs()
device = get_device()
if args.action == "init":
init_network(args, device)
elif args.action == "train":
train_network(args, device)
else:
raise RuntimeError(f"Unknown command {args.action}.")
| StarcoderdataPython |
51191 | # Day 1 - Binary number 2
test_cases = int(input())
# Decimal -> binary
def dec_to_bin(decimal):
binary = ''
while decimal != 0:
decimal *= 2
if decimal >= 1:
decimal -= 1
binary += '1'
else:
binary += '0'
if len(binary) >= 13:
binary = 'overflow'
break
return binary
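# Worked example: dec_to_bin(0.625) -> 0.625*2 = 1.25 ('1'), 0.25*2 = 0.5 ('0'),
# 0.5*2 = 1.0 ('1'), so the result is '101'. Fractions that need 13 or more bits
# return 'overflow'.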
for t in range(1, test_cases + 1):
N = float(input())
result = dec_to_bin(N)
print('#{} {}'.format(t, result))
| StarcoderdataPython |
4841991 | import sys
import os
# Add bin to sys path since splunk runs it within this context
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), 'bin'))
sys.path.insert(3, "/usr/local/lib/python2.7/dist-packages")
sys.path.insert(4, "/usr/lib/python2.7/dist-packages")
| StarcoderdataPython |
3373801 | <filename>fedopt_guide/stackoverflow_transformer/transformer_models.py<gh_stars>100-1000
# Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer model for next word prediction.
Reference
Attention Is All You Need, 2017 (https://arxiv.org/abs/1706.03762)
TF transformer tutorial (https://www.tensorflow.org/tutorials/text/transformer)
"""
from typing import Optional, Tuple
import numpy as np
import tensorflow as tf
from utils.models.stackoverflow_models import TransposableEmbedding
DEFAULT_LARGE_NEGATIVE = -1e9
DEFAULT_POSITIONAL_BASE = 10000
def scaled_dot_product_attention(
query: tf.Tensor, key: tf.Tensor, value: tf.Tensor,
    mask: Optional[tf.Tensor]) -> tf.Tensor:
"""Apply the scaled attention weights.
q (query), k (key), v (value) must have matching leading dimensions.
k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
The mask has different shapes depending on its type(padding or look ahead)
but it must be broadcastable for addition.
Args:
query: Query feature vectors, shape == (..., seq_len_q, depth).
key: Key feature vectors, shape == (..., seq_len_k, depth).
value: Value feature vectors, shape == (..., seq_len_v, depth_v).
mask: Float tensor with shape broadcastable to (..., seq_len_q, seq_len_k).
Returns:
The output attention vectors.
"""
matmul_qk = tf.matmul(query, key, transpose_b=True)
# Scale matmul_qk so that the softmax does not vanish when feature dimension
# is large.
ftr_dim = tf.cast(tf.shape(key)[-1], tf.float32)
scaled_attention_logits = matmul_qk / tf.math.sqrt(ftr_dim)
if mask is not None:
scaled_attention_logits += (mask * DEFAULT_LARGE_NEGATIVE)
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1)
output = tf.matmul(attention_weights, value)
return output
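# The computation above is the standard scaled dot-product attention,
#   Attention(Q, K, V) = softmax(Q K^T / sqrt(d_k) + mask * (-1e9)) V,
# where masked positions get a large negative logit and thus ~zero weight.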
class MultiHeadAttention(tf.keras.layers.Layer):
"""Multi-head attention layer for transformer.
Attributes:
num_heads: An integer of the number of heads.
d_model: An integer of the total dimension of the multi-head layer. Must be
divisible by num_heads. Each head will apply the scaled attention weights.
"""
def __init__(self, d_model: int, num_heads: int):
super(MultiHeadAttention, self).__init__()
self.num_heads = num_heads
self.d_model = d_model
if d_model % self.num_heads != 0:
raise ValueError(
'Feature dimension should be divisible by number of heads! Got {}/{}'
.format(d_model, num_heads))
self.depth = d_model // self.num_heads
self.wq = tf.keras.layers.Dense(d_model)
self.wk = tf.keras.layers.Dense(d_model)
self.wv = tf.keras.layers.Dense(d_model)
self.dense = tf.keras.layers.Dense(d_model)
def _split_heads(self, x: tf.Tensor, batch_size: int) -> tf.Tensor:
x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
# Transpose the result such that the shape is (batch_size, num_heads,
# seq_len, depth)
return tf.transpose(x, perm=[0, 2, 1, 3])
def call(self, v: tf.Tensor, k: tf.Tensor, q: tf.Tensor,
mask: tf.Tensor) -> tf.Tensor:
batch_size = tf.shape(q)[0]
q = self.wq(q)
k = self.wk(k)
v = self.wv(v)
q = self._split_heads(q, batch_size)
k = self._split_heads(k, batch_size)
v = self._split_heads(v, batch_size)
scaled_attention = scaled_dot_product_attention(q, k, v, mask)
# Reshape scaled_attention from (batch_size, num_heads, seq_len_q, depth)
# to (batch_size, seq_len_q, num_heads, depth).
scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3])
concat_attention = tf.reshape(scaled_attention,
(batch_size, -1, self.d_model))
# The output is of shape (batch_size, seq_len_q, d_model).
output = self.dense(concat_attention)
return output
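# Shape summary (illustrative): with d_model=512 and num_heads=8, calling the layer
# on (batch, seq_len, 512) tensors returns (batch, seq_len, 512); each head attends
# over a depth of d_model // num_heads = 64.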
def point_wise_feed_forward_network(d_model: int,
d_hidden: int) -> tf.keras.Model:
"""Returns all the possible positional encodings.
Args:
d_model: Dimension of the input feature.
d_hidden: Dimension of the hidden layer.
Returns:
A one-hidden-layer MLP.
"""
return tf.keras.Sequential([
tf.keras.layers.Dense(d_hidden, activation='relu'),
tf.keras.layers.Dense(d_model)
])
class EncoderLayer(tf.keras.layers.Layer):
"""Encoder of transformer."""
def __init__(self, d_model, num_heads, d_hidden, dropout_rate):
super(EncoderLayer, self).__init__()
self.mha = MultiHeadAttention(d_model, num_heads)
self.ffn = point_wise_feed_forward_network(d_model, d_hidden)
self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = tf.keras.layers.Dropout(dropout_rate)
self.dropout2 = tf.keras.layers.Dropout(dropout_rate)
def call(self, x: tf.Tensor, training: Optional[bool],
mask: tf.Tensor) -> tf.Tensor:
attn_output = self.mha(x, x, x, mask)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(x + attn_output)
ffn_output = self.ffn(out1)
ffn_output = self.dropout2(ffn_output, training=training)
out2 = self.layernorm2(out1 + ffn_output)
return out2
def positional_encoding(max_positions: int, d_model: int) -> tf.Tensor:
"""Returns all the possible positional encodings.
Args:
max_positions: Maximum number of positions.
d_model: Dimension of features of MultiHeadAttention layers.
Returns:
The position encodings of the input sequence.
"""
def get_angles(pos, i, d_model):
angle_rates = 1 / np.power(DEFAULT_POSITIONAL_BASE,
(2 * (i // 2)) / np.float32(d_model))
return pos * angle_rates
angle_rads = get_angles(
np.arange(max_positions)[:, np.newaxis],
np.arange(d_model)[np.newaxis, :], d_model)
# Apply sin to even indices in the array; 2i.
angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
# Apply cos to odd indices in the array; 2i+1.
angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
pos_encoding = angle_rads[np.newaxis, ...]
return tf.cast(pos_encoding, dtype=tf.float32)
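# Added note: positional_encoding returns a tensor of shape
# (1, max_positions, d_model); TransformerLM below slices the first seq_len
# positions and adds them to the scaled token embeddings.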
class TransformerLM(tf.keras.layers.Layer):
"""Transformer for next word prediction."""
def __init__(self, num_layers: int, d_embed: int, d_model: int,
num_heads: int, d_hidden: int, input_vocab_size: int,
maximum_position_encoding: int, dropout_rate: float):
super(TransformerLM, self).__init__()
self.d_model = d_model
self.d_embed = d_embed
self.num_layers = num_layers
# Set mask_zero to True to be consistent with the LSTM model
# for StackOverflow in TFF.
self.embedding = tf.keras.layers.Embedding(
input_vocab_size, d_embed, mask_zero=True)
self.pos_encoding = positional_encoding(maximum_position_encoding,
self.d_embed)
self.embedding_proj = tf.keras.layers.Dense(d_model)
self.enc_layers = [
EncoderLayer(d_model, num_heads, d_hidden, dropout_rate)
for _ in range(num_layers)
]
self.embedding_out_proj = tf.keras.layers.Dense(d_embed)
self.dropout = tf.keras.layers.Dropout(dropout_rate)
def call(self, x: tf.Tensor, training: Optional[bool] = None) -> tf.Tensor:
seq_len = tf.shape(x)[1]
mask = 1 - tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0)
x = self.embedding(x)
x *= tf.math.sqrt(tf.cast(self.d_embed, tf.float32))
x += self.pos_encoding[:, :seq_len, :]
x = self.embedding_proj(x)
x = self.dropout(x, training=training)
for i in range(self.num_layers):
x = self.enc_layers[i](x, training, mask)
x = self.embedding_out_proj(x)
# Shape of output: (batch_size, input_seq_len, d_embed)
return x
def create_transformer_lm(vocab_size: int = 10000,
num_oov_buckets: int = 1,
dim_embed: int = 96,
dim_model: int = 512,
dim_hidden: int = 2048,
num_heads: int = 8,
num_layers: int = 1,
max_position_encoding: int = 1000,
dropout: float = 0.1,
name='transformer_lm') -> tf.keras.Model:
"""Create the transformer-based language model for next-token prediction.
Args:
vocab_size: Vocab size for normal tokens.
num_oov_buckets: Number of out of vocabulary buckets.
dim_embed: Dimension of the token embeddings.
dim_model: Dimension of features of MultiHeadAttention layers.
dim_hidden: Dimension of hidden layers of the FFN.
num_heads: Number of attention heads.
num_layers: Number of Transformer blocks.
max_position_encoding: Maximum number of positions for position embeddings.
dropout: Dropout rate.
name: Name of the model.
Returns:
A transformer model.
"""
if max_position_encoding > DEFAULT_POSITIONAL_BASE:
raise ValueError(
'The maximum position cannot exceed the default positional base {}'
.format(DEFAULT_POSITIONAL_BASE))
extended_vocab_size = vocab_size + 3 + num_oov_buckets # For pad/bos/eos/oov.
inputs = tf.keras.layers.Input(shape=(None,))
transformer = TransformerLM(
num_layers,
dim_embed,
dim_model,
num_heads,
dim_hidden,
extended_vocab_size,
max_position_encoding,
dropout_rate=dropout)
features = transformer(inputs)
# Use shared embedding by default. Put it outside TransformerLM because of
# the initialization of transformer.embedding.embeddings.
transpose_embedding = TransposableEmbedding(transformer.embedding)
logits = transpose_embedding(features)
return tf.keras.Model(inputs=inputs, outputs=logits, name=name)
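# Illustrative usage sketch (not part of the original module; parameter values
# below are hypothetical). Assuming TensorFlow 2.x and the definitions above, a
# small model can be built and applied to a dummy batch of token ids:
#
#   model = create_transformer_lm(vocab_size=100, num_oov_buckets=1,
#                                 dim_embed=32, dim_model=64, dim_hidden=128,
#                                 num_heads=4, num_layers=2,
#                                 max_position_encoding=50, dropout=0.1)
#   dummy_ids = tf.zeros((2, 20), dtype=tf.int32)  # (batch_size, seq_len)
#   logits = model(dummy_ids)  # expected shape: (2, 20, 100 + 3 + 1)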
| StarcoderdataPython |
3202794 | <reponame>G00dBye/YYMS<filename>scripts/npc/enterGreatElf.py
if sm.getChr().getJob() >= 2300:
sm.warp(910150100)
# else:
| StarcoderdataPython |
3334698 | <filename>Python/Tests/GlassTests/PythonTests/Python/StepNativeToPython/py_mod.py
import cpp_mod
def callback():
print('ok')
cpp_mod.global_func(callback)
| StarcoderdataPython |
1779369 | # Imports from 3rd party libraries
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import dash_table
# Imports from this application
from app import app
from joblib import load
# 1 column layout
# https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout
column1 = dbc.Col(
[
dcc.Markdown(
"""
## MVP Predictions
Every year the Baseball Writers Association of America (BBWAA) votes on Major League Baseball players to be awarded the Most Valuable Player (MVP) award. Two awards are given each season -- one for a player in the American League, one for a player in the National League. The award has been bestowed every year since 1931. I retrieved a custom CSV dataset from Fangraphs.com listing every season of every player since the award started, and began by eliminating seasons in which a player had fewer than 400 plate appearances. From this dataset I chose features I thought would be important in the minds of BBWAA writers when selecting a player to vote for. MLB player stats have been at the forefront of sports statistics for years, and it would be inefficient to include numbers that voters are either unaware of or do not consider important enough to influence their vote.
"""
),
html.Img(src='assets/SHAP_IMG.png', className='img-fluid'),
dcc.Markdown(
"""
Shown above is an example of a theoretical player in the 2019 season. This player played in the American League for the New York Yankees and has a 15% chance of winning the AL MVP. You can see that having 148 RBIs has a huge positive impact on his odds. The feature with the most negative impact is Run Rank, as being ranked only 23rd is not ideal for a player trying to be considered the best.
Along with a model that predicts probabilities for made-up player stats, I built one that attempts to predict the MVP winner for any given historical season. This model is based on the same features and uses training data from the 10 seasons leading up to the chosen season. It predicted winners with accuracy I was not expecting. The downside, however, is that it could only use position-player data and therefore could never predict a pitcher to be named MVP. In seasons where a pitcher was given the award, the model was always wrong, regardless of its best efforts.
"""
),
dcc.Markdown("# Actual Outcome"),
html.Img(src='assets/PIC.png', className='img-fluid'),
dcc.Markdown('# Predicted Outcome'),
html.Img(src='assets/PLEASE_WORK.jpg', className='img-fluid'),
],
md=10
)
layout = dbc.Row([column1])
 | StarcoderdataPython |
3221284 | <reponame>Imperssonator/afm-cnn
""" Database models for microstructure dataset """
import os
from sqlalchemy import Column, Float, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
Base = declarative_base()
dbpath = 'sqlite:///data/microstructures.sqlite'
class User(Base):
__tablename__ = 'user'
user_id = Column(Integer, primary_key=True)
username = Column(String(250))
givenname = Column(String(250))
familyname = Column(String(250))
email = Column(String(250))
orcid = Column(String(250))
micrographs = relationship('Micrograph')
class Collection(Base):
__tablename__ = 'collection'
collection_id = Column(Integer, primary_key=True)
name = Column(String(250))
doi = Column(String(250))
class Sample(Base):
__tablename__ = 'sample'
sample_id = Column(Integer, primary_key=True)
label = Column(String(250))
anneal_time = Column(Float)
anneal_time_unit = Column(String(16))
anneal_temperature = Column(Float)
anneal_temp_unit = Column(String(16))
cool_method = Column(String(16))
micrographs = relationship('Micrograph')
class Micrograph(Base):
__tablename__ = 'micrograph'
micrograph_id = Column(Integer, primary_key=True)
path = Column(String())
micron_bar = Column(Float)
micron_bar_units = Column(String(64))
micron_bar_px = Column(Integer)
magnification = Column(Integer)
detector = Column(String(16))
sample_key = Column(Integer, ForeignKey('sample.sample_id'))
sample = relationship('Sample', back_populates='micrographs')
contributor_key = Column(Integer, ForeignKey('user.user_id'))
contributor = relationship('User', back_populates='micrographs')
primary_microconstituent = Column(String(250))
if __name__ == '__main__':
engine = create_engine(dbpath)
Base.metadata.create_all(engine)
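# Illustrative usage sketch (field values below are hypothetical, not part of
# the original module): rows can be added through a SQLAlchemy session once
# the tables exist, e.g.
#
#   from sqlalchemy.orm import sessionmaker
#   Session = sessionmaker(bind=engine)
#   session = Session()
#   sample = Sample(label='sample-1', anneal_time=8.0, anneal_time_unit='h',
#                   anneal_temperature=700.0, anneal_temp_unit='C')
#   session.add(sample)
#   session.commit()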
| StarcoderdataPython |
3232911 | <reponame>LincolnBryant/htcondor<gh_stars>0
#!/usr/bin/env pytest
import classad
import htcondor
import logging
import os
import time
from ornithology import *
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
@standup
def condor(test_dir):
with Condor(
local_dir=test_dir / "condor",
config={
"DAEMON_LIST": "COLLECTOR MASTER",
"USE_SHARED_PORT": False
}
) as condor:
yield condor
@standup
def collector(condor):
collector = condor.get_local_collector()
collector.advertise([classad.ClassAd({"MyType": "Accounting", "Name": "Accounting-1", "IsPytest": True})], "UPDATE_ACCOUNTING_AD")
collector.advertise([classad.ClassAd({"MyType": "Accounting", "Name": "Accounting-2", "IsPytest": True})], "UPDATE_ACCOUNTING_AD")
collector.advertise([classad.ClassAd({"MyType": "Collector", "Name": "Collector-1", "IsPytest": True})], "UPDATE_COLLECTOR_AD")
collector.advertise([classad.ClassAd({"MyType": "DaemonMaster", "Name": "DaemonMaster-1", "IsPytest": True})], "UPDATE_MASTER_AD")
collector.advertise([classad.ClassAd({"MyType": "Machine", "Name": "Machine-1", "IsPytest": True})], "UPDATE_STARTD_AD")
collector.advertise([classad.ClassAd({"MyType": "Machine", "Name": "Machine-2", "IsPytest": True})], "UPDATE_STARTD_AD")
collector.advertise([classad.ClassAd({"MyType": "Machine", "Name": "Machine-3", "IsPytest": True})], "UPDATE_STARTD_AD")
collector.advertise([classad.ClassAd({"MyType": "Machine", "Name": "Machine-4", "IsPytest": True})], "UPDATE_STARTD_AD")
collector.advertise([classad.ClassAd({"MyType": "Negotiator", "Name": "Negotiator-1", "IsPytest": True})], "UPDATE_NEGOTIATOR_AD")
collector.advertise([classad.ClassAd({"MyType": "Scheduler", "Name": "Scheduler-1", "MyAddress": "<127.0.0.1:38900?addrs=127.0.0.1-38900&alias=localhost&noUDP&sock=startd_6695_1b0e>", "IsPytest": True})], "UPDATE_SCHEDD_AD")
collector.advertise([classad.ClassAd({"MyType": "Submitter", "Name": "Submitter-1", "MyAddress": "<127.0.0.1:38900?addrs=127.0.0.1-38900&alias=localhost&noUDP&sock=startd_6695_1b0e>", "IsPytest": True})], "UPDATE_SUBMITTOR_AD")
yield collector
@action
def accounting_ads(collector):
ads = collector.query(htcondor.AdTypes.Accounting, "IsPytest == True", [])
return ads
@action
def collector_ad_counts(collector):
ads = collector.query(htcondor.AdTypes.Any, "IsPytest == True")
counts = {}
for ad in ads :
if ad['MyType'] in counts:
counts[ad['MyType']] += 1
else:
counts[ad['MyType']] = 1
return counts
@action
def collector_ads(collector):
ads = collector.query(htcondor.AdTypes.Collector, "IsPytest == True", [])
return ads
@action
def machine_ads(collector):
ads = collector.query(htcondor.AdTypes.Startd, "IsPytest == True", [])
return ads
@action
def master_ads(collector):
ads = collector.query(htcondor.AdTypes.Master, "IsPytest == True", [])
return ads
@action
def negotiator_ads(collector):
ads = collector.query(htcondor.AdTypes.Negotiator, "IsPytest == True", [])
return ads
@action
def scheduler_ads(collector):
ads = collector.query(htcondor.AdTypes.Schedd, "IsPytest == True", [])
return ads
@action
def submitter_ads(collector):
ads = collector.query(htcondor.AdTypes.Submitter, "IsPytest == True", [])
return ads
@action
def locate_collector_ad(collector):
locate_collector_ad = collector.locate(htcondor.DaemonTypes.Collector)
return locate_collector_ad
@action
def locate_all_startd_ads(collector):
locate_all_startd_ads = collector.locateAll(htcondor.DaemonTypes.Startd)
return locate_all_startd_ads
class TestCollectorQuery:
def test_collector_ad_counts(self, collector_ad_counts):
assert collector_ad_counts["Accounting"] == 2
assert collector_ad_counts["Collector"] == 1
assert collector_ad_counts["DaemonMaster"] == 1
assert collector_ad_counts["Machine"] == 4
assert collector_ad_counts["Negotiator"] == 1
assert collector_ad_counts["Scheduler"] == 1
assert collector_ad_counts["Submitter"] == 1
def test_accounting_ads(self, accounting_ads):
assert len(accounting_ads) == 2
def test_collector_ads(self, collector_ads):
assert len(collector_ads) == 1
def test_machine_ads(self, machine_ads):
assert len(machine_ads) == 4
def test_master_ads(self, master_ads):
assert len(master_ads) == 1
def test_negotiator_ads(self, negotiator_ads):
assert len(negotiator_ads) == 1
def test_scheduler_ads(self, scheduler_ads):
assert len(scheduler_ads) == 1
def test_submitter_ads(self, submitter_ads):
assert len(submitter_ads) == 1
def test_locate(self, locate_collector_ad):
assert locate_collector_ad["MyType"] == "Collector"
def test_locate_all(self, locate_all_startd_ads):
assert len(locate_all_startd_ads) == 4
 | StarcoderdataPython |
1607371 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Topic: Calling a method by its name given as a string
Desc :
"""
import math
class Point:
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
# !r calls the __repr__() method of the corresponding argument
return 'Point({!r:},{!r:})'.format(self.x, self.y)
def distance(self, x, y):
return math.hypot(self.x - x, self.y - y)
p = Point(2, 3)
d = getattr(p, 'distance')(0, 0) # Calls p.distance(0, 0)
import operator
operator.methodcaller('distance', 0, 0)(p)
points = [
Point(1, 2),
Point(3, 0),
Point(10, -3),
Point(-5, -7),
Point(-1, 8),
Point(3, 2)
]
# Sort by distance from origin (0, 0)
points.sort(key=operator.methodcaller('distance', 0, 0))
| StarcoderdataPython |
122133 | <filename>enthought/chaco/axis.py
# proxy module
from __future__ import absolute_import
from chaco.axis import *
| StarcoderdataPython |
1732564 | <reponame>Evgengrmit/course-project
# Copyright 2020 <NAME> <EMAIL>
from catboost import CatBoostClassifier, Pool
from sklearn.model_selection import train_test_split
from detector import Preprocess as pp
from sklearn.metrics import roc_auc_score, accuracy_score
import sys
class CatBoostModel:
def __init__(self):
self._preprocess = pp.Preprocess()
self._model = CatBoostClassifier()
self._model.load_model("models/Saving/CBmodel.cbm")
self.x = self.y = 0
self._train_data = None
self._test_data = None
@property
def model(self):
return self._model
def set_new_model(self, cbm_model=""):
if cbm_model == '':
raise IOError("No path to model")
self._model.load_model(cbm_model)
def set_pool(self, path_to_dataset='', test_size=0.3):
if path_to_dataset != '':
self._preprocess.set_dataset(path_to_dataset)
self.x, self.y = self._preprocess.process_data_for_gradient_with_label()
x_train, x_test, y_train, y_test = train_test_split(self.x, self.y, test_size=test_size, random_state=42)
self._train_data = Pool(x_train, y_train)
self._test_data = Pool(x_test, y_test)
def get_predict_with_label(self, path_to_data=''):
if path_to_data == '':
raise IOError("No path to data")
self._preprocess.set_dataset(path_to_data)
self.x, self.y = self._preprocess.process_data_for_gradient_with_label()
return self._model.predict(self.x)
def relearn_model(self, path_to_dataset='', test_size=0.3):
if path_to_dataset == '':
raise IOError("No path to dataset")
self.set_pool(path_to_dataset=path_to_dataset, test_size=test_size)
self._model = CatBoostClassifier(iterations=200,
depth=2,
learning_rate=0.4,
loss_function='Logloss',
verbose=False)
self._model.fit(self._train_data, plot=True)
def get_test_accuracy(self):
return accuracy_score(self._test_data.get_label(), self._model.predict(self._test_data.get_features()))
def get_test_auc(self):
return roc_auc_score(self._test_data.get_label(),
self._model.predict_proba(self._test_data.get_features())[:, 1])
def get_predict_unknown(self, path_to_data=''):
if path_to_data == '':
raise IOError("No path to data")
self._preprocess.set_dataset(path_to_data)
self.x = self._preprocess.get_data_for_predict_gradient()
return self._model.predict(self.x)
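# Illustrative usage sketch (the dataset path below is hypothetical, not part
# of the original module):
#
#   cb = CatBoostModel()
#   cb.relearn_model(path_to_dataset='data/transactions.csv', test_size=0.3)
#   print('test accuracy:', cb.get_test_accuracy())
#   print('test ROC AUC:', cb.get_test_auc())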
| StarcoderdataPython |
1674638 | from matplotlib import pyplot as plt
import numpy as np
from model_overview import define, define_from_file
definition = define_from_file()
model = definition["model"]
# for index, beam in enumerate(model.beams):
# plt.clf()
# plt.axis('off')
# points = beam.points
# edges = beam.edges
# for i, j in edges:
# vertices = np.take(points, (i, j), axis=0)
# plt.plot(vertices[:, 0], vertices[:, 1], color=(0, 0, 0))
#
# ax = plt.gca()
# ax.set_aspect('equal')
# plt.savefig(f"part-{index}.png", dpi=500, transparent=True)
#
# np.savez(f"data/overview_{i}.npz",
# eigenvalue=np.array(e),
# points=points,
# edges=edges,
# eigenvector=eigenvector,
# force=force,
# stiffness=M)
points = model.point_matrix()
edges = model.edge_matrix()
for index in (0, 1, 2):
plt.clf()
plt.axis('off')
# for i, j in edges:
# vertices = np.take(points, (i, j), axis=0)
# plt.plot(vertices[:, 0], vertices[:, 1], color=(0, 0, 0))
data = np.load(f"data/overview_{index}.npz")
eigenvalue, eigenvector = data["eigenvalue"], data["eigenvector"].reshape(-1, 3)
print(index, eigenvalue)
xs, ys = points[:, 0], points[:, 1]
eigenvector *= 30
dxs, dys = eigenvector[:, 0], eigenvector[:, 1]
color = (1, 0, 0) if index == 0 else (255 / 255, 165 / 255, 0)
for x, y, dx, dy in zip(xs, ys, dxs, dys):
if np.linalg.norm([dx, dy]) < 1e-2:
continue
plt.gca().arrow(
x, y, dx, dy,
# length_includes_head=True,
color=color,
width=0.5,
)
# plt.show()
plt.savefig(f"eigenvector-{index}.svg", dpi=500, transparent=True)
| StarcoderdataPython |
1727153 | array([[3.06957962, 0.70322209],
[3.21487456, 0.6941981 ],
[3.36178869, 0.68840816],
[3.50994615, 0.68517987],
[3.65898274, 0.68379808],
[3.80855501, 0.68351701],
[3.95831501, 0.6835691 ],
[4.10807562, 0.68362114],
[4.25783491, 0.68367411],
[4.40759492, 0.68372796],
[4.55735493, 0.68378122],
[4.70711446, 0.68383627],
[4.85687399, 0.68389387],
[5.00663304, 0.68395211],
[5.15614037, 0.68473156],
[5.30484411, 0.68767909],
[5.45207032, 0.69417401],
[5.59702974, 0.70547385],
[5.73884978, 0.72265311],
[5.87660197, 0.74657918],
[6.00934528, 0.77787928],
[6.13612983, 0.81697437],
[6.25605207, 0.8640421 ],
[6.36825905, 0.9190548 ],
[6.471951 , 0.98180992],
[6.56636624, 1.05196745],
[6.65076793, 1.12907228],
[6.72442806, 1.21256733],
[6.78658408, 1.30180813],
[6.83644241, 1.3960382 ],
[6.87313107, 1.494375 ],
[6.89567222, 1.59575902],
[6.90291635, 1.69886654],
[6.89362032, 1.80193543],
[6.87102272, 1.90360723],
[6.83472686, 2.00266609],
[6.78420056, 2.09763454],
[6.72147717, 2.18793819],
[6.64754014, 2.27302272],
[6.5633645 , 2.35253789],
[6.46995646, 2.4263443 ],
[6.36835193, 2.49451037],
[6.2596731 , 2.55735486],
[6.14513958, 2.61546391],
[6.02605092, 2.66967356],
[5.90378724, 2.72105064],
[5.7797842 , 2.77083483],
[5.65089933, 2.82355995],
[5.52245232, 2.87720029],
[5.39448107, 2.93183383],
[5.26702513, 2.98754188],
[5.14012449, 3.04440642],
[5.01381929, 3.10250952],
[4.88815398, 3.16194185],
[4.76317526, 3.22279832],
[4.63912129, 3.28556878],
[4.51585714, 3.34997018],
[4.39324993, 3.41572581],
[4.27116679, 3.48256057],
[4.14947609, 3.55020279],
[4.02804628, 3.61838157],
[3.90674682, 3.68682834],
[3.78606476, 3.75466556],
[3.66542201, 3.8214176 ],
[3.54460944, 3.88641983],
[3.42338074, 3.94904778],
[3.30153856, 4.00867496],
[3.17894761, 4.06467069],
[3.05553044, 4.11641117],
[2.93126392, 4.16327026],
[2.80618597, 4.20463834],
[2.68039857, 4.23991067],
[2.55407471, 4.26848908],
[2.42746615, 4.28978676],
[2.30091199, 4.30322969],
[2.17484672, 4.30827884],
[2.04981534, 4.30438234],
[1.92648423, 4.29103379],
[1.80565313, 4.26777293],
[1.68826402, 4.23420633],
[1.5754052 , 4.19002993],
[1.4683113 , 4.13504625],
[1.36834128, 4.06919802],
[1.27697034, 3.99255106],
[1.19582937, 3.90523629],
[1.1267356 , 3.80747621],
[1.06615412, 3.70327052],
[1.01405515, 3.59314464],
[0.97045343, 3.4775375 ],
[0.93541423, 3.35682491],
[0.90903203, 3.23135199],
[0.89144172, 3.10144282],
[0.88279594, 2.96742763],
[0.88323932, 2.82966736],
[0.89292262, 2.68856632],
[0.91199469, 2.54461666],
[0.94050228, 2.39854881],
[0.97701182, 2.25607975],
[1.02013481, 2.11903117],
[1.06965435, 1.98755457],
[1.12540272, 1.86177002],
[1.18723904, 1.74178564],
[1.25504951, 1.62771468],
[1.32875198, 1.51969174],
[1.40829254, 1.41787883],
[1.4936732 , 1.32250506],
[1.58492633, 1.23385128],
[1.68215631, 1.15231404],
[1.78567299, 1.07861045],
[1.89491637, 1.01238947],
[2.00941427, 0.95334177],
[2.12874407, 0.90116915],
[2.25253104, 0.8555959 ],
[2.38042271, 0.81634066],
[2.5120838 , 0.78311346],
[2.64717801, 0.7555887 ],
[2.78536227, 0.73339387],
[2.92628376, 0.71610111]])
 | StarcoderdataPython |
1701770 | import unittest
import orca
import os.path as path
from setup.settings import *
from pandas.util.testing import *
def _create_odf_csv(data, dfsDatabase):
# call function default_session() to get session object
s = orca.default_session()
dolphindb_script = """
login("admin", "<PASSWORD>")
dbPath="dfs://groupbyDateDB"
if(existsDatabase(dbPath))
dropDatabase(dbPath)
schema = extractTextSchema('{data}')
cols = exec name from schema
types = ["INT", "DATE", "SYMBOL", "BOOL", "SHORT", "INT", "LONG", "FLOAT", "DOUBLE"]
schema = table(50000:0, cols, types)
tt=schema(schema).colDefs
tt.drop!(`typeInt)
tt.rename!(`name`type)
db = database(dbPath, RANGE, 1 501 1001 1501 2001 2501 3001)
tb = db.createPartitionedTable(schema, `tb, `id)
db.loadTextEx(`tb,`id, '{data}' ,, tt)""".format(data=data)
s.run(dolphindb_script)
return orca.read_table(dfsDatabase, 'tb')
class Csv:
pdf_csv = None
odfs_csv = None
class DfsGroupByTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# configure data directory
DATA_DIR = path.abspath(path.join(__file__, "../setup/data"))
fileName = 'groupbyDate.csv'
data = os.path.join(DATA_DIR, fileName)
data = data.replace('\\', '/')
dfsDatabase = "dfs://groupbyDateDB"
# Orca connect to a DolphinDB server
orca.connect(HOST, PORT, "admin", "123456")
Csv.pdf_csv = pd.read_csv(data, parse_dates=[1], dtype={"id": np.int32, "tbool": np.bool, "tshort": np.int16,
"tint": np.int32, "tlong": np.int64, "tfloat": np.float32,
"tdouble": np.float64})
Csv.pdf_csv['tbool'] = Csv.pdf_csv["tbool"].astype(np.bool)
Csv.odfs_csv = _create_odf_csv(data, dfsDatabase)
Csv.odfs_csv.set_index("id", inplace=True)
Csv.pdf_csv.set_index("id", inplace=True)
@property
def pdf_csv(self):
return Csv.pdf_csv
@property
def odfs_csv(self):
return Csv.odfs_csv
def test_dfs_groupby_param_by_date_all(self):
pass
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
# a = self.odfs_csv.groupby('date').all()
# b = self.pdf_csv.groupby('date').all()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_any(self):
pass
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
# a = self.odfs_csv.groupby('date').any()
# b = self.pdf_csv.groupby('date').any()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_bfill(self):
a = self.odfs_csv.groupby('date').bfill()
b = self.pdf_csv.groupby('date').bfill()
# TODO: bfill for strings is not allowed in Orca
assert_frame_equal(a.to_pandas().sort_index().reset_index(drop=True).iloc[:, 1:],
b.sort_index().reset_index(drop=True).iloc[:, 1:], check_dtype=False,
check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_count(self):
a = self.odfs_csv.groupby('date').count()
b = self.pdf_csv.groupby('date').count()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_cumcount(self):
a = self.odfs_csv.groupby('date').cumcount()
b = self.pdf_csv.groupby('date').cumcount()
# TODO: TO MUCH DIFFS
self.assertIsInstance(a.to_pandas(), DataFrame)
self.assertIsInstance(b, Series)
# assert_series_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_cummax(self):
a = self.odfs_csv.drop(columns=['tsymbol']).groupby('date').cummax()
b = self.pdf_csv.drop(columns=['tsymbol']).groupby('date').cummax()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_dfs_groupby_param_by_date_cummin(self):
a = self.odfs_csv.drop(columns=['tsymbol']).groupby('date').cummin()
b = self.pdf_csv.drop(columns=['tsymbol']).groupby('date').cummin()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_cumprod(self):
a = self.odfs_csv.groupby('date').cumprod()
b = self.pdf_csv.groupby('date').cumprod()
# TODO: TO MUCH DIFFS
assert_frame_equal(a.to_pandas().iloc[0:5].reset_index(drop=True), b.iloc[0:5].reset_index(drop=True), check_dtype=False,
check_index_type=False, check_less_precise=1)
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_cumsum(self):
a = self.odfs_csv.groupby('date').cumsum()
b = self.pdf_csv.groupby('date').cumsum()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True),
check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_ffill(self):
a = self.odfs_csv.groupby('date').ffill()
b = self.pdf_csv.groupby('date').ffill()
assert_frame_equal(a.to_pandas().sort_index().reset_index(drop=True).iloc[:, 1:],
b.sort_index().reset_index(drop=True).iloc[:, 1:], check_dtype=False,
check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_first(self):
a = self.odfs_csv.groupby('date').first()
b = self.pdf_csv.groupby('date').first()
b['tbool'] = b['tbool'].astype(np.bool, errors="ignore")
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_head(self):
# TODO: NOT SUPPORTED FOR groupby
pass
# a = self.odfs_csv.groupby('date').head()
# b = self.pdf_csv.groupby('date').head()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_last(self):
a = self.odfs_csv.groupby('date').last()
b = self.pdf_csv.groupby('date').last()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_max(self):
a = self.odfs_csv.groupby('date').max()
b = self.pdf_csv.groupby('date').max()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_mean(self):
a = self.odfs_csv.groupby('date').mean()
b = self.pdf_csv.groupby('date').mean()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_median(self):
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
pass
# a = self.odfs_csv.groupby('date').median()
# b = self.pdf_csv.groupby('date').median()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_min(self):
a = self.odfs_csv.groupby('date').min()
b = self.pdf_csv.groupby('date').min()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_ngroup(self):
# TODO: NOT IMPLEMENTED
pass
# a = self.odfs_csv.groupby('date').ngroup()
# b = self.pdf_csv.groupby('date').ngroup()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_nth(self):
# TODO: NOT IMPLEMENTED
pass
# a = self.odfs_csv.groupby('date').nth(0)
# b = self.pdf_csv.groupby('date').nth(0)
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_ohlc(self):
a = self.odfs_csv.drop(columns=['tsymbol', "date"]).groupby(['tint', 'tbool']).ohlc()
b = self.pdf_csv.drop(columns=['tsymbol', "date"]).groupby(['tint', 'tbool']).ohlc()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_prod(self):
a = self.odfs_csv.groupby('date').prod()
b = self.pdf_csv.groupby('date').prod()
# TODO:DIFFS
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_rank(self):
a = self.odfs_csv.groupby('date').rank()
# TODO: pandas doesn't support
# b = self.pdf_csv.groupby('date').rank()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_pct_change(self):
a = self.odfs_csv.drop(columns=["tbool", "tsymbol"]).groupby('date').pct_change()
b = self.pdf_csv.drop(columns=["tbool", "tsymbol"]).groupby('date').pct_change()
assert_frame_equal(a.to_pandas(), b.replace(np.inf, np.nan), check_dtype=False, check_index_type=False, check_less_precise=2)
def test_dfs_groupby_param_by_date_size(self):
a = self.odfs_csv.groupby('date').size()
b = self.pdf_csv.groupby('date').size()
assert_series_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_sem(self):
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
pass
# a = self.odfs_csv.groupby('date').sem()
# b = self.pdf_csv.groupby('date').sem()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_std(self):
a = self.odfs_csv.groupby('date').std()
b = self.pdf_csv.groupby('date').std()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_sum(self):
a = self.odfs_csv.groupby('date').sum()
b = self.pdf_csv.groupby('date').sum()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_var(self):
a = self.odfs_csv.groupby('date').var()
b = self.pdf_csv.groupby('date').var()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_tail(self):
# TODO: NOT SUPPORTED FOR groupby
pass
# a = self.odfs_csv.groupby('date').tail()
# b = self.pdf_csv.groupby('date').tail()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_all(self):
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
pass
# a = self.odfs_csv.groupby('tsymbol').all()
# b = self.pdf_csv.groupby('tsymbol').all()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_any(self):
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
pass
# a = self.odfs_csv.groupby('tsymbol').any()
# b = self.pdf_csv.groupby('tsymbol').any()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_bfill(self):
a = self.odfs_csv.groupby('tsymbol').bfill()
b = self.pdf_csv.groupby('tsymbol').bfill()
assert_frame_equal(a.to_pandas().sort_index().reset_index(drop=True).iloc[:, 1:],
b.sort_index().reset_index(drop=True).iloc[:, 1:], check_dtype=False,
check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_count(self):
a = self.odfs_csv.groupby('tsymbol').count()
b = self.pdf_csv.groupby('tsymbol').count()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_cumcount(self):
a = self.odfs_csv.groupby('tsymbol').cumcount()
b = self.pdf_csv.groupby('tsymbol').cumcount()
# TODO: TO MUCH DIFFS
self.assertIsInstance(a.to_pandas(), DataFrame)
self.assertIsInstance(b, Series)
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_cummax(self):
a = self.odfs_csv.drop(columns=['date']).groupby('tsymbol').cummax()
b = self.pdf_csv.drop(columns=['date']).groupby('tsymbol').cummax()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_dfs_groupby_param_by_symbol_cummin(self):
a = self.odfs_csv.drop(columns=['date']).groupby('tsymbol').cummin()
b = self.pdf_csv.drop(columns=['date']).groupby('tsymbol').cummin()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_dfs_groupby_param_by_symbol_cumprod(self):
a = self.odfs_csv.groupby('tsymbol').cumprod()
b = self.pdf_csv.groupby('tsymbol').cumprod()
assert_frame_equal(a.to_pandas().iloc[0:5].reset_index(drop=True), b.iloc[0:5].reset_index(drop=True), check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_cumsum(self):
a = self.odfs_csv.groupby('tsymbol').cumsum()
b = self.pdf_csv.groupby('tsymbol').cumsum()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True),
check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_ffill(self):
a = self.odfs_csv.groupby('tsymbol').ffill()
b = self.pdf_csv.groupby('tsymbol').ffill()
assert_frame_equal(a.to_pandas().sort_index().reset_index(drop=True).iloc[:, 1:],
b.sort_index().reset_index(drop=True).iloc[:, 1:], check_dtype=False,
check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_first(self):
a = self.odfs_csv.groupby('tsymbol').first()
b = self.pdf_csv.groupby('tsymbol').first()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_head(self):
# TODO: NOT SUPPORTED FOR groupby
pass
# a = self.odfs_csv.groupby('tsymbol').head()
# b = self.pdf_csv.groupby('tsymbol').head()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_last(self):
a = self.odfs_csv.groupby('tsymbol').last()
b = self.pdf_csv.groupby('tsymbol').last()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_max(self):
a = self.odfs_csv.groupby('tsymbol').max()
b = self.pdf_csv.groupby('tsymbol').max()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_mean(self):
a = self.odfs_csv.groupby('tsymbol').mean()
b = self.pdf_csv.groupby('tsymbol').mean()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_median(self):
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
pass
# a = self.odfs_csv.groupby('tsymbol').median()
# b = self.pdf_csv.groupby('tsymbol').median()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_min(self):
a = self.odfs_csv.groupby('tsymbol').min()
b = self.pdf_csv.groupby('tsymbol').min()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_ngroup(self):
# TODO: NOT IMPLEMENTED
pass
# a = self.odfs_csv.groupby('tsymbol').ngroup()
# b = self.pdf_csv.groupby('tsymbol').ngroup()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_nth(self):
# TODO: NOT IMPLEMENTED
pass
# a = self.odfs_csv.groupby('tsymbol').nth(0)
# b = self.pdf_csv.groupby('tsymbol').nth(0)
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_ohlc(self):
a = self.odfs_csv.groupby('tsymbol').ohlc()
# pandas doesn't support
# b = self.pdf_csv.groupby('tsymbol').ohlc()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_prod(self):
a = self.odfs_csv.groupby('tsymbol').prod()
b = self.pdf_csv.groupby('tsymbol').prod()
assert_frame_equal(a.to_pandas(), b.fillna(0), check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_rank(self):
a = self.odfs_csv.groupby('tsymbol').rank()
b = self.pdf_csv.groupby('tsymbol').rank()
# TODO: DIFFERENT METHOD
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_pct_change(self):
a = self.odfs_csv.drop(columns=["tbool", "date"]).groupby('tsymbol').pct_change()
b = self.pdf_csv.drop(columns=["tbool", "date"]).groupby('tsymbol').pct_change()
assert_frame_equal(a.to_pandas(), b.replace(np.inf, np.nan), check_dtype=False, check_index_type=False,
check_less_precise=2)
def test_dfs_groupby_param_by_symbol_size(self):
a = self.odfs_csv.groupby('tsymbol').size()
b = self.pdf_csv.groupby('tsymbol').size()
assert_series_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_sem(self):
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
pass
# a = self.odfs_csv.groupby('tsymbol').sem()
# b = self.pdf_csv.groupby('tsymbol').sem()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_std(self):
a = self.odfs_csv.groupby('tsymbol').std()
b = self.pdf_csv.groupby('tsymbol').std()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_sum(self):
a = self.odfs_csv.groupby('tsymbol').sum()
b = self.pdf_csv.groupby('tsymbol').sum()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_var(self):
a = self.odfs_csv.groupby('tsymbol').var()
b = self.pdf_csv.groupby('tsymbol').var()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_tail(self):
# TODO: NOT SUPPORTED FOR groupby
pass
# a = self.odfs_csv.groupby('tsymbol').tail()
# b = self.pdf_csv.groupby('tsymbol').tail()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_all(self):
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
pass
# a = self.odfs_csv.groupby('tlong').all()
# b = self.pdf_csv.groupby('tlong').all()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_any(self):
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
pass
# a = self.odfs_csv.groupby('tlong').any()
# b = self.pdf_csv.groupby('tlong').any()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_bfill(self):
a = self.odfs_csv.groupby('tlong').bfill()
b = self.pdf_csv.groupby('tlong').bfill()
assert_frame_equal(a.to_pandas().sort_index().reset_index(drop=True).iloc[:, 1:],
b.sort_index().reset_index(drop=True).iloc[:, 1:], check_dtype=False,
check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_count(self):
a = self.odfs_csv.groupby('tlong').count()
b = self.pdf_csv.groupby('tlong').count()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_cumcount(self):
a = self.odfs_csv.groupby('tlong').cumcount()
b = self.pdf_csv.groupby('tlong').cumcount()
# TODO: TO MUCH DIFFS
self.assertIsInstance(a.to_pandas(), DataFrame)
self.assertIsInstance(b, Series)
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_cummax(self):
a = self.odfs_csv.drop(columns=['date', 'tsymbol']).groupby('tlong').cummax()
b = self.pdf_csv.drop(columns=['date', 'tsymbol']).groupby('tlong').cummax()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_dfs_groupby_param_by_long_cummin(self):
a = self.odfs_csv.drop(columns=['date', 'tsymbol']).groupby('tlong').cummin()
b = self.pdf_csv.drop(columns=['date', 'tsymbol']).groupby('tlong').cummin()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_dfs_groupby_param_by_long_cumprod(self):
a = self.odfs_csv.groupby('tlong').cumprod()
b = self.pdf_csv.groupby('tlong').cumprod()
# TODO: TO MUCH DIFFS
assert_frame_equal(a.to_pandas().iloc[0:50].reset_index(drop=True), b.iloc[0:50].reset_index(drop=True),
check_dtype=False, check_index_type=False, check_less_precise=1)
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_cumsum(self):
a = self.odfs_csv.groupby('tlong').cumsum()
b = self.pdf_csv.groupby('tlong').cumsum()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True),
check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_ffill(self):
a = self.odfs_csv.groupby('tlong').ffill()
b = self.pdf_csv.groupby('tlong').ffill()
assert_frame_equal(a.to_pandas().sort_index().reset_index(drop=True).iloc[:, 1:],
b.sort_index().reset_index(drop=True).iloc[:, 1:], check_dtype=False,
check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_first(self):
a = self.odfs_csv.groupby('tlong').first()
b = self.pdf_csv.groupby('tlong').first()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_head(self):
# TODO: NOT SUPPORTED FOR groupby
pass
# a = self.odfs_csv.groupby('tlong').head()
# b = self.pdf_csv.groupby('tlong').head()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_last(self):
a = self.odfs_csv.groupby('tlong').last()
b = self.pdf_csv.groupby('tlong').last()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_max(self):
a = self.odfs_csv.groupby('tlong').max()
b = self.pdf_csv.groupby('tlong').max()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_mean(self):
a = self.odfs_csv.groupby('tlong').mean()
b = self.pdf_csv.groupby('tlong').mean()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_median(self):
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
pass
# a = self.odfs_csv.groupby('tlong').median()
# b = self.pdf_csv.groupby('tlong').median()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_min(self):
a = self.odfs_csv.groupby('tlong').min()
b = self.pdf_csv.groupby('tlong').min()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_ngroup(self):
# TODO: NOT IMPLEMENTED
pass
# a = self.odfs_csv.groupby('tlong').ngroup()
# b = self.pdf_csv.groupby('tlong').ngroup()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_nth(self):
# TODO: NOT IMPLEMENTED
pass
# a = self.odfs_csv.groupby('tlong').nth()
# b = self.pdf_csv.groupby('tlong').nth()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_ohlc(self):
a = self.odfs_csv.drop(columns=['tsymbol', "date"]).groupby('tlong').ohlc()
b = self.pdf_csv.drop(columns=['tsymbol', "date"]).groupby('tlong').ohlc()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_prod(self):
a = self.odfs_csv.groupby('tlong').prod()
b = self.pdf_csv.groupby('tlong').prod()
# TODO:DIFFS
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_rank(self):
a = self.odfs_csv.groupby('tlong').rank()
# b = self.pdf_csv.groupby('tlong').rank()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_pct_change(self):
a = self.odfs_csv.drop(columns=["tbool", "tsymbol", "date"]).groupby('tlong').pct_change()
b = self.pdf_csv.drop(columns=["tbool", "tsymbol", "date"]).groupby('tlong').pct_change()
assert_frame_equal(a.to_pandas(), b.replace(np.inf, np.nan), check_dtype=False, check_index_type=False, check_less_precise=2)
def test_dfs_groupby_param_by_long_size(self):
a = self.odfs_csv.groupby('tlong').size().loc[0:]
b = self.pdf_csv.groupby('tlong').size()
assert_series_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_sem(self):
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
pass
# a = self.odfs_csv.groupby('tlong').sem()
# b = self.pdf_csv.groupby('tlong').sem()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_std(self):
a = self.odfs_csv.groupby('tlong').std()
b = self.pdf_csv.groupby('tlong').std()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_sum(self):
a = self.odfs_csv.groupby('tlong').sum()
b = self.pdf_csv.groupby('tlong').sum()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_var(self):
a = self.odfs_csv.groupby('tlong').var()
b = self.pdf_csv.groupby('tlong').var()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_tail(self):
# TODO: NOT SUPPORTED FOR groupby
pass
# a = self.odfs_csv.groupby('tlong').tail()
# b = self.pdf_csv.groupby('tlong').tail()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
1721948 | <gh_stars>0
from minpiler.std import M, L, use_object
def find_player_by_name(name: str):
M.unit.bind(M.at.UnitType.gamma)
if M.at.const.unit.name != name:
M.unit.bind(M.at.UnitType.beta)
if M.at.const.unit.name != name:
M.unit.bind(M.at.UnitType.alpha)
if M.at.const.unit.name != name:
return None
return M.at.const.unit
player = use_object()
if player is None:
player = find_player_by_name("your_name_here")
if player is not None:
M.unit.bind(M.at.UnitType.flare)
M.unit.move(player.x, player.y)
M.unit.target(player.x, player.y, 1)
| StarcoderdataPython |
3213406 | <reponame>Sehun0819/pytea<gh_stars>100-1000
import LibCall
import torch
from .distribution import Distribution
class Bernoulli(Distribution):
def __init__(self, probs=None, logits=None, validate_args=None):
if probs is not None:
self.is_scalar = isinstance(probs, float) or isinstance(probs, int)
self._param = probs
elif logits is not None:
self.is_scalar = isinstance(logits, float) or isinstance(logits, int)
self._param = logits
else:
raise ValueError(
"Either `probs` or `logits` must be specified, but not both."
)
if self.is_scalar:
empty_tensor = torch.Tensor()
batch_shape = empty_tensor.shape
else:
batch_shape = self._param.shape
self._batch_shape = batch_shape
super(Bernoulli, self).__init__(batch_shape, validate_args=validate_args)
def sample(self, sample_shape):
if self.is_scalar:
sample = LibCall.builtins.randFloat(0, 1, "ThBer")
if sample > self._param:
return Sample(1.0)
else:
return Sample(0.0)
return torch.rand(self._batch_shape)
# temporary class to be used like Bernoulli(p).sample().item()
# in reality, Bernoulli(p).sample() returns tensor.
class Sample:
def __init__(self, value):
self.value = value
def item(self):
return self.value
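# Illustrative usage sketch (not part of the original file): with a scalar
# probability, sampling follows the Bernoulli(p).sample().item() pattern noted
# above, e.g.
#
#   dist = Bernoulli(probs=0.5)
#   value = dist.sample(()).item()  # 0.0 or 1.0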
| StarcoderdataPython |