hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
aedb08eaa770e83a1aeb002b521f83da5390d75b | 4,571 | py | Python | launch/g325_g365_launch.py | cubicleguy/imu_ros2_uart | 9e3eaa60b004981e8c35d1a12a91bbf9e410402b | [
"Unlicense",
"BSD-3-Clause"
] | null | null | null | launch/g325_g365_launch.py | cubicleguy/imu_ros2_uart | 9e3eaa60b004981e8c35d1a12a91bbf9e410402b | [
"Unlicense",
"BSD-3-Clause"
] | null | null | null | launch/g325_g365_launch.py | cubicleguy/imu_ros2_uart | 9e3eaa60b004981e8c35d1a12a91bbf9e410402b | [
"Unlicense",
"BSD-3-Clause"
] | null | null | null | """Launch file for Epson G325/G365 imu_node for epson_imu_uart_ros2 ROS2 package"""
from launch import LaunchDescription
import launch_ros.actions
from launch.substitutions import LaunchConfiguration
from launch.actions import DeclareLaunchArgument
def generate_launch_description():
    """Build the launch description for the Epson G325/G365 IMU node.

    Declares the serial/topic/rate/filter configuration as launch arguments
    (each overridable from the command line) and starts ``imu_node`` from the
    ``epson_imu_uart_ros2`` package with those values passed as node
    parameters.
    """
    return LaunchDescription([
        DeclareLaunchArgument(
            name="serial_port",
            default_value="/dev/ttyUSB0",
            description="Serial port name"),

        DeclareLaunchArgument(
            name="frame_id",
            default_value="imu_link",
            description="imu message frame_id field"),

        DeclareLaunchArgument(
            name="imu_topic",
            default_value="/epson_imu/data_raw",
            description="topic name for publishing imu messages."),

        DeclareLaunchArgument(
            name="burst_polling_rate",
            default_value="4000.0",
            description="Set to atleast 2x the output rate of IMU. Should not need to change."),

        DeclareLaunchArgument(
            name="imu_dout_rate",
            # value: output rate (Hz)  Recommended Moving Average Filter
            # 0: 2000                  TAP>=0
            # 1: 1000                  TAP>=2
            # 2: 500                   TAP>=4
            # 3: 250                   TAP>=8
            # 4: 125                   TAP>=16
            # 5: 62.5                  TAP>=32
            # 6: 31.25                 TAP>=64
            # 7: 15.625                TAP=128
            # 8: 400                   TAP>=8
            # 9: 200                   TAP>=16
            # 10: 100                  TAP>=32
            # 11: 80                   TAP>=32
            # 12: 50                   TAP>=64
            # 13: 40                   TAP>=64
            # 14: 25                   TAP=128
            # 15: 20                   TAP=128
            default_value="4",
            description="Sets data output rate of IMU"),

        DeclareLaunchArgument(
            name="imu_filter_sel",
            # value: Filter Setting
            # 0: bypass
            # 1: Moving Average TAP2
            # 2: Moving Average TAP4
            # 3: Moving Average TAP8
            # 4: Moving Average TAP16
            # 5: Moving Average TAP32
            # 6: Moving Average TAP64
            # 7: Moving Average TAP128
            # 8: KAISER TAP32 Fc=50 Hz
            # 9: KAISER TAP32 Fc=100 Hz
            # 10: KAISER TAP32 Fc=200 Hz
            # 11: KAISER TAP32 Fc=400 Hz
            # 12: KAISER TAP64 Fc=50 Hz
            # 13: KAISER TAP64 Fc=100 Hz
            # 14: KAISER TAP64 Fc=200 Hz
            # 15: KAISER TAP64 Fc=400 Hz
            default_value="6",
            description="Sets the IMU filter"),

        DeclareLaunchArgument(
            name="output_32bit_en",
            default_value="true",
            description="Enables all sensor data output in 32-bit resolution or 16-bit resolution."),

        DeclareLaunchArgument(
            name="time_correction_en",
            default_value="false",
            description="Enables using IMU external counter reset function for timestamp with external 1PPS connected to IMU input pin for GPIO2/EXT"),

        DeclareLaunchArgument(
            name="ext_trigger_en",
            default_value="false",
            description="Enables using IMU external trigger function for sending IMU samples with external trigger signal connected to IMU input pin for GPIO2/EXT"),

        launch_ros.actions.Node(
            package='epson_imu_uart_ros2',
            # NOTE(review): 'node_executable' is the pre-Foxy (Dashing/Eloquent)
            # spelling; ROS 2 Foxy and later renamed it to 'executable' —
            # confirm the target ROS 2 distribution before changing.
            node_executable='imu_node',
            output='screen',
            parameters=[{'__log_level': 'INFO',
                         'serial_port': LaunchConfiguration("serial_port"),
                         'frame_id': LaunchConfiguration("frame_id"),
                         'imu_topic': LaunchConfiguration("imu_topic"),
                         'burst_polling_rate': LaunchConfiguration("burst_polling_rate"),
                         'imu_dout_rate': LaunchConfiguration("imu_dout_rate"),
                         'imu_filter_sel': LaunchConfiguration("imu_filter_sel"),
                         'output_32bit_en': LaunchConfiguration("output_32bit_en"),
                         'time_correction_en': LaunchConfiguration("time_correction_en"),
                         'ext_trigger_en': LaunchConfiguration("ext_trigger_en"),
                         }])
    ])
| 45.71 | 165 | 0.532706 |
001e2ce83aeb202160a1f1ce7b013429abc6c971 | 5,904 | py | Python | BlogApp/models.py | n3trob3/nimrodage | 578eb14e2e8f7dc7ae58913b6131fd60c1596c0b | [
"Apache-2.0"
] | null | null | null | BlogApp/models.py | n3trob3/nimrodage | 578eb14e2e8f7dc7ae58913b6131fd60c1596c0b | [
"Apache-2.0"
] | null | null | null | BlogApp/models.py | n3trob3/nimrodage | 578eb14e2e8f7dc7ae58913b6131fd60c1596c0b | [
"Apache-2.0"
] | null | null | null | from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import send_mail, send_mass_mail
from django.db import models
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.http import urlsafe_base64_encode
from django.utils.encoding import force_bytes
from .managers import PostManager
from .tokens import email_confirm_token
User = get_user_model()
# Create your models here.
class Post(models.Model):
    """A blog post with category, like/view tracking and a draft flag."""
    title = models.CharField(max_length=225, unique=True)
    # Filled automatically from ``title`` by the ``create_post_slug``
    # pre_save receiver; not meant to be set by callers.
    title_slug = models.CharField(max_length=225, blank=True)
    author = models.ForeignKey(User, on_delete = models.CASCADE)
    category = models.ForeignKey('BlogApp.Category', on_delete = models.CASCADE)
    date = models.DateTimeField(auto_now_add=True)
    content = models.TextField(blank=True)
    likes = models.ManyToManyField('BlogApp.Likes', blank=True)
    views = models.ManyToManyField('BlogApp.Views', blank=True)
    draft = models.BooleanField(default=False)

    # Custom manager registered under a non-default name: queries must use
    # ``Post.postadmin``, not ``Post.objects``.
    postadmin = PostManager()

    def get_absolute_url(self):
        """Return the canonical detail-page URL for this post."""
        return reverse('blog-detail', args=[self.title_slug])

    def __str__(self):
        return self.title.title()

    def count_likes(self):
        """Number of likes attached to this post."""
        return self.likes.count()

    def count_views(self):
        """Number of recorded views for this post."""
        return self.views.count()
class Category(models.Model):
    """A named grouping that each Post belongs to."""
    name = models.CharField(max_length=225)

    def __str__(self):
        # Display in title case regardless of how the name was stored.
        return self.name.title()
class Comment(models.Model):
    """A reader comment on a :class:`Post`, with an optional staff reply.

    When a reply is attached (see the ``create_reply`` pre_save receiver in
    this module), the commenter is notified by email via :meth:`email_user`.
    """
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    name = models.CharField(max_length=225)
    email = models.EmailField(help_text='Your email is protected and not seen by any third party.')
    comment = models.TextField()
    date = models.DateTimeField(auto_now=True)
    # Maintained by the ``create_reply`` pre_save receiver; not editable.
    has_reply = models.BooleanField(default=False, editable=False)
    reply = models.OneToOneField('BlogApp.Reply', on_delete=models.CASCADE, blank=True, null=True)

    def email_user(self, fail=True):
        """Email the commenter that their comment received a reply.

        ``fail`` is forwarded to ``send_mail`` as ``fail_silently``.
        Returns the number of successfully delivered messages (0 or 1).
        Assumes ``self.reply`` is set — callers must attach a reply first.
        """
        message = render_to_string("Blog/new_reply.html", {
            'email_name': settings.EMAIL_NIMRODAGE,
            'name': self.name or self.email.split('@')[0],
            'comment': self.comment,
            'reply': self.reply.content,
            'post': self.post.title,
            'link': reverse('blog-detail', args=[self.post.title_slug]),
            'domain': settings.SITE_DOMAINX,
        })
        # Fix: removed a leftover debug ``print(message)`` that dumped the
        # full rendered email body to stdout on every notification.
        return send_mail('Nimrod Age Newsletter', message, settings.EMAIL_NIMRODAGE, [self.email], fail)

    class Meta:
        ordering = ['-date']

    def __str__(self):
        return self.name
class Reply(models.Model):
    """A staff reply to a Comment, authored by a registered user."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    content = models.TextField()
    date = models.DateTimeField(auto_now=True)
class Views(models.Model):
    """A single recorded view; ``viewer`` is a free-form identifier string."""
    viewer = models.CharField(max_length=225)
class Likes(models.Model):
    """A like tied to a Views row (one is auto-created per new view)."""
    viewer = models.ForeignKey('BlogApp.Views', on_delete=models.CASCADE)
class Newsletter(models.Model):
    """A newsletter subscriber, identified by a unique email address."""
    email = models.EmailField(unique=True)
    # Flipped to True once the activation link (see ``new_newsletter``
    # pre_save receiver) has been followed.
    verified = models.BooleanField(default=False)

    def __str__(self):
        return self.email

    def new_email(self, fail=True):
        """Send the welcome template to this subscriber.

        ``fail`` is forwarded to ``send_mail`` as ``fail_silently``.
        """
        message=render_to_string("Blog/new_email.html",{
            'email_name': settings.EMAIL_NIMRODAGE,
            'name': self.email.split('@')[0],
        })
        return send_mail('Nimrod Age Newsletter', message, settings.EMAIL_NIMRODAGE, [self.email], fail)

    def email_user(self, subject, message, fail=True):
        """Send an arbitrary subject/message pair to this subscriber."""
        return send_mail(subject, message, settings.EMAIL_NIMRODAGE, [self.email], fail)
class EmailMarketing(models.Model):
    """A bulk email campaign sent to Newsletter subscribers."""
    subject = models.CharField(max_length=255)
    message = models.TextField()
    sents = models.ManyToManyField(Newsletter, related_name='Sents', blank=True)
    rejects = models.ManyToManyField(Newsletter, blank=True)
    # Presumably counts dispatch runs of this campaign (incremented once
    # per ``email_users`` call), not individual emails — TODO confirm.
    amount = models.IntegerField(default=0)
    date = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.subject

    def email_lists(self):
        """Total number of newsletter subscribers (verified or not)."""
        return Newsletter.objects.count()

    def count_sent(self):
        """Number of recipients that accepted this campaign."""
        return self.sents.count()

    def count_reject(self):
        """Number of recipients for which sending failed."""
        return self.rejects.count()

    def email_users(self, fail=True, only_verified=False):
        """Send this campaign to every subscriber.

        Each recipient is recorded in ``sents`` or ``rejects`` depending on
        the ``send_mail`` result. ``fail`` is forwarded as ``fail_silently``;
        ``only_verified`` restricts the audience to verified subscribers.
        Returns the list of per-recipient ``send_mail`` results.
        """
        sending = []
        results = Newsletter.objects.all()
        if only_verified:
            results = results.filter(verified=True)
        for user in results:
            # BUG FIX: the original had a trailing comma after the
            # ``render_to_string(...)`` call, which bound ``message`` to a
            # 1-tuple and passed a tuple instead of a string to send_mail().
            message = render_to_string("Blog/newsletter_mail.html", {
                'email_name': settings.EMAIL_NIMRODAGE,
                'name': user.email.split('@')[0].title(),
                'message': self.message,
                'subject': self.subject,
            })
            sent = send_mail(self.subject, message, settings.EMAIL_NIMRODAGE, [user.email], fail)
            if sent:
                self.sents.add(user)
            else:
                self.rejects.add(user)
            sending.append(sent)
        if results:
            self.amount += 1
            self.save()
        return sending
@receiver(pre_save, sender=Post)
def create_post_slug(sender, instance, **kwargs):
    """Derive ``title_slug`` from the post title before every save."""
    lowered_title = instance.title.lower()
    instance.title_slug = lowered_title.replace(' ', '_')
@receiver(post_save, sender=Views)
def create_like(sender, instance, created, **kwargs):
    """Create a Likes row tied to each newly created Views row."""
    if created:
        # Return value intentionally discarded (the original bound it to an
        # unused local).
        Likes.objects.create(viewer=instance)
@receiver(pre_save, sender=Comment)
def create_reply(sender, instance,**kwargs):
    """Before saving a Comment that has a reply attached, mark it as
    replied and notify the commenter by email."""
    if instance.reply:
        instance.has_reply=True
        # Sends the "new reply" notification; see Comment.email_user.
        instance.email_user()
@receiver(pre_save, sender=Newsletter)
def new_newsletter(sender, instance, **kwargs):
    """Before saving a subscriber, send a welcome or a confirmation email.

    Verified subscribers receive the welcome template; unverified ones get
    an activation email carrying a uid/token pair for the confirm link.
    Note this fires on *every* save, not only on creation.
    """
    if instance.verified:
        instance.new_email()
    else:
        uid = urlsafe_base64_encode(force_bytes(instance.pk))
        token = email_confirm_token.make_token(instance)
        subject = f"{instance.email.split('@')[0].title()} Nimrod Age Newsletter"
        message = render_to_string("Blog/activation_email.html", {
            'name': instance.email.split('@')[0].title(),
            "uid": uid,
            "token": token,
            "domain": settings.SITE_DOMAINX,
            'email_name': settings.EMAIL_NIMRODAGE,
        })
        # Fix: removed a leftover debug ``print(sent, '\n\n')`` and the
        # unused binding of the send result.
        instance.email_user(subject, message)
| 32.618785 | 99 | 0.730522 |
bac0e18ad0d11367448e52d4248fbaebafa9a85d | 376 | py | Python | TorchTSA/model/ARModel.py | ailzy/TorchTSA | b4baba635048a8dbb66eee33f2ae0f2bc8b24141 | [
"MIT"
] | 6 | 2018-01-03T07:31:12.000Z | 2018-03-06T07:02:17.000Z | TorchTSA/model/ARModel.py | ppaanngggg/TorchTSA | 1badc7dbb077e63cc6148c8bf5c6a88232f56dc1 | [
"MIT"
] | null | null | null | TorchTSA/model/ARModel.py | ppaanngggg/TorchTSA | 1badc7dbb077e63cc6148c8bf5c6a88232f56dc1 | [
"MIT"
] | 2 | 2018-03-06T06:57:05.000Z | 2018-03-09T16:33:51.000Z | from TorchTSA.model.ARMAModel import ARMAModel
class ARModel(ARMAModel):
    """Pure autoregressive model: an ARMA(p, 0) specialisation."""

    def __init__(
            self, _phi_num: int = 1,
            _use_mu: bool = True
    ):
        # An AR(p) process is just ARMA(p, q) with q pinned to zero.
        super().__init__(
            _phi_num=_phi_num,
            _theta_num=0,
            _use_mu=_use_mu,
        )

    def getThetas(self):
        # A pure AR model has no moving-average coefficients to expose.
        raise NotImplementedError('No Theta in AR Model')
| 20.888889 | 57 | 0.569149 |
9534ec78ef9c64f3b4948c8c26a04efb729354e2 | 759 | py | Python | tools/c7n_traildb/setup.py | al3pht/cloud-custodian | ce6613d1b716f336384c5e308eee300389e6bf50 | [
"Apache-2.0"
] | 2,415 | 2018-12-04T00:37:58.000Z | 2022-03-31T12:28:56.000Z | tools/c7n_traildb/setup.py | al3pht/cloud-custodian | ce6613d1b716f336384c5e308eee300389e6bf50 | [
"Apache-2.0"
] | 3,272 | 2018-12-03T23:58:17.000Z | 2022-03-31T21:15:32.000Z | tools/c7n_traildb/setup.py | lfranchini31/cloud-custodian | 1830fe4b9a59ff6afb675985c9ea531571616a76 | [
"Apache-2.0"
] | 773 | 2018-12-06T09:43:23.000Z | 2022-03-30T20:44:43.000Z | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from setuptools import setup
# Package metadata for the Cloud Custodian CloudTrail tooling.
# Exposes three console entry points: the traildb, trailts and trailes CLIs.
setup(
    name="c7n_traildb",
    version='0.1',
    description="Cloud Custodian - Cloud Trail Tools",
    classifiers=[
        "Topic :: System :: Systems Administration",
        "Topic :: System :: Distributed Computing"
    ],
    url="https://github.com/cloud-custodian/cloud-custodian",
    license="Apache-2.0",
    py_modules=['c7n_traildb'],
    entry_points={
        'console_scripts': [
            'c7n-traildb = c7n_traildb.traildb:main',
            'c7n-trailts = c7n_traildb.trailts:trailts',
            'c7n-trailes = c7n_traildb.trailes:trailes',
        ]},
    install_requires=["c7n", "click", "jsonschema", "influxdb"],
)
| 30.36 | 64 | 0.635046 |
a651e3f570a53446e3f901dbf42bfb0648d7d25b | 561 | py | Python | setup.py | wookayin/toy-montezuma-pycolab | 4de753e8b00d96968782ba0f1dbd89be7291d81f | [
"Apache-2.0"
] | 7 | 2019-02-01T01:41:33.000Z | 2021-12-03T13:05:18.000Z | setup.py | wookayin/toy-montezuma-pycolab | 4de753e8b00d96968782ba0f1dbd89be7291d81f | [
"Apache-2.0"
] | 1 | 2021-08-02T08:13:33.000Z | 2021-08-02T08:13:33.000Z | setup.py | wookayin/toy-montezuma-pycolab | 4de753e8b00d96968782ba0f1dbd89be7291d81f | [
"Apache-2.0"
] | 3 | 2019-04-15T14:57:27.000Z | 2021-07-30T23:35:51.000Z |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import setuptools
# Package metadata for the Toy Montezuma's Revenge pycolab environment.
setuptools.setup(
    name='mr_pycolab',
    version='1.0',
    description='A Toy Montezuma\'s Revenge Environment written in pycolab.',
    url='https://github.com/wookayin/montezuma-pycolab/',
    author='Jongwook Choi',
    author_email='wookayin@gmail.com',
    license='Apache 2.0',
    install_requires=[
        'numpy>=1.9',
        'pycolab>=1.0',
        'six',
    ],
    packages=['mr_pycolab'],
    zip_safe=True,
)
| 22.44 | 77 | 0.672014 |
bf8af9d2a2d0167cf39a2edc68b2b085be2c5299 | 3,943 | py | Python | src/Project 3/mail/mail/migrations/0001_initial.py | aminbeigi/CS50W-Projects | 905f53b1ebf057340114c2ca0283bcac64a16609 | [
"MIT"
] | null | null | null | src/Project 3/mail/mail/migrations/0001_initial.py | aminbeigi/CS50W-Projects | 905f53b1ebf057340114c2ca0283bcac64a16609 | [
"MIT"
] | null | null | null | src/Project 3/mail/mail/migrations/0001_initial.py | aminbeigi/CS50W-Projects | 905f53b1ebf057340114c2ca0283bcac64a16609 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.4 on 2021-01-17 02:13
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the mail app: a custom ``User`` model (mirroring
    Django's ``AbstractUser`` fields) and an ``Email`` model.

    Auto-generated by ``makemigrations``; do not hand-edit field options.
    """

    initial = True

    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
        migrations.CreateModel(
            name='Email',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('subject', models.CharField(max_length=255)),
                ('body', models.TextField(blank=True)),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('read', models.BooleanField(default=False)),
                ('archived', models.BooleanField(default=False)),
                # An email is stored once per mailbox owner (``user``); sender
                # and recipients link back to the same custom User model.
                ('recipients', models.ManyToManyField(related_name='emails_received', to=settings.AUTH_USER_MODEL)),
                ('sender', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='emails_sent', to=settings.AUTH_USER_MODEL)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='emails', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 64.639344 | 330 | 0.64494 |
843ebd26b6404b20d25d5b43aaa8e3ec46042317 | 21,503 | py | Python | TelegramForwarder/Source/telethon/tl/types/help.py | Invectys/TelegramBots | 658eaae2d23a476b9407e6db6f3e1be57c5e7465 | [
"MIT"
] | null | null | null | TelegramForwarder/Source/telethon/tl/types/help.py | Invectys/TelegramBots | 658eaae2d23a476b9407e6db6f3e1be57c5e7465 | [
"MIT"
] | null | null | null | TelegramForwarder/Source/telethon/tl/types/help.py | Invectys/TelegramBots | 658eaae2d23a476b9407e6db6f3e1be57c5e7465 | [
"MIT"
] | null | null | null | """File generated by TLObjects' generator. All changes will be ERASED"""
from ...tl.tlobject import TLObject
from typing import Optional, List, Union, TYPE_CHECKING
import os
import struct
from datetime import datetime
if TYPE_CHECKING:
from ...tl.types import TypeAccessPointRule, TypeChat, TypeDataJSON, TypeDocument, TypeMessageEntity, TypePeer, TypeRecentMeUrl, TypeUser
from ...tl.types.help import TypeTermsOfService
class AppUpdate(TLObject):
    """Generated TL type ``help.appUpdate``: describes an available client
    update. Do not hand-edit the serialization byte strings."""
    CONSTRUCTOR_ID = 0x1da7158f
    SUBCLASS_OF_ID = 0x5897069e

    # noinspection PyShadowingBuiltins
    def __init__(self, id: int, version: str, text: str, entities: List['TypeMessageEntity'], can_not_skip: Optional[bool]=None, document: Optional['TypeDocument']=None, url: Optional[str]=None):
        """
        Constructor for help.AppUpdate: Instance of either AppUpdate, NoAppUpdate.
        """
        self.id = id
        self.version = version
        self.text = text
        self.entities = entities
        self.can_not_skip = can_not_skip
        self.document = document
        self.url = url

    def to_dict(self):
        return {
            '_': 'AppUpdate',
            'id': self.id,
            'version': self.version,
            'text': self.text,
            'entities': [] if self.entities is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.entities],
            'can_not_skip': self.can_not_skip,
            'document': self.document.to_dict() if isinstance(self.document, TLObject) else self.document,
            'url': self.url
        }

    def _bytes(self):
        # Constructor ID, then a flags word encoding which optionals are set.
        return b''.join((
            b'\x8f\x15\xa7\x1d',
            struct.pack('<I', (0 if self.can_not_skip is None or self.can_not_skip is False else 1) | (0 if self.document is None or self.document is False else 2) | (0 if self.url is None or self.url is False else 4)),
            struct.pack('<i', self.id),
            self.serialize_bytes(self.version),
            self.serialize_bytes(self.text),
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.entities)),b''.join(x._bytes() for x in self.entities),
            b'' if self.document is None or self.document is False else (self.document._bytes()),
            b'' if self.url is None or self.url is False else (self.serialize_bytes(self.url)),
        ))

    @classmethod
    def from_reader(cls, reader):
        flags = reader.read_int()
        _can_not_skip = bool(flags & 1)
        _id = reader.read_int()
        _version = reader.tgread_string()
        _text = reader.tgread_string()
        reader.read_int()
        _entities = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _entities.append(_x)

        if flags & 2:
            _document = reader.tgread_object()
        else:
            _document = None
        if flags & 4:
            _url = reader.tgread_string()
        else:
            _url = None
        return cls(id=_id, version=_version, text=_text, entities=_entities, can_not_skip=_can_not_skip, document=_document, url=_url)
class ConfigSimple(TLObject):
    """Generated TL type ``help.configSimple``: minimal datacenter access
    configuration with an expiry window."""
    CONSTRUCTOR_ID = 0x5a592a6c
    SUBCLASS_OF_ID = 0x29183ac4

    def __init__(self, date: Optional[datetime], expires: Optional[datetime], rules: List['TypeAccessPointRule']):
        """
        Constructor for help.ConfigSimple: Instance of ConfigSimple.
        """
        self.date = date
        self.expires = expires
        self.rules = rules

    def to_dict(self):
        return {
            '_': 'ConfigSimple',
            'date': self.date,
            'expires': self.expires,
            'rules': [] if self.rules is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.rules]
        }

    def _bytes(self):
        return b''.join((
            b'l*YZ',
            self.serialize_datetime(self.date),
            self.serialize_datetime(self.expires),
            struct.pack('<i', len(self.rules)),b''.join(x._bytes() for x in self.rules),
        ))

    @classmethod
    def from_reader(cls, reader):
        _date = reader.tgread_date()
        _expires = reader.tgread_date()
        _rules = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _rules.append(_x)

        return cls(date=_date, expires=_expires, rules=_rules)
class DeepLinkInfo(TLObject):
    """Generated TL type ``help.deepLinkInfo``: message shown for an
    unsupported deep link, optionally asking the user to update."""
    CONSTRUCTOR_ID = 0x6a4ee832
    SUBCLASS_OF_ID = 0x984aac38

    def __init__(self, message: str, update_app: Optional[bool]=None, entities: Optional[List['TypeMessageEntity']]=None):
        """
        Constructor for help.DeepLinkInfo: Instance of either DeepLinkInfoEmpty, DeepLinkInfo.
        """
        self.message = message
        self.update_app = update_app
        self.entities = entities

    def to_dict(self):
        return {
            '_': 'DeepLinkInfo',
            'message': self.message,
            'update_app': self.update_app,
            'entities': [] if self.entities is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.entities]
        }

    def _bytes(self):
        return b''.join((
            b'2\xe8Nj',
            struct.pack('<I', (0 if self.update_app is None or self.update_app is False else 1) | (0 if self.entities is None or self.entities is False else 2)),
            self.serialize_bytes(self.message),
            b'' if self.entities is None or self.entities is False else b''.join((b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.entities)),b''.join(x._bytes() for x in self.entities))),
        ))

    @classmethod
    def from_reader(cls, reader):
        flags = reader.read_int()
        _update_app = bool(flags & 1)
        _message = reader.tgread_string()
        if flags & 2:
            reader.read_int()
            _entities = []
            for _ in range(reader.read_int()):
                _x = reader.tgread_object()
                _entities.append(_x)

        else:
            _entities = None
        return cls(message=_message, update_app=_update_app, entities=_entities)
class DeepLinkInfoEmpty(TLObject):
    """Generated TL type ``help.deepLinkInfoEmpty``: no info for the link."""
    CONSTRUCTOR_ID = 0x66afa166
    SUBCLASS_OF_ID = 0x984aac38

    def to_dict(self):
        return {
            '_': 'DeepLinkInfoEmpty'
        }

    def _bytes(self):
        return b''.join((
            b'f\xa1\xaff',
        ))

    @classmethod
    def from_reader(cls, reader):
        return cls()
class InviteText(TLObject):
    """Generated TL type ``help.inviteText``: localized invitation text."""
    CONSTRUCTOR_ID = 0x18cb9f78
    SUBCLASS_OF_ID = 0xcf70aa35

    def __init__(self, message: str):
        """
        Constructor for help.InviteText: Instance of InviteText.
        """
        self.message = message

    def to_dict(self):
        return {
            '_': 'InviteText',
            'message': self.message
        }

    def _bytes(self):
        return b''.join((
            b'x\x9f\xcb\x18',
            self.serialize_bytes(self.message),
        ))

    @classmethod
    def from_reader(cls, reader):
        _message = reader.tgread_string()
        return cls(message=_message)
class NoAppUpdate(TLObject):
    """Generated TL type ``help.noAppUpdate``: no client update available."""
    CONSTRUCTOR_ID = 0xc45a6536
    SUBCLASS_OF_ID = 0x5897069e

    def to_dict(self):
        return {
            '_': 'NoAppUpdate'
        }

    def _bytes(self):
        return b''.join((
            b'6eZ\xc4',
        ))

    @classmethod
    def from_reader(cls, reader):
        return cls()
class PassportConfig(TLObject):
    """Generated TL type ``help.passportConfig``: Telegram Passport
    country/language configuration, versioned by ``hash``."""
    CONSTRUCTOR_ID = 0xa098d6af
    SUBCLASS_OF_ID = 0xc666c0ad

    # noinspection PyShadowingBuiltins
    def __init__(self, hash: int, countries_langs: 'TypeDataJSON'):
        """
        Constructor for help.PassportConfig: Instance of either PassportConfigNotModified, PassportConfig.
        """
        self.hash = hash
        self.countries_langs = countries_langs

    def to_dict(self):
        return {
            '_': 'PassportConfig',
            'hash': self.hash,
            'countries_langs': self.countries_langs.to_dict() if isinstance(self.countries_langs, TLObject) else self.countries_langs
        }

    def _bytes(self):
        return b''.join((
            b'\xaf\xd6\x98\xa0',
            struct.pack('<i', self.hash),
            self.countries_langs._bytes(),
        ))

    @classmethod
    def from_reader(cls, reader):
        _hash = reader.read_int()
        _countries_langs = reader.tgread_object()
        return cls(hash=_hash, countries_langs=_countries_langs)
class PassportConfigNotModified(TLObject):
    """Generated TL type ``help.passportConfigNotModified``: cached config
    is still current."""
    CONSTRUCTOR_ID = 0xbfb9f457
    SUBCLASS_OF_ID = 0xc666c0ad

    def to_dict(self):
        return {
            '_': 'PassportConfigNotModified'
        }

    def _bytes(self):
        return b''.join((
            b'W\xf4\xb9\xbf',
        ))

    @classmethod
    def from_reader(cls, reader):
        return cls()
class PromoData(TLObject):
    """Generated TL type ``help.promoData``: a promoted peer (proxy sponsor
    or public service announcement) plus the related chats/users."""
    CONSTRUCTOR_ID = 0x8c39793f
    SUBCLASS_OF_ID = 0x9d595542

    def __init__(self, expires: Optional[datetime], peer: 'TypePeer', chats: List['TypeChat'], users: List['TypeUser'], proxy: Optional[bool]=None, psa_type: Optional[str]=None, psa_message: Optional[str]=None):
        """
        Constructor for help.PromoData: Instance of either PromoDataEmpty, PromoData.
        """
        self.expires = expires
        self.peer = peer
        self.chats = chats
        self.users = users
        self.proxy = proxy
        self.psa_type = psa_type
        self.psa_message = psa_message

    def to_dict(self):
        return {
            '_': 'PromoData',
            'expires': self.expires,
            'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
            'chats': [] if self.chats is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.chats],
            'users': [] if self.users is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.users],
            'proxy': self.proxy,
            'psa_type': self.psa_type,
            'psa_message': self.psa_message
        }

    def _bytes(self):
        # Constructor ID, then a flags word encoding which optionals are set.
        return b''.join((
            b'?y9\x8c',
            struct.pack('<I', (0 if self.proxy is None or self.proxy is False else 1) | (0 if self.psa_type is None or self.psa_type is False else 2) | (0 if self.psa_message is None or self.psa_message is False else 4)),
            self.serialize_datetime(self.expires),
            self.peer._bytes(),
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.chats)),b''.join(x._bytes() for x in self.chats),
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.users)),b''.join(x._bytes() for x in self.users),
            b'' if self.psa_type is None or self.psa_type is False else (self.serialize_bytes(self.psa_type)),
            b'' if self.psa_message is None or self.psa_message is False else (self.serialize_bytes(self.psa_message)),
        ))

    @classmethod
    def from_reader(cls, reader):
        flags = reader.read_int()
        _proxy = bool(flags & 1)
        _expires = reader.tgread_date()
        _peer = reader.tgread_object()
        reader.read_int()
        _chats = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _chats.append(_x)

        reader.read_int()
        _users = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _users.append(_x)

        if flags & 2:
            _psa_type = reader.tgread_string()
        else:
            _psa_type = None
        if flags & 4:
            _psa_message = reader.tgread_string()
        else:
            _psa_message = None
        return cls(expires=_expires, peer=_peer, chats=_chats, users=_users, proxy=_proxy, psa_type=_psa_type, psa_message=_psa_message)
class PromoDataEmpty(TLObject):
    """Generated TL type ``help.promoDataEmpty``: no promo data until
    ``expires``."""
    CONSTRUCTOR_ID = 0x98f6ac75
    SUBCLASS_OF_ID = 0x9d595542

    def __init__(self, expires: Optional[datetime]):
        """
        Constructor for help.PromoData: Instance of either PromoDataEmpty, PromoData.
        """
        self.expires = expires

    def to_dict(self):
        return {
            '_': 'PromoDataEmpty',
            'expires': self.expires
        }

    def _bytes(self):
        return b''.join((
            b'u\xac\xf6\x98',
            self.serialize_datetime(self.expires),
        ))

    @classmethod
    def from_reader(cls, reader):
        _expires = reader.tgread_date()
        return cls(expires=_expires)
class RecentMeUrls(TLObject):
    """Generated TL type ``help.recentMeUrls``: recently shared t.me links
    plus the chats/users they refer to."""
    CONSTRUCTOR_ID = 0xe0310d7
    SUBCLASS_OF_ID = 0xf269c477

    def __init__(self, urls: List['TypeRecentMeUrl'], chats: List['TypeChat'], users: List['TypeUser']):
        """
        Constructor for help.RecentMeUrls: Instance of RecentMeUrls.
        """
        self.urls = urls
        self.chats = chats
        self.users = users

    def to_dict(self):
        return {
            '_': 'RecentMeUrls',
            'urls': [] if self.urls is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.urls],
            'chats': [] if self.chats is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.chats],
            'users': [] if self.users is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.users]
        }

    def _bytes(self):
        return b''.join((
            b'\xd7\x10\x03\x0e',
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.urls)),b''.join(x._bytes() for x in self.urls),
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.chats)),b''.join(x._bytes() for x in self.chats),
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.users)),b''.join(x._bytes() for x in self.users),
        ))

    @classmethod
    def from_reader(cls, reader):
        reader.read_int()
        _urls = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _urls.append(_x)

        reader.read_int()
        _chats = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _chats.append(_x)

        reader.read_int()
        _users = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _users.append(_x)

        return cls(urls=_urls, chats=_chats, users=_users)
class Support(TLObject):
    """Generated TL type ``help.support``: the support user and phone."""
    CONSTRUCTOR_ID = 0x17c6b5f6
    SUBCLASS_OF_ID = 0x7159bceb

    def __init__(self, phone_number: str, user: 'TypeUser'):
        """
        Constructor for help.Support: Instance of Support.
        """
        self.phone_number = phone_number
        self.user = user

    def to_dict(self):
        return {
            '_': 'Support',
            'phone_number': self.phone_number,
            'user': self.user.to_dict() if isinstance(self.user, TLObject) else self.user
        }

    def _bytes(self):
        return b''.join((
            b'\xf6\xb5\xc6\x17',
            self.serialize_bytes(self.phone_number),
            self.user._bytes(),
        ))

    @classmethod
    def from_reader(cls, reader):
        _phone_number = reader.tgread_string()
        _user = reader.tgread_object()
        return cls(phone_number=_phone_number, user=_user)
class SupportName(TLObject):
    """Generated TL type ``help.supportName``: localized support user name."""
    CONSTRUCTOR_ID = 0x8c05f1c9
    SUBCLASS_OF_ID = 0x7f50b7c2

    def __init__(self, name: str):
        """
        Constructor for help.SupportName: Instance of SupportName.
        """
        self.name = name

    def to_dict(self):
        return {
            '_': 'SupportName',
            'name': self.name
        }

    def _bytes(self):
        return b''.join((
            b'\xc9\xf1\x05\x8c',
            self.serialize_bytes(self.name),
        ))

    @classmethod
    def from_reader(cls, reader):
        _name = reader.tgread_string()
        return cls(name=_name)
class TermsOfService(TLObject):
    """Generated TL type ``help.termsOfService``: terms text the user must
    accept, optionally shown as a popup with an age gate."""
    CONSTRUCTOR_ID = 0x780a0310
    SUBCLASS_OF_ID = 0x20ee8312

    # noinspection PyShadowingBuiltins
    def __init__(self, id: 'TypeDataJSON', text: str, entities: List['TypeMessageEntity'], popup: Optional[bool]=None, min_age_confirm: Optional[int]=None):
        """
        Constructor for help.TermsOfService: Instance of TermsOfService.
        """
        self.id = id
        self.text = text
        self.entities = entities
        self.popup = popup
        self.min_age_confirm = min_age_confirm

    def to_dict(self):
        return {
            '_': 'TermsOfService',
            'id': self.id.to_dict() if isinstance(self.id, TLObject) else self.id,
            'text': self.text,
            'entities': [] if self.entities is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.entities],
            'popup': self.popup,
            'min_age_confirm': self.min_age_confirm
        }

    def _bytes(self):
        # Constructor ID, then a flags word encoding which optionals are set.
        return b''.join((
            b'\x10\x03\nx',
            struct.pack('<I', (0 if self.popup is None or self.popup is False else 1) | (0 if self.min_age_confirm is None or self.min_age_confirm is False else 2)),
            self.id._bytes(),
            self.serialize_bytes(self.text),
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.entities)),b''.join(x._bytes() for x in self.entities),
            b'' if self.min_age_confirm is None or self.min_age_confirm is False else (struct.pack('<i', self.min_age_confirm)),
        ))

    @classmethod
    def from_reader(cls, reader):
        flags = reader.read_int()
        _popup = bool(flags & 1)
        _id = reader.tgread_object()
        _text = reader.tgread_string()
        reader.read_int()
        _entities = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _entities.append(_x)

        if flags & 2:
            _min_age_confirm = reader.read_int()
        else:
            _min_age_confirm = None
        return cls(id=_id, text=_text, entities=_entities, popup=_popup, min_age_confirm=_min_age_confirm)
class TermsOfServiceUpdate(TLObject):
    # Auto-generated TL object for help.TermsOfServiceUpdate.
    CONSTRUCTOR_ID = 0x28ecf961
    SUBCLASS_OF_ID = 0x293c2977
    def __init__(self, expires: Optional[datetime], terms_of_service: 'TypeTermsOfService'):
        """
        Constructor for help.TermsOfServiceUpdate: Instance of either TermsOfServiceUpdateEmpty, TermsOfServiceUpdate.
        """
        self.expires = expires
        self.terms_of_service = terms_of_service
    def to_dict(self):
        return {
            '_': 'TermsOfServiceUpdate',
            'expires': self.expires,
            'terms_of_service': self.terms_of_service.to_dict() if isinstance(self.terms_of_service, TLObject) else self.terms_of_service
        }
    def _bytes(self):
        # Wire format: constructor id 0x28ecf961 (little-endian) + fields.
        return b''.join((
            b'a\xf9\xec(',
            self.serialize_datetime(self.expires),
            self.terms_of_service._bytes(),
        ))
    @classmethod
    def from_reader(cls, reader):
        _expires = reader.tgread_date()
        _terms_of_service = reader.tgread_object()
        return cls(expires=_expires, terms_of_service=_terms_of_service)
class TermsOfServiceUpdateEmpty(TLObject):
    # Auto-generated TL object for help.TermsOfServiceUpdateEmpty (no new terms).
    CONSTRUCTOR_ID = 0xe3309f7f
    SUBCLASS_OF_ID = 0x293c2977
    def __init__(self, expires: Optional[datetime]):
        """
        Constructor for help.TermsOfServiceUpdate: Instance of either TermsOfServiceUpdateEmpty, TermsOfServiceUpdate.
        """
        self.expires = expires
    def to_dict(self):
        return {
            '_': 'TermsOfServiceUpdateEmpty',
            'expires': self.expires
        }
    def _bytes(self):
        # Wire format: constructor id 0xe3309f7f (little-endian) + expiry date.
        return b''.join((
            b'\x7f\x9f0\xe3',
            self.serialize_datetime(self.expires),
        ))
    @classmethod
    def from_reader(cls, reader):
        _expires = reader.tgread_date()
        return cls(expires=_expires)
class UserInfo(TLObject):
    # Auto-generated TL object for help.UserInfo.
    CONSTRUCTOR_ID = 0x1eb3758
    SUBCLASS_OF_ID = 0x5c53d7d8
    def __init__(self, message: str, entities: List['TypeMessageEntity'], author: str, date: Optional[datetime]):
        """
        Constructor for help.UserInfo: Instance of either UserInfoEmpty, UserInfo.
        """
        self.message = message
        self.entities = entities
        self.author = author
        self.date = date
    def to_dict(self):
        return {
            '_': 'UserInfo',
            'message': self.message,
            'entities': [] if self.entities is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.entities],
            'author': self.author,
            'date': self.date
        }
    def _bytes(self):
        return b''.join((
            b'X7\xeb\x01',
            self.serialize_bytes(self.message),
            # 0x1cb5c415 = TL Vector constructor id, then count + elements.
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.entities)),b''.join(x._bytes() for x in self.entities),
            self.serialize_bytes(self.author),
            self.serialize_datetime(self.date),
        ))
    @classmethod
    def from_reader(cls, reader):
        _message = reader.tgread_string()
        # Discard the Vector constructor id before reading the element count.
        reader.read_int()
        _entities = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _entities.append(_x)
        _author = reader.tgread_string()
        _date = reader.tgread_date()
        return cls(message=_message, entities=_entities, author=_author, date=_date)
class UserInfoEmpty(TLObject):
    # Auto-generated TL object for help.UserInfoEmpty (no fields).
    CONSTRUCTOR_ID = 0xf3ae2eed
    SUBCLASS_OF_ID = 0x5c53d7d8
    def to_dict(self):
        return {
            '_': 'UserInfoEmpty'
        }
    def _bytes(self):
        # Wire format is just the constructor id 0xf3ae2eed (little-endian).
        return b''.join((
            b'\xed.\xae\xf3',
        ))
    @classmethod
    def from_reader(cls, reader):
        return cls()
| 32.09403 | 221 | 0.596614 |
75ce77b90abbfb3dd5a478279ecab683ee1ccadb | 993 | py | Python | 283_move_zeros.py | gengwg/leetcode | 0af5256ec98149ef5863f3bba78ed1e749650f6e | [
"Apache-2.0"
] | 2 | 2018-04-24T19:17:40.000Z | 2018-04-24T19:33:52.000Z | 283_move_zeros.py | gengwg/leetcode | 0af5256ec98149ef5863f3bba78ed1e749650f6e | [
"Apache-2.0"
] | null | null | null | 283_move_zeros.py | gengwg/leetcode | 0af5256ec98149ef5863f3bba78ed1e749650f6e | [
"Apache-2.0"
] | 3 | 2020-06-17T05:48:52.000Z | 2021-01-02T06:08:25.000Z | # 283. Move Zeroes
# Given an array nums, write a function to move all 0's to the end of it
# while maintaining the relative order of the non-zero elements.
# For example, given nums = [0, 1, 0, 3, 12],
# after calling your function, nums should be [1, 3, 12, 0, 0].
class Solution(object):
    # Reference: http://bookshadow.com/weblog/2015/09/19/leetcode-move-zeroes/
    def moveZeroes(self, nums):
        """Move all zeros to the end of `nums` in place, preserving the
        relative order of the non-zero elements.

        Two-pointer sweep: `write` marks the slot where the next non-zero
        element belongs; every element in [write, read) is zero, so swapping
        keeps the non-zero elements in their original order.

        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        write = 0
        for read, value in enumerate(nums):
            if value:
                nums[read], nums[write] = nums[write], nums[read]
                write += 1
if __name__ == '__main__':
    # Smoke test: exercise the in-place move on two samples and show results.
    for sample in ([0, 1, 0, 3, 12], [1, 0]):
        Solution().moveZeroes(sample)
        print(sample)
d27a0e44a047441cf1a8aa8579fb7a56bd1e44fd | 4,032 | py | Python | addons/Helper.py | EmreTech/EmreTech-Bot | 55fe998509d3547f9b6eedc263a91c7016ce52a3 | [
"MIT"
] | null | null | null | addons/Helper.py | EmreTech/EmreTech-Bot | 55fe998509d3547f9b6eedc263a91c7016ce52a3 | [
"MIT"
] | 1 | 2021-04-26T01:02:12.000Z | 2021-05-07T20:16:43.000Z | addons/Helper.py | EmreTech/EmreTech-Bot | 55fe998509d3547f9b6eedc263a91c7016ce52a3 | [
"MIT"
] | null | null | null | import discord
import functools
import hashlib, hmac
import urllib.request
import urllib.parse
import json
from datetime import datetime
from discord.ext import commands
"""
Some useful utilities for other addons (cogs) of the bot.
"""
# A hash list for verification
hashes = ["sha1", "sha256", "sha512"]
def restricted_to_bot_channel(func):
    """Restricts the given function to the bot channel.
    Example:
    -------
    ```
    @commands.command()
    @restricted_to_bot_channel
    async def run_me_in_bot_channel(self, ctx):
        await ctx.send("Okay.")
    ```
    """
    @functools.wraps(func)
    async def wrapper(*args, **kwargs):
        # args[0] is the cog instance (self), args[1] the command context.
        func_self = args[0]
        ctx = args[1]
        try:
            # NOTE(review): `ctx.author in func_self.bot.protected_roles`
            # compares the invoking member against a collection whose name
            # suggests roles -- confirm protected_roles holds members/users.
            if ctx.author in (func_self.bot.protected_roles):
                pass
            # 816810434811527198 is hard-coded as the guild id this
            # restriction applies to; other guilds fall through.
            elif not ctx.channel == func_self.bot.bot_channel and ctx.guild.id == 816810434811527198:
                await ctx.message.delete()
                return await ctx.send(f"{ctx.author.mention} This command can only be used in {func_self.bot.bot_channel.mention}.")
            await func(*args, **kwargs)
        except:
            # NOTE(review): bare except -- any error raised *inside* func
            # lands here too, and a protected author then has func re-run a
            # second time. Confirm this is intentional before narrowing.
            if ctx.author in (func_self.bot.protected_roles):
                await func(*args, **kwargs)
            else:
                return await ctx.send(f"{ctx.author.mention} This command can only be used in the bot channel.")
    return wrapper
def restricted_to_level(requiredLevel: int):
    """Restricts the given function to a given level.
    Code to accept arguments into the decorator taken from here: https://stackoverflow.com/a/30904603
    Example:
    -------
    ```
    @commands.command()
    @restricted_to_level(10)
    async def only_use_me_at_lvl_ten(self, ctx):
        await ctx.send("Good job reaching level 10!")
    ```
    """
    def outer(func):
        @functools.wraps(func)
        async def inner(*args, **kwargs):
            # args[0] is the cog instance (self), args[1] the command context.
            func_self = args[0]
            ctx = args[1]
            # Levels are keyed by the author's id as a string.
            author_id = str(ctx.author.id)
            try:
                current_level = func_self.bot.levels_dict[author_id]["level"]
                if current_level < requiredLevel:
                    await ctx.send(f"Your level is too low to run this command. Requirement is {requiredLevel}.")
                else:
                    await func(*args, **kwargs)
            except KeyError:
                # No entry for this user yet -> treated as below requirement.
                await ctx.send(f"Your level is too low to run this command. Requirement is {requiredLevel}.")
        return inner
    return outer
async def check_mute_expiry(mutes_dict: dict, member: "discord.User"):
    """Check *member*'s entry in the mutes table.

    ``mutes_dict`` maps ``str(user id)`` to an end time formatted as
    ``"%Y-%m-%d %H:%M:%S"``, the literal string ``"Indefinite"``, or ``""``.

    Returns True for an "Indefinite" entry or when the stored end time is in
    the past; False when the member has no entry, the entry is empty, or the
    end time is still in the future.
    """
    key = str(member.id)
    if key not in mutes_dict:
        return False
    end_time = mutes_dict[key]
    if end_time == "Indefinite":
        return True
    elif end_time == "":
        return False
    expiry = datetime.strptime(end_time, "%Y-%m-%d %H:%M:%S")
    # Stored timestamps are naive; utcnow() matches them -- assumes writers
    # store UTC, TODO confirm.
    diff = expiry - datetime.utcnow()
    return diff.total_seconds() < 0
async def handle_verify_msg(author: "discord.Member", user_hash: str, hash_used: str):
    """
    Handles the verify message, where the hash used is random.

    Compares *user_hash* against the digest of ``"name#discriminator"``
    computed with the algorithm named by *hash_used* (e.g. ``"sha256"``).
    Returns True on a (timing-safe) match, False otherwise. Raises
    ``ValueError`` for an algorithm name hashlib does not support.
    """
    full = (author.name + "#" + author.discriminator).encode('utf8')
    # hashlib.new() looks the algorithm up by name. Unlike the previous
    # eval()-based dispatch it cannot execute arbitrary code smuggled in
    # through hash_used, while accepting the same legitimate names.
    our_hash = hashlib.new(hash_used, full)
    return hmac.compare_digest(our_hash.hexdigest().lower(), user_hash.lower())
def get_title_from_youtube_video(id: str):
    """Return the title of the YouTube video with the given *id*, fetched
    through YouTube's public oEmbed endpoint (no API key required)."""
    query = urllib.parse.urlencode({
        "format": "json",
        "url": f"https://www.youtube.com/watch?v={id}",
    })
    endpoint = f"https://www.youtube.com/oembed?{query}"
    with urllib.request.urlopen(endpoint) as resp:
        payload = json.loads(resp.read().decode())
    return payload["title"]
2fe0d0c808c824d900654929d793f6876aabbdc0 | 12,713 | py | Python | tests/custom_object_test.py | rsepassi/jax | 33348b10f8e826c6ad25cb7055b5a351849777fb | [
"Apache-2.0"
] | 1 | 2020-12-07T14:55:41.000Z | 2020-12-07T14:55:41.000Z | tests/custom_object_test.py | rsepassi/jax | 33348b10f8e826c6ad25cb7055b5a351849777fb | [
"Apache-2.0"
] | 6 | 2022-01-03T00:41:33.000Z | 2022-02-14T00:33:48.000Z | tests/custom_object_test.py | rsepassi/jax | 33348b10f8e826c6ad25cb7055b5a351849777fb | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest, parameterized
import numpy as np
from jax._src import test_util as jtu
import jax.numpy as jnp
from jax import core, jit, lax, make_jaxpr
from jax._src import device_array
from jax._src import dispatch
from jax.interpreters import mlir
from jax.interpreters import xla
from jax._src.lib.mlir import ir
from jax._src.lib import xla_bridge, xla_client
xops = xla_client.ops
xc = xla_client
xb = xla_bridge
from jax.config import config
config.parse_flags_with_absl()
# TODO(jakevdp): use a setup/teardown method to populate and unpopulate all the
# dictionaries associated with the following objects.
# Define a sparse array data structure. The important feature here is that
# it is a jaxpr object that is backed by two device buffers.
class SparseArray:
  """Simple sparse COO array data structure."""
  def __init__(self, aval, data, indices):
    # aval: the AbstractSparseArray describing this value.
    # data: nonzero values, shape (nnz,); indices: coordinates, (nnz, ndim).
    self.aval = aval
    self.shape = aval.shape
    self.data = data
    self.indices = indices
  @property
  def index_dtype(self):
    return self.indices.dtype
  @property
  def dtype(self):
    return self.data.dtype
  @property
  def nnz(self):
    # Number of stored (nonzero) entries.
    return self.data.shape[0]
  def __repr__(self):
    # Renders as a list of ((coords...), value) pairs.
    return repr(list((tuple(ind), d) for ind, d in zip(self.indices, self.data)))
class AbstractSparseArray(core.ShapedArray):
  """Abstract value for SparseArray: a ShapedArray carrying, in addition,
  the index dtype, the nonzero count, and the avals of the two backing
  buffers (data and indices)."""
  __slots__ = ['index_dtype', 'nnz', 'data_aval', 'indices_aval']
  def __init__(self, shape, dtype, index_dtype, nnz, weak_type=False,
               named_shape=None):
    super().__init__(shape, dtype)
    named_shape = {} if named_shape is None else named_shape
    self.index_dtype = index_dtype
    self.nnz = nnz
    # Backing buffers: data is (nnz,) of dtype, indices is (nnz, ndim).
    self.data_aval = core.ShapedArray((nnz,), dtype, weak_type, named_shape)
    self.indices_aval = core.ShapedArray((nnz, len(shape)), index_dtype,
                                         named_shape=named_shape)
  def update(self, shape=None, dtype=None, index_dtype=None, nnz=None,
             weak_type=None, named_shape=None):
    """Return a copy with the given fields replaced; None keeps the current
    value."""
    if shape is None:
      shape = self.shape
    if dtype is None:
      dtype = self.dtype
    if index_dtype is None:
      # Fixed: previously defaulted to self.dtype (the data dtype), silently
      # changing the index dtype on any update() that did not pass it.
      index_dtype = self.index_dtype
    if nnz is None:
      nnz = self.nnz
    if weak_type is None:
      weak_type = self.weak_type
    if named_shape is None:
      named_shape = self.named_shape
    return AbstractSparseArray(
        shape, dtype, index_dtype, nnz, weak_type, named_shape)
  def strip_weak_type(self):
    return self
  @core.aval_property
  def data(self):
    return sp_data_p.bind(self)
  @core.aval_property
  def indices(self):
    return sp_indices_p.bind(self)
class ConcreteSparseArray(AbstractSparseArray):
  # Concrete (fully-known) counterpart of AbstractSparseArray; no extra state.
  pass
def sparse_array_result_handler(device, aval):
  # Builds the callback XLA dispatch uses to wrap the two output device
  # buffers (data, indices) back into a SparseArray.
  def build_sparse_array(data_buf, indices_buf):
    data = device_array.make_device_array(aval.data_aval, device, data_buf)
    indices = device_array.make_device_array(aval.indices_aval, device, indices_buf)
    return SparseArray(aval, data, indices)
  return build_sparse_array
def sparse_array_shape_handler(a):
  # An AbstractSparseArray lowers to two XLA array shapes: data and indices.
  return (
    xc.Shape.array_shape(a.data_aval.dtype, a.data_aval.shape),
    xc.Shape.array_shape(a.indices_aval.dtype, a.indices_aval.shape),
  )
def sparse_array_device_put_handler(a, device):
  # Transfers a SparseArray to `device` as its two component buffers.
  return (
    xb.get_device_backend(device).buffer_from_pyval(a.data, device),
    xb.get_device_backend(device).buffer_from_pyval(a.indices, device)
  )
def sparse_array_constant_handler(c, val, canonicalize_dtypes):
  # Embeds a SparseArray constant as two IR constants (data, indices).
  return (
    xla.pyval_to_ir_constant(val.data, canonicalize_dtypes),
    xla.pyval_to_ir_constant(val.indices, canonicalize_dtypes)
  )
# Register SparseArray / AbstractSparseArray with the tracing, XLA dispatch,
# and constant-handling machinery. Each SparseArray is backed by exactly two
# device buffers (data + indices), hence num_buffers == 2.
core.pytype_aval_mappings[SparseArray] = lambda x: x.aval
core.raise_to_shaped_mappings[AbstractSparseArray] = lambda aval, _: aval
xla.pytype_aval_mappings[SparseArray] = lambda x: x.aval
xla.canonicalize_dtype_handlers[SparseArray] = lambda x: x
dispatch.device_put_handlers[SparseArray] = sparse_array_device_put_handler
dispatch.result_handlers[AbstractSparseArray] = sparse_array_result_handler
dispatch.num_buffers_handlers[AbstractSparseArray] = lambda _: 2
xla.xla_shape_handlers[AbstractSparseArray] = sparse_array_shape_handler
xla.register_constant_handler(SparseArray, sparse_array_constant_handler)
def sparse_array_mlir_type_handler(a):
  # An AbstractSparseArray lowers to two MLIR ranked tensor types.
  return (
    ir.RankedTensorType.get(
          a.data_aval.shape, mlir.dtype_to_ir_type[a.data_aval.dtype]()),
    ir.RankedTensorType.get(
          a.indices_aval.shape, mlir.dtype_to_ir_type[a.indices_aval.dtype]()),
  )
mlir.ir_type_handlers[AbstractSparseArray] = sparse_array_mlir_type_handler
# Primitive extracting the `indices` buffer of a sparse array.
sp_indices_p = core.Primitive('sp_indices')
@sp_indices_p.def_impl
def _sp_indices_impl(mat):
  return mat.indices
@sp_indices_p.def_abstract_eval
def _sp_indices_abstract_eval(mat):
  return mat.indices_aval
def _sp_indices_translation_rule(ctx, avals_in, avals_out, data, indices):
  # The sparse array arrives as two XLA values; pass the second through.
  return [indices]
# Note: cannot use lower_fun to define attribute access primitives
# because it leads to infinite recursion.
xla.register_translation(sp_indices_p, _sp_indices_translation_rule)
def _sp_indices_mhlo_lowering(ctx, avals_in, avals_out, data_and_indices):
  # data_and_indices is the (data, indices) pair; select the indices.
  return [data_and_indices[1]]
mlir.register_lowering(sp_indices_p, _sp_indices_mhlo_lowering)
# Primitive extracting the `data` buffer of a sparse array.
sp_data_p = core.Primitive('sp_data')
@sp_data_p.def_impl
def _sp_data_impl(mat):
  return mat.data
@sp_data_p.def_abstract_eval
def _sp_data_abstract_eval(mat):
  return mat.data_aval
def _sp_data_translation_rule(ctx, avals_in, avals_out, data, indices):
  # The sparse array arrives as two XLA values; pass the first through.
  return [data]
# Note: cannot use lower_fun to define attribute access primitives
# because it leads to infinite recursion.
xla.register_translation(sp_data_p, _sp_data_translation_rule)
def _sp_data_mhlo_lowering(ctx, avals_in, avals_out, data_and_indices):
  # data_and_indices is the (data, indices) pair; select the data.
  return [data_and_indices[0]]
mlir.register_lowering(sp_data_p, _sp_data_mhlo_lowering)
# Identity primitive over sparse arrays: returns its operand unchanged.
def identity(x):
  return identity_p.bind(x)
identity_p = core.Primitive('identity')
@identity_p.def_impl
def _identity_impl(mat):
  return mat
@identity_p.def_abstract_eval
def _identity_abstract_eval(mat):
  # Output aval mirrors the input's shape/dtype/index dtype/nnz.
  return AbstractSparseArray(mat.shape, mat.dtype, mat.index_dtype, mat.nnz)
xla.register_translation(
    identity_p, xla.lower_fun(_identity_impl, multiple_results=False,
                              new_style=True))
mlir.register_lowering(
    identity_p, mlir.lower_fun(_identity_impl, multiple_results=False))
# Split primitive: duplicates a sparse array into two results.
def split(x):
  return split_p.bind(x)
split_p = core.Primitive('split')
split_p.multiple_results = True
@split_p.def_impl
def _split_impl(mat):
  return mat, mat
@split_p.def_abstract_eval
def _split_abstract_eval(mat):
  m = AbstractSparseArray(mat.shape, mat.dtype, mat.index_dtype, mat.nnz)
  return m, m
xla.register_translation(
    split_p, xla.lower_fun(_split_impl, multiple_results=True, new_style=True))
def make_sparse_array(rng, shape, dtype, nnz=0.2):
  # Builds a random SparseArray. `nnz` in (0, 1) is interpreted as a
  # fraction of the total size; otherwise as an absolute nonzero count.
  mat = rng(shape, dtype)
  size = int(np.prod(shape))
  if 0 < nnz < 1:
    nnz = nnz * size
  nnz = int(nnz)
  if nnz == 0:
    mat = np.zeros_like(mat)
  elif nnz < size:
    # Zero out everything at or above the nnz-th smallest value, keeping
    # roughly nnz entries nonzero.
    # TODO(jakevdp): do we care about duplicates?
    cutoff = np.sort(mat.ravel())[nnz]
    mat[mat >= cutoff] = 0
  nz = (mat != 0)
  data = jnp.array(mat[nz])
  indices = jnp.array(np.where(nz)).T
  aval = AbstractSparseArray(shape, data.dtype, indices.dtype, len(indices))
  return SparseArray(aval, data, indices)
def matvec(mat, v):
  """Multiply a 2-D COO sparse matrix by a dense vector.

  Each stored entry contributes data[k] * v[col[k]] to output row row[k];
  contributions are accumulated with a scatter-add.
  """
  vec = jnp.asarray(v)
  assert vec.ndim == 1
  assert len(mat.shape) == 2
  assert vec.shape[0] == mat.shape[1]
  rows, cols = mat.indices[:, 0], mat.indices[:, 1]
  contrib = mat.data * vec[cols]
  out = jnp.zeros(mat.shape[0], dtype=contrib.dtype)
  return out.at[rows].add(contrib)
class Empty:
  # A value with no backing buffers at all; only carries its abstract value.
  def __init__(self, aval):
    self.aval = aval
class AbstractEmpty(core.AbstractValue):
  # Abstract value for Empty: all instances are interchangeable, so equality
  # and hashing ignore identity entirely.
  def join(self, other):
    assert isinstance(other, self.__class__), other
    return self
  def __hash__(self):
    return hash(())
  def __eq__(self, other):
    return isinstance(other, AbstractEmpty)
class ConcreteEmpty(AbstractEmpty):
  # Concrete counterpart of AbstractEmpty; no extra state.
  pass
# Register Empty with tracing and dispatch: it lowers to ZERO buffers, so the
# device_put / shape handlers return empty tuples and the result handler
# simply constructs a fresh Empty.
core.pytype_aval_mappings[Empty] = lambda x: ConcreteEmpty()
core.raise_to_shaped_mappings[AbstractEmpty] = lambda aval, _: aval
xla.pytype_aval_mappings[Empty] = lambda x: AbstractEmpty()
xla.canonicalize_dtype_handlers[Empty] = lambda x: x
dispatch.device_put_handlers[Empty] = lambda _, __: ()
dispatch.result_handlers[AbstractEmpty] = lambda _, __: lambda: Empty(AbstractEmpty())
dispatch.num_buffers_handlers[AbstractEmpty] = lambda _: 0
xla.xla_shape_handlers[AbstractEmpty] = lambda _: ()
class CustomObjectTest(jtu.JaxTestCase):
  """Tests that the custom SparseArray / Empty pytypes round-trip through
  jit, lax control flow, jaxpr construction, and constant embedding."""
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_compile={}_primitive={}".format(compile, primitive),
       "compile": compile, "primitive": primitive}
      for primitive in [True, False]
      for compile in [True, False]))
  def testSparseIdentity(self, compile, primitive):
    """Identity (as a primitive or plain Python) preserves all fields."""
    f = identity if primitive else (lambda x: x)
    f = jit(f) if compile else f
    rng = jtu.rand_default(self.rng())
    M = make_sparse_array(rng, (10,), jnp.float32)
    M2 = f(M)
    jaxpr = make_jaxpr(f)(M).jaxpr
    core.check_jaxpr(jaxpr)
    self.assertEqual(M.dtype, M2.dtype)
    self.assertEqual(M.index_dtype, M2.index_dtype)
    self.assertAllClose(M.data, M2.data)
    self.assertAllClose(M.indices, M2.indices)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_compile={}".format(compile),
       "compile": compile}
      for compile in [True, False]))
  def testSparseSplit(self, compile):
    """split() yields two results, each equal to the input."""
    f = jit(split) if compile else split
    rng = jtu.rand_default(self.rng())
    M = make_sparse_array(rng, (10,), jnp.float32)
    M2, M3 = f(M)
    jaxpr = make_jaxpr(f)(M).jaxpr
    core.check_jaxpr(jaxpr)
    for MM in M2, M3:
      self.assertEqual(M.dtype, MM.dtype)
      self.assertEqual(M.index_dtype, MM.index_dtype)
      self.assertArraysEqual(M.data, MM.data)
      self.assertArraysEqual(M.indices, MM.indices)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_compile={}_primitive={}".format(compile, primitive),
       "compile": compile, "primitive": primitive}
      for primitive in [True, False]
      for compile in [True, False]))
  def testSparseLaxLoop(self, compile, primitive):
    """A SparseArray can be carried through lax.fori_loop."""
    rng = jtu.rand_default(self.rng())
    f = identity if primitive else (lambda x: x)
    f = jit(f) if compile else f
    body_fun = lambda _, A: f(A)
    M = make_sparse_array(rng, (10,), jnp.float32)
    lax.fori_loop(0, 10, body_fun, M)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_attr={}".format(attr), "attr": attr}
      for attr in ["data", "indices"]))
  def testSparseAttrAccess(self, attr):
    """The data/indices aval properties work under jit."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [make_sparse_array(rng, (10,), jnp.float32)]
    f = lambda x: getattr(x, attr)
    self._CompileAndCheck(f, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}".format(
          jtu.format_shape_dtype_string(shape, dtype)),
       "shape": shape, "dtype": dtype}
      for shape in [(3, 3), (2, 6), (6, 2)]
      for dtype in jtu.dtypes.floating))
  def testSparseMatvec(self, shape, dtype):
    """Sparse-dense matvec compiles and matches the eager result."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [make_sparse_array(rng, shape, dtype), rng(shape[-1:], dtype)]
    self._CompileAndCheck(matvec, args_maker)
  def testLowerToNothing(self):
    """A zero-buffer Empty value passes through jit and jaxpr checking."""
    empty = Empty(AbstractEmpty())
    jaxpr = make_jaxpr(jit(lambda e: e))(empty).jaxpr
    core.check_jaxpr(jaxpr)
    # cannot return a unit, because CompileAndCheck assumes array output.
    testfunc = lambda e: None
    args_maker = lambda: [empty]
    self._CompileAndCheck(testfunc, args_maker)
  def testConstantHandler(self):
    """A SparseArray closed over by a jitted function embeds correctly."""
    def make_const_array():
      data = np.arange(3.0)
      indices = np.arange(3)[:, None]
      shape = (5,)
      aval = AbstractSparseArray(shape, data.dtype, indices.dtype, len(indices))
      return SparseArray(aval, data, indices)
    out1 = make_const_array()
    out2 = jit(make_const_array)()
    self.assertArraysEqual(out1.data, out2.data)
    self.assertArraysEqual(out1.indices, out2.indices)
# Standard absl test entry point using JAX's test loader.
if __name__ == '__main__':
  absltest.main(testLoader=jtu.JaxTestLoader())
| 32.266497 | 87 | 0.728467 |
0a36166acee6000c5205a5262139e69bd1dd5269 | 3,622 | py | Python | Code/odooerp/odoo-8.0/openerp/tools/func.py | zhupangithub/WEBERP | 714512082ec5c6db07cbf6af0238ceefe2d2c1a5 | [
"MIT"
] | 1 | 2019-12-29T11:53:56.000Z | 2019-12-29T11:53:56.000Z | odoo/openerp/tools/func.py | tuanquanghpvn/odoo8-tutorial | 52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e | [
"MIT"
] | null | null | null | odoo/openerp/tools/func.py | tuanquanghpvn/odoo8-tutorial | 52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e | [
"MIT"
] | 3 | 2020-10-08T14:42:10.000Z | 2022-01-28T14:12:29.000Z | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010, 2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
__all__ = ['synchronized', 'lazy_property', 'classproperty']
from functools import wraps
from inspect import getsourcefile
class lazy_property(object):
    """ Decorator for a lazy property of an object, i.e., an object attribute
        that is determined by the result of a method call evaluated once. To
        reevaluate the property, simply delete the attribute on the object, and
        get it again.
    """
    def __init__(self, fget):
        self.fget = fget

    def __get__(self, obj, cls):
        if obj is None:
            return self
        # Compute once, then shadow this (non-data) descriptor with a plain
        # instance attribute so later accesses bypass __get__ entirely.
        value = self.fget(obj)
        setattr(obj, self.fget.__name__, value)
        return value

    @property
    def __doc__(self):
        return self.fget.__doc__

    @staticmethod
    def reset_all(obj):
        """ Reset all lazy properties on the instance `obj`. """
        cls = type(obj)
        obj_dict = vars(obj)
        # Iterate over a snapshot of the keys: popping from obj_dict while
        # iterating the live keys() view raises RuntimeError on Python 3.
        for name in list(obj_dict):
            if isinstance(getattr(cls, name, None), lazy_property):
                obj_dict.pop(name)
def synchronized(lock_attr='_lock'):
    """Method decorator that serializes calls through the lock stored on
    ``self.<lock_attr>``.

    The lock is held for the duration of the wrapped call via ``with``;
    unlike acquire() inside try/finally, this cannot attempt to release a
    lock whose acquisition failed.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            with getattr(self, lock_attr):
                return func(self, *args, **kwargs)
        return wrapper
    return decorator
def frame_codeinfo(fframe, back=0):
    """ Return a (filename, line) pair for a previous frame .
    @return (filename, lineno) where lineno is either int or string==''
    """
    try:
        if not fframe:
            return "<unknown>", ''
        # Walk `back` frames up the call stack.
        for i in range(back):
            fframe = fframe.f_back
        try:
            fname = getsourcefile(fframe)
        except TypeError:
            # getsourcefile raises TypeError for built-in frames/modules.
            fname = '<builtin>'
        lineno = fframe.f_lineno or ''
        return fname, lineno
    except Exception:
        # Deliberately broad: this is debug/introspection glue, so any
        # failure degrades to a placeholder rather than propagating.
        return "<unknown>", ''
def compose(a, b):
    """ Composes the callables ``a`` and ``b``. ``compose(a, b)(*args)`` is
        equivalent to ``a(b(*args))``.
        Can be used as a decorator by partially applying ``a``::
            @partial(compose, a)
            def b():
                ...
    """
    def composed(*args, **kwargs):
        return a(b(*args, **kwargs))
    # Carry over b's metadata (name, docstring, ...) onto the composition.
    return wraps(b)(composed)
class _ClassProperty(property):
    # property subclass whose getter is a classmethod: __get__ ignores the
    # instance and calls the getter bound to the owner class.
    def __get__(self, cls, owner):
        return self.fget.__get__(None, owner)()
def classproperty(func):
    # Decorator exposing `func` as a read-only attribute on the class itself.
    return _ClassProperty(classmethod(func))
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 31.495652 | 79 | 0.587797 |
2938f55bc765e10c87a35f031352b863523936ff | 1,235 | py | Python | python/functional/functional.py | L1ttl3S1st3r/MyLittleProjects | a932ab3229bc2b7515bf4fd98cbe7895f9273281 | [
"Apache-2.0"
] | null | null | null | python/functional/functional.py | L1ttl3S1st3r/MyLittleProjects | a932ab3229bc2b7515bf4fd98cbe7895f9273281 | [
"Apache-2.0"
] | null | null | null | python/functional/functional.py | L1ttl3S1st3r/MyLittleProjects | a932ab3229bc2b7515bf4fd98cbe7895f9273281 | [
"Apache-2.0"
] | null | null | null | """
functional programming on Python example: check if string is a palendrome
dependencies:
pyfpm for pattern matching
python 3.7.1
author:
L1ttl3S1st3r
4/14/2019
"""
from pyfpm.matcher import Matcher
import typing
class IncorrectArgException(ValueError):
    # Raised by the matcher's fallback handler when the input is not a string.
    pass
# watch a descriptions in pyfmp docs
is_palendrome = Matcher()
# if arg is an empty string
@is_palendrome.handler("''")
def empty_string() -> bool:
return True
# if arg matchs to default string
@is_palendrome.handler('string: str')
def correct_string(string) -> bool:
if string[0] == string[-1]:
return is_palendrome(string[1:-1])
else:
return False
@is_palendrome.handler("_")
def random_arg() -> bool:
raise IncorrectArgException("incorrect input type")
def main() -> None:
    """Run the palindrome matcher over mixed valid/invalid samples and
    print one result line per sample."""
    tests = ["abc", "b", "",
             "abcd", "abba", 1,
             None, object]
    for test in tests:
        try:
            if is_palendrome(test):
                print(str(test) + " is a palendrome")
            else:
                print(str(test) + " is not a palendrome")
        except IncorrectArgException:
            # Non-string inputs land here via the wildcard handler.
            print(str(test) + " has incorrect type:" + str(type(test)))
if __name__ == "__main__":
    main()
| 20.583333 | 73 | 0.625101 |
3c3854b7acf825b230515b1bebb6f14f091e9ec1 | 99 | py | Python | server/apps/sertificates/apps.py | Alexandr-Belchev/testblank | eabec9348fcb38dd6a6cfb9219404cbe7a4e327d | [
"MIT"
] | null | null | null | server/apps/sertificates/apps.py | Alexandr-Belchev/testblank | eabec9348fcb38dd6a6cfb9219404cbe7a4e327d | [
"MIT"
] | 11 | 2020-02-11T23:53:15.000Z | 2022-03-11T23:49:19.000Z | server/apps/sertificates/apps.py | Alexandr-Belchev/testblank | eabec9348fcb38dd6a6cfb9219404cbe7a4e327d | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class SertificatesConfig(AppConfig):
    # Django application configuration for the `sertificates` app.
    name = 'sertificates'
fc49f95d237b9a81bf6cca4dd5c6e534677c9ce0 | 3,611 | py | Python | qulab/Driver/drivers/DSA875.py | feihoo87/QuLab | cc16f4777e5523fca327f7f0a9725fd13f9b057f | [
"MIT"
] | 16 | 2018-03-16T12:08:31.000Z | 2022-03-20T08:53:35.000Z | qulab/Driver/drivers/DSA875.py | feihoo87/QuLab | cc16f4777e5523fca327f7f0a9725fd13f9b057f | [
"MIT"
] | 148 | 2018-03-18T09:33:18.000Z | 2022-03-21T16:00:15.000Z | qulab/Driver/drivers/DSA875.py | feihoo87/QuLab | cc16f4777e5523fca327f7f0a9725fd13f9b057f | [
"MIT"
] | 14 | 2018-03-18T08:00:12.000Z | 2020-10-21T12:39:42.000Z | import re
import time
import numpy as np
import logging
log = logging.getLogger(__name__)
from qulab.Driver import visaDriver, QInteger, QOption, QReal
class Driver(visaDriver):
    """VISA driver for the RIGOL DSA875 spectrum analyzer (SCPI over VISA)."""
    __log__=log
    error_command = ':SYST:ERR?'
    support_models = ['DSA875']
    # Instrument quantities: each maps a named setting to its SCPI commands.
    quants = [
        QOption('Sweep',
                value='ON',
                set_cmd='INIT:CONT %(option)s',
                options=[('OFF', 'OFF'), ('ON', 'ON')]),
        QOption('Trace Mode',
                value='WRIT',
                ch=1,
                set_cmd='TRAC%(ch)d:MODE %(option)s',
                get_cmd='TRAC%(ch)d:MODE?',
                options=[('Write', 'WRIT'), ('Maxhold', 'MAXH'),
                         ('Minhold', 'MINH'), ('View', 'VIEW'),
                         ('Blank', 'BLAN'), ('Videoavg', 'VID'),
                         ('Poweravg', 'POW')]),
        QReal('Frequency Start',
              unit='Hz',
              set_cmd='SENS:FREQ:STAR %(value)e%(unit)s',
              get_cmd='SENS:FREQ:STAR?'),
        QReal('Frequency Stop',
              unit='Hz',
              set_cmd='SENS:FREQ:STOP %(value)e%(unit)s',
              get_cmd='SENS:FREQ:STOP?'),
        QInteger('Sweep Points',
                 value=601,
                 set_cmd=':SWE:POIN %(value)d',
                 get_cmd=':SWE:POIN?')
    ]
    def get_Trace(self, average=1, ch=1):
        '''Get the Trace Data '''
        # Acquire one (optionally power-averaged) sweep on trace `ch` and
        # return it as a numpy array of `Sweep Points` values.
        points = self.getValue('Sweep Points')
        #Stop the sweep
        self.setValue('Sweep', 'OFF')
        if average == 1:
            self.setValue('Trace Mode', 'Write', ch=ch)
            self.write(':SWE:COUN 1')
        else:
            self.setValue('Trace Mode', 'Poweravg', ch=ch)
            self.write(':TRAC:AVER:COUN %d' % average)
            self.write(':SWE:COUN %d' % average)
            self.write(':TRAC:AVER:RES')
        #Begin a measurement
        self.write('INIT:IMM')
        self.write('*WAI')
        # Poll until the instrument reports all requested sweeps completed.
        count = float(self.query('SWE:COUN:CURR?'))
        while count < average:
            count = float(self.query('SWE:COUN:CURR?'))
            time.sleep(0.01)
        #Get the data
        self.write('FORMAT:BORD NORM')
        self.write('FORMAT ASCII')
        data_raw = self.query("TRAC:DATA? TRACE%d" % ch).strip('\n')
        # data_raw[11:] skips what appears to be a fixed 11-character
        # preamble -- TODO confirm against the DSA800 programming guide.
        _data = re.split(r",", data_raw[11:])
        data = []
        for d in _data[:points]:
            data.append(float(d))
        #Start the sweep
        self.setValue('Sweep', 'ON')
        return np.array(data)
    def get_Frequency(self):
        """Return the frequency of DSA measurement"""
        # Frequency axis matching get_Trace(): linearly spaced start..stop.
        freq_star = self.getValue('Frequency Start')
        freq_stop = self.getValue('Frequency Stop')
        sweep_point = self.getValue('Sweep Points')
        return np.array(np.linspace(freq_star, freq_stop, sweep_point))
    # NOTE(review): mutable default `signalfreqlist=[]` -- harmless here
    # because it is only iterated, never mutated, but fragile if edited.
    def get_SNR(self, signalfreqlist=[], signalbandwidth=10e6, average=1,
                ch=1):
        '''get SNR_dB '''
        # Combined query+set: remember the current power unit, then switch
        # to Watts so powers can be summed linearly; restored below.
        Y_unit = self.query(':UNIT:POW?;:UNIT:POW W').strip('\n')
        Frequency = self.get_Frequency()
        Spectrum = self.get_Trace(average=average, ch=ch)
        Signal_power = 0
        Total_power = sum(Spectrum)
        # Accumulate power inside +/- bandwidth/2 around each signal freq.
        for sf in signalfreqlist:
            for f in Frequency:
                if f > (sf - signalbandwidth / 2) and f < (
                        sf + signalbandwidth / 2):
                    index = np.where(Frequency == f)
                    Signal_power = Signal_power + Spectrum[index]
        self.write(':UNIT:POW %s' % Y_unit)
        _SNR = Signal_power / (Total_power - Signal_power)
        SNR = 10 * np.log10(_SNR)
        return SNR
| 35.058252 | 73 | 0.514262 |
8430d552d1736de305c07974e83e06cce7222134 | 1,097 | py | Python | sfcbackend/sfc/transferobjects/shipment.py | sfcol/sfc-backend | 33695a4a6faba6f22916e34d0e2033b8d6039f8b | [
"Apache-2.0"
] | 1 | 2020-11-02T10:55:34.000Z | 2020-11-02T10:55:34.000Z | sfcbackend/sfc/transferobjects/shipment.py | sfcol/sfc-backend | 33695a4a6faba6f22916e34d0e2033b8d6039f8b | [
"Apache-2.0"
] | null | null | null | sfcbackend/sfc/transferobjects/shipment.py | sfcol/sfc-backend | 33695a4a6faba6f22916e34d0e2033b8d6039f8b | [
"Apache-2.0"
] | 1 | 2020-11-20T20:56:40.000Z | 2020-11-20T20:56:40.000Z | '''
Module with data model for the shipment
'''
from .orderdata import ShippedItem
class CreateShipmentData:
    '''
    Class for definition of shipments
    '''
    def __init__(self):
        # Request payload for creating a shipment: sender/receiver ids, the
        # item to ship, and start/destination locker ids.
        self.sending_user_id : int = 0
        self.receiving_user_id : int = 0
        self.shipped_item : ShippedItem = None
        self.starting_locker_id : str = None
        self.destination_locker_id : str = None
class Shipment:
    '''
    Class for an existing shipment
    '''
    def __init__(self):
        '''
        Initializer for the class
        '''
        self.shipment_id: str = None
        self.sending_user_id: int = 0
        # NOTE(review): `receiving_user_is` looks like a typo of
        # `receiving_user_id` (cf. CreateShipmentData). Renaming would break
        # any callers using the current spelling -- confirm before fixing.
        self.receiving_user_is: int = 0
        self.shipped_item: ShippedItem = None
        self.starting_locker_id: str = None
        self.destination_locker_id: str = None
        self.shipment_status: str = None
class CollectionCodeData:
    '''
    Data holder pairing a shipment with its collection code and the locker
    it can be collected from.
    '''
    def __init__(self):
        '''
        Initializer for the class
        '''
        # Fields default to None until populated by the service layer.
        self.shipment_id: str = None
        self.collection_code: str = None
        self.destination_locker_id: str = None
| 26.119048 | 47 | 0.616226 |
42abb69a0eb87796d54b7b271182277164d8ca29 | 164 | py | Python | python/Python-Quick-Start/exceptions.py | pepincho/playground | 9202a3dab880ff789e5fb96b259c3e0c2503cb49 | [
"MIT"
] | null | null | null | python/Python-Quick-Start/exceptions.py | pepincho/playground | 9202a3dab880ff789e5fb96b259c3e0c2503cb49 | [
"MIT"
] | null | null | null | python/Python-Quick-Start/exceptions.py | pepincho/playground | 9202a3dab880ff789e5fb96b259c3e0c2503cb49 | [
"MIT"
] | null | null | null | try:
fh = open('xlines.txt')
for line in fh.readlines():
print(line)
except IOError as e:
print("something bad happend ({})".format(e))
print("after badness") | 20.5 | 46 | 0.676829 |
8957a5c0f74c33ecf88e6805e53b42e62ff42a44 | 27,284 | py | Python | src/utils.py | RahulPatkiWork/covid-vaccine-booking-main | 4472f631e28169e99775f1576ca2e575419f03e5 | [
"MIT"
] | null | null | null | src/utils.py | RahulPatkiWork/covid-vaccine-booking-main | 4472f631e28169e99775f1576ca2e575419f03e5 | [
"MIT"
] | 2 | 2021-05-31T13:02:51.000Z | 2021-06-01T18:34:15.000Z | src/utils.py | RahulPatkiWork/covid-vaccine-booking-main | 4472f631e28169e99775f1576ca2e575419f03e5 | [
"MIT"
] | null | null | null | import json
from hashlib import sha256
from inputimeout import inputimeout, TimeoutOccurred
import tabulate, copy, time, datetime, requests, sys, os, random
from captcha import captcha_builder
BOOKING_URL = "https://cdn-api.co-vin.in/api/v2/appointment/schedule"
BENEFICIARIES_URL = "https://cdn-api.co-vin.in/api/v2/appointment/beneficiaries"
CALENDAR_URL_DISTRICT = "https://cdn-api.co-vin.in/api/v2/appointment/sessions/calendarByDistrict?district_id={0}&date={1}"
CALENDAR_URL_PINCODE = "https://cdn-api.co-vin.in/api/v2/appointment/sessions/calendarByPin?pincode={0}&date={1}"
CAPTCHA_URL = "https://cdn-api.co-vin.in/api/v2/auth/getRecaptcha"
OTP_PUBLIC_URL = 'https://cdn-api.co-vin.in/api/v2/auth/public/generateOTP'
OTP_PRO_URL = 'https://cdn-api.co-vin.in/api/v2/auth/generateMobileOTP'
WARNING_BEEP_DURATION = (1000, 2000)
try:
import winsound
except ImportError:
import os
def beep(freq, duration):
# apt install sox/brew install sox
os.system(
f"play -n synth {duration/1000} sin {freq} >/dev/null 2>&1")
else:
def beep(freq, duration):
winsound.Beep(freq, duration)
def viable_options(resp, minimum_slots, min_age_booking, fee_type, dose):
    """Filter a CoWIN calendar response down to bookable options.

    :param resp: decoded JSON from a calendarByDistrict/calendarByPin call
    :param minimum_slots: minimum available capacity to keep a session
    :param min_age_booking: youngest beneficiary's age; session min age must not exceed it
    :param fee_type: list of acceptable fee types, e.g. ['Free', 'Paid']
    :param dose: 1 or 2 -- selects which capacity counter to check
    :return: list of dicts, one per acceptable (center, session) pair
    """
    # Removed the always-true `if len(resp['centers']) >= 0` guard and the
    # empty `else: pass` branches from the original -- behavior is unchanged.
    capacity_key = 'available_capacity_dose1' if dose == 1 else 'available_capacity_dose2'
    options = []
    for center in resp['centers']:
        for session in center['sessions']:
            availability = session[capacity_key]
            if (availability >= minimum_slots
                    and session['min_age_limit'] <= min_age_booking
                    and center['fee_type'] in fee_type):
                options.append({
                    'name': center['name'],
                    'district': center['district_name'],
                    'pincode': center['pincode'],
                    'center_id': center['center_id'],
                    'available': availability,
                    'date': session['date'],
                    'slots': session['slots'],
                    'session_id': session['session_id']
                })
    return options
def display_table(dict_list):
    """
    Render a list of dicts as a grid table, prefixing a 1-based
    'idx' column so the user can pick entries by number.
    """
    header = ['idx'] + list(dict_list[0].keys())
    rows = [[pos + 1] + list(entry.values())
            for pos, entry in enumerate(dict_list)]
    print(tabulate.tabulate(rows, header, tablefmt='grid'))
def display_info_dict(details):
    """Pretty-print collected details; a non-empty list of dicts is shown
    as a table, everything else on a single line.

    Fix: the original passed an empty list straight to display_table
    (``all()`` on an empty iterable is True), which raised IndexError.
    Empty lists now fall through to the plain one-line output.
    """
    for key, value in details.items():
        if (isinstance(value, list) and value
                and all(isinstance(item, dict) for item in value)):
            print(f"\t{key}:")
            display_table(value)
        else:
            print(f"\t{key}\t: {value}")
def confirm_and_proceed(collected_details):
    """
    Show the collected details and abort the program unless the
    user confirms (empty answer defaults to 'y').
    """
    print("\n================================= Confirm Info =================================\n")
    display_info_dict(collected_details)

    reply = input("\nProceed with above info (y/n Default y) : ") or 'y'
    if reply != 'y':
        print("Details not confirmed. Exiting process.")
        os.system("pause")
        sys.exit()
def save_user_info(filename, details):
    """Offer to persist the collected details as JSON for reuse next run.

    :param filename: path of the JSON file to write
    :param details: dict of collected booking details
    """
    print("\n================================= Save Info =================================\n")
    save_info = input("Would you like to save this as a JSON file for easy use next time?: (y/n Default y): ")
    save_info = save_info if save_info else 'y'
    if save_info == 'y':
        with open(filename, 'w') as f:
            json.dump(details, f)
        # BUG FIX: the message printed a literal placeholder instead of
        # interpolating the actual filename.
        print(f"Info saved to {filename} in {os.getcwd()}")
def get_saved_user_info(filename):
    """
    Load previously saved user details from a JSON file.
    """
    with open(filename, 'r') as fp:
        return json.load(fp)
def collect_user_details(request_header):
    """Interactively gather everything needed for a booking run.

    Prompts for beneficiaries, search area (pincode or state/district),
    slot filters, refresh rate, start date, fee preference and the
    auto-booking flag, and returns them in a single dict.
    """
    # Get Beneficiaries
    print("Fetching registered beneficiaries.. ")
    beneficiary_dtls = get_beneficiaries(request_header)

    if len(beneficiary_dtls) == 0:
        print("There should be at least one beneficiary. Exiting.")
        os.system("pause")
        sys.exit(1)

    # Make sure all beneficiaries have the same type of vaccine
    vaccine_types = [beneficiary['vaccine'] for beneficiary in beneficiary_dtls]
    statuses = [beneficiary['status'] for beneficiary in beneficiary_dtls]

    # Mixing doses (one person's first dose with another's second) is not allowed.
    if len(set(statuses)) > 1:
        print("\n================================= Important =================================\n")
        print(f"All beneficiaries in one attempt should be of same vaccination status (same dose). Found {statuses}")
        os.system("pause")
        sys.exit(1)

    vaccines = set(vaccine_types)
    if len(vaccines) > 1 and ('' in vaccines):
        # '' means "no preference recorded"; drop it so the set reflects
        # only the explicit vaccine preferences.
        vaccines.remove('')
        vaccine_types.remove('')
        print("\n================================= Important =================================\n")
        print(f"Some of the beneficiaries have a set vaccine preference ({vaccines}) and some do not.")
        print("Results will be filtered to show only the set vaccine preference.")
        os.system("pause")

    if len(vaccines) != 1:
        print("\n================================= Important =================================\n")
        print(f"All beneficiaries in one attempt should have the same vaccine type. Found {len(vaccines)}")
        os.system("pause")
        sys.exit(1)

    vaccine_type = vaccine_types[0]
    if not vaccine_type:
        # No preference recorded with CoWIN yet -- ask the user directly.
        print("\n================================= Vaccine Info =================================\n")
        vaccine_type = get_vaccine_preference()

    print("\n================================= Location Info =================================\n")
    # get search method to use
    search_option = input(
        """Search by Pincode? Or by State/District? \nEnter 1 for Pincode or 2 for State/District. (Default 2) : """)
    if not search_option or int(search_option) not in [1, 2]:
        search_option = 2
    else:
        search_option = int(search_option)

    if search_option == 2:
        # Collect vaccination center preferance
        location_dtls = get_districts(request_header)
    else:
        # Collect vaccination center preferance
        location_dtls = get_pincodes()

    print("\n================================= Additional Info =================================\n")

    # Set filter condition
    minimum_slots = input(f'Filter out centers with availability less than ? Minimum {len(beneficiary_dtls)} : ')
    if minimum_slots:
        # Never allow fewer slots than the number of beneficiaries being booked.
        minimum_slots = int(minimum_slots) if int(minimum_slots) >= len(beneficiary_dtls) else len(beneficiary_dtls)
    else:
        minimum_slots = len(beneficiary_dtls)

    # Get refresh frequency
    refresh_freq = input('How often do you want to refresh the calendar (in seconds)? Default 15. Minimum 5. : ')
    refresh_freq = int(refresh_freq) if refresh_freq and int(refresh_freq) >= 5 else 15

    # Get search start date
    start_date = input(
        '\nSearch for next seven day starting from when?\nUse 1 for today, 2 for tomorrow, or provide a date in the format DD-MM-YYYY. Default 2: ')
    if not start_date:
        start_date = 2
    elif start_date in ['1', '2']:
        start_date = int(start_date)
    else:
        try:
            # Validate the custom date; fall back to "tomorrow" if unparsable.
            datetime.datetime.strptime(start_date, '%d-%m-%Y')
        except ValueError:
            print('Invalid Date! Proceeding with tomorrow.')
            start_date = 2

    # Get preference of Free/Paid option
    fee_type = get_fee_type_preference()

    print("\n=========== CAUTION! =========== CAUTION! CAUTION! =============== CAUTION! =======\n")
    print("===== BE CAREFUL WITH THIS OPTION! AUTO-BOOKING WILL BOOK THE FIRST AVAILABLE CENTRE, DATE, AND A RANDOM SLOT! =====")
    auto_book = input("Do you want to enable auto-booking? (yes-please or no) Default no: ")
    auto_book = 'no' if not auto_book else auto_book

    collected_details = {
        'beneficiary_dtls': beneficiary_dtls,
        'location_dtls': location_dtls,
        'search_option': search_option,
        'minimum_slots': minimum_slots,
        'refresh_freq': refresh_freq,
        'auto_book': auto_book,
        'start_date': start_date,
        'vaccine_type': vaccine_type,
        'fee_type': fee_type
    }
    return collected_details
def check_calendar_by_district(request_header, vaccine_type, location_dtls, start_date, minimum_slots, min_age_booking, fee_type, dose):
    """
    This function
    1. Takes details required to check vaccination calendar
    2. Filters result by minimum number of slots available
    3. Returns False if token is invalid
    4. Returns list of vaccination centers & slots if available
    """
    try:
        print('===================================================================================')
        today = datetime.datetime.today()

        base_url = CALENDAR_URL_DISTRICT
        if vaccine_type:
            # The API accepts an optional vaccine filter as a query parameter.
            base_url += f"&vaccine={vaccine_type}"

        options = []
        for location in location_dtls:
            resp = requests.get(base_url.format(location['district_id'], start_date), headers=request_header)
            if resp.status_code == 401:
                # Bearer token expired -- caller must regenerate it.
                print('TOKEN INVALID')
                return False
            elif resp.status_code == 200:
                resp = resp.json()
                if 'centers' in resp:
                    print(f"Centers available in {location['district_name']} from {start_date} as of {today.strftime('%Y-%m-%d %H:%M:%S')}: {len(resp['centers'])}")
                    options += viable_options(resp, minimum_slots, min_age_booking, fee_type, dose)
            else:
                # Any other status (rate limit, server error) is silently skipped.
                pass

        # Audible alert (two short beeps) for every district that has hits.
        for location in location_dtls:
            if location['district_name'] in [option['district'] for option in options]:
                for _ in range(2):
                    beep(location['alert_freq'], 150)
        return options

    except Exception as e:
        print(str(e))
        beep(WARNING_BEEP_DURATION[0], WARNING_BEEP_DURATION[1])
def check_calendar_by_pincode(request_header, vaccine_type, location_dtls, start_date, minimum_slots, min_age_booking, fee_type, dose):
    """
    This function
    1. Takes details required to check vaccination calendar
    2. Filters result by minimum number of slots available
    3. Returns False if token is invalid
    4. Returns list of vaccination centers & slots if available
    """
    try:
        print('===================================================================================')
        today = datetime.datetime.today()

        base_url = CALENDAR_URL_PINCODE
        if vaccine_type:
            # The API accepts an optional vaccine filter as a query parameter.
            base_url += f"&vaccine={vaccine_type}"

        options = []
        for location in location_dtls:
            resp = requests.get(base_url.format(location['pincode'], start_date), headers=request_header)
            if resp.status_code == 401:
                # Bearer token expired -- caller must regenerate it.
                print('TOKEN INVALID')
                return False
            elif resp.status_code == 200:
                resp = resp.json()
                if 'centers' in resp:
                    print(f"Centers available in {location['pincode']} from {start_date} as of {today.strftime('%Y-%m-%d %H:%M:%S')}: {len(resp['centers'])}")
                    options += viable_options(resp, minimum_slots, min_age_booking, fee_type, dose)
            else:
                # Any other status (rate limit, server error) is silently skipped.
                pass

        # Audible alert (two short beeps) for every pincode that has hits.
        for location in location_dtls:
            if int(location['pincode']) in [option['pincode'] for option in options]:
                for _ in range(2):
                    beep(location['alert_freq'], 150)
        return options

    except Exception as e:
        print(str(e))
        beep(WARNING_BEEP_DURATION[0], WARNING_BEEP_DURATION[1])
def generate_captcha(request_header):
    """Fetch a captcha challenge from CoWIN and return the user's answer.

    On HTTP 200 the JSON payload is handed to captcha_builder; returns
    None for any other status code.
    """
    print('================================= GETTING CAPTCHA ==================================================')
    resp = requests.post(CAPTCHA_URL, headers=request_header)
    print(f'Captcha Response Code: {resp.status_code}')

    if resp.status_code == 200:
        return captcha_builder(resp.json())
def book_appointment(request_header, details):
    """
    This function
    1. Takes details in json format
    2. Attempts to book an appointment using the details
    3. Returns True or False depending on Token Validity
    """
    try:
        valid_captcha = True
        # Loop until booked / token invalid / non-retryable response;
        # each attempt needs a freshly solved captcha.
        while valid_captcha:
            captcha = generate_captcha(request_header)
            details['captcha'] = captcha

            print('================================= ATTEMPTING BOOKING ==================================================')
            resp = requests.post(BOOKING_URL, headers=request_header, json=details)
            print(f'Booking Response Code: {resp.status_code}')
            print(f'Booking Response : {resp.text}')

            if resp.status_code == 401:
                # Bearer token expired -- caller must regenerate it.
                print('TOKEN INVALID')
                return False

            elif resp.status_code == 200:
                # Success: celebrate and stop the whole program.
                beep(WARNING_BEEP_DURATION[0], WARNING_BEEP_DURATION[1])
                print('##############    BOOKED!  ############################    BOOKED!  ##############')
                print("                        Hey, Hey, Hey! It's your lucky day!                       ")
                print('\nPress any key thrice to exit program.')
                os.system("pause")
                os.system("pause")
                os.system("pause")
                sys.exit()

            elif resp.status_code == 400:
                # 400 usually means a wrong captcha -- retry with a new one.
                print(f'Response: {resp.status_code} : {resp.text}')
                pass

            else:
                print(f'Response: {resp.status_code} : {resp.text}')
                return True

    except Exception as e:
        print(str(e))
        beep(WARNING_BEEP_DURATION[0], WARNING_BEEP_DURATION[1])
def check_and_book(request_header, beneficiary_dtls, location_dtls, search_option, **kwargs):
    """
    This function
    1. Checks the vaccination calendar for available slots,
    2. Lists all viable options,
    3. Takes user's choice of vaccination center and slot,
    4. Calls function to book appointment, and
    5. Returns True or False depending on Token Validity
    """
    try:
        min_age_booking = get_min_age(beneficiary_dtls)

        minimum_slots = kwargs['min_slots']
        refresh_freq = kwargs['ref_freq']
        auto_book = kwargs['auto_book']
        start_date = kwargs['start_date']
        vaccine_type = kwargs['vaccine_type']
        fee_type = kwargs['fee_type']
        # Dose number is derived from the (uniform) vaccination status.
        dose = 2 if [beneficiary['status'] for beneficiary in beneficiary_dtls][0] == 'Partially Vaccinated' else 1

        # start_date of 1/2 are sentinels for today/tomorrow; anything else
        # is already a DD-MM-YYYY string.
        if isinstance(start_date, int) and start_date == 2:
            start_date = (datetime.datetime.today() + datetime.timedelta(days=1)).strftime("%d-%m-%Y")
        elif isinstance(start_date, int) and start_date == 1:
            start_date = datetime.datetime.today().strftime("%d-%m-%Y")
        else:
            pass

        if search_option == 2:
            options = check_calendar_by_district(request_header, vaccine_type, location_dtls, start_date,
                                                 minimum_slots, min_age_booking, fee_type, dose)
        else:
            options = check_calendar_by_pincode(request_header, vaccine_type, location_dtls, start_date,
                                                minimum_slots, min_age_booking, fee_type, dose)

        # A bool (False) from the calendar check means the token is invalid.
        if isinstance(options, bool):
            return False

        options = sorted(options,
                         key=lambda k: (k['district'].lower(), k['pincode'],
                                        k['name'].lower(),
                                        datetime.datetime.strptime(k['date'], "%d-%m-%Y"))
                         )

        # Deep copy so ids can be stripped for display without losing them.
        tmp_options = copy.deepcopy(options)
        if len(tmp_options) > 0:
            cleaned_options_for_display = []
            for item in tmp_options:
                item.pop('session_id', None)
                item.pop('center_id', None)
                cleaned_options_for_display.append(item)

            display_table(cleaned_options_for_display)
            if auto_book == 'yes-please':
                print("AUTO-BOOKING IS ENABLED. PROCEEDING WITH FIRST CENTRE, DATE, and RANDOM SLOT.")
                option = options[0]
                random_slot = random.randint(1, len(option['slots']))
                choice = f'1.{random_slot}'
            else:
                # NOTE(review): the interactive prompt (inputimeout) is
                # commented out in this fork; '1.4' (1st centre, 4th slot)
                # is hardcoded instead.
                choice = f'1.4'
                '''inputimeout(
                    prompt='----------> Wait 20 seconds for updated options OR \n----------> Enter a choice e.g: 1.4 for (1st center 4th slot): ',
                    timeout=20)'''
        else:
            # Nothing viable: count down to the next refresh; Ctrl+C skips
            # the wait and refreshes immediately.
            try:
                for i in range(refresh_freq, 0, -1):
                    msg = f"No viable options. Next update in {i} seconds. OR press 'Ctrl + C' to refresh now."
                    print(msg, end="\r", flush=True)
                    sys.stdout.flush()
                    time.sleep(1)
            except KeyboardInterrupt:
                pass
            choice = '.'

    except TimeoutOccurred:
        time.sleep(1)
        return True

    else:
        if choice == '.':
            # Sentinel: no selection this round; loop again in the caller.
            return True
        else:
            try:
                # choice is "<center>.<slot>", both 1-based.
                choice = choice.split('.')
                choice = [int(item) for item in choice]
                print(f'============> Got Choice: Center #{choice[0]}, Slot #{choice[1]}')

                new_req = {
                    'beneficiaries': [beneficiary['bref_id'] for beneficiary in beneficiary_dtls],
                    'dose': 2 if [beneficiary['status'] for beneficiary in beneficiary_dtls][0] == 'Partially Vaccinated' else 1,
                    'center_id' : options[choice[0] - 1]['center_id'],
                    'session_id': options[choice[0] - 1]['session_id'],
                    'slot' : options[choice[0] - 1]['slots'][choice[1] - 1]
                }
                print(f'Booking with info: {new_req}')
                return book_appointment(request_header, new_req)

            except IndexError:
                print("============> Invalid Option!")
                os.system("pause")
                pass
def get_vaccine_preference():
    """
    Ask which vaccine to filter by; returns the vaccine name or None
    for "no preference" (also the default for blank/invalid input).
    """
    print("It seems you're trying to find a slot for your first dose. Do you have a vaccine preference?")
    raw = input("Enter 0 for No Preference, 1 for COVISHIELD, 2 for COVAXIN, or 3 for SPUTNIK V. Default 0 : ")
    selection = int(raw) if raw and int(raw) in [0, 1, 2, 3] else 0
    return {1: 'COVISHIELD', 2: 'COVAXIN', 3: 'SPUTNIK V'}.get(selection)
def get_fee_type_preference():
    """
    Ask for a Free/Paid preference; returns the list of acceptable
    fee types (both, for blank/invalid input).
    """
    print("\nDo you have a fee type preference?")
    raw = input("Enter 0 for No Preference, 1 for Free Only, or 2 for Paid Only. Default 0 : ")
    selection = int(raw) if raw and int(raw) in [0, 1, 2] else 0
    if selection == 1:
        return ['Free']
    if selection == 2:
        return ['Paid']
    return ['Free', 'Paid']
def get_pincodes():
    """
    Prompt for comma-separated pincodes; each entry gets its own alert
    frequency so hits in different areas sound different.
    """
    raw = input("Enter comma separated pincodes to monitor: ")
    return [
        {'pincode': code, 'alert_freq': 440 + ((2 * idx) * 110)}
        for idx, code in enumerate(raw.split(','))
    ]
def get_districts(request_header):
    """
    This function
    1. Lists all states, prompts to select one,
    2. Lists all districts in that state, prompts to select required ones, and
    3. Returns the list of districts as list(dict)
    """
    states = requests.get('https://cdn-api.co-vin.in/api/v2/admin/location/states', headers=request_header)

    if states.status_code == 200:
        states = states.json()['states']

        refined_states = []
        for state in states:
            tmp = {'state': state['state_name']}
            refined_states.append(tmp)

        display_table(refined_states)
        # User picks one state by its 1-based table index.
        state = int(input('\nEnter State index: '))
        state_id = states[state - 1]['state_id']

        districts = requests.get(f'https://cdn-api.co-vin.in/api/v2/admin/location/districts/{state_id}', headers=request_header)

        if districts.status_code == 200:
            districts = districts.json()['districts']

            refined_districts = []
            for district in districts:
                tmp = {'district': district['district_name']}
                refined_districts.append(tmp)

            display_table(refined_districts)
            reqd_districts = input('\nEnter comma separated index numbers of districts to monitor : ')
            # Convert the 1-based table indices to 0-based list indices.
            districts_idx = [int(idx) - 1 for idx in reqd_districts.split(',')]
            reqd_districts = [{
                'district_id': item['district_id'],
                'district_name': item['district_name'],
                # Distinct beep frequency per selected district.
                'alert_freq': 440 + ((2 * idx) * 110)
            } for idx, item in enumerate(districts) if idx in districts_idx]

            print(f'Selected districts: ')
            display_table(reqd_districts)
            return reqd_districts

        else:
            print('Unable to fetch districts')
            print(districts.status_code)
            print(districts.text)
            os.system("pause")
            sys.exit(1)

    else:
        print('Unable to fetch states')
        print(states.status_code)
        print(states.text)
        os.system("pause")
        sys.exit(1)
def get_beneficiaries(request_header):
    """
    This function
    1. Fetches all beneficiaries registered under the mobile number,
    2. Prompts user to select the applicable beneficiaries, and
    3. Returns the list of beneficiaries as list(dict)
    """
    beneficiaries = requests.get(BENEFICIARIES_URL, headers=request_header)

    if beneficiaries.status_code == 200:
        beneficiaries = beneficiaries.json()['beneficiaries']

        refined_beneficiaries = []
        for beneficiary in beneficiaries:
            # The API returns only the birth year; derive an approximate age.
            beneficiary['age'] = datetime.datetime.today().year - int(beneficiary['birth_year'])

            tmp = {
                'bref_id': beneficiary['beneficiary_reference_id'],
                'name': beneficiary['name'],
                'vaccine': beneficiary['vaccine'],
                'age': beneficiary['age'],
                'status': beneficiary['vaccination_status']
            }
            refined_beneficiaries.append(tmp)

        display_table(refined_beneficiaries)
        print("""
################# IMPORTANT NOTES #################
# 1. While selecting beneficiaries, make sure that selected beneficiaries are all taking the same dose: either first OR second.
#    Please do no try to club together booking for first dose for one beneficiary and second dose for another beneficiary.
#
# 2. While selecting beneficiaries, also make sure that beneficiaries selected for second dose are all taking the same vaccine: COVISHIELD OR COVAXIN.
#    Please do no try to club together booking for beneficiary taking COVISHIELD with beneficiary taking COVAXIN.
#
# 3. If you're selecting multiple beneficiaries, make sure all are of the same age group (45+ or 18+) as defined by the govt.
#    Please do not try to club together booking for younger and older beneficiaries.
###################################################
""")
        reqd_beneficiaries = input('Enter comma separated index numbers of beneficiaries to book for : ')
        # Convert the 1-based table indices to 0-based list indices.
        beneficiary_idx = [int(idx) - 1 for idx in reqd_beneficiaries.split(',')]
        reqd_beneficiaries = [{
            'bref_id': item['beneficiary_reference_id'],
            'name': item['name'],
            'vaccine': item['vaccine'],
            'age': item['age'],
            'status': item['vaccination_status']
        } for idx, item in enumerate(beneficiaries) if idx in beneficiary_idx]

        print(f'Selected beneficiaries: ')
        display_table(reqd_beneficiaries)
        return reqd_beneficiaries

    else:
        print('Unable to fetch beneficiaries')
        print(beneficiaries.status_code)
        print(beneficiaries.text)
        os.system("pause")
        return []
def get_min_age(beneficiary_dtls):
    """
    Return the minimum age among the selected beneficiaries.

    :param beneficiary_dtls: list of beneficiary dicts (each with an 'age' key)
    :return: min_age:int
    """
    return min(item['age'] for item in beneficiary_dtls)
def generate_token_OTP(mobile, request_header):
    """
    This function generate OTP and returns a new token
    """
    if not mobile:
        print("Mobile number cannot be empty")
        os.system('pause')
        sys.exit()

    valid_token = False
    # Keep requesting/validating OTPs until a token is obtained or the
    # user declines a retry.
    while not valid_token:
        try:
            data = {"mobile": mobile,
                    "secret": "U2FsdGVkX1+z/4Nr9nta+2DrVJSv7KS6VoQUSQ1ZXYDx/CJUkWxFYG6P3iM/VW+6jLQ9RDQVzp/RcZ8kbT41xw=="
                    }
            txnId = requests.post(url=OTP_PRO_URL, json=data, headers=request_header)

            if txnId.status_code == 200:
                print(f"Successfully requested OTP for mobile number {mobile} at {datetime.datetime.today()}..")
                txnId = txnId.json()['txnId']

                OTP = input("Enter OTP (If this takes more than 2 minutes, press Enter to retry): ")
                if OTP:
                    # The API expects the SHA-256 hex digest of the OTP, not the raw digits.
                    data = {"otp": sha256(str(OTP).encode('utf-8')).hexdigest(), "txnId": txnId}
                    print(f"Validating OTP..")

                    token = requests.post(url='https://cdn-api.co-vin.in/api/v2/auth/validateMobileOtp', json=data,
                                          headers=request_header)
                    if token.status_code == 200:
                        token = token.json()['token']
                        print(f'Token Generated: {token}')
                        valid_token = True
                        return token

                    else:
                        print('Unable to Validate OTP')
                        print(f"Response: {token.text}")

                        retry = input(f"Retry with {mobile} ? (y/n Default y): ")
                        retry = retry if retry else 'y'
                        if retry == 'y':
                            pass
                        else:
                            sys.exit()

            else:
                print('Unable to Generate OTP')
                print(txnId.status_code, txnId.text)

                retry = input(f"Retry with {mobile} ? (y/n Default y): ")
                retry = retry if retry else 'y'
                if retry == 'y':
                    pass
                else:
                    sys.exit()

        except Exception as e:
            # Network or parsing error: log it and loop for another attempt.
            print(str(e))
e7a3ad9ff35c51f3d2b85fbe5f39509abf2f844b | 117 | py | Python | test.py | fanieblesat/proyectoMintic | 18e25caf4a077a67c0e83d82757dfdc167ef61f6 | [
"MIT"
] | null | null | null | test.py | fanieblesat/proyectoMintic | 18e25caf4a077a67c0e83d82757dfdc167ef61f6 | [
"MIT"
] | null | null | null | test.py | fanieblesat/proyectoMintic | 18e25caf4a077a67c0e83d82757dfdc167ef61f6 | [
"MIT"
] | null | null | null | from db import get_db, close_db
app.app_context(app=db)
close_db()
db=get_db()
db.execute('SELECT * FROM user')
| 19.5 | 33 | 0.717949 |
22616b8f01d1a9e3eb9e82698342969fb14a1756 | 2,142 | py | Python | pyrsss/gnss/fetch.py | grawe/pyrsss | 31fd88734b00f814e7aaa5829c4ac49c7bf53563 | [
"MIT"
] | 7 | 2016-12-27T08:00:54.000Z | 2021-12-16T06:55:16.000Z | pyrsss/gnss/fetch.py | grawe/pyrsss | 31fd88734b00f814e7aaa5829c4ac49c7bf53563 | [
"MIT"
] | 1 | 2017-01-31T20:36:08.000Z | 2017-01-31T20:36:08.000Z | pyrsss/gnss/fetch.py | grawe/pyrsss | 31fd88734b00f814e7aaa5829c4ac49c7bf53563 | [
"MIT"
] | 6 | 2016-04-08T16:25:14.000Z | 2021-05-02T12:05:16.000Z | import logging
import os
from ftplib import FTP
from contextlib import closing
from collections import defaultdict
from ..util.path import touch_path, decompress
logger = logging.getLogger('pyrsss.gps.fetch')
TEMPLATE_MAP = {'CORS': {'obs': ('geodesy.noaa.gov',
'/cors/rinex/{date:%Y}/{date:%j}/{stn}/{stn}{date:%j}0.{date:%y}',
'o.gz')}}
def fetch(source,
dates,
stns,
rinex_type='obs',
template_map=TEMPLATE_MAP,
local_path='./',
local_template='{stn}{date:%j}0.{date:%y}{suffix}'):
"""
???
"""
server, template, suffix = TEMPLATE_MAP[source][rinex_type]
fname_map = defaultdict(dict)
logger.info('opening connection to {}'.format(server))
with closing(FTP(server)) as ftp:
ftp.login()
for date in dates:
for stn in stns:
remote_fname = template.format(date=date, stn=stn) + suffix
local_fname = os.path.join(local_path.format(date=date, stn=stn, suffix=suffix),
local_template.format(date=date, stn=stn, suffix=suffix))
logger.info('fetching {} and storing to {}'.format(remote_fname,
local_fname))
touch_path(os.path.dirname(local_fname))
with open(local_fname, 'w') as fid:
try:
ftp.retrbinary('RETR {}'.format(remote_fname),
fid.write)
fname_map[date][stn] = local_fname
except Exception as e:
logger.warning('could not fetch {} ({}) --- skipping'.format(remote_fname,
e))
os.remove(local_fname)
continue
for date in sorted(fname_map):
for stn in sorted(fname_map[date]):
fname_map[date][stn] = decompress(fname_map[date][stn])
return fname_map
| 38.945455 | 100 | 0.500934 |
2b7b9e699369b1ed797b05949d56f49be9554a53 | 2,403 | py | Python | test/integration/069_build_test/test_build.py | jankytara2/dbt | 3f4069ab6d4d5b3fc34f8fe785761b5617357b0f | [
"Apache-2.0"
] | null | null | null | test/integration/069_build_test/test_build.py | jankytara2/dbt | 3f4069ab6d4d5b3fc34f8fe785761b5617357b0f | [
"Apache-2.0"
] | null | null | null | test/integration/069_build_test/test_build.py | jankytara2/dbt | 3f4069ab6d4d5b3fc34f8fe785761b5617357b0f | [
"Apache-2.0"
] | null | null | null | from test.integration.base import DBTIntegrationTest, use_profile
import yaml
class TestBuildBase(DBTIntegrationTest):
@property
def schema(self):
return "build_test_069"
@property
def project_config(self):
return {
"config-version": 2,
"snapshot-paths": ["snapshots"],
"data-paths": ["data"],
"seeds": {
"quote_columns": False,
},
}
def build(self, expect_pass=True, extra_args=None, **kwargs):
args = ["build"]
if kwargs:
args.extend(("--args", yaml.safe_dump(kwargs)))
if extra_args:
args.extend(extra_args)
return self.run_dbt(args, expect_pass=expect_pass)
class TestPassingBuild(TestBuildBase):
    """A fully passing project should build without errors."""

    @property
    def models(self):
        return "models"

    @use_profile("postgres")
    def test__postgres_build_happy_path(self):
        self.build()
class TestFailingBuild(TestBuildBase):
    """An erroring model should skip its downstream nodes."""

    @property
    def models(self):
        return "models-failing"

    @use_profile("postgres")
    def test__postgres_build_happy_path(self):
        # NOTE(review): method name kept for CI continuity, though this
        # scenario deliberately expects a failing build.
        results = self.build(expect_pass=False)
        self.assertEqual(len(results), 13)

        expected = ['error'] * 1 + ['skipped'] * 5 + ['pass'] * 2 + ['success'] * 5
        actual = [r.status for r in results]
        self.assertEqual(sorted(actual), sorted(expected))
class TestFailingTestsBuild(TestBuildBase):
    """A failing test should skip everything downstream of its model."""

    @property
    def models(self):
        return "tests-failing"

    @use_profile("postgres")
    def test__postgres_failing_test_skips_downstream(self):
        results = self.build(expect_pass=False)
        self.assertEqual(len(results), 13)

        expected = ['fail'] + ['skipped'] * 6 + ['pass'] * 2 + ['success'] * 4
        actual = [str(r.status) for r in results]
        self.assertEqual(sorted(actual), sorted(expected))
class TestCircularRelationshipTestsBuild(TestBuildBase):
    """Mutually referencing relationship tests must not form a cycle."""

    @property
    def models(self):
        return "models-circular-relationship"

    @use_profile("postgres")
    def test__postgres_circular_relationship_test_success(self):
        """ Ensure that tests that refer to each other's model don't create
        a circular dependency. """
        results = self.build()

        expected = ['success'] * 7 + ['pass'] * 2
        actual = [r.status for r in results]
        self.assertEqual(sorted(actual), sorted(expected))
| 29.304878 | 75 | 0.62963 |
b38dbdcc54959f07bad0d0ca997a29c7677fbf79 | 8,818 | py | Python | qtconsole/comms.py | opencor/qtconsole | a8c6c23aeaad7d4295d9d37117dd7e7a76a5d1bf | [
"BSD-3-Clause"
] | 1 | 2020-03-06T14:29:12.000Z | 2020-03-06T14:29:12.000Z | qtconsole/comms.py | opencor/qtconsole | a8c6c23aeaad7d4295d9d37117dd7e7a76a5d1bf | [
"BSD-3-Clause"
] | 3 | 2020-03-24T17:46:11.000Z | 2021-08-23T20:23:16.000Z | qtconsole/comms.py | opencor/qtconsole | a8c6c23aeaad7d4295d9d37117dd7e7a76a5d1bf | [
"BSD-3-Clause"
] | null | null | null | """
Based on
https://github.com/jupyter/notebook/blob/master/notebook/static/services/kernels/comm.js
https://github.com/ipython/ipykernel/blob/master/ipykernel/comm/manager.py
https://github.com/ipython/ipykernel/blob/master/ipykernel/comm/comm.py
Which are distributed under the terms of the Modified BSD License.
"""
import logging
from traitlets.config import LoggingConfigurable
from ipython_genutils.importstring import import_item
from ipython_genutils.py3compat import string_types
import uuid
from qtconsole.qt import QtCore
from qtconsole.util import MetaQObjectHasTraits, SuperQObject
class CommManager(MetaQObjectHasTraits(
        'NewBase', (LoggingConfigurable, SuperQObject), {})):
    """
    Manager for Comms in the Frontend

    Tracks open comms by id, registered target callables by name, and
    dispatches comm_* iopub messages from the kernel to them.
    """

    def __init__(self, kernel_client, *args, **kwargs):
        super(CommManager, self).__init__(*args, **kwargs)
        self.comms = {}    # comm_id -> Comm instance
        self.targets = {}  # target_name -> callable invoked on comm_open
        if kernel_client:
            self.init_kernel_client(kernel_client)

    def init_kernel_client(self, kernel_client):
        """
        connect the kernel, and register message handlers
        """
        self.kernel_client = kernel_client
        kernel_client.iopub_channel.message_received.connect(self._dispatch)

    @QtCore.Slot(object)
    def _dispatch(self, msg):
        """Dispatch messages"""
        msg_type = msg['header']['msg_type']
        handled_msg_types = ['comm_open', 'comm_msg', 'comm_close']
        if msg_type in handled_msg_types:
            # Route to the same-named handler method (comm_open/comm_msg/comm_close).
            getattr(self, msg_type)(msg)

    def new_comm(self, target_name, data=None, metadata=None,
                 comm_id=None, buffers=None):
        """
        Create a new Comm, register it, and open its Kernel-side counterpart
        Mimics the auto-registration in `Comm.__init__` in the Jupyter Comm.

        argument comm_id is optional
        """
        comm = Comm(target_name, self.kernel_client, comm_id)
        self.register_comm(comm)
        try:
            comm.open(data, metadata, buffers)
        except Exception:
            # Keep the registry consistent if opening fails.
            self.unregister_comm(comm)
            raise
        return comm

    def register_target(self, target_name, f):
        """Register a callable f for a given target name

        f will be called with two arguments when a comm_open message is
        received with `target`:

        - the Comm instance
        - the `comm_open` message itself.

        f can be a Python callable or an import string for one.
        """
        if isinstance(f, string_types):
            # Resolve "pkg.module.func" import strings to the callable.
            f = import_item(f)

        self.targets[target_name] = f

    def unregister_target(self, target_name, f):
        """Unregister a callable registered with register_target"""
        # `f` is accepted for API symmetry but is not used here.
        return self.targets.pop(target_name)

    def register_comm(self, comm):
        """Register a new comm"""
        comm_id = comm.comm_id
        comm.kernel_client = self.kernel_client
        self.comms[comm_id] = comm
        comm.sig_is_closing.connect(self.unregister_comm)
        return comm_id

    @QtCore.Slot(object)
    def unregister_comm(self, comm):
        """Unregister a comm, and close its counterpart."""
        # unlike get_comm, this should raise a KeyError
        comm.sig_is_closing.disconnect(self.unregister_comm)
        self.comms.pop(comm.comm_id)

    def get_comm(self, comm_id):
        """Get a comm with a particular id

        Returns the comm if found, otherwise None.

        This will not raise an error,
        it will log messages if the comm cannot be found.
        """
        try:
            return self.comms[comm_id]
        except KeyError:
            self.log.warning("No such comm: %s", comm_id)
            # don't create the list of keys if debug messages aren't enabled
            if self.log.isEnabledFor(logging.DEBUG):
                self.log.debug("Current comms: %s", list(self.comms.keys()))

    # comm message handlers
    def comm_open(self, msg):
        """Handler for comm_open messages"""
        content = msg['content']
        comm_id = content['comm_id']
        target_name = content['target_name']
        f = self.targets.get(target_name, None)

        comm = Comm(target_name, self.kernel_client, comm_id)
        self.register_comm(comm)

        if f is None:
            self.log.error("No such comm target registered: %s", target_name)
        else:
            try:
                f(comm, msg)
                return
            except Exception:
                self.log.error("Exception opening comm with target: %s",
                               target_name, exc_info=True)

        # Failure.
        try:
            comm.close()
        except Exception:
            self.log.error(
                "Could not close comm during `comm_open` failure "
                "clean-up. The comm may not have been opened yet.""",
                exc_info=True)

    def comm_close(self, msg):
        """Handler for comm_close messages"""
        content = msg['content']
        comm_id = content['comm_id']
        comm = self.get_comm(comm_id)
        if comm is None:
            return

        self.unregister_comm(comm)

        try:
            comm.handle_close(msg)
        except Exception:
            self.log.error('Exception in comm_close for %s', comm_id,
                           exc_info=True)

    def comm_msg(self, msg):
        """Handler for comm_msg messages"""
        content = msg['content']
        comm_id = content['comm_id']
        comm = self.get_comm(comm_id)
        if comm is None:
            return
        try:
            comm.handle_msg(msg)
        except Exception:
            self.log.error('Exception in comm_msg for %s', comm_id,
                           exc_info=True)
class Comm(MetaQObjectHasTraits(
        'NewBase', (LoggingConfigurable, SuperQObject), {})):
    """Client-side comm: one half of a bidirectional channel to a
    kernel-side comm, identified by ``comm_id``.
    """

    # Emitted (with self) just before the close message is sent, so the
    # owning CommManager can drop its reference.
    sig_is_closing = QtCore.Signal(object)

    def __init__(self, target_name, kernel_client, comm_id=None,
                 msg_callback=None, close_callback=None):
        """Create a new comm.  ``open`` must be called before use."""
        super(Comm, self).__init__(target_name=target_name)
        self.target_name = target_name
        self.kernel_client = kernel_client
        # Generate an id when the caller did not supply one.
        self.comm_id = uuid.uuid1().hex if comm_id is None else comm_id
        self._msg_callback = msg_callback
        self._close_callback = close_callback
        self._send_channel = self.kernel_client.shell_channel

    def _send_msg(self, msg_type, content, data, metadata, buffers):
        """Build a comm message and send it on the shell channel."""
        content = {} if content is None else content
        content['comm_id'] = self.comm_id
        content['data'] = {} if data is None else data
        msg = self.kernel_client.session.msg(
            msg_type, content, metadata=metadata)
        if buffers:
            msg['buffers'] = buffers
        return self._send_channel.send(msg)

    # -- outgoing messages ---------------------------------------------

    def open(self, data=None, metadata=None, buffers=None):
        """Open the kernel-side version of this comm"""
        return self._send_msg(
            'comm_open', {'target_name': self.target_name},
            data, metadata, buffers)

    def send(self, data=None, metadata=None, buffers=None):
        """Send a message to the kernel-side version of this comm"""
        return self._send_msg('comm_msg', {}, data, metadata, buffers)

    def close(self, data=None, metadata=None, buffers=None):
        """Close the kernel-side version of this comm"""
        # Announce first, so the manager unregisters us before the
        # message goes out.
        self.sig_is_closing.emit(self)
        return self._send_msg('comm_close', {}, data, metadata, buffers)

    # -- incoming-message callbacks ------------------------------------

    def on_msg(self, callback):
        """Register a callback for comm_msg

        Will be called with the `data` of any comm_msg messages.
        Call `on_msg(None)` to disable an existing callback.
        """
        self._msg_callback = callback

    def on_close(self, callback):
        """Register a callback for comm_close

        Will be called with the `data` of the close message.
        Call `on_close(None)` to disable an existing callback.
        """
        self._close_callback = callback

    # -- incoming-message handlers -------------------------------------

    def handle_msg(self, msg):
        """Handle a comm_msg message"""
        self.log.debug("handle_msg[%s](%s)", self.comm_id, msg)
        if self._msg_callback:
            return self._msg_callback(msg)

    def handle_close(self, msg):
        """Handle a comm_close message"""
        self.log.debug("handle_close[%s](%s)", self.comm_id, msg)
        if self._close_callback:
            return self._close_callback(msg)
__all__ = ['CommManager']
| 32.538745 | 88 | 0.614198 |
8e36d7924298a95778cd8dfe12a8cacf74187cbd | 3,666 | py | Python | Client/migrations/0001_initial.py | shubh242/E-Learning-App | 07320b1f1aba31602c3056398ffae16f2fa6f9ae | [
"MIT"
] | null | null | null | Client/migrations/0001_initial.py | shubh242/E-Learning-App | 07320b1f1aba31602c3056398ffae16f2fa6f9ae | [
"MIT"
] | null | null | null | Client/migrations/0001_initial.py | shubh242/E-Learning-App | 07320b1f1aba31602c3056398ffae16f2fa6f9ae | [
"MIT"
] | null | null | null | # Generated by Django 3.1.2 on 2020-10-09 17:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the Client app.

    Creates Course, Subject, Teacher, Student, Scores, Rating and Parent,
    then wires up the Course M2M relations last (the AddField operations at
    the end) so Course can be created before Subject and Teacher exist.
    """
    initial = True
    dependencies = [
        # Depends on whichever user model the project has configured.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Course',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Subject',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField(max_length=30)),
                ('description', models.TextField(blank=True, max_length=300)),
            ],
        ),
        migrations.CreateModel(
            name='Teacher',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField(max_length=100)),
                ('branch', models.TextField(blank=True, max_length=100)),
                ('education', models.TextField(blank=True, max_length=300)),
                ('subject', models.ManyToManyField(blank=True, to='Client.Subject')),
            ],
        ),
        migrations.CreateModel(
            name='Student',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField(max_length=100)),
                ('college', models.TextField(max_length=100)),
                ('joined', models.DateTimeField(auto_now_add=True)),
                ('course', models.ManyToManyField(blank=True, to='Client.Course')),
            ],
        ),
        migrations.CreateModel(
            name='Scores',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): blank=True without null=True on an
                # IntegerField still requires a DB value -- confirm intended.
                ('score', models.IntegerField(blank=True)),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Client.student')),
            ],
        ),
        migrations.CreateModel(
            name='Rating',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('score', models.FloatField(blank=True)),
                ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Client.course')),
                ('users', models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Parent',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField(max_length=100)),
                ('children', models.ManyToManyField(blank=True, to='Client.Student')),
            ],
        ),
        # Added after creation to break the Course -> Subject/Teacher
        # forward references.
        migrations.AddField(
            model_name='course',
            name='subject',
            field=models.ManyToManyField(blank=True, to='Client.Subject'),
        ),
        migrations.AddField(
            model_name='course',
            name='teacher',
            field=models.ManyToManyField(blank=True, to='Client.Teacher'),
        ),
    ]
| 41.191011 | 114 | 0.564648 |
b8121d9c90c9226f8ae90dbf5743b9e5b92b0ba1 | 7,907 | py | Python | beetsplug/mbsync.py | seldridge/beets | 7438cd50f63cd69a409d9bd00faf6d932872e205 | [
"MIT"
] | null | null | null | beetsplug/mbsync.py | seldridge/beets | 7438cd50f63cd69a409d9bd00faf6d932872e205 | [
"MIT"
] | null | null | null | beetsplug/mbsync.py | seldridge/beets | 7438cd50f63cd69a409d9bd00faf6d932872e205 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Jakob Schnitzer.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Update library's tags using MusicBrainz.
"""
from __future__ import division, absolute_import, print_function
from beets.plugins import BeetsPlugin
from beets import autotag, library, ui, util
from beets.autotag import hooks
from collections import defaultdict
import re
MBID_REGEX = r"(\d|\w){8}-(\d|\w){4}-(\d|\w){4}-(\d|\w){4}-(\d|\w){12}"
def apply_item_changes(lib, item, move, pretend, write):
    """Store, move and write the item according to the arguments.

    With ``pretend`` set, nothing is persisted at all.
    """
    if pretend:
        return
    # Only move items that actually live inside the library directory.
    if move and lib.directory in util.ancestry(item.path):
        item.move(with_album=False)
    if write:
        item.try_write()
    item.store()
class MBSyncPlugin(BeetsPlugin):
    """Plugin providing the ``mbsync`` command, which refreshes library
    metadata from MusicBrainz using the stored release/recording IDs.
    """

    def __init__(self):
        super(MBSyncPlugin, self).__init__()

    def commands(self):
        """Declare the ``mbsync`` CLI subcommand and its options."""
        cmd = ui.Subcommand('mbsync',
                            help=u'update metadata from musicbrainz')
        cmd.parser.add_option(
            u'-p', u'--pretend', action='store_true',
            help=u'show all changes but do nothing')
        cmd.parser.add_option(
            u'-m', u'--move', action='store_true', dest='move',
            help=u"move files in the library directory")
        cmd.parser.add_option(
            u'-M', u'--nomove', action='store_false', dest='move',
            help=u"don't move files in library")
        cmd.parser.add_option(
            u'-W', u'--nowrite', action='store_false',
            default=None, dest='write',
            help=u"don't write updated metadata to files")
        cmd.parser.add_format_option()
        cmd.func = self.func
        return [cmd]

    def func(self, lib, opts, args):
        """Command handler for the mbsync function.
        """
        move = ui.should_move(opts.move)
        pretend = opts.pretend
        write = ui.should_write(opts.write)
        query = ui.decargs(args)

        self.singletons(lib, query, move, pretend, write)
        self.albums(lib, query, move, pretend, write)

    def singletons(self, lib, query, move, pretend, write):
        """Retrieve and apply info from the autotagger for items matched by
        query.
        """
        for item in lib.items(query + [u'singleton:true']):
            item_formatted = format(item)
            if not item.mb_trackid:
                self._log.info(u'Skipping singleton with no mb_trackid: {0}',
                               item_formatted)
                continue

            # Do we have a valid MusicBrainz track ID?
            if not re.match(MBID_REGEX, item.mb_trackid):
                self._log.info(u'Skipping singleton with invalid mb_trackid:' +
                               ' {0}', item_formatted)
                continue

            # Get the MusicBrainz recording info.
            track_info = hooks.track_for_mbid(item.mb_trackid)
            if not track_info:
                # Bug fix: the second placeholder was {0}, which repeated the
                # track id instead of naming the track.
                self._log.info(u'Recording ID not found: {0} for track {1}',
                               item.mb_trackid,
                               item_formatted)
                continue

            # Apply.
            with lib.transaction():
                autotag.apply_item_metadata(item, track_info)
                apply_item_changes(lib, item, move, pretend, write)

    def albums(self, lib, query, move, pretend, write):
        """Retrieve and apply info from the autotagger for albums matched by
        query and their items.
        """
        # Process matching albums.
        for a in lib.albums(query):
            album_formatted = format(a)
            if not a.mb_albumid:
                self._log.info(u'Skipping album with no mb_albumid: {0}',
                               album_formatted)
                continue

            items = list(a.items())
            # Robustness fix: an empty album would crash below when picking
            # items[0]; there is nothing to sync anyway.
            if not items:
                continue

            # Do we have a valid MusicBrainz album ID?
            if not re.match(MBID_REGEX, a.mb_albumid):
                self._log.info(u'Skipping album with invalid mb_albumid: {0}',
                               album_formatted)
                continue

            # Get the MusicBrainz album information.
            album_info = hooks.album_for_mbid(a.mb_albumid)
            if not album_info:
                self._log.info(u'Release ID {0} not found for album {1}',
                               a.mb_albumid,
                               album_formatted)
                continue

            # Map release track and recording MBIDs to their information.
            # Recordings can appear multiple times on a release, so each MBID
            # maps to a list of TrackInfo objects.
            releasetrack_index = dict()
            track_index = defaultdict(list)
            for track_info in album_info.tracks:
                releasetrack_index[track_info.release_track_id] = track_info
                track_index[track_info.track_id].append(track_info)

            # Construct a track mapping according to MBIDs (release track MBIDs
            # first, if available, and recording MBIDs otherwise). This should
            # work for albums that have missing or extra tracks.
            mapping = {}
            for item in items:
                if item.mb_releasetrackid and \
                        item.mb_releasetrackid in releasetrack_index:
                    mapping[item] = releasetrack_index[item.mb_releasetrackid]
                else:
                    candidates = track_index[item.mb_trackid]
                    if len(candidates) == 1:
                        mapping[item] = candidates[0]
                    else:
                        # If there are multiple copies of a recording, they are
                        # disambiguated using their disc and track number.
                        for c in candidates:
                            if (c.medium_index == item.track and
                                    c.medium == item.disc):
                                mapping[item] = c
                                break

            # Apply.
            self._log.debug(u'applying changes to {}', album_formatted)
            with lib.transaction():
                autotag.apply_metadata(album_info, mapping)
                changed = False
                # Find any changed item to apply MusicBrainz changes to album.
                any_changed_item = items[0]
                for item in items:
                    item_changed = ui.show_model_changes(item)
                    changed |= item_changed
                    if item_changed:
                        any_changed_item = item
                        apply_item_changes(lib, item, move, pretend, write)

                if not changed:
                    # No change to any item.
                    continue

                if not pretend:
                    # Update album structure to reflect an item in it.
                    for key in library.Album.item_keys:
                        a[key] = any_changed_item[key]
                    a.store()

                    # Move album art (and any inconsistent items).
                    if move and lib.directory in util.ancestry(items[0].path):
                        self._log.debug(u'moving album {0}', album_formatted)
                        a.move()
| 40.757732 | 79 | 0.558113 |
371de2e602f00d055d4533452593978717501d99 | 411 | py | Python | modeltuts/wsgi.py | anandkumar190/django-code | c47fbc5f55f7d5961c29a0e65047573013c995dc | [
"MIT"
] | null | null | null | modeltuts/wsgi.py | anandkumar190/django-code | c47fbc5f55f7d5961c29a0e65047573013c995dc | [
"MIT"
] | null | null | null | modeltuts/wsgi.py | anandkumar190/django-code | c47fbc5f55f7d5961c29a0e65047573013c995dc | [
"MIT"
] | 1 | 2020-10-05T04:46:11.000Z | 2020-10-05T04:46:11.000Z | """
WSGI config for modeltuts project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Fall back to the project's settings module unless one is already set.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'modeltuts.settings')
# Module-level WSGI callable picked up by servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| 24.176471 | 79 | 0.756691 |
223ba88f801c7f5470c818744abdad85c95e335e | 5,051 | py | Python | src/ggrc/migrations/versions/20190227080937_7769fdc16fef_migrate_html_to_markdown_for_controls.py | MikalaiMikalalai/ggrc-core | f0f83b3638574bb64de474f3b70ed27436ca812a | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-01-12T23:46:00.000Z | 2019-01-12T23:46:00.000Z | src/ggrc/migrations/versions/20190227080937_7769fdc16fef_migrate_html_to_markdown_for_controls.py | MikalaiMikalalai/ggrc-core | f0f83b3638574bb64de474f3b70ed27436ca812a | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/ggrc/migrations/versions/20190227080937_7769fdc16fef_migrate_html_to_markdown_for_controls.py | MikalaiMikalalai/ggrc-core | f0f83b3638574bb64de474f3b70ed27436ca812a | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Migrate html to markdown for controls
Create Date: 2019-02-27 08:09:37.653576
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import datetime
import sqlalchemy as sa
from alembic import op
from ggrc.migrations import utils
from ggrc.migrations.utils import html_markdown_parser
# revision identifiers, used by Alembic.
revision = '7769fdc16fef'
down_revision = '57b14cb4a7b4'
REGEX_HTML = r"(<[a-zA-Z]+>)+|(<\/[a-zA-Z]+>)+"
def parse_html(value):
    """Convert an HTML fragment to its markdown equivalent."""
    converter = html_markdown_parser.HTML2MarkdownParser()
    converter.feed(value)
    return converter.get_data()
def update_comments(connection):
    """Parse comments from html to markdown."""
    # Select comments related to Controls from either side of the
    # relationship; the REGEXP filter keeps only descriptions that still
    # contain HTML tags.
    comments_data = connection.execute(
        sa.text("""
            SELECT c.id, c.description
            FROM comments as c
            JOIN relationships as r
            ON r.source_type = "Comment" and r.source_id = c.id
            and r.destination_type = "Control"
            WHERE c.description REGEXP :reg_exp
            UNION
            SELECT c.id, c.description
            FROM comments as c
            JOIN relationships as r
            ON r.destination_type = "Comment" and r.destination_id = c.id
            and r.source_type = "Control"
            where c.description REGEXP :reg_exp
        """),
        reg_exp=REGEX_HTML
    ).fetchall()
    comments_ids = [c_id for c_id, _ in comments_data]
    comments_table = sa.sql.table(
        'comments',
        sa.Column('id', sa.Integer()),
        sa.Column('description', sa.Text, nullable=True),
        sa.Column('updated_at', sa.DateTime, nullable=False),
    )
    # One UPDATE per comment: rewrite the description as markdown and bump
    # updated_at so the change is visible.
    for comment_id, description in comments_data:
        op.execute(comments_table.update().values(
            description=parse_html(description),
            updated_at=datetime.datetime.utcnow(),
        ).where(comments_table.c.id == comment_id))
    # Queue the touched comments for revision creation.
    # NOTE(review): called even when comments_ids is empty -- presumably the
    # bulk helper tolerates an empty list; confirm.
    utils.add_to_objects_without_revisions_bulk(
        connection, comments_ids, "Comment", "modified",
    )
def update_control_cavs(connection):
    """Parse cavs from html to markdown."""
    # Custom-attribute values belonging to controls that still contain HTML.
    cavs_data = connection.execute(
        sa.text("""
            select cav.id, cav.attribute_value, cav.attributable_id
            from custom_attribute_values cav
            join custom_attribute_definitions cad
            on cad.id = cav.custom_attribute_id
            where cad.definition_type = "control"
            and attribute_value REGEXP :reg_exp
        """),
        reg_exp=REGEX_HTML
    ).fetchall()
    # data rows are (cav_id, attribute_value, attributable_id).
    controls_ids = {data[2] for data in cavs_data}
    cavs_ids = {data[0] for data in cavs_data}
    cavs_table = sa.sql.table(
        'custom_attribute_values',
        sa.Column('id', sa.Integer()),
        sa.Column('attribute_value', sa.Text, nullable=False),
        sa.Column('updated_at', sa.DateTime, nullable=False),
    )
    for cav_id, attribute_value, _ in cavs_data:
        op.execute(cavs_table.update().values(
            attribute_value=parse_html(attribute_value),
            updated_at=datetime.datetime.utcnow(),
        ).where(cavs_table.c.id == cav_id))
    # Queue the touched CAVs for revision creation.
    utils.add_to_objects_without_revisions_bulk(
        connection, cavs_ids, "CustomAttributeValue", "modified",
    )
    # Returned so the caller can also queue the owning Controls.
    return controls_ids
def update_control_attr(connection):
    """Parse Control attributes from html to markdown."""
    # Controls where any of the four text attributes still contains HTML.
    controls_data = connection.execute(
        sa.text("""
            SELECT id, title, description, test_plan, notes
            FROM controls
            WHERE title REGEXP :reg_exp
            OR description REGEXP :reg_exp
            OR test_plan REGEXP :reg_exp
            OR notes REGEXP :reg_exp
        """),
        reg_exp=REGEX_HTML
    ).fetchall()
    controls_ids = {data[0] for data in controls_data}
    controls_table = sa.sql.table(
        'controls',
        sa.Column('id', sa.Integer()),
        sa.Column('title', sa.String(250), nullable=False),
        sa.Column('description', sa.Text, nullable=False),
        sa.Column('test_plan', sa.Text, nullable=False),
        sa.Column('notes', sa.Text, nullable=False),
        sa.Column('updated_at', sa.DateTime, nullable=False),
    )
    # Rewrite all four attributes (parse_html is a no-op on plain text)
    # and bump updated_at.
    for c_id, title, description, test_plan, notes in controls_data:
        op.execute(controls_table.update().values(
            title=parse_html(title),
            description=parse_html(description),
            test_plan=parse_html(test_plan),
            notes=parse_html(notes),
            updated_at=datetime.datetime.utcnow(),
        ).where(controls_table.c.id == c_id))
    # Returned so the caller can queue these Controls for revisions.
    return controls_ids
def upgrade():
    """Upgrade database schema and/or data, creating a new revision."""
    connection = op.get_bind()
    update_comments(connection)
    # Controls can be touched either through their CAVs or through their
    # own attributes; queue the union for revision creation.
    affected_controls = update_control_cavs(connection)
    affected_controls |= update_control_attr(connection)
    if affected_controls:
        utils.add_to_objects_without_revisions_bulk(
            connection, affected_controls, "Control", "modified",
        )
def downgrade():
    """Downgrade database schema and/or data back to the previous revision."""
    # Intentionally a no-op: the HTML -> markdown conversion above is lossy
    # and cannot be reversed.
942a8918c1433312d16377e471de191068819297 | 2,061 | py | Python | tests/emmet-api/molecules/test_query_operators.py | acrutt/emmet | e98100c9932f145a3ad3087ddb7aa9b779d9a191 | [
"BSD-3-Clause-LBNL"
] | null | null | null | tests/emmet-api/molecules/test_query_operators.py | acrutt/emmet | e98100c9932f145a3ad3087ddb7aa9b779d9a191 | [
"BSD-3-Clause-LBNL"
] | null | null | null | tests/emmet-api/molecules/test_query_operators.py | acrutt/emmet | e98100c9932f145a3ad3087ddb7aa9b779d9a191 | [
"BSD-3-Clause-LBNL"
] | null | null | null | from monty.tempfile import ScratchDir
from monty.serialization import loadfn, dumpfn
from emmet.api.routes.molecules.query_operators import (
MoleculeBaseQuery,
MoleculeElementsQuery,
MoleculeFormulaQuery,
)
def test_molecule_elements_query():
    op = MoleculeElementsQuery()
    expected = {"criteria": {"elements": {"$all": ["Si", "O", "P"]}}}
    assert op.query(elements="Si, O, P") == expected
    # The operator must survive a serialization round-trip unchanged.
    with ScratchDir("."):
        dumpfn(op, "temp.json")
        restored = loadfn("temp.json")
        assert restored.query(elements="Si, O, P") == expected
def test_molecule_base_query():
    op = MoleculeBaseQuery()
    q = op.query(
        nelements_min=0,
        nelements_max=5,
        EA_min=0,
        EA_max=5,
        IE_min=0,
        IE_max=5,
        charge_min=0,
        charge_max=5,
        pointgroup="C3v",
        smiles="CN=C=O",
    )
    # Each *_min/*_max pair must collapse into a $gte/$lte range criterion.
    fields = [
        "nelements",
        "EA",
        "IE",
        "charge",
    ]
    c = {field: {"$gte": 0, "$lte": 5} for field in fields}
    assert q == {"criteria": {"pointgroup": "C3v", "smiles": "CN=C=O", **c}}
    # Round-trip through serialization and re-check the same query.
    with ScratchDir("."):
        dumpfn(op, "temp.json")
        new_op = loadfn("temp.json")
        q = new_op.query(
            nelements_min=0,
            nelements_max=5,
            EA_min=0,
            EA_max=5,
            IE_min=0,
            IE_max=5,
            charge_min=0,
            charge_max=5,
            pointgroup="C3v",
            smiles="CN=C=O",
        )
        # Rebuilt only because the first `c` could have been mutated.
        c = {field: {"$gte": 0, "$lte": 5} for field in fields}
        assert q == {"criteria": {"pointgroup": "C3v", "smiles": "CN=C=O", **c}}
def test_molecule_formula_query():
    op = MoleculeFormulaQuery()
    # The query operator reduces the formula to its pretty reduced form.
    expected = {"criteria": {"formula_pretty": "H2CO"}}
    assert op.query(formula="C6H12O6") == expected
    # The operator must survive a serialization round-trip unchanged.
    with ScratchDir("."):
        dumpfn(op, "temp.json")
        restored = loadfn("temp.json")
        assert restored.query(formula="C6H12O6") == expected
| 24.535714 | 82 | 0.522077 |
0fe59a72bba28e6aef09121aeca9fb9122c71bc2 | 4,471 | py | Python | huaweicloud-sdk-live/huaweicloudsdklive/v1/model/delete_domain_mapping_request.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | 1 | 2021-11-03T07:54:50.000Z | 2021-11-03T07:54:50.000Z | huaweicloud-sdk-live/huaweicloudsdklive/v1/model/delete_domain_mapping_request.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-live/huaweicloudsdklive/v1/model/delete_domain_mapping_request.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import pprint
import re
import six
class DeleteDomainMappingRequest:
    """Auto-generated SDK request model for deleting a push/pull domain
    mapping.

    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute names whose values must be masked in to_dict() output.
    sensitive_list = []

    openapi_types = {
        'specify_project': 'str',
        'pull_domain': 'str',
        'push_domain': 'str'
    }

    attribute_map = {
        'specify_project': 'specify_project',
        'pull_domain': 'pull_domain',
        'push_domain': 'push_domain'
    }

    def __init__(self, specify_project=None, pull_domain=None, push_domain=None):
        """DeleteDomainMappingRequest - a model defined in huaweicloud sdk"""
        self._specify_project = None
        self._pull_domain = None
        self._push_domain = None
        self.discriminator = None

        if specify_project is not None:
            self.specify_project = specify_project
        self.pull_domain = pull_domain
        self.push_domain = push_domain

    @property
    def specify_project(self):
        """Gets the specify_project of this DeleteDomainMappingRequest.

        The specific project_id an op account must carry; when an op account
        is used this is the project_id of the tenant being operated on.

        :return: The specify_project of this DeleteDomainMappingRequest.
        :rtype: str
        """
        return self._specify_project

    @specify_project.setter
    def specify_project(self, specify_project):
        """Sets the specify_project of this DeleteDomainMappingRequest.

        The specific project_id an op account must carry; when an op account
        is used this is the project_id of the tenant being operated on.

        :param specify_project: The specify_project of this DeleteDomainMappingRequest.
        :type: str
        """
        self._specify_project = specify_project

    @property
    def pull_domain(self):
        """Gets the pull_domain of this DeleteDomainMappingRequest.

        Live streaming playback (pull) domain name.

        :return: The pull_domain of this DeleteDomainMappingRequest.
        :rtype: str
        """
        return self._pull_domain

    @pull_domain.setter
    def pull_domain(self, pull_domain):
        """Sets the pull_domain of this DeleteDomainMappingRequest.

        Live streaming playback (pull) domain name.

        :param pull_domain: The pull_domain of this DeleteDomainMappingRequest.
        :type: str
        """
        self._pull_domain = pull_domain

    @property
    def push_domain(self):
        """Gets the push_domain of this DeleteDomainMappingRequest.

        Live streaming ingest (push) domain name.

        :return: The push_domain of this DeleteDomainMappingRequest.
        :rtype: str
        """
        return self._push_domain

    @push_domain.setter
    def push_domain(self, push_domain):
        """Sets the push_domain of this DeleteDomainMappingRequest.

        Live streaming ingest (push) domain name.

        :param push_domain: The push_domain of this DeleteDomainMappingRequest.
        :type: str
        """
        self._push_domain = push_domain

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists and dicts; values whose
        # attribute name is in sensitive_list are masked.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DeleteDomainMappingRequest):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 27.429448 | 87 | 0.595169 |
def2efd504df59f64af7afe556b533a31241d742 | 971 | py | Python | setup.py | harunpehlivan/ParlAI | e1f2942feb8f158964477f4a46bc2c4c741b2ccd | [
"MIT"
] | 1 | 2019-07-24T00:19:28.000Z | 2019-07-24T00:19:28.000Z | setup.py | baffipelo/ParlAI | e1f2942feb8f158964477f4a46bc2c4c741b2ccd | [
"MIT"
] | null | null | null | setup.py | baffipelo/ParlAI | e1f2942feb8f158964477f4a46bc2c4c741b2ccd | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import setup, find_packages
import sys
if sys.version_info < (3, 6):
sys.exit('Sorry, Python >=3.6 is required for ParlAI.')
with open('README.md', encoding="utf8") as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
with open('requirements.txt') as f:
reqs = f.read()
setup(
name='parlai',
version='0.1.0',
description='Unified API for accessing dialog datasets.',
long_description=readme,
url='http://parl.ai/',
license=license,
python_requires='>=3.6',
packages=find_packages(
exclude=('data', 'docs', 'downloads', 'examples', 'logs', 'tests')
),
install_requires=reqs.strip().split('\n'),
include_package_data=True,
test_suite='tests.suites.unittests',
)
| 25.552632 | 74 | 0.666323 |
d6cfcb701c52c05a89eb0a3bd7426f51ff46fddb | 150 | py | Python | test.py | csningli/Project | 340c436ea5037508707464788b7a99a681ba6b92 | [
"MIT"
] | null | null | null | test.py | csningli/Project | 340c436ea5037508707464788b7a99a681ba6b92 | [
"MIT"
] | null | null | null | test.py | csningli/Project | 340c436ea5037508707464788b7a99a681ba6b92 | [
"MIT"
] | null | null | null |
if __name__ == "__main__" :
    import sys
    # Make the project's modules importable before running the doctests.
    sys.path.append('./py/')
    import doctest
    doctest.testfile('./tests/tests.txt', verbose = True)
| 21.428571 | 57 | 0.626667 |
a0376f9beb26ddd5c569fd0db51d45dcfb052924 | 6,883 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/cisco/ucs/plugins/modules/ucs_vlans.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 10 | 2020-05-19T01:51:28.000Z | 2021-11-16T11:36:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/cisco/ucs/plugins/modules/ucs_vlans.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 19 | 2020-03-04T15:35:26.000Z | 2022-03-31T04:35:19.000Z | venv/lib/python3.6/site-packages/ansible_collections/cisco/ucs/plugins/modules/ucs_vlans.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 9 | 2019-12-03T15:20:02.000Z | 2021-06-18T18:08:39.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: ucs_vlans
short_description: Configures VLANs on Cisco UCS Manager
description:
- Configures VLANs on Cisco UCS Manager.
extends_documentation_fragment: cisco.ucs.ucs
options:
state:
description:
- If C(present), will verify VLANs are present and will create if needed.
- If C(absent), will verify VLANs are absent and will delete if needed.
choices: [present, absent]
default: present
name:
description:
- The name assigned to the VLAN.
- The VLAN name is case sensitive.
- This name can be between 1 and 32 alphanumeric characters.
- "You cannot use spaces or any special characters other than - (hyphen), \"_\" (underscore), : (colon), and . (period)."
- You cannot change this name after the VLAN is created.
required: yes
multicast_policy:
description:
- The multicast policy associated with this VLAN.
- This option is only valid if the Sharing Type field is set to None or Primary.
default: ''
fabric:
description:
- "The fabric configuration of the VLAN. This can be one of the following:"
- "common - The VLAN applies to both fabrics and uses the same configuration parameters in both cases."
- "A — The VLAN only applies to fabric A."
- "B — The VLAN only applies to fabric B."
- For upstream disjoint L2 networks, Cisco recommends that you choose common to create VLANs that apply to both fabrics.
choices: [common, A, B]
default: common
id:
description:
- The unique string identifier assigned to the VLAN.
- A VLAN ID can be between '1' and '3967', or between '4048' and '4093'.
- You cannot create VLANs with IDs from 4030 to 4047. This range of VLAN IDs is reserved.
- The VLAN IDs you specify must also be supported on the switch that you are using.
- VLANs in the LAN cloud and FCoE VLANs in the SAN cloud must have different IDs.
- Optional if state is absent.
required: yes
sharing:
description:
- The Sharing Type field.
- "Whether this VLAN is subdivided into private or secondary VLANs. This can be one of the following:"
- "none - This VLAN does not have any secondary or private VLANs. This is a regular VLAN."
- "primary - This VLAN can have one or more secondary VLANs, as shown in the Secondary VLANs area. This VLAN is a primary VLAN in the private VLAN domain."
- "isolated - This is a private VLAN associated with a primary VLAN. This VLAN is an Isolated VLAN."
- "community - This VLAN can communicate with other ports on the same community VLAN as well as the promiscuous port. This VLAN is a Community VLAN."
choices: [none, primary, isolated, community]
default: none
native:
description:
- Designates the VLAN as a native VLAN.
choices: ['yes', 'no']
default: 'no'
requirements:
- ucsmsdk
author:
- David Soper (@dsoper2)
- CiscoUcs (@CiscoUcs)
version_added: '2.5'
'''
EXAMPLES = r'''
- name: Configure VLAN
cisco.ucs.ucs_vlans:
hostname: 172.16.143.150
username: admin
password: password
name: vlan2
id: '2'
native: 'yes'
- name: Remove VLAN
cisco.ucs.ucs_vlans:
hostname: 172.16.143.150
username: admin
password: password
name: vlan2
state: absent
'''
RETURN = r'''
#
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.cisco.ucs.plugins.module_utils.ucs import UCSModule, ucs_argument_spec
def main():
    """Ansible entry point: converge a UCS Manager VLAN to the requested state.

    Builds the module argument spec on top of the shared UCS connection
    arguments, logs in via UCSModule, then creates, updates, or deletes
    the FabricVlan managed object named by the 'name' parameter,
    reporting 'changed' idempotently (check mode supported).
    """
    argument_spec = ucs_argument_spec
    argument_spec.update(
        name=dict(type='str', required=True),
        multicast_policy=dict(type='str', default=''),
        fabric=dict(type='str', default='common', choices=['common', 'A', 'B']),
        id=dict(type='str'),
        sharing=dict(type='str', default='none', choices=['none', 'primary', 'isolated', 'community']),
        native=dict(type='str', default='no', choices=['yes', 'no']),
        state=dict(type='str', default='present', choices=['present', 'absent']),
    )
    module = AnsibleModule(
        argument_spec,
        supports_check_mode=True,
        # 'id' is only required when the VLAN is being created/updated.
        required_if=[
            ['state', 'present', ['id']],
        ],
    )
    ucs = UCSModule(module)

    err = False

    # UCSModule creation above verifies ucsmsdk is present and exits on failure, so additional imports are done below.
    from ucsmsdk.mometa.fabric.FabricVlan import FabricVlan

    changed = False
    try:
        mo_exists = False
        props_match = False
        # dn is fabric/lan/net-<name> for common vlans or fabric/lan/[A or B]/net-<name> for A or B
        dn_base = 'fabric/lan'
        if module.params['fabric'] != 'common':
            dn_base += '/' + module.params['fabric']
        dn = dn_base + '/net-' + module.params['name']

        mo = ucs.login_handle.query_dn(dn)
        if mo:
            mo_exists = True
        if module.params['state'] == 'absent':
            # mo must exist but all properties do not have to match
            if mo_exists:
                if not module.check_mode:
                    ucs.login_handle.remove_mo(mo)
                    ucs.login_handle.commit()
                changed = True
        else:
            if mo_exists:
                # check top-level mo props
                kwargs = dict(id=module.params['id'])
                kwargs['default_net'] = module.params['native']
                kwargs['sharing'] = module.params['sharing']
                kwargs['mcast_policy_name'] = module.params['multicast_policy']
                if mo.check_prop_match(**kwargs):
                    props_match = True

            if not props_match:
                if not module.check_mode:
                    # create if mo does not already exist
                    # NOTE(review): the second positional argument is presumably
                    # ucsmsdk's modify_present flag, making this call also update
                    # an existing, drifted VLAN — confirm against the SDK version.
                    mo = FabricVlan(
                        parent_mo_or_dn=dn_base,
                        name=module.params['name'],
                        id=module.params['id'],
                        default_net=module.params['native'],
                        sharing=module.params['sharing'],
                        mcast_policy_name=module.params['multicast_policy'],
                    )

                    ucs.login_handle.add_mo(mo, True)
                    ucs.login_handle.commit()
                changed = True

    except Exception as e:
        err = True
        ucs.result['msg'] = "setup error: %s " % str(e)

    ucs.result['changed'] = changed
    if err:
        module.fail_json(**ucs.result)
    module.exit_json(**ucs.result)
if __name__ == '__main__':
main()
| 35.663212 | 159 | 0.623856 |
adeb5a44f076b065b5bc7dee5af37784d1291029 | 906 | py | Python | src/preproc/spatial.py | erramuzpe/ruber | cf510a4cf9b0b15d870b6506a1593c3b2b00a3b7 | [
"MIT"
] | 2 | 2018-11-07T07:54:34.000Z | 2022-01-13T13:06:06.000Z | src/preproc/spatial.py | erramuzpe/ruber | cf510a4cf9b0b15d870b6506a1593c3b2b00a3b7 | [
"MIT"
] | null | null | null | src/preproc/spatial.py | erramuzpe/ruber | cf510a4cf9b0b15d870b6506a1593c3b2b00a3b7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Function utilities to deal with spatial/coordinates problems in the nifti images.
"""
def get_bounding_box(in_file):
    """Retrieve the bounding box of a volume in millimetres.

    :param in_file: path to a nibabel-readable volume (e.g. NIfTI).
    :returns: ``[low_corner, high_corner]``, each a 3-element list of
        world-space (mm) coordinates.
    """
    # Imports stay local so the function can be serialised into a
    # nipype.Function node, which only ships the function body.
    from itertools import product
    import nibabel as nib
    import numpy as np

    img = nib.load(in_file)

    # Index coordinates of the volume's eight corner voxels.
    max_index = np.array(img.shape[:3]) - 1
    corner_indices = np.array(list(product([0, 1], repeat=3))) * max_index

    # Promote to homogeneous coordinates and push through the affine to
    # obtain world-space (mm) positions of the corners.
    homogeneous = np.hstack([corner_indices, np.ones((8, 1))])
    world_corners = img.affine.dot(homogeneous.T).T[:, :3]

    # The extents are the per-axis minima and maxima over all corners.
    return [world_corners.min(axis=0).tolist(),
            world_corners.max(axis=0).tolist()]
444f8d90fa075fe913765b260d32d68f5087be4d | 396 | py | Python | networks/abstracts.py | jinPrelude/simple-es | 759f3f70e641463a785a7275ba16c9db72fb29bf | [
"Apache-2.0"
] | 10 | 2020-08-16T11:43:23.000Z | 2022-01-21T23:21:52.000Z | networks/abstracts.py | jinPrelude/simple-es | 759f3f70e641463a785a7275ba16c9db72fb29bf | [
"Apache-2.0"
] | 2 | 2021-03-25T09:22:21.000Z | 2021-06-24T09:55:28.000Z | networks/abstracts.py | jinPrelude/give-life-to-agents | ebf7d89a05a8e820759a0cf80adcc9499c8425d8 | [
"Apache-2.0"
] | 2 | 2021-08-10T14:26:55.000Z | 2021-09-27T20:57:48.000Z | from abc import *
from torch import nn
class BaseNetwork(nn.Module, ABC):
    """Abstract interface for networks used by the ES trainers.

    Also inheriting from ``ABC`` gives the class the ``ABCMeta``
    metaclass, so the ``@abstractmethod`` markers below are actually
    enforced: a subclass that fails to implement all four methods can no
    longer be instantiated (previously the markers were inert because
    ``nn.Module`` alone does not use ``ABCMeta``).
    """

    def __init__(self):
        super(BaseNetwork, self).__init__()

    @abstractmethod
    def zero_init(self):
        """Subclasses must implement (presumably zero-initialisation of
        the parameters, from the name — see concrete networks)."""

    @abstractmethod
    def reset(self):
        """Subclasses must implement (e.g. clearing recurrent state)."""

    @abstractmethod
    def get_param_list(self):
        """Subclasses must return their parameters as a list."""

    @abstractmethod
    def apply_param(self, param_lst: list):
        """Subclasses must load parameters from ``param_lst``."""
| 15.84 | 43 | 0.628788 |
d00d5ba33eb7b11ee8a80e32a40c15b77c336783 | 4,190 | py | Python | cloudkitty/tests/samples.py | mail2nsrajesh/cloudkitty | efb35ee3a0857ba0b0b3ae4154809a6c1d186a98 | [
"Apache-2.0"
] | null | null | null | cloudkitty/tests/samples.py | mail2nsrajesh/cloudkitty | efb35ee3a0857ba0b0b3ae4154809a6c1d186a98 | [
"Apache-2.0"
] | null | null | null | cloudkitty/tests/samples.py | mail2nsrajesh/cloudkitty | efb35ee3a0857ba0b0b3ae4154809a6c1d186a98 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2015 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Stéphane Albert
#
import copy
import decimal
from cloudkitty import utils as ck_utils
TENANT = 'f266f30b11f246b589fd266f85eeec39'
INITIAL_TIMESTAMP = 1420070400
FIRST_PERIOD_BEGIN = INITIAL_TIMESTAMP
FIRST_PERIOD_BEGIN_ISO = ck_utils.ts2iso(FIRST_PERIOD_BEGIN)
FIRST_PERIOD_END = FIRST_PERIOD_BEGIN + 3600
FIRST_PERIOD_END_ISO = ck_utils.ts2iso(FIRST_PERIOD_END)
SECOND_PERIOD_BEGIN = FIRST_PERIOD_END
SECOND_PERIOD_BEGIN_ISO = ck_utils.ts2iso(SECOND_PERIOD_BEGIN)
SECOND_PERIOD_END = SECOND_PERIOD_BEGIN + 3600
SECOND_PERIOD_END_ISO = ck_utils.ts2iso(SECOND_PERIOD_END)
COMPUTE_METADATA = {
'availability_zone': 'nova',
'flavor': 'm1.nano',
'image_id': 'f5600101-8fa2-4864-899e-ebcb7ed6b568',
'instance_id': '26c084e1-b8f1-4cbc-a7ec-e8b356788a17',
'memory': '64',
'metadata': {
'farm': 'prod'
},
'name': 'prod1',
'project_id': 'f266f30b11f246b589fd266f85eeec39',
'user_id': '55b3379b949243009ee96972fbf51ed1',
'vcpus': '1'}
IMAGE_METADATA = {
'checksum': '836c69cbcd1dc4f225daedbab6edc7c7',
'container_format': 'aki',
'created_at': '2014-06-04T16:26:01',
'deleted': 'False',
'deleted_at': 'None',
'disk_format': 'aki',
'is_public': 'True',
'min_disk': '0',
'min_ram': '0',
'name': 'cirros-0.3.2-x86_64-uec-kernel',
'protected': 'False',
'size': '4969360',
'status': 'active',
'updated_at': '2014-06-04T16:26:02'}
FIRST_PERIOD = {
'begin': FIRST_PERIOD_BEGIN,
'end': FIRST_PERIOD_END}
SECOND_PERIOD = {
'begin': SECOND_PERIOD_BEGIN,
'end': SECOND_PERIOD_END}
COLLECTED_DATA = [{
'period': FIRST_PERIOD,
'usage': {
'compute': [{
'desc': COMPUTE_METADATA,
'vol': {
'qty': decimal.Decimal(1.0),
'unit': 'instance'}}],
'image': [{
'desc': IMAGE_METADATA,
'vol': {
'qty': decimal.Decimal(1.0),
'unit': 'image'}}]
}}, {
'period': SECOND_PERIOD,
'usage': {
'compute': [{
'desc': COMPUTE_METADATA,
'vol': {
'qty': decimal.Decimal(1.0),
'unit': 'instance'}}]
}}]
RATED_DATA = copy.deepcopy(COLLECTED_DATA)
RATED_DATA[0]['usage']['compute'][0]['rating'] = {
'price': decimal.Decimal('0.42')}
RATED_DATA[0]['usage']['image'][0]['rating'] = {
'price': decimal.Decimal('0.1337')}
RATED_DATA[1]['usage']['compute'][0]['rating'] = {
'price': decimal.Decimal('0.42')}
def split_storage_data(raw_data):
    """Explode multi-service frames into one frame per service.

    Each frame's period timestamps are converted to ISO strings (note:
    this mutates the frames of ``raw_data`` in place), and every
    resulting single-service frame is tagged with the test TENANT id.

    :param raw_data: list of frames with 'period' and 'usage' keys.
    :returns: flat list of per-service frames.
    """
    split_frames = []
    for frame in raw_data:
        period = frame['period']
        period['begin'] = ck_utils.ts2iso(period['begin'])
        period['end'] = ck_utils.ts2iso(period['end'])
        services = frame.pop('usage')
        # Sorting keeps the emitted list ordering deterministic.
        for service_name in sorted(services):
            per_service = copy.deepcopy(frame)
            per_service['usage'] = {service_name: services[service_name]}
            per_service['usage'][service_name][0]['tenant_id'] = TENANT
            split_frames.append(per_service)
    return split_frames
# FIXME(sheeprine): storage is not using decimal for rates, we need to
# transition to decimal.
STORED_DATA = copy.deepcopy(COLLECTED_DATA)
STORED_DATA[0]['usage']['compute'][0]['rating'] = {
'price': 0.42}
STORED_DATA[0]['usage']['image'][0]['rating'] = {
'price': 0.1337}
STORED_DATA[1]['usage']['compute'][0]['rating'] = {
'price': 0.42}
STORED_DATA = split_storage_data(STORED_DATA)
| 32.230769 | 78 | 0.635561 |
fd06abed6b0e6bf59eb4162553674e8d2d7dff23 | 1,586 | py | Python | tests/integration/api/v2010/account/test_validation_request.py | neetaramaswamy/twilio-python | 28472ffab1a170824ba17f12a6c1692a5e849439 | [
"MIT"
] | null | null | null | tests/integration/api/v2010/account/test_validation_request.py | neetaramaswamy/twilio-python | 28472ffab1a170824ba17f12a6c1692a5e849439 | [
"MIT"
] | null | null | null | tests/integration/api/v2010/account/test_validation_request.py | neetaramaswamy/twilio-python | 28472ffab1a170824ba17f12a6c1692a5e849439 | [
"MIT"
] | null | null | null | # coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class ValidationRequestTestCase(IntegrationTestCase):
    """Holodeck-backed tests for creating caller-id validation requests."""

    def test_create_request(self):
        # Mock a 500 so the client raises after sending; this test only
        # checks the shape of the outbound HTTP request.
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
                                 .validation_requests.create(phone_number="+15017122661")

        values = {'PhoneNumber': "+15017122661", }

        # Validation requests are POSTed to the OutgoingCallerIds resource.
        self.holodeck.assert_has_request(Request(
            'post',
            'https://api.twilio.com/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/OutgoingCallerIds.json',
            data=values,
        ))

    def test_create_response(self):
        # Mock a successful 201 Created payload and verify it parses.
        self.holodeck.mock(Response(
            201,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "call_sid": "CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "friendly_name": "friendly_name",
                "phone_number": "+18001234567",
                "validation_code": 100
            }
            '''
        ))

        actual = self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
                                      .validation_requests.create(phone_number="+15017122661")

        self.assertIsNotNone(actual)
| 31.72 | 115 | 0.602144 |
fa41e96e03dcef4f8b789d9af41df79866afb384 | 1,021 | py | Python | dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/test/mock_swagger_json_navigator.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | [
"AML"
] | 1,738 | 2017-09-21T10:59:12.000Z | 2022-03-31T21:05:46.000Z | dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/test/mock_swagger_json_navigator.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | [
"AML"
] | 427 | 2017-09-29T22:54:36.000Z | 2022-02-15T19:26:50.000Z | dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/test/mock_swagger_json_navigator.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | [
"AML"
] | 671 | 2017-09-21T08:04:01.000Z | 2022-03-29T14:30:07.000Z | #
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# $Revision: #4 $
from swagger_json_navigator import SwaggerNavigator
class SwaggerNavigatorMatcher(object):
    """Equality matcher for mock assertions over SwaggerNavigator values.

    Accepts either a raw value or a SwaggerNavigator (whose ``.value``
    is unwrapped); an instance then compares equal to any
    SwaggerNavigator wrapping an equal value.
    """

    def __init__(self, expected_value):
        # Unwrap navigators so comparison is always value-to-value.
        if isinstance(expected_value, SwaggerNavigator):
            self.__expected = expected_value.value
        else:
            self.__expected = expected_value

    def __eq__(self, other):
        if not isinstance(other, SwaggerNavigator):
            return False
        return other.value == self.__expected

    def __str__(self):
        return str(self.__expected)
| 34.033333 | 91 | 0.742409 |
7ea8c38221ca87e9eb1d8680db7ab7c8fd888813 | 31 | py | Python | sk_dsp_comm/pyaudio_helper/__init__.py | scikit-dsp-comm/pyaudio_helper | 475d73906089bcac71d884cd21407b519c1375af | [
"BSD-2-Clause"
] | null | null | null | sk_dsp_comm/pyaudio_helper/__init__.py | scikit-dsp-comm/pyaudio_helper | 475d73906089bcac71d884cd21407b519c1375af | [
"BSD-2-Clause"
] | 7 | 2020-03-21T17:06:13.000Z | 2022-01-11T00:52:25.000Z | sk_dsp_comm/pyaudio_helper/__init__.py | scikit-dsp-comm/pyaudio_helper | 475d73906089bcac71d884cd21407b519c1375af | [
"BSD-2-Clause"
] | 2 | 2020-07-13T05:42:53.000Z | 2020-12-28T03:36:53.000Z | from . pyaudio_helper import *
| 15.5 | 30 | 0.774194 |
8c10113e7571b670585d39f779b00778bd4dd97a | 12,594 | py | Python | pyroller/pyroller.py | Trebek/pyroller | 4a72b86c616b5d0420bd98366b19facea061718b | [
"MIT"
] | 1 | 2016-06-26T20:10:49.000Z | 2016-06-26T20:10:49.000Z | pyroller/pyroller.py | Trebek/pyroller | 4a72b86c616b5d0420bd98366b19facea061718b | [
"MIT"
] | null | null | null | pyroller/pyroller.py | Trebek/pyroller | 4a72b86c616b5d0420bd98366b19facea061718b | [
"MIT"
] | null | null | null | #===============================================================================
# PyRoller: Dice Package
#-------------------------------------------------------------------------------
# Version: 1.1.0
# Updated: 22-06-2014
# Author: Alex Crawford
# License: MIT
#===============================================================================
"""
A package containing a class & methods for simulating dice. Can also simulate
coin tosses, and Fudge dice.
"""
# The MIT License (MIT)
# Copyright (c) 2014 Alex Crawford
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#===============================================================================
# Imports
#===============================================================================
import random
import re
#===============================================================================
# Pyroller Class
#===============================================================================
class Pyroller(object):
    """
    A dice class, containing methods for constructing / manipulating
    dice objects.  Handles standard dice ("d6", "3d6", "d20", ...),
    Fudge dice ("dF", "4dF") and coin tosses ("coin").
    """

    # randrange(2) index -> coin face.
    COIN_HASH = {0: "Heads", 1: "Tails"}
    # randrange(6) index -> Fudge face: two of each symbol, as on a
    # physical dF die.
    FUDGE_HASH = {0: "-", 1: "-", 2: " ", 3: " ", 4: "+", 5: "+"}

    def __init__(self, notation=None, num=1, sides=6):
        """
        Dice constructor method. Defaults to d6. It will accept
        standard dice notation (d6, 3d6, d20, etc.), as a string,
        or you can assign the number of dice, and number of sides
        directly, using the arguments 'num' and 'sides'.

        :param notation: A standard dice notation ("3d6", "dF", "coin").
        :param num: For specifying the number of dice directly.
        :param sides: For specifying the number of sides directly.
        :raises ValueError: If ``notation`` is given but not valid.
        """
        self._dice_count = int(num)
        self._dice_sides = int(sides)
        self._dice_type = None
        self._memory = False
        self._notation = None
        self._roll_total = 0
        self._rolls = []
        self._sums = []

        self.parse_notat(notation)

    @classmethod
    def build_bag(cls, notations):
        """
        Takes a list of dice notations, and builds a dictionary of dice
        instances, using the notations in the list. The key for each
        instance is the same as its notation; duplicate notations get a
        "_x" suffix, where 'x' counts instances with that notation.

        Duplicate example:
            ["d6","d6","d6"] -> dict["d6"], dict["d6_2"], dict["d6_3"]

        :param notations: A list of dice notations.
        :returns: A dict of dice objects keyed by (suffixed) notation.
        """
        dice_bag = {}

        for die in notations:
            dice_count = 1
            # Bump the suffix until neither the bare notation nor the
            # suffixed name collides with an existing key.
            for key in dice_bag.keys():
                dupe_name = "{0}_{1}".format(die, dice_count)
                if die == key or dupe_name == key:
                    dice_count += 1
            if dice_count == 1:
                dice_bag[die] = cls(die)
            else:
                new_name = "{0}_{1}".format(die, dice_count)
                dice_bag[new_name] = cls(die)

        return dice_bag

    def count(self, num):
        """
        Counts the number of occurrences of a given face in the stored
        roll history: a number, a fudge side ("+", "-", " "), or a coin
        side ("Heads"/"Tails").

        :param num: A face value to count the occurrences of.
        :returns: The number of occurrences in ``self._rolls``.
        """
        num_count = 0

        if self._dice_count > 1:
            # Multi-die rolls are stored as lists of faces.
            for roll_list in self._rolls:
                num_count += roll_list.count(num)
        else:
            num_count += self._rolls.count(num)

        return num_count

    @property
    def memory(self):
        """:returns: The state of the dice object's memory (True/False)."""
        return self._memory

    def memory_toggle(self):
        """
        Toggles the memory on (True) or off (False).  With memory on,
        rolls and sums accumulate instead of being overwritten.
        """
        self._memory = not self._memory

    @property
    def notation(self):
        """:returns: The notation string of the dice object."""
        return self._notation

    def parse_notat(self, notation=None):
        """
        Parses/validates dice notation, setting ``_dice_count``,
        ``_dice_sides`` and ``_dice_type``, then rebuilds the stored
        notation string via ``set_notat``.

        :param notation: A dice notation such as "3d6", "dF" or "coin".
        :raises ValueError: If the notation cannot be parsed.
        """
        if notation:
            pattern = re.compile(r"^([0-9]*)d([0-9]*|[fF])$|^(coin)$")
            matches = pattern.match(notation)

            if not matches:
                raise ValueError("Invalid dice notation")

            self._dice_count = int(matches.group(1)) if matches.group(1) else 1

            if matches.group(2):
                if matches.group(2) in ["f", "F"]:
                    self._dice_type = "fudge"
                    self._dice_sides = 6
                else:
                    self._dice_type = "standard"
                    self._dice_sides = int(matches.group(2))
            elif matches.group(3) == "coin":
                self._dice_type = "coin"
                self._dice_sides = 2
            else:
                # Bare "d" with no side count defaults to a d6.
                # (Previously the type was left unset here.)
                self._dice_type = "standard"
                self._dice_sides = 6
        else:
            # No notation: the num/sides constructor arguments apply,
            # which always describe standard dice.  (Previously the type
            # was left as None, which behaved like "standard" anyway.)
            self._dice_type = "standard"

        self.set_notat()

    def reset(self, rolls=True, sums=True, total=True):
        """
        Resets the roll history, the sums, and/or the roll counter.

        :param rolls: Whether to clear ``self._rolls``.
        :param sums: Whether to clear ``self._sums``.
        :param total: Whether to zero ``self._roll_total``.
        """
        if rolls:
            self._rolls = []
        if sums:
            self._sums = []
        if total:
            self._roll_total = 0

    def roll(self, count=1):
        """
        Rolls the dice (or flips a coin) ``count`` times.

        :param count: The number of times to roll (must be >= 1).
        :returns: The last roll: a list of faces for multi-die objects,
            a single face otherwise.
        """
        rolls = []

        if not self._memory:
            self._rolls = []

        for _ in range(count):
            roll = []
            self._roll_total += 1
            for _ in range(self._dice_count):
                if self._dice_type == "coin":
                    roll.append(self.COIN_HASH[random.randrange(self._dice_sides)])
                elif self._dice_type == "fudge":
                    roll.append(self.FUDGE_HASH[random.randrange(self._dice_sides)])
                else:
                    roll.append(random.randrange(1, self._dice_sides + 1))
            # Single-die rolls are stored unwrapped for convenience.
            if len(roll) > 1:
                rolls.append(roll)
            else:
                rolls.append(roll[0])

        self._rolls.extend(rolls)

        return rolls[-1]

    @property
    def roll_list(self):
        """
        :returns: The stored rolls: a single roll when only one exists,
            the full list when several, or ``None`` when empty.  Toggle
            memory on (``memory_toggle``) to retain more than the most
            recent ``roll`` call's results.
        """
        if not self._rolls:
            return None
        if len(self._rolls) == 1:
            return self._rolls[0]
        return self._rolls

    @property
    def rolls_sums(self):
        """
        Packages the rolls and their sums together into tuples.

        :returns: A zip of (roll, sum) pairs; ``sums`` should have been
            computed (e.g. via ``sum_rolls``) for the pairing to align.
        """
        return zip(self._rolls, self._sums)

    def set_notat(self, notation=None):
        """
        Sets ``self._notation``.  Without an argument, the notation is
        rebuilt from the current count/sides/type; with one, the given
        notation is stored as-is.  (The previous implementation
        discarded an explicitly supplied notation, storing None, and
        rendered a single Fudge die as "d6" instead of "dF".)
        """
        if not notation:
            if self._dice_count > 1:
                if self._dice_type != "fudge":
                    self._notation = "{0}d{1}".format(self._dice_count,
                                                      self._dice_sides)
                else:
                    self._notation = "{0}dF".format(self._dice_count)
            else:
                if self._dice_type == "coin":
                    self._notation = "coin"
                elif self._dice_type == "fudge":
                    self._notation = "dF"
                else:
                    self._notation = "d{0}".format(self._dice_sides)
        else:
            self._notation = notation

    def sum_avg(self, rounded=True, places=2):
        """
        Figures the average of the sums in ``self._sums``.

        :param rounded: Whether to round off the result.
        :param places: The number of decimal places to round to.
        :returns: The average of the currently summed rolls, or ``None``
            when there is nothing to average (previously this raised
            ZeroDivisionError).
        """
        self.sum_rolls()

        if not self._sums:
            return None

        average = float(sum(self._sums)) / len(self._sums)

        if rounded:
            return round(average, places)
        return average

    def sum_rolls(self):
        """
        Sums the current roll(s) and stores the sums in ``self._sums``.
        Fudge rolls sum as (+1 per "+", -1 per "-"); coin tosses have
        no meaningful sum.

        :returns: The last sum, or ``None`` when there is nothing to sum
            (no rolls yet, coin tosses, or a single Fudge die).
            Previously the coin and empty-history cases raised
            IndexError.
        """
        roll_sums = []

        if not self._memory:
            self._sums = []

        if self._dice_type != "coin" and self._dice_type != "fudge":
            if self._dice_count > 1:
                for roll_list in self._rolls:
                    roll_sums.append(sum(roll_list))
                self._sums = roll_sums
            elif self._rolls:
                roll_sums.append(self._rolls[-1])
                self._sums = self._rolls
        elif self._dice_type == "fudge":
            if self._dice_count > 1:
                for roll_list in self._rolls:
                    count = 0
                    for face in roll_list:
                        if face == "+":
                            count += 1
                        elif face == "-":
                            count -= 1
                    roll_sums.append(count)
                self._sums = roll_sums
            else:
                return None

        if not roll_sums:
            return None
        return roll_sums[-1]

    @property
    def sums(self):
        """
        Recomputes the sums (via ``sum_rolls``) from the stored rolls.

        :returns: The current list of sums.
        """
        self.sum_rolls()
        return self._sums

    def toss(self, count=1):
        """
        Just a wrapper for the 'roll' method.

        :param count: The number of times to toss the coin.
        :returns: The result of the last "toss".
        """
        return self.roll(count)

    @property
    def toss_list(self):
        """
        Just a wrapper for the ``roll_list`` property.

        :returns: A list of all of the past "tosses".
        """
        return self.roll_list

    @property
    def total_rolls(self):
        """
        :returns: The total number of times the dice object has been
            rolled since its construction / last reset.
        """
        return self._roll_total
#===============================================================================
# Main Check
#===============================================================================
if __name__ == '__main__':
pass
| 31.019704 | 80 | 0.514848 |
dbcf79d1ac20ddc32cb1605e06d253803250c855 | 5,605 | py | Python | mmdet/core/post_processing/merge_augs.py | evgps/mmdetection_trashcan | aaf4237c2c0d473425cdc7b741d3009177b79751 | [
"Apache-2.0"
] | 367 | 2022-01-14T03:32:25.000Z | 2022-03-31T04:48:20.000Z | mmdet/core/post_processing/merge_augs.py | evgps/mmdetection_trashcan | aaf4237c2c0d473425cdc7b741d3009177b79751 | [
"Apache-2.0"
] | 39 | 2021-08-05T07:16:46.000Z | 2022-03-14T13:23:48.000Z | mmdet/core/post_processing/merge_augs.py | evgps/mmdetection_trashcan | aaf4237c2c0d473425cdc7b741d3009177b79751 | [
"Apache-2.0"
] | 61 | 2021-07-30T07:51:41.000Z | 2022-03-30T14:40:02.000Z | import copy
import warnings
import numpy as np
import torch
from mmcv import ConfigDict
from mmcv.ops import nms
from ..bbox import bbox_mapping_back
def merge_aug_proposals(aug_proposals, img_metas, cfg):
    """Merge augmented proposals (multiscale, flip, etc.)

    Args:
        aug_proposals (list[Tensor]): proposals from different testing
            schemes, shape (n, 5). Note that they are not rescaled to the
            original image size.
        img_metas (list[dict]): list of image info dict where each dict has:
            'img_shape', 'scale_factor', 'flip', and may also contain
            'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
            For details on the values of these keys see
            `mmdet/datasets/pipelines/formatting.py:Collect`.
        cfg (dict): rpn test config.

    Returns:
        Tensor: shape (n, 4), proposals corresponding to original image scale.
    """
    cfg = copy.deepcopy(cfg)  # never mutate the caller's config

    # deprecate arguments warning
    if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg:
        warnings.warn(
            'In rpn_proposal or test_cfg, '
            'nms_thr has been moved to a dict named nms as '
            'iou_threshold, max_num has been renamed as max_per_img, '
            'name of original arguments and the way to specify '
            'iou_threshold of NMS will be deprecated.')
    # Translate each legacy field into its modern equivalent, asserting
    # consistency when both the old and new spellings are present.
    if 'nms' not in cfg:
        cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr))
    if 'max_num' in cfg:
        if 'max_per_img' in cfg:
            assert cfg.max_num == cfg.max_per_img, f'You set max_num and ' \
                f'max_per_img at the same time, but get {cfg.max_num} ' \
                f'and {cfg.max_per_img} respectively' \
                f'Please delete max_num which will be deprecated.'
        else:
            cfg.max_per_img = cfg.max_num
    if 'nms_thr' in cfg:
        assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set ' \
            f'iou_threshold in nms and ' \
            f'nms_thr at the same time, but get ' \
            f'{cfg.nms.iou_threshold} and {cfg.nms_thr}' \
            f' respectively. Please delete the nms_thr ' \
            f'which will be deprecated.'

    # Map each augmented proposal set back to the original image frame
    # (undo resize and flip) so they can be merged in a common space.
    recovered_proposals = []
    for proposals, img_info in zip(aug_proposals, img_metas):
        img_shape = img_info['img_shape']
        scale_factor = img_info['scale_factor']
        flip = img_info['flip']
        flip_direction = img_info['flip_direction']
        _proposals = proposals.clone()  # keep the caller's tensors intact
        _proposals[:, :4] = bbox_mapping_back(_proposals[:, :4], img_shape,
                                              scale_factor, flip,
                                              flip_direction)
        recovered_proposals.append(_proposals)
    # Pool every proposal, de-duplicate with NMS (column 4 is the score),
    # then keep only the top-scoring max_per_img proposals.
    aug_proposals = torch.cat(recovered_proposals, dim=0)
    merged_proposals, _ = nms(aug_proposals[:, :4].contiguous(),
                              aug_proposals[:, -1].contiguous(),
                              cfg.nms.iou_threshold)
    scores = merged_proposals[:, 4]
    _, order = scores.sort(0, descending=True)
    num = min(cfg.max_per_img, merged_proposals.shape[0])
    order = order[:num]
    merged_proposals = merged_proposals[order, :]
    return merged_proposals
def merge_aug_bboxes(aug_bboxes, aug_scores, img_metas, rcnn_test_cfg):
    """Merge augmented detection bboxes and scores.

    Every augmented prediction is first mapped back to the original
    image frame (undoing scaling/flipping), then all predictions are
    averaged element-wise.

    Args:
        aug_bboxes (list[Tensor]): shape (n, 4*#class)
        aug_scores (list[Tensor] or None): shape (n, #class)
        img_metas (list[Tensor]): shape (3, ).
        rcnn_test_cfg (dict): rcnn test config.

    Returns:
        tuple: (bboxes, scores), or just the bboxes when no scores are
        given.
    """
    mapped_back = []
    for boxes, meta in zip(aug_bboxes, img_metas):
        info = meta[0]
        mapped = bbox_mapping_back(boxes, info['img_shape'],
                                   info['scale_factor'], info['flip'],
                                   info['flip_direction'])
        mapped_back.append(mapped)

    bboxes = torch.stack(mapped_back).mean(dim=0)
    if aug_scores is None:
        return bboxes
    scores = torch.stack(aug_scores).mean(dim=0)
    return bboxes, scores
def merge_aug_scores(aug_scores):
    """Merge augmented bbox scores by element-wise averaging.

    Torch tensors are averaged with torch; anything else falls back to
    ``np.mean`` (which also handles nested lists).
    """
    first = aug_scores[0]
    if isinstance(first, torch.Tensor):
        stacked = torch.stack(aug_scores)
        return stacked.mean(dim=0)
    return np.mean(aug_scores, axis=0)
def merge_aug_masks(aug_masks, img_metas, rcnn_test_cfg, weights=None):
    """Merge augmented mask prediction.

    Flipped predictions are un-flipped back to the original orientation
    before all predictions are averaged (optionally weighted).

    Args:
        aug_masks (list[ndarray]): shape (n, #class, h, w)
        img_metas (list[ndarray]): shape (3, ).
        rcnn_test_cfg (dict): rcnn test config.
        weights (sequence | None): optional per-augmentation weights.

    Returns:
        ndarray: the merged masks, shape (n, #class, h, w).

    Raises:
        ValueError: if a meta specifies an unknown flip direction.
    """
    unflipped = []
    for mask, meta in zip(aug_masks, img_metas):
        info = meta[0]
        flip = info['flip']
        direction = info['flip_direction']
        if flip:
            # Reverse the flip along the matching spatial axis.
            if direction == 'horizontal':
                mask = mask[:, :, :, ::-1]
            elif direction == 'vertical':
                mask = mask[:, :, ::-1, :]
            else:
                raise ValueError(
                    f"Invalid flipping direction '{direction}'")
        unflipped.append(mask)

    if weights is None:
        return np.mean(unflipped, axis=0)
    return np.average(np.array(unflipped), axis=0,
                      weights=np.array(weights))
| 37.119205 | 78 | 0.608385 |
a4d74b00cec41da1487cbc80cdb2cd47f4466970 | 882 | py | Python | backend/solution/models.py | pixlie/buildgaga | 723bc9a114b3040a5c4d0b1d1b384f174f65b084 | [
"MIT"
] | 1 | 2021-06-19T04:12:24.000Z | 2021-06-19T04:12:24.000Z | backend/solution/models.py | pixlie/buildgaga | 723bc9a114b3040a5c4d0b1d1b384f174f65b084 | [
"MIT"
] | 4 | 2021-03-10T15:20:52.000Z | 2022-02-27T03:22:01.000Z | backend/solution/models.py | pixlie/buildgaga | 723bc9a114b3040a5c4d0b1d1b384f174f65b084 | [
"MIT"
] | null | null | null | from sqlalchemy import Table, Column, Integer, String, ForeignKey, UniqueConstraint
from utils import metadata
# Table: a "solution" entry (a listed product/tool).
solution = Table(
    "solution",
    metadata,
    Column("id", Integer, primary_key=True),
    # Optional owning organization (FK to organization.id).
    Column("organization_fk", Integer, ForeignKey("organization.id"), nullable=True),
    # Display name; unique across all solutions.
    Column("label", String(60), unique=True, nullable=False),
    Column("one_liner", String(length=160), nullable=True),
    Column("description", String(length=500), nullable=True),
    Column("twitter", String(40), nullable=True),
    Column("url", String(100), nullable=True)
)

# Association table linking categories to solutions (many-to-many).
category_solution = Table(
    "category_solution",
    metadata,
    Column("id", Integer, primary_key=True),
    # NOTE(review): these are plain Integer columns with no ForeignKey
    # constraint, unlike solution.organization_fk above — confirm
    # whether that omission is intentional.
    Column("category_fk", Integer, nullable=False),
    Column("solution_fk", Integer, nullable=False),
    # Prevent duplicate category/solution pairs.
    UniqueConstraint("category_fk", "solution_fk", name="category_solution_unique")
)
| 28.451613 | 85 | 0.707483 |
80238c5f6f7a29c54e017dd27fb2eae698fb4bd7 | 343 | py | Python | example.py | JZF07/testing_example | ab830b9d2d5a09718b952b6ddc77e1f11a74572a | [
"MIT"
] | null | null | null | example.py | JZF07/testing_example | ab830b9d2d5a09718b952b6ddc77e1f11a74572a | [
"MIT"
] | 1 | 2022-03-31T08:21:57.000Z | 2022-03-31T08:29:02.000Z | example.py | JZF07/testing_example | ab830b9d2d5a09718b952b6ddc77e1f11a74572a | [
"MIT"
def add(a, b):
    """Return ``a + b`` (numeric sum or sequence concatenation)."""
    total = a + b
    return total
def test_add():
    # '+' both adds numbers and concatenates strings; cover both uses.
    assert add(2, 3) == 5
    assert add('space', 'ship') == 'spaceship'
def subtract(a, b):
    # Tutorial marker: step 7 of the exercise asks the reader to "fix"
    # this line; as written it already computes the difference.
    return a - b # <--- fix this in step 7
def test_subtract():
    # 2 - 3 == -1 confirms subtraction order (a - b).
    assert subtract(2, 3) == -1
# uncomment the following test in step 5
#def test_subtract():
# assert subtract(2, 3) == -1
| 15.590909 | 46 | 0.580175 |
d9825652ba2c53667b2a2b24e4fd6b584b19b4a0 | 937 | py | Python | tests/unit/test_licenses.py | butsyk/github3.py | 72fa5125fce75c916733839963554765c907e9e7 | [
"BSD-3-Clause"
] | 1 | 2019-04-23T07:16:07.000Z | 2019-04-23T07:16:07.000Z | tests/unit/test_licenses.py | butsyk/github3.py | 72fa5125fce75c916733839963554765c907e9e7 | [
"BSD-3-Clause"
] | 4 | 2020-10-19T13:02:37.000Z | 2020-10-25T19:01:25.000Z | tests/unit/test_licenses.py | butsyk/github3.py | 72fa5125fce75c916733839963554765c907e9e7 | [
"BSD-3-Clause"
] | 1 | 2020-03-06T06:30:51.000Z | 2020-03-06T06:30:51.000Z | from .helper import UnitHelper
from .helper import create_example_data_helper
from .helper import create_url_helper
import github3
get_example_data = create_example_data_helper("license_example")
url_for = create_url_helper("https://api.github.com/licenses/mit")
class TestLicenses(UnitHelper):
    """Unit tests around the License class."""

    described_class = github3.licenses.License
    example_data = get_example_data()

    def test_get_attr(self):
        """Show that attributes exist in class."""
        expected_attrs = (
            "description",
            "body",
            "implementation",
            "html_url",
            "key",
            "name",
            "featured",
        )
        for attr_name in expected_attrs:
            value = getattr(self.instance, attr_name)
            assert value

    def test_repr(self):
        """Show that instance string is formatted properly."""
        representation = repr(self.instance)
        assert representation.startswith("<License")
| 27.558824 | 66 | 0.641409 |
1428f3851235400e32b2c8a4d46eaef619337c08 | 9,848 | py | Python | text/text_navigation.py | schwafe/knausj_talon | 5da73a646f1e1c764be6ecab5bc280fee48ce8d5 | [
"MIT"
] | null | null | null | text/text_navigation.py | schwafe/knausj_talon | 5da73a646f1e1c764be6ecab5bc280fee48ce8d5 | [
"MIT"
] | 1 | 2022-03-26T15:27:18.000Z | 2022-03-26T15:27:18.000Z | text/text_navigation.py | schwafe/knausj_talon | 5da73a646f1e1c764be6ecab5bc280fee48ce8d5 | [
"MIT"
] | null | null | null | import re
from talon import ctrl, ui, Module, Context, actions, clip
import itertools
from typing import Union
ctx = Context()
mod = Module()
text_navigation_max_line_search = mod.setting(
"text_navigation_max_line_search",
type=int,
default=10,
desc="the maximum number of rows that will be included in the search for the keywords above and below in <user direction>",
)
mod.list(
"navigation_action",
desc="actions to perform, for instance move, select, cut, etc",
)
mod.list(
"before_or_after",
desc="words to indicate if the cursor should be moved before or after a given reference point",
)
mod.list(
"navigation_target_name",
desc="names for regular expressions for common things to navigate to, for instance a word with or without underscores",
)
ctx.lists["self.navigation_action"] = {
"move": "GO",
"extend": "EXTEND",
"select": "SELECT",
"clear": "DELETE",
"cut": "CUT",
"copy": "COPY",
}
ctx.lists["self.before_or_after"] = {
"before": "BEFORE",
"after": "AFTER",
# DEFAULT is also a valid option as input for this capture, but is not directly accessible for the user.
}
navigation_target_names = {
"word": r"\w+",
"small": r"[A-Z]?[a-z0-9]+",
"big": r"[\S]+",
"parens": r'\((.*?)\)',
"squares": r'\[(.*?)\]',
"braces": r'\{(.*?)\}',
"quotes": r'\"(.*?)\"',
"angles": r'\<(.*?)\>',
#"single quotes": r'\'(.*?)\'',
"all": r'(.+)',
"method": r'\w+\((.*?)\)',
"constant": r'[A-Z_][A-Z_]+'
}
ctx.lists["self.navigation_target_name"] = navigation_target_names
@mod.capture(rule="<user.any_alphanumeric_key> | {user.navigation_target_name} | phrase <user.text> | {user.punctuation_words}")
def navigation_target(m) -> re.Pattern:
    """A target to navigate to. Returns a regular expression."""
    key = getattr(m, 'any_alphanumeric_key', None)
    if key is not None:
        # A literal key press: match it verbatim, case-insensitively.
        return re.compile(re.escape(key), re.IGNORECASE)
    name = getattr(m, 'navigation_target_name', None)
    if name is not None:
        # A named target is already a regular-expression string.
        return re.compile(name)
    # Otherwise fall back to the spoken phrase/punctuation, matched literally.
    return re.compile(re.escape(m.text), re.IGNORECASE)
@mod.action_class
class Actions:
    # Talon action class: these functions are exposed as voice-command
    # actions (note: no `self` parameter, per talon convention).
    def navigation(
        navigation_action: str,  # GO, EXTEND, SELECT, DELETE, CUT, COPY
        direction: str,  # up, down, left, right
        navigation_target_name: str,
        before_or_after: str,  # BEFORE, AFTER, DEFAULT
        regex: re.Pattern,
        occurrence_number: int,
    ):
        """Navigate in `direction` to the occurrence_number-th time that `regex` occurs, then execute `navigation_action` at the given `before_or_after` position."""
        direction = direction.upper()
        # "DEFAULT" falls back to the plain word pattern; otherwise the
        # caller-supplied pattern string is compiled as-is.
        navigation_target_name = re.compile((navigation_target_names["word"] if (navigation_target_name == "DEFAULT") else navigation_target_name))
        # UP/LEFT search backwards from the cursor, DOWN/RIGHT forwards.
        function = navigate_left if direction in ("UP", "LEFT") else navigate_right
        function(navigation_action, navigation_target_name, before_or_after, regex, occurrence_number, direction)
    def navigation_by_name(
        navigation_action: str,  # GO, EXTEND, SELECT, DELETE, CUT, COPY
        direction: str,  # up, down, left, right
        before_or_after: str,  # BEFORE, AFTER, DEFAULT
        navigation_target_name: str,  # word, big, small
        occurrence_number: int,
    ):
        """Like user.navigation, but to a named target."""
        # Look up the named pattern (e.g. "word", "parens") and delegate.
        r = re.compile(navigation_target_names[navigation_target_name])
        actions.user.navigation(navigation_action, direction, "DEFAULT", before_or_after, r, occurrence_number)
def get_text_left():
    """Return the text between the start of the current line and the cursor.

    Side effect: briefly selects that span, then presses right to collapse
    the selection again.
    """
    actions.edit.extend_line_start()
    text = actions.edit.selected_text()
    actions.edit.right()
    return text
def get_text_right():
    """Return the text between the cursor and the end of the current line.

    Side effect: briefly selects that span, then presses left to collapse
    the selection again.
    """
    actions.edit.extend_line_end()
    text = actions.edit.selected_text()
    actions.edit.left()
    return text
def get_text_up():
    """Return up to text_navigation_max_line_search lines above the cursor.

    Moves one line up to skip the current line, then extends the selection
    upward line by line and captures it. Side effect: presses right
    afterwards to collapse the selection.
    """
    actions.edit.up()
    actions.edit.line_end()
    for j in range(0, text_navigation_max_line_search.get()):
        actions.edit.extend_up()
    actions.edit.extend_line_start()
    text = actions.edit.selected_text()
    actions.edit.right()
    return text
def get_text_down():
    """Return up to text_navigation_max_line_search lines below the cursor.

    Moves one line down to skip the current line, then extends the selection
    downward line by line and captures it. Side effect: presses left
    afterwards to collapse the selection.
    """
    actions.edit.down()
    actions.edit.line_start()
    for j in range(0, text_navigation_max_line_search.get()):
        actions.edit.extend_down()
    actions.edit.extend_line_end()
    text = actions.edit.selected_text()
    actions.edit.left()
    return text
def get_current_selection_size():
    """Length, in characters, of the currently selected text."""
    selected = actions.edit.selected_text()
    return len(selected)
def go_right(i):
    """Move the cursor right `i` characters."""
    for _ in range(i):
        actions.edit.right()
def go_left(i):
    """Move the cursor left `i` characters."""
    for _ in range(i):
        actions.edit.left()
def extend_left(i):
    """Extend the selection left by `i` characters."""
    for _ in range(i):
        actions.edit.extend_left()
def extend_right(i):
    """Extend the selection right by `i` characters."""
    for _ in range(i):
        actions.edit.extend_right()
def select(direction, start, end, length):
    """Select the span [start, end) of the searched text of size `length`.

    The cursor is assumed to sit at the near edge of that text: its left
    edge for RIGHT/DOWN searches, its right edge for LEFT/UP searches.
    """
    span = end - start
    if direction in ("RIGHT", "DOWN"):
        go_right(start)
        extend_right(span)
    else:
        go_left(length - end)
        extend_left(span)
def navigate_left(
    navigation_action, navigation_target_name, before_or_after, regex, occurrence_number, direction
):
    """Search backwards (LEFT within the line, UP across lines) for the
    occurrence_number-th match of `regex`, then apply `navigation_action`.
    """
    current_selection_length = get_current_selection_size()
    # Collapse any existing selection to its right edge first, so the
    # captured text ends exactly at the cursor.
    if current_selection_length > 0:
        actions.edit.right()
    text = get_text_left() if direction == "LEFT" else get_text_up()
    # only search in the text that was not selected
    subtext = (
        text if current_selection_length <= 0 else text[:-current_selection_length]
    )
    match = match_backwards(regex, occurrence_number, subtext)
    if match == None:
        # put back the old selection, if the search failed
        extend_left(current_selection_length)
        return
    # start/end are offsets into `text`, counted from its beginning.
    start = match.start()
    end = match.end()
    handle_navigation_action(
        navigation_action, navigation_target_name, before_or_after, direction, text, start, end
    )
def navigate_right(
    navigation_action, navigation_target_name, before_or_after, regex, occurrence_number, direction
):
    """Search forwards (RIGHT within the line, DOWN across lines) for the
    occurrence_number-th match of `regex`, then apply `navigation_action`.
    """
    current_selection_length = get_current_selection_size()
    # Collapse any existing selection to its left edge first, so the
    # captured text starts exactly at the cursor.
    if current_selection_length > 0:
        actions.edit.left()
    text = get_text_right() if direction == "RIGHT" else get_text_down()
    # only search in the text that was not selected
    sub_text = text[current_selection_length:]
    # Skip occurrence_number - 1 matches and take the next one.
    match = match_forward(regex, occurrence_number, sub_text)
    if match == None:
        # put back the old selection, if the search failed
        extend_right(current_selection_length)
        return
    # Re-express the match offsets relative to the whole captured `text`.
    start = current_selection_length + match.start()
    end = current_selection_length + match.end()
    handle_navigation_action(
        navigation_action, navigation_target_name, before_or_after, direction, text, start, end
    )
def handle_navigation_action(
    navigation_action, navigation_target_name, before_or_after, direction, text, start, end
):
    """Dispatch the requested action on the match at text[start:end].

    GO moves the cursor, EXTEND grows the selection; SELECT, DELETE, CUT
    and COPY all first select a region and then (except SELECT) apply the
    corresponding edit command.
    """
    length = len(text)
    if navigation_action == "GO":
        handle_move(direction, before_or_after, start, end, length)
    elif navigation_action == "EXTEND":
        handle_extend(before_or_after, direction, start, end, length)
    elif navigation_action in ("SELECT", "DELETE", "CUT", "COPY"):
        # All selection-based actions share the same region computation.
        handle_select(navigation_target_name, before_or_after, direction, text, start, end, length)
        if navigation_action == "DELETE":
            actions.edit.delete()
        elif navigation_action == "CUT":
            actions.edit.cut()
        elif navigation_action == "COPY":
            actions.edit.copy()
def handle_select(navigation_target_name, before_or_after, direction, text, start, end, length):
    """Select a region relative to the match at text[start:end].

    With BEFORE/AFTER, the region selected is the nearest occurrence of
    `navigation_target_name` before/after the match (or everything up to
    the text boundary if none is found); with DEFAULT, the match itself.
    """
    if before_or_after == "BEFORE":
        # Search backwards in the text strictly preceding the match.
        select_left = length - start
        text_left = text[:-select_left]
        match2 = match_backwards(navigation_target_name, 1, text_left)
        if match2 == None:
            # Nothing found: select from the text start up to the match.
            end = start
            start = 0
        else:
            start = match2.start()
            end = match2.end()
    elif before_or_after == "AFTER":
        # Search forwards in the text strictly following the match.
        text_right = text[end:]
        match2 = match_forward(navigation_target_name, 1, text_right)
        if match2 == None:
            # Nothing found: select from the match to the text end.
            start = end
            end = length
        else:
            # Offsets from match2 are relative to text_right; shift back.
            start = end + match2.start()
            end = end + match2.end()
    select(direction, start, end, length)
def handle_move(direction, before_or_after, start, end, length):
    """Move the cursor to the chosen edge of the match at [start, end).

    For forward searches the cursor starts at offset 0 of the text; for
    backward searches it starts at offset `length`.
    """
    if direction in ("RIGHT", "DOWN"):
        target = start if before_or_after == "BEFORE" else end
        go_right(target)
    else:
        target = end if before_or_after == "AFTER" else start
        go_left(length - target)
def handle_extend(before_or_after, direction, start, end, length):
    """Extend the selection to the chosen edge of the match at [start, end).

    Mirrors handle_move, but grows the selection instead of moving the
    cursor.
    """
    if direction in ("RIGHT", "DOWN"):
        target = start if before_or_after == "BEFORE" else end
        extend_right(target)
    else:
        target = end if before_or_after == "AFTER" else start
        extend_left(length - target)
def match_backwards(regex, occurrence_number, subtext):
    """Return the occurrence_number-th match of `regex`, counted from the
    end of `subtext`, or None when there are not that many matches."""
    all_matches = list(regex.finditer(subtext))
    try:
        # Negative indexing counts occurrences from the right.
        return all_matches[-occurrence_number]
    except IndexError:
        return None
def match_forward(regex, occurrence_number, sub_text):
    """Return the occurrence_number-th match of `regex`, counted from the
    start of `sub_text`, or None when there are not that many matches."""
    match_iter = regex.finditer(sub_text)
    # Lazily drop the first occurrence_number - 1 matches.
    remaining = itertools.islice(match_iter, occurrence_number - 1, None)
    try:
        return next(remaining)
    except StopIteration:
        return None
| 33.383051 | 165 | 0.658611 |
979c66822b499aa49dfebcdb6e639dc70966fae6 | 1,662 | py | Python | model/third_party/HMNet/Utils/distributed.py | NickSchoelkopf/SummerTime | 9a89aab8e1544e3c52c043b9c47ab325e665e11e | [
"Apache-2.0"
] | 178 | 2021-07-07T23:46:20.000Z | 2022-03-26T17:47:21.000Z | model/third_party/HMNet/Utils/distributed.py | NickSchoelkopf/SummerTime | 9a89aab8e1544e3c52c043b9c47ab325e665e11e | [
"Apache-2.0"
] | 77 | 2021-06-18T21:44:53.000Z | 2022-02-20T00:23:06.000Z | model/third_party/HMNet/Utils/distributed.py | NickSchoelkopf/SummerTime | 9a89aab8e1544e3c52c043b9c47ab325e665e11e | [
"Apache-2.0"
] | 19 | 2021-06-18T22:24:47.000Z | 2022-03-16T12:53:50.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import torch
def distributed(opt, is_nocuda):
    """Detect single-node distributed context from Open MPI environment
    variables and initialize torch.distributed when appropriate.

    Returns (device, n_gpu, world_size, local_size, rank, local_rank,
    is_master, run); `run` is always None here.
    """
    # NOTE(review): `cluster` is read but never used below; the dict access
    # also doubles as a KeyError check that "cluster" is configured -- confirm
    # before removing.
    cluster = opt["cluster"]
    # Single-process defaults, used when no MPI environment is present.
    world_size = 1
    local_size = 1
    rank = 0
    local_rank = 0
    is_master = True
    run = None
    if is_nocuda or not torch.cuda.is_available():
        device = torch.device("cpu")
        n_gpu = 0
    else:
        # Open MPI exports these when the process was launched via mpirun.
        if "OMPI_COMM_WORLD_SIZE" in os.environ:
            world_size = int(os.environ["OMPI_COMM_WORLD_SIZE"])
            local_size = int(os.environ["OMPI_COMM_WORLD_LOCAL_SIZE"])
            rank = int(os.environ["OMPI_COMM_WORLD_RANK"])
            local_rank = int(os.environ["OMPI_COMM_WORLD_LOCAL_RANK"])
            is_master = rank == 0
        # One GPU per process, chosen by node-local rank.
        torch.cuda.set_device(local_rank)
        device = torch.device("cuda", local_rank)
        n_gpu = 1
        # the following assumes that all processes run on a single node
        if torch.distributed.is_available() and world_size > 1:
            # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
            os.environ["WORLD_SIZE"] = str(world_size)
            os.environ["RANK"] = str(rank)
            os.environ["MASTER_ADDR"] = "127.0.0.1"
            os.environ["MASTER_PORT"] = (
                opt["master_port"] if "master_port" in opt else "35551"
            )
            torch.distributed.init_process_group(
                backend="nccl"
            )  # using environment variable initialization
            print("Distributed package is available. Process group initialized.")
    return device, n_gpu, world_size, local_size, rank, local_rank, is_master, run
| 36.130435 | 97 | 0.62515 |
ee9a4d80a2eac92564e45a3983a0397c0d35ecb8 | 7,195 | py | Python | src/preprocessing.py | suvarna-m/DSCI_522_Group313 | c27ea76f9ac0d41dc3e632691232ee8a2f5a7a9f | [
"MIT"
] | null | null | null | src/preprocessing.py | suvarna-m/DSCI_522_Group313 | c27ea76f9ac0d41dc3e632691232ee8a2f5a7a9f | [
"MIT"
] | 32 | 2020-01-16T22:27:24.000Z | 2020-02-13T01:55:21.000Z | src/preprocessing.py | suvarna-m/DSCI_522_Group313 | c27ea76f9ac0d41dc3e632691232ee8a2f5a7a9f | [
"MIT"
] | 10 | 2020-01-16T00:35:46.000Z | 2020-02-12T22:45:07.000Z | # authors: Suvarna Moharir, Jaekeun Lee, Chimaobi Amadi
# date: 2020.01.24
''' This script reads in the data, drops blank columns, cleans missing data, and then splits the data into testing and training sets
Usage: preprocessing.py [--quebec_path=<quebec_path> --store_path=<store_path>]
Options:
--quebec_path=<quebec_path> Relative file path for the quebec_df csv [default: data/raw_quebec_city_airbnb_data.csv]
--store_path=<store_path> Full path and file name to where the processed data should live and called [default: data/cleaned_data.csv]
'''
#importing packages and libraries
import pandas as pd
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from docopt import docopt
opt = docopt(__doc__)
def main(quebec_path, store_path):
    """Clean the raw Quebec City Airbnb listing data, split it 80/20 into
    train/test sets, scale/encode the features, and write everything as CSV
    files next to `store_path`.
    """
    file = pd.read_csv(quebec_path)
    #dropping columns where all values are null and/or columns that are in French
    quebec_df = file.drop(columns = ['summary', 'space', 'listing_url', 'host_url', 'description', 'scrape_id', 'last_scraped', 'experiences_offered','thumbnail_url', 'medium_url', 'xl_picture_url', 'host_acceptance_rate', 'name', 'neighbourhood', 'neighborhood_overview', 'neighbourhood_group_cleansed', 'host_neighbourhood', 'jurisdiction_names', 'license', 'cancellation_policy', 'notes', 'transit', 'access', 'interaction', 'house_rules', 'picture_url', 'host_about', 'host_thumbnail_url', 'host_total_listings_count', 'minimum_minimum_nights', 'minimum_maximum_nights', 'maximum_maximum_nights', 'maximum_minimum_nights', 'minimum_nights_avg_ntm', 'maximum_nights_avg_ntm', 'host_picture_url', 'host_name', 'host_since', 'host_location', 'host_verifications', 'state', 'street', 'market', 'smart_location', 'country_code', 'country','amenities', 'calendar_updated', 'calendar_last_scraped', 'first_review', 'last_review', 'square_feet', 'weekly_price', 'monthly_price', 'security_deposit', 'cleaning_fee', 'zipcode', 'id', 'host_id', 'neighbourhood_cleansed', 'city'])
    #dropping all NaN-containing rows
    quebec_df = quebec_df.dropna()
    #making sure there are no missing values and dataframe has the correct dimensions
    # NOTE: the row count (2194) is tied to this specific raw data file.
    assert quebec_df.columns[quebec_df.isna().any()].tolist() == [], "There are still missing values"
    assert quebec_df.shape[0] == 2194, "Wrong number of rows"
    assert quebec_df.shape[1] == 45, "Wrong number of columns"
    #removing '$' from price and fees and converting string to float
    quebec_df.price = quebec_df.price.replace('[\$,]', '', regex=True).astype(float)
    quebec_df.extra_people = quebec_df.extra_people.replace('[\$,]', '', regex=True).astype(float)
    #changing response rate from string to numeric and removing '%' sign
    quebec_df.host_response_rate = quebec_df['host_response_rate'].str.rstrip('%').astype('float')
    #changing booleans 'True' and 'False' to 1 and 0
    quebec_df.host_is_superhost = quebec_df.host_is_superhost.replace({True: 1, False: 0})
    quebec_df.host_has_profile_pic = quebec_df.host_has_profile_pic.replace({True: 1, False: 0})
    quebec_df.host_identity_verified = quebec_df.host_identity_verified.replace({True: 1, False: 0})
    quebec_df.is_location_exact = quebec_df.is_location_exact.replace({True: 1, False: 0})
    quebec_df.has_availability = quebec_df.has_availability.replace({True: 1, False: 0})
    quebec_df.requires_license = quebec_df.requires_license.replace({True: 1, False: 0})
    quebec_df.instant_bookable = quebec_df.instant_bookable.replace({True: 1, False: 0})
    quebec_df.is_business_travel_ready = quebec_df.is_business_travel_ready.replace({True: 1, False: 0})
    quebec_df.require_guest_profile_picture = quebec_df.require_guest_profile_picture.replace({True: 1, False: 0})
    quebec_df.require_guest_phone_verification = quebec_df.require_guest_phone_verification.replace({True: 1, False: 0})
    #making sure that the datatypes got converted properly
    assert quebec_df.dtypes.price == 'float64', "price string to float conversion did not convert properly"
    assert quebec_df.dtypes.host_response_rate == 'float64', "host_reponse_rate string to float did not convert properly"
    assert quebec_df.dtypes.host_is_superhost == 'int64', "host_is_superhost boolean to number did not convert properly"
    #making an 80-20 train-test split; `price` is the regression target
    X = quebec_df.drop(columns = ['price'])
    y = quebec_df[['price']]
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 5)
    #making sure split sizes are correct
    assert X_train.shape[1] == 44, "X_train did not split properly"
    assert y_train.shape[1] == 1, "y_train did not split properly"
    #assigning categorical and numeric features
    categorical_features = ['host_response_time', 'property_type', 'room_type', 'bed_type']
    numeric_features = ['host_response_rate', 'host_is_superhost', 'host_listings_count', 'host_has_profile_pic', 'host_identity_verified', 'latitude', 'longitude', 'is_location_exact', 'accommodates', 'bathrooms', 'bedrooms', 'beds', 'guests_included', 'extra_people', 'minimum_nights', 'maximum_nights', 'has_availability', 'availability_30', 'availability_60', 'availability_90', 'availability_365', 'number_of_reviews', 'number_of_reviews_ltm', 'review_scores_rating', 'review_scores_accuracy', 'review_scores_cleanliness', 'review_scores_checkin', 'review_scores_communication', 'review_scores_location', 'review_scores_value', 'requires_license', 'instant_bookable', 'is_business_travel_ready', 'require_guest_profile_picture', 'require_guest_phone_verification', 'calculated_host_listings_count', 'calculated_host_listings_count_entire_homes', 'calculated_host_listings_count_private_rooms', 'calculated_host_listings_count_shared_rooms', 'reviews_per_month']
    #preprocessing data with StandardScaler (numeric) and OneHotEncoder (categorical)
    preprocessor = ColumnTransformer(
        transformers=[
            ('scale', StandardScaler(), numeric_features),
            ('ohe', OneHotEncoder(handle_unknown= 'ignore'), categorical_features)
        ])
    #transforming data: fit on the training split only, then apply to test
    X_train = pd.DataFrame(preprocessor.fit_transform(X_train), index=X_train.index, columns = (numeric_features + list(preprocessor.named_transformers_['ohe'].get_feature_names(categorical_features))))
    X_test = pd.DataFrame(preprocessor.transform(X_test), index=X_test.index, columns=X_train.columns)
    #converting to csv
    ### The training and testing data live in the same directory as the processed datafile
    ### From the store_path, the closest parent directory is selected by splitting the store_path with "/"
    folder_dir = "/".join(store_path.split("/")[:-1])
    X_train.to_csv(folder_dir+"/" + "X_train.csv", index = False)
    y_train.to_csv(folder_dir+"/" + "y_train.csv", index = False, header = True)
    X_test.to_csv(folder_dir+"/" + "X_test.csv", index = False)
    y_test.to_csv(folder_dir+"/" + "y_test.csv", index = False, header = True)
    quebec_df.to_csv(store_path, index = False, header = True)
# docopt has already parsed the command-line flags into `opt` at import time.
if __name__ == "__main__":
    main(opt["--quebec_path"], opt["--store_path"])
| 69.854369 | 1,073 | 0.757331 |
2bcb0f1a67b48782a0584ce7f90fddcf58695398 | 770 | py | Python | core/manager/celeryManager.py | umknow/python | 6fc4ad43bf0886c49b807c203ca485a15056c97c | [
"BSD-2-Clause"
] | null | null | null | core/manager/celeryManager.py | umknow/python | 6fc4ad43bf0886c49b807c203ca485a15056c97c | [
"BSD-2-Clause"
] | null | null | null | core/manager/celeryManager.py | umknow/python | 6fc4ad43bf0886c49b807c203ca485a15056c97c | [
"BSD-2-Clause"
] | null | null | null | # -*- coding=utf-8 -*-
# datetime: 2019/4/10 13:27
"""
celery连接管理器包
"""
from quotations.conf import config
from celery import Celery
from celery import platforms
platforms.C_FORCE_ROOT = True
class CeleryManager:
    """Process-wide singleton wrapping a single Celery application.

    ``__new__`` caches one instance on the class. The guard in ``__init__``
    is needed because Python re-runs ``__init__`` on *every* instantiation,
    even when ``__new__`` returns the cached object.
    """

    def __new__(cls, *args, **kwargs):
        # Lazily create the one shared instance on first use.
        if not hasattr(cls, '_instance'):
            cls._instance = super(CeleryManager, cls).__new__(cls)
        return cls._instance

    def __init__(self, settings):
        # Bug fix: previously every CeleryManager(settings) call rebuilt the
        # Celery app on the shared singleton, discarding the existing one.
        # Now the first settings seen win and later calls are no-ops.
        if hasattr(self, 'tasks'):
            return
        self.tasks = Celery("tasks", broker=settings['CELERY_BROKER_URL'])
        self.tasks.conf.update(settings)

    def get_task(self):
        """Return the shared Celery application object."""
        return self.tasks
return self.tasks
def get_task():
    """Build (or fetch) the singleton CeleryManager and return its app."""
    manager = CeleryManager(config('celery').get())
    return manager.get_task()
tasks = get_task()
# print(tasks)
| 21.388889 | 74 | 0.675325 |
0b5407e273b4a2f3a51a205cfe18f4900b8f601a | 1,376 | py | Python | samples/drawing.py | neumond/minpiler | 2e37a9e0854383d3974af38e1cb2da0ecb8e2108 | [
"MIT"
] | 23 | 2020-12-20T03:39:30.000Z | 2022-03-23T15:47:10.000Z | samples/drawing.py | neumond/minpiler | 2e37a9e0854383d3974af38e1cb2da0ecb8e2108 | [
"MIT"
] | 15 | 2020-12-21T01:12:22.000Z | 2021-04-19T10:40:11.000Z | samples/drawing.py | neumond/minpiler | 2e37a9e0854383d3974af38e1cb2da0ecb8e2108 | [
"MIT"
] | 2 | 2022-02-12T19:19:50.000Z | 2022-02-12T21:33:35.000Z | from minpiler.typeshed import (
M,
switch1, display1,
setup, time,
)
# large display is 176 x 176
dsize = 176  # edge length of the large display, in pixels
M.draw.clear(0, 0, 0)  # wipe to black before drawing
M.draw.stroke(5)  # line width for the triangle edges
M.draw.color(255, 0, 0, 255)  # opaque red (RGBA) for the triangle
def posvel(pos, vel):
    """Advance a coordinate by its velocity, bouncing off [0, dsize]."""
    pos = pos + vel
    if pos < 0:
        # Hit the low edge: clamp and point the velocity back inward.
        pos, vel = 0, M.abs(vel)
    if pos > dsize:
        # Hit the high edge: clamp and point the velocity back inward.
        pos, vel = dsize, -M.abs(vel)
    return pos, vel
def randvel():
    """Random velocity component centered on zero (spread of +/-10,
    per M.rand's range semantics)."""
    spread = 10
    return M.rand(2 * spread) - spread
# First run (setup unset) or switch1 pressed: scatter three triangle
# vertices randomly and give each a random velocity. Otherwise advance
# every vertex one step, bouncing off the display edges.
# NOTE(review): state appears to persist across re-runs of this script
# via the minpiler globals (setup, t*, time) -- confirm against minpiler docs.
if setup is None or switch1.enabled:
    t1x, t1y = M.rand(dsize), M.rand(dsize)
    t2x, t2y = M.rand(dsize), M.rand(dsize)
    t3x, t3y = M.rand(dsize), M.rand(dsize)
    t1vx, t1vy = randvel(), randvel()
    t2vx, t2vy = randvel(), randvel()
    t3vx, t3vy = randvel(), randvel()
    setup = True
else:
    t1x, t1vx = posvel(t1x, t1vx)
    t1y, t1vy = posvel(t1y, t1vy)
    t2x, t2vx = posvel(t2x, t2vx)
    t2y, t2vy = posvel(t2y, t2vy)
    t3x, t3vx = posvel(t3x, t3vx)
    t3y, t3vy = posvel(t3y, t3vy)
# Draw the triangle edges between the three vertices.
M.draw.line(t1x, t1y, t2x, t2y)
M.draw.line(t2x, t2y, t3x, t3y)
M.draw.line(t3x, t3y, t1x, t1y)
# `time` is a rotation angle in degrees, advanced 10 per run, wrapped at 360.
time += 10
if time > 360:
    time %= 360
M.draw.color(255, 255, 255, 255)
# One item icon per vertex; size pulses with sin(), phase-shifted 120 degrees
# apart, and each icon spins with `time`.
M.draw.image(t1x, t1y, M.at.lead, 30 + M.sin(time) * 10, time)
M.draw.image(t2x, t2y, M.at.metaglass, 30 + M.sin(time + 120) * 10, time)
M.draw.image(t3x, t3y, M.at.thorium, 30 + M.sin(time + 240) * 10, time)
display1.drawFlush()
731c233f3697c93fbd1eec9c1f7823f628799fda | 2,603 | py | Python | validation/emulation/experiments/topo.p4app/util/plot_tcpprobe.py | nerds-ufes/G-PolKA | 9c6bd42167bc333f6421a751c93a88c00841def9 | [
"Apache-2.0",
"MIT"
] | 2 | 2022-02-09T15:17:50.000Z | 2022-02-17T15:50:45.000Z | dctcp-assignment/util/plot_tcpprobe.py | warjazz/DCTCP | ee30e6418c1be7469de30ebe71066fcbaba64508 | [
"Unlicense"
] | 1 | 2022-03-04T03:51:28.000Z | 2022-03-04T07:25:24.000Z | dctcp-assignment/util/plot_tcpprobe.py | warjazz/DCTCP | ee30e6418c1be7469de30ebe71066fcbaba64508 | [
"Unlicense"
] | null | null | null | from helper import *
from collections import defaultdict
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--port', dest="port", default='5001')
parser.add_argument('-f', dest="files", nargs='+', required=True)
parser.add_argument('-o', '--out', dest="out", default=None)
parser.add_argument('-H', '--histogram', dest="histogram",
help="Plot histogram of sum(cwnd_i)",
action="store_true",
default=False)
args = parser.parse_args()
def first(lst):
    """Return the first element of every pair in *lst*, as a list.

    A list comprehension replaces ``map`` so the result is a real,
    reusable list on Python 3 as well (there ``map`` returns a one-shot
    iterator; on Python 2, which the rest of this script targets, it
    already returned a list).
    """
    return [e[0] for e in lst]
def second(lst):
    """Return the second element of every pair in *lst*, as a list.

    Mirrors ``first``; a list comprehension replaces ``map`` for
    Python 2/3-consistent list semantics.
    """
    return [e[1] for e in lst]
"""
Sample line:
2.221032535 10.0.0.2:39815 10.0.0.1:5001 32 0x1a2a710c 0x1a2a387c 11 2147483647 14592 85
"""
def parse_file(f):
    """Parse a tcp_probe trace file.

    Each valid line has 10 space-separated fields, e.g.::

        2.221032535 10.0.0.2:39815 10.0.0.1:5001 32 0x1a2a710c 0x1a2a387c 11 2147483647 14592 85

    Only flows whose destination port matches ``args.port`` are kept.

    Returns (times, cwnds): two dicts keyed by source port, holding the
    list of timestamps (seconds) and congestion windows (KB) respectively.
    """
    times = defaultdict(list)
    cwnd = defaultdict(list)
    # Fixes: use a context manager so the file handle is closed
    # deterministically (the old open(f).xreadlines() leaked it, and
    # xreadlines() is Python-2-only); the srtt column was collected but
    # never used, so it is no longer accumulated.
    with open(f) as trace:
        for line in trace:
            fields = line.strip().split(' ')
            if len(fields) != 10:
                # A malformed (e.g. truncated trailing) line ends the trace.
                break
            # fields[2] is "dst_ip:dst_port"; keep only flows to our port.
            if fields[2].split(':')[1] != args.port:
                continue
            sport = int(fields[1].split(':')[1])
            times[sport].append(float(fields[0]))
            # fields[6] is cwnd in segments; 1480 bytes per segment
            # (presumably the MSS), converted to KB.
            c = int(fields[6])
            cwnd[sport].append(c * 1480 / 1024.0)
    return times, cwnd
added = defaultdict(int)
events = []
def plot_cwnds(ax):
    """Plot one cwnd-vs-time curve per flow on `ax`, and accumulate
    (time, port, cwnd) triples into the module-level `events` list,
    sorted by time on exit.
    """
    global events
    for f in args.files:
        times, cwnds = parse_file(f)
        # One curve per source port, in deterministic (sorted) order.
        for port in sorted(cwnds.keys()):
            t = times[port]
            cwnd = cwnds[port]
            # NOTE: this script runs under Python 2 (see the print
            # statements below), where zip() returns a list.
            events += zip(t, [port]*len(t), cwnd)
            ax.plot(t, cwnd)
    # Tuples sort by their first element, i.e. chronologically.
    events.sort()
# Running total of cwnd summed over all flows, sampled at every event.
total_cwnd = 0
cwnd_time = []
# NOTE(review): min_total_cwnd / max_total_cwnd are initialized but never
# updated or read below -- likely leftovers.
min_total_cwnd = 10**10
max_total_cwnd = 0
totalcwnds = []
m.rc('figure', figsize=(16, 6))
fig = plt.figure()
# One subplot normally; two when a histogram was requested.
plots = 1
if args.histogram:
    plots = 2
axPlot = fig.add_subplot(1, plots, 1)
plot_cwnds(axPlot)
# Walk the chronologically sorted events, maintaining each port's latest
# cwnd in `added` so total_cwnd is always the sum over all flows.
for (t,p,c) in events:
    if added[p]:
        total_cwnd -= added[p]
    total_cwnd += c
    cwnd_time.append((t, total_cwnd))
    added[p] = c
    totalcwnds.append(total_cwnd)
axPlot.plot(first(cwnd_time), second(cwnd_time), lw=2, label="$\sum_i W_i$")
axPlot.grid(True)
axPlot.legend()
axPlot.set_xlabel("seconds")
axPlot.set_ylabel("cwnd KB")
axPlot.set_title("TCP congestion window (cwnd) timeseries")
if args.histogram:
    axHist = fig.add_subplot(1, 2, 2)
    n, bins, patches = axHist.hist(totalcwnds, 50, normed=1, facecolor='green', alpha=0.75)
    axHist.set_xlabel("bins (KB)")
    axHist.set_ylabel("Fraction")
    axHist.set_title("Histogram of sum(cwnd_i)")
# Save to file when -o was given, otherwise show interactively.
if args.out:
    print 'saving to', args.out
    plt.savefig(args.out)
else:
    plt.show()
| 25.271845 | 91 | 0.61698 |
adb47f55be19890195629a028c26ff21d566c615 | 8,425 | py | Python | deepchem/models/tensorgraph/tests/test_tensor_graph.py | hssinejihene/deepchem-1.1.0 | 6efbe6b638b77bb2685ac617f4d6649755c01335 | [
"MIT"
] | null | null | null | deepchem/models/tensorgraph/tests/test_tensor_graph.py | hssinejihene/deepchem-1.1.0 | 6efbe6b638b77bb2685ac617f4d6649755c01335 | [
"MIT"
] | null | null | null | deepchem/models/tensorgraph/tests/test_tensor_graph.py | hssinejihene/deepchem-1.1.0 | 6efbe6b638b77bb2685ac617f4d6649755c01335 | [
"MIT"
] | null | null | null | import unittest
import numpy as np
import os
from nose.tools import assert_true
import deepchem as dc
from deepchem.data import NumpyDataset
from deepchem.data.datasets import Databag
from deepchem.models.tensorgraph.layers import Dense, SoftMaxCrossEntropy, ReduceMean, SoftMax
from deepchem.models.tensorgraph.layers import Feature, Label
from deepchem.models.tensorgraph.layers import ReduceSquareDifference
from deepchem.models.tensorgraph.tensor_graph import TensorGraph
class TestTensorGraph(unittest.TestCase):
  """
  Test that graph topologies work correctly.
  """

  def test_single_task_classifier(self):
    """Fit a tiny softmax classifier on constant labels and check it learns them."""
    n_data_points = 20
    n_features = 2
    X = np.random.rand(n_data_points, n_features)
    y = [[0, 1] for x in range(n_data_points)]
    dataset = NumpyDataset(X, y)
    features = Feature(shape=(None, n_features))
    dense = Dense(out_channels=2, in_layers=[features])
    output = SoftMax(in_layers=[dense])
    label = Label(shape=(None, 2))
    smce = SoftMaxCrossEntropy(in_layers=[label, dense])
    loss = ReduceMean(in_layers=[smce])
    tg = dc.models.TensorGraph(learning_rate=0.1)
    tg.add_output(output)
    tg.set_loss(loss)
    tg.fit(dataset, nb_epoch=1000)
    prediction = np.squeeze(tg.predict_proba_on_batch(X))
    assert_true(np.all(np.isclose(prediction, y, atol=0.4)))

  def test_multi_task_classifier(self):
    """Fit two classification heads sharing one feature input via a Databag."""
    n_data_points = 20
    n_features = 2
    X = np.random.rand(n_data_points, n_features)
    y1 = np.array([[0, 1] for x in range(n_data_points)])
    y2 = np.array([[1, 0] for x in range(n_data_points)])
    X = NumpyDataset(X)
    ys = [NumpyDataset(y1), NumpyDataset(y2)]
    databag = Databag()
    features = Feature(shape=(None, n_features))
    databag.add_dataset(features, X)
    outputs = []
    entropies = []
    # One label/dense/softmax head per task; losses are averaged together.
    for i in range(2):
      label = Label(shape=(None, 2))
      dense = Dense(out_channels=2, in_layers=[features])
      output = SoftMax(in_layers=[dense])
      smce = SoftMaxCrossEntropy(in_layers=[label, dense])
      entropies.append(smce)
      outputs.append(output)
      databag.add_dataset(label, ys[i])
    total_loss = ReduceMean(in_layers=entropies)
    tg = dc.models.TensorGraph(learning_rate=0.1)
    for output in outputs:
      tg.add_output(output)
    tg.set_loss(total_loss)
    tg.fit_generator(
        databag.iterbatches(
            epochs=1000, batch_size=tg.batch_size, pad_batches=True))
    prediction = tg.predict_proba_on_generator(databag.iterbatches())
    for i in range(2):
      y_real = ys[i].X
      y_pred = prediction[:, i, :]
      assert_true(np.all(np.isclose(y_pred, y_real, atol=0.6)))

  def test_single_task_regressor(self):
    """Fit a single-output regressor to a constant target."""
    n_data_points = 20
    n_features = 2
    X = np.random.rand(n_data_points, n_features)
    y = [0.5 for x in range(n_data_points)]
    dataset = NumpyDataset(X, y)
    features = Feature(shape=(None, n_features))
    dense = Dense(out_channels=1, in_layers=[features])
    label = Label(shape=(None, 1))
    loss = ReduceSquareDifference(in_layers=[dense, label])
    tg = dc.models.TensorGraph(learning_rate=0.1)
    tg.add_output(dense)
    tg.set_loss(loss)
    tg.fit(dataset, nb_epoch=1000)
    prediction = np.squeeze(tg.predict_proba_on_batch(X))
    assert_true(np.all(np.isclose(prediction, y, atol=3.0)))

  def test_multi_task_regressor(self):
    """Fit two regression heads sharing one feature input via a Databag."""
    n_data_points = 20
    n_features = 2
    X = np.random.rand(n_data_points, n_features)
    y1 = np.expand_dims(np.array([0.5 for x in range(n_data_points)]), axis=-1)
    y2 = np.expand_dims(np.array([-0.5 for x in range(n_data_points)]), axis=-1)
    X = NumpyDataset(X)
    ys = [NumpyDataset(y1), NumpyDataset(y2)]
    databag = Databag()
    features = Feature(shape=(None, n_features))
    databag.add_dataset(features, X)
    outputs = []
    losses = []
    # One label/dense/squared-error head per task.
    for i in range(2):
      label = Label(shape=(None, 1))
      dense = Dense(out_channels=1, in_layers=[features])
      loss = ReduceSquareDifference(in_layers=[dense, label])
      outputs.append(dense)
      losses.append(loss)
      databag.add_dataset(label, ys[i])
    total_loss = ReduceMean(in_layers=losses)
    tg = dc.models.TensorGraph(learning_rate=0.1)
    for output in outputs:
      tg.add_output(output)
    tg.set_loss(total_loss)
    tg.fit_generator(
        databag.iterbatches(
            epochs=1000, batch_size=tg.batch_size, pad_batches=True))
    prediction = tg.predict_proba_on_generator(databag.iterbatches())
    for i in range(2):
      y_real = ys[i].X
      y_pred = prediction[:, i, :]
      assert_true(np.all(np.isclose(y_pred, y_real, atol=1.5)))

  def test_no_queue(self):
    """Same as the single-task classifier, but with the input queue disabled."""
    n_data_points = 20
    n_features = 2
    X = np.random.rand(n_data_points, n_features)
    y = [[0, 1] for x in range(n_data_points)]
    dataset = NumpyDataset(X, y)
    features = Feature(shape=(None, n_features))
    dense = Dense(out_channels=2, in_layers=[features])
    output = SoftMax(in_layers=[dense])
    label = Label(shape=(None, 2))
    smce = SoftMaxCrossEntropy(in_layers=[label, dense])
    loss = ReduceMean(in_layers=[smce])
    tg = dc.models.TensorGraph(learning_rate=1.0, use_queue=False)
    tg.add_output(output)
    tg.set_loss(loss)
    tg.fit(dataset, nb_epoch=1000)
    prediction = np.squeeze(tg.predict_proba_on_batch(X))
    assert_true(np.all(np.isclose(prediction, y, atol=0.4)))

  def test_tensorboard(self):
    """Check that training with tensorboard=True writes a non-empty events file."""
    n_data_points = 20
    n_features = 2
    X = np.random.rand(n_data_points, n_features)
    y = [[0, 1] for x in range(n_data_points)]
    dataset = NumpyDataset(X, y)
    features = Feature(shape=(None, n_features))
    dense = Dense(out_channels=2, in_layers=[features])
    output = SoftMax(in_layers=[dense])
    label = Label(shape=(None, 2))
    smce = SoftMaxCrossEntropy(in_layers=[label, dense])
    loss = ReduceMean(in_layers=[smce])
    tg = dc.models.TensorGraph(
        tensorboard=True,
        tensorboard_log_frequency=1,
        learning_rate=0.1,
        model_dir='/tmp/tensorgraph')
    tg.add_output(output)
    tg.set_loss(loss)
    tg.fit(dataset, nb_epoch=1000)
    # TensorBoard writes files named "events.*" into the model directory.
    files_in_dir = os.listdir(tg.model_dir)
    event_file = list(filter(lambda x: x.startswith("events"), files_in_dir))
    assert_true(len(event_file) > 0)
    event_file = os.path.join(tg.model_dir, event_file[0])
    file_size = os.stat(event_file).st_size
    assert_true(file_size > 0)

  def test_save_load(self):
    """Check that predictions survive a save / load_from_dir round trip."""
    n_data_points = 20
    n_features = 2
    X = np.random.rand(n_data_points, n_features)
    y = [[0, 1] for x in range(n_data_points)]
    dataset = NumpyDataset(X, y)
    features = Feature(shape=(None, n_features))
    dense = Dense(out_channels=2, in_layers=[features])
    output = SoftMax(in_layers=[dense])
    label = Label(shape=(None, 2))
    smce = SoftMaxCrossEntropy(in_layers=[label, dense])
    loss = ReduceMean(in_layers=[smce])
    tg = dc.models.TensorGraph(learning_rate=0.1)
    tg.add_output(output)
    tg.set_loss(loss)
    tg.fit(dataset, nb_epoch=1)
    prediction = np.squeeze(tg.predict_proba_on_batch(X))
    tg.save()
    tg1 = TensorGraph.load_from_dir(tg.model_dir)
    prediction2 = np.squeeze(tg1.predict_proba_on_batch(X))
    assert_true(np.all(np.isclose(prediction, prediction2, atol=0.01)))

  def test_shared_layer(self):
    """Check that a shared Dense layer yields identical outputs on both heads."""
    n_data_points = 20
    n_features = 2
    X = np.random.rand(n_data_points, n_features)
    y1 = np.array([[0, 1] for x in range(n_data_points)])
    X = NumpyDataset(X)
    ys = [NumpyDataset(y1)]
    databag = Databag()
    features = Feature(shape=(None, n_features))
    databag.add_dataset(features, X)
    outputs = []
    label = Label(shape=(None, 2))
    dense1 = Dense(out_channels=2, in_layers=[features])
    # dense2 shares dense1's weights, so output1 and output2 must agree.
    dense2 = dense1.shared(in_layers=[features])
    output1 = SoftMax(in_layers=[dense1])
    output2 = SoftMax(in_layers=[dense2])
    smce = SoftMaxCrossEntropy(in_layers=[label, dense1])
    outputs.append(output1)
    outputs.append(output2)
    databag.add_dataset(label, ys[0])
    total_loss = ReduceMean(in_layers=[smce])
    tg = dc.models.TensorGraph(learning_rate=0.1)
    for output in outputs:
      tg.add_output(output)
    tg.set_loss(total_loss)
    tg.fit_generator(
        databag.iterbatches(
            epochs=1, batch_size=tg.batch_size, pad_batches=True))
    prediction = tg.predict_proba_on_generator(databag.iterbatches())
    assert_true(
        np.all(np.isclose(prediction[:, 0], prediction[:, 1], atol=0.01)))
| 33.565737 | 94 | 0.684392 |
5c389f863fe08e12186a0d4b0150d16df728afd8 | 1,531 | py | Python | dm_control/mujoco/wrapper/mjbindings/__init__.py | wpumacay/dm_control | e13b6941470cd6be618b0cc004b8ea20d69429fe | [
"Apache-2.0"
] | 1 | 2019-05-29T15:49:15.000Z | 2019-05-29T15:49:15.000Z | dm_control/mujoco/wrapper/mjbindings/__init__.py | wpumacay/dm_control | e13b6941470cd6be618b0cc004b8ea20d69429fe | [
"Apache-2.0"
] | null | null | null | dm_control/mujoco/wrapper/mjbindings/__init__.py | wpumacay/dm_control | e13b6941470cd6be618b0cc004b8ea20d69429fe | [
"Apache-2.0"
] | 1 | 2021-01-24T20:28:15.000Z | 2021-01-24T20:28:15.000Z | # Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Import core names of MuJoCo ctypes bindings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
from dm_control.mujoco.wrapper.mjbindings import constants
from dm_control.mujoco.wrapper.mjbindings import enums
from dm_control.mujoco.wrapper.mjbindings import sizes
from dm_control.mujoco.wrapper.mjbindings import types
from dm_control.mujoco.wrapper.mjbindings import wrappers
# pylint: disable=g-import-not-at-top
# Importing `functions` loads the MuJoCo shared library via ctypes, which can
# fail when libmujoco.so is not accessible; keep `import mjbindings` usable in
# that case and just log the problem instead of raising at import time.
try:
  from dm_control.mujoco.wrapper.mjbindings import functions
  from dm_control.mujoco.wrapper.mjbindings.functions import mjlib
  logging.info('MuJoCo library version is: %d', mjlib.mj_version())
except (IOError, OSError):
  # `warn` is a deprecated alias of `warning`; use the supported spelling.
  logging.warning('mjbindings failed to import mjlib and other functions. '
                  'libmujoco.so may not be accessible.')
| 40.289474 | 78 | 0.74853 |
ee7450a634050eb7c69e5b5271ae43adfa687f50 | 2,460 | py | Python | superset/data/random_time_series.py | franksam007/incubator-superset | a0f572eb3ea4b89cb435a8af20436f8e1d34814e | [
"Apache-2.0"
] | 108 | 2018-01-22T11:09:59.000Z | 2021-01-15T10:53:04.000Z | superset/data/random_time_series.py | franksam007/incubator-superset | a0f572eb3ea4b89cb435a8af20436f8e1d34814e | [
"Apache-2.0"
] | 112 | 2018-01-25T22:57:21.000Z | 2019-08-22T20:08:48.000Z | superset/data/random_time_series.py | franksam007/incubator-superset | a0f572eb3ea4b89cb435a8af20436f8e1d34814e | [
"Apache-2.0"
] | 24 | 2018-01-19T22:54:39.000Z | 2020-11-12T13:04:25.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pandas as pd
from sqlalchemy import DateTime
from superset import db
from superset.utils import core as utils
from .helpers import (
config,
get_example_data,
get_slice_json,
merge_slice,
Slice,
TBL,
)
def load_random_time_series_data():
    """Load the bundled random-time-series example into the main database.

    Reads the gzipped JSON sample shipped with the repo, writes it to the
    ``random_time_series`` table, registers (or refreshes) the table
    reference, and creates a calendar-heatmap slice on top of it.
    """
    table_name = 'random_time_series'

    frame = pd.read_json(get_example_data('random_time_series.json.gz'))
    # The source file stores epoch seconds; convert to real timestamps.
    frame.ds = pd.to_datetime(frame.ds, unit='s')
    frame.to_sql(
        table_name,
        db.engine,
        if_exists='replace',
        chunksize=500,
        dtype={'ds': DateTime},
        index=False)
    print('Done loading table!')
    print('-' * 80)

    print('Creating table [random_time_series] reference')
    tbl = db.session.query(TBL).filter_by(table_name=table_name).first()
    if not tbl:
        tbl = TBL(table_name=table_name)
    tbl.main_dttm_col = 'ds'
    tbl.database = utils.get_or_create_main_db()
    db.session.merge(tbl)
    db.session.commit()
    tbl.fetch_metadata()

    print('Creating a slice')
    slc = Slice(
        slice_name='Calendar Heatmap',
        viz_type='cal_heatmap',
        datasource_type='table',
        datasource_id=tbl.id,
        params=get_slice_json({
            'granularity_sqla': 'day',
            'row_limit': config.get('ROW_LIMIT'),
            'since': '1 year ago',
            'until': 'now',
            'metric': 'count',
            'where': '',
            'viz_type': 'cal_heatmap',
            'domain_granularity': 'month',
            'subdomain_granularity': 'day',
        }),
    )
    merge_slice(slc)
| 30 | 82 | 0.663821 |
323ee7fbb9e9162e86e3a322a1e4e03177303977 | 7,868 | py | Python | userbot/modules/fban.py | Wiki28/WikixCilik | a7e8d684e34174001af3e69d1f00de4e98243abe | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/fban.py | Wiki28/WikixCilik | a7e8d684e34174001af3e69d1f00de4e98243abe | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/fban.py | Wiki28/WikixCilik | a7e8d684e34174001af3e69d1f00de4e98243abe | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # Copyright (C) 2020 KenHV
from sqlalchemy.exc import IntegrityError
from userbot import CMD_HELP, bot
from userbot.events import cilik_cmd
# Snippets that may appear in a fed bot's reply to a /fban command; if none
# of these occur in the reply text, fban() records the federation as failed.
fban_replies = [
    "New FedBan",
    "Starting a federation ban",
    "Start a federation ban",
    "FedBan Reason update",
    "FedBan reason updated",
    "has already been fbanned, with the exact same reason.",
]
# Same idea for /unfban acknowledgements (checked in unfban()).
unfban_replies = ["New un-FedBan", "I'll give", "Un-FedBan"]
@bot.on(cilik_cmd(outgoing=True, pattern=r"(d)?fban(?: |$)(.*)"))
async def fban(event):
    """Relay a federation ban for the target user to every saved federation.

    The target comes from the replied-to message (``.dfban`` also deletes
    it) or from the first word of the arguments; the rest is the reason.
    """
    try:
        from userbot.modules.sql_helper.fban_sql import get_flist
    except IntegrityError:
        return await event.edit("**Running on Non-SQL mode!**")

    args = event.pattern_match.group(2)
    if event.is_reply:
        replied = await event.get_reply_message()
        fban_id = replied.sender_id
        if event.pattern_match.group(1) == "d":
            await replied.delete()
        reason = args
    else:
        tokens = args.split()
        fban_id = tokens[0]
        reason = " ".join(tokens[1:])
    # Resolve usernames/links to a numeric id; ignore resolution failures.
    try:
        fban_id = await event.client.get_peer_id(fban_id)
    except Exception:
        pass

    if event.sender_id == fban_id:
        return await event.edit(
            "**Error: Tindakan ini telah dicegah oleh protokol keamanan diri Man-UserBot.**"
        )

    fed_list = get_flist()
    if len(fed_list) == 0:
        return await event.edit("**Anda belum terhubung ke federasi mana pun!**")

    user_link = f"[{fban_id}](tg://user?id={fban_id})"
    await event.edit(f"**Fbanning** {user_link}...")

    failed = []
    total = 0
    for fed in fed_list:
        total += 1
        chat = int(fed.chat_id)
        try:
            async with bot.conversation(chat) as conv:
                await conv.send_message(f"/fban {user_link} {reason}")
                reply = await conv.get_response()
                await bot.send_read_acknowledge(
                    conv.chat_id, message=reply, clear_mentions=True
                )
                # No known success marker in the bot's reply -> failure.
                if all(marker not in reply.text for marker in fban_replies):
                    failed.append(fed.fed_name)
        except Exception:
            failed.append(fed.fed_name)

    reason = reason or "Not specified."
    if failed:
        status = f"Failed to fban in {len(failed)}/{total} feds.\n"
        for fed_name in failed:
            status += f"• {fed_name}\n"
    else:
        status = f"Success! Fbanned in {total} feds."
    await event.edit(
        f"**Fbanned **{user_link}!\n**Reason:** {reason}\n**Status:** {status}"
    )
@bot.on(cilik_cmd(outgoing=True, pattern=r"unfban(?: |$)(.*)"))
async def unfban(event):
    """Unbans a user from connected federations.

    The target is taken from the replied-to message or the first word of
    the arguments (remaining words become the reason). The /unfban command
    is relayed to every saved federation chat and each bot reply is checked
    against the known success markers in ``unfban_replies``.
    """
    try:
        from userbot.modules.sql_helper.fban_sql import get_flist
    except IntegrityError:
        return await event.edit("**Running on Non-SQL mode!**")
    match = event.pattern_match.group(1)
    if event.is_reply:
        unfban_id = (await event.get_reply_message()).sender_id
        reason = match
    else:
        pattern = match.split()
        unfban_id = pattern[0]
        reason = " ".join(pattern[1:])
        try:
            unfban_id = await event.client.get_peer_id(unfban_id)
        except Exception:
            # Fix: was `except BaseException`, which would also swallow
            # KeyboardInterrupt/SystemExit; narrowed to match fban().
            pass
    if event.sender_id == unfban_id:
        return await event.edit("**Tunggu, itu illegal**")
    fed_list = get_flist()
    if len(fed_list) == 0:
        return await event.edit("**Anda belum terhubung ke federasi mana pun!**")
    user_link = f"[{unfban_id}](tg://user?id={unfban_id})"
    await event.edit(f"**Un-fbanning **{user_link}**...**")
    failed = []
    total = 0
    for fed in fed_list:
        total += 1
        chat = int(fed.chat_id)
        try:
            async with bot.conversation(chat) as conv:
                await conv.send_message(f"/unfban {user_link} {reason}")
                reply = await conv.get_response()
                await bot.send_read_acknowledge(
                    conv.chat_id, message=reply, clear_mentions=True
                )
                # No known success marker in the bot's reply -> failure.
                if all(marker not in reply.text for marker in unfban_replies):
                    failed.append(fed.fed_name)
        except Exception:
            failed.append(fed.fed_name)
    # Fix: this default was assigned twice in the original; once is enough.
    reason = reason or "Not specified."
    if failed:
        status = f"Failed to un-fban in {len(failed)}/{total} feds.\n"
        for fed_name in failed:
            status += f"• {fed_name}\n"
    else:
        status = f"Success! Un-fbanned in {total} feds."
    await event.edit(
        f"**Un-fbanned** {user_link}!\n**Reason:** {reason}\n**Status:** {status}"
    )
@bot.on(cilik_cmd(outgoing=True, pattern=r"addf(?: |$)(.*)"))
async def addf(event):
    """Register the current chat in the federation list under a nickname."""
    try:
        from userbot.modules.sql_helper.fban_sql import add_flist
    except IntegrityError:
        return await event.edit("**Running on Non-SQL mode!**")
    name = event.pattern_match.group(1)
    if not name:
        # A nickname for the federation is mandatory.
        return await event.edit("**Berikan nama untuk terhubung ke grup ini!**")
    try:
        add_flist(event.chat_id, name)
    except IntegrityError:
        # Unique constraint hit: this chat is already registered.
        return await event.edit("**Grup ini sudah terhubung ke daftar federasi.**")
    await event.edit("**Menambahkan grup ini ke daftar federasi!**")
@bot.on(cilik_cmd(outgoing=True, pattern=r"delf$"))
async def delf(event):
    """Drop the current chat from the saved federation list."""
    try:
        from userbot.modules.sql_helper.fban_sql import del_flist
    except IntegrityError:
        return await event.edit("**Running on Non-SQL mode!**")
    del_flist(event.chat_id)
    await event.edit("**Menghapus grup ini dari daftar federasi!**")
@bot.on(cilik_cmd(outgoing=True, pattern=r"listf$"))
async def listf(event):
    """Show every federation chat currently saved, one bullet per entry."""
    try:
        from userbot.modules.sql_helper.fban_sql import get_flist
    except IntegrityError:
        return await event.edit("**Running on Non-SQL mode!**")
    fed_list = get_flist()
    if len(fed_list) == 0:
        return await event.edit("**Anda belum terhubung ke federasi mana pun!**")
    bullets = "".join(f"• {fed.fed_name}\n" for fed in fed_list)
    await event.edit("**Connected federations:**\n\n" + bullets)
@bot.on(cilik_cmd(outgoing=True, pattern=r"clearf$"))
async def clearf(event):
    """Wipe the entire saved federation list."""
    try:
        from userbot.modules.sql_helper.fban_sql import del_flist_all
    except IntegrityError:
        return await event.edit("**Running on Non-SQL mode!**")
    del_flist_all()
    await event.edit("**Disconnected dari semua federasi yang terhubung!**")
CMD_HELP.update(
{
"fban": "**Plugin : **`Federations Banned`\
\n\n • **Syntax :** `.fban` <id/username/reply> <reason>\
\n • **Function : **Membanned user dari federasi yang terhubung.\
\n\n • **Syntax :** `.dfban` <id/username/reply> <reason>\
\n • **Function : **Membanned user dari federasi yang terhubung dengan menghapus pesan yang dibalas.\
\n\n • **Syntax :** `.unfban` <id/username/reply> <reason>\
\n • **Function : **Membatalkan Federations Banned\
\n\n • **Syntax :** `.addf` <nama>\
\n • **Function : **Menambahkan grup saat ini dan menyimpannya sebagai <nama> di federasi yang terhubung. Menambahkan satu grup sudah cukup untuk satu federasi.\
\n\n • **Syntax :** `.delf`\
\n • **Function : **Menghapus grup saat ini dari federasi yang terhubung\
\n\n • **Syntax :** `.listf`\
\n • **Function : **Mencantumkan semua federasi yang terhubung dengan nama yang ditentukan.\
\n\n • **Syntax :** `.clearf`\
\n • **Function : **Menghapus dari semua federasi yang terhubung. Gunakan dengan hati-hati.\
"
}
)
| 32.378601 | 171 | 0.608414 |
b8d8661a9f9ce2eba63f6ebf24f1fc21e837449d | 3,774 | py | Python | third_party/cython/src/Cython/Compiler/AnalysedTreeTransforms.py | domenic/mojo | 53dda76fed90a47c35ed6e06baf833a0d44495b8 | [
"BSD-3-Clause"
] | 8 | 2016-02-08T11:59:31.000Z | 2020-05-31T15:19:54.000Z | third_party/cython/src/Cython/Compiler/AnalysedTreeTransforms.py | domenic/mojo | 53dda76fed90a47c35ed6e06baf833a0d44495b8 | [
"BSD-3-Clause"
] | 1 | 2018-02-10T21:00:08.000Z | 2018-03-20T05:09:50.000Z | third_party/cython/src/Cython/Compiler/AnalysedTreeTransforms.py | domenic/mojo | 53dda76fed90a47c35ed6e06baf833a0d44495b8 | [
"BSD-3-Clause"
] | 7 | 2016-02-09T09:28:14.000Z | 2020-07-25T19:03:36.000Z | from Visitor import ScopeTrackingTransform
from Nodes import StatListNode, SingleAssignmentNode, CFuncDefNode, DefNode
from ExprNodes import DictNode, DictItemNode, NameNode, UnicodeNode
from PyrexTypes import py_object_type
from StringEncoding import EncodedString
import Symtab
class AutoTestDictTransform(ScopeTrackingTransform):
    # Handles autotestdict directive: collects docstrings containing doctests
    # and publishes them in a module-level __test__ dict.

    # Special methods that should never be exposed as doctests on cdef classes.
    blacklist = ['__cinit__', '__dealloc__', '__richcmp__',
                 '__nonzero__', '__bool__',
                 '__len__', '__contains__']

    def visit_ModuleNode(self, node):
        """Set up collection state and append the __test__ dict assignment."""
        if node.is_pxd:
            return node
        self.scope_type = 'module'
        self.scope_node = node

        if not self.current_directives['autotestdict']:
            return node
        self.all_docstrings = self.current_directives['autotestdict.all']
        self.cdef_docstrings = self.all_docstrings or self.current_directives['autotestdict.cdef']

        assert isinstance(node.body, StatListNode)

        # First see if __test__ is already created
        if u'__test__' in node.scope.entries:
            # Do nothing
            return node

        pos = node.pos

        self.tests = []
        self.testspos = node.pos

        test_dict_entry = node.scope.declare_var(EncodedString(u'__test__'),
                                                 py_object_type,
                                                 pos,
                                                 visibility='public')
        # self.tests is shared with the DictNode, so entries added while
        # visiting children end up in the generated assignment.
        create_test_dict_assignment = SingleAssignmentNode(pos,
            lhs=NameNode(pos, name=EncodedString(u'__test__'),
                         entry=test_dict_entry),
            rhs=DictNode(pos, key_value_pairs=self.tests))
        self.visitchildren(node)
        node.body.stats.append(create_test_dict_assignment)
        return node

    def add_test(self, testpos, path, doctest):
        """Record one docstring under the key '<path> (line <n>)'."""
        pos = self.testspos
        keystr = u'%s (line %d)' % (path, testpos[1])
        key = UnicodeNode(pos, value=EncodedString(keystr))
        value = UnicodeNode(pos, value=doctest)
        self.tests.append(DictItemNode(pos, key=key, value=value))

    def visit_ExprNode(self, node):
        # expressions cannot contain functions and lambda expressions
        # do not have a docstring
        return node

    def visit_FuncDefNode(self, node):
        """Collect the docstring of an eligible function/method node."""
        if not node.doc or (isinstance(node, DefNode) and node.fused_py_func):
            return node
        if not self.cdef_docstrings:
            if isinstance(node, CFuncDefNode) and not node.py_func:
                return node
        if not self.all_docstrings and '>>>' not in node.doc:
            return node

        pos = self.testspos
        if self.scope_type == 'module':
            path = node.entry.name
        elif self.scope_type in ('pyclass', 'cclass'):
            if isinstance(node, CFuncDefNode):
                if node.py_func is not None:
                    name = node.py_func.name
                else:
                    name = node.entry.name
            else:
                name = node.name
            if self.scope_type == 'cclass' and name in self.blacklist:
                return node
            if self.scope_type == 'pyclass':
                class_name = self.scope_node.name
            else:
                class_name = self.scope_node.class_name
            if isinstance(node.entry.scope, Symtab.PropertyScope):
                # Fix: dropped unused local `property_method_name` that was
                # assigned here but never read.
                path = "%s.%s.%s" % (class_name, node.entry.scope.name,
                                     node.entry.name)
            else:
                path = "%s.%s" % (class_name, node.entry.name)
        else:
            assert False
        self.add_test(node.pos, path, node.doc)
        return node
| 38.510204 | 98 | 0.586116 |
8ae697048f501380b6905f26a29155bee96068c2 | 1,817 | py | Python | homeassistant/components/notify/mysensors.py | jamescurtin/home-assistant | 6a9968ccb9b0082f5629e50955549d432aba7d90 | [
"Apache-2.0"
] | 2 | 2020-02-20T18:47:55.000Z | 2021-11-09T11:33:28.000Z | homeassistant/components/notify/mysensors.py | moose51789/home-assistant | 63c9d59d5455850fd4b37c2475fe6f10effb5245 | [
"Apache-2.0"
] | 5 | 2022-03-01T06:31:03.000Z | 2022-03-31T07:20:45.000Z | homeassistant/components/notify/mysensors.py | moose51789/home-assistant | 63c9d59d5455850fd4b37c2475fe6f10effb5245 | [
"Apache-2.0"
] | 3 | 2018-08-27T10:08:30.000Z | 2020-07-04T10:07:03.000Z | """
MySensors notification service.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/notify.mysensors/
"""
from homeassistant.components import mysensors
from homeassistant.components.notify import (
ATTR_TARGET, DOMAIN, BaseNotificationService)
def get_service(hass, config, discovery_info=None):
    """Get the MySensors notification service."""
    if not mysensors.setup_mysensors_platform(
            hass, DOMAIN, discovery_info, MySensorsNotificationDevice):
        return None
    return MySensorsNotificationService(hass)
class MySensorsNotificationDevice(mysensors.MySensorsDevice):
    """Represent a MySensors Notification device."""

    def send_msg(self, msg):
        """Send a message, split into consecutive 25-character chunks."""
        start = 0
        while start < len(msg):
            # Max mysensors payload is 25 bytes.
            self.gateway.set_child_value(
                self.node_id, self.child_id, self.value_type,
                msg[start:start + 25])
            start += 25

    def __repr__(self):
        """Return the representation."""
        return "<MySensorsNotificationDevice {}>".format(self.name)
class MySensorsNotificationService(BaseNotificationService):
    """Implement a MySensors notification service."""

    # pylint: disable=too-few-public-methods
    def __init__(self, hass):
        """Initialize the service."""
        self.devices = mysensors.get_mysensors_devices(hass, DOMAIN)

    def send_message(self, message="", **kwargs):
        """Send a message to every device, or only to the named targets."""
        targets = kwargs.get(ATTR_TARGET)
        for device in self.devices.values():
            if targets is None or device.name in targets:
                device.send_msg(message)
| 34.283019 | 78 | 0.694001 |
a2bde8f584941e6a4fe1fd9cd158eb874a3b6a31 | 8,461 | py | Python | 04_streaming/transform/df07.py | silverstar0727/data-science-on-gcp | ddceb2f708fcbdde96aa215df316a44f6fc2b333 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2022-01-02T15:54:02.000Z | 2022-01-02T15:54:02.000Z | 04_streaming/transform/df07.py | silverstar0727/data-science-on-gcp | ddceb2f708fcbdde96aa215df316a44f6fc2b333 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | 04_streaming/transform/df07.py | silverstar0727/data-science-on-gcp | ddceb2f708fcbdde96aa215df316a44f6fc2b333 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import apache_beam as beam
import logging
import csv
import json
# ISO-8601 (no timezone suffix) format used for all UTC timestamps below.
DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
def addtimezone(lat, lon):
    """Return (lat, lon, timezone-name); the CSV header row passes through.

    A header row makes float() raise ValueError, in which case the raw
    values are returned with the literal column name 'TIMEZONE'.
    """
    try:
        import timezonefinder
        finder = timezonefinder.TimezoneFinder()
        lat, lon = float(lat), float(lon)
        return lat, lon, finder.timezone_at(lng=lon, lat=lat)
    except ValueError:
        return lat, lon, 'TIMEZONE'  # header
def as_utc(date, hhmm, tzone):
    """
    Returns date corrected for timezone, and the tzoffset
    """
    try:
        if len(hhmm) == 0 or tzone is None:
            return '', 0  # empty string corresponds to canceled flights
        import datetime, pytz
        local_zone = pytz.timezone(tzone)
        local_dt = local_zone.localize(
            datetime.datetime.strptime(date, '%Y-%m-%d'), is_dst=False)
        # can't just parse hhmm because the data contains 2400 and the like ...
        local_dt += datetime.timedelta(hours=int(hhmm[:2]),
                                       minutes=int(hhmm[2:]))
        utc_dt = local_dt.astimezone(pytz.utc)
        return (utc_dt.strftime(DATETIME_FORMAT),
                local_dt.utcoffset().total_seconds())
    except ValueError as e:
        logging.exception('{} {} {}'.format(date, hhmm, tzone))
        raise e
def add_24h_if_before(arrtime, deptime):
    """If the arrival timestamp sorts before departure, shift it by one day."""
    import datetime
    if not (len(arrtime) > 0 and len(deptime) > 0 and arrtime < deptime):
        return arrtime
    shifted = (datetime.datetime.strptime(arrtime, DATETIME_FORMAT)
               + datetime.timedelta(hours=24))
    return shifted.strftime(DATETIME_FORMAT)
def airport_timezone(airport_id, airport_timezones):
    """Look up (lat, lon, tz) for an airport id, with a fixed US fallback."""
    return airport_timezones.get(
        airport_id, ('37.41', '-92.35', u'America/Chicago'))
def tz_correct(fields, airport_timezones):
    """Convert all local times in one flight record to UTC (generator).

    Mutates ``fields`` in place and yields it exactly once.
    ``airport_timezones`` maps airport-seq-id -> (lat, lon, tz name);
    in the pipeline it is the side-input built from the airports CSV.
    """
    fields['FL_DATE'] = fields['FL_DATE'].strftime('%Y-%m-%d')  # convert to a string so JSON code works

    # convert all times to UTC
    dep_airport_id = fields["ORIGIN_AIRPORT_SEQ_ID"]
    arr_airport_id = fields["DEST_AIRPORT_SEQ_ID"]
    fields["DEP_AIRPORT_LAT"], fields["DEP_AIRPORT_LON"], dep_timezone = airport_timezone(dep_airport_id,
                                                                                          airport_timezones)
    fields["ARR_AIRPORT_LAT"], fields["ARR_AIRPORT_LON"], arr_timezone = airport_timezone(arr_airport_id,
                                                                                          airport_timezones)

    # Departure-side times use the origin timezone; arrival-side the dest's.
    for f in ["CRS_DEP_TIME", "DEP_TIME", "WHEELS_OFF"]:
        fields[f], deptz = as_utc(fields["FL_DATE"], fields[f], dep_timezone)
    for f in ["WHEELS_ON", "CRS_ARR_TIME", "ARR_TIME"]:
        fields[f], arrtz = as_utc(fields["FL_DATE"], fields[f], arr_timezone)

    # Arrival-side times that precede DEP_TIME crossed local midnight, so
    # push them forward a day. Order matters: DEP_TIME must already be UTC.
    for f in ["WHEELS_OFF", "WHEELS_ON", "CRS_ARR_TIME", "ARR_TIME"]:
        fields[f] = add_24h_if_before(fields[f], fields["DEP_TIME"])

    # deptz/arrtz hold the offsets from the last iteration of each loop above.
    fields["DEP_AIRPORT_TZOFFSET"] = deptz
    fields["ARR_AIRPORT_TZOFFSET"] = arrtz
    yield fields
def get_next_event(fields):
    """Yield up to three events (departed/wheelsoff/arrived) for one flight.

    Each event is a copy of the flight record with EVENT_TYPE/EVENT_TIME set
    and any field not yet knowable at that point in time removed.
    """
    def _event(event_type, time_field, unknowable):
        evt = dict(fields)  # copy; never mutate the input record
        evt["EVENT_TYPE"] = event_type
        evt["EVENT_TIME"] = fields[time_field]
        for key in unknowable:
            evt.pop(key, None)  # not knowable at this event time
        return evt

    if len(fields["DEP_TIME"]) > 0:
        yield _event("departed", "DEP_TIME",
                     ["TAXI_OUT", "WHEELS_OFF", "WHEELS_ON", "TAXI_IN",
                      "ARR_TIME", "ARR_DELAY", "DISTANCE"])
    if len(fields["WHEELS_OFF"]) > 0:
        yield _event("wheelsoff", "WHEELS_OFF",
                     ["WHEELS_ON", "TAXI_IN", "ARR_TIME", "ARR_DELAY",
                      "DISTANCE"])
    if len(fields["ARR_TIME"]) > 0:
        yield _event("arrived", "ARR_TIME", [])
def create_event_row(fields):
    """Return a copy of *fields* with the whole record serialized as JSON
    under the EVENT_DATA key."""
    row = dict(fields)
    row['EVENT_DATA'] = json.dumps(fields)
    return row
def run(project, bucket, region):
    """Build and run the Dataflow job that UTC-corrects flight times.

    Reads dsongcp.flights from BigQuery plus the airports CSV from GCS,
    writes corrected flights to GCS text and to dsongcp.flights_tzcorr,
    and derives per-flight simulated events into dsongcp.flights_simevents.
    """
    argv = [
        '--project={0}'.format(project),
        '--job_name=ch04timecorr',
        '--save_main_session',
        '--staging_location=gs://{0}/flights/staging/'.format(bucket),
        '--temp_location=gs://{0}/flights/temp/'.format(bucket),
        '--setup_file=./setup.py',
        '--autoscaling_algorithm=THROUGHPUT_BASED',
        '--max_num_workers=8',
        '--region={}'.format(region),
        '--runner=DataflowRunner'
    ]
    airports_filename = 'gs://{}/flights/airports/airports.csv.gz'.format(bucket)
    flights_output = 'gs://{}/flights/tzcorr/all_flights'.format(bucket)
    with beam.Pipeline(argv=argv) as pipeline:
        # (airport_seq_id -> (lat, lon, timezone)) side input, US airports only.
        airports = (pipeline
                    | 'airports:read' >> beam.io.ReadFromText(airports_filename)
                    | 'airports:onlyUSA' >> beam.Filter(lambda line: "United States" in line)
                    | 'airports:fields' >> beam.Map(lambda line: next(csv.reader([line])))
                    | 'airports:tz' >> beam.Map(lambda fields: (fields[0], addtimezone(fields[21], fields[26])))
                    )
        # Each flight row is UTC-corrected against the airports side input.
        flights = (pipeline
                   | 'flights:read' >> beam.io.ReadFromBigQuery(
                    query='SELECT * FROM dsongcp.flights', use_standard_sql=True)
                   | 'flights:tzcorr' >> beam.FlatMap(tz_correct, beam.pvalue.AsDict(airports))
                   )
        # Sink 1: newline-delimited JSON on GCS.
        (flights
         | 'flights:tostring' >> beam.Map(lambda fields: json.dumps(fields))
         | 'flights:gcsout' >> beam.io.textio.WriteToText(flights_output)
         )
        flights_schema = ','.join([
            'FL_DATE:date,UNIQUE_CARRIER:string,ORIGIN_AIRPORT_SEQ_ID:string,ORIGIN:string',
            'DEST_AIRPORT_SEQ_ID:string,DEST:string,CRS_DEP_TIME:timestamp,DEP_TIME:timestamp',
            'DEP_DELAY:float,TAXI_OUT:float,WHEELS_OFF:timestamp,WHEELS_ON:timestamp,TAXI_IN:float',
            'CRS_ARR_TIME:timestamp,ARR_TIME:timestamp,ARR_DELAY:float,CANCELLED:boolean',
            'DIVERTED:boolean,DISTANCE:float',
            'DEP_AIRPORT_LAT:float,DEP_AIRPORT_LON:float,DEP_AIRPORT_TZOFFSET:float',
            'ARR_AIRPORT_LAT:float,ARR_AIRPORT_LON:float,ARR_AIRPORT_TZOFFSET:float'])
        # Sink 2: corrected flights table (truncated on each run).
        flights | 'flights:bqout' >> beam.io.WriteToBigQuery(
            'dsongcp.flights_tzcorr', schema=flights_schema,
            write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE,
            create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED
        )
        # Sink 3: departed/wheelsoff/arrived events for simulation.
        events = flights | beam.FlatMap(get_next_event)
        events_schema = ','.join([flights_schema, 'EVENT_TYPE:string,EVENT_TIME:timestamp,EVENT_DATA:string'])
        (events
         | 'events:totablerow' >> beam.Map(lambda fields: create_event_row(fields))
         | 'events:bqout' >> beam.io.WriteToBigQuery(
            'dsongcp.flights_simevents', schema=events_schema,
            write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE,
            create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED
        )
        )
if __name__ == '__main__':
    import argparse

    # Command-line entry point: all three flags are mandatory.
    cli = argparse.ArgumentParser(description='Run pipeline on the cloud')
    cli.add_argument('-p', '--project', help='Unique project ID', required=True)
    cli.add_argument(
        '-b', '--bucket', required=True,
        help='Bucket where gs://BUCKET/flights/airports/airports.csv.gz exists')
    cli.add_argument(
        '-r', '--region', required=True,
        help='Region in which to run the Dataflow job. Choose the same region as your bucket.')
    opts = cli.parse_args()

    print("Correcting timestamps and writing to BigQuery dataset")
    run(project=opts.project, bucket=opts.bucket, region=opts.region)
| 41.679803 | 114 | 0.631249 |
08a3f4c3187a7eefa079c1a5a89f733de96738de | 696 | py | Python | module/admin.py | wilmerm/unolet-2022 | 18119a9381b763e38c888bafdb7f97028bd80ea1 | [
"BSD-3-Clause"
] | null | null | null | module/admin.py | wilmerm/unolet-2022 | 18119a9381b763e38c888bafdb7f97028bd80ea1 | [
"BSD-3-Clause"
] | null | null | null | module/admin.py | wilmerm/unolet-2022 | 18119a9381b763e38c888bafdb7f97028bd80ea1 | [
"BSD-3-Clause"
] | null | null | null | from django.contrib import admin
from django.utils.html import format_html, html_safe
from module.models import Module
@admin.register(Module)
class ModuleAdmin(admin.ModelAdmin):
    """Admin changelist for Module with icon and colored name/parent cells."""

    list_display = ("get_icon", "get_name", "description", "url_name", "get_parent")
    list_display_links = ("get_icon", "get_name")

    def get_icon(self, obj):
        # Render the module's inline SVG markup as safe HTML.
        return format_html(obj.get_svg()["svg"])

    def get_name(self, obj):
        # Badge-style cell using the module's configured colors.
        return format_html('<div style="padding: 1px; background-color: {}; color: {}">{}</div>',
            obj.css_bgcolor, obj.css_textcolor, obj)

    def get_parent(self, obj):
        if obj.parent:
            return self.get_name(obj.parent)
        # Fix: the original final line had dataset metadata fused onto it
        # ('return "" | 30.26087 | ...'), which is not valid Python.
        return ""
30ac7d5ea4cd4360093903ce958dfc402c0221d1 | 2,171 | py | Python | cartography/intel/aws/resources.py | anshubansal2000/cartography | f72f5b66104311f7149c3fca416dd09e8cf99edc | [
"Apache-2.0"
] | null | null | null | cartography/intel/aws/resources.py | anshubansal2000/cartography | f72f5b66104311f7149c3fca416dd09e8cf99edc | [
"Apache-2.0"
] | null | null | null | cartography/intel/aws/resources.py | anshubansal2000/cartography | f72f5b66104311f7149c3fca416dd09e8cf99edc | [
"Apache-2.0"
] | null | null | null | from typing import Dict
from . import apigateway
from . import dynamodb
from . import ecr
from . import eks
from . import elasticache
from . import elasticsearch
from . import emr
from . import iam
from . import kms
from . import lambda_function
from . import permission_relationships
from . import rds
from . import redshift
from . import resourcegroupstaggingapi
from . import route53
from . import s3
from .ec2.auto_scaling_groups import sync_ec2_auto_scaling_groups
from .ec2.images import sync_ec2_images
from .ec2.instances import sync_ec2_instances
from .ec2.internet_gateways import sync_internet_gateways
from .ec2.key_pairs import sync_ec2_key_pairs
from .ec2.load_balancer_v2s import sync_load_balancer_v2s
from .ec2.load_balancers import sync_load_balancers
from .ec2.network_interfaces import sync_network_interfaces
from .ec2.security_groups import sync_ec2_security_groupinfo
from .ec2.subnets import sync_subnets
from .ec2.tgw import sync_transit_gateways
from .ec2.vpc import sync_vpc
from .ec2.vpc_peerings import sync_vpc_peerings
# Maps each AWS resource name to the sync function that ingests it; the keys
# are the identifiers used to select which resources to sync.
RESOURCE_FUNCTIONS: Dict = {
    'iam': iam.sync,
    's3': s3.sync,
    'dynamodb': dynamodb.sync,
    'ec2:autoscalinggroup': sync_ec2_auto_scaling_groups,
    'ec2:images': sync_ec2_images,
    'ec2:instance': sync_ec2_instances,
    'ec2:keypair': sync_ec2_key_pairs,
    'ec2:load_balancer': sync_load_balancers,
    'ec2:load_balancer_v2': sync_load_balancer_v2s,
    'ec2:network_interface': sync_network_interfaces,
    'ec2:security_group': sync_ec2_security_groupinfo,
    'ec2:subnet': sync_subnets,
    'ec2:tgw': sync_transit_gateways,
    'ec2:vpc': sync_vpc,
    'ec2:vpc_peering': sync_vpc_peerings,
    'ec2:internet_gateway': sync_internet_gateways,
    'ecr': ecr.sync,
    'eks': eks.sync,
    'elasticache': elasticache.sync,
    'emr': emr.sync,
    'lambda_function': lambda_function.sync,
    'kms': kms.sync,
    'rds': rds.sync,
    'redshift': redshift.sync,
    'route53': route53.sync,
    'elasticsearch': elasticsearch.sync,
    'permission_relationships': permission_relationships.sync,
    'resourcegroupstaggingapi': resourcegroupstaggingapi.sync,
    'apigateway': apigateway.sync,
}
| 33.921875 | 65 | 0.773376 |
024f71e6ea6b0a294a92ed9dd1099b7739da5746 | 3,493 | py | Python | ingestion/src/metadata/generated/schema/entity/tags/tagCategory.py | inspire99/OpenMetadata | a650aea59a41ab48a9e203af091ae13a1fdf46c2 | [
"Apache-2.0"
] | null | null | null | ingestion/src/metadata/generated/schema/entity/tags/tagCategory.py | inspire99/OpenMetadata | a650aea59a41ab48a9e203af091ae13a1fdf46c2 | [
"Apache-2.0"
] | null | null | null | ingestion/src/metadata/generated/schema/entity/tags/tagCategory.py | inspire99/OpenMetadata | a650aea59a41ab48a9e203af091ae13a1fdf46c2 | [
"Apache-2.0"
] | null | null | null | # generated by datamodel-codegen:
# filename: schema/entity/tags/tagCategory.json
# timestamp: 2021-10-31T21:55:34+00:00
from __future__ import annotations
from enum import Enum
from typing import List, Optional
from pydantic import BaseModel, Extra, Field, constr
from ...type import basic, entityHistory
class TagName(BaseModel):
    """Generated model wrapping a tag name constrained to 2-25 characters."""

    __root__: constr(min_length=2, max_length=25) = Field(
        ..., description='Name of the tag.'
    )
class TagCategoryType(Enum):
    """Kind of tag category; values mirror the JSON-schema enum."""

    Descriptive = 'Descriptive'
    Classification = 'Classification'
class Tag(BaseModel):
    """Generated model for a tag; may nest child tags via ``children``
    (forward reference resolved by Tag.update_forward_refs() below)."""

    class Config:
        # Reject unknown fields instead of silently ignoring them.
        extra = Extra.forbid

    name: TagName = Field(..., description='Name of the tag.')
    fullyQualifiedName: Optional[str] = Field(
        None,
        description='Unique name of the tag of format Category.PrimaryTag.SecondaryTag.',
    )
    # NOTE(review): the description text below ('Unique name of the tag
    # category.') looks copy-pasted — confirm against the source JSON schema.
    description: str = Field(..., description='Unique name of the tag category.')
    version: Optional[entityHistory.EntityVersion] = Field(
        None, description='Metadata version of the entity.'
    )
    updatedAt: Optional[basic.DateTime] = Field(
        None,
        description='Last update time corresponding to the new version of the entity.',
    )
    updatedBy: Optional[str] = Field(None, description='User who made the update.')
    href: Optional[basic.Href] = Field(
        None, description='Link to the resource corresponding to the tag.'
    )
    usageCount: Optional[int] = Field(
        None, description='Count of how many times this tag and children tags are used.'
    )
    deprecated: Optional[bool] = Field(False, description='If the tag is deprecated.')
    associatedTags: Optional[List[str]] = Field(
        None,
        description="Fully qualified names of tags associated with this tag. Associated tags captures relationship of one tag to another automatically. As an example a tag 'User.PhoneNumber' might have an associated tag 'PII.Sensitive'. When 'User.Address' is used to label a column in a table, 'PII.Sensitive' label is also applied automatically due to Associated tag relationship.",
    )
    children: Optional[List[Tag]] = Field(
        None,
        description='Tags under this tag group or empty for tags at the leaf level.',
    )
class TagCategory(BaseModel):
    """Generated model for a tag category: the root grouping that owns a
    tree of Tag entries via ``children``."""

    class Config:
        # Reject unknown fields instead of silently ignoring them.
        extra = Extra.forbid

    name: TagName
    displayName: Optional[str] = Field(
        None, description='Display Name that identifies this tag category.'
    )
    description: str = Field(..., description='Description of the tag category.')
    version: Optional[entityHistory.EntityVersion] = Field(
        None, description='Metadata version of the entity.'
    )
    updatedAt: Optional[basic.DateTime] = Field(
        None,
        description='Last update time corresponding to the new version of the entity.',
    )
    updatedBy: Optional[str] = Field(None, description='User who made the update.')
    categoryType: TagCategoryType
    href: Optional[basic.Href] = Field(
        None, description='Link to the resource corresponding to the tag category.'
    )
    usageCount: Optional[int] = Field(
        None,
        description='Count of how many times the tags from this tag category are used.',
    )
    children: Optional[List[Tag]] = Field(None, description='Tags under this category.')
    changeDescription: Optional[entityHistory.ChangeDescription] = Field(
        None, description='Change that lead to this version of the entity.'
    )
# Resolve the self-referencing `children: Optional[List[Tag]]` annotation.
Tag.update_forward_refs()
| 37.55914 | 384 | 0.693387 |
ca507e57c6912f6fea60015f7f8f3e5be26a5f5f | 12,410 | py | Python | code/python/FactSetPrices/v1/fds/sdk/FactSetPrices/model/fixed_ids.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | 6 | 2022-02-07T16:34:18.000Z | 2022-03-30T08:04:57.000Z | code/python/FactSetPrices/v1/fds/sdk/FactSetPrices/model/fixed_ids.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | 2 | 2022-02-07T05:25:57.000Z | 2022-03-07T14:18:04.000Z | code/python/FactSetPrices/v1/fds/sdk/FactSetPrices/model/fixed_ids.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | null | null | null | """
FactSet Prices API
Gain access to comprehensive global coverage for Equities & Fixed Income. Perform quick analytics by controlling the date ranges, currencies, and rolling periods, or simply request Open, High, Low, and Close prices. Easily connect pricing data with other core company data or alternative content sets using FactSet's hub and spoke symbology. <p>Equity and Fund Security types include Common Stock, ADR, GDR, Preferred, Closed-ended Fund, Exchange Traded Fund, Unit, Open-ended Fund, Exchange Traded Fund UVI, Exchange Traded Fund NAV, Preferred Equity, Non-Voting Depositary Receipt/Certificate, Alien/Foreign, Structured Product, and Temporary Instruments. Reference over 180,000+ active and inactive securities.</p><p>Fixed Income Security Types include Corporate Bonds, Treasury and Agency bonds, Government Bonds, and Municipals.</p> # noqa: E501
The version of the OpenAPI document: 1.2.1
Contact: api@factset.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.FactSetPrices.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.FactSetPrices.exceptions import ApiAttributeError
# Auto-generated OpenAPI model: a thin wrapper around the request payload,
# which is a single list of Fixed Income security identifier strings.
class FixedIds(ModelSimple):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    allowed_values = {
    }
    # Each request must carry between 1 and 2000 identifiers (enforced when
    # `self.value` is assigned, via the inherited model machinery).
    validations = {
        ('value',): {
            'max_items': 2000,
            'min_items': 1,
        },
    }
    additional_properties_type = None
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        # The single payload attribute is a list of plain strings.
        return {
            'value': ([str],),
        }
    @cached_property
    def discriminator():
        # No polymorphic subtypes for this model.
        return None
    attribute_map = {}
    read_only_vars = set()
    _composed_schemas = None
    # Internal bookkeeping attributes that must always exist on an instance.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """FixedIds - a model defined in OpenAPI
        Note that value can be passed either in args or in kwargs, but not in both.
        Args:
            args[0] ([str]): The requested list of Fixed Income Security Identifiers. . # noqa: E501
        Keyword Args:
            value ([str]): The requested list of Fixed Income Security Identifiers. . # noqa: E501
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        # required up here when default value is not given
        _path_to_item = kwargs.pop('_path_to_item', ())
        # The payload may arrive positionally or as the 'value' keyword;
        # exactly one of the two forms must be used.
        if 'value' in kwargs:
            value = kwargs.pop('value')
        elif args:
            args = list(args)
            value = args.pop(0)
        else:
            raise ApiTypeError(
                "value is required, but not passed in args or kwargs and doesn't have default",
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Anything still left in args after popping the payload was never a
        # valid parameter for this model.
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        # Assigning through the model machinery presumably applies the type
        # check and the min_items/max_items validations declared above
        # (inherited from ModelSimple -- confirm in fds.sdk model_utils).
        self.value = value
        if kwargs:
            raise ApiTypeError(
                "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
                    kwargs,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
    # Deserialization entry point: mirrors __init__, but allocates the
    # instance via super(OpenApiModel, cls).__new__(cls), i.e. it bypasses
    # OpenApiModel.__new__ and returns the instance at the end.
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        """FixedIds - a model defined in OpenAPI
        Note that value can be passed either in args or in kwargs, but not in both.
        Args:
            args[0] ([str]): The requested list of Fixed Income Security Identifiers. . # noqa: E501
        Keyword Args:
            value ([str]): The requested list of Fixed Income Security Identifiers. . # noqa: E501
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        # required up here when default value is not given
        _path_to_item = kwargs.pop('_path_to_item', ())
        self = super(OpenApiModel, cls).__new__(cls)
        # Same payload-extraction rules as __init__ (positional or keyword).
        if 'value' in kwargs:
            value = kwargs.pop('value')
        elif args:
            args = list(args)
            value = args.pop(0)
        else:
            raise ApiTypeError(
                "value is required, but not passed in args or kwargs and doesn't have default",
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.value = value
        if kwargs:
            raise ApiTypeError(
                "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
                    kwargs,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        return self
| 43.697183 | 856 | 0.574698 |
104018fe8d04373da8f24b0dd1ae4f50c7909148 | 672 | py | Python | P1: Mad Libs Generator.py | Nishi-16-K/Python-Projects | e1f38a4b38fa359d036575124f6ca8e3a3d11f4f | [
"MIT"
] | 1 | 2021-05-18T10:38:58.000Z | 2021-05-18T10:38:58.000Z | P1: Mad Libs Generator.py | Nishi-16-K/Python-Projects | e1f38a4b38fa359d036575124f6ca8e3a3d11f4f | [
"MIT"
] | null | null | null | P1: Mad Libs Generator.py | Nishi-16-K/Python-Projects | e1f38a4b38fa359d036575124f6ca8e3a3d11f4f | [
"MIT"
] | null | null | null | loop = 1
# Play nine rounds of the story generator. The original counter-based
# `while (loop < 10)` loop ran for loop = 1..9; a `for` over range(9) is the
# idiomatic equivalent and needs no manual counter bookkeeping.
for _ in range(9):
    place = input("Choose a place: ")
    adjective = input("Choose an adjective: ")
    adjective2 = input("Choose another adjective: ")
    name = input("Write a name: ")
    divider = "-" * 42  # same 42-dash separator as before, built once per round
    print(divider)
    print(f"I love to visit {place}. Dude! it's so freaking {adjective}.")
    print(f"The weather there is so {adjective2} every day.")
    print(f"You know last time I went there was with {name} and that person totally went insane.")
    print(f"Oh God, if there is anywhere heaven on this earth, it totally lies in {place}!!")
    print(divider)
02edb4d3b78b4e160f207541a4ee0a5a7b1bc6a4 | 738 | py | Python | _projects/project1py.py | M-Sender/cmps3160 | 54546d307f913b35caa45efe6c5528dadb8055f2 | [
"MIT"
] | null | null | null | _projects/project1py.py | M-Sender/cmps3160 | 54546d307f913b35caa45efe6c5528dadb8055f2 | [
"MIT"
] | null | null | null | _projects/project1py.py | M-Sender/cmps3160 | 54546d307f913b35caa45efe6c5528dadb8055f2 | [
"MIT"
] | null | null | null | import pandas as pd
import bs4
from bs4 import BeautifulSoup
import requests
#2
url = "https://www.spaceweatherlive.com/en/solar-activity/top-50-solar-flares"
webpage = requests.get(url)
print(webpage) #response 403, this means that the webserver refused to authorize the request
#to fix do this
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
webpage = requests.get(url, headers=headers)
print(webpage) #now response 200
soup_content = BeautifulSoup(webpage.content,'html.parser')
pretty = soup_content#.prettify()
#print(pretty)
table_html = pretty.find("table",{"class":"table table-striped"})#['data-value']
pd.read_html(table_html)
| 41 | 148 | 0.768293 |
e692562a0e06d5e12b8bb4406cd749d3b53dd370 | 4,093 | py | Python | sdk/ml/azure-ai-ml/azure/ai/ml/_local_endpoints/endpoint_stub.py | dubiety/azure-sdk-for-python | 62ffa839f5d753594cf0fe63668f454a9d87a346 | [
"MIT"
] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | sdk/ml/azure-ai-ml/azure/ai/ml/_local_endpoints/endpoint_stub.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | [
"MIT"
] | null | null | null | sdk/ml/azure-ai-ml/azure/ai/ml/_local_endpoints/endpoint_stub.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | [
"MIT"
] | null | null | null | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import json
import shutil
from pathlib import Path
from azure.ai.ml.entities import OnlineEndpoint
from typing import Iterable
from azure.ai.ml.entities._load_functions import load_online_endpoint
class EndpointStub:
    """Catalog of local endpoints that do not yet have deployments.

    Persists each endpoint specification as a JSON file (the original
    docstring said "yaml", but the code serializes with ``json.dumps`` to a
    ``.json`` file) in an idempotent, well-known location under the user's
    home directory::

        ~/.azureml/inferencing/<endpoint name>/<endpoint name>.json
    """

    def create_or_update(self, endpoint: "OnlineEndpoint"):
        """Create or update a local endpoint.

        :param endpoint OnlineEndpoint: OnlineEndpoint entity to create or update.
        :return: the endpoint that was written to the local cache.
        """
        self._create_endpoint_cache(endpoint=endpoint)
        return endpoint

    def get(self, endpoint_name: str):
        """Get a local endpoint by name, or ``None`` if it was never cached.

        :param endpoint_name str: Name of local endpoint to get.
        """
        endpoint_path = self._get_endpoint_cache_file(endpoint_name=endpoint_name)
        if endpoint_path.exists():
            return load_online_endpoint(path=endpoint_path)
        return None

    def list(self) -> Iterable[Path]:
        """List the cache files of all local endpoints."""
        # One JSON file per endpoint: <cache dir>/<endpoint name>/<name>.json.
        # list(glob) replaces the original manual append loop.
        return list(self._get_inferencing_cache_dir().glob("*/*.json"))

    def delete(self, endpoint_name: str):
        """Delete a local endpoint and its cached metadata.

        :param endpoint_name str: Name of local endpoint to delete.
        """
        # NOTE(review): shutil.rmtree raises FileNotFoundError if the endpoint
        # was never created -- presumably callers check existence first; confirm.
        build_directory = self._get_build_directory(endpoint_name=endpoint_name)
        shutil.rmtree(build_directory)

    def invoke(self):
        """Explain that an endpoint without deployments cannot be invoked."""
        return "This local endpoint does not have any deployments, so it cannot be invoked. Please use 'az ml online-deployment create --local' before invoking."

    def _create_endpoint_cache(self, endpoint: "OnlineEndpoint"):
        """Serialize the endpoint to its JSON cache file.

        :param endpoint OnlineEndpoint: OnlineEndpoint entity to create or update.
        :return Path: path of the cache file that was written.
        """
        endpoint_cache_path = self._get_endpoint_cache_file(endpoint_name=endpoint.name)
        endpoint_metadata = json.dumps(endpoint.dump())
        endpoint_cache_path.write_text(endpoint_metadata)
        return endpoint_cache_path

    def _get_endpoint_cache_file(self, endpoint_name: str) -> Path:
        """Get a local endpoint cache Path, creating parent directories. Idempotent.

        :param endpoint_name str: Name of local endpoint to get local cache.
        :returns Path: path to cached endpoint file.
        """
        build_directory = self._create_build_directory(endpoint_name=endpoint_name)
        return Path(build_directory, f"{endpoint_name}.json")

    def _create_build_directory(self, endpoint_name: str) -> Path:
        """Create (if needed) and return the endpoint's build directory. Idempotent.

        :param endpoint_name str: Name of local endpoint to get local directory.
        :returns Path: path to endpoint build directory.
        """
        build_directory = self._get_build_directory(endpoint_name=endpoint_name)
        build_directory.mkdir(parents=True, exist_ok=True)
        return build_directory

    def _get_build_directory(self, endpoint_name: str) -> Path:
        """Compute the endpoint's build directory without touching disk. Idempotent.

        :param endpoint_name str: Name of local endpoint to get local directory.
        :returns Path: path to endpoint build directory.
        """
        return Path(self._get_inferencing_cache_dir(), endpoint_name)

    def _get_inferencing_cache_dir(self) -> Path:
        """Root of the local inferencing cache (``~/.azureml/inferencing``). Idempotent.

        :returns Path: path to local inferencing cache directory.
        """
        return Path(Path.home(), ".azureml", "inferencing")
| 40.524752 | 161 | 0.679208 |
9d5266d0a8b2f1baa002b20187931163bd8fedb0 | 6,206 | py | Python | chc/cmdline/c_project/chc_project_dashboard.py | Databean/CodeHawk-C | 98720753beb51e0bf5105f8f6838618292fbf55c | [
"MIT"
] | 10 | 2020-08-17T15:35:55.000Z | 2022-03-23T14:39:57.000Z | chc/cmdline/c_project/chc_project_dashboard.py | kestreltechnology/CodeHawk-C | db0fa92fa630cd919f29021d464533f0e7170fed | [
"MIT"
] | 31 | 2020-07-17T05:45:43.000Z | 2021-05-29T04:49:49.000Z | chc/cmdline/c_project/chc_project_dashboard.py | kestreltechnology/CodeHawk-C | db0fa92fa630cd919f29021d464533f0e7170fed | [
"MIT"
] | 3 | 2020-06-13T05:32:34.000Z | 2021-09-16T02:31:39.000Z | # ------------------------------------------------------------------------------
# CodeHawk C Analyzer
# Author: Henny Sipma
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2017-2020 Kestrel Technology LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
import argparse
import os
import time
from typing import Any, Dict, List
import chc.util.fileutil as UF
import chc.reporting.ProofObligations as RP
# Aggregate per-project and per-tag proof-obligation statistics across every
# registered analysis target, then print a set of summary tables.
if __name__ == "__main__":
    targets = UF.get_registered_analysis_targets()
    # project -> (linecount, clinecount, cfuncount)
    projectstats: Dict[Any, Any] = {}
    ppoprojecttotals: Dict[Any, Any] = {} # project -> dm -> dmtotal
    spoprojecttotals: Dict[Any, Any] = {}
    ppotagtotals: Dict[Any, Any] = {} # tag -> dm -> dmtotal
    spotagtotals: Dict[Any, Any] = {}
    nosummary: List[Any] = []  # projects with no summary results file at all
    analysistimes: Dict[Any, Any] = {}  # project -> summary timestamp
    dsmethods = RP.get_dsmethods([])  # discharge-method column keys
    for group in targets:
        gpath = targets[group]["path"]
        for project in targets[group]["projects"]:
            path = os.path.join(gpath, targets[group]["projects"][project]["path"])
            results = UF.read_project_summary_results(path)
            if results is None:
                nosummary.append(project)
                continue
            pd = results
            # Projects whose summary lacks the expected tagresults layout are
            # reported and skipped.
            # NOTE(review): BaseException also swallows KeyboardInterrupt and
            # SystemExit; catching KeyError/TypeError would be safer.
            try:
                ppod = pd["tagresults"]["ppos"]
                spod = pd["tagresults"]["spos"]
                ppoprojecttotals[project] = {}
                spoprojecttotals[project] = {}
            except BaseException:
                print("Problem with " + str(project))
                continue
            if "stats" in pd:
                projectstats[project] = pd["stats"]
                analysistimes[project] = pd["timestamp"]
            else:
                # NOTE(review): when "stats" is missing, analysistimes is not
                # set, so the project is counted in the totals tables but
                # omitted from the time-sorted per-project listing below.
                projectstats[project] = (0, 0, 0)
            # Normalize: backfill missing "violated"/"contract" buckets with -1
            # -- presumably so the per-method sums below never hit a missing
            # key (confirm against RP.get_dsmethods).
            for t in ppod:
                if "violated" not in ppod[t]:
                    ppod[t]["violated"] = -1
            for t in spod:
                if "violated" not in spod[t]:
                    spod[t]["violated"] = -1
            for t in ppod:
                if "contract" not in ppod[t]:
                    ppod[t]["contract"] = -1
            for t in spod:
                if "contract" not in spod[t]:
                    spod[t]["contract"] = -1
            # Accumulate cross-project totals per tag, and per-project totals
            # per discharge method (summed over tags).
            for t in ppod:
                if t not in ppotagtotals:
                    ppotagtotals[t] = {}
                for dm in ppod[t]:
                    if dm not in ppotagtotals[t]:
                        ppotagtotals[t][dm] = 0
                    ppotagtotals[t][dm] += ppod[t][dm]
            for dm in dsmethods:
                ppoprojecttotals[project][dm] = sum([ppod[t][dm] for t in ppod])
            for t in spod:
                if t not in spotagtotals:
                    spotagtotals[t] = {}
                for dm in spod[t]:
                    if dm not in spotagtotals[t]:
                        spotagtotals[t][dm] = 0
                    spotagtotals[t][dm] += spod[t][dm]
            for dm in dsmethods:
                spoprojecttotals[project][dm] = sum([spod[t][dm] for t in spod])
    # Per-project tables: absolute counts, then percentages.
    print("Primary Proof Obligations")
    print("\n".join(RP.totals_to_string(ppoprojecttotals)))
    print("\nPrimary Proof Obligations (in percentages)")
    print("\n".join(RP.totals_to_string(ppoprojecttotals, False)))
    print("\nSupporting Proof Obligations")
    print("\n".join(RP.totals_to_string(spoprojecttotals)))
    print("\nSupporting Proof Obligations (in percentages)")
    print("\n".join(RP.totals_to_string(spoprojecttotals, False)))
    # Per-tag tables aggregated across all projects.
    print("\n\nPrimary Proof Obligations")
    print("\n".join(RP.totals_to_string(ppotagtotals)))
    print("\nSupporting Proof Obligations")
    print("\n".join(RP.totals_to_string(spotagtotals)))
    if len(nosummary) > 0:
        print("\n\nNo summary results found for:")
        print("-" * 28)
        for p in nosummary:
            print(" " + p)
        print("-" * 28)
    # Size statistics per project, ordered by analysis timestamp.
    print("\n\nProject statistics:")
    print(
        "analysis time".ljust(16)
        + " "
        + "project".ljust(28)
        + "LOC ".rjust(10)
        + "CLOC ".rjust(10)
        + "functions".rjust(10)
    )
    print("-" * 80)
    lctotal = 0
    clctotal = 0
    fctotal = 0
    for p in sorted(analysistimes, key=lambda p: analysistimes[p]):
        (lc, clc, fc) = projectstats[p]
        lctotal += lc
        clctotal += clc
        fctotal += fc
        print(
            time.strftime("%Y-%m-%d %H:%M", time.localtime(analysistimes[p]))
            + " "
            + p.ljust(28)
            + str(lc).rjust(10)
            + str(clc).rjust(10)
            + str(fc).rjust(10)
        )
    print("-" * 80)
    print(
        "Total".ljust(46)
        + str(lctotal).rjust(10)
        + str(clctotal).rjust(10)
        + str(fctotal).rjust(10)
    )
    print("\n\nProof obligation transfer")
    print(
        "\n".join(
            RP.totals_to_presentation_string(
                ppoprojecttotals, spoprojecttotals, projectstats
            )
        )
    )
| 35.872832 | 83 | 0.545601 |
e211a226fec6324b8e9c9d60fe78624d2b85322e | 8,513 | py | Python | detectron2_ofa/modeling/roi_heads/mask_head.py | qdmy/Adelaidet-Quantization | e88cf41c62dc3944d2bd57ffc1d365535b0a1c4b | [
"Apache-2.0"
] | null | null | null | detectron2_ofa/modeling/roi_heads/mask_head.py | qdmy/Adelaidet-Quantization | e88cf41c62dc3944d2bd57ffc1d365535b0a1c4b | [
"Apache-2.0"
] | null | null | null | detectron2_ofa/modeling/roi_heads/mask_head.py | qdmy/Adelaidet-Quantization | e88cf41c62dc3944d2bd57ffc1d365535b0a1c4b | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from detectron2_ofa.layers import Conv2d, ConvTranspose2d, ShapeSpec, cat, get_norm
from detectron2_ofa.utils.events import get_event_storage
from detectron2_ofa.utils.registry import Registry
# Registry that mask-head implementations register themselves into (via the
# @ROI_MASK_HEAD_REGISTRY.register() decorator); build_mask_head() looks
# implementations up here by the name in cfg.MODEL.ROI_MASK_HEAD.NAME.
ROI_MASK_HEAD_REGISTRY = Registry("ROI_MASK_HEAD")
ROI_MASK_HEAD_REGISTRY.__doc__ = """
Registry for mask heads, which predicts instance masks given
per-region features.
The registered object will be called with `obj(cfg, input_shape)`.
"""
def mask_rcnn_loss(pred_mask_logits, instances):
    """
    Compute the mask prediction loss defined in the Mask R-CNN paper.
    Args:
        pred_mask_logits (Tensor): A tensor of shape (B, C, Hmask, Wmask) or (B, 1, Hmask, Wmask)
            for class-specific or class-agnostic, where B is the total number of predicted masks
            in all images, C is the number of foreground classes, and Hmask, Wmask are the height
            and width of the mask predictions. The values are logits.
        instances (list[Instances]): A list of N Instances, where N is the number of images
            in the batch. These instances are in 1:1
            correspondence with the pred_mask_logits. The ground-truth labels (class, box, mask,
            ...) associated with each instance are stored in fields.
    Returns:
        mask_loss (Tensor): A scalar tensor containing the loss.
    """
    # A single output channel means the head is class-agnostic.
    cls_agnostic_mask = pred_mask_logits.size(1) == 1
    total_num_masks = pred_mask_logits.size(0)
    mask_side_len = pred_mask_logits.size(2)
    assert pred_mask_logits.size(2) == pred_mask_logits.size(3), "Mask prediction must be square!"
    # Collect per-image ground truth, rasterized to the predicted resolution.
    gt_classes = []
    gt_masks = []
    for instances_per_image in instances:
        if len(instances_per_image) == 0:
            continue
        if not cls_agnostic_mask:
            gt_classes_per_image = instances_per_image.gt_classes.to(dtype=torch.int64)
            gt_classes.append(gt_classes_per_image)
        # Crop each GT mask to its proposal box and resize to the mask side
        # length so it aligns with the corresponding predicted mask.
        gt_masks_per_image = instances_per_image.gt_masks.crop_and_resize(
            instances_per_image.proposal_boxes.tensor, mask_side_len
        ).to(device=pred_mask_logits.device)
        # A tensor of shape (N, M, M), N=#instances in the image; M=mask_side_len
        gt_masks.append(gt_masks_per_image)
    if len(gt_masks) == 0:
        # No foreground instances in the whole batch: return a zero that is
        # still an expression over the predictions, keeping autograd connected.
        return pred_mask_logits.sum() * 0
    gt_masks = cat(gt_masks, dim=0)
    if cls_agnostic_mask:
        # Only one channel to supervise.
        pred_mask_logits = pred_mask_logits[:, 0]
    else:
        # Select, for each predicted mask, the channel of its GT class.
        indices = torch.arange(total_num_masks)
        gt_classes = cat(gt_classes, dim=0)
        pred_mask_logits = pred_mask_logits[indices, gt_classes]
    if gt_masks.dtype == torch.bool:
        gt_masks_bool = gt_masks
    else:
        # Here we allow gt_masks to be float as well (depend on the implementation of rasterize())
        gt_masks_bool = gt_masks > 0.5
    # Log the training accuracy (using gt classes and 0.5 threshold)
    mask_incorrect = (pred_mask_logits > 0.0) != gt_masks_bool
    mask_accuracy = 1 - (mask_incorrect.sum().item() / max(mask_incorrect.numel(), 1.0))
    num_positive = gt_masks_bool.sum().item()
    # max(..., 1.0) guards against division by zero on degenerate batches.
    false_positive = (mask_incorrect & ~gt_masks_bool).sum().item() / max(
        gt_masks_bool.numel() - num_positive, 1.0
    )
    false_negative = (mask_incorrect & gt_masks_bool).sum().item() / max(num_positive, 1.0)
    storage = get_event_storage()
    storage.put_scalar("mask_rcnn/accuracy", mask_accuracy)
    storage.put_scalar("mask_rcnn/false_positive", false_positive)
    storage.put_scalar("mask_rcnn/false_negative", false_negative)
    # Per-pixel binary cross-entropy, averaged over all pixels of all masks.
    mask_loss = F.binary_cross_entropy_with_logits(
        pred_mask_logits, gt_masks.to(dtype=torch.float32), reduction="mean"
    )
    return mask_loss
def mask_rcnn_inference(pred_mask_logits, pred_instances):
    """
    Convert predicted mask logits into per-instance foreground-probability masks.

    For each predicted box, the sigmoid of its predicted class's logits (or of
    the single shared channel in the class-agnostic case) is attached to the
    corresponding Instances object as a new "pred_masks" field of shape
    (1, Hmask, Wmask). Masks are left soft and at the resolution the network
    predicted; resizing to image resolution and/or binarizing is left to the
    caller.

    Args:
        pred_mask_logits (Tensor): (B, C, Hmask, Wmask) class-specific logits,
            or (B, 1, Hmask, Wmask) class-agnostic logits, where B is the total
            number of predicted masks over all images.
        pred_instances (list[Instances]): one Instances per image, each with a
            "pred_classes" field; their lengths must sum to B.
    """
    if pred_mask_logits.size(1) == 1:
        # Class-agnostic head: every box shares the single mask channel.
        probs = pred_mask_logits.sigmoid()
    else:
        # Class-specific head: gather, per box, the channel of its predicted class.
        classes = cat([inst.pred_classes for inst in pred_instances])
        box_idx = torch.arange(pred_mask_logits.shape[0], device=classes.device)
        probs = pred_mask_logits[box_idx, classes][:, None].sigmoid()
    # probs: (B, 1, Hmask, Wmask); hand each image its slice of the batch.
    counts = [len(inst) for inst in pred_instances]
    for image_probs, inst in zip(probs.split(counts, dim=0), pred_instances):
        inst.pred_masks = image_probs  # (1, Hmask, Wmask) per instance
@ROI_MASK_HEAD_REGISTRY.register()
class MaskRCNNConvUpsampleHead(nn.Module):
    """
    A mask head with several conv layers, plus an upsample layer (with `ConvTranspose2d`).
    """
    def __init__(self, cfg, input_shape: ShapeSpec):
        """
        The following attributes are parsed from config:
            num_conv: the number of conv layers
            conv_dim: the dimension of the conv layers
            norm: normalization for the conv layers
        """
        super(MaskRCNNConvUpsampleHead, self).__init__()
        # fmt: off
        num_classes = cfg.MODEL.ROI_HEADS.NUM_CLASSES
        conv_dims = cfg.MODEL.ROI_MASK_HEAD.CONV_DIM
        self.norm = cfg.MODEL.ROI_MASK_HEAD.NORM
        num_conv = cfg.MODEL.ROI_MASK_HEAD.NUM_CONV
        input_channels = input_shape.channels
        cls_agnostic_mask = cfg.MODEL.ROI_MASK_HEAD.CLS_AGNOSTIC_MASK
        # fmt: on
        # Stack of 3x3 stride-1 pad-1 convs: spatial size is preserved.
        self.conv_norm_relus = []
        for k in range(num_conv):
            conv = Conv2d(
                input_channels if k == 0 else conv_dims,
                conv_dims,
                kernel_size=3,
                stride=1,
                padding=1,
                bias=not self.norm,  # norm layers make the conv bias redundant
                norm=get_norm(self.norm, conv_dims),
                activation=F.relu,
            )
            self.add_module("mask_fcn{}".format(k + 1), conv)
            self.conv_norm_relus.append(conv)
        # 2x2 stride-2 transposed conv: doubles the spatial resolution.
        self.deconv = ConvTranspose2d(
            conv_dims if num_conv > 0 else input_channels,
            conv_dims,
            kernel_size=2,
            stride=2,
            padding=0,
        )
        # 1x1 conv producing one logit map per class (or a single shared map).
        num_mask_classes = 1 if cls_agnostic_mask else num_classes
        self.predictor = Conv2d(conv_dims, num_mask_classes, kernel_size=1, stride=1, padding=0)
        for layer in self.conv_norm_relus + [self.deconv]:
            weight_init.c2_msra_fill(layer)
        # use normal distribution initialization for mask prediction layer
        nn.init.normal_(self.predictor.weight, std=0.001)
        if self.predictor.bias is not None:
            nn.init.constant_(self.predictor.bias, 0)
    def forward(self, x):
        """Return mask logits of shape (B, num_mask_classes, 2*H, 2*W) for
        input features x of shape (B, input_channels, H, W)."""
        for layer in self.conv_norm_relus:
            x = layer(x)
        x = F.relu(self.deconv(x))
        return self.predictor(x)
def build_mask_head(cfg, input_shape):
    """Instantiate the mask head registered under ``cfg.MODEL.ROI_MASK_HEAD.NAME``."""
    head_cls = ROI_MASK_HEAD_REGISTRY.get(cfg.MODEL.ROI_MASK_HEAD.NAME)
    return head_cls(cfg, input_shape)
| 41.125604 | 100 | 0.675555 |
288ac75dd8de7e972d02e57716cf3160b11c238f | 680 | py | Python | workrate/dto/settings.py | happyharbor/working_condition | 95683c566fdef441f3966c09ae5ca6a342e98d7f | [
"Apache-2.0"
] | null | null | null | workrate/dto/settings.py | happyharbor/working_condition | 95683c566fdef441f3966c09ae5ca6a342e98d7f | [
"Apache-2.0"
] | 1 | 2021-07-25T15:04:25.000Z | 2021-07-25T15:04:25.000Z | workrate/dto/settings.py | happyharbor/working_condition | 95683c566fdef441f3966c09ae5ca6a342e98d7f | [
"Apache-2.0"
] | 1 | 2021-07-21T11:50:09.000Z | 2021-07-21T11:50:09.000Z | from dataclasses import dataclass
from operator import attrgetter
from workrate.dto.work_setting import WorkSetting
@dataclass
class Settings:
    """Class for keeping the settings.

    The temperature bounds are derived from the supplied work settings:
    ``min_temperature`` is the coldest configured temperature, and
    ``max_temperature`` is the hottest configured temperature plus one
    (presumably an exclusive / rounded-up upper bound -- TODO confirm
    against callers).

    Raises:
        ValueError: if ``work_settings`` is empty (min/max of an empty
            sequence), same as the original implementation.
    """

    # Quoted annotation so the dataclass machinery does not have to resolve
    # the project-local WorkSetting type at class-creation time.
    work_settings: "list[WorkSetting]"
    min_temperature: float
    max_temperature: float

    def __init__(self, work_settings: "list[WorkSetting]") -> None:
        self.work_settings = work_settings
        # One explicit pass collecting temperatures replaces the two
        # name-mangled attrgetter-based helper scans of the original.
        temperatures = [setting.temperature for setting in work_settings]
        self.min_temperature = min(temperatures)
        self.max_temperature = max(temperatures) + 1
| 28.333333 | 85 | 0.719118 |
e9a9827574fc5ee4239de3b0c60e4814099dcc86 | 614 | py | Python | wsgi.py | jkom-cloud/python-demo | 5c584eaf9e63dc48f858b979758e0b58cd1e2d4c | [
"MIT"
] | null | null | null | wsgi.py | jkom-cloud/python-demo | 5c584eaf9e63dc48f858b979758e0b58cd1e2d4c | [
"MIT"
] | null | null | null | wsgi.py | jkom-cloud/python-demo | 5c584eaf9e63dc48f858b979758e0b58cd1e2d4c | [
"MIT"
] | null | null | null | import time
import datetime
from flask import Flask
from flask import jsonify
from lib import fib
app = Flask(__name__)
@app.route('/')
def index():
"""Welcome page"""
now = datetime.datetime.now().isoformat()
return jsonify(message='Hello World!!', ts=now)
@app.route('/ping/')
def ping():
"""ping"""
return 'pong'
@app.route('/fib/seq/<int:n>/')
def fib_seq(n):
"""Get Fibonacci sequence till F_n."""
if not 0 <= n <= 1000:
return jsonify(error='n must between 0 and 1000'), 400
t = time.time()
return jsonify(n=n, seq=fib.seq(n), ms=(time.time() - t) * 1e3)
| 19.1875 | 67 | 0.618893 |
0ca10492f99fdc5ccbb1957483fd2f2374ff698f | 5,734 | py | Python | lib/detect/protos/pipeline_pb2.py | nhattruongpham/ALPR_SSD_CTPN_LPRNet | b20bf84f1ecc56252da37c25d08a0b7d7e10477c | [
"MIT"
] | null | null | null | lib/detect/protos/pipeline_pb2.py | nhattruongpham/ALPR_SSD_CTPN_LPRNet | b20bf84f1ecc56252da37c25d08a0b7d7e10477c | [
"MIT"
] | null | null | null | lib/detect/protos/pipeline_pb2.py | nhattruongpham/ALPR_SSD_CTPN_LPRNet | b20bf84f1ecc56252da37c25d08a0b7d7e10477c | [
"MIT"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: object_detection/protos/pipeline.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
# Module-level handle to the default protobuf symbol database.
_sym_db = _symbol_database.Default()
from retail_demo.object_detection.protos import eval_pb2 as object__detection_dot_protos_dot_eval__pb2
from retail_demo.object_detection.protos import input_reader_pb2 as object__detection_dot_protos_dot_input__reader__pb2
from retail_demo.object_detection.protos import model_pb2 as object__detection_dot_protos_dot_model__pb2
from retail_demo.object_detection.protos import train_pb2 as object__detection_dot_protos_dot_train__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='object_detection/protos/pipeline.proto',
package='object_detection.protos',
syntax='proto2',
serialized_pb=_b('\n&object_detection/protos/pipeline.proto\x12\x17object_detection.protos\x1a\"object_detection/protos/eval.proto\x1a*object_detection/protos/input_reader.proto\x1a#object_detection/protos/model.proto\x1a#object_detection/protos/train.proto\"\xd5\x02\n\x17TrainEvalPipelineConfig\x12\x36\n\x05model\x18\x01 \x01(\x0b\x32\'.object_detection.protos.DetectionModel\x12:\n\x0ctrain_config\x18\x02 \x01(\x0b\x32$.object_detection.protos.TrainConfig\x12@\n\x12train_input_reader\x18\x03 \x01(\x0b\x32$.object_detection.protos.InputReader\x12\x38\n\x0b\x65val_config\x18\x04 \x01(\x0b\x32#.object_detection.protos.EvalConfig\x12?\n\x11\x65val_input_reader\x18\x05 \x01(\x0b\x32$.object_detection.protos.InputReader*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02')
,
dependencies=[object__detection_dot_protos_dot_eval__pb2.DESCRIPTOR,object__detection_dot_protos_dot_input__reader__pb2.DESCRIPTOR,object__detection_dot_protos_dot_model__pb2.DESCRIPTOR,object__detection_dot_protos_dot_train__pb2.DESCRIPTOR,])
_TRAINEVALPIPELINECONFIG = _descriptor.Descriptor(
name='TrainEvalPipelineConfig',
full_name='object_detection.protos.TrainEvalPipelineConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='model', full_name='object_detection.protos.TrainEvalPipelineConfig.model', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='train_config', full_name='object_detection.protos.TrainEvalPipelineConfig.train_config', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='train_input_reader', full_name='object_detection.protos.TrainEvalPipelineConfig.train_input_reader', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eval_config', full_name='object_detection.protos.TrainEvalPipelineConfig.eval_config', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eval_input_reader', full_name='object_detection.protos.TrainEvalPipelineConfig.eval_input_reader', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(1000, 536870912), ],
oneofs=[
],
serialized_start=222,
serialized_end=563,
)
_TRAINEVALPIPELINECONFIG.fields_by_name['model'].message_type = object__detection_dot_protos_dot_model__pb2._DETECTIONMODEL
_TRAINEVALPIPELINECONFIG.fields_by_name['train_config'].message_type = object__detection_dot_protos_dot_train__pb2._TRAINCONFIG
_TRAINEVALPIPELINECONFIG.fields_by_name['train_input_reader'].message_type = object__detection_dot_protos_dot_input__reader__pb2._INPUTREADER
_TRAINEVALPIPELINECONFIG.fields_by_name['eval_config'].message_type = object__detection_dot_protos_dot_eval__pb2._EVALCONFIG
_TRAINEVALPIPELINECONFIG.fields_by_name['eval_input_reader'].message_type = object__detection_dot_protos_dot_input__reader__pb2._INPUTREADER
DESCRIPTOR.message_types_by_name['TrainEvalPipelineConfig'] = _TRAINEVALPIPELINECONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TrainEvalPipelineConfig = _reflection.GeneratedProtocolMessageType('TrainEvalPipelineConfig', (_message.Message,), dict(
DESCRIPTOR = _TRAINEVALPIPELINECONFIG,
__module__ = 'object_detection.protos.pipeline_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.TrainEvalPipelineConfig)
))
_sym_db.RegisterMessage(TrainEvalPipelineConfig)
# @@protoc_insertion_point(module_scope)
| 53.092593 | 767 | 0.812871 |
d9f0dd3035b71f36b60b70cd37976c8a4dcbf3bb | 247 | py | Python | kepler/pandas/__init__.py | micaelverissimo/kepler | 5db55aa39d7e65f460f533dfd91ca6e5fdb3076a | [
"MIT"
] | 1 | 2021-11-20T22:45:13.000Z | 2021-11-20T22:45:13.000Z | kepler/pandas/__init__.py | micaelverissimo/kepler | 5db55aa39d7e65f460f533dfd91ca6e5fdb3076a | [
"MIT"
] | null | null | null | kepler/pandas/__init__.py | micaelverissimo/kepler | 5db55aa39d7e65f460f533dfd91ca6e5fdb3076a | [
"MIT"
] | 2 | 2021-10-05T22:35:11.000Z | 2021-11-20T22:45:18.000Z | __all__ = []
# Aggregate the public APIs of the submodules at package level: each
# submodule is imported, its __all__ is appended to the package-level
# __all__ (initialised above), and its public names are re-exported
# through a star import.
from . import decorators
__all__.extend(decorators.__all__)
from .decorators import *

from . import readers
__all__.extend(readers.__all__)
from .readers import *

from . import menu
__all__.extend(menu.__all__)
from .menu import *
| 17.642857 | 34 | 0.769231 |
8c9d4a7bbe3566064146d3b49c91731573c7a72d | 607 | py | Python | src/you_get/extractor/cbs.py | codepongo/you-get | 23c45107f534dde9a80127913dd4b65fe026fad7 | [
"MIT"
] | 1 | 2021-09-20T02:54:36.000Z | 2021-09-20T02:54:36.000Z | src/you_get/extractors/cbs.py | piyipaya/you-get | ef16e74fe72e7779a0b10af56f435cb2558d90c7 | [
"MIT"
] | null | null | null | src/you_get/extractors/cbs.py | piyipaya/you-get | ef16e74fe72e7779a0b10af56f435cb2558d90c7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
__all__ = ['cbs_download']
from ..common import *
from .theplatform import theplatform_download_by_pid
def cbs_download(url, output_dir='.', merge=True, info_only=False):
    """Download a CBS video given its page URL.

    Scrapes the page for thePlatform video pid and title, then delegates
    the actual download to ``theplatform_download_by_pid``.
    """
    page = get_content(url)
    video_pid = match1(page, r'video\.settings\.pid\s*=\s*\'([^\']+)\'')
    video_title = match1(page, r'video\.settings\.title\s*=\s*\"([^\"]+)\"')
    theplatform_download_by_pid(
        video_pid, video_title,
        output_dir=output_dir, merge=merge, info_only=info_only)
# Extractor registration hooks consumed by you-get's common machinery.
site_info = "CBS.com"
download = cbs_download
# CBS pages are handled one video at a time; playlists are not supported.
download_playlist = playlist_not_supported('cbs')
| 27.590909 | 100 | 0.688633 |
0898d27a3f96b92a7ce7ba8e8f9dd58452abfea6 | 956 | py | Python | setup.py | dipteshkumar/perceptron_pypi | d80eb40483a82f2edcdd221d820a8e65fe067e71 | [
"MIT"
] | null | null | null | setup.py | dipteshkumar/perceptron_pypi | d80eb40483a82f2edcdd221d820a8e65fe067e71 | [
"MIT"
] | null | null | null | setup.py | dipteshkumar/perceptron_pypi | d80eb40483a82f2edcdd221d820a8e65fe067e71 | [
"MIT"
] | null | null | null | import setuptools
# Packaging manifest for the perceptron_pypi project.
PROJECT_NAME = "perceptron_pypi"
USER_NAME = "diptesh_km"
REPO_URL = f"https://github.com/{USER_NAME}/{PROJECT_NAME}"

# The long description shown on PyPI is taken verbatim from the README.
with open("README.md", "r", encoding="utf-8") as readme:
    long_description = readme.read()

setuptools.setup(
    name=f"{PROJECT_NAME}-{USER_NAME}",
    version="0.0.1",
    author=USER_NAME,
    author_email="diptesh.km@gmail.com",
    description="its an implementation of Perceptron",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url=REPO_URL,
    project_urls={"Bug Tracker": f"{REPO_URL}/issues"},
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    package_dir={"": "src"},
    packages=setuptools.find_packages(where="src"),
    python_requires=">=3.7",
    install_requires=[
        "numpy",
        "tqdm",
    ],
)
) | 28.969697 | 79 | 0.646444 |
f5adda2cc3b1351a55c07c709faae5a7c631dbf9 | 2,204 | py | Python | scibert/dataset_readers/classification_dataset_reader.py | CodenameSource/scibert | 2a9d2991a7f17198f51f71dd48f20af5dcc78e19 | [
"Apache-2.0"
] | null | null | null | scibert/dataset_readers/classification_dataset_reader.py | CodenameSource/scibert | 2a9d2991a7f17198f51f71dd48f20af5dcc78e19 | [
"Apache-2.0"
] | null | null | null | scibert/dataset_readers/classification_dataset_reader.py | CodenameSource/scibert | 2a9d2991a7f17198f51f71dd48f20af5dcc78e19 | [
"Apache-2.0"
] | null | null | null | """ Data reader for AllenNLP """
from typing import Dict, List, Any
import logging
import jsonlines
from overrides import overrides
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import LabelField, TextField, MultiLabelField, ListField, ArrayField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.tokenizers import Tokenizer, WordTokenizer
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@DatasetReader.register("classification_dataset_reader")
class ClassificationDatasetReader(DatasetReader):
    """
    Text classification data reader.

    The data is assumed to be in jsonlines format: each line is a json-dict
    with the keys 'text', 'label', 'metadata'.
    'metadata' is optional and only used for passing metadata to the model.
    """
    def __init__(self,
                 lazy: bool = False,
                 tokenizer: Tokenizer = None,
                 token_indexers: Dict[str, TokenIndexer] = None,
                 ) -> None:
        super().__init__(lazy)
        self._tokenizer = tokenizer or WordTokenizer()
        self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}

    @overrides
    def _read(self, file_path):
        # Yield one Instance per jsonlines record.
        with jsonlines.open(file_path) as f_in:
            for json_object in f_in:
                yield self.text_to_instance(
                    text=json_object.get('text'),
                    label=json_object.get('label'),
                    metadata=json_object.get('metadata')
                )

    @overrides
    def text_to_instance(self,
                         text: str,
                         label: str = None,
                         metadata: Any = None) -> Instance:  # type: ignore
        """
        Build an AllenNLP Instance from a raw text string.

        BUG FIX: the parameter was previously named ``text_tokens`` while
        ``_read`` called this method with the keyword ``text=...``, which
        raised ``TypeError: unexpected keyword argument 'text'``; the
        tokenization line had also been commented out.  The parameter is
        renamed back to ``text`` and the raw string is tokenized here
        before building the TextField.
        """
        text_tokens = self._tokenizer.tokenize(text)
        fields = {
            'text': TextField(text_tokens, self._token_indexers),
        }
        if label is not None:
            fields['label'] = LabelField(label)
        if metadata:
            fields['metadata'] = MetadataField(metadata)
        return Instance(fields)
| 36.131148 | 109 | 0.644283 |
5843a19987db02bf79996219ed6526f190a4785f | 12,551 | py | Python | server.py | mattochal/demo_fsl_public | f806c7b9d51559014275f854c8d4cbbed6825817 | [
"MIT"
] | 4 | 2021-03-17T18:09:47.000Z | 2021-04-22T10:18:55.000Z | server.py | mattochal/demo_fsl_public | f806c7b9d51559014275f854c8d4cbbed6825817 | [
"MIT"
] | null | null | null | server.py | mattochal/demo_fsl_public | f806c7b9d51559014275f854c8d4cbbed6825817 | [
"MIT"
] | 1 | 2021-04-09T03:59:58.000Z | 2021-04-09T03:59:58.000Z | import argparse
import time
import socket
import pickle
import struct
import pprint
import copy
import pdb
import tqdm
import json
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.join(os.path.dirname(currentdir),'src')
sys.path.insert(0,parentdir)
from utils.utils import set_torch_seed, set_gpu, get_tasks, get_data, get_model, get_backbone, get_strategy, get_args, compress_args
from utils.parser_utils import *
from utils.bunch import bunch
from utils.builder import ExperimentBuilder
from utils.ptracker import PerformanceTracker
from utils.dataloader import DataLoader
from tasks.task_generator import TaskGenerator
from fsl_demo_task import DemoFSLTask
import sys
import pprint
import numpy as np
def setup_algorithms(server_args):
    """
    Load datasets and pretrained models.

    For every entry in ``server_args.models`` this rebuilds the original
    experiment (args file, backbone, strategy, model) from its checkpoint
    directory and loads the pretrained weights.  Returns
    ``(loaded_models, datasets)`` where ``loaded_models`` maps a unique
    display name to an ExperimentBuilder, and ``datasets`` are the splits
    loaded from the first model's config (shared by all models).
    """
    loaded_models = {}
    datasets = None
    abspath = os.path.abspath(".")
    set_torch_seed(server_args.seed)
    # NOTE(review): dict-style access here vs attribute access elsewhere --
    # presumably server_args (a bunch) supports both; verify.
    if "exp_path" in server_args and server_args["exp_path"] is not None:
        abspath = os.path.abspath(server_args["exp_path"])

    for builder_args in server_args.models:
        original_args = copy.copy(builder_args)
        assert 'continue_from' in builder_args, 'All "models" should have a "continue_from" entry.'
        assert 'gpu' in builder_args, 'All "models" should have a specified "gpu" entry or "cpu" device.'

        # Recreate the command line that reproduces the saved experiment,
        # overriding seed/dataset with the server-level settings.
        stdin_list = [
            "--args_file", os.path.join(abspath, builder_args["continue_from"], 'configs', 'config.json'),
            "--continue_from", os.path.join(abspath, builder_args["continue_from"]),
            "--gpu", builder_args['gpu'],
            "--seed", server_args.seed,
            "--dataset", server_args.dataset,
            "--dataset_args", json.dumps({'dataset_version': server_args.version,
                                          'data_path': server_args.data_path})
        ]
        builder_args, excluded_args, parser = get_args(stdin_list)
        builder_args = bunch.bunchify(builder_args)
        compressed_args = compress_args(bunch.unbunchify(builder_args), parser)
        device = set_gpu(builder_args.gpu)
        tasks = get_tasks(builder_args)
        # Datasets are loaded once (from the first model's config) and shared.
        datasets = get_data(builder_args) if datasets is None else datasets
        backbone = get_backbone(builder_args, device)
        strategy = get_strategy(builder_args, device)
        model = get_model(backbone, tasks, datasets, strategy, builder_args, device)
        # NOTE(review): recomputed; identical to the value assigned above.
        compressed_args = compress_args(bunch.unbunchify(builder_args), parser)

        print(" ----------------- FULL ARGS (COMPACT) ----------------")
        pprint.pprint(compressed_args, indent=2)
        print(" ------------------------------------------------------")
        print(" ------------------ UNRECOGNISED ARGS -----------------")
        pprint.pprint(excluded_args, indent=2)
        print(" ------------------------------------------------------")

        system = ExperimentBuilder(model, tasks, datasets, device, builder_args)
        system.load_pretrained()
        model.set_mode('test')

        # simpleshot additionally needs the train-split feature mean.
        if builder_args["model"] == 'simpleshot':
            system.model.set_train_mean(system.datasets['train'])

        # Display name: explicit 'name' if given, else the model type;
        # disambiguate duplicates with a "(k)" suffix.
        name = original_args['name'] if 'name' in original_args else builder_args['model']
        tie_breaker = 0
        name_proposal = name
        while name_proposal in loaded_models:
            tie_breaker+=1
            name_proposal = "{}({})".format(name,tie_breaker)
        loaded_models[name_proposal] = system

    return loaded_models, datasets
def setup_socket(server_args):
    """Create, bind and start listening on a localhost TCP socket.

    The port is taken from ``server_args.port``; at most one pending
    connection is queued.
    """
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    address = ('localhost', server_args.port)
    print('starting up on {} port {}'.format(address[0], address[1]))
    listener.bind(address)
    listener.listen(1)
    return listener
def send_data(sock, data):
    """Pickle *data* and send it as one length-prefixed message."""
    print('sending', data)
    payload = pickle.dumps(data)
    send_one_message(sock, payload)


def send_one_message(sock, data):
    """Send *data* preceded by its length as a 4-byte big-endian prefix."""
    sock.sendall(struct.pack('!I', len(data)))
    time.sleep(0.5)  # preserved pacing between header and body
    sock.sendall(data)


def recv_one_message(sock):
    """Receive one length-prefixed message; return None on EOF."""
    header = recvall(sock, 4)
    if not header:
        return None
    (body_len,) = struct.unpack('!I', header)
    return recvall(sock, body_len)


def recvall(sock, count):
    """Read exactly *count* bytes; return None if the peer closes early."""
    chunks = []
    remaining = count
    while remaining:
        chunk = sock.recv(remaining)
        if not chunk:
            return None
        chunks.append(chunk)
        remaining -= len(chunk)
    return b''.join(chunks)


def wait_for_response(sock):
    """Block for one message and unpickle it; None if the peer hung up."""
    print('waiting for a response')
    raw = recv_one_message(sock)
    if raw is None:
        return None
    return pickle.loads(raw)
class Server:
    """TCP server exposing pretrained few-shot models over a simple
    pickle-based, length-prefixed request/response protocol.

    Recognised request actions (see ``listen_and_reply``): 'setup',
    'classify' and 'reset_task'.
    """
    def __init__(self, server_args):
        self.server_args = server_args
        self.sock = setup_socket(self.server_args)
        self.systems, self.datasets = setup_algorithms(self.server_args)
        # Per-split metric configuration handed to PerformanceTracker.
        self.ptracker_args = {
            "test": { "metrics": [ "accuracy", "loss", "preds" ], "save_task_performance": False },
            "train": { "metrics": [ "accuracy", "loss", "preds" ], "save_task_performance": False },
            "val": { "metrics": [ "accuracy", "loss", "preds" ], "save_task_performance": False }
        }

    def listen(self):
        """Accept clients one at a time, forever, serving each until it disconnects."""
        while True:
            # wait for client:
            print("\n\n\n")
            print("--------------------------------------------")
            print('| Waiting for a connection on port: {}  |'.format(self.server_args.port))
            print("--------------------------------------------")
            connection, client_address = self.sock.accept()
            print('> Connection accepted!')
            self.restore_initial_state()
            try:
                self.listen_and_reply(connection)
            finally:
                # Clean up the connection
                connection.close()

    def listen_and_reply(self, sock):
        """Serve one client: read messages until EOF and dispatch by 'action'."""
        while True:
            print('listening')
            message = wait_for_response(sock)
            if message is None:
                break
            # NOTE(review): isinstance() would be the idiomatic type check.
            if type(message) is dict and 'action' in message:
                if message['action'] == 'setup':
                    data = self.setup_information()
                    send_data(sock, data)
                if message['action'] == 'classify':
                    data = self.train_algorithms(message['supports'], message['targets'])
                    send_data(sock, data)
                if message['action'] == 'reset_task':
                    print('reset task')
                    self.restore_initial_state()
            else:
                print('Message unrecognised! Message: {}'.format(message))

    def setup_information(self):
        """Describe the loaded algorithms and dataset to a freshly connected client."""
        data = {
            'action':'setup',
            'algorithms': list(self.systems.keys()),
            'dataset': self.server_args.dataset,
            'version': self.server_args.version,
            'dataset_sig': self.datasets['test'].get_signature(),
            'dataset_args': {setname:bunch.unbunchify(self.datasets[setname].args) for setname in ['train', 'test', 'val']}
        }
        return data

    def train_algorithms(self, supports_idx, targets_idx):
        """Meta-test every loaded model on one task built from the given
        support/target sample indices; return per-model target predictions
        (as class names) keyed by model name.
        """
        data = {'action':'output','models':{}}
        with tqdm.tqdm(total=len(self.systems), disable=False) as pbar_val:
            for model_name in self.systems:
                ptracker = PerformanceTracker(args=self.ptracker_args)
                builder = self.systems[model_name]
                # Map sample indices to class names, then to contiguous
                # task-local label ids (targets reuse the support mapping).
                supports_lblname = [builder.datasets['test'].inv_class_dict[i] for i in supports_idx]
                targets_lblname = [builder.datasets['test'].inv_class_dict[i] for i in targets_idx]
                slbl_uniq, supports_lbl = np.unique(supports_lblname, return_inverse=True)
                tlbl_uniq = np.array(slbl_uniq.tolist())
                tlbl_uniq_map = {n:i for i, n in enumerate(tlbl_uniq)}
                targets_lbl = np.array([tlbl_uniq_map[name] for name in targets_lblname])
                task_args = {"test":{"support_idx":supports_idx,"support_lbls":supports_lbl,
                                     "target_idx":targets_idx, "target_lbls" :targets_lbl}}
                print("training {} on {} supports, eval {} targets".format(model_name, len(supports_idx),
                                                                           len(targets_idx)))
                ptracker.set_mode('test')
                builder.model.set_mode('test')
                builder.task_args['test'] = bunch.bunchify(task_args['test'])
                # A single deterministic task wrapping exactly the requested samples.
                task_generator = TaskGenerator(builder.datasets['test'],
                                               task=DemoFSLTask,
                                               task_args= bunch.bunchify(task_args['test']),
                                               num_tasks=1,
                                               seed=builder.args.seed,
                                               epoch=builder.state.epoch,
                                               mode='test',
                                               fix_classes=False,
                                               deterministic=True)
                for sampler in task_generator:
                    dataloader = DataLoader(builder.datasets['test'], sampler, builder.device, builder.state.epoch, 'test')
                    builder.model.meta_test(dataloader, ptracker)
                pbar_val.set_description('Testing ({}) -> {} {}'. format(model_name,
                                                                         ptracker.get_performance_str(),
                                                                         builder.model.get_summary_str()))
                # Convert predicted label ids back to class names for the client.
                data['models'][model_name] = tlbl_uniq[ptracker.lastest_task_performance["preds"]].tolist()
        return data

    def restore_initial_state(self):
        """Reset every model's network to its pretrained state between tasks/clients."""
        for model_name in self.systems:
            self.systems[model_name].model.net_reset()
def get_server_parser(*args, **kwargs):
    """Build the argparse parser for the demo server's own options.

    Any positional/keyword arguments are forwarded to ArgumentParser
    (used by get_server_args to attach parents / suppress help).
    """
    parser = argparse.ArgumentParser(*args, **kwargs)
    option_specs = [
        # NOTE(review): type=list splits a command-line string into single
        # characters, so --models is really only usable through the json
        # args file; kept unchanged to preserve behaviour.
        ('--models', dict(type=list, default=[])),
        ('--port', dict(type=int, default=8991)),
        ('--dataset', dict(type=str, default='mini')),
        ('--version', dict(type=str, default=None)),
        ('--seed', dict(type=int, default=0)),
        ('--data_path', dict(default='../data/')),
        ('--exp_path', dict(default='../experiment/')),
    ]
    for flag, spec in option_specs:
        parser.add_argument(flag, **spec)
    return parser
def get_server_args(sysargv=None, json_args=None):
    """Resolve server configuration from, in increasing priority:
    parser defaults, the json args file (--args_file), then the command
    line itself.  Returns the merged settings as a bunch.
    """
    config_parser = argparse.ArgumentParser(add_help=False)
    config_parser.add_argument('--args_file', type=str, default='server_args.json',
                               help="Path and filename to json configuration file, over writing the values in the argparse")
    base_parser = get_server_parser(description="FSL Demo (Server)", add_help=False, parents=[config_parser])

    # Step 1. Get config file (only --args_file is parsed at this stage).
    config_args, remaining_argv = config_parser.parse_known_args(sysargv)
    config_args = vars(config_args)
    if json_args is None and config_args['args_file'] not in [None,'None','none','']:
        json_args = load_json(config_args['args_file'])
        json_args = from_syntactic_sugar(json_args)
        json_args['args_file'] = config_args['args_file']
    elif json_args is None:
        json_args = {}

    # Step 2. Update base args defaults using json (unknown json keys are
    # dropped by update_dict_exclusive rather than added).
    default_args = vars(base_parser.parse_args([]))
    default_args, excluded_args = update_dict_exclusive(default_args, json_args)
    base_parser.set_defaults(**default_args)

    # Step 3. Update base args using command line args (highest priority).
    args, remaining_argv = base_parser.parse_known_args(remaining_argv)
    args_dict = vars(args)
    pprint.pprint(args_dict, indent=4)
    args = bunch.bunchify(args_dict)
    return args
def main(server_args):
    """Construct the Server and block forever handling client connections."""
    # setup
    print('Setting up server')
    time.sleep(2)  # brief pause so the startup message is readable
    Server(server_args).listen()
if __name__ == "__main__":
server_args = get_server_args(sys.argv)
main(server_args) | 39.718354 | 132 | 0.587125 |
724e91ba3586fb80eeead31acb599aaf1b9bcef3 | 865 | py | Python | app.py | tnniras/StockAdvisor | 55c23361c468abf2cbffc263277d5a734011264f | [
"MIT"
] | null | null | null | app.py | tnniras/StockAdvisor | 55c23361c468abf2cbffc263277d5a734011264f | [
"MIT"
] | null | null | null | app.py | tnniras/StockAdvisor | 55c23361c468abf2cbffc263277d5a734011264f | [
"MIT"
] | null | null | null | #!/usr/bin/python
#===================================================================================
# Author : Tushar Niras, Amol Morbale
# Email : tnniras@gmail.com
# Information : this class is defined to make CRUD operations easy.
#===================================================================================
#import yahoo_finance
#from yahoo_finance import Share
from MySQL import MySQL
import re
# Demo of the MySQL helper's query-builder API (Python 2 script).
sql = MySQL()

# sql.where / sql.order_by should be called before sql.select(); select()
# then applies the accumulated clauses.  The second select() argument
# restricts the returned fields; omit it to return all fields, e.g.
# sql.select("users").
sql.where("email", "prashant@gmail.com")
sql.order_by("email", "asc")
result = sql.select("users", "id, email")
for email in result:
    print "%s : %s" % (email["id"], email["email"])

#sql.delete("users")  # destructive example, intentionally left disabled
sql.close()
| 26.212121 | 84 | 0.569942 |
32704bd5b348674be3ea3682e8065c20fd719a52 | 3,719 | py | Python | libqtile/widget/net.py | jrazik/qtile | 9af8e75c59827c437606a5fd752cdbcea5d0029f | [
"MIT"
] | null | null | null | libqtile/widget/net.py | jrazik/qtile | 9af8e75c59827c437606a5fd752cdbcea5d0029f | [
"MIT"
] | null | null | null | libqtile/widget/net.py | jrazik/qtile | 9af8e75c59827c437606a5fd752cdbcea5d0029f | [
"MIT"
] | null | null | null | # Copyright (c) 2014 Rock Neurotiko
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import division
from libqtile.log_utils import logger
from . import base
import six
class Net(base.ThreadedPollText):
    """Displays interface down and up speed.

    Rates are computed as the difference between successive reads of
    /proc/net/dev, divided by ``update_interval``.
    """
    orientations = base.ORIENTATION_HORIZONTAL
    defaults = [
        ('interface', 'wlan0', 'The interface to monitor'),
        ('update_interval', 1, 'The update interval.'),
    ]

    def __init__(self, **config):
        base.ThreadedPollText.__init__(self, **config)
        self.add_defaults(Net.defaults)
        # Baseline counters; poll() diffs against these each interval.
        self.interfaces = self.get_stats()

    def convert_b(self, b):
        """Scale a byte count into (value, unit-letter) with SI steps.

        Here we round to 1000 instead of 1024 because of round things.
        """
        letter = 'B'
        # b is a float, so don't use integer division
        if int(b / 1000) > 0:
            b /= 1000.0
            letter = 'k'
        if int(b / 1000) > 0:
            b /= 1000.0
            letter = 'M'
        if int(b / 1000) > 0:
            b /= 1000.0
            letter = 'G'
        # I hope no one have more than 999 GB/s
        return b, letter

    def get_stats(self):
        """Parse /proc/net/dev into {iface: {'down': rx_bytes, 'up': tx_bytes}}."""
        lines = []  # type: List[str]
        with open('/proc/net/dev', 'r') as f:
            # First two lines are column headers.
            lines = f.readlines()[2:]
        interfaces = {}
        for s in lines:
            int_s = s.split()
            name = int_s[0][:-1]  # strip the trailing ':' from the iface name
            down = float(int_s[1])
            up = float(int_s[-8])
            interfaces[name] = {'down': down, 'up': up}
        return interfaces

    def _format(self, down, up):
        """Render both rates as fixed-width (5-char) right-aligned strings."""
        down = "%0.2f" % down
        up = "%0.2f" % up
        if len(down) > 5:
            down = down[:5]
        if len(up) > 5:
            up = up[:5]
        down = " " * (5 - len(down)) + down
        up = " " * (5 - len(up)) + up
        return down, up

    def poll(self):
        """Return the 'down/up' text for the bar, or log if the iface is gone."""
        try:
            new_int = self.get_stats()
            down = new_int[self.interface]['down'] - \
                self.interfaces[self.interface]['down']
            up = new_int[self.interface]['up'] - \
                self.interfaces[self.interface]['up']
            # Bytes per second over the polling interval.
            down = down / self.update_interval
            up = up / self.update_interval
            down, down_letter = self.convert_b(down)
            up, up_letter = self.convert_b(up)
            down, up = self._format(down, up)
            str_base = six.u("%s%s \u2193\u2191 %s%s")
            self.interfaces = new_int
            return str_base % (down, down_letter, up, up_letter)
        except Exception:
            logger.error('%s: Probably your wlan device is switched off or otherwise not present in your system.',
                         self.__class__.__name__)
| 35.759615 | 114 | 0.595321 |
18a1cdf4f3aa1cb357e2553c0b7928317381092b | 5,237 | py | Python | lib/nltk/tokenize/sexpr.py | sreejithb/cows_and_bulls | 1c411e788432cfb73bcaefd16823414425677652 | [
"Apache-2.0"
] | 101 | 2016-11-04T06:11:22.000Z | 2021-12-16T14:45:36.000Z | lib/nltk/tokenize/sexpr.py | sreejithb/cows_and_bulls | 1c411e788432cfb73bcaefd16823414425677652 | [
"Apache-2.0"
] | 386 | 2016-09-26T08:40:46.000Z | 2022-01-04T11:51:38.000Z | lib/nltk/tokenize/sexpr.py | sreejithb/cows_and_bulls | 1c411e788432cfb73bcaefd16823414425677652 | [
"Apache-2.0"
] | 29 | 2016-10-13T13:34:51.000Z | 2021-12-16T14:44:53.000Z | # Natural Language Toolkit: Tokenizers
#
# Copyright (C) 2001-2016 NLTK Project
# Author: Yoav Goldberg <yoavg@cs.bgu.ac.il>
# Steven Bird <stevenbird1@gmail.com> (minor edits)
# URL: <http://nltk.sourceforge.net>
# For license information, see LICENSE.TXT
"""
S-Expression Tokenizer
``SExprTokenizer`` is used to find parenthesized expressions in a
string. In particular, it divides a string into a sequence of
substrings that are either parenthesized expressions (including any
nested parenthesized expressions), or other whitespace-separated
tokens.
>>> from nltk.tokenize import SExprTokenizer
>>> SExprTokenizer().tokenize('(a b (c d)) e f (g)')
['(a b (c d))', 'e', 'f', '(g)']
By default, `SExprTokenizer` will raise a ``ValueError`` exception if
used to tokenize an expression with non-matching parentheses:
>>> SExprTokenizer().tokenize('c) d) e (f (g')
Traceback (most recent call last):
...
ValueError: Un-matched close paren at char 1
The ``strict`` argument can be set to False to allow for
non-matching parentheses. Any unmatched close parentheses will be
listed as their own s-expression; and the last partial sexpr with
unmatched open parentheses will be listed as its own sexpr:
>>> SExprTokenizer(strict=False).tokenize('c) d) e (f (g')
['c', ')', 'd', ')', 'e', '(f (g']
The characters used for open and close parentheses may be customized
using the ``parens`` argument to the `SExprTokenizer` constructor:
>>> SExprTokenizer(parens='{}').tokenize('{a b {c d}} e f {g}')
['{a b {c d}}', 'e', 'f', '{g}']
The s-expression tokenizer is also available as a function:
>>> from nltk.tokenize import sexpr_tokenize
>>> sexpr_tokenize('(a b (c d)) e f (g)')
['(a b (c d))', 'e', 'f', '(g)']
"""
import re
from nltk.tokenize.api import TokenizerI
class SExprTokenizer(TokenizerI):
    """
    A tokenizer that divides strings into s-expressions.
    An s-expresion can be either:

      - a parenthesized expression, including any nested parenthesized
        expressions, or
      - a sequence of non-whitespace non-parenthesis characters.

    For example, the string ``(a (b c)) d e (f)`` consists of four
    s-expressions: ``(a (b c))``, ``d``, ``e``, and ``(f)``.

    By default, the characters ``(`` and ``)`` are treated as open and
    close parentheses, but alternative strings may be specified.

    :param parens: A two-element sequence specifying the open and close parentheses
        that should be used to find sexprs.  This will typically be either a
        two-character string, or a list of two strings.
    :type parens: str or list
    :param strict: If true, then raise an exception when tokenizing an ill-formed sexpr.
    """
    def __init__(self, parens='()', strict=True):
        if len(parens) != 2:
            raise ValueError('parens must contain exactly two strings')
        self._strict = strict
        self._open_paren = parens[0]
        self._close_paren = parens[1]
        # Matches either delimiter; used to scan the text paren-by-paren.
        self._paren_regexp = re.compile('%s|%s' % (re.escape(parens[0]),
                                                   re.escape(parens[1])))

    def tokenize(self, text):
        """
        Return a list of s-expressions extracted from *text*.
        For example:

            >>> SExprTokenizer().tokenize('(a b (c d)) e f (g)')
            ['(a b (c d))', 'e', 'f', '(g)']

        All parentheses are assumed to mark s-expressions.
        (No special processing is done to exclude parentheses that occur
        inside strings, or following backslash characters.)

        If the given expression contains non-matching parentheses,
        then the behavior of the tokenizer depends on the ``strict``
        parameter to the constructor.  If ``strict`` is ``True``, then
        raise a ``ValueError``.  If ``strict`` is ``False``, then any
        unmatched close parentheses will be listed as their own
        s-expression; and the last partial s-expression with unmatched open
        parentheses will be listed as its own s-expression:

            >>> SExprTokenizer(strict=False).tokenize('c) d) e (f (g')
            ['c', ')', 'd', ')', 'e', '(f (g']

        :param text: the string to be tokenized
        :type text: str or iter(str)
        :rtype: iter(str)
        """
        result = []
        pos = 0
        depth = 0
        for m in self._paren_regexp.finditer(text):
            paren = m.group()
            if depth == 0:
                # Text between sexprs is split on whitespace into plain tokens.
                result += text[pos:m.start()].split()
                pos = m.start()
            if paren == self._open_paren:
                depth += 1
            if paren == self._close_paren:
                if self._strict and depth == 0:
                    raise ValueError('Un-matched close paren at char %d'
                                     % m.start())
                # In non-strict mode an extra ')' leaves depth clamped at 0,
                # so it becomes its own one-character token below.
                depth = max(0, depth-1)
                if depth == 0:
                    result.append(text[pos:m.end()])
                    pos = m.end()
        if self._strict and depth > 0:
            raise ValueError('Un-matched open paren at char %d' % pos)
        if pos < len(text):
            # Trailing text (possibly an unclosed sexpr in non-strict mode).
            result.append(text[pos:])
        return result

# Module-level convenience function bound to a default tokenizer instance.
sexpr_tokenize = SExprTokenizer().tokenize
| 36.622378 | 88 | 0.605308 |
533821c16a3a4e39005ed2d44d9abf24cb864d1a | 1,706 | py | Python | third_party/protobuf/python/google/protobuf/__init__.py | Comran/SpartanBalloon2016 | 54eff7ca04339849b827d35cac361f9109da12e5 | [
"BSD-2-Clause"
] | 17 | 2017-08-04T15:41:05.000Z | 2020-10-16T18:02:41.000Z | third_party/protobuf/python/google/protobuf/__init__.py | maidiHaitai/haitaibrowser | a232a56bcfb177913a14210e7733e0ea83a6b18d | [
"BSD-3-Clause"
] | 3 | 2017-08-04T23:37:37.000Z | 2017-08-04T23:38:34.000Z | third_party/protobuf/python/google/protobuf/__init__.py | maidiHaitai/haitaibrowser | a232a56bcfb177913a14210e7733e0ea83a6b18d | [
"BSD-3-Clause"
] | 10 | 2016-02-25T08:08:09.000Z | 2021-10-21T12:40:33.000Z | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Copyright 2007 Google Inc. All Rights Reserved.
# Release version of this package ('b2' is a PEP 440 pre-release tag).
__version__ = '3.0.0b2'
| 50.176471 | 72 | 0.78136 |
133aa9b90362ba9582204b08abeb421a0515353f | 316 | py | Python | kattis/alphabetanimals.py | terror/Solutions | 1ad33daec95b565a38ac4730261593bcf249ac86 | [
"CC0-1.0"
] | 2 | 2020-08-01T22:53:32.000Z | 2020-08-31T22:45:35.000Z | kattis/alphabetanimals.py | terror/Solutions | 1ad33daec95b565a38ac4730261593bcf249ac86 | [
"CC0-1.0"
] | null | null | null | kattis/alphabetanimals.py | terror/Solutions | 1ad33daec95b565a38ac4730261593bcf249ac86 | [
"CC0-1.0"
] | null | null | null | s, d, p = input()[-1], {i: 0 for i in list("abcdefghijklmnopqrstuvwxyz")}, []
# Word-chain game: `s` is the last letter of the first input word, `d` maps
# each letter a-z to 0, and `p` is an empty candidate list (all initialised
# on the line above).  Read the candidate words, then pick one starting
# with `s` -- preferring a "winning" word, printed with a trailing '!'.
for i in range(int(input())):
    t = input()
    d[t[0]] = 1          # some candidate starts with this letter
    if t[0] == s:
        p.append(t)      # t is playable: it starts with the required letter
if not p:
    # No playable word at all.
    print("?")
    exit()
for i in p:
    # Winning move: no candidate starts with i's last letter, or
    # (presumably) the single playable word chains back onto `s` itself,
    # exhausting the only word with that initial -- TODO confirm against
    # the problem statement.
    if not d[i[-1]] or (s == p[0][-1] and len(p) == 1):
        print("{}!".format(i))
        exit()
# No winning move: play any legal word.
print(p[0])
| 17.555556 | 77 | 0.493671 |
c90b6c460982e89400aecfb83a696604b15d7d70 | 1,669 | py | Python | src/backend/partaj/users/admin.py | MTES-MCT/partaj | 0025c17a96d9212430d18ec36f6a2474c4609738 | [
"MIT"
] | 2 | 2020-10-15T11:28:26.000Z | 2021-06-25T15:24:33.000Z | src/backend/partaj/users/admin.py | MTES-MCT/partaj | 0025c17a96d9212430d18ec36f6a2474c4609738 | [
"MIT"
] | 7 | 2020-10-01T14:49:51.000Z | 2022-01-24T09:44:10.000Z | src/backend/partaj/users/admin.py | MTES-MCT/partaj | 0025c17a96d9212430d18ec36f6a2474c4609738 | [
"MIT"
] | 3 | 2020-03-18T15:53:26.000Z | 2021-09-16T14:39:27.000Z | """
Admin of the `users` app of the Partaj project.
"""
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
from impersonate.admin import UserAdminImpersonateMixin
from .models import User
@admin.register(User)
class UserAdmin(UserAdminImpersonateMixin, admin.ModelAdmin):
    """
    Admin setup for users.

    Combines Django's ModelAdmin with django-impersonate's mixin so staff
    can impersonate a user directly from the admin change page.
    """
    # Display fields automatically created and updated by Django (as readonly)
    readonly_fields = ["id", "date_joined", "updated_at"]
    # Organize data on the admin page (fieldset order = on-page order)
    fieldsets = (
        (
            _("Database information"),
            {"fields": ["id", "date_joined", "updated_at"]},
        ),
        (
            _("Identifying information"),
            {
                "fields": [
                    "username",
                    "first_name",
                    "title",
                    "last_name",
                    "email",
                    "phone_number",
                    "unit_name",
                ]
            },
        ),
        (
            _("Authorization information"),
            {"fields": ["is_active", "is_staff", "is_superuser"]},
        ),
    )
    # Most important identifying fields to show on a User in list view in the admin
    list_display = ("email", "first_name", "last_name", "is_staff", "is_superuser")
    # Add easy filters on our most relevant fields for filtering
    list_filter = ("is_active", "is_staff", "is_superuser")
    # By default, show newest users first
    ordering = ("-date_joined",)
    # When impersonating a user with django-impersonate, open the impersonation in a new window
    open_new_window = True
| 28.775862 | 95 | 0.569802 |
84e3872a3789d5351e3416aba8da14ec8958eb5d | 44,313 | py | Python | tests/integration_tests/python/test_uploads.py | scitran/nimsapi | a4203cf6c6d29aa15d33011250ee69ff929fcb0d | [
"MIT"
] | 13 | 2016-05-31T14:32:58.000Z | 2021-09-17T07:18:11.000Z | tests/integration_tests/python/test_uploads.py | scitran/core | a4203cf6c6d29aa15d33011250ee69ff929fcb0d | [
"MIT"
] | 911 | 2016-02-16T18:40:27.000Z | 2018-08-07T17:50:29.000Z | tests/integration_tests/python/test_uploads.py | scitran/nimsapi | a4203cf6c6d29aa15d33011250ee69ff929fcb0d | [
"MIT"
] | 16 | 2016-02-17T15:54:34.000Z | 2021-04-07T05:30:34.000Z | import copy
import datetime
import json
import dateutil.parser
import pytest
# TODO switch to upload_file_form in all uid(-match)/label/reaper upload tests
# after #772 (coverage-low-hanging 3) gets merged to avoid conflict hell
@pytest.fixture(scope='function')
def upload_file_form(file_form, merge_dict, randstr):
    """Fixture returning a factory that builds a multipart upload form with a
    randomized, collision-free container hierarchy in its metadata.

    Keyword arguments passed to the factory are deep-merged over the default
    metadata (via ``merge_dict``), letting tests override any subtree.
    """
    def create_form(**meta_override):
        # Unique prefix so repeated invocations never collide on uid/label.
        prefix = randstr()
        # One dummy CSV per hierarchy level; 'unused' is intentionally never
        # referenced from meta so endpoints can be tested for discarding it.
        files = {}
        for name in ('project', 'subject', 'session', 'acquisition', 'unused'):
            files[name] = '{}-{}.csv'.format(prefix, name)

        project_meta = {
            'label': prefix + '-project-label',
            'files': [{'name': files['project']}],
        }
        session_meta = {
            'uid': prefix + '-session-uid',
            'label': prefix + '-session-label',
            'subject': {
                'code': prefix + '-subject-code',
                'files': [{'name': files['subject']}],
            },
            'files': [{'name': files['session']}],
        }
        acquisition_meta = {
            'uid': prefix + '-acquisition-uid',
            'label': prefix + '-acquisition-label',
            'files': [{'name': files['acquisition']}],
        }
        meta = {
            'project': project_meta,
            'session': session_meta,
            'acquisition': acquisition_meta,
        }
        if meta_override:
            merge_dict(meta, meta_override)
        return file_form(*files.values(), meta=meta)
    return create_form
def test_reaper_upload(data_builder, randstr, upload_file_form, as_admin):
    """Reaper uploads: containers are created/matched by session uid,
    an upload with metadata but zero files is rejected, and an existing
    session uid wins over the group/project named in the metadata.

    (Fix: removed stray Python-2 debug ``print`` statements left in the test.)
    """
    group_1 = data_builder.create_group()
    prefix = randstr()
    project_label_1 = prefix + '-project-label-1'
    session_uid = prefix + '-session-uid'

    # reaper-upload files to group_1/project_label_1 using session_uid
    r = as_admin.post('/upload/reaper', files=upload_file_form(
        group={'_id': group_1},
        project={'label': project_label_1},
        session={'uid': session_uid},
    ))
    assert r.ok

    # reaper-upload to group_1/project_label_1 using session_uid without any files
    file_form = upload_file_form(
        group={'_id': group_1},
        project={'label': project_label_1, "files": []},
        session={'uid': session_uid + "1", "files": [], 'subject': {
            'code': prefix + '-subject-code',
            'files': []
        }}
    )
    # metadata-only reaper upload (no file parts) must be a bad request
    r = as_admin.post('/upload/reaper', files={"metadata": file_form.get("metadata")})
    assert r.status_code == 400

    # get session created by the upload
    project_1 = as_admin.get('/groups/' + group_1 + '/projects').json()[0]['_id']
    session = as_admin.get('/projects/' + project_1 + '/sessions').json()[0]['_id']
    assert len(as_admin.get('/projects/' + project_1 + '/sessions').json()) == 1
    assert len(as_admin.get('/sessions/' + session + '/acquisitions').json()) == 1
    assert len(as_admin.get('/sessions/' + session).json()['files']) == 1

    # move session to group_2/project_2
    group_2 = data_builder.create_group()
    project_2 = data_builder.create_project(group=group_2, label=prefix + '-project-label-2')
    as_admin.put('/sessions/' + session, json={'project': project_2})
    assert len(as_admin.get('/projects/' + project_1 + '/sessions').json()) == 0
    assert len(as_admin.get('/projects/' + project_2 + '/sessions').json()) == 1

    # reaper-upload files using existing session_uid and incorrect group/project
    r = as_admin.post('/upload/reaper', files=upload_file_form(
        group={'_id': group_1},
        project={'label': project_label_1},
        session={'uid': session_uid},
    ))
    assert r.ok

    # verify no new sessions were created and that group/project was ignored
    # NOTE uploaded project file is NOT stored in this scenario!
    assert len(as_admin.get('/projects/' + project_1 + '/sessions').json()) == 0
    assert len(as_admin.get('/projects/' + project_2 + '/sessions').json()) == 1

    # verify that acquisition creation/file uploads worked
    assert len(as_admin.get('/sessions/' + session + '/acquisitions').json()) == 2
    assert len(as_admin.get('/sessions/' + session).json()['files']) == 2

    # clean up
    data_builder.delete_group(group_1, recursive=True)
    data_builder.delete_group(group_2, recursive=True)
def test_reaper_upload_unknown_group_project(data_builder, file_form, as_root, as_admin):
    """
    If the label endpoint receives an upload with a blank group and project, set to
    group: unknown and create or find "Unknown" project.

    Covers four fallback cases: blank group+project (twice, to prove reuse of
    the same "Unknown" project), nonexistent group with a project label
    (project named "<group>_<label>" under group "unknown"), and an existing
    group with a blank project label ("Unknown" project inside that group).
    """
    # Upload without group id or project label
    r = as_root.post('/upload/label', files=file_form(
        'acquisition.csv',
        meta={
            'group': {'_id': ''},
            'project': {
                'label': '',
            },
            'session': {
                'label': 'test_session_label',
            },
            'acquisition': {
                'label': 'test_acquisition_label',
                'files': [{'name': 'acquisition.csv'}]
            }
        })
    )
    assert r.ok

    # get session created by the upload -- must land in unknown/"Unknown"
    r = as_root.get('/groups/unknown/projects')
    assert r.ok
    project_list = r.json()
    assert len(project_list) == 1
    project = project_list[0]
    assert 'Unknown' == project_list[0]['label']
    unknown_project = project['_id']
    assert len(as_root.get('/projects/' + unknown_project + '/sessions').json()) == 1
    session = as_root.get('/projects/' + unknown_project + '/sessions').json()[0]['_id']
    assert len(as_root.get('/sessions/' + session + '/acquisitions').json()) == 1

    # do another upload without group id or project label
    r = as_root.post('/upload/label', files=file_form(
        'acquisition.csv',
        meta={
            'group': {'_id': ''},
            'project': {
                'label': '',
            },
            'session': {
                'label': 'test_session_label_2',
            },
            'acquisition': {
                'label': 'test_acquisition_label_2',
                'files': [{'name': 'acquisition.csv'}]
            }
        })
    )
    assert r.ok

    # Test that another session was added to Unknown project (no new project)
    assert len(as_root.get('/projects/' + unknown_project + '/sessions').json()) == 2
    session2 = as_root.get('/projects/' + unknown_project + '/sessions').json()[1]['_id']
    assert len(as_root.get('/sessions/' + session2 + '/acquisitions').json()) == 1

    # Upload with a nonexistent group id and a project label
    r = as_root.post('/upload/label', files=file_form(
        'acquisition.csv',
        meta={
            'group': {'_id': 'not_a_real_group'},
            'project': {
                'label': 'new_project',
            },
            'session': {
                'label': 'test_session_label',
            },
            'acquisition': {
                'label': 'test_acquisition_label',
                'files': [{'name': 'acquisition.csv'}]
            }
        })
    )
    assert r.ok

    # Try uploading 0 files (metadata part only) -- must be rejected
    r = as_root.post('/upload/label', files={"metadata":file_form(
        'acquisition.csv',
        meta={
            'group': {'_id': 'not_a_real_group'},
            'project': {
                'label': 'new_project',
            },
            'session': {
                'label': 'test_session_label',
            },
            'acquisition': {
                'label': 'test_acquisition_label',
                'files': [{'name': 'acquisition.csv'}]
            }
        }).get("metadata")}
    )
    assert r.status_code == 400

    # get session created by the upload -- project name combines group id + label
    r = as_root.get('/groups/unknown/projects')
    assert r.ok
    project_list = r.json()
    assert len(project_list) == 2
    project = project_list[1]
    assert 'not_a_real_group_new_project' == project['label']
    named_unknown_project = project['_id']
    assert len(as_root.get('/projects/' + named_unknown_project + '/sessions').json()) == 1
    session = as_root.get('/projects/' + named_unknown_project + '/sessions').json()[0]['_id']
    assert len(as_root.get('/sessions/' + session + '/acquisitions').json()) == 1

    group1 = data_builder.create_group()

    # Upload with an existing group id and no project label
    r = as_root.post('/upload/label', files=file_form(
        'acquisition.csv',
        meta={
            'group': {'_id': group1},
            'project': {
                'label': '',
            },
            'session': {
                'label': 'test_session_label',
            },
            'acquisition': {
                'label': 'test_acquisition_label',
                'files': [{'name': 'acquisition.csv'}]
            }
        })
    )
    assert r.ok

    # get session created by the upload -- "Unknown" project inside the real group
    r = as_root.get('/groups/' + group1 + '/projects')
    assert r.ok
    project_list = r.json()
    assert len(project_list) == 1
    project = project_list[0]
    assert 'Unknown' == project['label']
    project1 = project['_id']
    assert len(as_root.get('/projects/' + project1 + '/sessions').json()) == 1
    session = as_root.get('/projects/' + project1 + '/sessions').json()[0]['_id']
    assert len(as_root.get('/sessions/' + session + '/acquisitions').json()) == 1

    # clean up
    data_builder.delete_group(group1, recursive=True)
    data_builder.delete_project(unknown_project, recursive=True)
    data_builder.delete_project(named_unknown_project, recursive=True)
def test_reaper_project_search(data_builder, file_form, as_root):
    """
    When attempting to find a project, we do a case insensitive lookup.
    Ensure that mongo regex works as expected.

    Scenario: three sessions come in with similar but different group labels
    and blank project labels.
        1 - "Test with more info"
        2 - "TeSt"
        3 - "test"
    Since neither of these groups exist by this id, they will be placed in the
    "unknown" group with the above string as their project label. Ensure the first
    is placed in a separate project than the second and third (i.e. the regex
    must anchor the whole label, not prefix-match).
    """
    group_label_1 = 'Test with more info'
    group_label_2 = 'TeSt'
    group_label_3 = 'test'
    # fallback project label is "<group label>_<project label>"; project label is blank
    expected_project_label_1 = 'Test with more info_'
    expected_project_label_2 = 'TeSt_'

    # Upload with group 1
    r = as_root.post('/upload/label', files=file_form(
        'acquisition.csv',
        meta={
            'group': {'_id': group_label_1},
            'project': {
                'label': '',
            },
            'session': {
                'label': 'test_session_label',
            },
            'acquisition': {
                'label': 'test_acquisition_label',
                'files': [{'name': 'acquisition.csv'}]
            }
        })
    )
    assert r.ok

    # get session created by the upload
    r = as_root.get('/groups/unknown/projects')
    assert r.ok
    project_list = r.json()
    assert len(project_list) == 1
    project = project_list[0]
    assert project_list[0]['label'] == expected_project_label_1
    project_1 = project['_id']
    assert len(as_root.get('/projects/' + project_1 + '/sessions').json()) == 1
    session = as_root.get('/projects/' + project_1 + '/sessions').json()[0]['_id']
    assert len(as_root.get('/sessions/' + session + '/acquisitions').json()) == 1

    # Ensure group label 2 ends up in separate project
    r = as_root.post('/upload/label', files=file_form(
        'acquisition.csv',
        meta={
            'group': {'_id': group_label_2},
            'project': {
                'label': '',
            },
            'session': {
                'label': 'test_session_label',
            },
            'acquisition': {
                'label': 'test_acquisition_label',
                'files': [{'name': 'acquisition.csv'}]
            }
        })
    )
    assert r.ok

    # get session created by the upload
    r = as_root.get('/groups/unknown/projects')
    assert r.ok
    project_list = r.json()
    assert len(project_list) == 2
    project = project_list[1]
    assert project_list[1]['label'] == expected_project_label_2
    project_2 = project['_id']
    assert len(as_root.get('/projects/' + project_2 + '/sessions').json()) == 1
    session = as_root.get('/projects/' + project_2 + '/sessions').json()[0]['_id']
    assert len(as_root.get('/sessions/' + session + '/acquisitions').json()) == 1

    # Upload with another "test" project with different case -- should reuse
    # project_2 via the case-insensitive match
    r = as_root.post('/upload/label', files=file_form(
        'acquisition.csv',
        meta={
            'group': {'_id': group_label_3},
            'project': {
                'label': '',
            },
            'session': {
                'label': 'test_session_label_2',
            },
            'acquisition': {
                'label': 'test_acquisition_label_2',
                'files': [{'name': 'acquisition.csv'}]
            }
        })
    )
    assert r.ok

    # get session created by the upload
    r = as_root.get('/groups/unknown/projects')
    assert r.ok
    project_list = r.json()

    # Ensure there are still only 2 projects
    assert len(project_list) == 2
    project = project_list[1]
    assert project_list[1]['label'] == expected_project_label_2
    assert len(as_root.get('/projects/' + project_2 + '/sessions').json()) == 2
    session2 = as_root.get('/projects/' + project_2 + '/sessions').json()[1]['_id']
    assert len(as_root.get('/sessions/' + session2 + '/acquisitions').json()) == 1

    # clean up
    data_builder.delete_group('unknown', recursive=True)
def test_uid_upload(data_builder, file_form, as_admin, as_user, as_public):
    """uid and uid-match uploads: auth/permission checks, metadata validation,
    session-uid matching within a project, and 404s on unmatched uids."""
    group = data_builder.create_group()
    project3_id = data_builder.create_project()

    # try to uid-upload w/o logging in
    r = as_public.post('/upload/uid')
    assert r.status_code == 403

    # try to uid-upload w/o metadata
    r = as_admin.post('/upload/uid', files=file_form('test.csv'))
    assert r.status_code == 400

    # NOTE unused.csv is testing code that discards files not referenced from meta
    uid_files = ('project.csv', 'subject.csv', 'session.csv', 'acquisition.csv', 'unused.csv')
    uid_meta = {
        'group': {'_id': group},
        'project': {
            'label': 'uid_upload',
            'files': [{'name': 'project.csv'}]
        },
        'session': {
            'uid': 'uid_upload',
            'subject': {
                'code': 'uid_upload',
                'files': [{'name': 'subject.csv'}]
            },
            'files': [{'name': 'session.csv'}]
        },
        'acquisition': {
            'uid': 'uid_upload',
            'files': [{'name': 'acquisition.csv'}]
        }
    }

    # try to uid-upload to new project w/o group rw perms
    r = as_user.post('/upload/uid', files=file_form(*uid_files, meta=uid_meta))
    assert r.status_code == 403

    # try to uid-upload no files (metadata part only)
    r = as_admin.post('/upload/uid', files={"metadata": file_form(*uid_files, meta=uid_meta).get("metadata")})
    assert r.status_code == 400

    # uid-upload files
    r = as_admin.post('/upload/uid', files=file_form(*uid_files, meta=uid_meta))
    assert r.ok

    # try to uid-upload to existing project w/o project rw perms
    uid_meta_2 = copy.deepcopy(uid_meta)
    uid_meta_2['session']['uid'] = uid_meta_2['acquisition']['uid'] = 'uid_upload_2'
    r = as_user.post('/upload/uid', files=file_form(*uid_files, meta=uid_meta_2))
    assert r.status_code == 403

    # uid-upload to existing project but new session uid
    r = as_admin.post('/upload/uid', files=file_form(*uid_files, meta=uid_meta_2))
    assert r.ok

    # uid-upload files to existing session uid
    r = as_admin.post('/upload/uid', files=file_form(*uid_files, meta=uid_meta))
    assert r.ok

    # try uid-upload files to existing session uid w/ other user (having no rw perms on session)
    r = as_user.post('/upload/uid', files=file_form(*uid_files, meta=uid_meta))
    assert r.status_code == 403

    # Upload to different project with same uid -- uid matching is per-project,
    # so this must create a new session rather than reuse the earlier one
    uid_meta_3 = copy.deepcopy(uid_meta)
    r = as_admin.get('/projects/' + project3_id)
    assert r.ok
    uid_meta_3['project']['label'] = r.json()['label']
    r = as_admin.post('/upload/uid', files=file_form(*uid_files, meta=uid_meta_3))
    assert r.ok
    r = as_admin.get('/projects/' + project3_id + '/sessions')
    assert r.ok
    assert len(r.json()) > 0

    # TODO figure out why api.dao.hierarchy._group_id_fuzzy_match is NOT called below
    # # uid-upload to fat-fingered group id (should end up in group)
    # uid_meta_fuzzy = copy.deepcopy(uid_meta)
    # uid_meta_fuzzy['group']['_id'] = 'c' + group
    # r = as_admin.post('/upload/uid', files=file_form(*uid_files, meta=uid_meta_fuzzy))
    # assert r.ok

    # # uid-upload to utterly non-existent group id (should end up in unknown group)
    # uid_meta_unknown = copy.deepcopy(uid_meta)
    # uid_meta_unknown['group']['_id'] = '0000000000000000000000000'
    # r = as_admin.post('/upload/uid', files=file_form(*uid_files, meta=uid_meta_unknown))
    # assert r.ok

    # uid-match-upload files (to the same session and acquisition uid's as above)
    uid_match_meta = copy.deepcopy(uid_meta)
    del uid_match_meta['group']
    r = as_admin.post('/upload/uid-match', files=file_form(*uid_files, meta=uid_match_meta))
    assert r.ok

    # try uid-match upload w/ other user (having no rw permissions on session)
    r = as_user.post('/upload/uid-match', files=file_form(*uid_files, meta=uid_match_meta))
    assert r.status_code == 403

    # try uid-match upload w/ non-existent acquisition uid
    uid_match_meta['acquisition']['uid'] = 'nonexistent_uid'
    r = as_admin.post('/upload/uid-match', files=file_form(*uid_files, meta=uid_match_meta))
    assert r.status_code == 404

    # try uid-match upload w/ non-existent session uid
    uid_match_meta['session']['uid'] = 'nonexistent_uid'
    r = as_admin.post('/upload/uid-match', files=file_form(*uid_files, meta=uid_match_meta))
    assert r.status_code == 404

    # delete group and children recursively (created by upload)
    data_builder.delete_group(group, recursive=True)
def test_label_upload(data_builder, file_form, as_admin):
    """Label-upload creates the full hierarchy (project/session/acquisition)
    and attaches the referenced file at each level; 'unused.csv' is not
    referenced from meta and so should be discarded by the endpoint."""
    group = data_builder.create_group()

    label_meta = {
        'group': {'_id': group},
        'project': {
            'label': 'test_project',
            'files': [{'name': 'project.csv'}]
        },
        'session': {
            'label': 'test_session_label',
            'subject': {
                'code': 'test_subject_code',
                'files': [{'name': 'subject.csv'}]
            },
            'files': [{'name': 'session.csv'}]
        },
        'acquisition': {
            'label': 'test_acquisition_label',
            'files': [{'name': 'acquisition.csv'}]
        }
    }
    upload = file_form(
        'project.csv', 'subject.csv', 'session.csv', 'acquisition.csv', 'unused.csv',
        meta=label_meta)

    # label-upload files
    r = as_admin.post('/upload/label', files=upload)
    assert r.ok

    # delete group and children recursively (created by upload)
    data_builder.delete_group(group, recursive=True)
def test_multi_upload(data_builder, upload_file_form, randstr, as_admin):
    """Cross-matching between uid- and label-uploads: uid-uploads merge into
    containers matched by uid, label-uploads merge by label (+ subject code),
    and a label-upload against a uid-created session (or vice versa) creates
    a new session instead of merging."""
    # test uid-uploads respecting existing uids
    fixed_uid = randstr()
    fixed_uid_group = data_builder.create_group(_id=fixed_uid)
    fixed_uid_form_args = dict(
        group={'_id': fixed_uid_group},
        project={'label': fixed_uid + '-project-label'},
        session={'uid': fixed_uid + '-fixed-uid'},
        acquisition={'uid': fixed_uid + '-fixed-uid'},
    )

    # uid-upload #1 w/ fixed uid
    r = as_admin.post('/upload/uid', files=upload_file_form(**fixed_uid_form_args))
    assert r.ok

    # get newly created project/session/acquisition
    project = as_admin.get('/groups/' + fixed_uid_group + '/projects').json()[0]['_id']
    session = as_admin.get('/projects/' + project + '/sessions').json()[0]['_id']
    acquisition = as_admin.get('/sessions/' + session + '/acquisitions').json()[0]['_id']

    # test uploaded files
    assert len(as_admin.get('/projects/' + project).json()['files']) == 1
    assert len(as_admin.get('/sessions/' + session).json()['files']) == 1
    assert len(as_admin.get('/acquisitions/' + acquisition).json()['files']) == 1

    # uid-upload #2 w/ fixed uid
    r = as_admin.post('/upload/uid', files=upload_file_form(**fixed_uid_form_args))
    assert r.ok

    # test hierarchy (should have no new containers)
    assert len(as_admin.get('/groups/' + fixed_uid_group + '/projects').json()) == 1
    assert len(as_admin.get('/projects/' + project + '/sessions').json()) == 1
    assert len(as_admin.get('/sessions/' + session + '/acquisitions').json()) == 1

    # test uploaded files (each container accumulated a second file)
    assert len(as_admin.get('/projects/' + project).json()['files']) == 2
    assert len(as_admin.get('/sessions/' + session).json()['files']) == 2
    assert len(as_admin.get('/acquisitions/' + acquisition).json()['files']) == 2

    # label-upload w/ fixed uid (labels are random => no label match)
    r = as_admin.post('/upload/label', files=upload_file_form(**fixed_uid_form_args))
    assert r.ok

    # test hierarchy (should have new session)
    assert len(as_admin.get('/groups/' + fixed_uid_group + '/projects').json()) == 1
    assert len(as_admin.get('/projects/' + project + '/sessions').json()) == 2

    # test label-uploads respecting existing labels
    # NOTE subject.code is also checked when label-matching sessions!
    fixed_label = randstr()
    fixed_label_group = data_builder.create_group(_id=fixed_label)
    fixed_label_form_args = dict(
        group={'_id': fixed_label_group},
        project={'label': fixed_label + '-project-label'},
        session={'label': fixed_label + '-fixed-label', 'subject': {'code': fixed_label + '-subject-code'}},
        acquisition={'label': fixed_label + '-fixed-label'},
    )

    # label-upload #1 w/ fixed label
    r = as_admin.post('/upload/label', files=upload_file_form(**fixed_label_form_args))
    assert r.ok

    # get newly created project/session/acquisition
    project = as_admin.get('/groups/' + fixed_label_group + '/projects').json()[0]['_id']
    session = as_admin.get('/projects/' + project + '/sessions').json()[0]['_id']
    acquisition = as_admin.get('/sessions/' + session + '/acquisitions').json()[0]['_id']

    # test uploaded files
    assert len(as_admin.get('/projects/' + project).json()['files']) == 1
    assert len(as_admin.get('/sessions/' + session).json()['files']) == 1
    assert len(as_admin.get('/acquisitions/' + acquisition).json()['files']) == 1

    # label-upload #2 w/ fixed label
    r = as_admin.post('/upload/label', files=upload_file_form(**fixed_label_form_args))
    assert r.ok

    # test hierarchy (should have no new containers)
    assert len(as_admin.get('/groups/' + fixed_label_group + '/projects').json()) == 1
    assert len(as_admin.get('/projects/' + project + '/sessions').json()) == 1
    assert len(as_admin.get('/sessions/' + session + '/acquisitions').json()) == 1

    # test uploaded files (each container accumulated a second file)
    assert len(as_admin.get('/projects/' + project).json()['files']) == 2
    assert len(as_admin.get('/sessions/' + session).json()['files']) == 2
    assert len(as_admin.get('/acquisitions/' + acquisition).json()['files']) == 2

    # uid-upload w/ fixed label (uids are random => no uid match)
    r = as_admin.post('/upload/uid', files=upload_file_form(**fixed_label_form_args))
    assert r.ok

    # test hierarchy (should have new session)
    assert len(as_admin.get('/groups/' + fixed_label_group + '/projects').json()) == 1
    assert len(as_admin.get('/projects/' + project + '/sessions').json()) == 2

    # clean up
    data_builder.delete_group(fixed_uid_group, recursive=True)
    data_builder.delete_group(fixed_label_group, recursive=True)
def find_file_in_array(filename, files):
    """Return the first entry in ``files`` whose 'name' equals ``filename``.

    ``files`` is a list of file dicts as returned by the API. Returns None
    explicitly when no entry matches (entries missing 'name' never match).
    """
    return next((f for f in files if f.get('name') == filename), None)
def test_engine_upload_errors(as_drone, as_user):
    """Engine uploads refuse non-root users (402) and malformed params (400)."""
    # non-root requester is refused outright
    assert as_user.post('/engine').status_code == 402

    # drone requests with missing or invalid query params are bad requests
    bad_params = [
        {},                     # no level
        {'level': 'what'},      # invalid level
        {'level': 'project'},   # level without id
    ]
    for params in bad_params:
        assert as_drone.post('/engine', params=params).status_code == 400
def test_acquisition_engine_upload(data_builder, file_form, as_root):
    """Engine upload at acquisition level: info merges at every level, labels
    are never overwritten, per-file type/info metadata is applied, and a
    non-existent job id is a 404.

    (Fix: the final loop asserted ``mf is not None`` -- the loop variable,
    which can never be None -- instead of the ``find_file_in_array`` result
    ``f``, so a missing file would have crashed with a TypeError rather than
    failing the intended assertion.)
    """
    project = data_builder.create_project()
    session = data_builder.create_session()
    acquisition = data_builder.create_acquisition()
    assert as_root.post('/acquisitions/' + acquisition + '/files', files=file_form('test.txt')).ok
    job = data_builder.create_job(inputs={
        'test': {'type': 'acquisition', 'id': acquisition, 'name': 'test.txt'}
    })

    metadata = {
        'project':{
            'label': 'engine project',
            'info': {'test': 'p'}
        },
        'session':{
            'label': 'engine session',
            'subject': {'code': 'engine subject'},
            'info': {'test': 's'}
        },
        'acquisition':{
            'label': 'engine acquisition',
            'timestamp': '2016-06-20T21:57:36+00:00',
            'info': {'test': 'a'},
            'files':[
                {
                    'name': 'one.csv',
                    'type': 'engine type 0',
                    'info': {'test': 'f0'}
                },
                {
                    'name': 'two.csv',
                    'type': 'engine type 1',
                    'info': {'test': 'f1'}
                }
            ]
        }
    }

    # try engine upload w/ non-existent job_id
    r = as_root.post('/engine',
        params={'level': 'acquisition', 'id': acquisition, 'job': '000000000000000000000000'},
        files=file_form('one.csv', 'two.csv', meta=metadata)
    )
    assert r.status_code == 404

    # engine upload
    r = as_root.post('/engine',
        params={'level': 'acquisition', 'id': acquisition, 'job': job},
        files=file_form('one.csv', 'two.csv', meta=metadata)
    )
    assert r.ok

    r = as_root.get('/projects/' + project)
    assert r.ok
    p = r.json()
    # Engine metadata should not replace existing fields
    assert p['label'] != metadata['project']['label']
    assert p['info'] == metadata['project']['info']

    r = as_root.get('/sessions/' + session)
    assert r.ok
    s = r.json()
    # Engine metadata should not replace existing fields
    assert s['label'] != metadata['session']['label']
    assert s['info'] == metadata['session']['info']
    assert s['subject']['code'] == metadata['session']['subject']['code']

    r = as_root.get('/acquisitions/' + acquisition)
    assert r.ok
    a = r.json()
    # Engine metadata should not replace existing fields
    assert a['label'] != metadata['acquisition']['label']
    assert a['info'] == metadata['acquisition']['info']
    a_timestamp = dateutil.parser.parse(a['timestamp'])
    m_timestamp = dateutil.parser.parse(metadata['acquisition']['timestamp'])
    assert a_timestamp == m_timestamp

    # every file named in the metadata must exist with matching type/info
    for mf in metadata['acquisition']['files']:
        f = find_file_in_array(mf['name'], a['files'])
        # BUG FIX: was `assert mf is not None` (vacuously true); check the lookup result
        assert f is not None
        assert f['type'] == mf['type']
        assert f['info'] == mf['info']
def test_session_engine_upload(data_builder, file_form, as_root):
    """Engine upload at session level: info merges at project/session level,
    labels are never overwritten, the session timestamp is applied, and
    per-file type/info metadata is applied to session files."""
    project = data_builder.create_project()
    session = data_builder.create_session()

    metadata = {
        'project':{
            'label': 'engine project',
            'info': {'test': 'p'}
        },
        'session':{
            'label': 'engine session',
            'subject': {'code': 'engine subject'},
            'timestamp': '2016-06-20T21:57:36+00:00',
            'info': {'test': 's'},
            'files': [
                {
                    'name': 'one.csv',
                    'type': 'engine type 0',
                    'info': {'test': 'f0'}
                },
                {
                    'name': 'two.csv',
                    'type': 'engine type 1',
                    'info': {'test': 'f1'}
                }
            ]
        }
    }

    r = as_root.post('/engine',
        params={'level': 'session', 'id': session},
        files=file_form('one.csv', 'two.csv', meta=metadata)
    )
    assert r.ok

    r = as_root.get('/projects/' + project)
    assert r.ok
    p = r.json()
    # Engine metadata should not replace existing fields
    assert p['label'] != metadata['project']['label']
    assert p['info'] == metadata['project']['info']

    r = as_root.get('/sessions/' + session)
    assert r.ok
    s = r.json()
    # Engine metadata should not replace existing fields
    assert s['label'] != metadata['session']['label']
    assert s['info'] == metadata['session']['info']
    assert s['subject']['code'] == metadata['session']['subject']['code']
    s_timestamp = dateutil.parser.parse(s['timestamp'])
    m_timestamp = dateutil.parser.parse(metadata['session']['timestamp'])
    assert s_timestamp == m_timestamp

    # every stored session file must have an entry in the metadata with
    # matching type/info
    for f in s['files']:
        mf = find_file_in_array(f['name'], metadata['session']['files'])
        assert mf is not None
        assert f['type'] == mf['type']
        assert f['info'] == mf['info']
def test_project_engine_upload(data_builder, file_form, as_root):
    """Engine upload at project level: info merges, the label is not
    overwritten, and per-file type/info metadata is applied."""
    project = data_builder.create_project()

    file_entries = [
        {
            'name': 'one.csv',
            'type': 'engine type 0',
            'info': {'test': 'f0'}
        },
        {
            'name': 'two.csv',
            'type': 'engine type 1',
            'info': {'test': 'f1'}
        }
    ]
    metadata = {
        'project': {
            'label': 'engine project',
            'info': {'test': 'p'},
            'files': file_entries
        }
    }

    r = as_root.post('/engine',
        params={'level': 'project', 'id': project},
        files=file_form('one.csv', 'two.csv', meta=metadata)
    )
    assert r.ok

    r = as_root.get('/projects/' + project)
    assert r.ok
    project_doc = r.json()

    # Engine metadata should not replace existing fields
    assert project_doc['label'] != metadata['project']['label']
    assert project_doc['info'] == metadata['project']['info']

    # every stored file must match its metadata entry's type/info
    for stored in project_doc['files']:
        expected = find_file_in_array(stored['name'], metadata['project']['files'])
        assert expected is not None
        assert stored['type'] == expected['type']
        assert stored['info'] == expected['info']
def test_acquisition_file_only_engine_upload(data_builder, file_form, as_root):
    """Engine upload with files but no metadata simply attaches the files."""
    acquisition = data_builder.create_acquisition()
    names = ('one.csv', 'two.csv')

    r = as_root.post('/engine',
                     params={'level': 'acquisition', 'id': acquisition},
                     files=file_form(*names))
    assert r.ok

    r = as_root.get('/acquisitions/' + acquisition)
    assert r.ok
    assert set(f['name'] for f in r.json()['files']) == set(names)
def test_acquisition_subsequent_file_engine_upload(data_builder, file_form, as_root):
    """Two consecutive engine uploads accumulate files on the acquisition
    instead of replacing the earlier set."""
    acquisition = data_builder.create_acquisition()

    uploaded = []
    for file_name, file_type, info_value in (
            ('one.csv', 'engine type 1', 'f1'),
            ('two.csv', 'engine type 2', 'f2')):
        r = as_root.post('/engine',
            params={'level': 'acquisition', 'id': acquisition},
            files=file_form(file_name, meta={
                'acquisition': {
                    'files': [{
                        'name': file_name,
                        'type': file_type,
                        'info': {'test': info_value}
                    }]
                }
            })
        )
        assert r.ok
        uploaded.append(file_name)

        # after each upload the acquisition holds exactly the files so far
        r = as_root.get('/acquisitions/' + acquisition)
        assert r.ok
        assert set(f['name'] for f in r.json()['files']) == set(uploaded)
def test_acquisition_metadata_only_engine_upload(data_builder, file_form, as_root):
    """Engine upload with metadata but no files: info merges at every level,
    labels are never overwritten, and the acquisition timestamp is applied."""
    project = data_builder.create_project()
    session = data_builder.create_session()
    acquisition = data_builder.create_acquisition()

    metadata = {
        'project': {
            'label': 'engine project',
            'info': {'test': 'p'}
        },
        'session':{
            'label': 'engine session',
            'subject': {'code': 'engine subject'},
            'info': {'test': 's'}
        },
        'acquisition':{
            'label': 'engine acquisition',
            'timestamp': '2016-06-20T21:57:36+00:00',
            'info': {'test': 'a'}
        }
    }

    r = as_root.post('/engine',
        params={'level': 'acquisition', 'id': acquisition},
        files=file_form(meta=metadata)
    )
    assert r.ok

    r = as_root.get('/projects/' + project)
    assert r.ok
    p = r.json()
    # Engine metadata should not replace existing fields
    assert p['label'] != metadata['project']['label']
    assert p['info'] == metadata['project']['info']

    r = as_root.get('/sessions/' + session)
    assert r.ok
    s = r.json()
    # Engine metadata should not replace existing fields
    assert s['label'] != metadata['session']['label']
    assert s['info'] == metadata['session']['info']
    assert s['subject']['code'] == metadata['session']['subject']['code']

    r = as_root.get('/acquisitions/' + acquisition)
    assert r.ok
    a = r.json()
    # Engine metadata should not replace existing fields
    assert a['label'] != metadata['acquisition']['label']
    assert a['info'] == metadata['acquisition']['info']
    a_timestamp = dateutil.parser.parse(a['timestamp'])
    m_timestamp = dateutil.parser.parse(metadata['acquisition']['timestamp'])
    assert a_timestamp == m_timestamp
def test_analysis_upload(data_builder, default_payload, file_form, as_admin):
    """Create and delete analyses on sessions and acquisitions, both as
    direct file uploads and as a job-backed analysis referencing an
    acquisition file as gear input."""
    session = data_builder.create_session()
    acquisition = data_builder.create_acquisition()
    gear_doc = default_payload['gear']['gear']
    # gear takes a single file input named 'csv'
    gear_doc['inputs'] = {
        'csv': {
            'base': 'file'
        }
    }
    gear = data_builder.create_gear(gear=gear_doc)

    # create session analysis
    r = as_admin.post('/sessions/' + session + '/analyses', files=file_form(
        'one.csv', meta={'label': 'test analysis', 'inputs': [{'name': 'one.csv'}]}
    ))
    assert r.ok
    session_analysis = r.json()['_id']

    # delete session analysis
    r = as_admin.delete('/sessions/' + session + '/analyses/' + session_analysis)
    assert r.ok

    # create acquisition analysis
    r = as_admin.post('/acquisitions/' + acquisition + '/analyses', files=file_form(
        'one.csv', meta={'label': 'test analysis', 'inputs': [{'name': 'one.csv'}]}
    ))
    assert r.ok
    acquisition_analysis = r.json()['_id']

    # delete acquisition analysis
    r = as_admin.delete('/acquisitions/' + acquisition + '/analyses/' + acquisition_analysis)
    assert r.ok

    # create acquisition file (for the fixture acquisition)
    r = as_admin.post('/acquisitions/' + acquisition + '/files', files=file_form('one.csv'))
    assert r.ok

    # create session analysis (job) using acquisition's file as input
    r = as_admin.post('/sessions/' + session + '/analyses', json={
        'label': 'test analysis job',
        'job': {
            'gear_id': gear,
            'inputs': {
                'csv': {
                    'type': 'acquisition',
                    'id': acquisition,
                    'name': 'one.csv'
                }
            },
            'tags': ['example']
        }
    })
    assert r.ok
    session_analysis = r.json()['_id']

    # delete session analysis (job)
    r = as_admin.delete('/sessions/' + session + '/analyses/' + session_analysis)
    assert r.ok
def test_analysis_engine_upload(data_builder, file_form, as_root):
    """Engine uploads into a session analysis should attach timestamped output files."""
    session = data_builder.create_session()

    # Create the session analysis the engine will write into.
    resp = as_root.post('/sessions/' + session + '/analyses', files=file_form(
        'one.csv', meta={'label': 'test analysis', 'inputs': [{'name': 'one.csv'}]}
    ))
    assert resp.ok
    session_analysis = resp.json()['_id']

    # Engine upload of an output file targeted at that analysis.
    resp = as_root.post('/engine',
        params={'level': 'analysis', 'id': session_analysis},
        files=file_form('out.csv', meta={
            'type': 'text',
            'value': {'label': 'test'},
            'enabled': True}
        ))
    assert resp.ok

    # Output files must carry a 'created' timestamp.
    resp = as_root.get('/sessions/' + session + '/analyses/' + session_analysis)
    assert 'created' in resp.json()['files'][0]

    # Clean up the analysis.
    assert as_root.delete('/sessions/' + session + '/analyses/' + session_analysis).ok
def _packfile_upload(as_admin, project, files):
    """Start a packfile upload for *project*, send *files*, and return the token.

    Asserts that both the packfile-start and the upload request succeed. *files*
    is passed straight through to requests, so it may be a single ``file_form``
    dict or a list of ``('file', ...)`` tuples for multi-file uploads.
    """
    r = as_admin.post('/projects/' + project + '/packfile-start')
    assert r.ok
    token = r.json()['token']
    r = as_admin.post('/projects/' + project + '/packfile',
                      params={'token': token}, files=files)
    assert r.ok
    return token


def test_packfile_upload(data_builder, file_form, as_admin, as_root, api_db):
    """End-to-end packfile upload flow.

    Covers: error cases (bad target, missing token, files on packfile-end),
    mimetype/type of the created packfile, timestamp parsing into a BSON date,
    deduplication of sessions by subject code and acquisitions by timestamp,
    recreation after delete, and expired-token cleanup.
    """
    project = data_builder.create_project()
    session = data_builder.create_session()

    # try to start packfile-upload to non-project target
    r = as_admin.post('/sessions/' + session + '/packfile-start')
    assert r.status_code == 500

    # try to start packfile-upload to non-existent project
    r = as_admin.post('/projects/000000000000000000000000/packfile-start')
    assert r.status_code == 500

    # start packfile-upload
    r = as_admin.post('/projects/' + project + '/packfile-start')
    assert r.ok
    token = r.json()['token']

    # try to upload to packfile w/o token
    r = as_admin.post('/projects/' + project + '/packfile')
    assert r.status_code == 500

    # upload to packfile
    r = as_admin.post('/projects/' + project + '/packfile',
                      params={'token': token}, files=file_form('one.csv'))
    assert r.ok

    metadata_json = json.dumps({
        'project': {'_id': project},
        'session': {'label': 'test-packfile-label'},
        'acquisition': {
            'label': 'test-packfile-label',
            'timestamp': '1979-01-01T00:00:00+00:00'
        },
        'packfile': {'type': 'test'}
    })

    # try to finish packfile-upload w/o token
    r = as_admin.post('/projects/' + project + '/packfile-end',
                      params={'metadata': metadata_json})
    assert r.status_code == 500

    # try to finish packfile-upload with files in the request
    r = as_admin.post('/projects/' + project + '/packfile-end',
                      params={'token': token, 'metadata': metadata_json},
                      files={'file': ('packfile-end.txt', 'sending files to packfile-end is not allowed\n')}
                      )
    assert r.status_code == 500

    # finish packfile-upload (creates new session/acquisition)
    r = as_admin.post('/projects/' + project + '/packfile-end',
                      params={'token': token, 'metadata': metadata_json})
    assert r.ok

    # make sure file was uploaded and mimetype and type are properly set
    packfile = as_admin.get('/acquisitions').json()[0]['files'][0]
    assert packfile['mimetype'] == 'application/zip'
    assert packfile['type'] == 'test'

    # Test that acquisition timestamp was parsed into date type
    token = _packfile_upload(as_admin, project, file_form('one.csv'))
    metadata_json = json.dumps({
        'project': {'_id': project},
        'session': {
            'label': 'test-packfile-timestamp'
        },
        'acquisition': {
            'label': 'test-packfile-timestamp',
            'timestamp': '1990-01-01T00:00:00+00:00'
        },
        'packfile': {'type': 'test'}
    })
    r = as_admin.post('/projects/' + project + '/packfile-end',
                      params={'token': token, 'metadata': metadata_json})
    assert r.ok
    acquisition = api_db.acquisitions.find_one({'label': 'test-packfile-timestamp', 'timestamp': {'$type': 'date'}})
    assert acquisition.get('label') == 'test-packfile-timestamp'

    # Test that acquisition timestamp is used to differentiate acquisitions and
    # subject code for sessions.
    # Make sure there is only one session and one acquisition with the above label to start
    sessions = list(api_db.sessions.find({'label': 'test-packfile-timestamp'}))
    acquisitions = list(api_db.acquisitions.find({'label': 'test-packfile-timestamp'}))
    assert len(sessions) == 1
    assert len(acquisitions) == 1

    token = _packfile_upload(as_admin, project, file_form('one.csv'))
    metadata_json = json.dumps({
        'project': {'_id': project},
        'session': {
            'label': 'test-packfile-timestamp',
            'subject': {
                'code': 'new-subject'
            }
        },
        'acquisition': {
            'label': 'test-packfile-timestamp',
            'timestamp': '1999-01-01T00:00:00+00:00'
        },
        'packfile': {'type': 'test'}
    })
    r = as_admin.post('/projects/' + project + '/packfile-end',
                      params={'token': token, 'metadata': metadata_json})
    assert r.ok

    sessions = list(api_db.sessions.find({'label': 'test-packfile-timestamp'}))
    acquisitions = list(api_db.acquisitions.find({'label': 'test-packfile-timestamp'}))
    # Ensure a new session and a new acquisition were created
    assert len(sessions) == 2
    assert len(acquisitions) == 2
    # Ensure subject code exists on a session
    assert any(s.get('subject', {}).get('code') == 'new-subject' for s in sessions)
    # Ensure second acquisition timestamp exists on an acquisition
    assert any(str(a.get('timestamp')) == '1999-01-01 00:00:00' for a in acquisitions)

    # Remove sessions and acquisitions via delete and ensure new containers are created
    session_ids_before = [str(x['_id']) for x in sessions]
    acquisition_ids_before = [str(x['_id']) for x in acquisitions]
    for s in session_ids_before:
        assert as_admin.delete('/sessions/' + s).ok

    # Add another packfile with the same metadata as above
    token = _packfile_upload(as_admin, project, file_form('one.csv'))
    r = as_admin.post('/projects/' + project + '/packfile-end',
                      params={'token': token, 'metadata': metadata_json})
    assert r.ok

    # Ensure a new session and acquisition were created (old ones stay deleted)
    sessions_after = list(api_db.sessions.find({'label': 'test-packfile-timestamp', 'deleted': {'$exists': False}}))
    acquisitions_after = list(api_db.acquisitions.find({'label': 'test-packfile-timestamp', 'deleted': {'$exists': False}}))
    assert len(sessions_after) == 1
    assert len(acquisitions_after) == 1
    assert str(sessions_after[0]['_id']) not in session_ids_before
    assert str(acquisitions_after[0]['_id']) not in acquisition_ids_before

    # Upload two files under one fresh token, then expire that token.
    token = _packfile_upload(as_admin, project, [
        ('file', file_form('two.csv')['file']),
        ('file', file_form('three.csv')['file'])
    ])
    expired_time = datetime.datetime.utcnow() - datetime.timedelta(hours=2)
    api_db.tokens.update({'_id': token}, {'$set': {'modified': expired_time}})

    # try to clean packfile tokens w/o root
    r = as_admin.post('/clean-packfiles')
    assert r.status_code == 402

    r = as_root.post('/clean-packfiles')
    assert r.ok
    assert r.json()['removed']['tokens'] > 0

    # clean up added session/acquisition
    data_builder.delete_project(project, recursive=True)
| 35.564205 | 123 | 0.586871 |
208b173eb904a7aa7f211666ade046523a4f933e | 30 | py | Python | project/loss_functions/absolute_loss/__init__.py | FedericoV/SysBio_Modeling | f56c9ee3e361e88dbdb3e71f833ed3d433213309 | [
"MIT"
] | 1 | 2016-09-03T18:53:17.000Z | 2016-09-03T18:53:17.000Z | project/loss_functions/absolute_loss/__init__.py | FedericoV/SysBio_Modeling | f56c9ee3e361e88dbdb3e71f833ed3d433213309 | [
"MIT"
] | null | null | null | project/loss_functions/absolute_loss/__init__.py | FedericoV/SysBio_Modeling | f56c9ee3e361e88dbdb3e71f833ed3d433213309 | [
"MIT"
] | null | null | null | __author__ = 'Federico Vaggi'
| 15 | 29 | 0.766667 |
70fdb8ea37bd5a8d0f485c7c8dfbb3f8b46a8d31 | 1,606 | py | Python | src/api/sample.py | vinizalabs/aws-serverless-cicd | 46ff449d35ef2b3c80b66c05bb357c79ddb8ab71 | [
"Apache-2.0"
] | null | null | null | src/api/sample.py | vinizalabs/aws-serverless-cicd | 46ff449d35ef2b3c80b66c05bb357c79ddb8ab71 | [
"Apache-2.0"
] | null | null | null | src/api/sample.py | vinizalabs/aws-serverless-cicd | 46ff449d35ef2b3c80b66c05bb357c79ddb8ab71 | [
"Apache-2.0"
] | null | null | null | import boto3
import botocore
import json
import os
import uuid
import logging
#import yaml
# Module-level DynamoDB handle, created once and reused across warm Lambda invocations.
dynamodb = boto3.resource('dynamodb')
# Target table is injected via the TABLE_NAME environment variable.
# NOTE(review): os.getenv returns None when TABLE_NAME is unset — presumably the
# deployment template always sets it; verify.
table = dynamodb.Table(os.getenv('TABLE_NAME'))
def respond(response):
    """Wrap *response* in an API Gateway proxy result.

    The payload is printed so it shows up in CloudWatch logs. A response that
    carries an 'Error' key (as boto3 error responses do) maps to HTTP 400,
    anything else to 200.
    """
    print(response)
    status = '400' if 'Error' in response else '200'
    return {
        'statusCode': status,
        'body': json.dumps(response),
        'headers': {
            'Content-Type': 'application/json',
        },
    }
def get(id):
    """Fetch the item whose primary key is *id*.

    On a DynamoDB client error the error is logged and the boto error response
    (which carries an 'Error' key) is returned instead.
    """
    try:
        return table.get_item(
            Key={
                'id': id
            }
        )
    except botocore.exceptions.ClientError as err:
        logging.error(err)
        return err.response
def post(payload):
    """Store *payload* as a complete item (replacing any item with the same key).

    On a DynamoDB client error the error is logged and the boto error response
    (which carries an 'Error' key) is returned instead.
    """
    try:
        return table.put_item(Item=payload)
    except botocore.exceptions.ClientError as err:
        logging.error(err)
        return err.response
def delete(id):
    """Remove the item whose primary key is *id*.

    On a DynamoDB client error the error is logged and the boto error response
    (which carries an 'Error' key) is returned instead.
    """
    try:
        return table.delete_item(
            Key={
                'id': id
            }
        )
    except botocore.exceptions.ClientError as err:
        logging.error(err)
        return err.response
def lambda_handler(event, context):
    """Route an API Gateway proxy event to the CRUD helper for its HTTP method.

    The path must carry an ``id`` parameter; POST bodies are JSON documents
    stored under that id. Always returns an API Gateway proxy response dict.
    """
    operation = event['httpMethod']
    id = event['pathParameters']['id']
    if operation == 'POST':
        payload = json.loads(event['body'])
        payload['id'] = id
        return respond(post(payload))
    elif operation == 'GET':
        return respond(get(id))
    elif operation == 'DELETE':
        return respond(delete(id))
    # Previously any other method fell through and returned None, which API
    # Gateway rejects as a malformed proxy response. Reply with an explicit 400
    # instead ('Error' key makes respond() pick statusCode '400').
    return respond({'Error': 'Unsupported method "{}"'.format(operation)})
| 21.413333 | 70 | 0.542341 |
e668ec0b7cda548c204257853c8ca49b38fef480 | 2,368 | py | Python | tests/wikiqa/test_wikiqa_02_train.py | sciling/example-kubeflow-qatransfer | a3d74dacb1da22ab5ea60574943022584570f4d0 | [
"Apache-2.0"
] | null | null | null | tests/wikiqa/test_wikiqa_02_train.py | sciling/example-kubeflow-qatransfer | a3d74dacb1da22ab5ea60574943022584570f4d0 | [
"Apache-2.0"
] | null | null | null | tests/wikiqa/test_wikiqa_02_train.py | sciling/example-kubeflow-qatransfer | a3d74dacb1da22ab5ea60574943022584570f4d0 | [
"Apache-2.0"
] | null | null | null | import os
import pathlib
import sys
import tempfile
import unittest
import zipfile
from unittest import TestCase
import requests
from tqdm import tqdm
# Add the parent directory to the import path so the `src...` imports used by
# the tests resolve when running from this directory.
sys.path.append("..")

# All downloads and training outputs for these tests land here.
WORK_DIR = "/tmp/wikiqa-tests"
def download():
    """Download the pretrained classifier checkpoint archive and unpack it into WORK_DIR.

    Shows a tqdm progress bar while streaming and prints an error if the number
    of bytes received does not match the advertised content-length.
    """
    url = "https://github.com/sciling/qatransfer/releases/download/v0.1/save_class.zip"
    # stream=True so chunks arrive as they are read; without it requests buffers
    # the entire archive in memory before iter_content runs, which makes the
    # progress bar jump straight to 100%.
    response = requests.get(url, stream=True)
    # Fail fast on HTTP errors instead of trying to unzip an error page.
    response.raise_for_status()
    total_size_in_bytes = int(response.headers.get("content-length", 0))
    progress_bar = tqdm(total=total_size_in_bytes, unit="iB", unit_scale=True)
    with tempfile.TemporaryFile() as tf:
        for chunk in response.iter_content(chunk_size=1024):
            progress_bar.update(len(chunk))
            tf.write(chunk)
        # The archive is read back from the same temporary file handle.
        with zipfile.ZipFile(tf, "r") as archive:
            archive.extractall(WORK_DIR)
    progress_bar.close()
    if total_size_in_bytes != 0 and progress_bar.n != total_size_in_bytes:
        print("ERROR, something went wrong")
class TestAll(TestCase):
    """Smoke test for the WikiQA training entry point."""

    def test_train(self):
        """Run one tiny training step in a subprocess and check the output layout."""
        from src.wikiqa.wikiqa_train import wikiqa_train

        # Fetch the pretrained checkpoint into WORK_DIR first.
        download()

        model_path = WORK_DIR
        train_args = (
            WORK_DIR,
            WORK_DIR,
            "/save/out/squad/basic-class/00/save/basic-class-1",  # load_path
            "/save/out/squad/basic-class/00/shared.json",         # shared_path
            "00",      # run_id
            "10",      # sent_size_th
            "10",      # ques_size_th
            "1",       # num_epochs
            "1",       # num_steps
            "1",       # eval_period
            "1",       # save_period
            "/cpu:0",  # device
            "gpu",     # device_type
            "1",       # num_gpus
            model_path,
        )

        # Training is launched in a child process; a SystemExit here is treated
        # as a normal completion.
        try:
            from multiprocessing import Process

            worker = Process(target=wikiqa_train, args=train_args)
            worker.start()
            worker.join()
        except SystemExit:
            print("Finished successfully!")

        # The training run must have produced the expected output directories.
        self.assertIn("out", os.listdir(model_path))
        self.assertIn("wikiqa", os.listdir(model_path + "/out"))
# Allow running this test module directly, outside of a test runner.
if __name__ == "__main__":
    unittest.main()
| 25.73913 | 85 | 0.570524 |
d6e23281a4117beb5017739d3ca46039cc2f2312 | 1,976 | py | Python | test/sst/6.0.0/goblin_singlestream1-trace.py | tactcomplabs/gc64-hmcsim | 79bf4ffae74dc52bb605adb3e0e1eb84649f9624 | [
"BSD-2-Clause"
] | 10 | 2018-02-26T02:39:36.000Z | 2020-10-20T14:55:56.000Z | test/sst/6.0.0/goblin_singlestream1-trace.py | tactcomplabs/gc64-hmcsim | 79bf4ffae74dc52bb605adb3e0e1eb84649f9624 | [
"BSD-2-Clause"
] | 5 | 2017-09-07T11:41:35.000Z | 2020-10-12T14:35:39.000Z | test/sst/6.0.0/goblin_singlestream1-trace.py | tactcomplabs/gc64-hmcsim | 79bf4ffae74dc52bb605adb3e0e1eb84649f9624 | [
"BSD-2-Clause"
] | 4 | 2017-09-07T06:03:43.000Z | 2021-09-10T13:44:19.000Z | import sst
# Define SST core options
sst.setProgramOption("timebase", "1ps")
sst.setProgramOption("stopAtCycle", "0 ns")

# Define the simulation components
# Miranda CPU driven by a single-stream address generator
# (500000 requests over addresses below 512000).
comp_cpu = sst.Component("cpu", "miranda.BaseCPU")
comp_cpu.addParams({
    "verbose" : 0,
    "generator" : "miranda.SingleStreamGenerator",
    "generatorParams.verbose" : 0,
    "generatorParams.startat" : 3,
    "generatorParams.count" : 500000,
    "generatorParams.max_address" : 512000,
    "printStats" : 1,
})

# Tell SST what statistics handling we want
sst.setStatisticLoadLevel(4)

# Enable statistics outputs
comp_cpu.enableAllStatistics({"type":"sst.AccumulatorStatistic"})

# 2KB 4-way MESI L1 cache with 64-byte lines and a Cassini stride prefetcher.
comp_l1cache = sst.Component("l1cache", "memHierarchy.Cache")
comp_l1cache.addParams({
    "access_latency_cycles" : "2",
    "cache_frequency" : "2 Ghz",
    "replacement_policy" : "lru",
    "coherence_protocol" : "MESI",
    "associativity" : "4",
    "cache_line_size" : "64",
    "prefetcher" : "cassini.StridePrefetcher",
    "debug" : "1",
    "L1" : "1",
    "cache_size" : "2KB"
})

# Enable statistics outputs
comp_l1cache.enableAllStatistics({"type":"sst.AccumulatorStatistic"})

# Memory controller backed by the Goblin HMC simulator with all tracing
# channels (banks, queue, commands, latency, stalls) enabled.
comp_memory = sst.Component("memory", "memHierarchy.MemController")
comp_memory.addParams({
    "coherence_protocol" : "MESI",
    "backend.access_time" : "1000 ns",
    "backend.mem_size" : "512",
    "clock" : "1GHz",
    "backend" : "memHierarchy.goblinHMCSim",
    "backend.trace-banks" : "1",
    "backend.trace-queue" : "1",
    "backend.trace-cmds" : "1",
    "backend.trace-latency" : "1",
    "backend.trace-stalls" : "1"
})

# Define the simulation links
# CPU <-> L1 at 1000ps; setNoCut asks the partitioner to keep both endpoints
# together. L1 <-> memory controller at 50ps.
link_cpu_cache_link = sst.Link("link_cpu_cache_link")
link_cpu_cache_link.connect( (comp_cpu, "cache_link", "1000ps"), (comp_l1cache, "high_network_0", "1000ps") )
link_cpu_cache_link.setNoCut()
link_mem_bus_link = sst.Link("link_mem_bus_link")
link_mem_bus_link.connect( (comp_l1cache, "low_network_0", "50ps"), (comp_memory, "direct_link", "50ps") )
| 30.875 | 109 | 0.688259 |
8c58101f7c46a43ad2a0c5b8635db59b0d59b24c | 24,063 | py | Python | esp_sdk/apis/integrations_hipchat_api.py | zimmermanc/esp-sdk-python | cdef13c0dc6c3996b6c444160c71b2f1e3910c97 | [
"MIT"
] | 6 | 2017-06-05T20:37:19.000Z | 2019-04-10T08:43:59.000Z | esp_sdk/apis/integrations_hipchat_api.py | zimmermanc/esp-sdk-python | cdef13c0dc6c3996b6c444160c71b2f1e3910c97 | [
"MIT"
] | 18 | 2016-06-22T16:14:33.000Z | 2018-10-29T21:53:15.000Z | esp_sdk/apis/integrations_hipchat_api.py | zimmermanc/esp-sdk-python | cdef13c0dc6c3996b6c444160c71b2f1e3910c97 | [
"MIT"
] | 18 | 2016-07-27T19:20:01.000Z | 2020-11-17T02:09:58.000Z | # coding: utf-8
"""
ESP Documentation
The Evident Security Platform API (version 2.0) is designed to allow users granular control over their Amazon Web Service security experience by allowing them to review alerts, monitor signatures, and create custom signatures.
OpenAPI spec version: v2_sdk
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class IntegrationsHipchatApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create(self, external_account_ids, name, room, token, **kwargs):
"""
Create a Hipchat Integration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create(external_account_ids, name, room, token, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[int] external_account_ids: External accounts for integration (required)
:param str name: Name of the integration (required)
:param str room: The Hipchat room (required)
:param str token: The Hipchat token (required)
:param str include: Related objects that can be included in the response: integration See Including Objects for more information.
:param bool all_high_risk: Send all high risk alerts
:param bool all_low_risk: Send all low risk alerts
:param bool all_medium_risk: Send all medium risk alerts
:param list[int] custom_signature_ids: Custom signatures for integration
:param bool send_updates: This feature enables the integration to send alerts when they are updated. When disabled, alerts will only be sent when they are initially created. When enabled, alerts will additionally be sent when a change is made such as the alert ending. An alert may end for multiple reasons.
:param bool send_when_suppressed: Send notifications for suppressed alerts
:param list[int] signature_ids: Signatures for integration
:param list[str] statuses: Only send alerts that have the status in this list. Valid values are fail, warn, error, pass, info
:return: IntegrationHipchat
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_with_http_info(external_account_ids, name, room, token, **kwargs)
else:
(data) = self.create_with_http_info(external_account_ids, name, room, token, **kwargs)
return data
def create_with_http_info(self, external_account_ids, name, room, token, **kwargs):
"""
Create a Hipchat Integration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_with_http_info(external_account_ids, name, room, token, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[int] external_account_ids: External accounts for integration (required)
:param str name: Name of the integration (required)
:param str room: The Hipchat room (required)
:param str token: The Hipchat token (required)
:param str include: Related objects that can be included in the response: integration See Including Objects for more information.
:param bool all_high_risk: Send all high risk alerts
:param bool all_low_risk: Send all low risk alerts
:param bool all_medium_risk: Send all medium risk alerts
:param list[int] custom_signature_ids: Custom signatures for integration
:param bool send_updates: This feature enables the integration to send alerts when they are updated. When disabled, alerts will only be sent when they are initially created. When enabled, alerts will additionally be sent when a change is made such as the alert ending. An alert may end for multiple reasons.
:param bool send_when_suppressed: Send notifications for suppressed alerts
:param list[int] signature_ids: Signatures for integration
:param list[str] statuses: Only send alerts that have the status in this list. Valid values are fail, warn, error, pass, info
:return: IntegrationHipchat
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['external_account_ids', 'name', 'room', 'token', 'include', 'all_high_risk', 'all_low_risk', 'all_medium_risk', 'custom_signature_ids', 'send_updates', 'send_when_suppressed', 'signature_ids', 'statuses']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'external_account_ids' is set
if ('external_account_ids' not in params) or (params['external_account_ids'] is None):
raise ValueError("Missing the required parameter `external_account_ids` when calling `create`")
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `create`")
# verify the required parameter 'room' is set
if ('room' not in params) or (params['room'] is None):
raise ValueError("Missing the required parameter `room` when calling `create`")
# verify the required parameter 'token' is set
if ('token' not in params) or (params['token'] is None):
raise ValueError("Missing the required parameter `token` when calling `create`")
collection_formats = {}
resource_path = '/api/v2/integrations/hipchat.json_api'.replace('{format}', 'json_api')
path_params = {}
query_params = {}
if 'include' in params:
query_params['include'] = params['include']
header_params = {}
form_params = []
local_var_files = {}
if 'all_high_risk' in params:
form_params.append(('all_high_risk', params['all_high_risk']))
if 'all_low_risk' in params:
form_params.append(('all_low_risk', params['all_low_risk']))
if 'all_medium_risk' in params:
form_params.append(('all_medium_risk', params['all_medium_risk']))
if 'custom_signature_ids' in params:
form_params.append(('custom_signature_ids', params['custom_signature_ids']))
collection_formats['None'] = 'csv'
if 'external_account_ids' in params:
form_params.append(('external_account_ids', params['external_account_ids']))
collection_formats['None'] = 'csv'
if 'name' in params:
form_params.append(('name', params['name']))
if 'room' in params:
form_params.append(('room', params['room']))
if 'send_updates' in params:
form_params.append(('send_updates', params['send_updates']))
if 'send_when_suppressed' in params:
form_params.append(('send_when_suppressed', params['send_when_suppressed']))
if 'signature_ids' in params:
form_params.append(('signature_ids', params['signature_ids']))
collection_formats['None'] = 'csv'
if 'statuses' in params:
form_params.append(('statuses', params['statuses']))
collection_formats['None'] = 'csv'
if 'token' in params:
form_params.append(('token', params['token']))
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/vnd.api+json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/vnd.api+json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IntegrationHipchat',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def show(self, integration_id, **kwargs):
"""
Show a single Hipchat Integration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.show(integration_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int integration_id: The ID of the integration (required)
:param str include: Related objects that can be included in the response: integration See Including Objects for more information.
:return: IntegrationHipchat
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.show_with_http_info(integration_id, **kwargs)
else:
(data) = self.show_with_http_info(integration_id, **kwargs)
return data
def show_with_http_info(self, integration_id, **kwargs):
"""
Show a single Hipchat Integration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.show_with_http_info(integration_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int integration_id: The ID of the integration (required)
:param str include: Related objects that can be included in the response: integration See Including Objects for more information.
:return: IntegrationHipchat
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['integration_id', 'include']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method show" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'integration_id' is set
if ('integration_id' not in params) or (params['integration_id'] is None):
raise ValueError("Missing the required parameter `integration_id` when calling `show`")
collection_formats = {}
resource_path = '/api/v2/integrations/{integration_id}/hipchat.json_api'.replace('{format}', 'json_api')
path_params = {}
if 'integration_id' in params:
path_params['integration_id'] = params['integration_id']
query_params = {}
if 'include' in params:
query_params['include'] = params['include']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/vnd.api+json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/vnd.api+json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IntegrationHipchat',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update(self, integration_id, **kwargs):
"""
Update a Hipchat Integration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update(integration_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int integration_id: The ID of the integration (required)
:param str include: Related objects that can be included in the response: integration See Including Objects for more information.
:param bool all_high_risk: Send all high risk alerts
:param bool all_low_risk: Send all low risk alerts
:param bool all_medium_risk: Send all medium risk alerts
:param list[int] custom_signature_ids: Custom signatures for integration
:param list[int] external_account_ids: External accounts for integration
:param str name: Name of the integration
:param str room: The Hipchat room
:param bool send_updates: This feature enables the integration to send alerts when they are updated. When disabled, alerts will only be sent when they are initially created. When enabled, alerts will additionally be sent when a change is made such as the alert ending. An alert may end for multiple reasons.
:param bool send_when_suppressed: Send notifications for suppressed alerts
:param list[int] signature_ids: Signatures for integration
:param list[str] statuses: Only send alerts that have the status in this list. Valid values are fail, warn, error, pass, info
:param str token: The Hipchat token
:return: IntegrationHipchat
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_with_http_info(integration_id, **kwargs)
else:
(data) = self.update_with_http_info(integration_id, **kwargs)
return data
def update_with_http_info(self, integration_id, **kwargs):
"""
Update a Hipchat Integration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_with_http_info(integration_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int integration_id: The ID of the integration (required)
:param str include: Related objects that can be included in the response: integration See Including Objects for more information.
:param bool all_high_risk: Send all high risk alerts
:param bool all_low_risk: Send all low risk alerts
:param bool all_medium_risk: Send all medium risk alerts
:param list[int] custom_signature_ids: Custom signatures for integration
:param list[int] external_account_ids: External accounts for integration
:param str name: Name of the integration
:param str room: The Hipchat room
:param bool send_updates: This feature enables the integration to send alerts when they are updated. When disabled, alerts will only be sent when they are initially created. When enabled, alerts will additionally be sent when a change is made such as the alert ending. An alert may end for multiple reasons.
:param bool send_when_suppressed: Send notifications for suppressed alerts
:param list[int] signature_ids: Signatures for integration
:param list[str] statuses: Only send alerts that have the status in this list. Valid values are fail, warn, error, pass, info
:param str token: The Hipchat token
:return: IntegrationHipchat
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['integration_id', 'include', 'all_high_risk', 'all_low_risk', 'all_medium_risk', 'custom_signature_ids', 'external_account_ids', 'name', 'room', 'send_updates', 'send_when_suppressed', 'signature_ids', 'statuses', 'token']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'integration_id' is set
if ('integration_id' not in params) or (params['integration_id'] is None):
raise ValueError("Missing the required parameter `integration_id` when calling `update`")
collection_formats = {}
resource_path = '/api/v2/integrations/{integration_id}/hipchat.json_api'.replace('{format}', 'json_api')
path_params = {}
if 'integration_id' in params:
path_params['integration_id'] = params['integration_id']
query_params = {}
if 'include' in params:
query_params['include'] = params['include']
header_params = {}
form_params = []
local_var_files = {}
if 'all_high_risk' in params:
form_params.append(('all_high_risk', params['all_high_risk']))
if 'all_low_risk' in params:
form_params.append(('all_low_risk', params['all_low_risk']))
if 'all_medium_risk' in params:
form_params.append(('all_medium_risk', params['all_medium_risk']))
if 'custom_signature_ids' in params:
form_params.append(('custom_signature_ids', params['custom_signature_ids']))
collection_formats['None'] = 'csv'
if 'external_account_ids' in params:
form_params.append(('external_account_ids', params['external_account_ids']))
collection_formats['None'] = 'csv'
if 'name' in params:
form_params.append(('name', params['name']))
if 'room' in params:
form_params.append(('room', params['room']))
if 'send_updates' in params:
form_params.append(('send_updates', params['send_updates']))
if 'send_when_suppressed' in params:
form_params.append(('send_when_suppressed', params['send_when_suppressed']))
if 'signature_ids' in params:
form_params.append(('signature_ids', params['signature_ids']))
collection_formats['None'] = 'csv'
if 'statuses' in params:
form_params.append(('statuses', params['statuses']))
collection_formats['None'] = 'csv'
if 'token' in params:
form_params.append(('token', params['token']))
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/vnd.api+json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/vnd.api+json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IntegrationHipchat',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 49.716942 | 315 | 0.621992 |
d53400dc8e6cb4ddb94bc5fd53951ae2d5e702dc | 9,732 | py | Python | src/apps/yield_offset/app.py | originalpkbims/dash-apps | ea84cbd3e7227fb3de40cd16000838dd088343c7 | [
"MIT"
] | 1 | 2022-02-19T01:37:29.000Z | 2022-02-19T01:37:29.000Z | src/apps/yield_offset/app.py | originalpkbims/dash-apps | ea84cbd3e7227fb3de40cd16000838dd088343c7 | [
"MIT"
] | 12 | 2022-03-11T21:32:35.000Z | 2022-03-30T13:45:12.000Z | src/apps/yield_offset/app.py | originalpkbims/dash-apps | ea84cbd3e7227fb3de40cd16000838dd088343c7 | [
"MIT"
] | 3 | 2022-02-05T17:13:47.000Z | 2022-03-24T00:36:45.000Z | from datetime import datetime, timedelta
import json
import os
import dash
import dash_bootstrap_components as dbc
from dash import html, dcc
from dash.dependencies import Input, Output
import requests
from subgrounds.schema import TypeRef
from subgrounds.subgraph import SyntheticField
from subgrounds.subgrounds import Subgrounds
from web3.middleware import geth_poa_middleware
from ...constants import DISTRIBUTOR_ADDRESS
from ...util import get_polygon_web3, load_abi
SCAN_API_KEY = os.environ['POLYGONSCAN_API_KEY']
INFURA_PROJ_ID = os.environ['WEB3_INFURA_PROJECT_ID']
# Initialize web3
web3 = get_polygon_web3()
web3.middleware_onion.inject(geth_poa_middleware, layer=0)
# Load ABIs
DISTRIBUTOR_ABI = load_abi('distributor.json')
# Load Subgraphs
sg = Subgrounds()
markets = sg.load_subgraph('https://api.thegraph.com/subgraphs/name/0xplaygrounds/playgrounds-klima-markets')
metrics = sg.load_subgraph('https://api.thegraph.com/subgraphs/name/cujowolf/klima-graph')
# This is unecessary, but nice for brevity
market_query = markets.Query
Trade = markets.Trade
metric_query = metrics.Query
Metric = metrics.ProtocolMetric
# This is a synthetic field
Trade.datetime = SyntheticField(
lambda timestamp: str(datetime.fromtimestamp(timestamp)),
TypeRef.Named('String'),
Trade.timestamp,
)
Metric.datetime = SyntheticField(
lambda timestamp: str(datetime.fromtimestamp(timestamp)),
TypeRef.Named('String'),
Metric.timestamp,
)
trades = market_query.trades(
orderBy=Trade.timestamp,
orderDirection='desc',
first=500,
where=[
Trade.pair == '0x9803c7ae526049210a1725f7487af26fe2c24614'
]
)
last_5_metrics = metric_query.protocolMetrics(
orderBy=Metric.timestamp,
orderDirection='desc',
first=5
)
# Price & APY calculations
def get_blocks_per_epoch():
    """Read the rebase epoch length, in blocks, from the on-chain distributor.

    Builds a contract handle for the KLIMA distributor and invokes its
    ``epochLength`` view function (a read-only call, no transaction).
    """
    contract = web3.eth.contract(address=DISTRIBUTOR_ADDRESS, abi=DISTRIBUTOR_ABI)
    epoch_length_fn = contract.functions.epochLength()
    return epoch_length_fn.call()
def get_block_30_days_ago():
    """Return the Polygon block number closest to (but not after) 30 days ago.

    Queries PolygonScan's ``getblocknobytime`` endpoint with a Unix timestamp
    from 30 days back and ``closest=before``, so the returned block was mined
    on or before that moment.

    Returns:
        int: the historical block number.

    Raises:
        requests.HTTPError: if PolygonScan responds with an error status.
        KeyError / ValueError: if the JSON payload lacks a numeric 'result'.
    """
    days_ago = datetime.today() - timedelta(days=30)
    timestamp = int(days_ago.timestamp())
    # BUGFIX: the query string previously read "getblocknobytime×tamp=" --
    # the "&times" in "&timestamp" had been HTML-entity-decoded into the
    # multiplication sign, corrupting the URL. Restored to "&timestamp=".
    resp = requests.get(
        'https://api.polygonscan.com/api'
        f'?module=block&action=getblocknobytime&timestamp={timestamp}'
        f'&closest=before&apikey={SCAN_API_KEY}',
        timeout=30,  # don't hang the dashboard if PolygonScan stalls
    )
    # Fail loudly on HTTP errors instead of raising an opaque parse error.
    resp.raise_for_status()
    return int(resp.json()['result'])
def get_rebases_per_day(blocks_per_rebase):
    """Estimate how many rebases happen per day.

    Derives the average Polygon block time from the number of blocks mined
    over the last 30 days, converts it to seconds per rebase epoch, and
    returns how many such epochs fit into 24 hours.
    """
    start_block_num = get_block_30_days_ago()
    newest_block = web3.eth.get_block('latest')
    newest_block_num = newest_block['number']
    newest_block_ts = newest_block['timestamp']
    start_block_ts = web3.eth.get_block(start_block_num)['timestamp']
    # Average seconds per block across the 30-day window.
    blocks_elapsed = newest_block_num - start_block_num
    secs_per_block = (newest_block_ts - start_block_ts) / blocks_elapsed
    secs_per_rebase = blocks_per_rebase * secs_per_block
    return 24 / (secs_per_rebase / 60 / 60)
def get_avg_yield(days=5):
    """Return the mean per-rebase reward yield as a fraction (not a percent).

    Averages the ``nextEpochRebase`` values of the five most recent protocol
    metric records from the subgraph. NOTE(review): the ``days`` argument is
    currently unused -- the query is fixed to the last 5 records.
    """
    df = sg.query_df([last_5_metrics.nextEpochRebase])
    mean_pct = float(df.mean().values[0])
    # The subgraph reports a percentage; convert to a fraction for compounding.
    return mean_pct / 100
# TODO: finish implementing when Cujo updates the Subgraph to include MCO2
# def get_avg_cc_per_klima(days=5):
# cc_df = sg.query_df([last_5_metrics.treasuryRiskFreeValue, last_5_metrics.totalSupply])
# avg_cc = float(
# (cc_df['protocolMetrics_treasuryRiskFreeValue'] / cc_df['protocolMetrics_totalSupply']).mean()
# )
# return avg_cc
def get_avg_price(days=30):
    """Average closing price over the most recent ``days`` trades of the pair."""
    pair_filter = [
        Trade.pair == '0x9803c7ae526049210a1725f7487af26fe2c24614'
    ]
    recent_trades = market_query.trades(
        orderBy=Trade.timestamp,
        orderDirection='desc',
        first=days,
        where=pair_filter,
    )
    closes_df = sg.query_df([recent_trades.close])
    return closes_df.mean().values[0]
def get_data():
    """Gather the (avg_price, rebase_yield, rebases_per_day) dashboard tuple.

    Fans out to the subgraph and on-chain helpers; the result is cached in
    the hidden 'intermediate-value' div and consumed by the render callback.
    """
    avg_price = get_avg_price()
    avg_yield = get_avg_yield()
    epoch_blocks = get_blocks_per_epoch()
    daily_rebases = get_rebases_per_day(epoch_blocks)
    return avg_price, avg_yield, daily_rebases
data = get_data()
avg_price = data[0]
# Dashboard
app = dash.Dash(
__name__,
suppress_callback_exceptions=True,
title="KLIMA Offset Yield Calculator",
external_stylesheets=[dbc.themes.BOOTSTRAP],
meta_tags=[{
'name': 'viewport',
'content': 'width=device-width, initial-scale=1.0, maximum-scale=1.2, minimum-scale=0.5,'
}]
)
input_card = dbc.Card(
[
dbc.CardHeader(html.H2("Input Parameters")),
dbc.CardBody([
dbc.Row([
dbc.Col(
dbc.Card([
dbc.CardHeader("Monthly Carbon Emissions to Offset"),
dbc.CardBody([
dbc.InputGroup([
dbc.Input(placeholder="tonnes to offset", type="number", id="input-tonnes"),
dbc.InputGroupText(" tonnes"),
])
])
])
),
dbc.Col(
dbc.Card([
dbc.CardHeader("KLIMA => Carbon Offset Conversion Factor"),
dbc.CardBody([
dbc.InputGroup([
dbc.Select(
options=[
{
"label": "Intrinsic Value: 1 tonne per KLIMA (most conservative)",
"value": 1
},
# TODO: add option for current CC
{
"label": (
f"Market Value in BCT: {avg_price:,.2f} tonnes per KLIMA "
f"at recent prices (fluctuates with market activity)"
),
"value": avg_price
},
],
placeholder="How many tonnes do you assume each KLIMA is worth?",
id="input-conversion"
),
])
])
])
)
])
]),
]
)
growth_per_klima_card = dbc.Card(
[
dbc.CardBody([
dbc.CardHeader("Estimated Monthly Rebase Yield"),
dbc.CardBody("Loading...", id="monthly-return-per-klima")
]),
]
)
output_cards = [
dbc.Col(
dbc.Card(
[
dbc.CardHeader(html.H3(f"{x}x Monthly Emissions")),
dbc.CardBody("Loading...", id=f"klima-required-{x}x")
]
),
)
for x in [1, 2, 3]
]
app.layout = html.Div([
dbc.Row(dbc.Col(
dbc.Alert(
"WARNING: THIS TOOL IS STILL UNDER DEVELOPMENT!",
color="warning", style={'textAlign': 'center'}
)
)),
dbc.Row([
dbc.Col(
dbc.Card(
dbc.Row([
dbc.Col(html.Img(src='assets/KlimaDAO-Logo-Green.png', width=100, height=100), width=1),
dbc.Col(html.H1("Klima Infinity Yield-Based Offsetting Estimator", className='page-title'))
]),
body=True
)
),
dbc.Col(growth_per_klima_card, width=3)
]),
dbc.Row(dbc.Col(input_card)),
dbc.Row([
dbc.Col(
dbc.Card([
dbc.CardHeader(html.H2("Estimated Staked KLIMA to Offset Monthly Emissions with Rebase Yield")),
dbc.CardBody([dbc.Row([*output_cards])])
])
)
]),
# Hidden div inside the app that stores the intermediate value
html.Div(id='intermediate-value', style={'display': 'none'}, children=data),
dcc.Interval(
id="interval-component",
interval=60*60*1000, # 1 hour in milliseconds,
n_intervals=0
)
])
x_outputs = [
Output(f"klima-required-{x}x", "children") for x in [1, 2, 3]
]
@app.callback(
    Output('intermediate-value', 'children'),
    Input('interval-component', 'n_intervals')
)
def update_metrics(n):
    """Refresh the cached (price, yield, rebases/day) tuple on each interval tick.

    ``n`` (the tick count) is unused -- it only fires the callback. The
    returned tuple is stored in the hidden 'intermediate-value' div.
    """
    return get_data()
@app.callback(
    Output("monthly-return-per-klima", "children"),
    *x_outputs,
    Input("input-tonnes", "value"),
    Input("input-conversion", "value"),
    Input('intermediate-value', 'children')
)
def cb_render(*vals):
    """Render the monthly yield text and the 1x/2x/3x sKLIMA requirements.

    Inputs arrive positionally: (tonnes to offset, KLIMA->tonne conversion
    factor, cached (price, yield, rebases/day) tuple). Returns four strings,
    one per Output declared above.
    """
    tonnes_to_offset = vals[0]
    conversion_factor = vals[1]
    data = vals[2]
    # price is unpacked but unused here; only yield and rebase cadence matter.
    price, reward_yield, rebases_per_day = data
    # 30 day ROI
    # Compound the per-rebase yield over ~30 days' worth of rebases.
    monthly_roi = (1 + reward_yield) ** (30 * rebases_per_day) - 1
    monthly_roi_rounded = f"{round(monthly_roi * 100, 2)}% per month"
    if tonnes_to_offset is None or conversion_factor is None:
        # The user hasn't filled in both inputs yet -- show placeholders.
        return (monthly_roi_rounded, "--", "--", "--")
    # Principal whose monthly yield covers the target tonnage, in sKLIMA terms.
    klima_principal = round((tonnes_to_offset / monthly_roi) / float(conversion_factor), 2)
    return (
        monthly_roi_rounded, str(klima_principal) + ' sKLIMA',
        str(2 * klima_principal) + ' sKLIMA', str(3 * klima_principal) + ' sKLIMA'
    )
# For Gunicorn to reference
server = app.server  # WSGI entry point: `gunicorn module:server`
if __name__ == '__main__':
    # Development entry point only. NOTE(review): debug=True enables the
    # reloader/debugger -- confirm this path is never used in production.
    app.run_server(debug=True, host='0.0.0.0')
| 29.225225 | 152 | 0.586827 |
2788aa077ef4f6529f1db7646eee1f5a6dd43cb0 | 25,715 | py | Python | tests/modeltests/invalid_models/models.py | kvbik/django | a507e552af4e7ac3080282e690e2e33c6d34570d | [
"BSD-3-Clause"
] | 1 | 2016-05-09T15:16:24.000Z | 2016-05-09T15:16:24.000Z | tests/modeltests/invalid_models/models.py | kvbik/django | a507e552af4e7ac3080282e690e2e33c6d34570d | [
"BSD-3-Clause"
] | null | null | null | tests/modeltests/invalid_models/models.py | kvbik/django | a507e552af4e7ac3080282e690e2e33c6d34570d | [
"BSD-3-Clause"
] | null | null | null | """
26. Invalid models
This example exists purely to point out errors in models.
"""
from django.contrib.contenttypes import generic
from django.db import models
class FieldErrors(models.Model):
    """Fixture model: every field below is DELIBERATELY invalid.

    The model validator is expected to emit one or more errors per field;
    the expected messages live in the ``model_errors`` string at the bottom
    of this module. Do NOT "fix" these definitions.
    """
    charfield = models.CharField()  # missing required max_length
    charfield2 = models.CharField(max_length=-1)  # max_length must be positive
    charfield3 = models.CharField(max_length="bad")  # max_length must be an int
    decimalfield = models.DecimalField()  # missing max_digits / decimal_places
    decimalfield2 = models.DecimalField(max_digits=-1, decimal_places=-1)  # both negative
    decimalfield3 = models.DecimalField(max_digits="bad", decimal_places="bad")  # non-int values
    filefield = models.FileField()  # missing required upload_to
    choices = models.CharField(max_length=10, choices='bad')  # choices not iterable of pairs
    choices2 = models.CharField(max_length=10, choices=[(1,2,3),(1,2,3)])  # 3-tuples, not 2-tuples
    index = models.CharField(max_length=10, db_index='bad')  # db_index must be None/True/False
    field_ = models.CharField(max_length=10)  # trailing underscore in field name is disallowed
    nullbool = models.BooleanField(null=True)  # BooleanField cannot be null here
class Target(models.Model):
tgt_safe = models.CharField(max_length=10)
clash1 = models.CharField(max_length=10)
clash2 = models.CharField(max_length=10)
clash1_set = models.CharField(max_length=10)
class Clash1(models.Model):
src_safe = models.CharField(max_length=10)
foreign = models.ForeignKey(Target)
m2m = models.ManyToManyField(Target)
class Clash2(models.Model):
src_safe = models.CharField(max_length=10)
foreign_1 = models.ForeignKey(Target, related_name='id')
foreign_2 = models.ForeignKey(Target, related_name='src_safe')
m2m_1 = models.ManyToManyField(Target, related_name='id')
m2m_2 = models.ManyToManyField(Target, related_name='src_safe')
class Target2(models.Model):
clash3 = models.CharField(max_length=10)
foreign_tgt = models.ForeignKey(Target)
clashforeign_set = models.ForeignKey(Target)
m2m_tgt = models.ManyToManyField(Target)
clashm2m_set = models.ManyToManyField(Target)
class Clash3(models.Model):
src_safe = models.CharField(max_length=10)
foreign_1 = models.ForeignKey(Target2, related_name='foreign_tgt')
foreign_2 = models.ForeignKey(Target2, related_name='m2m_tgt')
m2m_1 = models.ManyToManyField(Target2, related_name='foreign_tgt')
m2m_2 = models.ManyToManyField(Target2, related_name='m2m_tgt')
class ClashForeign(models.Model):
foreign = models.ForeignKey(Target2)
class ClashM2M(models.Model):
m2m = models.ManyToManyField(Target2)
class SelfClashForeign(models.Model):
src_safe = models.CharField(max_length=10)
selfclashforeign = models.CharField(max_length=10)
selfclashforeign_set = models.ForeignKey("SelfClashForeign")
foreign_1 = models.ForeignKey("SelfClashForeign", related_name='id')
foreign_2 = models.ForeignKey("SelfClashForeign", related_name='src_safe')
class ValidM2M(models.Model):
src_safe = models.CharField(max_length=10)
validm2m = models.CharField(max_length=10)
# M2M fields are symmetrical by default. Symmetrical M2M fields
# on self don't require a related accessor, so many potential
# clashes are avoided.
validm2m_set = models.ManyToManyField("self")
m2m_1 = models.ManyToManyField("self", related_name='id')
m2m_2 = models.ManyToManyField("self", related_name='src_safe')
m2m_3 = models.ManyToManyField('self')
m2m_4 = models.ManyToManyField('self')
class SelfClashM2M(models.Model):
src_safe = models.CharField(max_length=10)
selfclashm2m = models.CharField(max_length=10)
# Non-symmetrical M2M fields _do_ have related accessors, so
# there is potential for clashes.
selfclashm2m_set = models.ManyToManyField("self", symmetrical=False)
m2m_1 = models.ManyToManyField("self", related_name='id', symmetrical=False)
m2m_2 = models.ManyToManyField("self", related_name='src_safe', symmetrical=False)
m2m_3 = models.ManyToManyField('self', symmetrical=False)
m2m_4 = models.ManyToManyField('self', symmetrical=False)
class Model(models.Model):
    """But it's valid to call a model Model.

    Guards against the checker tripping over a model class that shadows the
    name ``Model``; neither this class nor ``Car`` appears in the
    ``model_errors`` expectations below, so both should validate cleanly.
    """
    year = models.PositiveIntegerField() #1960
    make = models.CharField(max_length=10) #Aston Martin
    name = models.CharField(max_length=10) #DB 4 GT
class Car(models.Model):
    colour = models.CharField(max_length=5)
    model = models.ForeignKey(Model)  # FK to the model literally named "Model"
class MissingRelations(models.Model):
rel1 = models.ForeignKey("Rel1")
rel2 = models.ManyToManyField("Rel2")
class MissingManualM2MModel(models.Model):
name = models.CharField(max_length=5)
missing_m2m = models.ManyToManyField(Model, through="MissingM2MModel")
class Person(models.Model):
name = models.CharField(max_length=5)
class Group(models.Model):
    """Fixture: exercises invalid ManyToMany ``through`` configurations.

    ``primary``/``secondary`` both route through Membership (two manual m2m
    relations via the same intermediary -- not permitted), and ``tertiary``
    uses RelationshipDoubleFK, whose two FKs to Person are ambiguous; see the
    matching entries in ``model_errors`` below.
    """
    name = models.CharField(max_length=5)
    primary = models.ManyToManyField(Person, through="Membership", related_name="primary")
    secondary = models.ManyToManyField(Person, through="Membership", related_name="secondary")
    tertiary = models.ManyToManyField(Person, through="RelationshipDoubleFK", related_name="tertiary")
class GroupTwo(models.Model):
name = models.CharField(max_length=5)
primary = models.ManyToManyField(Person, through="Membership")
secondary = models.ManyToManyField(Group, through="MembershipMissingFK")
class Membership(models.Model):
person = models.ForeignKey(Person)
group = models.ForeignKey(Group)
not_default_or_null = models.CharField(max_length=5)
class MembershipMissingFK(models.Model):
person = models.ForeignKey(Person)
class PersonSelfRefM2M(models.Model):
name = models.CharField(max_length=5)
friends = models.ManyToManyField('self', through="Relationship")
too_many_friends = models.ManyToManyField('self', through="RelationshipTripleFK")
class PersonSelfRefM2MExplicit(models.Model):
name = models.CharField(max_length=5)
friends = models.ManyToManyField('self', through="ExplicitRelationship", symmetrical=True)
class Relationship(models.Model):
first = models.ForeignKey(PersonSelfRefM2M, related_name="rel_from_set")
second = models.ForeignKey(PersonSelfRefM2M, related_name="rel_to_set")
date_added = models.DateTimeField()
class ExplicitRelationship(models.Model):
first = models.ForeignKey(PersonSelfRefM2MExplicit, related_name="rel_from_set")
second = models.ForeignKey(PersonSelfRefM2MExplicit, related_name="rel_to_set")
date_added = models.DateTimeField()
class RelationshipTripleFK(models.Model):
first = models.ForeignKey(PersonSelfRefM2M, related_name="rel_from_set_2")
second = models.ForeignKey(PersonSelfRefM2M, related_name="rel_to_set_2")
third = models.ForeignKey(PersonSelfRefM2M, related_name="too_many_by_far")
date_added = models.DateTimeField()
class RelationshipDoubleFK(models.Model):
first = models.ForeignKey(Person, related_name="first_related_name")
second = models.ForeignKey(Person, related_name="second_related_name")
third = models.ForeignKey(Group, related_name="rel_to_set")
date_added = models.DateTimeField()
class AbstractModel(models.Model):
name = models.CharField(max_length=10)
class Meta:
abstract = True
class AbstractRelationModel(models.Model):
fk1 = models.ForeignKey('AbstractModel')
fk2 = models.ManyToManyField('AbstractModel')
class UniqueM2M(models.Model):
""" Model to test for unique ManyToManyFields, which are invalid. """
unique_people = models.ManyToManyField(Person, unique=True)
class NonUniqueFKTarget1(models.Model):
""" Model to test for non-unique FK target in yet-to-be-defined model: expect an error """
tgt = models.ForeignKey('FKTarget', to_field='bad')
class UniqueFKTarget1(models.Model):
""" Model to test for unique FK target in yet-to-be-defined model: expect no error """
tgt = models.ForeignKey('FKTarget', to_field='good')
class FKTarget(models.Model):
bad = models.IntegerField()
good = models.IntegerField(unique=True)
class NonUniqueFKTarget2(models.Model):
""" Model to test for non-unique FK target in previously seen model: expect an error """
tgt = models.ForeignKey(FKTarget, to_field='bad')
class UniqueFKTarget2(models.Model):
""" Model to test for unique FK target in previously seen model: expect no error """
tgt = models.ForeignKey(FKTarget, to_field='good')
class NonExistingOrderingWithSingleUnderscore(models.Model):
class Meta:
ordering = ("does_not_exist",)
class InvalidSetNull(models.Model):
    # Deliberately invalid: on_delete=SET_NULL requires null=True on the FK.
    fk = models.ForeignKey('self', on_delete=models.SET_NULL)
class InvalidSetDefault(models.Model):
    # Deliberately invalid: on_delete=SET_DEFAULT requires a default value.
    fk = models.ForeignKey('self', on_delete=models.SET_DEFAULT)
class Tag(models.Model):
name = models.CharField("name", max_length=20)
class TaggedObject(models.Model):
object_id = models.PositiveIntegerField("Object ID")
tag = models.ForeignKey(Tag)
content_object = generic.GenericForeignKey()
class UserTaggedObject(models.Model):
object_tag = models.ForeignKey(TaggedObject)
class ArticleAttachment(models.Model):
tags = generic.GenericRelation(TaggedObject)
user_tags = generic.GenericRelation(UserTaggedObject)
model_errors = """invalid_models.fielderrors: "charfield": CharFields require a "max_length" attribute that is a positive integer.
invalid_models.fielderrors: "charfield2": CharFields require a "max_length" attribute that is a positive integer.
invalid_models.fielderrors: "charfield3": CharFields require a "max_length" attribute that is a positive integer.
invalid_models.fielderrors: "decimalfield": DecimalFields require a "decimal_places" attribute that is a non-negative integer.
invalid_models.fielderrors: "decimalfield": DecimalFields require a "max_digits" attribute that is a positive integer.
invalid_models.fielderrors: "decimalfield2": DecimalFields require a "decimal_places" attribute that is a non-negative integer.
invalid_models.fielderrors: "decimalfield2": DecimalFields require a "max_digits" attribute that is a positive integer.
invalid_models.fielderrors: "decimalfield3": DecimalFields require a "decimal_places" attribute that is a non-negative integer.
invalid_models.fielderrors: "decimalfield3": DecimalFields require a "max_digits" attribute that is a positive integer.
invalid_models.fielderrors: "filefield": FileFields require an "upload_to" attribute.
invalid_models.fielderrors: "choices": "choices" should be iterable (e.g., a tuple or list).
invalid_models.fielderrors: "choices2": "choices" should be a sequence of two-tuples.
invalid_models.fielderrors: "choices2": "choices" should be a sequence of two-tuples.
invalid_models.fielderrors: "index": "db_index" should be either None, True or False.
invalid_models.fielderrors: "field_": Field names cannot end with underscores, because this would lead to ambiguous queryset filters.
invalid_models.fielderrors: "nullbool": BooleanFields do not accept null values. Use a NullBooleanField instead.
invalid_models.clash1: Accessor for field 'foreign' clashes with field 'Target.clash1_set'. Add a related_name argument to the definition for 'foreign'.
invalid_models.clash1: Accessor for field 'foreign' clashes with related m2m field 'Target.clash1_set'. Add a related_name argument to the definition for 'foreign'.
invalid_models.clash1: Reverse query name for field 'foreign' clashes with field 'Target.clash1'. Add a related_name argument to the definition for 'foreign'.
invalid_models.clash1: Accessor for m2m field 'm2m' clashes with field 'Target.clash1_set'. Add a related_name argument to the definition for 'm2m'.
invalid_models.clash1: Accessor for m2m field 'm2m' clashes with related field 'Target.clash1_set'. Add a related_name argument to the definition for 'm2m'.
invalid_models.clash1: Reverse query name for m2m field 'm2m' clashes with field 'Target.clash1'. Add a related_name argument to the definition for 'm2m'.
invalid_models.clash2: Accessor for field 'foreign_1' clashes with field 'Target.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash2: Accessor for field 'foreign_1' clashes with related m2m field 'Target.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash2: Reverse query name for field 'foreign_1' clashes with field 'Target.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash2: Reverse query name for field 'foreign_1' clashes with related m2m field 'Target.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash2: Accessor for field 'foreign_2' clashes with related m2m field 'Target.src_safe'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash2: Reverse query name for field 'foreign_2' clashes with related m2m field 'Target.src_safe'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash2: Accessor for m2m field 'm2m_1' clashes with field 'Target.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash2: Accessor for m2m field 'm2m_1' clashes with related field 'Target.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash2: Reverse query name for m2m field 'm2m_1' clashes with field 'Target.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash2: Reverse query name for m2m field 'm2m_1' clashes with related field 'Target.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash2: Accessor for m2m field 'm2m_2' clashes with related field 'Target.src_safe'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clash2: Reverse query name for m2m field 'm2m_2' clashes with related field 'Target.src_safe'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clash3: Accessor for field 'foreign_1' clashes with field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash3: Accessor for field 'foreign_1' clashes with related m2m field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash3: Reverse query name for field 'foreign_1' clashes with field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash3: Reverse query name for field 'foreign_1' clashes with related m2m field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash3: Accessor for field 'foreign_2' clashes with m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash3: Accessor for field 'foreign_2' clashes with related m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash3: Reverse query name for field 'foreign_2' clashes with m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash3: Reverse query name for field 'foreign_2' clashes with related m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash3: Accessor for m2m field 'm2m_1' clashes with field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash3: Accessor for m2m field 'm2m_1' clashes with related field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash3: Reverse query name for m2m field 'm2m_1' clashes with field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash3: Reverse query name for m2m field 'm2m_1' clashes with related field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash3: Accessor for m2m field 'm2m_2' clashes with m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clash3: Accessor for m2m field 'm2m_2' clashes with related field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clash3: Reverse query name for m2m field 'm2m_2' clashes with m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clash3: Reverse query name for m2m field 'm2m_2' clashes with related field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clashforeign: Accessor for field 'foreign' clashes with field 'Target2.clashforeign_set'. Add a related_name argument to the definition for 'foreign'.
invalid_models.clashm2m: Accessor for m2m field 'm2m' clashes with m2m field 'Target2.clashm2m_set'. Add a related_name argument to the definition for 'm2m'.
invalid_models.target2: Accessor for field 'foreign_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'foreign_tgt'.
invalid_models.target2: Accessor for field 'foreign_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'foreign_tgt'.
invalid_models.target2: Accessor for field 'foreign_tgt' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'foreign_tgt'.
invalid_models.target2: Accessor for field 'clashforeign_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashforeign_set'.
invalid_models.target2: Accessor for field 'clashforeign_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashforeign_set'.
invalid_models.target2: Accessor for field 'clashforeign_set' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'clashforeign_set'.
invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'.
invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'.
invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'.
invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'.
invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'.
invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'.
invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'.
invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'.
invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'.
invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'.
invalid_models.selfclashforeign: Accessor for field 'selfclashforeign_set' clashes with field 'SelfClashForeign.selfclashforeign_set'. Add a related_name argument to the definition for 'selfclashforeign_set'.
invalid_models.selfclashforeign: Reverse query name for field 'selfclashforeign_set' clashes with field 'SelfClashForeign.selfclashforeign'. Add a related_name argument to the definition for 'selfclashforeign_set'.
invalid_models.selfclashforeign: Accessor for field 'foreign_1' clashes with field 'SelfClashForeign.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.selfclashforeign: Reverse query name for field 'foreign_1' clashes with field 'SelfClashForeign.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.selfclashforeign: Accessor for field 'foreign_2' clashes with field 'SelfClashForeign.src_safe'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.selfclashforeign: Reverse query name for field 'foreign_2' clashes with field 'SelfClashForeign.src_safe'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.selfclashm2m: Accessor for m2m field 'selfclashm2m_set' clashes with m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'selfclashm2m_set'.
invalid_models.selfclashm2m: Reverse query name for m2m field 'selfclashm2m_set' clashes with field 'SelfClashM2M.selfclashm2m'. Add a related_name argument to the definition for 'selfclashm2m_set'.
invalid_models.selfclashm2m: Accessor for m2m field 'selfclashm2m_set' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'selfclashm2m_set'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_1' clashes with field 'SelfClashM2M.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_2' clashes with field 'SelfClashM2M.src_safe'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.selfclashm2m: Reverse query name for m2m field 'm2m_1' clashes with field 'SelfClashM2M.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.selfclashm2m: Reverse query name for m2m field 'm2m_2' clashes with field 'SelfClashM2M.src_safe'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_3' clashes with m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_3'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_3' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_3'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_3' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_3'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_4' clashes with m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_4'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_4' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_4'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_4' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_4'.
invalid_models.selfclashm2m: Reverse query name for m2m field 'm2m_3' clashes with field 'SelfClashM2M.selfclashm2m'. Add a related_name argument to the definition for 'm2m_3'.
invalid_models.selfclashm2m: Reverse query name for m2m field 'm2m_4' clashes with field 'SelfClashM2M.selfclashm2m'. Add a related_name argument to the definition for 'm2m_4'.
invalid_models.missingrelations: 'rel1' has a relation with model Rel1, which has either not been installed or is abstract.
invalid_models.missingrelations: 'rel2' has an m2m relation with model Rel2, which has either not been installed or is abstract.
invalid_models.grouptwo: 'primary' is a manually-defined m2m relation through model Membership, which does not have foreign keys to Person and GroupTwo
invalid_models.grouptwo: 'secondary' is a manually-defined m2m relation through model MembershipMissingFK, which does not have foreign keys to Group and GroupTwo
invalid_models.missingmanualm2mmodel: 'missing_m2m' specifies an m2m relation through model MissingM2MModel, which has not been installed
invalid_models.group: The model Group has two manually-defined m2m relations through the model Membership, which is not permitted. Please consider using an extra field on your intermediary model instead.
invalid_models.group: Intermediary model RelationshipDoubleFK has more than one foreign key to Person, which is ambiguous and is not permitted.
invalid_models.personselfrefm2m: Many-to-many fields with intermediate tables cannot be symmetrical.
invalid_models.personselfrefm2m: Intermediary model RelationshipTripleFK has more than two foreign keys to PersonSelfRefM2M, which is ambiguous and is not permitted.
invalid_models.personselfrefm2mexplicit: Many-to-many fields with intermediate tables cannot be symmetrical.
invalid_models.abstractrelationmodel: 'fk1' has a relation with model AbstractModel, which has either not been installed or is abstract.
invalid_models.abstractrelationmodel: 'fk2' has an m2m relation with model AbstractModel, which has either not been installed or is abstract.
invalid_models.uniquem2m: ManyToManyFields cannot be unique. Remove the unique argument on 'unique_people'.
invalid_models.nonuniquefktarget1: Field 'bad' under model 'FKTarget' must have a unique=True constraint.
invalid_models.nonuniquefktarget2: Field 'bad' under model 'FKTarget' must have a unique=True constraint.
invalid_models.nonexistingorderingwithsingleunderscore: "ordering" refers to "does_not_exist", a field that doesn't exist.
invalid_models.invalidsetnull: 'fk' specifies on_delete=SET_NULL, but cannot be null.
invalid_models.invalidsetdefault: 'fk' specifies on_delete=SET_DEFAULT, but has no default value.
invalid_models.articleattachment: Model 'UserTaggedObject' must have a GenericForeignKey in order to create a GenericRelation that points to it.
"""
| 74.752907 | 214 | 0.796617 |
eb4bd3ef93523276a029ce74696a58d6dbda7df8 | 1,791 | py | Python | tests/infrastructure/test_hashing_clients.py | enchainte/enchainte-sdk-py | d7e6803cb941ae929bca9bb00584aa5b4853689a | [
"MIT"
] | 1 | 2021-11-21T22:02:49.000Z | 2021-11-21T22:02:49.000Z | tests/infrastructure/test_hashing_clients.py | enchainte/enchainte-sdk-py | d7e6803cb941ae929bca9bb00584aa5b4853689a | [
"MIT"
] | 1 | 2021-09-16T20:19:04.000Z | 2021-09-16T20:19:04.000Z | tests/infrastructure/test_hashing_clients.py | enchainte/enchainte-sdk-py | d7e6803cb941ae929bca9bb00584aa5b4853689a | [
"MIT"
] | 1 | 2021-12-10T10:22:30.000Z | 2021-12-10T10:22:30.000Z | from unittest import TestCase
from bloock.infrastructure.hashing.blake2b import Blake2b
from bloock.infrastructure.hashing.keccak import Keccak
class Blake2bTestCase(TestCase):
    """Known-answer tests for the Blake2b hashing wrapper."""

    def setUp(self):
        self.blake = Blake2b()

    def test_blake_generate_hash_64_zeros_string(self):
        # Note: the input is the ASCII string of 64 '0' characters, not
        # 32 zero bytes.
        payload = b'0000000000000000000000000000000000000000000000000000000000000000'
        digest = self.blake.generateHash(payload)
        self.assertEqual(
            digest,
            '681df247e1ece8365db91166ed273590019df392004d2ea25543335c71bbe2d2',
            'Hashes do not match')

    def test_blake_generate_hash_string(self):
        payload = b'testing blake'
        digest = self.blake.generateHash(payload)
        self.assertEqual(
            digest,
            'bbe426afe3fae78c3d3e25502a3e197762ada886da94c1b8104a1984c8c4d886',
            'Hashes do not match')
class KeccakTestCase(TestCase):
    """Known-answer tests for the Keccak hashing wrapper."""

    def setUp(self):
        self.keccak = Keccak()

    def test_keccak_generate_hash_64_zeros_hexa(self):
        # 32 literal zero bytes (decoded from hex), unlike the
        # string-of-zeros test below.
        payload = bytes.fromhex(
            '0000000000000000000000000000000000000000000000000000000000000000')
        digest = self.keccak.generateHash(payload)
        self.assertEqual(
            digest,
            '290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563',
            'Hashes do not match')

    def test_keccak_generate_hash_64_zeros_string(self):
        payload = b'0000000000000000000000000000000000000000000000000000000000000000'
        digest = self.keccak.generateHash(payload)
        self.assertEqual(
            digest,
            'd874d9e5ad41e13e8908ab82802618272c3433171cdc3d634f3b1ad0e6742827',
            'Hashes do not match')

    def test_keccak_generate_hash_string(self):
        payload = b'testing keccak'
        digest = self.keccak.generateHash(payload)
        self.assertEqual(
            digest,
            '7e5e383e8e70e55cdccfccf40dfc5d4bed935613dffc806b16b4675b555be139',
            'Hashes do not match')
| 38.106383 | 86 | 0.731993 |
0140812d8b0befa02724cb53cf80ee29a3c0da43 | 10,805 | py | Python | pypy/module/_weakref/test/test_weakref.py | woodrow/pyoac | b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7 | [
"MIT"
] | 1 | 2019-05-27T00:58:46.000Z | 2019-05-27T00:58:46.000Z | pypy/module/_weakref/test/test_weakref.py | woodrow/pyoac | b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7 | [
"MIT"
] | null | null | null | pypy/module/_weakref/test/test_weakref.py | woodrow/pyoac | b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7 | [
"MIT"
] | null | null | null | from pypy.conftest import gettestobjspace
class AppTestWeakref(object):
    """App-level tests for pypy's _weakref module (Python 2 syntax).

    Each test method's source is executed inside an object space created
    with the ``_weakref`` module enabled; the ``raises`` helper comes
    from pypy's app-test runner.  ``gc.collect()`` follows each ``del``
    because pypy does not free objects by reference counting.
    """

    def setup_class(cls):
        space = gettestobjspace(usemodules=('_weakref',))
        cls.space = space

    def test_simple(self):
        # Basic ref lifecycle: count, __weakref__, and clearing on death.
        import _weakref, gc
        class A(object):
            pass
        a = A()
        assert _weakref.getweakrefcount(a) == 0
        ref = _weakref.ref(a)
        assert ref() is a
        assert a.__weakref__ is ref
        assert _weakref.getweakrefcount(a) == 1
        del a
        gc.collect()
        assert ref() is None

    def test_callback(self):
        import _weakref, gc
        class A(object):
            pass
        a1 = A()
        a2 = A()
        def callback(ref):
            # By callback time the referent must already be cleared.
            a2.ref = ref()
        ref1 = _weakref.ref(a1, callback)
        ref2 = _weakref.ref(a1)
        assert _weakref.getweakrefcount(a1) == 2
        del a1
        gc.collect()
        assert ref1() is None
        assert a2.ref is None

    def test_callback_order(self):
        # Two callbacks on the same object: the final value 42 pins that
        # callback1 (created first) runs last, i.e. newest-first order.
        import _weakref, gc
        class A(object):
            pass
        a1 = A()
        a2 = A()
        def callback1(ref):
            a2.x = 42
        def callback2(ref):
            a2.x = 43
        ref1 = _weakref.ref(a1, callback1)
        ref2 = _weakref.ref(a1, callback2)
        del a1
        gc.collect()
        assert a2.x == 42

    def test_dont_callback_if_weakref_dead(self):
        # A callback must not fire if its weakref died before the referent.
        import _weakref, gc
        class A(object):
            pass
        a1 = A()
        a1.x = 40
        a2 = A()
        def callback(ref):
            a1.x = 42
        assert _weakref.getweakrefcount(a2) == 0
        ref = _weakref.ref(a2, callback)
        assert _weakref.getweakrefcount(a2) == 1
        ref = None
        gc.collect()
        assert _weakref.getweakrefcount(a2) == 0
        a2 = None
        gc.collect()
        assert a1.x == 40

    def test_callback_cannot_ressurect(self):
        # Dereferencing a sibling weakref inside a callback yields None:
        # the dying object cannot be resurrected through it.
        import _weakref, gc
        class A(object):
            pass
        a = A()
        alive = A()
        alive.a = 1
        def callback(ref2):
            alive.a = ref1()
        ref1 = _weakref.ref(a, callback)
        ref2 = _weakref.ref(a, callback)
        del a
        gc.collect()
        assert alive.a is None

    def test_weakref_reusing(self):
        # Plain refs without callbacks are cached and reused; a subclass
        # instance is a fresh object of that subclass.
        import _weakref, gc
        class A(object):
            pass
        a = A()
        ref1 = _weakref.ref(a)
        ref2 = _weakref.ref(a)
        assert ref1 is ref2
        class wref(_weakref.ref):
            pass
        wref1 = wref(a)
        assert isinstance(wref1, wref)

    def test_correct_weakrefcount_after_death(self):
        # Because plain refs are shared, two ref() calls still count as
        # one weakref; the count drops to 0 only after both names die.
        import _weakref, gc
        class A(object):
            pass
        a = A()
        ref1 = _weakref.ref(a)
        ref2 = _weakref.ref(a)
        assert _weakref.getweakrefcount(a) == 1
        del ref1
        gc.collect()
        assert _weakref.getweakrefcount(a) == 1
        del ref2
        gc.collect()
        assert _weakref.getweakrefcount(a) == 0

    def test_weakref_equality(self):
        # Live refs compare via their referents' __eq__; once either
        # referent dies, equality degrades to identity (so != holds).
        import _weakref, gc
        class A(object):
            def __eq__(self, other):
                return True
        a1 = A()
        a2 = A()
        ref1 = _weakref.ref(a1)
        ref2 = _weakref.ref(a2)
        assert ref1 == ref2
        del a1
        gc.collect()
        assert not ref1 == ref2
        assert ref1 != ref2
        del a2
        gc.collect()
        assert not ref1 == ref2
        assert ref1 != ref2

    def test_getweakrefs(self):
        import _weakref, gc
        class A(object):
            pass
        a = A()
        assert _weakref.getweakrefs(a) == []
        assert _weakref.getweakrefs(None) == []
        ref1 = _weakref.ref(a)
        assert _weakref.getweakrefs(a) == [ref1]

    def test_hashing(self):
        # A ref's hash is taken from the referent and survives its death,
        # but hashing a never-hashed dead ref raises TypeError.
        import _weakref, gc
        class A(object):
            def __hash__(self):
                return 42
        a = A()
        w = _weakref.ref(a)
        assert hash(a) == hash(w)
        del a
        gc.collect()
        assert hash(w) == 42
        w = _weakref.ref(A())
        gc.collect()
        raises(TypeError, hash, w)

    def test_weakref_subclassing(self):
        # Subclass refs are not cached in __weakref__; the plain ref made
        # afterwards takes that slot, and all refs clear on death while
        # the callbacks still fire.
        import _weakref, gc
        class A(object):
            pass
        class Ref(_weakref.ref):
            pass
        def callable(ref):
            b.a = 42
        a = A()
        b = A()
        b.a = 1
        w = Ref(a, callable)
        assert a.__weakref__ is w
        assert b.__weakref__ is None
        w1 = _weakref.ref(a)
        w2 = _weakref.ref(a, callable)
        assert a.__weakref__ is w1
        del a
        gc.collect()
        assert w1() is None
        assert w() is None
        assert w2() is None
        assert b.a == 42

    def test_function_weakrefable(self):
        import _weakref, gc
        def f(x):
            return 42
        wf = _weakref.ref(f)
        assert wf()(63) == 42
        del f
        gc.collect()
        assert wf() is None

    def test_method_weakrefable(self):
        # Python 2: A.f is an unbound method, A().f a bound method; both
        # are weakref-able method objects.
        import _weakref, gc
        class A(object):
            def f(self):
                return 42
        a = A()
        meth = A.f
        w_unbound = _weakref.ref(meth)
        assert w_unbound()(A()) == 42
        meth = A().f
        w_bound = _weakref.ref(meth)
        assert w_bound()() == 42
        del meth
        gc.collect()
        assert w_unbound() is None
        assert w_bound() is None

    def test_set_weakrefable(self):
        import _weakref, gc
        s = set([1, 2, 3, 4])
        w = _weakref.ref(s)
        assert w() is s
        del s
        gc.collect()
        assert w() is None

    def test_generator_weakrefable(self):
        # Python 2 generator protocol (.next()); dereferencing and the
        # original name drive the same generator object.
        import _weakref, gc
        def f(x):
            for i in range(x):
                yield i
        g = f(10)
        w = _weakref.ref(g)
        r = w().next()
        assert r == 0
        r = g.next()
        assert r == 1
        del g
        gc.collect()
        assert w() is None

    def test_weakref_subclass_with_del(self):
        # A ref subclass with an app-level __del__ must still clear and
        # unregister correctly when the ref itself dies first.
        import _weakref, gc
        class Ref(_weakref.ref):
            def __del__(self):
                b.a = 42
        class A(object):
            pass
        a = A()
        b = A()
        b.a = 1
        w = Ref(a)
        del w
        gc.collect()
        assert b.a == 42
        if _weakref.getweakrefcount(a) > 0:
            # the following can crash if the presence of the applevel __del__
            # leads to the fact that the __del__ of _weakref.ref is not called.
            assert _weakref.getweakrefs(a)[0]() is a

    def test_buggy_case(self):
        # Collection may need several gc passes; only check the ref once
        # the finalizer has demonstrably run.
        import gc, weakref
        gone = []
        class A(object):
            def __del__(self):
                gone.append(True)
        a = A()
        w = weakref.ref(a)
        del a
        tries = 5
        for i in range(5):
            if not gone:
                gc.collect()
        if gone:
            a1 = w()
            assert a1 is None

    def test_del_and_callback_and_id(self):
        # Both the __del__ and the ref callback must observe already
        # cleared weakrefs, and __del__ still sees the original id.
        import gc, weakref
        seen_del = []
        class A(object):
            def __del__(self):
                seen_del.append(id(self))
                seen_del.append(w1() is None)
                seen_del.append(w2() is None)
        seen_callback = []
        def callback(r):
            seen_callback.append(r is w2)
            seen_callback.append(w1() is None)
            seen_callback.append(w2() is None)
        a = A()
        w1 = weakref.ref(a)
        w2 = weakref.ref(a, callback)
        aid = id(a)
        del a
        for i in range(5):
            gc.collect()
        if seen_del:
            assert seen_del == [aid, True, True]
        if seen_callback:
            assert seen_callback == [True, True, True]
class AppTestProxy(object):
    """App-level tests for _weakref.proxy / ProxyType (Python 2 syntax).

    Runs under pypy's app-test machinery like AppTestWeakref above;
    ``raises`` is supplied by the runner.
    """

    def setup_class(cls):
        space = gettestobjspace(usemodules=('_weakref',))
        cls.space = space

    def test_simple(self):
        # Attribute access and str() pass through; a proxy of a
        # non-callable object is not callable.
        import _weakref, gc
        class A(object):
            def __init__(self, x):
                self.x = x
        a = A(1)
        p = _weakref.proxy(a)
        assert p.x == 1
        assert str(p) == str(a)
        raises(TypeError, p)

    def test_caching(self):
        # Proxies without callbacks are cached per referent.
        import _weakref, gc
        class A(object): pass
        a = A()
        assert _weakref.proxy(a) is _weakref.proxy(a)

    def test_callable_proxy(self):
        # Calling through a proxy of a class constructs instances; calling
        # through a proxy of a callable instance invokes __call__.
        import _weakref, gc
        class A(object):
            def __call__(self):
                global_a.x = 1
        global_a = A()
        global_a.x = 41
        A_ = _weakref.proxy(A)
        a = A_()
        assert isinstance(a, A)
        a_ = _weakref.proxy(a)
        a_()
        assert global_a.x == 1

    def test_callable_proxy_type(self):
        import _weakref, gc
        class Callable(object):
            def __call__(self, x):
                pass
        o = Callable()
        ref1 = _weakref.proxy(o)
        assert type(ref1) is _weakref.CallableProxyType

    def test_dont_create_directly(self):
        # The proxy types cannot be instantiated by user code.
        import _weakref, gc
        raises(TypeError, _weakref.ProxyType, [])
        raises(TypeError, _weakref.CallableProxyType, [])

    def test_dont_hash(self):
        # Unlike refs, proxies are unhashable.
        import _weakref, gc
        class A(object):
            pass
        a = A()
        p = _weakref.proxy(a)
        raises(TypeError, hash, p)

    def test_subclassing_not_allowed(self):
        import _weakref, gc
        def tryit():
            class A(_weakref.ProxyType):
                pass
            return A
        raises(TypeError, tryit)

    def test_repr(self):
        # repr() must mention the referent while alive and say "dead"
        # afterwards, for both ref and proxy flavours.
        import _weakref, gc
        for kind in ('ref', 'proxy'):
            def foobaz():
                "A random function not returning None."
                return 42
            w = getattr(_weakref, kind)(foobaz)
            s = repr(w)
            print s
            if kind == 'ref':
                assert s.startswith('<weakref at ')
            else:
                assert (s.startswith('<weakproxy at ') or
                        s.startswith('<weakcallableproxy at '))
            assert "function" in s
            del foobaz
            try:
                for i in range(10):
                    if w() is None:
                        break    # only reachable if kind == 'ref'
                    gc.collect()
            except ReferenceError:
                pass   # only reachable if kind == 'proxy'
            s = repr(w)
            print s
            assert "dead" in s

    def test_eq(self):
        # A ref never equals its referent directly, unless the referent's
        # own __eq__ says everything is equal.
        import _weakref
        class A(object):
            pass
        a = A()
        assert not(_weakref.ref(a) == a)
        assert _weakref.ref(a) != a
        class A(object):
            def __eq__(self, other):
                return True
        a = A()
        assert _weakref.ref(a) == a
| 26.6133 | 79 | 0.494216 |
6de62037443df35dba4dace5cd2a7aae9f386c2d | 800 | py | Python | tests/test_cell_types.py | seung-lab/EMAnnotationSchemas | ef1061ca78860d53f1cd180496e87ad685a32ffe | [
"MIT"
] | null | null | null | tests/test_cell_types.py | seung-lab/EMAnnotationSchemas | ef1061ca78860d53f1cd180496e87ad685a32ffe | [
"MIT"
] | 18 | 2018-08-15T17:38:04.000Z | 2022-02-18T02:08:28.000Z | tests/test_cell_types.py | fcollman/EMAnnotationSchemas | bcc9f308868b2ea3e1504089d9cbab878b626acc | [
"MIT"
] | 5 | 2018-08-14T22:39:49.000Z | 2021-10-05T16:36:07.000Z | from emannotationschemas.schemas.cell_type_local import CellTypeLocal
# Valid annotation: IVSCC morphology classification with a 3-d point.
good_ivscc_cell_type = {
    "classification_system": "ivscc_m",
    "cell_type": "spiny_4",
    "pt": {"position": [1, 2, 3]},
}

# Annotation expected to fail validation: per the test below, 'spiny_4'
# is not accepted under the 'classical' classification system.
bad_classical_cell_type = {
    "classification_system": "classical",
    "cell_type": "spiny_4",
    "pt": {"position": [5, 2, 3]},
}
def annotation_import(item):
    """Test double for the bound-spatial-point hook.

    Mutates ``item`` in place: drops any ``rootId`` and stamps a fixed
    ``supervoxel_id`` of 5.
    """
    item.pop("rootId", None)
    item["supervoxel_id"] = 5
def test_cell_type_validation():
    """A valid IVSCC annotation loads and gets the stamped supervoxel id."""
    schema = CellTypeLocal(context={"bsp_fn": annotation_import})
    loaded = schema.load(good_ivscc_cell_type)
    assert loaded["pt"]["supervoxel_id"] == 5
def test_cell_type_invalid():
    """A cell type outside its classification system is marked invalid."""
    schema = CellTypeLocal(context={"bsp_fn": annotation_import})
    loaded = schema.load(bad_classical_cell_type)
    assert not loaded["valid"]
| 25.806452 | 69 | 0.695 |
0c91c338722379a7407def3d429745c0a84f2d59 | 1,621 | py | Python | labtex/linear.py | CianLM/labtex | cb8233d762f62825c466fbdb050334f743847aaa | [
"MIT"
] | 4 | 2021-07-10T13:28:48.000Z | 2021-09-04T07:06:18.000Z | labtex/linear.py | CianLM/labtex | cb8233d762f62825c466fbdb050334f743847aaa | [
"MIT"
] | null | null | null | labtex/linear.py | CianLM/labtex | cb8233d762f62825c466fbdb050334f743847aaa | [
"MIT"
] | null | null | null |
from labtex.measurement import Measurement, MeasurementList
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
class LinearRegression:
    "Linearly regress two MeasurementLists."

    def __init__(self, x: MeasurementList, y: MeasurementList):
        # x and y are MeasurementLists of equal length; arithmetic below
        # (w*x, x - xmean, ...) relies on MeasurementList's elementwise
        # operator overloads.
        self.x = x
        self.y = y
        assert len(x) == len(y)
        # TODO Support weighted regression
        n = len(x)
        # Uniform weights summing to 1 (placeholder until real weights).
        w = [ 1 / n] * n
        xmean = sum(w*x) / sum(w)
        ymean = sum(w*y) / sum(w)
        # D: weighted sum of squared deviations of x (the denominator of
        # the least-squares slope).
        D = sum(w*(x - xmean) ** 2)
        # Slope and intercept of the weighted least-squares line.
        m = 1 / D * sum(w * (x - xmean) * y)
        c = ymean - m * xmean
        # Residuals; c.value strips the intercept's uncertainty so the
        # residual spread is measured against the fitted central line.
        d = y - x*m - c.value
        # Standard errors of slope and intercept from the residual
        # variance with n - 2 degrees of freedom.
        Delta_m = (1/D * sum(w * d ** 2) / (n - 2) ) ** 0.5
        Delta_c = ( (1 / sum(w) + xmean ** 2 / D) * sum( w * d ** 2 ) / (n - 2) ) ** 0.5
        # Line of best fit parameters
        self.lobf = {
            "m": Measurement(m.value,Delta_m.value,m.unit),
            "c": Measurement(c.value,Delta_c.value,c.unit)
        }

    def __repr__(self):
        # Human-readable fit summary: slope and intercept with uncertainties.
        return f"m = {self.lobf['m']}\nc = {self.lobf['c']}"

    def savefig(self,filename : str = "figure", title: str = "", xlabel : str = "", ylabel: str = "",showline : bool = True, graphnumber : int = 0):
        # Plot the data with y error bars (and optionally the fitted line)
        # on figure `graphnumber`, then write it to `filename`.
        plt.figure(graphnumber)
        plt.errorbar(self.x.tolist(),self.y.tolist(),yerr = self.y.uncertainty,fmt='o')
        if showline:
            plt.plot(self.x.tolist(),(self.x*self.lobf["m"].value+self.lobf["c"].value).tolist())
        plt.title(title)
        # Axis labels carry the units of each MeasurementList in TeX math.
        plt.xlabel(xlabel + f", (${self.x.unit}$)")
        plt.ylabel(ylabel + f", (${self.y.unit}$)")
        plt.savefig(filename)
| 31.173077 | 148 | 0.526835 |
e3c66b14e40978430dd79068526fd597c92b49a6 | 10,502 | py | Python | pfg/template.py | michaelfreyj/pfg | 25397a5539852667ea4ab55bc47511b488c63404 | [
"MIT"
] | null | null | null | pfg/template.py | michaelfreyj/pfg | 25397a5539852667ea4ab55bc47511b488c63404 | [
"MIT"
] | null | null | null | pfg/template.py | michaelfreyj/pfg | 25397a5539852667ea4ab55bc47511b488c63404 | [
"MIT"
] | null | null | null | # vim:fdm=marker
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from copy import deepcopy
import configparser
import logging
import os
from pathlib import Path
import sys
import time
import yaml
from .input_utils import choice, yes_or_no
from .template_utils import check_templates
# Module logger; NullHandler keeps it silent unless the application
# configures logging.
log = logging.getLogger('pfg.Template')
log.addHandler(logging.NullHandler())

# Captured once at import time: used for default timestamps and paths.
now = time.localtime()
home = Path.home()
cwd = Path.cwd()
class Template:
    """Interactive file generator driven by a template with a YAML header.

    A template file is split on '^^^\\n' into a YAML front-matter part
    (required/optional placeholder declarations) and the body text.
    Placeholders look like %%key%%; optional sections are fenced by
    ***section***\\n ... \\n+++section+++.  Values come from an rc file,
    a YAML input file, or interactive prompts.
    """

    def __init__(self, args):# {{{
        # `args` is an argparse-style namespace with .outfile, .infile,
        # .template and .print_to_console attributes.
        self.infile = None
        self.outfile = None
        self.template_file = None
        self.rc = None
        self.read_rc()
        # --- resolve the output file ---------------------------------
        if args.outfile is not None:
            self.outfile = Path(args.outfile)
        elif args.outfile is None and not args.print_to_console:
            outfile = input("enter the name for file (no extension) to be created\n>> ")
            self.set_outfile(outfile)
        else:
            # Dry run: timestamped placeholder name, never written.
            self.outfile = Path(
                'dry-run_{}-{:02d}-{:02d}_{:02d}:{:02d}'.format(
                    now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour,
                    now.tm_min,)
            )
        # --- resolve the template ------------------------------------
        if args.template is not None:
            path = Path(args.template)
            available_templates, names = check_templates()
            print(names)
            print(args.template)
            if args.template in names:
                # Named template from the user's template directory.
                self.set_template_file(available_templates[names.index(args.template)])
            elif not path.exists():
                log.error(f'template \'{path}\' does not exist')
                sys.exit(1)
            elif not path.is_file():
                log.error(f'template \'{path}\' is not a file')
                sys.exit(1)
            else:
                self.template_file = path
            self.read_template()
            self.parse_yaml()
        elif args.template is None:
            # No template given: let the user pick interactively.
            available_templates, names = check_templates()
            self.set_template_file(choice(available_templates))
            self.read_template()
            self.parse_yaml()
        # --- resolve the substitutions -------------------------------
        if args.infile is not None:
            path = Path(args.infile)
            if not path.exists():
                log.error(f'yaml \'{path}\' does not exist')
                sys.exit(1)
            elif not path.is_file():
                log.error(f'yaml \'{path}\' is not a file')
                sys.exit(1)
            else:
                self.infile = path
            self.read_yaml()
        elif args.infile is None:
            # No YAML input: prompt for every substitution.
            self.query()
        log.info(f"Config : \'{self.rc}\'")
        log.info(f"Outfile : \'{self.outfile}\'")
        log.info(f"Infile : \'{self.infile}\'")
        log.info(f"Template : \'{self.template}\'")
    # }}}

    def set_outfile(self, filepath):# {{{
        """Set the output path."""
        self.outfile = Path(filepath)
    # }}}

    def set_infile(self, filepath):# {{{
        """Set the YAML substitution input path."""
        self.infile = Path(filepath)
    # }}}

    def set_template_file(self, filepath):# {{{
        """Set the template path."""
        self.template_file = Path(filepath)
    # }}}

    def read_rc(self):# {{{
        """Locate and parse the user's pfgrc config file, if present."""
        # NOTE(review): this local `rc` is never used again; the code
        # below relies on self.rc (initialised to None in __init__).
        rc = False
        config_locations = [
            home.joinpath(".config/pfg/pfgrc"),
            home.joinpath(".pfg/pfgrc")
        ]
        for conf in config_locations:
            if conf.exists():
                self.rc_file = conf
                self.rc = True
                log.info(f'found rc file \'{conf}\'')
                break
        if self.rc:
            config = configparser.ConfigParser()
            config.read(self.rc_file)
            self.rc_defaults = config.defaults()
            # presumably the rc has a [DATE] section with a `today` key;
            # bool('false') is True, so any non-empty value enables it.
            self.rc_todays_date = bool(config['DATE']['today'])
    # }}}

    def read_template(self):# {{{
        """Split the template file into YAML front matter and body text."""
        log.info(f'reading template text from \'{self.template_file}\'')
        data = self.template_file.read_text()
        self.yaml_text = data.split('^^^\n')[0]
        self.template = data.split('^^^\n')[1]
    # }}}

    def read_yaml(self):# {{{
        """Load the substitution dictionary from the user-supplied YAML."""
        data = self.infile.read_text()
        log.info(f'loading yaml dictionary from \'{self.infile}\'')
        self.subs = yaml.load(data, Loader=yaml.FullLoader)
    # }}}

    def parse_yaml(self): # {{{
        """Build self.dict from the template's YAML front matter.

        Document 0 declares `extension` and the `required` keys;
        document 1 declares optional sections and their keys.
        Also fixes the outfile suffix from the declared extension.
        """
        keyword_dict = {}
        for i, section in enumerate(self.yaml_text.split('---\n')):
            sec = yaml.load(section, Loader=yaml.FullLoader)
            if i == 0:
                try:
                    extension = sec['extension']
                    log.info(f'file extension is \'{sec["extension"]}\'')
                except (KeyError, TypeError):
                    log.warning('extension not defined in template file')
                    log.warning('setting default to \'.txt\'')
                    # NOTE(review): '.' is prepended again below, so the
                    # fallback actually yields '..txt' — should be 'txt'.
                    extension = '.txt'
                try:
                    keyword_dict.update({ 'required' : {} })
                    for item in sec['required']:
                        keyword_dict['required'].update({ item : None })
                    log.info('list of required replacements:')
                    log.info(sec['required'])
                except KeyError:
                    log.error('no required fields defined in template...')
                    raise
                    # NOTE(review): everything after the bare `raise`
                    # above is unreachable dead code.
                    log.error('using values \'title\', \'author\', \'date\'')
                    keyword_dict['required'].update(
                        { "title" : None, "author" : None, "date" : None }
                    )
            elif i == 1:
                # Optional sections: each starts enabled ("include": True)
                # with one None slot per declared key.
                for key, value in sec.items():
                    keyword_dict.update({ key : { "include" : True }})
                    if value is not None:
                        for item in value:
                            keyword_dict[key].update({ item : None })
        self.dict = keyword_dict
        self.extension = '.' + extension
        self.outfile = self.outfile.with_suffix(self.extension)
    # }}}

    def query(self):# {{{
        """Defines a set of substitutions for a template"""
        self.subs = deepcopy(self.dict)
        for sec, subdict in self.subs.copy().items():
            if sec == "extension":
                pass
            elif sec == 'required':
                for key, value in subdict.items():
                    if key == "date":
                        try:
                            # rc_todays_date only exists when an rc file
                            # was parsed; AttributeError means "no rc".
                            if self.rc_todays_date == True:
                                date = f'{now.tm_year}-{now.tm_mon:02d}-{now.tm_mday:02d}'
                                self.subs[sec].update({ key : date })
                        except AttributeError:
                            if yes_or_no('Use today\'s date?'):
                                date = f'{now.tm_year}-{now.tm_mon}-{now.tm_mday}'
                                self.subs[sec].update({ key : date })
                            else:
                                self.subs[sec].update(
                                    { key : input(f'{key} >> ')})
                    else:
                        # Prefer the rc default; otherwise prompt.
                        try:
                            self.subs[sec].update(
                                { key : self.rc_defaults[key] })
                        except (AttributeError, KeyError):
                            self.subs[sec].update(
                                { key : input(f'{key} >> ')})
            else:
                # Optional section: ask whether to include it, then fill
                # its keys from rc defaults or prompts.
                for key, value in subdict.items():
                    if subdict['include']:
                        if key == 'include':
                            self.subs[sec].update(
                                { key : yes_or_no(
                                    f'Include {sec} section?') })
                        else:
                            try:
                                self.subs[sec].update(
                                    { key : self.rc_defaults[key] })
                            except (AttributeError, KeyError):
                                self.subs[sec].update(
                                    { key : input(f'{key} >> ')})
    # }}}

    def substitute(self):# {{{
        """Defines a set of substitutions for a template"""
        template = self.template
        for sec, subdict in self.subs.items():
            if sec == 'required':
                # Replace every %%key%% placeholder with its value.
                for key, value in subdict.items():
                    template = value.join(template.split(f'%%{key}%%'))
            else:
                if subdict['include']:
                    # Keep the section: strip its fence markers, then
                    # substitute its placeholders.
                    template = ''.join(template.split(f'***{sec}***\n'))
                    template = ''.join(template.split(f'\n+++{sec}+++'))
                    for key, value in subdict.items():
                        if key != 'include':
                            template = value.join(template.split(f'%%{key}%%'))
                        else:
                            pass
                else:
                    # Keep text before and after optional section
                    print(f'removing {sec} section from the file')
                    template = template.split(f'***{sec}***\n')[0] + \
                        template.split(f'\n+++{sec}+++')[1]
        self.template_final = template
    # }}}

    def print(self):# {{{
        """Pretty-print the rendered template to stdout between banners."""
        print('{:#^79}'.format(''))
        print('#{:^77}#'.format('Begin Template'))
        print('{:#^79}'.format(''))
        print(self.template_final)
        print('{:#^79}'.format(''))
        print('#{:^77}#'.format('End Template'))
        print('{:#^79}'.format(''))
        log.debug(f'substitutions dict:\n{yaml.dump(self.subs)}')
    # }}}

    def save_yaml(self):# {{{
        """Dump the substitution dictionary to a user-named .yml file."""
        path = input('enter a name for the yaml dict') + '.yml'
        # NOTE(review): missing f-prefix — this logs the literal '{path}'.
        log.info('saving dictionary to \'{path}\'')
        # NOTE(review): open(path) opens for reading; yaml.dump needs a
        # writable stream (open(path, 'w')), so this likely fails.
        yaml.dump(self.subs, open(path))
    # }}}

    def write(self):# {{{
        """Write the rendered template, prompting before overwriting."""
        write = True
        log.debug(f'file path: {self.outfile}')
        if self.outfile.exists():
            write = yes_or_no(f'\'{self.outfile.name}\' already exists, overwrite it?')
        if not write:
            if yes_or_no('do you want to change the name/path of the file?'):
                new_path = input('enter the new name/path >> ')
                self.set_outfile(new_path)
        elif write:
            if not self.outfile.parent.exists():
                log.info(f'creating \'{self.outfile.parent.absolute()}\'')
                self.outfile.parent.mkdir(parents=True)
            log.info(f'writing \'{self.outfile.name}\' to \'{self.outfile.parent.absolute()}\'')
            self.outfile.write_text(self.template_final)
        else:
            # NOTE(review): unreachable — `write` is always truthy or
            # falsy, so one of the two branches above always runs.
            log.info(f'\'{self.outfile.name}\' was not written')
    # }}}
| 39.630189 | 96 | 0.46991 |
7504d455d259efe999fcc271280b6a35d72c2fe9 | 558 | py | Python | main.py | buzZ-exe/low | 04a3de43a6416398e5075731f1631d6cb5d7d526 | [
"MIT"
] | 1 | 2022-02-10T15:00:06.000Z | 2022-02-10T15:00:06.000Z | main.py | buzZ-exe/low | 04a3de43a6416398e5075731f1631d6cb5d7d526 | [
"MIT"
] | null | null | null | main.py | buzZ-exe/low | 04a3de43a6416398e5075731f1631d6cb5d7d526 | [
"MIT"
] | null | null | null | #main.py
#For main bot functionality
from discord.ext.commands import Bot
import cogs.config as config
# Placeholder for a future presence/status feature; currently unused.
# (The previous module-level `global status_state` statement was a
# no-op: `global` only has meaning inside a function body.)
status_state = 'with Humans'  # Probably never gonna use

BOT_PREFIX = "!"

client = Bot(command_prefix=BOT_PREFIX)
# Drop the built-in help command; the helpCommand cog supplies its own.
client.remove_command('help')

# Loading cogs
client.load_extension("cogs.redditCommands")
client.load_extension("cogs.commands")
client.load_extension("cogs.listeners")
client.load_extension("cogs.helpCommand")
client.run(config.token) | 27.9 | 94 | 0.756272 |
84fab177068187be357158cce0e355a38aade51e | 747 | py | Python | setup.py | mosquito/simpleaes | 097151473ca67f1b1494f1a25f8aec2083d5d76a | [
"MIT"
] | null | null | null | setup.py | mosquito/simpleaes | 097151473ca67f1b1494f1a25f8aec2083d5d76a | [
"MIT"
] | 1 | 2018-07-14T13:09:57.000Z | 2018-07-14T13:09:57.000Z | setup.py | mosquito/simpleaes | 097151473ca67f1b1494f1a25f8aec2083d5d76a | [
"MIT"
] | null | null | null | # encoding: utf-8
from __future__ import absolute_import, print_function
# Prefer setuptools; fall back to plain distutils when it is unavailable.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

__version__ = '0.4.7'
__author__ = 'Dmitry Orlov <me@mosquito.su>'

setup(name='simple_aes',
      version=__version__,
      author=__author__,
      author_email='me@mosquito.su',
      license="MIT",
      description="Very simple pycrypto AES helper.",
      platforms="all",
      url="http://github.com/mosquito/simpleaes",
      classifiers=[
          'Environment :: Console',
          'Programming Language :: Python',
      ],
      # PyPI long description is taken verbatim from the README.
      long_description=open('README.rst').read(),
      # Sources live under src/; the lone package is that directory root.
      package_dir={'': 'src'},
      packages=[
          '.',
      ],
      install_requires=[
          'pycrypto'
      ],
      )
| 20.75 | 54 | 0.647925 |
be7aa05804deb6f2651f1ae431a3c5ecb595fd48 | 56 | py | Python | rlmolecule/molecule/policy/__init__.py | harrysorensennrel/rlmolecule | 978269400b90f752bf4741f42f03522603b321e2 | [
"BSD-3-Clause"
] | null | null | null | rlmolecule/molecule/policy/__init__.py | harrysorensennrel/rlmolecule | 978269400b90f752bf4741f42f03522603b321e2 | [
"BSD-3-Clause"
] | null | null | null | rlmolecule/molecule/policy/__init__.py | harrysorensennrel/rlmolecule | 978269400b90f752bf4741f42f03522603b321e2 | [
"BSD-3-Clause"
] | null | null | null | import lazy_import
lazy_import.lazy_module("tensorflow") | 28 | 37 | 0.875 |
8b909ca6f8a228c7a751486c3b6c0d7a1f583489 | 424 | py | Python | pandasgui/__init__.py | jmartens/PandasGUI | aa9c366882ec17dcf8531947631a602b486c9253 | [
"MIT"
] | 1,859 | 2019-06-14T04:19:01.000Z | 2020-12-04T19:43:15.000Z | pandasgui/__init__.py | felipeescallon/pandas-GUI | 40327cd2763d830e761475df00d62b8cb29c3438 | [
"MIT"
] | 106 | 2020-12-05T08:35:30.000Z | 2022-02-11T16:41:17.000Z | pandasgui/__init__.py | felipeescallon/pandas-GUI | 40327cd2763d830e761475df00d62b8cb29c3438 | [
"MIT"
] | 106 | 2019-09-14T04:57:26.000Z | 2020-12-03T04:59:14.000Z | # Set version
from pkg_resources import get_distribution

# Version tracks the installed distribution metadata (whatever the
# package's setup declared at install time).
__version__ = get_distribution('pandasgui').version

# Logger config
import logging

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Emit records to stderr prefixed with "PandasGUI LEVEL — module — msg".
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter('PandasGUI %(levelname)s — %(name)s — %(message)s'))
logger.addHandler(sh)

# Imports
from pandasgui.gui import show

# Public API of the package.
__all__ = ["show", "__version__"]
| 23.555556 | 86 | 0.775943 |
d26bf2908b844b9e67d02d54009a7ebf4f2bee08 | 314 | py | Python | day-01/problem.py | mkemp/aoc-2020 | 01f65bc4aee05f819c3a8f3b04565188fcc17d25 | [
"MIT"
] | 1 | 2020-12-06T19:33:53.000Z | 2020-12-06T19:33:53.000Z | day-01/problem.py | mkemp/aoc-2020 | 01f65bc4aee05f819c3a8f3b04565188fcc17d25 | [
"MIT"
] | null | null | null | day-01/problem.py | mkemp/aoc-2020 | 01f65bc4aee05f819c3a8f3b04565188fcc17d25 | [
"MIT"
] | null | null | null | from itertools import combinations
def expense_products(values, group_size):
    """Return products of the `group_size`-combinations that sum to 2020.

    Generalizes the original hard-coded pair/triple loops (Advent of
    Code 2020 day 1, parts 1 and 2) to any combination size.

    Args:
        values: sequence of expense-report integers.
        group_size: number of entries that must sum to 2020.

    Returns:
        List of products, one per matching combination, in the order
        itertools.combinations yields them.
    """
    products = []
    for combo in combinations(values, group_size):
        if sum(combo) == 2020:
            product = 1
            for value in combo:
                product *= value
            products.append(product)
    return products


def main():
    """Read ./input and print the answers for both parts."""
    with open('input') as f:
        values = [int(line) for line in f.read().strip().split()]
    # Part 1
    for product in expense_products(values, 2):
        print(product)
    # Part 2
    for product in expense_products(values, 3):
        print(product)


if __name__ == '__main__':
    main()
| 20.933333 | 61 | 0.570064 |
1b2add19af9a032f54e6b000099343ab8dc697d2 | 4,203 | py | Python | ldm/id3b_ingest.py | akrherz/id3b | c2504da1690af2eb510c979bc59a261d73b87c72 | [
"Apache-2.0"
] | null | null | null | ldm/id3b_ingest.py | akrherz/id3b | c2504da1690af2eb510c979bc59a261d73b87c72 | [
"Apache-2.0"
] | 1 | 2017-11-09T03:02:03.000Z | 2019-09-30T15:36:15.000Z | ldm/id3b_ingest.py | akrherz/id3b | c2504da1690af2eb510c979bc59a261d73b87c72 | [
"Apache-2.0"
] | null | null | null | """Our fancy pants ingest of LDM product metadata"""
from __future__ import print_function
from syslog import LOG_LOCAL2
from io import BytesIO
import json
import os
import re
import datetime
from twisted.python import log, syslog
from twisted.internet import stdio
from twisted.internet import reactor
from twisted.protocols import basic
from twisted.enterprise import adbapi
from applib.parser import parser
# WMO abbreviated heading parsed from the LDM product identifier:
#   group 1: TTAAII, group 2: originating centre (CCCC),
#   group 3: DDHHMM timestamp, group 4: optional BBB indicator,
#   group 5: optional ' /p...' AWIPS-id suffix.
WMO_RE = re.compile(
    (
        r"^([0-9A-Za-z]{4,6}) ([A-Z0-9]{4}) ([0-9]{6})( [A-Z]{3})?"
        r"( /p[A-Z0-9]{3,6})?"
    )
)

syslog.startLogging(prefix="id3b_ingest", facility=LOG_LOCAL2)

# Database settings live next to the source tree in config/settings.json.
CFGFN = "%s/settings.json" % (
    os.path.join(os.path.dirname(__file__), "../config"),
)
# Use a context manager so the config file handle is closed promptly
# (json.load(open(...)) previously leaked it for the process lifetime).
with open(CFGFN) as _cfgfh:
    CONFIG = json.load(_cfgfh)
DBOPTS = CONFIG["databaserw"]
# Reconnecting psycopg2 pool shared by all database interactions.
DBPOOL = adbapi.ConnectionPool(
    "psycopg2",
    database=DBOPTS["name"],
    cp_reconnect=True,
    cp_max=20,
    host=DBOPTS["host"],
    user=DBOPTS["user"],
    password=DBOPTS["password"],
)
def compute_wmo_time(valid, ddhhmm):
    """Resolve a WMO DDHHMM group against a reference datetime.

    The day-of-month comes from the header; hour and minute likewise.
    When the header day and the reference day straddle a month boundary
    (early-month header seen late in the month, or vice versa), shift
    the reference by half a month so replace() lands in the right month.
    """
    day = int(ddhhmm[0:2])
    hour = int(ddhhmm[2:4])
    minute = int(ddhhmm[4:6])
    if day < 5 and valid.day > 24:
        # Header refers to the start of the next month.
        valid = valid + datetime.timedelta(days=15)
    elif day > 24 and valid.day < 5:
        # Header refers to the end of the previous month.
        valid = valid - datetime.timedelta(days=15)
    return valid.replace(
        day=day, hour=hour, minute=minute, second=0, microsecond=0
    )
def handle_error(err):
    """Errback for failed database interactions.

    Route the Failure through the twisted log (consistent with how
    IngestorProtocol.connectionLost reports errors) instead of printing
    to stdout; the offending batch is dropped so ingest continues.
    """
    log.err(err)
def save_msgs(txn, msgs):
    """Persist a batch of LDM product messages into ldm_product_log.

    Runs inside a DBPOOL interaction, so ``txn`` is an open cursor.
    WMO header fields are parsed from the product identifier when
    present; otherwise the wmo_* columns are stored as NULL.

    Args:
        txn: database cursor supplied by ``DBPOOL.runInteraction``.
        msgs: iterable of parsed LDM product-metadata messages.
    """
    for msg in msgs:
        tokens = WMO_RE.findall(msg.product_id)
        # Default every WMO-derived column to NULL for non-WMO products.
        # (A duplicate `awips_id = None` assignment was removed.)
        wmo_ttaaii = None
        wmo_source = None
        wmo_time = None
        wmo_bbb = None
        awips_id = None
        if tokens:
            (wmo_ttaaii, wmo_source, wmo_time, wmo_bbb, awips_id) = tokens[0]
            wmo_bbb = wmo_bbb.strip()
            # Drop the leading ' /p' marker from the AWIPS-id group.
            awips_id = awips_id[3:]
            try:
                # DDHHMM string -> datetime anchored near arrival time.
                wmo_time = compute_wmo_time(msg.valid, wmo_time)
            except ValueError as exp:
                # Impossible day-of-month (e.g. 31 in a 30-day month):
                # skip this product rather than abort the transaction.
                # Narrowed from `except Exception` so genuine bugs still
                # propagate.
                print("%s valid: %s wmo_time: %s" % (exp, msg.valid, wmo_time))
                continue
        txn.execute(
            """
            INSERT into ldm_product_log
            (md5sum, size, valid_at, ldm_feedtype,
            seqnum, product_id, product_origin,
            wmo_ttaaii, wmo_source, wmo_valid_at, wmo_bbb, awips_id)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
            """,
            (
                msg.md5sum,
                msg.size,
                msg.valid,
                msg.feedtype,
                msg.seqnum,
                msg.product_id,
                msg.product_origin,
                wmo_ttaaii,
                wmo_source,
                wmo_time,
                wmo_bbb,
                awips_id,
            ),
        )
class IngestorProtocol(basic.LineReceiver):
    """Consumes raw LDM product metadata from stdin and persists it.

    The stream is used in raw mode: ``parser`` splits complete messages
    off the front of the buffer and whatever trails an incomplete
    message is carried in ``leftover`` until more data arrives.
    """

    # Unconsumed bytes carried between dataReceived() calls.  main()
    # also resets this, but a class-level default means the protocol no
    # longer depends on the caller remembering to initialize it.
    leftover = b""

    def connectionLost(self, reason):
        """Called when the STDIN connection is lost; schedule shutdown."""
        log.msg("connectionLost")
        log.err(reason)
        # Give in-flight database interactions time to finish first.
        reactor.callLater(15, reactor.callWhenRunning, reactor.stop)

    def dataReceived(self, data):
        """Process a chunk of raw feed data."""
        self.leftover, msgs = parser(BytesIO(self.leftover + data))
        if msgs:
            df = DBPOOL.runInteraction(save_msgs, msgs)
            df.addErrback(handle_error)
        elif len(self.leftover) > 8000:
            # No parseable message within 8 kB of buffered input: assume
            # the stream is corrupt and start over.
            print("ABORT RESET, leftover size is too large!")
            self.leftover = b""
class LDMProductFactory(stdio.StandardIO):
    """Standard-IO wrapper connecting a protocol to this process's stdin."""

    def __init__(self, protocol, **kwargs):
        """ constructor with a protocol instance """
        # NOTE(review): forwards straight to the parent constructor; it
        # adds no behavior and exists only as an extension point.
        stdio.StandardIO.__init__(self, protocol, **kwargs)
def main():
    """Our main loop: wire stdin into the ingest protocol and run twisted."""
    proto = IngestorProtocol()
    # Puts us into rawdata mode and not line receiver
    proto.setRawMode()
    # Something to store data between runs
    proto.leftover = b""
    _ = LDMProductFactory(proto)
    reactor.run()  # @UndefinedVariable


if __name__ == "__main__":
    main()
| 27.292208 | 79 | 0.5734 |
2105d426ec6f99f737013c901daab257a2a6284e | 323 | py | Python | src/resumeSite/AuthenApp/urls.py | AdaFactor/ResumeProject | 7492fe0a52a135ee44c599d9dd1edee86cfb6787 | [
"MIT"
] | null | null | null | src/resumeSite/AuthenApp/urls.py | AdaFactor/ResumeProject | 7492fe0a52a135ee44c599d9dd1edee86cfb6787 | [
"MIT"
] | 23 | 2018-01-19T07:48:44.000Z | 2019-04-23T17:49:39.000Z | src/resumeSite/AuthenApp/urls.py | AdaFactor/ResumeProject | 7492fe0a52a135ee44c599d9dd1edee86cfb6787 | [
"MIT"
] | 1 | 2018-01-17T03:39:26.000Z | 2018-01-17T03:39:26.000Z | from django.urls import path
from . import views
# URL namespace for reverse()/{% url %} lookups, e.g. 'AuthenApp:login'.
app_name = 'AuthenApp'

urlpatterns = [
    path('login/', views.login_view, name='login'),
    path('logout/', views.logout_view, name='logout'),
    path('authen/', views.authen_view, name='authen_user'),
    path('registration/', views.new_user_view, name='new_user'),
]
| 29.363636 | 64 | 0.690402 |
8bf6a209ad695d52bdee7cce27bb8df817cb618f | 89 | py | Python | script/common/notify_email.py | shinhwagk/cmsfs | 652ee5779936ebf265364a098a10cde49349396e | [
"Apache-2.0"
] | null | null | null | script/common/notify_email.py | shinhwagk/cmsfs | 652ee5779936ebf265364a098a10cde49349396e | [
"Apache-2.0"
] | null | null | null | script/common/notify_email.py | shinhwagk/cmsfs | 652ee5779936ebf265364a098a10cde49349396e | [
"Apache-2.0"
] | null | null | null | def mail_send(to, subject, content):
print(to)
print(subject)
print(content)
| 17.8 | 36 | 0.662921 |
20118e345d94d3d056fb82815170d5e32db7e41b | 980 | py | Python | protector/tests/rules_test/test_prevent_drop.py | trivago/Protector | 7ebe7bde965e27737b961a0cb5740724d174fdc7 | [
"BSD-3-Clause"
] | 54 | 2016-02-23T16:04:11.000Z | 2021-05-01T06:43:55.000Z | protector/tests/rules_test/test_prevent_drop.py | trivago/Protector | 7ebe7bde965e27737b961a0cb5740724d174fdc7 | [
"BSD-3-Clause"
] | 1 | 2016-03-08T15:07:35.000Z | 2016-06-23T12:52:36.000Z | protector/tests/rules_test/test_prevent_drop.py | trivago/Protector | 7ebe7bde965e27737b961a0cb5740724d174fdc7 | [
"BSD-3-Clause"
] | 4 | 2016-06-01T14:22:47.000Z | 2017-03-09T05:23:08.000Z | import unittest
from protector.query.delete import DeleteQuery
from protector.query.drop import DropQuery
from protector.query.list import ListQuery
from protector.query.select import SelectQuery
from protector.rules import prevent_drop
class TestPreventDrop(unittest.TestCase):
    """The prevent_drop rule must reject DROP queries and allow the rest."""

    def setUp(self):
        self.prevent_drop = prevent_drop.RuleChecker()

    def test_prevent_drop(self):
        """
        Test prevention of DROP queries
        """
        checker = self.prevent_drop
        # DROP is the only query kind the rule rejects.
        self.assertFalse(checker.check(DropQuery('/myseries/')).is_ok())
        allowed = [
            DeleteQuery('myseries'),
            DeleteQuery('/myseries/'),
            DeleteQuery('/myseries/', 'time > now() - 24h'),
            SelectQuery('*', 'myseries'),
            ListQuery('/myseries/'),
        ]
        for query in allowed:
            self.assertTrue(checker.check(query).is_ok())
| 40.833333 | 105 | 0.719388 |
ea967ea9a924c0f7bbddc1ec8a69271aafd9670b | 7,599 | py | Python | browsepy/plugin/player/playable.py | galacticpolymath/browsepy | c3ee7de62c479e2bbbdcc6b7120e85fde054bf22 | [
"MIT"
] | 164 | 2015-04-09T16:24:32.000Z | 2022-03-27T16:27:51.000Z | browsepy/plugin/player/playable.py | galacticpolymath/browsepy | c3ee7de62c479e2bbbdcc6b7120e85fde054bf22 | [
"MIT"
] | 35 | 2015-11-04T09:01:58.000Z | 2021-08-04T18:58:31.000Z | browsepy/plugin/player/playable.py | galacticpolymath/browsepy | c3ee7de62c479e2bbbdcc6b7120e85fde054bf22 | [
"MIT"
] | 79 | 2016-04-13T02:13:31.000Z | 2022-03-22T15:19:56.000Z |
import sys
import codecs
import os.path
import warnings
from werkzeug.utils import cached_property
from browsepy.compat import range, PY_LEGACY # noqa
from browsepy.file import Node, File, Directory, \
underscore_replace, check_under_base
# Python 2 shipped this module as ConfigParser; Python 3 renamed it to
# configparser. Import under the Python 3 name either way.
if PY_LEGACY:
    import ConfigParser as configparser
else:
    import configparser
# Compatibility alias: SafeConfigParser was deprecated (and eventually
# removed) in favour of ConfigParser, so prefer it only while it exists.
ConfigParserBase = (
    configparser.SafeConfigParser
    if hasattr(configparser, 'SafeConfigParser') else
    configparser.ConfigParser
)
class PLSFileParser(object):
    '''
    ConfigParser wrapper accepting a fallback value on get for convenience.

    This wraps instead of inheriting because ConfigParser is an old-style
    class (classobj) on Python 2 and cannot be reliably subclassed there.
    '''
    # Sentinel distinguishing "no fallback supplied" from a None fallback.
    NOT_SET = type('NotSetType', (object,), {})
    # SafeConfigParser was deprecated (and later removed in Python 3.12);
    # use it only while it still exists.
    parser_class = (
        configparser.SafeConfigParser
        if hasattr(configparser, 'SafeConfigParser') else
        configparser.ConfigParser
    )

    def __init__(self, path):
        '''
        :param path: filesystem path of the PLS (ini-style) file to read
        '''
        with warnings.catch_warnings():
            # We already know about SafeConfigParser deprecation!
            warnings.filterwarnings('ignore', category=DeprecationWarning)
            self._parser = self.parser_class()
            self._parser.read(path)

    def _get(self, getter, section, key, fallback):
        '''
        Shared lookup logic for :meth:`get` and :meth:`getint`.

        Fix over the previous version: ``NoSectionError`` is now honored
        too, so a playlist file missing its ``[playlist]`` section returns
        the fallback instead of raising.

        :raises configparser.Error: option missing/malformed and no
            fallback was supplied.
        :raises ValueError: value malformed and no fallback was supplied.
        '''
        try:
            return getter(section, key)
        except (configparser.NoSectionError,
                configparser.NoOptionError,
                ValueError):
            if fallback is self.NOT_SET:
                raise
            return fallback

    def getint(self, section, key, fallback=NOT_SET):
        '''
        Get option value as integer, returning *fallback* (when given) if
        the option is missing, malformed, or its section does not exist.
        '''
        return self._get(self._parser.getint, section, key, fallback)

    def get(self, section, key, fallback=NOT_SET):
        '''
        Get option value as string, returning *fallback* (when given) if
        the option is missing or its section does not exist.
        '''
        return self._get(self._parser.get, section, key, fallback)
class PlayableBase(File):
    '''File node aware of the audio and playlist extensions the player supports.'''

    # Extension -> mimetype table for every format the player handles.
    extensions = {
        'mp3': 'audio/mpeg',
        'ogg': 'audio/ogg',
        'wav': 'audio/wav',
        'm3u': 'audio/x-mpegurl',
        'm3u8': 'audio/x-mpegurl',
        'pls': 'audio/x-scpls',
    }

    @classmethod
    def extensions_from_mimetypes(cls, mimetypes):
        '''Return the subset of :attr:`extensions` whose mimetype is listed.'''
        wanted = frozenset(mimetypes)
        subset = {}
        for extension, mimetype in cls.extensions.items():
            if mimetype in wanted:
                subset[extension] = mimetype
        return subset

    @classmethod
    def detect(cls, node, os_sep=os.sep):
        '''Guess a supported mimetype from *node*'s filename extension, or None.'''
        filename = node.path.rsplit(os_sep)[-1]
        if '.' not in filename:
            return None
        extension = filename.rsplit('.')[-1]
        return cls.extensions.get(extension, None)
class PlayableFile(PlayableBase):
    '''Single audio file the player widget can stream directly.'''

    mimetypes = ['audio/mpeg', 'audio/ogg', 'audio/wav']
    extensions = PlayableBase.extensions_from_mimetypes(mimetypes)
    # Reverse lookup: mimetype -> media container format name.
    media_map = {mime: ext for ext, mime in extensions.items()}

    def __init__(self, **kwargs):
        # Pop player-specific metadata before delegating to the File init.
        self.duration = kwargs.pop('duration', None)
        self.title = kwargs.pop('title', None)
        super(PlayableFile, self).__init__(**kwargs)

    @property
    def title(self):
        '''Track title, falling back to the filename when unset or empty.'''
        if self._title:
            return self._title
        return self.name

    @title.setter
    def title(self, title):
        self._title = title

    @property
    def media_format(self):
        '''Container format name matching this file's mimetype.'''
        return self.media_map[self.type]
class PlayListFile(PlayableBase):
    '''Base class for playlist formats that expand into playable entries.'''

    playable_class = PlayableFile
    mimetypes = ['audio/x-mpegurl', 'audio/x-mpegurl', 'audio/x-scpls']
    extensions = PlayableBase.extensions_from_mimetypes(mimetypes)

    @classmethod
    def from_urlpath(cls, path, app=None):
        '''
        Resolve *path* into the most specific playable node type, falling
        back to the generic node when its mimetype is not playlist-like.
        '''
        node = Node.from_urlpath(path, app)
        specialized = {
            PlayableDirectory.mimetype: PlayableDirectory,
            M3UFile.mimetype: M3UFile,
            PLSFile.mimetype: PLSFile,
        }.get(node.mimetype)
        if specialized is None:
            return node
        return specialized(node.path, node.app)

    def normalize_playable_path(self, path):
        '''
        Turn a playlist entry into an absolute filesystem path (URLs are
        returned untouched). Returns None when the path escapes the
        configured directory base.
        '''
        if '://' in path:
            # Remote entry (URL): keep as-is.
            return path
        path = os.path.normpath(path)
        if not os.path.isabs(path):
            # Relative entries resolve against the playlist's own folder.
            return os.path.join(self.parent.path, path)
        drive = os.path.splitdrive(self.path)[0]
        if drive and not os.path.splitdrive(path)[0]:
            # Windows: inherit the playlist's drive letter.
            path = drive + path
        base = self.app.config['directory_base']
        return path if check_under_base(path, base) else None

    def _entries(self):
        # Empty generator; subclasses override with real parsing.
        return
        yield  # noqa

    def entries(self, sortkey=None, reverse=None):
        '''Yield only the parsed entries that are actually playable files.'''
        for node in self._entries():
            if PlayableFile.detect(node):
                yield node
class PLSFile(PlayListFile):
    '''
    PLS playlist file: ini-style format with a [playlist] section holding
    FileN / LengthN / TitleN options.
    '''
    ini_parser_class = PLSFileParser
    # Upper bound for entry scanning when NumberOfEntries is absent:
    # the largest native integer available, falling back to 2**32.
    maxsize = getattr(sys, 'maxint', 0) or getattr(sys, 'maxsize', 0) or 2**32
    mimetype = 'audio/x-scpls'
    extensions = PlayableBase.extensions_from_mimetypes([mimetype])

    def _entries(self):
        '''
        Yield a playable file for every FileN option in the [playlist]
        section, attaching optional LengthN / TitleN metadata.
        '''
        parser = self.ini_parser_class(self.path)
        # Declared entry count; None when the header omits it.
        maxsize = parser.getint('playlist', 'NumberOfEntries', None)
        for i in range(1, self.maxsize if maxsize is None else maxsize + 1):
            path = parser.get('playlist', 'File%d' % i, None)
            if not path:
                if maxsize:
                    # Count is known: a numbering gap may still be
                    # followed by further entries, so keep scanning.
                    continue
                # Count unknown: the first missing FileN ends the scan.
                break
            path = self.normalize_playable_path(path)
            if not path:
                # Entry escapes the served directory tree: skip it.
                continue
            yield self.playable_class(
                path=path,
                app=self.app,
                duration=parser.getint(
                    'playlist', 'Length%d' % i,
                    None
                ),
                title=parser.get(
                    'playlist',
                    'Title%d' % i,
                    None
                ),
            )
class M3UFile(PlayListFile):
    '''
    M3U/M3U8 playlist file: plain text, one path or URL per line, with
    optional "#EXTINF:<seconds>,<title>" metadata lines.
    '''
    mimetype = 'audio/x-mpegurl'
    extensions = PlayableBase.extensions_from_mimetypes([mimetype])

    def _iter_lines(self):
        '''
        Yield non-empty, right-stripped lines, skipping the optional
        #EXTM3U header line.
        '''
        prefix = '#EXTM3U\n'
        # .m3u8 files are UTF-8 by convention; plain .m3u is ASCII-ish.
        encoding = 'utf-8' if self.path.endswith('.m3u8') else 'ascii'
        with codecs.open(
          self.path, 'r',
          encoding=encoding,
          errors=underscore_replace
          ) as f:
            if f.read(len(prefix)) != prefix:
                # No header: rewind so the first real line is not lost.
                f.seek(0)
            for line in f:
                line = line.rstrip()
                if line:
                    yield line

    def _entries(self):
        '''
        Yield a playable file per path line, attaching the duration/title
        gathered from the immediately preceding #EXTINF line (if any).

        Fixes over the previous version: the "#EXTINF:" prefix is stripped
        before parsing (previously int(duration) raised ValueError because
        duration still contained the prefix, and the "-1" comparison could
        never match), and the metadata line is no longer fed to
        normalize_playable_path as if it were a path.
        '''
        extinf_prefix = '#EXTINF:'
        data = {}
        for line in self._iter_lines():
            if line.startswith(extinf_prefix):
                # "#EXTINF:<seconds>,<title>" describes the NEXT entry.
                duration, title = line[len(extinf_prefix):].split(',', 1)
                data['duration'] = None if duration == '-1' else int(duration)
                data['title'] = title
                continue  # metadata line is not itself a playlist entry
            path = self.normalize_playable_path(line)
            if path:
                yield self.playable_class(path=path, app=self.app, **data)
            # Metadata only applies to the line immediately after it.
            data.clear()
class PlayableDirectory(Directory):
    '''Directory view exposing only its directly playable audio files.'''

    file_class = PlayableFile
    name = ''

    @cached_property
    def parent(self):
        # Expose the same location as a plain Directory for navigation.
        return Directory(self.path)

    @classmethod
    def detect(cls, node):
        '''Return this mimetype when *node* is a directory holding playable files.'''
        if not node.is_directory:
            return None
        for child in node._listdir():
            if PlayableFile.detect(child):
                return cls.mimetype
        return None

    def entries(self, sortkey=None, reverse=None):
        '''Yield the playable files inside this directory, sorted as requested.'''
        parent_listdir = super(PlayableDirectory, self).listdir
        for child in parent_listdir(sortkey=sortkey, reverse=reverse):
            if PlayableFile.detect(child):
                yield child
def detect_playable_mimetype(path, os_sep=os.sep):
    '''
    Return the player-supported mimetype matching *path*'s file extension,
    or None when the extension is unknown or absent.
    '''
    filename = path.rsplit(os_sep)[-1]
    if '.' not in filename:
        return None
    extension = filename.rsplit('.')[-1]
    return PlayableBase.extensions.get(extension, None)
| 30.518072 | 79 | 0.588762 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.