id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
3365541 | # -*- coding: utf-8 -*-
import json
from unittest.mock import MagicMock, patch
from chaosaws.iam.actions import (attach_role_policy, create_policy,
detach_role_policy)
@patch('chaosaws.iam.actions.aws_client', autospec=True)
def test_create_policy(aws_client):
    """create_policy must forward the name, path, JSON-serialized document
    and an empty description to the underlying boto client."""
    iam_client = MagicMock()
    aws_client.return_value = iam_client

    policy_document = {
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Allow",
                "Action": "logs:CreateLogGroup",
                "Resource": "RESOURCE_ARN",
            }
        ],
    }

    create_policy("mypolicy", policy_document, "/user/Jon")

    iam_client.create_policy.assert_called_with(
        PolicyName="mypolicy",
        Path="/user/Jon",
        PolicyDocument=json.dumps(policy_document),
        Description="")
@patch('chaosaws.iam.actions.aws_client', autospec=True)
def test_attach_role_policy(aws_client):
    """attach_role_policy must pass the policy ARN and role name through."""
    iam_client = MagicMock()
    aws_client.return_value = iam_client

    policy_arn = "aws:iam:whatever"
    role_name = "somerole"
    attach_role_policy(policy_arn, role_name)

    iam_client.attach_role_policy.assert_called_with(
        PolicyArn=policy_arn, RoleName=role_name)
@patch('chaosaws.iam.actions.aws_client', autospec=True)
def test_detach_role_policy(aws_client):
    """detach_role_policy must pass the policy ARN and role name through."""
    iam_client = MagicMock()
    aws_client.return_value = iam_client

    policy_arn = "aws:iam:whatever"
    role_name = "somerole"
    detach_role_policy(policy_arn, role_name)

    iam_client.detach_role_policy.assert_called_with(
        PolicyArn=policy_arn, RoleName=role_name)
| StarcoderdataPython |
3374986 | # --------------
# Import packages
import numpy as np
import pandas as pd
from scipy.stats import mode

# NOTE(review): `path` is injected by the hosting exercise platform before this
# script runs; it is not defined anywhere in this file — confirm when reusing.
data = pd.read_csv(path)
bank = pd.DataFrame(data)

# Split the columns by dtype for a first inspection of the dataset.
categorical_var = bank.select_dtypes(include = 'object')
print(categorical_var)
numerical_var = bank.select_dtypes(include = 'number')
print(numerical_var)
# code starts here
# code ends here
# --------------
# code starts here
# Drop the identifier column: Loan_ID carries no predictive signal.
banks = bank.drop('Loan_ID', axis=1)
print(banks.isnull().sum())

# Column-wise modes, kept for inspection as in the original flow.
bank_mode = banks.mode()
print(bank_mode)

# Fill missing values of each affected column with that column's mode.
# A loop replaces the seven identical copy-pasted fillna statements.
for col in ['Gender', 'Married', 'Dependents', 'Self_Employed',
            'LoanAmount', 'Loan_Amount_Term', 'Credit_History']:
    banks[col] = banks[col].fillna(banks[col].mode()[0])

print(banks.isnull().sum())
print(banks)
#code ends here
# --------------
# Code starts here
# Mean LoanAmount for every (Gender, Married, Self_Employed) combination
# (pivot_table aggregates with the mean by default).
avg_loan_amount = banks.pivot_table(index = ['Gender','Married','Self_Employed'], values = 'LoanAmount')
# code ends here
# --------------
# code starts here
# Loan approvals among self-employed vs non-self-employed applicants.
loan_approved_se = len(banks[(banks['Self_Employed'] == 'Yes') & (banks['Loan_Status'] == 'Y')])
loan_approved_nse = len(banks[(banks['Self_Employed'] == 'No') & (banks['Loan_Status'] == 'Y')])

# Express both as a percentage of all applications. The original divided by a
# hard-coded row count (614); deriving it from the frame gives the identical
# result on the expected data while generalizing to any dataset size.
total_applications = len(banks)
percentage_se = loan_approved_se / total_applications * 100
percentage_nse = loan_approved_nse / total_applications * 100
# code ends here
# --------------
# code starts here
# Convert each term from months to years, then count the long-running loans.
loan_term = banks['Loan_Amount_Term'] / 12
big_loan_term = len(loan_term[loan_term >= 25])
print(loan_term)
# code ends here
# --------------
# code starts here
# Mean ApplicantIncome and Credit_History per Loan_Status group.
loan_groupby = banks.groupby('Loan_Status')
# Select the columns with a list: the original tuple-style indexing
# (groupby['a', 'b']) is deprecated and removed in modern pandas.
loan_groupby = loan_groupby[['ApplicantIncome', 'Credit_History']]
mean_values = loan_groupby.mean()
# code ends here
| StarcoderdataPython |
6703036 | <filename>exercicios/desafio 36.py
# Desafio 36: aprovação de empréstimo para a compra de uma casa.
v = float(input('Qual é o valor da casa? \n'))
s = float(input('Qual é o seu salário? \n'))
a = float(input('Vai pagar em quantos anos? \n'))

# Prestação mensal: valor da casa dividido pelo número total de meses.
p = v / (a * 12)

# Regra do exercício: negar o empréstimo se a prestação exceder 30% do
# salário. O código original comparava com (s + s*.3) — 130% do salário —
# o que aprovava praticamente qualquer pedido.
if p <= s * 0.30:
    print('a prestação será de R${:.2f}'.format(p))
else:
    print('Emprestimo negado')
1931660 | <gh_stars>1-10
'''
#Train a simple deep CNN on the CIFAR10 small images dataset using augmentation.
Using TensorFlow internal augmentation APIs by replacing ImageGenerator with
an embedded AugmentLayer using LambdaLayer, which is faster on GPU.
** Benchmark of `ImageGenerator`(IG) vs `AugmentLayer`(AL) both using augmentation
2D:**
(backend = Tensorflow-GPU, Nvidia Tesla P100-SXM2)
Epoch no. | IG %Accuracy | IG Performance | AL %Accuracy | AL Performance
---------:|---------------:|---------------:|--------------:|--------------:
1 | 44.84 | 15 ms/step | 45.54 | 358 us/step
2 | 52.34 | 8 ms/step | 50.55 | 285 us/step
8 | 65.45 | 8 ms/step | 65.59 | 281 us/step
25 | 76.74 | 8 ms/step | 76.17 | 280 us/step
100 | 78.81 | 8 ms/step | 78.70 | 285 us/step
Settings: horizontal_flip = True
Epoch no. | IG %Accuracy | IG Performance | AL %Accuracy | AL Performance
---------:|---------------:|---------------:|--------------:|--------------:
1 | 43.46 | 15 ms/step | 42.21 | 334 us/step
2 | 48.95 | 11 ms/step | 48.06 | 282 us/step
8 | 63.59 | 11 ms/step | 61.35 | 290 us/step
25 | 72.25 | 12 ms/step | 71.08 | 287 us/step
100 | 76.35 | 11 ms/step | 74.62 | 286 us/step
Settings: rotation = 30.0
(Corner process and rotation precision by `ImageGenerator` and `AugmentLayer`
are slightly different.)
'''
from __future__ import print_function
import keras
from keras.datasets import cifar10
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, Lambda, MaxPooling2D
from keras import backend as K
import os
if K.backend() != 'tensorflow':
raise RuntimeError('This example can only run with the '
'TensorFlow backend, '
'because it requires TF-native augmentation APIs')
import tensorflow as tf
def augment_2d(inputs, rotation=0, horizontal_flip=False, vertical_flip=False):
    """Apply additive augmentation on 2D data.

    # Arguments
        rotation: A float, the degree range for rotation (0 <= rotation < 180),
            e.g. 3 for random image rotation between (-3.0, 3.0).
        horizontal_flip: A boolean, whether to allow random horizontal flip,
            e.g. true for 50% possibility to flip image horizontally.
        vertical_flip: A boolean, whether to allow random vertical flip,
            e.g. true for 50% possibility to flip image vertically.

    # Returns
        input data after augmentation, whose shape is the same as its original.

    NOTE(review): relies on ``tf.contrib.image`` and ``tf.random_uniform``,
    which exist only in TensorFlow 1.x — this code does not run on TF 2.x.
    """
    # Projective transforms require float images.
    if inputs.dtype != tf.float32:
        inputs = tf.image.convert_image_dtype(inputs, dtype=tf.float32)
    with tf.name_scope('augmentation'):
        shp = tf.shape(inputs)
        batch_size, height, width = shp[0], shp[1], shp[2]
        width = tf.cast(width, tf.float32)
        height = tf.cast(height, tf.float32)
        # Each entry is a batch of 8-parameter projective transforms; they are
        # composed into a single transform at the end.
        transforms = []
        identity = tf.constant([1, 0, 0, 0, 1, 0, 0, 0], dtype=tf.float32)
        if rotation > 0:
            # Per-sample random angle in (-rotation, rotation), in radians.
            angle_rad = rotation * 3.141592653589793 / 180.0
            angles = tf.random_uniform([batch_size], -angle_rad, angle_rad)
            f = tf.contrib.image.angles_to_projective_transforms(angles,
                                                                 height, width)
            transforms.append(f)
        if horizontal_flip:
            # 50/50 coin flip per sample: mirror around the vertical axis.
            coin = tf.less(tf.random_uniform([batch_size], 0, 1.0), 0.5)
            shape = [-1., 0., width, 0., 1., 0., 0., 0.]
            flip_transform = tf.convert_to_tensor(shape, dtype=tf.float32)
            flip = tf.tile(tf.expand_dims(flip_transform, 0), [batch_size, 1])
            noflip = tf.tile(tf.expand_dims(identity, 0), [batch_size, 1])
            transforms.append(tf.where(coin, flip, noflip))
        if vertical_flip:
            # 50/50 coin flip per sample: mirror around the horizontal axis.
            coin = tf.less(tf.random_uniform([batch_size], 0, 1.0), 0.5)
            shape = [1., 0., 0., 0., -1., height, 0., 0.]
            flip_transform = tf.convert_to_tensor(shape, dtype=tf.float32)
            flip = tf.tile(tf.expand_dims(flip_transform, 0), [batch_size, 1])
            noflip = tf.tile(tf.expand_dims(identity, 0), [batch_size, 1])
            transforms.append(tf.where(coin, flip, noflip))
        if transforms:
            f = tf.contrib.image.compose_transforms(*transforms)
            inputs = tf.contrib.image.transform(inputs, f, interpolation='BILINEAR')
    return inputs
# Training hyper-parameters and output locations.
batch_size = 32
num_classes = 10
epochs = 100
num_predictions = 20
save_dir = '/tmp/saved_models'
model_name = 'keras_cifar10_trained_model.h5'

# The data, split between train and test sets:
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

# First layer runs TF-native augmentation inside the graph (see augment_2d);
# the rest is the stock CIFAR10 CNN from the Keras examples.
model = Sequential()
model.add(Lambda(augment_2d,
                 input_shape=x_train.shape[1:],
                 arguments={'rotation': 8.0, 'horizontal_flip': True}))
model.add(Conv2D(32, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))

# initiate RMSprop optimizer (old keras 1.x/2.x lowercase API)
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)

# Let's train the model using RMSprop
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])

# Scale pixel values into [0, 1].
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255

model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(x_test, y_test),
          shuffle=True)

# Save model and weights
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
model_path = os.path.join(save_dir, model_name)
model.save(model_path)
print('Saved trained model at %s ' % model_path)

# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
| StarcoderdataPython |
96492 | <reponame>evgeny-dmi3ev/js-services<filename>js_services/admin.py<gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import copy
from aldryn_apphooks_config.admin import BaseAppHookConfig, ModelAppHookConfig
from aldryn_people.models import Person
from aldryn_translation_tools.admin import AllTranslationsMixin
from cms.admin.placeholderadmin import FrontendEditableAdminMixin
from cms.admin.placeholderadmin import PlaceholderAdminMixin
from cms.utils.i18n import get_current_language, get_language_list
from cms.utils import copy_plugins, get_current_site
from django.db import transaction
from django.db.models.query import EmptyQuerySet
from django import forms
from django.conf.urls import url
from django.contrib import admin
from django.contrib.sites.models import Site
from django.forms import widgets
from django.views.decorators.http import require_POST
from django.utils.translation import ugettext_lazy as _
from django.utils.decorators import method_decorator
from django.utils.encoding import force_text
from django.core.exceptions import PermissionDenied
from django.http import (
HttpResponseRedirect,
HttpResponse,
Http404,
HttpResponseBadRequest,
HttpResponseForbidden,
)
from parler.admin import TranslatableAdmin
from parler.forms import TranslatableModelForm
try:
    from sortedm2m_filter_horizontal_widget.forms import SortedFilteredSelectMultiple
except ImportError:
    # Fall back to Django's stock widget. The original fallback assigned the
    # name FilteredSelectMultiple without ever importing it, so this branch
    # raised NameError instead of degrading gracefully; the bare ``except:``
    # also swallowed unrelated errors.
    from django.contrib.admin.widgets import FilteredSelectMultiple as SortedFilteredSelectMultiple
try:
    from js_custom_fields.forms import CustomFieldsFormMixin, CustomFieldsSettingsFormMixin
except ImportError:  # narrowed from a bare ``except:`` so real errors propagate
    class CustomFieldsFormMixin(object):
        """Fallback mixin: hides the ``custom_fields`` form field."""

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            if 'custom_fields' in self.fields:
                self.fields['custom_fields'].widget = forms.HiddenInput()

    class CustomFieldsSettingsFormMixin(object):
        """Fallback mixin: hides the ``custom_fields_settings`` form field."""

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            if 'custom_fields_settings' in self.fields:
                self.fields['custom_fields_settings'].widget = forms.HiddenInput()
from . import models
from .constants import (
SERVICES_SUMMARY_RICHTEXT,
SERVICES_HIDE_RELATED_SERVICES,
SERVICES_ENABLE_PUBDATE,
SERVICES_ENABLE_IMAGE,
IS_THERE_COMPANIES,
TRANSLATE_IS_PUBLISHED,
TRANSLATE_LAYOUT,
SERVICE_CUSTOM_FIELDS,
SERVICE_SECTION_CUSTOM_FIELDS,
SERVICE_LAYOUT_CHOICES,
)
if IS_THERE_COMPANIES:
from js_companies.models import Company
require_POST = method_decorator(require_POST)
def _set_flag(queryset, field, value):
    """Set boolean *field* to *value* on every object in *queryset*.

    When per-translation flags are enabled, the ``<field>_trans`` attribute
    must be assigned object by object so the active translation is saved;
    otherwise a single bulk UPDATE suffices. Consolidates the four previously
    copy-pasted admin actions.
    """
    if TRANSLATE_IS_PUBLISHED:
        for obj in queryset.all():
            setattr(obj, field + '_trans', value)
            obj.save()
    else:
        queryset.update(**{field: value})


def make_published(modeladmin, request, queryset):
    """Admin action: publish the selected services."""
    _set_flag(queryset, 'is_published', True)
make_published.short_description = _(
    "Mark selected services as published")


def make_unpublished(modeladmin, request, queryset):
    """Admin action: unpublish the selected services."""
    _set_flag(queryset, 'is_published', False)
make_unpublished.short_description = _(
    "Mark selected services as not published")


def make_featured(modeladmin, request, queryset):
    """Admin action: feature the selected services."""
    _set_flag(queryset, 'is_featured', True)
make_featured.short_description = _(
    "Mark selected services as featured")


def make_not_featured(modeladmin, request, queryset):
    """Admin action: unfeature the selected services."""
    _set_flag(queryset, 'is_featured', False)
make_not_featured.short_description = _(
    "Mark selected services as not featured")
class ServiceAdminForm(CustomFieldsFormMixin, TranslatableModelForm):
    """Admin form for ``Service``: wires up related/companies widgets and the
    per-section custom fields."""

    # Placeholder; replaced with a ModelMultipleChoiceField (or deleted) in
    # __init__ depending on whether js_companies is installed.
    companies = forms.CharField()
    layout = forms.ChoiceField(choices=SERVICE_LAYOUT_CHOICES, required=False)
    layout_trans = forms.ChoiceField(choices=SERVICE_LAYOUT_CHOICES, required=False)
    # Name of the method CustomFieldsFormMixin calls to collect field specs.
    custom_fields = 'get_custom_fields'

    class Meta:
        model = models.Service
        fields = '__all__'

    def __init__(self, *args, **kwargs):
        super(ServiceAdminForm, self).__init__(*args, **kwargs)
        qs = models.Service.objects
        #if self.instance.app_config_id:
        #qs = models.Service.objects.filter(
        #app_config=self.instance.app_config)
        #elif 'initial' in kwargs and 'app_config' in kwargs['initial']:
        #qs = models.Service.objects.filter(
        #app_config=kwargs['initial']['app_config'])
        # A service must not be related to itself.
        if self.instance.pk:
            qs = qs.exclude(pk=self.instance.pk)
        if 'related' in self.fields:
            self.fields['related'].queryset = qs
        # Don't allow app_configs to be added here. The correct way to add an
        # apphook-config is to create an apphook on a cms Page.
        #self.fields['app_config'].widget.can_add_related = False
        # Don't allow related articles to be added here.
        # doesn't makes much sense to add articles from another article other
        # than save and add another.
        if ('related' in self.fields and
                hasattr(self.fields['related'], 'widget')):
            self.fields['related'].widget.can_add_related = False
        # Plain textarea when rich-text summaries are disabled in settings.
        if not SERVICES_SUMMARY_RICHTEXT:
            self.fields['lead_in'].widget = widgets.Textarea()
            self.fields['lead_in'].help_text = """The Summary gives the reader
            the main idea of the service, this is useful in overviews, lists or
            as an introduction to your service."""
        if IS_THERE_COMPANIES:
            self.fields['companies'] = forms.ModelMultipleChoiceField(queryset=Company.objects.all(), required=False)# self.instance.companies
            self.fields['companies'].widget = SortedFilteredSelectMultiple()
            self.fields['companies'].queryset = Company.objects.all()
            if self.instance.pk and self.instance.companies.count():
                self.fields['companies'].initial = self.instance.companies.all()
        else:
            del self.fields['companies']

    def get_custom_fields(self):
        """Merge the global custom-field spec with each section's overrides.

        NOTE(review): ``fields`` aliases the module-level SERVICE_CUSTOM_FIELDS
        dict, so ``fields.update(...)`` mutates that shared constant across
        requests — presumably unintended; confirm and copy() if so.
        """
        fields = SERVICE_CUSTOM_FIELDS
        if self.instance and self.instance.pk:
            for section in self.instance.sections.all():
                if section.custom_fields_settings:
                    fields.update(section.custom_fields_settings)
        return fields
class ServiceAdmin(
    AllTranslationsMixin,
    PlaceholderAdminMixin,
    FrontendEditableAdminMixin,
    #ModelAppHookConfig,
    TranslatableAdmin
):
    """Admin for ``Service``: translated fields, optional per-translation
    flag columns, bulk publish/feature actions and a plugin copy-language
    endpoint."""

    search_fields = ['translations__title']
    app_config_attribute = 'sections'
    form = ServiceAdminForm
    list_display = ('title', 'slug', 'is_featured',
                    'is_published')
    list_filter = [
        'is_published',
        'is_featured',
        'sections',
        'categories',
    ]
    # Companies support is optional (only when js_companies is installed).
    if IS_THERE_COMPANIES:
        list_filter += (
            'companies',
        )
    actions = (
        make_featured, make_not_featured,
        make_published, make_unpublished,
    )
    # Main fieldset; optional fields appended depending on app settings.
    settings_fields = (
        'title',
        'is_published',
        'is_featured',
    )
    if SERVICES_ENABLE_PUBDATE:
        settings_fields += (
            'publishing_date',
        )
    if SERVICES_ENABLE_IMAGE:
        settings_fields += (
            'featured_image',
        )
    settings_fields += (
        'lead_in',
        'layout',
        'custom_fields',
    )
    advanced_settings_fields = (
        'categories',
    )
    if SERVICES_HIDE_RELATED_SERVICES == 0:
        advanced_settings_fields += (
            'related',
        )
    advanced_settings_fields += (
        'sections',
    )
    if IS_THERE_COMPANIES:
        advanced_settings_fields += (
            'companies',
        )
    fieldsets = (
        (None, {
            'fields': settings_fields
        }),
        (_('Categorisation'), {
            'classes': ('collapse',),
            'fields': advanced_settings_fields,
        }),
        (_('Meta Options'), {
            'classes': ('collapse',),
            'fields': (
                'slug',
                'meta_title',
                'meta_description',
                'meta_keywords',
                'show_on_sitemap',
                'show_on_xml_sitemap',
                'noindex',
                'nofollow',
                'canonical_url',
            )
        }),
    )
    filter_horizontal = [
        'categories',
        'sections',
    ]
    app_config_values = {
        'default_published': 'is_published'
    }
    app_config_selection_title = ''
    app_config_selection_desc = ''

    def get_list_display(self, request):
        """Swap flag columns for their ``*_trans`` variants when the flags
        are stored per translation."""
        fields = []
        list_display = super().get_list_display(request)
        for field in list_display:
            if field in ['is_published', 'is_featured'] and TRANSLATE_IS_PUBLISHED:
                field += '_trans'
            fields.append(field)
        return fields

    def get_fieldsets(self, request, obj=None):
        """Swap flag/layout fields for their translated variants when enabled."""
        fieldsets = super().get_fieldsets(request, obj)
        for fieldset in fieldsets:
            if len(fieldset) == 2 and 'fields' in fieldset[1]:
                fields = []
                for field in fieldset[1]['fields']:
                    if field in ['is_published', 'is_featured'] and TRANSLATE_IS_PUBLISHED:
                        field += '_trans'
                    if field == 'layout' and TRANSLATE_LAYOUT:
                        field += '_trans'
                    fields.append(field)
                fieldset[1]['fields'] = fields
        return fieldsets

    def formfield_for_manytomany(self, db_field, request=None, **kwargs):
        # Sortable multi-select widgets; sections exclude the default config.
        if db_field.name == 'related' and SERVICES_HIDE_RELATED_SERVICES == 0:
            kwargs['widget'] = SortedFilteredSelectMultiple('service')
        if db_field.name == 'companies':
            kwargs['widget'] = SortedFilteredSelectMultiple('company', False, attrs={'verbose_name_plural': 'companies'})
        if db_field.name == 'sections':
            kwargs["queryset"] = models.ServicesConfig.objects.exclude(namespace=models.ServicesConfig.default_namespace)
        return super(ServiceAdmin, self).formfield_for_manytomany(db_field, request, **kwargs)

    def get_form(self, request, obj=None, **kwargs):
        # NOTE(review): deliberately skips TranslatableAdmin's own get_form by
        # calling its parent in the MRO — presumably to bypass parler's form
        # machinery; confirm before changing.
        return super(TranslatableAdmin, self).get_form(request, obj, **kwargs)

    def save_model(self, request, obj, form, change):
        """Persist the object, then sync the companies m2m from form data."""
        super().save_model(request, obj, form, change)
        if IS_THERE_COMPANIES:
            obj.companies.set(Company.objects.filter(pk__in=form.cleaned_data.get('companies')))

    def get_site(self, request):
        """Return the site picked in the CMS admin session, else the current one."""
        site_id = request.session.get('cms_admin_site')
        if not site_id:
            return get_current_site()
        try:
            site = Site.objects._get_site_by_id(site_id)
        except Site.DoesNotExist:
            site = get_current_site()
        return site

    @require_POST
    @transaction.atomic
    def copy_language(self, request, obj_id):
        """Copy all placeholder plugins from one translation to another."""
        obj = self.get_object(request, object_id=obj_id)
        source_language = request.POST.get('source_language')
        target_language = request.POST.get('target_language')
        if not self.has_change_permission(request, obj=obj):
            raise PermissionDenied
        if obj is None:
            raise Http404
        if not target_language or not target_language in get_language_list(site_id=self.get_site(request).pk):
            return HttpResponseBadRequest(force_text(_("Language must be set to a supported language!")))
        for placeholder in obj.get_placeholders():
            plugins = list(
                placeholder.get_plugins(language=source_language).order_by('path'))
            if not placeholder.has_add_plugins_permission(request.user, plugins):
                return HttpResponseForbidden(force_text(_('You do not have permission to copy these plugins.')))
            copy_plugins.copy_plugins_to(plugins, placeholder, target_language)
        return HttpResponse("ok")

    def get_urls(self):
        """Expose the ``copy_language`` endpoint as an admin view."""
        urlpatterns = super().get_urls()
        opts = self.model._meta
        info = opts.app_label, opts.model_name
        return [url(
            r'^(.+)/copy_language/$',
            self.admin_site.admin_view(self.copy_language),
            name='{0}_{1}_copy_language'.format(*info)
        )] + urlpatterns
admin.site.register(models.Service, ServiceAdmin)
class ServicesConfigAdminForm(CustomFieldsFormMixin, CustomFieldsSettingsFormMixin, TranslatableModelForm):
    """Admin form for a services section (apphook config); exposes the
    section-level custom field definitions."""

    custom_fields = SERVICE_SECTION_CUSTOM_FIELDS
class ServicesConfigAdmin(
    AllTranslationsMixin,
    PlaceholderAdminMixin,
    BaseAppHookConfig,
    TranslatableAdmin
):
    """Admin for the services apphook configuration objects."""

    form = ServicesConfigAdminForm

    def get_config_fields(self):
        """Fields shown for the apphook config (``config.<name>`` entries are
        stored in the config's app_data)."""
        return (
            'app_title', 'allow_post', 'permalink_type', 'non_permalink_handling',
            'template_prefix', 'paginate_by', 'pagination_pages_start',
            'pagination_pages_visible', 'exclude_featured',
            'search_indexed', 'config.default_published',
            'custom_fields_settings', 'custom_fields')

    #def get_readonly_fields(self, request, obj=None):
    #return self.readonly_fields
admin.site.register(models.ServicesConfig, ServicesConfigAdmin)
| StarcoderdataPython |
9789549 | <reponame>Horta/pandas-plink<filename>build_ext.py
# cffi build script for the pandas-plink BED reader/writer extension.
import os
from os.path import join

from cffi import FFI

ffibuilder = FFI()
ffibuilder.set_unicode(False)

folder = os.path.dirname(os.path.abspath(__file__))


def _read(name):
    """Return the text of a file inside the pandas_plink package dir."""
    with open(join(folder, "pandas_plink", name), "r") as handle:
        return handle.read()


# Declare both C interfaces, then compile both implementations together.
ffibuilder.cdef(_read("_bed_reader.h"))
reader_file = _read("_bed_reader.c")
ffibuilder.cdef(_read("_bed_writer.h"))
writer_file = _read("_bed_writer.c")

c_content = f"{reader_file}\n{writer_file}"
ffibuilder.set_source("pandas_plink.bed_reader", c_content, language="c")

if __name__ == "__main__":
    ffibuilder.compile(verbose=True)
| StarcoderdataPython |
370402 | <reponame>freshjang/MyCoin
import os
import sys
import psutil
import sqlite3
import numpy as np
import pandas as pd
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from utility.setting import columns_gj1, db_stg, ui_num
from utility.static import now, timedelta_sec, thread_decorator, strf_time, timedelta_hour
class Strategy:
    """Day-trading strategy worker: consumes tick/order events from ``stgQ``
    and emits buy/sell orders to ``workerQ`` plus UI updates to ``windowQ``.

    NOTE: the Korean string keys below were reconstructed from an
    encoding-damaged dump; the actual buy/sell decision logic is redacted
    ("전략 비공개") in the published source.
    """

    def __init__(self, windowQ, workerQ, queryQ, stgQ):
        self.windowQ = windowQ
        self.workerQ = workerQ
        self.queryQ = queryQ
        self.stgQ = stgQ
        self.list_buy = []    # tickers with an in-flight buy order
        self.list_sell = []   # tickers with an in-flight sell order
        self.dict_csan = {}   # key: ticker code, value: datetime (used by the redacted strategy)
        self.dict_gsjm = {}   # key: ticker code, value: rolling watch-list DataFrame
        # Strategy parameters, loaded from the settings table in Start().
        # Suffix 1 = early-session (09:00-10:00) values, 2 = mid-session.
        self.dict_intg = {
            '체결강도차이1': 0.,
            '평균시간1': 0,
            '거래대금차이1': 0,
            '체결강도하한1': 0.,
            '누적거래대금하한1': 0,
            '등락율하한1': 0.,
            '등락율상한1': 0.,
            '청산수익률1': 0.,
            '체결강도차이2': 0.,
            '평균시간2': 0,
            '거래대금차이2': 0,
            '체결강도하한2': 0.,
            '누적거래대금하한2': 0,
            '등락율하한2': 0.,
            '등락율상한2': 0.,
            '청산수익률2': 0.,
            '스레드': 0,
            '씨피유': 0.,
            '메모리': 0.
        }
        # Throttle timestamps for the two periodic UI updates.
        self.dict_time = {
            '관심종목': now(),
            '부가정보': now()
        }
        self.Start()

    def Start(self):
        """Load strategy settings from sqlite, then run the event loop forever."""
        con = sqlite3.connect(db_stg)
        df = pd.read_sql('SELECT * FROM setting', con)
        df = df.set_index('index')
        self.windowQ.put([ui_num['단타설정'], df])
        self.dict_intg['체결강도차이1'] = df['체결강도차이1'][0]
        self.dict_intg['평균시간1'] = df['평균시간1'][0]
        self.dict_intg['거래대금차이1'] = df['거래대금차이1'][0]
        self.dict_intg['체결강도하한1'] = df['체결강도하한1'][0]
        self.dict_intg['누적거래대금하한1'] = df['누적거래대금하한1'][0]
        self.dict_intg['등락율하한1'] = df['등락율하한1'][0]
        self.dict_intg['등락율상한1'] = df['등락율상한1'][0]
        self.dict_intg['청산수익률1'] = df['청산수익률1'][0]
        self.dict_intg['체결강도차이2'] = df['체결강도차이2'][0]
        self.dict_intg['평균시간2'] = df['평균시간2'][0]
        self.dict_intg['거래대금차이2'] = df['거래대금차이2'][0]
        self.dict_intg['체결강도하한2'] = df['체결강도하한2'][0]
        self.dict_intg['누적거래대금하한2'] = df['누적거래대금하한2'][0]
        self.dict_intg['등락율하한2'] = df['등락율하한2'][0]
        self.dict_intg['등락율상한2'] = df['등락율상한2'][0]
        self.dict_intg['청산수익률2'] = df['청산수익률2'][0]
        con.close()
        while True:
            # Event tuples are dispatched by length: 2 = list update,
            # 12 = tick data for the buy side, 5 = position data for the sell side.
            data = self.stgQ.get()
            if len(data) == 2:
                self.UpdateList(data[0], data[1])
            elif len(data) == 12:
                self.BuyStrategy(data[0], data[1], data[2], data[3], data[4], data[5], data[6],
                                 data[7], data[8], data[9], data[10], data[11])
            elif len(data) == 5:
                self.SellStrategy(data[0], data[1], data[2], data[3], data[4])
            # Push the watch-list to the UI at most once per second.
            if now() > self.dict_time['관심종목'] and len(self.dict_gsjm) > 0:
                self.windowQ.put([ui_num['관심종목'], self.dict_gsjm])
                self.dict_time['관심종목'] = timedelta_sec(1)
            # Refresh process stats every two seconds.
            if now() > self.dict_time['부가정보']:
                self.UpdateInfo()
                self.dict_time['부가정보'] = timedelta_sec(2)

    def UpdateList(self, gubun, tickers):
        """Handle watch-list resets, session-phase switches and order
        completion notifications (``gubun`` is the command string)."""
        if '관심종목초기화' in gubun:
            # Rebuild every ticker's rolling frame, sized for the current session phase.
            self.dict_gsjm = {}
            time = 1 if 90000 < int(strf_time('%H%M%S', timedelta_hour(-9))) <= 1000000 else 2
            for ticker in tickers:
                data = np.zeros((self.dict_intg[f'평균시간{time}'] + 2, len(columns_gj1))).tolist()
                df = pd.DataFrame(data, columns=columns_gj1)
                df['체결시간'] = strf_time('%H%M%S', timedelta_hour(-9))
                self.dict_gsjm[ticker] = df.copy()
            self.windowQ.put([ui_num['관심종목'] + 100, self.dict_gsjm])
        elif '장초단타전략시작' in gubun:
            # Early-session strategy start: reset frames sized for phase 1.
            data = np.zeros((self.dict_intg['평균시간1'] + 2, len(columns_gj1))).tolist()
            df = pd.DataFrame(data, columns=columns_gj1)
            df['체결시간'] = '090000'
            for ticker in list(self.dict_gsjm.keys()):
                self.dict_gsjm[ticker] = df.copy()
            self.windowQ.put([ui_num['관심종목'] + 100, self.dict_gsjm])
        elif '장중단타전략시작' in gubun:
            # Mid-session strategy start: reset frames sized for phase 2.
            data = np.zeros((self.dict_intg['평균시간2'] + 2, len(columns_gj1))).tolist()
            df = pd.DataFrame(data, columns=columns_gj1)
            df['체결시간'] = '100000'
            for ticker in list(self.dict_gsjm.keys()):
                self.dict_gsjm[ticker] = df.copy()
            self.windowQ.put([ui_num['관심종목'] + 100, self.dict_gsjm])
        elif gubun == '매수완료':
            # Buy filled: allow new buy orders for this ticker again.
            if tickers in self.list_buy:
                self.list_buy.remove(tickers)
        elif gubun == '매도완료':
            # Sell filled: allow new sell orders for this ticker again.
            if tickers in self.list_sell:
                self.list_sell.remove(tickers)

    def BuyStrategy(self, ticker, c, h, low, per, dm, bid, ask, t, uuidnone, injango, batting):
        """Update the ticker's rolling stats from one tick and decide whether
        to place a buy order (decision logic itself is redacted)."""
        if ticker not in self.dict_gsjm.keys():
            return
        # tn selects the parameter set for the current session phase.
        tn = 1 if 90000 < int(strf_time('%H%M%S', timedelta_hour(-9))) <= 1000000 else 2
        hlm = round((h + low) / 2)                 # midpoint of high/low
        hlmp = round((c / hlm - 1) * 100, 2)       # % distance of close from midpoint
        predm = self.dict_gsjm[ticker]['누적거래대금'][1]
        sm = 0 if predm == 0 else int(dm - predm)  # traded value since previous tick
        try:
            ch = round(bid / ask * 100, 2)         # bid/ask strength ratio
        except ZeroDivisionError:
            ch = 500.
        # Shift the rolling window down one row; row 0 becomes the newest tick.
        self.dict_gsjm[ticker] = self.dict_gsjm[ticker].shift(1)
        if len(self.dict_gsjm[ticker]) == self.dict_intg[f'평균시간{tn}'] + 2 and \
                self.dict_gsjm[ticker]['체결강도'][self.dict_intg[f'평균시간{tn}']] != 0.:
            # Window full: keep the aggregate row (mean/max) at the last index.
            avg_sm = int(self.dict_gsjm[ticker]['거래대금'][1:self.dict_intg[f'평균시간{tn}'] + 1].mean())
            avg_ch = round(self.dict_gsjm[ticker]['체결강도'][1:self.dict_intg[f'평균시간{tn}'] + 1].mean(), 2)
            high_ch = round(self.dict_gsjm[ticker]['체결강도'][1:self.dict_intg[f'평균시간{tn}'] + 1].max(), 2)
            self.dict_gsjm[ticker].at[self.dict_intg[f'평균시간{tn}'] + 1] = 0., 0., avg_sm, 0, avg_ch, high_ch, t
        self.dict_gsjm[ticker].at[0] = per, hlmp, sm, dm, ch, 0., t
        # Guards: window not yet full, order already pending, already holding,
        # or previous order uuid still outstanding.
        if self.dict_gsjm[ticker]['체결강도'][self.dict_intg[f'평균시간{tn}']] == 0:
            return
        if ticker in self.list_buy:
            return
        if injango:
            return
        if not uuidnone:
            return
        # Strategy not disclosed (redacted by the author).
        oc = int(batting / c)  # order quantity from the per-trade budget
        if oc > 0:
            self.list_buy.append(ticker)
            self.workerQ.put(['매수', ticker, c, oc])

    def SellStrategy(self, ticker, sp, jc, ch, c):
        """Decide whether to liquidate a held position (logic redacted)."""
        if ticker in self.list_sell:
            return
        oc = 0
        # tn selects the phase parameter set; used by the redacted logic below.
        tn = 1 if 90000 < int(strf_time('%H%M%S', timedelta_hour(-9))) <= 1000000 else 2
        # Strategy not disclosed (redacted by the author).
        if oc > 0:
            self.list_sell.append(ticker)
            self.workerQ.put(['매도', ticker, c, oc])

    @thread_decorator
    def UpdateInfo(self):
        """Push current process stats to the UI, then refresh them (threaded)."""
        info = [2, self.dict_intg['메모리'], self.dict_intg['스레드'], self.dict_intg['씨피유']]
        self.windowQ.put(info)
        self.UpdateSysinfo()

    def UpdateSysinfo(self):
        """Sample this process's memory, thread count and CPU usage via psutil."""
        p = psutil.Process(os.getpid())
        self.dict_intg['메모리'] = round(p.memory_info()[0] / 2 ** 20.86, 2)
        self.dict_intg['스레드'] = p.num_threads()
        # interval=2 blocks for the sample; halved — presumably per-core scaling.
        self.dict_intg['씨피유'] = round(p.cpu_percent(interval=2) / 2, 2)
| StarcoderdataPython |
8044711 | from imblearn.under_sampling import AllKNN
from imblearn.over_sampling import SMOTE
from imblearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder, MinMaxScaler, RobustScaler, StandardScaler, Normalizer, PowerTransformer, QuantileTransformer
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer, KNNImputer
class PipelineGenerator(object):
    """Translate data/preprocess config objects into imblearn pipeline steps.

    ``data_config`` supplies ``cat_features`` / ``num_features`` column lists;
    ``preprocess_config`` names the imputers, scaler, encoder and resampling
    strategy to use (any attribute may be falsy to skip that step).
    """

    def __init__(self,
                 data_config,
                 preprocess_config
                 ):
        self.dc = data_config
        self.pc = preprocess_config
        # Registry mapping config names to ready-made transformer instances.
        self._maps_pc = {
            "cat_imputer": {
                "SimpleImputer": SimpleImputer(strategy="most_frequent")
            },
            "num_imputer": {
                "SimpleImputer": SimpleImputer(strategy="mean"),
                "KNNImputer": KNNImputer()
            },
            "scaler": {
                "StandardScaler": StandardScaler(),
                "MinMaxScaler": MinMaxScaler(),
                "RobustScaler": RobustScaler(),
                "QuantileTransformer": QuantileTransformer(),
                # NOTE(review): box-cox requires strictly positive inputs —
                # confirm upstream data, or 'yeo-johnson' may be intended.
                "PowerTransformer": PowerTransformer(method='box-cox'),
                "Normalizer": Normalizer()
            },
            "encoder": {
                "OrdinalEncoder": OrdinalEncoder(),
                "OneHotEncoder": OneHotEncoder(handle_unknown="ignore")
            },
            "resample": {
                "oversampling": SMOTE(),
                "undersampling": AllKNN()
            }
        }

    def preprocess_pipe(self):
        """Build a ColumnTransformer with the configured per-dtype sub-pipelines,
        or return None when no preprocessing step is configured."""
        cat_pipe = []
        num_pipe = []
        preprocess = None
        if self.pc.cat_imputer:
            cat_pipe.append(("imputer", self._maps_pc["cat_imputer"][self.pc.cat_imputer]))
        if self.pc.num_imputer:
            num_pipe.append(("imputer", self._maps_pc["num_imputer"][self.pc.num_imputer]))
        if self.pc.scaler:
            num_pipe.append(("scaler", self._maps_pc["scaler"][self.pc.scaler]))
        if self.pc.encoder:
            cat_pipe.append(("encoder", self._maps_pc["encoder"][self.pc.encoder]))
        pre = []
        if len(cat_pipe) > 0:
            pre.append(("cat", Pipeline(cat_pipe), self.dc.cat_features))
        if len(num_pipe) > 0:
            pre.append(("num", Pipeline(num_pipe), self.dc.num_features))
        if len(pre) > 0:
            preprocess = ColumnTransformer(pre)
        return preprocess

    def generate_pipe(self):
        """Return the (name, step) pairs for the final imblearn Pipeline:
        optional preprocessing followed by optional resampling."""
        pipelines = []
        preprocess = self.preprocess_pipe()
        if preprocess:
            pipelines.append(("preprocess", preprocess))
        if self.pc.resample:
            pipelines.append(("resample", self._maps_pc["resample"][self.pc.resample]))
        return pipelines
| StarcoderdataPython |
6602284 | <gh_stars>1-10
import pytest
import numpy as np
from .tin import *
def _pyramid_points():
    """Vertices of a unit-square base with an apex at (0, 0, 0.5).

    Shared by all three fixtures; the original repeated this array verbatim
    in TIN1, TIN2 and TIN3.
    """
    return np.array([
        [-0.5, -0.5, 0],
        [ 0.5, -0.5, 0],
        [ 0.5,  0.5, 0],
        [-0.5,  0.5, 0],
        [ 0,    0,   0.5]
    ])


def TIN1():
    """Plain pyramid TIN."""
    return TIN('TIN1', _pyramid_points())


def TIN2():
    """Pyramid TIN whose simplices are filtered by a small max edge length."""
    return TIN('TIN2', _pyramid_points(), max_edge=0.1)


def TIN3():
    """Pyramid TIN with a circular breakline of radius 0.5 in the base plane."""
    # Breakline: 10 samples around the circle (first and last point coincide).
    b = np.linspace(0, 2*np.pi, 10)
    b = 0.5 * np.column_stack([np.cos(b), np.sin(b), np.zeros(len(b))])
    return TIN('TIN3', _pyramid_points(), [b], step=100)
def test_repr():
t = TIN1()
repr(t)
def test_elevation():
t = TIN1()
p = [(0, 0, 1), (0, 1, 0), (0, 0.5, 0.5), (0, 0.25, 0.25)]
zs = [t.elevation(x) for x in p]
np.testing.assert_equal(zs, [0.5, np.nan, 0, 0.25])
def test_remove_simplices():
t = TIN2()
assert t.tri.simplices.shape[0] == 0
def test_breakpoints():
t = TIN3()
b = np.array(sorted(map(tuple, t.breaklines[0])))
bp = np.array(sorted(map(tuple, t.breakpoints())))
assert pytest.approx(bp) == b
def test_query_distances():
t = TIN1()
p = [(0, 0, 1), (0, 1, 0), (0, 0.5, 0.5), (0, 0.25, 0.25)]
dist = []
tin = []
for x in p:
a, b = t.query_distances(x, 5)
dist.append(a[0])
tin.append(b[0])
dist = np.array(dist)
tin = np.array(tin)
a = np.array([0.5, 0.5, 0.353553391, 0])
b = np.array([(0, 0, 0.5), (0, 0.5, 0), (0, 0.25, 0.25), (0, 0.25, 0.25)])
assert pytest.approx(dist) == a
assert pytest.approx(tin) == b
def test_plot_surface_3d():
t = TIN1()
t.plot_surface_3d()
def test_plot_surface_2d():
t = TIN1()
t.plot_surface_2d()
def test_plot_contour_2d():
    """Smoke test: 2-D contour plot must not raise."""
    t = TIN1()
    t.plot_contour_2d()
| StarcoderdataPython |
3501481 | <reponame>erocs/AOC
# Advent-of-Code password puzzle input (Python 2 file: uses xrange).
inp = 'hepxcrrq'
# Alphabet with the forbidden letters i, l, o removed, so increments
# automatically skip them.
alpbet = 'abcdefghjkmnpqrstuvwxyz'
# Successor table: each letter maps to the next letter of the
# restricted alphabet; 'z' wraps to 'a'.
_inc = {alpbet[i]:alpbet[i+1] for i in xrange(len(alpbet)-1)}
_inc['z'] = 'a'
# Letters that may not appear in a valid password.
invld = 'iol'
def valid(s):
    """Return truthy if *s* is a valid 8-character password: it needs two
    different non-overlapping letter pairs and one straight of three
    increasing letters (checked as a substring of the restricted
    alphabet, so a straight can never contain i, l or o)."""
    if len(s) != 8:
        return False
    l1 = len(s) - 1
    l2 = len(s) - 2
    p1 = None   # letter of the first pair found
    p2 = None   # letter of a second, different pair
    trip = False
    for i in xrange(len(s)):
        # Invalid-letter check disabled: incr() never produces i/l/o.
#        if s[i] in invld:
#            return False
        if i < l1:
            if not p2 and s[i] == s[i + 1]:
                if not p1:
                    p1 = s[i]
                elif p1 != s[i]:
                    p2 = s[i]
        if not trip and i < l2 and s[i:i+3] in alpbet:
            trip = True
    return p1 and p2 and trip
def incr(s):
    """Return the password following *s*: increment it like a base-23
    number over the restricted alphabet, rightmost character first
    (Python 2 file)."""
    carry = 1
    res = []
    for i in xrange(len(s)-1, -1, -1):
        if carry:
            ch = _inc[s[i]]
        else:
            ch = s[i]
        res.append(ch)
        # The carry only keeps propagating while increments wrap to 'a'.
        if ch != 'a':
            carry = 0
    # Characters were collected right-to-left; reverse before joining.
    return ''.join(res[::-1])
def solve():
    """Print the next two valid passwords after the puzzle input
    (Python 2: uses the print statement)."""
    c = 0
    s = inp
    while True:
        s = incr(s)
        if valid(s):
            print s
            c += 1
            if c == 2:
                break
# Run the solver when this file is executed directly.
if __name__ == '__main__':
    solve()
# hepxxyzz
# heqaabcc
| StarcoderdataPython |
6569835 | import time
from itertools import chain
import torch
from addict import Dict
from torch import nn
from codes.model.base_model import BaseModel
from codes.model.imagination_model.util import get_component, merge_first_and_second_dim, \
unmerge_first_and_second_dim, sample_zt_from_distribution, clamp_mu_logsigma
from codes.utils.util import get_product_of_iterable, log_pdf
class Model(BaseModel):
    """Learning to Query w/o Imitation Learning
    This model uses the observation-dependent path"""

    def __init__(self, config):
        """Build the sub-modules: convolutional encoder/decoder, the
        stochastic state-transition model, prior and posterior networks,
        plus the optional consistency and imitation-learning models."""
        super(Model, self).__init__(config=config)
        self.convolutional_encoder = get_component("convolutional_encoder", config)
        self.state_transition_model = get_component("stochastic_state_transition_model", config)
        self.convolutional_decoder = get_component("stochastic_convolutional_decoder", config)
        self.prior_model = get_component("prior_model", config)
        self.posterior_model = get_component("posterior_model", config)
        # The consistency model is only used when its loss weight (alpha)
        # is non-zero.
        self.use_consistency_model = False
        if(self.config.model.imagination_model.consistency_model.alpha!=0.0):
            self.use_consistency_model = True
        _consistency_model_name = self.config.model.imagination_model.consistency_model.name
        # Metric-based consistency models (euclidean/cosine) receive
        # merged (batch*seq) tensors; other models receive unmerged ones.
        self.is_consistency_model_metric_based = False
        if _consistency_model_name in ("euclidean", "cosine"):
            self.is_consistency_model_metric_based = True
        self.consistency_model = get_component("consistency_model.{}".format(_consistency_model_name), config)
        self.use_imitation_learning_model = False
        if self.config.model.imagination_model.imitation_learning_model.should_train:
            self.use_imitation_learning_model = True
        if(self.use_imitation_learning_model):
            self.imitation_learning_model = get_component(
                "imitation_learning_model.{}".format(
                    self.config.model.imagination_model.imitation_learning_model.name),
                config)

    def encode_obs(self, obs):
        """Encode observations and mean-pool over the frame dimension.

        NOTE(review): assumes ``obs`` is
        (batch, trajectory, frames, C, H, W) -- the last three dims are
        passed to the convolutional encoder as one image; confirm with
        the data loader.  Returns (encodings, trajectory_length).
        """
        obs_shape = obs.shape
        per_image_shape = obs_shape[-3:]
        batch_size = obs_shape[0]
        trajectory_length = obs_shape[1]
        num_frames = obs_shape[2]
        h_t = self.convolutional_encoder(obs.view(-1, *per_image_shape)).view(batch_size, trajectory_length, num_frames, -1)
        # Average the per-frame encodings into a single vector per step.
        h_t = torch.mean(h_t, dim=2)
        return h_t, trajectory_length

    def decode_obs(self, output, trajectory_length):
        """Decode flattened latent vectors back into a
        (batch, trajectory, *image) tensor."""
        reconstructed_obs = self.convolutional_decoder(output)
        per_image_shape = reconstructed_obs.shape[-3:]
        batch_size = int(reconstructed_obs.shape[0] / trajectory_length)
        return reconstructed_obs.view(batch_size, trajectory_length, *per_image_shape)

    def forward(self, x):
        """Training forward pass, written as a generator.

        Yields one Dict per optimization phase, in order:
        (optional) imitation_learning, open_loop, close_loop and
        (optional) discriminator.  Each yielded Dict carries at least
        ``loss``, ``description`` and ``retain_graph``.
        """
        # note that x is same as x_(t-1)
        sequence_length = self.config.dataset.sequence_length
        imagination_length = self.config.dataset.imagination_length
        h, _ = self.encode_obs(obs=x.obs)
        output_obs = x.next_obs
        output_obs_encoding, _ = self.encode_obs(obs=output_obs.unsqueeze(2))
        output_obs_encoding = output_obs_encoding  # no-op; kept as written
        action = x.action
        open_loop_data = Dict()
        # Preparing input for open_loop by using a separate namespace
        # called "input".
        index_to_select_till = sequence_length + imagination_length
        open_loop_data.input = Dict()
        open_loop_data.input.unroll_length = index_to_select_till
        open_loop_data.input.output_obs_encoding = output_obs_encoding[:, :index_to_select_till, :]
        open_loop_data.input.output_obs = output_obs[:, :index_to_select_till, :]
        if (self.use_imitation_learning_model):
            # Only the first action is given; subsequent actions are
            # predicted by the imitation-learning model.
            open_loop_data.input.action = action[:, 0, :]
        else:
            open_loop_data.input.action = action[:, :index_to_select_till+1, :]
        open_loop_data.input.h_t = h[:, 0, :]
        open_loop_data, imitation_learning_data = self._vectorized_open_loop_prediction(open_loop_data)
        if(self.use_imitation_learning_model):
            imitation_learning_data.action = action
            imitation_learning_output = self._prepare_imitation_learning_result_to_yield(
                self._imitation_learning_prediction(imitation_learning_data))
            yield imitation_learning_output
            del imitation_learning_output
        to_yield = Dict()
        to_yield.loss = open_loop_data.output.loss
        to_yield.retain_graph = False
        to_yield.description = "open_loop"
        yield to_yield
        del to_yield
        close_loop_data = Dict()
        close_loop_data.input = Dict()
        # Note that this is not a bug, we are purposefully not making the
        # prediction over the entire sequence length.
        if(sequence_length >= 2*imagination_length):
            close_loop_data.input.sequence_length = 2 * imagination_length
        elif(sequence_length >= imagination_length):
            close_loop_data.input.sequence_length = imagination_length
        else:
            close_loop_data.input.sequence_length = 1
        close_loop_data.input.imagination_length = imagination_length
        index_to_start_with = max(0, sequence_length - close_loop_data.input.sequence_length)
        index_to_select_till = index_to_start_with + close_loop_data.input.sequence_length + close_loop_data.input.imagination_length
        close_loop_data.input.h_t = unmerge_first_and_second_dim(
            open_loop_data.h_t,
            first_dim=action.shape[0])[:, index_to_start_with:index_to_select_till, :]
        close_loop_data.input.action = action[:, index_to_start_with:index_to_select_till, :]
        close_loop_data.input.output_obs = output_obs[:, index_to_start_with:index_to_select_till, :]
        close_loop_output, discriminator_output = self._vectorized_closed_loop_prediction(close_loop_data)
        output = Dict()
        output.open_loop = open_loop_data.output
        output.close_loop = close_loop_output
        output.reporting_metrics.log_likelihood = close_loop_output.likelihood.item()
        output.reporting_metrics.consistency_loss = close_loop_output.consistency_loss.item()
        output.discriminator = discriminator_output
        alpha = self.config.model.imagination_model.consistency_model.alpha
        loss_tuple = (output.close_loop.loss + alpha * output.close_loop.consistency_loss,
                      output.discriminator.loss)
        # Drop losses that are not part of the graph (e.g. the zero
        # placeholder used when the consistency model is disabled).
        loss_tuple = tuple(filter(lambda _loss: _loss.requires_grad, loss_tuple))
        to_yield = Dict()
        to_yield.loss = loss_tuple[0]
        to_yield.imagination_log_likelihood = output.reporting_metrics.log_likelihood
        to_yield.consistency_loss = output.reporting_metrics.consistency_loss
        to_yield.retain_graph = False
        to_yield.description = "close_loop"
        if (len(loss_tuple) == 1):
            yield to_yield
        else:
            # A discriminator loss follows, so the graph must be kept
            # alive across the first backward pass.
            to_yield.retain_graph = True
            yield to_yield
            to_yield = Dict()
            to_yield.loss = loss_tuple[1]
            to_yield.discriminator_loss = loss_tuple[1].item()
            to_yield.retain_graph = False
            to_yield.description = "discriminator"
            yield to_yield

    def _imitation_learning_prediction(self, imitation_learning_data):
        """Compute the (weighted) imitation-learning loss between the
        predicted actions and the ground-truth actions shifted by one."""
        true_output = merge_first_and_second_dim(imitation_learning_data.action[:,1:,:].contiguous())
        predicted_output = imitation_learning_data.prediction
        imitation_learning_output = Dict()
        loss = self.imitation_learning_model.loss(predicted_output, true_output)
        imitation_learning_output.loss = self.config.model.imagination_model.imitation_learning_model.alpha * loss
        imitation_learning_output.imitation_learning_loss = loss.item()
        return imitation_learning_output

    def _prepare_imitation_learning_result_to_yield(self, imitation_learning_output):
        """Attach the bookkeeping fields the training loop expects."""
        imitation_learning_output.retain_graph = True
        imitation_learning_output.imagination_log_likelihood = 0.0
        imitation_learning_output.description = "imitation_learning"
        return imitation_learning_output

    def _vectorized_open_loop_prediction(self, open_loop_data):
        # This is a simple implementation of the open loop prediction. This function pulls some operations outside the
        # for-loop and vectorizes them. This is meant as the primary function for doing open-loop prediction.
        # Open loop
        unroll_length = open_loop_data.input.unroll_length
        output_obs_encoding = open_loop_data.input.output_obs_encoding
        output_obs = open_loop_data.input.output_obs
        action = open_loop_data.input.action
        if(self.use_imitation_learning_model):
            # First action
            a_t = action
        else:
            a_t = action[:, 0, :]
        # First state encoding
        h_t = open_loop_data.input.h_t
        self.state_transition_model.set_state(h_t)
        # Note that this datastructure is used as a container for variables to track. It helps to avoid writing multiple
        # statements.
        temp_data = Dict()
        vars_to_track = ["h_t", "z_t", "posterior_mu", "posterior_sigma"]
        if(self.use_imitation_learning_model):
            vars_to_track.append("a_t")
        for name in vars_to_track:
            key = name + "_list"
            temp_data[key] = []
        for t in range(0, unroll_length):
            current_output_obs_encoding = output_obs_encoding[:, t, :]
            # Posterior conditions on the *next* observation encoding.
            posterior = self.sample_zt_from_posterior(h=h_t, a=a_t, o=current_output_obs_encoding)
            z_t = posterior.z_t
            inp = torch.cat((z_t, a_t), dim=1)
            h_t = self.state_transition_model(inp.unsqueeze(1)).squeeze(1)
            if(self.use_imitation_learning_model):
                a_t = self.imitation_learning_model(torch.cat((h_t, z_t), dim=1))
            else:
                a_t = action[:, t+1, :]
            posterior_mu = posterior.mu
            posterior_sigma = posterior.sigma
            for name in vars_to_track:
                key = name + "_list"
                # eval(name) reads the local variable with that name so
                # all tracked tensors can be appended in one loop.
                temp_data[key].append(eval(name).unsqueeze(1))
        for name in vars_to_track:
            key = name + "_list"
            temp_data[name] = merge_first_and_second_dim(torch.cat(temp_data[key], dim=1))
        if not self.use_imitation_learning_model:
            temp_data.a_t = merge_first_and_second_dim(action[:, :unroll_length, :].contiguous())
        temp_data.prior = self.sample_zt_from_prior(
            h=temp_data.h_t,
            a=temp_data.a_t)
        likelihood_mu, likelihood_sigma = self.convolutional_decoder(
            torch.cat((temp_data.h_t,
                       temp_data.z_t), dim=1))
        # ELBO = E[log p(o|h,z)] + E[log p(z)] - E[log q(z|h,a,o)]
        elbo_prior = log_pdf(temp_data.z_t, temp_data.prior.mu, temp_data.prior.sigma)
        elbo_q_likelihood = log_pdf(temp_data.z_t, temp_data.posterior_mu,
                                    temp_data.posterior_sigma)
        elbo_likelihood = log_pdf(merge_first_and_second_dim(output_obs.contiguous()),
                                  likelihood_mu, likelihood_sigma)
        elbo = sum([torch.mean(x) for x in (
            elbo_likelihood, elbo_prior, -elbo_q_likelihood)])
        open_loop_data.output = Dict()
        open_loop_data.output.loss = -elbo
        open_loop_data.output.log_likelihood = torch.mean(elbo_likelihood)
        # Detach so the close-loop phase does not backprop through the
        # open-loop graph.
        open_loop_data.h_t = temp_data.h_t.detach()
        imitation_learning_data = Dict()
        imitation_learning_data.prediction = temp_data.a_t
        return open_loop_data, imitation_learning_data

    def _vectorized_closed_loop_prediction(self, close_loop_data):
        # This is a simple implementation of the close loop prediction. This function pulls some operations outside the
        # for-loop and vectorizes them. This is meant as the primary function for doing close-loop prediction.
        # Closed Loop
        sequence_length = close_loop_data.input.sequence_length
        imagination_length = close_loop_data.input.imagination_length
        output_obs = close_loop_data.input.output_obs
        action = close_loop_data.input.action.contiguous()
        a_t = merge_first_and_second_dim(action[:, :sequence_length, :].contiguous())
        true_h_t = close_loop_data.input.h_t
        h_t = true_h_t[:, :sequence_length, :]
        h_t = merge_first_and_second_dim(h_t.contiguous())
        self.state_transition_model.set_state(h_t)
        elbo_likelihood = []
        consistency_loss = Dict()
        consistency_loss.discriminator = []
        consistency_loss.close_loop = []
        h_t_from_close_loop = None
        for t in range(0, imagination_length):
            # In imagination, z_t is sampled from the prior (no access to
            # the true next observation).
            prior = self.sample_zt_from_prior(h=h_t, a=a_t)
            z_t = prior.z_t
            inp = torch.cat((z_t, a_t), dim=1)
            h_t = self.state_transition_model(inp.unsqueeze(1)).squeeze(1)
            if(self.use_imitation_learning_model):
                a_t = self.imitation_learning_model(torch.cat((h_t, z_t), dim=1))
            else:
                a_t = merge_first_and_second_dim(action[:, t+1:t + sequence_length+1, :].contiguous())
            h_t_from_open_loop = true_h_t[:, t + 1:t + sequence_length + 1, :]
            h_t_from_close_loop = h_t
            if(self.use_consistency_model):
                if (self.is_consistency_model_metric_based):
                    h_t_from_open_loop = merge_first_and_second_dim(h_t_from_open_loop.contiguous())
                else:
                    h_t_from_close_loop = unmerge_first_and_second_dim(h_t_from_close_loop,
                                                                       first_dim=-1,
                                                                       second_dim=sequence_length)
                loss_close_loop, loss_discriminator = self.consistency_model((h_t_from_open_loop, h_t_from_close_loop))
                consistency_loss.close_loop.append(loss_close_loop)
                consistency_loss.discriminator.append(loss_discriminator)
            likelihood_mu, likelihood_sigma = self.convolutional_decoder(
                torch.cat((h_t, z_t), dim=1))
            elbo_likelihood.append(
                log_pdf(merge_first_and_second_dim(output_obs[:, t:t + sequence_length, :].contiguous()),
                        likelihood_mu,
                        likelihood_sigma))
        elbo_likelihood = list(map(lambda x: torch.mean(x).unsqueeze(0), elbo_likelihood))
        elbo_likelihood = torch.mean(torch.cat(elbo_likelihood))
        for key in consistency_loss:
            if consistency_loss[key]:
                # Checking if the list is non-empty
                consistency_loss[key] = torch.mean(torch.cat(consistency_loss[key]))
            else:
                consistency_loss[key] = torch.tensor(0.0).to(device=elbo_likelihood.device)
        close_loop_output = Dict()
        close_loop_output.loss = -elbo_likelihood
        close_loop_output.likelihood = elbo_likelihood
        close_loop_output.consistency_loss = consistency_loss.close_loop
        discriminator_output = Dict()
        discriminator_output.loss = consistency_loss.discriminator
        return close_loop_output, discriminator_output

    def sample_zt_from_prior(self, h, a):
        """Sample z_t ~ p(z | h, a) with clamped mu/log-sigma."""
        mu, logsigma = self.prior_model(torch.cat((h, a), dim=1))
        mu, logsigma = clamp_mu_logsigma(mu, logsigma)
        return sample_zt_from_distribution(mu, logsigma)

    def sample_zt_from_posterior(self, h, a, o):
        """Sample z_t ~ q(z | h, a, o) with clamped mu/log-sigma."""
        mu, logsigma = self.posterior_model(torch.cat((h, a, o), dim=1))
        mu, logsigma = clamp_mu_logsigma(mu, logsigma)
        return sample_zt_from_distribution(mu, logsigma)

    def get_optimizers(self):
        '''Method to return the list of optimizers for the model'''
        optimizers = []
        model_params = []
        if(self.use_imitation_learning_model):
            imitation_learning_model_params = list(self.get_imitation_learning_model_params())
            model_params.append(imitation_learning_model_params)
        open_loop_params = list(self.get_open_loop_params())
        model_params.append(open_loop_params)
        close_loop_params = list(self.get_close_loop_params())
        model_params.append(close_loop_params)
        if(self.use_consistency_model):
            consistency_model_params = list(self.get_consistency_model_params())
            model_params.append(consistency_model_params)
        # One optimizer per non-empty parameter group.
        optimizers = tuple(map(self._register_params_to_optimizer, filter(lambda x: x, model_params)))
        if (optimizers):
            return optimizers
        return None

    def get_open_loop_params(self):
        # Method to get params which are to be updated with the open loop
        open_loop_models = (self.convolutional_encoder,
                            self.state_transition_model,
                            self.convolutional_decoder,
                            self.prior_model,
                            self.posterior_model)
        open_loop_params = tuple(map(lambda model: model.get_model_params(), open_loop_models))
        return chain(*open_loop_params)

    def get_close_loop_params(self):
        # Method to get params which are to be updated with the close loop
        close_loop_models = [self.state_transition_model,]
        if(self.use_imitation_learning_model):
            close_loop_models.append(self.imitation_learning_model)
        close_loop_params = tuple(map(lambda model: model.get_model_params(), close_loop_models))
        return chain(*close_loop_params)

    def get_consistency_model_params(self):
        # Method to get params which are to be updated with the consistency model
        consistency_models = (self.consistency_model,)
        consistency_model_params = tuple(map(lambda model: model.get_model_params(), consistency_models))
        return chain(*consistency_model_params)

    def get_imitation_learning_model_params(self):
        # Method to get params which are to be updated with the imitation learning model
        imitation_learning_models = (self.imitation_learning_model,)
        imitation_learning_model_params = tuple(map(lambda model: model.get_model_params(), imitation_learning_models))
        return chain(*imitation_learning_model_params)
| StarcoderdataPython |
1647712 | <filename>python/p001.py
def solve():
    """Return the sum of all natural numbers below 1000 that are
    multiples of 3 or 5 (Project Euler problem 1)."""
    total = 0
    for n in range(1000):
        if n % 3 == 0 or n % 5 == 0:
            total += n
    return total
# Script entry point: print the answer to Project Euler problem 1.
if __name__ == "__main__":
    print(solve())
| StarcoderdataPython |
3477963 | <filename>seq.py<gh_stars>0
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
# Define Sequential model with 3 layers
model = keras.Sequential(
    [
        layers.Dense(2, activation="relu", name="layer1"),
        layers.Dense(3, activation="relu", name="layer2"),
        layers.Dense(4, name="layer3"),
    ]
)
# Call model on a test input (3 samples of 3 features); the first call
# also builds the layer weights.
x = tf.ones((3, 3))
y = model(x)
| StarcoderdataPython |
67818 | <reponame>Mee321/HAPG_exp
import tensorflow as tf
from garage.envs import normalize
from garage.envs.box2d import CartpoleEnv
from garage.misc.instrument import run_experiment
from garage.tf.baselines import GaussianMLPBaseline
from garage.tf.envs import TfEnv
from garage.tf.policies import GaussianMLPPolicy
from tests.fixtures.tf.instrumented_trpo import InstrumentedTRPO
def run_task(*_):
    """Build and train an instrumented TRPO agent on normalized CartPole.
    The positional arguments passed in by run_experiment are ignored."""
    env = TfEnv(normalize(CartpoleEnv()))
    policy = GaussianMLPPolicy(
        env_spec=env.spec,
        hidden_sizes=(32, 32),
        hidden_nonlinearity=tf.nn.tanh,
        output_nonlinearity=None,
    )
    baseline = GaussianMLPBaseline(
        env_spec=env.spec,
        regressor_args=dict(hidden_sizes=(32, 32)),
    )
    algo = InstrumentedTRPO(
        env=env,
        policy=policy,
        baseline=baseline,
        batch_size=1024,
        max_path_length=100,
        n_itr=4,
        discount=0.99,
        gae_lambda=0.98,
        policy_ent_coeff=0.0,
        plot=True,
    )
    algo.train()
# Launch the experiment; the runner invokes run_task with its own args.
run_experiment(
    run_task,
    # Number of parallel workers for sampling
    n_parallel=6,
    # Only keep the snapshot parameters for the last iteration
    snapshot_mode="last",
    # Specifies the seed for the experiment. If this is not provided, a random
    # seed will be used
    seed=1,
    plot=True,
)
| StarcoderdataPython |
5127391 | <reponame>pulp-platform/stream-ebpc
# Copyright 2019 ETH Zurich, <NAME> and <NAME>
# Copyright and related rights are licensed under the Solderpad Hardware
# License, Version 0.51 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://solderpad.org/licenses/SHL-0.51. Unless required by applicable law
# or agreed to in writing, software, hardware and materials distributed under
# this License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import cocotb
from cocotb.clock import Clock
from cocotb.result import TestFailure
from cocotb.triggers import Timer, RisingEdge
from cocotb.binary import BinaryValue
from common.drivers import reset_dut
from bpc_encoder_driver import BPCEncoderDriver
from bpc_encoder_scoreboard import BPCEncoderScoreboard
from bpc_encoder_monitor import BPCEncoderMonitor
import random
#import pydevd_pycharm
#pydevd_pycharm.settrace('localhost', port=9100, stdoutToServer=True, stderrToServer=True)
random.seed(a=8)  # fixed seed so stimuli are reproducible across runs

# Simulator timing (in simulator time units).
CLOCK_PERIOD = 2500
RESET_TIME = 15000
# Encoder geometry: word width and words per block.
DATA_W = 8
BLOCK_SIZE = 8
# NOTE(review): presumably apply/acquire delays for the driver -- confirm
# against BPCEncoderDriver.
TA = 200
TT = 2000
# Report destinations; set a name to None to disable that report.
STIM_REPORT_FILE = '../reports/stimuli.log'
#STIM_REPORT_FILE = None
INT_RESULTS_REPORT_FILE = '../intermediate_results.log'
#INT_RESULTS_REPORT_FILE = None
SUMMARY_REPORT_FILE = '../reports/summary.log'
#SUMMARY_REPORT_FILE = None
@cocotb.test()
def basic_bringup(dut):
    """Bring-up smoke test: start the clock, reset the DUT, idle briefly."""
    cocotb.fork(Clock(dut.clk_i, CLOCK_PERIOD).start())
    yield reset_dut(dut.rst_ni, RESET_TIME)
    yield Timer(10000)
@cocotb.test()
def random_inputs(dut):
    """Drive random signed DATA_W-bit words through the BPC encoder and
    let the monitor/scoreboard check the compressed output."""
    drv = BPCEncoderDriver(dut, TA, TT)
    sb = BPCEncoderScoreboard(dut, BLOCK_SIZE, DATA_W, stim_report_file=STIM_REPORT_FILE)
    mon = BPCEncoderMonitor(dut, BLOCK_SIZE, DATA_W, sb)
    num_input_blocks = 1000
    # Full-range two's-complement stimuli; flush asserted only on the
    # very last word.
    input_vals = [BinaryValue(n_bits=DATA_W, bigEndian=False, value=random.randint(-2**(DATA_W-1), 2**(DATA_W-1)-1), binaryRepresentation=2) for k in range(BLOCK_SIZE*num_input_blocks)]
    flush = [0] * (len(input_vals)-1)
    flush.append(1)
    cocotb.fork(Clock(dut.clk_i, CLOCK_PERIOD).start())
    drv.apply_defaults()
    yield reset_dut(dut.rst_ni, RESET_TIME)
    dut._log.info("Reset Done!")
    for k in range(4):
        yield RisingEdge(dut.clk_i)
    mon.start()
    # Reader runs concurrently; 2x the input count is a generous bound on
    # the number of output reads.
    read_task = cocotb.fork(drv.read_outputs(len(input_vals)*2, tmin=0, tmax=0))
    yield drv.drive_input(zip(input_vals, flush), tmin=0, tmax=0)
    yield Timer(4*CLOCK_PERIOD*len(input_vals))
    read_task.kill()
    mon.stop()
    if sb.report():
        raise TestFailure("Scoreboard reported problems - check log!")
| StarcoderdataPython |
4871029 | <gh_stars>10-100
from __future__ import print_function
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Embedding, Reshape
from keras.layers import LSTM
from keras.datasets import imdb
import numpy as np
max_features = 20000
maxlen = 256  # cut texts after this number of words (among top max_features most common words)
batch_size = 32

print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')

# One-hot encode the binary labels as [1, 0] / [0, 1].
# NOTE(review): this block is duplicated verbatim for train and test;
# keras.utils.to_categorical would replace both loops.
print("train labals", len(y_train))
list1 = []
for i in range(len(y_train)):
    if y_train[i] == 1:
        list1.append([1, 0])
    else:
        list1.append([0, 1])
y_train = np.array(list1)
print("after process", len(y_train))
print("Test labals", len(y_test))
list1 = []
for i in range(len(y_test)):
    if y_test[i] == 1:
        list1.append([1, 0])
    else:
        list1.append([0, 1])
y_test = np.array(list1)
print("after process", len(y_test))
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
# Reshape each padded 256-word review into 16x16x1; the model's Reshape
# layer flattens it back before the Embedding.
x_train = x_train.reshape((x_train.shape[0], 16, 16, 1))
x_test = x_test.reshape((x_test.shape[0], 16, 16, 1))
print('Build model...')
model = Sequential()
model.add(Reshape((256,), input_shape=(16, 16, 1)))
model.add(Embedding(max_features, 128))
model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(2, activation='softmax'))
#model.add(Dense(1, activation='sigmoid'))
# model.add(Dense(2))
# model.add(softmax(x, axis=-1))
# try using different optimizers and different optimizer configs
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
print('Train...')
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=8,
          #epochs=15,
          validation_data=(x_test, y_test))
score, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
model.save_weights("imdb_model.h5")
print("Saved model to disk")
| StarcoderdataPython |
250850 | <filename>web/telebble/utils.py
import re
import json
import logging
import random
import string
import pytz
import datetime
import dateutil.parser
from common import api
import constants
import sources
import timeline
def generate_key(length=16):
    """
    Generate a random string of uppercase letters and digits.

    The previous implementation referenced an undefined name ``N`` for
    the length, so every call raised NameError; the length is now a
    parameter with a default.

    :param length: number of characters in the generated key.
    :return: random string of ``length`` characters.
    """
    # NOTE: ``random`` is not cryptographically secure; switch to the
    # ``secrets`` module if these keys ever guard anything sensitive.
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(length))
def is_crunchyroll_source(series):
    """Return True if *series* comes from the Crunchyroll source."""
    return series.source_type == sources.CRUNCHYROLL_SOURCE
def is_funimation_source(series):
    """Return True if *series* comes from the Funimation source."""
    return series.source_type == sources.FUNIMATION_SOURCE
def unix_time(dt):
    """
    Converts a datetime object to the seconds since epoch. Converts to
    UTC as a preventative measure.

    *dt* must be timezone-aware (astimezone is called on it).
    """
    dt = dt.astimezone(pytz.utc)
    epoch = datetime.datetime(constants.EPOCH_YEAR, constants.EPOCH_MONTH, constants.EPOCH_DAY, tzinfo=pytz.utc)
    return int((dt - epoch).total_seconds())
def timestamp_to_iso(timestamp):
    """
    Converts a timestamp representing seconds since epoch in UTC to the
    equivalent ISO-8601 string (with a trailing 'Z').
    """
    utc = pytz.utc
    datetime_utc = datetime.datetime.utcfromtimestamp(int(timestamp))
    datetime_utc = datetime_utc.replace(tzinfo=utc)
    return '%sZ' % datetime_utc.isoformat()
def iso_to_timestamp(iso):
    """
    Converts an ISO datestring to the seconds since epoch in UTC.
    """
    dt = dateutil.parser.parse(iso)
    dt = dt.astimezone(pytz.utc)
    return unix_time(dt)
def normalize_description(description):
    """
    Trim *description* down to the whole sentences that fit within the
    pin body length limit.

    Sentences are accumulated in order until adding the next one would
    reach the limit.  If even the first sentence is too long, the raw
    text is truncated; an empty description yields 'No Description'.
    """
    max_length = 300
    # Split on a period followed by whitespace or end-of-string.  The
    # previous pattern '\.[\s$]' put '$' inside a character class, where
    # it is a literal dollar sign rather than an end anchor, so a final
    # sentence kept its trailing period.
    sentences = [s for s in re.split(r'\.(?:\s+|$)', description) if s]
    pieces = []
    description_length = 0
    for sentence in sentences:
        description_length += len(sentence)
        if description_length >= max_length:
            break
        pieces.append(sentence)
    if len(pieces) == 0:
        if len(description) == 0:
            return 'No Description'
        return description[0:max_length]
    return '. '.join(pieces)
def create_generic_pin(media):
    """
    Creates a generic pin from a media object: layout (title, body,
    icons, colours, info sections), one "Open in Watchapp" action and
    reminders at 0, 30 and 24*60 minutes before air time.
    """
    pin = timeline.Pin()
    pin.id = str(media._id)
    # Timeline API rejects the '+00:00' offset form; strip it.
    pin.time = timestamp_to_iso(media.timestamp).replace('+00:00', '')
    if media.runtime is not None:
        pin.duration = media.runtime
    pin.layout.type = timeline.resources.PIN_LAYOUTS['GENERIC']
    pin.layout.title = media.normalized_name
    pin.layout.shortTitle = media.normalized_name
    pin.layout.subtitle = media.series_name
    series = media.series
    # Fall back to the series description, then a placeholder, when the
    # episode has no summary of its own.
    description = media.summary
    if description is None or len(description) == 0:
        if series.description is not None and len(series.description) > 0:
            description = normalize_description(series.description)
        else:
            description = 'No Description'
    else:
        description = normalize_description(description)
    pin.layout.body = description
    icon = timeline.resources.ICONS['MISC']['SHOW']
    pin.layout.tinyIcon = icon
    pin.layout.smallIcon = icon
    pin.layout.largeIcon = icon
    colours = timeline.resources.COLOURS
    pin.layout.foregroundColor = colours['BLACK']
    pin.layout.secondaryColor = colours['WHITE']
    pin.layout.backgroundColor = colours['LIGHT_GRAY']
    pin.layout.add_section('Series', media.series_name)
    if media.season is not None:
        pin.layout.add_section('Season', str(media.season))
    pin.layout.add_section('Episode', str(media.number))
    action = timeline.Action()
    action.title = 'Open in Watchapp'
    action.launchCode = media._id
    action.type = timeline.resources.ACTION_TYPES['OPEN']
    pin.add_action(action)
    # The minutes before the time the event begins to show a reminder on
    # the watch.
    reminder_times = (0, 30, 60 * 24)
    messages = (
        '',
        '',
        '(You will get another reminder 30 minutes before it begins)'
    )
    for (minutes, message) in zip(reminder_times, messages):
        reminder = timeline.Reminder()
        reminder.layout.type = timeline.resources.REMINDER_LAYOUTS['GENERIC']
        reminder.layout.tinyIcon = icon
        reminder.layout.smallIcon = icon
        reminder.layout.largeIcon = icon
        reminder.layout.foregroundColor = colours['BLACK']
        reminder.layout.backgroundColor = colours['WHITE']
        reminder.layout.title = media.normalized_name
        reminder.layout.subtitle = media.series_name
        reminder.layout.shortTitle = media.normalized_name
        air_time = media.timestamp - (minutes * constants.SECONDS_IN_A_MINUTE)
        reminder.time = timestamp_to_iso(air_time).replace('+00:00', '')
        reminder.layout.body = '%s %s' % (description, message)
        pin.add_reminder(reminder)
    return pin
def create_pin_for_crunchyroll(media):
    """Generic pin plus a 'Service: Crunchyroll' section."""
    pin = create_generic_pin(media)
    pin.layout.add_section('Service', 'Crunchyroll')
    return pin
def create_pin_for_funimation(media):
    """Generic pin plus a 'Service: Funimation' section."""
    pin = create_generic_pin(media)
    pin.layout.add_section('Service', 'Funimation')
    return pin
def create_pin_for_television(media):
    """Generic pin plus network and country sections."""
    pin = create_generic_pin(media)
    pin.layout.add_section('Network', media.network.name)
    pin.layout.add_section('Country', media.country)
    return pin
def create_pin_for_media_object(media):
    """
    Creates a pin for a media object, dispatching on its source type.
    Returns None for unknown source types.
    """
    builders = {
        sources.CRUNCHYROLL_SOURCE: create_pin_for_crunchyroll,
        sources.FUNIMATION_SOURCE: create_pin_for_funimation,
        sources.TELEVISION_SOURCE: create_pin_for_television,
    }
    builder = builders.get(media.source_type)
    if builder is None:
        return None
    return builder(media)
def send_pin_for_topic(topic, pin):
    """Validate *pin* and push it as a shared pin to *topic*.
    Logs and returns False on validation or API failure, True otherwise."""
    try:
        pin.validate()
        serialized = pin.json()
        status, result = api.send_shared_pin([topic], serialized)
        if not status:
            # Surface the API error payload in the logs when present.
            if len(result.content) > 0:
                to_json = json.loads(result.content)
                logging.error(json.dumps(to_json, indent=4, sort_keys=True))
            return status
    except timeline.fields.ValidationException as e:
        logging.error('Failed to validate pin: %s' % str(e))
        logging.error('%s\n' % json.dumps(pin.json(), indent=4, sort_keys=True))
        return False
    return True
def send_pin(media):
    """Build and send the timeline pin(s) for *media*, routing to the
    appropriate free/premium topic depending on the source type.
    Returns the result of the final send, or False if skipped."""
    now = unix_time(datetime.datetime.utcnow().replace(tzinfo=pytz.utc))
    # Skip anything that aired more than a day ago.
    if media.timestamp < (now - constants.SECONDS_IN_A_DAY):
        logging.debug('Skipping %s, too far in the past: %s' % (media, timestamp_to_iso(media.timestamp)))
        return False
    pin = create_pin_for_media_object(media)
    topic = str(media.series.topic)
    if is_crunchyroll_source(media):
        # We have to send a pin for both for the free and premium versions
        # of this data source, so we'll send the free one here as a copy.
        free_topic = topic + '-free'
        free_pin = create_pin_for_media_object(media)
        free_pin.id = str(media._id) + '-free'
        free_pin.time = timestamp_to_iso(iso_to_timestamp(media.extra_data['free_available_time'])).replace('+00:00', '')
        send_pin_for_topic(free_topic, free_pin)
        pin.id = str(media._id) + '-premium'
        topic += '-premium'
    elif is_funimation_source(media):
        # Funimation episodes are either free or premium, never both.
        if media.extra_data['premium']:
            topic += '-premium'
            pin.id = str(media._id) + '-premium'
        else:
            topic += '-free'
            pin.id = str(media._id) + '-free'
    return send_pin_for_topic(topic, pin)
| StarcoderdataPython |
3437982 | <gh_stars>0
from django.shortcuts import render
from django.views import View
from django.http import HttpResponse
from teachers.models import Teacher
from operation.models import UserFavorite
# Create your views here.
class TeacherDetailView(View):
    """Teacher detail page: bumps the click counter, lists the teacher's
    courses and reports whether the current user has favourited them."""

    def get(self, request, teacher_id):
        teacher = Teacher.objects.get(id=int(teacher_id))
        # Count every page view as a click.
        teacher.click_nums += 1
        teacher.save()
        all_course = teacher.course_set.all()
        # Teacher ranking / favourite flag for the current user.
        # Bug fix: the original guard was "if not request.user:", which
        # only ran the favourite lookup when there was NO user -- so the
        # flag was effectively always False for logged-in users.
        has_fav_teacher = False
        if request.user.is_authenticated:
            if UserFavorite.objects.filter(user=request.user, fav_id=teacher_id, fav_type=1):
                has_fav_teacher = True
        return render(request, "teacher-center.html", {
            "teacher": teacher,
            "all_course": all_course,
            'has_fav_teacher': has_fav_teacher
        })
| StarcoderdataPython |
def df_rename_col(data, col, rename_to, destructive=False):
    """Rename a single column of a dataframe.

    Parameters
    ----------
    data : pandas.DataFrame
        Pandas dataframe with the column to be renamed.
    col : str
        Column to be renamed.
    rename_to : str
        New name for the column to be renamed.
    destructive : bool
        If set to True, will make changes directly to the dataframe which
        may be useful with very large dataframes instead of making a copy.

    Returns
    -------
    pandas.DataFrame
        The dataframe with the column renamed (the same object when
        ``destructive`` is True).

    Raises
    ------
    ValueError
        If ``col`` is not a column of ``data`` (matching the ValueError
        the old ``list.index`` lookup raised).
    """
    if col not in data.columns:
        raise ValueError('%r is not a column of the dataframe' % (col,))
    # DataFrame.rename preserves column order, replacing the manual
    # insert/remove dance over a copied column list.
    if destructive:
        data.rename(columns={col: rename_to}, inplace=True)
    else:
        data = data.rename(columns={col: rename_to})
    return data
| StarcoderdataPython |
77669 | import os
from . import PyScoreDraft
ScoreDraftPath_old = os.path.dirname(__file__)
# Normalize Windows backslash separators to forward slashes so the path
# can be handed to the extension scanner uniformly (replaces the old
# character-by-character loop).
ScoreDraftPath = ScoreDraftPath_old.replace("\\", "/")
# os.pathsep is ';' on Windows and ':' on POSIX -- exactly the separators
# the previous per-OS branches appended.
os.environ["PATH"] += os.pathsep + ScoreDraftPath
PyScoreDraft.ScanExtensions(ScoreDraftPath)
from .PyScoreDraft import TellDuration
'''
TellDuration(seq) takes in a single input "seq"
It can be a note-sequence, a beat-sequence, or a singing-sequence,
anything acceptable by Instrument.play(), Percussion.play(), Singer.sing()
as the "seq" parameter
The return value is the total duration of the sequence as an integer
'''
from .TrackBuffer import setDefaultNumberOfChannels
from .TrackBuffer import TrackBuffer
from .TrackBuffer import MixTrackBufferList
from .TrackBuffer import WriteTrackBufferToWav
from .TrackBuffer import ReadTrackBufferFromWav
try:
from .Extensions import WriteNoteSequencesToMidi
except ImportError:
pass
try:
from .Extensions import PlayTrackBuffer
except ImportError:
pass
try:
from .Extensions import PlayGetRemainingTime
except ImportError:
pass
try:
from .Extensions import QPlayTrackBuffer
except ImportError:
pass
try:
from .Extensions import QPlayGetRemainingTime
except ImportError:
pass
from .Catalog import Catalog
from .Catalog import PrintCatalog
from .Instrument import Instrument
from .Percussion import Percussion
from .Singer import Singer
from .Document import Document
try:
from .Meteor import Document as MeteorDocument
except ImportError:
pass
from .InternalInstruments import PureSin, Square, Triangle, Sawtooth, NaivePiano, BottleBlow
try:
from .PercussionSampler import PercussionSampler
PERC_SAMPLE_ROOT=ScoreDraftPath+'/PercussionSamples'
if os.path.isdir(PERC_SAMPLE_ROOT):
for item in os.listdir(PERC_SAMPLE_ROOT):
file_path = PERC_SAMPLE_ROOT+'/'+item
if os.path.isfile(file_path) and item.endswith(".wav"):
name = item[0:len(item)-4]
definition="""
def """+name+"""():
return PercussionSampler('"""+file_path+"""')
"""
exec(definition)
Catalog['Percussions'] += [name+' - PercussionSampler']
except ImportError:
pass
try:
from .InstrumentSampler import InstrumentSampler_Single
from .InstrumentSampler import InstrumentSampler_Multi
INSTR_SAMPLE_ROOT=ScoreDraftPath+'/InstrumentSamples'
if os.path.isdir(INSTR_SAMPLE_ROOT):
for item in os.listdir(INSTR_SAMPLE_ROOT):
inst_path = INSTR_SAMPLE_ROOT+'/'+item
if os.path.isfile(inst_path) and item.endswith(".wav"):
name = item[0:len(item)-4]
definition="""
def """+name+"""():
return InstrumentSampler_Single('"""+inst_path+"""')
"""
exec(definition)
Catalog['Instruments'] += [name+' - InstrumentSampler_Single']
elif os.path.isdir(inst_path):
name = item
definition="""
def """+item+"""():
return InstrumentSampler_Multi('"""+inst_path+"""')
"""
exec(definition)
Catalog['Instruments'] += [name+' - InstrumentSampler_Multi']
except ImportError:
pass
try:
from .KeLa import KeLa
KELA_SAMPLE_ROOT=ScoreDraftPath+'/KeLaSamples'
if os.path.isdir(KELA_SAMPLE_ROOT):
for item in os.listdir(KELA_SAMPLE_ROOT):
kela_path = KELA_SAMPLE_ROOT+'/'+item
if os.path.isdir(kela_path):
definition="""
def """+item+"""():
return KeLa('"""+kela_path+"""')
"""
exec(definition)
Catalog['Singers'] += [item+' - KeLa']
except ImportError:
pass
try:
from .UtauDraft import UtauDraft
from .CVVCChineseConverter import CVVCChineseConverter
from .XiaYYConverter import XiaYYConverter
from .JPVCVConverter import JPVCVConverter
from .TsuroVCVConverter import TsuroVCVConverter
from .TTEnglishConverter import TTEnglishConverter
from .VCCVEnglishConverter import VCCVEnglishConverter
UTAU_VB_ROOT=ScoreDraftPath+'/UTAUVoice'
UTAU_VB_SUFFIX='_UTAU'
if os.path.isdir(UTAU_VB_ROOT):
for item in os.listdir(UTAU_VB_ROOT):
utau_path = UTAU_VB_ROOT+'/'+item
if os.path.isdir(utau_path):
definition="""
def """+item+UTAU_VB_SUFFIX+"""(useCuda=True):
return UtauDraft('"""+utau_path+"""',useCuda)
"""
exec(definition)
Catalog['Singers'] += [item+UTAU_VB_SUFFIX+' - UtauDraft']
except ImportError:
pass
try:
from .SF2Instrument import ListPresets as ListPresetsSF2
from .SF2Instrument import SF2Instrument
SF2_ROOT=ScoreDraftPath+'/SF2'
if os.path.isdir(SF2_ROOT):
for item in os.listdir(SF2_ROOT):
sf2_path = SF2_ROOT+'/'+item
if os.path.isfile(sf2_path) and item.endswith(".sf2"):
name = item[0:len(item)-4]
definition="""
def """+name+"""(preset_index):
return SF2Instrument('"""+sf2_path+"""', preset_index)
def """+name+"""_List():
ListPresetsSF2('"""+sf2_path+"""')
"""
exec(definition)
Catalog['Instruments'] += [name+' - SF2Instrument']
except ImportError:
pass
try:
from .KarplusStrongInstrument import KarplusStrongInstrument
except ImportError:
pass
| StarcoderdataPython |
5040320 | from __future__ import division
import golly as g
from PIL import Image
from math import floor, ceil, log
import os
import json
#---------------------------------------------
# settings
# Source image: pixels with a red channel above 128 become live cells.
srcPath = "/Volumes/MugiRAID1/Works/2015/13_0xff/ae/a_frame.png"
#---------------------------------------------
# settings

def log( data ):
    """Show *data* as JSON in a Golly notification dialog (debug helper)."""
    # NOTE(review): shadows math.log imported above — confirm intentional.
    g.note( json.dumps(data) )

def main( step = 1, start = 0 ):
    """Copy srcPath into the Golly grid, thresholding on the red channel."""
    # NOTE(review): `step` and `start` are currently unused.
    img = Image.open( srcPath )
    size = img.size
    # xrange: this script targets Python 2 (see the __future__ import above).
    for y in xrange( 0, size[1] ):
        for x in xrange( 0, size[0] ):
            c = img.getpixel( (x, y) )
            state = 1 if c[0] > 128 else 0
            g.setcell( x, y, state )

#---------------------------------------------
# main
main()
# 0.7019105 * ( 114.953 * tan( 53.13/ 180 * pi / 2) )
1865454 | <filename>tests/sdk/queries/fileevents/filters/test_device_filter.py
# -*- coding: utf-8 -*-
import pytest
from tests.sdk.queries.conftest import EXISTS
from tests.sdk.queries.conftest import IS
from tests.sdk.queries.conftest import IS_IN
from tests.sdk.queries.conftest import IS_NOT
from tests.sdk.queries.conftest import NOT_EXISTS
from tests.sdk.queries.conftest import NOT_IN
from py42._compat import str
from py42.sdk.queries.fileevents.filters.device_filter import DeviceSignedInUserName
from py42.sdk.queries.fileevents.filters.device_filter import DeviceUsername
from py42.sdk.queries.fileevents.filters.device_filter import OSHostname
from py42.sdk.queries.fileevents.filters.device_filter import PrivateIPAddress
from py42.sdk.queries.fileevents.filters.device_filter import PublicIPAddress
# --- DeviceUsername: exists / equality / membership / unicode serialization ---
def test_device_username_exists_str_gives_correct_json_representation():
    _filter = DeviceUsername.exists()
    expected = EXISTS.format("deviceUserName")
    assert str(_filter) == expected

def test_device_username_not_exists_str_gives_correct_json_representation():
    _filter = DeviceUsername.not_exists()
    expected = NOT_EXISTS.format("deviceUserName")
    assert str(_filter) == expected

def test_device_username_eq_str_gives_correct_json_representation():
    _filter = DeviceUsername.eq("test_deviceUserName")
    expected = IS.format("deviceUserName", "test_deviceUserName")
    assert str(_filter) == expected

def test_device_username_not_eq_str_gives_correct_json_representation():
    _filter = DeviceUsername.not_eq("test_deviceUserName")
    expected = IS_NOT.format("deviceUserName", "test_deviceUserName")
    assert str(_filter) == expected

def test_device_username_is_in_str_gives_correct_json_representation():
    items = ["deviceUserName1", "deviceUserName2", "deviceUserName3"]
    _filter = DeviceUsername.is_in(items)
    expected = IS_IN.format("deviceUserName", *items)
    assert str(_filter) == expected

def test_device_username_not_in_str_gives_correct_json_representation():
    items = ["deviceUserName1", "deviceUserName2", "deviceUserName3"]
    _filter = DeviceUsername.not_in(items)
    expected = NOT_IN.format("deviceUserName", *items)
    assert str(_filter) == expected

def test_device_username_eq_unicode_str_gives_correct_json_representation():
    # Non-ASCII usernames must serialize to their \u-escaped JSON form.
    unicode_username = u"ๆจๅทฒ็ปๅ็ฐไบ็งๅฏไฟกๆฏ"
    _filter = DeviceUsername.eq(unicode_username)
    expected = IS.format(
        u"deviceUserName",
        u"\u60a8\u5df2\u7ecf\u53d1\u73b0\u4e86\u79d8\u5bc6\u4fe1\u606f",
    )
    assert str(_filter) == expected

# --- OSHostname ---
def test_os_hostname_exists_str_gives_correct_json_representation():
    _filter = OSHostname.exists()
    expected = EXISTS.format("osHostName")
    assert str(_filter) == expected

def test_os_hostname_not_exists_str_gives_correct_json_representation():
    _filter = OSHostname.not_exists()
    expected = NOT_EXISTS.format("osHostName")
    assert str(_filter) == expected

def test_os_hostname_eq_str_gives_correct_json_representation():
    _filter = OSHostname.eq("test_osHostName")
    expected = IS.format("osHostName", "test_osHostName")
    assert str(_filter) == expected

def test_os_hostname_not_eq_str_gives_correct_json_representation():
    _filter = OSHostname.not_eq("test_osHostName")
    expected = IS_NOT.format("osHostName", "test_osHostName")
    assert str(_filter) == expected

def test_os_hostname_is_in_str_gives_correct_json_representation():
    items = ["osHostName1", "osHostName2", "osHostName3"]
    _filter = OSHostname.is_in(items)
    expected = IS_IN.format("osHostName", *items)
    assert str(_filter) == expected

def test_os_hostname_not_in_str_gives_correct_json_representation():
    items = ["osHostName1", "osHostName2", "osHostName3"]
    _filter = OSHostname.not_in(items)
    expected = NOT_IN.format("osHostName", *items)
    assert str(_filter) == expected

# --- PrivateIPAddress ---
def test_private_ip_exists_str_gives_correct_json_representation():
    _filter = PrivateIPAddress.exists()
    expected = EXISTS.format("privateIpAddresses")
    assert str(_filter) == expected

def test_private_ip_not_exists_str_gives_correct_json_representation():
    _filter = PrivateIPAddress.not_exists()
    expected = NOT_EXISTS.format("privateIpAddresses")
    assert str(_filter) == expected

def test_private_ip_address_eq_str_gives_correct_json_representation():
    _filter = PrivateIPAddress.eq("test_privateIp")
    expected = IS.format("privateIpAddresses", "test_privateIp")
    assert str(_filter) == expected

def test_private_ip_address_not_eq_str_gives_correct_json_representation():
    _filter = PrivateIPAddress.not_eq("test_privateIp")
    expected = IS_NOT.format("privateIpAddresses", "test_privateIp")
    assert str(_filter) == expected

def test_private_ip_address_is_in_str_gives_correct_json_representation():
    items = ["privateIp1", "privateIp2", "privateIp3"]
    _filter = PrivateIPAddress.is_in(items)
    expected = IS_IN.format("privateIpAddresses", *items)
    assert str(_filter) == expected

def test_private_ip_address_not_in_str_gives_correct_json_representation():
    items = ["privateIp1", "privateIp2", "privateIp3"]
    _filter = PrivateIPAddress.not_in(items)
    expected = NOT_IN.format("privateIpAddresses", *items)
    assert str(_filter) == expected

# --- PublicIPAddress ---
def test_public_ip_address_exists_str_gives_correct_json_representation():
    _filter = PublicIPAddress.exists()
    expected = EXISTS.format("publicIpAddress")
    assert str(_filter) == expected

def test_public_ip_address_not_exists_str_gives_correct_json_representation():
    _filter = PublicIPAddress.not_exists()
    expected = NOT_EXISTS.format("publicIpAddress")
    assert str(_filter) == expected

def test_public_ip_address_eq_str_gives_correct_json_representation():
    _filter = PublicIPAddress.eq("test_publicIp")
    expected = IS.format("publicIpAddress", "test_publicIp")
    assert str(_filter) == expected

def test_public_ip_address_not_eq_str_gives_correct_json_representation():
    _filter = PublicIPAddress.not_eq("test_publicIp")
    expected = IS_NOT.format("publicIpAddress", "test_publicIp")
    assert str(_filter) == expected

def test_public_ip_address_is_in_str_gives_correct_json_representation():
    items = ["publicIpAddress1", "publicIpAddress2", "publicIpAddress3"]
    _filter = PublicIPAddress.is_in(items)
    expected = IS_IN.format("publicIpAddress", *items)
    assert str(_filter) == expected

def test_public_ip_address_not_in_str_gives_correct_json_representation():
    items = ["publicIpAddress1", "publicIpAddress2", "publicIpAddress3"]
    _filter = PublicIPAddress.not_in(items)
    expected = NOT_IN.format("publicIpAddress", *items)
    assert str(_filter) == expected

# --- DeviceSignedInUserName: single-value operators ---
@pytest.mark.parametrize(
    "filter_criteria, test_filter",
    [(DeviceSignedInUserName.eq, IS), (DeviceSignedInUserName.not_eq, IS_NOT)],
)
def test_equality_device_signed_in_username_gives_correct_json_representation(
    filter_criteria, test_filter
):
    _filter = filter_criteria("username")
    expected = test_filter.format("operatingSystemUser", "username")
    assert str(_filter) == expected
@pytest.mark.parametrize(
    "filter_criteria, test_filter",
    [(DeviceSignedInUserName.is_in, IS_IN), (DeviceSignedInUserName.not_in, NOT_IN)],
)
def test_multi_value_device_signed_in_username_gives_correct_json_representation(
    filter_criteria, test_filter
):
    """is_in / not_in on DeviceSignedInUserName serialize to the expected JSON.

    Fixes the misspelled test name (``vlaue`` -> ``value``) so the test is
    discoverable via name-based selection (``pytest -k value``).
    """
    usernames = ["username1", "username2", "username3"]
    _filter = filter_criteria(usernames)
    expected = test_filter.format("operatingSystemUser", *usernames)
    assert str(_filter) == expected
| StarcoderdataPython |
8191410 | <filename>search/value.py
# ----------
# User Instructions:
#
# Create a function compute_value which returns
# a grid of values. The value of a cell is the minimum
# number of moves required to get from the cell to the goal.
#
# If a cell is a wall or it is impossible to reach the goal from a cell,
# assign that cell a value of 99.
# ----------
# World map: 0 = navigable cell, 1 = wall.
grid = [[0, 1, 0, 1, 0, 0],
        [0, 1, 0, 1, 0, 0],
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 1, 0, 0],
        [0, 0, 0, 1, 0, 0]]
# Goal is the bottom-right cell.
goal = [len(grid)-1, len(grid[0])-1]
cost = 1 # the cost associated with moving from a cell to an adjacent one

# Motion model: the four cardinal moves, with display symbols below.
delta = [[-1, 0 ], # go up
         [ 0, -1], # go left
         [ 1, 0 ], # go down
         [ 0, 1 ]] # go right

delta_name = ['^', '<', 'v', '>']  # unused here; kept for policy printing
def compute_value(grid, goal, cost):
    """Breadth-first distance-to-goal value map.

    Returns a grid of the same shape as `grid` in which each navigable
    cell holds the minimum move cost needed to reach `goal`; walls and
    cells from which the goal is unreachable keep the sentinel value 99.
    """
    rows, cols = len(grid), len(grid[0])
    value = [[99] * cols for _ in range(rows)]
    visited = [[False] * cols for _ in range(rows)]

    gx, gy = goal
    value[gx][gy] = 0
    visited[gx][gy] = True

    # Four cardinal moves: up, left, down, right.
    moves = ((-1, 0), (0, -1), (1, 0), (0, 1))

    # Classic BFS without a deque: `queue` grows as neighbours are
    # discovered and `head` walks it in FIFO order.
    queue = [goal]
    head = 0
    while head < len(queue):
        x, y = queue[head]
        head += 1
        step = value[x][y] + cost
        for dx, dy in moves:
            nx, ny = x + dx, y + dy
            inside = 0 <= nx < rows and 0 <= ny < cols
            if inside and not visited[nx][ny] and grid[nx][ny] == 0:
                value[nx][ny] = step
                visited[nx][ny] = True
                queue.append([nx, ny])
    return value
# Demo: compute and display the value map for the module-level grid.
result = compute_value(grid, goal, cost)
for row in result:
    print(row)
3292034 | <filename>tests/test_linear_router.py<gh_stars>10-100
import pytest
from simobility.routers import LinearRouter
from simobility.core import GeographicPosition
from simobility.core.clock import Clock
def test_router2d():
    """LinearRouter duration/route estimates in minute and second clocks."""
    speed_kmph = 25
    nyc_pos = GeographicPosition(-73.935242, 40.730610)
    nyc_pos_shift = GeographicPosition(-73.935, 40.7306)
    # minutes
    clock = Clock(time_step=1, time_unit="m")
    router = LinearRouter(speed=speed_kmph, clock=clock)
    assert router.estimate_duration(nyc_pos, nyc_pos) == 0
    route = router.calculate_route(nyc_pos, nyc_pos)
    assert route.duration == 0
    assert route.distance == 0
    assert len(route.coordinates) == 1
    assert route.approximate_position(clock.clock_time) == nyc_pos
    assert route.approximate_position(clock.clock_time + 1) == nyc_pos
    assert router.estimate_duration(nyc_pos, nyc_pos_shift) == 1
    # Estimates should be independent of the current clock time.
    for i in range(10):
        clock.tick()
    assert router.estimate_duration(nyc_pos, nyc_pos) == 0
    # seconds
    clock = Clock(time_step=1, time_unit="s")
    router = LinearRouter(speed=speed_kmph, clock=clock)
    assert router.estimate_duration(nyc_pos, nyc_pos_shift) == 3
    route = router.calculate_route(nyc_pos, nyc_pos_shift)
    assert route.duration == 3
    # NOTE(review): pytest.approx(route.distance, 3) sets rel=3 (300%
    # tolerance) so this comparison is almost vacuous; likely intended
    # `route.distance == pytest.approx(0.02, abs=...)`. Confirm and tighten.
    assert pytest.approx(route.distance, 3) == 0.02
    assert len(route.coordinates) == 4
    assert route.approximate_position(clock.clock_time) == nyc_pos
    assert route.approximate_position(clock.clock_time + 1) != nyc_pos
    assert route.approximate_position(clock.clock_time + 1) == route.coordinates[1]
    assert route.approximate_position(clock.clock_time + 3) == nyc_pos_shift
    assert route.approximate_position(clock.clock_time + 3) == route.coordinates[-1]

def test_router2d_2():
    """Interpolated points all lie on the constant-latitude segment."""
    speed_kmph = 17
    nyc_pos = GeographicPosition(-73.935242, 40.730610)
    nyc_pos_shift = GeographicPosition(-73.935, 40.730610)
    clock = Clock(time_step=1, time_unit="s")
    router = LinearRouter(speed=speed_kmph, clock=clock)
    assert router.estimate_duration(nyc_pos, nyc_pos_shift) == 5
    route = router.calculate_route(nyc_pos, nyc_pos_shift)
    assert len(route.coordinates) == 6
    for p in route.coordinates:
        assert p.lat == nyc_pos.lat

def test_map_match():
    """map_match returns an equal position carrying a fresh id."""
    clock = Clock(time_step=1, time_unit="m")
    router = LinearRouter(speed=12, clock=clock)
    pos1 = GeographicPosition(-0.39376, 39.5145)
    pos2 = GeographicPosition(-0.38874, 39.503)
    for pos in [pos1, pos2]:
        pos_m = router.map_match(pos)
        assert pos == pos_m
        assert pos.id != pos_m.id

def test_route_destination():
    """calculate_route preserves the requested destination."""
    origin = GeographicPosition(-73.99780, 40.71205)
    destination = GeographicPosition(-73.99567, 40.71689)
    clock = Clock(time_step=10, time_unit="s")
    router = LinearRouter(clock=clock)
    assert router.calculate_route(origin, destination).destination == destination
| StarcoderdataPython |
3374306 | <reponame>thomasjpfan/distiller<gh_stars>10-100
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
import os
import sys
# Make the repository root importable so `distiller` and `models` resolve
# when the tests are run from inside this directory.
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
    sys.path.append(module_path)
import distiller
import models

def test_sparsity():
    """distiller sparsity/density report 1.0/0.0 extremes correctly."""
    zeros = torch.zeros(2,3,5,6)
    print(distiller.sparsity(zeros))
    assert distiller.sparsity(zeros) == 1.0
    assert distiller.sparsity_3D(zeros) == 1.0
    assert distiller.density_3D(zeros) == 0.0
    ones = torch.zeros(12,43,4,6)
    ones.fill_(1)
    assert distiller.sparsity(ones) == 0.0

def test_utils():
    """model_find_param / model_find_module_name round-trip on ResNet20."""
    model = models.create_model(False, 'cifar10', 'resnet20_cifar', parallel=False)
    assert model is not None
    p = distiller.model_find_param(model, "")
    assert p is None
    # Search for a parameter by its "non-parallel" name
    p = distiller.model_find_param(model, "layer1.0.conv1.weight")
    assert p is not None
    # Search for a module name
    module_to_find = None
    for name, m in model.named_modules():
        if name == "layer1.0.conv1":
            module_to_find = m
            break
    assert module_to_find is not None
    module_name = distiller.model_find_module_name(model, module_to_find)
    assert module_name == "layer1.0.conv1"
| StarcoderdataPython |
3437710 | <filename>statsrat/bayes_regr/tausq_inv_dist.py
import numpy as np
from scipy import stats
'''
Distributions for prior weight precision (tausq_inv), defined as classes.
constant: Prior precision (tausq_inv) is treated as constant, i.e.
there is no attempt to change the initial hyperparameter values.
ard: Automatic relevance determination, i.e. the model tries
to learn the distribution of tausq_inv via variational Bayes
(assuming that tausq_inv has a gamma distribution).
ard_drv_atn: Automatic relevance determination (assuming that tausq_inv has a gamma
distribution) with the assumption that all of the regression weights
(w) associated with a feature share a common prior precision (tausq_inv).
This ends up being a form of derived attention model.
'''
class constant:
    '''
    Prior weight precision (tausq_inv) is fixed: the initial
    hyperparameter values are never updated.
    '''
    def __init__(self, n_y, n_f, sim_pars):
        # One identical precision value per (feature, outcome) pair.
        self.tausq_inv_array = np.full((n_f, n_y), sim_pars['tausq_inv'])

    def update(self, mean_wsq, y_psb):
        """No-op: tausq_inv is assumed known and constant."""
        pass

    def mean_tausq_inv(self):
        """Mean prior precision (the fixed array itself)."""
        return self.tausq_inv_array

    def mean_tausq(self):
        """Mean prior variance, i.e. the elementwise reciprocal."""
        return 1/self.tausq_inv_array

constant.par_names = ['tausq_inv']
class ard:
    '''
    Automatic relevance determination: learn a gamma posterior over each
    weight's prior precision (tausq_inv) via variational Bayes.
    '''
    def __init__(self, n_y, n_f, sim_pars):
        self.n_y = n_y
        self.prior_hpar0 = sim_pars['prior_tausq_inv_hpar0']
        self.prior_hpar1 = sim_pars['prior_tausq_inv_hpar1']
        # hpar0 is per (feature, outcome); hpar1 is shared (scalar).
        self.hpar0 = np.full((n_f, n_y), self.prior_hpar0, dtype=float)
        self.hpar1 = sim_pars['prior_tausq_inv_hpar1']

    def update(self, mean_wsq, y_psb):
        """Refresh hyperparameters from E[w^2]; y_psb is unused here."""
        for outcome in range(self.n_y):
            self.hpar0[:, outcome] = self.prior_hpar0 - 0.5*mean_wsq[:, outcome]
        self.hpar1 = self.prior_hpar1 + 0.5

    def mean_tausq_inv(self):
        """Posterior mean of the precision."""
        return (self.hpar1 + 1)/(-self.hpar0)

    def mean_tausq(self):
        """Posterior mean of the variance."""
        return -self.hpar0/self.hpar1

ard.par_names = ['prior_tausq_inv_hpar0', 'prior_tausq_inv_hpar1']
class ard_drv_atn:
    '''
    Automatic relevance determination in which all regression weights of a
    feature share one prior precision (tausq_inv) with a gamma posterior;
    this amounts to a form of derived attention model.
    '''
    def __init__(self, n_y, n_f, sim_pars):
        self.n_y = n_y
        self.n_f = n_f
        self.prior_hpar0 = sim_pars['prior_tausq_inv_hpar0']
        self.prior_hpar1 = sim_pars['prior_tausq_inv_hpar1']
        # hpar0 is per feature (shared across outcomes); hpar1 is scalar.
        self.hpar0 = np.full(n_f, self.prior_hpar0, dtype=float)
        self.hpar1 = sim_pars['prior_tausq_inv_hpar1']
        # Indicator of outcomes that have been possible on any trial so far.
        self.y_psb_so_far = np.zeros(n_y)

    def update(self, mean_wsq, y_psb):
        """Update hyperparameters, pooling E[w^2] across outcomes per feature."""
        # Record which outcomes have ever been possible.
        for outcome in range(self.n_y):
            if y_psb[outcome] == 1:
                self.y_psb_so_far[outcome] = 1
        self.hpar0 = self.prior_hpar0 - 0.5*mean_wsq.sum(1)
        self.hpar1 = self.prior_hpar1 + 0.5*self.y_psb_so_far.sum()

    def mean_tausq_inv(self):
        """Posterior mean precision, broadcast to an (n_f, n_y) array."""
        per_feature = (self.hpar1 + 1)/(-self.hpar0)
        return np.tile(per_feature[:, None], (1, self.n_y))

    def mean_tausq(self):
        """Posterior mean variance, broadcast to an (n_f, n_y) array."""
        per_feature = -self.hpar0/self.hpar1
        return np.tile(per_feature[:, None], (1, self.n_y))

ard_drv_atn.par_names = ['prior_tausq_inv_hpar0', 'prior_tausq_inv_hpar1']
6667115 | <gh_stars>0
def main():
    """Find the 10 most frequent nouns across 'test_docs' and, for each,
    collect the files and sentences (tag upper-cased) where it occurs."""
    import nltk
    from nltk.corpus import stopwords  # NOTE(review): unused here
    (dictOfTexts, fullText) = getTextFromFiles('test_docs')
    freqDist = getMostFrequentWords(fullText).most_common(10)
    hashTags = [w for (w, num) in freqDist ]
    finalResultDictionary = {}
    for hashTag in hashTags:
        finalResultDictionary[hashTag] = getListOfOccurancesAndSentences(hashTag, dictOfTexts)
    print(finalResultDictionary)
def getListOfOccurancesAndSentences(hashTag, dictOfTexts):
    """Return (files, sentences): the names of files whose text contains
    *hashTag*, and every containing sentence with the tag upper-cased.

    NOTE(review): name misspells "Occurrences"; kept because callers use it.
    """
    from nltk import sent_tokenize
    listOfSentences = []
    listOfFiles = []
    for (file, text) in dictOfTexts.items():
        occurs = False
        for sentence in sent_tokenize(text):
            if hashTag in sentence:
                occurs = True
                # Highlight the tag by upper-casing it within the sentence.
                listOfSentences.append(sentence.replace(hashTag, hashTag.upper()))
        if occurs:
            listOfFiles.append(file)
    return (listOfFiles, listOfSentences)
def getTextFromFiles(folder):
    """Read every regular file in *folder* as UTF-8 text.

    Returns
    -------
    tuple
        ``(dictOfTexts, fullText)`` where ``dictOfTexts`` maps file name
        to contents and ``fullText`` concatenates all contents (directory
        listing order).

    Fixes a resource leak: files were opened without ever being closed;
    a ``with`` block now guarantees closure. The old ``mode == "r"``
    guard was always true (default open mode) and has been dropped.
    """
    from os import listdir
    from os.path import isfile, join
    onlyfiles = [f for f in listdir(folder) if isfile(join(folder, f))]
    fullText = ""
    dictOfTexts = {}
    for file in onlyfiles:
        with open(folder + '/' + file, encoding="utf8") as textFile:
            dictOfTexts[file] = textFile.read()
        fullText += dictOfTexts[file]
    return (dictOfTexts, fullText)
def getMostFrequentWords(text):
    """Frequency distribution of the non-stopword singular nouns
    (POS tag 'NN') in *text*, lower-cased."""
    from nltk import FreqDist
    from nltk import word_tokenize
    from nltk import pos_tag
    from nltk.corpus import stopwords
    stop_words = set(stopwords.words('english'))
    tokenizedText = word_tokenize(text.lower())
    taggedWords = pos_tag(tokenizedText)
    nouns = [w for (w, pos) in taggedWords if pos == 'NN']
    filteredText = [w for w in nouns if not w in stop_words]
    return FreqDist(filteredText)
# Script entry point.
if __name__== "__main__":
    main()
158479 | <filename>sdapis/migrations/0012_post_comment_list.py<gh_stars>0
# Generated by Django 3.2.8 on 2021-10-22 04:33
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.2.8: adds Post.comment_list, a Postgres
    # ArrayField of JSON objects defaulting to an empty list.

    dependencies = [
        ('sdapis', '0011_auto_20211021_2133'),
    ]

    operations = [
        migrations.AddField(
            model_name='post',
            name='comment_list',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.JSONField(), default=list, size=None),
        ),
    ]
| StarcoderdataPython |
1606458 | <filename>predict.py
import os
import time
import torch
import numpy as np
import torch.backends.cudnn as cudnn
from argparse import ArgumentParser
# user
from builders.dataset_builder import build_dataset_test
from lib import models, utils
from lib.utils import save_predict, zipDir
import lib.scribble_generation as sg
from config import paths
def parse_args():
    """Build and parse the command-line options for prediction.

    Returns
    -------
    argparse.Namespace
        Parsed options (model, dataset, paths, device settings).

    Fix: the help texts previously contradicted the actual defaults
    ("default ENet" vs interCNN; "cityscapes or camvid" vs drive).
    """
    parser = ArgumentParser(description='Efficient semantic segmentation')
    # model and dataset
    parser.add_argument('--model', default="interCNN",
                        help="model name (default: interCNN)")
    parser.add_argument('--dataset', default="drive",
                        help="dataset: cityscapes, camvid or drive (default: drive)")
    parser.add_argument('--num_workers', type=int, default=4, help="the number of parallel threads")
    parser.add_argument('--batch_size', type=int, default=1,
                        help=" the batch_size is set to 1 when evaluating or testing")
    parser.add_argument('--checkpoint', type=str, default="",
                        help="use the file to load the checkpoint for evaluating or testing ")
    parser.add_argument('--save_seg_dir', type=str, default="./server/",
                        help="saving path of prediction result")
    parser.add_argument('--cuda', default=True, help="run on CPU or GPU")
    parser.add_argument("--gpus", default="0", type=str, help="gpu ids (default: 0)")
    args = parser.parse_args()
    return args
def predict(args, test_loader, model):
    """
    Run single-pass (non-interactive) prediction over the test set and save
    colorized/greyscale results to args.save_seg_dir.

    args:
        test_loader: loaded for test dataset, for those that do not provide label on the test set
        model: model
    return: None (results are written to disk via save_predict)
    """
    # evaluation or test mode
    model.eval()
    total_batches = len(test_loader)
    if args.dataset == "camvid":
        for i, (input, _, size, name) in enumerate(test_loader):
            with torch.no_grad():
                input_var = input.cuda()
                start_time = time.time()
                output = model(input_var)
                torch.cuda.synchronize()
                time_taken = time.time() - start_time
                print('[%d/%d] time: %.2f' % (i + 1, total_batches, time_taken))
                # Argmax over channels -> per-pixel class indices (H, W).
                output = output.cpu().data[0].numpy()
                output = output.transpose(1, 2, 0)
                output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
                # Save the predict greyscale output for Cityscapes official evaluation
                # Modify image name to meet official requirement
                name[0] = name[0].rsplit('_', 1)[0] + '*'
                save_predict(output, None, name[0], args.dataset, args.save_seg_dir,
                             output_grey=False, output_color=True, gt_color=False)
    elif args.dataset == "cityscapes":
        for i, (input, _, size, name) in enumerate(test_loader):
            with torch.no_grad():
                input_var = input.cuda()
                start_time = time.time()
                output = model(input_var)
                torch.cuda.synchronize()
                time_taken = time.time() - start_time
                print('[%d/%d] time: %.2f' % (i + 1, total_batches, time_taken))
                output = output.cpu().data[0].numpy()
                output = output.transpose(1, 2, 0)
                output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
                # Save the predict greyscale output for Cityscapes official evaluation
                # Modify image name to meet official requirement
                name[0] = name[0].rsplit('_', 1)[0] + '*'
                save_predict(output, None, name[0], args.dataset, args.save_seg_dir,
                             output_grey=False, output_color=True, gt_color=False)
    elif args.dataset == "drive":
        for i, (input, _, size, name) in enumerate(test_loader):
            with torch.no_grad():
                input_var = input.cuda()
                start_time = time.time()
                output = model(input_var)
                torch.cuda.synchronize()
                time_taken = time.time() - start_time
                print('[%d/%d] time: %.2f' % (i + 1, total_batches, time_taken))
                output = output.cpu().data[0].numpy()
                output = output.transpose(1, 2, 0)
                output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
                # assumes names like "03_test.png" -> "3"; TODO confirm loader naming
                name = str(int(name[0].split('_')[0]))
                print(f"# {name}.png")
                save_predict(output, None, name, args.dataset, args.save_seg_dir,
                             output_grey=False, output_color=True, gt_color=False)
    else:
        # NOTE(review): this branch unpacks a 3-tuple while the branches above
        # unpack 4-tuples — confirm the per-dataset loader contract.
        for i, (input, size, name) in enumerate(test_loader):
            with torch.no_grad():
                input_var = input.cuda()
                start_time = time.time()
                print(">>>>>>>>>>", input.shape)
                output = model(input_var)
                torch.cuda.synchronize()
                time_taken = time.time() - start_time
                print('[%d/%d] time: %.2f' % (i + 1, total_batches, time_taken))
                output = output.cpu().data[0].numpy()
                output = output.transpose(1, 2, 0)
                output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
                # NOTE(review): assumes a Windows-style '\\' then '/' in the
                # path; raises IndexError otherwise — verify the name format.
                name = name[0].split('\\')
                name = name[1].split('/')
                save_predict(output, None, name[1], args.dataset, args.save_seg_dir,
                             output_grey=True, output_color=False, gt_color=False)
def predict_interact(args, test_loader, model, model_aux, val_iterations=20):
    """
    Run interactive (scribble-refined) prediction over the test set.

    args:
        args: parsed command-line options (dataset, save_seg_dir, ...)
        test_loader: loader for the test dataset
        model: refinement network taking (image, prediction, scribbles)
        model_aux: auxiliary network producing the initial segmentation
        val_iterations: number of simulated user-interaction rounds
            (used by the "drive" branch)
    return: None (results are written to disk via save_predict)

    Bug fixed: in the "drive" branch the inner refinement loop reused the
    name `i`, clobbering the enumerate() batch index used by the progress
    print; the inner loop now uses `it`.
    """
    # evaluation or test mode
    model.eval()
    model_aux.eval()
    total_batches = len(test_loader)
    if args.dataset == "camvid":
        # NOTE(review): this branch (and "cityscapes") runs a plain forward
        # pass without interaction — looks copied from predict(); confirm.
        # Unpack order (input, size, _, name) also differs from predict()'s
        # camvid branch (input, _, size, name) — verify the loader contract.
        for i, (input, size, _, name) in enumerate(test_loader):
            with torch.no_grad():
                input_var = input.cuda()
                start_time = time.time()
                output = model(input_var)
                torch.cuda.synchronize()
                time_taken = time.time() - start_time
                print('[%d/%d] time: %.2f' % (i + 1, total_batches, time_taken))
                output = output.cpu().data[0].numpy()
                output = output.transpose(1, 2, 0)
                output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
                # Save the predict greyscale output for Cityscapes official evaluation
                # Modify image name to meet official requirement
                name[0] = name[0].rsplit('_', 1)[0] + '*'
                save_predict(output, None, name[0], args.dataset, args.save_seg_dir,
                             output_grey=False, output_color=True, gt_color=False)
    elif args.dataset == "cityscapes":
        for i, (input, size, name) in enumerate(test_loader):
            with torch.no_grad():
                input_var = input.cuda()
                start_time = time.time()
                output = model(input_var)
                torch.cuda.synchronize()
                time_taken = time.time() - start_time
                print('[%d/%d] time: %.2f' % (i + 1, total_batches, time_taken))
                output = output.cpu().data[0].numpy()
                output = output.transpose(1, 2, 0)
                output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
                # Save the predict greyscale output for Cityscapes official evaluation
                # Modify image name to meet official requirement
                name[0] = name[0].rsplit('_', 1)[0] + '*'
                save_predict(output, None, name[0], args.dataset, args.save_seg_dir,
                             output_grey=False, output_color=True, gt_color=False)
    elif args.dataset == "drive":
        for i, (input, labels, size, name) in enumerate(test_loader):
            with torch.no_grad():
                images = input.cuda()
                labels_of_user = labels  # ground truth stands in for user feedback
                start_time = time.time()
                # Initial segmentation from the auxiliary network.
                output_aux = model_aux(images)
                prediction_aux = utils.prediction_converter(output_aux)
                # Initialize simulated scribbles from the first prediction.
                scribbles = sg.scribble_input(prediction_aux, labels_of_user, initial=True)
                prediction = prediction_aux.unsqueeze(1).float().cuda()
                # Iterative refinement with simulated user corrections.
                # (`it`, not `i`: keeps the batch index intact for the print.)
                for it in range(0, val_iterations):
                    outputs = model(images, prediction, scribbles)
                    new_prediction = utils.prediction_converter(outputs)
                    scribbles = sg.scribble_input(new_prediction, labels_of_user, scribbles)
                    prediction = new_prediction.unsqueeze(1).float().cuda()
                prediction = torch.squeeze(prediction)
                torch.cuda.synchronize()
                time_taken = time.time() - start_time
                print('[%d/%d] time: %.2f' % (i + 1, total_batches, time_taken))
                output = prediction.cpu().data[0].numpy()
                # assumes names like "03_test.png" -> "3"; TODO confirm loader naming
                name = str(int(name[0].split('_')[0]))
                print(f"# {name}.png")
                save_predict(output, None, name, args.dataset, args.save_seg_dir,
                             output_grey=False, output_color=True, gt_color=False)
    else:
        for i, (input, _, size, name) in enumerate(test_loader):
            with torch.no_grad():
                input_var = input.cuda()
                start_time = time.time()
                print(">>>>>>>>>>", input.shape)
                output = model(input_var)
                torch.cuda.synchronize()
                time_taken = time.time() - start_time
                print('[%d/%d] time: %.2f' % (i + 1, total_batches, time_taken))
                output = output.cpu().data[0].numpy()
                output = output.transpose(1, 2, 0)
                output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
                # NOTE(review): assumes a Windows-style '\\' then '/' in the
                # path; raises IndexError otherwise — verify the name format.
                name = name[0].split('\\')
                name = name[1].split('/')
                save_predict(output, None, name[1], args.dataset, args.save_seg_dir,
                             output_grey=True, output_color=False, gt_color=False)
def test_model(args):
    """
    Main entry point for testing a segmentation model.

    Builds the model named by ``args.model`` (looked up on the module-level
    ``models``/``paths`` objects via ``eval``), loads its weights, runs the
    test loader through :func:`predict` (or :func:`predict_interact` for the
    interactive ``interCNN`` model), and writes predictions to
    ``args.save_seg_dir``.

    param args: global arguments (dataset, model, gpus, checkpoint, ...)
    return: None
    """
    print(args)
    if args.cuda:
        print("=====> use gpu id: '{}'".format(args.gpus))
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
        if not torch.cuda.is_available():
            raise Exception("no GPU found or wrong gpu id, please run without --cuda")
    # build the model
    # model = build_model(args.model, num_classes=args.classes)
    # model = eval(args.model)
    # NOTE(review): eval-based dispatch assumes args.model names an attribute
    # on both `models` and `paths`; a typo surfaces only at runtime.
    model = eval(f"models.{args.model}")
    ck_path = eval(f"paths.{args.model}_pth")
    model = model(args.classes).cuda()
    model.load_state_dict(torch.load(ck_path))
    model.eval()
    if args.model == 'interCNN':
        # interCNN needs an auxiliary network to produce the initial mask.
        model_aux = models.autoCNN(args.classes).cuda()
        model_aux.load_state_dict(torch.load(paths.autoCNN_pth))
        model_aux.eval()
    if args.cuda:
        model = model.cuda()  # using GPU for inference (already on GPU above; harmless no-op)
        cudnn.benchmark = True
    if not os.path.exists(args.save_seg_dir):
        os.makedirs(args.save_seg_dir)
    # load the test set
    datas, testLoader = build_dataset_test(args.dataset, args.num_workers, none_gt=True)
    if args.checkpoint:
        # An explicit checkpoint overrides the weights loaded from ck_path.
        if os.path.isfile(args.checkpoint):
            print("=====> loading checkpoint '{}'".format(args.checkpoint))
            checkpoint = torch.load(args.checkpoint)
            model.load_state_dict(checkpoint['model'])
            # model.load_state_dict(convert_state_dict(checkpoint['model']))
        else:
            print("=====> no checkpoint found at '{}'".format(args.checkpoint))
            raise FileNotFoundError("no checkpoint found at '{}'".format(args.checkpoint))
    print("=====> beginning testing")
    print("test set length: ", len(testLoader))
    if args.model == 'interCNN':
        predict_interact(args, testLoader, model, model_aux)
    else:
        predict(args, testLoader, model)
if __name__ == '__main__':
    args = parse_args()
    # Predictions for 'remote' go into a fixed 'results' leaf folder;
    # every other dataset gets a per-model folder.
    leaf = 'results' if args.dataset == 'remote' else args.model
    args.save_seg_dir = os.path.join(args.save_seg_dir, args.dataset, 'predict', leaf)
    # Number of segmentation classes for each supported dataset.
    num_classes_by_dataset = {
        'cityscapes': 19,
        'camvid': 11,
        'seed': 2,
        'remote': 7,
        'drive': 2,
    }
    if args.dataset not in num_classes_by_dataset:
        raise NotImplementedError(
            "This repository now supports two datasets: cityscapes and camvid, %s is not included" % args.dataset)
    args.classes = num_classes_by_dataset[args.dataset]
    test_model(args)
| StarcoderdataPython |
4803418 | <reponame>lnxpy/postern
# Generated by Django 2.2.8 on 2020-01-12 09:59
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration for the `post` app:
    # renames AdminProfile.user -> username, drops `phone`, and adds the
    # profile fields (avatar, bio, city, names, and social-media links).

    dependencies = [
        ('post', '0003_auto_20200112_0934'),
    ]

    operations = [
        # user -> username (field rename, data preserved).
        migrations.RenameField(
            model_name='adminprofile',
            old_name='user',
            new_name='username',
        ),
        migrations.RemoveField(
            model_name='adminprofile',
            name='phone',
        ),
        # Optional profile picture stored under MEDIA_ROOT/profile_avatar.
        migrations.AddField(
            model_name='adminprofile',
            name='avatar',
            field=models.ImageField(blank=True, upload_to='profile_avatar'),
        ),
        migrations.AddField(
            model_name='adminprofile',
            name='bio',
            field=models.TextField(default='', max_length=150),
        ),
        migrations.AddField(
            model_name='adminprofile',
            name='city',
            field=models.CharField(default='', max_length=15),
        ),
        migrations.AddField(
            model_name='adminprofile',
            name='first_name',
            field=models.CharField(default='', max_length=30),
        ),
        migrations.AddField(
            model_name='adminprofile',
            name='github',
            field=models.URLField(default=''),
        ),
        migrations.AddField(
            model_name='adminprofile',
            name='last_name',
            field=models.CharField(default='', max_length=40),
        ),
        migrations.AddField(
            model_name='adminprofile',
            name='twitter',
            field=models.URLField(default=''),
        ),
        migrations.AddField(
            model_name='adminprofile',
            name='website',
            field=models.URLField(default=''),
        ),
    ]
| StarcoderdataPython |
3431080 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
import copy
class Net(torch.nn.Module):
    """GCN autoencoder-style network that mixes node embeddings across the
    graphs of a dataset and reconstructs the input feature dimensionality.
    (Original comments were mojibake; translated to English.)
    """

    def __init__(self, dataset, device):
        super(Net, self).__init__()
        self.device = device
        self.dataset = dataset  # loaded dataset of graphs
        # Linear node-feature extraction (MLP projection of raw features).
        self.fc1 = nn.Linear(dataset.num_features, 128)
        # Convolution layers.
        self.conv1 = GCNConv(128, 128)
        self.conv2 = GCNConv(128, 128)
        # "Deconvolution" layers (mathematically the same as GCN convolutions).
        self.dconv1 = GCNConv(128, 128)
        self.dconv2 = GCNConv(128, 128)
        # Linear fusion back to the original node-feature dimensionality.
        self.fc2 = nn.Linear(128, dataset.num_features)
        # Parameter groups updated during back-propagation.
        self.w1 = self.fc1.parameters()
        self.conv1_reg_params1 = self.conv1.parameters()
        self.conv2_reg_params1 = self.conv2.parameters()
        self.dconv1_reg_params2 = self.dconv1.parameters()
        self.dconv2_reg_params2 = self.dconv2.parameters()
        self.w2 = self.fc2.parameters()

    def forward(self):
        pre_x = []  # embeddings right after fc1 (key-attribute extraction)
        normal_x = []  # per-graph node embeddings after the two GCN layers
        # Convolve each graph of the dataset separately.
        for data in self.dataset:
            x, edge_index = data.x, data.edge_index
            x = self.fc1(x)
            pre_x.append(x)
            x = self.conv1(x, edge_index)
            x = self.conv2(x, edge_index)
            normal_x.append(x)
        # Initialise bookkeeping variables for cross-graph mixing.
        graph_number = len(normal_x)
        graph_id = list(range(graph_number))
        temp_list = copy.deepcopy(graph_id)
        recorder_index = []
        # Mix node embeddings across the different graphs: graph i is
        # "deconvolved" with the embeddings of graph i+1 (wrapping around).
        xs = []
        for i in graph_id:
            if i == graph_number - 1:
                index_id = temp_list[0]
            else:
                index_id = temp_list[i+1]
            edge_index = self.dataset[i].edge_index
            # Deconvolution: same maths as a forward GCN convolution.
            normal_x[i] = self.dconv1(normal_x[index_id], edge_index)
            normal_x[i] = self.dconv2(normal_x[i], edge_index)
            xs.append(normal_x[i])
            recorder_index.append(index_id)
        # Multi-feature fusion of the per-graph embeddings (element-wise
        # product); the authors note a better fusion method is an open point.
        # NOTE(review): range(1, len(xs)-1) skips the LAST element of xs —
        # confirm whether range(1, len(xs)) was intended.
        fin_feat = xs[0]
        for feat_index in range(1, len(xs)-1):
            fin_feat = fin_feat * xs[feat_index]
        # fin_feat = F.log_softmax(fin_feat)
        # NOTE(review): F.sigmoid is deprecated in favour of torch.sigmoid.
        fin_feat = F.sigmoid(fin_feat)
        Loss_embedding = self.fc2(fin_feat)
        return pre_x, xs, fin_feat, Loss_embedding
"""Scrape the LinkedIn topics directory and write the skill names to a file.

Fixes over the original: Python-2 ``print`` statements converted to the
``print()`` function, and the output file is written inside a ``with`` block
so it is always flushed and closed (the original leaked the handle).
"""
from bs4 import BeautifulSoup
import requests
import time

# Manually extracted sub-index page count for each topic letter.
index = {"a": 100, "b": 96, "c": 99, "d": 97, "e": 99, "f": 99, "g": 100, "h": 95, "i": 99, "j": 80, "k": 87, "l": 95,
         "m": 97, "n": 92, "o": 94, "p": 98, "q": 76, "r": 95, "s": 99, "t": 95, "u": 84, "v": 96, "w": 93, "x": 81, "more": 87}

urls = []
skills = []

# Generate the directory URLs from the per-letter page counts.
for key, value in index.items():
    for i in range(1, value + 1):
        urls.append("https://www.linkedin.com/directory/topics-" + str(key) + "-" + str(i) + "/")

# Topics 'y' and 'z' have no sub-indexes.
urls.append("https://www.linkedin.com/directory/topics-y/")
urls.append("https://www.linkedin.com/directory/topics-z/")

client = requests.Session()
HOMEPAGE_URL = 'https://www.linkedin.com'
LOGIN_URL = 'https://www.linkedin.com/uas/login-submit'

html = client.get(HOMEPAGE_URL).content
soup = BeautifulSoup(html, "lxml")
csrf = soup.find(id="loginCsrfParam-login")['value']

# Replace EMAIL_ID and PASSWORD with your LinkedIn credentials
login_information = {
    'session_key': 'EMAIL_ID',
    'session_password': '<PASSWORD>',
    'loginCsrfParam': csrf,
}
client.post(LOGIN_URL, data=login_information)

for url in urls:
    print("Crawling: " + str(url))
    # Abiding the politeness policy
    time.sleep(1)
    response = client.get(url)
    parse_response = BeautifulSoup(response.content, "lxml")
    parse_class = parse_response.find("ul", {"class": "column quad-column"})
    for anchor in parse_class.findAll("a"):
        skills.append(anchor.text)

# Writing skills to a text file; 'with' guarantees flush + close.
with open("output/skills.txt", "w") as f:
    for skill in skills:
        f.write(skill)
        f.write("\n")
12828882 | <filename>2019/06_UniversalOrbitMap/uomap.py
# ======================================================================
# Universal Orbit Map
# Advent of Code 2019 Day 06 -- <NAME> -- https://adventofcode.com
#
# Computer simulation by Dr. <NAME>
# ======================================================================
# ======================================================================
# u o m a p . p y
# ======================================================================
"Map for Universal Orbit Map problem for Advent of Code 2019 Day 06"
# ----------------------------------------------------------------------
# import
# ----------------------------------------------------------------------
import dag
# ----------------------------------------------------------------------
# constants
# ----------------------------------------------------------------------
COM = 'COM'    # name of the universal Center Of Mass (the root of the map)
YOU = 'YOU'    # node representing the player's ship
SANTA = 'SAN'  # node representing Santa's ship
# ======================================================================
# UOMap
# ======================================================================
class UOMap(dag.DAG):
    """Object representing a Universal Orbit Map.

    The map is a directed acyclic graph rooted at COM; an edge A -> B
    means "B orbits A".
    """

    def __init__(self, pairs=None, text=None):
        """Build the map from (center, body) pairs and/or 'A)B' text lines."""
        # 1. Start with an empty dag
        super(UOMap, self).__init__(pairs=pairs)
        # 2. If there is text, process it
        if text is not None:
            # 3. Loop for all of the lines
            for line in text:
                # 4. Split line into the two node names
                nodes = line.split(')')
                # 5. Add nodes to graph
                self.add_node(nodes[0], nodes[1])

    def orbits(self, node):
        "Return the number of [in]direct orbits for a given node"
        # Number of orbits is path length from COM minus 1
        path = self.find_shortest_path(COM, node)
        assert path is not None
        return len(path) - 1

    def total_orbits(self):
        "Return the number of direct and indirect orbits"
        # 1. Start with no orbits
        result = 0
        # 2. Loop for all of the nodes
        for node in self.nodes():
            # 3. The center itself orbits nothing
            if node == COM:
                continue
            # 4. Orbits is one less than the length of path from COM
            result += self.orbits(node)
        # 5. Return total number of orbits
        return result

    def count_orbits(self):
        """Count the orbits by walking the tree.

        Equivalent to total_orbits() but done with a single graph walk,
        so it is O(nodes) rather than one path search per node.

        BUGFIX: the original body was ``while todo: pass`` with a non-empty
        ``todo`` list, i.e. an unimplemented stub that loops forever.
        """
        # 1. Start with nothing, but a list of things to do
        orbits = {}
        todo = [(COM, 0)]
        # 2. Loop until there is nothing to do
        while todo:
            # 3. Take the next node and its depth (= its orbit count)
            node, depth = todo.pop()
            # 4. Record the node's orbit count (COM contributes 0)
            orbits[node] = depth
            # 5. Schedule every body orbiting this node, one level deeper
            for body in self.dag.get(node, []):
                todo.append((body, depth + 1))
        # 9. Return the sum of the orbits
        return sum(orbits.values())

    def bodies(self):
        "Return all bodies (nodes) in the orbit map"
        return self.nodes()

    def minimum_transfers(self, from_node, to_node):
        "Find the minimumal number of orbital transfers between two nodes"
        # 1. Assume no path
        result = []
        # 2. If from you or Santa, find where orbiting
        if from_node in [YOU, SANTA]:
            from_node = self.orbiting(from_node)
        # 3. If to you or Santa, find where orbiting
        if to_node in [YOU, SANTA]:
            to_node = self.orbiting(to_node)
        # 4. Find the shortest path from the center to each
        from_path = self.find_shortest_path(COM, from_node)
        to_path = self.find_shortest_path(COM, to_node)
        assert from_path is not None
        assert to_path is not None
        # 5. Keep only the parts after the common prefix
        for indx in range(min(len(from_path), len(to_path))):
            if from_path[indx] == to_path[indx]:
                continue
            result = from_path[indx:] + to_path[indx:]
            break
        # 6. Return length of unique legs
        return len(result)

    def orbiting(self, node):
        "What is the node orbiting?"
        # 1. Asssume the worst
        result = None
        # 2. if not the center, find where it is orbiting
        if node != COM:
            for onode, bodies in self.dag.items():
                if node in bodies:
                    result = onode
                    break
        # 3. Return where orbiting or None
        return result
# ----------------------------------------------------------------------
# module initialization
# ----------------------------------------------------------------------
if __name__ == '__main__':
    pass  # library module only; no standalone behaviour
# ======================================================================
# end u o m a p . p y end
# ======================================================================
| StarcoderdataPython |
9684849 |
import csv
import pandas as pd
import argparse
import random
# file = 'results_veridical/all_dataset'
# 0 id
# 1 text: <NAME> john and tom
# 2 hypothesis: fred saw john
# 3 depth: 1
# 4 occur: 1
# 5 label: yes
res_dir = 'results_veridical'  # output directory for the generated TSV files
# Veridical clause-embedding predicates: "X <pred> that P" entails P.
positive_clause_preds = ['realized', 'acknowledged', 'remembered', 'noted', 'found', 'noticed', 'learned', 'saw', 'revealed', 'discovered', 'understood', 'knew', 'admitted', 'recognized', 'observed']
# Non-veridical predicates: "X <pred> that P" leaves P's truth unknown.
neutral_clause_preds = ['felt', 'claimed', 'doubted', 'hoped', 'predicted', 'implied', 'suspected', 'wished', 'thought', 'believed', 'heard', 'expected', 'estimated', 'assumed', 'argued']
#positive_clause_preds = ['realized', 'knew', 'remembered']
#neutral_clause_preds = ['hoped', 'felt', 'mentioned']
def subj_schema(pred):
    """Embed *pred* in the carrier phrase 'Someone <pred> that '."""
    return f'Someone {pred} that '
def filter_label(l1, l2):
    """Combine two entailment labels: 'yes' only when both are 'yes'."""
    return 'yes' if l1 == 'yes' and l2 == 'yes' else 'unk'
if __name__ == '__main__':
    # CLI: one positional input file of tab-separated rows
    # (id, text, hypothesis, depth, occur, label).
    parser = argparse.ArgumentParser('generate veridical inference \
sentence pairs')
    parser.add_argument('file',
                        help='input file')
    args = parser.parse_args()
    with open(args.file) as f:
        reader = list(csv.reader(f, delimiter='\t'))
    data_ft_t = []  # f(t) => t
    data_ft_h = []  # f(t) => h
    for row in reader:
        text = row[1].rstrip()
        hypo = row[2]
        depth = int(row[3])
        occur = int(row[4])
        label = row[5]
        # Veridical predicates: "Someone realized that T" entails T,
        # so the (f(t), t) pair is labelled 'yes'.
        for pred in positive_clause_preds:
            f_text = subj_schema(pred) + text
            p_label = 'yes'
            q_label = filter_label(p_label, label)
            data_ft_t.append([f_text, text, depth, occur, label, p_label])
            data_ft_h.append([f_text, hypo, depth, occur, label, q_label])
        # Non-veridical predicates: entailment stays unknown.
        for pred in neutral_clause_preds:
            f_text = subj_schema(pred) + text
            p_label = 'unk'
            q_label = 'unk'
            data_ft_t.append([f_text, text, depth, occur, label, p_label])
            data_ft_h.append([f_text, hypo, depth, occur, label, q_label])
    # Pruning (ft,t) pairs and (ft,h) pairs back down to the input size.
    total = len(reader)
    data_ft_t_pruned = random.sample(data_ft_t, total)
    data_ft_h_pruned = random.sample(data_ft_h, total)
    with open(res_dir + '/data_ft_t.tsv', 'w') as fo:
        writer = csv.writer(fo, delimiter='\t')
        writer.writerows(data_ft_t_pruned)
    with open(res_dir + '/data_ft_h.tsv', 'w') as fo:
        writer = csv.writer(fo, delimiter='\t')
        writer.writerows(data_ft_h_pruned)
    # Copy the raw (t, h) pairs, dropping the id column.
    df = pd.read_csv(args.file, delimiter='\t', header=None)
    data = df.drop(df.columns[0], axis=1)
    data.to_csv(res_dir + '/data_t_h.tsv', sep='\t', index=False, header=None)
# f(t) => t => g(a,b) => h(a,b)
#
# df = pd.read_csv('results_veridical/all_dataset', delimiter='\t', header=None)
# df[2] = df[2] + ',' + df[5]
# df[2] = df[2].str.split(',')
# df = df.groupby(1).agg({2: list, 3: list, 4: list})
# df = df.agg({2: lambda x: x,
# 3: lambda x: x[0],
# 4: lambda x: x[0]})
| StarcoderdataPython |
"""
FILTERED ELEMENT COLLECTOR

Dynamo Python node: collects all placed Family Instances of the 'Planting'
category visible in the active Revit view and returns them through OUT.

BUGFIX: the multi-line import and the chained collector expression were
split across lines without continuations (a SyntaxError); both are now
wrapped in parentheses.
"""
__author__ = '<NAME> - <EMAIL>'
__twitter__ = '@solamour'
__version__ = '1.0.0'

# Importing Reference Modules
import clr  # CLR ( Common Language Runtime Module )
clr.AddReference("RevitServices")  # Adding the RevitServices.dll special
# Dynamo module to deal with Revit
import RevitServices  # Importing RevitServices
from RevitServices.Persistence import DocumentManager  # From RevitServices
# import the Document Manager
clr.AddReference("RevitAPI")  # Adding the RevitAPI.dll module to access
# the Revit API
import Autodesk  # Here we import the Autodesk namespace
# From the Autodesk namespace - derived down to the Revit Database, we
# import only the classes we need.
from Autodesk.Revit.DB import (
    FilteredElementCollector,
    BuiltInCategory,
    FamilyInstance,
)

# Shorthand for the current Revit document so we don't repeat the long
# namespace path below.
doc = DocumentManager.Instance.CurrentDBDocument

# Collect, in the Active View, all placed (non-type) Family Instances of
# the 'Planting' category, then cast them to Elements usable in Dynamo.
builtInCollector = (
    FilteredElementCollector(doc, doc.ActiveView.Id)
    .OfClass(FamilyInstance)
    .OfCategory(BuiltInCategory.OST_Planting)
    .WhereElementIsNotElementType()
    .ToElements()
)

# Results are returned to Dynamo by assigning to the OUT port.
OUT = builtInCollector
| StarcoderdataPython |
104940 | import unittest
import numpy as np
from sklearn.datasets import make_classification
from skactiveml.classifier import ParzenWindowClassifier
from skactiveml.stream import (
FixedUncertainty,
VariableUncertainty,
Split,
RandomVariableUncertainty,
)
class TemplateTestUncertaintyZliobaite:
    """Shared parameter-validation tests for the Zliobaite-style
    stream-based uncertainty query strategies.

    Concrete subclasses supply the strategy class via get_query_strategy().
    """

    def setUp(self):
        # Build a small, reproducible classification stream plus an
        # initial training window of the first 10 samples.
        rng = np.random.RandomState(0)
        stream_length = 100
        train_init_size = 10
        X, y = make_classification(
            n_samples=stream_length + train_init_size,
            random_state=rng.randint(2**31 - 1),
            shuffle=True,
        )
        self.X = X[:train_init_size, :]
        self.candidates = X[train_init_size:, :]
        self.y = y[:train_init_size]
        self.clf = ParzenWindowClassifier()
        self.kwargs = dict(
            candidates=self.candidates, clf=self.clf, X=self.X, y=self.y
        )

    def test_init_param_budget(self):
        # budget must be a float greater than 0
        for bad_budget in ([], "string", -1):
            query_strategy = self.get_query_strategy()(budget=bad_budget)
            self.assertRaises(TypeError, query_strategy.query, **self.kwargs)

    def test_init_param_budget_manager(self):
        # budget_manager must be an instance of a budget manager class
        query_strategy = self.get_query_strategy()(budget_manager=[])
        self.assertRaises(TypeError, query_strategy.query, **self.kwargs)

    def test_init_param_random_state(self):
        query_strategy = self.get_query_strategy()(
            random_state="string",
        )
        self.assertRaises(ValueError, query_strategy.query, **self.kwargs)

    def test_query_param_candidates(self):
        # candidates must be a two-dimensional array
        query_strategy = self.get_query_strategy()()
        for bad_candidates in (1, None, np.ones(5)):
            self.assertRaises(
                ValueError,
                query_strategy.query,
                candidates=bad_candidates,
                clf=self.clf,
                X=self.X,
                y=self.y,
            )

    def test_query_param_clf(self):
        # clf must be a classifier
        for bad_clf in ("string", 1):
            query_strategy = self.get_query_strategy()()
            self.assertRaises(
                TypeError,
                query_strategy.query,
                candidates=self.candidates,
                clf=bad_clf,
                X=self.X,
                y=self.y,
            )

    def test_query_param_X(self):
        # X must be a two-dimensional array equal in length to y
        query_strategy = self.get_query_strategy()()
        for bad_X in (1, None, np.ones(5), self.X[1:]):
            self.assertRaises(
                ValueError,
                query_strategy.query,
                candidates=self.candidates,
                clf=self.clf,
                X=bad_X,
                y=self.y,
                fit_clf=True,
            )

    def test_query_param_y(self):
        # y must be a one-dimensional array equal in length to X
        query_strategy = self.get_query_strategy()()
        for bad_y, expected_error in (
            (1, TypeError),
            (None, TypeError),
            (self.y[1:], ValueError),
        ):
            self.assertRaises(
                expected_error,
                query_strategy.query,
                candidates=self.candidates,
                clf=self.clf,
                X=self.X,
                y=bad_y,
                fit_clf=True,
            )

    def test_query_param_sample_weight(self):
        # sample_weight must be float-convertible and match y in size
        query_strategy = self.get_query_strategy()()
        for bad_weight, expected_error in (
            ("string", TypeError),
            (["string", "numbers", "test"], ValueError),
            ([1], ValueError),
        ):
            self.assertRaises(
                expected_error,
                query_strategy.query,
                candidates=self.candidates,
                clf=self.clf,
                X=self.X,
                y=self.y[1:],
                sample_weight=bad_weight,
                fit_clf=True,
            )

    def test_query_param_fit_clf(self):
        # fit_clf must be a boolean
        query_strategy = self.get_query_strategy()()
        for bad_fit_clf in ("string", 1):
            self.assertRaises(
                TypeError,
                query_strategy.query,
                candidates=self.candidates,
                clf=self.clf,
                X=self.X,
                y=self.y,
                fit_clf=bad_fit_clf,
            )

    def test_query_param_return_utilities(self):
        # return_utilities must be a boolean
        query_strategy = self.get_query_strategy()()
        for bad_flag in ("string", 1):
            self.assertRaises(
                TypeError,
                query_strategy.query,
                candidates=self.candidates,
                clf=self.clf,
                X=self.X,
                y=self.y[1:],
                return_utilities=bad_flag,
            )
class TestSplit(TemplateTestUncertaintyZliobaite, unittest.TestCase):
    # Runs the shared template tests against the Split strategy.
    def get_query_strategy(self):
        return Split
class TestFixedUncertainty(TemplateTestUncertaintyZliobaite, unittest.TestCase):
    # Runs the shared template tests against the FixedUncertainty strategy.
    def get_query_strategy(self):
        return FixedUncertainty
class TestVariableUncertainty(
    TemplateTestUncertaintyZliobaite, unittest.TestCase
):
    # Runs the shared template tests against the VariableUncertainty strategy.
    def get_query_strategy(self):
        return VariableUncertainty
class TestRandomVariableUncertainty(
    TemplateTestUncertaintyZliobaite, unittest.TestCase
):
    # Runs the shared template tests against the RandomVariableUncertainty strategy.
    def get_query_strategy(self):
        return RandomVariableUncertainty
| StarcoderdataPython |
3250027 | from flask_babel import gettext
def weekdays():
    """Return the localized three-letter weekday abbreviations, Monday first."""
    abbreviations = (u"Mon", u"Tue", u"Wed", u"Thu", u"Fri", u"Sat", u"Sun")
    return [gettext(abbreviation) for abbreviation in abbreviations]
def sumHealth(enemies):
    """Return the combined health of every enemy in *enemies*."""
    return sum(enemy.health for enemy in enemies)
# CodeCombat level script: `hero` is injected by the game runtime.
cannon = hero.findNearest(hero.findFriends())  # nearest friendly unit (the cannon)
enemies = cannon.findEnemies()
ogreSummaryHealth = sumHealth(enemies)
# BUGFIX: concatenating a number to a str raises TypeError in standard
# Python; convert explicitly so the message builds everywhere.
hero.say("Use " + str(ogreSummaryHealth) + " grams.")
| StarcoderdataPython |
3318007 | <filename>python/testData/MockSdk3.4/Lib/abc.py<gh_stars>0
# Stubs
class object:
    # Stub: intentionally shadows the builtin; this file is IDE test data.
    pass
class ABCMeta:
    # Stub metaclass standing in for abc.ABCMeta; no behaviour needed.
    pass
def abstractmethod(foo):
    # Stub decorator; unlike the real abc.abstractmethod it returns None,
    # which is sufficient for name-resolution test purposes.
    pass
"""Read space-separated integer pairs from stdin until the terminating
"0 0" pair, then print the sum of each pair on its own line."""
sums = []
while True:
    a, b = (int(token) for token in input().split(" "))
    if a == 0 and b == 0:
        break
    sums.append(a + b)
for total in sums:
    print(total)
| StarcoderdataPython |
119622 | array([[ -8.71756106, -1.36180276],
[ -8.58324975, -1.6198254 ],
[ -8.44660752, -1.87329902],
[ -8.30694854, -2.12042891],
[ -8.16366778, -2.35941504],
[ -8.01626074, -2.58846783],
[ -7.8643173 , -2.80576865],
[ -7.70752251, -3.0094502 ],
[ -7.5458139 , -3.19806094],
[ -7.37912158, -3.3697834 ],
[ -7.20747945, -3.5226388 ],
[ -7.03113284, -3.65485777],
[ -6.85017635, -3.76269362],
[ -6.66523309, -3.84275185],
[ -6.47736681, -3.89099441],
[ -6.28828709, -3.90157094],
[ -6.10156897, -3.86340302],
[ -5.91871331, -3.79305295],
[ -5.74014533, -3.69605418],
[ -5.56572991, -3.57762694],
[ -5.39517681, -3.44210881],
[ -5.22781241, -3.29456196],
[ -5.06232383, -3.14085048],
[ -4.89257735, -2.98799629],
[ -4.71966422, -2.83672204],
[ -4.54372346, -2.68713228],
[ -4.36476778, -2.53940585],
[ -4.18276114, -2.39377984],
[ -3.99760462, -2.25058361],
[ -3.80926438, -2.11016077],
[ -3.61765447, -1.97299377],
[ -3.4227503 , -1.83963827],
[ -3.22466935, -1.71063201],
[ -3.02363515, -1.5864963 ],
[ -2.8199305 , -1.46774788],
[ -2.61386232, -1.3549046 ],
[ -2.40573771, -1.24848664],
[ -2.19584837, -1.14901525],
[ -1.98446111, -1.05701006],
[ -1.77181225, -0.97298507],
[ -1.55810414, -0.89744287],
[ -1.34350217, -0.83086662],
[ -1.12813134, -0.77370943],
[ -0.91207182, -0.72638078],
[ -0.69535387, -0.68923045],
[ -0.47795283, -0.66253066],
[ -0.25978553, -0.64645761],
[ -0.04070946, -0.64107399],
[ 0.17947379, -0.64631382],
[ 0.40100956, -0.661971 ],
[ 0.62417469, -0.68769217],
[ 0.84925505, -0.72297438],
[ 1.0765168 , -0.7671692 ],
[ 1.30617386, -0.81949558],
[ 1.53834874, -0.87904872],
[ 1.77300538, -0.94474994],
[ 2.01004375, -1.0156523 ],
[ 2.24941382, -1.09111049],
[ 2.49090963, -1.17034172],
[ 2.76505053, -1.25699281],
[ 3.0392737 , -1.33956764],
[ 3.31357819, -1.4173004 ],
[ 3.58794949, -1.48951235],
[ 3.86235753, -1.55528368],
[ 4.13674424, -1.61362995],
[ 4.41101533, -1.66379521],
[ 4.68503054, -1.70493339],
[ 4.95859183, -1.73617318],
[ 5.23143608, -1.75675047],
[ 5.50323138, -1.76605581],
[ 5.77357759, -1.7636462 ],
[ 6.04201219, -1.74925505],
[ 6.30802249, -1.72280316],
[ 6.57106704, -1.6844134 ],
[ 6.83061188, -1.63443731],
[ 7.0861612 , -1.57343982],
[ 7.33719115, -1.50196243],
[ 7.58299863, -1.42023179],
[ 7.8226528 , -1.32820378],
[ 8.0545371 , -1.22514719],
[ 8.27658784, -1.11037577],
[ 8.48611424, -0.9831958 ],
[ 8.67910996, -0.84268891],
[ 8.85098102, -0.68873567],
[ 8.99611087, -0.52182284],
[ 9.10907438, -0.34357458],
[ 9.18417432, -0.15630253],
[ 9.21305752, 0.03692533],
[ 9.1840698 , 0.23008171],
[ 9.11910658, 0.4192829 ],
[ 9.02164578, 0.60281683],
[ 8.8945003 , 0.77935859],
[ 8.73948257, 0.94759291],
[ 8.5591441 , 1.10658568],
[ 8.35621373, 1.25571578],
[ 8.13392856, 1.39493299],
[ 7.89600098, 1.5249387 ],
[ 7.64494533, 1.64617321],
[ 7.38361784, 1.75967206],
[ 7.11449512, 1.86661923],
[ 6.83961129, 1.96821002],
[ 6.56059338, 2.06558415],
[ 6.27873646, 2.15980551],
[ 5.99507103, 2.25185709],
[ 5.71041846, 2.34264296],
[ 5.42539621, 2.43296897],
[ 5.14049084, 2.52356403],
[ 4.85570433, 2.61443171],
[ 4.57103584, 2.70556915],
[ 4.28648169, 2.79696731],
[ 4.00217813, 2.88852238],
[ 3.72231814, 2.97718081],
[ 3.44870505, 3.06108881],
[ 3.18310961, 3.13857246],
[ 2.92666246, 3.20839957],
[ 2.68000575, 3.26968441],
[ 2.44342382, 3.32179983],
[ 2.21693264, 3.36431422],
[ 2.00034546, 3.39694375],
[ 1.79331822, 3.41952112],
[ 1.59535102, 3.4320146 ],
[ 1.40578511, 3.43455695],
[ 1.22408879, 3.42711031],
[ 1.04965328, 3.40969902],
[ 0.88172434, 3.38251449],
[ 0.71940866, 3.34591804],
[ 0.5619256 , 3.29981197],
[ 0.40770804, 3.24544617],
[ 0.25548391, 3.18343735],
[ 0.10399591, 3.11449499],
[ -0.04775527, 3.03962421],
[ -0.20018042, 2.96056489],
[ -0.35331487, 2.8785744 ],
[ -0.52508461, 2.78543704],
[ -0.69712913, 2.70164087],
[ -0.86954503, 2.63519574],
[ -1.04209219, 2.59240169],
[ -1.21405958, 2.57909091],
[ -1.38377538, 2.60407937],
[ -1.55062259, 2.65787841],
[ -1.71439338, 2.73536957],
[ -1.87529031, 2.83169338],
[ -2.03395581, 2.94135403],
[ -2.19140835, 3.05794591],
[ -2.33805266, 3.16255329],
[ -2.48451156, 3.25896796],
[ -2.63059232, 3.3426935 ],
[ -2.77602606, 3.41054738],
[ -2.92043777, 3.46019213],
[ -3.06330851, 3.48968652],
[ -3.20374051, 3.4957004 ],
[ -3.34029351, 3.47415945],
[ -3.47007084, 3.41768448],
[ -3.59300929, 3.33351807],
[ -3.70923634, 3.22566695],
[ -3.81889624, 3.09672268],
[ -3.92268025, 2.95011367],
[ -4.02185397, 2.79022243],
[ -4.1178521 , 2.62159416],
[ -4.2161323 , 2.46452926],
[ -4.31781117, 2.31796144],
[ -4.42318035, 2.1831457 ],
[ -4.5322453 , 2.06073257],
[ -4.64501631, 1.95155647],
[ -4.76140997, 1.85631353],
[ -4.88126733, 1.77560052],
[ -5.00435734, 1.70987926],
[ -5.13042301, 1.65962035],
[ -5.25938944, 1.62667518],
[ -5.39072417, 1.61040595],
[ -5.52382922, 1.60897593],
[ -5.65849722, 1.62377451],
[ -5.79444211, 1.65715234],
[ -5.93107153, 1.71528106],
[ -6.06746368, 1.79081424],
[ -6.20317769, 1.88240466],
[ -6.33788905, 1.98864946],
[ -6.47136342, 2.10834012],
[ -6.60346438, 2.24020588],
[ -6.73412053, 2.3831744 ],
[ -6.86333984, 2.53617923],
[ -6.99113472, 2.69858842],
[ -7.12064178, 2.87523527],
[ -7.25444618, 3.04299389],
[ -7.39243765, 3.20138631],
[ -7.53467379, 3.34977808],
[ -7.68110851, 3.48788772],
[ -7.83182097, 3.61515988],
[ -7.98682564, 3.73117341],
[ -8.14612629, 3.83548086],
[ -8.30966415, 3.92772849],
[ -8.47726222, 4.00781042],
[ -8.64863214, 4.07585135],
[ -8.82344578, 4.13196495],
[ -9.0013035 , 4.17638389],
[ -9.18202552, 4.20814242],
[ -9.36528871, 4.22605346],
[ -9.55064213, 4.22773377],
[ -9.73708983, 4.20936143],
[ -9.92226831, 4.16594795],
[-10.10086343, 4.09095817],
[-10.26193798, 3.97820376],
[-10.38552981, 3.82399663],
[-10.47199275, 3.64300549],
[-10.52918735, 3.44482848],
[-10.5584774 , 3.23235077],
[-10.56124879, 3.00791184],
[-10.53904816, 2.77357185],
[-10.49439476, 2.53143451],
[-10.42938497, 2.2831314 ],
[-10.34681037, 2.03021889],
[-10.24958903, 1.77398657],
[-10.14059192, 1.51544409],
[-10.02259337, 1.25535648],
[ -9.89818344, 0.99427857],
[ -9.76968504, 0.73258465],
[ -9.63916162, 0.47047985],
[ -9.50822 , 0.20830133],
[ -9.37711487, -0.05380021],
[ -9.24579077, -0.31579848],
[ -9.11420368, -0.5776724 ],
[ -8.98238041, -0.83943435],
[ -8.85033536, -1.10109084],
[ -8.71756106, -1.36180276]]) | StarcoderdataPython |
6438443 | import pymssql
import json
import os.path
from cryptography.fernet import Fernet
class Conn:
    """Holds SQL Server connection settings and persists them to disk.

    BUGFIXES vs. the original:
      * the redacted ``password=<PASSWORD>`` placeholders (a SyntaxError)
        now reference ``self.password``;
      * files and connections are closed via ``with`` / explicit close;
      * bare ``except:`` narrowed to ``except Exception``;
      * ``Test`` (a byte-for-byte duplicate of ``Open``) now delegates.
    """

    def __init__(self, host, port, user, password):
        self.host = host
        self.port = port
        self.user = user
        self.password = password

    def Open(self):
        """Return True if a connection to the 'master' database succeeds."""
        try:
            conn = pymssql.connect(server=self.host,
                                   user=self.user,
                                   password=self.password,
                                   port=self.port,
                                   database='master')
            conn.close()  # don't leak the probe connection
            return True
        except Exception:
            return False

    def SaveConn(self):
        """Persist host/port to connection.json; return True on success."""
        try:
            connection = {
                "host": self.host,
                "port": self.port
            }
            with open("connection.json", "w") as f:
                json.dump(connection, f)
            return True
        except Exception:
            return False

    def SaveAccount(self):
        """Encrypt the credentials with a fresh Fernet key and store both.

        The key goes to key.key, the encrypted credentials to account.json.
        Returns True on success.
        """
        key = Fernet.generate_key()
        suite = Fernet(key)
        # Persist the key so the credentials can be decrypted later.
        with open("key.key", "w") as archive:
            archive.write(key.decode())
        user = suite.encrypt(self.user.encode())
        password = suite.encrypt(self.password.encode())
        try:
            account = {
                "user": user.decode(),
                "password": password.decode()
            }
            with open("account.json", "w") as f:
                json.dump(account, f)
            return True
        except Exception:
            return False

    def Test(self):
        """Alias of Open(); kept for backward compatibility."""
        return self.Open()
def RequestConn():
    """Load the saved host/port mapping from connection.json."""
    with open('connection.json') as config_file:
        return json.load(config_file)
def RequestAccount():
    """Load and decrypt the saved credentials.

    Reads the Fernet key from key.key and the encrypted user/password from
    account.json (as written by Conn.SaveAccount), and returns a dict
    {"user": <bytes>, "password": <bytes>} of the decrypted values.

    BUGFIX: the original opened key.key without ever closing it; both file
    handles are now managed with ``with``.
    """
    with open('account.json') as f:
        account = json.load(f)
    with open('key.key') as key_file:
        key = key_file.readline().encode()
    suite = Fernet(key)
    user = suite.decrypt(account['user'].encode())
    password = suite.decrypt(account['password'].encode())
    return {
        "user": user,
        "password": password
    }
| StarcoderdataPython |
46236 | <reponame>exoplanetvetting/DAVE<filename>diffimg/AbstractPrfLookup.py<gh_stars>1-10
"""
Created on Sun Dec 2 14:12:41 2018
@author: fergal
"""
from __future__ import print_function
from __future__ import division
import numpy as np
class AbstractPrfLookup(object):
    """Store and lookup a previously computed PRF function

    This abstract class is created in the hope that much of the functionality can
    be reused for TESS.

    To get the recorded prf, use
    getPrfForBbbox(), although this function doesn't work in the base class. See docs
    in that method for more details

    BUGFIX: ``.astype(np.int)`` used the ``np.int`` alias, which was removed
    in NumPy 1.24 (AttributeError); the builtin ``int`` is used instead.

    Todo
    --------
    This class is ripe of optimization with numba
    """

    def __init__(self, path):
        # `path`: directory where the PRF data files live (used by subclasses).
        self.path = path
        self.cache = dict()   # per-position PRF cache, filled by subclasses
        self.gridSize = None  # PRF sub-pixel grid size; set by subclasses

    def abstractGetPrfForBbox(self, col, row, bboxIn, getPrfFunc, *args):
        """Get the prf for an image described by a bounding box

        This function requires as input a function to look up the PRF for a given col row
        (getPrfFunc). This function is not implemented in the base class, as it will be
        specific to each mission. The design intent is that you override getPrfForBbox()
        in the daughter class where
        you define the lookup function, then calls the parent class method.
        See KeplerPrf() for an example

        Input:
        -----------
        col, row
            (floats) Centroid for which to generate prf
        bboxIn
            (int array). Size of image to return. bbox
            is a 4 elt array of [col0, col1, row0, row1]
        getPrfFunc
            (function) Function that returns a PRF object
            This function must have the signature
            ``(np 2d array = getPrfFunc(col, row, *args)``

        Optional Inputs
        -----------------
        Any optional inputs get passed to getPrfFunc

        Returns:
        ----------
        A 2d numpy array of the computed prf at the
        given position.

        Notes:
        ------------
        If bbox is larger than the prf postage stamp returned,
        the missing values will be filled with zeros. If
        the bbox is smaller than the postage stamp, only the
        requestd pixels will be returned
        """
        bbox = np.array(bboxIn).astype(int)
        nColOut = bbox[1] - bbox[0]
        nRowOut = bbox[3] - bbox[2]
        imgOut = np.zeros((nRowOut, nColOut))

        # Location of origin of bbox relative to col,row.
        # This is usually zero, but need not be.
        colOffsetOut = (bbox[0] - np.floor(col)).astype(int)
        rowOffsetOut = (bbox[2] - np.floor(row)).astype(int)

        interpPrf = getPrfFunc(col, row, *args)
        nRowPrf, nColPrf = interpPrf.shape
        colOffsetPrf = -np.floor(nColPrf / 2.).astype(int)
        rowOffsetPrf = -np.floor(nRowPrf / 2.).astype(int)

        # Overlap of the PRF postage stamp with the requested bbox, columns.
        di = colOffsetPrf - colOffsetOut
        i0 = max(0, -di)
        i1 = min(nColOut - di, nColPrf)
        if i1 <= i0:
            raise ValueError("Central pixel column not in bounding box")
        i = np.arange(i0, i1)
        assert(np.min(i) >= 0)

        # Overlap of the PRF postage stamp with the requested bbox, rows.
        dj = rowOffsetPrf - rowOffsetOut
        j0 = max(0, -dj)
        j1 = min(nRowOut - dj, nRowPrf)
        if j1 <= j0:
            raise ValueError("Central pixel row not in bounding box")
        j = np.arange(j0, j1)
        assert(np.min(j) >= 0)

        # @TODO: figure out how to do this in one step
        for r in j:
            imgOut[r + dj, i + di] = interpPrf[r, i]
        return imgOut
| StarcoderdataPython |
8119955 | <gh_stars>1-10
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module is for gtest-related operations.
It provides functions to:
* normalize the test names
* concatenate gtest logs
* Remove platform from step name.
"""
import base64
from collections import defaultdict
from libs.test_name_util import RemoveAllPrefixesFromGTestName
from libs.test_results.base_test_results import BaseTestResults
from libs.test_results.classified_test_results import ClassifiedTestResults
# Invalid gtest result error codes.
# TODO(crbug.com/785463): Use enum for error codes.
RESULTS_INVALID = 10

# Statuses for gtest results.
# Other statuses will be considered as failures.
SUCCESS = 'SUCCESS'
SKIPPED = 'SKIPPED'
UNKNOWN = 'UNKNOWN'
NOTRUN = 'NOTRUN'
# Any run whose status is outside this list is classified as a failure.
_NON_FAILURE_STATUSES = [SUCCESS, SKIPPED, UNKNOWN, NOTRUN]
class GtestTestResults(BaseTestResults):
  """Parses, classifies and merges gtest-style JSON test results.

  Operates on ``self.test_results_json`` (provided by BaseTestResults), which
  is expected to follow the shape documented in IsTestResultsInExpectedFormat.
  Note: this module uses Python 2 idioms (iteritems/xrange) throughout.
  """

  def DoesTestExist(self, test_name):
    """Determines whether test_name is in test_results_json's 'all_tests' field.

    Args:
      test_name (str): The name of the test to check.

    Returns:
      True if the test exists according to test_results_json, False otherwise.
    """
    return test_name in (self.test_results_json.get('all_tests') or [])

  @property
  def contains_all_tests(self):
    """
    For gtest, each shard contains all_tests so it should always be True.
    """
    return True

  def IsTestEnabled(self, test_name):
    """Returns True if the test is enabled, False otherwise."""
    if not self.test_results_json:
      return False

    all_tests = self.test_results_json.get('all_tests', [])
    disabled_tests = self.test_results_json.get('disabled_tests', [])

    # Checks if one test was enabled by checking the test results.
    # If the disabled tests array is empty, we assume the test is enabled.
    return test_name in all_tests and test_name not in disabled_tests

  def GetFailedTestsInformation(self):
    """Parses the json data to get all the reliable failures' information."""
    # Maps test name -> {status: output_snippet} for consistently failing tests.
    failed_test_log = defaultdict(dict)
    # Maps raw test name -> name with (PRE_)* prefixes stripped.
    reliable_failed_tests = {}
    # Tests that passed/skipped at least once; excluded from reliable failures.
    tests_has_non_failed_runs = {}

    for iteration in (self.test_results_json.get('per_iteration_data') or []):
      for test_name, test_results in iteration.iteritems():
        if (tests_has_non_failed_runs.get(test_name) or any(
            test['status'] in _NON_FAILURE_STATUSES for test in test_results)):
          # Ignore the test if any of the attempts didn't fail.
          # If a test is skipped, that means it was not run at all.
          # Treats it as success since the status cannot be determined.
          tests_has_non_failed_runs[test_name] = True
          failed_test_log.pop(test_name, None)
          reliable_failed_tests.pop(test_name, None)
          continue

        # Stores the output to the step's log_data later.
        for test in test_results:
          failed_test_log[test_name][test['status']] = test.get(
              'output_snippet')
        reliable_failed_tests[test_name] = RemoveAllPrefixesFromGTestName(
            test_name)

    # Merge the per-status snippets and base64-encode for storage.
    for test_name in failed_test_log:
      test_logs = failed_test_log[test_name]
      merged_test_log = '\n'.join(test_logs.itervalues())
      failed_test_log[test_name] = base64.b64encode(merged_test_log)
    return failed_test_log, reliable_failed_tests

  def IsTestResultUseful(self):
    """Checks if the log contains useful information."""
    # If this task doesn't have result, per_iteration_data will look like
    # [{}, {}, ...]
    return self.test_results_json and any(
        self.test_results_json.get('per_iteration_data') or [])

  def GetTestLocation(self, test_name):
    """Gets test location for a specific test.

    Returns:
      (location, error_string): exactly one of the two is None.
    """
    test_locations = self.test_results_json.get('test_locations')
    if not test_locations:
      error_str = 'test_locations not found.'
      return None, error_str
    test_location = test_locations.get(test_name)
    if not test_location:
      error_str = 'test_location not found for %s.' % test_name
      return None, error_str
    return test_location, None

  def GetClassifiedTestResults(self):
    """Parses gtest results, counts and classifies test results by:
      * status_group: passes/failures/skips/unknowns/notruns,
      * status: actual result status.
    Also counts number of expected and unexpected results for each test:
    for gtest results, assumes
      * SUCCESS is expected result for enabled tests, all the other statuses
        will be considered as unexpected.
      * SKIPPED is expected result for disabled tests, all the other statuses
        will be considered as unexpected.

    Returns:
      (ClassifiedTestResults) An object with information for each test:
      * total_run: total number of runs,
      * num_expected_results: total number of runs with expected results,
      * num_unexpected_results: total number of runs with unexpected results,
      * results: classified test results in 5 groups: passes, failures, skips,
        unknowns and notruns.
    """
    if not self.IsTestResultUseful():
      return {}

    def ClassifyOneResult(test_result, status, expected_status):
      # Tallies a single run into the expected/unexpected counters and into
      # the status bucket matching its (upper-cased) status string.
      upper_status = status.upper()
      if upper_status == expected_status:
        test_result.num_expected_results += 1
      else:
        test_result.num_unexpected_results += 1
      if upper_status == SUCCESS:
        test_result.results.passes[upper_status] += 1
      elif upper_status == SKIPPED:
        test_result.results.skips[upper_status] += 1
      elif upper_status == UNKNOWN:
        test_result.results.unknowns[upper_status] += 1
      elif upper_status == NOTRUN:
        test_result.results.notruns[upper_status] += 1
      else:
        test_result.results.failures[upper_status] += 1

    test_results = ClassifiedTestResults()
    for iteration in self.test_results_json['per_iteration_data']:
      for test_name, runs in iteration.iteritems():
        base_test_name = RemoveAllPrefixesFromGTestName(test_name)
        expected_status = SUCCESS if self.IsTestEnabled(
            base_test_name) else SKIPPED
        if base_test_name == test_name:
          test_results[base_test_name].total_run += len(runs)
          for run in runs:
            ClassifyOneResult(test_results[base_test_name], run['status'],
                              expected_status)
        else:
          # Test name is in the format (PRE_)+test, consolidates such results
          # into base tests' results. Failure of PRE_tests will stop base tests
          # from running, so count failures of PRE_ tests into failures of base
          # tests. But successful PRE_tests are prerequisites for base test to
          # run, so ignore successful PRE_tests runs to prevent double counting.
          for run in runs:
            if run['status'] != SUCCESS:
              test_results[base_test_name].total_run += 1
              ClassifyOneResult(test_results[base_test_name], run['status'],
                                expected_status)
    return test_results

  @staticmethod
  def GetMergedTestResults(shard_results):
    """Merges the shards into one.

    Args:
      shard_results (list): A list of dicts with individual shard results.

    Returns:
      A dict with the following form:
      {
        'all_tests':[
          'AllForms/FormStructureBrowserTest.DataDrivenHeuristics/0',
          'AllForms/FormStructureBrowserTest.DataDrivenHeuristics/1',
          'AllForms/FormStructureBrowserTest.DataDrivenHeuristics/10',
          ...
        ]
        'per_iteration_data':[
          {
            'AllForms/FormStructureBrowserTest.DataDrivenHeuristics/109': [
              {
                'elapsed_time_ms': 4719,
                'losless_snippet': true,
                'output_snippet': '[ RUN      ] run outputs\\n',
                'output_snippet_base64': 'WyBSVU4gICAgICBdIEFsbEZvcm1zL0Zvcm1T'
                'status': 'SUCCESS'
              }
            ],
          },
          ...
        ]
      }
    """
    if len(shard_results) == 1:
      return shard_results[0]

    def MergeListsOfDicts(merged, shard):
      """Merges the ith dict in shard onto the ith dict of merged."""
      min_len = min(len(merged), len(shard))
      for i in xrange(min_len):
        # Updates merged with data in shard.
        merged[i].update(shard[i])
      for k in xrange(min_len, len(shard)):
        # If shard has a longer length, appends the rest data in shard.
        merged.append(shard[k])

    merged_results = {'all_tests': set(), 'per_iteration_data': []}
    for shard_result in shard_results:
      merged_results['all_tests'].update(shard_result.get('all_tests', []))
      MergeListsOfDicts(merged_results['per_iteration_data'],
                        shard_result.get('per_iteration_data', []))
    merged_results['all_tests'] = sorted(merged_results['all_tests'])
    return merged_results

  @staticmethod
  def IsTestResultsInExpectedFormat(test_results_json):
    """Checks if the log can be parsed by gtest.

    Args:
      test_results_json (dict): It should be in below format:
      {
        'all_tests': ['test1',
                      'test2',
                      ...],
        'per_iteration_data': [
          {
            'test1': [
              {
                'status': 'SUCCESS',
                'output_snippet': 'output',
                ...
              }
            ],
            'test2': [
              {},
              {},
              ...
            ]
          }
        ]
      }
    """
    return (isinstance(test_results_json, dict) and
            isinstance(test_results_json.get('all_tests'), list) and
            isinstance(test_results_json.get('per_iteration_data'), list) and
            all(
                isinstance(i, dict)
                for i in test_results_json.get('per_iteration_data')))
| StarcoderdataPython |
12842815 | <gh_stars>0
#!/usr/bin/python
"""Sends a message that is read by the StillCaptureModule."""
import roslib; roslib.load_manifest('reefbot')
import rospy
from reefbot_msgs.msg import ImageCaptured
if __name__ == "__main__":
    rospy.init_node('TestStillCaptureModule')
    # The destination topic is configurable via the 'still_image_topic' param.
    publisher = rospy.Publisher(
        rospy.get_param('still_image_topic', 'still_image'),
        ImageCaptured,
        tcp_nodelay=True, latch=False)

    # Build a synthetic 20x16 single-channel test image with ramp pixel data.
    request = ImageCaptured()
    request.image_id = 23
    request.image.height = 16
    request.image.width = 20
    request.image.data = range(16*20)
    request.image.encoding = "8UC1"
    # NOTE(review): step is bytes per row; 20*4 implies 4 bytes/pixel which
    # looks inconsistent with the 1-byte 8UC1 encoding -- confirm intended.
    request.image.step = 20*4

    # Republish the same message at 1 Hz until the node is shut down.
    r = rospy.Rate(1)
    while not rospy.is_shutdown():
        publisher.publish(request)
        r.sleep()
| StarcoderdataPython |
5178664 |
from sys import path
path.append("../")
import logging as log
import unittest
import libCommon as COMMON
from libConstants import CUSTOM
import libEmail as TEST
# Fixture data for the live-email tests below.
# NOTE(review): "<EMAIL>"/"<PASSWORD>" are redacted placeholders; real
# credentials must be substituted before these tests can actually send mail.
test_from = "<EMAIL>"
password = "<PASSWORD>"
client_list = [{"user_email" : "<EMAIL>","first_name":"Puppy","last_name":"Paws", "user_login":"puppypaws"},
               {"user_email" : "<EMAIL>","first_name":"Martiel", "last_name":"and Staff", "user_login" : "FlamingoOne"}]
test_subject="Hello user {user_login}"
# Trimmed copy of the project-defined message body (first 700 characters).
test_body=CUSTOM.body[:700]
class TestEmailSend(unittest.TestCase) :
    """Exercises libEmail personalization, transform and sending helpers.

    NOTE(review): the Bulk* tests contact a real gmail account with the
    module-level credentials; they are integration tests, not unit tests,
    and several results are only checked by inspecting the log file.
    """

    def testPersonalize(self) :
        #check log file
        TEST.CLIENTS.personalize(user_login="BobOne",first_name="Bobby",last_name="Smith",
            SUBJECT="test",TO="me",FROM="you",message=test_body)

    def testReadFiles(self) :
        # Only logs what was found; no assertions are made here.
        _file_list = COMMON.find_files("Email")
        log.debug(_file_list)

    def testTransform(self) :
        _client_list = TEST.CLIENTS.load(filename="email.csv")
        if len(_client_list) > 3 :
            _client_list = _client_list[:3]
        for client in _client_list :
            log.debug(client)
        # Drain the generator for its side effects (logging).
        for client in TEST.CLIENTS._transform(test_from,test_subject,*_client_list) :
            pass
        #scheck log file

    def testBulkSend(self) :
        # Sends one personalized message per client through gmail.
        server = TEST.EMAIL.gmail(user=test_from,pswd=password)
        for _from, _to, subject, msg in TEST.CLIENTS.transform(test_from,test_subject,test_body,*client_list):
            server.sendmail(_from, _to, msg)
        server.quit()

    def testBulkAttachment(self) :
        # Same as testBulkSend but attaches a CSV file to every message.
        server = TEST.EMAIL.gmail(user=CUSTOM._from,pswd=CUSTOM.pswd)
        for _from, _to, subject, msg in TEST.CLIENTS.transform(CUSTOM._from,CUSTOM.subject,CUSTOM.body,*client_list):
            obj = TEST.EMAIL.add_attachments(msg,*["testAttachment.csv"])
            obj['From'] = _from
            obj['To'] = _to
            obj['Subject'] = subject
            server.sendmail(_from, _to, obj.as_string())
        server.quit()
if __name__ == '__main__' :
    import sys
    # Derive the log file name from the script name, e.g. "libEmailTest.log".
    log_file = COMMON.build_args(*sys.argv).replace('.py','') + '.log'
    #log_file = COMMON.build_path('../log',log_file)
    #COMMON.remove_file(log_file)
    #COMMON.mkdir("../log")
    log.basicConfig(filename=log_file, format=COMMON.LOG_FORMAT_TEST, level=log.DEBUG)
    unittest.main()
11224258 | from flask import current_app, Flask, render_template, redirect, url_for, request
import datadef
import actualdata
import regiondef
def create_app(debug=False, testing=False):
    """Application factory for the map/data dashboard.

    Args:
        debug: enable Flask debug mode.
        testing: enable Flask testing mode.

    Returns:
        A configured Flask application with all routes registered.
    """
    app = Flask(__name__)
    app.debug = debug
    app.testing = testing

    @app.route("/")
    def index():
        return render_template("mainmap.html")

    @app.route("/dataview")
    def dataview():
        return render_template("dataview.html")

    @app.route("/data_def.json")
    def data_def_json():
        import json
        # Fix: local variable was named 'all', shadowing the builtin all().
        data_defs = datadef.getAllDataDefs()
        return json.dumps(data_defs, ensure_ascii=False)

    @app.route("/data.json")
    def data2_json():
        import json
        datapoints = actualdata.selectAllDatapoints()
        return json.dumps(datapoints, ensure_ascii=False)

    @app.route("/refresh_data_def")
    def refresh_data_def():
        # Re-import the definitions from upstream, then return the fresh set.
        datadef.importDataDefs()
        return data_def_json()

    @app.route("/refresh_data")
    def refresh_data():
        actualdata.importData()
        return data2_json()

    @app.route("/sandbox")
    def sandbox():
        return render_template("sandbox.html")

    @app.route("/regionconvert")
    def regionconvert():
        return render_template("regionconvert.html")

    @app.route("/region_def.json")
    def region_def_json():
        import json
        region_defs = regiondef.selectAllRegionDefs()
        return json.dumps(region_defs, ensure_ascii=False)

    @app.route("/refresh_region_defs")
    def refresh_region_defs():
        regiondef.importRegionDefs()
        return region_def_json()

    @app.errorhandler(500)
    def server_error(e):
        return """
        An internal error occurred: <pre>{}</pre>
        See logs for full stacktrace.
        """.format(e), 500

    return app
8107604 | <filename>recommend.py
import tensorflow as tf
import pandas as pd
import numpy as np
import time
from remtime import printTime
user_index = pd.read_csv('data/test_index.csv')['indexId']
userId = pd.read_csv('data/test.csv')['userId']
movId = pd.read_csv('data/mov_hash.csv')['movId']
train_index = pd.read_csv('data/train_index.csv')
score = np.array(pd.read_csv('data/score3.csv'))
genre = np.array(pd.read_csv('data/genres.csv'))
count = np.array(pd.read_csv('data/count.csv'))
year = np.array(pd.read_csv('data/releaseYr.csv'))
NUM_MOVIE = len(movId)
NUM_USER = len(userId)
print NUM_USER, NUM_MOVIE
print("Data preprocessing completed.")
print("Start here..")
sess=tf.Session()
#First let's load meta graph and restore weights
saver = tf.train.import_meta_graph('cap-model.meta')
saver.restore(sess,tf.train.latest_checkpoint('./'))
graph = tf.get_default_graph()
user_batch = graph.get_tensor_by_name("user_batch:0")
movie_batch = graph.get_tensor_by_name("movie_batch:0")
avg_rating = np.array(pd.read_csv('data/avg.csv')['avg_rating'])
user38 = np.zeros((5,2))
print NUM_MOVIE, len(avg_rating)
mov_avg = np.zeros((NUM_MOVIE,3))
mov_avg[:,0] = movId
mov_avg[:,1] = -avg_rating
mov_avg[:,1] = -year.reshape(NUM_MOVIE)
mov_avg = mov_avg[mov_avg[:,2].argsort()][0:50,0:2]
mov_avg = mov_avg[mov_avg[:,1].argsort()]
user38 = mov_avg[0:5]
user38[:,1] = -user38[:,1]
print user38
BATCH_SIZE = 2000
for i in range(NUM_USER):
stime = time.time()
movindex = np.array(train_index[train_index['user_index'] == user_index[i]]['mov_index'])
# if i==38:
# movindex=[]
data = np.zeros((NUM_MOVIE-len(movindex),4))
x=0
for j in range(NUM_MOVIE):
if j not in movindex:
data[x,1]=j
data[x,3]=year[j]
x+=1
# print(len(movindex))
data[:,0] = user_index[i]
data[:,3] = -data[:,3]
data = data[data[:,3].argsort()]
data[:,3] = -data[:,3]
data = data[0:56]
# print data[j*BATCH_SIZE:(j+1)*BATCH_SIZE,2]
feed_dict ={user_batch:data[:,0], movie_batch:data[:,1]}
op_to_restore = graph.get_tensor_by_name("output:0")
data[:,2] = sess.run(op_to_restore,feed_dict)
data[:,3] = -data[:,3]
data = data[data[:,3].argsort()]
data[:,3] = -data[:,3]
for j in range(len(data)):
data[j,3]= count[int(data[j,1])]*np.power(data[j,2],3)
data[:,3] = -data[:,3]
data = data[data[:,3].argsort()]
data[:,3] = -data[:,3]
# for j in range(len(data)):
# data[j,3]= np.sum(score[i]/np.sum(score[i])*genre[int(data[j,1])])
# data[:,3] = -data[:,3]
# data = data[data[:,3].argsort()]
# data[:,3] = -data[:,3]
top5 = data[0:5,0:3]
top5[:,0] = userId[i]
for j in range(0,5):
top5[j,1]=movId[top5[j,1]]
if i==38:
top5[:,1:] = user38
top5[:,2] = np.around(top5[:,2]*2)/2
top5[:,2] = np.clip(top5[:,2], 3.5, 5.0)
if i==0:
recomm = top5
else:
recomm = np.vstack((recomm,top5))
ftime = time.time()
remtime = (ftime-stime)*(NUM_USER-i-1)
printTime(remtime)
recomm = np.array(recomm)
recomm[:,0:2] = recomm[:,0:2].astype('int')
# recomm = pd.DataFrame(recomm,columns=['userId','movieId','rating'])
# cols = ['userId','movieId']
# recomm[cols] = recomm[cols].applymap(np.int64)
# recomm.to_csv('solution.csv',index=False)
print recomm[0:20]
np.savetxt('solution.csv', recomm,delimiter=",")
| StarcoderdataPython |
3483791 | # -*- coding: utf-8 -*-
"""
Created on Sat Nov 23 00:49:01 2018
@author: tinokuba
"""
from .rsttree import RstTree, RstType, RstNode
from .relationstable import RelTable, Relation, RelElement
from .reltablegenerator import TableGenerator
from .comparisontable import ComparisonTable, Comparison,\
MatchingDistance, Equivalency
from .comptablegenerator import TableComparer
from .comparesettable import CompareSetTable
from .comparesettablegenerator import TableSetComparer
| StarcoderdataPython |
class QueuedNode(object):
    """A node of the expectation call tree.

    Concrete calls and attribute get/sets are leaves; call groups (ordered,
    unordered, ...) are branches.
    """

    def __init__(self):
        super(QueuedNode, self).__init__()
        self._parent = None

    def set_parent(self, parent):
        self._parent = parent

    def get_parent(self):
        return self._parent

    def get_expected(self):
        # A plain node expects exactly itself.
        return [self]

    def get_available(self):
        # ...and offers exactly itself for matching.
        return [self]

    def pop_matching(self, queue_object):
        """Remove and return *queue_object* from this subtree, or None.

        Gives the node a chance to drop the object from its subtree;
        usually invoked after matches(queue_object) succeeded.
        """
        raise NotImplementedError()  # pragma: no cover
class QueuedNodeParent(QueuedNode):
    """Base class for all non-leaf nodes."""

    def discard_child(self, child):
        # Subclasses define how *child* is removed from the group.
        raise NotImplementedError()  # pragma: no cover
| StarcoderdataPython |
9606157 | <filename>wsgi_intercept/tests/install.py<gh_stars>10-100
import os
import pytest
import wsgi_intercept
# Pytest marker that skips network-dependent tests when the
# WSGI_INTERCEPT_SKIP_NETWORK environment variable is set to "true".
skipnetwork = pytest.mark.skipif(os.environ.get(
    'WSGI_INTERCEPT_SKIP_NETWORK', 'False').lower() == 'true',
    reason="skip network requested"
)
class BaseInstalledApp(object):
    """WSGI app wrapper that counts hits and manages intercept (un)install."""

    def __init__(self, app, host, port=80, script_name='',
                 install=None, uninstall=None, proxy=None):
        noop = lambda: None
        self.app = app
        self.host = host
        self.port = port
        self.script_name = script_name
        # Library-specific hooks; default to doing nothing.
        self._install = install or noop
        self._uninstall = uninstall or noop
        self._hits = 0
        self._internals = {}
        self._proxy = proxy

    def __call__(self, environ, start_response):
        # Record the request before delegating to the wrapped app.
        self._hits += 1
        self._internals = environ
        return self.app(environ, start_response)

    def success(self):
        """True once at least one request reached the wrapped app."""
        return self._hits > 0

    def get_internals(self):
        """WSGI environ of the most recent request."""
        return self._internals

    def install_wsgi_intercept(self):
        wsgi_intercept.add_wsgi_intercept(
            self.host, self.port, self.factory, script_name=self.script_name)

    def uninstall_wsgi_intercept(self):
        wsgi_intercept.remove_wsgi_intercept(self.host, self.port)

    def install(self):
        # Proxy env var first, then the library hook, then the intercept.
        if self._proxy:
            os.environ['http_proxy'] = self._proxy
        self._install()
        self.install_wsgi_intercept()

    def uninstall(self):
        # Mirror image of install(): intercept down before the library hook.
        if self._proxy:
            del os.environ['http_proxy']
        self.uninstall_wsgi_intercept()
        self._uninstall()

    def factory(self):
        return self

    def __enter__(self):
        self.install()
        return self

    def __exit__(self, *args, **kwargs):
        self.uninstall()
def installer_class(module=None, install=None, uninstall=None):
    """Build an InstalledApp class wired to a library's (un)install hooks.

    When *module* is given and no explicit callables override them, the
    hooks default to ``module.install`` / ``module.uninstall``.
    """
    if module:
        install = install or getattr(module, 'install', None)
        uninstall = uninstall or getattr(module, 'uninstall', None)

    class InstalledApp(BaseInstalledApp):
        def __init__(self, app, host, port=80, script_name='', proxy=None):
            BaseInstalledApp.__init__(
                self, app=app, host=host, port=port, script_name=script_name,
                install=install, uninstall=uninstall, proxy=proxy)

    return InstalledApp
| StarcoderdataPython |
3244910 | from relax.vm import *
from relax.primitives import *
def signature(model,const):
    """Run *model* concolically to discover its random variables.

    Returns (rvs, rands, output_types): rvs maps each RV name to a tuple
    (distribution, indices of the raw random bits it consumes, Lean output
    type); rands is the total random-bit count; output_types lists every
    RV's Lean type in declaration order.
    """
    # '_' is a placeholder model input; 0 seeds the concolic run.
    (rvmap,_) = concolic_vm(model('_',const),0)
    rands = 0
    rvs = {}
    output_types = []
    for rv in rvmap:
        # lean_type() reports (number of random bits consumed, Lean type).
        (rand,output_type) = rvmap[rv].dist.lean_type()
        rvs[rvmap[rv].name] = (rvmap[rv].dist,list(range(rands,rands+rand)),output_type)
        rands += rand
        output_types.append(output_type)
    return (rvs,rands,output_types)
def translate(model,const):
    """Print a Lean definition of *model*'s sampling code followed by its
    measurability lemma (all output goes to stdout)."""
    (rvs,rands,otypes) = signature(model,const)
    # NOTE(review): ' ร ' looks like a mis-encoded Lean product ' × ' --
    # confirm the intended encoding before changing it.
    in_type = ' ร '.join(rands * ['nnreal'])
    ret_type = ' ร '.join(otypes)
    print('noncomputable')
    print('def', model.__name__ + '_code ( u:', in_type ,') :', ret_type,':=')
    # One 'let' binding per random variable, sampled from its raw bits.
    for rv in rvs:
        (dist,rbits,_) = rvs[rv]
        print('let ' + rv + ' := ' + dist.lean_sample(rbits) + ' in')
    print('(', ','.join(rvs), ')')
    gen_measurability_proof(model,const)
def gen_access(i, tab):
    """Print Lean tactics that project component *i* out of a nested pair.

    Index 0 is reached via ``fst``; every other index descends through
    ``snd`` once per level before the final ``fst``/``snd``.
    """
    if i == 0:
        print(tab + 'apply measurable_fst,')
        return
    print(tab + 'apply measurable_snd,')
    if i > 1:
        gen_access(i - 1, tab)
def gen_arg(arg,rvmap,rands,tab):
    """Emit the measurability obligation for one argument.

    An int denotes a raw random-bit index into the input tuple; a key of
    rvmap denotes a nested distribution; anything else is treated as a
    constant.
    """
    if type(arg) == int:
        # For now, this is a random bit access
        #gen_access(arg,tab)
        if arg == 0:
            print(tab + 'apply measurable_fst,')
        elif arg == rands - 1:
            # Last tuple component: no projection tactic is needed.
            pass
        else:
            print(tab + 'apply measurable_fst,')
            for i in range(0,arg):
                print(tab + 'apply measurable_snd,')
            print(tab + 'apply measurable_id,')
    elif arg in rvmap:
        # We're dealing with a distribution
        gen_dist(arg,rvmap,rands,tab)
    else:
        print(tab + 'apply measurable_const,')
def gen_args(arg,args,rvmap,rands,tab):
    """Recursively emit obligations for an argument tuple, one
    ``measurable.prod`` split per additional argument."""
    if len(args) == 0:
        gen_arg(arg,rvmap,rands,tab)
    else:
        print(tab + 'apply measurable.prod; simp,')
        print(tab + '{')
        gen_arg(arg,rvmap,rands,tab + '\t')
        print(tab + '},')
        print(tab + '{')
        gen_args(args[0],args[1:],rvmap,rands,tab + '\t')
        print(tab + '},')
def gen_dist(rv,rvmap,rands,tab):
    """Emit the measurability proof for one sampled random variable *rv*."""
    (dist,rbits,_) = rvmap[rv]
    # Distribution parameters first, then the raw random bits it consumes.
    args = dist.args + rbits
    if type(dist) == Uniform:
        print(tab + 'apply measurable.comp,')
        print(tab + 'apply uniform_measurable,')
        gen_args(args[0],args[1:],rvmap,rands,tab)
    elif type(dist) == Binomial:
        print(tab + 'apply measurable.comp,')
        print(tab + 'apply binomial_measurable,')
        # Second argument incorrect because of current simplifcation for binomial
        gen_args(args[1],args[2:],rvmap,rands,tab)
    else:
        # Unsupported distribution: emit a marker rather than failing.
        print('Not implemented yet: ',dist)
def gen_prod(current_rv,rvlist,rvmap,rands,tab):
    """Emit proofs for the returned tuple of RVs, one ``measurable.prod``
    split per remaining random variable."""
    if len(rvlist) == 0:
        gen_dist(current_rv,rvmap,rands,tab)
    else:
        print(tab + 'apply measurable.prod; simp,')
        print(tab + '{')
        gen_dist(current_rv,rvmap,rands,tab + '\t')
        print(tab + '},')
        print(tab + '{')
        gen_prod(rvlist[0],rvlist[1:],rvmap,rands,tab + '\t')
        print(tab + '},')
def gen_measurability_proof(model,const):
    """Print the Lean lemma asserting the generated sampling code is
    measurable, together with its tactic proof."""
    (rvs,rands,otypes) = signature(model,const)
    print('\nProof:\n')
    input_types = ['nnreal'] * rands
    # NOTE(review): ' ร ' / ' ฮป ' look like mis-encoded ' × ' / ' λ '.
    print('lemma ' + model.__name__ + '_code' + '_is_measurable: ' + 'measurable ( ฮป ( u: ' + ' ร '.join(input_types) + '),' + model.__name__ + '_code u) := ')
    print('begin')
    print('\t' + 'unfold ' + model.__name__ + '_code,')
    # The program must return at least one variable
    rvlist = list(rvs.keys())
    gen_prod(rvlist[0],rvlist[1:],rvs,rands,'\t')
    print('end')
| StarcoderdataPython |
class Participant():
    """Snapshot of one participant row from v_participants.

    If several rows match, the last one wins; if none match, no attributes
    are set and later attribute access raises AttributeError.
    NOTE(review): the query interpolates ints directly into SQL text --
    fine for trusted ints, but use placeholders if ids can be untrusted.
    """

    def __init__(self, connection, id, year):
        select_sql = """
        SELECT name, email, likes, dislikes, in_office, participant_id
        FROM v_participants WHERE participant_id = %d AND year = %d
        """ % (id, year)
        for name, email, likes, dislikes, in_office, pid in \
                connection.cursor().execute(select_sql):
            self.name = name
            self.email = email
            # Whitespace-only likes/dislikes are normalised to None.
            self.likes = likes if likes.strip() != '' else None
            self.dislikes = dislikes if dislikes.strip() != '' else None
            self.in_office = in_office == 'Yes'
            self.id = pid
def get_all_participants(connection, year, exclude_email_sent=True):
    """Return giver ids for *year*.

    By default only givers whose assignment email has not been sent yet are
    included; pass exclude_email_sent=False to get everyone.
    """
    if exclude_email_sent:
        sql = """
        SELECT giver_id FROM v_assignments
        WHERE year = %d and (email_sent != 'Y' or email_sent is null)
        """ % year
    else:
        sql = """
        SELECT giver_id FROM v_assignments
        WHERE year = %d
        """ % year
    return [row[0] for row in connection.cursor().execute(sql)]
def get_targetted_participants(connection, year, emails):
    """Return giver ids for *year* restricted to the given email addresses."""
    # NOTE(review): email values are spliced straight into the SQL text;
    # switch to driver placeholders if they can ever come from untrusted input.
    quoted_emails = ",".join("'%s'" % email for email in emails)
    sql = """
    SELECT giver_id FROM v_assignments WHERE year = %d
    AND giver_email in (%s)
    """ % (year, quoted_emails)
    return [row[0] for row in connection.cursor().execute(sql)]
def get_assignment(connection, participant_id, year):
    """Return the first (giver_id, recipient_id) row for the giver, or None."""
    query = (
        "SELECT giver_id, recipient_id FROM v_assignments WHERE year = %d and giver_id=%d" % (
            year, participant_id
        )
    )
    for row in connection.cursor().execute(query):
        return row
    return None
| StarcoderdataPython |
9637035 |
class LimitedDict(dict):
    """A dict that holds at most *_max_len* entries.

    When inserting a new key at capacity, the oldest entry (by insertion
    order) is evicted first.
    """

    def __init__(self, _max_len: int) -> None:
        if _max_len < 1:
            raise ValueError("Length must be a positive number.")
        super().__init__()
        self._max_len = _max_len

    def __setitem__(self, key: object, item: object) -> None:
        # Fix: only evict when a *new* key would exceed the limit.
        # Overwriting an existing key cannot grow the dict, and evicting in
        # that case silently shrank the mapping below its capacity.
        if key not in self and len(self) >= self._max_len:
            self.pop(next(iter(self)))
        super().__setitem__(key, item)
| StarcoderdataPython |
5054047 | #!/usr/bin/env python
# The Expat License
#
# Copyright (c) 2017, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
# Python 2/3 compatibility: on Python 3, alias the removed Py2 builtins so
# the rest of the script can keep using long/xrange unchanged.
if sys.version_info > (3,):
    long = int
    xrange = range
def calc(q):
probs = [1.0]
for x in xrange(1, 51):
p = 1 - x / q
new_probs = []
for i in xrange(x+1):
# print("%d %d" % (i,x))
new_probs.append((0 if (i == x) else probs[i] * (1-p)) +
(0 if (i == 0) else (p * probs[i-1])))
probs = new_probs
return probs[20]
low = float(50)
high = float(100000)

# Bisection search for the q at which calc(q) crosses the 0.02 target.
while True:
    m = ((low + high) / 2)
    v_m = calc(m)
    print("%.40f = %.40f" % (m, v_m))
    if high - low < 1e-9:
        # Fix: the original loop only stopped on exact float equality with
        # 0.02, which may never occur; once the interval has collapsed to
        # float precision there is nothing left to refine, so stop.
        break
    if v_m > 0.02:
        low = m
    elif v_m < 0.02:
        high = m
    else:
        break
| StarcoderdataPython |
316051 | <reponame>Social-CodePlat/Comptt-Coding-Solutions
# Codeforces 469A "I Wanna Be the Guy": two friends can clear some levels;
# decide whether together they cover every level 1..n.
n = int(input())
first = [int(x) for x in input().split()]
second = [int(x) for x in input().split()]

# The first number on each line is only a count; the rest are level ids.
cleared = set(first[1:]) | set(second[1:])

if all(level in cleared for level in range(1, n + 1)):
    print("I become the guy.")
else:
    print("Oh, my keyboard!")
| StarcoderdataPython |
6620032 | <reponame>PieterBlomme/proteus<gh_stars>1-10
import logging
from pathlib import Path
import cv2
import numpy as np
from PIL import Image
from proteus.models.base import BaseModel
from proteus.models.base.modelconfigs import BaseModelConfig
from proteus.types import BoundingBox, Segmentation
from tritonclient.utils import triton_to_np_dtype
from .helpers import detection_postprocess, image_preprocess, read_class_names
# Module logger and the directory containing this file, used to locate the
# bundled resources (coco_names.txt, config.template).
logger = logging.getLogger(__name__)
folder_path = Path(__file__).parent
class ModelConfig(BaseModelConfig):
    # No MaskRCNN-specific options; everything is inherited from BaseModelConfig.
    pass
class MaskRCNN(BaseModel):
    """ONNX Mask R-CNN instance-segmentation model served through Triton."""

    # Human-readable model card shown by the model registry.
    DESCRIPTION = (
        "This model is a real-time neural network for object "
        "instance segmentation that detects 80 different classes."
        "mAP of 0.36"
        "Taken from https://github.com/onnx/models."
    )
    # COCO class names; the network's integer label indexes into this mapping.
    CLASSES = read_class_names(f"{folder_path}/coco_names.txt")
    NUM_OUTPUTS = 4
    # 0 disables batching for this model.
    MAX_BATCH_SIZE = 0
    MODEL_URL = "https://github.com/onnx/models/raw/master/vision/object_detection_segmentation/mask-rcnn/model/MaskRCNN-10.onnx"
    CONFIG_PATH = f"{folder_path}/config.template"
    INPUT_NAME = "image"
    # Output tensor names, in order: boxes, labels, scores, masks.
    OUTPUT_NAMES = ["6568", "6570", "6572", "6887"]
    DTYPE = "FP32"
    MODEL_CONFIG = ModelConfig

    @classmethod
    def preprocess(cls, img):
        """
        Pre-process an image to meet the size, type and format
        requirements specified by the parameters.

        :param img: Pillow image
        :returns:
            - model_input: input as required by the model
            - extra_data: dict of data that is needed by the postprocess function
        """
        extra_data = {}

        # Careful, Pillow has (w,h) format but most models expect (h,w)
        w, h = img.size
        extra_data["original_image_size"] = (h, w)

        img = img.convert("RGB")
        logger.info(f"Original image size: {img.size}")
        img = image_preprocess(img)
        # Cast to the numpy dtype that matches the declared Triton type (FP32).
        npdtype = triton_to_np_dtype(cls.DTYPE)
        img = img.astype(npdtype)
        return img, extra_data

    @classmethod
    def postprocess(cls, results, extra_data, batch_size, batching):
        """
        Post-process results to show bounding boxes.
        https://github.com/onnx/models/tree/master/vision/object_detection_segmentation/retinanet

        :param results: model outputs
        :param extra_data: dict of data that is needed by the postprocess function
        :param batch_size
        :param batching: boolean flag indicating if batching

        :returns: json result
        """
        original_image_size = extra_data["original_image_size"]

        # get outputs
        boxes = results.as_numpy(cls.OUTPUT_NAMES[0])
        labels = results.as_numpy(cls.OUTPUT_NAMES[1])
        scores = results.as_numpy(cls.OUTPUT_NAMES[2])
        masks = results.as_numpy(cls.OUTPUT_NAMES[3])

        postprocess_results = detection_postprocess(
            original_image_size, boxes, labels, scores, masks
        )

        results = []
        # TODO add another loop if batching
        for (score, box, cat, mask) in postprocess_results:
            x1, y1, x2, y2 = box
            bbox = BoundingBox(
                x1=int(x1),
                y1=int(y1),
                x2=int(x2),
                y2=int(y2),
                class_name=cls.CLASSES[int(cat)],
                score=float(score),
            )
            # Binarize the soft mask at 0.5 and trace contours to get a polygon.
            ret, thresh = cv2.threshold(mask, 0.5, 1, cv2.THRESH_BINARY)
            contours, hierarchy = cv2.findContours(
                thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
            )
            # NOTE(review): only the first contour is kept, so disjoint mask
            # regions are dropped -- confirm this is the intended behavior.
            polygon = contours[0].reshape(-1).tolist()
            if len(polygon) <= 4:
                # not valid, create a dummy
                polygon = [0, 0, 1, 0, 1, 1]
            segmentation = Segmentation(
                segmentation=polygon,
                class_name=cls.CLASSES[int(cat)],
                score=float(score),
            )
            results.append({"bounding_box": bbox, "segmentation": segmentation})
        return results
| StarcoderdataPython |
1603897 | from personal_mycroft_backend.extra.backend_gui import BackendGUI
if __name__ == "__main__":
    # Launch the backend administration GUI.
    browser = BackendGUI()
    browser.open()
| StarcoderdataPython |
182912 | #!/usr/bin/python
# (c) 2019, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
author: NetApp Ansible Team (@carchi8py) <<EMAIL>>
description:
- Update ONTAP service-prosessor firmware
extends_documentation_fragment:
- netapp.na_ontap
module: na_ontap_firmware_upgrade
options:
state:
description:
- Whether the specified ONTAP firmware should be upgraded or not.
default: present
type: str
node:
description:
- Node on which the device is located.
type: str
required: true
clear_logs:
description:
- Clear logs on the device after update. Default value is true
type: bool
default: true
package:
description:
- Name of the package file containing the firmware to be installed. Not required when -baseline is true.
type: str
shelf_module_fw:
description:
- Shelf module firmware to be updated to.
type: str
disk_fw:
description:
- disk firmware to be updated to.
type: str
update_type:
description:
- Type of firmware update to be performed. Options include serial_full, serial_differential, network_full.
type: str
install_baseline_image:
description:
- Install the version packaged with ONTAP if this parameter is set to true. Otherwise, package must be used to specify the package to install.
type: bool
default: false
firmware_type:
description:
- Type of firmware to be upgraded. Options include shelf, ACP, service-processor, and disk.
- For shelf firmware upgrade the operation is asynchronous, and therefore returns no errors that might occur during the download process.
- Shelf firmware upgrade is idempotent if shelf_module_fw is provided .
- disk firmware upgrade is idempotent if disk_fw is provided .
- With check mode, SP, ACP, disk, and shelf firmware upgrade is not idempotent.
- This operation will only update firmware on shelves/disk that do not have the latest firmware-revision.
choices: ['service-processor', 'shelf', 'acp', 'disk']
type: str
short_description: NetApp ONTAP firmware upgrade for SP, shelf, ACP, and disk.
version_added: "2.9"
'''
EXAMPLES = """
- name: SP firmware upgrade
na_ontap_firmware_upgrade:
state: present
node: vsim1
package: "{{ file name }}"
clear_logs: True
install_baseline_image: False
update_type: serial_full
firmware_type: service-processor
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ <PASSWORD> }}"
- name: ACP firmware upgrade
na_ontap_firmware_upgrade:
state: present
node: vsim1
firmware_type: acp
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ <PASSWORD> }}"
- name: shelf firmware upgrade
na_ontap_firmware_upgrade:
state: present
firmware_type: shelf
shelf_module_fw: 1221
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ <PASSWORD> }}"
- name: disk firmware upgrade
na_ontap_firmware_upgrade:
state: present
firmware_type: disk
disk_fw: NA02
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ <PASSWORD> }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.netapp_module import NetAppModule
import time
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppONTAPFirmwareUpgrade(object):
"""
Class with ONTAP firmware upgrade methods
"""
def __init__(self):
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, type='str', default='present'),
node=dict(required=False, type='str'),
firmware_type=dict(required=True, type='str', choices=['service-processor', 'shelf', 'acp', 'disk']),
clear_logs=dict(required=False, type='bool', default=True),
package=dict(required=False, type='str'),
install_baseline_image=dict(required=False, type='bool', default=False),
update_type=dict(required=False, type='str'),
shelf_module_fw=dict(required=False, type='str'),
disk_fw=dict(required=False, type='str')
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_if=[
('firmware_type', 'acp', ['node']),
('firmware_type', 'disk', ['node']),
('firmware_type', 'service-processor', ['node', 'update_type']),
],
supports_check_mode=True
)
self.na_helper = NetAppModule()
self.parameters = self.na_helper.set_parameters(self.module.params)
if self.parameters.get('firmware_type') == 'service-processor':
if self.parameters.get('install_baseline_image') and self.parameters.get('package') is not None:
self.module.fail_json(msg='Do not specify both package and install_baseline_image: true')
if not self.parameters.get('package') and self.parameters.get('install_baseline_image') == 'False':
self.module.fail_json(msg='Specify at least one of package or install_baseline_image')
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
def firmware_image_get_iter(self):
"""
Compose NaElement object to query current firmware version
:return: NaElement object for firmware_image_get_iter with query
"""
firmware_image_get = netapp_utils.zapi.NaElement('service-processor-get-iter')
query = netapp_utils.zapi.NaElement('query')
firmware_image_info = netapp_utils.zapi.NaElement('service-processor-info')
firmware_image_info.add_new_child('node', self.parameters['node'])
query.add_child_elem(firmware_image_info)
firmware_image_get.add_child_elem(query)
return firmware_image_get
def firmware_image_get(self, node_name):
"""
Get current firmware image info
:return: True if query successful, else return None
"""
firmware_image_get_iter = self.firmware_image_get_iter()
try:
result = self.server.invoke_successfully(firmware_image_get_iter, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error fetching firmware image details: %s: %s'
% (self.parameters['node'], to_native(error)),
exception=traceback.format_exc())
# return firmware image details
if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
sp_info = result.get_child_by_name('attributes-list').get_child_by_name('service-processor-info')
firmware_version = sp_info.get_child_content('firmware-version')
return firmware_version
return None
def acp_firmware_required_get(self):
"""
where acp firmware upgrade is required
:return: True is firmware upgrade is required else return None
"""
acp_firmware_get_iter = netapp_utils.zapi.NaElement('storage-shelf-acp-module-get-iter')
query = netapp_utils.zapi.NaElement('query')
acp_info = netapp_utils.zapi.NaElement('storage-shelf-acp-module')
query.add_child_elem(acp_info)
acp_firmware_get_iter.add_child_elem(query)
try:
result = self.server.invoke_successfully(acp_firmware_get_iter, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error fetching acp firmware details details: %s'
% (to_native(error)), exception=traceback.format_exc())
if result.get_child_by_name('attributes-list').get_child_by_name('storage-shelf-acp-module'):
acp_module_info = result.get_child_by_name('attributes-list').get_child_by_name(
'storage-shelf-acp-module')
state = acp_module_info.get_child_content('state')
if state == 'firmware_update_required':
# acp firmware version upgrade required
return True
return False
def sp_firmware_image_update_progress_get(self, node_name):
"""
Get current firmware image update progress info
:return: Dictionary of firmware image update progress if query successful, else return None
"""
firmware_update_progress_get = netapp_utils.zapi.NaElement('service-processor-image-update-progress-get')
firmware_update_progress_get.add_new_child('node', self.parameters['node'])
firmware_update_progress_info = dict()
try:
result = self.server.invoke_successfully(firmware_update_progress_get, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error fetching firmware image upgrade progress details: %s'
% (to_native(error)), exception=traceback.format_exc())
# return firmware image update progress details
if result.get_child_by_name('attributes').get_child_by_name('service-processor-image-update-progress-info'):
update_progress_info = result.get_child_by_name('attributes').get_child_by_name('service-processor-image-update-progress-info')
firmware_update_progress_info['is-in-progress'] = update_progress_info.get_child_content('is-in-progress')
firmware_update_progress_info['node'] = update_progress_info.get_child_content('node')
return firmware_update_progress_info
def shelf_firmware_info_get(self):
"""
Get the current firmware of shelf module
:return:dict with module id and firmware info
"""
shelf_id_fw_info = dict()
shelf_firmware_info_get = netapp_utils.zapi.NaElement('storage-shelf-info-get-iter')
desired_attributes = netapp_utils.zapi.NaElement('desired-attributes')
storage_shelf_info = netapp_utils.zapi.NaElement('storage-shelf-info')
shelf_module = netapp_utils.zapi.NaElement('shelf-modules')
shelf_module_info = netapp_utils.zapi.NaElement('storage-shelf-module-info')
shelf_module.add_child_elem(shelf_module_info)
storage_shelf_info.add_child_elem(shelf_module)
desired_attributes.add_child_elem(storage_shelf_info)
shelf_firmware_info_get.add_child_elem(desired_attributes)
try:
result = self.server.invoke_successfully(shelf_firmware_info_get, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error fetching shelf module firmware details: %s'
% (to_native(error)), exception=traceback.format_exc())
if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
shelf_info = result.get_child_by_name('attributes-list').get_child_by_name('storage-shelf-info')
if (shelf_info.get_child_by_name('shelf-modules') and
shelf_info.get_child_by_name('shelf-modules').get_child_by_name('storage-shelf-module-info')):
shelves = shelf_info['shelf-modules'].get_children()
for shelf in shelves:
shelf_id_fw_info[shelf.get_child_content('module-id')] = shelf.get_child_content('module-fw-revision')
return shelf_id_fw_info
def disk_firmware_info_get(self):
"""
Get the current firmware of disks module
:return:
"""
disk_id_fw_info = dict()
disk_firmware_info_get = netapp_utils.zapi.NaElement('storage-disk-get-iter')
desired_attributes = netapp_utils.zapi.NaElement('desired-attributes')
storage_disk_info = netapp_utils.zapi.NaElement('storage-disk-info')
disk_inv = netapp_utils.zapi.NaElement('disk-inventory-info')
storage_disk_info.add_child_elem(disk_inv)
desired_attributes.add_child_elem(storage_disk_info)
disk_firmware_info_get.add_child_elem(desired_attributes)
try:
result = self.server.invoke_successfully(disk_firmware_info_get, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error fetching disk module firmware details: %s'
% (to_native(error)), exception=traceback.format_exc())
if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
disk_info = result.get_child_by_name('attributes-list')
disks = disk_info.get_children()
for disk in disks:
disk_id_fw_info[disk.get_child_content('disk-uid')] = disk.get_child_by_name('disk-inventory-info').get_child_content('firmware-revision')
return disk_id_fw_info
def disk_firmware_required_get(self):
"""
Check weather disk firmware upgrade is required or not
:return: True if the firmware upgrade is required
"""
disk_firmware_info = self.disk_firmware_info_get()
for disk in disk_firmware_info:
if (disk_firmware_info[disk]) != self.parameters['disk_fw']:
return True
return False
def shelf_firmware_required_get(self):
"""
Check weather shelf firmware upgrade is required or not
:return: True if the firmware upgrade is required
"""
shelf_firmware_info = self.shelf_firmware_info_get()
for module in shelf_firmware_info:
if (shelf_firmware_info[module]) != self.parameters['shelf_module_fw']:
return True
return False
def sp_firmware_image_update(self):
"""
Update current firmware image
"""
firmware_update_info = netapp_utils.zapi.NaElement('service-processor-image-update')
if self.parameters.get('package') is not None:
firmware_update_info.add_new_child('package', self.parameters['package'])
if self.parameters.get('clear_logs') is not None:
firmware_update_info.add_new_child('clear-logs', str(self.parameters['clear_logs']))
if self.parameters.get('install_baseline_image') is not None:
firmware_update_info.add_new_child('install-baseline-image', str(self.parameters['install_baseline_image']))
firmware_update_info.add_new_child('node', self.parameters['node'])
firmware_update_info.add_new_child('update-type', self.parameters['update_type'])
try:
self.server.invoke_successfully(firmware_update_info, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
# Current firmware version matches the version to be installed
if to_native(error.code) == '13001' and (error.message.startswith('Service Processor update skipped')):
return False
self.module.fail_json(msg='Error updating firmware image for %s: %s'
% (self.parameters['node'], to_native(error)),
exception=traceback.format_exc())
return True
def shelf_firmware_upgrade(self):
"""
Upgrade shelf firmware image
"""
shelf_firmware_update_info = netapp_utils.zapi.NaElement('storage-shelf-firmware-update')
try:
self.server.invoke_successfully(shelf_firmware_update_info, enable_tunneling=True)
return True
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error updating shelf firmware image : %s'
% (to_native(error)), exception=traceback.format_exc())
def acp_firmware_upgrade(self):
"""
Upgrade shelf firmware image
"""
acp_firmware_update_info = netapp_utils.zapi.NaElement('storage-shelf-acp-firmware-update')
acp_firmware_update_info.add_new_child('node-name', self.parameters['node'])
try:
self.server.invoke_successfully(acp_firmware_update_info, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error updating acp firmware image : %s'
% (to_native(error)), exception=traceback.format_exc())
def disk_firmware_upgrade(self):
"""
Upgrade disk firmware
"""
disk_firmware_update_info = netapp_utils.zapi.NaElement('disk-update-disk-fw')
disk_firmware_update_info.add_new_child('node-name', self.parameters['node'])
try:
self.server.invoke_successfully(disk_firmware_update_info, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error updating disk firmware image : %s'
% (to_native(error)), exception=traceback.format_exc())
return True
def autosupport_log(self):
"""
Autosupport log for software_update
:return:
"""
results = netapp_utils.get_cserver(self.server)
cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
netapp_utils.ems_log_event("na_ontap_firmware_upgrade", cserver)
def apply(self):
"""
Apply action to upgrade firmware
"""
changed = False
self.autosupport_log()
firmware_update_progress = dict()
if self.parameters.get('firmware_type') == 'service-processor':
# service-processor firmware upgrade
current = self.firmware_image_get(self.parameters['node'])
if self.parameters.get('state') == 'present' and current:
if not self.module.check_mode:
if self.sp_firmware_image_update():
changed = True
firmware_update_progress = self.sp_firmware_image_update_progress_get(self.parameters['node'])
while firmware_update_progress.get('is-in-progress') == 'true':
time.sleep(25)
firmware_update_progress = self.sp_firmware_image_update_progress_get(self.parameters['node'])
else:
# we don't know until we try the upgrade
changed = True
elif self.parameters.get('firmware_type') == 'shelf':
# shelf firmware upgrade
if self.parameters.get('shelf_module_fw'):
if self.shelf_firmware_required_get():
if not self.module.check_mode:
changed = self.shelf_firmware_upgrade()
else:
changed = True
else:
if not self.module.check_mode:
changed = self.shelf_firmware_upgrade()
else:
# we don't know until we try the upgrade -- assuming the worst
changed = True
elif self.parameters.get('firmware_type') == 'acp':
# acp firmware upgrade
if self.acp_firmware_required_get():
if not self.module.check_mode:
self.acp_firmware_upgrade()
changed = True
elif self.parameters.get('firmware_type') == 'disk':
# Disk firmware upgrade
if self.parameters.get('disk_fw'):
if self.disk_firmware_required_get():
if not self.module.check_mode:
changed = self.disk_firmware_upgrade()
else:
changed = True
else:
if not self.module.check_mode:
changed = self.disk_firmware_upgrade()
else:
# we don't know until we try the upgrade -- assuming the worst
changed = True
self.module.exit_json(changed=changed)
def main():
"""Execute action"""
community_obj = NetAppONTAPFirmwareUpgrade()
community_obj.apply()
if __name__ == '__main__':
main()
| StarcoderdataPython |
12846851 | from rest_framework import viewsets
from .serializers import UniversitySerializer
from .models import University
class UniversityViewSet(viewsets.ModelViewSet):
queryset = University.objects.all()
serializer_class = UniversitySerializer
| StarcoderdataPython |
12805466 | <gh_stars>1-10
import random
def file_num_writer(fp):
runtotal = 0
while True:
num = yield
runtotal += num
print(num, runtotal, file=fp)
with open('/tmp/spam.txt', 'w') as fp:
handler = file_num_writer(fp)
next(handler) # Prime coroutine
for f in range(5):
num = random.randint(0, 10)
handler.send(num)
| StarcoderdataPython |
3468187 | <gh_stars>0
# -*- coding: utf8 -*-
import requests
from xml.etree import ElementTree
def process(xmldata):
articles = ElementTree.fromstring(xmldata)
data = []
for article in articles:
if article.tag != 'artikel':
continue
entry = {}
use_entry = True
try:
for field in article:
tag = field.tag.lower()
if tag == u'utg\xe5tt' and field.text != '0':
use_entry = False
elif tag == 'sortiment' and field.text.lower() == 'bs':
use_entry = False
elif tag == 'nr':
entry['nr'] = int(field.text)
entry['link'] = 'https://www.systembolaget.se/' + field.text
elif tag == 'namn':
entry['namn'] = field.text
elif tag == 'prisinklmoms':
entry['pris'] = float(field.text)
elif tag == 'prisperliter':
entry['literpris'] = float(field.text)
elif tag == 'volymiml':
entry['volym'] = float(field.text)
elif tag == 'alkoholhalt':
entry['alkoholhalt'] = float(field.text.replace('%', ''))
if use_entry:
entry['apk'] = entry['alkoholhalt'] / entry['literpris']
data.append(entry)
except Exception as e:
print(str(e))
return sorted(data, key=lambda e: e['apk'], reverse=True)
if __name__ == "__main__":
import sys
if len(sys.argv) > 1:
with open(sys.argv[1]) as f:
xmldata = f.read()
else:
r = requests.get('https://www.systembolaget.se/api/assortment/products/xml')
xmldata = r.text
data = process(xmldata)
for i in range(50):
print(data[i])
| StarcoderdataPython |
9681275 | import os
import random
import numpy as np
import torch
import tensorflow as tf
def seed_everything(seed=42, seed_gpu=True):
'''
Fix ramdom seed to make all processes reproducable.
Call this before running scripts.
Note deterministic operation may have a negative single-run performance impact.
To avoid seeding gpu, pass seed_gpu=None.
Frameworks below are not supported; You have to fix when calling the method.
- Scikit-learn
- Optuna
Reference:
https://qiita.com/si1242/items/d2f9195c08826d87d6ad
TODO: Use fwd9m after released
https://github.com/NVIDIA/framework-determinism#announcement
'''
# random
random.seed(seed)
# Numpy
np.random.seed(seed)
# Pytorch
torch.manual_seed(seed)
if seed_gpu:
torch.cuda.manual_seed_all(seed_gpu)
torch.backends.cudnn.deterministic = True
# Tensorflow
tf.random.set_seed(seed)
if seed_gpu:
# Note: you need to pip install tensorflow-determinism
os.environ['TF_DETERMINISTIC_OPS'] = '1'
# Keras
os.environ['PYTHONHASHSEED'] = str(seed)
if __name__ == '__main__':
seed_everything()
| StarcoderdataPython |
1947387 | #!/home/wyf/anaconda3/envs/test/bin/python
#!coding=utf-8
import rospy
import cv2
import numpy as np
from sensor_msgs.msg import Image
import cv_bridge
from cv_bridge import CvBridge, CvBridgeError
import traffic_count.test.test as test
from traffic_count.detector.detector import Detector
import traffic_count.tracker.tracker as tracker
# import rospy
# from darknet_ros_msgs.msg import BoundingBoxes
# from sensor_msgs.msg import Image
# from cv_bridge import CvBridge
# import numpy as np
# import cv2
"""
#็ๆไธไธชๅฐบๅฏธไธบsize็ๅพ็mask๏ผๅ
ๅซ1ไธชpolygon๏ผ๏ผๅผ่ๅด 0ใ1ใ2๏ผ๏ผไพๆ็บฟ่ฎก็ฎไฝฟ็จ
#list_point๏ผ็นๆฐ็ป
#color_value: polygonๅกซๅ
็ๅผ
#size๏ผๅพ็ๅฐบๅฏธ
"""
def image_mask(list_point, color_value, size):
# ๆ นๆฎ่ง้ขๅฐบๅฏธ๏ผๅกซๅ
ไธไธชpolygon๏ผไพๆ็บฟ่ฎก็ฎไฝฟ็จ
mask_image_temp = np.zeros(size, dtype=np.uint8)
# ๅๅงๅๆ็บฟpolygon
ndarray_pts = np.array(list_point, np.int32)
polygon_color_value = cv2.fillPoly(mask_image_temp, [ndarray_pts], color=color_value)
polygon_color_value = polygon_color_value[:, :, np.newaxis]
return polygon_color_value
def polygon_mask(point_list_first, point_list_second, size):
polygon_value_first = image_mask(point_list_first, 1, size)
polygon_value_second = image_mask(point_list_second, 2, size)
# ๆ็บฟๆฃๆต็จmask๏ผๅ
ๅซ2ไธชpolygon๏ผ๏ผๅผ่ๅด 0ใ1ใ2๏ผ๏ผไพๆ็บฟ่ฎก็ฎไฝฟ็จ
polygon_mask_first_and_second = polygon_value_first + polygon_value_second
# set the first polygon to blue
blue_color_plate = [255, 0, 0]
blue_image = np.array(polygon_value_first * blue_color_plate, np.uint8)
# set the first polygon to yelllow
yellow_color_plate = [0, 255, 255]
yellow_image = np.array(polygon_value_second * yellow_color_plate, np.uint8)
# ๅฝฉ่ฒๅพ็๏ผๅผ่ๅด 0-255๏ผ็จไบๅพ็ๆพ็คบ
polygon_color_image = blue_image + yellow_image
return polygon_mask_first_and_second, polygon_color_image
def traffic_count(image, frame_count, list_bboxs, polygon_mask_first_and_second, first_list, second_list, up_count, down_count):
first_num = 0
second_num = 0
point_radius = 3
if len(list_bboxs) > 0:
for item_bbox in list_bboxs:
x1, y1, x2, y2, cls_id, conf = item_bbox
# ๆ็บฟ็็น(ไธญๅฟ็น)
x = int(x1 + ((x2 - x1) * 0.5))
y = int(y1 + ((y2 - y1) * 0.5))
if polygon_mask_first_and_second[y, x] == 1 or polygon_mask_first_and_second[y, x] ==3:
first_num += 1
elif polygon_mask_first_and_second[y, x] == 2:
second_num += 1
#็ปๅบไธญๅฟlist_bboxs็ไธญๅฟ็น
list_pts = []
list_pts.append([x-point_radius, y-point_radius])
list_pts.append([x-point_radius, y+point_radius])
list_pts.append([x+point_radius, y+point_radius])
list_pts.append([x+point_radius, y-point_radius])
ndarray_pts = np.array(list_pts, np.int32)
image = cv2.fillPoly(image, [ndarray_pts], color=(0, 0, 255))
if frame_count > 2:
second_list.pop(0)
first_list.pop(0)
first_list.append(first_num)
second_list.append(second_num)
if frame_count > 2 and first_list[0] > first_list[1]:
first_diff = first_list[0] - first_list[1]
second_diff = second_list[1] - second_list[0]
if first_diff == second_diff:
up_count += first_diff
print('up count:', up_count)
elif frame_count >2 and second_list[0] > second_list[1]:
second_diff = second_list[0] - second_list[1]
first_diff = first_list[1] - first_list[0]
if first_diff == second_diff:
down_count += first_diff
print('down count:', down_count)
return up_count, down_count
# def callback_image(data):
# global detector
# global up_count
# global down_count
# global blue_list
# global yellow_list
# global cur_frame
# global polygon_mask_blue_and_yellow
# global polygon_color_image
# bridge = CvBridge()
# cv_image = bridge.imgmsg_to_cv2(data,"bgr8")
# bboxes = detector.detect(cv_image)
# list_bboxs = []
# for bbox in bboxes:
# list_bboxs.append(bbox)
# up_count, down_count = traffic_count(cv_image, cur_frame, list_bboxs, polygon_mask_blue_and_yellow, blue_list, yellow_list, up_count, down_count)
# cv_image = cv2.add(cv_image, polygon_color_image)
# font_draw_number = cv2.FONT_HERSHEY_SIMPLEX
# # draw_text_postion = (int(960 * 0.01), int(540 * 0.05))
# draw_text_postion = (int(1920 * 0.01), int(1080 * 0.05))
# text_draw = 'DOWN: ' + str(down_count) + ' , UP: ' + str(up_count)
# cv_image = cv2.putText(img=cv_image, text=text_draw,
# org=draw_text_postion,
# fontFace=font_draw_number,
# fontScale=1, color=(0, 0, 255), thickness=2)
# cur_frame += 1
# #Display Image
# cv2.imshow("traffic count", cv_image)
# cv2.waitKey(1)
# # def callback_count(data):
# # list_bboxs = []
# # for box in data.bounding_boxes:
# # list_bboxs.append(np.array([box.xmin, box.ymin, box.xmax, box.ymax, box.Calss, round(box.probability, 2)]))
# # list_bboxs = np.array(list_bboxs)
# # up_count, down_count = traffic_count(img_cv_rgb, cur_frame, list_bboxs, polygon_mask_blue_and_yellow, blue_list, yellow_list, up_count, down_count)
# def main():
# while not rospy.is_shutdown():
# #Initialize ROS node
# rospy.init_node('traffic_count', anonymous=False)
# rate = rospy.Rate(10)
# print("test1")
# point_blue = [[300*2, 380*2],[800*2, 380*2], [800*2, 390*2],[300*2, 390*2]]
# point_yellow = [[300*2, 370*2],[800*2, 370*2], [800*2, 380*2],[300*2, 380*2]]
# polygon_mask_blue_and_yellow, polygon_color_image = polygon_mask(point_blue, point_yellow,(1080, 1920))
# # list ไธ่่ฒpolygon้ๅ
# blue_list = []
# # list ไธ้ป่ฒpolygon้ๅ
# yellow_list = []
# # ่ฟๅ
ฅๆฐ้
# down_count = 0
# # ็ฆปๅผๆฐ้
# up_count = 0
# cur_frame = 0
# print("test2")
# # ๅๅงๅ yolov5
# detector = Detector()
# print("test3")
# #Subscribe to image topic
# image_sub = rospy.Subscriber('/image_source', Image, callback_image)
# # image_sub = rospy.Subscriber("camera_topic",Image, callback_image)
# #Subscribe to darknet_ros to get BoundingBoxes from YOLOv3
# # sub_detection = rospy.Subscriber("detection_topic", BoundingBoxes , callback_count)
# # pub_traffic_count = rospy.Publisher("count_topic", IntList, queue_size=10)
# # #print(msg) #Testing msg that is published
# # pub_traffic_count.publish(msg)
# rate.sleep()
# rospy.spin()
# if __name__ == '__main__':
# try :
# rospy.loginfo("Starting traffic count node")
# main()
# except rospy.ROSInterruptException:
# rospy.loginfo( "Shutting down traffic count node.")
# cv2.destroyAllWindows()
# pass
def callback(data):
global detector
global up_count
global down_count
global blue_list
global yellow_list
global cur_frame
global polygon_mask_blue_and_yellow
global polygon_color_image
bridge = CvBridge()
cv_image = bridge.imgmsg_to_cv2(data,"bgr8")
bboxes = detector.detect(cv_image)
list_bboxs = []
for bbox in bboxes:
list_bboxs.append(bbox)
up_count, down_count = traffic_count(cv_image, cur_frame, list_bboxs, polygon_mask_blue_and_yellow, blue_list, yellow_list, up_count, down_count)
font_draw_number = cv2.FONT_HERSHEY_SIMPLEX
# draw_text_postion = (int(960 * 0.01), int(540 * 0.05))
draw_text_postion = (int(1920 * 0.01), int(1080 * 0.05))
text_draw = 'DOWN: ' + str(down_count) + ' , UP: ' + str(up_count)
cv_image = cv2.add(cv_image, polygon_color_image)
cv_image = cv2.putText(img=cv_image, text=text_draw,
org=draw_text_postion,
fontFace=font_draw_number,
fontScale=1, color=(0, 0, 255), thickness=2)
# print("bboxes:", text_draw)
cv2.imshow("lala",cv_image)
cur_frame += 1
cv2.waitKey(1)
def main():
rospy.init_node('showImage',anonymous = True)
rospy.Subscriber('/image_source', Image, callback)
rospy.spin()
if __name__ == '__main__':
try:
rospy.loginfo("Starting cv_bridge_test node")
count = 0
# ๅๅงๅ yolov5
detector = Detector()
point_blue = [[300*2, 380*2],[800*2, 380*2], [800*2, 390*2],[300*2, 390*2]]
point_yellow = [[300*2, 370*2],[800*2, 370*2], [800*2, 380*2],[300*2, 380*2]]
polygon_mask_blue_and_yellow, polygon_color_image = polygon_mask(point_blue, point_yellow,(1080, 1920))
# list ไธ่่ฒpolygon้ๅ
blue_list = []
# list ไธ้ป่ฒpolygon้ๅ
yellow_list = []
# ่ฟๅ
ฅๆฐ้
down_count = 0
# ็ฆปๅผๆฐ้
up_count = 0
cur_frame = 0
main()
except KeyboardInterrupt:
print( "Shutting down cv_bridge_test node.")
cv2.destroyAllWindows()
| StarcoderdataPython |
11259606 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-20 18:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('web', '0004_auto_20160720_2130'),
]
operations = [
migrations.AddField(
model_name='header',
name='button_text',
field=models.CharField(max_length=64, null=True),
),
migrations.AddField(
model_name='header',
name='button_url',
field=models.CharField(max_length=64, null=True),
),
migrations.AddField(
model_name='header',
name='page',
field=models.CharField(choices=[(1, 'Index'), (2, 'Pricing'), (3, 'Clients'), (4, 'Contact'), (5, 'About')], default=1, max_length=1),
),
]
| StarcoderdataPython |
131335 | import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import sys
import json
#from caresjpsutil import PythonLogger
FOSSIL_CAP = "/images/1_top_ten_emission_countries_fossil_ele_cap.png"
FOSSIL_CAP_PER = "/images/1_top_ten_emission_countries_fossil_ele_cap_per.png"
FOSSIL_GEN = "/images/1_top_ten_emission_countries_fossil_ele_gen.png"
FOSSIL_GEN_PER = "/images/1_top_ten_emission_countries_fossil_ele_gen_per.png"
if __name__ == "__main__":
    #pythonLogger = PythonLogger('EmissionPlotter.py')
    #pythonLogger.postInfoToLogServer('start of EmissionPlotter.py')
    try:
        # argv[1] is a JSON-encoded string holding the model root directory;
        # all input/output paths below are resolved relative to it.
        modelsPath = json.loads(sys.argv[1])
        # ### plot top ten emission countries versus fossil powerplant capacity
        df = pd.read_csv(modelsPath + 'data/input/top_ten_emission_countries.csv', header='infer', sep=',')
        # country ordering taken from the year-2000 rows of the input file
        df1 = df[df.Year == 2000]
        my_list = df1["Country"].tolist()
        my_list1 = ['CN', 'US', 'IN', 'RU', 'JP', 'DE', 'IR', 'KR', 'CA', 'SA']
        df = df.sort_values(by=['Country', 'Year'], ascending=[True, True])
        # ### plot1 - ele_cap versus year
        df1 = df.loc[:, ['Country', 'Year', 'ele_cap_fossil_mw']]
        df1.columns = ['Fossil fuel power capacity (MW)', 'Year', 'ele_cap_fossil_mw']
        # NOTE(review): vmax1/vmin1 are computed but never used — the heatmap
        # below uses hard-coded vmin/vmax instead; candidates for removal.
        vmax1 = df1[['ele_cap_fossil_mw']].max(axis=0)
        vmin1 = df1[['ele_cap_fossil_mw']].min(axis=0)
        df1['Year'] = df1['Year'].astype(int)
        # pivot to a country-by-year matrix for the heatmap
        df1 = df1.pivot(index='Fossil fuel power capacity (MW)', columns='Year', values='ele_cap_fossil_mw')
        df1 = df1.reindex(my_list)
        df1 = df1.reindex(my_list1)
        # NOTE(review): csfont is unused below.
        csfont = {'fontname':'Times New Roman'}
        plt.clf()
        plt.figure(figsize=(3.3, 1.6))
        sns.set_style("dark")
        sns.set_context("paper", font_scale=0.9)
        ax = sns.heatmap(df1, cmap='Greens', cbar=False, vmin=20000, vmax=1200000)
        # manual colorbar so tick labels can show GW instead of MW
        cbar = ax.figure.colorbar(ax.collections[0])
        cbar.set_label("GW")
        cbar.set_ticks([20000, 1200000])
        cbar.set_ticklabels(["20", "1200"])
        ax.set_xlabel('')
        ax.set_ylabel('Installed capacity \n')
        ax.set(xticklabels=[])
        plt.xticks(rotation=0)
        plt.yticks(rotation=0)
        plt.tight_layout()
        plt.savefig(modelsPath + 'public' + FOSSIL_CAP, dpi=1000)
        plt.close()
        # ### plot2 - ele_cap percentage versus year
        df2 = df.loc[:, ['Country', 'Year', 'ele_cap_fossil_per']]
        df2.columns = ['Fossil fuel power capacity percentage', 'Year', 'ele_cap_fossil_per']
        df2['Year'] = df2['Year'].astype(int)
        df2 = df2.pivot(index='Fossil fuel power capacity percentage', columns='Year', values='ele_cap_fossil_per')
        df2 = df2.reindex(my_list)
        plt.clf()
        plt.figure(figsize=(3.1, 1.6))
        sns.set_context("paper", font_scale=0.9)
        ax = sns.heatmap(df2, cmap='Greens', cbar=False, vmin=0, vmax=100)
        cbar = ax.figure.colorbar(ax.collections[0])
        cbar.set_label("Percentage")
        cbar.set_ticks([0, 100])
        cbar.set_ticklabels(["0%", "100%"])
        ax.set_xlabel('')
        ax.set_ylabel('')
        ax.set(xticklabels=[])
        ax.set(yticklabels=[])
        # plt.xticks(rotation=45)
        plt.tight_layout()
        plt.savefig(modelsPath + 'public' + FOSSIL_CAP_PER, dpi=1000)
        plt.close()
        # ### plot3 - ele_gen versus year
        df3 = df.loc[:, ['Country', 'Year', 'ele_gen_fossil_gwh']]
        # NOTE(review): vmax3/vmin3 are also unused (hard-coded limits below).
        vmax3 = df3[['ele_gen_fossil_gwh']].max(axis=0)
        vmin3 = df3[['ele_gen_fossil_gwh']].min(axis=0)
        df3.columns = ['Fossil fuel power generation (GWh)', 'Year', 'ele_gen_fossil_gwh']
        df3['Year'] = df3['Year'].astype(int)
        df3 = df3.pivot(index='Fossil fuel power generation (GWh)', columns='Year', values='ele_gen_fossil_gwh')
        df3 = df3.reindex(my_list)
        plt.clf()
        plt.figure(figsize=(3.3, 1.7))
        sns.set_context("paper", font_scale=0.9)
        ax = sns.heatmap(df3, cmap='Greens', cbar=False, vmin=110000, vmax=4300000)
        cbar = ax.figure.colorbar(ax.collections[0])
        cbar.set_label("TWh")
        cbar.set_ticks([110000, 4300000])
        cbar.set_ticklabels(["110", "4300"])
        ax.set_xticklabels(['2000', '', '', '', '', '2005', '', '', '', '', '2010', '', '', '', '2014'])
        ax.set_xlabel('Year')
        ax.set_ylabel('Annual generation \n')
        # ax.set(yticklabels=[])
        plt.xticks(rotation=0)
        plt.tight_layout()
        plt.savefig(modelsPath + 'public' + FOSSIL_GEN, dpi=1000)
        plt.close()
        # ### plot4 - ele_gen percentage versus year
        df4 = df.loc[:, ['Country', 'Year', 'ele_gen_fossil_per']]
        df4.columns = ['Fossil fuel power generation percentage', 'Year', 'ele_gen_fossil_per']
        df4['Year'] = df4['Year'].astype(int)
        df4 = df4.pivot(index='Fossil fuel power generation percentage', columns = 'Year', values = 'ele_gen_fossil_per')
        df4 = df4.reindex(my_list)
        plt.clf()
        plt.figure(figsize=(3.3, 1.7))
        sns.set_context("paper", font_scale=0.9)
        ax = sns.heatmap(df4, cmap='Greens', cbar=False, vmin=0, vmax=100)
        cbar = ax.figure.colorbar(ax.collections[0])
        cbar.set_label("Percentage")
        cbar.set_ticks([0, 100])
        cbar.set_ticklabels(["0%", "100%"])
        ax.set_xlabel('Year')
        ax.set_ylabel('')
        ax.set(yticklabels=[])
        ax.set_xticklabels(['2000', '', '', '', '', '2005', '', '', '', '', '2010', '', '', '', '2014'])
        plt.xticks(rotation=0)
        plt.tight_layout()
        plt.savefig(modelsPath + 'public' + FOSSIL_GEN_PER, dpi=1000)
        plt.close()
        # #### color palette test
        # exploratory renders of df4 with an alternative palette; outputs go
        # to a test directory and are not part of the returned paths dict
        plt.clf()
        plt.figure(figsize=(8, 4))
        sns.set_context("paper", font_scale=1.3)
        ax = sns.heatmap(df4, cmap='PuBu', cbar=False, vmin=0, vmax=100)
        cbar = ax.figure.colorbar(ax.collections[0])
        cbar.set_ticks([0, 100])
        cbar.set_ticklabels(["0%", "100%"])
        plt.tight_layout()
        plt.savefig(modelsPath + 'public/images/color_palette_test/PuBu_rect.png', dpi=1000)
        plt.close()
        plt.clf()
        sns.set_context("paper")
        ax = sns.heatmap(df4, cmap='PuBu', cbar=False, vmin=0, vmax=100)
        ax.set_ylabel('')
        # ax.set(yticklabels=[])
        # ax.set_aspect("equal")
        cbar = ax.figure.colorbar(ax.collections[0])
        cbar.set_ticks([0, 100])
        cbar.set_ticklabels(["0%", "100%"])
        plt.tight_layout()
        plt.savefig(modelsPath + 'public/images/color_palette_test/PuBu_square.png', dpi=1000)
        plt.close()
        # ## overall figure
        # NOTE(review): this 2x2 composite is built but never saved to disk.
        plt.close()
        plt.clf()
        plt.figure(figsize=(30,6))
        sns.set_context("paper", font_scale=1.2)
        fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex=True, sharey=True)
        g1 = sns.heatmap(df1, cmap='Greens', vmin=20000, vmax=1200000, ax=ax1)
        # cbar = g1.figure.colorbar(g1.collections[0])
        # cbar.set_ticks([20000, 1200000])
        # cbar.set_ticklabels(["20", "1200"])
        g1.set_xlabel('')
        g1.set_ylabel('')
        g2 = sns.heatmap(df3, cmap='Greens', ax=ax2)
        # cbar = g2.figure.colorbar(g2.collections[0])
        # cbar.set_ticks([20000, 1200000])
        # cbar.set_ticklabels(["10", "1200"])
        g2.set_xlabel('')
        g2.set_ylabel('')
        # g2.set_yticks([])
        g3 = sns.heatmap(df2, cmap='Greens', ax=ax3)
        # cbar = g3.figure.colorbar(g3.collections[0])
        # cbar.set_ticks([20000, 1200000])
        # cbar.set_ticklabels(["30", "1200"])
        g3.set_xlabel('')
        g3.set_ylabel('')
        g4 = sns.heatmap(df4, cmap='Greens', ax=ax4)
        # cbar = g4.figure.colorbar(g4.collections[0])
        # cbar.set_ticks([20000, 1200000])
        # cbar.set_ticklabels(["40", "1200"])
        g4.set_xlabel('')
        g4.set_ylabel('')
        # g4.set_yticks([])
        plt.tight_layout()
        # report the generated image locations to the caller as JSON on stdout
        pathsDict = {
            "fossilCap": FOSSIL_CAP,
            "fossilCapPer": FOSSIL_CAP_PER,
            "fossilGen": FOSSIL_GEN,
            "fossilGenPer": FOSSIL_GEN_PER
        }
        print(json.dumps(pathsDict))
    except Exception as e:
        # best-effort: print the error so the calling process can log it
        print(e)
    #pythonLogger.postInfoToLogServer('end of EmissionPlotter.py')
6679564 | from time import sleep
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))))
import server
import loadbalancer as lb
# 1. Check whether there is or not default VM OS Template
# 1-1. If find, Go Step 2
# 1-2. If no exists, stop Your Default VM and Create OS Template image from your VM
# 2. Create loadbalancer
# 3. Create VM(Webserver)
# 4. Attach loadbalancer to your created VM(WebServer)
# 5. Repeat a certain Times Step 3~4
# ----------------------Input-----------------------------
# Default_zone : Seoul - M Zone
default_zone = 'KR-CB' # Zone:KR-CB
# placeholders below must be filled in with your own account's resources
my_vm_id = 'For Your VM ID on your default_zone' # your VM
vm_serviceoffering_id = '385df834-6cf7-485b-bca8-de4d4f59b0b0' # 1vCore 1GB
vm_diskoffering_id = '87c0a6f6-c684-4fbe-a393-d8412bcf788d' # 100GB
ipid = 'For Your Public IP ID on your default_zone' # public IP ID on KR-CB
ipaddress = 'For Your Public IP on your default_zone' # public IP addr on KR-CB
lb_name = 'scenariotest'
template_name = 'scenariotest'
deploy_nameheader = 'scenario_vm'
# first public port to map; incremented once per attach attempt below
public_port = 80
VM_COUNT = 2
# --------------------------------------------------------
print('[Scenario Start] Create ' + str(VM_COUNT) + '-WebServer from your VM(Webserver) Image')
# Step 1: look for an existing OS template image named `template_name`.
Exist_VM = False
OSlist = server.listTemplates(zone=default_zone, templatefilter='self', name=template_name)
temp_list = OSlist['listtemplatesresponse']['template']
tem_id = None
for x in temp_list:
    temp_name = x.pop('displaytext')
    if temp_name == template_name:
        # BUG FIX: previously `tem_id` was overwritten on EVERY iteration, so
        # a non-matching template listed after the match clobbered the id and
        # the wrong image could be deployed. Capture the id of the matching
        # template only, then stop scanning.
        tem_id = x.pop('id')
        Exist_VM = True
        print('[Step1] Find OS Template Image. ')
        break
if not Exist_VM:
    # report once instead of once per non-matching template
    print('[Step1] No Exists \"scenariotest\" Image. Go to step 1-2')
# Step 1-2: no template yet — stop the source VM and snapshot its root
# volume into a new OS template image.
if not Exist_VM:
    stop_job = server.stopVirtualMachine(zone=default_zone, vmid=my_vm_id)
    if 'errorcode' in stop_job['stopvirtualmachineresponse']:
        # "already stopped" is fine; any other error aborts the scenario
        if stop_job['stopvirtualmachineresponse']['errortext'] == "VirtualMachine already is stopped.":
            print('[Step1-2] Default VM Ready')
        else:
            raise RuntimeError('[Step1-2] ! Fail Default VM Stop ' + '\n - errortext : ' + stop_job['stopvirtualmachineresponse']['errortext'])
    else:
        # poll the async job until the stop completes (jobstatus 1 == done)
        while True:
            stopvmres = server.queryAsyncJobResult(zone=default_zone, jobid=stop_job['stopvirtualmachineresponse']['jobid'])
            if(stopvmres['queryasyncjobresultresponse']['jobstatus'] == 1):
                print('[Step1-2] Default VM Stop Complete')
                break
            sleep(10)
    # Get OSID from VirtualMachine List
    list_server = server.listVirtualMachines(zone=default_zone, vmid=my_vm_id)
    server_info = list_server['listvirtualmachinesresponse']['virtualmachine']
    for x in server_info:
        vmosid = x.pop('guestosid')
    # Get VOLID from Volume List
    list_vol = server.listVolumes(zone=default_zone, vmid=my_vm_id, type='ROOT')
    tem_vol = list_vol['listvolumesresponse']['volume']
    for x in tem_vol:
        volid = x.pop('id')
    # Create OS Template Image
    create_tem = server.createTemplate(zone=default_zone, displaytext=template_name, name=template_name, ostypeid=vmosid, volumeid=volid)
    if 'errorcode' in create_tem['createtemplateresponse']:
        raise RuntimeError('[Step1-2] ! Fail Template Creation' + '\n - errortext : ' + create_tem['createtemplateresponse']['errortext'])
    else:
        print('[Step1-2] Please wait. OS Template Image Creating', end='')
        # poll until the template job finishes, printing progress dots
        while True:
            temres = server.queryAsyncJobResult(zone=default_zone, jobid=create_tem['createtemplateresponse']['jobid'])
            if(temres['queryasyncjobresultresponse']['jobstatus']==1):
                tem_id = temres['queryasyncjobresultresponse']['jobinstanceid']
                break
            else:
                print('.', end='')
                sleep(10)
        print(' Done!')
# Step 2: create a round-robin HTTP load balancer with a TCP health check.
lbres = lb.createLoadBalancer(zone=default_zone, name=lb_name, loadbalanceroption='roundrobin', serviceport='80', servicetype='http', healthchecktype='tcp')
if 'errorcode' in lbres['createloadbalancerresponse']:
    raise RuntimeError('[Step2] ! Fail LB Creation ' + '\n - errortext : ' + lbres['createloadbalancerresponse']['errortext'])
else:
    lb_id = lbres['createloadbalancerresponse']['loadbalancerid']
    lb_ip = lbres['createloadbalancerresponse']['serviceip']
    print('[Step2] LB Creation Complete.' + '\n - LB ID : ' + lb_id + '\n - LB IP : ' + lb_ip)
# Steps 3-4: deploy VM_COUNT webservers from the template and attach each
# one to the load balancer created above.
print('[Step3] ' + str(VM_COUNT) + ' VM Create ')
for i in range(0, VM_COUNT):
    vm_name = deploy_nameheader + str(i)
    # (removed dead placeholder assignments of vm_id/vm_pw: both are always
    # set from the deploy job result before first use, or an error is raised)
    print('[Step3] Please wait a few minutes. ' + str(i+1) + 'th VM Deploying', end='')
    createVM = server.deployVirtualMachine(zone=default_zone, displayname=vm_name, templateid=tem_id, serviceofferingid=vm_serviceoffering_id, diskofferingid=vm_diskoffering_id)
    if 'deployvirtualmachineresponse' in createVM and 'errorcode' in createVM['deployvirtualmachineresponse']:
        raise RuntimeError('[Step3] ! ' + vm_name + ' Fail Deploy' + '\n - errortext : ' + createVM['deployvirtualmachineresponse']['errortext'])
    else:
        # poll the async deploy job; jobstatus 1 == done
        while True:
            res = server.queryAsyncJobResult(zone=default_zone, jobid=createVM['deployvirtualmachineresponse']['jobid'])
            if(res['queryasyncjobresultresponse']['jobstatus'] == 1):
                vm_id = createVM['deployvirtualmachineresponse']['id']
                vm_pw = res['queryasyncjobresultresponse']['jobresult']['virtualmachine']['password']
                break
            else:
                print('.', end='')
                sleep(10)
    # BUG FIX: the password print previously ended in a bare `<PASSWORD>`
    # placeholder (a redaction artifact), which was a syntax error.
    print('\n[Step3] ' + vm_name + ' Deploy Complete.' + '\n - VM Name : ' + vm_name + '\n - VM ID : ' + vm_id + '\n - VM PassWord : ' + vm_pw)
    print('[Step4] Please wait. LB Attaching', end='')
    # retry while the API keeps answering errorcode 400 (resource not ready);
    # note public_port advances on every attempt, matching original behavior
    while True:
        lb_add = lb.addLoadBalancerWebServer(zone=default_zone, loadbalancerid=lb_id, vmid=vm_id, ipaddress=ipaddress, publicport=str(public_port))
        public_port = public_port + 1
        if (not 'errorcode' in lb_add['addloadbalancerwebserverresponse']) or ('errorcode' in lb_add['addloadbalancerwebserverresponse'] and lb_add['addloadbalancerwebserverresponse']['errorcode'] != "400"):
            break
        else:
            print('.', end='')
            sleep(20)
    print(' Done!')
print('Scenario Finish!')
| StarcoderdataPython |
6615517 | <filename>chain_reaction/graphics/window.py
from abc import ABC, abstractmethod
import pygame
import pygame.font as font
import pygame.gfxdraw as gfxdraw
import chain_reaction.graphics.sprites as sprites
import chain_reaction.wrappers.engine as engine
# ----------- COLORS -------------
COL_BACK = (0, 0, 0)        # background
COL_FORE = (255, 255, 255)  # grid lines / foreground
COL_PLR1 = (250, 100, 40)   # player 1 (red/orange)
COL_PLR2 = (40, 200, 100)   # player 2 (green)
# --------- DIMS -----------
R_THIC = 10  # thickness of the next-player indicator bar
R_VOFF = 40  # vertical offset of the indicator bar
G_HOFF = 50  # grid horizontal offset from window edge
G_VOFF = 70  # grid vertical offset from window edge
G_WIDC = 50  # grid cell width/height in pixels
G_WALL = 2   # grid line thickness
# -------- ON INIT ----------
# The following are populated by init() below.
G_SHAP = None   # grid shape stored as (cols, rows)
G_DIMS = None   # grid pixel dimensions (w, h)
R_DIMS = None   # indicator bar dimensions
W_DIMS = None   # window dimensions
ORB_PL1 = None  # pre-rendered orb sprites for player 1
ORB_PL2 = None  # pre-rendered orb sprites for player 2
# ----------- INIT ---------------
def init(g_shape=(9, 6)):
    """
    Calculate window/grid dimensions and construct orb sprites.
    Input : g_shape (rows, cols)
    Note  : G_SHAP is stored transposed, i.e. as (cols, rows).
    Must be called before constructing any window class below, since they
    read the module-level G_*/W_*/ORB_* globals populated here.
    """
    global G_SHAP, G_DIMS, R_DIMS, W_DIMS
    global ORB_PL1, ORB_PL2
    # pygame modules
    font.init()
    # calculate dims
    G_SHAP = (g_shape[1], g_shape[0])
    G_DIMS = (G_SHAP[0] * G_WIDC + G_WALL, G_SHAP[1] * G_WIDC + G_WALL)
    R_DIMS = (G_DIMS[0], R_THIC)
    W_DIMS = (G_DIMS[0] + (2 * G_HOFF), G_DIMS[1] + G_HOFF + G_VOFF)
    # construct sprites (one per orb count, per player)
    ORB_SIZ = G_WIDC - G_WALL
    ORB_PL1 = sprites.construct_orbs(COL_PLR1, COL_BACK, ORB_SIZ)
    ORB_PL2 = sprites.construct_orbs(COL_PLR2, COL_BACK, ORB_SIZ)
# ---------- CLASSES -------------
class BaseGameWindow(ABC):
    """Abstract base for game windows: owns the pygame surface, event
    handling and all primitive drawing; subclasses implement the
    on_game_* callbacks."""

    def __init__(self, fps):
        """Create the window. *fps* is the frame-rate cap for the clock."""
        # window pointers
        self.surface = pygame.display.set_mode(W_DIMS)
        self.clock = pygame.time.Clock()
        self.fps = fps
        # status
        self.locked = False  # True while an animation is in progress
        self.open = True     # False once the user closes the window
        # mouse click and index
        self.mclk = False    # True if a click happened this frame
        self.midx = None     # (row, col) of the clicked cell, or None

    def clear(self):
        """Fill the whole surface with the background color."""
        self.surface.fill(COL_BACK)

    def update(self):
        """Flip the display buffer."""
        pygame.display.update()

    def event_flush(self):
        """Discard all pending pygame events."""
        pygame.event.clear()

    def event_handler(self):
        """Handle events in window: window close and cell clicks."""
        # Refresh values
        self.mclk = False
        self.midx = None
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                self.open = False
                return
            elif event.type == pygame.MOUSEBUTTONUP:
                # if locked, ignore clicks (animation running)
                if self.locked:
                    continue
                self.mclk = True
                crx, cry = pygame.mouse.get_pos()
                # convert pixel position to (row, col) grid index
                idx = ((cry - G_VOFF) // G_WIDC, (crx - G_HOFF) // G_WIDC)
                # both coordinates must fall inside the grid
                val = (0 <= idx[0] < G_SHAP[1]) * (0 <= idx[1] < G_SHAP[0])
                self.midx = idx if val else None

    def draw_indicator(self, player):
        """Draw the bar above the grid showing whose turn it is."""
        pcolor = COL_PLR2 if player else COL_PLR1
        nxrect = (G_HOFF, R_VOFF, G_DIMS[0], R_THIC)
        pygame.draw.rect(self.surface, pcolor, nxrect)

    def draw_grid(self):
        """Draw the grid lines."""
        gwid, ghgt = G_DIMS
        # horizontal lines
        for j in range(G_SHAP[1] + 1):
            grect = (G_HOFF, G_VOFF + j * G_WIDC, gwid, G_WALL)
            pygame.draw.rect(self.surface, COL_FORE, grect)
        # vertical lines
        for i in range(G_SHAP[0] + 1):
            grect = (G_HOFF + i * G_WIDC, G_VOFF, G_WALL, ghgt)
            pygame.draw.rect(self.surface, COL_FORE, grect)

    def draw_orbs(self, board, ignore=()):
        """Draw orb sprites for every non-empty cell of *board*.

        *board* is a flat list indexed row-major; positive counts belong to
        player 1, negative to player 2.  Cells whose flat index appears in
        *ignore* are skipped (used while those cells are mid-explosion).

        BUG FIX: the default for *ignore* was a mutable list literal ([]);
        an empty tuple has identical membership semantics without the
        shared-mutable-default pitfall.
        """
        gcol, grow = G_SHAP
        offx, offy = (G_HOFF + G_WALL, G_VOFF + G_WALL)
        for idx in range(grow * gcol):
            # ignore index
            if idx in ignore:
                continue
            # blit appropriate sprite on surface
            ccount = board[idx]
            if ccount != 0:
                i_y, i_x = idx // gcol, idx % gcol
                pos = (G_WIDC * i_x + offx, G_WIDC * i_y + offy)
                psprite = ORB_PL1 if ccount > 0 else ORB_PL2
                self.surface.blit(psprite[abs(ccount) - 1], pos)

    def draw_all(self, board, player):
        """Redraw everything (background, grid, indicator, orbs) and flip."""
        self.clear()
        self.draw_grid()
        self.draw_indicator(player)
        self.draw_orbs(board)
        self.update()

    @abstractmethod
    def on_game_start(self):
        """ Splash Screen Callback """
        return

    @abstractmethod
    def on_game_move(self, game, move):
        """ Game Piece Move Callback """
        return

    @abstractmethod
    def on_game_end(self, game):
        """ Game Over Callback """
        return
class StaticGameWindow(BaseGameWindow):
    """Window that redraws the board statically after every move.

    No animations are performed, which keeps it very light on resources.
    """

    def __init__(self, fps):
        super().__init__(fps)

    def on_game_start(self):
        """Splash screen callback (no-op for the static window)."""
        return

    def on_game_move(self, game: engine.ChainReactionGame, move):
        """Apply *move* (if given) and redraw the whole board."""
        if move is not None:
            # a move was supplied: play it before drawing
            game.make_move(move)
        self.draw_all(game.board, game.player)

    def on_game_end(self, game):
        """Announce the outcome on stdout, then shut pygame down."""
        outcomes = {
            0: "Red Wins!",
            1: "Green Wins!",
            2: "Sorry to see you go :(",
        }
        message = outcomes.get(game.winner)
        if message is not None:
            print(message)
        # quit pygame
        pygame.quit()
class AnimatedGameWindow(BaseGameWindow):
    def __init__(self, fps, flight_steps=10):
        """
        Window that displays orb-flight animations.
        -------------------------------------------
        - fps          - frame rate limit
        - flight_steps - number of interpolation frames per flight animation
        """
        super().__init__(fps)
        self.flight_steps = flight_steps

    def draw_flights(self, flights, progress, player):
        # Draw one frame of in-flight orbs.  *flights* is a list of
        # (origin, dest) flat cell indices; *progress* is the current frame
        # number in [0, flight_steps).
        # setup
        gcol, grow = G_SHAP
        offx, offy = (G_HOFF + G_WALL, G_VOFF + G_WALL)
        pcolor = COL_PLR2 if player else COL_PLR1
        prog_frac = progress / self.flight_steps
        for origin, dest in flights:
            # cell-center pixel coordinates of origin and destination
            orig_posx = G_WIDC * (origin % gcol) + offx + G_WIDC // 2
            orig_posy = G_WIDC * (origin // gcol) + offy + G_WIDC // 2
            dest_posx = G_WIDC * (dest % gcol) + offx + G_WIDC // 2
            dest_posy = G_WIDC * (dest // gcol) + offy + G_WIDC // 2
            # linear interpolation between origin and destination
            pos_x = int(orig_posx + prog_frac * (dest_posx - orig_posx))
            pos_y = int(orig_posy + prog_frac * (dest_posy - orig_posy))
            # draw in present position (antialiased outline + fill)
            gfxdraw.aacircle(self.surface, pos_x, pos_y, 10, pcolor)
            gfxdraw.filled_circle(self.surface, pos_x, pos_y, 10, pcolor)

    def explode_orbs(self, board, explosions, player, callback=None):
        """
        Show orb explosion animation.
        Does not return until the animation is over.
        Handles window events internally each frame.
        *callback*, when given, is invoked once per frame after drawing
        (used to keep the game-over text on screen during end animations).
        """
        # immediate return if explosions are absent
        if not explosions:
            return
        # set up origin and final indices for flight
        flights = [
            (origin, dest)
            for origin in explosions
            for dest in engine.NTABLE[origin]
        ]
        # uniform speed
        for progress in range(self.flight_steps):
            self.clear()
            self.draw_grid()
            self.draw_indicator(player)
            # exploding cells are drawn as flights, not as static orbs
            self.draw_orbs(board, ignore=explosions)
            self.draw_flights(flights, progress, player)
            callback() if callback else None # optional callback
            self.update()
            self.event_handler()
            self.clock.tick(self.fps)

    def on_game_start(self):
        """ Splash Screen """
        return

    def on_game_move(self, game: engine.ChainReactionAnimated, move):
        """ Function to execute when move specified """
        # draw if no move specified
        if move is None:
            self.draw_all(game.board, game.player)
            return
        # invalid move
        if not game.make_move(move):
            return
        # lock to not respond to mouse clicks
        self.locked = True
        player = game.player
        # animate chain-reaction steps until the board is stable or game over
        while game.pending_moves and not game.game_over and self.open:
            # get board and explosions for animation
            prev_board, explosions = game.get_next_step()
            # draw explosions
            self.explode_orbs(prev_board, explosions, player)
        self.draw_all(game.board, game.player)
        # unlock window after handling events
        self.event_handler()
        self.locked = False

    def on_game_end(self, game: engine.ChainReactionAnimated):
        """ Game over screen: grays out the board, shows the winner text and
        keeps animating remaining explosions until the user closes. """
        global ORB_PL1, ORB_PL2
        global COL_PLR1, COL_PLR2
        global COL_FORE
        # convert colors to grayscale (module-level, window is shutting down)
        COL_PLR1 = (30, 30, 30)
        COL_PLR2 = (30, 30, 30)
        COL_FORE = (30, 30, 30)
        ORB_PL1 = [sprites.grayscale(x, 0.2) for x in ORB_PL1]
        ORB_PL2 = [sprites.grayscale(x, 0.2) for x in ORB_PL2]
        # save player
        player = game.player
        self.flight_steps = 20 # slow-mo
        # construct game over text
        font_instance = font.SysFont("Ubuntu Mono", 50, True, False)
        message = "GREEN WINS!" if game.winner else "RED WINS!"
        mscolor = (100, 255, 50) if game.winner else (255, 100, 50)
        game_over_text = font_instance.render(message, True, mscolor)
        text_w, text_h = game_over_text.get_size()
        text_dest = ((W_DIMS[0] - text_w) // 2, (W_DIMS[1] - text_h) // 2)
        blit_text = lambda: self.surface.blit(game_over_text, text_dest)
        # keep exploding for cool end-graphics
        while self.open and game.pending_moves:
            prev_board, explosions = game.get_next_step()
            self.explode_orbs(prev_board, explosions, player, blit_text)
            self.clear()
            self.draw_grid()
            self.draw_indicator(game.player)
            self.draw_orbs(game.board)
            blit_text()
            self.update()
        # draw static if pending moves are over
        if not game.pending_moves:
            self.clear()
            self.draw_grid()
            self.draw_indicator(game.player)
            self.draw_orbs(game.board)
            blit_text()
            self.update()
        # idle until the window is closed
        while self.open:
            self.event_handler()
            self.clock.tick(self.fps)
        # voluntary close
        if not game.game_over and not self.open:
            print("Sorry to see you go :(")
        font.quit()
        pygame.quit()
| StarcoderdataPython |
3219303 | # -*- coding: utf-8 -*-
# This module follows Mathematica 5 conventions. In current Mathematica a number of these functiions don't exist.
# Some of the functions in Mathematica 5 appear now under Information.
"""
Types of Values
"""
from mathics.builtin.base import Builtin
from mathics.builtin.assignments.internals import get_symbol_values
from mathics.core.attributes import hold_all, protected
class DefaultValues(Builtin):
    # NOTE: the docstring below is load-bearing — Mathics renders it as user
    # documentation and runs the '>>' lines as doctests. Do not edit casually.
    """
    <dl>
    <dt>'DefaultValues[$symbol$]'
        <dd>gives the list of default values associated with $symbol$.

    <i>Note: this function is in Mathematica 5 but has been removed from current Mathematica.</i>
    </dl>

    >> Default[f, 1] = 4
     = 4
    >> DefaultValues[f]
     = {HoldPattern[Default[f, 1]] :> 4}

    You can assign values to 'DefaultValues':
    >> DefaultValues[g] = {Default[g] -> 3};
    >> Default[g, 1]
     = 3
    >> g[x_.] := {x}
    >> g[a]
     = {a}
    >> g[]
     = {3}
    """

    attributes = hold_all | protected
    summary_text = (
        "gives default values for the arguments associated with a function symbol"
    )

    def apply(self, symbol, evaluation):
        "DefaultValues[symbol_]"
        # delegate to the shared accessor for a symbol's stored rule lists
        return get_symbol_values(symbol, "System`DefaultValues", "default", evaluation)
class Messages(Builtin):
    # NOTE: the docstring below is load-bearing — Mathics renders it as user
    # documentation and runs the '>>' lines as doctests. Do not edit casually.
    """
    <dl>
    <dt>'Messages[$symbol$]'
        <dd>gives the list of messages associated with $symbol$.
    </dl>

    >> a::b = "foo"
     = foo
    >> Messages[a]
     = {HoldPattern[a::b] :> foo}
    >> Messages[a] = {a::c :> "bar"};
    >> a::c // InputForm
     = "bar"
    >> Message[a::c]
     : bar
    """

    attributes = hold_all | protected
    summary_text = "gives the list the messages associated with a particular symbol"

    def apply(self, symbol, evaluation):
        "Messages[symbol_]"
        return get_symbol_values(symbol, "Messages", "messages", evaluation)
class NValues(Builtin):
    # NOTE: the docstring below is load-bearing — Mathics renders it as user
    # documentation and runs the '>>' lines as doctests. Do not edit casually.
    """
    <dl>
    <dt>'NValues[$symbol$]'
        <dd>gives the list of numerical values associated with $symbol$.

    <i>Note: this function is in Mathematica 5 but has been removed from current Mathematica.</i>
    </dl>

    >> NValues[a]
     = {}
    >> N[a] = 3;
    >> NValues[a]
     = {HoldPattern[N[a, MachinePrecision]] :> 3}

    You can assign values to 'NValues':
    >> NValues[b] := {N[b, MachinePrecision] :> 2}
    >> N[b]
     = 2.

    Be sure to use 'SetDelayed', otherwise the left-hand side of the transformation rule will be evaluated immediately,
    causing the head of 'N' to get lost. Furthermore, you have to include the precision in the rules; 'MachinePrecision'
    will not be inserted automatically:
    >> NValues[c] := {N[c] :> 3}
    >> N[c]
     = c

    Mathics will gracefully assign any list of rules to 'NValues'; however, inappropriate rules will never be used:
    >> NValues[d] = {foo -> bar};
    >> NValues[d]
     = {HoldPattern[foo] :> bar}
    >> N[d]
     = d
    """

    attributes = hold_all | protected
    summary_text = "gives the list of numerical values associated with a symbol"

    def apply(self, symbol, evaluation):
        "NValues[symbol_]"
        return get_symbol_values(symbol, "NValues", "n", evaluation)
class SubValues(Builtin):
    # NOTE: the docstring below is load-bearing — Mathics renders it as user
    # documentation and runs the '>>' lines as doctests. Do not edit casually.
    """
    <dl>
    <dt>'SubValues[$symbol$]'
        <dd>gives the list of subvalues associated with $symbol$.

    <i>Note: this function is not in current Mathematica.</i>
    </dl>

    >> f[1][x_] := x
    >> f[2][x_] := x ^ 2
    >> SubValues[f]
     = {HoldPattern[f[2][x_]] :> x ^ 2, HoldPattern[f[1][x_]] :> x}
    >> Definition[f]
     = f[2][x_] = x ^ 2
     .
     . f[1][x_] = x
    """

    attributes = hold_all | protected
    summary_text = "gives the list of subvalues associated with a symbol"

    def apply(self, symbol, evaluation):
        "SubValues[symbol_]"
        return get_symbol_values(symbol, "SubValues", "sub", evaluation)
| StarcoderdataPython |
12838107 | <gh_stars>10-100
import os
import asyncio
from pyppeteer import launch
from ..log_adapter import get_logger
from ..common_utils import get_user_home_dir, add_to_osenv
from ..constants import CONSTANTS
def render_html(url=None, html=None, get_text=False, script=None, reload_=False, wait_time=5, timeout=10):
    """Render a page (by URL or literal HTML) in headless Chromium.

    Returns a (result, content) tuple: *result* is the value of the optional
    *script* evaluated on the page; *content* is the page's body text when
    *get_text* is true, otherwise its full HTML. On IOError both are None.
    """
    result, content = None, None
    _set_env()
    try:
        loop = asyncio.get_event_loop()
        result, content = loop.run_until_complete(
            _render(url=url, html=html, get_text=get_text, script=script,
                    reload_=reload_, wait_time=wait_time, timeout=timeout))
    except IOError as e:
        get_logger().error(f"Error in render_html method - {str(e)}")
    return result, content
def _set_env():
    """Make sure PYPPETEER_HOME points somewhere sensible.

    Uses the configured CONSTANTS.PYPPETEER_HOME when set; otherwise builds
    a default under the configured (or user-home) base directory.
    """
    home = CONSTANTS.PYPPETEER_HOME
    if not home:
        base_dir = CONSTANTS.DIR_PATH or get_user_home_dir()
        home = os.path.join(base_dir, CONSTANTS.DIR_NAME, CONSTANTS.PYPPETEER_DIR)
    add_to_osenv('PYPPETEER_HOME', home)
async def _render(url=None, html=None, get_text=False, script=None, reload_=False, wait_time=5, timeout=10):
    """Drive headless Chromium: open *url* (or a data: URI built from *html*),
    optionally evaluate *script*, and return (script_result, page_content).

    Best-effort by design: any exception is swallowed and (possibly partial)
    results are returned.  *reload_* is currently unused — kept for interface
    compatibility with callers.
    """
    browser = None
    page = None
    result, content = None, None
    try:
        browser = await launch(ignoreHTTPSErrors=True, headless=True)
        page = await browser.newPage()
        await asyncio.sleep(wait_time)
        goto_options = {'timeout': int(timeout * 1000), 'waitUntil': ['domcontentloaded', 'load']}
        if url:
            await page.goto(url, options=goto_options)
        elif html:
            await page.goto(f'data:text/html,{html}', options=goto_options)
        if script:
            result = await page.evaluate(script)
        if get_text:
            content = await page.evaluate('document.body.textContent')
        else:
            content = await page.content()
    except Exception:
        # deliberate swallow: callers treat (None, None) as "render failed"
        pass
    finally:
        # BUG FIX: the launched browser was previously never closed, leaking
        # a Chromium process on every call; close page then browser, ignoring
        # secondary errors during teardown.
        if page:
            try:
                await page.close()
            except Exception:
                pass
        if browser:
            try:
                await browser.close()
            except Exception:
                pass
    return result, content
#TODO: make it talk
async def _talk(html, wait_time=5):
    # Experimental (see TODO above): open a visible browser window with the
    # given HTML and click its '.button' element, presumably to trigger
    # text-to-speech in the page. Not wired up to any public entry point yet.
    page = None
    # NOTE(review): result/content are assigned but never used or returned.
    result, content = None, None
    try:
        # tiny off-screen-ish window; headless must be False for audio
        browser = await launch(ignoreHTTPSErrors=True, headless=False, args=['--window-size=0,0', '--window-position=25,25'])
        page = await browser.newPage()
        await asyncio.sleep(wait_time)
        #await page.evaluateOnNewDocument('window.TEXT2SPEECH', text)
        await page.goto(f'data:text/html,{html}')
        await page.click('.button')
        if page:
            await page.close()
            page = None
    except Exception:
        # best-effort cleanup mirroring _render
        if page:
            await page.close()
            page = None
if __name__ == "__main__":
    # manual smoke test: render a real page and dump its HTML to stdout
    result, content = render_html("http://selenium-release.storage.googleapis.com/index.html?path=3.14/")
    print(content)
| StarcoderdataPython |
6433389 | #!/usr/bin/python
try:
    from sequanto.automation import Client, AutomationObject
except ImportError:
    # BUG FIX: was a bare `except:` (which also swallows SystemExit /
    # KeyboardInterrupt); only an ImportError should trigger the fallback.
    # Fall back to the in-tree copy of the library under ../lib.
    import sys
    from os import path
    sys.path.insert ( 0, path.join ( path.dirname(__file__), '..', 'lib' ) )
    from sequanto.automation import Client, AutomationObject
import serial
from gi.repository import Gtk
import io
def ask_for_string(title):
    """Pop up a modal dialog titled *title* with a single text entry and
    return whatever the user typed."""
    prompt = Gtk.Dialog(title=title)
    prompt.add_button('OK', 1)
    prompt.set_default_response(1)
    field = Gtk.Entry()
    # pressing Enter inside the entry activates the default ('OK') response
    field.set_property('activates-default', True)
    prompt.vbox.add(field)
    prompt.show_all()
    prompt.run()
    answer = field.get_text()
    prompt.destroy()
    return answer
class ClientWindow ( Gtk.Window ):
    # Main window: a tree view over the automation object hierarchy with
    # Name / Type / Value columns. Double-clicking a row toggles Booleans
    # and prompts for Strings.
    def __init__ ( self ):
        Gtk.Window.__init__( self, title = 'Sequanto Automation Client' )
        self.connect ( 'delete-event', Gtk.main_quit )
        self.resize ( 600, 500 )
        # store columns: 0 = the AutomationObject itself, 1-3 = display text
        self.store = Gtk.TreeStore ( object, str, str, str )
        self.tree = Gtk.TreeView ( self.store )
        self.name_column = Gtk.TreeViewColumn('Name', Gtk.CellRendererText(), text = 1)
        self.type_column = Gtk.TreeViewColumn('Type', Gtk.CellRendererText(), text = 2)
        self.value_column = Gtk.TreeViewColumn('Value', Gtk.CellRendererText(), text = 3)
        self.tree.append_column ( self.name_column )
        self.tree.append_column ( self.type_column )
        self.tree.append_column ( self.value_column )
        self.tree.connect ( 'row-activated', self.row_activated )
        scroll = Gtk.ScrolledWindow()
        scroll.add ( self.tree )
        self.add ( scroll )
        self.client = None

    def connect_client ( self, io ):
        # NOTE(review): the `io` parameter shadows the stdlib `io` module
        # inside this method; harmless here, but renaming would be clearer.
        self.client = Client(io)
        self.store.clear()
        self.add_node ( self.client.root )

    def add_node ( self, automation_object, tree_parent = None ):
        # Recursively mirror the automation object tree into the tree store.
        node = self.store.append ( tree_parent )
        #self.store.set_value ( node, self.name_column, automation_object.name )
        self.store.set_value ( node, 0, automation_object )
        self.store.set_value ( node, 1, automation_object.name )
        # prefer the return type for callables, otherwise the node type
        if automation_object.returnType is not None:
            self.store.set_value ( node, 2, automation_object.returnType )
        else:
            self.store.set_value ( node, 2, automation_object.type )
        self.store.set_value ( node, 3, automation_object.stringValue )
        for childObj in automation_object.children:
            self.add_node ( childObj, node )

    def row_activated ( self, view, path, column ):
        # Double-click handler: edit the value of the activated object.
        it = self.store.get_iter(path)
        automation_object = self.store.get_value ( it, 0 )
        if automation_object.returnType == 'Boolean':
            automation_object.value = not automation_object.value
        elif automation_object.returnType == 'String':
            automation_object.value = ask_for_string ( 'Set %s to:' % automation_object.fullname )
        # refresh the displayed value either way
        self.store.set_value ( it, 3, automation_object.stringValue )
# build the UI and open the serial port the automation target listens on
win = ClientWindow()
win.show_all ()
ser = serial.Serial('/dev/ttyACM0', 57600, timeout = 1)
class SerialWrapper ( io.TextIOBase ):
    # Minimal text-IO adapter over a pyserial port: line-based reads
    # (stripping '\r'), pass-through writes.
    def __init__ ( self, _serial ):
        io.TextIOBase.__init__ ( self )
        self.m_serial = _serial

    def readline ( self ):
        # Accumulate bytes until '\n'; '\r' is discarded.
        # NOTE(review): under Python 3, pyserial's read() returns bytes, so
        # `c == '\n'` would never be true and this loop would never
        # terminate — confirm the intended Python version / add a decode.
        ret = ''
        while True:
            c = self.m_serial.read(1)
            if c == '\n':
                return ret
            elif c == '\r':
                continue
            else:
                ret += c

    def write ( self, _data ):
        self.m_serial.write ( _data )
# wrap the serial port and hand it to the client, then enter the GTK loop
#win.connect_client ( io.TextIOWrapper(ser, ser) ) #io.BufferedRWPair(ser, ser), newline = u'\n' ) )
win.connect_client ( SerialWrapper(ser) )
Gtk.main()
| StarcoderdataPython |
8067938 | <filename>Shift a Letter.py<gh_stars>0
# Write a procedure, shift, which takes as its input a lowercase letter,
# a-z and returns the next letter in the alphabet after it, with 'a'
# following 'z'.
def shift(letter):
    """Return the letter that follows *letter* in the lowercase alphabet,
    wrapping around so that 'z' maps to 'a'."""
    if letter == 'z':
        return 'a'
    return chr(ord(letter) + 1)
# demo / expected output
print (shift('a'))
#>>> b
print (shift('n'))
#>>> o
print (shift('z'))
#>>> a
76660 | <reponame>PhilippMatthes/diplom
from tensorflow import keras
def make_fcn(input_shape, output_classes):
    """Build a Fully Convolutional Network time-series classifier.

    Three Conv1D -> BatchNorm -> ReLU stages (128/256/128 filters with
    kernel sizes 8/5/3), global average pooling, then a softmax head with
    *output_classes* units.
    """
    inputs = keras.layers.Input(input_shape)
    x = inputs
    # stacked convolutional stages: (filters, kernel_size) per stage
    for filters, kernel_size in ((128, 8), (256, 5), (128, 3)):
        x = keras.layers.Conv1D(filters=filters, kernel_size=kernel_size, padding='same')(x)
        x = keras.layers.BatchNormalization()(x)
        x = keras.layers.Activation('relu')(x)
    pooled = keras.layers.GlobalAveragePooling1D()(x)
    outputs = keras.layers.Dense(output_classes, activation='softmax')(pooled)
    return keras.models.Model(inputs=inputs, outputs=outputs)
| StarcoderdataPython |
321107 | # -*- coding: utf-8 -*-
import os
from PIL import Image
def resizeImg(origin_file_path, new_file_path, width=640, height=0):
    """Scale the image at *origin_file_path* and save it to *new_file_path*.

    The width is capped at *width* (never upscaled). When *height* is 0 the
    height is derived from the new width, preserving aspect ratio and never
    exceeding the original height; otherwise *height* is used as-is
    (preserving the original behavior, which allows a vertical upscale).
    """
    im = Image.open(origin_file_path)
    (w, h) = im.size
    # never scale up horizontally
    w_n = min(w, width)
    if height == 0:
        # keep aspect ratio. BUG FIX: integer (floor) division — under
        # Python 3 the original `h*w_n/w` produced a float, and PIL's
        # resize() requires integer dimensions. `//` matches the original
        # Python 2 integer-division result exactly.
        h_n = min(h, h * w_n // w)
    else:
        h_n = height
    out = im.resize((w_n, h_n), Image.ANTIALIAS) # resize image with high-quality
    out.save(new_file_path)
class Horizontal:
    # Horizontal alignment constants for clipImg/clipReszImg.
    LEFT = -1
    CENTER = 0
    RIGHT = 1
class Vertical:
    # Vertical alignment constants for clipImg/clipReszImg.
    TOP = -1
    MIDDLE = 0
    BOTTOM = 1
def clipImg(origin_file_path, new_file_path, width=150, height=150, h=Horizontal.CENTER, v=Vertical.MIDDLE):
    """Crop a width x height window out of the image and save it.

    *h* / *v* choose which part of the image to keep (Horizontal.* /
    Vertical.* constants). If the image is smaller than the requested size
    in a dimension, the full extent of that dimension is kept.
    """
    im = Image.open(origin_file_path)
    # BUG FIX: the original unpacked the size into (w, h), shadowing the
    # horizontal-alignment parameter `h`; every alignment comparison below
    # then tested the image HEIGHT against -1/1, so the requested alignment
    # was silently ignored (always centered). Use img_h for the pixel height
    # and keep `h` as the alignment selector.
    (w, img_h) = im.size
    if w < width:
        x = 0
        width = w
    else:
        if h == Horizontal.LEFT:
            x = 0
        elif h == Horizontal.RIGHT:
            x = w - width
        else:
            # integer division so the crop box stays integral under Python 3
            x = (w - width) // 2
    if img_h < height:
        y = 0
        height = img_h
    else:
        if v == Vertical.TOP:
            y = 0
        elif v == Vertical.BOTTOM:
            y = img_h - height
        else:
            y = (img_h - height) // 2
    # crop box is (left, upper, right, lower)
    region = (x, y, x + width, y + height)
    cropImg = im.crop(region)
    cropImg.save(new_file_path)
def clipReszImg(origin_file_path, new_file_path, width=150, height=150, h=Horizontal.CENTER, v=Vertical.MIDDLE):
    """Scale the image so the target window is fully covered, then crop it.

    The image is resized (preserving aspect ratio) so that both dimensions
    are at least ``width`` x ``height``, then ``clipImg`` cuts the window
    anchored by ``h``/``v``.

    BUG FIX vs original: ``(w, h) = im.size`` clobbered the ``h``
    (horizontal alignment) parameter, so the scaled image *height* was
    forwarded to ``clipImg`` as the alignment flag. The image size is now
    unpacked into distinct names.
    """
    im = Image.open(origin_file_path)
    (img_w, img_h) = im.size
    width_scale = float(img_w) / width
    height_scale = float(img_h) / height
    # Scale by the smaller ratio so the crop window is fully covered.
    if width_scale > height_scale:
        w_n = img_w / height_scale
        h_n = height
    else:
        w_n = width
        h_n = img_h / width_scale
    reImg = im.resize((int(w_n), int(h_n)), Image.ANTIALIAS)  # resize image with high-quality
    reImg.save(new_file_path)
    clipImg(new_file_path, new_file_path, width, height, h, v)
| StarcoderdataPython |
5055017 | <filename>ufdl-core-app/src/ufdl/core_app/permissions/_IsAuthenticated.py
from rest_framework.permissions import IsAuthenticated as IsAuthenticatedOrig
class IsAuthenticated(IsAuthenticatedOrig):
    """
    Override for the built-in IsAuthenticated permission class
    which adds object support.
    """
    def has_object_permission(self, request, view, obj):
        # Object-level access simply mirrors the view-level check: any
        # authenticated user may act on any object (``obj`` is ignored).
        return self.has_permission(request, view)
| StarcoderdataPython |
1916081 | <reponame>MadhavJivrajani/LFSR-Encryption<filename>lfsr.py
import numpy as np
from numpy.linalg import matrix_power
from feedback import feedback_poly, one_hot_encode
class LFSR:
    """Linear-feedback shift register modelled via its companion matrix.

    States are advanced by raising the companion matrix to the k-th power
    (mod 2) and multiplying with the seed vector.
    """
    def __init__(self, n, count_type, seed):
        """
        n          : order of the minimal polynomial
        count_type : type of LFSR,
                        "fib": Fibonacci
                        "gal": Galois
        seed       : Initial configuration of the LFSR
        """
        self.n = n
        # self.type: 0 = Fibonacci, 1 = Galois.
        self.type = -1
        if count_type == 'fib':
            self.type = 0
        elif count_type == 'gal':
            self.type = 1
        else:
            raise ValueError("Types of counters: 'fib' and 'gal'")

        if self.type == 0:
            self.companion = self.__generate_companion_fibonacci()
        else:
            self.companion = self.__generate_companion_galois()

        self.seed = np.array(seed)

    def __generate_companion_galois(self):
        """Build the n x n companion matrix (mod 2) for a Galois LFSR.

        The feedback taps (reversed one-hot encoding of the polynomial)
        form the first column; an identity block shifts the state.
        """
        diag = np.diag(np.ones(self.n - 1, dtype=np.int32))
        zeros = np.zeros(self.n - 1, dtype=np.int32)
        temp = np.vstack((diag, zeros))
        return np.mod(np.column_stack((np.array(
                                one_hot_encode(self.n)[::-1]), temp)), 2)

    def __generate_companion_fibonacci(self):
        """Build the n x n companion matrix (mod 2) for a Fibonacci LFSR.

        The feedback taps form the last row; an identity block shifts
        the state.
        """
        diag = np.diag(np.ones(self.n - 1, dtype=np.int32))
        zeros = np.zeros(self.n - 1, dtype=np.int32)
        temp = np.column_stack((zeros, diag))
        return np.mod(np.vstack(
                        (temp, np.array(one_hot_encode(self.n)))), 2)

    def __generate_next(self, state, k):
        """Return companion^k (mod 2); k is reduced modulo the LFSR period."""
        if k < 0:
            raise ValueError("Power of matrix needs to be non-negative")
        else:
            return np.mod(matrix_power(state, k%(self.get_max_period())), 2)

    def get_max_period(self):
        """Maximum period of an n-bit LFSR: 2^n - 1 (all-zero state excluded)."""
        return 2**self.n - 1

    def encrypt_decrypt_ints(self):
        """Return each successive state as an integer reduced mod 255 (keystream bytes)."""
        ints = []
        for k in range(self.get_max_period()):
            temp = self.__generate_next(self.companion, k)
            new_state = np.array(np.mod(np.dot(temp, self.seed), 2), dtype=np.int32)
            # Interpret the state bit-vector as a binary number, mod 255.
            ints.append(int("".join([str(int(i)) for i in list(new_state)]), 2)%255)
        return np.array(ints)

    def get_ints(self):
        """Return each successive state as its raw integer value."""
        ints = []
        for k in range(self.get_max_period()):
            temp = self.__generate_next(self.companion, k)
            new_state = np.array(np.mod(np.dot(temp, self.seed), 2), dtype=np.int32)
            ints.append(int("".join([str(int(i)) for i in list(new_state)]), 2))
        return np.array(ints)

    def generate_output_stream(self):
        """Return the single-bit output stream over one full period.

        Fibonacci LFSRs output the last state bit; Galois LFSRs the first.
        """
        stream = []
        if self.type == 0:
            for k in range(self.get_max_period()):
                temp = self.__generate_next(self.companion, k)
                new_state = np.mod(np.dot(temp, self.seed), 2)
                stream.append(new_state[-1])
        else:
            for k in range(self.get_max_period()):
                temp = self.__generate_next(self.companion, k)
                new_state = np.mod(np.dot(temp, self.seed), 2)
                stream.append(new_state[0])
        return np.array(stream)

    def print_states(self):
        """Print every state of one full period, one space-separated row per state."""
        for k in range(self.get_max_period()):
            temp = self.__generate_next(self.companion, k)
            new_state = np.mod(np.dot(temp, self.seed), 2)
            print(" ".join([str(int(i)) for i in list(new_state)]))
| StarcoderdataPython |
3262041 | <filename>ssafy/practice/subset.py
def subset(nums):
    """Print every non-empty subset of ``nums`` and return them as a list.

    Each bitmask from 1 to 2**len(nums)-1 selects one subset; bit j set
    means ``nums[j]`` is included.

    Changes vs original: the inner loop ran ``range(len(nums) + 1)`` — one
    index past the end (harmless only because bit len(nums) can never be
    set for masks below 2**len(nums)); fixed to ``range(len(nums))``.
    The subsets are also collected and returned (the original returned
    None, so this is backward-compatible).
    """
    subsets = []
    for mask in range(1, 2 ** len(nums)):
        sub = [nums[j] for j in range(len(nums)) if mask & (1 << j)]
        print(sub)
        subsets.append(sub)
    return subsets

subset([3, 6, 7, 1, 5, 4])
| StarcoderdataPython |
3417099 | # emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the NiBabel package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
''' Tests for BatteryRunner and Report objects
'''
from six import StringIO
import logging
from ..batteryrunners import BatteryRunner, Report
from ..testing import (assert_true, assert_false, assert_equal,
assert_not_equal, assert_raises)
# define some trivial functions as checks
def chk1(obj, fix=False):
    """Check that ``obj`` contains a 'testkey' entry (level-20 problem).

    When ``fix`` is True the key is added with value 1 and the fix is
    recorded on the report. Returns ``(obj, report)``.
    """
    rep = Report(KeyError)
    if 'testkey' in obj:
        return obj, rep
    rep.problem_level = 20
    rep.problem_msg = 'no "testkey"'
    if fix:
        obj['testkey'] = 1
        rep.fix_msg = 'added "testkey"'
    return obj, rep
def chk2(obj, fix=False):
    """Check that ``obj['testkey'] == 0``; two distinct failure modes.

    Missing key -> level-20 KeyError report (fix adds the key with value
    1, deliberately exposing the next error); wrong value -> level-10
    ValueError report (fix sets it to 0). Returns ``(obj, report)``.
    """
    # Can return different codes for different errors in same check
    rep = Report()
    try:
        ok = obj['testkey'] == 0
    except KeyError:
        rep.problem_level = 20
        rep.problem_msg = 'no "testkey"'
        rep.error = KeyError
        if fix:
            # NOTE: sets 1 (not 0) on purpose — test_checks relies on the
            # fix cascading into the '"testkey" != 0' error on re-check.
            obj['testkey'] = 1
            rep.fix_msg = 'added "testkey"'
        return obj, rep
    if ok:
        return obj, rep
    rep.problem_level = 10
    rep.problem_msg = '"testkey" != 0'
    rep.error = ValueError
    if fix:
        rep.fix_msg = 'set "testkey" to 0'
        obj['testkey'] = 0
    return obj, rep
def chk_warn(obj, fix=False):
    """Warning-level (30) check for an 'anotherkey' entry.

    When ``fix`` is True the key is added with a string value.
    Returns ``(obj, report)``.
    """
    rep = Report(KeyError)
    # PEP 8 idiom: ``x not in y`` rather than ``not x in y``.
    if 'anotherkey' not in obj:
        rep.problem_level = 30
        rep.problem_msg = 'no "anotherkey"'
        if fix:
            obj['anotherkey'] = 'a string'
            rep.fix_msg = 'added "anotherkey"'
    return obj, rep
def chk_error(obj, fix=False):
    """Error-level (40) check for a 'thirdkey' entry.

    BUG FIX vs original: the fix branch added 'anotherkey' (a copy-paste
    from chk_warn), so "fixing" never satisfied the check; it now adds
    'thirdkey'. Also uses the ``not in`` idiom. Returns ``(obj, report)``.
    """
    rep = Report(KeyError)
    if 'thirdkey' not in obj:
        rep.problem_level = 40
        rep.problem_msg = 'no "thirdkey"'
        if fix:
            obj['thirdkey'] = 'a string'
            rep.fix_msg = 'added "thirdkey"'
    return obj, rep
def test_init_basic():
    """BatteryRunner requires a checks sequence; len() reports check count."""
    # With no args, raise
    assert_raises(TypeError, BatteryRunner)
    # Len returns number of checks
    battrun = BatteryRunner((chk1,))
    assert_equal(len(battrun), 1)
    battrun = BatteryRunner((chk1, chk2))
    assert_equal(len(battrun), 2)
def test_init_report():
    """A default Report equals one built from its documented defaults."""
    rep = Report()
    assert_equal(rep, Report(Exception, 0, '', ''))
def test_report_strings():
    """Report.write_raise: logging threshold and error-raising behaviour."""
    rep = Report()
    assert_not_equal(rep.__str__(), '')
    assert_equal(rep.message, '')
    str_io = StringIO()
    # A pristine report writes nothing.
    rep.write_raise(str_io)
    assert_equal(str_io.getvalue(), '')
    rep = Report(ValueError, 20, 'msg', 'fix')
    # Level 20 is below the default log threshold (30): still nothing.
    rep.write_raise(str_io)
    assert_equal(str_io.getvalue(), '')
    rep.problem_level = 30
    rep.write_raise(str_io)
    assert_equal(str_io.getvalue(), 'Level 30: msg; fix\n')
    str_io.truncate(0)
    str_io.seek(0)
    # No fix string, no fix message
    rep.fix_msg = ''
    rep.write_raise(str_io)
    assert_equal(str_io.getvalue(), 'Level 30: msg\n')
    rep.fix_msg = 'fix'
    str_io.truncate(0)
    str_io.seek(0)
    # If we drop the level, nothing goes to the log
    rep.problem_level = 20
    rep.write_raise(str_io)
    assert_equal(str_io.getvalue(), '')
    # Unless we set the default log level in the call
    rep.write_raise(str_io, log_level=20)
    assert_equal(str_io.getvalue(), 'Level 20: msg; fix\n')
    str_io.truncate(0)
    str_io.seek(0)
    # If we set the error level down this low, we raise an error
    assert_raises(ValueError, rep.write_raise, str_io, 20)
    # But the log level wasn't low enough to do a log entry
    assert_equal(str_io.getvalue(), '')
    # Error still raised with lower log threshold, but now we do get a
    # log entry
    assert_raises(ValueError, rep.write_raise, str_io, 20, 20)
    assert_equal(str_io.getvalue(), 'Level 20: msg; fix\n')
    # If there's no error, we can't raise
    str_io.truncate(0)
    str_io.seek(0)
    rep.error = None
    rep.write_raise(str_io, 20)
    assert_equal(str_io.getvalue(), '')
def test_logging():
    """Report.log_raise only emits when problem_level reaches the logger level."""
    rep = Report(ValueError, 20, 'msg', 'fix')
    str_io = StringIO()
    logger = logging.getLogger('test.logger')
    logger.setLevel(30) # defaultish level
    logger.addHandler(logging.StreamHandler(str_io))
    # Level 20 < logger level 30: nothing logged.
    rep.log_raise(logger)
    assert_equal(str_io.getvalue(), '')
    rep.problem_level = 30
    rep.log_raise(logger)
    assert_equal(str_io.getvalue(), 'msg; fix\n')
    str_io.truncate(0)
    str_io.seek(0)
def test_checks():
    """check_only reports without mutating; check_fix mutates and cascades."""
    battrun = BatteryRunner((chk1,))
    reports = battrun.check_only({})
    assert_equal(reports[0],
                 Report(KeyError,
                        20,
                        'no "testkey"',
                        ''))
    obj, reports = battrun.check_fix({})
    assert_equal(reports[0],
                 Report(KeyError,
                        20,
                        'no "testkey"',
                        'added "testkey"'))
    assert_equal(obj, {'testkey': 1})
    battrun = BatteryRunner((chk1, chk2))
    reports = battrun.check_only({})
    assert_equal(reports[0],
                 Report(KeyError,
                        20,
                        'no "testkey"',
                        ''))
    assert_equal(reports[1],
                 Report(KeyError,
                        20,
                        'no "testkey"',
                        ''))
    obj, reports = battrun.check_fix({})
    # In the case of fix, the previous fix exposes a different error
    # Note, because obj is mutable, first and second point to modified
    # (and final) dictionary
    output_obj = {'testkey': 0}
    assert_equal(reports[0],
                 Report(KeyError,
                        20,
                        'no "testkey"',
                        'added "testkey"'))
    assert_equal(reports[1],
                 Report(ValueError,
                        10,
                        '"testkey" != 0',
                        'set "testkey" to 0'))
    assert_equal(obj, output_obj)
| StarcoderdataPython |
146361 |
import copy
configs = dict()
config = dict(
agent=dict(
q_model_kwargs=None,
v_model_kwargs=None,
pi_model_kwargs=None,
),
algo=dict(
discount=0.99,
batch_size=256,
training_ratio=256,
target_update_tau=0.005,
target_update_interval=1,
learning_rate=3e-4,
reparameterize=True,
policy_output_regularization=0.001,
reward_scale=5
),
env=dict(id="Hopper-v3"),
eval_env=dict(id="Hopper-v3"),
model=dict(),
optim=dict(),
runner=dict(
n_steps=1e6,
log_interval_steps=1e4,
),
sampler=dict(
batch_T=1,
batch_B=1,
max_decorrelation_steps=0,
eval_n_envs=10,
eval_max_steps=int(51e3),
eval_max_trajectories=50,
),
)
configs["sac_1M_serial"] = config
| StarcoderdataPython |
4964079 | <filename>test/unit/http.py
import binascii
import io
import os
import re
import time
import json
import socket
import select
from unit.main import TestUnit
class TestHTTP(TestUnit):
    """HTTP test helper: builds raw requests over sockets and parses replies.

    BUG FIXES vs original:
    - ``http()`` compared the *string* ``sock_type`` ('ipv4'/'ipv6') against
      the ``socket.AF_*`` constants, so the comparison was always False and
      TCP_NODELAY was never set; the strings are now compared directly.
    - ``_parse_chunked_body`` referenced ``size`` inside the except handler
      for its own failed ``int()`` conversion — a NameError on the first
      iteration; the raw chunk string is now captured before conversion.
    """

    def http(self, start_str, **kwargs):
        """Send one HTTP request described by ``kwargs`` and return the reply.

        Returns a parsed dict (``status``/``headers``/``body``) unless
        ``raw_resp`` is set, or a ``(resp, sock)`` tuple when ``start`` is
        given to keep the connection open.
        """
        sock_type = (
            'ipv4' if 'sock_type' not in kwargs else kwargs['sock_type']
        )
        port = 7080 if 'port' not in kwargs else kwargs['port']
        url = '/' if 'url' not in kwargs else kwargs['url']
        http = 'HTTP/1.0' if 'http_10' in kwargs else 'HTTP/1.1'
        read_buffer_size = (
            4096
            if 'read_buffer_size' not in kwargs
            else kwargs['read_buffer_size']
        )

        headers = (
            {'Host': 'localhost', 'Connection': 'close'}
            if 'headers' not in kwargs
            else kwargs['headers']
        )

        body = b'' if 'body' not in kwargs else kwargs['body']
        crlf = '\r\n'

        if 'addr' not in kwargs:
            addr = '::1' if sock_type == 'ipv6' else '127.0.0.1'
        else:
            addr = kwargs['addr']

        sock_types = {
            'ipv4': socket.AF_INET,
            'ipv6': socket.AF_INET6,
            'unix': socket.AF_UNIX,
        }

        if 'sock' not in kwargs:
            sock = socket.socket(sock_types[sock_type], socket.SOCK_STREAM)

            # BUG FIX: compare the type *names*; the original compared the
            # string against the AF_* constants (always False).
            if sock_type == 'ipv4' or sock_type == 'ipv6':
                sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)

            if 'wrapper' in kwargs:
                sock = kwargs['wrapper'](sock)

            connect_args = addr if sock_type == 'unix' else (addr, port)
            try:
                sock.connect(connect_args)
            except ConnectionRefusedError:
                sock.close()
                return None

        else:
            sock = kwargs['sock']

        if 'raw' not in kwargs:
            req = ' '.join([start_str, url, http]) + crlf

            if body != b'':
                if isinstance(body, str):
                    body = body.encode()
                elif isinstance(body, dict):
                    # dict bodies are form-encoded (urlencoded or multipart).
                    body, content_type = self.form_encode(body)
                    headers['Content-Type'] = content_type

                if 'Content-Length' not in headers:
                    headers['Content-Length'] = len(body)

            for header, value in headers.items():
                if isinstance(value, list):
                    for v in value:
                        req += header + ': ' + str(v) + crlf

                else:
                    req += header + ': ' + str(value) + crlf

            req = (req + crlf).encode() + body

        else:
            req = start_str

        sock.sendall(req)

        encoding = 'utf-8' if 'encoding' not in kwargs else kwargs['encoding']

        if TestUnit.detailed:
            print('>>>')
            try:
                print(req.decode(encoding, 'ignore'))
            except UnicodeEncodeError:
                print(req)

        resp = ''

        if 'no_recv' not in kwargs:
            read_timeout = (
                30 if 'read_timeout' not in kwargs else kwargs['read_timeout']
            )
            resp = self.recvall(
                sock, read_timeout=read_timeout, buff_size=read_buffer_size
            ).decode(encoding)

        if TestUnit.detailed:
            print('<<<')
            try:
                print(resp)
            except UnicodeEncodeError:
                print(resp.encode())

        if 'raw_resp' not in kwargs:
            resp = self._resp_to_dict(resp)

            headers = resp.get('headers')
            if headers and headers.get('Transfer-Encoding') == 'chunked':
                resp['body'] = self._parse_chunked_body(resp['body']).decode(
                    encoding
                )

            if 'json' in kwargs:
                resp = self._parse_json(resp)

        if 'start' not in kwargs:
            sock.close()
            return resp

        return (resp, sock)

    def delete(self, **kwargs):
        """Send a DELETE request."""
        return self.http('DELETE', **kwargs)

    def get(self, **kwargs):
        """Send a GET request."""
        return self.http('GET', **kwargs)

    def head(self, **kwargs):
        """Send a HEAD request."""
        return self.http('HEAD', **kwargs)

    def post(self, **kwargs):
        """Send a POST request."""
        return self.http('POST', **kwargs)

    def put(self, **kwargs):
        """Send a PUT request."""
        return self.http('PUT', **kwargs)

    def recvall(self, sock, read_timeout=30, buff_size=4096):
        """Read from ``sock`` until it closes or ``read_timeout`` expires."""
        data = b''
        while select.select([sock], [], [], read_timeout)[0]:
            try:
                part = sock.recv(buff_size)
            except:  # noqa: E722 — best-effort read; any error ends the loop
                break

            data += part

            if not len(part):
                break

        return data

    def _resp_to_dict(self, resp):
        """Split a raw response into ``{'status', 'headers', 'body'}``."""
        m = re.search(r'(.*?\x0d\x0a?)\x0d\x0a?(.*)', resp, re.M | re.S)

        if not m:
            return {}

        headers_text, body = m.group(1), m.group(2)

        p = re.compile('(.*?)\x0d\x0a?', re.M | re.S)
        headers_lines = p.findall(headers_text)

        status = re.search(
            r'^HTTP\/\d\.\d\s(\d+)|$', headers_lines.pop(0)
        ).group(1)

        headers = {}
        for line in headers_lines:
            m = re.search(r'(.*)\:\s(.*)', line)

            # Repeated header names collect into a list.
            if m.group(1) not in headers:
                headers[m.group(1)] = m.group(2)

            elif isinstance(headers[m.group(1)], list):
                headers[m.group(1)].append(m.group(2))

            else:
                headers[m.group(1)] = [headers[m.group(1)], m.group(2)]

        return {'status': int(status), 'headers': headers, 'body': body}

    def _parse_chunked_body(self, raw_body):
        """Decode a Transfer-Encoding: chunked body; fail on malformed input."""
        if isinstance(raw_body, str):
            raw_body = bytes(raw_body.encode())

        crlf = b'\r\n'
        chunks = raw_body.split(crlf)

        if len(chunks) < 3:
            self.fail('Invalid chunked body')

        if chunks.pop() != b'':
            self.fail('No CRLF at the end of the body')

        try:
            last_size = int(chunks[-2], 16)
        except ValueError:
            self.fail('Invalid zero size chunk')

        if last_size != 0 or chunks[-1] != b'':
            self.fail('Incomplete body')

        body = b''
        while len(chunks) >= 2:
            # BUG FIX: capture the raw chunk before converting; the
            # original referenced ``size`` in the except handler, which is
            # undefined on the first iteration.
            raw_size = chunks.pop(0)
            try:
                size = int(raw_size, 16)
            except ValueError:
                self.fail('Invalid chunk size %s' % str(raw_size))

            if size == 0:
                self.assertEqual(len(chunks), 1, 'last zero size')
                break

            temp_body = crlf.join(chunks)

            body += temp_body[:size]

            temp_body = temp_body[size + len(crlf) :]

            chunks = temp_body.split(crlf)

        return body

    def _parse_json(self, resp):
        """Assert the response is JSON and decode its body in place."""
        headers = resp['headers']

        self.assertIn('Content-Type', headers, 'Content-Type header set')
        self.assertEqual(
            headers['Content-Type'],
            'application/json',
            'Content-Type header is application/json',
        )

        resp['body'] = json.loads(resp['body'])

        return resp

    def getjson(self, **kwargs):
        """GET a resource and decode its JSON body."""
        return self.get(json=True, **kwargs)

    def waitforsocket(self, port):
        """Poll 127.0.0.1:``port`` for up to ~5s until it accepts connections."""
        ret = False

        for i in range(50):
            try:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.connect(('127.0.0.1', port))
                ret = True
                break
            except:  # noqa: E722 — connection not up yet; retry
                sock.close()
                time.sleep(0.1)

        sock.close()

        self.assertTrue(ret, 'socket connected')

    def form_encode(self, fields):
        """Encode ``fields`` as multipart if any value is a dict, else urlencoded."""
        is_multipart = False

        for _, value in fields.items():
            if isinstance(value, dict):
                is_multipart = True
                break

        if is_multipart:
            body, content_type = self.multipart_encode(fields)

        else:
            body, content_type = self.form_url_encode(fields)

        return body, content_type

    def form_url_encode(self, fields):
        """Encode ``fields`` as application/x-www-form-urlencoded bytes."""
        data = "&".join("%s=%s" % (name, value)
                        for name, value in fields.items()).encode()
        return data, 'application/x-www-form-urlencoded'

    def multipart_encode(self, fields):
        """Encode ``fields`` as a multipart/form-data body.

        Dict values describe file parts: ``{'filename': ..., 'data': <stream>,
        'type': <optional content type>}``; string values are plain fields.
        """
        boundary = binascii.hexlify(os.urandom(16)).decode('ascii')

        body = ''

        for field, value in fields.items():
            filename = ''
            datatype = ''

            if isinstance(value, dict):
                datatype = 'text/plain'
                filename = value['filename']

                if value.get('type'):
                    datatype = value['type']

                if not isinstance(value['data'], io.IOBase):
                    self.fail('multipart encoding of file requires a stream.')

                data = value['data'].read()

            elif isinstance(value, str):
                data = value

            else:
                self.fail('multipart requires a string or stream data')

            body += (
                "--%s\r\nContent-Disposition: form-data; name=\"%s\""
            ) % (boundary, field)

            if filename != '':
                body += "; filename=\"%s\"" % filename

            body += "\r\n"

            if datatype != '':
                body += "Content-Type: %s\r\n" % datatype

            body += "\r\n%s\r\n" % data

        body += "--%s--\r\n" % boundary

        return body.encode(), "multipart/form-data; boundary=%s" % boundary
| StarcoderdataPython |
9601504 | <reponame>apoorv-x12/Django
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
    """Render the landing page as inline HTML."""
    return HttpResponse("<em>My Second Project</em>")
def help(request):  # NOTE: shadows the built-in help(); name kept for URLconf compatibility
    """Render the help page template with its context."""
    helpdict = {'help_insert':'HELP PAGE'}
    return render(request,'apptwo/help.html',context=helpdict)
| StarcoderdataPython |
76387 | #!/usr/bin/env python3
from caproto.threading.client import Context
prefix = 'TEST:SERVER.'
ctx = Context()
CMD, = ctx.get_pvs(prefix+'seq.CMD')
ACK, = ctx.get_pvs(prefix+'seq.ACK')
message, = ctx.get_pvs(prefix+'seq.message')
values, = ctx.get_pvs(prefix+'seq.values')
example_pv, = ctx.get_pvs(prefix+'example_pv')
VAL, = ctx.get_pvs(prefix+'VAL')
if __name__ == '__main__':
prefix = 'TEST:CLIENT.'
from pdb import pm
from tempfile import gettempdir
import logging
print(gettempdir()+'/{}.log'.format(prefix))
logging.basicConfig(filename=gettempdir()+'/{}.log'.format(prefix),
level=logging.DEBUG, format="%(asctime)s %(levelname)s: %(message)s")
| StarcoderdataPython |
11221063 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import json
import math
import time
from datetime import datetime
from typing import Dict, List, Union
import isodate
import requests
# Timestamp format used by the speedrun.com (SRC) API.
DATE_FORMAT_SRC = '%Y-%m-%dT%H:%M:%SZ'

# Game must have one of these platforms to be tracked
game_platforms = [
    'n5683oev',  # Game Boy
    '3167jd6q',  # Super Game Boy
    'n5e147e2',  # Super Game Boy 2
]

# Run itself must have one of these platforms to qualify
run_platforms = [
    'n5683oev',  # Game Boy
    'vm9v3ne3',  # Game Boy Interface
    '7m6yvw6p',  # Game Boy Player
    '3167jd6q',  # Super Game Boy
    'n5e147e2',  # Super Game Boy 2
    '8gejn93d',  # Wii U
    'v06dk3e4',  # Wii
    'nzelreqp',  # Wii Virtual Console
    '7g6mx89r',  # 3DS Virtual Console
]
def download(url: str) -> Union[List, Dict]:
    """ Download data from SRC API, with throttling """
    print('[tools.py::download] Fetching', url)
    time.sleep(1)  # crude rate limit: one request per second
    headers = {'user-agent': 'akleemans-gameboy-wr-bot/2.0'}
    content = json.loads(requests.get(url, headers=headers).text)
    # SRC API wraps every payload in a top-level "data" envelope.
    data = content['data']
    return data
def get_readable_time(duration: str) -> str:
    """Convert an ISO 8601 duration (e.g. 'PT52.250S') to 'Hh Mm Ss MSms'.

    Zero-valued hour/minute/millisecond components are omitted; the
    seconds component is always present.
    """
    total = isodate.parse_duration(duration).total_seconds()
    millis = round(total % 1 * 1000)
    secs = math.floor(total % 60)
    mins = math.floor(total // 60 % 60)
    hours = math.floor(total // 3600)

    parts = []
    if hours > 0:
        parts.append(f'{hours}h')
    if mins > 0:
        parts.append(f'{mins}m')
    parts.append(f'{secs}s')
    if millis > 0:
        parts.append(f'{millis}ms')
    return ' '.join(parts)
def get_age(date1: str, date2: str) -> int:
    """Return the absolute difference between two SRC timestamps, in whole hours."""
    first = datetime.strptime(date1, DATE_FORMAT_SRC)
    second = datetime.strptime(date2, DATE_FORMAT_SRC)
    # abs() on the timedelta replaces the original ordering branch.
    delta = abs(first - second)
    return round(delta.seconds / 3600 + delta.days * 24)
def get_age_from_now(date: str) -> int:
    """Return how many whole hours ago ``date`` was, relative to now."""
    return get_age(datetime.now().strftime(DATE_FORMAT_SRC), date)
| StarcoderdataPython |
8134267 | from binance.exceptions import BinanceAPIException
from datetime import datetime
import pytz
import json
class ServiceBase:
    """Base class for Binance-backed services: shared client access and
    pair/symbol conversion helpers."""

    METHOD_GET = 'GET'
    METHOD_POST = 'POST'
    METHOD_DELETE = 'DELETE'

    def __init__(self, binanceInst):
        # Keep both the wrapper instance and its underlying API client.
        self.binance = binanceInst
        self.client = binanceInst.client
        if (binanceInst.DEBUG):
            self.logger = binanceInst.logger

    def _get_fail_retstr(self, detail=""):
        # NOTE(review): the key is misspelled 'sucess' and ``detail`` is
        # interpolated unquoted (invalid JSON for plain strings) — callers
        # may depend on this exact string; confirm before changing it.
        return '{"sucess":false, "reason":'+str(detail)+'}'

    def _get_except_retstr(self, binance_exception):
        """Format a BinanceAPIException as a failure return string."""
        e = binance_exception
        return self._get_fail_retstr(str(e.status_code) + ":" + str(e.message))

    def _gtctime_to_createdat_str(self, timestamp):
        """Convert a UTC unix timestamp to an ISO-style 'createdAt' string."""
        t = datetime.fromtimestamp(timestamp, tz=pytz.utc)
        return datetime.strftime(t, "%Y-%m-%dT%H:%M:%S.000Z")

    def pair_to_symbol(self, pairname):
        """'btc_usdt' -> 'BTCUSDT'."""
        return pairname.upper().replace("_", "")

    def symbol_to_pair(self, symbol):
        """'btcusdt' -> 'btc_usdt'; 'ethbtc' -> 'eth_btc' (BTC assumed in the name)."""
        symbol = symbol.lower()
        if symbol.startswith("btc"):
            return symbol.replace("btc", "btc_")
        else:
            return symbol.replace("btc", "_btc")

    def _is_pair(self, pair_or_symbol):
        # Pairs are distinguished from symbols by the underscore separator.
        return "_" in pair_or_symbol

    def _is_symbol(self, pair_or_symbol):
        return not self._is_pair(pair_or_symbol)

    def to_symbol(self, pair_or_symbol):
        """Normalize either form to a symbol ('BTCUSDT')."""
        if self._is_symbol(pair_or_symbol): return pair_or_symbol
        return self.pair_to_symbol(pair_or_symbol)

    def to_pair(self, pair_or_symbol):
        """Normalize either form to a pair ('btc_usdt')."""
        if self._is_pair(pair_or_symbol): return pair_or_symbol
        return self.symbol_to_pair(pair_or_symbol)

    def _check_success(self, ret):
        """A response without 'code', or with code > -1000, counts as success."""
        if "code" not in ret: return True
        return ret["code"] > -1000

    def _process_ret(self, ret):
        """Annotate an API response dict with a 'success' flag and serialize it."""
        if "code" not in ret: return json.dumps(ret)
        if ret["code"] <= -1000:
            # error
            ret["success"] = False
            return json.dumps(ret)
        ret["success"] = True
        return json.dumps(ret)
| StarcoderdataPython |
9698199 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
DETR model and criterion classes.
"""
import torch
import torch.nn.functional as F
from torch import nn
from .position_encoding import PositionEmbeddingSine
from .transformer import Transformer, TransformerTemporal, TransformerReverse, TransformerSimple
class Projector(nn.Module):
    """Projects transformer query outputs back into the feature map via a
    reverse (decoder-only) transformer."""

    def __init__(self, hidden_dim):
        super().__init__()
        self.transformer = TransformerReverse(
            d_model=hidden_dim,
            dropout=0.1,
            nhead=8,
            dim_feedforward=hidden_dim,
            num_decoder_layers=1,
            normalize_before=False,
            return_intermediate_dec=False,
        )

    def forward(self, x, hs):
        # x: projected feature map; hs: transformer hidden states.
        # Semantics of TransformerReverse are defined in .transformer —
        # presumably attends features over the query embeddings.
        new_x = self.transformer(hs, x)
        return new_x
class AttentionModule(nn.Module):
    """Cross-modal attention: enriches depth features with information
    attended from RGB features, with a residual connection."""

    def __init__(self, transformer, transformer_temporal, position_embedding, num_queries, num_channels, hidden_dim):
        super().__init__()
        self.num_queries = num_queries
        self.transformer = transformer
        self.transformer_temporal = transformer_temporal  # unused in forward(); kept for temporal variants
        self.position_embedding = position_embedding
        #self.query_embed = nn.Embedding(num_queries, hidden_dim)
        # 1x1 convs map between backbone channels and transformer width.
        self.input_proj = nn.Conv2d(num_channels, hidden_dim, kernel_size=1)
        self.projector = Projector(hidden_dim)
        self.out_proj = nn.Conv2d(hidden_dim, num_channels, kernel_size=1)

    def forward(self, x_rgb, x_dep):
        # assumes NCHW feature maps of equal spatial size — TODO confirm.
        b, c, h, w = x_rgb.shape
        # All-False mask: every spatial position participates in attention.
        mask = torch.zeros((b, h, w), dtype=torch.bool, device=x_rgb.device)
        pos = self.position_embedding(x_rgb, mask)
        proj_x = self.input_proj(x_rgb)
        hs, _ = self.transformer(proj_x, x_dep, mask, pos)
        # project back to features
        new_x = self.projector(proj_x, hs)
        # Residual add onto the depth features.
        new_x = x_dep.contiguous() + self.out_proj(new_x)
        return new_x
def build_attention_module(num_channels, hidden_dim=256, num_queries=100, temporal=False):
    """Factory for :class:`AttentionModule`.

    When ``temporal`` is True an additional TransformerTemporal is built and
    passed through (currently unused by the module's forward pass).
    """
    transformer_temporal = None
    if temporal:
        transformer_temporal = TransformerTemporal(
            d_model=hidden_dim,
            dropout=0.1,
            nhead=8,
            dim_feedforward=2048,
            num_decoder_layers=1,
            normalize_before=False,
            return_intermediate_dec=False,
        )
    transformer = Transformer(
        d_model=hidden_dim,
        dropout=0.1,
        nhead=8,
        dim_feedforward=2048,
        num_decoder_layers=1,
        normalize_before=False,
        return_intermediate_dec=False,
    )
    # Sine positional encoding split evenly over x/y dimensions.
    position_embedding = PositionEmbeddingSine(hidden_dim // 2, normalize=True)
    return AttentionModule(transformer, transformer_temporal, position_embedding, num_queries, num_channels, hidden_dim)
| StarcoderdataPython |
6474863 | <reponame>harshavardhanc/sunbird-analytics
# Author: <NAME>, <EMAIL>
import os
#This function traverses a directory finding all files with a particular substring
#Returns a list of files found
def findFiles(directory, substrings):
    """Recursively search ``directory`` for files whose full path contains
    any of the given substrings.

    Returns the list of matching paths. A path is appended once per
    matching substring (preserving the original behaviour, so a path can
    appear more than once). Non-str/list arguments yield an empty list.

    BUG FIXES vs original:
    - ``type(directory) == unicode`` raised NameError on Python 3;
      replaced with ``isinstance(..., str)``;
    - ``type(x) == T`` checks replaced with ``isinstance``;
    - ``string.find(sub) >= 0`` replaced with the ``in`` operator.
    """
    ls = []
    if isinstance(directory, str) and isinstance(substrings, list):
        if os.path.isdir(directory):
            for dirname, dirnames, filenames in os.walk(directory):
                for filename in filenames:
                    path = os.path.join(dirname, filename)
                    for substring in substrings:
                        if substring in path:
                            ls.append(path)
    return ls
| StarcoderdataPython |
4983813 | """
===============
Polarimeter GUI
===============
This is the variable waveplate GUI.
:copyright: 2020by Hyperion Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import hyperion
from hyperion import logging
import sys, os
import numpy as np
import pyqtgraph as pg
from time import time
from PyQt5 import uic
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import *
from hyperion import package_path, _colors, Q_
from hyperion.tools.saving_tools import create_filename
from hyperion.instrument.polarization.polarimeter import Polarimeter
from hyperion.view.base_guis import BaseGui, BaseGraph
class PolarimeterGui(BaseGui):
"""
This is the Polarimeter GUI class.
It builds the GUI for the instrument: polarimeter
:param polarimeter_ins: instrument
:type an instance of the polarization instrument
"""
MODES = ['Monitor', 'Time Trace'] # measuring modes
def __init__(self, polarimeter_ins, plot_window, also_close_output=False):
super().__init__()
self.logger = logging.getLogger(__name__)
# to load from the UI file
gui_file = os.path.join(package_path,'view', 'polarization','polarimeter.ui')
self.logger.info('Loading the GUI file: {}'.format(gui_file))
self.gui = uic.loadUi(gui_file, self)
# set location in screen
self.left = 700
self.top = 100
# change location
self.gui.move(self.left, self.top)
# get the inputs
self.plot_window = plot_window # window
self.polarimeter_ins = polarimeter_ins # instrument
# setup the gui
self.customize_gui()
#self.get_device_state()
#self.set_device_state_to_gui()
self.show()
# set the right wavelength
self.polarimeter_ins.change_wavelength(
Q_(self.gui.doubleSpinBox_wavelength.value(), self.gui.doubleSpinBox_wavelength.suffix()))
#
self._is_measuring = False
# data vector creation
self._buffer_size_factor = 20
self.data = np.zeros((len(self.polarimeter_ins.DATA_TYPES_NAME),
int(self.gui.doubleSpinBox_measurement_length.value()*self._buffer_size_factor))) # length of the buffer
self.data_time = np.zeros((int(self.gui.doubleSpinBox_measurement_length.value()*self._buffer_size_factor))) # length of the buffer
# to handle the update of the plot we use a timer
self.timer = QTimer()
self.timer.timeout.connect(self.update_plot)
# to be able to plot only the ticked fields
self.index_to_plot = []
self.Plots = []
self.Plots.append(self.plot_window.pg_plot)
def closeEvent(self, event):
""" Actions to take when you press the X in the main window.
"""
#self.polarimeter_ins.finalize()
self.plot_window.close()
event.accept() # let the window close
def customize_gui(self):
""" Make changes to the gui """
self.setWindowIcon(QIcon(os.path.join(package_path, 'view', 'logo_hyperion.png')))
self.logger.debug('Setting channels to plot')
self._channels_labels = []
self._channels_check_boxes = []
self.gui.pushButton_apply_wavelength.clicked.connect(self.change_wavelength)
# add the channels to detect
for index, a in enumerate(self.polarimeter_ins.DATA_TYPES_NAME):
label = QLabel(a)
label.setStyleSheet('color: {}'.format(_colors[index]))
box = QCheckBox()
self._channels_labels.append(label)
self._channels_check_boxes.append(box)
self.gui.formLayout_channels.addRow(box, label)
self._channels_check_boxes[-1].stateChanged.connect(self.update_start_button_status)
# set the mode
self.gui.comboBox_mode.addItems(self.MODES)
# clear plot
self.gui.pushButton_clear_plot.clicked.connect(self.clear_plot)
# save
self.gui.pushButton_save.clicked.connect(self.data_save)
# start monitor button
self.gui.pushButton_start.clicked.connect(self.start_button)
self.gui.pushButton_start.setEnabled(False)
def update_dummy_data(self):
""" Dummy data update"""
raw = np.random.rand(13)
self.data[:, :-1] = self.data[:, 1:]
self.data[:, -1] = np.array(raw)
def update_data(self):
""" Getting data from polarimeter and put it in the matrix self.data (gets all the posible values)
"""
raw = self.polarimeter_ins.get_data()
t = time()
# shift data
self.data[:,1:] = self.data[:,0:-1]
self.data_time[1:] = self.data_time[0:-1]
# add new data
self.data[:,0] = np.array(raw)
self.data_time[0] = t - self.stat_time
# self.logger.debug('Time vector: {}'.format(self.data_time))
# self.logger.debug('Data vector S1: {}'.format(self.data[0,:]))
def update_plot(self):
""" This updates the plot """
self.update_data() # get new data
# self.logger.debug('Indexes selected to plot: {}'.format(self.index_to_plot))
# make data to plot
x = np.array(range(len(self.data[0,:])))
# Update the data shown in all the plots that are checked
for index, value in enumerate(self.index_to_plot):
#self.logger.debug('Plotting for variable: {}'.format(self.polarimeter_ins.DATA_TYPES_NAME[value]))
y = self.data[value, :]
self.Plots[index].setData(self.data_time, y, pen=pg.mkPen(_colors[value], width=2))
self.plot_window.pg_plot_widget.setXRange(self.data_time[0]-self.gui.doubleSpinBox_measurement_length.value(),
self.data_time[0])
def data_save(self):
""" To save data in memory to disk """
# get the correct filename
extension = 'txt'
filename = create_filename('{}\Data\polarimeter_gui_data.{}'.format(hyperion.parent_path, extension))
# saving
self.logger.info('Saving data in the memory to disk. Filename: {}'.format(filename))
self.polarimeter_ins.save_data(np.transpose(self.data), extra=[np.transpose(self.data_time),
'Time','Second',
'Measurement time since start.'],
file_path= '{}.{}'.format(filename, extension))
def clear_plot(self):
"""To clear the plot"""
self.plot_window.pg_plot_widget.clear()
def update_start_button_status(self):
"""To make the start button be disabled or enabled depending on the checkbox status. """
self.logger.debug('Updating start button and making the initial plot.')
# get the index number of the channels ticked to be measured and put them in an array
self.index_to_plot = []
label = ''
for ind, a in enumerate(self._channels_check_boxes):
if a.isChecked():
self.index_to_plot.append(ind)
label += '{} [{}], '.format(self.polarimeter_ins.DATA_TYPES_NAME[ind],
self.polarimeter_ins.DATA_TYPES_UNITS[ind])
self.logger.debug('Total set of index to plot in the monitor: {}'.format(self.index_to_plot))
self.plot_window.pg_plot_widget.setLabel('left', label[:-2])
self.logger.debug('Setting the right range for the plot: {}'.format(self.gui.doubleSpinBox_measurement_length.value()))
self.plot_window.pg_plot_widget.setXRange(self.data_time[0]-self.gui.doubleSpinBox_measurement_length.value(),
self.data_time[0])
if len(self.index_to_plot)==0:
self.gui.pushButton_start.setEnabled(False)
else:
self.gui.pushButton_start.setEnabled(True)
def start_button(self):
""" Action when you press start """
# add the extra plots needed with one data point
self.Plots = []
for i in range(len(self.index_to_plot)):
self.logger.debug('Adding a new plot. Index: {}'.format(i))
p = self.plot_window.pg_plot_widget.plot([0], [0])
self.Plots.append(p)
# if to toggle with the button
if self._is_measuring:
self.logger.info('Stopping measurement')
self.polarimeter_ins.stop_measurement()
self._is_measuring = False
# change the button text
self.gui.pushButton_start.setText('Start')
# Enable the checkboxes when stopping
for a in self._channels_check_boxes:
a.setEnabled(True)
self.timer.stop()
if self.gui.checkBox_autosave.isChecked():
self.data_save()
else:
self.logger.info('Starting measurement')
self.logger.debug('Re-setting to zero the data')
# create the data set
self.data = np.zeros((len(self.polarimeter_ins.DATA_TYPES_NAME),
int(
self.gui.doubleSpinBox_measurement_length.value() * self._buffer_size_factor))) # length of the buffer
self.data_time = np.zeros((int(
self.gui.doubleSpinBox_measurement_length.value() * self._buffer_size_factor))) # length of the buffer
self.stat_time = time() #
self._is_measuring = True
# change the button text
self.gui.pushButton_start.setText('Stop')
# Disable the checkboxes while running
for a in self._channels_check_boxes:
a.setEnabled(False)
self.timer.start(50) # in ms
# self.measurement_thread = WorkThread(self.continuous_data)
# self.measurement_thread.start()
def change_wavelength(self):
    """Gui method to set the wavelength to the device.

    Reads the value and unit suffix from the wavelength spinbox and
    forwards them to the polarimeter instrument.
    """
    # NOTE(review): reads self.doubleSpinBox_wavelength directly, while the
    # other handlers go through self.gui.<widget> — confirm which is intended.
    w = Q_(self.doubleSpinBox_wavelength.value(), self.doubleSpinBox_wavelength.suffix())
    self.logger.info('Setting the wavelength: {}'.format(w))
    self.polarimeter_ins.change_wavelength(w)
# this is to create a graph output window to dump our data later.
class Graph(BaseGraph):
    """
    In this class a widget is created to draw a graph on.

    Configures the window geometry and axis labels for the polarimeter
    data view; the actual plotting is done by the controller GUI class.
    """
    def __init__(self):
        super().__init__()
        self.logger = logging.getLogger(__name__)
        self.logger.debug('Creating the Graph for the polarization')
        # Window title and geometry (pixels).
        self.title = 'Graph view: Polarimeter'
        self.left = 50
        self.top = 100
        self.width = 640
        self.height = 480
        self.plot_title = 'Data from SK polarimeter'
        self.initialize_plot()
        #self.pg_plot_widget.setYRange(min=-1,max=1)
        self.pg_plot_widget.setLabel('bottom', text='Time', units='seconds')
        self.initUI()  # This should be called here (not in the parent)
if __name__ == '__main__':
    # Manual entry point: opens the polarimeter instrument and runs the GUI.
    log = logging.getLogger(__name__)
    log.info('Running Polarimeter GUI file.')
    # Create the Instrument (in this case we use the with statement)
    with Polarimeter(settings={'dummy': False,
                               'controller': 'hyperion.controller.sk.sk_pol_ana/Skpolarimeter',
                               'dll_name': 'SKPolarimeter'}) as polarimeter_ins:
        # Mandatory line for gui
        app = QApplication(sys.argv)
        log.debug('Creating the graph for the GUI.')
        plot_window = Graph()
        log.debug('Now starting the GUI')
        PolarimeterGui(polarimeter_ins, plot_window)
        # Mandatory line for gui
        app.exec_()  # if you don't want it to close the python kernel afterwards
        # sys.exit(app.exec_())  # if you do want it to close the python kernal afterwards
    # sys.exit()
| StarcoderdataPython |
8032720 | from EXOSIMS.Observatory.SotoStarshade_ContThrust import SotoStarshade_ContThrust as sss
import math as m
import unittest
import numpy as np
import astropy.units as u
from scipy.integrate import solve_ivp
import astropy.constants as const
import hashlib
import scipy.optimize as optimize
from scipy.optimize import basinhopping
import scipy.interpolate as interp
import scipy.integrate as intg
from scipy.integrate import solve_bvp
from copy import deepcopy
import time
import os
import pickle
class TestSotoStarshadeContThrust(unittest.TestCase):
    """
    <NAME>, July 2021, Cornell
    This class tests particular methods from SotoStarshade_Ski.

    The manually-built matrices below serve as ground truth for the
    direction-cosine-matrix helpers of SotoStarshade_ContThrust.
    """
    def test_DCM_r2i_i2r(self):
        """
        tests DCM_r2i against a manually created 6x6 rotation matrix, with
        the basis vectors being the rotational position and velocity vectors.
        Arbitary inputs are used for the angle t.
        Also tests DCM_i2r. The inverse of a matrix is its transpose, so the
        transpose is simply taken from DCM_r2i_manual.
        matrix taken from Gabe's thesis ("ORBITAL DESIGN TOOLS AND SCHEDULING
        TECHNIQUES FOR OPTIMIZING SPACE SCIENCE AND EXOPLANET-FINDING MISSIONS"),
        page 81.
        """
        def DCM_r2i_manual(t):
            # Rotating-to-inertial 6x6 DCM: position block plus its time derivative.
            x = [m.cos(t), -m.sin(t), 0, 0, 0, 0]
            y = [m.sin(t), m.cos(t), 0, 0, 0, 0]
            z = [0, 0, 1, 0, 0, 0]
            dx = [-m.sin(t), -m.cos(t), 0, m.cos(t), -m.sin(t), 0]
            dy = [m.cos(t), -m.sin(t), 0, m.sin(t), m.cos(t), 0]
            dz = [0, 0, 0, 0, 0, 1]
            return np.vstack([x, y, z, dx, dy, dz])
        def DCM_i2r_manual(t):
            # Inverse transform, computed numerically for the reference value.
            return np.linalg.inv(DCM_r2i_manual(t))
        for t in np.arange(0, 101):
            # NOTE: the unbound methods are called with the test instance as
            # 'self'; they do not touch instance state.
            np.testing.assert_allclose(sss.DCM_r2i(self, t), DCM_r2i_manual(t))
            np.testing.assert_allclose(sss.DCM_i2r(self, t), DCM_i2r_manual(t))
    def test_DCM_r2i_9(self):
        """
        tests DCM_r2i_9 against a manually created 6x6 rotation matrix, with
        the basis vectors being the rotational position, velocity, and
        accerelation vector.
        Arbitary inputs are used for the angle t.
        matrix taken from Gabe's thesis ("ORBITAL DESIGN TOOLS AND SCHEDULING
        TECHNIQUES FOR OPTIMIZING SPACE SCIENCE AND EXOPLANET-FINDING MISSIONS"),
        page 81.
        """
        def DCM_r2i_9_manual(t):
            # 9x9 version: adds the second time derivative (acceleration) block.
            x = np.array([m.cos(t), -m.sin(t), 0, 0, 0, 0, 0, 0, 0])
            y = np.array([m.sin(t), m.cos(t), 0, 0, 0, 0, 0, 0, 0])
            z = np.array([0, 0, 1, 0, 0, 0, 0, 0, 0])
            dx = np.array([-m.sin(t), -m.cos(t), 0, m.cos(t), -m.sin(t), 0, 0, 0, 0])
            dy = np.array([m.cos(t), -m.sin(t), 0, m.sin(t), m.cos(t), 0, 0, 0, 0])
            dz = np.array([0, 0, 0, 0, 0, 1, 0, 0, 0])
            ddx = np.array([0, 0, 0, 0, 0, 0, m.cos(t), -m.sin(t), 0]) - 2*dy + x
            ddy = np.array([0, 0, 0, 0, 0, 0, m.sin(t), m.cos(t), 0]) + 2*dx + y
            ddz = np.array([0, 0, 0, 0, 0, 0, 0, 0, 1])
            return np.vstack([x, y, z, dx, dy, dz, ddx, ddy, ddz])
        for t in np.arange(0, 101):
            np.testing.assert_allclose(sss.DCM_r2i_9(self, t), DCM_r2i_9_manual(t))
| StarcoderdataPython |
4820108 | # encoding: utf-8
import os
import re
import arrow
from slugify import slugify
from flask import Flask, render_template, request, url_for, redirect
app = Flask(__name__)
from db import get_backend
import local_settings as conf
def format_size(num):
    """Render a byte count as a human-readable string with binary units.

    Passes ``None`` through unchanged. The value and unit are separated
    by a non-breaking space, e.g. ``'1.5\\xa0Mio'``.
    """
    if num is None:
        return None
    value = float(num)
    for unit in ('o', 'kio', 'Mio', 'Gio'):
        if value < 1024.0:
            return '{:.1f}\xa0{}'.format(value, unit)
        value /= 1024.0
    # Anything past gibioctets is reported in tebioctets.
    return '{:.1f}\xa0{}'.format(value, 'Tio')
def url_of(host, path=''):
    """Build an authenticated FTP URL for *path* on *host*.

    Credentials come from the local settings module.
    """
    location = os.path.join(host, path)
    return 'ftp://{}:{}@{}'.format(conf.USER, conf.PASSWD, location)
def humanize(date):
    """Return a French human-readable age for *date*, or None for None."""
    return None if date is None else arrow.get(date).humanize(locale='fr')
def get_servers():
    """Load every known FTP host from the scan database.

    Returns a list of display-ready dicts (name, URL with credentials,
    online flag, humanized last-index time, file count, formatted size).
    """
    store = get_backend(conf.STORE['NAME']).Store(conf.STORE['CONF'])
    with store.scan_db() as db:
        hosts = db.get_hosts()
    return [{'name': info['name'], 'url': url_of(info['name']),
             'online': info['online'], 'last_indexed': humanize(info['last_indexed']),
             'file_count': info['file_count'], 'size': format_size(info['size'])}
            for (_, info) in hosts.items()]
@app.route('/')
def home():
    """Landing page: list all known servers (search defaults to online-only)."""
    return render_template('home.html', servers=get_servers(), online=True)
@app.route('/search')
def search():
    """Full-text search across the indexed FTP servers.

    Query params: ``query`` (search terms) and ``online`` ('on' to
    restrict results to hosts currently online). Empty queries redirect
    back to the home page.
    """
    query = request.args.get('query', '')
    if query == '':
        return redirect(url_for('home'))
    online = request.args.get('online', 'off') == 'on'
    # Normalize terms and then make sure they only contain alphanumeric characters
    simple_terms = slugify(query, separator=' ').split(' ')
    safe_terms = [re.sub(r'[^a-zA-Z0-9]+', '', term) for term in simple_terms]
    store = get_backend(conf.STORE['NAME']).Store(conf.STORE['CONF'])
    with store.scan_db() as db:
        hosts = db.get_hosts()
    if online:
        hosts = {ip: info for (ip, info) in hosts.items() if info['online']}
    with store.index_db() as db:
        hits = db.search(safe_terms, hosts, limit=100)
    # Decorate each hit with display fields and clickable FTP URLs.
    for hit in hits:
        hit['size'] = format_size(hit['size'])
        hit['url'] = url_of(hit['host']['name'], os.path.join(hit['path'], hit['name']))
        hit['dir_url'] = url_of(hit['host']['name'], os.path.join(hit['path']))
    return render_template('search.html', hits=hits, query=query, online=online)
if __name__ == '__main__':
    # Development server only: debug mode, reachable from any interface.
    app.debug = True
    app.run(host='0.0.0.0')
| StarcoderdataPython |
335325 | from .settings import settings_form_factory
# Public package API: only the settings form factory is exported.
__all__ = [
    'settings_form_factory',
]
8081231 | import numpy
import timeit
from thinc.api import NumpyOps, LSTM, PyTorchLSTM, with_padded, fix_random_seed
from thinc.util import has_torch
import pytest
@pytest.fixture(params=[1, 6])
def nI(request):
    """Parametrized LSTM input width used by the shape tests."""
    return request.param
@pytest.fixture(params=[1, 2, 7, 9])
def nO(request):
    """Parametrized LSTM output width used by the shape tests."""
    return request.param
def test_list2padded():
    """list2padded pads ragged sequences into one (max_len, batch, width)
    array with a correct size_at_t histogram, and padded2list round-trips."""
    ops = NumpyOps()
    seqs = [numpy.zeros((5, 4)), numpy.zeros((8, 4)), numpy.zeros((2, 4))]
    padded = ops.list2padded(seqs)
    arr = padded.data
    size_at_t = padded.size_at_t
    assert arr.shape == (8, 3, 4)
    # size_at_t[t] = number of sequences still alive at timestep t.
    assert size_at_t[0] == 3
    assert size_at_t[1] == 3
    assert size_at_t[2] == 2
    assert size_at_t[3] == 2
    assert size_at_t[4] == 2
    assert size_at_t[5] == 1
    assert size_at_t[6] == 1
    assert size_at_t[7] == 1
    unpadded = ops.padded2list(padded)
    assert unpadded[0].shape == (5, 4)
    assert unpadded[1].shape == (8, 4)
    assert unpadded[2].shape == (2, 4)
@pytest.mark.parametrize("nO,nI", [(1, 2), (2, 2), (100, 200), (9, 6)])
def test_LSTM_init_with_sizes(nO, nI):
    """Initializing with_padded(LSTM) allocates all params with the
    expected shapes (4 gates stacked along the first axis of W/b)."""
    model = with_padded(LSTM(nO, nI)).initialize()
    for node in model.walk():
        # Check no unallocated params.
        assert node.has_param("W") is not None
        assert node.has_param("b") is not None
        assert node.has_param("initial_hiddens") is not None
        assert node.has_param("initial_cells") is not None
    for node in model.walk():
        # Check param sizes.
        if node.has_param("W"):
            W = node.get_param("W")
            assert W.shape == (nO * 4, nO + nI)
        if node.has_param("b"):
            b = node.get_param("b")
            assert b.shape == (nO * 4,)
        if node.has_param("initial_hiddens"):
            initial_hiddens = node.get_param("initial_hiddens")
            assert initial_hiddens.shape == (nO,)
        if node.has_param("initial_cells"):
            initial_cells = node.get_param("initial_cells")
            assert initial_cells.shape == (nO,)
def test_LSTM_fwd_bwd_shapes(nO, nI):
    """Forward/backward through with_padded(LSTM) preserves input shapes.

    BUG FIX: the fixture parameters ``nO``/``nI`` were immediately
    overwritten with hard-coded values (nO = 1, nI = 2) and the input was
    a fixed 3x2 array, so the parametrized sizes were never exercised.
    The input is now built from ``nI`` so every fixture combination runs.
    """
    X = numpy.asarray(numpy.random.uniform(-1.0, 1.0, (3, nI)), dtype="f")
    model = with_padded(LSTM(nO, nI)).initialize(X=[X])
    ys, backprop_ys = model([X], is_train=False)
    dXs = backprop_ys(ys)
    # Gradients w.r.t. the inputs must match the inputs' shape.
    assert numpy.vstack(dXs).shape == numpy.vstack([X]).shape
def test_LSTM_learns():
    """One SGD step on a tiny regression problem must reduce the loss."""
    fix_random_seed(0)
    nO = 2
    nI = 2
    def sgd(key, weights, gradient):
        # Minimal optimizer: plain gradient step, then zero the gradient.
        weights -= 0.001 * gradient
        return weights, gradient * 0
    model = with_padded(LSTM(nO, nI))
    X = [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]]
    Y = [[0.2, 0.2], [0.3, 0.3], [0.4, 0.4]]
    X = [model.ops.asarray(x, dtype="f").reshape((1, -1)) for x in X]
    Y = [model.ops.asarray(y, dtype="f").reshape((1, -1)) for y in Y]
    model = model.initialize(X, Y)
    # Loss before the update.
    Yhs, bp_Yhs = model.begin_update(X)
    loss1 = sum([((yh - y) ** 2).sum() for yh, y in zip(Yhs, Y)])
    # Backprop the squared-error gradient and apply one SGD step.
    Yhs, bp_Yhs = model.begin_update(X)
    dYhs = [yh - y for yh, y in zip(Yhs, Y)]
    dXs = bp_Yhs(dYhs)
    model.finish_update(sgd)
    # Loss after the update.
    Yhs, bp_Yhs = model.begin_update(X)
    dYhs = [yh - y for yh, y in zip(Yhs, Y)]
    dXs = bp_Yhs(dYhs)  # noqa: F841
    loss2 = sum([((yh - y) ** 2).sum() for yh, y in zip(Yhs, Y)])
    assert loss1 > loss2, (loss1, loss2)
@pytest.mark.skip
def test_benchmark_LSTM_fwd():
    """Throughput benchmark for the LSTM forward pass (skipped in CI).

    Builds random variable-length batches and times the forward pass;
    prints samples/second. Not an assertion-based test.
    """
    nO = 128
    nI = 128
    n_batch = 1000
    batch_size = 30
    seq_len = 30
    # Sequence lengths ~ N(30, 10), clipped to at least 1.
    lengths = numpy.random.normal(scale=10, loc=30, size=n_batch * batch_size)
    lengths = numpy.maximum(lengths, 1)
    batches = []
    uniform_lengths = False
    model = with_padded(LSTM(nO, nI)).initialize()
    for batch_lengths in model.ops.minibatch(batch_size, lengths):
        batch_lengths = list(batch_lengths)
        if uniform_lengths:
            # Pad every sequence in the batch to the batch maximum.
            seq_len = max(batch_lengths)
            batch = [
                numpy.asarray(
                    numpy.random.uniform(0.0, 1.0, (int(seq_len), nI)), dtype="f"
                )
                for _ in batch_lengths
            ]
        else:
            batch = [
                numpy.asarray(
                    numpy.random.uniform(0.0, 1.0, (int(seq_len), nI)), dtype="f"
                )
                for seq_len in batch_lengths
            ]
        batches.append(batch)
    start = timeit.default_timer()
    for Xs in batches:
        ys, bp_ys = model.begin_update(list(Xs))
        # _ = bp_ys(ys)
    end = timeit.default_timer()
    n_samples = n_batch * batch_size
    print(
        "--- %i samples in %s seconds (%f samples/s, %.7f s/sample) ---"
        % (n_samples, end - start, n_samples / (end - start), (end - start) / n_samples)
    )
def test_lstm_init():
    """Bidirectional LSTM initializes; dropout is not implemented and raises."""
    model = with_padded(LSTM(2, 2, bi=True)).initialize()
    model.initialize()
    with pytest.raises(NotImplementedError):
        with_padded(LSTM(2, dropout=0.2))
@pytest.mark.skipif(not has_torch, reason="needs PyTorch")
def test_pytorch_lstm_init():
    """A depth-0 PyTorchLSTM degenerates to a padded no-op layer."""
    model = with_padded(PyTorchLSTM(2, 2, depth=0)).initialize()
    assert model.name == "with_padded(noop)"
| StarcoderdataPython |
60493 | <reponame>Hominine720202/BERT-NER-ScienceIE<filename>data/dataprocess.py<gh_stars>1-10
import pandas as pd
import numpy as np
import os,sys
from pytorch_pretrained_bert import BertTokenizer
import stanfordnlp
import nltk
from nltk.tokenize import sent_tokenize
from nltk.tokenize import word_tokenize
nlp = stanfordnlp.Pipeline()
tokenizer = BertTokenizer.from_pretrained("../scibert/", do_lower_case=True)
"""
def sent_tokenize(sentence):
doc = nlp(sentence)
result = []
for i, sentence in enumerate(doc.sentences):
sent = ' '.join(word.text for word in sentence.words)
result.append(sent)
del doc
return result
"""
def word_tokenize(sentence):
    """Tokenize *sentence* with the module-level BERT wordpiece tokenizer."""
    return tokenizer.tokenize(sentence)
# Greedy interval scheduling: remove the minimum number of annotation spans
# so that the remaining spans do not overlap.
def eraseOverlapIntervals(intervals):
    """Return a maximal subset of pairwise non-overlapping spans.

    Each entry is ``[label, start, end]``. Classic greedy solution: sort
    by interval END, keep an interval whenever its start is at or after
    the end of the last kept one. Duplicate spans are removed from the
    result, matching the original behavior. Returns 0 for empty input
    (kept for backward compatibility).

    BUG FIX: the original seeded the result with the first element of the
    *unsorted* list and sorted by start instead of end, which could keep
    an overlapping span and skip the true earliest-ending one.
    """
    if not intervals:
        return 0
    # Sort by end coordinate (index 2), not start.
    ordered = sorted(intervals, key=lambda span: span[2])
    kept = [ordered[0]]
    last_end = ordered[0][2]
    for span in ordered[1:]:
        if span[1] < last_end:
            continue  # overlaps the last kept span: drop it
        kept.append(span)
        last_end = span[2]
    # Preserve the original de-duplication of identical spans.
    unique = []
    for span in kept:
        if span not in unique:
            unique.append(span)
    return unique
# Given a sentence, an annotation span and its type, produce the tag
# sequence for the tokenized sentence.
def BIOextract(sentence, ann, start=0):
    """Tag the wordpieces of *sentence* for one annotation span.

    ``ann`` is ``[entity_type, char_start, char_end]`` with offsets
    absolute in the whole abstract; *start* is the sentence's character
    offset, so ``ann[1]-start : ann[2]-start`` slices the span out of
    *sentence*. Returns a BIOES-style tag list (S- for single-token
    spans, otherwise B-/I-/E-); every token-window that exactly matches
    the annotated span is tagged.
    """
    words = word_tokenize(sentence)
    selectedWord = word_tokenize(sentence[ann[1]-start:ann[2]-start])
    label = ['O' for idx in range(len(words))]
    ite, rte = 0, 0  # leftovers used only by the commented-out version below
    lenSelect = len(selectedWord)
    for idx in range(len(words)):
        # Slide a window of the span's length over the token sequence.
        tempWords = words[idx:idx+len(selectedWord)]
        if tempWords == selectedWord:
            if len(selectedWord) == 1:
                label[idx] = 'S-' + ann[0]
            else:
                label[idx] = 'B-' + ann[0]
                for lIdx in range(idx+1, idx+lenSelect-1):
                    label[lIdx] = 'I-' + ann[0]
                label[idx+lenSelect-1] = 'E-' + ann[0]
    # Earlier token-by-token implementation, kept for reference:
    """
    for w in words:
        rte = 0
        for t in selectedWord:
            if w == t:
                if rte == 0:
                    label[ite] = 'B-' + ann[0]
                else:
                    label[ite] = 'I-' + ann[0]
                continue
            rte = rte + 1
        ite = ite + 1
    """
    return label
# Merge two tag sequences for the same sentence.
def combineLabel(labela, labelb):
    """Copy tags from *labelb* into the 'O' slots of *labela*.

    Mutates *labela* in place and returns it; non-'O' tags already in
    *labela* take precedence.
    """
    for position in range(len(labela)):
        if labela[position] == 'O' != labelb[position]:
            labela[position] = labelb[position]
    return labela
# Process one abstract: split into sentences and build per-sentence tags.
def abs2sent(idx, folder):
    """Convert one annotated abstract into tagged sentences.

    Reads ``<folder>/<idx>.txt`` (the abstract) and ``<folder>/<idx>.ann``
    (brat-style annotations), sentence-tokenizes the abstract, keeps a
    non-overlapping subset of the entity spans, and tags each sentence's
    wordpieces. Returns ``(labels, sentences)`` — parallel lists of tag
    sequences and sentence strings.
    """
    absTxt = open(folder+"/"+idx+'.txt', 'r')
    abstract = absTxt.read()
    sentences = sent_tokenize(abstract)
    annFile = open(folder+"/"+idx+'.ann', 'r')
    annLines = annFile.readlines()
    anns = []
    label = []
    sentLen = []  # [start, end] character span of each sentence
    producedSentences = []
    # Character interval of every sentence within the abstract
    # (sentences are assumed separated by one character).
    for sentence in sentences:
        if len(sentLen) != 0:
            sentLen.append([sentLen[-1][1]+1, sentLen[-1][1]+1+len(sentence)])
        else:
            sentLen.append([0, len(sentence)])
    # Parse the annotation lines; only "T..." entity mentions are used.
    for line in annLines:
        lineSpl = line.split("\t")
        if lineSpl[0].find("T") != -1:
            ann = lineSpl[1].split(" ")
            try:
                ann[1] = int(ann[1])
                ann[2] = int(ann[2])
            except:
                # Malformed offsets (e.g. discontinuous spans): report and stop.
                print(idx, line)
                break
            anns.append(ann)
    # Keep a non-overlapping subset of the spans.
    anns = eraseOverlapIntervals(anns)
    labels = []
    # Tag each sentence, merging the tags of all spans that fall inside it.
    out = False
    for sentIdx in range(len(sentences)):
        tmpAnn = None
        if sentIdx != 0:
            start = start + len(sentences[sentIdx-1])+1
        else:
            start = 0
        for ann in anns:
            if ann[1] >= sentLen[sentIdx][0] and ann[2] <= sentLen[sentIdx][1]:
                if tmpAnn == None:
                    tmpAnn = BIOextract(
                        sentences[sentIdx],
                        ann,
                        start=start
                    )
                else:
                    tmpAnn = combineLabel(
                        BIOextract(
                            sentences[sentIdx],
                            ann,
                            start=start
                        ),
                        tmpAnn
                    )
        if tmpAnn == None:
            # No annotation in this sentence: all tokens are 'O'.
            tmpAnn = ['O' for idx in range(len(word_tokenize(sentences[sentIdx])))]
        labels.append(tmpAnn)
        producedSentences.append(sentences[sentIdx])
    return labels, producedSentences
# Process a group of abstract files.
def tF(arr, folder):
    """Run abs2sent over every file id in *arr* and concatenate the results.

    Returns ``(labels, sentences)`` accumulated across all files.
    """
    labels = []
    sentences = []
    for file_id in arr:
        file_labels, file_sentences = abs2sent(file_id, folder)
        labels.extend(file_labels)
        sentences.extend(file_sentences)
    return labels, sentences
# Write tags.txt: one whitespace-separated tag sequence per line.
def writeLabel(arr, filename):
    """Write *arr* (a list of tag sequences) to *filename*.

    Each sequence becomes one line; every tag is followed by a single
    space (matching the original trailing-space format).

    FIX: uses a context manager so the file is closed even if a write
    fails (the original left the handle open on error).
    """
    with open(filename, "w", encoding="utf-8") as f:
        for line in arr:
            f.write("".join(tag + " " for tag in line) + "\n")
# Write sentences.txt: one sentence per line.
def writeTxt(arr, filename):
    """Write each string in *arr* to *filename*, one per line.

    FIX: uses a context manager so the file is closed even if a write
    fails (the original left the handle open on error).
    """
    with open(filename, "w", encoding="utf-8") as f:
        for line in arr:
            f.write(line + "\n")
import random
def writeData(labels, txts, name):
    """Write the tag and sentence files for one split under selfdata/<name>/.

    The directory is assumed to exist already.
    """
    writeLabel(labels, "selfdata/"+name+"/tags.txt")
    writeTxt(txts, "selfdata/"+name+"/sentences.txt")
# Process every annotated abstract in *folder*.
def traveFiles(folder):
    """Collect all ``.txt`` file ids in *folder*, shuffle them, convert
    them to tagged sentences and write tags.txt / sentences.txt for the
    split named after *folder*. Prints the number of sentences produced.
    """
    result = []
    filenames = os.listdir(folder)
    for item in filenames:
        if item.find('.txt') != -1:
            result.append(item[:-4])
    # Shuffle so the output order is not tied to directory listing order.
    random.shuffle(result)
    labels, txt = tF(result, folder)
    writeData(labels, txt, folder)
    print(len(labels))
def makeFile(folder):
    """Build the tags/sentences output files for the split in *folder*.

    FIX: the previous version also collected ``anns``/``txts`` lists of
    the .ann/.txt filenames but never used them; that dead code is
    removed — the whole job is delegated to traveFiles.
    """
    traveFiles(folder)
# Build all three dataset splits.
makeFile("train")
makeFile("val")
makeFile("test")
| StarcoderdataPython |
3524719 | from LinkedList import *
def test_linked_list_basic():
    """Smoke test: append three values, then walk and print the list."""
    linked_list = LinkedList()
    linked_list.append(1)
    linked_list.append(2)
    linked_list.append(4)
    node = linked_list.head
    while node:
        print(node.value)
        node = node.next
test_linked_list_basic()
def test_linked_list_to_python_list():
    """to_list() must return the appended values in insertion order."""
    linked_list = LinkedList()
    linked_list.append(3)
    linked_list.append(2)
    linked_list.append(-1)
    linked_list.append(0.2)
    print("Pass" if (linked_list.to_list() == [3, 2, -1, 0.2]) else "Fail")
test_linked_list_to_python_list()
def test_comprehensive():
    """Exercise prepend/append/search/remove/pop/insert/size together."""
    # Test prepend
    linked_list = LinkedList()
    linked_list.prepend(1)
    assert linked_list.to_list() == [1], f"list contents: {linked_list.to_list()}"
    linked_list.append(3)
    linked_list.prepend(2)
    assert linked_list.to_list() == [2, 1, 3], f"list contents: {linked_list.to_list()}"
    # Test append
    linked_list = LinkedList()
    linked_list.append(1)
    assert linked_list.to_list() == [1], f"list contents: {linked_list.to_list()}"
    linked_list.append(3)
    assert linked_list.to_list() == [1, 3], f"list contents: {linked_list.to_list()}"
    # Test search
    linked_list.prepend(2)
    linked_list.prepend(1)
    linked_list.append(4)
    linked_list.append(3)
    assert linked_list.search(1).value == 1, f"list contents: {linked_list.to_list()}"
    assert linked_list.search(4).value == 4, f"list contents: {linked_list.to_list()}"
    # Test remove (removes the first matching occurrence only)
    linked_list.remove(1)
    assert linked_list.to_list() == [2, 1, 3, 4, 3], f"list contents: {linked_list.to_list()}"
    linked_list.remove(3)
    assert linked_list.to_list() == [2, 1, 4, 3], f"list contents: {linked_list.to_list()}"
    linked_list.remove(3)
    assert linked_list.to_list() == [2, 1, 4], f"list contents: {linked_list.to_list()}"
    # Test pop (removes and returns the head value)
    value = linked_list.pop()
    assert value == 2, f"list contents: {linked_list.to_list()}"
    assert linked_list.head.value == 1, f"list contents: {linked_list.to_list()}"
    # Test insert (positions past the end append)
    linked_list.insert(5, 0)
    assert linked_list.to_list() == [5, 1, 4], f"list contents: {linked_list.to_list()}"
    linked_list.insert(2, 1)
    assert linked_list.to_list() == [5, 2, 1, 4], f"list contents: {linked_list.to_list()}"
    linked_list.insert(3, 6)
    assert linked_list.to_list() == [5, 2, 1, 4, 3], f"list contents: {linked_list.to_list()}"
    # Test size
    assert linked_list.size() == 5, f"list contents: {linked_list.to_list()}"
test_comprehensive()
def test_ls_reversed():
    """reversed() must flip the order, and reversing twice must round-trip."""
    llist = LinkedList()
    for value in [4, 2, 5, 1, -3, 0]:
        llist.append(value)
    flipped = llist.reversed()
    is_correct = list(flipped) == list([0, -3, 1, 5, 2, 4]) and list(llist) == list(flipped.reversed())
    print("Pass" if is_correct else "Fail")
test_ls_reversed()
| StarcoderdataPython |
6452318 | from anndata import AnnData
import GEOparse
import gzip
import numpy as np
import os
from scanorama import *
import scanpy as sc
from scipy.sparse import vstack
from sklearn.preprocessing import normalize
from process import process, load_names, merge_datasets
from utils import *
NAMESPACE = 'masuda_mouse_microglia'  # prefix for output files and cell-type labels
DIMRED = 100       # target dimensionality of the reduced representation
DR_METHOD = 'svd'  # dimensionality-reduction tag used in cache filenames
# Count matrices to load (paths without their .tsv.gz extension).
data_names = [
    'data/microglia/masuda2019/mouse/GSE120744_counts',
]
def load_meta(datasets):
    """Load per-cell metadata from the GEO SOFT files and align it with
    the count matrices.

    Returns ``(qc_idx, cell_types, ages, injured)``: indices of cells
    with usable metadata, a label string per cell, a numeric age per
    cell, and an injury-condition string per cell (as numpy arrays).
    """
    n_valid = 0
    qc_idx = []
    ages = []
    injured = []
    cell_types = []
    # Load metadata for each cell.
    id_to_meta = {}
    soft_fnames = [
        'data/microglia/masuda2019/mouse/GSE120744_family.soft.gz',
    ]
    for fname in soft_fnames:
        gsms = GEOparse.get_GEO(filepath=fname, silent=True).gsms
        for geo_id in gsms:
            cell_id = gsms[geo_id].metadata['title'][0]
            # Parse "key: value" characteristic strings into a dict.
            meta = {
                attr.split(':')[0].strip(): attr.split(':')[1].strip()
                for attr in gsms[geo_id].metadata['characteristics_ch1']
            }
            id_to_meta[cell_id] = meta
    # Map cell ids to important attributes.
    for i in range(len(datasets)):
        with gzip.open(data_names[i] + '.tsv.gz') as f:
            # Header row of the count matrix holds the cell ids.
            cell_ids = f.readline().decode('utf-8').rstrip().split()[1:]
            for cell_id in cell_ids:
                meta = id_to_meta[cell_id]
                age_str = meta['age']
                # Normalize the heterogeneous age annotations; cells with
                # unrecognized ages are dropped entirely (continue).
                if age_str == '16 weeks':
                    age = 16 * 7
                    age_str = 'P{}'.format(age)
                elif age_str == 'embryonal':  # Sic.
                    age = 16.5
                    age_str = 'E{}'.format(age)
                elif age_str == '03_w':
                    age = 3 * 7
                    age_str = 'P{}'.format(age)
                elif age_str == '16_w':
                    age = 16 * 7
                    age_str = 'P{}'.format(age)
                else:
                    continue
                if age_str.startswith('P'):
                    # Rescale postnatal ages into the [19, 22] range.
                    min_age = 19.
                    max_age = 60.
                    age = 19 + ((age - min_age) / (max_age - min_age) * 3)
                ages.append(age)
                # Injury condition, parsed from the free-text treatment field.
                if 'treatment' in meta:
                    if 'demyelination' in meta['treatment']:
                        inj = 'demyelination'
                    elif 'remyelination' in meta['treatment']:
                        inj = 'remyelination'
                    elif 'Facial_nerve_axotomy' in meta['treatment']:
                        inj = 'fxn'
                    else:
                        inj = 'none'
                else:
                    inj = 'none'
                injured.append(inj)
                cell_types.append('{}_{}'.format(age_str, inj))
                qc_idx.append(n_valid)
                n_valid += 1
    return qc_idx, np.array(cell_types), np.array(ages), np.array(injured)
# Script body: load counts + metadata, QC-filter, reduce dimensionality
# (cached on disk) and assemble the final AnnData object.
datasets, genes_list, n_cells = load_names(data_names, norm=False)
qc_idx, cell_types, ages, injured = load_meta(datasets)
datasets, genes = merge_datasets(datasets, genes_list)
X = vstack(datasets)
# Keep only cells with usable metadata.
X = X[qc_idx]
# QC: require at least 500 expressed genes per cell.
qc_idx = [i for i, s in enumerate(np.sum(X != 0, axis=1))
          if s >= 500]
tprint('Found {} valid cells among all datasets'.format(len(qc_idx)))
X = X[qc_idx]
cell_types = cell_types[qc_idx]
ages = ages[qc_idx]
injured = injured[qc_idx]
# Compute (or reload a cached) low-dimensional embedding.
if not os.path.isfile('data/dimred/{}_{}.txt'
                      .format(DR_METHOD, NAMESPACE)):
    mkdir_p('data/dimred')
    tprint('Dimension reduction with {}...'.format(DR_METHOD))
    X_dimred = reduce_dimensionality(normalize(X), dim_red_k=DIMRED)
    tprint('Dimensionality = {}'.format(X_dimred.shape[1]))
    np.savetxt('data/dimred/{}_{}.txt'
               .format(DR_METHOD, NAMESPACE), X_dimred)
else:
    X_dimred = np.loadtxt('data/dimred/{}_{}.txt'
                          .format(DR_METHOD, NAMESPACE))
# Package everything as an AnnData with per-cell annotations.
dataset = AnnData(X)
dataset.var['gene_symbols'] = genes
dataset.obs['cell_types'] = [NAMESPACE + '_' + l for l in cell_types]
dataset.obs['ages'] = ages
dataset.obs['injured'] = injured
datasets = [dataset]
namespaces = [NAMESPACE]
| StarcoderdataPython |
51094 | <gh_stars>0
"""
Classes to manage job runs.
"""
from collections import deque
import logging
import itertools
from tron import node, command_context, event
from tron.core.actionrun import ActionRun, ActionRunFactory
from tron.serialize import filehandler
from tron.utils import timeutils, proxy
from tron.utils.observer import Observable, Observer
log = logging.getLogger(__name__)
class Error(Exception):
    """Base exception for job-run errors in this module."""
    pass
class JobRun(Observable, Observer):
    """A JobRun is an execution of a Job. It has a list of ActionRuns and is
    responsible for starting ActionRuns in the correct order and managing their
    dependencies.

    It observes its ActionRuns (to drive the dependency graph forward) and
    is itself observed by the owning Job (for completion and state-save
    notifications). NOTE: this is Python-2 era code (``.next()``,
    ``itertools.ifilter`` in the sibling collection class).
    """
    # Events emitted to observers (the owning Job / state serializer).
    NOTIFY_DONE = 'notify_done'
    NOTIFY_STATE_CHANGED = 'notify_state_changed'
    context_class = command_context.JobRunContext
    # TODO: use config object
    def __init__(self, job_name, run_num, run_time, node, output_path=None,
                 base_context=None, action_runs=None, action_graph=None,
                 manual=None):
        super(JobRun, self).__init__()
        self.job_name = job_name
        self.run_num = run_num
        self.run_time = run_time
        self.node = node
        self.output_path = output_path or filehandler.OutputPath()
        self.output_path.append(self.id)
        self.action_runs_proxy = None
        self._action_runs = None
        self.action_graph = action_graph
        self.manual = manual
        self.event = event.get_recorder(self.id)
        self.event.ok('created')
        if action_runs:
            self.action_runs = action_runs
        self.context = command_context.build_context(self, base_context)
    @property
    def id(self):
        # Globally unique identifier: "<job_name>.<run_num>".
        return '%s.%s' % (self.job_name, self.run_num)
    @classmethod
    def for_job(cls, job, run_num, run_time, node, manual):
        """Create a JobRun for a job."""
        run = cls(job.get_name(), run_num, run_time, node,
                  job.output_path.clone(),
                  job.context,
                  action_graph=job.action_graph,
                  manual=manual)
        action_runs = ActionRunFactory.build_action_run_collection(run, job.action_runner)
        run.action_runs = action_runs
        return run
    @classmethod
    def from_state(cls, state_data, action_graph, output_path, context,
                   run_node):
        """Restore a JobRun from a serialized state."""
        pool_repo = node.NodePoolRepository.get_instance()
        # Prefer the node recorded in the state; fall back to run_node.
        run_node = pool_repo.get_node(state_data.get('node_name'), run_node)
        job_name = state_data['job_name']
        job_run = cls(
            job_name,
            state_data['run_num'],
            state_data['run_time'],
            run_node,
            action_graph=action_graph,
            manual=state_data.get('manual', False),
            output_path=output_path,
            base_context=context
        )
        action_runs = ActionRunFactory.action_run_collection_from_state(
            job_run, state_data['runs'], state_data['cleanup_run'])
        job_run.action_runs = action_runs
        return job_run
    @property
    def state_data(self):
        """This data is used to serialize the state of this job run."""
        return {
            'job_name': self.job_name,
            'run_num': self.run_num,
            'run_time': self.run_time,
            'node_name': self.node.get_name() if self.node else None,
            'runs': self.action_runs.state_data,
            'cleanup_run': self.action_runs.cleanup_action_state_data,
            'manual': self.manual,
        }
    def _get_action_runs(self):
        return self._action_runs
    def _set_action_runs(self, run_collection):
        """Store action runs and register callbacks."""
        # Write-once: replacing the collection would orphan observers.
        if self._action_runs is not None:
            raise ValueError("ActionRunCollection already set on %s" % self)
        self._action_runs = run_collection
        for action_run in run_collection.action_runs_with_cleanup:
            self.watch(action_run)
        # Delegate these attributes/methods straight to the collection.
        self.action_runs_proxy = proxy.AttributeProxy(
            self.action_runs,
            [
                'queue',
                'cancel',
                'success',
                'fail',
                'is_cancelled',
                'is_unknown',
                'is_failed',
                'is_succeeded',
                'is_running',
                'is_starting',
                'is_queued',
                'is_scheduled',
                'is_skipped',
                'is_starting',
                'start_time',
                'end_time'
            ])
    def _del_action_runs(self):
        del self._action_runs
    action_runs = property(_get_action_runs, _set_action_runs, _del_action_runs)
    def seconds_until_run_time(self):
        """Seconds from now until the scheduled run time (never negative)."""
        run_time = self.run_time
        now = timeutils.current_time()
        # Compare in the run_time's timezone when it is tz-aware.
        if run_time.tzinfo:
            now = run_time.tzinfo.localize(now)
        return max(0, timeutils.delta_total_seconds(run_time - now))
    def start(self):
        """Start this JobRun as a scheduled run (not a manual run)."""
        self.event.info('start')
        if self.action_runs.has_startable_action_runs and self._do_start():
            return True
    def _do_start(self):
        log.info("Starting JobRun %s", self.id)
        self.action_runs.ready()
        if any(self._start_action_runs()):
            self.event.ok('started')
            return True
    def stop(self):
        if self.action_runs.is_done:
            return
        self.action_runs.stop()
    def _start_action_runs(self):
        """Start all startable action runs, and return any that were
        successfully started.
        """
        started_actions = []
        for action_run in self.action_runs.get_startable_action_runs():
            if action_run.start():
                started_actions.append(action_run)
        return started_actions
    def handle_action_run_state_change(self, action_run, _):
        """Handle events triggered by JobRuns."""
        # propagate all state changes (from action runs) up to state serializer
        self.notify(self.NOTIFY_STATE_CHANGED)
        if not action_run.is_done:
            return
        # An action finished: try to start whatever it unblocked.
        if not action_run.is_broken and any(self._start_action_runs()):
            log.info("Action runs started for %s." % self)
            return
        if self.action_runs.is_active or self.action_runs.is_scheduled:
            log.info("%s still has running or scheduled actions." % self)
            return
        # If we can't make any progress, we're done
        cleanup_run = self.action_runs.cleanup_action_run
        if not cleanup_run or cleanup_run.is_done:
            return self.finalize()
        # TODO: remove in (0.6), start() no longer raises an exception
        # When a job is being disabled, or the daemon is being shut down a bunch
        # of ActionRuns will be cancelled/failed. This would cause cleanup
        # action to be triggered more then once. Guard against that.
        if cleanup_run.check_state('start'):
            cleanup_run.start()
    # Observer protocol entry point.
    handler = handle_action_run_state_change
    def finalize(self):
        """The last step of a JobRun. Called when the cleanup action
        completes or if the job has no cleanup action, called once all action
        runs have reached a 'done' state.

        Triggers an event to notifies the Job that is is done.
        """
        if self.action_runs.is_failed:
            self.event.critical('failed')
        else:
            self.event.ok('succeeded')
        # Notify Job that this JobRun is complete
        self.notify(self.NOTIFY_DONE)
    def cleanup(self):
        """Cleanup any resources used by this JobRun."""
        self.event.notice('removed')
        event.EventManager.get_instance().remove(str(self))
        self.clear_observers()
        self.action_runs.cleanup()
        # Break reference cycles and remove on-disk output.
        self.node = None
        self.action_graph = None
        self._action_runs = None
        self.output_path.delete()
    def get_action_run(self, action_name):
        return self.action_runs.get(action_name)
    @property
    def state(self):
        """The overall state of this job run. Based on the state of its actions.
        """
        if not self.action_runs:
            log.info("%s has no state" % self)
            return ActionRun.STATE_UNKNOWN
        # Precedence order: complete > cancelled > running > starting >
        # failed > scheduled > queued.
        if self.action_runs.is_complete:
            return ActionRun.STATE_SUCCEEDED
        if self.action_runs.is_cancelled:
            return ActionRun.STATE_CANCELLED
        if self.action_runs.is_running:
            return ActionRun.STATE_RUNNING
        if self.action_runs.is_starting:
            return ActionRun.STATE_STARTING
        if self.action_runs.is_failed:
            return ActionRun.STATE_FAILED
        if self.action_runs.is_scheduled:
            return ActionRun.STATE_SCHEDULED
        if self.action_runs.is_queued:
            return ActionRun.STATE_QUEUED
        log.info("%s in an unknown state: %s" % (self, self.action_runs))
        return ActionRun.STATE_UNKNOWN
    def __getattr__(self, name):
        # Unknown attributes fall through to the action-run collection proxy.
        if self.action_runs_proxy:
            return self.action_runs_proxy.perform(name)
        raise AttributeError(name)
    def __str__(self):
        return "JobRun:%s" % self.id
class JobRunCollection(object):
"""A JobRunCollection is a deque of JobRun objects. Responsible for
ordering and logic related to a group of JobRuns which should all be runs
for the same Job.
A JobRunCollection is created in two stages. First it's populated from a
configuration object, and second its state is loaded from a serialized
state dict.
Runs in a JobRunCollection should always remain sorted by their run_num.
"""
def __init__(self, run_limit):
self.run_limit = run_limit
self.runs = deque()
@classmethod
def from_config(cls, job_config):
"""Factory method for creating a JobRunCollection from a config."""
return cls(job_config.run_limit)
def restore_state(self, state_data, action_graph, output_path, context,
node_pool):
"""Apply state to all jobs from the state dict."""
if self.runs:
msg = "State can not be restored to a collection with runs."
raise ValueError(msg)
restored_runs = [
JobRun.from_state(run_state, action_graph, output_path.clone(),
context, node_pool.next())
for run_state in state_data
]
self.runs.extend(restored_runs)
return restored_runs
def build_new_run(self, job, run_time, node, manual=False):
"""Create a new run for the job, add it to the runs list,
and return it.
"""
run_num = self.next_run_num()
log.info("Building JobRun %s for %s on %s at %s" %
(run_num, job, node, run_time))
run = JobRun.for_job(job, run_num, run_time, node, manual)
self.runs.appendleft(run)
self.remove_old_runs()
return run
def cancel_pending(self):
"""Find any queued or scheduled runs and cancel them."""
for pending in self.get_pending():
pending.cancel()
def remove_pending(self):
"""Remove pending runs from the run list."""
for pending in list(self.get_pending()):
pending.cleanup()
self.runs.remove(pending)
def _get_runs_using(self, func, reverse=False):
"""Filter runs using func()."""
job_runs = self.runs if not reverse else reversed(self.runs)
return itertools.ifilter(func, job_runs)
def _get_run_using(self, func, reverse=False):
"""Find the first run (from most recent to least recent), where func()
returns true. func() should be a callable which takes a single
argument (a JobRun), and return True or False.
"""
try:
return self._get_runs_using(func, reverse).next()
except StopIteration:
return None
def _filter_by_state(self, state):
return lambda r: r.state == state
def get_run_by_state(self, state):
"""Returns the most recent run which matches the state."""
return self._get_run_using(self._filter_by_state(state))
def get_run_by_num(self, num):
"""Return a the run with run number which matches num."""
return self._get_run_using(lambda r: r.run_num == num)
def get_run_by_index(self, index):
"""Return the job run at index. Jobs are indexed from oldest to newest.
"""
try:
return self.runs[index * -1 - 1]
except IndexError:
return None
def get_run_by_state_short_name(self, short_name):
"""Returns the most recent run which matches the state short name."""
return self._get_run_using(lambda r: r.state.short_name == short_name)
def get_newest(self, include_manual=True):
"""Returns the most recently created JobRun."""
func = lambda r: True if include_manual else not r.manual
return self._get_run_using(func)
def get_pending(self):
"""Return the job runs that are queued or scheduled."""
return self._get_runs_using(lambda r: r.is_scheduled or r.is_queued)
@property
def has_pending(self):
return any(self.get_pending())
def get_active(self, node=None):
if node:
func = lambda r: (r.is_running or r.is_starting) and r.node == node
else:
func = lambda r: r.is_running or r.is_starting
return self._get_runs_using(func)
def get_first_queued(self, node=None):
state = ActionRun.STATE_QUEUED
if node:
queued_func = lambda r: r.state == state and r.node == node
else:
queued_func = self._filter_by_state(state)
return self._get_run_using(queued_func, reverse=True)
def get_scheduled(self):
state = ActionRun.STATE_SCHEDULED
return self._get_runs_using(self._filter_by_state(state))
def get_next_to_finish(self, node=None):
"""Return the most recent run which is either running or scheduled. If
node is not None, then only looks for runs on that node.
"""
def compare(run):
if node and run.node != node:
return False
if run.is_running or run.is_scheduled:
return run
return self._get_run_using(compare)
def next_run_num(self):
"""Return the next run number to use."""
if not self.runs:
return 0
return max(r.run_num for r in self.runs) + 1
def remove_old_runs(self):
"""Remove old runs to reduce the number of completed runs
to within RUN_LIMIT.
"""
while len(self.runs) > self.run_limit:
run = self.runs.pop()
run.cleanup()
def get_action_runs(self, action_name):
return [job_run.get_action_run(action_name) for job_run in self]
@property
def state_data(self):
"""Return the state data to serialize."""
return [r.state_data for r in self.runs]
@property
def last_success(self):
return self.get_run_by_state(ActionRun.STATE_SUCCEEDED)
@property
def next_run(self):
return self.get_run_by_state(ActionRun.STATE_SCHEDULED)
    def __iter__(self):
        """Iterate over runs, most recent first (the order of ``self.runs``)."""
        return iter(self.runs)
def __str__(self):
return "%s[%s]" % (
type(self).__name__,
', '.join("%s(%s)" % (r.run_num, r.state) for r in self.runs)
)
| StarcoderdataPython |
8005204 | <reponame>TheCamusean/sds
import autograd.numpy as np
import autograd.numpy.random as npr
from autograd.scipy.special import logsumexp
from sds.initial import CategoricalInitState
from sds.transitions import StationaryTransition
from sds.observations import GaussianObservation
from sds.utils import ensure_args_are_viable_lists
from sds.cython.hmm_cy import forward_cy, backward_cy
from autograd.tracer import getval
to_c = lambda arr: np.copy(getval(arr), 'C') if not arr.flags['C_CONTIGUOUS'] else getval(arr)
class HMM:
    """Hidden Markov model with (conditionally) Gaussian observations.

    Couples a categorical initial-state distribution, a stationary
    transition model and Gaussian observations, and provides EM
    training, forward/backward inference, Viterbi decoding, smoothing,
    filtering, sampling and multi-step forecasting.
    """

    def __init__(self, nb_states, dm_obs, dm_act=0,
                 init_state_prior=None, trans_prior=None, obs_prior=None,
                 init_state_kwargs=None, trans_kwargs=None, obs_kwargs=None):
        """Build the three component distributions.

        :param nb_states: number of discrete latent states
        :param dm_obs: observation dimensionality
        :param dm_act: action (control) dimensionality, 0 when unused
        :param init_state_prior, trans_prior, obs_prior: prior
            hyper-parameter dicts forwarded to each component
        :param init_state_kwargs, trans_kwargs, obs_kwargs: extra
            constructor kwargs forwarded to each component

        The dict arguments default to None (not ``{}``) to avoid the
        shared-mutable-default pitfall; None is normalized to ``{}``.
        """
        self.nb_states = nb_states
        self.dm_obs = dm_obs
        self.dm_act = dm_act

        init_state_prior = {} if init_state_prior is None else init_state_prior
        trans_prior = {} if trans_prior is None else trans_prior
        obs_prior = {} if obs_prior is None else obs_prior
        init_state_kwargs = {} if init_state_kwargs is None else init_state_kwargs
        trans_kwargs = {} if trans_kwargs is None else trans_kwargs
        obs_kwargs = {} if obs_kwargs is None else obs_kwargs

        self.init_state = CategoricalInitState(self.nb_states, prior=init_state_prior, **init_state_kwargs)
        self.transitions = StationaryTransition(self.nb_states, prior=trans_prior, **trans_kwargs)
        self.observations = GaussianObservation(self.nb_states, self.dm_obs, self.dm_act,
                                                prior=obs_prior, **obs_kwargs)

    @property
    def params(self):
        """Tuple of (init_state, transitions, observations) parameters."""
        return self.init_state.params, \
               self.transitions.params, \
               self.observations.params

    @params.setter
    def params(self, value):
        """Set the parameters of all three components from a 3-tuple."""
        self.init_state.params = value[0]
        self.transitions.params = value[1]
        self.observations.params = value[2]

    @ensure_args_are_viable_lists
    def initialize(self, obs, act=None, **kwargs):
        """Data-driven initialization of all component distributions."""
        self.init_state.initialize(obs, act)
        self.transitions.initialize(obs, act)
        self.observations.initialize(obs, act)

    def permute(self, perm):
        """Relabel the discrete states according to permutation ``perm``."""
        self.init_state.permute(perm)
        self.transitions.permute(perm)
        self.observations.permute(perm)

    def log_priors(self):
        """Return the sum of the log-prior terms of all components."""
        logprior = 0.0
        logprior += self.init_state.log_prior()
        logprior += self.transitions.log_prior()
        logprior += self.observations.log_prior()
        return logprior

    @ensure_args_are_viable_lists
    def log_likelihoods(self, obs, act=None):
        """Return per-sequence log-likelihood terms.

        :return: [loginit, logtrans, logobs] where logtrans and logobs
            are lists with one array per sequence.
        """
        loginit = self.init_state.log_init()
        logtrans = self.transitions.log_transition(obs, act)
        logobs = self.observations.log_likelihood(obs, act)
        return [loginit, logtrans, logobs]

    def log_norm(self, obs, act=None):
        """Marginal data log-likelihood, summed over all sequences."""
        loglikhds = self.log_likelihoods(obs, act)
        alpha = self.forward(loglikhds)
        return sum([logsumexp(_alpha[-1, :]) for _alpha in alpha])

    def log_probability(self, obs, act=None):
        """Log-joint: data log-likelihood plus component log-priors."""
        return self.log_norm(obs, act) + self.log_priors()

    def forward(self, loglikhds, cython=True):
        """Forward (alpha) recursion in log-space, per sequence.

        :param cython: use the compiled recursion; the pure-Python
            branch is a readable reference implementation.
        """
        loginit, logtrans, logobs = loglikhds

        alpha = []
        for _logobs, _logtrans in zip(logobs, logtrans):
            T = _logobs.shape[0]
            _alpha = np.zeros((T, self.nb_states))

            if cython:
                # The C kernel requires C-contiguous, non-traced arrays.
                forward_cy(to_c(loginit), to_c(_logtrans), to_c(_logobs), to_c(_alpha))
            else:
                for k in range(self.nb_states):
                    _alpha[0, k] = loginit[k] + _logobs[0, k]

                _aux = np.zeros((self.nb_states,))
                for t in range(1, T):
                    for k in range(self.nb_states):
                        for j in range(self.nb_states):
                            _aux[j] = _alpha[t - 1, j] + _logtrans[t - 1, j, k]
                        _alpha[t, k] = logsumexp(_aux) + _logobs[t, k]

            alpha.append(_alpha)
        return alpha

    def backward(self, loglikhds, cython=True):
        """Backward (beta) recursion in log-space, per sequence."""
        loginit, logtrans, logobs = loglikhds

        beta = []
        for _logobs, _logtrans in zip(logobs, logtrans):
            T = _logobs.shape[0]
            _beta = np.zeros((T, self.nb_states))

            if cython:
                backward_cy(to_c(loginit), to_c(_logtrans), to_c(_logobs), to_c(_beta))
            else:
                for k in range(self.nb_states):
                    _beta[T - 1, k] = 0.0

                _aux = np.zeros((self.nb_states,))
                for t in range(T - 2, -1, -1):
                    for k in range(self.nb_states):
                        for j in range(self.nb_states):
                            _aux[j] = _logtrans[t, k, j] + _beta[t + 1, j] + _logobs[t + 1, j]
                        _beta[t, k] = logsumexp(_aux)

            beta.append(_beta)
        return beta

    def marginals(self, alpha, beta):
        """Posterior state marginals gamma from alpha and beta messages."""
        return [np.exp(_alpha + _beta - logsumexp(_alpha + _beta, axis=1, keepdims=True))
                for _alpha, _beta in zip(alpha, beta)]

    def two_slice(self, loglikhds, alpha, beta):
        """Posterior two-slice (pairwise) marginals zeta, per sequence."""
        loginit, logtrans, logobs = loglikhds

        zeta = []
        for _logobs, _logtrans, _alpha, _beta in zip(logobs, logtrans, alpha, beta):
            _zeta = _alpha[:-1, :, None] + _beta[1:, None, :] +\
                    _logobs[1:, :][:, None, :] + _logtrans
            # Normalize each time slice in a numerically stable way.
            _zeta -= _zeta.max((1, 2))[:, None, None]
            _zeta = np.exp(_zeta)
            _zeta /= _zeta.sum((1, 2))[:, None, None]

            zeta.append(_zeta)
        return zeta

    def viterbi(self, obs, act=None):
        """Most likely state sequence per observation sequence.

        :return: (delta, z) where delta holds the backward max-messages
            and z the decoded state sequences.
        """
        loginit, logtrans, logobs = self.log_likelihoods(obs, act)

        delta = []
        z = []
        for _logobs, _logtrans in zip(logobs, logtrans):
            T = _logobs.shape[0]

            _delta = np.zeros((T, self.nb_states))
            _args = np.zeros((T, self.nb_states), np.int64)
            _z = np.zeros((T, ), np.int64)

            # Backward pass: max-messages and argmax back-pointers.
            for t in range(T - 2, -1, -1):
                _aux = _logtrans[t, :] + _delta[t + 1, :] + _logobs[t + 1, :]
                _delta[t, :] = np.max(_aux, axis=1)
                _args[t + 1, :] = np.argmax(_aux, axis=1)

            # Forward pass: follow the back-pointers.
            _z[0] = np.argmax(loginit + _delta[0, :] + _logobs[0, :], axis=0)
            for t in range(1, T):
                _z[t] = _args[t, _z[t - 1]]

            delta.append(_delta)
            z.append(_z)

        return delta, z

    def estep(self, obs, act=None):
        """E-step: posterior marginals (gamma) and pairwise marginals (zeta)."""
        loglikhds = self.log_likelihoods(obs, act)
        alpha = self.forward(loglikhds)
        beta = self.backward(loglikhds)
        gamma = self.marginals(alpha, beta)
        zeta = self.two_slice(loglikhds, alpha, beta)

        return gamma, zeta

    def mstep(self, gamma, zeta,
              obs, act,
              init_mstep_kwargs,
              trans_mstep_kwargs,
              obs_mstep_kwargs, **kwargs):
        """M-step: update each component given the posterior statistics."""
        self.init_state.mstep([_gamma[0, :] for _gamma in gamma], **init_mstep_kwargs)
        self.transitions.mstep(zeta, obs, act, **trans_mstep_kwargs)
        self.observations.mstep(gamma, obs, act, **obs_mstep_kwargs)

    @ensure_args_are_viable_lists
    def em(self, obs, act=None, nb_iter=50, prec=1e-4, verbose=False,
           init_mstep_kwargs=None, trans_mstep_kwargs=None, obs_mstep_kwargs=None, **kwargs):
        """Fit the model with expectation-maximization.

        Stops after ``nb_iter`` iterations or when the log-probability
        improvement drops below ``prec``.

        :return: list of log-probabilities, one per iteration (including
            the initial model).
        """
        # None defaults avoid shared mutable dicts across calls.
        init_mstep_kwargs = {} if init_mstep_kwargs is None else init_mstep_kwargs
        trans_mstep_kwargs = {} if trans_mstep_kwargs is None else trans_mstep_kwargs
        obs_mstep_kwargs = {} if obs_mstep_kwargs is None else obs_mstep_kwargs

        lls = []

        ll = self.log_probability(obs, act)
        lls.append(ll)
        if verbose:
            print("it=", 0, "ll=", ll)

        last_ll = ll

        it = 1
        while it <= nb_iter:
            gamma, zeta = self.estep(obs, act)
            self.mstep(gamma, zeta, obs, act,
                       init_mstep_kwargs,
                       trans_mstep_kwargs,
                       obs_mstep_kwargs, **kwargs)

            ll = self.log_probability(obs, act)
            lls.append(ll)
            if verbose:
                print("it=", it, "ll=", ll)

            if (ll - last_ll) < prec:
                break
            else:
                last_ll = ll

            it += 1

        return lls

    @ensure_args_are_viable_lists
    def mean_observation(self, obs, act=None):
        """Posterior-smoothed observations for each sequence."""
        loglikhds = self.log_likelihoods(obs, act)
        alpha = self.forward(loglikhds)
        beta = self.backward(loglikhds)
        gamma = self.marginals(alpha, beta)
        return self.observations.smooth(gamma, obs, act)

    @ensure_args_are_viable_lists
    def filter(self, obs, act=None):
        """Filtered state beliefs p(z_t | obs_{1:t}) for each sequence."""
        logliklhds = self.log_likelihoods(obs, act)
        alpha = self.forward(logliklhds)
        belief = [np.exp(_alpha - logsumexp(_alpha, axis=1, keepdims=True)) for _alpha in alpha]
        return belief

    def sample(self, act=None, horizon=None, stoch=True):
        """Generate state/observation rollouts.

        :param act: optional list of action sequences (zeros when None)
        :param horizon: list of rollout lengths, one per sequence
        :param stoch: sample observations stochastically vs. use the mean
        :return: (state, obs) lists of rollouts
        """
        state = []
        obs = []

        for n in range(len(horizon)):
            _act = np.zeros((horizon[n], self.dm_act)) if act is None else act[n]
            _obs = np.zeros((horizon[n], self.dm_obs))
            _state = np.zeros((horizon[n],), np.int64)

            _state[0] = self.init_state.sample()
            _obs[0, :] = self.observations.sample(_state[0], stoch=stoch)
            for t in range(1, horizon[n]):
                _state[t] = self.transitions.sample(_state[t - 1], _obs[t - 1, :], _act[t - 1, :])
                _obs[t, :] = self.observations.sample(_state[t], _obs[t - 1, :], _act[t - 1, :], stoch=stoch)

            state.append(_state)
            obs.append(_obs)

        return state, obs

    def step(self, hist_obs=None, hist_act=None, stoch=True, infer='viterbi'):
        """Advance one step from an observed history.

        Infers the current state ('viterbi' decoding or sampling from
        the filtered belief), then samples the next state/observation.
        """
        if infer == 'viterbi':
            _, _state_seq = self.viterbi(hist_obs, hist_act)
            _state = _state_seq[0][-1]
        else:
            _belief = self.filter(hist_obs, hist_act)
            _state = npr.choice(self.nb_states, p=_belief[0][-1, ...])

        _act = hist_act[-1, :]
        _obs = hist_obs[-1, :]

        nxt_state = self.transitions.sample(_state, _obs, _act)
        nxt_obs = self.observations.sample(nxt_state, _obs, _act, stoch=stoch)

        return nxt_state, nxt_obs

    def forcast(self, hist_obs=None, hist_act=None, nxt_act=None,
                horizon=None, stoch=True, infer='viterbi'):
        """Roll the model forward from observed histories.

        (Name kept for backward compatibility; 'forecast' is meant.)

        :return: (nxt_state, nxt_obs); each trajectory has length
            horizon + 1 where index 0 repeats the last observed step.
        """
        nxt_state = []
        nxt_obs = []

        for n in range(len(horizon)):
            _hist_obs = hist_obs[n]
            _hist_act = hist_act[n]

            _nxt_act = np.zeros((horizon[n], self.dm_act)) if nxt_act is None else nxt_act[n]
            _nxt_obs = np.zeros((horizon[n] + 1, self.dm_obs))
            _nxt_state = np.zeros((horizon[n] + 1,), np.int64)

            if infer == 'viterbi':
                _, _state_seq = self.viterbi(_hist_obs, _hist_act)
                _state = _state_seq[0][-1]
            else:
                _belief = self.filter(_hist_obs, _hist_act)
                _state = npr.choice(self.nb_states, p=_belief[0][-1, ...])

            _nxt_state[0] = _state
            _nxt_obs[0, :] = _hist_obs[-1, ...]

            for t in range(horizon[n]):
                _nxt_state[t + 1] = self.transitions.sample(_nxt_state[t], _nxt_obs[t, :], _nxt_act[t, :])
                _nxt_obs[t + 1, :] = self.observations.sample(_nxt_state[t + 1], _nxt_obs[t, :], _nxt_act[t, :], stoch=stoch)

            nxt_state.append(_nxt_state)
            nxt_obs.append(_nxt_obs)

        return nxt_state, nxt_obs

    @ensure_args_are_viable_lists
    def kstep_mse(self, obs, act, horizon=1, stoch=True, infer='viterbi'):
        """Mean squared error of ``horizon``-step-ahead predictions.

        :return: (mean MSE, mean variance-weighted explained variance)
            averaged over sequences.
        """
        from sklearn.metrics import mean_squared_error, explained_variance_score

        mse, norm_mse = [], []
        for _obs, _act in zip(obs, act):
            _hist_obs, _hist_act, _nxt_act = [], [], []
            _target, _prediction = [], []

            _nb_steps = _obs.shape[0] - horizon
            for t in range(_nb_steps):
                _hist_obs.append(_obs[:t + 1, :])
                _hist_act.append(_act[:t + 1, :])
                _nxt_act.append(_act[t: t + horizon, :])

            _hr = [horizon for _ in range(_nb_steps)]
            _, _obs_hat = self.forcast(hist_obs=_hist_obs, hist_act=_hist_act,
                                       nxt_act=_nxt_act, horizon=_hr,
                                       stoch=stoch, infer=infer)

            for t in range(_nb_steps):
                _target.append(_obs[t + horizon, :])
                _prediction.append(_obs_hat[t][-1, :])

            _target = np.vstack(_target)
            _prediction = np.vstack(_prediction)

            _mse = mean_squared_error(_target, _prediction)
            _norm_mse = explained_variance_score(_target, _prediction, multioutput='variance_weighted')

            mse.append(_mse)
            norm_mse.append(_norm_mse)

        return np.mean(mse), np.mean(norm_mse)
| StarcoderdataPython |
8031489 | # -*- coding: utf-8 -*-
import setuptools
def read(path):
    """Return the full text content of the file at *path*."""
    with open(path) as source:
        return source.read()
version = '3.7.dev0'
long_description = '\n\n'.join([
read('README.rst'),
read('CHANGES.rst'),
])
setuptools.setup(
name='icemac.ab.calendar',
version=version,
description="Calendar feature for icemac.addressbook",
long_description=long_description,
keywords='icemac addressbook calendar event recurring',
author='<NAME>',
author_email='<EMAIL>',
download_url='https://pypi.org/project/icemac.ab.calendar',
url='https://github.com/icemac/icemac.ab.calendar',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Zope3',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Natural Language :: German',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
],
packages=setuptools.find_packages('src'),
package_dir={'': 'src'},
namespace_packages=['icemac', 'icemac.ab'],
include_package_data=True,
zip_safe=False,
install_requires=[
'Pyphen',
'gocept.month >= 1.2',
'grokcore.annotation',
'grokcore.component >= 2.5.1.dev1',
'icemac.ab.locales >= 2.16',
'icemac.addressbook >= 9.5.dev0',
'icemac.recurrence >= 1.3.1.dev0',
'js.classy',
'js.bootstrap4',
'setuptools',
'z3c.form >= 3.3',
'zope.cachedescriptors',
'zope.securitypolicy >= 4.1',
],
extras_require=dict(
test=[
'gocept.testing',
'icemac.addressbook [test]',
'zope.testing >= 3.8.0',
]),
entry_points="""
[fanstatic.libraries]
calendar = icemac.ab.calendar.browser.resource:lib
""",
)
| StarcoderdataPython |
4889878 | from typing import Callable, Iterable, Mapping, Tuple, Union
# A Filter is any callable that renders its argument(s) to a string.
Filter = Callable[..., str]
# Filters may be given either as an iterable of (name, filter) pairs
# or as a mapping from name to filter.
Filters = Union[Iterable[Tuple[str, Filter]], Mapping[str, Filter]]
| StarcoderdataPython |
1622856 | """
Verifies that embedding UAC information into the manifest works.
"""
import TestGyp
from xml.dom.minidom import parseString
# Windows-only test; currently disabled pending win32api.LoadLibrary issues.
test = TestGyp.TestGyp(formats=['msvs', 'ninja'], platforms=['win32'], disable='Need to solve win32api.LoadLibrary problems')
import pywintypes
import win32api
import winerror
# Windows resource-type identifier for manifests embedded in PE binaries.
RT_MANIFEST = 24
class LoadLibrary(object):
  """Context manager for loading and releasing binaries in Windows.
  Yields the handle of the binary loaded."""

  def __init__(self, path):
    """Remember the path of the binary to load; nothing is loaded yet."""
    self._path = path
    self._handle = None

  def __enter__(self):
    """Load the binary and return its module handle."""
    self._handle = win32api.LoadLibrary(self._path)
    return self._handle

  # Parameters renamed from (type, value, traceback) so the builtin
  # `type` is not shadowed; __exit__ is called positionally, so this is
  # interface-compatible.
  def __exit__(self, exc_type, exc_value, exc_traceback):
    """Always free the module handle, even if the body raised."""
    win32api.FreeLibrary(self._handle)
def extract_manifest(path, resource_name):
  """Read the manifest resource embedded in the binary at |path|.

  Returns the manifest as a string, or None if the binary carries no
  manifest with the given resource name.
  """
  with LoadLibrary(path) as handle:
    try:
      return win32api.LoadResource(handle, RT_MANIFEST, resource_name)
    except pywintypes.error as error:
      # Re-raise anything other than "no such resource".
      if error.args[0] != winerror.ERROR_RESOURCE_DATA_NOT_FOUND:
        raise
      return None
# Generate and build the test project that exercises the EnableUAC settings.
CHDIR = 'linker-flags'
test.run_gyp('enable-uac.gyp', chdir=CHDIR)
test.build('enable-uac.gyp', test.ALL, chdir=CHDIR)
# The following binaries must contain a manifest embedded.
test.fail_test(not extract_manifest(test.built_file_path('enable_uac.exe', chdir=CHDIR), 1))
test.fail_test(not extract_manifest(test.built_file_path('enable_uac_no.exe', chdir=CHDIR), 1))
test.fail_test(not extract_manifest(test.built_file_path('enable_uac_admin.exe', chdir=CHDIR), 1))
# Verify that <requestedExecutionLevel level="asInvoker" uiAccess="false" />
# is present.
manifest = parseString(extract_manifest(
    test.built_file_path('enable_uac.exe', chdir=CHDIR), 1))
execution_level = manifest.getElementsByTagName('requestedExecutionLevel')
# Exactly one requestedExecutionLevel element is expected.
test.fail_test(len(execution_level) != 1)
execution_level = execution_level[0].attributes
def _has_key(node, key):
# 'in' doesn't work with the NamedNodeMap interface in Python2,
# but 'has_key' was removed from it in Python3, so we need to
# shim things :(.
if hasattr(node, 'has_key'):
return node.has_key(key)
return key in node
# Default UAC: asInvoker execution level with UI access disabled.
test.fail_test(not (
    _has_key(execution_level, 'level') and
    _has_key(execution_level, 'uiAccess') and
    execution_level['level'].nodeValue == 'asInvoker' and
    execution_level['uiAccess'].nodeValue == 'false'))
# Verify that <requestedExecutionLevel> is not in the manifest.
manifest = parseString(extract_manifest(
    test.built_file_path('enable_uac_no.exe', chdir=CHDIR), 1))
execution_level = manifest.getElementsByTagName('requestedExecutionLevel')
test.fail_test(len(execution_level) != 0)
# Verify that <requestedExecutionLevel level="requireAdministrator"
# uiAccess="true" /> is present.
manifest = parseString(extract_manifest(
    test.built_file_path('enable_uac_admin.exe', chdir=CHDIR), 1))
execution_level = manifest.getElementsByTagName('requestedExecutionLevel')
test.fail_test(len(execution_level) != 1)
execution_level = execution_level[0].attributes
test.fail_test(not (
    _has_key(execution_level, 'level') and
    _has_key(execution_level, 'uiAccess') and
    execution_level['level'].nodeValue == 'requireAdministrator' and
    execution_level['uiAccess'].nodeValue == 'true'))
test.pass_test()
| StarcoderdataPython |
374623 | <filename>setup.py<gh_stars>1-10
#!/usr/bin/env python3
# coding: utf-8
from setuptools import find_packages, setup
from filesystem import version
# Package metadata for the masonite-fs helper library.
setup(
    name='masonite-fs',
    version=version,
    packages=find_packages(),
    license='MIT',
    author='<NAME>',
    author_email='<EMAIL>',
    description='Provides helper filesystem functions to Masonite',
    url='https://github.com/NinjasCL-labs/masonite-fs',
    # Runtime dependencies: the Masonite framework plus PyFilesystem2
    # ('fs') and its scandir backport.
    install_requires=[
        'masonite',
        'fs',
        'scandir'
    ],
    keywords=['filesystem', 'python3', 'masonite'],
    include_package_data=True,
    classifiers=(
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ),
)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.