blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8a880d4a2fac9bcfae35aa08cc8947dfbe299834 | 13f87656c4267c461deaafcc509ecc5a456650f9 | /mysite/main/admin.py | a1321208a561a7a6151084d3dc0c26da72fee64b | [] | no_license | ishan-agarwal/Tutorials-Website | 5b1eab1c0cbf4d888faa6e77ec076532e2d46b85 | d0b6403d6ca53ecf49421aa801311a30f1f8efaf | refs/heads/master | 2022-06-29T06:53:26.817617 | 2020-05-06T03:21:55 | 2020-05-06T03:21:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 724 | py | from django.contrib import admin
from .models import Tutorial, Question
from tinymce.widgets import TinyMCE
from django.db import models
# Register your models here.
class TutorialAdmin(admin.ModelAdmin):
    """Admin configuration for Tutorial: edit title and content, with the
    content TextField rendered through the TinyMCE rich-text widget."""
    fields = ["tutorial_title", "tutorial_content"]
    formfield_overrides = {models.TextField: {'widget': TinyMCE()}}
class QuestionAdmin(admin.ModelAdmin):
    """Admin configuration for Question: exposes the question text, the four
    answer options, the correct option and the owning tutorial."""
    fields = [
        "question",
        "option_1",
        "option_2",
        "option_3",
        "option_4",
        "correct_option",
        "tutorial_it_belongs_to",
    ]
# Expose both models in the Django admin with their customised ModelAdmin
# configurations defined above.
admin.site.register(Tutorial, TutorialAdmin)
admin.site.register(Question, QuestionAdmin)
| [
"ishanagarwal1605@gmail.com"
] | ishanagarwal1605@gmail.com |
e3e81e9a7cb19b08d57ebf9dcdce2402c63f2434 | 7ceac01483e5b6a2f76f5184462506b773ae3ee0 | /venv/Scripts/pip-script.py | d847241f2d40144619d67cad4e3c1ed5239eb065 | [] | no_license | leandroph/Python-CursoEmVideo | e75c4ae6e8b0801d5b5cf95c6c0ad4ed3504e9df | 95ea1b3c3ece1d9f042fc5973c71ee507a312222 | refs/heads/master | 2020-03-20T20:00:13.282220 | 2020-02-19T01:32:26 | 2020-02-19T01:32:26 | 137,665,588 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | #!D:\Projetos\Python\Python-CursoEmVideo\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip setuptools wrapper suffixes ("-script.py"/"-script.pyw"/".exe")
    # from argv[0] so pip reports a clean program name in usage/errors,
    # then exit with the status code returned by pip's console entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
    )
| [
"leandro.h@sou.unijui.edu.br"
] | leandro.h@sou.unijui.edu.br |
cbe8f456cf36d47b0a67a67919aa01d85a3138c1 | cb4df15292c2d8b797d65fa59574aba5ee4d44bd | /backend/threads/migrations/0004_auto_20180424_1654.py | 31e198615ac9240c1b399e6a43ce92c59ddd10f7 | [
"MIT"
] | permissive | CommonShares/rengorum | a9bb18b1eb9e36314bccc759130b55e7367e77ee | 22068651b943e9abbf91ee7f4b95e96d6cfa5be6 | refs/heads/master | 2023-01-07T14:02:42.566836 | 2020-11-14T16:37:07 | 2020-11-14T16:37:07 | 312,552,449 | 0 | 0 | MIT | 2022-10-10T09:13:13 | 2020-11-13T11:06:56 | JavaScript | UTF-8 | Python | false | false | 366 | py | # Generated by Django 2.0.3 on 2018-04-24 16:54
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: changes only model Meta options, so it does
    # not alter the database schema.

    dependencies = [
        ('threads', '0003_auto_20180424_1650'),
    ]

    operations = [
        # List pinned threads first, then the most recently active ones.
        migrations.AlterModelOptions(
            name='thread',
            options={'ordering': ['-pinned', '-last_activity']},
        ),
    ]
| [
"endiliey@gmail.com"
] | endiliey@gmail.com |
3cffb25396bb01bc80f2f33b12dc67aefb015410 | 87917aa9971cbea67e9d3d9f3ba17ff38a68f637 | /clean-NN/mnist-conv.py | 91f15f3b2ad9aaad8b8c84b6a434db1e4e0c1fc4 | [] | no_license | daterka/Comparison-of-effectiveness-of-Regular-and-Convolutional-NN-in-handwritten-digit-recognition | a1e23485e0f56467115c0f097f7acadbe30c7b99 | 7c8c46b0ed149fe25acfc2321d89bdd8a8eed481 | refs/heads/master | 2020-09-23T06:19:24.170579 | 2019-12-05T16:28:37 | 2019-12-05T16:28:37 | 225,426,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,939 | py | from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense,Conv2D,Dropout,Flatten,MaxPooling2D
import matplotlib.pyplot as plt
import numpy as np
batch_size = 128
num_classes = 10
epochs = 8

# input image dimensions
img_rows, img_cols = 28, 28

# TRAINING AND TEST DATA DOWNLOADING
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print("Data sets downloaded")

# DATA FORMATTING
# Reshape to 4 dims (samples, rows, cols, channels) as expected by Conv2D.
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
input_shape = (img_rows, img_cols, 1)
# Cast to float so the normalisation below yields fractional values.
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# Normalise pixel intensities from [0, 255] down to [0, 1].
x_train /= 255
x_test /= 255
# One-hot encode the digit labels for categorical cross-entropy.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
print("Data formatted")

# DEFINING MODEL ARCHITECTURE
model = Sequential()
# First hidden layer - a single 3x3 convolution filter, then 2x2 max pooling.
model.add(Conv2D(1, kernel_size=(3, 3), input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
# Second hidden layer - fully connected.
model.add(Dense(72, activation='relu'))
# Random neuron dropout (30%) to reduce overfitting.
model.add(Dropout(0.3))
# Output layer - one softmax unit per digit class.
model.add(Dense(10, activation="softmax"))
model.summary()

# MODEL BUILD AND TRAINING WITH TRAINING DATASET
# 10% of the training data is held out as a validation split each epoch.
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
                    verbose=1, validation_split=0.1)

# ACCURACY CHECK ON THE HELD-OUT TEST DATASET
loss, accuracy = model.evaluate(x_test, y_test, verbose=1)
print('\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~n')
print('Test loss:', loss)
print('Test accuracy:', accuracy)

# Accuracy per epoch: training curve vs. the validation split.
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['training', 'validation'], loc='best')
plt.show()
# Loss per epoch.
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
# BUG FIX: the second curve is the validation-split loss ('val_loss'),
# not test loss -- label the curves consistently with the accuracy plot.
plt.legend(['training', 'validation'], loc='upper left')
plt.show()
| [
"simple@pop-os.localdomain"
] | simple@pop-os.localdomain |
d6ea747b5957732d583916c70b4f80bc1cdb39b4 | 51f887286aa3bd2c3dbe4c616ad306ce08976441 | /pybind/slxos/v17s_1_02/brocade_mpls_rpc/show_mpls_ldp_fec_prefix_prefix/input/__init__.py | 414039c71bac5dac5968fee3843019053441ab0c | [
"Apache-2.0"
] | permissive | b2220333/pybind | a8c06460fd66a97a78c243bf144488eb88d7732a | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | refs/heads/master | 2020-03-18T09:09:29.574226 | 2018-04-03T20:09:50 | 2018-04-03T20:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,492 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class input(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module brocade-mpls - based on the path /brocade_mpls_rpc/show-mpls-ldp-fec-prefix-prefix/input. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.
  """
  # NOTE: auto-generated pyangbind code -- regenerate from the YANG model
  # rather than editing by hand.
  __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__ldp_fec_prefix',)

  _yang_name = 'input'
  _rest_name = 'input'

  _pybind_generated_by = 'container'

  def __init__(self, *args, **kwargs):
    # Resolve the XPath helper: an explicit keyword wins, then the parent
    # container's helper is inherited, otherwise path registration is off.
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
      self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
      self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
      path_helper_ = getattr(self._parent, "_path_helper", False)
      self._path_helper = path_helper_
    else:
      self._path_helper = False

    # Extension methods are resolved the same way (kwarg -> parent -> off).
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
      self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
      self._extmethods = extmethods
    elif hasattr(self, "_parent"):
      extmethods = getattr(self._parent, "_extmethods", None)
      self._extmethods = extmethods
    else:
      self._extmethods = False
    # Leaf holder for ldp-fec-prefix; the generated regex constrains values
    # to a dotted-quad IPv4 prefix of the form "a.b.c.d/len" (len 0-32).
    self.__ldp_fec_prefix = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}), is_leaf=True, yang_name="ldp-fec-prefix", rest_name="ldp-fec-prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='inet:ipv4-prefix', is_config=True)

    load = kwargs.pop("load", None)
    if args:
      # Copy-constructor style: a single positional object may be supplied,
      # provided it exposes every pyangbind element of this container.
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        # Only copy elements that have actually been changed on the source.
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)

  def _path(self):
    # YANG schema path of this container (built up through the parents).
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return [u'brocade_mpls_rpc', u'show-mpls-ldp-fec-prefix-prefix', u'input']

  def _rest_path(self):
    # REST URI path of this container; elements without a rest name are
    # skipped in the generated path.
    if hasattr(self, "_parent"):
      if self._rest_name:
        return self._parent._rest_path()+[self._rest_name]
      else:
        return self._parent._rest_path()
    else:
      return [u'show-mpls-ldp-fec-prefix-prefix', u'input']

  def _get_ldp_fec_prefix(self):
    """
    Getter method for ldp_fec_prefix, mapped from YANG variable /brocade_mpls_rpc/show_mpls_ldp_fec_prefix_prefix/input/ldp_fec_prefix (inet:ipv4-prefix)

    YANG Description: IP address/Subnet mask length
    """
    return self.__ldp_fec_prefix

  def _set_ldp_fec_prefix(self, v, load=False):
    """
    Setter method for ldp_fec_prefix, mapped from YANG variable /brocade_mpls_rpc/show_mpls_ldp_fec_prefix_prefix/input/ldp_fec_prefix (inet:ipv4-prefix)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_ldp_fec_prefix is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_ldp_fec_prefix() directly.

    YANG Description: IP address/Subnet mask length
    """
    # Unwrap a typed value back to its underlying type before re-validating.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}), is_leaf=True, yang_name="ldp-fec-prefix", rest_name="ldp-fec-prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='inet:ipv4-prefix', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """ldp_fec_prefix must be of a type compatible with inet:ipv4-prefix""",
          'defined-type': "inet:ipv4-prefix",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}), is_leaf=True, yang_name="ldp-fec-prefix", rest_name="ldp-fec-prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='inet:ipv4-prefix', is_config=True)""",
        })

    self.__ldp_fec_prefix = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_ldp_fec_prefix(self):
    # Reset the leaf back to its generated default (unset) state.
    self.__ldp_fec_prefix = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}), is_leaf=True, yang_name="ldp-fec-prefix", rest_name="ldp-fec-prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='inet:ipv4-prefix', is_config=True)

  # Public property mapped onto the private getter/setter pair above.
  ldp_fec_prefix = __builtin__.property(_get_ldp_fec_prefix, _set_ldp_fec_prefix)

  _pyangbind_elements = {'ldp_fec_prefix': ldp_fec_prefix, }
| [
"badaniya@brocade.com"
] | badaniya@brocade.com |
541e43b87cc5ec674f11ed89b1690f6a19cdc383 | 5b2f9eda8797c52ff50fc504b6f3c7c0812eef31 | /smarthome-master/webserver/SMHome/Database/migrations/0004_auto_20190408_2052.py | 3e99edc60383c216c5ba034c82c8a1579f97ca77 | [] | no_license | LKowalczykProj/Projects | 279e4f5a39704eae214872d07c95fccf90d7a3e1 | 95f2eb56fc397d03bd3e9a95319547fa6a044cc0 | refs/heads/master | 2023-08-03T10:55:45.799715 | 2019-07-17T12:46:33 | 2019-07-17T12:46:33 | 178,564,862 | 0 | 0 | null | 2023-07-22T00:03:05 | 2019-03-30T13:53:14 | Java | UTF-8 | Python | false | false | 1,538 | py | # Generated by Django 2.1.5 on 2019-04-08 20:52
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: tightens field definitions on the smart-home
    # models (value ranges, nullability, FK delete behaviour).

    dependencies = [
        ('Database', '0003_auto_20190408_2039'),
    ]

    operations = [
        # Lamp intensity constrained to the percentage range 0-100.
        migrations.AlterField(
            model_name='lamp',
            name='intensity',
            field=models.IntegerField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)]),
        ),
        # Room's sensor device becomes optional; PROTECT prevents deleting a
        # Device that rooms still reference.
        migrations.AlterField(
            model_name='room',
            name='device',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='sensors', to='Database.Device'),
        ),
        migrations.AlterField(
            model_name='room',
            name='humidity',
            field=models.DecimalField(decimal_places=1, max_digits=3, null=True),
        ),
        migrations.AlterField(
            model_name='room',
            name='people',
            field=models.BooleanField(),
        ),
        migrations.AlterField(
            model_name='room',
            name='temperature',
            field=models.DecimalField(decimal_places=1, max_digits=3, null=True),
        ),
        # RTV volume constrained to the percentage range 0-100.
        migrations.AlterField(
            model_name='rtv',
            name='volume',
            field=models.IntegerField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)]),
        ),
    ]
| [
"kowalczyk.m.lukasz@gmail.com"
] | kowalczyk.m.lukasz@gmail.com |
1f55fa097417e8fa873ef1d263c81b8567c2790c | 318d8c05894c252413f0a68e70583498fc394ca5 | /manage.py | e9ad36564a438069accbcd9b441e4137049c4dbb | [] | no_license | ualexzt/fxportal | aa0326ef1379c3c553647b90dd366f3ae3c96ea0 | 0bc4cbbceb67f0b95dfc3d61d5f63a8da2c07ee3 | refs/heads/master | 2022-12-23T13:23:01.060318 | 2020-02-20T14:36:06 | 2020-02-20T14:36:06 | 231,742,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks for the fxportal project.

    Canonical ``manage.py`` entry point: points Django at the project's
    settings module and forwards the command line to Django's dispatcher.
    """
    # Only set the settings module if the environment has not already chosen one.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fxportal.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint (missing install / inactive venv),
        # keeping the original ImportError chained as the cause.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"ualexzt@gmail.com"
] | ualexzt@gmail.com |
95ba742a2c0d23addf49e446d3d2ca9c64c13433 | 8d2b2a54d73d10863de29c8030b0fa9891355e26 | /ProjectGroup/my_app/migrations/0007_auto_20200319_0920.py | 87fc2f5e85f873b728e55c19bd109451855e8c06 | [] | no_license | denglia/cs493 | bc9d08611edd6a6045c05f2542b23c6460a5b32a | 7c1e8998caa0acc4a196d96ea94d223d848dc7f3 | refs/heads/master | 2021-05-24T14:12:45.452004 | 2020-04-06T19:51:18 | 2020-04-06T19:51:18 | 253,600,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | # Generated by Django 2.1.7 on 2020-03-19 01:20
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration for the my_app Car model.

    dependencies = [
        ('my_app', '0006_auto_20200319_0115'),
    ]

    operations = [
        # product_date is now stamped automatically when the row is created
        # (auto_now_add) and can no longer be set manually.
        migrations.AlterField(
            model_name='car',
            name='product_date',
            field=models.DateField(auto_now_add=True, verbose_name='Product Date'),
        ),
    ]
| [
"noreply@github.com"
] | noreply@github.com |
ebddad0a10200abb12d5766947407ad330b1c73e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03773/s930206485.py | 73718780627fbe486462c2c936d41e242d1de545 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | import sys
# Competitive-programming fast input: rebind input to sys.stdin.readline.
input = sys.stdin.readline
# Read two integers (a start hour and an offset) and print the resulting
# hour on a 24-hour clock.
a,b=map(int,input().split())
print((a+b)%24)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
8588051727a218161e3bf792bad3dd3b97e22116 | 3594a6077a73e5881efc4307e6af6652ffe570ce | /web/diceuser/migrations/0001_initial.py | acc99c8f41d37859a5cb882e1e58fbe1e7597489 | [
"MIT"
] | permissive | whitepurple/VideoStreamingPlatform | 1de9b2203511cd80fa308103c83ad95f2a2d62ed | 35b62e76987398c49b33c6e4f99fbd7ad3839584 | refs/heads/master | 2023-02-22T01:42:18.012743 | 2021-01-25T06:17:07 | 2021-01-25T06:17:07 | 294,710,702 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,841 | py | # Generated by Django 3.0.4 on 2020-08-28 02:16
import diceuser.models
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial migration: creates the custom DiceUser authentication model
    # (replaces Django's default user) with its custom manager.

    initial = True

    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]

    operations = [
        migrations.CreateModel(
            name='DiceUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(max_length=20, unique=True)),
                ('is_active', models.BooleanField(default=True)),
                ('is_admin', models.BooleanField(default=False)),
                ('date_joined', models.DateTimeField(auto_now_add=True)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
            managers=[
                # Custom manager handling user creation for the custom model.
                ('objects', diceuser.models.UserManager()),
            ],
        ),
    ]
| [
"qazplook@naver.com"
] | qazplook@naver.com |
8b35811c9a9336f4057789c80a146327e4bc69e8 | 26eb6f140873106c3b412852a9e33826617e758a | /rescueapp/migrations/0022_address_breakdown.py | 7f5294abd4b672dd7d03e16bea79aaec9413ab3a | [] | no_license | Aqudei/RescueServer | 95c1b247d77a11ed7122b1db2e607691be78c037 | d95d15404e9cda92729b40607698d724b3b7a8af | refs/heads/master | 2021-07-18T07:49:18.349700 | 2017-10-22T11:33:11 | 2017-10-22T11:33:11 | 103,828,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,599 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-16 13:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds address fields to EvacuationCenter and
    # relaxes several existing fields to allow blank/null values.

    dependencies = [
        ('rescueapp', '0021_incident_info'),
    ]

    operations = [
        # New optional address-breakdown fields.
        migrations.AddField(
            model_name='evacuationcenter',
            name='Barangay',
            field=models.CharField(blank=True, max_length=32, null=True),
        ),
        migrations.AddField(
            model_name='evacuationcenter',
            name='City',
            field=models.CharField(blank=True, max_length=32, null=True),
        ),
        migrations.AddField(
            model_name='evacuationcenter',
            name='Municipality',
            field=models.CharField(blank=True, max_length=32, null=True),
        ),
        migrations.AddField(
            model_name='evacuationcenter',
            name='Province',
            field=models.CharField(blank=True, max_length=32, null=True),
        ),
        # Existing fields become optional.
        migrations.AlterField(
            model_name='evacuationcenter',
            name='Amenities',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='evacuationcenter',
            name='InCharge',
            field=models.CharField(blank=True, max_length=256, null=True),
        ),
        migrations.AlterField(
            model_name='evacuationcenter',
            name='InChargeCellphone',
            field=models.CharField(blank=True, max_length=64, null=True),
        ),
    ]
| [
"yodi_cute07@yahoo.com"
] | yodi_cute07@yahoo.com |
a5d475c35d02fb1c51d8aa4b9757784721f30919 | 5cd5a33f3b96aee8048af24a278c509538fe12ed | /Assignment3/Assignment3.2.py | 13de72fb2f5249aaff2e9e87c5e85a3852d3b46d | [] | no_license | priyankadighe20/Learning-Algorithms | 2e4c0c050b10b96f89d808cb6d88903a4538f9a4 | a8e0305987bbf1ab12b114e36688b49d8df760ad | refs/heads/master | 2021-05-04T05:17:40.676570 | 2016-10-17T07:52:47 | 2016-10-17T07:52:47 | 70,973,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 970 | py | import random
import math
import matplotlib.pyplot as py
import numpy as np
def LIKELIHOOD_WEIGHT(b):
    """Likelihood weight of sample *b* given the observed evidence Z
    (module-level global): (0.65/1.35) * 0.35**|Z - b|."""
    base = 0.65 / 1.35
    return base * (0.35 ** math.fabs(Z - b))
def GENERATE_B(n):
    """Return a uniformly random n-bit integer in [0, 2**n).

    Replaces the original bit-by-bit shift/append loop with a single call
    to random.getrandbits, which draws all n bits at once from the same
    underlying PRNG (so random.seed still controls reproducibility).

    :param n: number of random bits; for n <= 0 the original loop produced
        0, which is preserved here.
    """
    if n <= 0:
        return 0
    return random.getrandbits(n)
def PR_QUERY_W_EVIDENCE(N):
    """Estimate the query probability by likelihood weighting.

    Draws N random 10-bit samples, weights each by LIKELIHOOD_WEIGHT, and
    tracks the running weighted fraction of samples satisfying the query.

    :return: (list of running estimates after each sample, final estimate)
    """
    weighted_hits = 0.0
    weight_total = 0.0
    estimates = []
    for _ in range(N):
        sample = GENERATE_B(10)          # random 10-bit candidate world
        weight = LIKELIHOOD_WEIGHT(sample)
        weight_total += weight
        # Query event: sample value >= 64, i.e. some bit in positions 6-9 is set.
        if sample >= 2 ** 6:
            weighted_hits += weight
        prob = weighted_hits / weight_total
        estimates.append(prob)
    return estimates, prob
# Evidence value used by LIKELIHOOD_WEIGHT (read as a module global).
Z = 64
# Number of samples for the plotted run.
X = 1000000
# First run with 10^7 samples just to report a high-accuracy estimate.
probArray, finalProb = PR_QUERY_W_EVIDENCE(10**7)
print("Probability for 10^7 samples is ", finalProb)
# Second, smaller run whose convergence trace is plotted below.
probArray, finalProb = PR_QUERY_W_EVIDENCE(X)
print(finalProb)
py.plot(list(range(X)), probArray )
py.show()
"pdighe@ucsd.edu"
] | pdighe@ucsd.edu |
ce8ac6565ee928adbfd31d1c35aa7d97bfe71606 | f99846f1c25d1acc3185c24066b07c5121c7d02b | /currency_converter.py | ceb380f93af992f5ab79210da9f7f879443051b2 | [] | no_license | tomasd1/currency_converter | e6a30acc84b3a9f2856118b3c5be6069b4fda47b | f367a767864fb48aedca2c524a3bbe2beb8f6af9 | refs/heads/master | 2022-11-19T11:00:31.109394 | 2020-07-13T15:43:25 | 2020-07-13T15:43:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,420 | py | """
Currency Converter, currency_converter.py by Tomas D., 2016-01-16
Converts input currency to output currency(ies) at currrent ROX
Usage examples:
currency_converter.py --amount 100.0 --input_currency EUR --output_currency CZK
currency_converter.py --amount 0.9 --input_currency ¥ --output_currency AUD
currency_converter.py --amount 10.92 --input_currency £
Options:
-h --help
--amount mandatory
--input_currency mandatory
--output_currency optional
--source optional, rates provider, eg. cs (Ceska Sporitelna)
Configuration:
through config.json
"""
import argparse
import simplejson
import sys
# Load converter configuration at import time; config maps ambiguous
# currency symbols to a preferred ISO 4217 code (see normalize_currency).
with open('config.json', encoding='utf-8') as data_file:
    config = simplejson.load(data_file)
def normalize_currency(c):
    """
    Accepts ISO 4217 currency code or currency symbol, returns currency code.
    Default currency code for particular symbols can be set in config.json.
    """
    # Already a known ISO code?
    with open('currencies.json', encoding='utf-8') as fh:
        currencies = simplejson.load(fh)
    if c in currencies:
        return c

    # Ambiguous symbols resolved by the user's configuration.
    if c in config:
        return config[c]

    # Otherwise try the symbol -> code table; return the first matching code.
    with open('symbols.json', encoding='utf-8') as fh:
        symbols = simplejson.load(fh)
    for code, symbol in symbols.items():
        if symbol == c:
            return code

    raise ValueError('Could not find {} in our list.'.format(c))
def import_from(module, name):
    """Import dotted *module* path and return its attribute *name*."""
    loaded = __import__(module, fromlist=[name])
    return getattr(loaded, name)
def create_parser():
    """Build and return the command-line argument parser."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--amount', type=float, required=True,
                        help="amount to convert in input currency")
    parser.add_argument('--input_currency', required=True,
                        help="3 letters name or currency symbol")
    parser.add_argument('--output_currency',
                        help="optional, 3 letters name or currency symbol")
    parser.add_argument('--source',
                        help="source of rates, default: currencyconverterapi")
    return parser
def launch(amount, input_currency, output_currency, source='currencyconverterapi'):
    """Convert *amount* from *input_currency* and emit the result as JSON.

    :param amount: numeric amount expressed in the input currency
    :param input_currency: ISO 4217 code or currency symbol
    :param output_currency: optional target code/symbol; a falsy value means
        "all currencies" and is passed through to the provider unchanged
    :param source: name of the rates-provider module under ``providers``
    :raises ValueError: if input and output currency are identical
    :raises ImportError: if the requested provider module cannot be loaded
    :return: pretty-printed JSON string when imported as a module; when run
        as a script the JSON is written to stdout and None is returned
    """
    # argparse passes None when --source is omitted; fall back to default.
    if not source:
        source = 'currencyconverterapi'
    curr_in = normalize_currency(input_currency)
    if output_currency:
        curr_out = normalize_currency(output_currency)
    else:
        curr_out = output_currency
    if curr_in == curr_out:
        raise ValueError('Input and output currency identical. Nothing to convert.')
    provider = source.lower()
    try:
        processor = import_from("providers.{}".format(provider), '{}'.format(provider))
    # BUG FIX: the original bare ``except:`` also swallowed SystemExit /
    # KeyboardInterrupt and masked unrelated errors; catch only failures of
    # the dynamic provider lookup and chain the original cause.
    except (ImportError, AttributeError) as err:
        raise ImportError('Selected rates provider invalid. Module missing.') from err
    result = {}
    result["output"] = dict(processor(curr_in, curr_out, amount))
    result["input"] = {"amount": amount, "currency": curr_in}
    # for easier unittests, vary output depending on how script is run
    if __name__ == '__main__':
        sys.stdout.write(simplejson.dumps(result, indent=4, sort_keys=True))
    else:
        return simplejson.dumps(result, indent=4, sort_keys=True)
def main():
    """Parse the command line and run one conversion."""
    args = create_parser().parse_args()
    launch(args.amount, args.input_currency, args.output_currency, args.source)


if __name__ == '__main__':
    main()
| [
"tomas.drahokoupil@gmail.com"
] | tomas.drahokoupil@gmail.com |
52624b4327a85e5fcae2b3c2e848a59ed503ef0a | 2ddc71961db7b88b734b93e5716706c5ba22cd8b | /venv/bin/cmake | 308ec0fc49730a3f2ffb731f415302f50f898b55 | [] | no_license | zweistein1326/FaceIdAttendance | 5d6d4fbf34cf7b397ef3ed5aa4f3f2ac46f7b6a0 | 87e79679c5776033cb026e286778812aa2fdff6c | refs/heads/main | 2023-08-28T09:18:23.808146 | 2021-10-07T11:07:52 | 2021-10-07T11:07:52 | 333,559,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | #!/Users/sid/PycharmProjects/FaceIDAttendance/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from cmake import cmake
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(cmake())
| [
"sid.agarwal45@gmail.com"
] | sid.agarwal45@gmail.com | |
5f4d4cbae4be77f5c897b06ef5e96f1d6c45ff12 | 4f875744ccae8fa9225318ce16fc483b7bf2735e | /amazon/missingNumber.py | 51bc4b88b741139e123462001c9eba084a29d249 | [] | no_license | nguyenngochuy91/companyQuestions | 62c0821174bb3cb33c7af2c5a1e83a60e4a29977 | c937fe19be665ba7ac345e1729ff531f370f30e8 | refs/heads/master | 2020-07-27T05:58:36.794033 | 2020-04-10T20:57:15 | 2020-04-10T20:57:15 | 208,893,527 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 1 23:59:19 2020
@author: huyn
"""
#Missing Number
#Given an array containing n distinct numbers taken from 0, 1, 2, ..., n, find the one that is missing from the array.
from typing import List
class Solution:
    def missingNumber(self, nums: List[int]) -> int:
        """Return the single value in [0, n] absent from *nums*.

        Uses the Gauss sum 0+1+...+n and subtracts the actual total; the
        difference is exactly the missing number. O(n) time, O(1) space.
        """
        n = len(nums)
        expected_total = n * (n + 1) // 2
        return expected_total - sum(nums)
"huyn@cvm6h4zv52.cvm.iastate.edu"
] | huyn@cvm6h4zv52.cvm.iastate.edu |
387e9b1e5ad5002216240831e93cad2309d95df5 | a4ad81b09feb7e2816481c96864a5c274e6132c4 | /auth_with_lambda/main_run.py | f383b85c84d56abe8f009d47703b7e55d40bff28 | [] | no_license | GAbdallah/Python-JavaSample | 015aa1d81744f0d1903856b82a84e9838d042711 | d774469edb6cafc8a338b654d73b7ec763d69956 | refs/heads/master | 2023-06-08T09:11:16.881371 | 2021-06-28T20:59:10 | 2021-06-28T20:59:10 | 381,159,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,150 | py | from database import DatabaseLayer
from passlib.hash import pbkdf2_sha512
from datetime import datetime
from psycopg2 import pool
import psycopg2
# RDS connection settings.
# NOTE(review): these are blank placeholders -- presumably filled in at
# deploy time (env vars / secrets); verify before shipping.
rds_database = ''
rds_user = ''
rds_password = ''
rds_host = ''
rds_port = ''
ip = ''
source = 'phone'

# Thread-safe connection pool (min 5, max 20) created at import time so it
# is reused across warm Lambda invocations.
# NOTE(review): connects as the literal user "postgres" instead of rds_user
# -- confirm that is intentional.
postgreSQL_pool = psycopg2.pool.ThreadedConnectionPool(5, 20,
                                                       user = "postgres",
                                                       password = rds_password,
                                                       host = rds_host,
                                                       port = rds_port,
                                                       database = rds_database)

# One connection and one data-access layer, also created at import time and
# shared by all handler invocations.
ps_connection = postgreSQL_pool.getconn()
databaseLayer = DatabaseLayer(ps_connection)
def check_if_connection_isdead(ps_connection):
    """Ensure a live database connection and data layer; reconnect if needed.

    If *ps_connection* is missing or closed, a fresh connection is drawn
    from the module-level pool and the shared DatabaseLayer is pointed at it.

    :param ps_connection: the (possibly dead) psycopg2 connection to check
    :return: a live connection -- either the one passed in or a new one.
        The return value was added because rebinding the parameter inside
        the function never reached the caller; callers that ignore the
        return value behave as before.
    """
    # BUG FIX: assigning to databaseLayer below made it function-local,
    # so the bare reference raised UnboundLocalError; declare it global.
    global databaseLayer
    assert postgreSQL_pool is not None
    if not ps_connection or ps_connection.closed != 0:
        ps_connection = postgreSQL_pool.getconn()
        if databaseLayer:
            databaseLayer.set_connection(ps_connection)
        else:
            databaseLayer = DatabaseLayer(ps_connection)
    return ps_connection
def run_job(event):
    """Authenticate an employee login request.

    Expects a POST event whose body carries ``employee_id`` and ``password``.
    The stored pbkdf2_sha512 hash is fetched from the users table and the
    supplied password is verified against it.

    :param event: Lambda-style dict with 'method' and 'body' keys
    :return: ``{'statusCode': 200, 'body': <employee data>}`` on success,
        ``{'statusCode': 400, 'body': 'Not a valid data or request'}``
        otherwise
    """
    # BUG FIX: the original condition subscripted event['body'] fields
    # before checking that event['body'] was present at all, raising
    # KeyError on malformed events; validate the envelope first.
    body = event.get('body') or {}
    if event.get('method') == 'POST' and body.get('employee_id') and body.get('password'):
        # Re-acquire the pooled connection if the cached one has died.
        check_if_connection_isdead(ps_connection)

        employee_id = body['employee_id']
        entered_password = body['password']
        encrypted_data = databaseLayer.get_users_password_data(employee_id)

        if encrypted_data:
            original_password = encrypted_data['password']
            if pbkdf2_sha512.verify(entered_password, original_password):
                employee_data = databaseLayer.get_employee_data(employee_id)
                return {
                    'statusCode': 200,
                    'body': employee_data
                }

    # Bad envelope, unknown employee, or failed verification.
    return {
        'statusCode': 400,
        'body': 'Not a valid data or request'
    }
def lambda_handler(event, context):
    """AWS Lambda entry point: delegate the event to run_job."""
    return run_job(event)
| [
"abdallah@rxvirtual.net"
] | abdallah@rxvirtual.net |
fb3ae55ef32a7d7e54a07342319d8f0569c814a0 | 6364bb727b623f06f6998941299c49e7fcb1d437 | /msgraph-cli-extensions/src/education/azext_education/vendored_sdks/education/operations/_education_class_assignment_submission_operations.py | 40c6e974d3dfc0da75b45fd0d3fe60b3bdfb72ea | [
"MIT"
] | permissive | kanakanaidu/msgraph-cli | 1d6cd640f4e10f4bdf476d44d12a7c48987b1a97 | b3b87f40148fb691a4c331f523ca91f8a5cc9224 | refs/heads/main | 2022-12-25T08:08:26.716914 | 2020-09-23T14:29:13 | 2020-09-23T14:29:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85,715 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union

    T = TypeVar('T')
    # Alias for the optional ``cls`` response hook every operation accepts:
    # a callable given the raw pipeline response, the deserialized body, and
    # the response headers, whose return value replaces the default result.
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class EducationClassAssignmentSubmissionOperations(object):
"""EducationClassAssignmentSubmissionOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~education.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to build and send every HTTP request.
        self._client = client
        # msrest-style serializer/deserializer pair shared with the parent client.
        self._serialize = serializer
        self._deserialize = deserializer
        # Service configuration (holds $top/$skip/$search/$filter/$count defaults).
        self._config = config
def return_method(
self,
education_class_id, # type: str
education_assignment_id, # type: str
education_submission_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphEducationSubmission"
"""Invoke action return.
Invoke action return.
:param education_class_id: key: educationClass-id of educationClass.
:type education_class_id: str
:param education_assignment_id: key: educationAssignment-id of educationAssignment.
:type education_assignment_id: str
:param education_submission_id: key: educationSubmission-id of educationSubmission.
:type education_submission_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphEducationSubmission, or the result of cls(response)
:rtype: ~education.models.MicrosoftGraphEducationSubmission
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphEducationSubmission"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.return_method.metadata['url'] # type: ignore
path_format_arguments = {
'educationClass-id': self._serialize.url("education_class_id", education_class_id, 'str'),
'educationAssignment-id': self._serialize.url("education_assignment_id", education_assignment_id, 'str'),
'educationSubmission-id': self._serialize.url("education_submission_id", education_submission_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphEducationSubmission', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
return_method.metadata = {'url': '/education/classes/{educationClass-id}/assignments/{educationAssignment-id}/submissions/{educationSubmission-id}/microsoft.graph.return'} # type: ignore
def submit(
self,
education_class_id, # type: str
education_assignment_id, # type: str
education_submission_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphEducationSubmission"
"""Invoke action submit.
Invoke action submit.
:param education_class_id: key: educationClass-id of educationClass.
:type education_class_id: str
:param education_assignment_id: key: educationAssignment-id of educationAssignment.
:type education_assignment_id: str
:param education_submission_id: key: educationSubmission-id of educationSubmission.
:type education_submission_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphEducationSubmission, or the result of cls(response)
:rtype: ~education.models.MicrosoftGraphEducationSubmission
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphEducationSubmission"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.submit.metadata['url'] # type: ignore
path_format_arguments = {
'educationClass-id': self._serialize.url("education_class_id", education_class_id, 'str'),
'educationAssignment-id': self._serialize.url("education_assignment_id", education_assignment_id, 'str'),
'educationSubmission-id': self._serialize.url("education_submission_id", education_submission_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphEducationSubmission', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
submit.metadata = {'url': '/education/classes/{educationClass-id}/assignments/{educationAssignment-id}/submissions/{educationSubmission-id}/microsoft.graph.submit'} # type: ignore
def unsubmit(
self,
education_class_id, # type: str
education_assignment_id, # type: str
education_submission_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphEducationSubmission"
"""Invoke action unsubmit.
Invoke action unsubmit.
:param education_class_id: key: educationClass-id of educationClass.
:type education_class_id: str
:param education_assignment_id: key: educationAssignment-id of educationAssignment.
:type education_assignment_id: str
:param education_submission_id: key: educationSubmission-id of educationSubmission.
:type education_submission_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphEducationSubmission, or the result of cls(response)
:rtype: ~education.models.MicrosoftGraphEducationSubmission
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphEducationSubmission"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.unsubmit.metadata['url'] # type: ignore
path_format_arguments = {
'educationClass-id': self._serialize.url("education_class_id", education_class_id, 'str'),
'educationAssignment-id': self._serialize.url("education_assignment_id", education_assignment_id, 'str'),
'educationSubmission-id': self._serialize.url("education_submission_id", education_submission_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphEducationSubmission', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
unsubmit.metadata = {'url': '/education/classes/{educationClass-id}/assignments/{educationAssignment-id}/submissions/{educationSubmission-id}/microsoft.graph.unsubmit'} # type: ignore
    def list_outcome(
        self,
        education_class_id,  # type: str
        education_assignment_id,  # type: str
        education_submission_id,  # type: str
        orderby=None,  # type: Optional[List[Union[str, "models.Enum139"]]]
        select=None,  # type: Optional[List[Union[str, "models.Enum140"]]]
        expand=None,  # type: Optional[List[str]]
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["models.CollectionOfEducationOutcome"]
        """Get outcomes from education.

        Lists the outcomes of a submission as a lazily-paged iterable; each
        service page is fetched on demand via the collection's @odata.nextLink.

        :param education_class_id: key: educationClass-id of educationClass.
        :type education_class_id: str
        :param education_assignment_id: key: educationAssignment-id of educationAssignment.
        :type education_assignment_id: str
        :param education_submission_id: key: educationSubmission-id of educationSubmission.
        :type education_submission_id: str
        :param orderby: Order items by property values.
        :type orderby: list[str or ~education.models.Enum139]
        :param select: Select properties to be returned.
        :type select: list[str or ~education.models.Enum140]
        :param expand: Expand related entities.
        :type expand: list[str]
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either CollectionOfEducationOutcome or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~education.models.CollectionOfEducationOutcome]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.CollectionOfEducationOutcome"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # Build the GET for one page. The first page is assembled from the
            # path template plus OData query options; subsequent pages reuse the
            # service-supplied nextLink verbatim (it already embeds the query).
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = 'application/json'

            if not next_link:
                # Construct URL
                url = self.list_outcome.metadata['url']  # type: ignore
                path_format_arguments = {
                    'educationClass-id': self._serialize.url("education_class_id", education_class_id, 'str'),
                    'educationAssignment-id': self._serialize.url("education_assignment_id", education_assignment_id, 'str'),
                    'educationSubmission-id': self._serialize.url("education_submission_id", education_submission_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Paging/search/filter options come from client configuration;
                # orderby/select/expand come from this call's arguments.
                query_parameters = {}  # type: Dict[str, Any]
                if self._config.top is not None:
                    query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
                if self._config.skip is not None:
                    query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
                if self._config.search is not None:
                    query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
                if self._config.filter is not None:
                    query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
                if self._config.count is not None:
                    query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
                if orderby is not None:
                    query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
                if select is not None:
                    query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
                if expand is not None:
                    query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Split one deserialized page into (link-to-next-page, items).
            deserialized = self._deserialize('CollectionOfEducationOutcome', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.odata_next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page and fail loudly on any non-200 status.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize(models.OdataError, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_outcome.metadata = {'url': '/education/classes/{educationClass-id}/assignments/{educationAssignment-id}/submissions/{educationSubmission-id}/outcomes'}  # type: ignore
    def create_outcome(
        self,
        education_class_id,  # type: str
        education_assignment_id,  # type: str
        education_submission_id,  # type: str
        id=None,  # type: Optional[str]
        last_modified_date_time=None,  # type: Optional[datetime.datetime]
        microsoft_graph_identity_id=None,  # type: Optional[str]
        display_name=None,  # type: Optional[str]
        id1=None,  # type: Optional[str]
        microsoft_graph_identity_display_name=None,  # type: Optional[str]
        id2=None,  # type: Optional[str]
        display_name1=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.MicrosoftGraphEducationOutcome"
        """Create new navigation property to outcomes for education.

        POSTs a new educationOutcome under the given submission. The flat
        keyword arguments are assembled into a MicrosoftGraphEducationOutcome
        body below; the numbered id/display_name variants map to the
        user/device/application identity fields of lastModifiedBy.

        :param education_class_id: key: educationClass-id of educationClass.
        :type education_class_id: str
        :param education_assignment_id: key: educationAssignment-id of educationAssignment.
        :type education_assignment_id: str
        :param education_submission_id: key: educationSubmission-id of educationSubmission.
        :type education_submission_id: str
        :param id: Read-only.
        :type id: str
        :param last_modified_date_time:
        :type last_modified_date_time: ~datetime.datetime
        :param microsoft_graph_identity_id: Unique identifier for the identity.
        :type microsoft_graph_identity_id: str
        :param display_name: The identity's display name. Note that this may not always be available or
         up to date. For example, if a user changes their display name, the API may show the new value
         in a future response, but the items associated with the user won't show up as having changed
         when using delta.
        :type display_name: str
        :param id1: Unique identifier for the identity.
        :type id1: str
        :param microsoft_graph_identity_display_name: The identity's display name. Note that this may
         not always be available or up to date. For example, if a user changes their display name, the
         API may show the new value in a future response, but the items associated with the user won't
         show up as having changed when using delta.
        :type microsoft_graph_identity_display_name: str
        :param id2: Unique identifier for the identity.
        :type id2: str
        :param display_name1: The identity's display name. Note that this may not always be available
         or up to date. For example, if a user changes their display name, the API may show the new
         value in a future response, but the items associated with the user won't show up as having
         changed when using delta.
        :type display_name1: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: MicrosoftGraphEducationOutcome, or the result of cls(response)
        :rtype: ~education.models.MicrosoftGraphEducationOutcome
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphEducationOutcome"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))

        # Fold the flattened keyword arguments back into the request model.
        _body = models.MicrosoftGraphEducationOutcome(id=id, last_modified_date_time=last_modified_date_time, id_last_modified_by_user_id=microsoft_graph_identity_id, display_name_last_modified_by_user_display_name=display_name, id_last_modified_by_device_id=id1, display_name_last_modified_by_device_display_name=microsoft_graph_identity_display_name, id_last_modified_by_application_id=id2, display_name_last_modified_by_application_display_name=display_name1)
        content_type = kwargs.pop("content_type", "application/json")

        # Construct URL
        url = self.create_outcome.metadata['url']  # type: ignore
        path_format_arguments = {
            'educationClass-id': self._serialize.url("education_class_id", education_class_id, 'str'),
            'educationAssignment-id': self._serialize.url("education_assignment_id", education_assignment_id, 'str'),
            'educationSubmission-id': self._serialize.url("education_submission_id", education_submission_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = 'application/json'

        # Serialize the model and POST it; 201 Created is the only success code.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(_body, 'MicrosoftGraphEducationOutcome')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('MicrosoftGraphEducationOutcome', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create_outcome.metadata = {'url': '/education/classes/{educationClass-id}/assignments/{educationAssignment-id}/submissions/{educationSubmission-id}/outcomes'}  # type: ignore
def get_outcome(
self,
education_class_id, # type: str
education_assignment_id, # type: str
education_submission_id, # type: str
education_outcome_id, # type: str
select=None, # type: Optional[List[Union[str, "models.Enum141"]]]
expand=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphEducationOutcome"
"""Get outcomes from education.
Get outcomes from education.
:param education_class_id: key: educationClass-id of educationClass.
:type education_class_id: str
:param education_assignment_id: key: educationAssignment-id of educationAssignment.
:type education_assignment_id: str
:param education_submission_id: key: educationSubmission-id of educationSubmission.
:type education_submission_id: str
:param education_outcome_id: key: educationOutcome-id of educationOutcome.
:type education_outcome_id: str
:param select: Select properties to be returned.
:type select: list[str or ~education.models.Enum141]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphEducationOutcome, or the result of cls(response)
:rtype: ~education.models.MicrosoftGraphEducationOutcome
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphEducationOutcome"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.get_outcome.metadata['url'] # type: ignore
path_format_arguments = {
'educationClass-id': self._serialize.url("education_class_id", education_class_id, 'str'),
'educationAssignment-id': self._serialize.url("education_assignment_id", education_assignment_id, 'str'),
'educationSubmission-id': self._serialize.url("education_submission_id", education_submission_id, 'str'),
'educationOutcome-id': self._serialize.url("education_outcome_id", education_outcome_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphEducationOutcome', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_outcome.metadata = {'url': '/education/classes/{educationClass-id}/assignments/{educationAssignment-id}/submissions/{educationSubmission-id}/outcomes/{educationOutcome-id}'} # type: ignore
    def update_outcome(
        self,
        education_class_id,  # type: str
        education_assignment_id,  # type: str
        education_submission_id,  # type: str
        education_outcome_id,  # type: str
        id=None,  # type: Optional[str]
        last_modified_date_time=None,  # type: Optional[datetime.datetime]
        microsoft_graph_identity_id=None,  # type: Optional[str]
        display_name=None,  # type: Optional[str]
        id1=None,  # type: Optional[str]
        microsoft_graph_identity_display_name=None,  # type: Optional[str]
        id2=None,  # type: Optional[str]
        display_name1=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Update the navigation property outcomes in education.

        PATCHes an existing educationOutcome of a submission. The flat keyword
        arguments are folded into a MicrosoftGraphEducationOutcome body below;
        the numbered id/display_name variants map to the user/device/application
        identity fields of lastModifiedBy. Success returns nothing (204).

        :param education_class_id: key: educationClass-id of educationClass.
        :type education_class_id: str
        :param education_assignment_id: key: educationAssignment-id of educationAssignment.
        :type education_assignment_id: str
        :param education_submission_id: key: educationSubmission-id of educationSubmission.
        :type education_submission_id: str
        :param education_outcome_id: key: educationOutcome-id of educationOutcome.
        :type education_outcome_id: str
        :param id: Read-only.
        :type id: str
        :param last_modified_date_time:
        :type last_modified_date_time: ~datetime.datetime
        :param microsoft_graph_identity_id: Unique identifier for the identity.
        :type microsoft_graph_identity_id: str
        :param display_name: The identity's display name. Note that this may not always be available or
         up to date. For example, if a user changes their display name, the API may show the new value
         in a future response, but the items associated with the user won't show up as having changed
         when using delta.
        :type display_name: str
        :param id1: Unique identifier for the identity.
        :type id1: str
        :param microsoft_graph_identity_display_name: The identity's display name. Note that this may
         not always be available or up to date. For example, if a user changes their display name, the
         API may show the new value in a future response, but the items associated with the user won't
         show up as having changed when using delta.
        :type microsoft_graph_identity_display_name: str
        :param id2: Unique identifier for the identity.
        :type id2: str
        :param display_name1: The identity's display name. Note that this may not always be available
         or up to date. For example, if a user changes their display name, the API may show the new
         value in a future response, but the items associated with the user won't show up as having
         changed when using delta.
        :type display_name1: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))

        # Fold the flattened keyword arguments back into the request model.
        _body = models.MicrosoftGraphEducationOutcome(id=id, last_modified_date_time=last_modified_date_time, id_last_modified_by_user_id=microsoft_graph_identity_id, display_name_last_modified_by_user_display_name=display_name, id_last_modified_by_device_id=id1, display_name_last_modified_by_device_display_name=microsoft_graph_identity_display_name, id_last_modified_by_application_id=id2, display_name_last_modified_by_application_display_name=display_name1)
        content_type = kwargs.pop("content_type", "application/json")

        # Construct URL
        url = self.update_outcome.metadata['url']  # type: ignore
        path_format_arguments = {
            'educationClass-id': self._serialize.url("education_class_id", education_class_id, 'str'),
            'educationAssignment-id': self._serialize.url("education_assignment_id", education_assignment_id, 'str'),
            'educationSubmission-id': self._serialize.url("education_submission_id", education_submission_id, 'str'),
            'educationOutcome-id': self._serialize.url("education_outcome_id", education_outcome_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]

        # Construct headers (no Accept: a successful PATCH returns no body).
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(_body, 'MicrosoftGraphEducationOutcome')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})
    update_outcome.metadata = {'url': '/education/classes/{educationClass-id}/assignments/{educationAssignment-id}/submissions/{educationSubmission-id}/outcomes/{educationOutcome-id}'}  # type: ignore
    def list_resource(
        self,
        education_class_id,  # type: str
        education_assignment_id,  # type: str
        education_submission_id,  # type: str
        orderby=None,  # type: Optional[List[Union[str, "models.Enum142"]]]
        select=None,  # type: Optional[List[Union[str, "models.Enum143"]]]
        expand=None,  # type: Optional[List[str]]
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["models.CollectionOfEducationSubmissionResource"]
        """Get resources from education.

        Lists the resources of a submission as a lazily-paged iterable; each
        service page is fetched on demand via the collection's @odata.nextLink.

        :param education_class_id: key: educationClass-id of educationClass.
        :type education_class_id: str
        :param education_assignment_id: key: educationAssignment-id of educationAssignment.
        :type education_assignment_id: str
        :param education_submission_id: key: educationSubmission-id of educationSubmission.
        :type education_submission_id: str
        :param orderby: Order items by property values.
        :type orderby: list[str or ~education.models.Enum142]
        :param select: Select properties to be returned.
        :type select: list[str or ~education.models.Enum143]
        :param expand: Expand related entities.
        :type expand: list[str]
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either CollectionOfEducationSubmissionResource or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~education.models.CollectionOfEducationSubmissionResource]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.CollectionOfEducationSubmissionResource"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # Build the GET for one page: the first page from the path template
            # plus OData query options, later pages from the opaque nextLink.
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = 'application/json'

            if not next_link:
                # Construct URL
                url = self.list_resource.metadata['url']  # type: ignore
                path_format_arguments = {
                    'educationClass-id': self._serialize.url("education_class_id", education_class_id, 'str'),
                    'educationAssignment-id': self._serialize.url("education_assignment_id", education_assignment_id, 'str'),
                    'educationSubmission-id': self._serialize.url("education_submission_id", education_submission_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Paging/search/filter options come from client configuration;
                # orderby/select/expand come from this call's arguments.
                query_parameters = {}  # type: Dict[str, Any]
                if self._config.top is not None:
                    query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
                if self._config.skip is not None:
                    query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
                if self._config.search is not None:
                    query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
                if self._config.filter is not None:
                    query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
                if self._config.count is not None:
                    query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
                if orderby is not None:
                    query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
                if select is not None:
                    query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
                if expand is not None:
                    query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Split one deserialized page into (link-to-next-page, items).
            deserialized = self._deserialize('CollectionOfEducationSubmissionResource', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.odata_next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page and fail loudly on any non-200 status.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize(models.OdataError, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_resource.metadata = {'url': '/education/classes/{educationClass-id}/assignments/{educationAssignment-id}/submissions/{educationSubmission-id}/resources'}  # type: ignore
def create_resource(
    self,
    education_class_id,  # type: str
    education_assignment_id,  # type: str
    education_submission_id,  # type: str
    id=None,  # type: Optional[str]
    assignment_resource_url=None,  # type: Optional[str]
    display_name=None,  # type: Optional[str]
    created_date_time=None,  # type: Optional[datetime.datetime]
    last_modified_date_time=None,  # type: Optional[datetime.datetime]
    microsoft_graph_identity_id=None,  # type: Optional[str]
    microsoft_graph_identity_display_name=None,  # type: Optional[str]
    id1=None,  # type: Optional[str]
    display_name1=None,  # type: Optional[str]
    id2=None,  # type: Optional[str]
    display_name2=None,  # type: Optional[str]
    id3=None,  # type: Optional[str]
    display_name3=None,  # type: Optional[str]
    id4=None,  # type: Optional[str]
    display_name4=None,  # type: Optional[str]
    id5=None,  # type: Optional[str]
    display_name5=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> "models.MicrosoftGraphEducationSubmissionResource"
    """Create new navigation property to resources for education.

    Issues a POST against the submission's ``resources`` navigation property
    and returns the created resource.

    :param education_class_id: key: educationClass-id of educationClass.
    :type education_class_id: str
    :param education_assignment_id: key: educationAssignment-id of educationAssignment.
    :type education_assignment_id: str
    :param education_submission_id: key: educationSubmission-id of educationSubmission.
    :type education_submission_id: str
    :param id: Read-only.
    :type id: str
    :param assignment_resource_url:
    :type assignment_resource_url: str
    :param display_name:
    :type display_name: str
    :param created_date_time:
    :type created_date_time: ~datetime.datetime
    :param last_modified_date_time:
    :type last_modified_date_time: ~datetime.datetime
    :param microsoft_graph_identity_id: Unique identifier for the identity
     (lastModifiedBy user).
    :type microsoft_graph_identity_id: str
    :param microsoft_graph_identity_display_name: The identity's display name.
     May not always be available or up to date when using delta.
    :type microsoft_graph_identity_display_name: str
    :param id1: Unique identifier for the identity (lastModifiedBy device).
    :type id1: str
    :param display_name1: The identity's display name. May not always be
     available or up to date when using delta.
    :type display_name1: str
    :param id2: Unique identifier for the identity (lastModifiedBy application).
    :type id2: str
    :param display_name2: The identity's display name. May not always be
     available or up to date when using delta.
    :type display_name2: str
    :param id3: Unique identifier for the identity (createdBy user).
    :type id3: str
    :param display_name3: The identity's display name. May not always be
     available or up to date when using delta.
    :type display_name3: str
    :param id4: Unique identifier for the identity (createdBy device).
    :type id4: str
    :param display_name4: The identity's display name. May not always be
     available or up to date when using delta.
    :type display_name4: str
    :param id5: Unique identifier for the identity (createdBy application).
    :type id5: str
    :param display_name5: The identity's display name. May not always be
     available or up to date when using delta.
    :type display_name5: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphEducationSubmissionResource, or the result of cls(response)
    :rtype: ~education.models.MicrosoftGraphEducationSubmissionResource
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphEducationSubmissionResource"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    # Re-assemble the flattened keyword arguments into the request model.
    _body = models.MicrosoftGraphEducationSubmissionResource(
        id=id,
        assignment_resource_url=assignment_resource_url,
        display_name_resource_display_name=display_name,
        created_date_time=created_date_time,
        last_modified_date_time=last_modified_date_time,
        id_resource_last_modified_by_user_id=microsoft_graph_identity_id,
        display_name_resource_last_modified_by_user_display_name=microsoft_graph_identity_display_name,
        id_resource_last_modified_by_device_id=id1,
        display_name_resource_last_modified_by_device_display_name=display_name1,
        id_resource_last_modified_by_application_id=id2,
        display_name_resource_last_modified_by_application_display_name=display_name2,
        id_resource_created_by_user_id=id3,
        display_name_resource_created_by_user_display_name=display_name3,
        id_resource_created_by_device_id=id4,
        display_name_resource_created_by_device_display_name=display_name4,
        id_resource_created_by_application_id=id5,
        display_name_resource_created_by_application_display_name=display_name5,
    )
    content_type = kwargs.pop("content_type", "application/json")

    # Expand the URL template with the three path keys.
    path_args = {
        'educationClass-id': self._serialize.url("education_class_id", education_class_id, 'str'),
        'educationAssignment-id': self._serialize.url("education_assignment_id", education_assignment_id, 'str'),
        'educationSubmission-id': self._serialize.url("education_submission_id", education_submission_id, 'str'),
    }
    target_url = self._client.format_url(self.create_resource.metadata['url'], **path_args)  # type: ignore

    query_params = {}  # type: Dict[str, Any]
    headers = {}  # type: Dict[str, Any]
    headers['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    headers['Accept'] = 'application/json'

    body_kwargs = {}  # type: Dict[str, Any]
    body_kwargs['content'] = self._serialize.body(_body, 'MicrosoftGraphEducationSubmissionResource')
    request = self._client.post(target_url, query_params, headers, **body_kwargs)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphEducationSubmissionResource', pipeline_response)
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
create_resource.metadata = {'url': '/education/classes/{educationClass-id}/assignments/{educationAssignment-id}/submissions/{educationSubmission-id}/resources'}  # type: ignore
def get_resource(
    self,
    education_class_id,  # type: str
    education_assignment_id,  # type: str
    education_submission_id,  # type: str
    education_submission_resource_id,  # type: str
    select=None,  # type: Optional[List[Union[str, "models.Enum144"]]]
    expand=None,  # type: Optional[List[str]]
    **kwargs  # type: Any
):
    # type: (...) -> "models.MicrosoftGraphEducationSubmissionResource"
    """Get resources from education.

    Fetches a single submission resource by its key, optionally narrowing the
    returned properties with ``$select`` / ``$expand``.

    :param education_class_id: key: educationClass-id of educationClass.
    :type education_class_id: str
    :param education_assignment_id: key: educationAssignment-id of educationAssignment.
    :type education_assignment_id: str
    :param education_submission_id: key: educationSubmission-id of educationSubmission.
    :type education_submission_id: str
    :param education_submission_resource_id: key: educationSubmissionResource-id of
     educationSubmissionResource.
    :type education_submission_resource_id: str
    :param select: Select properties to be returned.
    :type select: list[str or ~education.models.Enum144]
    :param expand: Expand related entities.
    :type expand: list[str]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphEducationSubmissionResource, or the result of cls(response)
    :rtype: ~education.models.MicrosoftGraphEducationSubmissionResource
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphEducationSubmissionResource"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    # Expand the URL template with all four path keys.
    path_args = {
        'educationClass-id': self._serialize.url("education_class_id", education_class_id, 'str'),
        'educationAssignment-id': self._serialize.url("education_assignment_id", education_assignment_id, 'str'),
        'educationSubmission-id': self._serialize.url("education_submission_id", education_submission_id, 'str'),
        'educationSubmissionResource-id': self._serialize.url("education_submission_resource_id", education_submission_resource_id, 'str'),
    }
    target_url = self._client.format_url(self.get_resource.metadata['url'], **path_args)  # type: ignore

    # Optional OData projection parameters.
    query_params = {}  # type: Dict[str, Any]
    if select is not None:
        query_params['$select'] = self._serialize.query("select", select, '[str]', div=',')
    if expand is not None:
        query_params['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')

    headers = {'Accept': 'application/json'}  # type: Dict[str, Any]
    request = self._client.get(target_url, query_params, headers)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphEducationSubmissionResource', pipeline_response)
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
get_resource.metadata = {'url': '/education/classes/{educationClass-id}/assignments/{educationAssignment-id}/submissions/{educationSubmission-id}/resources/{educationSubmissionResource-id}'}  # type: ignore
def update_resource(
    self,
    education_class_id,  # type: str
    education_assignment_id,  # type: str
    education_submission_id,  # type: str
    education_submission_resource_id,  # type: str
    id=None,  # type: Optional[str]
    assignment_resource_url=None,  # type: Optional[str]
    display_name=None,  # type: Optional[str]
    created_date_time=None,  # type: Optional[datetime.datetime]
    last_modified_date_time=None,  # type: Optional[datetime.datetime]
    microsoft_graph_identity_id=None,  # type: Optional[str]
    microsoft_graph_identity_display_name=None,  # type: Optional[str]
    id1=None,  # type: Optional[str]
    display_name1=None,  # type: Optional[str]
    id2=None,  # type: Optional[str]
    display_name2=None,  # type: Optional[str]
    id3=None,  # type: Optional[str]
    display_name3=None,  # type: Optional[str]
    id4=None,  # type: Optional[str]
    display_name4=None,  # type: Optional[str]
    id5=None,  # type: Optional[str]
    display_name5=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Update the navigation property resources in education.

    Issues a PATCH against the addressed submission resource; the service
    replies 204 No Content on success.

    :param education_class_id: key: educationClass-id of educationClass.
    :type education_class_id: str
    :param education_assignment_id: key: educationAssignment-id of educationAssignment.
    :type education_assignment_id: str
    :param education_submission_id: key: educationSubmission-id of educationSubmission.
    :type education_submission_id: str
    :param education_submission_resource_id: key: educationSubmissionResource-id of
     educationSubmissionResource.
    :type education_submission_resource_id: str
    :param id: Read-only.
    :type id: str
    :param assignment_resource_url:
    :type assignment_resource_url: str
    :param display_name:
    :type display_name: str
    :param created_date_time:
    :type created_date_time: ~datetime.datetime
    :param last_modified_date_time:
    :type last_modified_date_time: ~datetime.datetime
    :param microsoft_graph_identity_id: Unique identifier for the identity
     (lastModifiedBy user).
    :type microsoft_graph_identity_id: str
    :param microsoft_graph_identity_display_name: The identity's display name.
     May not always be available or up to date when using delta.
    :type microsoft_graph_identity_display_name: str
    :param id1: Unique identifier for the identity (lastModifiedBy device).
    :type id1: str
    :param display_name1: The identity's display name. May not always be
     available or up to date when using delta.
    :type display_name1: str
    :param id2: Unique identifier for the identity (lastModifiedBy application).
    :type id2: str
    :param display_name2: The identity's display name. May not always be
     available or up to date when using delta.
    :type display_name2: str
    :param id3: Unique identifier for the identity (createdBy user).
    :type id3: str
    :param display_name3: The identity's display name. May not always be
     available or up to date when using delta.
    :type display_name3: str
    :param id4: Unique identifier for the identity (createdBy device).
    :type id4: str
    :param display_name4: The identity's display name. May not always be
     available or up to date when using delta.
    :type display_name4: str
    :param id5: Unique identifier for the identity (createdBy application).
    :type id5: str
    :param display_name5: The identity's display name. May not always be
     available or up to date when using delta.
    :type display_name5: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    # Re-assemble the flattened keyword arguments into the request model.
    _body = models.MicrosoftGraphEducationSubmissionResource(
        id=id,
        assignment_resource_url=assignment_resource_url,
        display_name_resource_display_name=display_name,
        created_date_time=created_date_time,
        last_modified_date_time=last_modified_date_time,
        id_resource_last_modified_by_user_id=microsoft_graph_identity_id,
        display_name_resource_last_modified_by_user_display_name=microsoft_graph_identity_display_name,
        id_resource_last_modified_by_device_id=id1,
        display_name_resource_last_modified_by_device_display_name=display_name1,
        id_resource_last_modified_by_application_id=id2,
        display_name_resource_last_modified_by_application_display_name=display_name2,
        id_resource_created_by_user_id=id3,
        display_name_resource_created_by_user_display_name=display_name3,
        id_resource_created_by_device_id=id4,
        display_name_resource_created_by_device_display_name=display_name4,
        id_resource_created_by_application_id=id5,
        display_name_resource_created_by_application_display_name=display_name5,
    )
    content_type = kwargs.pop("content_type", "application/json")

    path_args = {
        'educationClass-id': self._serialize.url("education_class_id", education_class_id, 'str'),
        'educationAssignment-id': self._serialize.url("education_assignment_id", education_assignment_id, 'str'),
        'educationSubmission-id': self._serialize.url("education_submission_id", education_submission_id, 'str'),
        'educationSubmissionResource-id': self._serialize.url("education_submission_resource_id", education_submission_resource_id, 'str'),
    }
    target_url = self._client.format_url(self.update_resource.metadata['url'], **path_args)  # type: ignore

    query_params = {}  # type: Dict[str, Any]
    headers = {}  # type: Dict[str, Any]
    headers['Content-Type'] = self._serialize.header("content_type", content_type, 'str')

    body_kwargs = {}  # type: Dict[str, Any]
    body_kwargs['content'] = self._serialize.body(_body, 'MicrosoftGraphEducationSubmissionResource')
    request = self._client.patch(target_url, query_params, headers, **body_kwargs)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
update_resource.metadata = {'url': '/education/classes/{educationClass-id}/assignments/{educationAssignment-id}/submissions/{educationSubmission-id}/resources/{educationSubmissionResource-id}'}  # type: ignore
def list_submitted_resource(
    self,
    education_class_id,  # type: str
    education_assignment_id,  # type: str
    education_submission_id,  # type: str
    orderby=None,  # type: Optional[List[Union[str, "models.Enum145"]]]
    select=None,  # type: Optional[List[Union[str, "models.Enum146"]]]
    expand=None,  # type: Optional[List[str]]
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["models.CollectionOfEducationSubmissionResource0"]
    """Get submittedResources from education.

    Returns a lazy pager over the submission's ``submittedResources``
    collection; each page is fetched on demand via the OData next link.

    :param education_class_id: key: educationClass-id of educationClass.
    :type education_class_id: str
    :param education_assignment_id: key: educationAssignment-id of educationAssignment.
    :type education_assignment_id: str
    :param education_submission_id: key: educationSubmission-id of educationSubmission.
    :type education_submission_id: str
    :param orderby: Order items by property values.
    :type orderby: list[str or ~education.models.Enum145]
    :param select: Select properties to be returned.
    :type select: list[str or ~education.models.Enum146]
    :param expand: Expand related entities.
    :type expand: list[str]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either CollectionOfEducationSubmissionResource0 or the
     result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~education.models.CollectionOfEducationSubmissionResource0]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.CollectionOfEducationSubmissionResource0"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # Build the GET request for one page. The first page uses the URL
        # template plus the OData query options; later pages follow the
        # server-supplied next link verbatim (no query parameters re-applied).
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = 'application/json'

        if not next_link:
            # Construct URL
            url = self.list_submitted_resource.metadata['url']  # type: ignore
            path_format_arguments = {
                'educationClass-id': self._serialize.url("education_class_id", education_class_id, 'str'),
                'educationAssignment-id': self._serialize.url("education_assignment_id", education_assignment_id, 'str'),
                'educationSubmission-id': self._serialize.url("education_submission_id", education_submission_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters. Paging options ($top/$skip/$search/$filter/
            # $count) come from the client configuration; projection options
            # come from this call's arguments.
            query_parameters = {}  # type: Dict[str, Any]
            if self._config.top is not None:
                query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
            if self._config.skip is not None:
                query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
            if self._config.search is not None:
                query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
            if self._config.filter is not None:
                query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
            if self._config.count is not None:
                query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
            if orderby is not None:
                query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
            if select is not None:
                query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
            if expand is not None:
                query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Deserialize one page and hand (continuation token, items) to the pager.
        deserialized = self._deserialize('CollectionOfEducationSubmissionResource0', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.odata_next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Run one page request through the pipeline; raise on any non-200.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            error = self._deserialize(models.OdataError, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list_submitted_resource.metadata = {'url': '/education/classes/{educationClass-id}/assignments/{educationAssignment-id}/submissions/{educationSubmission-id}/submittedResources'}  # type: ignore
def create_submitted_resource(
    self,
    education_class_id,  # type: str
    education_assignment_id,  # type: str
    education_submission_id,  # type: str
    id=None,  # type: Optional[str]
    assignment_resource_url=None,  # type: Optional[str]
    display_name=None,  # type: Optional[str]
    created_date_time=None,  # type: Optional[datetime.datetime]
    last_modified_date_time=None,  # type: Optional[datetime.datetime]
    microsoft_graph_identity_id=None,  # type: Optional[str]
    microsoft_graph_identity_display_name=None,  # type: Optional[str]
    id1=None,  # type: Optional[str]
    display_name1=None,  # type: Optional[str]
    id2=None,  # type: Optional[str]
    display_name2=None,  # type: Optional[str]
    id3=None,  # type: Optional[str]
    display_name3=None,  # type: Optional[str]
    id4=None,  # type: Optional[str]
    display_name4=None,  # type: Optional[str]
    id5=None,  # type: Optional[str]
    display_name5=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> "models.MicrosoftGraphEducationSubmissionResource"
    """Create new navigation property to submittedResources for education.

    Issues a POST against the submission's ``submittedResources`` navigation
    property and returns the created resource.

    :param education_class_id: key: educationClass-id of educationClass.
    :type education_class_id: str
    :param education_assignment_id: key: educationAssignment-id of educationAssignment.
    :type education_assignment_id: str
    :param education_submission_id: key: educationSubmission-id of educationSubmission.
    :type education_submission_id: str
    :param id: Read-only.
    :type id: str
    :param assignment_resource_url:
    :type assignment_resource_url: str
    :param display_name:
    :type display_name: str
    :param created_date_time:
    :type created_date_time: ~datetime.datetime
    :param last_modified_date_time:
    :type last_modified_date_time: ~datetime.datetime
    :param microsoft_graph_identity_id: Unique identifier for the identity
     (lastModifiedBy user).
    :type microsoft_graph_identity_id: str
    :param microsoft_graph_identity_display_name: The identity's display name.
     May not always be available or up to date when using delta.
    :type microsoft_graph_identity_display_name: str
    :param id1: Unique identifier for the identity (lastModifiedBy device).
    :type id1: str
    :param display_name1: The identity's display name. May not always be
     available or up to date when using delta.
    :type display_name1: str
    :param id2: Unique identifier for the identity (lastModifiedBy application).
    :type id2: str
    :param display_name2: The identity's display name. May not always be
     available or up to date when using delta.
    :type display_name2: str
    :param id3: Unique identifier for the identity (createdBy user).
    :type id3: str
    :param display_name3: The identity's display name. May not always be
     available or up to date when using delta.
    :type display_name3: str
    :param id4: Unique identifier for the identity (createdBy device).
    :type id4: str
    :param display_name4: The identity's display name. May not always be
     available or up to date when using delta.
    :type display_name4: str
    :param id5: Unique identifier for the identity (createdBy application).
    :type id5: str
    :param display_name5: The identity's display name. May not always be
     available or up to date when using delta.
    :type display_name5: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphEducationSubmissionResource, or the result of cls(response)
    :rtype: ~education.models.MicrosoftGraphEducationSubmissionResource
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphEducationSubmissionResource"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    # Map each flattened argument onto its model attribute, then build the body.
    resource_fields = {
        'id': id,
        'assignment_resource_url': assignment_resource_url,
        'display_name_resource_display_name': display_name,
        'created_date_time': created_date_time,
        'last_modified_date_time': last_modified_date_time,
        'id_resource_last_modified_by_user_id': microsoft_graph_identity_id,
        'display_name_resource_last_modified_by_user_display_name': microsoft_graph_identity_display_name,
        'id_resource_last_modified_by_device_id': id1,
        'display_name_resource_last_modified_by_device_display_name': display_name1,
        'id_resource_last_modified_by_application_id': id2,
        'display_name_resource_last_modified_by_application_display_name': display_name2,
        'id_resource_created_by_user_id': id3,
        'display_name_resource_created_by_user_display_name': display_name3,
        'id_resource_created_by_device_id': id4,
        'display_name_resource_created_by_device_display_name': display_name4,
        'id_resource_created_by_application_id': id5,
        'display_name_resource_created_by_application_display_name': display_name5,
    }
    _body = models.MicrosoftGraphEducationSubmissionResource(**resource_fields)
    content_type = kwargs.pop("content_type", "application/json")

    path_args = {
        'educationClass-id': self._serialize.url("education_class_id", education_class_id, 'str'),
        'educationAssignment-id': self._serialize.url("education_assignment_id", education_assignment_id, 'str'),
        'educationSubmission-id': self._serialize.url("education_submission_id", education_submission_id, 'str'),
    }
    target_url = self._client.format_url(self.create_submitted_resource.metadata['url'], **path_args)  # type: ignore

    query_params = {}  # type: Dict[str, Any]
    headers = {}  # type: Dict[str, Any]
    headers['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    headers['Accept'] = 'application/json'

    body_kwargs = {}  # type: Dict[str, Any]
    body_kwargs['content'] = self._serialize.body(_body, 'MicrosoftGraphEducationSubmissionResource')
    request = self._client.post(target_url, query_params, headers, **body_kwargs)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphEducationSubmissionResource', pipeline_response)
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
create_submitted_resource.metadata = {'url': '/education/classes/{educationClass-id}/assignments/{educationAssignment-id}/submissions/{educationSubmission-id}/submittedResources'}  # type: ignore
def get_submitted_resource(
    self,
    education_class_id,  # type: str
    education_assignment_id,  # type: str
    education_submission_id,  # type: str
    education_submission_resource_id,  # type: str
    select=None,  # type: Optional[List[Union[str, "models.Enum147"]]]
    expand=None,  # type: Optional[List[str]]
    **kwargs  # type: Any
):
    # type: (...) -> "models.MicrosoftGraphEducationSubmissionResource"
    """Get submittedResources from education.

    Fetches one submitted resource of an assignment submission via
    GET /education/classes/{id}/assignments/{id}/submissions/{id}/submittedResources/{id}.

    :param education_class_id: key: educationClass-id of educationClass.
    :type education_class_id: str
    :param education_assignment_id: key: educationAssignment-id of educationAssignment.
    :type education_assignment_id: str
    :param education_submission_id: key: educationSubmission-id of educationSubmission.
    :type education_submission_id: str
    :param education_submission_resource_id: key: educationSubmissionResource-id of
     educationSubmissionResource.
    :type education_submission_resource_id: str
    :param select: Select properties to be returned.
    :type select: list[str or ~education.models.Enum147]
    :param expand: Expand related entities.
    :type expand: list[str]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphEducationSubmissionResource, or the result of cls(response)
    :rtype: ~education.models.MicrosoftGraphEducationSubmissionResource
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphEducationSubmissionResource"]
    # Map "not found"/"conflict" statuses onto azure-core exception types;
    # callers may extend or override the mapping via the ``error_map`` kwarg.
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    # Construct URL
    url = self.get_submitted_resource.metadata['url']  # type: ignore
    path_format_arguments = {
        'educationClass-id': self._serialize.url("education_class_id", education_class_id, 'str'),
        'educationAssignment-id': self._serialize.url("education_assignment_id", education_assignment_id, 'str'),
        'educationSubmission-id': self._serialize.url("education_submission_id", education_submission_id, 'str'),
        'educationSubmissionResource-id': self._serialize.url("education_submission_resource_id", education_submission_resource_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters: OData $select/$expand projections, serialized as
    # comma-separated lists and omitted entirely when not requested.
    query_parameters = {}  # type: Dict[str, Any]
    if select is not None:
        query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = 'application/json'
    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    # 200 is the only expected success status for this GET.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
    deserialized = self._deserialize('MicrosoftGraphEducationSubmissionResource', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get_submitted_resource.metadata = {'url': '/education/classes/{educationClass-id}/assignments/{educationAssignment-id}/submissions/{educationSubmission-id}/submittedResources/{educationSubmissionResource-id}'}  # type: ignore
def update_submitted_resource(
    self,
    education_class_id,  # type: str
    education_assignment_id,  # type: str
    education_submission_id,  # type: str
    education_submission_resource_id,  # type: str
    id=None,  # type: Optional[str]
    assignment_resource_url=None,  # type: Optional[str]
    display_name=None,  # type: Optional[str]
    created_date_time=None,  # type: Optional[datetime.datetime]
    last_modified_date_time=None,  # type: Optional[datetime.datetime]
    microsoft_graph_identity_id=None,  # type: Optional[str]
    microsoft_graph_identity_display_name=None,  # type: Optional[str]
    id1=None,  # type: Optional[str]
    display_name1=None,  # type: Optional[str]
    id2=None,  # type: Optional[str]
    display_name2=None,  # type: Optional[str]
    id3=None,  # type: Optional[str]
    display_name3=None,  # type: Optional[str]
    id4=None,  # type: Optional[str]
    display_name4=None,  # type: Optional[str]
    id5=None,  # type: Optional[str]
    display_name5=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Update the navigation property submittedResources in education.

    Sends a PATCH for a single submitted resource. The flat keyword arguments
    are folded into one MicrosoftGraphEducationSubmissionResource request
    body; the numbered ``id``/``display_name`` pairs populate the nested
    identity fields as follows (see the ``_body`` construction below):

    * ``microsoft_graph_identity_id`` / ``microsoft_graph_identity_display_name`` -- lastModifiedBy user
    * ``id1`` / ``display_name1`` -- lastModifiedBy device
    * ``id2`` / ``display_name2`` -- lastModifiedBy application
    * ``id3`` / ``display_name3`` -- createdBy user
    * ``id4`` / ``display_name4`` -- createdBy device
    * ``id5`` / ``display_name5`` -- createdBy application

    Identity display names may be stale; the Graph API does not guarantee
    they reflect later renames.

    :param education_class_id: key: educationClass-id of educationClass.
    :type education_class_id: str
    :param education_assignment_id: key: educationAssignment-id of educationAssignment.
    :type education_assignment_id: str
    :param education_submission_id: key: educationSubmission-id of educationSubmission.
    :type education_submission_id: str
    :param education_submission_resource_id: key: educationSubmissionResource-id of
     educationSubmissionResource.
    :type education_submission_resource_id: str
    :param id: Read-only.
    :type id: str
    :param assignment_resource_url:
    :type assignment_resource_url: str
    :param display_name:
    :type display_name: str
    :param created_date_time:
    :type created_date_time: ~datetime.datetime
    :param last_modified_date_time:
    :type last_modified_date_time: ~datetime.datetime
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Map "not found"/"conflict" statuses onto azure-core exception types;
    # callers may extend or override the mapping via the ``error_map`` kwarg.
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    # Fold the flat keyword arguments into the request model (see the
    # docstring for the id1..id5 -> identity field mapping).
    _body = models.MicrosoftGraphEducationSubmissionResource(id=id, assignment_resource_url=assignment_resource_url, display_name_resource_display_name=display_name, created_date_time=created_date_time, last_modified_date_time=last_modified_date_time, id_resource_last_modified_by_user_id=microsoft_graph_identity_id, display_name_resource_last_modified_by_user_display_name=microsoft_graph_identity_display_name, id_resource_last_modified_by_device_id=id1, display_name_resource_last_modified_by_device_display_name=display_name1, id_resource_last_modified_by_application_id=id2, display_name_resource_last_modified_by_application_display_name=display_name2, id_resource_created_by_user_id=id3, display_name_resource_created_by_user_display_name=display_name3, id_resource_created_by_device_id=id4, display_name_resource_created_by_device_display_name=display_name4, id_resource_created_by_application_id=id5, display_name_resource_created_by_application_display_name=display_name5)
    content_type = kwargs.pop("content_type", "application/json")
    # Construct URL
    url = self.update_submitted_resource.metadata['url']  # type: ignore
    path_format_arguments = {
        'educationClass-id': self._serialize.url("education_class_id", education_class_id, 'str'),
        'educationAssignment-id': self._serialize.url("education_assignment_id", education_assignment_id, 'str'),
        'educationSubmission-id': self._serialize.url("education_submission_id", education_submission_id, 'str'),
        'educationSubmissionResource-id': self._serialize.url("education_submission_resource_id", education_submission_resource_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters (none for this operation)
    query_parameters = {}  # type: Dict[str, Any]
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(_body, 'MicrosoftGraphEducationSubmissionResource')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    # A successful PATCH returns 204 No Content; anything else is an error.
    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
    if cls:
        return cls(pipeline_response, None, {})
update_submitted_resource.metadata = {'url': '/education/classes/{educationClass-id}/assignments/{educationAssignment-id}/submissions/{educationSubmission-id}/submittedResources/{educationSubmissionResource-id}'}  # type: ignore
| [
"japhethobalak@gmail.com"
] | japhethobalak@gmail.com |
5f7da7fa319f1cc207012d4a7c768df8dbb81213 | b096dbccb31d3bd181259e930816964c71034ff4 | /tests/test_asynchronous/test_task.py | 73017b8eafee20bb87f1953d01201f1490e7e409 | [] | no_license | cosphere-org/lily | b68f95720381a69ce0caa5f47fca461b3f5242a9 | f6a8281e10eedcccb86fcf3a26aaf282d91f70f4 | refs/heads/master | 2023-02-18T13:49:03.568989 | 2022-06-30T09:58:23 | 2022-06-30T09:58:23 | 175,789,374 | 6 | 0 | null | 2023-02-15T18:49:10 | 2019-03-15T09:28:05 | Python | UTF-8 | Python | false | false | 369 | py |
from unittest import TestCase
from lily.asynchronous import AsyncTask
class AsyncTaskTestCase(TestCase):
    """Unit tests for the AsyncTask container."""

    def test_init(self):
        """A freshly built task keeps its callback/args and starts unfinished."""

        def noop():
            pass

        task = AsyncTask(callback=noop, args=[9, 1])
        self.assertIs(task.callback, noop)
        self.assertEqual(task.args, [9, 1])
        self.assertIs(task.successful, False)
        self.assertIsNone(task.result)
| [
"maciej@cosphere.org"
] | maciej@cosphere.org |
c098e841a18b5c063ff3cacb87764cdf91444d91 | 064adee6fd8ca73906c7f1de0fcc701962a957ed | /LeetCodePractice/Graph/MakeConnected.py | 0c98fce1da972dedadfaaf560409caa58ebc312e | [] | no_license | hj59172507/AlgorithmPractice | d2c5e7d37b7feff6e93f6ca48d41b325b38618c2 | cc752cf5e98790bc9222454bc2d6f9b19952945c | refs/heads/master | 2021-07-08T05:57:09.399505 | 2020-08-03T16:20:20 | 2020-08-03T16:20:20 | 166,489,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,247 | py | """
1319. Number of Operations to Make Network Connected
There are n computers numbered from 0 to n-1 connected by ethernet cables connections forming a network where connections[i] = [a, b] represents a connection between computers a and b. Any computer can reach any other computer directly or indirectly through the network.
Given an initial computer network connections. You can extract certain cables between two directly connected computers, and place them between any pair of disconnected computers to make them directly connected. Return the minimum number of times you need to do this in order to make all the computers connected. If it's not possible, return -1.
Example 1:
Input: n = 4, connections = [[0,1],[0,2],[1,2]]
Output: 1
Explanation: Remove cable between computer 1 and 2 and place between computers 1 and 3.
Example 2:
Input: n = 6, connections = [[0,1],[0,2],[0,3],[1,2],[1,3]]
Output: 2
Example 3:
Input: n = 6, connections = [[0,1],[0,2],[0,3],[1,2]]
Output: -1
Explanation: There are not enough cables.
Example 4:
Input: n = 5, connections = [[0,1],[0,2],[3,4],[2,3]]
Output: 0
Constraints:
1 <= n <= 10^5
1 <= connections.length <= min(n*(n-1)/2, 10^5)
connections[i].length == 2
0 <= connections[i][0], connections[i][1] < n
connections[i][0] != connections[i][1]
There are no repeated connections.
No two computers are connected by more than one cable.
Sol
Time O(connection.length)
Space O(n)
First we construct list of set where li[i] store all computers can reach by i
Let visited list store if we have seen this computer.
Run a dfs on all computers, and sum the number of disjoint cluster
"""
from typing import List
class Solution:
def makeConnected(self, n: int, connections: List[List[int]]) -> int:
cables = len(connections)
if cables < n - 1:
return -1
graph = [set() for i in range(n)]
for c in connections:
graph[c[0]].add(c[1])
graph[c[1]].add(c[0])
visited = [False for i in range(n)]
def dfs(i):
if visited[i]:
return 0
visited[i] = True
for j in graph[i]:
dfs(j)
return 1
return sum(dfs(i) for i in range(n)) - 1
| [
"huaj59172507@gmail.com"
] | huaj59172507@gmail.com |
85fbf72f12c93bd8cbc07a20077d8f3efcaed883 | d8e0d9f7a1ab10fe8c8ac9fc5310973962d32e48 | /django_demo/api/utils.py | 2a977e18134d1a3cd917ab6e83303582acb3839e | [] | no_license | kmcgon/django_demo | 4e6e7dd9bff1c7879d699c799311ad72425b6a99 | be3acafcd40021819ab67a1f5d84aa52bf6827ac | refs/heads/master | 2022-12-15T08:37:25.626794 | 2020-09-15T02:45:24 | 2020-09-15T02:45:24 | 295,596,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | import subprocess
def get_date():
    """Get the current date and time from the system.

    Runs the ``date`` binary and returns its stdout decoded as UTF-8
    (trailing newline included).
    """
    raw = subprocess.check_output("date")
    return raw.decode("utf-8")
def get_cpuinfo():
    """Return the contents of /proc/cpuinfo as a list of tuples.

    Equivalent to 'cat /proc/cpuinfo' in bash; each line is split on the
    "\\t:" separator the kernel places between field name and value, so most
    entries come back as (name, value) pairs.
    """
    with open("/proc/cpuinfo", "r") as cpu_file:
        raw_lines = cpu_file.read().splitlines()
    return [tuple(line.split("\t:")) for line in raw_lines]
| [
"kmcgon@gmail.com"
] | kmcgon@gmail.com |
25788f2b9bda8b347883b6978af3d158f8ee4b59 | c3afa54f177ae302c3b326a63a15c65d464f2e20 | /primrose/base/postprocess.py | 81b54d029c1f97a188e274b1736cf461328178ff | [
"Apache-2.0"
] | permissive | pombredanne/primrose | 4675d7dfe5e0d34cd221bdc1381b892f3558710a | 7d1b4d2a75c4e6ec0115e43d755396b5a1b4d9e3 | refs/heads/master | 2020-09-22T22:05:18.380669 | 2019-11-27T18:06:05 | 2019-11-27T18:06:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | """Module with abstract postprocess class to specify interface needed for future postprocesses
Author(s):
Michael Skarlinski (michael.skarlinski@weightwatchers.com)
Carl Anderson (carl.anderson@weightwatchers.com)
"""
from primrose.base.node import AbstractNode
class AbstractPostprocess(AbstractNode):
    """Marker base class for postprocess nodes.

    A postprocess node sends pipeline data to an external destination.
    Concrete subclasses must provide a postprocess method; this class adds
    no behavior beyond the :class:`AbstractNode` interface.
    """
    pass
| [
"michael.skarlinski@weightwatchers.com"
] | michael.skarlinski@weightwatchers.com |
8b1c43c02b43a82f211b757ec0a1944b54c95ff9 | 190ed35d0c36368a9d775ac27f90575906da527c | /python_scripts/file/download_file.py | 1d4facc0ec1acef245f25dae107132079d898d3a | [] | no_license | bestzmr/python-demo | 420a0079445393753ff30eb93bb32fd8b33ea612 | 789fee140d1824695876d4d419ba41f43970aa1c | refs/heads/master | 2023-04-21T09:58:55.341423 | 2021-05-17T03:01:35 | 2021-05-17T03:01:35 | 322,514,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# @Time : 2021/1/15 9:53
# @Author : ZhaoQiang
# @File : download_file.py
# @Software: PyCharm
# @Description: 下载文件
import requests
def download_file(url, filepath, header):
    """Stream a remote file to disk.

    :param url: source URL to download
    :param filepath: local destination path (opened in binary write mode)
    :param header: dict of HTTP headers sent with the request
    :raises requests.HTTPError: if the response status is not successful
    """
    # stream=True enables lazy downloading: large bodies are fetched in
    # chunks via iter_content() instead of being loaded into memory at once.
    # Bug fix: requests takes the keyword ``headers``, not ``header`` -- the
    # old call raised TypeError before any request was sent.
    response = requests.get(url, headers=header, stream=True)
    # Raise for non-2xx statuses; returns None on success.
    response.raise_for_status()
    # Context manager guarantees the file is closed even if a chunk fails.
    with open(filepath, 'wb') as fp:
        for chunk in response.iter_content(chunk_size=100000):
            if chunk:
                fp.write(chunk)
| [
"z2485782203@163.com"
] | z2485782203@163.com |
8bf0b25f614e368a825205d186b8622cf8e8c727 | 35895890ba77cbd4657280e489971515f0013ac5 | /envirovote/races/tests.py | f6344ddc6c4e2663f805a8c756f535d41159e151 | [] | no_license | brianboyer/envirovote | 2215573397f42edf4bdb25716fbc9932d68f8b28 | 4c868bc367843f9264ab88c49ec31f53f7fc8b1d | refs/heads/master | 2021-01-10T19:42:05.722060 | 2008-12-13T17:25:40 | 2008-12-13T17:25:40 | 32,231,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,146 | py | import unittest
from races.models import Candidate, Race
from endorsements.models import Endorsement, Organization
from races.helpers import calculate_meter_info
class HelpersTestCase(unittest.TestCase):
    """Exercises calculate_meter_info against a fixture of senate races.

    The fixture builds 1994 ("previous") and 2000 ("current") races in six
    states, covering every transition the meter must report:

    * MI -- previously green winner; current winner has no endorsements
    * AK -- current race undecided (no winner)
    * IL -- non-green winner in both cycles
    * CA -- green (endorsed) winner in both cycles
    * NY, MA -- non-green 1994 winner replaced by a green 2000 winner

    NOTE(review): this is a plain ``unittest.TestCase``, so the ORM rows
    created in setUp are not wrapped in a rolled-back transaction the way
    ``django.test.TestCase`` would do -- confirm whether that matters for
    the surrounding test runner.

    Cleanup over the original: endorsement/candidate create calls whose
    results were never referenced are no longer bound to unused locals.
    """

    def setUp(self):
        org1 = Organization.objects.create(name='Sierra Club')
        org2 = Organization.objects.create(name='League of Conservation Voters')
        org3 = Organization.objects.create(name='The Strangelove Institute for Environmental Studies')

        # Michigan: 1994 winner was green (two endorsements); the 2000 winner
        # has no endorsements, so MI flips from green to not green.
        mi_prev_race = Race.objects.create(race_type='sen', state='MI', year=1994, last_race=None)
        mi_prev_can1 = Candidate.objects.create(name='Mitt Romney', race=mi_prev_race)
        mi_prev_can2 = Candidate.objects.create(name='Joe Patt', race=mi_prev_race)
        mi_prev_race.winner = mi_prev_can2
        mi_prev_race.save()
        Endorsement.objects.create(organization=org3, candidate=mi_prev_can1)
        Endorsement.objects.create(organization=org1, candidate=mi_prev_can2)
        Endorsement.objects.create(organization=org2, candidate=mi_prev_can2)
        mi_curr_race = Race.objects.create(race_type='sen', state='MI', year=2000, last_race=mi_prev_race)
        Candidate.objects.create(name='Mitt Romney', race=mi_curr_race)
        mi_curr_can2 = Candidate.objects.create(name='Kwame Kilpatrick', race=mi_curr_race)
        mi_curr_race.winner = mi_curr_can2
        mi_curr_race.save()

        # Alaska: the 2000 race exists but has no winner -> "remaining".
        ak_prev_race = Race.objects.create(race_type='sen', state='AK', year=1994, last_race=None)
        Race.objects.create(race_type='sen', state='AK', year=2000, last_race=ak_prev_race)

        # Illinois: the non-endorsed candidate wins both cycles.
        il_prev_race = Race.objects.create(race_type='sen', state='IL', year=1994, last_race=None)
        il_prev_can1 = Candidate.objects.create(name='Dennis Hastert', race=il_prev_race)
        il_prev_can2 = Candidate.objects.create(name='Barack Obama', race=il_prev_race)
        il_prev_race.winner = il_prev_can1
        il_prev_race.save()
        Endorsement.objects.create(organization=org1, candidate=il_prev_can2)
        il_curr_race = Race.objects.create(race_type='sen', state='IL', year=2000, last_race=il_prev_race)
        il_curr_can1 = Candidate.objects.create(name='Dennis Hastert', race=il_curr_race)
        il_curr_can2 = Candidate.objects.create(name='Dick Durbin', race=il_curr_race)
        il_curr_race.winner = il_curr_can1
        il_curr_race.save()
        Endorsement.objects.create(organization=org1, candidate=il_curr_can2)

        # California: the endorsed (green) candidate wins both cycles.
        ca_prev_race = Race.objects.create(race_type='sen', state='CA', year=1994, last_race=None)
        Candidate.objects.create(name='Arnold Schwartzenegger', race=ca_prev_race)
        ca_prev_can2 = Candidate.objects.create(name='Gary Coleman', race=ca_prev_race)
        ca_prev_race.winner = ca_prev_can2
        ca_prev_race.save()
        Endorsement.objects.create(organization=org1, candidate=ca_prev_can2)
        ca_curr_race = Race.objects.create(race_type='sen', state='CA', year=2000, last_race=ca_prev_race)
        Candidate.objects.create(name='Arnold Schwartzenegger', race=ca_curr_race)
        ca_curr_can2 = Candidate.objects.create(name='Gary Coleman', race=ca_curr_race)
        ca_curr_race.winner = ca_curr_can2
        ca_curr_race.save()
        Endorsement.objects.create(organization=org1, candidate=ca_curr_can2)

        # New York: non-green 1994 winner replaced by a green 2000 winner.
        ny_prev_race = Race.objects.create(race_type='sen', state='NY', year=1994, last_race=None)
        ny_prev_can1 = Candidate.objects.create(name='Rudy Guiliani', race=ny_prev_race)
        ny_prev_can2 = Candidate.objects.create(name='Hillary Clinton', race=ny_prev_race)
        ny_prev_race.winner = ny_prev_can1
        ny_prev_race.save()
        Endorsement.objects.create(organization=org1, candidate=ny_prev_can2)
        ny_curr_race = Race.objects.create(race_type='sen', state='NY', year=2000, last_race=ny_prev_race)
        Candidate.objects.create(name='Rudy Guiliani', race=ny_curr_race)
        ny_curr_can2 = Candidate.objects.create(name='Hillary Clinton', race=ny_curr_race)
        ny_curr_race.winner = ny_curr_can2
        ny_curr_race.save()
        Endorsement.objects.create(organization=org1, candidate=ny_curr_can2)

        # Massachusetts: same green-flip shape as New York.
        ma_prev_race = Race.objects.create(race_type='sen', state='MA', year=1994, last_race=None)
        ma_prev_can1 = Candidate.objects.create(name='Joe Lieberman', race=ma_prev_race)
        ma_prev_can2 = Candidate.objects.create(name='Joe Biden', race=ma_prev_race)
        ma_prev_race.winner = ma_prev_can1
        ma_prev_race.save()
        Endorsement.objects.create(organization=org1, candidate=ma_prev_can2)
        ma_curr_race = Race.objects.create(race_type='sen', state='MA', year=2000, last_race=ma_prev_race)
        Candidate.objects.create(name='Joe Lieberman', race=ma_curr_race)
        ma_curr_can2 = Candidate.objects.create(name='Joe Biden', race=ma_curr_race)
        ma_curr_race.winner = ma_curr_can2
        ma_curr_race.save()
        Endorsement.objects.create(organization=org1, candidate=ma_curr_can2)

    def test_calculate_meter_info(self):
        # no races
        races = Race.objects.filter(year=1999)
        info = calculate_meter_info(races)
        self.assertEqual(info['decided_races'], 0)
        self.assertEqual(info['remaining_races'], 0)
        self.assertEqual(info['green_races'], 0)
        self.assertEqual(info['percent_green'], 0)
        self.assertEqual(info['percent_change'], 0)
        # no races decided
        races = Race.objects.filter(year=2000, state='AK')
        info = calculate_meter_info(races)
        self.assertEqual(info['decided_races'], 0)
        self.assertEqual(info['remaining_races'], 1)
        self.assertEqual(info['green_races'], 0)
        self.assertEqual(info['percent_green'], 0)
        self.assertEqual(info['percent_change'], 0)
        # one race decided
        races = Race.objects.filter(year=2000, state='MI')
        info = calculate_meter_info(races)
        self.assertEqual(info['decided_races'], 1)
        self.assertEqual(info['remaining_races'], 0)
        # previously green winner, current winner no endorsements
        # and, previous green winner is opposed in his greenness
        races = Race.objects.filter(year=2000, state='MI')
        info = calculate_meter_info(races)
        self.assertEqual(info['green_races'], 0)
        self.assertEqual(info['percent_green'], 0)
        self.assertEqual(info['percent_change'], -100)
        # previously green winner, current green winner
        races = Race.objects.filter(year=2000, state='CA')
        info = calculate_meter_info(races)
        self.assertEqual(info['green_races'], 1)
        self.assertEqual(info['percent_green'], 100)
        self.assertEqual(info['percent_change'], 0)
        # previously not green winner, current not green winner
        races = Race.objects.filter(year=2000, state='IL')
        info = calculate_meter_info(races)
        self.assertEqual(info['green_races'], 0)
        self.assertEqual(info['percent_green'], 0)
        self.assertEqual(info['percent_change'], 0)
        # previously not green winner, current green winner
        races = Race.objects.filter(year=2000, state='NY')
        info = calculate_meter_info(races)
        self.assertEqual(info['green_races'], 1)
        self.assertEqual(info['percent_green'], 100)
        self.assertEqual(info['percent_change'], 100)
        # the whole shebang: 6 races, AK undecided; CA/NY/MA green in 2000
        races = Race.objects.filter(year=2000)
        info = calculate_meter_info(races)
        self.assertEqual(info['decided_races'], 5)
        self.assertEqual(info['remaining_races'], 1)
        self.assertEqual(info['green_races'], 3)
        self.assertEqual(info['percent_green'], 100 * 3 / 5)
        self.assertEqual(info['percent_change'], 100 * 1 / 5)

    def tearDown(self):
        # No explicit cleanup; see the class docstring note on transactions.
        pass
| [
"benderbending@aeaa4eaa-a835-11dd-a887-7ffe1a420f8d"
] | benderbending@aeaa4eaa-a835-11dd-a887-7ffe1a420f8d |
cd11f4c013bbbf9e2770dc15bde51f95098d6eac | a43cf3cacf518096737dd39833fd39624f8cf543 | /tests/test_csv_adapters.py | 071ac85a5c6f6098d645e145a468f026e11bcd6a | [
"Apache-2.0"
] | permissive | Mickey1964/antevents-python | f6ad4f9b056550055a223f7d4a7d34bc030c1dfb | 5b9226813583141986014fc83f6f74342a5f271e | refs/heads/master | 2021-06-15T11:23:56.253643 | 2017-03-31T05:25:59 | 2017-03-31T05:25:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,236 | py | # Copyright 2016 by MPI-SWS and Data-Ken Research.
# Licensed under the Apache 2.0 License.
"""Verify the csv reader/writer through a round trip
"""
import unittest
import time
from tempfile import NamedTemporaryFile
import os
import asyncio
import datetime
from antevents.base import Scheduler, IterableAsPublisher, SensorEvent
from antevents.adapters.csv import CsvReader, default_event_mapper
import antevents.linq.dispatch
from utils import make_test_publisher, CaptureSubscriber, \
SensorEventValidationSubscriber
NUM_EVENTS=5  # number of sensor events generated per round-trip test

class TestCases(unittest.TestCase):
    """Round-trip tests for the CSV event reader/writer adapters."""

    def test_default_mapper(self):
        """Round-trip a SensorEvent through the default event<->row mapper."""
        event = SensorEvent(ts=time.time(), sensor_id=1, val=123.456)
        row = default_event_mapper.event_to_row(event)
        event2 = default_event_mapper.row_to_event(row)
        self.assertEqual(event2, event,
                         "Round-tripped event does not match original event")

    def test_file_write_read(self):
        """Write generated events to a CSV file, read them back, and check
        that the two streams match."""
        # delete=False so the file survives close() and can be reopened by
        # the reader below; we remove it ourselves in the finally block.
        tf = NamedTemporaryFile(mode='w', delete=False)
        tf.close()
        try:
            # Pass 1: a test publisher emits NUM_EVENTS events, captured in
            # memory and simultaneously written to the CSV file.
            sensor = make_test_publisher(1, stop_after_events=NUM_EVENTS)
            capture = CaptureSubscriber()
            sensor.subscribe(capture)
            sensor.csv_writer(tf.name)
            scheduler = Scheduler(asyncio.get_event_loop())
            scheduler.schedule_recurring(sensor)
            print("Writing sensor events to temp file")
            scheduler.run_forever()
            self.assertTrue(capture.completed, "CaptureSubscriber did not complete")
            self.assertEqual(len(capture.events), NUM_EVENTS,
                             "number of events captured did not match generated events")
            # Pass 2: stream the file back and validate each event against
            # the captured originals.
            reader = CsvReader(tf.name)
            vs = SensorEventValidationSubscriber(capture.events, self)
            reader.subscribe(vs)
            scheduler.schedule_recurring(reader)
            print("reading sensor events back from temp file")
            scheduler.run_forever()
            self.assertTrue(vs.completed, "ValidationSubscriber did not complete")
        finally:
            os.remove(tf.name)
# Data for the rollover test: the writer is expected to produce one
# date-stamped file per calendar day for the 'dining-room' sensor.
ROLLING_FILE1 = 'dining-room-2015-01-01.csv'
ROLLING_FILE2 = 'dining-room-2015-01-02.csv'
FILES = [ROLLING_FILE1, ROLLING_FILE2]
def make_ts(day, hr, minute):
    """Seconds since the Unix epoch for 2015-01-<day> at <hr>:<minute> (naive UTC)."""
    epoch = datetime.datetime(1970, 1, 1)
    when = datetime.datetime(2015, 1, day, hr, minute)
    return (when - epoch).total_seconds()
# Two dining-room events on Jan 1 and two on Jan 2, 2015 -- the rollover
# test expects these to be split across ROLLING_FILE1/ROLLING_FILE2.
EVENTS = [SensorEvent('dining-room', make_ts(1, 11, 1), 1),
          SensorEvent('dining-room', make_ts(1, 11, 2), 2),
          SensorEvent('dining-room', make_ts(2, 11, 1), 3),
          SensorEvent('dining-room', make_ts(2, 11, 2), 4)]

# data for dispatch test
sensor_ids = ['dining-room', 'living-room']
ROLLING_FILE3 = 'living-room-2015-01-01.csv'
ROLLING_FILE4 = 'living-room-2015-01-02.csv'
FILES2 = [ROLLING_FILE1, ROLLING_FILE2, ROLLING_FILE3, ROLLING_FILE4]
# One event per sensor per day, so all four per-sensor/per-day files
# should be created by the dispatch test.
EVENTS2 = [SensorEvent('dining-room', make_ts(1, 11, 1), 1),
           SensorEvent('living-room', make_ts(1, 11, 2), 2),
           SensorEvent('living-room', make_ts(2, 11, 1), 3),
           SensorEvent('dining-room', make_ts(2, 11, 2), 4)]

def make_rule(sensor_id):
    # Dispatch rule: (predicate matching the sensor id, sub-topic name).
    return (lambda evt: evt.sensor_id==sensor_id, sensor_id)

dispatch_rules = [make_rule(s) for s in sensor_ids]
class TestRollingCsvWriter(unittest.TestCase):
    """Tests for the date-based rolling CSV writer (one file per sensor per day)."""

    def _cleanup(self):
        # Remove any per-day CSV files left over from earlier runs so the
        # existence assertions below start from a clean slate.
        for f in FILES2:
            if os.path.exists(f):
                os.remove(f)

    def setUp(self):
        self._cleanup()

    def tearDown(self):
        self._cleanup()

    def test_rollover(self):
        """Events spanning two days should land in two date-stamped files."""
        def generator():
            for e in EVENTS:
                yield e
        sensor = IterableAsPublisher(generator(), name='sensor')
        sensor.rolling_csv_writer('.', 'dining-room')
        vs = SensorEventValidationSubscriber(EVENTS, self)
        sensor.subscribe(vs)
        scheduler = Scheduler(asyncio.get_event_loop())
        scheduler.schedule_recurring(sensor)
        scheduler.run_forever()
        for f in FILES:
            self.assertTrue(os.path.exists(f), 'did not find file %s' % f)
            print("found log file %s" % f)

    def test_dispatch(self):
        """Test a scenario where we dispatch to one of several writers
        depending on the sensor id.
        """
        def generator():
            for e in EVENTS2:
                yield e
        sensor = IterableAsPublisher(generator(), name='sensor')
        dispatcher = sensor.dispatch(dispatch_rules)
        for s in sensor_ids:
            dispatcher.rolling_csv_writer('.', s, sub_topic=s)
        # Anything that falls through the dispatch rules is a test failure.
        dispatcher.subscribe(lambda x: self.assertTrue(False, "bad dispatch of %s" % x))
        scheduler = Scheduler(asyncio.get_event_loop())
        scheduler.schedule_recurring(sensor)
        scheduler.run_forever()
        for f in FILES2:
            self.assertTrue(os.path.exists(f), 'did not find file %s' % f)
            # Each file is expected to contain exactly two lines --
            # presumably a header plus the single data row per sensor/day;
            # confirm against the writer's output format.
            cnt = 0
            with open(f, 'r') as fobj:
                for line in fobj:
                    cnt +=1
            self.assertEqual(2, cnt, "File %s did not have 2 lines" % f)
            print("found log file %s" % f)

if __name__ == '__main__':
    unittest.main()
| [
"jeff@data-ken.org"
] | jeff@data-ken.org |
3ba8f7fac04d4e7b45bfe7128eff82a0fb4248dc | 974d04d2ea27b1bba1c01015a98112d2afb78fe5 | /test/ipu/test_gelu_op_ipu.py | 5877341afb1264b0ffe18dd0fbecc822be5d9904 | [
"Apache-2.0"
] | permissive | PaddlePaddle/Paddle | b3d2583119082c8e4b74331dacc4d39ed4d7cff0 | 22a11a60e0e3d10a3cf610077a3d9942a6f964cb | refs/heads/develop | 2023-08-17T21:27:30.568889 | 2023-08-17T12:38:22 | 2023-08-17T12:38:22 | 65,711,522 | 20,414 | 5,891 | Apache-2.0 | 2023-09-14T19:20:51 | 2016-08-15T06:59:08 | C++ | UTF-8 | Python | false | false | 2,151 | py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test_ipu import IPUOpTest
import paddle
import paddle.static
class TestBase(IPUOpTest):
    """IPU operator test for paddle.nn.functional.gelu (exact, erf-based form)."""

    def setUp(self):
        # Standard IPUOpTest scaffolding: tolerances, training flag, data
        # feeds, feed metadata, and the op attributes under test.
        self.set_atol()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()
        self.set_op_attrs()

    def set_data_feed(self):
        # The same random tensor is fed once as fp32 and once as fp16 so the
        # two precisions run from identical input values.
        data = np.random.uniform(size=[1, 3, 10, 10])
        self.feed_fp32 = {'in_0': data.astype(np.float32)}
        self.feed_fp16 = {'in_0': data.astype(np.float16)}

    def set_feed_attr(self):
        # Shapes/names derived from the fp32 feed dict for graph construction.
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())

    def set_op_attrs(self):
        # approximate=False selects the exact gelu formulation.
        self.attrs = {"approximate": False}

    @IPUOpTest.static_graph
    def build_model(self):
        x = paddle.static.data(
            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32'
        )
        out = paddle.nn.functional.gelu(x, **self.attrs)
        self.fetch_list = [out.name]

    def run_model(self, exec_mode):
        self.run_op_test(exec_mode)

    def test(self):
        # Build and run once per non-skipped execution mode, then compare
        # the collected outputs across modes.
        for m in IPUOpTest.ExecutionMode:
            if not self.skip_mode(m):
                self.build_model()
                self.run_model(m)
        self.check()
class TestCase1(TestBase):
    """Same op test with approximate (tanh) gelu and looser fp16 bounds."""

    def set_atol(self):
        self.atol, self.rtol = 1e-10, 1e-6
        self.atol_fp16, self.rtol_fp16 = 2e-3, 1e-3

    def set_op_attrs(self):
        self.attrs = {"approximate": True}
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| [
"noreply@github.com"
] | noreply@github.com |
15168f8997c802aa12de9a3e6c807d5268b738c5 | c998a5505531d67dfc6c89045c58c188364f23f8 | /hyperfet/extractions.py | e10f57998910e8c871e8b729deeaf8ca88d3a205 | [
"MIT"
] | permissive | samueljamesbader/HyperFET_Project | 7f99931be80842acb2dd6f3e73c6fa36cc93bcd3 | c9b7a870aa9c63f0bac76d3b9370ef4814acda0b | refs/heads/master | 2021-01-19T11:53:45.973059 | 2017-03-03T01:59:42 | 2017-03-03T01:59:42 | 82,488,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py | import numpy as np
from hyperfet.devices import Direction
def left(VG,If,Ib):
    """Return (VG, If, Ib) at the left edge of the hysteretic region.

    A point is hysteretic when its forward and backward currents differ.
    """
    mask = ~np.isclose(If, Ib)
    vg_h, if_h, ib_h = VG[mask], If[mask], Ib[mask]
    idx = np.argmin(vg_h)
    return vg_h[idx], if_h[idx], ib_h[idx]
def right(VG,If,Ib):
    """Return (VG, If, Ib) at the right edge of the hysteretic region."""
    mask = ~np.isclose(If, Ib)
    vg_h, if_h, ib_h = VG[mask], If[mask], Ib[mask]
    idx = np.argmax(vg_h)
    return vg_h[idx], if_h[idx], ib_h[idx]
def is_point_hysteretic(hyperfet,VD,VG):
    """True when the forward and backward currents differ at bias (VD, VG)."""
    forward, backward = hyperfet.I_double(VD, VG)
    return not np.isclose(forward, backward)
def boundaries_nonhysteretic(hyperfet,VDD):
    """True when neither boundary bias point (VG=0 and VG=VDD) is hysteretic."""
    if is_point_hysteretic(hyperfet, VDD, 0):
        return False
    return not is_point_hysteretic(hyperfet, VDD, VDD)
def shift(VG,hyperfet,If,Ib,I,VDD):
    """Find the gate-voltage shift that brings the backward branch to the
    target current I, and return (Vshift, forward I-V on VG + Vshift).

    Raises AssertionError if the curve is hysteretic at VG=0 (no room to
    shift) or still hysteretic at VG=VDD after shifting.
    """
    # Leftmost hysteretic gate voltage: shifting past it is not allowed.
    Vl=left(VG,If,Ib)[0]
    assert Vl>0, "Curve already hysteretic at VG=0, left-shifting cannot improve device."
    # Index where the backward branch meets the target current.
    # NOTE(review): this minimizes (Ib - I), not |Ib - I|; if Ib can dip
    # below the target, np.abs() may be intended here -- confirm.
    i=np.argmin(Ib-np.ravel(I)[0])
    Vshift=VG[i]
    if Vshift>Vl:
        print("Shifting limited by left-hysteretic bound.")
        # Back off to one grid step below the hysteretic boundary
        # (assumes VG is uniformly spaced -- TODO confirm).
        Vshift=Vl-(VG[1]-VG[0])
    assert not is_point_hysteretic(hyperfet,VDD,VDD), "Curve still hysteretic at VG=VDD"
    return Vshift, hyperfet.I(VDD,VG+Vshift,Direction.FORWARD)
#assert is_point_hysteretic(hyperfet,VD=VDD,VG=0)
| [
"samuel.james.bader@gmail.com"
] | samuel.james.bader@gmail.com |
4441e66f99b1c1064e3927508a2b0066d814ffdb | adf8f911698d91e4b725a774c8883fbea6c633c6 | /mysite/common/urls.py | 8851f09bb74b916e50247ce0998e43bd8d8c642d | [] | no_license | SangminKwon/django | e21df9c27f416b9b764705eee310f4acfb59c384 | cf20169f033de96530e3c864a8bd354bcacc8aee | refs/heads/master | 2023-05-31T15:54:18.486637 | 2021-06-17T00:17:38 | 2021-06-17T00:17:38 | 376,989,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | from django.urls import path
from django.contrib.auth import views as auth_views
from . import views
app_name = 'common'
urlpatterns = [
path('login/', auth_views.LoginView.as_view(template_name = 'common/login.html'), name='login'),
path('logout/', auth_views.LogoutView.as_view(), name='logout'),
path('signup/', views.signup, name='signup'),
] | [
"ksm9905@gmail.com"
] | ksm9905@gmail.com |
051cf3b0ce8c33d2703249127fc0b89868329c66 | 84b8ffefb55cffe1973995d16bf009262c61794c | /Test RPG Game/Main.py | b1d6dcb8b9cdf21164ab9437c36008c5fbff87a9 | [] | no_license | LupinotheWolf/Python | 581e560bf906781431a504dda8649591f0e515f9 | 927a7331cae4707fd5a9e7d9918332566c23e1ac | refs/heads/master | 2023-02-02T22:58:28.890732 | 2020-12-21T15:41:38 | 2020-12-21T15:41:38 | 323,378,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,039 | py | import pygame
import time
import random
pygame.init()
FPS = 30
clock = pygame.time.Clock()#sets clock so you can create FPS
#define colours
white = (255,255,255)
black = (0,0,0)
red = (255,0,0)
green = (0,255,0)
blue = (0,0,255)
#Set window size
display_width = 800
display_height = 600
game_display = pygame.display.set_mode((display_width,display_height))
pygame.display.set_caption('Test Game')
"""
flipbook by using frames - updates entire surface
pygame.display.flip()
"""
#UI helpers: font used for on-screen messages (exit flags are set inside gameLoop)
font = pygame.font.SysFont(None, 25)#number is font size (None = default system font)

def message_to_screen(msg,color):
    """Render msg in the given colour onto the game surface.

    NOTE(review): the text's top-left corner is placed at the window
    centre, so the message is not visually centred -- confirm intent.
    """
    screen_text = font.render(msg, True, color)
    game_display.blit(screen_text, [display_width / 2, display_height / 2])#where the message shows

def snake(block_size, snakelist):
    """Draw every snake segment as a black square of block_size pixels."""
    for XnY in snakelist:
        pygame.draw.rect(game_display, black, [XnY[0],XnY[1],block_size,block_size])#draws the snake

block_size = 10  # side length (px) of one snake segment / movement step
"""
MAIN LOOP
"""
def gameLoop():#calls main loop
    """Run one game session: handle input, move the snake, draw frames.

    Pressing "c" on the game-over screen recursively calls gameLoop()
    instead of resetting the local state in place.
    """
    gameExit = False
    gameOver = False
    #lead is leader of blocks in snake
    lead_x = display_width / 2
    lead_y = display_height / 2
    lead_x_change = 0
    lead_y_change = 0
    snakelist = []
    snakelength = 1
    AppleThickness = 30
    #round dimensions of random apple to the nearest 10 (keeps it on the grid)
    randAppleX = round(random.randrange(0, display_width - block_size) / 10.0) * 10.0
    randAppleY = round(random.randrange(0, display_height - block_size) / 10.0) * 10.0
    while not gameExit:
        #game-over screen: wait for quit (q) or restart (c)
        while gameOver == True:
            game_display.fill(white)
            message_to_screen("You lose... WHITE BOY.. press c to play again or q to quit", red)
            pygame.display.update()
            for event in pygame.event.get():
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_q:
                        gameExit = True
                        gameOver = False#make sure to set to false to break loop
                    if event.key == pygame.K_c:
                        gameLoop()#makes gameloop run again
        for event in pygame.event.get():#event handling loop
            if event.type == pygame.QUIT:#if you press closewindow
                gameExit = True
            if event.type == pygame.KEYDOWN:#arrow keys set the velocity one block per frame
                if event.key == pygame.K_LEFT:#goes left
                    lead_x_change = -block_size
                    lead_y_change = 0
                elif event.key == pygame.K_RIGHT:#goes right
                    lead_x_change = block_size
                    lead_y_change = 0
                elif event.key == pygame.K_UP:
                    lead_y_change = -block_size
                    lead_x_change = 0
                elif event.key == pygame.K_DOWN:
                    lead_y_change = block_size
                    lead_x_change = 0
        #if you hit the edge of the screen
        if lead_x >= display_width or lead_x <= 0 or lead_y >= display_height or lead_y <= 0:
            gameOver = True
        #rendering/drawing block
        lead_x += lead_x_change#adds change in x to current state of x
        lead_y += lead_y_change
        game_display.fill(white)#fills the screen with white
        pygame.draw.rect(game_display, red, [randAppleX, randAppleY, AppleThickness, AppleThickness])#draws the apple
        #rebuild head, append it, and trim the tail to the current length
        snakehead = []
        snakehead.append(lead_x)
        snakehead.append(lead_y)
        snakelist.append(snakehead)
        if len(snakelist) > snakelength:
            del snakelist[0]
        #self-collision: head matching any body segment ends the game
        for eachsegment in snakelist[:-1]:
            if eachsegment == snakehead:
                gameOver = True
        snake(block_size, snakelist)
        pygame.display.update()#updates the display/window
        #old exact-coordinate apple test, superseded by the bounding-box test below
        # if lead_x == randAppleX and lead_y == randAppleY:
        #     randAppleX = round(random.randrange(0, display_width - block_size) / 10.0) * 10.0
        #     randAppleY = round(random.randrange(0, display_height - block_size) / 10.0) * 10.0
        #     snakelength += 1
        #bounding-box overlap so the 30px apple can be eaten by the 10px head
        if lead_x >= randAppleX and lead_x <= randAppleX + AppleThickness:
            if lead_y >= randAppleY and lead_y <= randAppleY + AppleThickness:
                randAppleX = round(random.randrange(0, display_width - block_size) / 10.0) * 10.0
                randAppleY = round(random.randrange(0, display_height - block_size) / 10.0) * 10.0
                snakelength += 1
        clock.tick(FPS)#sets FPS for events
# Run the game first, then shut pygame down once the player exits.
# BUG FIX: the original executed pygame.quit()/quit() *before* calling
# gameLoop(), so quit() raised SystemExit and the game never started.
gameLoop()
pygame.quit()
quit()
"""
NOTES
v this event makes it so the block moves as long as you have it pressed v
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
lead_x_change = 0
message_to_screen("YOU DIED", red)
pygame.display.update()#updates the screen again
time.sleep(2)#forces the system to wait for set seconds
"""
| [
"74966209+LupinotheWolf@users.noreply.github.com"
] | 74966209+LupinotheWolf@users.noreply.github.com |
449c3c5e309070e99dc56af5ec86f50dc0f73458 | 5292b03998384c0d2bb5858058892d7e45c5365b | /Hack.lu/2020/right-spot/run.py | 8c7042ab7fb76f17d7a1c5a6d887097b60883e2d | [
"MIT"
] | permissive | TheusZer0/ctf-archives | 430ef80d367b44fd81449bcb108e367842cb8e39 | 033ccf8dab0abdbdbbaa4f0092ab589288ddb4bd | refs/heads/main | 2023-09-04T17:56:24.416820 | 2021-11-21T06:51:27 | 2021-11-21T06:51:27 | 430,603,430 | 1 | 0 | MIT | 2021-11-22T07:24:08 | 2021-11-22T07:24:07 | null | UTF-8 | Python | false | false | 2,998 | py | #!/usr/bin/env python3
import zlib
import sys
import os
import subprocess
import io
import bz2
import sys
from flag import flag
COMPRESSED_LIMIT = 2**20 # 1 MB compressed
DECOMPRESSED_LIMIT = 30*2**20 # 30 MB uncompressed
EXPECTED_STRING = b"pwned!\n"
NUM_TESTS = 4
def compress(data):
    """bz2-compress data, refusing inputs larger than DECOMPRESSED_LIMIT."""
    oversized = len(data) > DECOMPRESSED_LIMIT
    if oversized:
        print('ERROR: File size limit exceeded!')
        exit(0)
    return bz2.compress(data, compresslevel=9)
def decompress(data):
    """bz2-decompress data, capping the output at DECOMPRESSED_LIMIT bytes."""
    decompressor = bz2.BZ2Decompressor()
    output = decompressor.decompress(data, max_length=DECOMPRESSED_LIMIT)
    # needs_input means the stream did not finish within the size cap.
    if decompressor.needs_input:
        print('ERROR: File size limit exceeded!')
        exit(0)
    return output
# --- Handshake: ask the client how many bytes to expect and their CRC. ---
print(f"Welcome! Please send bz2 compressed binary data. How many bytes will you send (MAX: {COMPRESSED_LIMIT})?", flush=True)
try:
    num_bytes = int(sys.stdin.readline())
except ValueError:
    print("A valid number, please")
    exit(0)
# Reject non-positive or over-limit byte counts up front.
if not (0 < num_bytes <= COMPRESSED_LIMIT):
    print("Bad number of bytes. Bye!")
    exit(0)
print("What is your calculated CRC of the compressed data (hex)?", flush=True)
try:
    # The CRC is transmitted as a hexadecimal string.
    crc = int(sys.stdin.readline(), 16)
except ValueError:
    print("A valid hex crc, please")
    exit(0)
print(f"Okay got CRC: {crc:x}, please start sending data", flush=True)
# Read exactly num_bytes of compressed payload from stdin.
compressed_payload = sys.stdin.buffer.read(num_bytes)
while len(compressed_payload) < num_bytes:
    # BUG FIX: BufferedReader.read() takes a single size argument; the
    # original called read(0, num_bytes - len(...)), which raises
    # TypeError as soon as the first read comes back short.
    chunk = sys.stdin.buffer.read(num_bytes - len(compressed_payload))
    if not chunk:
        # EOF before the promised byte count: stop instead of spinning;
        # the CRC check below will reject the truncated payload.
        break
    compressed_payload += chunk
print(f"Read {len(compressed_payload)} bytes")
# Verify the client-declared CRC against the received bytes.
calc_crc = zlib.crc32(compressed_payload)
if crc == calc_crc:
    print("[+] CRC Checks out, all good.", flush=True)
else:
    print(f"CRC mismatch. Calculated CRC: {calc_crc:x}, expected: {crc:x}")
    exit(0)
payload = decompress(compressed_payload)
# Defensive re-check (decompress() already enforces this limit).
if len(payload) > DECOMPRESSED_LIMIT:
    print(f"Payload too long. Got: {len(payload)} bytes. Limit: {DECOMPRESSED_LIMIT}")
    exit(0)
print("[+] Decompressed payload", flush=True)
# The payload must make ./right_spot print EXPECTED_STRING and exit 0
# for every seed in [0, 32) and NUM_TESTS runs each.
for seed in range(1 << 5):
    print(f"Trying seed: 0x{seed:x}", flush=True)
    for i in range(1, NUM_TESTS + 1):
        print(f"Try #{i}", flush=True)
        try:
            p = subprocess.Popen(["./right_spot", str(seed)], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
            stdout_output, stderr_output = p.communicate(input=payload, timeout=5)
            if stdout_output != EXPECTED_STRING:
                print("[-] Mh, not the correct output.")
                print(f"Output was: {stdout_output}")
                exit(0)
            if p.returncode != 0:
                print(f"[-] Did not return success status code. Status was: {p.returncode}")
                exit(0)
        except subprocess.TimeoutExpired as e:
            print("[-] Process timed out")
            p.kill()
            exit(0)
        except Exception as e:
            print("Something unforeseen went wrong...")
            print(e)
            p.kill()
            exit(0)
print(f"Congrats, here is your flag: {flag}", flush=True)
"sajjadium@google.com"
] | sajjadium@google.com |
8c9c6f86415414eac65099b6ad036d598482a6ef | cc88beafd7a59a832fecff45f436490f805ba000 | /demos/json_schema.py | 72f3cf8eec40655d6dac240792698b62d8a3ff2c | [
"BSD-3-Clause"
] | permissive | RobSpectre/structures | 6ead59bf37ef02e3c3d2181dc941a2e60f98becb | 5345fb63658eecdc59e08882372294f13b0df889 | refs/heads/master | 2020-12-25T04:29:11.389945 | 2012-08-25T17:45:15 | 2012-08-25T17:45:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | #!/usr/bin/env python
"""{'type': 'object', 'properties': {'personal_thoughts': {'type': 'string', 'id': 'personal_thoughts', 'maxLength': 255}, 'title': {'type': 'string', 'id': 'title', 'maxLength': 40}, 'id': {'type': 'string'}, 'year': {'minimum': 1950, 'type': 'integer', 'id': 'year', 'maximum': 2011}}}
"""
import datetime
from structures.models import Model
from structures.types import StringType, IntType
###
### The base class
###
class Movie(Model):
    """Movie model: a bounded title, a year in [1950, current year],
    and free-form personal notes.
    """
    title = StringType(max_length=40)
    # max_value is evaluated once, at import time.
    year = IntType(min_value=1950, max_value=datetime.datetime.now().year)
    personal_thoughts = StringType(max_length=255)
# Build a sample instance and print its JSON-schema representation
# (the expected output is shown in the module docstring above).
m = Movie(title='Some Movie',
          year=2011,
          personal_thoughts='It was pretty good')
print m.for_jsonschema()
"jd@j2labs.net"
] | jd@j2labs.net |
f48e9c86d62820de6dafa538d6a9e4d6ac681258 | 365b81432adf4850028407f08ed40c4259ac24be | /scripts/sine.py | c034a9c6d9105e88177885d2c7fe08062bdfd777 | [] | no_license | massimilianocasini/openhab2 | 173fa620d313a14a2dd9a880fbdb77375a273a0d | 7933a12f0362f216b3d4df500801a8260dc1559b | refs/heads/master | 2021-07-03T06:37:04.834167 | 2021-06-06T07:38:17 | 2021-06-06T07:38:17 | 288,466,529 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,348 | py | #!/usr/bin/python
# A little script to send test data to an influxdb installation
# Attention, the non-core library 'requests' is used. You'll need to install it first:
# http://docs.python-requests.org/en/master/user/install/
import json
import math
import requests
import sys
from time import sleep
IP = "localhost" # The IP of the machine hosting your influxdb instance
DB = "openhab_db" # The database to write to, has to exist
USER = "openhab" # The influxdb user to authenticate with
PASSWORD = "openhab" # The password of that user
TIME = 1 # Delay in seconds between two consecutive updates
STATUS_MOD = 5 # The interval in which the updates count will be printed to your console
n = 0 # running total of points written
# Loop forever, sweeping one full sine period (0-359 degrees) per pass and
# writing one measurement per degree via influxdb's HTTP line protocol.
while True:
    for d in range(0, 360):
        v = 'sine_wave value=%s' % math.sin(math.radians(d))
        ## without authentication
        #r = requests.post("http://%s:8086/write?db=%s" %(IP, DB), data=v)
        ## with authentication
        r = requests.post("http://%s:8086/write?db=%s" %(IP, DB), auth=(USER, PASSWORD), data=v)
        # influxdb answers 204 No Content on a successful write
        if r.status_code != 204:
            print 'Failed to add point to influxdb (%d) - aborting.' %r.status_code
            sys.exit(1)
        n += 1
        sleep(TIME)
        if n % STATUS_MOD == 0:
            print '%d points inserted.' % n
"massimiliano.casini@gmail.com"
] | massimiliano.casini@gmail.com |
d248b71876a13eb1dced36510f2490ce62092796 | b19bc58dd9855409dfb0a0361714763f64e8950b | /project/app/migrations/0004_auto_20170711_2215.py | 798fc39fadf0ebc781e5392c3ec76366cefe6f46 | [] | no_license | caroid/Django-mysql-data-management | 6e9fc66c762083ede2517e432cb24ba64649adf9 | 23892b252b34c08b3d81cffad9a1a697cc0459e3 | refs/heads/master | 2021-04-03T05:13:56.612097 | 2017-07-17T11:26:35 | 2017-07-17T11:26:35 | 125,162,127 | 1 | 0 | null | 2018-03-14T05:53:12 | 2018-03-14T05:53:12 | null | UTF-8 | Python | false | false | 469 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-07-11 22:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter NodeData.signal_str to a CharField
    with default '100' and max_length 120.
    """

    dependencies = [
        ('app', '0003_auto_20170711_2209'),
    ]

    operations = [
        migrations.AlterField(
            model_name='nodedata',
            name='signal_str',
            field=models.CharField(default='100', max_length=120),
        ),
    ]
| [
"rajkumarmeena25996@gmail.com"
] | rajkumarmeena25996@gmail.com |
5097b846d08047afa9caae82e49275ea9f3c46fa | af8f42e890126aa9af0535991e7c7109db1cedf7 | /hw1/reports/sec2plot.py | b3a31adb9c253b6768e3e192addbcc8116a3fcb0 | [
"MIT"
] | permissive | mwhittaker/homework | 1c482a346a85c0eb9364185cb90ab5efdc67d632 | 2faa90662ea0b256625bd07d0d26de39b4e9a455 | refs/heads/master | 2021-01-22T07:39:51.446384 | 2017-10-18T17:31:05 | 2017-10-18T17:31:05 | 102,309,475 | 4 | 0 | null | 2017-09-04T02:16:25 | 2017-09-04T02:16:25 | null | UTF-8 | Python | false | false | 378 | py | import matplotlib.pyplot as plt
import numpy as np
def main():
    """Plot the section-2 training loss curve on a log scale to sec2.pdf."""
    # Each row of sec2.txt is "step loss".
    data = np.genfromtxt("sec2.txt", delimiter=" ")
    steps, loss = data[:, 0], data[:, 1]

    plt.figure()
    plt.semilogy(steps, loss)
    plt.grid()
    plt.xlabel("Training iteration")
    plt.ylabel("Training loss (average mean squared error)")
    plt.savefig("sec2.pdf")
# Script entry point.
if __name__ == "__main__":
    main()
| [
"mjwhittaker@berkeley.edu"
] | mjwhittaker@berkeley.edu |
7d18be02197d5ac26671c7436dd67fee6601902e | c319730a44788d106540dc9a203fd72eb898bd43 | /ex05.py | a04f60465f312a415c72934bd905026c7d00af56 | [] | no_license | kelvinadams/PythonTheHardWay | 469fff91561a8f6ac270cd68a3b8c05e5fdf8af8 | 6f9c33cf6e0475a524876bb581e38419e6777456 | refs/heads/main | 2023-07-27T22:28:28.083520 | 2021-08-24T22:03:36 | 2021-08-24T22:03:36 | 383,270,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | # Python the Hard Way - Exercise 5
my_name = 'Kelvin Adams'
my_age = 31 # Technically almost 32
my_height = 72.5 # Inches...and yes, that 0.5 is important!
my_weight = 185 # Pounds
my_eyes = "Grey" # they alternate between blue and green
my_teeth = 'White' # Especially after my dentist appt today!
my_hair = 'Dark Brown'
print(f"Let's talk about {my_name}.")
print(f"He's {my_height} inches tall.")
print(f"He's {my_weight} pounds heavy.")
print("Actually, he's not too heavy.")
print(f"He's got {my_eyes} eyes and {my_hair} hair.")
print(f"His teeth are usually {my_teeth}, depending on coffee.")
# a supposedly tricky line
total = my_age + my_height + my_weight
print(f"If I add {my_age} (my age), {my_height} (my height), and {my_weight} (my weight) I get a total of: {total}.")
| [
"noreply@github.com"
] | noreply@github.com |
4ffb855b13fd38f3ff0bf76db89c7a878afc1c77 | e210c28eeed9d38eb78c14b3a6388eca1e0e85d8 | /examples/advanced/sklearn-svm/jobs/sklearn_svm_base/app/custom/svm_learner.py | 070ceb832d5e6448975001a4a6fd155dcae0fea3 | [
"Apache-2.0"
] | permissive | NVIDIA/NVFlare | 5a2d2e4c85a3fd0948e25f1ba510449727529a15 | 1433290c203bd23f34c29e11795ce592bc067888 | refs/heads/main | 2023-08-03T09:21:32.779763 | 2023-07-05T21:17:16 | 2023-07-05T21:17:16 | 388,876,833 | 442 | 140 | Apache-2.0 | 2023-09-14T19:12:35 | 2021-07-23T17:26:12 | Python | UTF-8 | Python | false | false | 3,884 | py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
from sklearn.metrics import roc_auc_score
from sklearn.svm import SVC
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.learner_spec import Learner
from nvflare.app_opt.sklearn.data_loader import load_data_for_range
class SVMLearner(Learner):
    """Federated SVM learner.

    Fits a local sklearn SVC once (round 0), shares the resulting support
    vectors as its model update, and validates an SVC refit on the
    aggregated global support vectors.
    """

    def __init__(
        self,
        data_path: str,
        train_start: int,
        train_end: int,
        valid_start: int,
        valid_end: int,
    ):
        # data_path: file consumed by load_data_for_range.
        # [train_start, train_end) / [valid_start, valid_end): row ranges
        # for the local training and validation splits.
        super().__init__()
        self.data_path = data_path
        self.train_start = train_start
        self.train_end = train_end
        self.valid_start = valid_start
        self.valid_end = valid_end
        self.train_data = None
        self.valid_data = None
        self.n_samples = None
        self.svm = None
        self.kernel = None
        self.params = {}

    def load_data(self) -> dict:
        """Load the local splits; returns {"train": ..., "valid": ...}."""
        train_data = load_data_for_range(self.data_path, self.train_start, self.train_end)
        valid_data = load_data_for_range(self.data_path, self.valid_start, self.valid_end)
        return {"train": train_data, "valid": valid_data}

    def initialize(self, fl_ctx: FLContext):
        """Load the data once at startup; model creation is deferred to train()."""
        data = self.load_data()
        self.train_data = data["train"]
        self.valid_data = data["valid"]
        # train data size, to be used for setting
        # NUM_STEPS_CURRENT_ROUND for potential use in aggregation
        self.n_samples = data["train"][-1]
        # model will be created after receiving global parameter of kernel

    def train(self, curr_round: int, global_param: Optional[dict], fl_ctx: FLContext) -> Tuple[dict, dict]:
        """Round 0: fit a local SVC with the server-chosen kernel and cache
        its support vectors as the update. Rounds > 1 trigger system_panic;
        round 1 falls through and returns the cached params unchanged.

        Returns (params dict with "support_x"/"support_y", local SVC).
        """
        if curr_round == 0:
            # only perform training on the first round
            (x_train, y_train, train_size) = self.train_data
            self.kernel = global_param["kernel"]
            self.svm = SVC(kernel=self.kernel)
            # train model
            self.svm.fit(x_train, y_train)
            # get support vectors
            index = self.svm.support_
            local_support_x = x_train[index]
            local_support_y = y_train[index]
            self.params = {"support_x": local_support_x, "support_y": local_support_y}
        elif curr_round > 1:
            self.system_panic("Federated SVM only performs training for one round, system exiting.", fl_ctx)
        return self.params, self.svm

    def validate(self, curr_round: int, global_param: Optional[dict], fl_ctx: FLContext) -> Tuple[dict, dict]:
        """Refit an SVC on the global support vectors and score it on the
        local validation split. Returns ({"AUC": auc}, global SVC).
        """
        # local validation with global support vectors
        # fit a standalone SVM with the global support vectors
        svm_global = SVC(kernel=self.kernel)
        support_x = global_param["support_x"]
        support_y = global_param["support_y"]
        svm_global.fit(support_x, support_y)
        # validate global model
        (x_valid, y_valid, valid_size) = self.valid_data
        y_pred = svm_global.predict(x_valid)
        auc = roc_auc_score(y_valid, y_pred)
        self.log_info(fl_ctx, f"AUC {auc:.4f}")
        metrics = {"AUC": auc}
        return metrics, svm_global

    def finalize(self, fl_ctx: FLContext) -> None:
        """Release the cached data splits at shutdown."""
        # freeing resources in finalize
        del self.train_data
        del self.valid_data
        self.log_info(fl_ctx, "Freed training resources")
| [
"noreply@github.com"
] | noreply@github.com |
22e6134ec7f1f2c305fd830471c62ba2d52f78ba | e3a97b316fdf07b170341da206163a865f9e812c | /python/kwiver/vital/tests/test_category_hierarchy.py | c9497979ff437d836cb13a371ab1157285bb6372 | [
"BSD-3-Clause"
] | permissive | Kitware/kwiver | 09133ede9d05c33212839cc29d396aa8ca21baaf | a422409b83f78f31cda486e448e8009513e75427 | refs/heads/master | 2023-08-28T10:41:58.077148 | 2023-07-28T21:18:52 | 2023-07-28T21:18:52 | 23,229,909 | 191 | 92 | NOASSERTION | 2023-06-26T17:18:20 | 2014-08-22T15:22:20 | C++ | UTF-8 | Python | false | false | 16,958 | py | """
ckwg +31
Copyright 2020 by Kitware, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither name of Kitware, Inc. nor the names of any contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==============================================================================
Tests for Python interface to vital::category_hierarchy
"""
from kwiver.vital.types import CategoryHierarchy
import nose.tools as nt
import os
import tempfile
import unittest
class TestVitalCategoryHierarchy(unittest.TestCase):
def setUp(self):
"""
First create the following hierarchy using lists
class0
/ \
/ \
class1_0 class1_1
/
/
class2_0
where class0 has id 0,
class1_0 has id 1,
class1_1 has id 2, and
class2_0 has id 3
"""
self.class_names = ["class0", "class1_0", "class1_1", "class2_0"]
self.parent_names = ["", "class0", "class0", "class1_0"]
self.ids = [0, 1, 2, 3]
# Now write to a file to create a similar hierarchy
# Unfortunately, in order for this to work on Windows, we can't
# utilize tempfile's automatic cleanup, as the C++ process won't be
# able to read the file if it's still open in Python
# So create a file and manually delete in tearDown()
self.fp = tempfile.NamedTemporaryFile(mode="w+", delete=False)
# This hierarchy is the same as the one constructed using lists,
# Except class2_0 also has class1_1 as a parent. Each class
# also has 2 synonyms of the form:
# {classname}_syn{syn_num}, where syn_num is 0 or 1
self.fp.writelines(
[
"class0 class0_syn0 class0_syn1",
"\nclass1_0 :parent=class0 class1_0_syn0 class1_0_syn1",
"\nclass1_1 class1_1_syn0 class1_1_syn1 :parent=class0",
"\nclass2_0 class2_0_syn0 :parent=class1_0 :parent=class1_1 class2_0_syn1",
"\n#class5",
]
)
self.fp.flush()
# Close so C++ can read
self.fp.close()
# Manually delete the file
def tearDown(self):
os.remove(self.fp.name)
nt.assert_false(os.path.exists(self.fp.name))
def test_default_constructor(self):
CategoryHierarchy()
def test_construct_from_file(self):
CategoryHierarchy(self.fp.name)
def test_constructor_from_file_no_exist(self):
expected_err_msg = "Unable to open nonexistant_file.txt"
with nt.assert_raises_regexp(RuntimeError, expected_err_msg):
CategoryHierarchy("nonexistant_file.txt")
def test_construct_from_lists(self):
# Should be able to call with just class_names
CategoryHierarchy(self.class_names)
# class_names and parent_names
CategoryHierarchy(self.class_names, self.parent_names)
# class_names and ids
CategoryHierarchy(self.class_names, ids=self.ids)
# and all 3
CategoryHierarchy(self.class_names, self.parent_names, self.ids)
def _create_hierarchies(self):
empty = CategoryHierarchy()
from_file = CategoryHierarchy(self.fp.name)
from_lists = CategoryHierarchy(self.class_names, self.parent_names, self.ids)
return (empty, from_file, from_lists)
def test_constructor_throws_exceptions(self):
# Passing class_names and parent_names of different sizes
with nt.assert_raises_regexp(ValueError, "Parameter vector sizes differ."):
CategoryHierarchy(self.class_names, self.parent_names[:-1])
with nt.assert_raises_regexp(ValueError, "Parameter vector sizes differ."):
CategoryHierarchy(self.class_names[:-1], self.parent_names)
with nt.assert_raises_regexp(ValueError, "Parameter vector sizes differ."):
CategoryHierarchy([], self.parent_names)
# Passing class_names and ids of different sizes
with nt.assert_raises_regexp(ValueError, "Parameter vector sizes differ."):
CategoryHierarchy(self.class_names, ids=self.ids[:-1])
with nt.assert_raises_regexp(ValueError, "Parameter vector sizes differ."):
CategoryHierarchy(self.class_names[:-1], ids=self.ids)
with nt.assert_raises_regexp(ValueError, "Parameter vector sizes differ."):
CategoryHierarchy([], ids=self.ids)
# Passing empty class_names also throws exception
with nt.assert_raises_regexp(ValueError, "Parameter vector are empty."):
CategoryHierarchy([])
def test_initial_classes(self):
empty, from_file, from_lists = self._create_hierarchies()
# First check that each hierarchy does/does not
# have the expected class names
for name, id_ in zip(self.class_names, self.ids):
# empty
nt.assert_false(
empty.has_class_name(name),
"Empty hierarchy had classname {}".format(name),
)
# from_file
nt.ok_(
from_file.has_class_name(name),
"heirarchy constructed from file does not have {}".format(name),
)
nt.assert_equals(from_file.get_class_id(name), id_)
nt.assert_equals(from_file.get_class_name(name), name)
# from_lists
nt.ok_(
from_lists.has_class_name(name),
"heirarchy constructed from lists does not have {}".format(name),
)
nt.assert_equals(from_lists.get_class_id(name), id_)
nt.assert_equals(from_lists.get_class_name(name), name)
# Tests for empty
nt.assert_equals(empty.all_class_names(), [])
nt.assert_equals(empty.size(), 0)
# Tests for from_file
nt.assert_equals(from_file.all_class_names(), self.class_names)
# Each class has 2 synonyms, so size is 3 * # classes
nt.assert_equals(from_file.size(), 3 * len(self.class_names))
# Make sure class5, which was commented out, is not present
nt.assert_false(from_file.has_class_name("class5"))
# Tests for from_lists
nt.assert_equals(from_lists.all_class_names(), self.class_names)
nt.assert_equals(from_lists.size(), len(self.class_names))
# Only hierarchies constructed from files can be constructed with synonyms
def test_initial_synonyms(self):
ch = CategoryHierarchy(self.fp.name)
for cname in ch.all_class_names():
syn0_name = cname + "_syn0"
syn1_name = cname + "_syn1"
nt.ok_(ch.has_class_name(syn0_name))
nt.ok_(ch.has_class_name(syn1_name))
nt.assert_equals(ch.get_class_name(syn0_name), cname)
nt.assert_equals(ch.get_class_name(syn1_name), cname)
def test_initial_relationships(self):
empty, from_file, from_lists = self._create_hierarchies()
# Tests for empty
nt.assert_equals(empty.child_class_names(), [])
# Tests for from_file
nt.assert_equals(from_file.child_class_names(), ["class2_0"])
nt.assert_equals(from_file.get_class_parents("class0"), [])
nt.assert_equals(from_file.get_class_parents("class2_0"), ["class1_0", "class1_1"])
nt.assert_equals(from_file.get_class_parents("class1_0"), ["class0"])
nt.assert_equals(from_file.get_class_parents("class1_1"), ["class0"])
# Tests for from_lists
nt.assert_equals(from_lists.child_class_names(), ["class1_1", "class2_0"])
nt.assert_equals(from_lists.get_class_parents("class0"), [])
nt.assert_equals(from_lists.get_class_parents("class2_0"), ["class1_0"])
nt.assert_equals(from_lists.get_class_parents("class1_0"), ["class0"])
nt.assert_equals(from_lists.get_class_parents("class1_1"), ["class0"])
def test_add_class(self):
ch = CategoryHierarchy()
# Check default for parent_name and id params
ch.add_class("class0")
nt.assert_equals(ch.get_class_id("class0"), -1)
# Now for parent_name
ch.add_class("class1", id=0)
nt.assert_equals(ch.get_class_id("class1"), 0)
# Now for id
ch.add_class("class2", parent_name="class1")
nt.assert_equals(ch.get_class_id("class2"), -1)
# Check has_class_name returns correct result
nt.ok_(ch.has_class_name("class0"))
nt.ok_(ch.has_class_name("class1"))
nt.ok_(ch.has_class_name("class2"))
# Check class list
nt.assert_equals(ch.all_class_names(), ["class1", "class0", "class2"])
nt.assert_equals(ch.size(), 3)
# Check relationships are correct
# TODO: Should this only be class2 and class0? Current implementation
# of add_class only adds class1 to class2's parents. Class2 isn't added
# to Class1's list of children, which makes it a child class.
nt.assert_equals(ch.child_class_names(), ["class1", "class0", "class2"])
nt.assert_equals(ch.get_class_parents("class0"), [])
nt.assert_equals(ch.get_class_parents("class1"), [])
nt.assert_equals(ch.get_class_parents("class2"), ["class1"])
def test_add_class_already_exists(self):
ch = CategoryHierarchy(self.class_names, self.parent_names, self.ids)
with nt.assert_raises_regexp(RuntimeError, "Category already exists"):
ch.add_class(self.class_names[0])
ch.add_class("new_class")
with nt.assert_raises_regexp(RuntimeError, "Category already exists"):
ch.add_class("new_class")
def test_add_relationship(self):
ch = CategoryHierarchy()
ch.add_class("class0")
ch.add_class("class1_0")
ch.add_class("class1_1")
ch.add_class("class2_0")
# Same as the file
ch.add_relationship("class1_0", "class0")
ch.add_relationship("class1_1", "class0")
ch.add_relationship("class2_0", "class1_0")
ch.add_relationship("class2_0", "class1_1")
nt.assert_equals(ch.child_class_names(), ["class2_0"])
nt.assert_equals(ch.get_class_parents("class2_0"), ["class1_0", "class1_1"])
nt.assert_equals(ch.get_class_parents("class1_0"), ["class0"])
nt.assert_equals(ch.get_class_parents("class1_1"), ["class0"])
def test_add_synonym(self):
ch = CategoryHierarchy(self.class_names, self.parent_names, self.ids)
ch.add_synonym("class2_0", "class2_0_syn0")
ch.add_synonym("class2_0", "class2_0_syn1")
ch.add_synonym("class1_0", "class1_0_syn0")
ch.add_synonym("class1_0", "class1_0_syn1")
# First check the old classes exist
nt.assert_equals(ch.all_class_names(), self.class_names)
# Check the size
nt.assert_equals(ch.size(), 8)
# Now check synonyms exist
nt.ok_(ch.has_class_name("class2_0_syn0"))
nt.ok_(ch.has_class_name("class2_0_syn1"))
nt.ok_(ch.has_class_name("class1_0_syn0"))
nt.ok_(ch.has_class_name("class1_0_syn1"))
# Check the name of the actual category
nt.assert_equals(ch.get_class_name("class2_0_syn0"), "class2_0")
nt.assert_equals(ch.get_class_name("class2_0_syn1"), "class2_0")
nt.assert_equals(ch.get_class_name("class1_0_syn0"), "class1_0")
nt.assert_equals(ch.get_class_name("class1_0_syn1"), "class1_0")
# Now check that the relationships are still intact
nt.assert_equals(ch.get_class_parents("class2_0_syn0"), ["class1_0"])
nt.assert_equals(ch.get_class_parents("class2_0_syn1"), ["class1_0"])
nt.assert_equals(ch.get_class_parents("class1_0_syn0"), ["class0"])
nt.assert_equals(ch.get_class_parents("class1_0_syn1"), ["class0"])
def test_add_synonym_already_exists(self):
    """Registering the same synonym a second time must raise."""
    hierarchy = CategoryHierarchy()
    hierarchy.add_class("class0")
    hierarchy.add_synonym("class0", "class0_syn0")
    hierarchy.add_synonym("class0", "class0_syn1")
    expected_err_msg = "Synonym name already exists in hierarchy"
    for duplicate in ("class0_syn0", "class0_syn1"):
        with nt.assert_raises_regexp(RuntimeError, expected_err_msg):
            hierarchy.add_synonym("class0", duplicate)
def test_load_from_file(self):
    """Loading a file merges with classes that were added beforehand."""
    hierarchy = CategoryHierarchy()
    hierarchy.add_class("class-1")
    hierarchy.add_synonym("class-1", "class-1_syn0")
    hierarchy.add_synonym("class-1", "class-1_syn1")
    hierarchy.load_from_file(self.fp.name)
    expected_names = self.class_names + ["class-1"]
    nt.assert_equals(hierarchy.all_class_names(), expected_names)
    # Every category (from the file and the pre-added one) keeps both synonyms.
    for cname in expected_names:
        for alias in (cname + "_syn0", cname + "_syn1"):
            nt.ok_(hierarchy.has_class_name(alias))
            nt.assert_equals(hierarchy.get_class_name(alias), cname)
    # Parent/child structure from the file is intact after the merge.
    nt.assert_equals(hierarchy.child_class_names(), ["class2_0", "class-1"])
    nt.assert_equals(hierarchy.get_class_parents("class0"), [])
    nt.assert_equals(hierarchy.get_class_parents("class2_0"), ["class1_0", "class1_1"])
    nt.assert_equals(hierarchy.get_class_parents("class1_0"), ["class0"])
    nt.assert_equals(hierarchy.get_class_parents("class1_1"), ["class0"])
def test_load_from_file_not_exist(self):
    """A missing hierarchy file raises a descriptive RuntimeError."""
    hierarchy = CategoryHierarchy()
    with nt.assert_raises_regexp(RuntimeError, "Unable to open nonexistant_file.txt"):
        hierarchy.load_from_file("nonexistant_file.txt")
# Several member functions raise when asked about a category that was
# never registered; exercise each of those code paths here.
def test_category_not_exist(self):
    expected_err_msg = "Class node absent_class does not exist"
    for hierarchy in list(self._create_hierarchies()):
        with nt.assert_raises_regexp(RuntimeError, expected_err_msg):
            hierarchy.add_class("new_class1", "absent_class")
        with nt.assert_raises_regexp(RuntimeError, expected_err_msg):
            hierarchy.get_class_name("absent_class")
        with nt.assert_raises_regexp(RuntimeError, expected_err_msg):
            hierarchy.get_class_id("absent_class")
        with nt.assert_raises_regexp(RuntimeError, expected_err_msg):
            hierarchy.get_class_parents("absent_class")
        with nt.assert_raises_regexp(RuntimeError, expected_err_msg):
            hierarchy.add_relationship("absent_class", "another_absent_class")
        # A relationship with a missing parent fails even when the
        # child itself exists.
        hierarchy.add_class("new_class2")
        with nt.assert_raises_regexp(RuntimeError, expected_err_msg):
            hierarchy.add_relationship("new_class2", "absent_class")
        with nt.assert_raises_regexp(RuntimeError, expected_err_msg):
            hierarchy.add_synonym("absent_class", "synonym")
# The sorting helper shared by a few member functions is covered here;
# all_class_names() exposes its result directly.
def test_sort(self):
    hierarchy = CategoryHierarchy()
    # This insertion order forces every type of comparison to occur:
    # id-vs-id, id-vs-no-id, and name-vs-name.
    hierarchy.add_class("a", id=1)
    hierarchy.add_class("c")
    hierarchy.add_class("b")
    hierarchy.add_class("d", id=0)
    # Classes carrying ids come first (here ordered by their ids),
    # followed by the id-less classes in alphabetical order.
    expected = ["d", "a", "b", "c"]
    nt.assert_equals(hierarchy.all_class_names(), expected)
    nt.assert_equals(hierarchy.child_class_names(), expected)
| [
"john.parent@kitware.com"
] | john.parent@kitware.com |
cc7de0584bc82949c6b905c784c992296760e455 | 8dd64f6b99654da666430cb16a0c549124e7b9b3 | /server/api/urls.py | 8539ff62b9554b3e43e10bce9a6e84ddedbdf7e6 | [] | no_license | purveshmakode24/django-angular | 1a435c20fd0ddfdeb7759b6d3abbeef8b0a5be5d | 59f30cc5f2e2337ea9839a7cc4ed787974f2f791 | refs/heads/master | 2023-06-02T05:46:29.066687 | 2021-06-13T16:37:41 | 2021-06-13T16:37:41 | 376,569,343 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | from django.urls import path
from api import views
# URL routes for the api app: the bare app root dispatches to views.home.
urlpatterns = [
path('', views.home, name = 'home'),
# path('', home, name='home'),
]
"makodepurvesh@gmail.com"
] | makodepurvesh@gmail.com |
b8d013dbdb8bf21831bd114e91e73dc1e882763c | 35f7d935b17fbb3be4cf3d04ccba8bf5205a19ec | /Gtbit IT.py | 8caaa10f2ee8029822e21dbfa81f501346b48bc9 | [] | no_license | mgmanthan26/Gtbit-IT-Portal | cf7c4438f79c10d74a18ed6d59aaeaa955c4850f | 8cd98064deedf37ae6285643418087e659bb6047 | refs/heads/main | 2023-06-18T10:21:03.204536 | 2020-12-05T13:38:50 | 2020-12-05T13:38:50 | 318,791,485 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | from flask import Flask, render_template
# Flask application object; render_template below loads pages from the
# default ./templates directory.
app = Flask(__name__)
# Landing page.
@app.route("/")
def home():
return render_template('index.html')
# NOTE(review): the endpoint is named "about" but serves the insights
# page -- presumably intentional; confirm before renaming.
@app.route("/insights")
def about():
return render_template('insights.html')
@app.route("/post")
def post():
return render_template('post.html')
@app.route("/contact")
def contact():
return render_template('contact.html')
# Development server with the auto-reloading debugger enabled
# (not suitable for production use).
app.run(debug=True)
| [
"noreply@github.com"
] | noreply@github.com |
0c91e11b9a0c78fc00e0b9cca338a0526b119143 | fe5a3d6e5f2586210a768c437bd028817a5150e1 | /Polynomial_Class2.py | 85855804267de6a92762b6cc1cd00fcd884e4aaa | [] | no_license | 340730/chicken | c36b17482958092bfaf1d618b280946de0233ed7 | 3780ce90574e36ff3bbaf8a74271c52798cde8c1 | refs/heads/master | 2020-03-27T01:55:13.613072 | 2018-12-11T22:24:12 | 2018-12-11T22:24:12 | 145,753,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 884 | py |
"""
Created on Thu Sep 27 17:40:48 2018
@author: randydavila
In this script we will define a class called
Polynomial.
"""
class Polynomial:
    """A polynomial in one variable, stored as its coefficients ordered
    from the highest-degree term down to the constant term."""

    def __init__(self, coefficients):
        # Coefficients are kept exactly as given, highest power first.
        self.coeff = coefficients

    def degree(self):
        """Degree of the polynomial (number of coefficients minus one)."""
        return len(self.coeff) - 1

    def __str__(self):
        """Render as ' (c0)x^d + ... (cn)x^0 ' (note surrounding spaces)."""
        deg = self.degree()
        terms = [f' ({c})x^{deg - idx} +' for idx, c in enumerate(self.coeff)]
        # Slice off the dangling '+' that follows the final term.
        return ''.join(terms)[:-1]

    def __call__(self, x):
        """Evaluate the polynomial at x."""
        deg = self.degree()
        return sum(c * (x ** (deg - idx)) for idx, c in enumerate(self.coeff))

    def derivative(self):
        """Return a new Polynomial that is the first derivative."""
        deg = self.degree()
        reduced = [c * (deg - idx) for idx, c in enumerate(self.coeff[:-1])]
        return Polynomial(reduced)
| [
"noreply@github.com"
] | noreply@github.com |
0827a0936743d568a6e345d0183c0b9bdde812f5 | aebd8376eeb1215bab28f41544e7c3568c66112c | /blog/views.py | 5c15de0cdc3ab3faf6106f1c98edfcadfb36ab61 | [] | no_license | unovikash/testBlog8210 | 3ccb099efa7e51c02479b4470c31b73c68f0e050 | 78d3f24fb08342d44a34c27f7a9e864fc5cbc533 | refs/heads/master | 2021-08-19T22:49:19.720768 | 2018-11-22T21:29:59 | 2018-11-22T21:29:59 | 146,684,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,452 | py | from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from .models import Post
from .forms import PostForm
# Create your views here.
def post_list(request):
    """Render every post already published, oldest first."""
    published = Post.objects.filter(
        published_date__lte=timezone.now()
    ).order_by('published_date')
    return render(request, 'blog/post_list.html', {'posts': published})
def post_detail(request, pk):
    """Show a single post, returning 404 when the pk is unknown."""
    post = get_object_or_404(Post, pk=pk)
    return render(request, 'blog/post_detail.html', {'post': post})
def post_new(request):
    """Create a post: blank form on GET, save and redirect on valid POST."""
    if request.method != 'POST':
        # Initial page load: present an empty form.
        return render(request, 'blog/post_edit.html', {'form': PostForm()})
    form = PostForm(request.POST)
    if not form.is_valid():
        # Re-display the bound form so validation errors are shown.
        return render(request, 'blog/post_edit.html', {'form': form})
    post = form.save(commit=False)
    post.author = request.user
    post.published_date = timezone.now()
    post.save()
    return redirect('post_detail', pk=post.pk)
def post_edit(request, pk):
    """Edit an existing post; mirrors post_new but binds to the instance."""
    post = get_object_or_404(Post, pk=pk)
    if request.method != "POST":
        return render(request, 'blog/post_edit.html',
                      {'form': PostForm(instance=post)})
    form = PostForm(request.POST, instance=post)
    if not form.is_valid():
        return render(request, 'blog/post_edit.html', {'form': form})
    post = form.save(commit=False)
    # NOTE(review): authorship is reassigned to the current user on every
    # edit -- looks intentional for this tutorial app; confirm.
    post.author = request.user
    post.published_date = timezone.now()
    post.save()
    return redirect('post_detail', pk=post.pk)
"vikashkumar@unomaha.edu"
] | vikashkumar@unomaha.edu |
8ed04321cc923a0b0cf6b320d0b25f6205625691 | b50c44ad44f95035332f371517808406e4a756d0 | /cbvSerializers/cbvApp/migrations/0001_initial.py | 3af20fe7c5f546e1c79919b7d44819cc546f7478 | [] | no_license | anandkumar-design/api1 | d970e336f15b46dceb07ef480aa57fd544a3bd93 | ae767463828138b97f4cf5ef6f7ac2ae4ac33afa | refs/heads/main | 2023-04-25T00:18:13.406364 | 2021-05-13T12:43:35 | 2021-05-13T12:43:35 | 367,045,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 531 | py | # Generated by Django 3.2 on 2021-05-04 08:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for cbvApp: creates the Student table."""

    initial = True

    dependencies = []

    operations = [
        migrations.CreateModel(
            name='Student',
            fields=[
                # Explicit integer primary key (serialize=False mirrors
                # Django's convention for primary-key fields).
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=20)),
                ('score', models.DecimalField(decimal_places=10, max_digits=19)),
            ],
        ),
    ]
| [
"you@example.com"
] | you@example.com |
9b274e73476a2bedc83d122ad2d7a65c035c80bc | fc43283aa2b12b217e5da7064d45f38bd62bab87 | /btree.py | b99d7689f3070e20602bd361dd9d719d59d4598f | [] | no_license | cxt90730/btree | db0a5d163b98c09fa9ba699d694ddd5d0286bc95 | eae21210ae98720bf89968a6736f920dc6341fa2 | refs/heads/master | 2021-01-22T23:53:16.912615 | 2017-03-21T08:09:50 | 2017-03-21T08:09:50 | 85,675,146 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,205 | py | # coding=utf-8
import graph
MAX_CHILD_NUM = 4 # Max num of child , if beyond it, that node need to split
t = (MAX_CHILD_NUM >> 1) - 1 # Mid Idx
class BPlusTreeNode(object):
    """A single tree node: key list, child list and bookkeeping links."""

    def __init__(self):
        # Role flags; the tree marks its root and leaves explicitly.
        self.isRoot = False
        self.isLeaf = False
        self.n = 0        # number of keys currently considered live
        self.key = []     # sorted keys
        self.child = []   # child nodes (empty for leaves)
        self.last = None  # sibling link (not used by this file's methods)
        self.next = None  # sibling link (not used by this file's methods)
        self.p = None     # parent node
# Insertion-only tree keyed by integers; t is the half-capacity derived
# from MAX_CHILD_NUM at the top of the file. Full nodes (2*t+1 keys) are
# split on the way down before descending into them.
class BPlusTree(object):
def __init__(self):
# An empty tree is a single node that is both root and leaf.
self.root = BPlusTreeNode()
self.root.isRoot = True
self.root.isLeaf = True
def allocateNode(self):
return BPlusTreeNode()
# Split the full child x.child[i]: move the upper t keys (and children)
# into a fresh node z, and push the median key y.key[t] up into x.
def split(self, x, i): # x is current node, i is child index on x
z = self.allocateNode() # Temp Node
y = x.child[i]
z.isLeaf = y.isLeaf
z.n = t
for j in range(0, t): # copy keys from y
z.key.append(y.key[j + t + 1])
if y.isLeaf is False: # copy childs from y
for j in range(0, t + 1 ):
z.child.append(y.child[j + t + 1])
# y keeps the lower half; its list still physically holds the old
# upper entries, but n marks them as dead.
y.n = t
# NOTE(review): range(a, b)[::-1] is empty whenever b <= a, so these
# shift loops rely on i never exceeding x.n -- confirm that invariant.
for j in range(x.n, i)[::-1]: # resort child of x node
if j == x.n:
x.child.append(x.child[j])
else:
x.child[j + 1] = x.child[j]
if x.child.__len__() == i + 1: # new node
x.child.append(z)
else:
x.child[i + 1] = z
for j in range(x.n - 1, i - 1)[::-1]: # resort key of x node
if j == x.n - 1:
x.key.append(x.key[j])
else:
x.key[j + 1] = x.key[j]
if x.key.__len__() == i: # new node
x.key.append(y.key[t])
else:
x.key[i] = y.key[t]
x.n = x.n + 1
# Public entry point: split a full root first, then descend.
def insert(self, k):
r = self.root
if r.n == (2 * t + 1):
s = self.allocateNode()
self.root = s
s.isLeaf = False
s.n = 0
s.child.append(r)
self.split(s, 0)
self.insert_non_full(s, k)
else:
self.insert_non_full(r, k)
# Insert k into the subtree rooted at x, which must not be full.
def insert_non_full(self, x, k):
i = x.n - 1
if x.isLeaf is True: # x is leaf
# Shift larger keys one slot right, growing the list if needed.
while i >= 0 and k < x.key[i]:
if i == x.n - 1:
x.key.append(x.key[i])
else:
x.key[i + 1] = x.key[i]
i = i - 1
if i == x.n - 1:
x.key.append(k)
else:
x.key[i + 1] = k
x.n = x.n + 1
else:
# Locate the child that should receive k.
while i >= 0 and k < x.key[i]:
i = i - 1
i = i + 1
if x.child[i].n == (2 * t + 1):
self.split(x, i)
if k > x.key[i]: # k > key[i] , right child of key[i]
i = i + 1
self.insert_non_full(x.child[i], k)
# Demo harness: build a small tree by repeated insertion and render it
# with the local `graph` helper module (a visualization wrapper,
# presumably -- see graph.py).
if __name__ == '__main__':
tree = BPlusTree()
tree.insert(1)
tree.insert(2)
tree.insert(4)
tree.insert(6)
tree.insert(8)
tree.insert(12)
tree.insert(13)
tree.insert(14)
tree.insert(15)
# tree.insert(16)
# tree.insert(11)
g = graph.Graph()
g.GraphTree(tree)
g.View()
| [
"cuixiaotian@le.com"
] | cuixiaotian@le.com |
181af69f0d3013dde6823a9d1fed5231d26c900e | d4b85f462c88dbc2e004151509771bc7a4be6d6b | /pypdf2table/Output.py | 4d4d7681546ef89c32f31215fc0febd4dac4cb44 | [] | no_license | Sandy4321/pypdf2table | b7dcd8be425e3212c667838f819ff4a4c176ae99 | 79f801cf03a74f458dac271cdb811f3482bb172d | refs/heads/master | 2021-05-26T21:24:38.027331 | 2012-02-07T20:21:33 | 2012-02-07T20:21:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,010 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008 ISrg (NSI, CEFETCAMPOS, BRAZIL) and Contributors.
# All Rights Reserved.
# Vanderson Mota dos Santos <vanderson.mota@gmail.com>
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import os
from TextElement import TextElement
from Table import Table
from Font import Font
from Column import Column
from GetOutputFilePathList import GetOutputFilePathList
from GetOutputStringIOList import GetOutputStringIOList
class Output:
"""Creates the output File"""
# NOTE(review): the mutable default arguments ([] for tableList/fontList)
# are only read, never mutated, so they are harmless here -- but () would
# be the safer convention.
def __init__(self,tableList=[], fontList=[], path=""):
self.__tableList = tableList
self.__fontList = fontList
self.__path = path
# Both support files are written as soon as the object is built.
self.createStylesheet()
self.createTablesDtd()
# Writes <path>/table_view.xsl, the stylesheet that renders the table
# XML produced by createOutput() as HTML.
# NOTE(review): the final string line of xslValue ends with a stray '\'
# line continuation, which joins the write() call onto the assignment --
# a syntax error as written; that trailing backslash should be removed.
def createStylesheet(self):
"""Creates the XML Stylesheet. that is, a XSL File that will define the table Layout"""
stylesheetFile = open(self.__path + "/table_view.xsl", "w")
#variable to keep the xml stylesheet (xsl file) information
xslValue = "<?xml version=\"1.0\" encoding=\"iso-8859-1\" ?>\n" + \
"<xsl:stylesheet xmlns:xsl=\"" + \
"http://www.w3.org/1999/XSL/Transform\" version=\"1.0\">\n" + \
"<xsl:output method=\"html\" />\n" + \
"<xsl:template match=\"/\">" + \
"<html>\n" + \
"<body>\n" + \
"<xsl:for-each select=\"tables/table\">\n" + \
"<table border=\"1\">\n" + \
"<caption>\n" + \
"<xsl:value-of select=\"title\"/>\n" + \
"</caption>\n" + \
"<xsl:for-each select=\"header/header_line\">\n" + \
"<tr>\n" + \
"<xsl:for-each select=\"header_element\">\n" + \
"<th bgcolor=\"#ccdddd\" colspan=\"{@colspan}\">\n" + \
"<xsl:value-of select=\".\" /> \n" + \
"</th>\n" + \
"</xsl:for-each>\n" + \
"</tr>\n" + \
"</xsl:for-each>\n" + \
"<xsl:for-each select=\"tbody/data_row\">\n" + \
"<tr>\n" + \
"<xsl:for-each select=\"cell\">\n" + \
"<td colspan=\"{@colspan}\">\n" + \
"<xsl:if test=\"@format='bold'\">\n" + \
"<b>\n" + \
"<xsl:value-of select=\".\" />\n" + \
"</b>\n" + \
"</xsl:if>\n" + \
"<xsl:if test=\"@format='italic'\">\n" + \
"<i>\n" + \
"<xsl:value-of select=\".\" />\n" + \
"</i>\n" + \
"</xsl:if>\n" + \
"<xsl:if test=\"@format='bolditalic'\">\n" + \
"<b><i>\n" + \
"<xsl:value-of select=\".\" />\n" + \
"</i></b>\n" + \
"</xsl:if>\n" + \
"<xsl:if test=\"@format=''\">\n" + \
"<xsl:value-of select=\".\" />\n" + \
"</xsl:if>\n" + \
"</td>\n" + \
"</xsl:for-each>\n" + \
"</tr>\n" + \
"</xsl:for-each>\n" + \
"<BR> </BR>\n" + \
"<BR> </BR>\n" + \
"<BR> </BR>\n" + \
"</table>\n" + \
"</xsl:for-each>\n" + \
"</body>\n" + \
"</html>\n" + \
"</xsl:template>\n" + \
"</xsl:stylesheet>\n" \
stylesheetFile.write(xslValue)
stylesheetFile.close()
# Writes <path>/tables.dtd describing the generated table XML structure.
def createTablesDtd(self):
"""Creates a DTD File"""
dtdFile = open(self.__path + "/tables.dtd", "w")
dtdValue = "<?xml version=\"1.0\" encoding=\"iso-8859-1\"?>\n" + \
"<!ELEMENT tables(table+,fontspec*)>\n" + \
"<!ELEMENT fontspec EMPTY>\n" + \
"<!ATTLIST fontspec\n" + \
"id CDATA #REQUIRED\n" + \
"size CDATA #REQUIRED\n" + \
"family CDATA #REQUIRED\n" + \
"color CDATA #REQUIRED\n" + \
">\n" + \
"<!ELEMENT table (header,tbody)>\n" + \
"<!ELEMENT header (header_element)*>\n" + \
"<!ELEMENT header_element (#PCDATA)>\n" + \
"<!ATTLIST header_element\n" + \
"id CDATA #REQUIRED\n" + \
"sh CDATA #REQUIRED\n" + \
"font CDATA #REQUIRED\n" + \
"colspan CDATA #REQUIRED\n" + \
">\n" + \
"<!ELEMENT tbody (data_row)*>\n" + \
"<!ELEMENT data_row (cell)*>\n" + \
"<!ELEMENT cell (#PCDATA)>\n" + \
"<!ATTLIST cell\n" + \
"sh CDATA #REQUIRED\n" + \
"font CDATA #REQUIRED\n" + \
"colspan CDATA #REQUIRED\n" + \
"format CDATA #REQUIRED\n" + \
">\n"
dtdFile.write(dtdValue)
dtdFile.close()
# Emits one XML file per table, converts each to HTML through the
# external `xmlto` tool, then deletes the intermediate XML/DTD/XSL files.
# NOTE(review): the path is interpolated into shell commands below --
# paths containing spaces or shell metacharacters will break; prefer
# subprocess.run with an argument list and os.remove for the cleanup.
def createOutput(self):
"""Creates a HTML file for each table"""
tableNumber = 1
self.__outputFilesList = []
for tableObj in self.__tableList:
#creating the output files
outputFilePathString = self.__path + "/table_" + str(tableNumber) + ".xml"
outputFile = open(outputFilePathString, "w")
outputFile.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>" + "\n")
outputFile.write("<?xml-stylesheet href=\"table_view.xsl\" type=\"text/xsl\" ?>" + "\n")
outputFile.write("<tables>" + "\n")
#Fonts
for fontObj in self.__fontList:
outputFile.write("<fontspec id=\"" + fontObj.getFontId() + \
"\" size=\"" + str(fontObj.getSize()) + "\" family=\"" + \
fontObj.getFamily() + "\" color=\"" + fontObj.getColor() + "\"/>")
cellsOnColumn = 0
outputFile.write("<table>" + "\n")
outputFile.write("<title>" + "TABLE ON PAGE " + tableObj.getPage() + "</title>" +"\n")
outputFile.write("<header>" + "\n")
# Header rows come first: every row before the data-row boundary.
for j in range(0, tableObj.getDatarowBegin()):
p = 0
outputFile.write("<header_line>" + "\n")
while p < len(tableObj.getColumnList()):
columnObj = tableObj.getColumnListElement(p)
cellsOnColumn = len(columnObj.getCellsList())
columnObj.setHeader(p+j)
textElementObj = columnObj.getCellsListElement(j)
outputFile.write("<header_element id=\"" + str(p+j) + "\" sh=\"" + str(columnObj.getHeader()))
outputFile.write("\" font=\"" + str(textElementObj.getFont()) + "\" colspan=\"" + \
str(textElementObj.getColspan()) + "\">" + "\n")
outputFile.write("<![CDATA[")
if textElementObj.getValue() != "null":
outputFile.write(textElementObj.getValue())
outputFile.write("]]>" + "\n")
outputFile.write("</header_element>" + "\n")
p = p + int(textElementObj.getColspan())
outputFile.write("</header_line>" + "\n")
outputFile.write("</header>" + "\n")
outputFile.write("<tbody>" + "\n")
for counter in range(tableObj.getDatarowBegin(), cellsOnColumn):
outputFile.write("<data_row>" + "\n")
k = 0
while k < len(tableObj.getColumnList()):
columnObj = tableObj.getColumnListElement(k)
# NOTE(review): the bare except treats any failure as "column is
# shorter than this row" -- narrow it to IndexError.
try:
textElement = columnObj.getCellsListElement(counter)
except:
break
outputFile.write("<cell sh=\"" + str(columnObj.getHeader()) + "\" font=\"" + str(textElement.getFont()))
outputFile.write("\" colspan=\"" + str(textElement.getColspan()) + "\" format=\"" + textElement.getFormat() + "\">" + "\n")
outputFile.write("<![CDATA[")
if textElement.getValue() != "null":
outputFile.write(textElement.getValue())
outputFile.write("]]>" + "\n")
outputFile.write("</cell>" + "\n")
k = k + textElement.getColspan()
outputFile.write("</data_row>" + "\n")
outputFile.write("</tbody>" + "\n")
outputFile.write("</table>" + "\n")
outputFile.write("</tables>" + "\n")
outputFile.close()
tableNumber += 1
os.system('xmlto -x "' + os.path.join(self.__path, 'table_view.xsl') + '" -o ' + self.__path + ' html-nochunks ' + outputFilePathString + ' --skip ')
os.system("rm " + os.path.join(self.__path, "*.xml"))
self.__outputFilesList.append(outputFilePathString.replace(".xml", ".html"))
os.system("rm " + os.path.join(self.__path, "*.dtd"))
os.system("rm " + os.path.join(self.__path, "table_view.xsl"))
# Strategy object deciding how the produced file list is returned.
def setOutputType(self, outputTypeObj):
self.__outputType = outputTypeObj
# Delegates to the configured output type; requires both setOutputType()
# and createOutput() to have been called first.
def getOutputList(self):
return self.__outputType.getOutputList(self.__outputFilesList)
| [
"d.camata@gmail.com"
] | d.camata@gmail.com |
29b2c492995b0dfded24806b18f9fb1644669ccd | 8b8989ec3bc73f1710c03ef3e3e5fd93ddf56711 | /jobs/migrations/0004_auto_20200328_1228.py | 32135eaf6ce287c5a4aad231865d2c719c020216 | [] | no_license | petergitzz/portfolio-project | 5f8c618172a35058feec88952dcd07e85339a61a | 4ebda97913ab2d4d596863133fbd5e5a12fd7d8b | refs/heads/master | 2022-12-09T15:22:14.075580 | 2020-04-15T19:48:41 | 2020-04-15T19:48:41 | 252,603,180 | 0 | 0 | null | 2022-11-22T05:27:35 | 2020-04-03T01:23:06 | Python | UTF-8 | Python | false | false | 386 | py | # Generated by Django 3.0.4 on 2020-03-28 09:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jobs', '0003_auto_20200328_1224'),
]
operations = [
migrations.AlterField(
model_name='job',
name='image',
field=models.ImageField(upload_to='images/'),
),
]
| [
"petergitzz@icloud.com"
] | petergitzz@icloud.com |
26f3b4a714e138482e4799aecd77cd946190d8db | bdd6ab129de61947945b380a487a3ee923f542f3 | /real_genomes/datasets/gbk2dbfiles.py | 73549d27723c8a1e8b7be53aa05418915d9867e5 | [] | no_license | InfOmics/pangenes-review | 1e27c1fd1a93fb7a5fd764c4090f7a4a2a207b0b | a74f8f9d615de6a76aa1918c2c4e9d0c1f0c8385 | refs/heads/master | 2021-01-07T12:14:37.891227 | 2020-06-01T14:20:27 | 2020-06-01T14:20:27 | 241,686,261 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,435 | py | #!/usr/bin/python3
import sys
from os import listdir
from os.path import isfile, join
import re
from Bio import SeqIO
idir = sys.argv[1]
odir = sys.argv[2]
print("reading gbk files from", idir)
gbkfiles = [f for f in listdir(idir) if isfile(join(idir, f)) and re.match('^.+\.gbk$', f)]
print(gbkfiles)
# Parse one GenBank file, accumulating CDS information into the
# module-level dicts (genome_length, genome2cdstag, cdstag2genome,
# cdsseqs, cdstag2product), which the driver loop below rebinds per file.
def read_gbk(ifile, genome_id):
print(genome_id)
genome_cdslist = genome2cdstag.get(genome_id, list())
for record in SeqIO.parse(ifile, "genbank"):
sequence_id = record.id
# NOTE(review): the genome_id parameter is overwritten here by the
# record's last accession, so the final store below may use a
# different key than the lookup above -- confirm this is intended.
genome_id = record.annotations['accessions'][-1]
print("\t"+genome_id+"\t"+sequence_id)
for feature in record.features:
#print(feature)
if (feature.type == 'source'):
# Accumulate genome length from the end of each source feature.
genome_length[genome_id] = genome_length.get(genome_id, 0) + feature.location.end
elif (feature.type == 'CDS'):
if ('translation' in feature.qualifiers):
# A CDS is keyed by (genome, sequence, locus_tag).
tag = (genome_id, sequence_id, feature.qualifiers['locus_tag'][0])
genome_cdslist.append(tag)
cdstag2genome[tag] = genome_id
cdsseqs[tag] = feature.qualifiers['translation'][0]
if 'product' in feature.qualifiers:
cdstag2product[tag] = (feature.qualifiers['product'][0]).replace('\t','')
else:
cdstag2product[tag] = 'noproduct'
genome2cdstag[genome_id] = genome_cdslist
# Driver: for every .gbk file, rebuild the per-file dicts, parse it, and
# write the cleaned CDS translations to <odir>/<name>_aa.fasta.
for gbk in gbkfiles:
print(gbk)
genome_length = dict()
genome2cdstag = dict()
cdstag2genome = dict()
cdstag2product = dict()
cdsseqs = dict()
# NOTE(review): idir is concatenated without a separator -- this works
# only when the first argument ends with '/'; use os.path.join.
read_gbk(idir + gbk, re.sub('\.gbk$', '', gbk))
uniques = dict()
ofile = odir+"/"+ gbk.replace('.gbk','_aa.fasta')
print('writing to', ofile)
with open(ofile, 'w') as off:
for k in sorted(cdsseqs.keys()):
gen_id = k[0]+":"+k[1]
if gen_id not in uniques:
uniques[ gen_id ] = dict()
# Count duplicate locus tags so each FASTA header stays unique.
uniques[ gen_id ][k[2]] = uniques[ gen_id ].get(k[2],0) + 1
cc = uniques[ gen_id ][k[2]]
acc = k[0]+":"+k[1]+":"+k[2]+":"+str(cc)
off.write(">"+ acc +"\n")
# Strip stop markers and ambiguous residue codes from the translation.
cdsseqs[k] = cdsseqs[k].replace('TERM','*').replace('U','*').replace('*','').replace('-','').replace('B','').replace('Z','').replace('J','').replace('X','')
#cdsseqs[k] = cdsseqs[k].replace("X","A").replace('B','A').replace('Z','E').replace('J','L').replace('TERM','*').replace('U','A').replace('*','').replace('-','')
off.write(cdsseqs[k]+"\n")
| [
"vincenzo.bonnici@gmail.com"
] | vincenzo.bonnici@gmail.com |
98f56d9d28cad1e19b4d224bf103db80fbe2e222 | 55d7a1f65449dcc68e9a0698e036cdc36a3154f7 | /code/12-tuple元组.py | e13bd658dbd20cac937dfc99c46c051a1c21b174 | [] | no_license | kuan1/test-python | 0e94742a2ccfa1e4df6c76cf40fb7da11df9da90 | 48b52877cfd67379416a38274f6f67a52e31d4e2 | refs/heads/master | 2021-12-09T04:01:37.259751 | 2021-10-04T12:17:28 | 2021-10-04T12:17:28 | 185,113,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | # tuple不可修改列表
my_tuple = ()
print(my_tuple, type(my_tuple))
my_tuple = tuple(range(10))
print(my_tuple)
# 可以省略括号
my_tuple = 1, 2, 3, 4
print(my_tuple)
# 只有一个逗号的时候,必须有一个逗号
my_tuple = (1,)
print(my_tuple)
my_tuple = 1,
print(my_tuple)
# 元组解构
my_tuple = 'a', 'b', 'c'
(a, b, c) = my_tuple
print(a, b, c)
# 解构必须一致
my_tuple = (1, 2, 3, 4, 5, 6)
(a, b, *c) = my_tuple
print(a, b, c)
# 交换变量
a = 1
b = 2
(a, b) = (b, a)
print(a, b)
| [
"luzhongk@126.com"
] | luzhongk@126.com |
f25cc8bf0e04bf9284cbee6ccd9c2174ba7ca2d7 | 7542d0e1e7d30f9d215e6886d1707deefc903011 | /src/trailingZeroes.py | 773e391aa1b45ffb7c7d4214c5fc588773912e33 | [] | no_license | zhengminhui/leetcode-python | 3acc989d73ce50205b0f7e0cf13049fa5643cc8a | 6d26596a3c18750687082ca2b9f55746628576e6 | refs/heads/master | 2023-01-25T05:13:51.989092 | 2023-01-14T07:00:15 | 2023-01-14T07:00:15 | 104,976,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | def trailingZeroes(n):
"""
:type n: int
:rtype: int
"""
ans = 0
while n:
ans += n/5
n /= 5
return ans
| [
"zhmh1025@hotmail.com"
] | zhmh1025@hotmail.com |
06810a4a86bb50e0517a53d81faa51ea576bfa68 | a69206bd76ad02c213425ab024046f6a71120560 | /eos/utils/version.py | 5c6b5dad5b4c14beb06aa65a873febbf75d5702c | [
"Beerware"
] | permissive | YSaxon/eos | f120f3d11366ada131aa7e04820a01436b1d4c5f | 0cebeb2fd2d1952d6bb0d040a22f909fd5ae6efd | refs/heads/master | 2023-05-22T03:12:24.376238 | 2021-06-10T17:55:18 | 2021-06-10T17:55:18 | 375,783,898 | 0 | 0 | NOASSERTION | 2021-06-10T17:55:19 | 2021-06-10T17:50:54 | Python | UTF-8 | Python | false | false | 770 | py | """
Versions helper.
Wrapper on distutils.version to allow more laziness.
"""
from distutils.version import LooseVersion
class Version(LooseVersion):
    """
    EOS version number.

    Thin wrapper over distutils' LooseVersion that relaxes comparisons:
    anything representable as a version string (integers included) may
    appear on the other side of a comparison operator.
    """

    def _cmp(self, other):
        """
        Comparison override supporting extra operand types.

        Operands that are not already Version/LooseVersion instances are
        stringified and re-parsed as a Version before delegating to the
        base implementation.

        :param other: the other object to compare against
        """
        if isinstance(other, Version):
            return super()._cmp(other)
        return super()._cmp(Version(str(other)))
| [
"aevy-syn"
] | aevy-syn |
68ea1dea2939203e6f537230def02ae234372113 | e13c98f36c362717fdf22468b300321802346ef5 | /documents/migrations/0002_auto_20161206_1643.py | a5421c43dfb9d72c97c9d451bbe268874e6e6229 | [] | no_license | alexmon1989/libraries_portal | 2415cc49de33459266a9f18ed8bb34ac99d3eb7c | 277081e09f6347c175775337bffba074a35f3b92 | refs/heads/master | 2021-01-23T07:25:53.884795 | 2018-12-25T14:29:29 | 2018-12-25T14:29:29 | 80,501,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,158 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-06 14:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds human-readable verbose names to the document models and a
    catalog-number field on Document."""

    dependencies = [
        ('documents', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='anotherperson',
            options={'verbose_name': 'Другая персоны', 'verbose_name_plural': 'Другие персоны'},
        ),
        migrations.AlterModelOptions(
            name='document',
            options={'verbose_name': 'Документ', 'verbose_name_plural': 'Документы'},
        ),
        migrations.AlterModelOptions(
            name='documenttype',
            options={'verbose_name': 'Тип документа', 'verbose_name_plural': 'Типы документов'},
        ),
        migrations.AddField(
            model_name='document',
            name='catalog_number',
            # default=1 only backfills existing rows; preserve_default=False
            # removes it from the model state afterwards.
            field=models.CharField(default=1, max_length=255, verbose_name='Шифр хранения (№ в каталоге)'),
            preserve_default=False,
        ),
    ]
| [
"alex.mon1989@gmail.com"
] | alex.mon1989@gmail.com |
0135a94e64c60142d8c29bfdaba4788908690526 | 376150fe6b4dd5d8c3caa65714aa47bc35e31784 | /nintendo/games.py | dd8c5a5db2e8ac06d7e378a4ab5f9226ca8181f6 | [
"MIT"
] | permissive | tenda-gumi/NintendoClients | 977d5de576a0216a2e6c894bfa5de1658f8ef5de | 67f2600f68e441980187932e8521d6dcc69dcc24 | refs/heads/master | 2020-06-17T14:33:52.314419 | 2019-07-10T04:19:38 | 2019-07-10T04:19:38 | 195,951,685 | 0 | 0 | null | 2019-07-09T07:03:23 | 2019-07-09T07:03:23 | null | UTF-8 | Python | false | false | 1,320 | py |
#===== Wii U Games =====#
class Friends:
    """Wii U friends service (identical title id in all three regions)."""
    TITLE_ID_EUR = 0x10001C00
    TITLE_ID_USA = 0x10001C00
    TITLE_ID_JAP = 0x10001C00
    LATEST_VERSION = 0
    GAME_SERVER_ID = 0x3200
    ACCESS_KEY = "ridfebb9"
    NEX_VERSION = 20000
class DKCTF:
    """Donkey Kong Country: Tropical Freeze (Wii U)."""
    TITLE_ID_EUR = 0x0005000010138300
    TITLE_ID_USA = 0x0005000010137F00
    TITLE_ID_JAP = 0x0005000010144800
    LATEST_VERSION = 17
    GAME_SERVER_ID = 0x10144800
    ACCESS_KEY = "7fcf384a"
    NEX_VERSION = 30400  # protocol 3.4.0
    SERVER_VERSION = 3
class MK8:
    """Mario Kart 8 (Wii U)."""
    TITLE_ID_EUR = 0x000500001010ED00
    TITLE_ID_USA = 0x000500001010EC00
    TITLE_ID_JAP = 0x000500001010EB00
    LATEST_VERSION = 64
    GAME_SERVER_ID = 0x1010EB00
    ACCESS_KEY = "25dbf96a"
    NEX_VERSION = 30504  # protocol 3.5.4 (AMK patch)
    SERVER_VERSION = 2002
class SMM:
    """Super Mario Maker (Wii U)."""
    TITLE_ID_EUR = 0x000500001018DD00
    TITLE_ID_USA = 0x000500001018DC00
    TITLE_ID_JAP = 0x000500001018DB00
    LATEST_VERSION = 272
    GAME_SERVER_ID = 0x1018DB00
    ACCESS_KEY = "9f2b4678"
    NEX_VERSION = 30810  # protocol 3.8.10 (AMA patch)
    SERVER_VERSION = 3017
#===== Switch Games =====#
class MK8Deluxe:
    """Mario Kart 8 Deluxe (Switch)."""
    GAME_SERVER_ID = 0x2B309E01
    ACCESS_KEY = "09c1c475"
    NEX_VERSION = 40007  # protocol 4.0.7 (apptrbs)
class SMO:
    """Super Mario Odyssey (Switch)."""
    GAME_SERVER_ID = 0x255BA201
    ACCESS_KEY = "afef0ecf"
    NEX_VERSION = 40302  # protocol 4.3.2
class SMM2:
    """Super Mario Maker 2 (Switch)."""
    GAME_SERVER_ID = 0x22306D00
    ACCESS_KEY = "fdf6617f"
    NEX_VERSION = 40601  # protocol 4.6.15 (appslop)
| [
"ymarchand@me.com"
] | ymarchand@me.com |
3dbf29b81d4125c09676964c81ebeb19e854dc5f | e57220aca129b4b6d19e10d016ce3d29198e6cc3 | /profiles_project/settings.py | 24a1193cea04f949a18ac2c0dc52b711fa76b16b | [
"MIT"
] | permissive | hm289/profiles-rest-api | d693b5a67cbfab05171f85c0cd9ddc1d73732016 | acfeed0553994d4e86f1146b5537d24beb23d434 | refs/heads/master | 2023-01-23T07:48:30.442050 | 2020-12-06T19:41:40 | 2020-12-06T19:41:40 | 314,030,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,212 | py | """
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and
# load it from the environment before any real deployment.
SECRET_KEY = '&5ymn7ttb!8&c!0^o-0bc=dgj=x)^wz7(etf$kq@!ix=j(qc94'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# Swap Django's default user for the app's custom profile model.
AUTH_USER_MODEL = 'profiles_api.UserProfile'
| [
"maleki289@gmail.com"
] | maleki289@gmail.com |
511a92818a5b5df64a9fa1f2695d0cec0ec27022 | a2238429ea0e84e30441e7bf7319cd9810d0eb22 | /posts/models.py | b5af6f962c2417dca8750ad41ee565db81bbf079 | [] | no_license | praveenvino39/instagram_clone | f00a33d21eb75fa692468ce1c158d25bbd457b9c | b3316751a0ce419c2f88153b5e99096976c317c5 | refs/heads/main | 2023-08-23T15:23:12.500964 | 2021-10-15T20:10:13 | 2021-10-15T20:10:13 | 339,464,615 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | from datetime import datetime
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Post(models.Model):
    """A user's post: an uploaded image or a linked video, with a caption.

    Likes and comments are stored denormalized as JSON lists on the post.
    """

    # Author of the post; deleting the user cascades to their posts.
    user = models.ForeignKey(User, related_name="user",
                             related_query_name="user", on_delete=models.CASCADE)
    video_url = models.URLField(blank=False, null=True)
    caption = models.TextField(blank=True, null=True)
    is_video = models.BooleanField(default=False)
    post_image = models.ImageField(blank=True, null=True)
    # NOTE: JSONField defaults must be callables. A literal [] is a single
    # shared object reused for every instance (Django system check
    # fields.E010), so `default=list` is used instead.
    likes = models.JSONField(encoder=None, blank=True, null=True, default=list)
    comments = models.JSONField(
        encoder=None, blank=True, null=True, default=list)
    timestamp = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return str(self.caption)
| [
"praveena4e@gmail.com"
] | praveena4e@gmail.com |
1bf98132707c79679412594e52a4b60a9a481f22 | d78ae69ee26ee596d44bbebb3019b3fe27f1669e | /uriel/core/weakset.py | 24247860db7dd06f2bd504e99c40e45bd9a99a8e | [] | no_license | gregr/uriel | cd35c1ee128606147e22c6599fda21d3b1efcf84 | 1e68c771cf041a6951d1c853dd20f99a4b5e7eb5 | refs/heads/master | 2020-05-17T13:01:30.767504 | 2013-04-21T14:49:14 | 2013-04-21T14:49:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | # Gregory Rosenblatt
from weakref import ref
class WeakSet(object):
    """A set which weakly references its contents.

    Iteration yields the weak references themselves (call a reference to
    obtain its referent); dead references are pruned automatically via the
    weakref callback installed in Add().
    """

    def __init__(self, sequence=()):
        # Default was a mutable [] (shared between calls); an empty tuple is
        # safe and behaves identically here since it is only iterated.
        # NOTE: initial items are stored as-is, not wrapped in weak refs --
        # the normal entry point is Add().
        self.set = set(sequence)

    def __iter__(self):
        return iter(self.set)

    def Add(self, obj):
        # The callback removes the now-dead reference from the set.
        self.set.add(ref(obj, self.set.remove))

    def Remove(self, obj):
        # Equal weakrefs to the same live object hash alike, so this finds
        # the entry added by Add().
        self.set.remove(ref(obj))
| [
"greg.weiqi@gmail.com"
] | greg.weiqi@gmail.com |
4b27e8803d48b26d90c568aa778d7eec4c44dc85 | 2210a763aa4679283b8d3b59e862caf691d4d5af | /projects/migrations/0003_userprofile.py | b8d9373aa4b9f3d471c16018d1fdfdf8b3e7faea | [
"BSD-2-Clause"
] | permissive | dymaxionlabs/analytics-backend | 21c7dd4579188b20214c7c8ac92db26ca04348f8 | fb801b184e4e510d54e8addb283fd202c9dfe7b1 | refs/heads/master | 2022-05-10T23:30:35.958044 | 2022-04-24T13:58:59 | 2022-04-24T13:58:59 | 192,096,995 | 0 | 0 | BSD-3-Clause | 2022-04-24T13:59:11 | 2019-06-15T15:55:28 | Python | UTF-8 | Python | false | false | 863 | py | # Generated by Django 2.1.5 on 2019-01-21 03:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: creates the UserProfile model with a
    # one-to-one link to the configured AUTH_USER_MODEL. Do not hand-edit
    # the operations; generate a follow-up migration instead.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('projects', '0002_auto_20181130_1704'),
    ]

    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('location', models.CharField(blank=True, max_length=120)),
                ('phone', models.CharField(blank=True, max_length=40)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"munshkr@gmail.com"
] | munshkr@gmail.com |
56ba9749edd02ce19359eec905a0c7711e644444 | 5d7e5624da132dcad0d0ef74d612d8c834c9f24f | /download_slack.py | b68737bb51eaaa956fbd2537448c0ace52c47ca9 | [] | no_license | indyhall/slack-export | 790a9925361231427e2d4ebad99a6b4599bfdffd | 2fdd73de0074c32090977671c69f3195dd391cd5 | refs/heads/master | 2021-01-10T09:48:16.925379 | 2015-10-27T18:48:05 | 2015-10-27T18:48:05 | 45,062,404 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,628 | py | #!/usr/bin/env python
# Allow CAPS in function names
# pylint: disable=C0103
"""
Download all files linked in a Slack export archive.
All uploaded files are downloaded into their respective channel folders.
Optionally, Slack IDs can be used instead of file names.
"""
import argparse
from functools import partial
import json
import os.path
from multiprocessing import Pool
import urllib2
import sys
def find_directories(root_directory):
    """Return a list of subdirectories prefixed with the parent directory."""
    # Abort the whole run when the export path is not a directory.
    if not os.path.isdir(root_directory):
        sys.exit("Error: {} is not a valid directory".format(root_directory))
    search_directories = []
    for entry in os.listdir(root_directory):
        candidate = os.path.join(root_directory, entry)
        if os.path.isdir(candidate):
            search_directories.append(candidate)
    return search_directories
def find_URLs(directory, options):
    """Find URLs in JSON files."""
    json_files = [name for name in os.listdir(directory)
                  if name.endswith(".json")]
    files_for_download = []
    for name in json_files:
        file_path = os.path.join(directory, name)
        with open(file_path, "r") as json_file:
            payload = json.load(json_file)
        for message in payload:
            # Only "file_share" messages that actually carry a file record.
            if (message.get("subtype") != "file_share"
                    or message.get("file") is None):
                continue
            shared_file = message.get("file")
            download_URL = shared_file.get("url_download")
            # Only public slack-files.com links are downloadable.
            if (download_URL is None or not download_URL.startswith(
                    'https://slack-files.com/files-pub/')):
                continue
            if options.remote_name:
                download_filename = shared_file.get("id")
            else:
                download_filename = shared_file.get("name")
            # Names like "-.png" are disambiguated with the file id.
            if download_filename.startswith("-."):
                download_filename = download_filename.lstrip("-")
                download_filename = "{}{}".format(
                    shared_file.get("id"),
                    download_filename)
            files_for_download.append(
                (download_filename, download_URL))
    download_URLs(files_for_download, directory)
def download_URLs(files_for_download, directory):
    """Download the files."""
    # Each pair is (filename, url); downloads land inside *directory*.
    for pair in files_for_download:
        path = os.path.join(directory, pair[0])
        download = urllib2.urlopen(pair[1])
        content = download.read()
        with open(path, "wb") as downloaded_file:
            downloaded_file.write(content)
            # Python 2 print statement -- this module targets Python 2
            # (it also uses urllib2).
            print downloaded_file
def parse_arguments():
    """Parse given command line arguments."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "folder",
        help="the unzipped Slack export directory")
    parser.add_argument(
        "--remote-name", action="store_true",
        help="keep Slack file IDs instead of using the file names")
    return parser.parse_args()
def main():
    """Download all files linked in a Slack export archive."""
    options = parse_arguments()
    for directory in find_directories(options.folder):
        find_URLs(directory, options)


if __name__ == "__main__":
    main()
| [
"dangerouslyawesome@gmail.com"
] | dangerouslyawesome@gmail.com |
aca386663ee2b7bb52e07fbc3da653ace55ccb39 | fa3e527114cd5799dddb0a25067da4923eae354e | /FastSim/JUNO/reco/reco_test_v1.py | 44cb6f1977fa86490d524b1432e2d1ebb9f8a52f | [] | no_license | wenxingfang/FastSim_ML | e64c6b56ce2afd703d1ddda0ada2de6f65fde049 | d2f1abbb2f6879313d5f4f137b64c4d8bf10fe83 | refs/heads/master | 2022-11-28T01:35:39.727895 | 2020-08-03T15:47:37 | 2020-08-03T15:47:37 | 284,734,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,898 | py | import math
import yaml
import h5py
import json
import argparse
import numpy as np
from keras.models import model_from_json
from keras.models import model_from_yaml
from keras.models import load_model
from sklearn.utils import shuffle
def get_parser():
    """Build the command-line parser for this reconstruction test script."""
    parser = argparse.ArgumentParser(
        description='Run CalGAN training. '
                    'Sensible defaults come from [arXiv/1511.06434]',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    add = parser.add_argument
    add('--batch-size', action='store', type=int, default=2,
        help='batch size per update')
    add('--disc-lr', action='store', type=float, default=2e-5,
        help='Adam learning rate for discriminator')
    add('--gen-lr', action='store', type=float, default=2e-4,
        help='Adam learning rate for generator')
    add('--adam-beta', action='store', type=float, default=0.5,
        help='Adam beta_1 parameter')
    add('--prog-bar', action='store_true',
        help='Whether or not to use a progress bar')
    add('--no-attn', action='store_true',
        help='Whether to turn off the layer to layer attn.')
    add('--debug', action='store_true',
        help='Whether to run debug level logging')
    add('--model-in', action='store', type=str, default='',
        help='input of trained reg model')
    add('--weight-in', action='store', type=str, default='',
        help='input of trained reg weight')
    add('--datafile', action='store', type=str,
        help='yaml file with particles and HDF5 paths (see '
             'github.com/hep-lbdl/CaloGAN/blob/master/models/'
             'particles.yaml)')
    add('--output', action='store', type=str, default='',
        help='output of result real vs reco')
    return parser
if __name__ == '__main__':
    parser = get_parser()
    parse_args = parser.parse_args()
    # Trained regression model saved by Keras (path from --model-in).
    model = load_model(parse_args.model_in)
    d = h5py.File(parse_args.datafile, 'r')
    # Inputs: per-PMT first-hit-time and photo-electron-count maps, with a
    # trailing channel axis added for the conv model.
    first = np.expand_dims(d['firstHitTimeByPMT'][:], -1)
    second = np.expand_dims(d['nPEByPMT'][:], -1)
    # Targets: first four muon parameters (angles, normalized below).
    infoMuon = d['infoMuon'][:,:4]
    d.close()
    print('infoMuon dtype',infoMuon.dtype)
    infoMuon = infoMuon.astype(float)
    print('infoMuon dtype',infoMuon.dtype)
    ### normalize muon info ##############
    infoMuon[:,0]=(infoMuon[:,0])/math.pi
    infoMuon[:,1]=(infoMuon[:,1])/math.pi
    infoMuon[:,2]=(infoMuon[:,2])/math.pi
    infoMuon[:,3]=(infoMuon[:,3])/math.pi
    #infoMuon[:,4]=(infoMuon[:,4])/18000#17700.0
    first, second, infoMuon = shuffle(first, second, infoMuon, random_state=0)
    # Evaluate on one randomly chosen batch of size --batch-size.
    nBatch = int(first.shape[0]/parse_args.batch_size)
    iBatch = np.random.randint(nBatch, size=1)
    iBatch = iBatch[0]
    input_first = first [iBatch*parse_args.batch_size:(iBatch+1)*parse_args.batch_size]
    input_second = second[iBatch*parse_args.batch_size:(iBatch+1)*parse_args.batch_size]
    result = model.predict([input_first, input_second], verbose=True)
    # Model outputs: predicted (theta, phi) pairs for momentum and position.
    ptheta = result[0]
    pphi = result[1]
    rtheta = result[2]
    rphi = result[3]
    print('ptheta:',ptheta[:10])
    print('pphi:' ,pphi [:10])
    print('rtheta:',rtheta[:10])
    print('rphi:' ,rphi [:10])
    result = np.concatenate((ptheta, pphi, rtheta, rphi),axis=-1)
    real = infoMuon[iBatch*parse_args.batch_size:(iBatch+1)*parse_args.batch_size]
    result1= model.test_on_batch([input_first, input_second],[real[:,0], real[:,1], real[:,2], real[:,3]])
    result2= model.predict_on_batch([input_first, input_second])
    print('test_on_batch:', result1)
    print('predict_on_batch:', result2)
    print('choose batch:', iBatch)
    print('pred:\n',result)
    print('real:\n',real)
    print('diff:\n',result - real)
    ######### transfer to actual value #######
    # Undo the pi-normalization so errors are reported in radians.
    real[:,0] = real[:,0]*math.pi
    real[:,1] = real[:,1]*math.pi
    real[:,2] = real[:,2]*math.pi
    real[:,3] = real[:,3]*math.pi
    #real[:,4] = real[:,4]*18000
    result[:,0] = result[:,0]*math.pi
    result[:,1] = result[:,1]*math.pi
    result[:,2] = result[:,2]*math.pi
    result[:,3] = result[:,3]*math.pi
    #result[:,4] = result[:,4]*18000
    abs_diff = np.abs(result - real)
    print('abs error:\n', abs_diff)
    print('mean abs error:\n',np.mean(abs_diff, axis=0))
    print('std abs error:\n',np.std (abs_diff, axis=0))
    ###### save ##########
    hf = h5py.File(parse_args.output, 'w')
    hf.create_dataset('input_info', data=real)
    hf.create_dataset('reco_info' , data=result)
    hf.close()
    print ('Done')
| [
"1473717798@qq.com"
] | 1473717798@qq.com |
9d58362ddfed7ce96220d7ec389352353b2a2c8a | b05a3094820e8d8b315846ee1f4d90ee325d24f1 | /python/comp.py | 6062d3adb7e2dff67deb297a59889a56aaa53b60 | [] | no_license | aistairc/DLTSkip | 7841aaece9cba31c144212e3fee6a102fb1fc4f2 | d357f12d4ea3a91f3dce1632257e90be56be8c7c | refs/heads/master | 2020-03-21T17:45:58.695316 | 2018-08-03T02:58:07 | 2018-08-03T02:58:07 | 138,852,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,749 | py | # -*- coding: utf-8 -*-
import time
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sys
#sys.path.append('/home/genta/work/spams-python/install/lib/python2.7/site-packages')
import spams
import util
class Compressor:
    """Base class for the time-series compressors in this module."""

    def print_setting(self):
        """Print a blank line followed by the concrete subclass name."""
        print()
        print(type(self).__name__)
class SparseDecomposition(Compressor):
    """Compress windowed multi-sensor time series via sparse coding (SPAMS).

    A dictionary D is learned with spams.trainDL; each window is coded as a
    sparse non-negative combination of atoms (spams.lasso), and the coded
    window sequence is optionally thinned before storage.
    """

    def __init__(self, plot_params=None):
        # Plot switches; when all are False no matplotlib code runs.
        if plot_params is None:
            self.plot_params = {
                'cost': True,
                'dendrogram': False,
                'dictionary': False,
                'row_atoms': 10,
                'decomposition': False,
                'num_decompositions': 1,
                'alpha': False,
                'alpha_thinned': False,
            }
        else:
            self.plot_params = plot_params

    def fit(self, Xs, fit_params):
        """Learn the sparse-coding dictionary from a list of window arrays."""
        self.fit_params = fit_params
        lambda1 = fit_params['lambda1']
        lambda2 = 0.0
        num_atoms = fit_params['num_atoms']
        batch_size = 512  # spams default
        iter_min = fit_params['iter_min']
        params_dl = {
            'mode': spams.PENALTY,
            'lambda1': lambda1,
            'lambda2': lambda2,
            'K': num_atoms,
            'posAlpha': True,
            'posD': False,
            'numThreads': -1,
            'batchsize': batch_size,
            'iter': iter_min,
            'return_model': False,
            'verbose': False,
        }
        params_sd = {
            'mode': spams.PENALTY,
            'lambda1': lambda1,
            'lambda2': lambda2,
            'pos': True,
            'numThreads': -1,
        }
        X = self.stack(Xs)
        self.train_dictionary(X, params_dl, params_sd)
        if self.plot_params['dictionary']:
            self.plot_dictionary()

    def stack(self, Xs):
        """Stack window arrays column-wise into one (window*sensor, n) matrix."""
        for (i, X) in enumerate(Xs):
            window_size = X.shape[1]
            num_sensors = X.shape[2]
            if i == 0:
                stacked = X.reshape([-1, window_size * num_sensors]).T
            else:
                stacked = np.c_[stacked,
                                X.reshape([-1, window_size * num_sensors]).T]
        self.window_size = window_size
        self.num_sensors = num_sensors
        return stacked

    def train_dictionary(self, X, params_dl, params_sd):
        """Run spams.trainDL once and keep the learned dictionary."""
        # params_sd is kept in the signature for interface compatibility;
        # this variant does not re-run the lasso cost loop.
        X = np.asfortranarray(X)
        self.D = spams.trainDL(X, **params_dl)

    def get_cost(self, X, D, alpha, params_dl):
        """Evaluate the dictionary-learning objective for the configured mode."""
        mode = params_dl['mode']
        lambda1 = params_dl['lambda1']
        lambda2 = params_dl['lambda2']
        A = alpha.toarray()
        if mode == 0:
            cost = 0.5 * np.power(np.linalg.norm(X - D * alpha, 2), 2)
        elif mode == 1:
            cost = np.linalg.norm(A, 1)
        elif mode == 2:
            cost = 0.5 * np.power(
                np.linalg.norm(X - D * alpha, 2), 2
            ) + lambda1 * np.linalg.norm(A, 1
            ) + 0.5 * lambda2 * np.power(np.linalg.norm(A, 2), 2)
        elif mode == 3:
            cost = 0.5 * np.power(np.linalg.norm(X - D * alpha, 2), 2)
        elif mode == 4:
            cost = np.linalg.norm(A, 0)
        elif mode == 5:
            cost = 0.5 * np.power(
                np.linalg.norm(X - D * alpha, 2), 2
            ) + lambda1 * np.linalg.norm(A, 0)
        else:
            cost = 0.0
        return cost

    def plot_dictionary(self):
        """Plot every atom as a (window_size, num_sensors) curve grid."""
        D = self.D
        window_size = self.window_size
        num_sensors = self.num_sensors
        num_atoms = D.shape[1]
        row_atoms = self.plot_params['row_atoms']
        # // keeps the subplot grid arguments integral (Python 3).
        num_rows = (num_atoms - 1) // row_atoms + 1
        plt.figure(figsize=(util.plot_unit_width * row_atoms,
                            util.plot_unit_height * num_rows))
        for a in range(num_atoms):
            plt.subplot(num_rows, row_atoms, a + 1)
            plt.title('[' + str(a) + ']')
            plt.ylim([np.min(D), np.max(D)])
            plt.plot(D[:, a].reshape([window_size, num_sensors]))
        plt.tight_layout()
        plt.show()

    def compress(self, Xs, windowses, labels, compress_params):
        """Sparse-code every sequence and thin its windows.

        Returns the number of nonzero coefficients kept (the storage cost).
        """
        self.compress_params = compress_params
        window_size = self.window_size
        num_sensors = self.num_sensors
        lambda1 = compress_params['lambda1']
        lambda2 = 0.0
        params_sd = {
            'mode': spams.PENALTY,
            'lambda1': lambda1,
            'lambda2': lambda2,
            'pos': True,
            'numThreads': -1,
        }
        alphas = []
        windows_indices = []
        for (X, windows) in zip(Xs, windowses):
            alpha = self.decompose(X.reshape(
                [-1, window_size * num_sensors]).T, params_sd)
            windows_index = self.thin_windows(alpha, windows)
            alphas.append(alpha)
            windows_indices.append(windows_index)
            if self.plot_params['decomposition']:
                self.plot_decomposition(X, alpha)
            if self.plot_params['alpha']:
                self.plot_alpha(alpha)
            if self.plot_params['alpha_thinned']:
                self.plot_alpha(alpha, windows_index)
        self.alphas = alphas
        self.windows_indices = windows_indices
        self.labels = labels
        num_nonzeros = 0
        for (alpha, window_index) in zip(alphas, windows_indices):
            A = alpha.toarray()
            # Bug fix: the count previously indexed with the stale
            # `windows_index` left over from the loop above instead of the
            # per-sequence `window_index` loop variable.
            num_nonzeros += ((A[:, window_index] != 0.0) * 1).sum()
        return num_nonzeros

    def decompress(self):
        """Reconstruct approximate windows from the stored sparse codes."""
        D = self.D
        window_size = self.window_size
        num_sensors = self.num_sensors
        Rs = []
        for alpha in self.alphas:
            A = alpha.toarray()
            R = D.dot(A)
            R = R.T.reshape([-1, window_size, num_sensors])
            Rs.append(R)
        return (Rs, self.windows_indices)

    def decompose(self, X, params_sd):
        """Sparse-code the columns of X against the learned dictionary."""
        X = np.asfortranarray(X)
        alpha = spams.lasso(X, self.D, **params_sd)
        return alpha

    def thin_windows(self, alpha, windows):
        """Choose which window indices to keep, per the compression mode."""
        num_windows = len(windows)
        mode = self.compress_params['mode']
        if mode == 'fixed':
            interval = self.compress_params['interval']
            windows_index = list(range(0, num_windows, interval))
            # Always keep the final window.
            if windows_index[-1] != num_windows - 1:
                windows_index.append(num_windows - 1)
        elif mode == 'variable':
            A = alpha.toarray()
            windows_index = [0] + self.segment(A) + [num_windows - 1]
        else:
            windows_index = list(range(0, num_windows))
        return windows_index

    def segment(self, A, windows=None):
        """Recursively pick cut points at the dominant activations of *A*.

        Returns interior window indices at which the coded sequence is
        split; spans no longer than interval_max are left uncut.
        """
        interval_max = self.compress_params['interval_max']
        interval_min = self.compress_params['interval_min']
        if windows is None:
            if A.shape[1] > interval_max:
                a = A[:, interval_min: -interval_min].max(axis=1).argmax()
                w = A[a, interval_min: -interval_min].argmax() + interval_min
                # Bug fix: map() is lazy in Python 3 and cannot be
                # concatenated to a list; use a comprehension instead.
                return (self.segment(A[:, :w + 1]) + [w]
                        + [x + w for x in self.segment(A[:, w:])])
            else:
                return []
        else:  # use windows information
            return []

    def plot_decomposition(self, X, alpha):
        """Plot each window next to its reconstruction and active atoms."""
        D = self.D
        window_size = self.window_size
        num_sensors = self.num_sensors
        R = D.dot(alpha.toarray())
        R = R.T.reshape([-1, window_size, num_sensors])
        num_windows = X.shape[0]
        row_atoms = self.plot_params['row_atoms']
        for w in range(num_windows):
            if w >= self.plot_params['num_decompositions']:
                break
            plt.figure(figsize=(util.plot_unit_width * (row_atoms + 1),
                                util.plot_unit_height))
            # plot X & R
            plt.subplot(1, row_atoms + 1, 1)
            plt.plot(X[w], 'o-')
            plt.plot(R[w])
            # plot the active atoms of this window's code
            col = 2
            for indptr in range(alpha.indptr[w], alpha.indptr[w + 1]):
                plt.subplot(1, row_atoms + 1, col)
                plt.title('[{0}] {1:.3f}'.format(
                    alpha.indices[indptr], alpha.data[indptr]))
                plt.ylim([D.min(), D.max()])
                plt.yticks([])
                plt.plot(D[:, alpha.indices[indptr]].reshape(
                    [window_size, num_sensors]))
                if col > row_atoms:
                    break
                col += 1
            plt.show()

    def plot_alpha(self, alpha, windows_index=None):
        """Heat-map the code matrix, optionally masking thinned windows."""
        A = alpha.toarray()
        if windows_index is not None:
            mask = np.zeros(A.shape[1])
            mask[windows_index] = 1
            A *= mask
        compression_rate = 1.0 * A.nonzero()[0].size / A.size
        plt.figure(figsize=(A.shape[1] * util.plot_cell_size,
                            A.shape[0] * util.plot_cell_size))
        plt.title('compression rate = {0:g}'.format(compression_rate))
        sns.heatmap(A, xticklabels=False, yticklabels=False,
                    cbar=False, square=True)
        plt.show()

    def aggregate(self, alpha, windows_index):
        """Fold the kept windows' coefficients into one feature vector."""
        mode = self.extract_params['mode']
        A = alpha.toarray()
        if mode == 'max':
            aggregated = A[:, windows_index].max(axis=1)
        elif mode == 'sum':
            aggregated = A[:, windows_index].sum(axis=1)
        elif mode == 'mean':
            aggregated = A[:, windows_index].mean(axis=1)
        elif mode == 'hist':
            # Count in how many kept windows each atom is active.
            A_binary = A[:, windows_index]
            A_binary[A_binary > 0.0] = 1
            aggregated = A_binary.sum(axis=1)
        elif mode == 'hist_max':
            num_atoms = A.shape[0]
            aggregated = np.histogram(A[:, windows_index].argmax(axis=0),
                                      bins=num_atoms, range=(0, num_atoms))[0]
        else:
            # Previously an unknown mode fell through to a NameError.
            raise ValueError('unknown aggregation mode: {0}'.format(mode))
        return aggregated

    def extract_augmented(self, extract_params):
        """Extract features for every thinning phase (data augmentation)."""
        return self.extract(extract_params, augmented=True)

    def extract(self, extract_params, augmented=False):
        """Build (features, labels) arrays from the stored decompositions."""
        self.extract_params = extract_params
        if augmented:
            mode = self.compress_params['mode']
            interval = self.compress_params['interval']
            features = []
            labels = []
            if mode == 'fixed':
                for (alpha, label) in zip(self.alphas, self.labels):
                    num_windows = alpha.shape[1]
                    # One feature per thinning phase offset.
                    for i in range(interval):
                        windows_index = range(i, num_windows, interval)
                        if len(windows_index) == 0:
                            break
                        feature = self.aggregate(alpha, windows_index)
                        features.append(feature)
                        labels.append(label)
            elif mode == 'variable':  # XXX not implemented
                pass
            return (np.array(features), np.array(labels))
        else:
            features = []
            for (alpha, windows_index) in zip(
                    self.alphas, self.windows_indices):
                feature = self.aggregate(alpha, windows_index)
                features.append(feature)
            return (np.array(features), np.array(self.labels))
class SingularValueDecomposition(Compressor):  # SVD
    """Compress windowed multi-sensor series by truncated SVD projection.

    fit() learns a shared right-singular basis V from the stacked windows;
    compress() projects windows onto V, truncates the coefficients to k
    components, and thins the window sequence; decompress() reconstructs.
    """

    def __init__(self, plot_params=None):
        if plot_params is None:
            self.plot_params = {
                'basis': True,
                'row_basis': 10,
                'coefficient': True,
                'coefficient_truncated': False,
                'coefficient_thinned': True,
            }
        else:
            self.plot_params = plot_params

    def fit(self, Xs, fit_params):
        """Learn the shared basis V for the stacked windows (mode dependent)."""
        self.fit_params = fit_params
        mode = fit_params['mode']
        if mode == 'window_sensor_vs_time' or mode == 'window_vs_sensor_time':
            X = self.stack(Xs)
            (U, s, V) = np.linalg.svd(X)
            # Only the right singular vectors are needed for projection.
            self.V = V
            if self.plot_params['basis']:
                self.plot_basis()

    def stack(self, Xs):
        """Stack all windows into one 2-D matrix according to the fit mode."""
        mode = self.fit_params['mode']
        if mode == 'window_sensor_vs_time':
            # One row per (window, sensor): rows are single-sensor series.
            for (i, X) in enumerate(Xs):
                window_size = X.shape[1]
                num_sensors = X.shape[2]
                if i == 0:
                    stacked = X.transpose((0, 2, 1)).reshape([-1, window_size])
                else:
                    stacked = np.r_[stacked, X.transpose((0, 2, 1)).reshape(
                        [-1, window_size])]
            self.window_size = window_size
            self.num_sensors = num_sensors
            return stacked
        elif mode == 'window_vs_sensor_time':
            # One row per window: rows concatenate all sensors over time.
            for (i, X) in enumerate(Xs):
                window_size = X.shape[1]
                num_sensors = X.shape[2]
                if i == 0:
                    stacked = X.reshape([-1, window_size * num_sensors])
                else:
                    stacked = np.r_[stacked, X.reshape(
                        [-1, window_size * num_sensors])]
            self.window_size = window_size
            self.num_sensors = num_sensors
            return stacked

    def plot_basis(self):
        """Plot each basis vector of V in a grid of small panels."""
        V = self.V
        V_max = V.max()
        V_min = V.min()
        num_basis = V.shape[0]
        mode = self.fit_params['mode']
        row_basis = self.plot_params['row_basis']
        # // keeps the subplot grid arguments integral (Python 3).
        num_rows = (num_basis - 1) // row_basis + 1
        if mode == 'window_sensor_vs_time':
            plt.figure(figsize=(util.plot_unit_width * row_basis,
                                util.plot_unit_height * num_rows))
            for b in range(num_basis):
                plt.subplot(num_rows, row_basis, b + 1)
                plt.title('[' + str(b) + ']')
                plt.ylim([V_min, V_max])
                plt.plot(V[b])
            plt.tight_layout()
            plt.show()
        elif mode == 'window_vs_sensor_time':
            window_size = self.window_size
            num_sensors = self.num_sensors
            plt.figure(figsize=(util.plot_unit_width * row_basis,
                                util.plot_unit_height * num_rows))
            for b in range(num_basis):
                plt.subplot(num_rows, row_basis, b + 1)
                plt.title('[' + str(b) + ']')
                plt.ylim([V_min, V_max])
                plt.plot(V[b].reshape([window_size, num_sensors]))
            plt.tight_layout()
            plt.show()

    def compress(self, Xs, windowses, compress_params):
        """Project, truncate, and thin the windows of every sequence."""
        self.compress_params = compress_params
        mode = self.fit_params['mode']
        if mode == 'sensor_vs_time':
            # Per-window SVD: each window keeps its own coefficients + basis.
            Css = []
            Vss = []
            windows_indices = []
            for (X, windows) in zip(Xs, windowses):
                num_windows = X.shape[0]
                Cs = []
                Vs = []
                for w in range(num_windows):
                    (U, s, V) = np.linalg.svd(X[w].T, full_matrices=False)
                    US = U.dot(np.diag(s))
                    C = self.truncate(US)
                    Cs.append(C)
                    Vs.append(V)
                Css.append(Cs)
                Vss.append(Vs)
                windows_index = self.thin_windows(windows)
                windows_indices.append(windows_index)
            self.window_size = Xs[0].shape[1]
            self.num_sensors = Xs[0].shape[2]
            self.Css = Css
            self.Vss = Vss
            self.windows_indices = windows_indices
        elif mode == 'window_sensor_vs_time':
            V_inv = self.V.T  # V is orthogonal, so transpose == inverse
            window_size = self.window_size
            num_sensors = self.num_sensors
            Cs = []
            windows_indices = []
            for (X, windows) in zip(Xs, windowses):
                US = X.transpose((0, 2, 1)).reshape(
                    [-1, window_size]).dot(V_inv)
                C = self.truncate(US)
                windows_index = self.thin_windows(windows)
                Cs.append(C)
                windows_indices.append(windows_index)
                if self.plot_params['coefficient']:
                    self.plot_coefficient(US)
                # Bug fix: keys must match those defined in __init__
                # ('coefficient_truncated'/'coefficient_thinned'); the old
                # 'coefficient_truncate'/'coefficient_thin' raised KeyError.
                if self.plot_params['coefficient_truncated']:
                    self.plot_coefficient(C)
                if self.plot_params['coefficient_thinned']:
                    self.plot_coefficient(C, windows_index)
            self.Cs = Cs
            self.windows_indices = windows_indices
        elif mode == 'window_vs_sensor_time':
            V_inv = self.V.T  # np.linalg.inv(self.V)
            window_size = self.window_size
            num_sensors = self.num_sensors
            Cs = []
            windows_indices = []
            for (X, windows) in zip(Xs, windowses):
                US = X.reshape([-1, window_size * num_sensors]).dot(V_inv)
                C = self.truncate(US)
                windows_index = self.thin_windows(windows)
                Cs.append(C)
                windows_indices.append(windows_index)
                if self.plot_params['coefficient']:
                    self.plot_coefficient(US)
                if self.plot_params['coefficient_truncated']:
                    self.plot_coefficient(C)
                if self.plot_params['coefficient_thinned']:
                    self.plot_coefficient(C, windows_index)
            self.Cs = Cs
            self.windows_indices = windows_indices

    def truncate(self, US):
        """Zero out all but k coefficient columns (shared or per window)."""
        k = self.compress_params['k']
        mode = self.compress_params['mode']
        if mode == 'shared':
            # Keep the first k components for every window.
            mask = np.zeros(US.shape)
            mask[:, :k] = 1
            C = US * mask
        elif mode == 'unshared':
            # Keep each window's k largest-magnitude components.
            num_windows = US.shape[0]
            argsort = np.absolute(US).argsort(axis=1)[:, ::-1][:, :k]
            mask = np.zeros(US.shape)
            for w in range(num_windows):
                mask[w, argsort[w]] = 1
            C = US * mask
        return C

    def thin_windows(self, windows):
        """Keep every *interval*-th window index, always including the last."""
        num_windows = len(windows)
        interval = self.compress_params['interval']
        # Bug fix: Python 3 ranges are immutable and have no append(); the
        # endpoint append below requires a real list.
        windows_index = list(range(0, num_windows, interval))
        if windows_index[-1] != num_windows - 1:
            windows_index.append(num_windows - 1)
        return windows_index

    def plot_coefficient(self, C, windows_index=None):
        """Heat-map the coefficient matrix; optionally mask thinned windows."""
        mode = self.fit_params['mode']
        if mode == 'sensor_vs_time':
            pass
        elif mode == 'window_sensor_vs_time':
            num_sensors = self.num_sensors
            if windows_index is not None:
                mask = np.zeros(C.shape)
                for s in range(num_sensors):
                    mask[np.array(windows_index) * num_sensors + s] = 1
                C *= mask  # XXX mutates the caller's array in place
        elif mode == 'window_vs_sensor_time':
            if windows_index is not None:
                mask = np.zeros(C.shape)
                mask[windows_index] = 1
                C *= mask  # XXX mutates the caller's array in place
        compression_rate = 1.0 * C.nonzero()[0].size / C.size
        plt.figure(figsize=(C.shape[0] * util.plot_cell_size,
                            C.shape[1] * util.plot_cell_size))
        plt.title('compression rate = {0:g}'.format(compression_rate))
        sns.heatmap(C.T, xticklabels=False, yticklabels=False,
                    cbar=False, square=True)
        plt.show()

    def decompress(self):
        """Reconstruct approximate windows from the stored coefficients."""
        mode = self.fit_params['mode']
        if mode == 'sensor_vs_time':
            Rss = []
            for (Cs, Vs) in zip(self.Css, self.Vss):
                Rs = []
                for (C, V) in zip(Cs, Vs):
                    R = C.dot(V).T
                    Rs.append(R)
                Rss.append(np.array(Rs))
            return (Rss, self.windows_indices)
        elif mode == 'window_sensor_vs_time':
            V = self.V
            window_size = self.window_size
            num_sensors = self.num_sensors
            Rs = []
            for C in self.Cs:
                R = C.dot(V)
                R = R.reshape([-1, num_sensors, window_size]).transpose(
                    (0, 2, 1))
                Rs.append(R)
            return (Rs, self.windows_indices)
        elif mode == 'window_vs_sensor_time':
            V = self.V
            window_size = self.window_size
            num_sensors = self.num_sensors
            Rs = []
            for C in self.Cs:
                R = C.dot(V)
                R = R.reshape([-1, window_size, num_sensors])
                Rs.append(R)
            return (Rs, self.windows_indices)
class PiecewiseAggregateApproximation(Compressor):  # PAA
    """Compress each window to its per-sensor mean and thin the windows."""

    def __init__(self):
        pass

    def compress(self, Xs, windowses, compress_params):
        """Average every window over time and record the kept window indices."""
        self.compress_params = compress_params
        As = []
        windows_indices = []
        for (X, windows) in zip(Xs, windowses):
            A = X.mean(axis=1)  # (num_windows, num_sensors)
            windows_index = self.thin_windows(windows)
            As.append(A)
            windows_indices.append(windows_index)
        self.As = As
        self.windows_indices = windows_indices

    def thin_windows(self, windows):
        """Keep every *interval*-th window index, always including the last."""
        num_windows = len(windows)
        interval = self.compress_params['interval']
        # Bug fix: Python 3 ranges have no append(); a list is required for
        # the endpoint append below.
        windows_index = list(range(0, num_windows, interval))
        if windows_index[-1] != num_windows - 1:
            windows_index.append(num_windows - 1)
        return windows_index

    def decompress(self):
        """Expand each per-window mean back to a constant window."""
        interval = self.compress_params['interval']
        Rs = []
        for (A, windows_index) in zip(self.As, self.windows_indices):
            num_sensors = A.shape[1]
            R = A.repeat(interval, axis=0).reshape([-1, interval, num_sensors])
            Rs.append(R)
        return (Rs, self.windows_indices)
#class SymbolicAggregateApproximation(Compressor): # SAX
#class PiecewiseLinearApproximation(Compressor): # PLA
#class AdaptivePiecewiseConstantApproximation(Compressor): # APCA
#class DiscreteFourierTransform(Compressor): # DFT
#class DiscreteWaveletTransform(Compressor): # DWT
| [
"nis@soum.co.jp"
] | nis@soum.co.jp |
90fa0d53bd2415d8601241b859795afb31b89fc0 | ef7a8dd99c890d640b74247fbb27e2978041e6b6 | /migrations/versions/d545899a6ca_added_qty_to_corporate_purchase.py | 4fb6c77d9ebdf77c6eb4aa9ec8ea0ef9414ce4e5 | [] | no_license | softwarelivre/segue | b0e89528ffd179719e1aa009cde42028e75febfe | d045ea652f86bd93c27bf9d183a9692a3ae2b3ba | refs/heads/master | 2020-04-05T07:47:51.898553 | 2017-08-16T00:23:35 | 2017-08-16T00:23:35 | 81,842,293 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | """Added qty to corporate purchase
Revision ID: d545899a6ca
Revises: 4f89d195687c
Create Date: 2016-04-01 15:07:42.001784
"""
# revision identifiers, used by Alembic.
revision = 'd545899a6ca'
down_revision = '4f89d195687c'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column(u'purchase', sa.Column('cr_qty', sa.Integer(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column(u'purchase', 'cr_qty')
### end Alembic commands ###
| [
"inscricao@softwarelivre.org"
] | inscricao@softwarelivre.org |
8f650f01937a81fbc4be7902be1f150c38b6b484 | 0c8eb6a1f8eb6c7d3276d69887e4e3938c99d657 | /intro/investment.py | 3e9983bc0b2bd0e17e7b116c78b9fe44e4040e8e | [] | no_license | symatevo/python_basics | 54ebcde0069516dfb8d1734c32c4a622f3d634a8 | d8c68189797d8dafab3c99d49ebbd13e23de65a8 | refs/heads/main | 2023-07-09T18:56:25.145671 | 2021-08-17T19:12:49 | 2021-08-17T19:12:49 | 397,365,515 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py | P = 10000
n = 12
r = 0.08
t = int(input('years: ;))
A = P * ((1 + r/n) ** (n * t))
print(A)
| [
"noreply@github.com"
] | noreply@github.com |
eef8ad3df7f2867949338d395c89c8fdc5d64834 | 1058984cbca36552120092af1f849cea27662c50 | /rebench/tests/interop/time_adapter_test.py | a73f964d66ac61d2533d516993e656b4d65258f5 | [
"MIT"
] | permissive | smarr/ReBench | 21437c7a348a1821f8c5e5016539211376439447 | fd8fa6beeac13c87e848ea76efb1243d1e6ee3ae | refs/heads/master | 2023-08-28T00:38:18.378579 | 2023-08-06T15:11:50 | 2023-08-06T15:11:50 | 1,263,079 | 71 | 19 | MIT | 2023-08-06T15:11:52 | 2011-01-17T10:43:28 | Python | UTF-8 | Python | false | false | 2,355 | py | # Copyright (c) 2016 Stefan Marr <http://www.stefan-marr.de/>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from unittest import TestCase
from ...interop.adapter import OutputNotParseable
from ...interop.time_adapter import TimeAdapter, TimeManualAdapter
class _TestRunId(object):
def cmdline_for_next_invocation(self):
return "FOO"
class TimeAdapterTest(TestCase):
    """Unit tests for the time-command-based measurement adapters."""

    def test_acquire_command(self):
        adapter = TimeAdapter(False, None)
        cmd = adapter.acquire_command(_TestRunId())
        # The run command must be prefixed with an absolute path to a
        # `time` (or `gtime`) binary.
        self.assertRegex(cmd, r"^/.*/bin/g?time")

    def test_parse_data(self):
        # Output shaped like POSIX `time -p`: one value per line, in seconds.
        data = """real 11.00
user 5.00
sys 1.00"""
        adapter = TimeAdapter(False, None)
        TimeAdapter._use_formatted_time = False
        data = adapter.parse_data(data, None, 1)
        self.assertEqual(1, len(data))
        measurements = data[0].get_measurements()
        # One measurement per reported value: real, user, and sys.
        self.assertEqual(3, len(measurements))
        # Total value is the "real" 11.00 s converted to milliseconds.
        self.assertEqual(11000, data[0].get_total_value())

    def test_parse_no_data(self):
        # Empty output cannot be parsed and must raise, not return garbage.
        adapter = TimeAdapter(False, None)
        self.assertRaises(OutputNotParseable, adapter.parse_data, "", None, 1)

    def test_manual_adapter(self):
        # The manual adapter must leave the command line untouched.
        adapter = TimeManualAdapter(False, None)
        cmd = adapter.acquire_command(_TestRunId())
        self.assertEqual("FOO", cmd)
| [
"git@stefan-marr.de"
] | git@stefan-marr.de |
4e6831c00eea402266ff39cd2de8df9a3ff7de0f | a72cb4d00528fb3d2d47f99a1ccca1b8b9b41ff7 | /scripts/addons_extern/space_view3d_rotview.py | e9abc14ef5ebc45a9695ad927f5db3e218dce9c0 | [] | no_license | talocan/blenderpython | b05204881183ff901ec189916a3bcc1d3e9d3e20 | 056ac37e76a1b410696c9efe4fe0ea09fdc68c0e | refs/heads/master | 2021-01-18T05:16:47.221786 | 2014-07-11T17:01:53 | 2014-07-11T17:01:53 | 21,749,332 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,151 | py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
__bpydoc__ = """\
The RotView addon serves the purpose of setting fixed rotation values for each of the
right/left/front/back/top/bottom views.
Documentation
First go to User Preferences->Addons and enable the RotView addon in the 3D View category.
To change the rotation in realtime first press one of the numerical keypad
view shortcuts to switch into a view and set the rotation
value with the slider (doubleclick for keyboard input) or use the <-90 and 90-> buttons to
switch to the next multiple of 90 degrees value. Button 0 goes back to zero rotation.
The rotation value of each of the views will be remembered when switching into it again from
the numerical keypad.
REMARK - when first enabling the addon, when in an affected view already, rotation will not work.
Enable the view again with numerical keypad shortcut.
REMARK - will not work when switching view through the View menu
"""
# Add-on metadata read by Blender's add-on manager (shown in User Preferences).
bl_info = {
    "name": "RotView",
    "author": "Gert De Roost",
    "version": (0, 1, 7),
    "blender": (2, 6, 3),
    "location": "View3D > UI",
    "description": "Set fixed view rotation values",
    "warning": "",
    "wiki_url": "",
    "tracker_url": "",
    "category": "3D View"}
if "bpy" in locals():
import imp
import bpy
from mathutils import *
import math
from time import sleep
# --- Module state shared between the panel, the operators and the modal handler ---
activated = 0    # 1 once the RotView modal operator has been started
frontrot = 0     # remembered rotation angle for the FRONT view
backrot = 0      # remembered rotation angle for the BACK view
rightrot = 0     # remembered rotation angle for the RIGHT view
leftrot = 0      # remembered rotation angle for the LEFT view
toprot = 0       # remembered rotation angle for the TOP view
bottomrot = 0    # remembered rotation angle for the BOTTOM view
inview = 0       # 1 while a numpad view is active and may be rotated live
oldangle = 0     # last angle applied, to skip redundant view updates
oldview = 0      # NOTE(review): assigned here only - appears unused
adapt = 0        # flag read by redraw() to schedule a deferred view update
viewstring = ["", "FRONT", "BACK", "RIGHT", "LEFT", "TOP", "BOTTOM"]  # labels indexed by viewnum
viewnum = 1      # currently tracked view (index into viewstring)

# Per-scene slider holding the rotation angle of the current view.
bpy.types.Scene.Angle = bpy.props.FloatProperty(
        name = "Rotation angle",
        description = "Enter rotation angle",
        default = 0,
        min = -360,
        max = 360)
class RotViewPanel(bpy.types.Panel):
    """Sidebar panel: an Activate button, plus the angle slider and the
    90-degree step buttons once the modal operator is running."""

    bl_label = "RotView"
    bl_space_type = "VIEW_3D"
    bl_region_type = "UI"

    def draw_header(self, context):
        global activated

        layout = self.layout
        # Until the modal operator runs, only offer the Activate button.
        if not(activated):
            layout.operator("view3d.rotview", text="Activate")

    def draw(self, context):
        global frontrot, backrot, rightrot, leftrot, toprot, bottomrot, matrot
        global oldangle, inview

        scn = bpy.context.scene
        layout = self.layout
        if activated:
            layout.label(viewstring[viewnum])
            layout.prop(scn, "Angle")
            row = layout.row()
            row.operator("view.minus90",
                text="<-90")
            row.operator("view.nill",
                text="0")
            row.operator("view.plus90",
                text="90->")
            # Remember the slider angle for the active view, and rebuild the
            # view quaternion: the view's base orientation composed with the
            # user rotation around the view axis (sign/axis differ per view).
            if viewnum == 1:
                frontrot = scn.Angle
                matrotX = Matrix.Rotation(math.radians(90), 3, 'X')
                matrotY = Matrix.Rotation(math.radians(-scn.Angle), 3, 'Y')
                matrotZ = Matrix.Rotation(math.radians(0), 3, 'Z')
                quat = matrotX.to_quaternion()
                quat.rotate(matrotY)
                quat.rotate(matrotZ)
            elif viewnum == 2:
                backrot = scn.Angle
                matrotX = Matrix.Rotation(math.radians(90), 3, 'X')
                matrotY = Matrix.Rotation(math.radians(-scn.Angle), 3, 'Y')
                matrotZ = Matrix.Rotation(math.radians(180), 3, 'Z')
                quat = matrotX.to_quaternion()
                quat.rotate(matrotY)
                quat.rotate(matrotZ)
            elif viewnum == 3:
                rightrot = scn.Angle
                matrotX = Matrix.Rotation(math.radians(scn.Angle), 3, 'X')
                matrotY = Matrix.Rotation(math.radians(90), 3, 'Y')
                matrotZ = Matrix.Rotation(math.radians(90), 3, 'Z')
                quat = matrotZ.to_quaternion()
                quat.rotate(matrotY)
                quat.rotate(matrotX)
            elif viewnum == 4:
                leftrot = scn.Angle
                matrotX = Matrix.Rotation(math.radians(-scn.Angle), 3, 'X')
                matrotY = Matrix.Rotation(math.radians(-90), 3, 'Y')
                matrotZ = Matrix.Rotation(math.radians(-90), 3, 'Z')
                quat = matrotZ.to_quaternion()
                quat.rotate(matrotY)
                quat.rotate(matrotX)
            elif viewnum == 5:
                toprot = scn.Angle
                matrotX = Matrix.Rotation(math.radians(0), 3, 'X')
                matrotY = Matrix.Rotation(math.radians(0), 3, 'Y')
                matrotZ = Matrix.Rotation(math.radians(scn.Angle), 3, 'Z')
                quat = matrotX.to_quaternion()
                quat.rotate(matrotY)
                quat.rotate(matrotZ)
            elif viewnum == 6:
                bottomrot = scn.Angle
                matrotX = Matrix.Rotation(math.radians(180), 3, 'X')
                matrotY = Matrix.Rotation(math.radians(0), 3, 'Y')
                matrotZ = Matrix.Rotation(math.radians(-scn.Angle), 3, 'Z')
                quat = matrotX.to_quaternion()
                quat.rotate(matrotY)
                quat.rotate(matrotZ)

            # Apply only while a tracked view is live and the angle changed.
            if (inview == 1) and scn.Angle != oldangle:
                bpy.context.space_data.region_3d.view_rotation = quat
#                matrix = bpy.context.space_data.region_3d.view_matrix.to_3x3()
#                matrix.rotate(matrot)
#                bpy.context.space_data.region_3d.view_matrix = matrix.to_4x4()
#                bpy.context.region.tag_redraw()
                oldangle = scn.Angle
            # inview == 2 is a one-shot request: apply once, then disarm.
            if inview == 2:
                bpy.context.space_data.region_3d.view_rotation = quat
                inview = 0
class Minus90(bpy.types.Operator):
    """Step the view rotation down to the previous multiple of 90 degrees."""

    bl_idname = "view.minus90"
    bl_label = ""
    # Fixed tooltip: this operator (the "<-90" button) *decreases* the angle,
    # so it moves to the previous multiple; the old text said "next"
    # (it was swapped with Plus90's tooltip).
    bl_description = "To previous 90 degrees multiple"

    def invoke(self, context, event):
        scn = bpy.context.scene
        # Angle already exactly on a multiple of 90?
        if (scn.Angle // 90) == (scn.Angle / 90):
            if scn.Angle == -360:
                # Wrap around so the slider stays inside [-360, 360].
                scn.Angle = 270
            else:
                scn.Angle -= 90
        else:
            # Between multiples: floor down to the nearest one.
            scn.Angle = (scn.Angle // 90) * 90
        return {'FINISHED'}
class Plus90(bpy.types.Operator):
    """Step the view rotation up to the next multiple of 90 degrees."""

    bl_idname = "view.plus90"
    bl_label = ""
    # Fixed tooltip: this operator (the "90->" button) *increases* the angle,
    # so it moves to the next multiple; the old text said "previous"
    # (it was swapped with Minus90's tooltip).
    bl_description = "To next 90 degrees multiple"

    def invoke(self, context, event):
        scn = bpy.context.scene
        if scn.Angle == 360:
            # Wrap around so the slider stays inside [-360, 360].
            scn.Angle = -270
        else:
            # Floor to the current multiple, then advance one step.
            scn.Angle = ((scn.Angle // 90) + 1) * 90
        return {'FINISHED'}
class Nill(bpy.types.Operator):
    """Reset the current view's rotation angle back to zero."""

    bl_idname = "view.nill"
    bl_label = ""
    bl_description = "Set rotation to 0"

    def invoke(self, context, event):
        # Clearing the Scene.Angle property is enough; the panel's draw()
        # picks the new value up and rebuilds the view quaternion.
        scene = bpy.context.scene
        scene.Angle = 0
        return {'FINISHED'}
class RotView(bpy.types.Operator):
    """Modal watcher: tracks numpad view-switch shortcuts and re-applies
    each view's remembered rotation angle when the view is re-entered."""

    bl_idname = "view3d.rotview"
    bl_label = "RotView"
    bl_description = "Set fixed view rotation values"
    bl_options = {"REGISTER"}

    def invoke(self, context, event):
        global activated

        activated = 1
        do_rotview(self)
        # Keep receiving the 3D view's events through modal() below.
        context.window_manager.modal_handler_add(self)
        # Draw callback that schedules deferred rotation updates via a timer.
        self._handle = context.region.callback_add(redraw, (), 'POST_VIEW')
        return {'RUNNING_MODAL'}

    def modal(self, context, event):
        global frontrot, backrot, rightrot, leftrot, toprot, bottomrot
        global quat
        global inview, adapt, viewnum

        scn = bpy.context.scene
        if event.type == "TIMER":
            # One-shot timer armed by redraw(): apply the pending rotation.
            bpy.context.window_manager.event_timer_remove(timer)
            bpy.context.space_data.region_3d.view_rotation = quat
            bpy.context.region.tag_redraw()
        if event.type == "MIDDLEMOUSE":
            # Orbiting starts: leave the tracked view and reset the stored
            # quaternion to the base (angle 0) orientation of that view.
            inview = 0
            if viewnum == 1:
                matrotX = Matrix.Rotation(math.radians(90), 3, 'X')
                matrotY = Matrix.Rotation(math.radians(0), 3, 'Y')
                matrotZ = Matrix.Rotation(math.radians(0), 3, 'Z')
                quat = matrotX.to_quaternion()
                quat.rotate(matrotY)
                quat.rotate(matrotZ)
            elif viewnum == 2:
                matrotX = Matrix.Rotation(math.radians(90), 3, 'X')
                matrotY = Matrix.Rotation(math.radians(0), 3, 'Y')
                matrotZ = Matrix.Rotation(math.radians(180), 3, 'Z')
                quat = matrotX.to_quaternion()
                quat.rotate(matrotY)
                quat.rotate(matrotZ)
            elif viewnum == 3:
                matrotX = Matrix.Rotation(math.radians(0), 3, 'X')
                matrotY = Matrix.Rotation(math.radians(90), 3, 'Y')
                matrotZ = Matrix.Rotation(math.radians(90), 3, 'Z')
                quat = matrotZ.to_quaternion()
                quat.rotate(matrotY)
                quat.rotate(matrotX)
            elif viewnum == 4:
                matrotX = Matrix.Rotation(math.radians(0), 3, 'X')
                matrotY = Matrix.Rotation(math.radians(-90), 3, 'Y')
                matrotZ = Matrix.Rotation(math.radians(-90), 3, 'Z')
                quat = matrotZ.to_quaternion()
                quat.rotate(matrotY)
                quat.rotate(matrotX)
            elif viewnum == 5:
                matrotX = Matrix.Rotation(math.radians(0), 3, 'X')
                matrotY = Matrix.Rotation(math.radians(0), 3, 'Y')
                matrotZ = Matrix.Rotation(math.radians(0), 3, 'Z')
                quat = matrotX.to_quaternion()
                quat.rotate(matrotY)
                quat.rotate(matrotZ)
            elif viewnum == 6:
                matrotX = Matrix.Rotation(math.radians(180), 3, 'X')
                matrotY = Matrix.Rotation(math.radians(0), 3, 'Y')
                matrotZ = Matrix.Rotation(math.radians(0), 3, 'Z')
                quat = matrotX.to_quaternion()
                quat.rotate(matrotY)
                quat.rotate(matrotZ)
            bpy.context.space_data.region_3d.view_rotation = quat
        elif event.type == "NUMPAD_1":
            # Numpad 1 switches to FRONT (Ctrl: BACK); restore its angle.
            if event.value == "RELEASE":
                if not(event.ctrl):
                    viewnum = 1
                    scn.Angle = frontrot
                    adapt = 1
                    matrotX = Matrix.Rotation(math.radians(90), 3, 'X')
                    matrotY = Matrix.Rotation(math.radians(-scn.Angle), 3, 'Y')
                    matrotZ = Matrix.Rotation(math.radians(0), 3, 'Z')
                    quat = matrotX.to_quaternion()
                    quat.rotate(matrotY)
                    quat.rotate(matrotZ)
                    inview = 1
                    bpy.context.region.tag_redraw()
                else:
                    viewnum = 2
                    scn.Angle = backrot
                    adapt = 1
                    matrotX = Matrix.Rotation(math.radians(90), 3, 'X')
                    matrotY = Matrix.Rotation(math.radians(-scn.Angle), 3, 'Y')
                    matrotZ = Matrix.Rotation(math.radians(180), 3, 'Z')
                    quat = matrotX.to_quaternion()
                    quat.rotate(matrotY)
                    quat.rotate(matrotZ)
                    inview = 1
                    bpy.context.region.tag_redraw()
                return {"RUNNING_MODAL"}
            return {"PASS_THROUGH"}
        elif event.type == "NUMPAD_3":
            # Numpad 3 switches to RIGHT (Ctrl: LEFT); restore its angle.
            if event.value == "RELEASE":
                if not(event.ctrl):
                    viewnum = 3
                    scn.Angle = rightrot
                    adapt = 1
                    matrotX = Matrix.Rotation(math.radians(scn.Angle), 3, 'X')
                    matrotY = Matrix.Rotation(math.radians(90), 3, 'Y')
                    matrotZ = Matrix.Rotation(math.radians(90), 3, 'Z')
                    quat = matrotZ.to_quaternion()
                    quat.rotate(matrotY)
                    quat.rotate(matrotX)
                    inview = 1
                    bpy.context.region.tag_redraw()
                else:
                    viewnum = 4
                    scn.Angle = leftrot
                    adapt = 1
                    matrotX = Matrix.Rotation(math.radians(-scn.Angle), 3, 'X')
                    matrotY = Matrix.Rotation(math.radians(-90), 3, 'Y')
                    matrotZ = Matrix.Rotation(math.radians(-90), 3, 'Z')
                    quat = matrotZ.to_quaternion()
                    quat.rotate(matrotY)
                    quat.rotate(matrotX)
                    inview = 1
                    bpy.context.region.tag_redraw()
                return {"RUNNING_MODAL"}
            return {"PASS_THROUGH"}
        elif event.type == "NUMPAD_7":
            # Numpad 7 switches to TOP (Ctrl: BOTTOM); restore its angle.
            if event.value == "RELEASE":
                if not(event.ctrl):
                    viewnum = 5
                    scn.Angle = toprot
                    adapt = 1
                    matrotX = Matrix.Rotation(math.radians(0), 3, 'X')
                    matrotY = Matrix.Rotation(math.radians(0), 3, 'Y')
                    matrotZ = Matrix.Rotation(math.radians(scn.Angle), 3, 'Z')
                    quat = matrotX.to_quaternion()
                    quat.rotate(matrotY)
                    quat.rotate(matrotZ)
                    inview = 1
                    bpy.context.region.tag_redraw()
                else:
                    viewnum = 6
                    scn.Angle = bottomrot
                    adapt = 1
                    matrotX = Matrix.Rotation(math.radians(180), 3, 'X')
                    matrotY = Matrix.Rotation(math.radians(0), 3, 'Y')
                    matrotZ = Matrix.Rotation(math.radians(-scn.Angle), 3, 'Z')
                    quat = matrotX.to_quaternion()
                    quat.rotate(matrotY)
                    quat.rotate(matrotZ)
                    inview = 1
                    bpy.context.region.tag_redraw()
                return {"RUNNING_MODAL"}
            return {"PASS_THROUGH"}

        return {"PASS_THROUGH"}
def register():
    # Register every class defined in this add-on module with Blender.
    bpy.utils.register_module(__name__)


def unregister():
    bpy.utils.unregister_module(__name__)


if __name__ == "__main__":
    # Allows running the script directly from Blender's text editor.
    register()
def do_rotview(self):
    """Cache the 3D view's UI (sidebar) region in the module-global ``regionui``."""
    global regionui

    ui_regions = [reg for reg in bpy.context.area.regions if reg.type == "UI"]
    if ui_regions:
        # Keep the last match, exactly as the original loop did.
        regionui = ui_regions[-1]
def redraw():
    # POST_VIEW draw callback installed by RotView.invoke(). When a view
    # switch requested an orientation change (adapt == 1), arm a short
    # timer so the rotation is applied later from the modal handler
    # (setting it directly from a draw callback is avoided here).
    global adapt, timer

    if adapt == 1:
        adapt = 0
        timer = bpy.context.window_manager.event_timer_add(0.1, bpy.context.window)
#        bpy.context.region.tag_redraw()
#    elif adapt == 2:
#        sleep(0.1)
#        adapt = 0
#        bpy.context.space_data.region_3d.view_rotation = quat
| [
"super3dblender@yahoo.com"
] | super3dblender@yahoo.com |
21359e6eb60a8334bc8d00d4679f91b27603b881 | 309870c1bb943928f91dcbfc9a0fddc6bc241c25 | /Perceptron.py | e15ff63d7cac0a692044884bce3b49cc96431e0c | [] | no_license | yuesong-feng/Machine-Learning-Algorithms | b16ca25827e5181bc6febe5584ce4694a30f54ec | e566eb2c95179a8fd9d63009e94e8bc5fa902e58 | refs/heads/main | 2023-09-05T05:01:40.304268 | 2021-11-14T05:27:43 | 2021-11-14T05:27:43 | 324,681,013 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 881 | py | import numpy as np
class Perceptron(object):
    """Rosenblatt perceptron binary classifier with labels -1/+1.

    Parameters
    ----------
    eta : float
        Learning rate.
    n_iter : int
        Number of passes (epochs) over the training data.
    random_state : int
        Seed for the weight-initialization RNG.
    """

    def __init__(self, eta=0.01, n_iter=50, random_state=1):
        self.eta = eta
        self.n_iter = n_iter
        self.random_state = random_state

    def fit(self, X, y):
        """Learn weights from samples X (n_samples x n_features) and targets y."""
        rng = np.random.RandomState(self.random_state)
        # One small random weight per feature, plus the bias term in w_[0].
        self.w_ = rng.normal(loc=0, scale=0.01, size=1 + X.shape[1])
        self.errors = []

        for _ in range(self.n_iter):
            misclassified = 0
            for sample, target in zip(X, y):
                # Perceptron rule: step = eta * (y - y_hat); zero when correct.
                step = self.eta * (target - self.predict(sample))
                self.w_[1:] += step * sample
                self.w_[0] += step
                if step != 0:
                    misclassified += 1
            self.errors.append(misclassified)
        return self

    def net_input(self, X):
        """Weighted sum of the inputs plus the bias term."""
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def predict(self, X):
        """Class label: +1 where the net input is non-negative, else -1."""
        return np.where(self.net_input(X) >= 0, 1, -1)
"yuesong-feng@foxmail.com"
] | yuesong-feng@foxmail.com |
204a0594892cc09b0c8369bade86c79c6ef2dcf6 | 0bea4ef65cc7c73108aca58d50f9710ec06cba69 | /ChinaMarket/single_stock_recent_change_check.py | b4f1dd85f9fb2946783c376f12cabd7cf1849f37 | [] | no_license | LeoFengShou/StockInvestmentAssistant | 2af12ac69e517805b8e74fc949a7e7a27e3a07a2 | 4ecc3a6b5026865c2b5452fb39e41d2913a2d2dd | refs/heads/master | 2021-04-28T22:13:16.383042 | 2017-01-01T19:31:29 | 2017-01-01T19:31:29 | 77,755,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,177 | py | '''
Author: Shoujun Leo Feng
1T9 Engsci, UofT
purpose: To check the stock price at a certain number of days ago
For example, check the price of 600510 3 days ago
'''
import urllib2
import time
# Index 0 is a placeholder so month numbers 1-12 index these lists directly.
MONTH_SHORT = ["","Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]
MONTH_DAYS_LEAP = ["",31,29,31,30,31,30,31,31,30,31,30,31]
MONTH_DAYS_REG = ["",31,28,31,30,31,30,31,31,30,31,30,31]
# Weekday -> number of weekdays since the last weekend (Mon=1 ... Fri=5);
# Sat/Sun share Friday's value - presumably so weekend queries resolve to
# the last trading day (NOTE(review): confirm intent).
WEEKDAYS = {"Mon":1,"Tue":2,"Wed":3,"Thu":4,"Fri":5,"Sat":5,"Sun":5}
def get_back_price(symbol = None, back_days = 0):
    '''Scrape Sina Finance for a stock's price *back_days* days ago.

    Returns (price, date_string) or (None, None) when no matching date is
    found within five retries.  NOTE(review): Python 2 code (urllib2 and a
    print statement) - will not run unmodified on Python 3.
    '''
    if symbol == None:
        symbol = input("Enter the symbol of the stock you want to check:")
    if back_days == 0:
        back_days = input("Enter how many days backwards you want to check(<30):")
    if back_days >= 30:
        back_days = input("Enter how many days backwards you want to check(<30):")
    search_date = data_calculator(back_days)
    html = urllib2.urlopen(
        "http://money.finance.sina.com.cn/corp/go.php/vMS_MarketHistory/stockid/" +
        symbol + '.phtml').read()
    start_position = html.find(search_date)
    count = 0
    # The computed date may fall on a non-trading day (e.g. a holiday):
    # walk one day further back, at most five times, until a date matches.
    if start_position == -1:
        while start_position == -1:
            back_days += 1
            search_date = data_calculator(back_days)
            start_position = html.find(search_date)
            count += 1
            if count >= 5:
                break
    if start_position == -1:
        return None,None
    print search_date
    # Skip three table cells past the date cell; the following cell holds
    # the price we want (presumably the close - confirm against the page).
    for i in range(3):
        start_position = html.find("<td><div align=\"center\">",start_position
            + len("<td><div align=\"center\">"))
    start_position = start_position + len("<td><div align=\"center\">")
    end_position = html.find("</div>",start_position)
    back_day_price = float(html[start_position:end_position])
    return back_day_price, search_date
def data_calculator(back_days):
    '''Return the yyyy-mm-dd date string *back_days* trading days ago.

    The offset is widened by two days per week crossed so that weekends are
    skipped (using the module-level WEEKDAYS table), then subtracted from
    today's date using the MONTH_DAYS_* tables.
    '''
    current_day = int(time.strftime("%d"))
    current_month = int(time.strftime("%m"))
    current_year = int(time.strftime("%Y"))
    current_weekday = time.strftime("%a")
    # Widen the offset past weekends: two extra days per (approximate) week.
    if back_days >= WEEKDAYS[current_weekday]:
        if back_days > 5:
            back_days += 2*(int(back_days/5))
        else:
            back_days += 2
    if back_days >= current_day:
        # The target date falls in the previous month (possibly last year).
        search_month = current_month - 1
        search_year = current_year
        if search_month <= 0:
            search_year = current_year - 1
            search_month = 12
        # Gregorian leap-year rule: divisible by 4, except century years
        # unless divisible by 400. The old bare "% 4" test wrongly treated
        # years such as 2100 as leap years.
        if search_year % 4 == 0 and (search_year % 100 != 0 or search_year % 400 == 0):
            search_day = MONTH_DAYS_LEAP[search_month] - (back_days - current_day)
        else:
            search_day = MONTH_DAYS_REG[search_month] - (back_days - current_day)
    else:
        search_day = current_day - back_days
        search_month = current_month
        search_year = current_year
    # Zero-pad day and month to two digits for the yyyy-mm-dd format.
    if search_day < 10:
        search_day = "0"+str(search_day)
    else:
        search_day = str(search_day)
    if search_month < 10:
        search_month = "0"+str(search_month)
    else:
        search_month = str(search_month)
    return str(search_year) + "-" + search_month + "-" + search_day
| [
"noreply@github.com"
] | noreply@github.com |
4f6b1dc211e4bc1cc3ec122d5bc8cba70661d87d | f11600b9a256bf6a2b584d127faddc27a0f0b474 | /normal/1401.py | 52bff56683cf923937222e17b79a7c0999757b14 | [] | no_license | longhao54/leetcode | 9c1f0ce4ca505ec33640dd9b334bae906acd2db5 | d156c6a13c89727f80ed6244cae40574395ecf34 | refs/heads/master | 2022-10-24T07:40:47.242861 | 2022-10-20T08:50:52 | 2022-10-20T08:50:52 | 196,952,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,121 | py | class Solution:
def checkOverlap(self, radius: int, x_center: int, y_center: int, x1: int, y1: int, x2: int, y2: int) -> bool:
# 条件 1:首先判断圆心是否在矩形内
if x1 <= x_center <= x2 and y1 <= y_center <= y2:
return True
# 条件 2:圆心位于矩形的上下左右四个区域
elif x_center > x2 and y1 <= y_center <= y2: # 右
return radius >= x_center - x2
elif y_center < y1 and x1 <= x_center <= x2: # 下
return radius >= y1 - y_center
elif x_center < x1 and y1<= y_center <= y2: # 左
return radius >= x1 - x_center
elif y_center > y2 and x1 <= x_center <= x2: # 上
return radius >= y_center - y2
else:
# 条件 3:判断矩形的四个顶点是否在圆的内部
return min((x1 - x_center) ** 2 + (y2 - y_center) ** 2,\
(x2 - x_center) ** 2 + (y2 - y_center) ** 2, \
(x2 - x_center) ** 2 + (y1 - y_center) ** 2, \
(x1 - x_center) ** 2 + (y1 - y_center) ** 2) <= radius ** 2
| [
"jinlha@jiedaibao.com"
] | jinlha@jiedaibao.com |
85d9eaac3bd912d3a58222497c62c5436bd24f38 | 11a2b91ed5d077854b0dcf3deb5748dd67db9cf2 | /src/account/migrations/0001_initial.py | c15474bfa5cbd2dca5ab93953c67fdf8bbf6f859 | [
"MIT"
] | permissive | lettoosoft/lettoo-weixin-platform-back | 920fe80f81f9e47bd00f93e51f4859dd8df9a7bc | 757e7620cfa8218677b6e6d9058bccbb87a4eb63 | refs/heads/master | 2021-01-23T16:40:14.659645 | 2014-09-01T02:19:58 | 2014-09-01T02:19:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,421 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: creates the account_emailconfirmation table.

    Auto-generated by South; the ``models`` dict is a frozen snapshot of the
    ORM state and should not be edited by hand.
    """

    def forwards(self, orm):
        # Adding model 'EmailConfirmation'
        db.create_table(u'account_emailconfirmation', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now)),
            ('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('email', self.gf('django.db.models.fields.EmailField')(unique=True, max_length=75)),
            ('sent', self.gf('django.db.models.fields.DateTimeField')(null=True)),
            ('key', self.gf('django.db.models.fields.CharField')(unique=True, max_length=64)),
            ('verified', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal(u'account', ['EmailConfirmation'])

    def backwards(self, orm):
        # Deleting model 'EmailConfirmation'
        db.delete_table(u'account_emailconfirmation')

    models = {
        u'account.emailconfirmation': {
            'Meta': {'object_name': 'EmailConfirmation'},
            'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
            'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
            'sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['account']
"zhiwehu@gmail.com"
] | zhiwehu@gmail.com |
2e4720e753bc3732ab57849977a72d37e3ea27ec | d5160655af67bb348e2eb7ad83dfc6beb92c9639 | /server/bin/flask | 63064fa476f26e9fc6e29ae6959ba8b293c57a5c | [] | no_license | Pavanisoma/ImageCounter | d01a26a60237a0635ae3dbaf4701c44886e4019c | 31a89323d668672a6a48d23bba3227105d19f6b5 | refs/heads/main | 2022-12-28T20:25:50.402123 | 2020-10-11T21:28:01 | 2020-10-11T21:28:01 | 301,018,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | #!/Users/pavani/Documents/flask-vue-crud/server/env/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
    # Console-script entry point: strip the setuptools "-script.py"/".exe"
    # suffix from argv[0] so usage messages show the plain command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"pavani.somarouthu@sjsu.edu"
] | pavani.somarouthu@sjsu.edu | |
f89baa17881057d2791812fb4e6a13d8e002cd60 | 11f1610c9f2e120c6dfcd3d2ceacb5d9998c24a1 | /flasky/lab7/flasky/app/main/__init__.py | 601a75f7856d42dc85a95fff53212c38b733929d | [] | no_license | theguyisnoone/flaskk | c2f759027b840e2c84a29be4e41bf451fedc367b | 0758b52f118568959d7cb2635688591bd77c4bde | refs/heads/master | 2020-03-31T15:51:47.525452 | 2018-11-14T06:19:47 | 2018-11-14T06:19:47 | 152,354,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 105 | py | #main blueprint
from flask import Blueprint
# The application's "main" blueprint.
main=Blueprint('main',__name__)

# Imported at the bottom so these modules can import ``main`` without a
# circular-import failure (they presumably attach routes/handlers to it).
from . import views,errors
| [
"953258481@qq.com"
] | 953258481@qq.com |
585e0c0605f122c703682e1ebfecea8b7d006ff4 | 6bebdb5a81c03681a7828b35b41f8856ebf43c88 | /recifegrafica/arts_and_orders/migrations/0002_auto_20170130_1824.py | bffb997039aedc90f0d787e14fc55e3bda1cfb62 | [] | no_license | 3ecologias/emporio-farmaceutico | cf4a4dc0f2fe3383694af705184b94dc8122474b | f29105a9a3cfb4c3806cb69195a94ad691e8f9e9 | refs/heads/master | 2021-03-19T18:57:57.284829 | 2017-03-16T22:17:04 | 2017-03-16T22:17:04 | 85,241,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,188 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
    """Auto-generated: adds Document.user and alters several Document fields."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('arts_and_orders', '0001_initial'),
    ]

    operations = [
        # New nullable link from a Document to the user that uploaded it.
        migrations.AddField(
            model_name='document',
            name='user',
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True),
        ),
        migrations.AlterField(
            model_name='document',
            name='document',
            field=models.FileField(upload_to=b'Artes/', verbose_name=b'Arquivo da arte'),
        ),
        migrations.AlterField(
            model_name='document',
            name='order_number',
            field=models.CharField(max_length=255, null=True, verbose_name=b'N\xc3\xbamero da ordem', blank=True),
        ),
        migrations.AlterField(
            model_name='document',
            name='product_description',
            field=models.CharField(max_length=500, null=True, verbose_name=b'Descri\xc3\xa7\xc3\xa3o do produto', blank=True),
        ),
    ]
| [
"laqcs@cin.ufpe.br"
] | laqcs@cin.ufpe.br |
958e66963b687952abe54f34a00c7cef057ef540 | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /MY_REPOS/Lambda-Resource-Static-Assets/2-resources/_External-learning-resources/02-pyth/Python-master/ciphers/hill_cipher.py | bc8f5b41b624ca389e5279c713516e329ed4da0f | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 7,549 | py | """
Hill Cipher:
The 'HillCipher' class below implements the Hill Cipher algorithm which uses
modern linear algebra techniques to encode and decode text using an encryption
key matrix.
Algorithm:
Let the order of the encryption key be N (as it is a square matrix).
Your text is divided into batches of length N and converted to numerical vectors
by a simple mapping starting with A=0 and so on.
The key is then multiplied with the newly created batch vector to obtain the
encoded vector. After each multiplication modular 36 calculations are performed
on the vectors so as to bring the numbers between 0 and 36 and then mapped with
their corresponding alphanumerics.
While decrypting, the decrypting key is found which is the inverse of the
encrypting key modular 36. The same process is repeated for decrypting to get
the original message back.
Constraints:
The determinant of the encryption key matrix must be relatively prime w.r.t 36.
Note:
This implementation only considers alphanumerics in the text. If the length of
the text to be encrypted is not a multiple of the break key(the length of one
batch of letters), the last character of the text is added to the text until the
length of the text reaches a multiple of the break_key. So the text after
decrypting might be a little different than the original text.
References:
https://apprendre-en-ligne.net/crypto/hill/Hillciph.pdf
https://www.youtube.com/watch?v=kfmNeskzs2o
https://www.youtube.com/watch?v=4RhLNDqcjpA
"""
import math
import string

import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    """Greatest common divisor via the iterative Euclidean algorithm.

    >>> greatest_common_divisor(4, 8)
    4
    >>> greatest_common_divisor(8, 4)
    4
    >>> greatest_common_divisor(4, 7)
    1
    >>> greatest_common_divisor(0, 10)
    10
    """
    # Iterative form of the tail recursion gcd(b % a, a); same results.
    while a != 0:
        a, b = b % a, a
    return b
class HillCipher:
    """Hill cipher over the 36-symbol alphabet A-Z plus 0-9.

    Text is processed in batches of N characters (N = order of the key
    matrix) by multiplying each batch vector with the key modulo 36. The
    key's determinant must be coprime with 36 so a decryption key exists
    (validated in ``check_determinant``).
    """

    # The cipher alphabet: 26 letters followed by 10 digits (36 symbols).
    key_string = string.ascii_uppercase + string.digits

    # Elementwise x -> x % 36, i.e. x % len(key_string).
    modulus = numpy.vectorize(lambda x: x % 36)
    # Elementwise rounding used to turn float matrices back into integers.
    to_int = numpy.vectorize(lambda x: round(x))

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        """
        encrypt_key is an NxN numpy array
        """
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        """Map a cipher-alphabet character to its numeric code (A=0 ... 9=35).

        >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
        >>> hill_cipher.replace_letters('T')
        19
        >>> hill_cipher.replace_letters('0')
        26
        """
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        """Map a numeric code back to its cipher-alphabet character.

        >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
        >>> hill_cipher.replace_digits(19)
        'T'
        >>> hill_cipher.replace_digits(26)
        '0'
        """
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        """Raise ValueError unless det(key) mod 36 is coprime with 36.

        >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
        >>> hill_cipher.check_determinant()
        """
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        # det is non-negative here, so stdlib math.gcd matches the module's
        # hand-rolled Euclidean helper exactly.
        if math.gcd(det, len(self.key_string)) != 1:
            raise ValueError(
                f"determinant modular {req_l} of encryption key({det}) is not co prime "
                f"w.r.t {req_l}.\nTry another key."
            )

    def process_text(self, text: str) -> str:
        """Uppercase *text*, drop non-alphanumerics, pad to a batch multiple.

        >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
        >>> hill_cipher.process_text('Testing Hill Cipher')
        'TESTINGHILLCIPHERR'
        >>> hill_cipher.process_text('hello')
        'HELLOO'
        """
        chars = [char for char in text.upper() if char in self.key_string]
        if not chars:
            # Edge-case fix: text without any alphanumeric character used to
            # crash with IndexError on chars[-1]; return the empty string.
            return ""

        # Pad with the last character until a whole number of batches fits.
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        """Encrypt *text* batch by batch with the key matrix.

        >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
        >>> hill_cipher.encrypt('testing hill cipher')
        'WHXYJOLM9C6XT085LL'
        >>> hill_cipher.encrypt('hello')
        '85FF00'
        """
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            # Encode: key . batch (mod 36).
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted
            )
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        """Return the modular inverse of the key matrix (mod 36).

        >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
        >>> hill_cipher.make_decrypt_key()
        array([[ 6, 25],
               [ 5, 26]])
        """
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        # Brute-force the modular inverse of the determinant; it exists
        # because check_determinant() guaranteed gcd(det, 36) == 1.
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        # adj(K) = det(K) * K^-1, so this is det_inv * adj(K) (mod 36).
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        """Decrypt *text* batch by batch with the inverse key.

        >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
        >>> hill_cipher.decrypt('WHXYJOLM9C6XT085LL')
        'TESTINGHILLCIPHERR'
        >>> hill_cipher.decrypt('85FF00')
        'HELLOO'
        """
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted
            )
            decrypted += decrypted_batch

        return decrypted
def main() -> None:
N = int(input("Enter the order of the encryption key: "))
hill_matrix = []
print("Enter each row of the encryption key with space separated integers")
for _ in range(N):
row = [int(x) for x in input().split()]
hill_matrix.append(row)
hc = HillCipher(numpy.array(hill_matrix))
print("Would you like to encrypt or decrypt some text? (1 or 2)")
option = input("\n1. Encrypt\n2. Decrypt\n")
if option == "1":
text_e = input("What text would you like to encrypt?: ")
print("Your encrypted text is:")
print(hc.encrypt(text_e))
elif option == "2":
text_d = input("What text would you like to decrypt?: ")
print("Your decrypted text is:")
print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
52475c83925cb4a2a169c87ed0c0ac24a278254c | 3b79d12d1f3383adb45aa33f785b1678de3ea168 | /lab5/analysis.py | d02affacc703e766ac523e9460bb0ca295454e9c | [] | no_license | ryanrishi/coen146-computer-networks | df19bba8af57f308955a97baf54fb623dbf08e1c | b0add4284e598ae34cf8c1b59dfaa615e6abb635 | refs/heads/master | 2021-01-21T13:25:19.753755 | 2016-05-18T20:32:36 | 2016-05-18T20:32:36 | 53,754,601 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 823 | py | #!/usr/bin/env python
from math import log
import sys
"""
Initialize an array to hold frequency of characters
"""
frequency = []
for i in range(256):
frequency.append(0)
count = 0 # total number of bytes read
"""
Read from stdin until stdin is empty
"""
while True:
try:
byte = ord(sys.stdin.read(1))
frequency[byte] += 1
count += 1
except TypeError as e:
# once stdin is empty
break
"""
Compute frequency distribution and entropy using shannon entropy formula
http://blogs.cisco.com/security/on_information_entropy/
"""
entropy = 0
for i in range(256):
if frequency[i]:
probability = float(frequency[i]) / float(count)
entropy += probability * log(1/probability, 2)
print 'Number of bytes read: %d' % count
print 'Entropy: %.8f' % entropy
| [
"rdrishi12@gmail.com"
] | rdrishi12@gmail.com |
2c9a0ac0ece267189af57b23c3bb60a2e93a04dc | afff4906de0f9d306b65b3781a82edb4ddce6da6 | /PicoCTF/2019/ReverseEngineering/VaultDoor4/script.py | 3a01fc0673277c2977d5b7766d6f566c3b27936c | [] | no_license | FORGIS98/roadToCTF | 9fea7cd0e3551baf177b79164e64b7d1d6a444db | 610ff59ac8ba68ba13eef2e69965fda84b39dfa6 | refs/heads/master | 2022-04-19T18:45:15.377390 | 2020-04-19T15:38:39 | 2020-04-19T15:38:39 | 241,630,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | myBytes = [
106 , 85 , 53 , 116 , 95 , 52 , 95 , 98 ,
0x55, 0x6e, 0x43, 0x68, 0x5f, 0x30, 0x66, 0x5f,
0o142, 0o131, 0o164, 0o63 , 0o163, 0o137, 0o67 , 0o141,
'1' , 'c' , '8' , 'c' , '6' , '6' , '8' , 'b'
]
for x in myBytes:
if(x not in ['1' , 'c' , '8' , 'c' , '6' , '6' , '8' , 'b']):
print(chr(x), end="")
else:
print(x, end="")
| [
"cifradoforgis@gmail.com"
] | cifradoforgis@gmail.com |
af24b9455252c6b9b58c9672b4c0a8a22e0657eb | 334fafa9d87fdd13cc549a662a0cf35c47f2d5e3 | /backend/data/bahn/bahnscript/bahn2.py | 9738bfe0b9f26768f010e9a577d6262182208138 | [] | no_license | Jugendhackt/apartmapp | 27cd7c17e808a6948f017043e61ebd5480e87c89 | 0fded8e1da75a021547b53d68e88e9db524b088e | refs/heads/master | 2021-01-15T14:31:43.959035 | 2014-06-08T12:10:04 | 2014-06-08T12:10:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | import json, psycopg2
filename = "plankgeo_data.json"
def trains(filename):
con = psycopg2.connect(database='appartmapp', user='postgres')
cur = con.cursor()
with open(filename, "r") as data:
json_encoded = json.loads(data.read())
data.close()
# print to screen
print json_encoded[0]['lat']
for entry in json_encoded:
cur.execute("INSERT INTO items(name, type, picture, lat, lng)VALUES(%s, 'dbstop', 'dbstop.jpg', %s, %s)", (entry['id'], entry['lat'], entry['lon']))
con.commit()
con.close()
trains(filename) | [
"me@me.com"
] | me@me.com |
7f0eda9e127f5ae2eed25034e58524dc7081955c | 03a90ee0d517426a052f230c10e4f1ead43ddc97 | /OOP/Intro/6.py | 0c1b8821832a38760d743c91d55d7aeaf7d8920b | [] | no_license | AKNO6/Python3_DataStructure | 13ab8341c915e29050cda1fb0ec1997d5a951fc4 | aed34a12c81d61e666b36c5a0c9e0b4b3c30d2fe | refs/heads/master | 2022-03-01T05:40:13.519176 | 2019-09-23T02:32:02 | 2019-09-23T02:32:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | class Calc:
r=23
def __init__(self,a,b):
self.x=a
self.y=b
def getValue(self):
print(self.x)
print(self.y)
def test(self,m,n):
self.m1=m
self.n1=n
def test1(self):
print(self.m1)
print(self.r)
self.r=67
def test2(self):
print(self.r)
obj=Calc(10,20)
obj.getValue()
obj.test(100,200)
obj.test1()
obj.test2()
| [
"phpguhan@gmail.com"
] | phpguhan@gmail.com |
cc833d1c0989c341c318177547ee45c3ac6aca36 | 0df3fa2587a356eab2d211f273236a62c1a0c939 | /pylearn2/models/sparse_autoencoder.py | f3c054ab9685e94e20bf5eccfb788bba93386f1e | [] | no_license | poolio/pylearn | 92a4e5cc508c8776f7a5f7e8f837b88edd29b1b2 | 502f777713cdb34357652ecff0086e0d75dc71c7 | refs/heads/master | 2020-12-25T08:50:34.954918 | 2012-12-07T21:05:03 | 2012-12-07T21:05:03 | 7,122,948 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,841 | py | import numpy
import theano
import theano.sparse
from theano import tensor
from pylearn2.autoencoder import DenoisingAutoencoder
from pylearn2.utils import sharedX
from pylearn2.space import VectorSpace
from theano.sparse.sandbox.sp2 import sampling_dot
from pylearn2.expr.basic import theano_norms
if 0:
print 'WARNING: using SLOW rng'
RandomStreams = tensor.shared_randomstreams.RandomStreams
else:
import theano.sandbox.rng_mrg
RandomStreams = theano.sandbox.rng_mrg.MRG_RandomStreams
class Linear(object):
def __call__(self, X_before_activation):
# X_before_activation is linear inputs of hidden units, dense
return X_before_activation
class Rectify(object):
def __call__(self, X_before_activation):
# X_before_activation is linear inputs of hidden units, dense
return X_before_activation * (X_before_activation > 0)
class SparseDenoisingAutoencoder(DenoisingAutoencoder):
"""
denoising autoencoder working with only sparse inputs and efficient reconstruction sampling
Based on:
Y. Dauphin, X. Glorot, Y. Bengio.
Large-Scale Learning of Embeddings with Reconstruction Sampling.
In Proceedings of the 28th International Conference on Machine Learning (ICML 2011).
"""
def __init__(self, corruptor, nvis, nhid, act_enc, act_dec,
tied_weights=False, irange=1e-3, rng=9001):
# sampling dot only supports tied weights
assert tied_weights == True
self.names_to_del = set()
super(SparseDenoisingAutoencoder, self).__init__(corruptor,
nvis, nhid, act_enc, act_dec,
tied_weights=tied_weights, irange=irange, rng=rng)
# this step is crucial to save loads of space because w_prime is never used in
# training the sparse da.
del self.w_prime
self.input_space = VectorSpace(nvis, sparse=True)
def get_params(self):
# this is needed because sgd complains when not w_prime is not used in grad
# so delete w_prime from the params list
params = super(SparseDenoisingAutoencoder, self).get_params()
return params[0:3]
def encode(self, inputs):
if (isinstance(inputs, theano.sparse.basic.SparseVariable)):
return self._hidden_activation(inputs)
else:
raise TypeError
#return [self.encode(v) for v in inputs]
def decode(self, hiddens, pattern):
"""
Map inputs through the encoder function.
Parameters
----------
hiddens : tensor_like or list of tensor_likes
Theano symbolic (or list thereof) representing the input
minibatch(es) to be encoded. Assumed to be 2-tensors, with the
first dimension indexing training examples and the second indexing
data dimensions.
pattern: dense matrix, the same shape of the minibatch inputs
0/1 like matrix specifying how to reconstruct inputs.
Returns
-------
decoded : tensor_like or list of tensor_like
Theano symbolic (or list thereof) representing the corresponding
minibatch(es) after decoding.
"""
if self.act_dec is None:
act_dec = lambda x: x
else:
act_dec = self.act_dec
if isinstance(hiddens, tensor.Variable):
pattern = theano.sparse.csr_from_dense(pattern)
return act_dec(self.visbias + theano.sparse.dense_from_sparse(sampling_dot(hiddens, self.weights, pattern)))
else:
return [self.decode(v, pattern) for v in hiddens]
def reconstruct(self, inputs, pattern):
"""
Parameters
----------
inputs : theano sparse variable
pattern: binary dense matrix specifying which part of inputs should be reconstructed
"""
# corrupt the inputs
inputs_dense = theano.sparse.dense_from_sparse(inputs)
corrupted = self.corruptor(inputs_dense)
inputs = theano.sparse.csc_from_dense(corrupted)
return self.decode(self.encode(inputs), pattern)
def reconstruct_without_dec_acti(self, inputs, pattern):
# return results before applying the decoding activation function
inputs_dense = theano.sparse.dense_from_sparse(inputs)
corrupted = self.corruptor(inputs_dense)
inputs = theano.sparse.csc_from_dense(corrupted)
hiddens = self.encode(inputs)
outputs = self.visbias + sampling_dot.sampling_dot(hiddens, self.weights, pattern)
return outputs
def _hidden_input(self, x):
"""
Given a single minibatch, computes the input to the
activation nonlinearity without applying it.
Parameters
----------
x : theano sparse variable
Theano symbolic representing the corrupted input minibatch.
Returns
-------
y : tensor_like
(Symbolic) input flowing into the hidden layer nonlinearity.
"""
return self.hidbias + theano.sparse.dot(x, self.weights)
def get_monitoring_channels(self, V):
vb, hb, weights = self.get_params()
norms = theano_norms(weights)
return {'W_min': tensor.min(weights),
'W_max': tensor.max(weights),
'W_norm_mean': tensor.mean(norms),
'bias_hid_min' : tensor.min(hb),
'bias_hid_mean' : tensor.mean(hb),
'bias_hid_max' : tensor.max(hb),
'bias_vis_min' : tensor.min(vb),
'bias_vis_mean' : tensor.mean(vb),
'bias_vis_max': tensor.max(vb),
}
| [
"li.yao@umontreal.ca"
] | li.yao@umontreal.ca |
29f368c746a19f3998950246ad42f3b884fe13f2 | 2269649d28dfcda6021ae62d9a49bb7ffd187986 | /rest.py | f800e8dc332792572119943591189d10ba20cf15 | [] | no_license | fbrundu/dimc | 00a830fe940342712fb4ea6d768bcdc1e059b042 | 0bea1e30cfd13ad77b3147a149fc473536ff8447 | refs/heads/master | 2021-06-18T15:47:11.653731 | 2021-01-28T20:02:00 | 2021-01-28T20:02:00 | 168,638,058 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,310 | py | import cherrypy
from cherrypy import log
from lib import get_parameters, handle_error
import json
# import os
import requests
import ssl
# import subprocess as sp
import urllib
from xml.etree import ElementTree
# load configuration into global dictionary
with open("conf/conf.json", "r") as cfile:
pbc = json.load(cfile)
# FIXME TEMPORARY
districts_map = {
"TU": ["ITC11-" + str(i).zfill(2) for i in range(1, 8)],
"MA": ["UKD33-" + str(i).zfill(2) for i in range(1, 27)]
}
storeymap = {}
# storeymap[]
###
bimprovider_url = pbc["bimpurl"]
# Ping microservice
class Ping(object):
exposed = True
def GET(self, **params):
if validate(cherrypy.request.headers):
try:
return handle_error(200, "Pong")
except:
return handle_error(500, "Internal Server Error")
else:
return handle_error(401, "Unauthorized")
# PingThru microservice
class PingThru(object):
exposed = True
def GET(self, **params):
if validate(cherrypy.request.headers):
try:
# basic ping response through the infrastructure
r = requests.get(urllib.parse.urljoin(bimprovider_url, "ping"))
# return response
return handle_error(200, r.json()["message"] + "thru")
except:
return handle_error(500, "Internal Server Error")
else:
return handle_error(401, "Unauthorized")
# GetJSON microservice
class GetJSON(object):
exposed = True
def GET(self, *paths, **params):
if validate(cherrypy.request.headers):
try:
# get buildings as JSON array
url = urllib.parse.urljoin(bimprovider_url, "getjson")
response = query(url, params)
ctype = "application/json;charset=utf-8"
cherrypy.response.headers["Content-Type"] = ctype
# return response
return response
except:
return handle_error(500, "Internal Server Error")
else:
return handle_error(401, "Unauthorized")
# GetIFC microservice
class GetIFC(object):
exposed = True
def GET(self, *paths, **params):
if validate(cherrypy.request.headers):
try:
# get zip of ifcs
url = urllib.parse.urljoin(bimprovider_url, "getifc")
response = get_resources(url, params)
# set response header for zip
cherrypy.response.headers["Content-Type"] = "application/zip"
cdisp = 'attachment; filename="resp.zip"'
cherrypy.response.headers["Content-Disposition"] = cdisp
return response
except:
return handle_error(500, "Internal Server Error")
else:
return handle_error(401, "Unauthorized")
# GetGBXML microservice
class GetGBXML(object):
exposed = True
def GET(self, *paths, **params):
if validate(cherrypy.request.headers):
try:
# get zip of gbxmls
url = urllib.parse.urljoin(bimprovider_url, "getgbxml")
response = get_resources(url, params)
# set response header for zip
cherrypy.response.headers["Content-Type"] = "application/zip"
cdisp = 'attachment; filename="resp.zip"'
cherrypy.response.headers["Content-Disposition"] = cdisp
return response
except:
return handle_error(500, "Internal Server Error")
else:
return handle_error(401, "Unauthorized")
# GetRVT microservice
class GetRVT(object):
exposed = True
def GET(self, *paths, **params):
if validate(cherrypy.request.headers):
try:
# get zip of rvts
url = urllib.parse.urljoin(bimprovider_url, "getrvt")
response = get_resources(url, params)
# set response header for zip
cherrypy.response.headers["Content-Type"] = "application/zip"
cdisp = 'attachment; filename="resp.zip"'
cherrypy.response.headers["Content-Disposition"] = cdisp
return response
except:
return handle_error(500, "Internal Server Error")
else:
return handle_error(401, "Unauthorized")
# Query microservice
class Query(object):
exposed = True
def GET(self, *paths, **params):
if validate(cherrypy.request.headers):
try:
# query
url = urllib.parse.urljoin(bimprovider_url, "query")
response = query(url, params)
ctype = "application/json;charset=utf-8"
cherrypy.response.headers["Content-Type"] = ctype
# return response
return response
except:
return handle_error(500, "Internal Server Error")
else:
return handle_error(401, "Unauthorized")
# to start the Web Service
def start():
# start the service registrator utility (deprecated, now we use sreg)
# p = sp.Popen([
# os.path.join(pbc["binpath"], "service-registrator"),
# "-conf", pbc["confpath"],
# "-endpoint", pbc["scatalog"],
# "-authProvider", pbc["aprovider"],
# "-authProviderURL", pbc["aurl"],
# "-authUser", pbc["auser"],
# "-authPass", pbc["apass"],
# "-serviceID", pbc["aserviceID"]])
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
ctx.options |= ssl.OP_NO_SSLv2
ctx.options |= ssl.OP_NO_SSLv3
# ciphers = {
# 'DHE-RSA-AE256-SHA',
# ...
# 'RC4-SHA'
# }
# ctx.set_cipher_list(':'.join(ciphers))
# start Web Service with some configuration
if pbc["stage"] == "production":
global_conf = {
"global": {
"server.environment": "production",
"engine.autoreload.on": True,
"engine.autoreload.frequency": 5,
"server.socket_host": "0.0.0.0",
"server.socket_port": 443,
# "server.socket_port": 8082,
"server.ssl_module": "builtin",
"server.ssl_certificate": pbc["cert"],
"server.ssl_private_key": pbc["priv"],
"server.ssl_certificate_chain": pbc["chain"],
"server.ssl_context": ctx,
"log.screen": False,
"log.access_file": "dimc.log",
"log.error_file": "dimc.log"
}
}
cherrypy.config.update(global_conf)
conf = {
"/": {
"request.dispatch": cherrypy.dispatch.MethodDispatcher(),
"tools.encode.debug": True,
"request.show_tracebacks": False
}
# "/dimc/getjson": {
# # "tools.encode.on": True,
# }
}
cherrypy.tree.mount(Ping(), '/dimc/ping', conf)
cherrypy.tree.mount(PingThru(), '/dimc/pingthru', conf)
cherrypy.tree.mount(GetJSON(), '/dimc/getjson', conf)
cherrypy.tree.mount(GetIFC(), '/dimc/getifc', conf)
cherrypy.tree.mount(GetGBXML(), '/dimc/getgbxml', conf)
cherrypy.tree.mount(GetRVT(), '/dimc/getrvt', conf)
cherrypy.tree.mount(Query(), '/dimc/query', conf)
# activate signal handler
if hasattr(cherrypy.engine, "signal_handler"):
cherrypy.engine.signal_handler.subscribe()
# subscribe to the stop signal
# cherrypy.engine.subscribe("stop", p.terminate)
# start serving pages
cherrypy.engine.start()
cherrypy.engine.block()
def validate(headers):
validated = False
try:
token = headers["X-Auth-Token"]
url = (pbc["aurl"] + "/p3/serviceValidate?" +
"service=bimServiceProvider&ticket=" +
token)
r = requests.get(url, verify=pbc["acert"])
root = list(ElementTree.fromstring(r.content))
validated = root[0].tag.endswith("authenticationSuccess")
except Exception:
log.error(msg="Validation Error", context="HTTP", traceback=True)
validated = False
return validated
def query(bimprovider_url, params):
url = ask_bimprovider(bimprovider_url, params)
r = requests.get(url)
if r.status_code != 200:
raise cherrypy.HTTPError(str(r.status_code), "")
return r.content
def ask_bimprovider(bimprovider_url, params):
try:
districts = get_parameters(params, "district")
buildings = []
except:
# FIXME temp fallback
districts = []
buildings = get_parameters(params, "building")
try:
typologies = get_parameters(params, "typology")
except:
typologies = None
try:
heatings = get_parameters(params, "heating")
except:
heatings = None
unchanged = {key: value
for (key, value) in params.items()
if key not in ("building", "district", "typology", "heating")}
unchanged = urllib.parse.urlencode(unchanged, doseq=True)
url = bimprovider_url + "?" + unchanged
if len(unchanged) > 0:
url += "&"
url += get_buildings_url(get_buildings(bimprovider_url, districts,
buildings, typologies, heatings))
return url
def get_resources(bimprovider_url, params):
url = ask_bimprovider(bimprovider_url, params)
r = requests.get(url)
if r.status_code != 200:
raise cherrypy.HTTPError(str(r.status_code), "")
return r.content
def get_buildings_url(buildings):
buildings = ["building=" + b for b in buildings]
url = "&".join(buildings)
return url
def get_buildings(bimp_url, districts, buildings, typologies=None,
heatings=None):
if "*" in districts:
districts = districts_map.keys()
if len(districts) > 0:
buildings += districts_to_buildings(districts)
if typologies is not None or heatings is not None:
buildings = filter_buildings(bimp_url, buildings, typologies, heatings)
return set(buildings)
def districts_to_buildings(districts):
buildings = []
for c in districts:
buildings += districts_map[c]
return buildings
def filter_buildings(bimp_url, buildings, typologies=None, heatings=None):
url = urllib.parse.urljoin(bimp_url, "query")
parameters = [("gettypology", typologies),
("getheatingsupply", heatings)]
for qname, filters in parameters:
if filters is not None:
lurl = url + "?qname=" + qname
lurl += "".join(["&building=" + b for b in buildings])
r = requests.get(lurl)
if r.status_code != 200:
raise cherrypy.HTTPError(str(r.status_code), "")
result = r.json()
result = result["q_res"]
buildings = [b["b_id"] for b in result if b["b_res"][0] in filters]
return buildings
| [
"francesco.brundu@gmail.com"
] | francesco.brundu@gmail.com |
1f3e515e316251c9b0be121a21442fe3cc97b115 | 6ed2f542e1d3cff6c94eb8cd189625f8ad045b9a | /scripts/resize-ds.py | 864431d5d694ed1af61d31a166a97851a590836b | [] | no_license | roman-boursier/katrina | 331a0b3f320c16d288dc795e64108aedc509958a | 14b94ddf6b90c3665cf4ae2bfbe000a1a0398c27 | refs/heads/master | 2022-12-22T10:12:04.540664 | 2020-07-09T22:40:12 | 2020-07-09T22:40:12 | 273,904,052 | 0 | 0 | null | 2022-12-13T04:27:59 | 2020-06-21T12:55:26 | Python | UTF-8 | Python | false | false | 533 | py | from PIL import Image
from resizeimage import resizeimage
import os
i= 0
for file in os.listdir("./peinture_c_hd/"):
#Save output image
outputImg = Image.open(r'./peinture_c_hd/' + file)
if(outputImg.size[0] >= 256 and outputImg.size[1] >= 256):
outputImg = resizeimage.resize_cover(outputImg, [256, 256])
try:
outputImg.save('./pc-resized/' + str(i) + '.jpg', "JPEG")
i = i + 1
except:
print("An exception occurred")
else:
print(file)
| [
"roman.boursier@gmail.com"
] | roman.boursier@gmail.com |
747156b62d51a84b1a7ab38642f1d75f96cb87f3 | ec6313f67565b7e2408bd1206060b5eece562d25 | /app/views.py | 05117c19818f3854d344fa23ae9f212aa2894af2 | [] | no_license | renatomak/crud-python | ba14ac99e75c40fb5e49d9f255cdaeffc4d99447 | 5b191aeaf525feb26c1ebbc231404ec3e00765d4 | refs/heads/main | 2023-08-15T13:17:44.393671 | 2021-09-19T01:18:50 | 2021-09-19T01:18:50 | 408,001,160 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,644 | py | from django.shortcuts import render, redirect
from app.forms import CarrosForm
from app.models import Carros
from django.core.paginator import Paginator
# Create your views here.
def home(request):
# data = {}
# data['db'] = Carros.objects.all()
# return render(request, 'index.html', data)
# data = {}
# all = Carros.objects.all()
# paginador = Paginator(all, 2)
# pages = request.GET.get('page')
# data['db'] = paginador.get_page(pages)
# return render(request, 'index.html', data)
data = {}
search = request.GET.get('search')
if search:
data['db'] = Carros.objects.filter(modelo__icontains=search)
else:
data['db'] = Carros.objects.all()
return render(request, 'index.html', data)
def form(request):
data = {}
data['form'] = CarrosForm()
return render(request, 'form.html', data)
def create(request):
form = CarrosForm(request.POST or None)
if form.is_valid():
form.save()
return redirect('home')
def view(request, pk):
data = {}
data['db'] = Carros.objects.get(pk=pk)
return render(request, 'view.html', data)
def edit(request, pk):
data = {}
data['db'] = Carros.objects.get(pk=pk)
data['form'] = CarrosForm(instance=data['db'])
return render(request, 'form.html', data)
def update(request, pk):
data = {}
data['db'] = Carros.objects.get(pk=pk)
form = CarrosForm(request.POST or None, instance=data['db'])
if form.is_valid():
form.save()
return redirect('home')
def delete(request, pk):
db = Carros.objects.get(pk=pk)
db.delete()
return redirect('home') | [
"renato.mark.silva@gmail.com"
] | renato.mark.silva@gmail.com |
997ab21d5f096e6d92121758955ec180c36f4722 | 6c4f83c048be905ce824f7890ed9aa05166c9155 | /zalore_project/zalore_project/wsgi.py | 8e2772c9d6f6082494a51eca79b815a6ed39a713 | [] | no_license | Tjandra-Putra/zalore-ecommerce | ebf54919b853e23836bb4ee7547067145a2577ef | fb5f57c6b3d43cfe57d2e3986be5c1ac13b28edb | refs/heads/master | 2023-01-20T06:55:25.297778 | 2020-11-26T11:52:53 | 2020-11-26T11:52:53 | 297,910,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
WSGI config for zalore_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'zalore_project.settings')
application = get_wsgi_application()
| [
"tjandraputra2000@gmail.com"
] | tjandraputra2000@gmail.com |
6eca501b543acfb8f36cd94dfddb2243e88dcbd9 | 1f0e700c2c2b2fc7c8001032a75255655c7ba287 | /aula3/tagsHTML.py | d0933a5323069276a146cd6831104df0c3404814 | [] | no_license | Renato2000/FichasPL | 6318ad0edd40c5d3a8347f6326d8d4ad558b6603 | ef50531f366a835a16843498dd4e6c765ef26f1e | refs/heads/master | 2023-05-01T11:17:23.748600 | 2021-05-11T14:41:30 | 2021-05-11T14:41:30 | 341,492,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | # Deteção de tags HTML
'''
Exemplo de input:
2
<p><a href="http://www.quackit.com/html/tutorial/html_links.cfm">Example Link</a></p>
<div class="more-info"><a href="http://www.qt.com/lexamples.cfm">More Link Examples...</a></div>
Resultado:
a;div;p
'''
import re
tag = re.compile(r'<([a-z]+)\s*')
tags = []
n = int(input())
for i in range(n):
linha = input()
r = tag.findall(linha)
if r:
for x in r:
tags.append(x)
tags.sort()
tags = list( dict.fromkeys(tags) )
print(';'.join(tags))
| [
"renatoandre2000@hotmail.com"
] | renatoandre2000@hotmail.com |
e60fd91f73ca435922e6eeb21ff36cf4eb2476a7 | 02cad35c0d37dd88d0028a7a97c06d3e7f8de04f | /按w.py | 6f9729225b37fbe63c2c03c0283e648d12057a6a | [] | no_license | MrFengGG/PythonDemos | 2f0e90cab4d2b210ff1fbf0c6f4a1139bed20505 | ff62af2019825781571403ca228438fd9f9917e0 | refs/heads/master | 2022-03-03T05:40:47.495534 | 2019-09-03T02:33:34 | 2019-09-03T02:33:34 | 114,099,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | import win32api
import win32con
import time
while True:
win32api.keybd_event(87,0,0,0)
time.sleep(10)
| [
"fengziyu@apexsoft.com.cn"
] | fengziyu@apexsoft.com.cn |
9a7032fb4a6c3a4c73af2c3f8c631ba5100585c7 | 638b207f3c7706cb0cb9dd1d6cf112ab91f69837 | /0x11-python-network_1/5-hbtn_header.py | c0286a8b9aaaf86889e63216152a5918919ad69c | [] | no_license | NasserAbuchaibe/holbertonschool-higher_level_programming | c30a066dfd4525e936b4121f930c3a63e6d911d6 | 5b0c11423e11bd9201cc057775c099eb0259f305 | refs/heads/master | 2022-12-16T17:15:57.775143 | 2020-09-25T03:00:56 | 2020-09-25T03:00:56 | 259,379,453 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | #!/usr/bin/python3
""" Response header value
"""
import requests
from sys import argv
if __name__ == "__main__":
"""ok
"""
r = requests.get(argv[1])
print(r.headers.get('X-Request-Id'))
| [
"nasser_abuchaibe@hotmail.com"
] | nasser_abuchaibe@hotmail.com |
5632f932f5ba3faf87be97db115dc4e8b62af556 | 2a9246c620b588068e2a82a166f7836bf56938e3 | /JetAnalysis/python/EGammaAnalyzers_cff.py | 6680749b27dda629b3d777655cee0b7f9f41ed14 | [] | no_license | CmsHI/CVS_CmsHi | 6dc20b07a34a8927f1af3d11b59b59058b5ddeeb | 9b9dcd34a1b718e4649ca2ddc34583706dfd5d1c | refs/heads/master | 2021-01-20T15:36:12.324219 | 2013-06-20T13:06:49 | 2013-06-20T13:06:49 | 11,457,757 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | import FWCore.ParameterSet.Config as cms
from CmsHi.PhotonAnalysis.MultiPhotonAnalyzer_cfi import *
multiPhotonAnalyzer.GenParticleProducer = cms.InputTag("hiGenParticles")
multiPhotonAnalyzer.PhotonProducer = cms.InputTag("selectedPatPhotons")
multiPhotonAnalyzer.VertexProducer = cms.InputTag("hiSelectedVertex")
multiPhotonAnalyzer.OutputFile = cms.string('mpa.root')
multiPhotonAnalyzer.doStoreCompCone = cms.untracked.bool(False)
multiPhotonAnalyzer.doStoreJets = cms.untracked.bool(False)
| [
""
] | |
9a8b0d7ad862f8956c21a0b3756db79d5082d5cf | c699fc1741bbf3d1c57e0c4fc9cd2baf691ff636 | /re_basic_pattern_search.py | f5bcbd74c6711bb9fa9ea8b5813139dd9ecd1046 | [] | no_license | akash-mitra/pykarma | 164163209995366f67d9bbbf36eb97cd113fb687 | 7071fde8d31e301ee75f0cf886e896359fe5ee18 | refs/heads/master | 2022-12-16T19:57:03.208551 | 2020-09-08T01:04:46 | 2020-09-08T01:04:46 | 293,665,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | # Given a specific string pattern, check
# how many times the pattern appear in a
# block of text
import re
texts = """
This is a test block of the text where we
need to search for a specific pattern. Then
we nedd to count how many times that specific
pattern has appeard in this block of text.
"""
regexpattern = input('Enter search pattern: ')
match = re.search(regexpattern, texts)
if match:
print('Pattern found')
else:
print('Pattern not present')
| [
"akashmitra@gmail.com"
] | akashmitra@gmail.com |
b3dfed5c1dc9814d3659cf0c53bb2e0736762273 | af5120175ced1e0fb805d3a5d7d8e17a21c91c2a | /backend/legacy.py | 1ecdf1c007195a9b9a2a1b81a8401f2692e33098 | [] | no_license | valgilbert/CryptoDashboard | 1bd79bb41bcd51e7b8d5885762293eda76fdb775 | e143ac9a45a0dd77d7538e6b5ceb11a9b05c7544 | refs/heads/master | 2020-08-30T23:28:54.719894 | 2019-03-01T22:54:03 | 2019-03-01T22:54:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,006 | py | @app.route('/binance/portfolio_old')
def binance_portfolio_old():
url = 'https://api.binance.com/api/v3/account'
key = get_input_param('key')
secret = get_input_param('secret')
curr_time_unix = str(int(time.time()*1000))
query_string = 'timestamp=' + curr_time_unix
hmac_string = hmac.new(bytes(secret, 'latin-1'), msg=bytes(query_string, 'latin-1'), digestmod=hashlib.sha256).hexdigest()
headers = {'X-MBX-APIKEY': key}
resp = requests.get(url, headers=headers, params={'timestamp': curr_time_unix, 'signature': hmac_string})
return resp.text, resp.status_code
'''
Gets 20 latest news articles from cryptopanic
key: Cryptopanic API Key
'''
@app.route('/cryptopanic/news')
def cryptopanic_news():
#TODO loop through the param page=2 , 3, etc to get more posts
url = 'https://cryptopanic.com/api/v1/posts/'
key = get_input_param('key')
resp = requests.get(url, params={'auth_token': key, 'public': 'true'})
return resp.text, resp.status_code | [
"jiafrank98@gmail.com"
] | jiafrank98@gmail.com |
8ccb82f2bb44ea36b97afd9274410eb99677819a | ac6ba5888ef8a1a8446e2789c62603893521329c | /display_prob_forecasts_mme/display_prob_forecasts_mme.py | 8ba4ec220fbd10f313cddb246b4014e9567dfafc | [] | no_license | ThomasMoreHamill/Multi-model_PQPF | 1c9a23b3f0fc80cbc57ee03502a0b56226ace4c5 | e5772a8e9f45f557a2011471815995d8b7173f11 | refs/heads/master | 2021-01-23T11:20:20.541658 | 2018-01-11T21:02:29 | 2018-01-11T21:02:29 | 102,629,167 | 3 | 5 | null | 2017-09-15T14:45:33 | 2017-09-06T15:57:34 | Fortran | UTF-8 | Python | false | false | 12,496 | py | """ this python script is intended to display precipitation analysis
and forecast information at the 1/8-degree grid mesh scale. This
routine is capable of generating plots of quantile mapped
and dressed, quantile mapped only, and raw ensemble
probability forecasts.
"""
# --- import library routines
from netCDF4 import Dataset
from mpl_toolkits.basemap import Basemap
import matplotlib
import pygrib
import matplotlib.pyplot as plt
from matplotlib import rcParams
import numpy as np
from numpy import ma
import os, sys
from dateutils import daterange, dateshift, dayofyear, splitdate
def read_forecast_probabilities (data_directory, center, cleade, \
    cyyyymmddhh, cempirical, cgammadress):
    """Read one center's post-processed probability field from netCDF.

    data_directory: base directory holding one sub-directory per center.
    center: forecast center name, e.g. 'NCEP', 'CMC', 'ECMWF'.
    cleade: end lead time in hours (string).
    cyyyymmddhh: initialization date/time (string).
    cempirical: '1' for empirical CDFs, '0' for Gamma CDFs.
    cgammadress: '1' for Gamma-distribution dressing, '0' for simplified Gaussian.

    Returns (rlonsa, rlatsa, conusmask, prob_forecast); prob_forecast is the
    2-D slice selected by the module-level global `ithresh`.
    NOTE(review): if either flag is not exactly '0' or '1', `infile` is never
    assigned and os.path.exists below raises NameError -- validate upstream.
    """
    # The file name encodes both the quantile-mapping CDF type and the
    # dressing type chosen on the command line.
    if cempirical == '1' and cgammadress == '1':
        infile = data_directory+center+'/'+center+'_'+cleade+'h_IC'+cyyyymmddhh+'_empirical_gammadress.nc'
    elif cempirical == '1' and cgammadress == '0':
        infile = data_directory+center+'/'+center+'_'+cleade+'h_IC'+cyyyymmddhh+'_empirical_gaussdress.nc'
    elif cempirical == '0' and cgammadress == '0':
        infile = data_directory+center+'/'+center+'_'+cleade+'h_IC'+cyyyymmddhh+'_gammaqmap_gaussdress.nc'
    elif cempirical == '0' and cgammadress == '1':
        infile = data_directory+center+'/'+center+'_'+cleade+'h_IC'+cyyyymmddhh+'_gammaqmap_gammadress.nc'
    fexist = os.path.exists(infile)
    if fexist:
        try:
            nc = Dataset(infile)
            conusmask = nc.variables['conusmask'][:,:]
            rlonsa = nc.variables['rlonsa'][:,:]
            rlatsa = nc.variables['rlatsa'][:,:]
            # Select only the threshold of interest (module global `ithresh`).
            prob_forecast = nc.variables['prob_forecast'][ithresh,:,:]
            nc.close()
        except (IOError, ValueError, RuntimeError):
            print 'Error reading ', infile
            print 'IOError = ', IOError
            print 'ValueError = ',ValueError
            print 'RuntimeError = ',RuntimeError
            print 'Quitting.'
            # NOTE(review): sys.exit() raises before anything is printed here;
            # a bare sys.exit() was presumably intended.
            print sys.exit()
    else:
        print 'File = ',infile,'does not exist. Quitting'
        sys.exit()
    return rlonsa, rlatsa, conusmask, prob_forecast
# --- setting up font sizes for the display
rcParams['legend.fontsize']='small'
rcParams['legend.fancybox']=True
rcParams['xtick.labelsize']='small'
rcParams['axes.labelsize']='small'
rcParams['contour.negative_linestyle']='solid'
# ---- read inputs from command line
cyyyymmddhh = sys.argv[1] # initial time in YYYYMMDDHH format
cleade = sys.argv[2] # end lead time in hours
cthresh = sys.argv[3] # threshold amount
cempirical = sys.argv[4] # =1 for empirical CDF, 0 for Gamma
cgammadress = sys.argv[5] # = 1 for full gamma-dist dressing 0 for simple Gaussian
ileade = int(cleade)
cleadb = str(int(cleade)-12)
date_verif = dateshift(cyyyymmddhh, ileade)
if cthresh == 'POP':
ithresh = 0
cthresh_title = 'POP'
elif cthresh == '1mm':
ithresh = 1
cthresh_title = '$\geq$ 1 mm'
elif cthresh == '2p5mm':
ithresh = 2
cthresh_title = '$\geq$ 2.5 mm'
elif cthresh == '5mm':
ithresh = 3
cthresh_title = '$\geq$ 5 mm'
elif cthresh == '10mm':
ithresh = 4
cthresh_title = '$\geq$ 10 mm'
elif cthresh == '25mm':
ithresh = 5
cthresh_title = '$\geq$ 25 mm'
elif cthresh == '50mm':
ithresh = 6
cthresh_title = '$\geq$ 50 mm'
else:
print 'Invalid threshold', cthresh
print 'Please use POP, 1mm, 2p5mm, 5mm, 10mm, 25mm, 50mm'
print 'Exiting.'
sys.exit()
yyyy,mm,dd,hh = splitdate(cyyyymmddhh)
cyyyy = str(yyyy)
cdd = str(dd)
chh = str(hh)
cmonths = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
cmonth = cmonths[mm-1]
iyyyymmddhh = int(cyyyymmddhh)
yyyy_verif,mm_verif,dd_verif,hh_verif = splitdate(date_verif)
cyyyy_verif = str(yyyy_verif)
cdd_verif = str(dd_verif)
chh_verif = str(hh_verif)
cmonth_verif = cmonths[mm_verif-1]
# ---- read in precipitation analysis
data_directory = '/Users/thamill/precip/ecmwf_data/'
#data_directory = '/Projects/Reforecast2/netcdf/NationalBlend/'
filename= data_directory + 'precip_analyses_ccpa_v1_2002010100_to_2016123100.nc'
print 'reading ',filename
nc = Dataset(filename)
lats_anal = nc.variables['lats_anal'][:]
lons_anal = nc.variables['lons_anal'][:]
nya, nxa = np.shape(lats_anal)
iyyyymmddhh_list = nc.variables['yyyymmddhh_anal_end'][:]
cyyyymmddhh_list = str(iyyyymmddhh_list)
cdate_anal_late = dateshift(cyyyymmddhh, ileade)
cdate_anal_early = dateshift(cyyyymmddhh, ileade-6)
idate_anal_late = int(cdate_anal_late)
idate_anal_early = int(cdate_anal_early)
idx_late = np.where(iyyyymmddhh_list == idate_anal_late)[0]
idx_early = np.where(iyyyymmddhh_list == idate_anal_early)[0]
print 'idx_late, idx_early = ', idx_late, idx_early
apcp_anal = nc.variables['apcp_anal'][idx_late[0],:,:] + \
nc.variables['apcp_anal'][idx_early[0],:,:]
mninetynine = -99.99*np.ones((nya,nxa), dtype=np.float32)
nc.close()
# ---- read in the NCEP ensemble probabilities
rlonsa, rlatsa, conusmask, prob_forecast_NCEP = \
read_forecast_probabilities (data_directory, 'NCEP', cleade, \
cyyyymmddhh, cempirical, cgammadress)
# ---- read in the CMC ensemble probabilities
rlonsa, rlatsa, conusmask, prob_forecast_CMC = \
read_forecast_probabilities (data_directory, 'CMC', cleade, \
cyyyymmddhh, cempirical, cgammadress)
# ---- read in the ECMWF ensemble probabilities
rlonsa, rlatsa, conusmask, prob_forecast_ECMWF = \
read_forecast_probabilities (data_directory, 'ECMWF', cleade, \
cyyyymmddhh, cempirical, cgammadress)
# ---- create the final output forecast as a weighted linear combination
# of the three inputs.
prob_forecast_MME = 0.5*prob_forecast_ECMWF + 0.25*prob_forecast_CMC + \
0.25*prob_forecast_NCEP
# ======================================================================
# ---- plot a four-panel figure with raw, qmapped, final, verif
fig1 = plt.figure(figsize=(7.8,6.5))
# BUG FIX: cempirical/cgammadress come from sys.argv and are strings; the
# original compared them with the integer 1, which is always False, so the
# figure title always claimed 'Gamma CDFs' / 'simplified Gaussian dressing'
# regardless of the flags. Compare with '1', consistent with
# read_forecast_probabilities above.
if cempirical == '1':
    cemptitle = ' empirical CDFs'
else:
    cemptitle = ' Gamma CDFs'
if cgammadress == '1':
    cgammatitle = ' Gamma-distribution dressing'
else:
    cgammatitle = ' simplified Gaussian dressing'
plt.suptitle(r''+cleade+'-h statistically post-processed forecast of '+cthresh_title+\
' initialized\n00 UTC '+cdd+' '+cmonth+' '+cyyyy+', '+cemptitle+\
' and'+cgammatitle,fontsize=14)
for ifield in range(4):
if ifield == 0:
prob_forecast_display = prob_forecast_NCEP
position = [0.02, 0.55, 0.46, 0.33]
position_legend = [0.02, 0.52, 0.46, 0.02]
ctitle = '(a) NCEP GEFS ensemble'
colorst = ['White','#E4FFFF','#C4E8FF','#8FB3FF','#D8F9D8','#A6ECA6','#42F742','Yellow','Gold',\
'Orange','#FCD5D9','#F6A3AE','#FB4246','Red','#AD8ADB','#A449FF','LightGray'] #'#AD8ADB
colorstblack=['White','Black','Black','Black','Black', 'Black','Black','Black',\
'Black','Black','Black','Black','Black','Black','Black','Black','Black']
colorstwhite=['White','Black','Black','White','White','White','White',\
'White','White','White','Black','White','White','White','White','White']
clevs = [0.0, 0.03, 0.05,0.1, 0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99,1.0]
#linewidths = [0.2,0.2, 0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2]
linewidths = [0.1,0.1, 0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1]
legend_title = 'Probability'
elif ifield == 1:
prob_forecast_display = prob_forecast_CMC
position = [0.52, 0.55, 0.46, 0.33]
position_legend = [0.52, 0.52, 0.46, 0.02]
ctitle = '(b) CMC ensemble'
elif ifield == 2:
prob_forecast_display = prob_forecast_ECMWF
position = [0.02, 0.09, 0.46, 0.33]
position_legend = [0.02, 0.06, 0.46, 0.02]
ctitle = '(c) ECMWF ensemble'
elif ifield == 3:
prob_forecast_display = prob_forecast_MME
position = [0.52, 0.09, 0.46, 0.33]
position_legend = [0.52, 0.06, 0.46, 0.02]
ctitle = '(d) Multi-model ensemble' # Climatological probability' #
ax = fig1.add_axes(position)
ax.set_title(ctitle,fontsize=11)
m = Basemap(projection='mill',llcrnrlon=rlonsa[0,0],llcrnrlat=rlatsa[0,0],\
urcrnrlon=rlonsa[-1,-1],urcrnrlat=rlatsa[-1,-1],resolution='l')
x,y = m(rlonsa,rlatsa)
prob_forecast_m = ma.array(prob_forecast_display)
CS1 = m.contour(x,y,prob_forecast_m,clevs,colors=colorstblack,cmap=None,linewidths = linewidths)
CS2 = m.contourf(x,y,prob_forecast_m,clevs,colors=colorst,cmap=None,extend='neither')
m.drawcoastlines(linewidth=.5)
m.drawstates(linewidth=.5)
m.drawcountries(linewidth=.5)
cax = fig1.add_axes(position_legend)
cbar = fig1.colorbar(CS2,extend='neither', \
orientation='horizontal',cax=cax,drawedges=True,ticks=clevs,format='%g')
cax.set_xlabel(legend_title,fontsize=9)
cbar.ax.tick_params(labelsize=6)
# ---- set plot title, save to pdf file
plot_title = 'MME_'+cthresh+'_'+cyyyymmddhh+'_'+cleade+'h.pdf'
fig1.savefig(plot_title)
print 'saving plot to file = ',plot_title
# ======================================================================
# ---- plot a two-panel figure with MME + verif
fig1 = plt.figure(figsize=(7.8,3.7))
plt.suptitle(r'' + cleade + '-h MME forecast of ' + cthresh_title + ' and verification, '\
+' with forecast initialized\n00 UTC ' + cdd + ' ' + cmonth + ' ' + cyyyy +\
cemptitle + ' and' + cgammatitle,fontsize=14)
# NOTE(review): only ifield values 0 and 1 configure panel data below, yet the
# loop runs four iterations; iterations 2 and 3 just redraw panel (b) with
# stale settings. range(2) was likely intended -- confirm before changing.
for ifield in range(4):
if ifield == 0:
prob_forecast_display = prob_forecast_MME # climo_prob[ithresh,:,:] #
position = [0.02, 0.1, 0.46, 0.68]
position_legend = [0.02, 0.095, 0.46, 0.04]
ctitle = '(a) Post-processed multi-model probability' # Climatological probability' #
colorst = ['White','#E4FFFF','#C4E8FF','#8FB3FF','#D8F9D8','#A6ECA6','#42F742','Yellow','Gold',\
'Orange','#FCD5D9','#F6A3AE','#FA5257','Red','Maroon','#A449FF','LightGray'] #'#AD8ADB
colorstblack=['White','Black','White','White', 'White','White',\
'White','White','White', 'White','White','White','White','White','White']
levs = [0.0, 0.03, 0.05,0.1, 0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99,1.0]
linewidths = [0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]
legend_title = 'Probability'
elif ifield == 1:
prob_forecast_display = apcp_anal
position = [0.52, 0.1, 0.46, 0.68]
position_legend = [0.52, 0.095, 0.46, 0.04]
ctitle = '(b) CCPA 12-h accumulated precipitation analysis\n'+\
'valid '+chh_verif+' UTC '+cdd_verif+' '+cmonth_verif+' '+cyyyy_verif
colorst = ['White','#E4FFFF','#C4E8FF','#8FB3FF','#D8F9D8','#A6ECA6','#42F742','Yellow','Gold',\
'Orange','#FCD5D9','#F6A3AE','#FB4246','Red','#AD8ADB','#A449FF','LightGray'] #'#AD8ADB
colorstblack=['White','Black','Black','Black','Black', 'Black','Black','Black',\
'Black','Black','Black','Black','Black','Black','Black','Black','Black']
colorstwhite=['White','Black','Black','White','White','White','White',\
'White','White','White','Black','White','White','White','White','White']
clevs = [0.0, 0.254,1,2,3,4,5,7,10,15,20,30,50,100]
linewidths = [0.0,0.4,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]
legend_title = 'Precipitation amount (mm)'
ax = fig1.add_axes(position)
ax.set_title(ctitle,fontsize=10.5)
m = Basemap(projection='mill',llcrnrlon=rlonsa[0,0],llcrnrlat=rlatsa[0,0],\
urcrnrlon=rlonsa[-1,-1],urcrnrlat=rlatsa[-1,-1],resolution='l')
x,y = m(rlonsa,rlatsa)
prob_forecast_m = ma.array(prob_forecast_display)
if ifield == 1: \
CS1 = m.contour(x,y,prob_forecast_m,clevs,\
colors=colorstblack,cmap=None,linewidths = linewidths)
CS2 = m.contourf(x,y,prob_forecast_m,clevs,colors=colorst,cmap=None,extend='neither')
m.drawcoastlines(linewidth=.5)
m.drawstates(linewidth=.5)
m.drawcountries(linewidth=.5)
cax = fig1.add_axes(position_legend)
cbar = fig1.colorbar(CS2,extend='neither', \
orientation='horizontal',cax=cax,drawedges=True,ticks=clevs,format='%g')
cax.set_xlabel(legend_title,fontsize=9)
cbar.ax.tick_params(labelsize=6)
# ---- set plot title, save to pdf file
plot_title = 'MME_2panel_'+cthresh+'_'+cyyyymmddhh+'_'+cleade+'h.pdf'
fig1.savefig(plot_title)
print 'saving plot to file = ',plot_title
| [
"noreply@github.com"
] | noreply@github.com |
d3ea76fa3f229d69bca38fda67f5e6358d97c8a5 | 8c54c9426e1724cba8fea5184cd0303e63e7b0a5 | /download.py | de3fdb1d13e14fcca33642fd105fc465f5efcedf | [] | no_license | SahuSumanta/DownloadYoutubeVideo | 6faedd0a3ffe6a25fd7fd90d38c4298e3cdafa4b | d4d57dc6a3c8d342fb52bc3546d1a8f0380515b0 | refs/heads/master | 2022-11-25T19:23:01.872403 | 2020-08-04T01:37:13 | 2020-08-04T01:37:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | from pytube import YouTube
# Prompt for a YouTube URL and list the available streams so the user can
# pick one to download.
link = input("Enter the YouTube Link : ")
yt = YouTube(link)
videos = yt.streams.all()
# Show a 1-based menu of streams (idiomatic enumerate replaces the manual
# counter the original kept by hand).
for i, stream in enumerate(videos, start=1):
    print(str(i) + " " + str(stream))
stream_number = int(input("enter number : "))
# Convert the 1-based menu choice back to a 0-based list index.
video = videos[stream_number - 1]
# Destination folder; change "D:\Movies" to any path you prefer.
video.download("D:\Movies")
# Confirmation once the download has finished.
print("downloaded")
"smsahu9@gmail.com"
] | smsahu9@gmail.com |
2b6e7be9c0aa37d81d8306f9f2251fe28502fe9a | e87201773bd434dbde007d5ff8023ea536f5376f | /PlumberClient/IncomingDataThread.py | a05c6339738a7fb68d633c0b0b362c7f3aeed5ab | [] | no_license | AharronS/plumber | 95cab02794205cb9c7df0f498ef7414e2d67fca2 | a9ac3eaa140ae1009a1dd0f70b5f6aa4567ce619 | refs/heads/master | 2021-04-12T04:41:36.226545 | 2018-05-08T15:18:33 | 2018-05-08T15:18:33 | 125,761,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,911 | py | import sys
sys.path.append('../')
from scapy.all import *
from Protocol import *
from Protocol.DataPacket import DataPacket
from scapy.layers.inet import IP, ICMP, TCP, UDP
import threading
import logging
def get_plumberpacket_packet(base_proto, magic, pkt):
    # Parse a sniffed packet into a PlumberPacket when it carries our covert
    # payload in the Raw layer of `base_proto`; return None otherwise.
    if IP in pkt and base_proto in pkt and Raw in pkt:
        logging.debug("base_proto={protocol}, magic={_magic}".format(protocol=base_proto, _magic=magic))
        plum_packet = PlumberPacket(pkt[base_proto][Raw].load)
        # Only accept payloads whose magic number matches ours.
        if plum_packet.magic == magic:
            plum_packet.src_ip = pkt[IP].src
            # NOTE(review): reads the ICMP layer unconditionally -- if
            # base_proto is TCP/UDP and the packet has no ICMP layer this
            # raises. Confirm callers only pass ICMP carriers here.
            plum_packet.id, plum_packet.seq = pkt[ICMP].id, pkt[ICMP].seq
            return plum_packet
    return None
def plumberpacket_wrapper(base_proto, magic, pkt, dst_ip):
    """Wrap the `base_proto` layer of a sniffed packet in a PlumberPacket.

    base_proto -- scapy layer class whose payload becomes the covert data
    magic      -- magic number identifying plumber traffic
    pkt        -- the sniffed scapy packet
    dst_ip     -- destination address carried in the wrapper
    """
    # Dead code removed: the original also built a DataPacket (with "server"
    # message_target and empty ip) and then discarded it without using it.
    payload = pkt[base_proto]
    return PlumberPacket(magic=magic, message_target="client", message_type="data",
                         ip=dst_ip, data=payload)
def stop_sniff(pkt):
    """Return True when the magic stop marker (ICMP id==seq==11111) is seen.

    Handed to scapy's sniff() as stop_filter to terminate the capture loop.
    """
    is_marker = ICMP in pkt and pkt[ICMP].id == 11111 and pkt[ICMP].seq == 11111
    if is_marker:
        logging.debug("stop filter!\n" + pkt.summary())
    return is_marker
class IncomingCovertDataThread(threading.Thread):
    """Background sniffer that extracts incoming PlumberPackets into a queue."""
    def __init__(self, incoming_queue, packet_filter, stop_event, protocol=TCP, magic=12345,
                 target=None, name=None):
        super(IncomingCovertDataThread, self).__init__()
        self.target = target
        self.name = name
        # Callable used as scapy's lfilter to pre-select packets of interest.
        self.packet_filter_func = packet_filter
        # Carrier layer class (TCP/UDP) expected to hold the covert payload.
        self.protocol = protocol
        # Number of packets the dissector callback has processed.
        self.counter = 0
        self.magic = magic
        self.queue = incoming_queue
        self.logger = logging.getLogger("incomming")
        # Set when sniffing ends so the owner knows the thread has finished.
        self.stop_event = stop_event
    def run(self):
        # Blocks inside scapy's sniff() until stop_sniff() sees the magic
        # ICMP stop marker, then signals completion via stop_event.
        print "Starting " + self.name
        sniff(lfilter=self.packet_filter_func, prn=self.dissect_packet(), stop_filter=stop_sniff)
        self.stop_event.set()
        print "Exiting " + self.name
        return
    def dissect_packet(self):
        # Build and return the per-packet callback handed to sniff() as `prn`.
        def custom_action(pkt):
            if self.protocol in pkt:
                self.logger.debug("{0} packet! {1}".format(str(self.protocol), pkt.summary()))
                if TCP in pkt or UDP in pkt:
                    try:
                        plum_pkt = get_plumberpacket_packet(self.protocol, self.magic, pkt)
                    except Exception as ex:
                        self.logger.info("unknown packet")
                        # NOTE(review): this returns the callback object itself
                        # from inside the callback, which sniff() will print;
                        # a bare `return` was probably intended -- confirm.
                        return custom_action
                    if plum_pkt:
                        self.logger.info("incoming PlumberPacket!")
                        self.queue.put(plum_pkt)
                    else:
                        self.logger.debug("not plumber packet")
            else:
                self.logger.debug("unknown protocol: \n" + pkt.show(dump=True))
            # Counts every packet seen, plumber or not.
            self.counter += 1
        return custom_action
| [
"Aharron@gmail.com"
] | Aharron@gmail.com |
684bb9b634cf46ead79b715049cf84129c8f2ed3 | 4bc25aaf986e481a533e22a7d74e963a18499593 | /Chapitre_5/visionneuse_1.py | 84b18735cbfc634e5f5e849a5feb1d38c636cf5d | [] | no_license | EditionsENI/python-raspberrypi-3e-edition | c5dd3be2cbc7e52793361f2a601b100011ea535d | f189aefc5ea0b265fd664c8a47dcde6cd110a8b0 | refs/heads/master | 2023-04-10T18:59:35.922958 | 2021-04-19T21:47:29 | 2021-04-19T21:47:29 | 317,060,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,780 | py | #!/usr/bin/env python3
import glob
import sys
import os
from tkinter import PhotoImage
from tkinter import Message
from tkinter import Button
from tkinter import Frame
from tkinter import Label
from tkinter import Tk
from tkinter import BOTTOM
from tkinter import LEFT
from tkinter import BOTH
from tkinter import YES
class PiVision(Tk):
    """Tk main window that cycles through a circular list of images."""
    def __init__(self, images):
        # Build the widgets, then either load the slideshow or show an error
        # message when no image was found; finally enter the Tk event loop.
        Tk.__init__(self)
        self.creer_composants()
        if len(images) > 0:
            self.initialiser_images()
            self.afficher_image()
        else:
            self.afficher_erreur()
        self.mainloop()
    def initialiser_images(self):
        # NOTE(review): this reads the module-level global `images` (set in
        # the __main__ block), not the constructor argument -- confirm this
        # coupling is intended.
        liste_image = [(PhotoImage(file=image), os.path.basename(image))
                       for image in sorted(images)]
        # Chain the (PhotoImage, basename) pairs into a circular doubly
        # linked list so next/previous wrap around at both ends.
        premiere = derniere = VImage(info=liste_image.pop(0))
        for image in liste_image:
            derniere = derniere.ajout(info=image)
        derniere.suivante = premiere
        premiere.precedente = derniere
        self.image_courante = premiere
    def creer_composants(self):
        # Image display area plus a bottom row of navigation buttons.
        self.composant_image = Label(self)
        self.composant_image.pack(expand=YES, fill=BOTH)
        self.bouton_frame = Frame(self)
        self.bouton_frame.pack(side=BOTTOM)
        self.bouton_precedent = Button(
            self.bouton_frame, text="Précédent", command=lambda: self.image_precedente())
        self.bouton_precedent.pack(side=LEFT)
        self.bouton_suivant = Button(
            self.bouton_frame, text="Suivant", command=lambda: self.image_suivante())
        self.bouton_suivant.pack(side=LEFT)
        self.bouton_fermer = Button(
            self.bouton_frame, text="Fermer", command=self.destroy)
        self.bouton_fermer.pack(side=LEFT)
        # Keyboard shortcuts mirroring the buttons; Escape closes the window.
        self.bind("<Left>", lambda ev: self.image_precedente())
        self.bind("<Right>", lambda ev: self.image_suivante())
        self.bind("<Escape>", lambda ev: self.destroy())
    def image_suivante(self):
        # Advance to the next node of the circular list and redraw.
        self.image_courante = self.image_courante.suivante
        self.afficher_image()
    def image_precedente(self):
        # Step back to the previous node of the circular list and redraw.
        self.image_courante = self.image_courante.precedente
        self.afficher_image()
    def afficher_image(self):
        # Show the current PhotoImage and put its file name in the title bar.
        image, nom_image = self.image_courante.info
        self.composant_image.config(image=image)
        self.title("%s - %s " % (self.__class__.__name__, nom_image))
        self.update()
    def afficher_erreur(self):
        # Disable navigation and display a "no image found" message instead.
        self.bouton_precedent.configure(state="disable")
        self.bouton_suivant.configure(state="disable")
        self.unbind("<Left>")
        self.unbind("<Right>")
        self.erreur = Message(self.composant_image,
                              text="Aucune image n'a été trouvée !",
                              pady=25, padx=25, aspect=800)
        self.erreur.config(font=("courier", 14, "bold"))
        self.erreur.pack(expand=YES, fill=BOTH)
        self.title("Erreur !")
        self.update()
class VImage:
    """Node of a doubly linked list holding a (PhotoImage, filename) pair."""

    def __init__(self, info, suivante=None, precedente=None):
        self.info = info
        self.suivante = suivante
        self.precedente = precedente

    def ajout(self, info):
        """Append a new node right after this one and return the new node."""
        nouveau = VImage(info, None, self)
        self.suivante = nouveau
        return nouveau
if __name__ == "__main__":
    # Print an optional message and leave with exit status 1.
    def usage(message=""):
        print(message)
        sys.exit(1)
    if len(sys.argv) != 2:
        usage("Veuillez indiquer un répertoire!")
    repertoire = sys.argv[1]
    if not os.path.isdir(repertoire):
        usage(f"\"{repertoire}\" n'est pas un répertoire!")
    # Accept the common image extensions in lower and upper case.
    extensions = "png jpg jpeg gif".split()
    extensions = extensions + list(map(str.upper, extensions))
    images = []
    for ext in extensions:
        images.append(glob.glob(f"{repertoire}/*.{ext}"))
    # Flatten the per-extension lists into one list of paths. Note that
    # PiVision.initialiser_images() reads this module-level `images` global.
    images = sum(images, [])
    PiVision(images)
| [
"monsieurp@gentoo.org"
] | monsieurp@gentoo.org |
b6a5f93ae739c0c33ece286e0c20d023f7b8f57b | e38f5a3c2448683f7eaf860ddbf8f2454103f544 | /ProducerScripts/Production_Manager.py | 6edc8e0da17829f1101469e0d4b3210b1fdc955c | [] | no_license | ggverma/Console-Log-Analyzer | 772f216a20f41f6f77026439ccad2014088a6e7a | 8fc21164e6a2998c1d3e2153f425a67a68111265 | refs/heads/master | 2020-03-17T02:27:46.123286 | 2018-05-12T23:45:33 | 2018-05-12T23:45:33 | 133,190,971 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,105 | py | from Logger import Logger as LGR
from threading import Lock, Thread
from AWS_CREDS import Kinesis
Logger = LGR(Lock(), '../LogFiles/Producer/Logs-', 'log_file_')
LOG_FILE_PREFIX = "log_"
from producer_meetups import Producer_Meetups as ProducerMeetups
from producer_reddit import Producer_Reddit as ProducerReddit
from producer_twitter import Producer_Twitter as ProducerTwitter
try:
    # One shared Kinesis client feeds all three producers.
    objKinesis = Kinesis(Logger)
    PM = ProducerMeetups(Logger, objKinesis)
    PR = ProducerReddit(Logger, objKinesis)
    PT = ProducerTwitter(Logger, objKinesis, MULTIPLE_POST_SEND_LIMIT = 100)
    # Create one worker thread per producer, announcing each stream in the
    # same Reddit / Twitter / Meetup order as before, then start and join
    # them all.
    workers = []
    for label, producer in (("Reddit", PR), ("Twitter", PT), ("Meetup", PM)):
        workers.append(Thread(target=producer.run))
        print("Running %s production stream..." % label)
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
except Exception as e:
    Logger.log("An error occurred. Exception: " + str(e))
| [
"gverma@ncsu.edu"
] | gverma@ncsu.edu |
c4aaac47edb884b8174bfdf2ae9a11ec67f53cfe | 518f51e6b79c5f34b4a95c164a191ef7264f30f1 | /show_star.py | 75e7291336e688797ccf1833848ed66effd49666 | [] | no_license | ManishRana11/Basic_Python_Codes | eb466493d433f5f62f6e999b0dfe44aff47b1087 | 3c7cabf1b93a7972019bcd7bd26a4ded3c77f89d | refs/heads/main | 2023-03-11T03:19:49.180036 | 2021-03-01T09:15:24 | 2021-03-01T09:15:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | def show_star(rows):
for i in range(0, rows+1):
print("*" * i)
# Dead code removed: the original assigned `i = 0` and then `i = i = 1`
# here, but `i` was never read again at module level.
# Prompt for the row count and render the star triangle.
rows = int(input("Enter number of rows:"))
show_star(rows)
| [
"noreply@github.com"
] | noreply@github.com |
d5da6927dd31fe7ad45d93dbfc11c2071edde7dc | 14bca3c05f5d8de455c16ec19ac7782653da97b2 | /lib/kubernetes/client/models/v1_container_status.py | 1a90953d9050f84b6e1a3a816893dd05898df8f7 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | hovu96/splunk_as_a_service_app | 167f50012c8993879afbeb88a1f2ba962cdf12ea | 9da46cd4f45603c5c4f63ddce5b607fa25ca89de | refs/heads/master | 2020-06-19T08:35:21.103208 | 2020-06-16T19:07:00 | 2020-06-16T19:07:00 | 196,641,210 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,938 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ContainerStatus(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'container_id': 'str',
'image': 'str',
'image_id': 'str',
'last_state': 'V1ContainerState',
'name': 'str',
'ready': 'bool',
'restart_count': 'int',
'state': 'V1ContainerState'
}
attribute_map = {
'container_id': 'containerID',
'image': 'image',
'image_id': 'imageID',
'last_state': 'lastState',
'name': 'name',
'ready': 'ready',
'restart_count': 'restartCount',
'state': 'state'
}
def __init__(self, container_id=None, image=None, image_id=None, last_state=None, name=None, ready=None, restart_count=None, state=None):
"""
V1ContainerStatus - a model defined in Swagger
"""
self._container_id = None
self._image = None
self._image_id = None
self._last_state = None
self._name = None
self._ready = None
self._restart_count = None
self._state = None
self.discriminator = None
if container_id is not None:
self.container_id = container_id
self.image = image
self.image_id = image_id
if last_state is not None:
self.last_state = last_state
self.name = name
self.ready = ready
self.restart_count = restart_count
if state is not None:
self.state = state
@property
def container_id(self):
"""
Gets the container_id of this V1ContainerStatus.
Container's ID in the format 'docker://<container_id>'.
:return: The container_id of this V1ContainerStatus.
:rtype: str
"""
return self._container_id
@container_id.setter
def container_id(self, container_id):
"""
Sets the container_id of this V1ContainerStatus.
Container's ID in the format 'docker://<container_id>'.
:param container_id: The container_id of this V1ContainerStatus.
:type: str
"""
self._container_id = container_id
@property
def image(self):
"""
Gets the image of this V1ContainerStatus.
The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images
:return: The image of this V1ContainerStatus.
:rtype: str
"""
return self._image
@image.setter
def image(self, image):
"""
Sets the image of this V1ContainerStatus.
The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images
:param image: The image of this V1ContainerStatus.
:type: str
"""
if image is None:
raise ValueError("Invalid value for `image`, must not be `None`")
self._image = image
@property
def image_id(self):
"""
Gets the image_id of this V1ContainerStatus.
ImageID of the container's image.
:return: The image_id of this V1ContainerStatus.
:rtype: str
"""
return self._image_id
@image_id.setter
def image_id(self, image_id):
"""
Sets the image_id of this V1ContainerStatus.
ImageID of the container's image.
:param image_id: The image_id of this V1ContainerStatus.
:type: str
"""
if image_id is None:
raise ValueError("Invalid value for `image_id`, must not be `None`")
self._image_id = image_id
@property
def last_state(self):
"""
Gets the last_state of this V1ContainerStatus.
Details about the container's last termination condition.
:return: The last_state of this V1ContainerStatus.
:rtype: V1ContainerState
"""
return self._last_state
@last_state.setter
def last_state(self, last_state):
"""
Sets the last_state of this V1ContainerStatus.
Details about the container's last termination condition.
:param last_state: The last_state of this V1ContainerStatus.
:type: V1ContainerState
"""
self._last_state = last_state
@property
def name(self):
"""
Gets the name of this V1ContainerStatus.
This must be a DNS_LABEL. Each container in a pod must have a unique name. Cannot be updated.
:return: The name of this V1ContainerStatus.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this V1ContainerStatus.
This must be a DNS_LABEL. Each container in a pod must have a unique name. Cannot be updated.
:param name: The name of this V1ContainerStatus.
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
@property
def ready(self):
"""
Gets the ready of this V1ContainerStatus.
Specifies whether the container has passed its readiness probe.
:return: The ready of this V1ContainerStatus.
:rtype: bool
"""
return self._ready
@ready.setter
def ready(self, ready):
"""
Sets the ready of this V1ContainerStatus.
Specifies whether the container has passed its readiness probe.
:param ready: The ready of this V1ContainerStatus.
:type: bool
"""
if ready is None:
raise ValueError("Invalid value for `ready`, must not be `None`")
self._ready = ready
@property
def restart_count(self):
"""
Gets the restart_count of this V1ContainerStatus.
The number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed. Note that this is calculated from dead containers. But those containers are subject to garbage collection. This value will get capped at 5 by GC.
:return: The restart_count of this V1ContainerStatus.
:rtype: int
"""
return self._restart_count
@restart_count.setter
def restart_count(self, restart_count):
"""
Sets the restart_count of this V1ContainerStatus.
The number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed. Note that this is calculated from dead containers. But those containers are subject to garbage collection. This value will get capped at 5 by GC.
:param restart_count: The restart_count of this V1ContainerStatus.
:type: int
"""
if restart_count is None:
raise ValueError("Invalid value for `restart_count`, must not be `None`")
self._restart_count = restart_count
@property
def state(self):
"""
Gets the state of this V1ContainerStatus.
Details about the container's current condition.
:return: The state of this V1ContainerStatus.
:rtype: V1ContainerState
"""
return self._state
@state.setter
def state(self, state):
"""
Sets the state of this V1ContainerStatus.
Details about the container's current condition.
:param state: The state of this V1ContainerStatus.
:type: V1ContainerState
"""
self._state = state
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1ContainerStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"robert.fujara@gmail.com"
] | robert.fujara@gmail.com |
a29bbe7d2c8cb74beba2552e92cb4abc19df3926 | b773ff8595421fb743e55f7bc0190791f2ece7a2 | /backend/home/migrations/0002_load_initial_data.py | eb446cfe1a0a9a986553ca35b2a5b469e122a3f3 | [] | no_license | crowdbotics-apps/msm-tc208-fzjohztpg-12768 | d68746372f604aa5ec805c7c4c480eb451d2b96d | 016bfac5d6497dbd88b49eddc4b8f74788161c83 | refs/heads/master | 2022-12-28T04:01:12.567205 | 2020-10-06T05:28:05 | 2020-10-06T05:28:05 | 301,622,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,333 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
    """Seed home.CustomText with the application's title."""
    model = apps.get_model("home", "CustomText")
    model.objects.create(title="MSM-TC208-fzjohztpgt")
def create_homepage(apps, schema_editor):
    """Seed home.HomePage with the default landing-page markup."""
    model = apps.get_model("home", "HomePage")
    body = """
    <h1 class="display-4 text-center">MSM-TC208-fzjohztpgt</h1>
    <p class="lead">
    This is the sample application created and deployed from the Crowdbotics app.
    You can view list of packages selected for this application below.
    </p>"""
    model.objects.create(body=body)
def create_site(apps, schema_editor):
    """Ensure the default Site row (pk=1) carries the app name and domain."""
    site_model = apps.get_model("sites", "Site")
    custom_domain = "msm-tc208-fzjohztpg-12768.botics.co"
    params = {"name": "MSM-TC208-fzjohztpgt"}
    # Only attach a domain when one is configured (kept for parity with the
    # generated template, where custom_domain may be empty).
    if custom_domain:
        params["domain"] = custom_domain
    site_model.objects.update_or_create(defaults=params, id=1)
class Migration(migrations.Migration):
    # Runs after the initial home-app tables exist and after the sites app's
    # unique-domain alteration.
    dependencies = [
        ("home", "0001_initial"),
        ("sites", "0002_alter_domain_unique"),
    ]
    # Seed the database with the default custom text, home page, and site row.
    operations = [
        migrations.RunPython(create_customtext),
        migrations.RunPython(create_homepage),
        migrations.RunPython(create_site),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
12542080ccca60a90bdf8d0f0071599b072ee0bc | 89c09dcade7c64540f9c7efe8c060f878ee6986d | /whole_model/CrowdNav/crowd_sim/envs/crowd_sim.py | 6a058d104fb0c2afdc11ad72206e6a3ea79c8b1c | [] | no_license | SiChiTong/Navigation-2 | d0f4c0dd5df3be8c9ebd206c5e94ac50c33ff1d1 | 7658ce2a0db8f5fb551af1daf5ed47e0ab453cac | refs/heads/master | 2022-03-31T18:00:59.981618 | 2020-02-25T13:44:04 | 2020-02-25T13:44:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,110 | py | import logging
import gym
import matplotlib.lines as mlines
import numpy as np
import rvo2
from matplotlib import patches
from numpy.linalg import norm
from crowd_sim.envs.utils.human import Human
from crowd_sim.envs.utils.info import *
from crowd_sim.envs.utils.utils import point_to_segment_dist
class CrowdSim(gym.Env):
metadata = {'render.modes': ['human']}
    def __init__(self):
        """
        Movement simulation for n+1 agents
        Agent can either be human or robot.
        humans are controlled by an unknown and fixed policy.
        robot is controlled by a known and learnable policy.

        All attributes start as None here and are populated later by
        configure() and set_robot() (and, presumably, an episode-reset step
        not visible in this chunk -- confirm).
        """
        # episode/timing state
        self.time_limit = None
        self.time_step = None
        self.robot = None
        self.humans = None
        self.global_time = None
        self.human_times = None
        # reward function
        self.success_reward = None
        self.collision_penalty = None
        self.discomfort_dist = None
        self.discomfort_penalty_factor = None
        # simulation configuration
        self.config = None
        self.case_capacity = None
        self.case_size = None
        self.case_counter = None
        self.randomize_attributes = None
        self.train_val_sim = None
        self.test_sim = None
        self.square_width = None
        self.circle_radius = None
        self.human_num = None
        # for visualization
        self.states = None
        self.action_values = None
        self.attention_weights = None
def configure(self, config):
self.config = config
self.time_limit = config.getint('env', 'time_limit')
self.time_step = config.getfloat('env', 'time_step')
self.randomize_attributes = config.getboolean('env', 'randomize_attributes')
self.success_reward = config.getfloat('reward', 'success_reward')
self.collision_penalty = config.getfloat('reward', 'collision_penalty')
self.discomfort_dist = config.getfloat('reward', 'discomfort_dist')
self.discomfort_penalty_factor = config.getfloat('reward', 'discomfort_penalty_factor')
if self.config.get('humans', 'policy') == 'orca':
self.case_capacity = {'train': np.iinfo(np.uint32).max - 2000, 'val': 1000, 'test': 1000}
self.case_size = {'train': np.iinfo(np.uint32).max - 2000, 'val': config.getint('env', 'val_size'),
'test': config.getint('env', 'test_size')}
self.train_val_sim = config.get('sim', 'train_val_sim')
self.test_sim = config.get('sim', 'test_sim')
self.square_width = config.getfloat('sim', 'square_width')
self.circle_radius = config.getfloat('sim', 'circle_radius')
self.human_num = config.getint('sim', 'human_num')
else:
raise NotImplementedError
self.case_counter = {'train': 0, 'test': 0, 'val': 0}
logging.info('human number: {}'.format(self.human_num))
if self.randomize_attributes:
logging.info("Randomize human's radius and preferred speed")
else:
logging.info("Not randomize human's radius and preferred speed")
logging.info('Training simulation: {}, test simulation: {}'.format(self.train_val_sim, self.test_sim))
logging.info('Square width: {}, circle width: {}'.format(self.square_width, self.circle_radius))
def set_robot(self, robot):
self.robot = robot
def generate_random_human_position(self, human_num, rule):
"""
Generate human position according to certain rule
Rule square_crossing: generate start/goal position at two sides of y-axis
Rule circle_crossing: generate start position on a circle, goal position is at the opposite side
:param human_num:
:param rule:
:return:
"""
# initial min separation distance to avoid danger penalty at beginning
if rule == 'square_crossing':
self.humans = []
for i in range(human_num):
self.humans.append(self.generate_square_crossing_human())
elif rule == 'circle_crossing':
self.humans = []
for i in range(human_num-1): # 1 for CA human
self.humans.append(self.generate_circle_crossing_human())
for i in range(1):
self.humans.append(self.generate_CA_human())
elif rule == 'mixed':
# mix different raining simulation with certain distribution
static_human_num = {0: 0.05, 1: 0.2, 2: 0.2, 3: 0.3, 4: 0.1, 5: 0.15}
dynamic_human_num = {1: 0.3, 2: 0.3, 3: 0.2, 4: 0.1, 5: 0.1}
static = True if np.random.random() < 0.2 else False
prob = np.random.random()
for key, value in sorted(static_human_num.items() if static else dynamic_human_num.items()):
if prob - value <= 0:
human_num = key
break
else:
prob -= value
self.human_num = human_num
self.humans = []
if static:
# randomly initialize static objects in a square of (width, height)
width = 4
height = 8
if human_num == 0:
human = Human(self.config, 'humans')
human.set(0, -10, 0, -10, 0, 0, 0)
self.humans.append(human)
for i in range(human_num):
human = Human(self.config, 'humans')
if np.random.random() > 0.5:
sign = -1
else:
sign = 1
while True:
px = np.random.random() * width * 0.5 * sign
py = (np.random.random() - 0.5) * height
collide = False
for agent in [self.robot] + self.humans:
if norm((px - agent.px, py - agent.py)) < human.radius + agent.radius + self.discomfort_dist:
collide = True
break
if not collide:
break
human.set(px, py, px, py, 0, 0, 0)
self.humans.append(human)
else:
# the first 2 two humans will be in the circle crossing scenarios
# the rest humans will have a random starting and end position
for i in range(human_num):
if i < 2:
human = self.generate_circle_crossing_human()
else:
human = self.generate_square_crossing_human()
self.humans.append(human)
else:
raise ValueError("Rule doesn't exist")
def generate_circle_crossing_human(self):
human = Human(self.config, 'humans')
if self.randomize_attributes:
human.sample_random_attributes()
while True:
angle = np.random.random() * np.pi * 2
# add some noise to simulate all the possible cases robot could meet with human
px_noise = (np.random.random() - 0.5) * human.v_pref
py_noise = (np.random.random() - 0.5) * human.v_pref
px = self.circle_radius * np.cos(angle) + px_noise
py = self.circle_radius * np.sin(angle) + py_noise
collide = False
for agent in [self.robot] + self.humans:
min_dist = human.radius + agent.radius + self.discomfort_dist
if norm((px - agent.px, py - agent.py)) < min_dist or \
norm((px - agent.gx, py - agent.gy)) < min_dist:
collide = True
break
if not collide:
break
human.set(px, py, -px, -py, 0, 0, 0)
return human
# I added this to teach the robot Collision-Avoidance situations
def generate_CA_human(self):
human = Human(self.config, 'humans')
if self.randomize_attributes:
human.sample_random_attributes()
while True:
angle = np.random.random() * np.pi * 2
# add some noise to simulate all the possible cases robot could meet with human
px_noise = (np.random.random() - 0.5) * human.v_pref
py_noise = (np.random.random() - 0.5) * human.v_pref
px = px_noise
py = self.circle_radius + py_noise -1 #(0,r-1) --> (0,-r+1)
collide = False
for agent in [self.robot] + self.humans:
min_dist = human.radius + agent.radius + self.discomfort_dist
if norm((px - agent.px, py - agent.py)) < min_dist or \
norm((px - agent.gx, py - agent.gy)) < min_dist:
collide = True
#print('collide in placing CA')
break
if not collide:
break
human.set(px, py, -px, -py, 0, 0, 0)
return human
def generate_square_crossing_human(self):
human = Human(self.config, 'humans')
if self.randomize_attributes:
human.sample_random_attributes()
if np.random.random() > 0.5:
sign = -1
else:
sign = 1
while True:
px = np.random.random() * self.square_width * 0.5 * sign
py = (np.random.random() - 0.5) * self.square_width
collide = False
for agent in [self.robot] + self.humans:
if norm((px - agent.px, py - agent.py)) < human.radius + agent.radius + self.discomfort_dist:
collide = True
break
if not collide:
break
while True:
gx = np.random.random() * self.square_width * 0.5 * -sign
gy = (np.random.random() - 0.5) * self.square_width
collide = False
for agent in [self.robot] + self.humans:
if norm((gx - agent.gx, gy - agent.gy)) < human.radius + agent.radius + self.discomfort_dist:
collide = True
break
if not collide:
break
human.set(px, py, gx, gy, 0, 0, 0)
return human
def get_human_times(self):
"""
Run the whole simulation to the end and compute the average time for human to reach goal.
Once an agent reaches the goal, it stops moving and becomes an obstacle
(doesn't need to take half responsibility to avoid collision).
:return:
"""
# centralized orca simulator for all humans
if not self.robot.reached_destination():
raise ValueError('Episode is not done yet')
params = (10, 10, 5, 5)
sim = rvo2.PyRVOSimulator(self.time_step, *params, 0.3, 1)
sim.addAgent(self.robot.get_position(), *params, self.robot.radius, self.robot.v_pref,
self.robot.get_velocity())
for human in self.humans:
sim.addAgent(human.get_position(), *params, human.radius, human.v_pref, human.get_velocity())
max_time = 1000
while not all(self.human_times):
for i, agent in enumerate([self.robot] + self.humans):
vel_pref = np.array(agent.get_goal_position()) - np.array(agent.get_position())
if norm(vel_pref) > 1:
vel_pref /= norm(vel_pref)
sim.setAgentPrefVelocity(i, tuple(vel_pref))
sim.doStep()
self.global_time += self.time_step
if self.global_time > max_time:
logging.warning('Simulation cannot terminate!')
for i, human in enumerate(self.humans):
if self.human_times[i] == 0 and human.reached_destination():
self.human_times[i] = self.global_time
# for visualization
self.robot.set_position(sim.getAgentPosition(0))
for i, human in enumerate(self.humans):
human.set_position(sim.getAgentPosition(i + 1))
self.states.append([self.robot.get_full_state(), [human.get_full_state() for human in self.humans]])
del sim
return self.human_times
def reset(self, phase='test', test_case=None):
"""
Set px, py, gx, gy, vx, vy, theta for robot and humans
:return:
"""
if self.robot is None:
raise AttributeError('robot has to be set!')
assert phase in ['train', 'val', 'test']
if test_case is not None:
self.case_counter[phase] = test_case
self.global_time = 0
if phase == 'test':
self.human_times = [0] * self.human_num
else:
self.human_times = [0] * (self.human_num if self.robot.policy.multiagent_training else 1)
if not self.robot.policy.multiagent_training:
self.train_val_sim = 'circle_crossing'
if self.config.get('humans', 'policy') == 'trajnet':
raise NotImplementedError
else:
counter_offset = {'train': self.case_capacity['val'] + self.case_capacity['test'],
'val': 0, 'test': self.case_capacity['val']}
self.robot.set(0, -self.circle_radius, 0, self.circle_radius, 0, 0, np.pi / 2)
if self.case_counter[phase] >= 0:
np.random.seed(counter_offset[phase] + self.case_counter[phase])
if phase in ['train', 'val']:
human_num = self.human_num if self.robot.policy.multiagent_training else 1
self.generate_random_human_position(human_num=human_num, rule=self.train_val_sim)
else:
self.generate_random_human_position(human_num=self.human_num, rule=self.test_sim)
# case_counter is always between 0 and case_size[phase]
self.case_counter[phase] = (self.case_counter[phase] + 1) % self.case_size[phase]
else:
assert phase == 'test'
if self.case_counter[phase] == -1:
# for debugging purposes
self.human_num = 3
self.humans = [Human(self.config, 'humans') for _ in range(self.human_num)]
self.humans[0].set(0, -6, 0, 5, 0, 0, np.pi / 2)
self.humans[1].set(-5, -5, -5, 5, 0, 0, np.pi / 2)
self.humans[2].set(5, -5, 5, 5, 0, 0, np.pi / 2)
else:
raise NotImplementedError
for agent in [self.robot] + self.humans:
agent.time_step = self.time_step
agent.policy.time_step = self.time_step
self.states = list()
if hasattr(self.robot.policy, 'action_values'):
self.action_values = list()
if hasattr(self.robot.policy, 'get_attention_weights'):
self.attention_weights = list()
# get current observation
if self.robot.sensor == 'coordinates':
ob = [human.get_observable_state() for human in self.humans]
elif self.robot.sensor == 'RGB':
raise NotImplementedError
return ob
def onestep_lookahead(self, action):
return self.step(action, update=False)
def step(self, action, update=True):
"""
Compute actions for all agents, detect collision, update environment and return (ob, reward, done, info)
"""
human_actions = []
for human in self.humans:
# observation for humans is always coordinates
ob = [other_human.get_observable_state() for other_human in self.humans if other_human != human]
if self.robot.visible:
ob += [self.robot.get_observable_state()]
human_actions.append(human.act(ob))
# collision detection
dmin = float('inf')
collision = False
for i, human in enumerate(self.humans):
px = human.px - self.robot.px
py = human.py - self.robot.py
if self.robot.kinematics == 'holonomic':
vx = human.vx - action.vx
vy = human.vy - action.vy
else:
vx = human.vx - action.v * np.cos(action.r + self.robot.theta)
vy = human.vy - action.v * np.sin(action.r + self.robot.theta)
ex = px + vx * self.time_step
ey = py + vy * self.time_step
# closest distance between boundaries of two agents
closest_dist = point_to_segment_dist(px, py, ex, ey, 0, 0) - human.radius - self.robot.radius
if closest_dist < 0:
collision = True
# logging.debug("Collision: distance between robot and p{} is {:.2E}".format(i, closest_dist))
break
elif closest_dist < dmin:
dmin = closest_dist
# collision detection between humans
human_num = len(self.humans)
for i in range(human_num):
for j in range(i + 1, human_num):
dx = self.humans[i].px - self.humans[j].px
dy = self.humans[i].py - self.humans[j].py
dist = (dx ** 2 + dy ** 2) ** (1 / 2) - self.humans[i].radius - self.humans[j].radius
if dist < 0:
# detect collision but don't take humans' collision into account
logging.debug('Collision happens between humans in step()')
# check if reaching the goal
end_position = np.array(self.robot.compute_position(action, self.time_step))
reaching_goal = norm(end_position - np.array(self.robot.get_goal_position())) < self.robot.radius
if self.global_time >= self.time_limit - 1:
reward = 0
done = True
info = Timeout()
elif collision:
reward = self.collision_penalty
done = True
info = Collision()
elif reaching_goal:
reward = self.success_reward
done = True
info = ReachGoal()
elif dmin < self.discomfort_dist:
# only penalize agent for getting too close if it's visible
# adjust the reward based on FPS
reward = (dmin - self.discomfort_dist) * self.discomfort_penalty_factor * self.time_step
done = False
info = Danger(dmin)
else:
reward = 0
done = False
info = Nothing()
if update:
# store state, action value and attention weights
self.states.append([self.robot.get_full_state(), [human.get_full_state() for human in self.humans]])
if hasattr(self.robot.policy, 'action_values'):
self.action_values.append(self.robot.policy.action_values)
if hasattr(self.robot.policy, 'get_attention_weights'):
self.attention_weights.append(self.robot.policy.get_attention_weights())
# update all agents
self.robot.step(action)
for i, human_action in enumerate(human_actions):
self.humans[i].step(human_action)
self.global_time += self.time_step
for i, human in enumerate(self.humans):
# only record the first time the human reaches the goal
if self.human_times[i] == 0 and human.reached_destination():
self.human_times[i] = self.global_time
# compute the observation
if self.robot.sensor == 'coordinates':
ob = [human.get_observable_state() for human in self.humans]
elif self.robot.sensor == 'RGB':
raise NotImplementedError
else:
if self.robot.sensor == 'coordinates':
ob = [human.get_next_observable_state(action) for human, action in zip(self.humans, human_actions)]
elif self.robot.sensor == 'RGB':
raise NotImplementedError
return ob, reward, done, info
def render(self, mode='human', output_file=None):
from matplotlib import animation
import matplotlib.pyplot as plt
plt.rcParams['animation.ffmpeg_path'] = '/usr/bin/ffmpeg'
x_offset = 0.11
y_offset = 0.11
cmap = plt.cm.get_cmap('hsv', 10)
robot_color = 'yellow'
goal_color = 'red'
arrow_color = 'red'
arrow_style = patches.ArrowStyle("->", head_length=4, head_width=2)
if mode == 'human':
fig, ax = plt.subplots(figsize=(7, 7))
ax.set_xlim(-4, 4)
ax.set_ylim(-4, 4)
for human in self.humans:
human_circle = plt.Circle(human.get_position(), human.radius, fill=False, color='b')
ax.add_artist(human_circle)
ax.add_artist(plt.Circle(self.robot.get_position(), self.robot.radius, fill=True, color='r'))
plt.show()
elif mode == 'traj':
fig, ax = plt.subplots(figsize=(7, 7))
ax.tick_params(labelsize=16)
ax.set_xlim(-5, 5)
ax.set_ylim(-5, 5)
ax.set_xlabel('x(m)', fontsize=16)
ax.set_ylabel('y(m)', fontsize=16)
robot_positions = [self.states[i][0].position for i in range(len(self.states))]
human_positions = [[self.states[i][1][j].position for j in range(len(self.humans))]
for i in range(len(self.states))]
for k in range(len(self.states)):
if k % 4 == 0 or k == len(self.states) - 1:
robot = plt.Circle(robot_positions[k], self.robot.radius, fill=True, color=robot_color)
humans = [plt.Circle(human_positions[k][i], self.humans[i].radius, fill=False, color=cmap(i))
for i in range(len(self.humans))]
ax.add_artist(robot)
for human in humans:
ax.add_artist(human)
# add time annotation
global_time = k * self.time_step
if global_time % 4 == 0 or k == len(self.states) - 1:
agents = humans + [robot]
times = [plt.text(agents[i].center[0] - x_offset, agents[i].center[1] - y_offset,
'{:.1f}'.format(global_time),
color='black', fontsize=14) for i in range(self.human_num + 1)]
for time in times:
ax.add_artist(time)
if k != 0:
nav_direction = plt.Line2D((self.states[k - 1][0].px, self.states[k][0].px),
(self.states[k - 1][0].py, self.states[k][0].py),
color=robot_color, ls='solid')
human_directions = [plt.Line2D((self.states[k - 1][1][i].px, self.states[k][1][i].px),
(self.states[k - 1][1][i].py, self.states[k][1][i].py),
color=cmap(i), ls='solid')
for i in range(self.human_num)]
ax.add_artist(nav_direction)
for human_direction in human_directions:
ax.add_artist(human_direction)
plt.legend([robot], ['Robot'], fontsize=16)
plt.show()
elif mode == 'video':
fig, ax = plt.subplots(figsize=(7, 7))
ax.tick_params(labelsize=16)
ax.set_xlim(-6, 6)
ax.set_ylim(-6, 6)
ax.set_xlabel('x(m)', fontsize=16)
ax.set_ylabel('y(m)', fontsize=16)
# add robot and its goal
robot_positions = [state[0].position for state in self.states]
goal = mlines.Line2D([0], [4], color=goal_color, marker='*', linestyle='None', markersize=15, label='Goal')
robot = plt.Circle(robot_positions[0], self.robot.radius, fill=True, color=robot_color)
ax.add_artist(robot)
ax.add_artist(goal)
plt.legend([robot, goal], ['Robot', 'Goal'], fontsize=16)
# add humans and their numbers
human_positions = [[state[1][j].position for j in range(len(self.humans))] for state in self.states]
humans = [plt.Circle(human_positions[0][i], self.humans[i].radius, fill=False)
for i in range(len(self.humans))]
human_numbers = [plt.text(humans[i].center[0] - x_offset, humans[i].center[1] - y_offset, str(i),
color='black', fontsize=12) for i in range(len(self.humans))]
for i, human in enumerate(humans):
ax.add_artist(human)
ax.add_artist(human_numbers[i])
# add time annotation
time = plt.text(-1, 5, 'Time: {}'.format(0), fontsize=16)
ax.add_artist(time)
# compute attention scores
if self.attention_weights is not None:
attention_scores = [
plt.text(-5.5, 5 - 0.5 * i, 'Human {}: {:.2f}'.format(i + 1, self.attention_weights[0][i]),
fontsize=16) for i in range(len(self.humans))]
# compute orientation in each step and use arrow to show the direction
radius = self.robot.radius
if self.robot.kinematics == 'unicycle':
orientation = [((state[0].px, state[0].py), (state[0].px + radius * np.cos(state[0].theta),
state[0].py + radius * np.sin(state[0].theta))) for state
in self.states]
orientations = [orientation]
else:
orientations = []
for i in range(self.human_num + 1):
orientation = []
for state in self.states:
if i == 0:
agent_state = state[0]
else:
agent_state = state[1][i - 1]
theta = np.arctan2(agent_state.vy, agent_state.vx)
orientation.append(((agent_state.px, agent_state.py), (agent_state.px + radius * np.cos(theta),
agent_state.py + radius * np.sin(theta))))
orientations.append(orientation)
arrows = [patches.FancyArrowPatch(*orientation[0], color=arrow_color, arrowstyle=arrow_style)
for orientation in orientations]
for arrow in arrows:
ax.add_artist(arrow)
global_step = 0
def update(frame_num):
nonlocal global_step
nonlocal arrows
global_step = frame_num
robot.center = robot_positions[frame_num]
for i, human in enumerate(humans):
human.center = human_positions[frame_num][i]
human_numbers[i].set_position((human.center[0] - x_offset, human.center[1] - y_offset))
for arrow in arrows:
arrow.remove()
arrows = [patches.FancyArrowPatch(*orientation[frame_num], color=arrow_color,
arrowstyle=arrow_style) for orientation in orientations]
for arrow in arrows:
ax.add_artist(arrow)
if self.attention_weights is not None:
human.set_color(str(self.attention_weights[frame_num][i]))
attention_scores[i].set_text('human {}: {:.2f}'.format(i, self.attention_weights[frame_num][i]))
time.set_text('Time: {:.2f}'.format(frame_num * self.time_step))
def plot_value_heatmap():
assert self.robot.kinematics == 'holonomic'
for agent in [self.states[global_step][0]] + self.states[global_step][1]:
print(('{:.4f}, ' * 6 + '{:.4f}').format(agent.px, agent.py, agent.gx, agent.gy,
agent.vx, agent.vy, agent.theta))
# when any key is pressed draw the action value plot
fig, axis = plt.subplots()
speeds = [0] + self.robot.policy.speeds
rotations = self.robot.policy.rotations + [np.pi * 2]
r, th = np.meshgrid(speeds, rotations)
z = np.array(self.action_values[global_step % len(self.states)][1:])
z = (z - np.min(z)) / (np.max(z) - np.min(z))
z = np.reshape(z, (16, 5))
polar = plt.subplot(projection="polar")
polar.tick_params(labelsize=16)
mesh = plt.pcolormesh(th, r, z, vmin=0, vmax=1)
plt.plot(rotations, r, color='k', ls='none')
plt.grid()
cbaxes = fig.add_axes([0.85, 0.1, 0.03, 0.8])
cbar = plt.colorbar(mesh, cax=cbaxes)
cbar.ax.tick_params(labelsize=16)
plt.show()
def on_click(event):
anim.running ^= True
if anim.running:
anim.event_source.stop()
if hasattr(self.robot.policy, 'action_values'):
plot_value_heatmap()
else:
anim.event_source.start()
fig.canvas.mpl_connect('key_press_event', on_click)
anim = animation.FuncAnimation(fig, update, frames=len(self.states), interval=self.time_step * 1000)
anim.running = True
if output_file is not None:
ffmpeg_writer = animation.writers['ffmpeg']
writer = ffmpeg_writer(fps=8, metadata=dict(artist='Me'), bitrate=1800)
anim.save(output_file, writer=writer)
else:
plt.show()
else:
raise NotImplementedError
| [
"saleh.gholamzadeh@epfl.ch"
] | saleh.gholamzadeh@epfl.ch |
54d9c458c43f93f11ddf1b42c2ea501142e9d7cb | 58dfa1adc54f2d6020f9728e3d8c29aee9a27a42 | /src/lsa.py | 9eaefa77b8f83550811acf53ce72f4a39a8645e4 | [] | no_license | rock417/bddac-task1 | 35da5c81ab5421f843a72526bdc9510a4ccd7a72 | 91dd645468b19d4517b6f7fcd7a0e3c6f290d75d | refs/heads/master | 2021-01-17T21:40:38.583349 | 2014-07-05T11:06:55 | 2014-07-05T11:06:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,573 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Jul 05 13:03:23 2014
LSA: Latent Semantic Analysis
__init__:
input: word_dict:the dict {word:the label of id}
id_count:the number of the id
build_matrix: construct the matrix of the word and the id
obtain a dict of the word and its label
self.A[word,id_label]=the times of the word occured in id
self.word_lbl={word:word_label}
TF_IDF: convert the element in self.A to the TF_IDF value
self.A[word,id_label]=tf_idf_value_of_the _word
cal_lsa: calculate the svd decomposition of the self.A
self.u,self.s,self.vt=svd(self.A)
@author: mountain
"""
import numpy as np
class LSA(object):
def __init__(self,word_dict,id_count):
self.word_dict=word_dict
self.id_count=id_count
def build_matrix(self):
self.word_lbl={}
self.words=[k for k in self.word_dict.keys()]
self.A=np.zeros([len(self.words),self.id_count])
for i,k in enumerate(self.words):
for d in self.word_dict[k]:
self.A[i,d]+=1
self.word_lbl[k]=i
def TF_IDF(self):
words_per_doc=np.sum(self.A,axis=0)
doc_per_word=np.sum(self.A,axis=1)
row,col=self.A.shape
for i in range(row):
for j in range(col):
self.A[i,j]=self.A[i,j]/words_per_doc[j]*np.log(col)/doc_per_word[i]
def cal_lsa(self):
self.build_matrix()
self.TF_IDF()
self.U,self.S,self.Vt=np.linalg.svd(self.A)
| [
"mountlovestudy@gmail.com"
] | mountlovestudy@gmail.com |
45b917165828401e96c1a8c3a7cfa1b5fae52fd8 | 4f7d2beed58fd3f484b1930ca3adeac406576d66 | /config/settings/mlsettings/pyTorchClassificationParams.py | 0f57469851642d8c24fec13f9b798fbbfeec8d2d | [] | no_license | Srinidhi-SA/mAdvisorStgAPIUI | 55515997fff1e30fe22f6a88cc222dcd51816031 | 75653e9f2eef51be771991edd6473f470b344110 | refs/heads/main | 2023-08-24T15:53:27.585277 | 2021-10-08T06:27:49 | 2021-10-08T06:27:49 | 343,185,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76,760 | py | PT_ACTIVATION_ELU_PARAMS = [
{
"name": "alpha",
"displayName": "alpha",
"description": "the alpha value for the ELU formulation.",
"defaultValue": 1.0,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
}
]
# UI/tuning metadata for the single hyperparameter of torch.nn.Hardshrink.
# (key "hyperpatameterTuningCandidate" is misspelled file-wide; kept for consumers)
PT_ACTIVATION_Hardshrink_PARAMS = [
    {
        "name": "lambd", "displayName": "lambd",
        "description": "the lambda value for the Hardshrink formulation.",
        "defaultValue": 0.5,
        "paramType": "number", "uiElemType": "textBox", "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["float"], "allowedDataType": ["float"],
    },
]
# UI/tuning metadata for the linear-region bounds of torch.nn.Hardtanh.
# Fix: defaults were the ints -1/1 although expectedDataType/allowedDataType
# declare "float" (and all sibling float params in this file use float
# literals); they are now -1.0/1.0, matching torch.nn.Hardtanh's defaults.
PT_ACTIVATION_Hardtanh_PARAMS = [
    {
        "name": "min_val",
        "displayName": "min_val",
        "description": "minimum value of the linear region range.",
        "defaultValue": -1.0,  # float literal to agree with the declared dtype
        "paramType": "number",
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["float"],
        "allowedDataType": ["float"]
    },
    {
        "name": "max_val",
        "displayName": "max_val",
        "description": "maximum value of the linear region range.",
        "defaultValue": 1.0,  # float literal to agree with the declared dtype
        "paramType": "number",
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["float"],
        "allowedDataType": ["float"]
    }
]
# UI/tuning metadata for the single hyperparameter of torch.nn.LeakyReLU.
PT_ACTIVATION_LeakyReLU_PARAMS = [
    {
        "name": "negative_slope", "displayName": "negative_slope",
        "description": "Controls the angle of the negative slope.",
        "defaultValue": 0.01,
        "paramType": "number", "uiElemType": "textBox", "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["float"], "allowedDataType": ["float"],
    },
]
# UI/tuning metadata for torch.nn.MultiheadAttention hyperparameters.
# Fixes relative to the previous version:
#   * "embed_dim" was the only entry in this file missing a "defaultValue"
#     key, which could KeyError in consumers that index it directly; it now
#     carries "defaultValue": None like its siblings (torch has no default).
#   * the empty descriptions of "bias" and "vdim" are filled in from the
#     official torch.nn.MultiheadAttention documentation.
PT_ACTIVATION_MultiheadAttention_PARAMS = [
    {
        "name": "embed_dim",
        "displayName": "embed_dim",
        "description": "total dimension of the model.",
        "defaultValue": None,  # required by torch; None = no default in the UI
        "paramType": "number",
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["int"],
        "allowedDataType": ["int"]
    },
    {
        "name": "num_heads",
        "displayName": "num_heads",
        "description": "parallel attention heads.",
        "defaultValue": None,
        "paramType": "number",
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["int"],
        "allowedDataType": ["int"]
    },
    {
        "name": "dropout",
        "displayName": "dropout",
        "description": "a Dropout layer on attn_output_weights.",
        "defaultValue": 0.0,
        "paramType": "number",
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["float"],
        "allowedDataType": ["float"]
    },
    {
        "name": "bias",
        "displayName": "bias",
        "description": "If True, adds bias to input / output projection layers.",
        "defaultValue": [
            {
                "name": "false",
                "selected": False,
                "displayName": "False"
            },
            {
                "name": "true",
                "selected": False,
                "displayName": "True"
            }
        ],
        "paramType": "list",
        "uiElemType": "checkbox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["bool"],
        "allowedDataType": ["bool"]
    },
    {
        "name": "add_bias_kv",
        "displayName": "add_bias_kv",
        "description": "add bias to the key and value sequences at dim=0.",
        "defaultValue": [
            {
                "name": "false",
                "selected": False,
                "displayName": "False"
            },
            {
                "name": "true",
                "selected": False,
                "displayName": "True"
            }
        ],
        "paramType": "list",
        "uiElemType": "checkbox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["bool"],
        "allowedDataType": ["bool"]
    },
    {
        "name": "add_zero_attn",
        "displayName": "add_zero_attn",
        "description": "add a new batch of zeros to the key and Value sequences at dim=1.",
        "defaultValue": [
            {
                "name": "false",
                "selected": False,
                "displayName": "False"
            },
            {
                "name": "true",
                "selected": False,
                "displayName": "True"
            }
        ],
        "paramType": "list",
        "uiElemType": "checkbox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["bool"],
        "allowedDataType": ["bool"]
    },
    {
        "name": "kdim",
        "displayName": "kdim",
        "description": "total number of features in key.",
        "defaultValue": None,
        "paramType": "number",
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["int"],
        "allowedDataType": ["int"]
    },
    {
        "name": "vdim",
        "displayName": "vdim",
        "description": "total number of features in value.",
        "defaultValue": None,
        "paramType": "number",
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["int"],
        "allowedDataType": ["int"]
    }
]
# UI/tuning metadata for torch.nn.PReLU hyperparameters.
# NOTE(review): torch spells the module "PReLU"; the "PreLU" spelling in this
# constant's name is kept because existing consumers reference it — confirm
# before renaming.
PT_ACTIVATION_PreLU_PARAMS = [
    {
        "name": "num_parameters", "displayName": "num_parameters",
        "description": "number of alpha to learn.",
        # checkbox choices: a single shared alpha (pre-selected) or one per channel
        "defaultValue": [
            {"name": "1", "selected": True, "displayName": "1"},
            {"name": "no of channels", "selected": False, "displayName": "No of Channels"},
        ],
        "paramType": "list", "uiElemType": "checkbox", "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["string"], "allowedDataType": ["string"],
    },
    {
        "name": "init", "displayName": "init",
        "description": "the initial value of alpha.",
        "defaultValue": 0.25,
        "paramType": "number", "uiElemType": "textBox", "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["float"], "allowedDataType": ["float"],
    },
]
# UI/tuning metadata for the uniform-distribution bounds of torch.nn.RReLU.
# NOTE(review): torch's default upper bound is 1/3 (~0.3333); the 0.33 here
# looks like a rounded value — confirm before changing.
PT_ACTIVATION_RreLU_PARAMS = [
    {
        "name": "lower", "displayName": "lower",
        "description": "lower bound of the uniform distribution.",
        "defaultValue": 0.125,
        "paramType": "number", "uiElemType": "textBox", "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["float"], "allowedDataType": ["float"],
    },
    {
        "name": "upper", "displayName": "upper",
        "description": "upper bound of the uniform distribution.",
        "defaultValue": 0.33,
        "paramType": "number", "uiElemType": "textBox", "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["float"], "allowedDataType": ["float"],
    },
]
# UI/tuning metadata for the single hyperparameter of torch.nn.CELU.
PT_ACTIVATION_CELU_PARAMS = [
    {
        "name": "alpha", "displayName": "alpha",
        "description": "the alpha value for the CELU formulation.",
        "defaultValue": 1.0,
        "paramType": "number", "uiElemType": "textBox", "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["float"], "allowedDataType": ["float"],
    },
]
# UI/tuning metadata for torch.nn.Softplus hyperparameters
# (beta scale and the linear-reversion threshold).
PT_ACTIVATION_Softplus_PARAMS = [
    {
        "name": "beta", "displayName": "beta",
        "description": "the beta value for the Softplus formulation.",
        "defaultValue": 1.0,
        "paramType": "number", "uiElemType": "textBox", "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["float"], "allowedDataType": ["float"],
    },
    {
        "name": "threshold", "displayName": "threshold",
        "description": "values above this revert to a linear function.",
        "defaultValue": 20,
        "paramType": "number", "uiElemType": "textBox", "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["float"], "allowedDataType": ["float"],
    },
]
# UI/tuning metadata for the single hyperparameter of torch.nn.Softshrink.
PT_ACTIVATION_Softshrink_PARAMS = [
    {
        "name": "lambd", "displayName": "lambd",
        "description": "the lambda value for the Softshrink formulation.",
        "defaultValue": 0.5,
        "paramType": "number", "uiElemType": "textBox", "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["float"], "allowedDataType": ["float"],
    },
]
# UI/tuning metadata for torch.nn.Threshold hyperparameters.
# Both arguments are required by torch, hence the None defaults.
PT_ACTIVATION_Threshold_PARAMS = [
    {
        "name": "threshold", "displayName": "threshold",
        "description": "The value to threshold at.",
        "defaultValue": None,
        "paramType": "number", "uiElemType": "textBox", "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["float"], "allowedDataType": ["float"],
    },
    {
        "name": "value", "displayName": "value",
        "description": "The value to replace with.",
        "defaultValue": None,
        "paramType": "number", "uiElemType": "textBox", "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["float"], "allowedDataType": ["float"],
    },
]
# UI/tuning metadata for the reduction dimension of torch.nn.Softmin.
PT_ACTIVATION_Softmin_PARAMS = [
    {
        "name": "dim", "displayName": "dim",
        "description": "A dimension along which Softmin will be computed.",
        "defaultValue": None,
        "paramType": "number", "uiElemType": "textBox", "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["int"], "allowedDataType": ["int"],
    },
]
# UI/tuning metadata for the reduction dimension of torch.nn.Softmax.
PT_ACTIVATION_Softmax_PARAMS = [
    {
        "name": "dim", "displayName": "dim",
        "description": "A dimension along which Softmax will be computed.",
        "defaultValue": None,
        "paramType": "number", "uiElemType": "textBox", "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["int"], "allowedDataType": ["int"],
    },
]
# UI/tuning metadata for the reduction dimension of torch.nn.LogSoftmax.
PT_ACTIVATION_LogSoftmax_PARAMS = [
    {
        "name": "dim", "displayName": "dim",
        "description": "A dimension along which LogSoftmax will be computed.",
        "defaultValue": None,
        "paramType": "number", "uiElemType": "textBox", "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["int"], "allowedDataType": ["int"],
    },
]
# UI/tuning metadata for torch.nn.AdaptiveLogSoftmaxWithLoss hyperparameters.
PT_ACTIVATION_AdaptiveLogSoftmaxWithLoss_PARAMS = [
    {
        "name": "n_classes", "displayName": "n_classes",
        "description": "Number of classes in the dataset.",
        "defaultValue": None,
        "paramType": "number", "uiElemType": "textBox", "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["int"], "allowedDataType": ["int"],
    },
    {
        "name": "cutoffs", "displayName": "cutoffs",
        "description": "Cutoffs used to assign targets to their buckets.",
        "defaultValue": None,
        "paramType": "list", "uiElemType": "textBox", "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["int"], "allowedDataType": ["int"],
    },
    {
        "name": "div_value", "displayName": "div_value",
        "description": "value used as an exponent to compute sizes of the clusters.",
        "defaultValue": 4.0,
        "paramType": "number", "uiElemType": "textBox", "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["float"], "allowedDataType": ["float"],
    },
    {
        "name": "head_bias", "displayName": "head_bias",
        "description": "If True, adds a bias term to the 'head' of the Adaptive softmax.",
        # checkbox choices; neither option is pre-selected
        "defaultValue": [
            {"name": "false", "selected": False, "displayName": "False"},
            {"name": "true", "selected": False, "displayName": "True"},
        ],
        "paramType": "list", "uiElemType": "checkbox", "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["bool"], "allowedDataType": ["bool"],
    },
]
# Catalog of selectable PyTorch activation layers. "parameters" holds a
# shallow copy of each activation's hyperparameter schema; list(X) replaces
# the original pointless comprehension [obj for obj in X] (same shallow copy,
# the inner dicts remain shared — ruff PERF402).
# NOTE(review): PyTorch spells it "PReLU", not "PreLU" — "name" is a lookup
# key, so confirm downstream consumers before renaming it.
PYTORCH_ACTIVATION_PARAMETERS = [
    {"name": "ELU", "selected": False, "displayName": "ELU",
     "parameters": list(PT_ACTIVATION_ELU_PARAMS)},
    {"name": "Hardshrink", "selected": False, "displayName": "Hardshrink",
     "parameters": list(PT_ACTIVATION_Hardshrink_PARAMS)},
    {"name": "Hardtanh", "selected": False, "displayName": "Hardtanh",
     "parameters": list(PT_ACTIVATION_Hardtanh_PARAMS)},
    {"name": "LeakyReLU", "selected": False, "displayName": "LeakyReLU",
     "parameters": list(PT_ACTIVATION_LeakyReLU_PARAMS)},
    {"name": "LogSigmoid", "selected": False, "displayName": "LogSigmoid", "parameters": None},
    {"name": "MultiheadAttention", "selected": False, "displayName": "MultiheadAttention",
     "parameters": list(PT_ACTIVATION_MultiheadAttention_PARAMS)},
    {"name": "PreLU", "selected": False, "displayName": "PreLU",
     "parameters": list(PT_ACTIVATION_PreLU_PARAMS)},
    {"name": "ReLU", "selected": False, "displayName": "ReLU", "parameters": None},
    {"name": "ReLU6", "selected": False, "displayName": "ReLU6", "parameters": None},
    {"name": "RreLU", "selected": False, "displayName": "RreLU",
     "parameters": list(PT_ACTIVATION_RreLU_PARAMS)},
    {"name": "SELU", "selected": False, "displayName": "SELU", "parameters": None},
    {"name": "CELU", "selected": False, "displayName": "CELU",
     "parameters": list(PT_ACTIVATION_CELU_PARAMS)},
    {"name": "GELU", "selected": False, "displayName": "GELU", "parameters": None},
    {"name": "Sigmoid", "selected": False, "displayName": "Sigmoid", "parameters": None},
    {"name": "Softplus", "selected": False, "displayName": "Softplus",
     "parameters": list(PT_ACTIVATION_Softplus_PARAMS)},
    {"name": "Softshrink", "selected": False, "displayName": "Softshrink",
     "parameters": list(PT_ACTIVATION_Softshrink_PARAMS)},
    {"name": "Softsign", "selected": False, "displayName": "Softsign", "parameters": None},
    {"name": "Tanh", "selected": False, "displayName": "Tanh", "parameters": None},
    {"name": "Tanhshrink", "selected": False, "displayName": "Tanhshrink", "parameters": None},
    {"name": "Threshold", "selected": False, "displayName": "Threshold",
     "parameters": list(PT_ACTIVATION_Threshold_PARAMS)},
    {"name": "Softmin", "selected": False, "displayName": "Softmin",
     "parameters": list(PT_ACTIVATION_Softmin_PARAMS)},
    {"name": "Softmax", "selected": False, "displayName": "Softmax",
     "parameters": list(PT_ACTIVATION_Softmax_PARAMS)},
    {"name": "Softmax2d", "selected": False, "displayName": "Softmax2d", "parameters": None},
    {"name": "LogSoftmax", "selected": False, "displayName": "LogSoftmax",
     "parameters": list(PT_ACTIVATION_LogSoftmax_PARAMS)},
    {"name": "AdaptiveLogSoftmaxWithLoss", "selected": False, "displayName": "AdaptiveLogSoftmaxWithLoss",
     "parameters": list(PT_ACTIVATION_AdaptiveLogSoftmaxWithLoss_PARAMS)}
]
# UI schema for torch.nn.Dropout's drop probability `p`.
PT_DROPOUT_P_PARAMS = [
    {"name": "p", "displayName": "p",
     "description": "probability of an element to be dropped.",
     "defaultValue": 0.5, "paramType": "number", "uiElemType": "slider",
     "display": True, "valueRange": [0, 1],
     "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
]
# Dropout layer options; list(X) replaces the pointless comprehension
# [obj for obj in X] (same shallow copy — ruff PERF402).
PYTORCH_DROPOUT_PARAMETERS = [
    {"name": "Dropout", "selected": False, "displayName": "Dropout",
     "parameters": list(PT_DROPOUT_P_PARAMS)}
]
# UI schema for torch.nn.BatchNorm1d hyperparameters.
# Fix: `num_features` was the only parameter in this file lacking a
# "defaultValue" key; added "defaultValue": None for schema consistency
# (matching e.g. units_ip / units_op below).
PT_BATCHNORMALIZATION_BatchNorm1d_PARAMS = [
    {"name": "num_features", "displayName": "num_features",
     "description": "C from an expected input of size (N,C,L) or L from input of size (N, L).",
     "defaultValue": None, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["int"], "allowedDataType": ["int"]},
    {"name": "eps", "displayName": "eps",
     "description": "a value added to the denominator for numerical stability.",
     "defaultValue": 0.00001, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "momentum", "displayName": "momentum",
     "description": "the value used for the running_mean and running_var computation.",
     "defaultValue": 0.1, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    # NOTE(review): "valueRange" on the two boolean checkboxes below looks
    # copy-pasted from a slider config; kept as-is pending confirmation.
    {"name": "affine", "displayName": "affine",
     "description": "a boolean value that when set to True, this module has learnable affine parameters, initialized the same way as done for batch normalization.",
     "defaultValue": [
         {"name": "false", "selected": False, "displayName": "False"},
         {"name": "true", "selected": True, "displayName": "True"},
     ],
     "paramType": "list", "uiElemType": "checkbox", "display": True,
     "valueRange": [0, 1], "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["bool"], "allowedDataType": ["bool"]},
    {"name": "track_running_stats", "displayName": "track_running_stats",
     "description": "a boolean value that when set to True, this module tracks the running mean and variance, and when set to False, this module does not track such statistics and always uses batch statistics in both training and eval modes.",
     "defaultValue": [
         {"name": "false", "selected": False, "displayName": "False"},
         {"name": "true", "selected": True, "displayName": "True"},
     ],
     "paramType": "list", "uiElemType": "checkbox", "display": True,
     "valueRange": [0, 1], "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["bool"], "allowedDataType": ["bool"]},
]
# Batch-normalization layer options (1-D only); list(X) replaces the
# pointless comprehension [obj for obj in X] (same shallow copy — PERF402).
PYTORCH_BATCHNORMALIZATION_PARAMETERS = [
    {"name": "BatchNorm1d", "selected": False, "displayName": "BatchNorm1d",
     "parameters": list(PT_BATCHNORMALIZATION_BatchNorm1d_PARAMS)}
]
# Bounds for torch.nn.init.uniform_-style bias initialisation.
PT_BIAS_INIT_Uniform_PARAMS = [
    {"name": "lower_bound", "displayName": "lower bound",
     "description": "Fills the input Tensor with values drawn from the uniform distribution U(lower_bound, upper_bound)",
     "defaultValue": 0.0, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "upper_bound", "displayName": "upper bound",
     "description": "Fills the input Tensor with values drawn from the uniform distribution U(lower_bound, upper_bound)",
     "defaultValue": 1.0, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
]
# Mean / std for torch.nn.init.normal_-style bias initialisation.
PT_BIAS_INIT_Normal_PARAMS = [
    {"name": "mean", "displayName": "mean",
     "description": "Fills the input Tensor with values drawn from the normal distribution,N(mean,std^2)",
     "defaultValue": 0.0, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "std", "displayName": "std",
     "description": "Fills the input Tensor with values drawn from the normal distribution,N(mean,std^2)",
     "defaultValue": 1.0, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
]
# Value for torch.nn.init.constant_-style bias initialisation.
PT_BIAS_INIT_Constant_PARAMS = [
    {"name": "val", "displayName": "val",
     "description": "Fills the input Tensor with the value {val}",
     "defaultValue": 1.0, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
]
# Bias-initialisation options (torch.nn.init).
# Fixes: the parameterless options all carried the copy-pasted description
# "Input Units parameter for the hidden layer." — replaced with accurate
# text. list(X) replaces the pointless [obj for obj in X] copies (PERF402).
PYTORCH_BIAS_INIT_PARAMETERS = [
    {"name": "Uniform", "selected": False, "displayName": "Uniform",
     "parameters": list(PT_BIAS_INIT_Uniform_PARAMS)},
    {"name": "Normal", "selected": False, "displayName": "Normal",
     "parameters": list(PT_BIAS_INIT_Normal_PARAMS)},
    {"name": "Constant", "selected": False, "displayName": "Constant",
     "parameters": list(PT_BIAS_INIT_Constant_PARAMS)},
    {"name": "Ones", "displayName": "Ones",
     "description": "Fills the bias tensor with the scalar value 1.",
     "selected": False, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["string"], "allowedDataType": ["string"]},
    {"name": "Zeros", "displayName": "Zeros",
     "description": "Fills the bias tensor with the scalar value 0.",
     "selected": False, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["string"], "allowedDataType": ["string"]},
    {"name": "Eyes", "displayName": "Eyes",
     "description": "Fills the tensor with the identity (torch.nn.init.eye_).",
     "selected": False, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["string"], "allowedDataType": ["string"]},
    {"name": "Default", "displayName": "Default",
     "description": "Uses PyTorch's default bias initialisation.",
     "selected": False, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["string"], "allowedDataType": ["string"]},
    {"name": "Other", "displayName": "Other",
     "description": "User-specified bias initialisation.",
     "selected": True, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["string"], "allowedDataType": ["string"]},
]
# Gain for torch.nn.init.xavier_uniform_.
# Fix: the description was copy-pasted from the Uniform bias-init entry
# (it talked about U(lower_bound, upper_bound)); replaced with the actual
# meaning of `gain` per the torch.nn.init documentation.
PT_WEIGHT_INIT_xavier_uniform_PARAMS = [
    {"name": "gain", "displayName": "gain",
     "description": "An optional scaling factor applied in Xavier/Glorot uniform initialization.",
     "defaultValue": 1.0, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
]
# Gain for torch.nn.init.xavier_normal_.
# Fix: copy-pasted uniform-distribution description replaced with the
# actual meaning of `gain` per the torch.nn.init documentation.
PT_WEIGHT_INIT_xavier_normal_PARAMS = [
    {"name": "gain", "displayName": "gain",
     "description": "An optional scaling factor applied in Xavier/Glorot normal initialization.",
     "defaultValue": 1.0, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
]
# Hyperparameters for torch.nn.init.kaiming_normal_.
# Fix: all three descriptions were the same copy-pasted uniform-distribution
# text; replaced with the parameters' actual meanings per torch.nn.init docs.
PT_WEIGHT_INIT_Kaiming_Normal_PARAMS = [
    {"name": "a", "displayName": "a",
     "description": "The negative slope of the rectifier used after this layer (only used with 'leaky_relu').",
     "defaultValue": 0.0, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "mode", "displayName": "mode",
     "description": "Either 'fan_in' (preserves weight variance in the forward pass) or 'fan_out' (preserves it in the backward pass).",
     "defaultValue": [
         {"name": "fan_in", "selected": True, "displayName": "fan_in"},
         {"name": "fan_out", "selected": False, "displayName": "fan_out"},
     ],
     "paramType": "list", "uiElemType": "checkbox", "display": True,
     "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["string"], "allowedDataType": ["string"]},
    {"name": "nonlinearity", "displayName": "nonlinearity",
     "description": "The non-linear function; recommended to use only with 'relu' or 'leaky_relu'.",
     "defaultValue": [
         {"name": "leaky_relu", "selected": True, "displayName": "leaky_relu"},
         {"name": "relu", "selected": False, "displayName": "relu"},
     ],
     "paramType": "list", "uiElemType": "checkbox", "display": True,
     "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["string"], "allowedDataType": ["string"]},
]
# Gain for torch.nn.init.orthogonal_.
# Fix: copy-pasted uniform-distribution description replaced with the
# actual meaning per the torch.nn.init documentation.
PT_WEIGHT_INIT_Orthogonal_PARAMS = [
    {"name": "gain", "displayName": "gain",
     "description": "Optional scaling factor applied to the (semi) orthogonal matrix.",
     "defaultValue": 1.0, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
]
# Hyperparameters for torch.nn.init.sparse_.
# Fix: descriptions were copy-pasted from the uniform/normal entries;
# replaced with the parameters' actual meanings per torch.nn.init docs.
PT_WEIGHT_INIT_Sparse_PARAMS = [
    {"name": "sparsity", "displayName": "sparsity",
     "description": "The fraction of elements in each column to be set to zero.",
     "defaultValue": 0.5, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "std", "displayName": "std",
     "description": "The standard deviation of the normal distribution used to generate the non-zero values.",
     "defaultValue": 0.01, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
]
# Clip bounds used when the weight-constraint option is enabled.
PT_WEIGHT_CONSTRAINT_TRUE_PARAMS = [
    {"name": "min", "displayName": "min",
     "description": "minimum value.",
     "defaultValue": 0.3, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "max", "displayName": "max",
     "description": "maximum value.",
     "defaultValue": 0.7, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
]
# Weight-initialisation options (torch.nn.init).
# Fixes: Ones/Zeros/Eyes/Dirac carried the copy-pasted description
# "Input Units parameter for the hidden layer." — replaced with accurate
# text. list(X) replaces the pointless [obj for obj in X] copies (PERF402).
PYTORCH_WEIGHT_INIT_PARAMETERS = [
    {"name": "Uniform", "selected": False, "displayName": "Uniform",
     "parameters": list(PT_BIAS_INIT_Uniform_PARAMS)},
    {"name": "Normal", "selected": False, "displayName": "Normal",
     "parameters": list(PT_BIAS_INIT_Normal_PARAMS)},
    {"name": "Constant", "selected": False, "displayName": "Constant",
     "parameters": list(PT_BIAS_INIT_Constant_PARAMS)},
    {"name": "Ones", "displayName": "Ones",
     "description": "Fills the weight tensor with the scalar value 1.",
     "selected": False, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["string"], "allowedDataType": ["string"]},
    {"name": "Zeros", "displayName": "Zeros",
     "description": "Fills the weight tensor with the scalar value 0.",
     "selected": False, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["string"], "allowedDataType": ["string"]},
    {"name": "Eyes", "displayName": "Eyes",
     "description": "Fills the tensor with the identity (torch.nn.init.eye_).",
     "selected": False, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["string"], "allowedDataType": ["string"]},
    {"name": "Dirac", "displayName": "Dirac",
     "description": "Fills the tensor with the Dirac delta function (torch.nn.init.dirac_).",
     "selected": False, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["string"], "allowedDataType": ["string"]},
    {"name": "Xavier_Uniform", "selected": False, "displayName": "Xavier Uniform",
     "parameters": list(PT_WEIGHT_INIT_xavier_uniform_PARAMS)},
    {"name": "Xavier_Normal", "selected": False, "displayName": "Xavier Normal",
     "parameters": list(PT_WEIGHT_INIT_xavier_normal_PARAMS)},
    {"name": "Kaiming_Normal", "selected": False, "displayName": "Kaiming Normal",
     "parameters": list(PT_WEIGHT_INIT_Kaiming_Normal_PARAMS)},
    {"name": "Orthogonal", "selected": False, "displayName": "Orthogonal",
     "parameters": list(PT_WEIGHT_INIT_Orthogonal_PARAMS)},
    {"name": "Sparse", "selected": False, "displayName": "Sparse",
     "parameters": list(PT_WEIGHT_INIT_Sparse_PARAMS)},
    {"name": "Default", "selected": True, "displayName": "Default",
     "parameters": None},
]
# On/off toggle for weight clipping; the "True" option carries the min/max
# clip bounds defined in PT_WEIGHT_CONSTRAINT_TRUE_PARAMS.
PT_WEIGHT_CONSTRAINT_CONSTRAINT_PARAMS = [
    {
        "name": "constraint",
        "displayName": "constraint",
        "description": "constraint",
        "defaultValue": [
            {
                "name": "True",
                "selected": False,
                "displayName": "True",
                # NOTE(review): this wraps the params list in an extra list
                # (list-of-list), unlike the "parameters" entries elsewhere
                # in this file which hold the list directly — confirm
                # consumers expect the nesting before changing it.
                "parameters": [PT_WEIGHT_CONSTRAINT_TRUE_PARAMS]
            },
            {
                "name": "False",
                "selected": True,  # constraint disabled by default
                "displayName": "False"
            }
        ],
        "paramType": "list",
        "uiElemType": "checkbox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["bool"],
        "allowedDataType": ["bool"]
    },
]
# Weight-constraint option group; list(X) replaces the pointless
# comprehension [obj for obj in X] (same shallow copy — ruff PERF402).
PYTORCH_WEIGHT_CONSTRAINT_PARAMETERS = [
    {"name": "constraint", "selected": True, "displayName": "constraint",
     "parameters": list(PT_WEIGHT_CONSTRAINT_CONSTRAINT_PARAMS)},
]
# Standalone bias-init control whose default value is the catalog of
# bias-initialisation options defined above.
PT_BIAS_PARAMS = [
    {
        "name": "bias_init",
        "displayName": "bias_init",
        "description": "Bias initialisation parameter for the hidden layer.",
        # Shallow copy of the option catalog (inner dicts are shared).
        "defaultValue": [obj for obj in PYTORCH_BIAS_INIT_PARAMETERS],
        "paramType": "list",
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        # NOTE(review): ["int"] looks copy-pasted — the options above are
        # string-named initializer descriptors, not ints; confirm with the
        # consumers of expectedDataType before changing.
        "expectedDataType": ["int"],
        "allowedDataType": ["int"]
    },
]
# Configurable settings for a PyTorch "Linear" hidden layer. Each entry is
# one UI control; "defaultValue" holds either a literal or a shallow copy of
# the relevant option catalog. list(X) replaces the pointless comprehension
# [obj for obj in X] (same shallow copy, inner dicts shared — ruff PERF402).
PYTORCH_LINEAR_PARAMETERS = [
    {"name": "activation", "displayName": "Activation",
     "description": "Activation function for the hidden layer.",
     "defaultValue": list(PYTORCH_ACTIVATION_PARAMETERS),
     "paramType": "list", "uiElemType": "checkbox", "display": True,
     "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["string"], "allowedDataType": ["string"]},
    {"name": "dropout", "displayName": "Dropout",
     "description": "During training, randomly zeroes some of the elements of the input tensor with probability p using samples from a Bernoulli distribution.",
     "defaultValue": list(PYTORCH_DROPOUT_PARAMETERS),
     "paramType": "list", "uiElemType": "checkbox", "display": True,
     "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["string"], "allowedDataType": ["string"]},
    {"name": "batchnormalization", "displayName": "Batch Normalization",
     "description": "Applies Batch Normalization over a 2D or 3D input (a mini-batch of 1D inputs with optional additional channel dimension) as described in the paper.",
     "defaultValue": list(PYTORCH_BATCHNORMALIZATION_PARAMETERS),
     "paramType": "list", "uiElemType": "checkbox", "display": True,
     "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["string"], "allowedDataType": ["string"]},
    {"name": "units_ip", "displayName": "Input Units",
     "description": "Input Units parameter for the hidden layer.",
     "defaultValue": None, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["int"], "allowedDataType": ["int"]},
    {"name": "units_op", "displayName": "Output Units",
     "description": "Output Units parameter for the hidden layer.",
     "defaultValue": None, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["int"], "allowedDataType": ["int"]},
    # NOTE(review): the bool/int dataTypes on the two init controls below
    # look copy-pasted (the options are initializer descriptors); kept as-is
    # pending confirmation from the consumers of these fields.
    {"name": "bias_init", "displayName": "bias_init",
     "description": "Bias initialisation parameter for the hidden layer.",
     "defaultValue": list(PYTORCH_BIAS_INIT_PARAMETERS),
     "paramType": "list", "uiElemType": "checkbox", "display": True,
     "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["bool"], "allowedDataType": ["bool"]},
    {"name": "weight_init", "displayName": "weight_init",
     "description": "Weight initialisation parameter for the hidden layer.",
     "defaultValue": list(PYTORCH_WEIGHT_INIT_PARAMETERS),
     "paramType": "list", "uiElemType": "textBox", "display": True,
     "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["int"], "allowedDataType": ["int"]},
    # The "True" option wraps its params list in an extra list, mirroring
    # PT_WEIGHT_CONSTRAINT_CONSTRAINT_PARAMS elsewhere in this file.
    {"name": "weight_constraint", "displayName": "weight constraint",
     "description": "clipping the Weights.",
     "defaultValue": [
         {"name": "True", "selected": False, "displayName": "True",
          "parameters": [PT_WEIGHT_CONSTRAINT_TRUE_PARAMS]},
         {"name": "False", "selected": True, "displayName": "False"},
     ],
     "paramType": "list", "uiElemType": "checkbox", "display": True,
     "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["bool"], "allowedDataType": ["bool"]},
]
# Layer types supported by the PyTorch network builder; list(X) replaces
# the pointless comprehension [obj for obj in X] (same shallow copy).
SKLEARN_ML_SUPPORTED_PT_LAYER = [
    {"name": "Linear", "selected": True, "displayName": "Linear",
     "parameters": list(PYTORCH_LINEAR_PARAMETERS)}
]
# Tunable hyperparameters exposed for torch.optim.Adadelta.
PT_OPTIMIZER_Adadelta_PARAMETERS = [
    {"name": "rho", "displayName": "rho",
     "description": "coefficient used for computing a running average of squared gradients.",
     "defaultValue": 0.9, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "eps", "displayName": "eps",
     "description": "term added to the denominator to improve numerical stability.",
     "defaultValue": 0.000001, "paramType": "number", "uiElemType": "textBox",
     "valueRange": [0.000001, 1], "display": True,
     "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "lr", "displayName": "lr",
     "description": "coefficient that scale delta before it is applied to the parameters.",
     "defaultValue": 1.0, "valueRange": [0.0, 1.0],
     "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "weight_decay", "displayName": "weight_decay",
     "description": "weight decay (L2 penalty).",
     "defaultValue": 0, "valueRange": [0.0, 0.1],
     "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
]
# Tunable hyperparameters exposed for torch.optim.Adagrad.
PT_OPTIMIZER_Adagrad_PARAMETERS = [
    {"name": "lr", "displayName": "lr",
     "description": "learning rate.",
     "defaultValue": 0.01, "valueRange": [0.0, 1.0],
     "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "lr_decay", "displayName": "lr_decay",
     "description": " learning rate decay.",
     "defaultValue": 0, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "weight_decay", "displayName": "weight_decay",
     "description": "weight decay (L2 penalty).",
     "defaultValue": 0, "valueRange": [0.0, 0.1],
     "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "eps", "displayName": "eps",
     "description": "term added to the denominator to improve numerical stability.",
     "defaultValue": 0.0000000001, "valueRange": [0.0000000001, 1],
     "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
]
# Tunable hyperparameters exposed for torch.optim.Adam.
PT_OPTIMIZER_Adam_PARAMETERS = [
    {"name": "lr", "displayName": "lr",
     "description": "learning rate.",
     "defaultValue": 0.001, "valueRange": [0.0, 1.0],
     "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "betas", "displayName": "betas",
     "description": "coefficients used for computing running averages of gradient and its square.",
     "defaultValue": [0.9, 0.999], "valueRange": [[0.0, 1.0], [0.0, 1.0]],
     "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "eps", "displayName": "eps",
     "description": "term added to the denominator to improve numerical stability.",
     "defaultValue": 0.00000001, "valueRange": [0.0000000001, 1],
     "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "weight_decay", "displayName": "weight_decay",
     "description": "weight decay (L2 penalty).",
     "defaultValue": 0, "valueRange": [0.0, 0.1],
     "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "amsgrad", "displayName": "amsgrad",
     "description": "whether to use the AMSGrad variant of this algorithm from the paper.",
     "defaultValue": [
         {"name": "false", "selected": True, "displayName": "False"},
         {"name": "true", "selected": False, "displayName": "True"},
     ],
     "paramType": "list", "uiElemType": "checkbox", "display": True,
     "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["bool"], "allowedDataType": ["bool"]},
]
# Tunable hyperparameters exposed for torch.optim.AdamW
# (decoupled weight decay defaults to 0.01, unlike plain Adam).
PT_OPTIMIZER_AdamW_PARAMETERS = [
    {"name": "lr", "displayName": "lr",
     "description": "learning rate.",
     "defaultValue": 0.001, "valueRange": [0.0, 1.0],
     "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "betas", "displayName": "betas",
     "description": "coefficients used for computing running averages of gradient and its square.",
     "defaultValue": [0.9, 0.999], "valueRange": [[0.0, 1.0], [0.0, 1.0]],
     "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "eps", "displayName": "eps",
     "description": "term added to the denominator to improve numerical stability.",
     "defaultValue": 0.00000001, "valueRange": [0.00000001, 1.0],
     "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "weight_decay", "displayName": "weight_decay",
     "description": "weight decay (L2 penalty).",
     "defaultValue": 0.01, "valueRange": [0.0, 0.1],
     "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "amsgrad", "displayName": "amsgrad",
     "description": "whether to use the AMSGrad variant of this algorithm from the paper.",
     "defaultValue": [
         {"name": "false", "selected": True, "displayName": "False"},
         {"name": "true", "selected": False, "displayName": "True"},
     ],
     "paramType": "list", "uiElemType": "checkbox", "display": True,
     "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["bool"], "allowedDataType": ["bool"]},
]
# Tunable hyperparameters exposed for torch.optim.SparseAdam.
PT_OPTIMIZER_SparseAdam_PARAMETERS = [
    {"name": "lr", "displayName": "lr",
     "description": "learning rate.",
     "defaultValue": 0.001, "valueRange": [0.0, 1.0],
     "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "betas", "displayName": "betas",
     "description": "coefficients used for computing running averages of gradient and its square.",
     "defaultValue": [0.9, 0.999], "valueRange": [[0.0, 1.0], [0.0, 1.0]],
     "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "eps", "displayName": "eps",
     "description": "term added to the denominator to improve numerical stability.",
     "defaultValue": 0.00000001, "valueRange": [0.00000001, 1.0],
     "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
]
# Tunable hyperparameters exposed for torch.optim.Adamax.
PT_OPTIMIZER_Adamax_PARAMETERS = [
    {"name": "lr", "displayName": "lr",
     "description": "learning rate.",
     "defaultValue": 0.001, "valueRange": [0.0, 1.0],
     "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "betas", "displayName": "betas",
     "description": "coefficients used for computing running averages of gradient and its square.",
     "defaultValue": [0.9, 0.999], "valueRange": [[0.0, 1.0], [0.0, 1.0]],
     "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "eps", "displayName": "eps",
     "description": "term added to the denominator to improve numerical stability.",
     "defaultValue": 0.00000001, "valueRange": [0.00000001, 1.0],
     "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "weight_decay", "displayName": "weight_decay",
     "description": "weight decay (L2 penalty).",
     "defaultValue": 0, "valueRange": [0.0, 0.1],
     "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
]
# Tunable hyperparameters exposed for torch.optim.ASGD (averaged SGD).
PT_OPTIMIZER_ASGD_PARAMETERS = [
    {"name": "lr", "displayName": "lr",
     "description": "learning rate.",
     "defaultValue": 0.01, "valueRange": [0.0, 1.0],
     "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "lambd", "displayName": "lambd",
     "description": "decay term.",
     "defaultValue": 0.0001, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "alpha", "displayName": "alpha",
     "description": "power for eta update.",
     "defaultValue": 0.75, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "t0", "displayName": "t0",
     "description": "point at which to start averaging.",
     "defaultValue": 0.000001, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "weight_decay", "displayName": "weight_decay",
     "description": "weight decay (L2 penalty).",
     "defaultValue": 0, "valueRange": [0.0, 0.1],
     "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
]
# Tunable hyperparameters exposed for torch.optim.LBFGS.
PT_OPTIMIZER_LBFGS_PARAMETERS = [
    {"name": "lr", "displayName": "lr",
     "description": "learning rate.",
     "defaultValue": 1, "valueRange": [0.0, 1.0],
     "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "max_iter", "displayName": "max_iter",
     "description": "maximal number of iterations per optimization step.",
     "defaultValue": 20, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["int"], "allowedDataType": ["int"]},
    {"name": "max_eval", "displayName": "max_eval",
     "description": "maximal number of function evaluations per optimization step.",
     "defaultValue": 25, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["int"], "allowedDataType": ["int"]},
    {"name": "tolerance_grad", "displayName": "tolerance_grad",
     "description": " termination tolerance on first order optimality.",
     "defaultValue": 0.00001, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "tolerance_change", "displayName": "tolerance_change",
     "description": "termination tolerance on function value/parameter changes.",
     "defaultValue": 0.000000001, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["float"], "allowedDataType": ["float"]},
    {"name": "history_size", "displayName": "history_size",
     "description": "update history size.",
     "defaultValue": 100, "paramType": "number", "uiElemType": "textBox",
     "display": True, "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["int"], "allowedDataType": ["int"]},
    {"name": "line_search_fn", "displayName": "line_search_fn",
     "description": "either 'strong_wolfe' or None.",
     "defaultValue": [
         {"name": "None", "selected": True, "displayName": "None"},
         {"name": "strong_wolfe", "selected": False, "displayName": "strong_wolfe"},
     ],
     "paramType": "list", "uiElemType": "checkbox", "display": True,
     "hyperpatameterTuningCandidate": True,
     "expectedDataType": ["string"], "allowedDataType": ["string"]},
]
# UI metadata for torch.optim.RMSprop hyperparameters. Each entry drives one
# form control: name/label, default value, widget type, tuning eligibility
# and accepted data types.
PT_OPTIMIZER_RMSprop_PARAMETERS = [
    {
        "name": "lr",
        "displayName": "lr",
        "description": "learning rate.",
        "defaultValue": 0.01,
        "valueRange": [0.0, 1.0],
        "paramType": "number",
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["float"],
        "allowedDataType": ["float"]
    },
    {
        "name": "momentum",
        "displayName": "momentum",
        "description": "momentum factor.",
        "defaultValue": 0,
        "paramType": "number",
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["float"],
        "allowedDataType": ["float"]
    },
    {
        "name": "alpha",
        "displayName": "alpha",
        "description": "smoothing constant.",
        "defaultValue": 0.99,
        "paramType": "number",
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["float"],
        "allowedDataType": ["float"]
    },
    {
        "name": "eps",
        "displayName": "eps",
        "description": "term added to the denominator to improve numerical stability.",
        "defaultValue": 0.00000001,
        "valueRange": [0.00000001, 1.0],
        "paramType": "number",
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["float"],
        "allowedDataType": ["float"]
    },
    {
        "name": "centered",
        "displayName": "centered",
        # Fix: user-facing description typo "normalized By" -> "normalized by".
        "description": "if True, compute the centered RMSProp, the gradient is normalized by an estimation of its variance.",
        # NOTE(review): "true" is pre-selected although PyTorch's own default
        # is centered=False — confirm this is deliberate.
        "defaultValue": [
            {
                "name": "false",
                "selected": False,
                "displayName": "False"
            },
            {
                "name": "true",
                "selected": True,
                "displayName": "True"
            }
        ],
        "paramType": "list",
        "uiElemType": "checkbox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["bool"],
        "allowedDataType": ["bool"]
    },
    {
        "name": "weight_decay",
        "displayName": "weight_decay",
        "description": "weight decay (L2 penalty).",
        "defaultValue": 0,
        "valueRange": [0, 0.1],
        "paramType": "number",
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["float"],
        "allowedDataType": ["float"]
    },
]
# UI metadata for torch.optim.Rprop hyperparameters.
PT_OPTIMIZER_Rprop_PARAMETERS = [
    {
        "name": "lr",
        "displayName": "lr",
        "description": "learning rate.",
        "defaultValue": 0.01,
        "valueRange": [0, 1],
        "paramType": "number",
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["float"],
        "allowedDataType": ["float"]
    },
    {
        "name": "eta",
        "displayName": "eta",
        # Fix: user-facing description typo "etaplUs" -> "etaplus".
        "description": "pair of (etaminus, etaplus), that are multiplicative.",
        "defaultValue": [0.5, 1.2],
        "valueRange": [[0.0, 5.0], [0.0, 5.0]],
        "paramType": "number",
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["float"],
        "allowedDataType": ["float"]
    },
    {
        "name": "step_sizes",
        "displayName": "step_sizes",
        "description": "a pair of minimal and maximal allowed step sizes.",
        "defaultValue": [0.000001, 50],
        "valueRange": [[0.0, 5.0], [0.0, 5.0]],
        "paramType": "number",
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["float"],
        "allowedDataType": ["float"]
    }
]
# UI metadata for torch.optim.SGD hyperparameters.
PT_OPTIMIZER_SGD_PARAMETERS = [
    {
        "name": "lr",
        "displayName": "lr",
        "description": "learning rate.",
        "defaultValue": 0.1,
        "valueRange": [0, 1],
        "paramType": "number",
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["float"],
        "allowedDataType": ["float"]
    },
    {
        "name": "momentum",
        "displayName": "momentum",
        "description": "momentum factor.",
        "defaultValue": 0,
        "paramType": "number",
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["float"],
        "allowedDataType": ["float"]
    },
    {
        "name": "weight_decay",
        "displayName": "weight_decay",
        "description": "weight decay (L2 penalty).",
        "defaultValue": 0,
        "valueRange": [0, 0.1],
        "paramType": "number",
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["float"],
        "allowedDataType": ["float"]
    },
    {
        "name": "dampening",
        "displayName": "dampening",
        "description": "dampening for momentum.",
        "defaultValue": 0,
        "paramType": "number",
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["float"],
        "allowedDataType": ["float"]
    },
    {
        "name": "nesterov",
        "displayName": "nesterov",
        "description": "enables Nesterov momentum.",
        # NOTE(review): neither option is pre-selected here ("selected": False
        # on both), unlike most boolean params in this file — confirm whether
        # "false" (PyTorch's default) was meant to be selected.
        "defaultValue": [
            {
                "name": "false",
                "selected": False,
                "displayName": "False"
            },
            {
                "name": "true",
                "selected": False,
                "displayName": "True"
            }
        ],
        "paramType": "list",
        "uiElemType": "checkbox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["bool"],
        "allowedDataType": ["bool"]
    },
]
# Master list of supported PyTorch optimizers, each paired with its parameter
# metadata. list(...) replaces the redundant `[obj for obj in ...]` identity
# comprehensions; both make the same shallow copy (the list structure is
# private to each entry while the parameter dicts stay shared).
SKLEARN_ML_SUPPORTED_PT_OPTIMIZER_PARAMETERS = [
    {"name": "Adadelta", "selected": False, "displayName": "Adadelta",
     "parameters": list(PT_OPTIMIZER_Adadelta_PARAMETERS)},
    {"name": "Adagrad", "selected": False, "displayName": "Adagrad",
     "parameters": list(PT_OPTIMIZER_Adagrad_PARAMETERS)},
    {"name": "Adam", "selected": False, "displayName": "Adam",
     "parameters": list(PT_OPTIMIZER_Adam_PARAMETERS)},
    {"name": "AdamW", "selected": False, "displayName": "AdamW",
     "parameters": list(PT_OPTIMIZER_AdamW_PARAMETERS)},
    {"name": "SparseAdam", "selected": False, "displayName": "SparseAdam",
     "parameters": list(PT_OPTIMIZER_SparseAdam_PARAMETERS)},
    {"name": "Adamax", "selected": False, "displayName": "Adamax",
     "parameters": list(PT_OPTIMIZER_Adamax_PARAMETERS)},
    {"name": "ASGD", "selected": False, "displayName": "ASGD",
     "parameters": list(PT_OPTIMIZER_ASGD_PARAMETERS)},
    {"name": "LBFGS", "selected": False, "displayName": "LBFGS",
     "parameters": list(PT_OPTIMIZER_LBFGS_PARAMETERS)},
    {"name": "RMSprop", "selected": False, "displayName": "RMSprop",
     "parameters": list(PT_OPTIMIZER_RMSprop_PARAMETERS)},
    {"name": "Rprop", "selected": False, "displayName": "Rprop",
     "parameters": list(PT_OPTIMIZER_Rprop_PARAMETERS)},
    {"name": "SGD", "selected": False, "displayName": "SGD",
     "parameters": list(PT_OPTIMIZER_SGD_PARAMETERS)}
]
# UI metadata for torch.nn.CrossEntropyLoss parameters.
PT_LOSS_CrossEntropyLoss_PARAMETERS = [
    {
        "name": "weight",
        "displayName": "weight",
        "description": "a manual rescaling weight given to each class. If given, has to be a Tensor of size C.",
        "paramType": "tensor",
        "defaultValue": None,
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["tensor"],
        "allowedDataType": ["tensor"]
    },
    {
        "name": "ignore_index",
        "displayName": "ignore_index",
        "description": "Specifies a target value that is ignored and does not contribute to the input gradient.",
        # NOTE(review): PyTorch's own default is -100; None here presumably
        # means "unset" — confirm against the consumer of this schema.
        "defaultValue": None,
        "paramType": "number",
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["int"],
        "allowedDataType": ["int"]
    },
    {
        "name": "reduction",
        "displayName": "reduction",
        "description": "Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.",
        "defaultValue": "mean",
        "paramType": "list",
        "valueRange": ["none", "mean", "sum"],
        "uiElemType": "checkbox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["string"],
        "allowedDataType": ["string"]
    }
]
# UI metadata for torch.nn.CTCLoss parameters.
PT_LOSS_CTCLoss_PARAMETERS = [
    {
        "name": "blank",
        "displayName": "blank",
        "description": "blank label.",
        "defaultValue": 0,
        "paramType": "number",
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["int"],
        "allowedDataType": ["int"]
    },
    {
        "name": "reduction",
        "displayName": "reduction",
        "description": "Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.",
        "defaultValue": "mean",
        "paramType": "list",
        "valueRange": ["none", "mean", "sum"],
        "uiElemType": "checkbox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["string"],
        "allowedDataType": ["string"]
    },
    {
        "name": "zero_infinity",
        "displayName": "zero_infinity",
        "description": "Whether to zero infinite losses and the associated gradients.",
        # "false" is pre-selected, matching torch.nn.CTCLoss's own default.
        "defaultValue": [
            {
                "name": "false",
                "selected": True,
                "displayName": "False"
            },
            {
                "name": "true",
                "selected": False,
                "displayName": "True"
            }
        ],
        "paramType": "list",
        "uiElemType": "checkbox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["bool"],
        "allowedDataType": ["bool"]
    }
]
# UI metadata for torch.nn.NLLLoss parameters.
PT_LOSS_NLLLoss_PARAMETERS = [
    {
        "name": "weight",
        "displayName": "weight",
        "description": "a manual rescaling weight given to each class. If given, has to be a Tensor of size C.",
        "paramType": "tensor",
        "defaultValue": None,
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["tensor"],
        "allowedDataType": ["tensor"]
    },
    {
        "name": "ignore_index",
        "displayName": "ignore_index",
        "description": "Specifies a target value that is ignored and does not contribute to the input gradient.",
        "defaultValue": None,
        "paramType": "number",
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["int"],
        "allowedDataType": ["int"]
    },
    {
        "name": "reduction",
        "displayName": "reduction",
        "description": "Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.",
        "defaultValue": "mean",
        "paramType": "list",
        "valueRange": ["none", "mean", "sum"],
        "uiElemType": "checkbox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["string"],
        "allowedDataType": ["string"]
    }
]
# UI metadata for torch.nn.PoissonNLLLoss parameters.
PT_LOSS_PoissonNLLLoss_PARAMETERS = [
    {
        "name": "log_input",
        "displayName": "log_input",
        "description": "if True the loss is computed as exp(input)-target*input.",
        # NOTE(review): neither option is pre-selected (both "selected": False),
        # unlike most boolean params in this file — confirm whether a default
        # selection was intended.
        "defaultValue": [
            {
                "name": "false",
                "selected": False,
                "displayName": "False"
            },
            {
                "name": "true",
                "selected": False,
                "displayName": "True"
            }
        ],
        "paramType": "list",
        "uiElemType": "checkbox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["bool"],
        "allowedDataType": ["bool"]
    },
    {
        "name": "full",
        "displayName": "full",
        "description": "whether to compute full loss, i. e. to add the Stirling approximation term.",
        # NOTE(review): same as log_input above — no option is pre-selected.
        "defaultValue": [
            {
                "name": "false",
                "selected": False,
                "displayName": "False"
            },
            {
                "name": "true",
                "selected": False,
                "displayName": "True"
            }
        ],
        "paramType": "list",
        "uiElemType": "checkbox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["bool"],
        "allowedDataType": ["bool"]
    },
    {
        "name": "eps",
        "displayName": "eps",
        "description": "small value to avoid evaluation of log(0) when log_input = False.",
        "defaultValue": 0.00000001,
        "paramType": "number",
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["float"],
        "allowedDataType": ["float"]
    },
    {
        "name": "reduction",
        "displayName": "reduction",
        "description": "Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.",
        "defaultValue": "mean",
        "paramType": "list",
        "valueRange": ["none", "mean", "sum"],
        "uiElemType": "checkbox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["string"],
        "allowedDataType": ["string"]
    }
]
# UI metadata for torch.nn.BCELoss parameters.
PT_LOSS_BCELoss_PARAMETERS = [
    {
        "name": "weight",
        "displayName": "weight",
        "description": "a manual rescaling weight given to each class. If given, has to be a Tensor of size C.",
        "paramType": "tensor",
        "defaultValue": None,
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["tensor"],
        "allowedDataType": ["tensor"]
    },
    {
        "name": "reduction",
        "displayName": "reduction",
        "description": "Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.",
        "defaultValue": "mean",
        "paramType": "list",
        "valueRange": ["none", "mean", "sum"],
        "uiElemType": "checkbox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["string"],
        "allowedDataType": ["string"]
    }
]
# UI metadata for torch.nn.BCEWithLogitsLoss parameters.
PT_LOSS_BCEWithLogitsLoss_PARAMETERS = [
    {
        "name": "weight",
        "displayName": "weight",
        "description": "a manual rescaling weight given to each class. If given, has to be a Tensor of size C.",
        "paramType": "tensor",
        "defaultValue": None,
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["tensor"],
        "allowedDataType": ["tensor"]
    },
    {
        "name": "reduction",
        "displayName": "reduction",
        "description": "Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.",
        "defaultValue": "mean",
        "paramType": "list",
        "valueRange": ["none", "mean", "sum"],
        "uiElemType": "checkbox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["string"],
        "allowedDataType": ["string"]
    },
    {
        "name": "pos_weight",
        "displayName": "pos_weight",
        "description": "a weight of positive examples. Must be a vector with length equal to the number of classes.",
        # Fix: defaultValue was "mean", clearly copy-pasted from "reduction".
        # A tensor parameter defaults to None like "weight" above, matching
        # torch.nn.BCEWithLogitsLoss(pos_weight=None).
        "defaultValue": None,
        "paramType": "tensor",
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["tensor"],
        "allowedDataType": ["tensor"]
    },
]
# UI metadata for torch.nn.SoftMarginLoss parameters (reduction only).
PT_LOSS_SoftMarginLoss_PARAMETERS = [
    {
        "name": "reduction",
        "displayName": "reduction",
        "description": "Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.",
        "defaultValue": "mean",
        "paramType": "list",
        "valueRange": ["none", "mean", "sum"],
        "uiElemType": "checkbox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["string"],
        "allowedDataType": ["string"]
    }
]
# Master list of supported PyTorch loss functions with their parameter
# metadata. list(...) replaces the redundant `[obj for obj in ...]` identity
# comprehensions; both make the same shallow copy of each parameter list.
SKLEARN_ML_SUPPORTED_PT_LOSS_PARAMS = [
    {"name": "CrossEntropyLoss", "selected": False, "displayName": "CrossEntropyLoss",
     "parameters": list(PT_LOSS_CrossEntropyLoss_PARAMETERS)},
    {"name": "CTCLoss", "selected": False, "displayName": "CTCLoss",
     "parameters": list(PT_LOSS_CTCLoss_PARAMETERS)},
    {"name": "NLLLoss", "selected": False, "displayName": "NLLLoss",
     "parameters": list(PT_LOSS_NLLLoss_PARAMETERS)},
    {"name": "PoissonNLLLoss", "selected": False, "displayName": "PoissonNLLLoss",
     "parameters": list(PT_LOSS_PoissonNLLLoss_PARAMETERS)},
    {"name": "BCELoss", "selected": False, "displayName": "BCELoss",
     "parameters": list(PT_LOSS_BCELoss_PARAMETERS)},
    {"name": "BCEWithLogitsLoss", "selected": False, "displayName": "BCEWithLogitsLoss",
     "parameters": list(PT_LOSS_BCEWithLogitsLoss_PARAMETERS)},
    {"name": "SoftMarginLoss", "selected": False, "displayName": "SoftMarginLoss",
     "parameters": list(PT_LOSS_SoftMarginLoss_PARAMETERS)}
]
# L1 regularization strength, exposed as a single tunable parameter.
SKLEARN_ML_SUPPORTED_PT_L1_REGULARIZER_PARAMETERS = [
    {
        "name": "l1_decay",
        "selected": False,
        "displayName": "l1_decay",
        "description": "l1 decay.",
        "defaultValue": 0.0,
        "valueRange": [0, 1],
        "paramType": "number",
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["float"],
        "allowedDataType": ["float"]
    },
]
# L2 regularization strength, exposed as a single tunable parameter.
SKLEARN_ML_SUPPORTED_PT_L2_REGULARIZER_PARAMETERS = [
    {
        "name": "l2_decay",
        "selected": False,
        "displayName": "l2_decay",
        # Fix: user-facing description typo "dacay" -> "decay".
        "description": "l2 decay.",
        "defaultValue": 0.0,
        "valueRange": [0, 1],
        "paramType": "number",
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["float"],
        "allowedDataType": ["float"]
    },
]
# Supported regularizers, each paired with a shallow copy of its parameter
# list (list(...) replaces the redundant identity comprehensions).
SKLEARN_ML_SUPPORTED_PT_REGULARIZER_PARAMETERS = [
    {"name": "l1_regularizer", "selected": False, "displayName": "l1_regularizer",
     "parameters": list(SKLEARN_ML_SUPPORTED_PT_L1_REGULARIZER_PARAMETERS)},
    {"name": "l2_regularizer", "selected": False, "displayName": "l2_regularizer",
     "parameters": list(SKLEARN_ML_SUPPORTED_PT_L2_REGULARIZER_PARAMETERS)},
]
# Top-level UI parameter schema for the PyTorch classification model:
# layer topology, loss, optimizer, regularizer, plus training controls.
# list(...) replaces the redundant `[obj for obj in ...]` identity
# comprehensions; the copies are shallow, as before.
SKLEARN_ML_PYTORCH_CLASSIFICATION_PARAMS = [
    {
        "name": "layer",
        "displayName": "Layer",
        "description": "A layer is a class implementing common Neural Networks Operations, such as convolution, batch norm, etc.",
        "defaultValue": list(SKLEARN_ML_SUPPORTED_PT_LAYER),
        "acceptedValue": None,
        "valueRange": None,
        "paramType": "list",
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["string"],
        "allowedDataType": ["string"]
    },
    {
        "name": "loss",
        "displayName": "Loss",
        "description": "The function used to evaluate the candidate solution (i.e. a set of weights).",
        "defaultValue": list(SKLEARN_ML_SUPPORTED_PT_LOSS_PARAMS),
        "acceptedValue": None,
        "valueRange": None,
        "paramType": "list",
        "uiElemType": "checkbox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["string"],
        "allowedDataType": ["string"]
    },
    {
        "name": "optimizer",
        "displayName": "Optimizer",
        "description": "Method used to minimize the loss function.",
        "defaultValue": list(SKLEARN_ML_SUPPORTED_PT_OPTIMIZER_PARAMETERS),
        "acceptedValue": None,
        "valueRange": None,
        "paramType": "list",
        "uiElemType": "checkbox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["string"],
        "allowedDataType": ["string"]
    },
    {
        "name": "regularizer",
        "displayName": "regularizer",
        "description": "Regularizer function.",
        "defaultValue": list(SKLEARN_ML_SUPPORTED_PT_REGULARIZER_PARAMETERS),
        "acceptedValue": None,
        "valueRange": None,
        "paramType": "list",
        "uiElemType": "checkbox",
        "display": True,
        "hyperpatameterTuningCandidate": True,
        "expectedDataType": ["string"],
        "allowedDataType": ["string"]
    },
    {
        "name": "batch_size",
        "displayName": "Batch Size",
        "description": "The number of training examples in one Forward/Backward Pass.",
        # NOTE(review): defaultValue 0 looks like a placeholder (a batch size
        # of 0 is not usable) — confirm the intended default with the consumer.
        "defaultValue": 0,
        "acceptedValue": None,
        "valueRange": [0, 100],
        "paramType": "number",
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": False,
        "expectedDataType": ["int"],
        "allowedDataType": ["int"]
    },
    {
        "name": "number_of_epochs",
        "displayName": "Number of Epochs",
        "description": "An epoch refers to one cycle through the full training data-set.",
        "defaultValue": 100,
        "acceptedValue": None,
        "valueRange": None,
        "paramType": "number",
        "uiElemType": "textBox",
        "display": True,
        "hyperpatameterTuningCandidate": False,
        "expectedDataType": ["int"],
        "allowedDataType": ["int"]
    }
]
| [
"Srinidhi.Anantharamu@gmail.com"
] | Srinidhi.Anantharamu@gmail.com |
d63a6f917f3b4ae9bc5527725f1ef87f2016b0e7 | e0a3c60948e9b89e4c3e96939fafabe24dbdc8cb | /153. Find Minimum in Rotated Sorted Array/Find_Minimum_in_Rotated_Sorted_Array.py | 7d6dcd66d3989ce52798bdfba04520dcf6d70750 | [] | no_license | hyang012/leetcode-algorithms-questions | 5366f71c4c50e76b2ee27de55ad38010c402374e | dcf84809b80bcd8349f5be87536e212c6d45ba88 | refs/heads/master | 2021-07-15T21:33:56.606145 | 2018-12-02T05:46:58 | 2018-12-02T05:46:58 | 129,002,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Leetcode 153. Find Minimum in Rotated Sorted Array
Suppose an array sorted in ascending order is rotated at some pivot unknown
to you beforehand.
Find the minimum element.
You may assume no duplicate exists in the array.
"""
def findMin(nums):
    """
    :type nums: List[int]
    :rtype: int
    """
    if not nums:
        # NOTE(review): empty input yields [] (not an int); kept as-is for
        # backward compatibility with existing callers.
        return []
    lo, hi = 0, len(nums) - 1
    while lo < hi:
        mid = (lo + hi) // 2
        if nums[lo] <= nums[mid] < nums[hi]:
            # Current window is already sorted ascending, so its first
            # element is the minimum.
            return nums[lo]
        if nums[lo] > nums[mid]:
            # Rotation point lies in the left half (mid included).
            hi = mid
        else:
            # Rotation point lies strictly to the right of mid.
            lo = mid + 1
    return nums[lo]
# Ad-hoc smoke checks: expected output is 1 and 0 for these rotated arrays.
print(findMin([3, 4, 5, 1, 2]))
print(findMin([4, 5, 6, 7, 0, 1, 2]))
"Derek@hongfei-mbp.attlocal.net"
] | Derek@hongfei-mbp.attlocal.net |
9087056cacb16dc0ef9ef078ac17d911f34b3807 | 3616fa9c7fe5b82ada659a373f55b5783a50011c | /find_button_in_a_sample.py | a0d2de293abaed09c9e6632a4a61223bb6d1cb3d | [] | no_license | Mihail12/agileengine_test | 4cd094cc83989f84f1d25f53640ac3e0181262ad | 720928807dc4f180dd01bfbe4db481030eb37b10 | refs/heads/master | 2022-06-10T03:13:01.355202 | 2020-05-07T15:06:11 | 2020-05-07T15:06:11 | 262,081,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,064 | py | import difflib
import re
import sys
from bs4 import BeautifulSoup
def read_file(path):
    """Return the entire contents of the file at *path* as a string."""
    with open(path, 'r') as handle:
        contents = handle.read()
    return contents
def similarity(normalized1, s2):
    """Case-insensitive similarity ratio in [0, 1] between *normalized1*
    (assumed already lower-cased by the caller) and *s2*."""
    return difflib.SequenceMatcher(None, normalized1, s2.lower()).ratio()
def get_max_similar(similarity_gen):
    """Scan (score, value) pairs and return the pair with the highest
    strictly-positive score; (0, '') when the iterable is empty or every
    score is 0. On ties the earliest pair wins."""
    best_score, best_value = 0, ''
    for score, value in similarity_gen:
        if score > best_score:
            best_score, best_value = score, value
    return best_score, best_value
def find_in_sample(html_origin, html_sample):
    """Find, in *html_sample*, the tag most similar to the element with
    id="make-everything-ok-button" in *html_origin*.

    Returns the best-matching tag string, or '' when the button cannot be
    found in the origin document (or no candidate exists in the sample).
    """
    origin_button = re.findall(r'<.+\s+?id=[\"\']make-everything-ok-button[\"\'][\s\S]*?>[\s\S]*?</.+>', html_origin)
    if not origin_button:
        # Fix: previously fell through with an empty tag string and crashed
        # with IndexError when extracting the tag name below.
        return ''
    origin_button_tag = origin_button[0]
    tag_name = re.findall(r'<(\w+)', origin_button_tag)[0]
    # All elements in the sample with the same tag name are candidates.
    all_buttons = re.findall(rf'<{tag_name}[\s\S]+?>[\s\S]*?</{tag_name}>', html_sample)
    normalized_origin_button = origin_button_tag.lower()
    similarity_gen = ((similarity(normalized_origin_button, b), b) for b in all_buttons)
    _, tag = get_max_similar(similarity_gen)
    return tag
def xpath_soup(element):
    """Build the absolute XPath of a BeautifulSoup *element* by walking up
    its parents, adding a 1-based positional index whenever the node has
    same-named siblings."""
    parts = []
    node = element if element.name else element.parent
    for ancestor in node.parents:
        same_named = ancestor.find_all(node.name, recursive=False)
        if same_named == [node]:
            # Unique among its siblings: no index needed.
            parts.append(node.name)
        else:
            parts.append('%s[%d]' % (node.name, 1 + same_named.index(node)))
        node = ancestor
    parts.reverse()
    return '/%s' % '/'.join(parts)
def get_xpath_to_tag(html, tag):
    """Return the XPath of the element serialized as *tag* within *html*."""
    tag_name = re.findall(r'<(\w+)', tag)[0]
    # Fix: look up the element by its actual tag name instead of the
    # hard-coded 'a', which raised AttributeError (find() -> None) whenever
    # the matched button was not an anchor element.
    tag_attrs = BeautifulSoup(tag, 'html.parser').find(tag_name).attrs
    bs = BeautifulSoup(html, 'html.parser').find(tag_name, tag_attrs)
    return xpath_soup(bs)
if __name__ == '__main__':
    # Usage: python find_button_in_a_sample.py <origin.html> <sample.html>
    path_to_origin = sys.argv[1]
    html_origin = read_file(path_to_origin)
    path_to_sample = sys.argv[2]
    html_sample = read_file(path_to_sample)
    tag = find_in_sample(html_origin, html_sample)
    # NOTE(review): sys.exit(str) prints the xpath to stderr and exits with
    # status 1 — presumably intended as the script's output channel; confirm
    # callers rely on this before changing it to print().
    sys.exit(get_xpath_to_tag(html_sample, tag))
| [
"spasenkomihail@gmail.com"
] | spasenkomihail@gmail.com |
3d8932294108b58d955a6a001dde9f5ab2567032 | 23ae37ef3fe4dad5fae6bbcd5265cb9dfa5ff032 | /assignment1/cs231n/classifiers/k_nearest_neighbor.py | d805a9ea2b3ef27d7bb42d53c385cc01d2d27f1f | [
"BSD-2-Clause"
] | permissive | ZhiquanW/CS231n-Coursework | 82890ee8999d1e34b98cf9533d14c1e83b576423 | 286089692561c12a82724487ea27db68920fc1f3 | refs/heads/master | 2022-12-09T07:11:37.839763 | 2020-02-26T19:18:28 | 2020-02-26T19:18:28 | 207,199,382 | 0 | 0 | BSD-2-Clause | 2022-12-08T05:21:37 | 2019-09-09T01:31:59 | Jupyter Notebook | UTF-8 | Python | false | false | 9,018 | py | from builtins import range
from builtins import object
import numpy as np
from past.builtins import xrange
class KNearestNeighbor(object):
    """A k-nearest-neighbor classifier using L2 (Euclidean) distance."""

    def __init__(self):
        pass

    def train(self, X, y):
        """
        Train the classifier. For k-nearest neighbors this is just
        memorizing the training data.

        Inputs:
        - X: A numpy array of shape (num_train, D) containing the training data
          consisting of num_train samples each of dimension D.
        - y: A numpy array of shape (N,) containing the training labels, where
             y[i] is the label for X[i].
        """
        self.X_train = X
        self.y_train = y

    def predict(self, X, k=1, num_loops=0):
        """
        Predict labels for test data using this classifier.

        Inputs:
        - X: A numpy array of shape (num_test, D) containing test data consisting
             of num_test samples each of dimension D.
        - k: The number of nearest neighbors that vote for the predicted labels.
        - num_loops: Determines which implementation to use to compute distances
          between training points and testing points (0, 1 or 2 explicit loops;
          all three produce identical results).

        Returns:
        - y: A numpy array of shape (num_test,) containing predicted labels for the
          test data, where y[i] is the predicted label for the test point X[i].

        Raises:
        - ValueError: if num_loops is not 0, 1 or 2.
        """
        if num_loops == 0:
            dists = self.compute_distances_no_loops(X)
        elif num_loops == 1:
            dists = self.compute_distances_one_loop(X)
        elif num_loops == 2:
            dists = self.compute_distances_two_loops(X)
        else:
            raise ValueError('Invalid value %d for num_loops' % num_loops)

        return self.predict_labels(dists, k=k)

    def compute_distances_two_loops(self, X):
        """
        Compute the distance between each test point in X and each training point
        in self.X_train using a nested loop over both the training data and the
        test data.

        Inputs:
        - X: A numpy array of shape (num_test, D) containing test data.

        Returns:
        - dists: A numpy array of shape (num_test, num_train) where dists[i, j]
          is the Euclidean distance between the ith test point and the jth training
          point.
        """
        num_test = X.shape[0]
        num_train = self.X_train.shape[0]
        dists = np.zeros((num_test, num_train))
        for i in range(num_test):
            for j in range(num_train):
                # L2 distance between the ith test point and jth training point.
                dists[i, j] = np.sqrt(np.sum(np.square(X[i, :] - self.X_train[j, :])))
        return dists

    def compute_distances_one_loop(self, X):
        """
        Compute the distance between each test point in X and each training point
        in self.X_train using a single loop over the test data.

        Input / Output: Same as compute_distances_two_loops
        """
        num_test = X.shape[0]
        num_train = self.X_train.shape[0]
        dists = np.zeros((num_test, num_train))
        for i in range(num_test):
            # Broadcast X[i] against every training row at once; the row-wise
            # sum collapses the feature dimension.
            dists[i] = np.sqrt(np.sum(np.square(self.X_train - X[i, :]), axis=1))
        return dists

    def compute_distances_no_loops(self, X):
        """
        Compute the distance between each test point in X and each training point
        in self.X_train using no explicit loops.

        Uses the expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, with two
        broadcast sums and one matrix multiplication.

        Input / Output: Same as compute_distances_two_loops
        """
        # Column vector of squared test norms + row vector of squared train
        # norms - 2 * cross terms, broadcast to (num_test, num_train).
        dists = np.sqrt(
            np.transpose([np.sum(np.square(X), axis=1)])
            + np.sum(np.square(self.X_train), axis=1)
            - 2 * np.dot(X, self.X_train.T)
        )
        return dists

    def predict_labels(self, dists, k=1):
        """
        Given a matrix of distances between test points and training points,
        predict a label for each test point.

        Inputs:
        - dists: A numpy array of shape (num_test, num_train) where dists[i, j]
          gives the distance between the ith test point and the jth training point.
        - k: The number of nearest neighbors that vote for the prediction.

        Returns:
        - y: A numpy array of shape (num_test,) containing predicted labels for the
          test data, where y[i] is the predicted label for the test point X[i].
        """
        num_test = dists.shape[0]
        y_pred = np.zeros(num_test)
        for i in range(num_test):
            # Labels of the k nearest training points to test point i.
            closest_y = self.y_train[np.argsort(dists[i, :])[:k]]
            # Majority vote; np.argmax returns the smallest label on ties,
            # which is the tie-breaking rule required here.
            y_pred[i] = np.argmax(np.bincount(closest_y))
        return y_pred
| [
"zhiquan.wzq@gmail.com"
] | zhiquan.wzq@gmail.com |
144f59685bb10c3354166f6418c4dafff8ef54e3 | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/ospf/actxpol.py | eb68c9228b8ae67d290caf769569822d2309626a | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 10,769 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class ACtxPol(Mo):
meta = ClassMeta("cobra.model.ospf.ACtxPol")
meta.isAbstract = True
meta.moClassName = "ospfACtxPol"
meta.moClassName = "ospfACtxPol"
meta.rnFormat = ""
meta.category = MoCategory.REGULAR
meta.label = "Abstraction of OSPF Context Policy"
meta.writeAccessMask = 0x0
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.superClasses.add("cobra.model.fabric.L3CtxPol")
meta.superClasses.add("cobra.model.fabric.ProtoPol")
meta.superClasses.add("cobra.model.fabric.ProtoDomPol")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.pol.Def")
meta.superClasses.add("cobra.model.fabric.L3DomPol")
meta.concreteSubClasses.add("cobra.model.ospf.CtxDef")
meta.concreteSubClasses.add("cobra.model.ospf.CtxDefAf")
meta.concreteSubClasses.add("cobra.model.ospf.CtxPol")
meta.rnPrefixes = [
]
prop = PropMeta("str", "bwRef", "bwRef", 1089, PropCategory.REGULAR)
prop.label = "Bandwidth Preference"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 4000000)]
prop.defaultValue = 40000
prop.defaultValueStr = "40000"
meta.props.add("bwRef", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "ctrl", "ctrl", 22755, PropCategory.REGULAR)
prop.label = "Control knobs"
prop.isConfig = True
prop.isAdmin = True
prop.defaultValue = 0
prop._addConstant("name-lookup", "enable-name-lookup-for-router-ids", 2)
prop._addConstant("pfx-suppress", "prefix-suppression", 1)
meta.props.add("ctrl", prop)
prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dist", "dist", 1087, PropCategory.REGULAR)
prop.label = "Distance Preference"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 255)]
prop.defaultValue = 110
prop.defaultValueStr = "110"
meta.props.add("dist", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "grCtrl", "grCtrl", 1098, PropCategory.REGULAR)
prop.label = "Graceful Restart Controls"
prop.isConfig = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "helper"
prop._addConstant("helper", "graceful-restart-helper", 1)
meta.props.add("grCtrl", prop)
prop = PropMeta("str", "lsaArrivalIntvl", "lsaArrivalIntvl", 1094, PropCategory.REGULAR)
prop.label = "Min Arrival Interval"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(10, 600000)]
prop.defaultValue = 1000
prop.defaultValueStr = "1000"
meta.props.add("lsaArrivalIntvl", prop)
prop = PropMeta("str", "lsaGpPacingIntvl", "lsaGpPacingIntvl", 1093, PropCategory.REGULAR)
prop.label = "Pacing Interval"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 1800)]
prop.defaultValue = 10
prop.defaultValueStr = "10"
meta.props.add("lsaGpPacingIntvl", prop)
prop = PropMeta("str", "lsaHoldIntvl", "lsaHoldIntvl", 1096, PropCategory.REGULAR)
prop.label = "Throttle Hold Interval"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(50, 30000)]
prop.defaultValue = 5000
prop.defaultValueStr = "5000"
meta.props.add("lsaHoldIntvl", prop)
prop = PropMeta("str", "lsaMaxIntvl", "lsaMaxIntvl", 1097, PropCategory.REGULAR)
prop.label = "Throttle Max Interval"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(50, 30000)]
prop.defaultValue = 5000
prop.defaultValueStr = "5000"
meta.props.add("lsaMaxIntvl", prop)
prop = PropMeta("str", "lsaStartIntvl", "lsaStartIntvl", 1095, PropCategory.REGULAR)
prop.label = "Throttle Start Wait Interval"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 5000)]
prop.defaultValue = 0
prop.defaultValueStr = "0"
meta.props.add("lsaStartIntvl", prop)
prop = PropMeta("str", "maxEcmp", "maxEcmp", 1088, PropCategory.REGULAR)
prop.label = "Max ECMP"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 64)]
prop.defaultValue = 8
prop.defaultValueStr = "8"
meta.props.add("maxEcmp", prop)
prop = PropMeta("str", "maxLsaAction", "maxLsaAction", 17808, PropCategory.REGULAR)
prop.label = "Action"
prop.isConfig = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "reject"
prop._addConstant("log", "log", 2)
prop._addConstant("reject", "reject", 0)
prop._addConstant("restart", "restart", 1)
meta.props.add("maxLsaAction", prop)
prop = PropMeta("str", "maxLsaNum", "maxLsaNum", 17803, PropCategory.REGULAR)
prop.label = "Maximum # of non self-generated LSAs"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 4294967295)]
prop.defaultValue = 20000
prop.defaultValueStr = "20000"
meta.props.add("maxLsaNum", prop)
prop = PropMeta("str", "maxLsaResetIntvl", "maxLsaResetIntvl", 17807, PropCategory.REGULAR)
prop.label = "Reset Interval"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 1440)]
prop.defaultValue = 10
prop.defaultValueStr = "10"
meta.props.add("maxLsaResetIntvl", prop)
prop = PropMeta("str", "maxLsaSleepCnt", "maxLsaSleepCnt", 17805, PropCategory.REGULAR)
prop.label = "Sleep Count"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 4294967295)]
prop.defaultValue = 5
prop.defaultValueStr = "5"
meta.props.add("maxLsaSleepCnt", prop)
prop = PropMeta("str", "maxLsaSleepIntvl", "maxLsaSleepIntvl", 17806, PropCategory.REGULAR)
prop.label = "Sleep Interval"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 1440)]
prop.defaultValue = 5
prop.defaultValueStr = "5"
meta.props.add("maxLsaSleepIntvl", prop)
prop = PropMeta("str", "maxLsaThresh", "maxLsaThresh", 17804, PropCategory.REGULAR)
prop.label = "Threshold"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 100)]
prop.defaultValue = 75
prop.defaultValueStr = "75"
meta.props.add("maxLsaThresh", prop)
prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerKey", prop)
prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerTag", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "spfHoldIntvl", "spfHoldIntvl", 1091, PropCategory.REGULAR)
prop.label = "Max Hold Interval"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 600000)]
prop.defaultValue = 1000
prop.defaultValueStr = "1000"
meta.props.add("spfHoldIntvl", prop)
prop = PropMeta("str", "spfInitIntvl", "spfInitIntvl", 1090, PropCategory.REGULAR)
prop.label = "Initial Delay Interval"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 600000)]
prop.defaultValue = 200
prop.defaultValueStr = "200"
meta.props.add("spfInitIntvl", prop)
prop = PropMeta("str", "spfMaxIntvl", "spfMaxIntvl", 1092, PropCategory.REGULAR)
prop.label = "Min Wait Time"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 600000)]
prop.defaultValue = 5000
prop.defaultValueStr = "5000"
meta.props.add("spfMaxIntvl", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"rrishike@cisco.com"
] | rrishike@cisco.com |
8f10397edc7ab65ffd5da0b053bac750e2a65976 | 552556631580799b16d0fb31e8f10850383ef3b2 | /ex3/outputs/hmmer/hmmer.DW_32-WS_384.out/info.py | ac221e133c77455ceb86e9ee7263f6d24f0a78b6 | [] | no_license | gregth/NTUA-advcomparch | f19ee414f8b77f749a09f263feb980350f88880d | bc501f427ddf1423f851ce1a052dc335183c5103 | refs/heads/master | 2022-11-14T20:11:49.035503 | 2020-06-27T09:17:43 | 2020-06-27T09:17:43 | 262,262,423 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 18,779 | py | power = {'BUSES': {'Area': 6.71959,
'Bus/Area': 6.71959,
'Bus/Gate Leakage': 0.0180267,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.187981,
'Bus/Subthreshold Leakage with power gating': 0.070493,
'Gate Leakage': 0.0180267,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.187981,
'Subthreshold Leakage with power gating': 0.070493},
'Core': [{'Area': 1204.13,
'Execution Unit/Area': 1132.45,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 4.60328,
'Execution Unit/Instruction Scheduler/Area': 1094.64,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.344008,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00151512,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.76857,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 1.20329,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0203725,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.0110394,
'Execution Unit/Instruction Scheduler/Gate Leakage': 4.20257,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 65.9732,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.340924,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 132.987,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 12.0751,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 5.16833,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 2.94537,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 49009.7,
'Execution Unit/Instruction Scheduler/ROB/Area': 1028.32,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 3.86013,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 48875.0,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 3945.62,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 45.9077,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 17.2608,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 3958.9,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 51.0964,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 20.2172,
'Execution Unit/Integer ALUs/Area': 3.76696,
'Execution Unit/Integer ALUs/Gate Leakage': 0.212233,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.840147,
'Execution Unit/Integer ALUs/Runtime Dynamic': 1.03156,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 3.21776,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 1.20666,
'Execution Unit/Peak Dynamic': 49013.1,
'Execution Unit/Register Files/Area': 28.3565,
'Execution Unit/Register Files/Floating Point RF/Area': 9.63068,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.00825445,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.193132,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.128663,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.0497709,
'Execution Unit/Register Files/Gate Leakage': 0.026159,
'Execution Unit/Register Files/Integer RF/Area': 18.7258,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.0179046,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 1.57228,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 1.60801,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.273097,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.103541,
'Execution Unit/Register Files/Peak Dynamic': 1.57228,
'Execution Unit/Register Files/Runtime Dynamic': 1.80115,
'Execution Unit/Register Files/Subthreshold Leakage': 0.40176,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.153311,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.436407,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.0569945,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.845842,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 22.6045,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.864121,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.324045,
'Execution Unit/Runtime Dynamic': 3984.84,
'Execution Unit/Subthreshold Leakage': 56.9352,
'Execution Unit/Subthreshold Leakage with power gating': 22.4095,
'Gate Leakage': 5.24531,
'Instruction Fetch Unit/Area': 21.8028,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00137507,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00137507,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00118832,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000454898,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00340304,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0073415,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0135185,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.116748,
'Instruction Fetch Unit/Instruction Buffer/Area': 2.64509,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 0.0346434,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 497.007,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 23.3152,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.290984,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.110014,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.446139,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 14.8639,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 10.9923,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.773492,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 517.199,
'Instruction Fetch Unit/Runtime Dynamic': 24.5557,
'Instruction Fetch Unit/Subthreshold Leakage': 1.36137,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.5701,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0585066,
'L2/Runtime Dynamic': 0.0397313,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 9.12408,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 8.91832,
'Load Store Unit/Data Cache/Runtime Dynamic': 4.79319,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0582639,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.248505,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.319138,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 12.3086,
'Load Store Unit/Runtime Dynamic': 6.68621,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.612772,
'Load Store Unit/StoreQ/Runtime Dynamic': 1.57388,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.730943,
'Load Store Unit/Subthreshold Leakage with power gating': 0.335652,
'Memory Management Unit/Area': 0.74897,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.217475,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.280163,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.0312611,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0731451,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 3.14649,
'Memory Management Unit/Runtime Dynamic': 0.353309,
'Memory Management Unit/Subthreshold Leakage': 0.216232,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0921915,
'Peak Dynamic': 56715.0,
'Renaming Unit/Area': 31.0758,
'Renaming Unit/FP Front End RAT/Area': 0.284555,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00465468,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 23.4847,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0482834,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0275216,
'Renaming Unit/Free List/Area': 8.22312,
'Renaming Unit/Free List/Gate Leakage': 0.00130004,
'Renaming Unit/Free List/Peak Dynamic': 7.12809,
'Renaming Unit/Free List/Runtime Dynamic': 1.00316,
'Renaming Unit/Free List/Subthreshold Leakage': 0.0311556,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.0167994,
'Renaming Unit/Gate Leakage': 0.296443,
'Renaming Unit/Int Front End RAT/Area': 22.2087,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.264049,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 7136.03,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 498.726,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 3.48901,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 1.98873,
'Renaming Unit/Peak Dynamic': 7169.17,
'Renaming Unit/Runtime Dynamic': 499.729,
'Renaming Unit/Subthreshold Leakage': 3.72773,
'Renaming Unit/Subthreshold Leakage with power gating': 2.09279,
'Runtime Dynamic': 4516.21,
'Subthreshold Leakage': 65.6835,
'Subthreshold Leakage with power gating': 26.6055}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 1.4122186516949031,
'Runtime Dynamic': 1.4122186516949031,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.0752572,
'Runtime Dynamic': 0.0809061,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 1272.76,
'Gate Leakage': 5.31175,
'Peak Dynamic': 56715.1,
'Peak Power': 56793.1,
'Runtime Dynamic': 4516.29,
'Subthreshold Leakage': 72.6723,
'Subthreshold Leakage with power gating': 30.5195,
'Total Cores/Area': 1204.13,
'Total Cores/Gate Leakage': 5.24531,
'Total Cores/Peak Dynamic': 56715.0,
'Total Cores/Runtime Dynamic': 4516.21,
'Total Cores/Subthreshold Leakage': 65.6835,
'Total Cores/Subthreshold Leakage with power gating': 26.6055,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.0752572,
'Total L3s/Runtime Dynamic': 0.0809061,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 77.9841,
'Total NoCs/Area': 6.71959,
'Total NoCs/Gate Leakage': 0.0180267,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.187981,
'Total NoCs/Subthreshold Leakage with power gating': 0.070493}} | [
"gregthanasoulas@gmail.com"
] | gregthanasoulas@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.