blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
58d24c74d7b413b4c84424a01795b23245494e1c | 5a7be7d5cc921062582c8e5f014e53da9bf541b0 | /Tasks_code/task_add_primary_date.py | c96fc00566d5e5423eebd1010a6961ae0ff6677f | [] | no_license | UdaikaranSingh/zillow_analysis | 93efc0a4ce4861ecc09e451a50c468a17fee869c | a4d74514ae90bd10937b004ab8ed78aec1d64fa5 | refs/heads/master | 2020-08-29T18:04:50.907553 | 2019-11-27T20:03:17 | 2019-11-27T20:03:17 | 218,121,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,115 | py | import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
from ZillowUnivariate.univ_stats import SingleTableReader
def program(fips_code, gen_dir = os.getcwd()):
    """Annotate the reduced transaction table with primary-date info.

    Reads the ZTrans "Main" table for the given FIPS code, marks which
    of DocumentDate / SignatureDate / RecordingDate acts as the primary
    date for each transaction, joins the precomputed PrimaryDate column
    onto the reduced table, and rewrites reduced_table.csv in gen_dir.

    NOTE(review): ``gen_dir = os.getcwd()`` is evaluated once at import
    time; pass gen_dir explicitly if the working directory can change.
    """
    file_dir = fips_code + "/"
    transaction_main_table = SingleTableReader("ZTrans", "Main", file_dir).read(
        usecols =["TransId", "DocumentDate", "SignatureDate", "RecordingDate"])

    def get_primary_date(row):
        # 0 = DocumentDate present, 1 = fall back to SignatureDate,
        # 2 = fall back to RecordingDate.
        if not pd.isna(row['DocumentDate']):
            return 0
        elif not pd.isna(row['SignatureDate']):
            return 1
        else:
            return 2

    primary_dates_marker = transaction_main_table.apply(get_primary_date, axis = 1)

    primary_dates = pd.read_csv(os.path.join(gen_dir, "primary_dates.csv"),
                        usecols = ['TransId', 'PrimaryDate'])
    main_table = pd.read_csv(os.path.join(gen_dir, "reduced_table.csv"))
    main_table['primary_dates_marker'] = primary_dates_marker
    # Bug fix: the original merged primary_dates with *itself*, which only
    # produced duplicated PrimaryDate_x/PrimaryDate_y columns. Join the
    # PrimaryDate column straight onto the reduced table instead.
    fin_df = main_table.merge(primary_dates, how = "left", on = "TransId")
    path = os.path.join(gen_dir, "reduced_table.csv")
    fin_df.to_csv(path)
| [
"udaisingh@Udaikarans-MacBook-Pro.local"
] | udaisingh@Udaikarans-MacBook-Pro.local |
c29e4fcb17ba98010f15b65b0383c453ae095f67 | 4ee2ebef215cf879aafdfa44221f52d82775176a | /Inheritance/Exercise/02-Zoo/project/reptile.py | 75f13a08a48e503a16c38a736c1bf215ce43adcd | [] | no_license | Avstrian/SoftUni-Python-OOP | d2a9653863cba7bc095e647cd3f0561377f10f6d | 6789f005b311039fd46ef1f55f3eb6fa9313e5a6 | refs/heads/main | 2023-08-01T09:31:38.099842 | 2021-08-24T04:21:38 | 2021-08-24T04:21:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | from project.animal import Animal
class Reptile(Animal):
    """A reptile; behaves exactly like its Animal base class.

    The previous explicit ``__init__`` only forwarded ``name`` to
    ``Animal.__init__`` — a useless super delegation (pylint W0235) —
    so it was removed and the inherited constructor is used instead.
    """
| [
"noreply@github.com"
] | noreply@github.com |
9ca7a052e7117038353576d0ec3d66ac59d833ae | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_1_neat/16_0_1_k_alkiek_countingsheep.py | f323c616326819f23c2e79d41df1a4a52585b9c4 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 736 | py | T = int(raw_input()) # read number of cases
# All ten decimal digits; the sheep-counting loop runs until each has appeared.
nums = [0,1,2,3,4,5,6,7,8,9]
def elementsofin(L, ref): #function to compare seen with all numbers
    """Return True when every element of ref occurs somewhere in L.

    The original built an order-preserving filtered copy of ref and
    compared it to ref itself; a set subset test is equivalent (order
    and duplicates in ref cannot change the outcome) and avoids the
    O(len(ref) * len(L)) membership scans.
    """
    return set(ref).issubset(L)
for i in xrange(1, T + 1):
    N = int(raw_input()) # read chosen N
    if N == 0:
        # Every multiple of 0 is 0, so digits 1-9 can never appear.
        output = "INSOMNIA"
    else:
        seen = []
        z=0
        # Count N, 2N, 3N, ... until all ten digits have been observed.
        while not(elementsofin(seen,nums)):
            z+=1
            listofN = map(int, str(N*z)) # convert product into an array
            for j in listofN: #add digits of product as seen numbers
                seen.append(j)
        # The last number named before falling asleep.
        output = N*z
    print "Case #{}: {}".format(i, output)
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
63229fcec11be87eb0dbe3e74dc61d9c35ae0353 | f17dcee16ca0302fc703dbe04b45d35d68747f97 | /Exam/Qn_4.py | 154d4cef547e3ffee7c4797c7c0a0619a241cb0e | [] | no_license | Vineeth-97/Comp_phys_TIFR | 3ff124278d689909487d9561878f96869990e803 | 6a7b1de15d2436d269a5f01fbccae1b88478f4c3 | refs/heads/master | 2021-12-14T19:26:18.599190 | 2021-11-21T06:32:24 | 2021-11-21T06:32:24 | 243,167,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | import numpy as np
import matplotlib.pyplot as plt
# Draw 1024 i.i.d. samples from U(0, 1) and plot the raw sequence.
x = np.random.uniform(0.0, 1.0, 1024)
plt.plot(x)
plt.xlabel('Sample')
plt.ylabel('Value')
plt.title('Uniform distribution of Random Numbers')
plt.show()
# Power spectrum of the white-noise sequence via FFT.
# NOTE(review): Fs is passed to fftfreq as the sample *spacing*
# (1/1024), i.e. this presumably assumes a 1024 Hz sample rate -- confirm.
Fs = 1/1024
ps = np.abs(np.fft.fft(x))**2
freqs = np.fft.fftfreq(x.size, Fs)
k = 2*np.pi*freqs # angular frequency axis
idx = np.argsort(k) # sort so the spectrum plots monotonically in k
plt.plot(k[idx], ps[idx])
plt.show()
# Report the extremes of the angular-frequency axis.
print(k.max(), k.min())
| [
"vineethbannu@gmail.com"
] | vineethbannu@gmail.com |
9cdbaac23eae2e713a247e3023bddbc2840fb4b9 | 66c8c43ee01504e7530b020cb468ffb49865673a | /Detection/model/teacher/teacher.py | 83187e2558bac2b11b4a28254b0dec746a9bba60 | [] | no_license | Senwang98/ReviewKD | 26ef3820efce8d1c6d6e74ac5d50b48b14b45bad | cede6ea6387ae9b6127de0e561507177bf19c11e | refs/heads/master | 2023-06-10T23:31:32.694486 | 2021-07-05T12:45:51 | 2021-07-05T12:45:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,060 | py | from detectron2.modeling.backbone import build_backbone
from detectron2.modeling.proposal_generator import build_proposal_generator
from detectron2.modeling.roi_heads import build_roi_heads
from detectron2.checkpoint import DetectionCheckpointer
from torch import nn
class Teacher(nn.Module):
    """Container module holding a teacher's sub-networks for distillation."""

    def __init__(self, backbone, proposal_generator, roi_heads):
        """Register the backbone, proposal generator and ROI heads."""
        super(Teacher, self).__init__()
        # Register each component in the same order as before so the
        # module's child ordering is unchanged.
        for attr_name, component in (("backbone", backbone),
                                     ("proposal_generator", proposal_generator),
                                     ("roi_heads", roi_heads)):
            setattr(self, attr_name, component)
def build_teacher(cfg):
    """Construct a frozen teacher network from ``cfg.TEACHER``.

    Meta-architectures whose name contains 'Retina' are single-stage,
    so they receive no proposal generator or ROI heads; every other
    architecture gets both. All parameters are frozen so the teacher
    is never updated during training.
    """
    teacher_cfg = cfg.TEACHER
    backbone = build_backbone(teacher_cfg)
    proposal_generator = None
    roi_heads = None
    if 'Retina' not in teacher_cfg.MODEL.META_ARCHITECTURE:
        proposal_generator = build_proposal_generator(teacher_cfg, backbone.output_shape())
        roi_heads = build_roi_heads(teacher_cfg, backbone.output_shape())
    teacher = Teacher(backbone, proposal_generator, roi_heads)
    for weight in teacher.parameters():
        weight.requires_grad = False
    return teacher
| [
"pgchen@proj72"
] | pgchen@proj72 |
a17811f686265c67220bd262c90c17b8f6164be8 | 9d6c07f636cbd011c7d5ade8a20ab5807afde6b8 | /Lib/tdb/tests/examples/uniquecalls.py | e934a4b0c3343d48a047ae1b1b06c5662f31d660 | [
"LicenseRef-scancode-jython"
] | permissive | smnbackwards/jython_tdb | 9eb0981bb25b6a965ce472e9158bd0686c2921ec | 6d0e13c0c51b8d92d7b45691998c5928206162b0 | refs/heads/master | 2021-03-27T19:06:01.501366 | 2016-06-06T15:39:09 | 2016-06-06T15:39:09 | 62,494,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | def aaa():
print 'aaa 1'
a = bbb() + eee()
print 'aaa 2'
return a
def bbb():
    # Debugger-trace fixture: calls ccc() then ddd() and returns their sum.
    # NOTE(review): tdb tests may assert on exact lines/output of this
    # file -- confirm before reformatting.
    print 'bbb 1'
    b = ccc() + ddd()
    print 'bbb 2'
    return b
def ccc():
    # Leaf fixture: prints its two trace lines and returns 1.
    print 'ccc 1'
    c = 1
    print 'ccc 2'
    return c
def ddd():
    # Leaf fixture: prints its two trace lines and returns 0.
    print 'ddd 1'
    d = 0
    print 'ddd 2'
    return d
def eee():
    # Leaf fixture: prints its two trace lines and returns 1.
    print 'eee 1'
    e = 1
    print 'eee 2'
    return e
# Drive the fixture call tree; per the leaf returns above,
# the result is (ccc() + ddd()) + eee() = (1 + 0) + 1 = 2.
f = aaa()
print f | [
"Niklas Steidl"
] | Niklas Steidl |
5e0157cbe7967799bd395e9d9038dedcf13957bb | 49a167d942f19fc084da2da68fc3881d44cacdd7 | /kubernetes_asyncio/test/test_v1_scale_io_persistent_volume_source.py | 0ee57636f0970338a9cb0f60be03d7b2ee42a7f5 | [
"Apache-2.0"
] | permissive | olitheolix/kubernetes_asyncio | fdb61323dc7fc1bade5e26e907de0fe6e0e42396 | 344426793e4e4b653bcd8e4a29c6fa4766e1fff7 | refs/heads/master | 2020-03-19T12:52:27.025399 | 2018-06-24T23:34:03 | 2018-06-24T23:34:03 | 136,546,270 | 1 | 0 | Apache-2.0 | 2018-06-24T23:52:47 | 2018-06-08T00:39:52 | Python | UTF-8 | Python | false | false | 1,122 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.10.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import kubernetes_asyncio.client
from kubernetes_asyncio.client.models.v1_scale_io_persistent_volume_source import V1ScaleIOPersistentVolumeSource # noqa: E501
from kubernetes_asyncio.client.rest import ApiException
class TestV1ScaleIOPersistentVolumeSource(unittest.TestCase):
    """V1ScaleIOPersistentVolumeSource unit test stubs"""
    # Generated by swagger-codegen (see the module header); the test body
    # is still a placeholder and must be fleshed out before relying on it.
    def setUp(self):
        # No fixtures required for this stub.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testV1ScaleIOPersistentVolumeSource(self):
        """Test V1ScaleIOPersistentVolumeSource"""
        # FIXME: construct object with mandatory attributes with example values
        # model = kubernetes_asyncio.client.models.v1_scale_io_persistent_volume_source.V1ScaleIOPersistentVolumeSource()  # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this generated test module directly.
    unittest.main()
| [
"tomasz.prus@gmail.com"
] | tomasz.prus@gmail.com |
22dd923e7b80f1c955e94162b923d27992dc7bb4 | b3f7ceb487f72b7cf09b58dcd6f7ac594d229bca | /profiles_api/serializers.py | 735d70fc90121d954155a178fd21dc02de4e86b9 | [
"MIT"
] | permissive | subban358/profiles-rest-api | eff5959f1f1443ed3b3a34ad3992149685543a5f | 79a441adf6449f08aecebd31752c3a952b53f7fa | refs/heads/master | 2022-04-30T15:12:11.594398 | 2020-06-06T02:06:02 | 2020-06-06T02:06:02 | 238,642,560 | 0 | 0 | MIT | 2022-04-22T23:00:48 | 2020-02-06T08:37:24 | Python | UTF-8 | Python | false | false | 1,200 | py | from rest_framework import serializers
from profiles_api import models
class HelloSerializer(serializers.Serializer):
    """ Serializes a name field for testing our APIView """
    # Single free-text field, capped at 10 characters.
    name = serializers.CharField(max_length=10)
class UserProfileSerializer(serializers.ModelSerializer):
    """Serializer for UserProfile objects.

    The password is write-only (never echoed back in responses) and is
    rendered as a password input in the browsable API.
    """
    class Meta:
        model = models.UserProfile
        fields = ('id', 'email', 'name', 'password')
        extra_kwargs = {
            'password': {'write_only': True,
                         'style': {'input_type': 'password'}},
        }

    def create(self, validated_data):
        """Create and return a new user via the custom model manager."""
        return models.UserProfile.objects.create_user(
            email=validated_data['email'],
            name=validated_data['name'],
            password=validated_data['password'],
        )
class ProfileFeedItemSerializer(serializers.ModelSerializer):
    """ Serializes profile feed items """
    class Meta:
        model = models.ProfileFeedItem
        fields = ('id', 'user_profile', 'status_text','created_on')
        # user_profile is read-only so API clients cannot set it directly.
        extra_kwargs = {'user_profile':{'read_only' : True}}
| [
"subhambanerjee138@gmail.com"
] | subhambanerjee138@gmail.com |
c79f737de7690fc52877eb13c4099495de3fe7d9 | 9dba277eeb0d5e9d2ac75e2e17ab5b5eda100612 | /exercises/1901040051/d08/mymodule/try_except.py | 2bab5904e58d40ed0977888a300dca3aa289874d | [] | no_license | shen-huang/selfteaching-python-camp | e8410bfc06eca24ee2866c5d890fd063e9d4be89 | 459f90c9f09bd3a3df9e776fc64dfd64ac65f976 | refs/heads/master | 2022-05-02T05:39:08.932008 | 2022-03-17T07:56:30 | 2022-03-17T07:56:30 | 201,287,222 | 9 | 6 | null | 2019-08-08T15:34:26 | 2019-08-08T15:34:25 | null | UTF-8 | Python | false | false | 217 | py | def spam(divideby):
try:
return 42 / divideby
except ZeroDivisionError:
print('error:Invalid argument.')
print(int(spam(2)))
print(int(spam(12)))
print(spam(0))
print(spam(0.1))
print(spam(1)) | [
"40155646+seven-tears@users.noreply.github.com"
] | 40155646+seven-tears@users.noreply.github.com |
032ccf681f1e2851361f8583215105b58ac8455e | f11b090095c3d9dab32932a6e7afca5fe46fade2 | /blog_django/forms.py | 82cde936ff0bba233f84f508ef500b84da13a378 | [
"MIT"
] | permissive | marthaurion/blog_django | 94d2f30736fb68f40e4118fca0aee4998b7e9a7c | 98b2bc0baf72fa6fd6dee3562b74440162a00b41 | refs/heads/master | 2020-04-04T04:12:11.956346 | 2019-05-01T03:39:23 | 2019-05-01T03:39:23 | 41,460,031 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 825 | py | from django import forms
from django.core.mail import send_mail
from captcha.fields import CaptchaField
# Widget attributes shared by all form fields ('form-control' CSS class).
attrib = {'class': 'form-control'}
class ContactForm(forms.Form):
    """Site contact form with CAPTCHA protection."""
    name = forms.CharField(max_length=100, required=True, widget=forms.TextInput(attrs=attrib))
    sender = forms.EmailField(label="Email", required=True, widget=forms.EmailInput(attrs=attrib))
    message = forms.CharField(required=True, widget=forms.Textarea(attrs=attrib))
    # Empty label so only the CAPTCHA widget itself is rendered.
    captcha = CaptchaField(label="")
    def send_email(self):
        """Email the validated message to the site owner's address."""
        name = self.cleaned_data['name']
        message = self.cleaned_data['message']
        sender = self.cleaned_data['sender']
        subject = "New message from: " + name + " at " + sender
        recipients = ['marthaurion@gmail.com']
        send_mail(subject, message, sender, recipients) | [
"marthaurion@gmail.com"
] | marthaurion@gmail.com |
e64c4148cd45c64ab109e437f17adfb407d43fd4 | 93dd16432fcb4b42670f208edf81b2eb29f40d41 | /__init__.py | d6ecb6b7a55d08523e19946d03b10da7deef48b2 | [
"MIT"
] | permissive | shyams1993/pycaesarcipher | d067f4fda7acdb5f70687d5262a6fbc39d5e3790 | a396f165cc9a103950d060c94e25f7f344e7b257 | refs/heads/master | 2022-06-27T17:28:48.417994 | 2020-05-07T10:05:25 | 2020-05-07T10:05:25 | 261,873,682 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 56 | py | from pycaesarcipher.pycaesarcipher import pycaesarcipher | [
"noreply@github.com"
] | noreply@github.com |
fdf116fc0fba39809c9daedd37fdb20c0c721dc8 | 5115d3fd60826f2e7eb36c3467608a31e34d8cd1 | /myshop/orders/urls.py | a9ba0463ffca3b9978db9ce5070203cf9675187e | [] | no_license | Dyavathrocky/e-commerce | 650ca4e764723101c9f1cf456c15ab43c503d1b4 | 2c6368fc514c5a2102088df1427da41a8b8af34a | refs/heads/master | 2022-12-10T11:31:36.052547 | 2020-09-06T14:27:34 | 2020-09-06T14:27:34 | 289,501,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | from django.urls import path
from . import views
# URL namespace for reversing, e.g. reverse('orders:order_create').
app_name = 'orders'
urlpatterns = [
    path('create/', views.order_create, name='order_create'),
    # Per-order detail view keyed by the integer order id.
    path('admin/order/<int:order_id>/', views.admin_order_detail, name='admin_order_detail'),
] | [
"davathrak@gmail.com"
] | davathrak@gmail.com |
e40250615dd41dd7211a4ed1b2dca574944d53f6 | e9f33742ee046d785f4b653667769415cb6dc6f1 | /django_lite/django_lite.py | 5250b7eec79c38a04d5cf5b5b43c494b192c2023 | [
"MIT"
] | permissive | fmarco/django-lite | 5bd754b0ea5abf0cc543cbc227e8f51bd12d402e | b7c6162755893cefd3d448e4a2ae180dec1fcd96 | refs/heads/master | 2020-12-30T14:20:36.737255 | 2017-06-13T11:32:04 | 2017-06-13T11:32:04 | 91,315,035 | 3 | 0 | null | 2017-06-13T11:28:45 | 2017-05-15T08:44:22 | Python | UTF-8 | Python | false | false | 11,344 | py | # -*- coding:utf-8 -*-
import os, json, inspect, sys
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.db import models
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from django.views.generic.list import ListView
from .services import (
Config, DBSettings, MiddlewareSettings, ModelFactory,
StaticSettings, TemplateSettings
)
from .templates import base_apps
from .utils import generate_secret_key, generate_get_absolute_url, DJANGO_FIELDS
# Indentation unit used by the make_* source generators below.
separator = ' '
# File header emitted at the top of every generated module.
header = '# -*- coding:utf-8 -*-'
# Regex fragment matching an integer primary key in generated URLs.
slug_regex = '(?P<pk>\d+)'
# Generic class-based views that can be auto-generated for a model.
DJ_CLASSES = [ CreateView, DeleteView, DetailView, ListView, UpdateView ]
# Import line to emit in a generated views.py for each CBV base class.
DJ_CLASSES_IMPORT = {
    'CreateView': 'from django.views.generic.edit import CreateView',
    'UpdateView': 'from django.views.generic.edit import UpdateView',
    'DeleteView': 'from django.views.generic.edit import DeleteView',
    'DetailView': 'from django.views.generic.detail import DetailView',
    'ListView': 'from django.views.generic.list import ListView'
}
class DjangoLite(object):
    """Single-object micro-framework wrapper around Django.

    An instance owns the settings, the root URLconf, dynamically created
    models and registered views, and can emit models.py / urls.py /
    views.py / settings.py source through the ``make_*`` commands
    dispatched by start().

    NOTE(review): config, _urlpatterns, MODELS and VIEWS are *class*
    attributes shared by all instances; this assumes one DjangoLite
    object per process -- confirm before instantiating several.
    """

    # view key -> (generated class-name suffix, CBV base, needs editable fields)
    extra_mapping = {
        'detail_view': ('Detail', DetailView, False),
        'list_view': ('List', ListView, False),
        'create_view': ('Create', CreateView, True),
        'delete_view': ('Delete', DeleteView, False),
        'update_view': ('Update', UpdateView, True)
    }

    # CLI sub-command -> generator method used by start().
    commands = {
        'make_models': 'generate_models',
        'make_urls': 'generate_urls',
        'make_views': 'generate_views',
        'make_settings': 'generate_settings'
    }

    autoconfigure = True
    config = {}
    configuration = None
    _urlpatterns = []
    MODELS = {}
    VIEWS = {}

    def __init__(self, file_attr, autoconfigure=True, *args, **kwargs):
        """file_attr is the caller's __file__; the app root is its directory."""
        self.base_dir = os.path.dirname(os.path.abspath(file_attr))
        sys.path[0] = os.path.dirname(self.base_dir)
        self.configuration = Config()
        if autoconfigure:
            self.configure()

    def set_url(self):
        """Reset the stored URL patterns to just the admin site."""
        self._urlpatterns = [url(r'^admin/', include(admin.site.urls))]

    @property
    def urlpatterns(self):
        """Full URLconf: admin + registered views + static files."""
        from django.contrib.staticfiles.urls import staticfiles_urlpatterns
        return [url(r'^admin/', include(admin.site.urls))] + self._urlpatterns + staticfiles_urlpatterns()

    @property
    def root_urlconf(self):
        # The instance itself serves as ROOT_URLCONF (it exposes .urlpatterns).
        return self

    def configure(self, secret_key=None, debug=True, **kwargs):
        """Assemble the settings dict and configure/setup Django once.

        Pass override=<dict> to replace the whole settings dict, or
        extra=<dict> to merge additional settings on top.
        """
        if 'override' in kwargs:
            # Bug fix: the original read kwargs.get('overrides') (trailing
            # 's') here, which always produced None on this code path.
            self.config = kwargs.get('override')
        else:
            self.configuration.register(DBSettings(self.base_dir))
            self.configuration.register(TemplateSettings(self.base_dir))
            self.configuration.register(MiddlewareSettings())
            self.configuration.register(StaticSettings(self.base_dir))
            self.config['BASE_DIR'] = self.base_dir
            self.config['ROOT_URLCONF'] = self.root_urlconf
            self.config['DEBUG'] = debug
            self.config.update(self.installed_apps())
            self.config.update(self.configuration.settings)
            self.config['SECRET_KEY'] = generate_secret_key() if not secret_key else secret_key
            self.config['SESSION_ENGINE'] = 'django.contrib.sessions.backends.signed_cookies'
        if 'extra' in kwargs:
            self.config.update(kwargs.get('extra'))
        if not settings.configured:
            settings.configure(**self.config)
            import django
            django.setup()

    @property
    def app_label(self):
        """Application label derived from the app directory name."""
        base_dir = self.config.get('BASE_DIR')
        if base_dir:
            return os.path.basename(base_dir)

    def new_model(self, *args, **kwargs):
        """Create a model via ModelFactory and register it in MODELS."""
        model = ModelFactory.create(self.app_label, __name__, *args, **kwargs)
        setattr(model, 'get_absolute_url', generate_get_absolute_url(model.__name__.lower()))
        self.MODELS[model.__name__] = model

    def add_view(self, url_pattern, func, name=None):
        """Register func as a view at url_pattern (name defaults to func name)."""
        params = [url_pattern, func]
        if name is None:
            name = func.__name__
        self._urlpatterns.append(
            url(*params, name=name)
        )
        self.VIEWS[func.__name__] = func

    def installed_apps(self, **kwargs):
        """Return the INSTALLED_APPS setting (base apps + this app + extras)."""
        if 'override_apps' in kwargs:
            # Bug fix: the original read kwargs.get('ovveride_apps') (typo),
            # which always produced None on this code path.
            apps_list = kwargs.get('override_apps')
        else:
            apps_list = base_apps + (
                self.app_label,
            ) + kwargs.get('extra_apps', ())
        return {
            'INSTALLED_APPS': apps_list
        }

    def query(self, model):
        """Return the manager (.objects) of a registered model, by name."""
        model = self.MODELS.get(model)
        if model:
            return model.objects

    def start(self):
        """Entry point: run a make_* generator, Django's CLI, or serve WSGI."""
        from django.core.wsgi import get_wsgi_application
        if __name__ == "django_lite.django_lite":
            from django.core.management import execute_from_command_line
            try:
                command = sys.argv[1]
                if command in self.commands.keys():
                    cmd = getattr(self, self.commands.get(command))
                    for line in cmd():
                        sys.stdout.write("%s\n" % line)
                    return
            except IndexError:
                # No sub-command given: fall through to Django's CLI.
                pass
            execute_from_command_line(sys.argv)
        else:
            get_wsgi_application()

    def route(self, url_pattern, name=None):
        """Decorator registering the wrapped callable as a view for url_pattern."""
        def wrap(f):
            self.add_view(url_pattern, f, name)
            def wrapped_f(*args):
                f(*args)
            return wrapped_f
        return wrap

    def generate_view(self, cls, view_name):
        """Build a CBV subclass for cls from extra_mapping; None on unknown key."""
        try:
            view_name, view_parent, edit = self.extra_mapping[view_name]
            cls_name = cls.__name__
            view_class_name = '{0}{1}'.format(cls_name, view_name)
            data = { 'model': self.MODELS[cls_name]}
            if edit:
                data['fields'] = '__all__'
            return type(view_class_name, (view_parent, ), data)
        except KeyError:
            pass

    def model(self, admin=True, crud=False):
        """Class decorator: register the class as a model, optionally with CRUD views."""
        def wrap(cls):
            # Collect non-dunder, non-callable class attributes as model fields.
            attributes = inspect.getmembers(cls, lambda attr:not(inspect.isroutine(attr)))
            attrs = dict([attr for attr in attributes if not(attr[0].startswith('__') and attr[0].endswith('__'))])
            self.new_model(
                **{
                    'name': cls.__name__,
                    'admin': admin,
                    'attrs': attrs
                }
            )
            setattr(cls, 'objects', self.query(cls.__name__))
            generated_views = []
            if hasattr(cls, 'Extra'):
                base_url = ''
                if hasattr(cls.Extra, 'base_url'):
                    base_url = cls.Extra.base_url
                else:
                    base_url = cls.__name__.lower()
                for extra in cls.Extra.__dict__.iteritems():
                    view = self.generate_view(cls, extra[0])
                    if view is not None:
                        generated_views.append(extra[0])
                        view_name = '{0}_{1}'.format(cls.__name__.lower(), extra[0])
                        url = '{0}{1}'.format(base_url, extra[1])
                        self.add_view(url, view.as_view(), view_name)
            else:
                base_url = cls.__name__.lower()
            if crud:
                # Generate any CRUD views not already declared in Extra.
                crud_views = set(self.extra_mapping.keys())
                remaining = crud_views - set(generated_views)
                for new_view in remaining:
                    view = self.generate_view(cls, new_view)
                    view_name = '{0}_{1}'.format(cls.__name__.lower(), new_view)
                    view_info = self.extra_mapping[new_view]
                    url_suffix = view_info[0].lower()
                    url = '^{0}/{1}'.format(base_url, url_suffix)
                    # NOTE(review): the pk capture is appended to create URLs
                    # too (view_info[2] is True for create/update); a create
                    # URL with a pk looks unintended -- confirm.
                    if view_info[2] or new_view == 'delete_view':
                        url = '{0}/{1}$'.format(url, slug_regex)
                    self.add_view(url, view.as_view(), view_name)
            return cls
        return wrap

    def generate_models(self):
        """Yield the lines of a models.py equivalent to the registered models."""
        yield header
        yield 'from django.db import models'
        yield 'from django.utils.translation import ugettext_lazy as _\n'
        for k, v in self.MODELS.iteritems():
            yield 'class {0}(models.Model):'.format(k)
            fields = v._meta.get_fields()
            for field in fields:
                if field.__class__.__name__ in DJANGO_FIELDS:
                    yield '{0}{1} = models.{2}()'.format(separator, field.name, field.__class__.__name__)
            yield '\n{0}class Meta:'.format(separator)
            yield '{0}{1}verbose_name = _(\'{2}\')'.format(separator, separator, k.lower())
            yield '{0}{1}verbose_name_plural = _(\'{2}s\')'.format(separator, separator, k.lower())
            yield '\n{0}def __str__(self):'.format(separator)
            yield '{0}{1}return self.pk'.format(separator, separator)
            yield '\n'

    def generate_urls(self):
        """Yield the lines of a urls.py equivalent to the current URLconf."""
        from django.core.urlresolvers import RegexURLResolver
        patterns = []
        for url in self.urlpatterns:
            if isinstance(url, RegexURLResolver):
                if url.app_name == 'admin':
                    str_pattern = '{0}url(r\'^admin/\', include(admin.site.urls)),'.format(separator)
                    patterns.append(str_pattern)
            else:
                if 'static' not in url.regex.pattern:
                    str_pattern = '{0}url(r\'{1}\', views.{2}),'.format(separator, url.regex.pattern, url.callback.__name__)
                    patterns.append(str_pattern)
        yield header
        yield 'from django.conf.urls import url'
        yield 'from django.contrib.staticfiles.urls import staticfiles_urlpatterns'
        yield ''
        yield 'from . import views\n'
        yield 'urlpatterns = ['
        for url in patterns:
            yield url
        yield '] + staticfiles_urlpatterns()\n'

    def generate_views(self):
        """Yield the lines of a views.py for all registered views."""
        yield header
        declarations = []
        counters = {}
        for k, f in self.VIEWS.iteritems():
            if hasattr(f, 'view_class'):
                # Class-based view: re-declare the generated subclass.
                cls = f.view_class
                cls_str = ''
                for dj_class in DJ_CLASSES:
                    if issubclass(cls, dj_class):
                        dj_class_name = dj_class.__name__
                        try:
                            counters[dj_class_name] += 1
                        except KeyError:
                            counters[dj_class_name] = 1
                        cls_str = 'class {0}({1}):'.format(cls.__name__, dj_class.__name__)
                        cls_str += '\n{0}model={1}'.format(separator, cls.model.__name__)
                declarations.append(cls_str)
            else:
                # Function view: copy its source verbatim.
                declarations.append(inspect.getsource(f))
        for import_str, count in counters.iteritems():
            if count > 0:
                yield DJ_CLASSES_IMPORT[import_str]
        for declaration in declarations:
            yield '\n'
            yield declaration

    def generate_settings(self):
        """Yield a settings.py; values that are not JSON-serializable are skipped."""
        yield header
        for k, v in settings._wrapped.__dict__.iteritems():
            try:
                yield '{0} = {1}'.format(k, json.dumps(settings._wrapped.__dict__[k]))
            except TypeError:
                pass
| [
"federighi.marco@gmail.com"
] | federighi.marco@gmail.com |
b6ef5650d08eea12702799633726678c48be5259 | 552ebada003b5f3dda1e705f6de235048ac1c4aa | /readData_IWR1443.py | 943eefc6f46158914f37f07ea2f899a6e561eb52 | [] | no_license | gimac/IWR1443-Read-Data-Python | c5e3dfdb13ba8587a3878d3413611cc7d285f450 | 88a3991e41761206d3ac4e3ca42e0f5a2d7c90eb | refs/heads/master | 2020-06-28T14:52:10.887842 | 2019-06-09T06:47:17 | 2019-06-09T06:47:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,955 | py | import serial
import time
import numpy as np
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui
# Change the configuration file name
configFileName = '1443config.cfg'
CLIport = {}
Dataport = {}
byteBuffer = np.zeros(2**15,dtype = 'uint8')
byteBufferLength = 0;
# ------------------------------------------------------------------
# Function to configure the serial ports and send the data from
# the configuration file to the radar
def serialConfig(configFileName):
global CLIport
global Dataport
# Open the serial ports for the configuration and the data ports
# Raspberry pi
#CLIport = serial.Serial('/dev/ttyACM0', 115200)
#Dataport = serial.Serial('/dev/ttyACM1', 921600)
# Windows
CLIport = serial.Serial('COM3', 115200)
Dataport = serial.Serial('COM4', 921600)
# Read the configuration file and send it to the board
config = [line.rstrip('\r\n') for line in open(configFileName)]
for i in config:
CLIport.write((i+'\n').encode())
print(i)
time.sleep(0.01)
return CLIport, Dataport
# ------------------------------------------------------------------
# Function to parse the data inside the configuration file
def parseConfigFile(configFileName):
configParameters = {} # Initialize an empty dictionary to store the configuration parameters
# Read the configuration file and send it to the board
config = [line.rstrip('\r\n') for line in open(configFileName)]
for i in config:
# Split the line
splitWords = i.split(" ")
# Hard code the number of antennas, change if other configuration is used
numRxAnt = 4
numTxAnt = 3
# Get the information about the profile configuration
if "profileCfg" in splitWords[0]:
startFreq = int(float(splitWords[2]))
idleTime = int(splitWords[3])
rampEndTime = float(splitWords[5])
freqSlopeConst = float(splitWords[8])
numAdcSamples = int(splitWords[10])
numAdcSamplesRoundTo2 = 1;
while numAdcSamples > numAdcSamplesRoundTo2:
numAdcSamplesRoundTo2 = numAdcSamplesRoundTo2 * 2;
digOutSampleRate = int(splitWords[11]);
# Get the information about the frame configuration
elif "frameCfg" in splitWords[0]:
chirpStartIdx = int(splitWords[1]);
chirpEndIdx = int(splitWords[2]);
numLoops = int(splitWords[3]);
numFrames = int(splitWords[4]);
framePeriodicity = int(splitWords[5]);
# Combine the read data to obtain the configuration parameters
numChirpsPerFrame = (chirpEndIdx - chirpStartIdx + 1) * numLoops
configParameters["numDopplerBins"] = numChirpsPerFrame / numTxAnt
configParameters["numRangeBins"] = numAdcSamplesRoundTo2
configParameters["rangeResolutionMeters"] = (3e8 * digOutSampleRate * 1e3) / (2 * freqSlopeConst * 1e12 * numAdcSamples)
configParameters["rangeIdxToMeters"] = (3e8 * digOutSampleRate * 1e3) / (2 * freqSlopeConst * 1e12 * configParameters["numRangeBins"])
configParameters["dopplerResolutionMps"] = 3e8 / (2 * startFreq * 1e9 * (idleTime + rampEndTime) * 1e-6 * configParameters["numDopplerBins"] * numTxAnt)
configParameters["maxRange"] = (300 * 0.9 * digOutSampleRate)/(2 * freqSlopeConst * 1e3)
configParameters["maxVelocity"] = 3e8 / (4 * startFreq * 1e9 * (idleTime + rampEndTime) * 1e-6 * numTxAnt)
return configParameters
# ------------------------------------------------------------------
# Funtion to read and parse the incoming data
def readAndParseData14xx(Dataport, configParameters):
global byteBuffer, byteBufferLength
# Constants
OBJ_STRUCT_SIZE_BYTES = 12;
BYTE_VEC_ACC_MAX_SIZE = 2**15;
MMWDEMO_UART_MSG_DETECTED_POINTS = 1;
MMWDEMO_UART_MSG_RANGE_PROFILE = 2;
maxBufferSize = 2**15;
magicWord = [2, 1, 4, 3, 6, 5, 8, 7]
# Initialize variables
magicOK = 0 # Checks if magic number has been read
dataOK = 0 # Checks if the data has been read correctly
frameNumber = 0
detObj = {}
readBuffer = Dataport.read(Dataport.in_waiting)
byteVec = np.frombuffer(readBuffer, dtype = 'uint8')
byteCount = len(byteVec)
# Check that the buffer is not full, and then add the data to the buffer
if (byteBufferLength + byteCount) < maxBufferSize:
byteBuffer[byteBufferLength:byteBufferLength + byteCount] = byteVec[:byteCount]
byteBufferLength = byteBufferLength + byteCount
# Check that the buffer has some data
if byteBufferLength > 16:
# Check for all possible locations of the magic word
possibleLocs = np.where(byteBuffer == magicWord[0])[0]
# Confirm that is the beginning of the magic word and store the index in startIdx
startIdx = []
for loc in possibleLocs:
check = byteBuffer[loc:loc+8]
if np.all(check == magicWord):
startIdx.append(loc)
# Check that startIdx is not empty
if startIdx:
# Remove the data before the first start index
if startIdx[0] > 0:
byteBuffer[:byteBufferLength-startIdx[0]] = byteBuffer[startIdx[0]:byteBufferLength]
byteBufferLength = byteBufferLength - startIdx[0]
# Check that there have no errors with the byte buffer length
if byteBufferLength < 0:
byteBufferLength = 0
# word array to convert 4 bytes to a 32 bit number
word = [1, 2**8, 2**16, 2**24]
# Read the total packet length
totalPacketLen = np.matmul(byteBuffer[12:12+4],word)
# Check that all the packet has been read
if (byteBufferLength >= totalPacketLen) and (byteBufferLength != 0):
magicOK = 1
# If magicOK is equal to 1 then process the message
if magicOK:
# word array to convert 4 bytes to a 32 bit number
word = [1, 2**8, 2**16, 2**24]
# Initialize the pointer index
idX = 0
# Read the header
magicNumber = byteBuffer[idX:idX+8]
idX += 8
version = format(np.matmul(byteBuffer[idX:idX+4],word),'x')
idX += 4
totalPacketLen = np.matmul(byteBuffer[idX:idX+4],word)
idX += 4
platform = format(np.matmul(byteBuffer[idX:idX+4],word),'x')
idX += 4
frameNumber = np.matmul(byteBuffer[idX:idX+4],word)
idX += 4
timeCpuCycles = np.matmul(byteBuffer[idX:idX+4],word)
idX += 4
numDetectedObj = np.matmul(byteBuffer[idX:idX+4],word)
idX += 4
numTLVs = np.matmul(byteBuffer[idX:idX+4],word)
idX += 4
# Read the TLV messages
for tlvIdx in range(numTLVs):
# word array to convert 4 bytes to a 32 bit number
word = [1, 2**8, 2**16, 2**24]
# Check the header of the TLV message
tlv_type = np.matmul(byteBuffer[idX:idX+4],word)
idX += 4
tlv_length = np.matmul(byteBuffer[idX:idX+4],word)
idX += 4
# Read the data depending on the TLV message
if tlv_type == MMWDEMO_UART_MSG_DETECTED_POINTS:
# word array to convert 4 bytes to a 16 bit number
word = [1, 2**8]
tlv_numObj = np.matmul(byteBuffer[idX:idX+2],word)
idX += 2
tlv_xyzQFormat = 2**np.matmul(byteBuffer[idX:idX+2],word)
idX += 2
# Initialize the arrays
rangeIdx = np.zeros(tlv_numObj,dtype = 'int16')
dopplerIdx = np.zeros(tlv_numObj,dtype = 'int16')
peakVal = np.zeros(tlv_numObj,dtype = 'int16')
x = np.zeros(tlv_numObj,dtype = 'int16')
y = np.zeros(tlv_numObj,dtype = 'int16')
z = np.zeros(tlv_numObj,dtype = 'int16')
for objectNum in range(tlv_numObj):
# Read the data for each object
rangeIdx[objectNum] = np.matmul(byteBuffer[idX:idX+2],word)
idX += 2
dopplerIdx[objectNum] = np.matmul(byteBuffer[idX:idX+2],word)
idX += 2
peakVal[objectNum] = np.matmul(byteBuffer[idX:idX+2],word)
idX += 2
x[objectNum] = np.matmul(byteBuffer[idX:idX+2],word)
idX += 2
y[objectNum] = np.matmul(byteBuffer[idX:idX+2],word)
idX += 2
z[objectNum] = np.matmul(byteBuffer[idX:idX+2],word)
idX += 2
# Make the necessary corrections and calculate the rest of the data
rangeVal = rangeIdx * configParameters["rangeIdxToMeters"]
dopplerIdx[dopplerIdx > (configParameters["numDopplerBins"]/2 - 1)] = dopplerIdx[dopplerIdx > (configParameters["numDopplerBins"]/2 - 1)] - 65535
dopplerVal = dopplerIdx * configParameters["dopplerResolutionMps"]
#x[x > 32767] = x[x > 32767] - 65536
#y[y > 32767] = y[y > 32767] - 65536
#z[z > 32767] = z[z > 32767] - 65536
x = x / tlv_xyzQFormat
y = y / tlv_xyzQFormat
z = z / tlv_xyzQFormat
# Store the data in the detObj dictionary
detObj = {"numObj": tlv_numObj, "rangeIdx": rangeIdx, "range": rangeVal, "dopplerIdx": dopplerIdx, \
"doppler": dopplerVal, "peakVal": peakVal, "x": x, "y": y, "z": z}
dataOK = 1
#print(detObj['range'].mean())
elif tlv_type == MMWDEMO_UART_MSG_RANGE_PROFILE:
idX += tlv_length
# Remove already processed data
if idX > 0 and dataOK == 1:
shiftSize = idX
byteBuffer[:byteBufferLength - shiftSize] = byteBuffer[shiftSize:byteBufferLength]
byteBufferLength = byteBufferLength - shiftSize
# Check that there are no errors with the buffer length
if byteBufferLength < 0:
byteBufferLength = 0
return dataOK, frameNumber, detObj
# ------------------------------------------------------------------
# Funtion to update the data and display in the plot
def update():
    """Poll the radar serial port for one frame and refresh the scatter plot.

    Stores the parsed frame in the module-level ``detObj`` and returns the
    parser's validity flag (truthy when a detected-objects frame was plotted).
    """
    global detObj
    frame_ok, _frame_number, detObj = readAndParseData14xx(Dataport, configParameters)
    if frame_ok:
        # Mirror x so the plot matches the sensor's physical left/right.
        s.setData(-detObj["x"], detObj["y"])
        QtGui.QApplication.processEvents()
    return frame_ok
# ------------------------- MAIN -----------------------------------------
# Configurate the serial port
CLIport, Dataport = serialConfig(configFileName)
# Get the configuration parameters from the configuration file
configParameters = parseConfigFile(configFileName)
# START QtAPPfor the plot
app = QtGui.QApplication([])
# Set the plot
pg.setConfigOption('background','w')
win = pg.GraphicsWindow(title="2D scatter plot")
p = win.addPlot()
p.setXRange(-0.5,0.5)
p.setYRange(0,1.5)
p.setLabel('left',text = 'Y position (m)')
p.setLabel('bottom', text= 'X position (m)')
# Empty scatter series; update() pushes the latest (x, y) detections into it.
s = p.plot([],[],pen=None,symbol='o')
# Main loop
detObj = {}
frameData = {}
currentIndex = 0
while True:
    try:
        # Update the data and check if the data is okay
        dataOk = update()
        if dataOk:
            # Store the current frame into frameData
            frameData[currentIndex] = detObj
            currentIndex += 1
        time.sleep(0.033) # Sampling frequency of 30 Hz
    # Stop the program and close everything if Ctrl + c is pressed
    except KeyboardInterrupt:
        CLIport.write(('sensorStop\n').encode())
        CLIport.close()
        Dataport.close()
        win.close()
        break
| [
"noreply@github.com"
] | noreply@github.com |
bd2ae5fb7ab409ebe2302f6ad8f5950260059205 | 05d3cbb10e11b9c3e717bb0f48842f51ea7fc77a | /myapp/china/route/index.py | a17e6c8d8ea9a07db36b42b3ce49ae070ef8f009 | [] | no_license | karthus007/flask-demo | 6c253a75359ea1139b738b482f15316e66b4fb37 | ca75c1848b37bc9d56b33c617a8600a44d5649cc | refs/heads/master | 2020-09-03T15:41:40.097486 | 2019-11-04T12:54:42 | 2019-11-04T12:54:42 | 219,500,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | from flask import Blueprint, render_template
from selenium import webdriver
import time
# Blueprint grouping the demo routes for the "china" section of the app.
china = Blueprint('china', __name__)


@china.route("/index")
def demo_index():
    """Render the home page template."""
    return render_template("home/index.html")
@china.route("/baidu")
def demo_weibo():
    """Drive a Chrome session that searches Baidu, then return "success".

    NOTE(review): the function name says "weibo" but the route and the page
    it drives are Baidu -- confirm which was intended.
    """
    browser = webdriver.Chrome("tool/chromedriver")
    browser.maximize_window()
    browser.get("https://www.baidu.com/")
    search_box = browser.find_element_by_name("wd")
    search_box.send_keys("冰雪")
    browser.find_element_by_id("su").click()
    time.sleep(5)
    browser.quit()
    return "success"
| [
"237458345@qq.com"
] | 237458345@qq.com |
73f699b53370be080a723734410e86ee80b96259 | b87ea98bc166cade5c78d246aeb0e23c59183d56 | /samples/openapi3/client/3_0_3_unit_test/python/unit_test_api/paths/request_body_post_additionalproperties_are_allowed_by_default_request_body/post.py | 00eeab16165cc40c5ceea36e2021cce7127c7ea9 | [
"Apache-2.0"
] | permissive | holisticon/openapi-generator | 88f8e6a3d7bc059c8f56563c87f6d473694d94e5 | 6a67551ea54a1aa9a49eb48ee26b4e9bb7fb1272 | refs/heads/master | 2023-05-12T02:55:19.037397 | 2023-04-14T08:31:59 | 2023-04-14T08:31:59 | 450,034,139 | 1 | 0 | Apache-2.0 | 2022-01-20T09:34:14 | 2022-01-20T09:34:13 | null | UTF-8 | Python | false | false | 10,414 | py | # coding: utf-8
"""
Generated by: https://openapi-generator.tech
"""
from dataclasses import dataclass
import typing_extensions
import urllib3
from urllib3._collections import HTTPHeaderDict
from unit_test_api import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import typing_extensions # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from unit_test_api import schemas # noqa: F401
from unit_test_api.model.additionalproperties_are_allowed_by_default import AdditionalpropertiesAreAllowedByDefault
from . import path
# body param
# Alias for the generated model that serves as the JSON request-body schema.
SchemaForRequestBodyApplicationJson = AdditionalpropertiesAreAllowedByDefault


# Required request body: a JSON payload validated against the schema above.
request_body_additionalproperties_are_allowed_by_default = api_client.RequestBody(
    content={
        'application/json': api_client.MediaType(
            schema=SchemaForRequestBodyApplicationJson),
    },
    required=True,
)


@dataclass
class ApiResponseFor200(api_client.ApiResponse):
    # Raw urllib3 response; this endpoint defines no response body or headers.
    response: urllib3.HTTPResponse
    body: schemas.Unset = schemas.unset
    headers: schemas.Unset = schemas.unset


_response_for_200 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor200,
)


# Maps an HTTP status code (as a string) to its response deserializer.
_status_code_to_response = {
    '200': _response_for_200,
}
class BaseApi(api_client.Api):
    """Generated base class carrying the actual POST implementation.

    The ``@typing.overload`` stubs below exist only to refine the return type
    for type checkers (deserialized response vs. raw response) depending on
    the ``skip_deserialization`` literal; the last definition is the one that
    runs at runtime.
    """

    @typing.overload
    def _post_additionalproperties_are_allowed_by_default_request_body_oapg(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: typing_extensions.Literal["application/json"] = ...,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...

    @typing.overload
    def _post_additionalproperties_are_allowed_by_default_request_body_oapg(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: str = ...,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...

    @typing.overload
    def _post_additionalproperties_are_allowed_by_default_request_body_oapg(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        skip_deserialization: typing_extensions.Literal[True],
        content_type: str = ...,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
    ) -> api_client.ApiResponseWithoutDeserialization: ...

    @typing.overload
    def _post_additionalproperties_are_allowed_by_default_request_body_oapg(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: str = ...,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = ...,
    ) -> typing.Union[
        ApiResponseFor200,
        api_client.ApiResponseWithoutDeserialization,
    ]: ...

    def _post_additionalproperties_are_allowed_by_default_request_body_oapg(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: str = 'application/json',
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ):
        """
        :param skip_deserialization: If true then api_response.response will be set but
            api_response.body and api_response.headers will not be deserialized into schema
            class instances
        """
        used_path = path.value

        _headers = HTTPHeaderDict()
        # TODO add cookie handling

        if body is schemas.unset:
            raise exceptions.ApiValueError(
                'The required body parameter has an invalid value of: unset. Set a valid value instead')
        _fields = None
        _body = None
        # Serialize the body for the chosen media type; the result carries
        # either multipart 'fields' or a raw request 'body'.
        serialized_data = request_body_additionalproperties_are_allowed_by_default.serialize(body, content_type)
        _headers.add('Content-Type', content_type)
        if 'fields' in serialized_data:
            _fields = serialized_data['fields']
        elif 'body' in serialized_data:
            _body = serialized_data['body']
        response = self.api_client.call_api(
            resource_path=used_path,
            method='post'.upper(),
            headers=_headers,
            fields=_fields,
            body=_body,
            stream=stream,
            timeout=timeout,
        )

        if skip_deserialization:
            api_response = api_client.ApiResponseWithoutDeserialization(response=response)
        else:
            # Pick the deserializer registered for this status code; unknown
            # codes fall back to the raw, undeserialized response wrapper.
            response_for_status = _status_code_to_response.get(str(response.status))
            if response_for_status:
                api_response = response_for_status.deserialize(response, self.api_client.configuration)
            else:
                api_response = api_client.ApiResponseWithoutDeserialization(response=response)

        if not 200 <= response.status <= 299:
            raise exceptions.ApiException(
                status=response.status,
                reason=response.reason,
                api_response=api_response
            )

        return api_response
class PostAdditionalpropertiesAreAllowedByDefaultRequestBody(BaseApi):
    """Thin facade exposing the endpoint under its operationId method name."""
    # this class is used by api classes that refer to endpoints with operationId fn names

    @typing.overload
    def post_additionalproperties_are_allowed_by_default_request_body(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: typing_extensions.Literal["application/json"] = ...,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...

    @typing.overload
    def post_additionalproperties_are_allowed_by_default_request_body(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: str = ...,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...

    @typing.overload
    def post_additionalproperties_are_allowed_by_default_request_body(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        skip_deserialization: typing_extensions.Literal[True],
        content_type: str = ...,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
    ) -> api_client.ApiResponseWithoutDeserialization: ...

    @typing.overload
    def post_additionalproperties_are_allowed_by_default_request_body(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: str = ...,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = ...,
    ) -> typing.Union[
        ApiResponseFor200,
        api_client.ApiResponseWithoutDeserialization,
    ]: ...

    def post_additionalproperties_are_allowed_by_default_request_body(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: str = 'application/json',
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ):
        # Delegates to the shared implementation in BaseApi.
        return self._post_additionalproperties_are_allowed_by_default_request_body_oapg(
            body=body,
            content_type=content_type,
            stream=stream,
            timeout=timeout,
            skip_deserialization=skip_deserialization
        )
class ApiForpost(BaseApi):
    """Thin facade exposing the endpoint via its HTTP-method name (``post``)."""
    # this class is used by api classes that refer to endpoints by path and http method names

    @typing.overload
    def post(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: typing_extensions.Literal["application/json"] = ...,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...

    @typing.overload
    def post(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: str = ...,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...

    @typing.overload
    def post(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        skip_deserialization: typing_extensions.Literal[True],
        content_type: str = ...,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
    ) -> api_client.ApiResponseWithoutDeserialization: ...

    @typing.overload
    def post(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: str = ...,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = ...,
    ) -> typing.Union[
        ApiResponseFor200,
        api_client.ApiResponseWithoutDeserialization,
    ]: ...

    def post(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: str = 'application/json',
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ):
        # Delegates to the shared implementation in BaseApi.
        return self._post_additionalproperties_are_allowed_by_default_request_body_oapg(
            body=body,
            content_type=content_type,
            stream=stream,
            timeout=timeout,
            skip_deserialization=skip_deserialization
        )
| [
"noreply@github.com"
] | noreply@github.com |
158301fb55b8e41ffda571e1fd41f022ce8efaf8 | ecd5b484d3cc1931f23376c06dc36e247435c81f | /excelpp.py | 256869f8ac5b4a9d438e4e78044a7bd6a57cc1e4 | [] | no_license | t6166as/duos | 3391b386a7974207d6f8e6985c0518938e097768 | 3a7da48473a271459408da0d1372d683a4788c0f | refs/heads/master | 2021-01-19T16:21:16.378272 | 2018-09-18T15:10:01 | 2018-09-18T15:10:01 | 101,002,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,502 | py | #!/usr/local/bin/python3
import xlrd as exc
import csv
import argparse
from abbv import abbvtool
def parse_args():
    """Parse the required --filename / --outputname command-line options."""
    parser = argparse.ArgumentParser(description='supply file that need to be parsed')
    for option in ('--filename', '--outputname'):
        parser.add_argument(option, required=True)
    return parser.parse_args()
def csvconvert(filename, outputname):
    """Flatten every sheet of an Excel workbook into one '~'-delimited CSV.

    For each row, derives an acronym for the label in column 3 via abbvtool
    and, when the label (or failing that, its acronym) occurs inside the free
    text of column 4, records the text before (prefix) and after (suffix) the
    first occurrence. Rows are written as:
    [sheet name] + original cells + [acronym, prefix, suffix].
    """
    workbook = exc.open_workbook(filename)
    rows = []
    for sheet_name in workbook.sheet_names():
        worksheet = workbook.sheet_by_name(sheet_name)
        for row_idx in range(0, worksheet.nrows):
            label = worksheet.cell_value(row_idx, 3)
            txt_val = worksheet.cell_value(row_idx, 4)
            acronym = abbvtool(label)
            prefix = 'NA'
            suffix = 'NA'
            # xlrd cells may hold floats or empty values, so besides the
            # ValueError from splitting on an empty label we also catch
            # AttributeError/TypeError and fall back to the acronym (the
            # original `except ValueError` skipped the fallback for those).
            try:
                if label.upper() in txt_val.upper():
                    parts = txt_val.upper().split(label.upper())
                    prefix, suffix = parts[0], parts[1]
            except (ValueError, AttributeError, TypeError, IndexError):
                try:
                    if acronym.upper() in txt_val.upper():
                        parts = txt_val.upper().split(acronym.upper())
                        prefix, suffix = parts[0], parts[1]
                except Exception:
                    pass
            # NOTE: the original assigned the None result of list.append()
            # to a misspelled, unused name; a plain append is intended.
            rows.append([sheet_name] + worksheet.row_values(row_idx)
                        + [acronym, prefix, suffix])
    # newline='' is required by the csv module so rows are not double-spaced
    # on Windows.
    with open(outputname, "w", newline='') as f:
        writer = csv.writer(f, delimiter='~')
        writer.writerows(rows)
def main():
    """CLI entry point: convert the given workbook to a '~'-delimited CSV."""
    options = parse_args()
    csvconvert(options.filename, options.outputname)


if __name__ == "__main__":
    main()
| [
"j1axs01@node7.awishkar.com"
] | j1axs01@node7.awishkar.com |
71e9b21280f294cc649378a9840f97acf29ad84d | 8724722b33a3f6489d467bfe32f12eb94aa0a901 | /Final_Project/quickstart.py | 75d3d8be34a0e83439ac9c372270f5486cb72e62 | [] | no_license | keyaria/SI206 | 97c89bb9980b76f429f5c4ed142a88be012cfc66 | c6dcec1771cd12d014fab44fdb9cc58627bb19bb | refs/heads/master | 2021-08-29T20:02:49.576043 | 2017-12-14T21:41:37 | 2017-12-14T21:41:37 | 105,152,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,680 | py | import os
import google.oauth2.credentials
import sqlite3
import google_auth_oauthlib.flow
from datetime import datetime
import dateutil.parser as dateparser
import calendar
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from google_auth_oauthlib.flow import InstalledAppFlow
# The CLIENT_SECRETS_FILE variable specifies the name of a file that contains
# the OAuth 2.0 information for this application, including its client_id and
# client_secret.
CLIENT_SECRETS_FILE = "client_secret.json"

# This OAuth 2.0 access scope allows for full read/write access to the
# authenticated user's account and requires requests to use an SSL connection.
SCOPES = ['https://www.googleapis.com/auth/youtube.force-ssl']
API_SERVICE_NAME = 'youtube'
API_VERSION = 'v3'

# Module-level SQLite connection/cursor shared by the functions below.
conn = sqlite3.connect('Final_Project.sqlite')
cur = conn.cursor()
# NOTE: the table is rebuilt on every run, discarding previously stored rows.
cur.execute('DROP TABLE IF EXISTS youtube')
cur.execute('CREATE TABLE youtube (id TEXT NOT NULL, chanTitle TEXT, viewCount INTEGER, time_posted DATETIME, day DATETIME)')
def get_authenticated_service():
    """Run the installed-app OAuth console flow and return a YouTube API client."""
    oauth_flow = InstalledAppFlow.from_client_secrets_file(CLIENT_SECRETS_FILE, SCOPES)
    return build(API_SERVICE_NAME, API_VERSION, credentials=oauth_flow.run_console())
def print_response(response):
    """Debug helper: dump a raw API response to stdout."""
    print(response)
# Build a resource based on a list of properties given as key-value pairs.
# Leave properties with empty values out of the inserted resource.
def build_resource(properties):
    """Build a nested resource dict from dotted property names.

    Each key like "snippet.title" becomes nested objects ("snippet" holding
    "title"). A key ending in "[]" (e.g. "snippet.tags[]") stores its
    comma-separated value as a list. Properties with falsy values are left
    out of the final level (intermediate objects may still be created).
    """
    resource = {}
    for prop in properties:
        parts = prop.split('.')
        node = resource
        last = len(parts) - 1
        for depth, part in enumerate(parts):
            # "tags[]" means: store the value as a comma-split list.
            as_array = part[-2:] == '[]'
            if as_array:
                part = part[:-2]
            if depth == last:
                # Leave properties without values out of the inserted resource.
                if properties[prop]:
                    node[part] = properties[prop].split(',') if as_array else properties[prop]
            else:
                # Descend, creating intermediate objects on first use.
                node = node.setdefault(part, {})
    return resource
# Remove keyword arguments that are not set
def remove_empty_kwargs(**kwargs):
    """Drop keyword arguments whose values are falsy (None, '', 0, ...).

    Used to strip unset optional API parameters before building a request.

    Returns:
        dict: only the key/value pairs with truthy values.
    """
    # **kwargs is always a dict, never None, so the original
    # `if kwargs is not None` guard was dead code.
    return {key: value for key, value in kwargs.items() if value}
def videos_list_most_popular(client, **kwargs):
    """Fetch one page of most-popular videos and insert them into SQLite.

    Stores (video id, channel title, view count, local post time, weekday)
    in the module-level ``youtube`` table via ``cur``; the caller is
    expected to commit the transaction.

    Args:
        client: authenticated YouTube API client from get_authenticated_service().
        **kwargs: videos().list parameters; falsy ones are stripped first.
    """
    kwargs = remove_empty_kwargs(**kwargs)
    response = client.videos().list(
        **kwargs
    ).execute()
    for res in response.get('items', []):
        published = dateparser.parse(res['snippet']['publishedAt'])
        # (The original also built an unused '%H:%M:%S' string each loop;
        # that dead work is removed.)
        row = (
            res['id'],
            res['snippet']['channelTitle'],
            res['statistics']['viewCount'],
            published.strftime("%I:%M:%S %p"),
            calendar.day_name[published.weekday()],
        )
        cur.execute('INSERT INTO youtube (id, chanTitle, viewCount, time_posted, day) VALUES (?, ?, ?, ?, ?)', row, )
if __name__ == '__main__':
    # When running locally, disable OAuthlib's HTTPs verification. When
    # running in production *do not* leave this option enabled.
    os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
    client = get_authenticated_service()
    # Collect the 50 most popular videos for two regions into SQLite.
    videos_list_most_popular(client,
        part='snippet,contentDetails,statistics',
        chart='mostPopular',
        maxResults=50,
        regionCode='US',
        videoCategoryId='')
    videos_list_most_popular(client,
        part='snippet,contentDetails,statistics',
        chart='mostPopular',
        maxResults=50,
        regionCode='CA',
        videoCategoryId='')
    conn.commit()
    # NOTE(review): the connection itself is never closed -- consider
    # adding conn.close() after closing the cursor.
    cur.close()
"keyariaw"
] | keyariaw |
f884f1ae88a326d402d9caadf012a5eb529f084f | d1410b20da549dcb986861064db68e74cb836627 | /targetview/settings.py | 2e9763c14fbadffb6a4fbf1b3df61ff8ff2a456a | [] | no_license | ta-ou/target-deploy | a69d965233f9a104ccbb4dd0231aa994cd0456d8 | eddc283e7112797a6f331f6adc77b1c69a61336e | refs/heads/master | 2022-12-30T13:51:07.076910 | 2020-10-21T14:51:20 | 2020-10-21T14:51:20 | 297,659,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,026 | py | import os
from decouple import config
from dj_database_url import parse as dburl
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# Read from the environment / .env file via python-decouple.
SECRET_KEY = config('SECRET_KEY')

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False

ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # allauth / rest_auth stack for token- and session-based API auth.
    'django.contrib.sites',
    'rest_framework',
    'rest_framework.authtoken',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'rest_auth',
    'rest_auth.registration',
    'webpack_loader',
    # Local apps.
    'users',
    'targets',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # WhiteNoise serves collected static files in production.
    'whitenoise.middleware.WhiteNoiseMiddleware',
]

ROOT_URLCONF = 'targetview.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'targetview.wsgi.application'


# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases

# NOTE(review): credentials are hardcoded here; consider reading them with
# config(...) like SECRET_KEY (dj_database_url is imported above but appears
# unused).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'target$default',
        'USER': 'target',
        'PASSWORD': 'database',
        'HOST': 'target.mysql.pythonanywhere-services.com',
        'PORT': '',
    }
}


# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/

LANGUAGE_CODE = 'ja'

TIME_ZONE = 'Asia/Tokyo'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Authentication redirects.
LOGIN_URL = "accounts/login/"
LOGIN_REDIRECT_URL = "/"
LOGOUT_REDIRECT_URL = "/"

# Static assets; the webpack build output is served from frontend/dist.
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "frontend/dist")
]
STATIC_ROOT = os.path.join(BASE_DIR, 'static')

AUTH_USER_MODEL = "users.CustomUser"

SITE_ID = 1
# Require an e-mail address at sign-up but skip the verification step.
ACCOUNT_EMAIL_VERIFICATION = "none"
ACCOUNT_EMAIL_REQUIRED = (True)

REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.TokenAuthentication',
        'rest_framework.authentication.SessionAuthentication',
    ),
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticated',
    ),
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
    'PAGE_SIZE': 10
}

WEBPACK_LOADER = {
    'DEFAULT': {
        'BUNDLE_DIR_NAME': 'dist/',
        'STATS_FILE': os.path.join(BASE_DIR, 'frontend', 'webpack-stats.json'),
    }
}
| [
"takuyaouchi18@gmail.com"
] | takuyaouchi18@gmail.com |
2c23efbcaca7f08a26a6b5dcd830320bbbb50db5 | fc56e214468af3752296683c175e57ee0dce6064 | /AER_test.py | 106f4994a9d697fcf521fd7a92fa66fb1e24c764 | [] | no_license | sray0309/Speech-Emotion-Recognition-based-on-both-Audio-and-Text | 775e294bc6a0509651e1719131293c33b25f3ccb | 3b7e30317b4abaad0e4e9a4c249aee53649f49ab | refs/heads/master | 2022-12-28T22:48:18.938067 | 2020-10-12T14:28:25 | 2020-10-12T14:28:25 | 303,415,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,526 | py | import os
import pandas as pd
import glob
import matplotlib.pyplot as plt
import time
import numpy as np
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim
# Load audio MFCC
# Features load as (samples, coeffs, frames); transpose to put the 39 MFCC
# coefficients on the channel axis expected by Conv1d.
MFCC = (np.load("FC_MFCC12EDA.npy")).transpose(0,2,1) # Transpose
# First 90% of samples train the model; the final 10% are held out for test.
data_train = MFCC[0:int(MFCC.shape[0]*0.9)]
data_test = MFCC[int(MFCC.shape[0]*0.9):]
# Load label
# NOTE(review): the file handle is never closed -- consider a `with` block.
file=open("FC_label.txt")
lines=file.readlines()
label = []
for line in lines:
    line = line.strip().split('\n')
    label.append(line)
# Each parsed line is a one-element list; take column 0 and cast to int.
label_int = (np.array(label)[:,0]).astype(int)
label_train = label_int[0:int(MFCC.shape[0]*0.9)]
label_test = label_int[int(MFCC.shape[0]*0.9):]
# requires_grad=False: inputs and labels are fixed data, not parameters.
audio_data_train_tensor = Variable(torch.tensor(data_train),requires_grad = False)
audio_data_test_tensor = Variable(torch.tensor(data_test),requires_grad = False)
audio_label_train_tensor = Variable(torch.tensor(label_train),requires_grad = False)
audio_label_test_tensor = Variable(torch.tensor(label_test),requires_grad = False)
class model(nn.Module):
    """1-D CNN + bidirectional LSTM emotion classifier over 39-dim MFCC frames.

    Expects input shaped (batch, 39, T); the fully connected layer's 23904
    input features (747 steps x 32 LSTM outputs) pin T to 750 frames given
    the kernel-size-4 convolution. Emits 4 class logits per sample.
    """

    def __init__(self):
        super().__init__()
        # Attribute names are kept unchanged so saved state_dicts still load.
        self.conv0 = nn.Sequential(
            nn.Conv1d(in_channels=39, out_channels=10, kernel_size=4),
            nn.ReLU(),
        )
        self.lstm_audio = nn.LSTM(
            input_size=10, hidden_size=16, batch_first=True, bidirectional=True
        )
        self.fc0 = nn.Sequential(
            nn.Flatten(start_dim=1, end_dim=-1),
            nn.Linear(in_features=23904, out_features=4),
        )

    def forward(self, x):
        features = self.conv0(x)
        # LSTM wants (batch, time, channels); hidden state is discarded.
        sequence_out, _ = self.lstm_audio(features.transpose(1, 2))
        return self.fc0(sequence_out)
# Test
# Rebuild the network, restore the trained weights, switch to eval mode.
Model_r = model()
Model_r.load_state_dict(torch.load('model_trained_audio_only.pkl'))
Model_r.eval()
outputs = Model_r(audio_data_test_tensor.float())
# Predicted class = argmax over the 4 logits.
_, y_pred = outputs.max(dim=1)
accuracy = int(sum(y_pred == audio_label_test_tensor))/len(audio_label_test_tensor)
# NOTE(review): "accuray" is a typo in the printed message.
print("test accuray: {:.2f}".format(accuracy))
# Build a 4x4 confusion matrix (rows: true label, columns: prediction).
mat = np.zeros(shape=(4,4))
mat_new = np.zeros(shape=(4,4))  # NOTE(review): never used afterwards
for i in range(0,len(audio_label_test_tensor)):
    m = audio_label_test_tensor[i]
    n = y_pred[i]
    mat[m,n] = mat[m,n] + 1
# generate heatmap of AER_test
# Normalise each row so cells show per-class recall fractions.
for i in range(4):
    mat[i,:] = mat[i,:]/sum(mat[i,:])
import seaborn as sns
import matplotlib as mpl
sns.set(style = "whitegrid",color_codes = True)
ax = sns.heatmap(mat, annot=True) #notation: "annot" not "annote"
bottom, top = ax.get_ylim()
# Presumably works around the matplotlib bug that clips the first/last
# heatmap rows -- confirm it is still needed with the installed version.
ax.set_ylim(bottom + 0.5, top - 0.5)
plt.show()
| [
"sray1997@163.com"
] | sray1997@163.com |
b718955f50d1f4ad7b792f47ff62beb3938634f9 | 3a1fea0fdd27baa6b63941f71b29eb04061678c6 | /src/ch06/instructions/math/Rem.py | cc1e03153ae35a83b62c6b568c1c156b2675f163 | [] | no_license | sumerzhang/JVMByPython | 56a7a896e43b7a5020559c0740ebe61d608a9f2a | 1554cf62f47a2c6eb10fe09c7216518416bb65bc | refs/heads/master | 2022-12-02T17:21:11.020486 | 2020-08-18T06:57:10 | 2020-08-18T06:57:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,548 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@author: HuRuiFeng
@file: Rem.py
@time: 2019/9/15 20:04
@desc: 求余(rem)指令
"""
import math
from ch06.instructions.base.Instruction import NoOperandsInstruction
# double remainder
class DREM(NoOperandsInstruction):
    """drem: pop two doubles and push the JVM remainder of v1 % v2."""
    def execute(self, frame):
        stack = frame.operand_stack
        v2 = stack.pop_numeric()  # divisor
        v1 = stack.pop_numeric()  # dividend
        # math.fmod raises ValueError for a zero divisor or an infinite
        # dividend, but the JVM defines both cases as NaN -- guard them.
        if v2 == 0.0 or math.isinf(v1):
            result = math.nan
        else:
            result = math.fmod(v1, v2)
        stack.push_numeric(result)
# float remainder
class FREM(NoOperandsInstruction):
    """frem: pop two floats and push the JVM remainder of v1 % v2."""
    def execute(self, frame):
        stack = frame.operand_stack
        v2 = stack.pop_numeric()  # divisor
        v1 = stack.pop_numeric()  # dividend
        # math.fmod raises ValueError for a zero divisor or an infinite
        # dividend, but the JVM defines both cases as NaN -- guard them.
        if v2 == 0.0 or math.isinf(v1):
            result = math.nan
        else:
            result = math.fmod(v1, v2)
        stack.push_numeric(result)
# int remainder
class IREM(NoOperandsInstruction):
    """irem: pop two ints and push the JVM remainder of v1 % v2.

    The JVM truncates the quotient toward zero, so the remainder takes the
    sign of the dividend; Python's % floors instead (e.g. -7 % 2 == 1 in
    Python but -1 in Java), so compute from absolute values and restore
    the dividend's sign.
    """
    def execute(self, frame):
        stack = frame.operand_stack
        v2 = stack.pop_numeric()  # divisor
        v1 = stack.pop_numeric()  # dividend
        if v2 == 0:
            raise RuntimeError("java.lang.ArithmeticException: / by zero")
        result = abs(v1) % abs(v2)
        if v1 < 0:
            result = -result
        stack.push_numeric(result)
# long remainder
class LREM(NoOperandsInstruction):
    """lrem: pop two longs and push the JVM remainder of v1 % v2.

    As with irem, the JVM remainder takes the sign of the dividend, while
    Python's % floors -- so compute from absolute values and restore the
    dividend's sign.
    """
    def execute(self, frame):
        stack = frame.operand_stack
        v2 = stack.pop_numeric()  # divisor
        v1 = stack.pop_numeric()  # dividend
        if v2 == 0:
            raise RuntimeError("java.lang.ArithmeticException: / by zero")
        result = abs(v1) % abs(v2)
        if v1 < 0:
            result = -result
        stack.push_numeric(result)
| [
"huruifeng1202@163.com"
] | huruifeng1202@163.com |
5a05c368915f91b2fa2adce1dd15e0fd531cac44 | abfe7041f66b9441e5748d3eeae36abedf94e708 | /manager/admin.py | e02874bdc57f1de3a354ab65f250b360bd3be097 | [] | no_license | SaurabhKumarVerma/newsfeed | 743333ff10354723f234fa6513927693ff4f9449 | 57271b3f4e45e7cde500e0eff8cd0874bd796172 | refs/heads/main | 2022-12-30T22:45:07.114578 | 2020-10-22T10:08:49 | 2020-10-22T10:08:49 | 303,373,271 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | from django.contrib import admin
from . models import Manager
# Register your models here.
# Expose the Manager model in the Django admin with the default ModelAdmin.
admin.site.register(Manager)
"saurav88871kumar@hotmail.com"
] | saurav88871kumar@hotmail.com |
278050766a8cc25d551e9bb1d2c27c53d27b3b1a | 4a518e86e013596c31f1e389f12b8b00bbb3d725 | /package190904/test12.py | ddab69c0ce689d1a7cc35c5888f13bcbd4e935ce | [] | no_license | Ccccaramel/Python | e88b9b8d6d156b9ee80fefc17b88a7dfd21885fd | e503818564c9689e57b7a77165bcb44cc2ece096 | refs/heads/master | 2021-06-21T14:07:23.652783 | 2021-05-07T02:29:19 | 2021-05-07T02:29:19 | 218,760,966 | 0 | 0 | null | 2019-11-01T03:29:05 | 2019-10-31T12:28:44 | null | UTF-8 | Python | false | false | 428 | py | # 关键字(end)
print("-----end-----")
# The `end` keyword argument lets successive print() calls share one line,
# or appends a custom terminator (here a comma) after each value.
a, b = 0, 1
# Print the Fibonacci numbers below 1000, comma-separated on one line.
while b < 1000:
    print(b, end=',')
    a, b = b, a+b
# Cross-package example: override a built-in module's methods by inheritance.
# import sys
# sys.path.append("package190904A")
# from stringR import string
# s1=string("yes")
# print("s1" ,s1.capitalize())
# print("s1" ,s1.isalpha())
"444543565@qq.com"
] | 444543565@qq.com |
def dortgen_alan_hesapla_v1(uzun, kisa):
    """Print the sides and area of a rectangle and return the area.

    Args:
        uzun: long side length.
        kisa: short side length.

    Returns:
        The computed area (uzun * kisa). The original only printed the
        result; returning it as well is backward compatible and makes the
        function usable programmatically.
    """
    print("uzun kenar:", uzun)
    print("kisa kenar:", kisa)
    sayilari_carp = uzun*kisa
    print("Alan:", sayilari_carp)
    return sayilari_carp


dortgen_alan_hesapla_v1(10, 8)
| [
"ibrhmisot@gmail.com"
] | ibrhmisot@gmail.com |
ca1612d5068d3f4480ffbc0428ee9943db2a5476 | e4ec5b6cf3cfe2568ef0b5654c019e398b4ecc67 | /azure-cli/2.0.18/libexec/lib/python3.6/site-packages/azure/mgmt/network/v2016_09_01/models/application_gateway_ssl_policy.py | ff1c8f6c4ca1af9b4fb15521b1d8135387e1a75f | [] | no_license | EnjoyLifeFund/macHighSierra-cellars | 59051e496ed0e68d14e0d5d91367a2c92c95e1fb | 49a477d42f081e52f4c5bdd39535156a2df52d09 | refs/heads/master | 2022-12-25T19:28:29.992466 | 2017-10-10T13:00:08 | 2017-10-10T13:00:08 | 96,081,471 | 3 | 1 | null | 2022-12-17T02:26:21 | 2017-07-03T07:17:34 | null | UTF-8 | Python | false | false | 1,153 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ApplicationGatewaySslPolicy(Model):
    """Application gateway SSL policy.

    :param disabled_ssl_protocols: SSL protocols to be disabled on application
     gateway. Possible values are: 'TLSv1_0', 'TLSv1_1', and 'TLSv1_2'.
    :type disabled_ssl_protocols: list of str or
     :class:`ApplicationGatewaySslProtocol
     <azure.mgmt.network.v2016_09_01.models.ApplicationGatewaySslProtocol>`
    """

    # msrest serialization map: Python attribute -> wire key and type.
    _attribute_map = {
        'disabled_ssl_protocols': {'key': 'disabledSslProtocols', 'type': '[str]'},
    }

    def __init__(self, disabled_ssl_protocols=None):
        """Initialize the policy; omitting the list disables no protocols."""
        self.disabled_ssl_protocols = disabled_ssl_protocols
| [
"Raliclo@gmail.com"
] | Raliclo@gmail.com |
30478c215b9a270f62b60b57d3b5b0f724f32c95 | d2c8f44e23b8cab81af2672f9384aa12d063b251 | /Preprocessing/process.py | 18ab155db021cdc98712ec1c31cb251aa7406658 | [] | no_license | tjlogue4/kaggle-OSIC-Pulmonary-Fibrosis-Progression | bf8a0b0f2a8c1a5e75f31739b8abfde4c0a7de95 | e028fc8da2b7b44afe786e4f92f0f63fe28676b0 | refs/heads/master | 2023-02-17T02:27:12.003839 | 2021-01-13T04:47:24 | 2021-01-13T04:47:24 | 295,912,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,440 | py | import pandas as pd
import pydicom
import numpy as np
import os
import cv2
from tqdm import tqdm
import multiprocessing as mp
cpu_count = mp.cpu_count() #gets the number of cores your machine has
NEW_SIZE = 512  # target width/height (pixels) for every resized slice
DATA_DIR = 'X:/OISC/osic-pulmonary-fibrosis-progression/train/'
SAVE_DIR = 'C:/temp/processed_files/'
PATIENTS = os.listdir(DATA_DIR)  # one sub-directory of DICOMs per patient
df_train = pd.read_csv('X:/OISC/osic-pulmonary-fibrosis-progression/train.csv')
p_amount = len(PATIENTS)
# Start a fresh CSV error log; worker processes append failures to it.
errors = pd.DataFrame(columns = ['Patient', 'Error'])
errors.to_csv('error_log.csv', index = False)
#slope
# Fit a straight line (FVC vs Weeks) per patient; the slope summarises how
# fast the patient's lung capacity declines.
for patient in PATIENTS:
    x = df_train[df_train["Patient"]==patient]["Weeks"]
    y = df_train[df_train["Patient"]==patient]["FVC"]
    slope = np.polyfit(x, y, 1)[0]
    df_train.loc[df_train["Patient"]==patient, 'Slope'] = slope
# The per-visit columns are no longer needed once the slope is computed;
# dropping them (and duplicates) leaves one row per patient.
del df_train['Weeks']
del df_train['FVC']
del df_train["Percent"]
df_train = df_train.drop_duplicates()
# Binary label: 1 = fast decline (slope below -3), 0 otherwise.
df_train['label'] = 0
df_train.loc[df_train['Slope'] < -3,'label'] = 1
#######################################################################################
#code from https://www.kaggle.com/allunia/pulmonary-dicom-preprocessing
def set_outside_scanner_to_air(raw_pixelarrays):
    """Zero out the outside-of-scanner region of a raw pixel volume, in place.

    OSIC scans mark pixels outside the scanner field with a raw value of
    -2000; thresholding at -1000 separates those pixels from air (0).  The
    input array is modified in place and also returned for convenience.
    """
    outside_mask = raw_pixelarrays <= -1000
    raw_pixelarrays[outside_mask] = 0
    return raw_pixelarrays
def transform_to_hu(slices):
    """Convert a list of DICOM slices into a Hounsfield-unit int16 volume.

    Stacks every slice's pixel array, zeroes the outside-scanner region,
    then applies each slice's RescaleSlope/RescaleIntercept.
    """
    volume = np.stack([dcm.pixel_array for dcm in slices]).astype(np.int16)
    volume = set_outside_scanner_to_air(volume)
    for idx, dcm in enumerate(slices):
        intercept = dcm.RescaleIntercept
        slope = dcm.RescaleSlope
        if slope != 1:
            # Non-unit slope: rescale in float64, then truncate back to int16
            # (assigning a float array into the int16 volume casts it).
            volume[idx] = slope * volume[idx].astype(np.float64)
            volume[idx] = volume[idx].astype(np.int16)
        volume[idx] += np.int16(intercept)
    return np.array(volume, dtype=np.int16)
#######################################################################################
def multi(p= patient, data_dir = DATA_DIR, patients = PATIENTS, new_size = NEW_SIZE):
    """Process one patient: load, sort, HU-convert, resize and save all slices.

    Each slice is written to SAVE_DIR as ``<patient><slice_index>.npy`` holding
    ``[image, label]``.  Any failure is appended to ``error_log.csv``.
    (The odd ``p= patient`` default preserves the original call signature.)
    """
    try:
        path = data_dir + p
        slices = [pydicom.read_file(path + '/' + s) for s in os.listdir(path)]
        # Sort slices by physical z-position so the volume is in scan order.
        slices.sort(key = lambda x: float(x.ImagePositionPatient[2]))
        slices = transform_to_hu(slices)
        slices = [cv2.resize(np.array(each_slice), (new_size, new_size)) for each_slice in slices]
        label = int(df_train[df_train['Patient'] == p]['label'])
        for num, each_slice in enumerate(slices):
            np.save(f'{SAVE_DIR}{p}{num}.npy', [each_slice, label])
    except Exception as e:
        # BUG FIX: the error row previously used the module-level loop variable
        # ``patient`` (always the *last* patient iterated at import time)
        # instead of the patient actually being processed; log ``p`` instead.
        error_dict = {'Patient': p, 'Error': e}
        temp_error = pd.read_csv('error_log.csv')
        temp_error = temp_error.append(error_dict, ignore_index = True)
        temp_error.to_csv('error_log.csv', index = False)
if __name__ == '__main__':
    # One worker process per CPU core; each call to ``multi`` handles one
    # patient end-to-end.
    pool = mp.Pool(cpu_count)
    # imap_unordered yields as results finish, which keeps the tqdm progress
    # bar moving even when patients take very different amounts of time.
    for _ in tqdm(pool.imap_unordered(multi, [patient for patient in PATIENTS]), total = len(PATIENTS)):
        pass
    #results = pool.map(multi, [patient for patient in PATIENTS]) #here we call the funtion and the list we want to pass
    pool.close()
| [
"tjlogue4@gmail.com"
] | tjlogue4@gmail.com |
898f8669c025fcb6c7a6aba2fa3dcebc841b282c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02957/s796407054.py | 9de0035e3ab02e37a3607cddb48ae764505a081a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | A,B=map(int,input().split())
# A midpoint K with |A-K| == |B-K| exists only when A+B is even.
total = A + B
print("IMPOSSIBLE" if total % 2 == 1 else total // 2)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
a0b156200bd22de56905cd83e7809dfe81abdbab | 75d518a3129b5d5fc50fe49ac6e4d0a5a1078946 | /Truncatable_primes.py | 9c50eec508574f51922af8fa4905d42b1b40819a | [] | no_license | Goasty/Project-Euler | 823455673245b7bc74a6dbbc705a4f3014c1ebde | 9df17550427b7ede0e30a35c72329717c552b978 | refs/heads/main | 2023-03-26T02:13:41.316311 | 2021-03-24T18:10:20 | 2021-03-24T18:10:20 | 351,173,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | #https://projecteuler.net/problem=37
# Problem: https://projecteuler.net/problem=37
# The original draft did not parse (every ``def`` was missing its ``:``), the
# helper bodies were empty or incomplete, and ``prime()`` only tested evenness.
# Replaced with a complete, working solution.

def prime_sieve(limit):
    """Return a bytearray where sieve[i] == 1 iff i is prime (0 <= i < limit)."""
    sieve = bytearray([1]) * limit
    sieve[0] = sieve[1] = 0
    for i in range(2, int(limit ** 0.5) + 1):
        if sieve[i]:
            sieve[i * i::i] = bytearray(len(range(i * i, limit, i)))
    return sieve


def is_truncatable(n, sieve):
    """True if n and every left/right truncation of n is prime (n > 7 only)."""
    if n <= 7:
        return False  # 2, 3, 5 and 7 are excluded by the problem statement
    digits = str(n)
    for i in range(len(digits)):
        # digits[i:]    -> left truncations  (3797, 797, 97, 7)
        # digits[:i+1]  -> right truncations (3, 37, 379, 3797)
        if not sieve[int(digits[i:])] or not sieve[int(digits[:i + 1])]:
            return False
    return True


def solve():
    """Return the sum of the eleven truncatable primes (Project Euler 37)."""
    # The largest truncatable prime is 739397, so a sieve up to 10**6 suffices.
    sieve = prime_sieve(1000000)
    found = []
    candidate = 11
    while len(found) < 11:
        # The last digit survives as the final left-truncation, so it must be
        # prime; for odd n > 7 that leaves only 3 or 7 (5 would divide n).
        if candidate % 10 in (3, 7) and is_truncatable(candidate, sieve):
            found.append(candidate)
        candidate += 2  # even candidates > 2 can never be prime
    return sum(found)


if __name__ == '__main__':
    print(solve())
"noreply@github.com"
] | noreply@github.com |
dd3a13b3441bdb43584cc9c9fa763ecb19c44f8e | 3e8e1add88b0782bc64f8682b05f399638094729 | /teacherstudent/urls.py | b239ac643a67403dbc79edb5bef2e6da441b6a22 | [] | no_license | nnish09/Task2 | 3e154c8f61ef88034cff6e1e42fdd616d2922951 | cddc23c062c81e0669b6656d7f294f61f3c05976 | refs/heads/master | 2023-04-29T12:04:14.565984 | 2019-09-23T10:52:08 | 2019-09-23T10:52:08 | 209,307,595 | 0 | 0 | null | 2023-04-21T20:37:36 | 2019-09-18T12:51:29 | CSS | UTF-8 | Python | false | false | 1,171 | py | """teacherstudent URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.conf import settings
from django.conf.urls.static import static
# URL routing table: admin site, the studentteacher app at the site root,
# Django's built-in auth views, and the friendship app.  Static-file routes
# are appended so collected assets resolve during development.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('studentteacher.urls')),
    path('accounts/', include('django.contrib.auth.urls')),
    path('friendship/', include('friendship.urls'))
]+ static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
# Serve user-uploaded media through Django itself, but only in DEBUG mode.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
| [
"nishtha0995@gmail.com"
] | nishtha0995@gmail.com |
eed8531d3cb546a055192bd95204d0d304ac460e | df9d6ecde9202bd4b73e69cd28c50b41195c0aa1 | /tests/data.py | c5e249eb686ffb112d8483880a75c1c68a661d9d | [
"MIT"
] | permissive | ppinard/dataclasses-sql | 8d6d18dd558537fbf40c386a11fdd75f4720fa2a | 8f2eeaf090887985f8fd9853adb763883906bed6 | refs/heads/master | 2021-03-23T22:48:34.437619 | 2020-06-10T14:23:07 | 2020-06-10T14:23:07 | 247,489,847 | 6 | 3 | MIT | 2020-11-02T01:43:20 | 2020-03-15T15:06:32 | Python | UTF-8 | Python | false | false | 858 | py | """"""
# Standard library modules.
import dataclasses
import datetime
# Third party modules.
# Local modules.
# Globals and constants variables.
@dataclasses.dataclass
class TaxonomyData:
    """Taxonomic classification of a tree specie (test fixture).

    Every field carries ``metadata={"key": True}``; presumably dataclasses-sql
    treats key fields as the row's unique identity in the generated table --
    TODO confirm against the library's docs.
    """
    kingdom: str = dataclasses.field(metadata={"key": True})
    order: str = dataclasses.field(metadata={"key": True})
    family: str = dataclasses.field(metadata={"key": True})
    genus: str = dataclasses.field(metadata={"key": True})
@dataclasses.dataclass
class TreeData:
    """One observed tree (test fixture).

    The key fields (serial number, taxonomy, specie) identify the record;
    the remaining measurements are optional and default to None.  Note that
    ``taxonomy`` nests another dataclass.
    """
    serial_number: int = dataclasses.field(metadata={"key": True})
    taxonomy: TaxonomyData = dataclasses.field(metadata={"key": True})
    specie: str = dataclasses.field(metadata={"key": True})
    # Optional measurements; annotations give the column type, None means
    # "not recorded yet".
    diameter_m: float = None
    long_description: bytes = None
    has_flower: bool = None
    plantation_datetime: datetime.datetime = None
    last_pruning_date: datetime.date = None
| [
"philippe.pinard@gmail.com"
] | philippe.pinard@gmail.com |
d1ca62f472d0cfb194929d1011a5b121f36a89d1 | c0527d63f267223d91288db0566a559a2b13b091 | /Problem Set 2/2.2/data/animals.py | 11f10ca0003ff0d69e94c5082b68a3dd9244919f | [] | no_license | fyjgreatlion/Computational-Linguistics | 43439a768b8bc595bd31df331c24767c00530243 | fcc6371c1abe52eb462b63c393d6d49a275ee06d | refs/heads/master | 2022-02-15T01:07:46.075257 | 2019-04-15T02:41:14 | 2019-04-15T02:41:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,819 | py | from collections import defaultdict
###animals from Hill et al. (2012)
# One list per semantic category.  Categories deliberately overlap: the same
# animal may appear in several lists (see ANIMAL_TO_CATEGORIES below).
african_animals = ['aardvark', 'antelope', 'buffalo', 'camel', 'chameleon', 'cheetah', 'chimpanzee', 'cobra', 'duiker', 'eland', 'elephant', 'gazelle', 'genet', 'giraffe', 'gnu', 'gorilla', 'hippopotamus', 'hyena', 'impala', 'jackal', 'kongoni', 'kudu', 'lemur', 'leopard', 'lion', 'lioness', 'manatee', 'meerkat', 'mongoose', 'monkey', 'okapi', 'oryx', 'ostrich', 'panther', 'rhino', 'rhinoceros', 'steenbok', 'tiger', 'warthog', 'wildebeest', 'zebra']
animals_used_for_fur = ['alpaca','beaver','chamois','chinchilla','ermine','fox','llama','mink','rabbit','sable','vicuna']
arctic_far_north_animals = ['arctic fox','auk','caribou','muskox','penguin','polar bear','reindeer','seal','walrus','woolly mammoth']
australian_animals = ['bandicoot','devil','dingo','duckbill','emu','kangaroo','kiwi','koala',
'possum','platypus','possum','sugar glider','tasmanian devil','wallaby','wallaroo','wombat']
beasts_of_burden = ['ass','burro','camel','colt','donkey','dromedary','horse','llama','mare',
'mule','mustang','ox','pony','trotter','yak']
# NOTE(review): this list contains non-birds ('mouse', 'ewe', 'bluegill') --
# kept as-is because it mirrors the published norms data.
birds = ['albatross', 'avian', 'bird', 'blackbird', 'bluebird', 'bluefooted booby', 'bluegill', 'bluejay', 'bobolink', 'booby', 'bullfinch', 'bunting', 'buzzard', 'canary', 'cardinal', 'chickadee', 'chicken', 'cock', 'cockatiel', 'cockatoo', 'crow', 'cuckoo', 'dodo', 'dove', 'drake', 'duck', 'duckling', 'eagle', 'eaglet', 'egret', 'emu', 'ewe', 'falcon', 'finch', 'flamingo', 'fowl', 'gander', 'goldfinch', 'goshawk', 'gosling', 'grebe', 'grouse', 'gull', 'harrier', 'hawk', 'heron', 'hummingbird', 'ibis', 'jackdaw', 'jay', 'kingfisher', 'kite', 'kiwi', 'lark', 'loon', 'macaw', 'mallard', 'merlin', 'mockingbird', 'mouse', 'myna', 'nightingale', 'oriole', 'osprey', 'ostrich', 'owl', 'parakeet', 'parrot', 'partridge', 'peacock', 'pelican', 'penguin', 'peregrine', 'pheasant', 'pigeon', 'quail', 'quetzal', 'raven', 'rhea', 'roadrunner', 'robin', 'seagull', 'shrike', 'songbird', 'sparrow', 'spoonbill', 'starling', 'stilt', 'stork', 'swallow', 'swallowtail', 'swan', 'swift', 'tanager', 'thrush', 'toucan', 'trumpeter', 'turkey', 'vulture', 'woodpecker', 'wren']
bovine = ['bison','buffalo','bullock','calf','cattle','cow','heifer','monitor','muskox','steer','water buffalo','yak']
canine = ['akita', 'black lab', 'blood hound', 'bulldog', 'canine', 'chihuahua', 'coyote', 'dachshund', 'dalmatian', 'dog', 'fox', 'golden retriever', 'great dane', 'greyhound', 'harrier', 'husky', 'hyena', 'jackal', 'labrador retriever', 'malamute', 'pembroke welsh corgi', 'poodle', 'pug', 'puggle', 'pup', 'shihtzu', 'siberian husky', 'terrier', 'timber wolf', 'wild dog', 'wolf'] ##removed duplicate wolf
deers = ['blacktailed deer','buck','caribou','deer','doe','eland','elk','fawn','gazelle','gnu','impala','moose','muledeer','reindeer','roe','stag','whitetailed deer','wildebeest']
farm_animals = ['ass','billygoat','bronco','bullock','calf','chick','chicken','cock','colt',
'cow','donkey','ferret','foal','goat','heifer','hen','hog','horse','kid','lamb','mare','miniature pony','billy_goat',
'mule','pig','piglet','pony','potbellied pig','ram','rooster','sheep','snake','sow','spider','stallion',
'turkey'] #added billy_goat
feline = ['bengal tiger','bobcat','bull','cat','cat','cheetah','cougar','crane','jaguar','leopard',
'liger','lion','lynx','mountainlion','ocelot','panther','puma','siamese cat','snow leopard','snow lion','tiger','tomcat','whitetiger','wildcat']
fish = ['angelfish','arrowhead shark','barracuda','bass','betta','blowfish','carp','catfish',
'clownfish','cuttlefish','fish','flounder','freshwater fish','goldfish','great white shark',
'grenadier','grouper','grunt','guppy','herring','jack','koi','lamprey','mackerel','mako shark',
'minnow','parrotfish','pike','pink salmon','piranha','rainbowfish','salmon','saltwater fish',
'seabass','shark','shrimp','smelt','stickleback','sturgeon','swordfish','tilapia','trout','tuna','whale shark']
insectivores = ['aardvark','anteater','armadillo','bat','echidna','hedgehog','mole','shrew']
# NOTE(review): 'dolphin' and 'earthworm' appear among the insects -- again
# preserved verbatim from the source norms.
insects = ['ant','antlion','aphid','bee','beetle','blackwidow','bug','butterfly','caterpillar',
'centipede','cicada','cockroach','cricket','daddy long legs','dolphin','dragonfly','earthworm',
'flea','fly','gnat','grasshopper','grub','honeybee','hornet','insect','June beetle','ladybug','larva',
'leafy','louse','maggot','mealworm','mite','monarch butterfly','mosquito','moth','pill bug',
'praying mantis','scorpion','stick insect','tarantula','termite','tick','wasp','worm','yellow jacket']
north_american_animals = ['badger','bear','beaver','bighorn','bison','blackbear','boar',
'bobcat','brown bear','caribou','chipmunk','cougar','cub','deer','elk','fox','grizzly bear','kodiak bear','moose','mountain goat','mountain lion','puma','rabbit','raccoon','skunk','squirrel','wolf']
pets = ['budgie','canary','cat','cockatiel','cockatoo','dog','gerbil','golden retriever',
'goldfish','guinea pig','guppy','hamster','kitten','labrador retriever','malamute','parakeet',
'parrot','poodle','puppy','rabbit']
primates = ['ape','baboon','bonobo','chimpanzee','gibbon','gorilla','howler monkey',
'human','lemur','loris','marmoset','monkey','orangutan','primates','saki monkey','shrew','snow monkey','spider monkey','titi']
rabbits = ['bunny','coney','hare','jackrabbit','rabbit']
reptiles_amphibians = ['adder','alligator','amphibian','anaconda','anole','asp','black mamba','boa constrictor','bullfrog','caiman','chameleon','cobra','crocodile','diamondback',
'dinosaur','dragon','frog','gardensnake','gecko','godzilla','iguana','komododragon','lizard',
'moccasin','newt','python','rattlesnake','reptile','salamander','serpent','snake','toad','tortoise',
'tree frog','turtle','velociraptor','viper','watersnake']
rodents = ['agouti','beaver','black squirrel','capybara','cavy','chinchilla','chipmunk',
'dormouse','flying squirrel','gerbil','golden marmot','gopher','groundhog','guinea pig',
'hamster','hedgehog','lemming','marmot','mole','mouse','muskrat','naked mole rat','porcupine',
'prairie dog','rat','rodent','shrew','squirrel','woodchuck']
water_animals = ['alga','alligator','anemone','axolotl','beaver','beluga','blue whale',
'boto','brine shrimp','clam','conch','coral','cowry','crab','crawfish','crayfish','dolphin','eel',
'elephant seal','fish','frog','goose','hammerhead shark','humpback whale','jellyfish','killer whale','leech','limpet','lobster','manatee','mantaray','monster','muskrat','mussel','narwhal',
'nautilus','newt','octopus','orca','otter','oyster','penguin','platypus','porpoise','ray','salamander',
'sand dollar','scallop','seahorse','seal','sea lion','sea monkey','shark','slug','snail','sponge',
'squid','starfish','stingray','tadpole','toad','turtle','urchin','whale']
weasels = ['badger','ferret','groundhog','marten','mink','mongoose','otter','polecat','sea otter','skunk','stoat','weasel','wolverine']
ALL_CATEG_NAMES = ['african_animals', 'animals_used_for_fur', 'arctic_far_north_animals', 'australian_animals', 'beasts_of_burden', 'birds', 'bovine', 'canine', 'deers', 'farm_animals', 'feline', 'fish', 'insectivores', 'insects', 'north_american_animals', 'pets', 'primates', 'rabbits', 'reptiles_amphibians', 'rodents', 'water_animals', 'weasels']
ALL_CATEG = [african_animals, animals_used_for_fur, arctic_far_north_animals, australian_animals, beasts_of_burden, birds, bovine, canine, deers, farm_animals, feline, fish, insectivores, insects, north_american_animals, pets, primates, rabbits, reptiles_amphibians, rodents, water_animals, weasels]
# Category name -> its member list (the two sequences above are parallel).
CATEG_DICT = dict(zip(ALL_CATEG_NAMES, ALL_CATEG))
# Inverted index: animal -> every category name that contains it.
ANIMAL_TO_CATEGORIES = defaultdict(list)
for category_name, member_animals in CATEG_DICT.items():
    for animal in member_animals:
        ANIMAL_TO_CATEGORIES[animal].append(category_name)
"josh.gelua@mail.utoronto.ca"
] | josh.gelua@mail.utoronto.ca |
3f9b54e396ee8f2754c5e658f4b89988a00aaac7 | 66a031b47088d52f1b2f3121fde45c8fb8ebe4ad | /1 лаба/1.py | 47e2ff7d1db473d058315dbf2bc6a91457460160 | [] | no_license | Aznhalfblood/PytonCourse | 0fa2f24faa8fc4041a6b81a1f6ff0567b3eff502 | 7e9e884f28ca9fa08ce746c1df5548ace7805574 | refs/heads/main | 2023-04-30T19:33:01.663218 | 2021-05-13T20:13:32 | 2021-05-13T20:13:32 | 341,880,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | print('Введите деньги: ')
# Read an amount of money and split it into whole rubles and kopecks.
try:
    A = float(input())
    if A <= 0:
        raise ValueError
except (ValueError, TypeError):
    print('Введено число с минусом')
    raise SystemExit
rubles = int(A)
# BUG FIX: the original ``int(b * 100)`` truncated and lost a kopeck to float
# representation error (for 10.58 the fraction is 0.5799999..., so it printed
# 57).  Round to the nearest kopeck instead.
kopecks = round((A - rubles) * 100)
if kopecks == 100:  # rounding can carry over into the next ruble
    rubles += 1
    kopecks = 0
print("Рублей: ", rubles)
print("Копеек: ", kopecks)
| [
"noreply@github.com"
] | noreply@github.com |
005b9ddae67b81e287690165c8501ea5af4efa32 | f08b297a61acdc85071af0883ca98f1f91397b13 | /home/views.py | b24aa0b0c4090ce36d382732cf491a1170ac4ed1 | [] | no_license | SSN-IEEE/IEEESB | 2edde18ff7bdaaa0a72f65517cb9a4c1554f5060 | 0a1407eccfe24ce2b442f5b000899bc67cdab528 | refs/heads/master | 2020-12-02T06:18:57.678688 | 2018-01-16T00:14:23 | 2018-01-16T00:14:23 | 96,814,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,186 | py | from datetime import date, datetime
from django.shortcuts import render, render_to_response, get_object_or_404
from .models import events
from .models import gallery
# Create your views here.
# NOTE(review): render_to_response was deprecated in Django 2.0 and removed in
# 3.0; presumably this project targets an older Django -- confirm before
# upgrading (the fix is render(request, template, context)).
def index(request):
    """Landing page: the next five upcoming events, soonest first."""
    event = events.objects.filter(start_date__gte=datetime.now()).order_by('start_date')[:5]
    return render_to_response('index.html', {'events': event})
def about(request):
    """About page; passes every event to the template."""
    return render_to_response('about.html', {'events' : events.objects.all()})
def past_events(request):
    """Events that have already started, most recent first."""
    event = events.objects.filter(start_date__lt = datetime.now()).order_by('-start_date')
    return render_to_response('past_events.html', {'events' : event})
def upcoming_events(request):
    """Events starting now or later, soonest first."""
    event = events.objects.filter(start_date__gte = datetime.now()).order_by('start_date')
    return render_to_response('upcoming_events.html', {'events' : event})
def contact(request):
    """Static contact page."""
    return render(request, 'contact.html')
def gallery_images(request):
    """Gallery page listing all uploaded images."""
    return render(request, 'gallery.html', {'images':gallery.objects.all()})
def event_page_base(request, event_id):
    """Detail page for a single event; 404s on an unknown id."""
    event = get_object_or_404(events, pk=event_id)
    return render(request, 'event_page_base.html', {'events':event})
"chamow97@hotmail.com"
] | chamow97@hotmail.com |
3f74c1b5fa2bc630a8bf21264065d02361cd4cb8 | be36550ed6c8ab32351b4512af095c1ab907dd22 | /u19_pipeline/alert_system/custom_alerts/rig_bias.py | 076d36dbd531b980a51ae59490c60f91cb7748ec | [] | no_license | BrainCOGS/U19-pipeline_python | 633d47250641f3eb2f60708a8ca7a8e8f19bacd4 | 7bc81be171fe4b68bc3e59e1f57f2148651d9772 | refs/heads/master | 2023-08-17T04:48:30.044265 | 2023-08-16T13:33:19 | 2023-08-16T13:33:19 | 216,374,484 | 2 | 9 | null | 2023-07-20T04:23:35 | 2019-10-20T14:26:17 | Jupyter Notebook | UTF-8 | Python | false | false | 3,059 | py |
import pandas as pd
import datajoint as dj
import datetime
import numpy as np
import u19_pipeline.alert_system.behavior_metrics as bm
import u19_pipeline.alert_system.alert_system_utility as asu
# Slack Configuration dictionary
# Slack channels and users that receive this alert.
slack_configuration_dictionary = {
    'slack_notification_channel': ['custom_alerts'],
    'slack_users_channel': ['alvaros']
}
# |z-score| threshold above which a session's bias is considered alarming.
zscore_alert = 2
def main():
    '''
    Custom alert: detect rigs where several subjects showed a strong choice
    bias to the *same* side on the most recent day, which suggests a rig
    (hardware) problem rather than a subject problem.

    Returns a DataFrame with one row per flagged rig/side, including the
    affected subjects and an alert message.
    '''
    # Get sessions (keys include each session's rig / location)
    _, rig_session_key_list = asu.get_acquisition_data_alert_system(type='session_location')
    # Get trials for those sessions, joined with the session's location
    behavior = dj.create_virtual_module('behavior', dj.config['custom']['database.prefix']+'behavior')
    acquisition = dj.create_virtual_module('acquisition', dj.config['custom']['database.prefix']+'acquisition')
    rig_trial_df = pd.DataFrame((behavior.TowersBlock.Trial * acquisition.SessionStarted.proj('session_location') \
         & rig_session_key_list).fetch('KEY', 'trial_type', 'choice', 'session_location', as_dict=True))
    # Per-session bias, then z-scored within each subject's history
    bias_df = bm.BehaviorMetrics.get_bias_from_trial_df(rig_trial_df)
    bias_df = bm.BehaviorMetrics.get_zscore_metric_session_df(bias_df, 'bias', 'subject_fullname')
    # Keep only the most recent day ("today" is actually yesterday: sessions
    # are evaluated with a one-day lag)
    today = datetime.date.today() - datetime.timedelta(days=1)
    bias_df = bias_df.loc[bias_df['session_date'] == today, :]
    bias_df['abs_z_score_bias'] = np.abs(bias_df['z_score_bias'])
    bias_df = bias_df.reset_index(drop=True)
    # Keep sessions whose |z-scored bias| reaches the alert threshold
    # (zscore_alert, defined at module level)
    bias_df = bias_df.loc[bias_df['abs_z_score_bias'] >= zscore_alert, :]
    # Get sign of bias (only group subjects with bias to same side)
    bias_df['sign_bias'] = np.sign(bias_df['z_score_bias'])
    bias_df['sign_bias'] = bias_df['sign_bias'].astype(int)
    # Count how many subjects were biased on each rig, per bias direction,
    # and collect their names
    bias_location = bias_df.groupby(['session_location', 'sign_bias']).agg({'session_location': [('num_subjects', 'size')],\
         'subject_fullname': [('subject_fullnames', lambda x: ','.join(x))]})
    bias_location.columns = bias_location.columns.droplevel()
    bias_location = bias_location.reset_index()
    # Keep only rigs where all biased subjects leaned to the *same* side
    # (a single sign_bias group per location)
    bias_location2 = bias_location.groupby(['session_location']).agg({'session_location': [('num_bias_sides', 'size')]})
    bias_location2.columns = bias_location2.columns.droplevel()
    bias_location2 = bias_location2.reset_index()
    bias_location2 = bias_location2.loc[bias_location2['num_bias_sides'] == 1, :]
    bias_location = bias_location.merge(bias_location2, on='session_location')
    # Only alert if more than 1 subject was biased today
    bias_location = bias_location.loc[bias_location['num_subjects'] > 1, :]
    # Filter columns and set message
    columns_alert = ['session_location', 'sign_bias', 'num_subjects', 'subject_fullnames']
    bias_location = bias_location[columns_alert]
    bias_location['alert_message'] = 'Multiple subjects were biased in this rig'
    return bias_location
| [
"alvalunasan@gmail.com"
] | alvalunasan@gmail.com |
5fbea83c44a90125a9a8c6ce5805d9cd624a5b3b | 1a0567251e17892a4345008de30203010608270f | /Optimum Thread Testing/Process.py | 588818c4b50bc44471b806b795d452f90352de41 | [] | no_license | gokulbeeshman/ultimate-comics-scraper | a9e8d0d4f961b2545b8970903b4774162e453c6e | 2c4d8214b0fce84cf53f4a40b345255ff404cd05 | refs/heads/master | 2016-08-05T01:56:03.206413 | 2015-08-24T17:29:28 | 2015-08-24T17:29:28 | 41,248,537 | 1 | 2 | null | 2015-08-24T17:29:28 | 2015-08-23T12:18:54 | Python | UTF-8 | Python | false | false | 2,337 | py | import sys
import os
import urllib.request
from html.parser import HTMLParser
from bs4 import BeautifulSoup
import time
from threading import Thread
from multiprocessing import Process
# INSTRUCTIONS : Modify processcount[] list with no of processes to test with
def getxkcd(id):
    """Download the comic image for xkcd number *id* to xkcd/<id>.jpg.

    Parses http://xkcd.com/<id>, takes the image inside the div with
    id="comic" (the last one, if several), and returns ``id + 1`` — the next
    comic number.  Raises NameError if the page has no comic image, matching
    the original behaviour.
    """
    page = urllib.request.urlopen('http://xkcd.com/%d' % id)
    markup = page.read()
    page.close()
    soup = BeautifulSoup(markup)
    for comic_div in soup.findAll('div', attrs={'id': 'comic'}):
        for img in comic_div.findAll('img'):
            image_url = 'http:' + img['src']
    if not os.path.exists('xkcd/'):
        os.makedirs('xkcd/')
    urllib.request.urlretrieve(image_url, 'xkcd/%d.jpg' % id)
    return id + 1
def xkcddownloadingthread(start, increment, limit):
    """Download comics start, start+increment, start+2*increment, ... < limit.

    Each comic is retried until it downloads successfully.  NOTE: a comic that
    can never be fetched (e.g. xkcd deliberately has no #404) retries forever,
    as in the original design.
    """
    cur = start
    while cur < limit:
        while True:
            try:
                getxkcd(cur)
            except Exception:
                # BUG FIX: a bare ``except:`` also swallowed KeyboardInterrupt
                # and SystemExit, making the worker impossible to interrupt;
                # only ordinary errors should trigger a retry.
                continue
            else:
                break
        cur += increment
def serialaverage(nooftries):
    """For each worker count in ``processcount``, run ``nooftries`` rounds of
    downloading comics 1..99 with that many processes and print the average
    wall-clock time per round.
    """
    processcount = [16]  # Enter the process counts that you want it to check
    for j in processcount:
        etime = 0
        for s in range(1, nooftries + 1):
            processes = []
            start = time.time()
            for i in range(1, j + 1):
                # BUG FIX: the stride was ``j - 1``.  With starts 1..j and a
                # stride of j-1 the workers' ranges overlap (each comic is
                # downloaded by several processes), and j == 1 gives stride 0,
                # so the worker never advances.  A stride of j partitions the
                # work, matching parallelaverage() below.
                t = Process(target=xkcddownloadingthread, args=(i, j, 100,))
                processes += [t]
                t.start()
            for x in processes:
                x.join()
            end = time.time()
            print("Process count :", j, " round ", s, " time : ", end - start)
            etime = etime + end - start
        print("Process Count :", j, "Average :", etime / nooftries)
def parallelaverage(nooftries):
    """Benchmark every worker count in ``processcount``: time ``nooftries``
    rounds of downloading comics 1..99 with that many processes, printing each
    round and, at the end, the average per worker count.
    """
    processcount = [12, 16]  # Enter the process counts that you want it to check
    totals = {count: 0 for count in processcount}
    for round_no in range(1, nooftries + 1):
        for count in processcount:
            start = time.time()
            workers = []
            for offset in range(1, count + 1):
                # Worker ``offset`` handles comics offset, offset+count, ...
                worker = Process(target=xkcddownloadingthread, args=(offset, count, 100,))
                workers.append(worker)
                worker.start()
            for worker in workers:
                worker.join()
            elapsed = time.time() - start
            print("Process count", count, " Round ", round_no, " time :", elapsed)
            totals[count] += elapsed
    for count in processcount:
        print("Process count :", count, " Average: ", totals[count] / nooftries)
| [
"gokul.beeshman@gmail.com"
] | gokul.beeshman@gmail.com |
b24e16046c837fac814f79eec431149bee4d0c75 | d38c87a4987508518472aa55b7de6be952a58d2a | /main/db/database_handler.py | 385126024b7de4bc5651e97973cee22ee537bd99 | [] | no_license | lbaribeau/los-helper | 43ca8984e87433a147d21ff0621342efe6433534 | 94534df7f3a2bee2c5c84e75c993b593910770fa | refs/heads/master | 2023-08-10T21:03:47.248417 | 2016-11-01T16:13:06 | 2016-11-01T16:13:06 | 14,797,233 | 1 | 1 | null | 2022-08-12T13:56:27 | 2013-11-29T09:26:41 | Python | UTF-8 | Python | false | false | 257 | py |
from db.Database import *
from db.MudMap import MudMap
class DatabaseHandler(object):
    """Thin facade over the map database: ensures the schema exists on
    construction and exposes map queries."""
    def __init__(self, map):
        self.map = map
        create_tables()
        db.close()
    def get_smithy_paths(self):
        """Return the smithy paths known to the map.

        BUG FIX: this previously returned the *bound method*
        ``self.map.get_smithy_paths`` instead of calling it.
        """
        return self.map.get_smithy_paths()
| [
"laurier.baribeau@gmail.com"
] | laurier.baribeau@gmail.com |
3efaa1849901cad977f39d385cefc6029dad88a6 | bf279c2ec0d6e7f326c860c618f3a379da391eb0 | /Python/Lessons/tkinter/fromHabrahabr.py | 2908d95a2e2aead514aa072be25179ab4d2310c4 | [] | no_license | alisatsar/itstep | 356b38598c8adddb2b93777d197511cbc4b8ed2a | acb2761e167c2ec84338a0429749bb8e33d301cf | refs/heads/master | 2021-01-25T05:56:17.222305 | 2018-07-25T18:04:30 | 2018-07-25T18:04:30 | 80,703,725 | 2 | 2 | null | 2017-04-06T08:47:39 | 2017-02-02T07:42:42 | Python | UTF-8 | Python | false | false | 1,696 | py | from random import *
from tkinter import *
from tkinter import colorchooser
window = Tk() #создание окна tkinter
colorchooser.askcolor() #открывает окно выбора цвета и возвращает кортеж из двух значений: кортеж из трех элементов,
# интенсивность каждой RGB цвета, и строка. цвет в шестнадцатиричной системе.
size = 600
root = Tk() #создаем окно;
canvas = Canvas(root, width=size, height = size)#создаем холст, используя значение переменной size
canvas.pack()#указание расположить холст внутри окна;
diapason = 0
while True: #diapason < 1000:
colors = choice(['aqua', 'blue', 'fuchsia', 'green', 'maroon',
'orange', 'pink', 'purple', 'red', 'yellow', 'violet', 'indigo', 'chartreuse', 'lime', "#f55c4b"])
x0 = randint(0, size)#случайный выбор координат х и у в рамках холста размером size.
y0 = randint(0, size)
d = randint(0, size/5)#произвольный выбор размера круга, ограниченный size/5.
canvas.create_oval(x0, y0, x0+d, y0+ d, fill=colors)
root.update() #обрабатывает все задачи, стоящие в очереди. Обычно эта функция используется
# во время «тяжёлых» расчётов, когда необходимо чтобы приложение оставалось отзывчивым на действия пользователя.
diapason += 1
| [
"noreply@github.com"
] | noreply@github.com |
fffa8fbb2fdfd6329893112ece1a66dea0255f23 | 1f3780d9e6bb5cd985bba9f609739c118f5691ff | /hive_attention_tokens/chain/base/auth.py | 99760f7d9fe4741f7cac2580274cf2cc85ba5261 | [
"MIT"
] | permissive | imwatsi/hive-attention-tokens | 8c0e4823abc7707944e4219e9d509dc3a38e479d | 87b02b1b6fa6dc75f2cdf25d92f0a79cbeeb7e5f | refs/heads/master | 2023-08-18T03:08:59.373281 | 2021-06-19T16:50:21 | 2021-06-19T16:50:21 | 345,184,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,785 | py | import base64
import ecdsa
import json
from hashlib import sha256
from binascii import hexlify, unhexlify
from ecdsa.util import sigencode_der
from hive_attention_tokens.config import Config
from hive_attention_tokens.utils.base58 import Base58
from hive_attention_tokens.ibc.hive.hive_api import HiveApi
from hive_attention_tokens.chain.database.access import DbAccess
chain_id = "0xhat0testnet" # TODO: import from utils
db = DbAccess.db
class HiveAccounts:
    """In-memory cache of Hive account authority keys.

    Keys are looked up first in the local database and, failing that, fetched
    from the Hive API; results are memoised in the class-level ``accounts``
    dict (account name -> {'memo'/'posting'/'active'/'owner': key}).
    """
    # Shared, process-wide cache: account name -> dict of authority keys.
    accounts = {}
    @classmethod
    def init(cls, state):
        """Attach the global blockchain-state object used to gate API fetches."""
        cls.BlockchainState = state
    @classmethod
    def fetch_account(cls, acc_name):
        """Populate the cache entry for *acc_name*: local DB first, Hive API second."""
        # check DB first
        db_acc = db.get_account(acc_name)
        if db_acc:
            cls.accounts[acc_name] = {
                'memo': db_acc['memo'],
                'posting': db_acc['posting'],
                'active': db_acc['active'],
                'owner': db_acc['owner']
            }
        else:
            keys = HiveApi.get_accounts_keys([acc_name])
            if keys:
                cls.accounts[acc_name] = keys[acc_name]
    @classmethod
    def get_account_key(cls, acc_name, key_type):
        """Return the *key_type* authority key for *acc_name*, or None.

        Uncached accounts are only fetched while replaying or at genesis;
        outside those states an unknown account yields None.
        """
        if acc_name not in cls.accounts:
            if cls.BlockchainState.is_replaying() or cls.BlockchainState.is_genesis():
                cls.fetch_account(acc_name)
            else:
                return None
        if acc_name in cls.accounts:
            return cls.accounts[acc_name][key_type]
    @classmethod
    def update_keys(cls):
        # TODO: refresh cached keys when on-chain account keys change
        pass
    @classmethod
    def verify_account_key(cls, acc_name, pub_key, auth):
        """True iff *pub_key* matches the cached *auth*-level key of *acc_name*.

        NOTE(review): if the account cannot be fetched at all, the
        ``cls.accounts[acc_name]`` lookup raises KeyError -- confirm callers
        expect that rather than a False return.
        """
        if acc_name not in cls.accounts:
            cls.fetch_account(acc_name)
        acc_keys = cls.accounts[acc_name]
        if pub_key == acc_keys[auth]:
            return True
        return False
class TransactionAuth:
    """Verifies transaction signatures against Hive account authorities."""
    @classmethod
    def verify_transaction(cls, transaction, acc_name, signature, authority, auth_level):
        """Return True iff *signature* is a valid secp256k1/ECDSA signature of
        ``chain_id + transaction`` by *authority*, and *authority* is the
        account's key for *auth_level* (e.g. 'posting', 'active').

        The special '@@sys' account currently bypasses verification.
        """
        if acc_name == '@@sys':
            # TODO: crosscheck with individual token issuer data
            return True
        authenticated = HiveAccounts.verify_account_key(acc_name, authority, auth_level)
        if not authenticated:
            return False
        sig = base64.b64decode(signature)
        # Recover the raw public-key bytes from the STM-prefixed Base58 form.
        pubkey = unhexlify(repr(Base58(authority, prefix="STM")))
        pk = ecdsa.VerifyingKey.from_string(pubkey, curve=ecdsa.SECP256k1, hashfunc=sha256)
        message = chain_id + transaction
        digest = sha256(message.encode('ascii')).digest()
        try:
            valid = pk.verify_digest(sig, digest)
            return valid
        except Exception:
            # BUG FIX: a bare ``except:`` also swallowed SystemExit and
            # KeyboardInterrupt; any verification error (bad signature,
            # malformed data) is reported as an invalid transaction.
            return False
class WitnessSigning:
    """Placeholder: signing of produced blocks by the local witness."""
    @classmethod
    def sign_block(cls, block):
        # TODO: implement witness block signing
        pass
class WitnessVerification:
    """Placeholder: verification of blocks signed by other witnesses."""
    @classmethod
    def verify_block(cls, block):
        # TODO: implement witness block verification
        pass
"imwatsi@gmail.com"
] | imwatsi@gmail.com |
8b45f0d93aee22425ef4d4d65d12ce9ee00adbbc | 2a6b3bceff31cda4bd99d4978084c08107a160e8 | /multi-scale-lstm/multi_lstm_s.py | 026b3619bbe62714b2ad272521e3b27c5d8745db | [
"Apache-2.0"
] | permissive | jiacheng-xu/Cached-LSTM | 8c1a6ae46f8d3dc211cb3cfc3757d6586eac7b93 | 8e684d5558e18216d6ec8186f8fd46fa1e15ccfd | refs/heads/master | 2021-08-26T08:39:19.123101 | 2017-11-22T17:28:17 | 2017-11-22T17:28:17 | 111,713,187 | 15 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,609 | py | from optim import *
from util import *
from gate_module import *
SEED = 123
numpy.random.seed(SEED)
def init_params(options):
    """
    Global (not LSTM) parameter. For the embeding and the classifier.

    Builds an OrderedDict of numpy parameter arrays: the multi-scale LSTM
    parameters (via param_init_multi_lstm_s / param_init_forget) plus the
    softmax classifier weights 'U' and bias 'b'.  The classifier input is the
    concatenation of all pipes, hence edim * pipe_num rows in 'U'.
    """
    params = OrderedDict()
    # embedding
    # randn = numpy.random.rand(options['n_words'],
    #                           options['dim_proj'])
    # params['Wemb'] = (0.01 * randn).astype(config.floatX)
    # params = param_init_lstm_s(options, params, prefix='lstm_s',in_dim=options['wdim'], out_dim=options['edim'])
    params = param_init_multi_lstm_s(options, params, prefix='multi_lstm_s', in_dim=options['wdim'], out_dim=options['edim'])
    params = param_init_forget(options,params)
    # classifier: small random init for weights, zeros for the bias
    params['U'] = 0.01 * numpy.random.randn(options['edim'] * options['pipe_num'],
                                            options['ydim']).astype(config.floatX)
    params['b'] = numpy.zeros((options['ydim'],)).astype(config.floatX)
    return params
# ff: Feed Forward (normal neural net), only useful to put after lstm
# before the classifier.
def build_model(tparams, options):
    """Assemble the theano computation graph for the multi-scale LSTM classifier.

    Returns (use_noise, x, mask, y, f_pred_prob, f_pred, cost, rt), where rt
    is the raw per-timestep projection before pooling.
    """
    trng = RandomStreams(SEED)
    # Used for dropout.
    use_noise = theano.shared(numpy_floatX(0.))
    # Symbolic inputs: word-index matrix, float mask, integer label vector.
    x = tensor.matrix('x', dtype='int64')
    mask = tensor.matrix('mask', dtype=config.floatX)
    y = tensor.vector('y', dtype='int64')
    n_timesteps = x.shape[0]
    n_samples = x.shape[1]
    # Embedding lookup: reshape flat indices to (timesteps, samples, wdim).
    emb = tparams['Wemb'][x.flatten()].reshape([n_timesteps,
                                                n_samples,
                                                options['wdim']])
    # proj = lstm_s(tparams, emb, options, mask=mask, prefix='lstm_s', in_dim=options['wdim'],
    #               out_dim=options['edim'])
    proj = multi_lstm_s(tparams, emb, options, mask=mask, prefix='multi_lstm_s', in_dim=options['wdim'],
                        out_dim=options['edim'])
    rt = proj
    if options['end'] == True:
        # Use only the last timestep's hidden state.
        proj = proj[-1]
    else:
        # Mean-pool the hidden states over the valid (unmasked) timesteps.
        proj = (proj * mask[:, :, None]).sum(axis=0)
        proj = proj / mask.sum(axis=0)[:, None]
    if options['use_dropout']:
        proj = dropout_layer(proj, use_noise, trng,options['noise'])
    pred = tensor.nnet.softmax(tensor.dot(proj, tparams['U']) + tparams['b'])
    f_pred_prob = theano.function([x, mask], pred, name='f_pred_prob')
    f_pred = theano.function([x, mask], pred.argmax(axis=1), name='f_pred')
    # Small epsilon keeps log() away from zero; float16 needs a larger one.
    off = 1e-8
    if pred.dtype == 'float16':
        off = 1e-6
    # Negative log-likelihood of the true class, averaged over the batch.
    cost = -tensor.log(pred[tensor.arange(n_samples), y] + off).mean()
    return use_noise, x, mask, y, f_pred_prob, f_pred, cost,rt
| [
"noreply@github.com"
] | noreply@github.com |
bb3e9bf050d7d33ee69a5976c27520dbba8eb9b4 | 09ecfada6ef8afea6153d67d013c2c88a5f4008d | /cryptovenom/modules/symetric/cast/menu.py | a42b47027605b5e3ac3a03cb3fdb6f2ad71519fa | [] | no_license | ryantxu1/BearshellTools | f35eff15272c5256239bfc3a04f61cdc43fd1ffc | e959dbe0f95f6b84af4a244606e454b66cc3a966 | refs/heads/master | 2020-08-28T03:18:18.358377 | 2019-11-13T19:28:21 | 2019-11-13T19:28:21 | 217,571,660 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,551 | py | #!/usr/bin/python
#
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
#
# [====={ CRYPTO VENOM }=====]
#
# | ATTENTION!: THIS SOFTWARE IS PART OF THE "CRYPTOVENOM FRAMEWORK" |
#
# ( https://github.com/lockedbyte/cryptovenom )
#
# << GNU PUBLIC LICENSE >>
#
# / CREATED BY LOCKEDBYTE /
#
# [ CONTACT => alejandro.guerrero.rodriguez2@gmail.com ]
# [ CONTACT => @LockedByte (Twitter) ]
#
#
# AND NOW...HERE THE CODE
#
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
#
from main import *
from bruteforce import *
import random
def stringRandom(lenght):
out = ''
for i in range(0, lenght):
out = out + random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890=)(/&%$#@!|\-+}{][+*_:;.,<>')
return out
print('''
-=[OPTIONS]=-
1) Encrypt
2) Decrypt
3) Brute Force
''')
opt = input('\033[1;34m[=]\033[0m Option: ')
if opt == '1':
opt2 = input('\033[1;34m[=]\033[0m [F]ile or [T]ext: ')
if opt2 == 'f' or opt2 == 'F':
importx = 'file'
exportx = 'file'
raw = ''
infile = input('\033[1;34m[=]\033[0m Input file path: ')
outfile = input('\033[1;34m[=]\033[0m Output file path: ')
elif opt2 == 't' or opt2 == 'T':
importx = 'print'
exportx = 'print'
infile = ''
outfile = ''
raw = input('\033[1;34m[=]\033[0m Text: ')
else:
print('\033[1;31m[-]\033[0m Unknown option')
exit()
type1 = input('\033[1;34m[=]\033[0m Encryption Type (eg.: ebc): ')
print('''
-=[OPTIONS]=-
1) Random IV (First Bytes)
2) Random IV (Last Bytes)
3) Custom IV
4) No IV
''')
ivtype = input('\033[1;34m[=]\033[0m Option: ')
if ivtype == '1':
ivtype = 'randomstart'
iv = stringRandom(16)
elif ivtype == '2':
ivtype = 'randomend'
iv = stringRandom(16)
elif ivtype == '3':
ivtype = 'custom'
iv = input('\033[1;34m[=]\033[0m Custom IV: ')
elif ivtype == '4':
ivtype = 'noiv'
iv = ''
else:
print('\033[1;31m[-]\033[0m Unknown option')
keyimport = input('\033[1;34m[=]\033[0m Key Encoding Import (eg.: raw or base64): ')
passwd = input('\033[1;34m[=]\033[0m Key: ')
format1 = input('\033[1;34m[=]\033[0m Output encoding (eg.: raw or base64): ')
out = encryptCast(type1, importx, infile, exportx, outfile, format1, ivtype, iv, passwd, raw, keyimport)
print('\033[1;32m[+]\033[0m Out = ' + str(out))
print('\033[1;32m[+]\033[0m All done!')
elif opt == '2':
opt2 = input('\033[1;34m[=]\033[0m [F]ile or [T]ext: ')
if opt2 == 'f' or opt2 == 'F':
importx = 'file'
exportx = 'file'
raw = ''
infile = input('\033[1;34m[=]\033[0m Input file path: ')
outfile = input('\033[1;34m[=]\033[0m Output file path: ')
elif opt2 == 't' or opt2 == 'T':
importx = 'print'
exportx = 'print'
infile = ''
outfile = ''
raw = input('\033[1;34m[=]\033[0m Text: ')
else:
print('\033[1;31m[-]\033[0m Unknown option')
exit()
type1 = input('\033[1;34m[=]\033[0m Encryption Type (eg.: ebc): ')
format1 = input('\033[1;34m[=]\033[0m Input format (Eg.: raw or base64): ')
print('''
-=[OPTIONS]=-
1) Random IV (First Bytes)
2) Random IV (Last Bytes)
3) Custom IV
4) No IV
''')
ivtype = input('\033[1;34m[=]\033[0m Option: ')
if ivtype == '1':
ivtype = 'randomstart'
iv = stringRandom(16)
elif ivtype == '2':
ivtype = 'randomend'
iv = stringRandom(16)
elif ivtype == '3':
ivtype = 'custom'
iv = input('\033[1;34m[=]\033[0m Custom IV: ')
elif ivtype == '4':
ivtype = 'noiv'
iv = ''
else:
print('\033[1;31m[-]\033[0m Unknown option')
passwd = input('\033[1;34m[=]\033[0m Key: ')
out = decryptCast(type1, importx, infile, exportx, outfile, format1, ivtype, iv, passwd, raw)
print('\033[1;32m[+]\033[0m Out = ' + str(out))
print('\033[1;32m[+]\033[0m All done!')
elif opt == '3':
print('NOT YET')
exit()
opt2 = input('\033[1;34m[=]\033[0m [F]ile or [T]ext: ')
if opt2 == 'f' or opt2 == 'F':
importx = 'file'
exportx = 'file'
text = ''
infile = input('\033[1;34m[=]\033[0m Input file path: ')
outfile = input('\033[1;34m[=]\033[0m Output file path: ')
elif opt2 == 't' or opt2 == 'T':
importx = 'print'
exportx = 'print'
infile = ''
outfile = ''
text = input('\033[1;34m[=]\033[0m Text: ')
else:
print('\033[1;31m[-]\033[0m Unknown option')
exit()
dic = input('\033[1;34m[=]\033[0m Dictionary path: ')
bf(ct, dic) # REVISAR ESTO ----------------------------------------------------------------------------------
else:
print('\033[1;31m[-]\033[0m Unknown option')
exit()
| [
"ryanxu@wustl.edu"
] | ryanxu@wustl.edu |
69bcf13bef1b5163826eb88f8f1c07f84740a576 | 21c1da4a354037f8aed1fb323d64295f1d40d0c6 | /combination-sum-ii/solution_test.py | b7e0f896c063203f32bafc1f132b4691bb535d97 | [] | no_license | hsinhoyeh/leecode | b0de2334a1bcd9277335fba4ae7e3519775da8f9 | 7760d44f7a9038f48e12eabb6d5bafd182a0a8f6 | refs/heads/master | 2021-01-10T10:11:40.259319 | 2017-08-11T01:31:22 | 2017-08-11T01:31:22 | 45,776,150 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | import unittest
import solution
class TestSolution(unittest.TestCase):
def list_of_list_to_set(self, lsts):
sset = set()
for ele in lsts:
sset.add(tuple(ele))
return sset
def test_case0(self):
sol = solution.Solution()
got = sol.combinationSum2([10,1,2,7,6,1,5], 8)
expect = [[1,7],[1,2,5],[2,6],[1,1,6]]
self.assertSetEqual(self.list_of_list_to_set(expect), self.list_of_list_to_set(got))
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"yhh92u@gmail.com"
] | yhh92u@gmail.com |
7f7fb87aa946e005e8e5b5d89d94c27b1ed77685 | 6766e99fcd0a5d49d0ac4d25cff6e366bdde4dd2 | /dusty/scanners/sast/gosec/scanner.py | a43299a4cc3e6907567aa44e1c40bd38aaea5394 | [
"Apache-2.0"
] | permissive | carrier-io/dusty | 9b75a7df8669a033a2821d1d6b251e1006d1bfab | 4846f6a00ea4db2ac271113787e49954ead97018 | refs/heads/master | 2023-09-03T20:07:41.832111 | 2023-08-22T18:42:03 | 2023-08-22T18:42:03 | 160,715,359 | 5 | 17 | Apache-2.0 | 2020-07-24T10:48:00 | 2018-12-06T18:28:23 | Python | UTF-8 | Python | false | false | 3,268 | py | #!/usr/bin/python3
# coding=utf-8
# pylint: disable=I0011,E0401,W0702,W0703
# Copyright 2019 getcarrier.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Scanner: gosec
"""
import os
import subprocess
from dusty.tools import log
from dusty.models.module import DependentModuleModel
from dusty.models.scanner import ScannerModel
from .parser import parse_findings
class Scanner(DependentModuleModel, ScannerModel):
""" Scanner class """
def __init__(self, context):
""" Initialize scanner instance """
super().__init__()
self.context = context
self.config = \
self.context.config["scanners"][__name__.split(".")[-3]][__name__.split(".")[-2]]
def execute(self):
""" Run the scanner """
task = subprocess.run([
"gosec", "-fmt=json", "./..."
], cwd=self.config.get("code"), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
log.log_subprocess_result(task)
parse_findings(task.stdout.decode("utf-8", errors="ignore"), self)
# Save intermediates
self.save_intermediates(task.stdout)
def save_intermediates(self, task_stdout):
""" Save scanner intermediates """
if self.config.get("save_intermediates_to", None):
log.info("Saving intermediates")
base = os.path.join(self.config.get("save_intermediates_to"), __name__.split(".")[-2])
try:
# Make directory for artifacts
os.makedirs(base, mode=0o755, exist_ok=True)
# Save report
with open(os.path.join(base, "report.json"), "w") as report:
report.write(task_stdout.decode("utf-8", errors="ignore"))
except:
log.exception("Failed to save intermediates")
@staticmethod
def fill_config(data_obj):
""" Make sample config """
data_obj.insert(len(data_obj), "code", "/path/to/code", comment="scan target")
data_obj.insert(
len(data_obj), "save_intermediates_to", "/data/intermediates/dast",
comment="(optional) Save scan intermediates (raw results, logs, ...)"
)
@staticmethod
def validate_config(config):
""" Validate config """
required = ["code"]
not_set = [item for item in required if item not in config]
if not_set:
error = f"Required configuration options not set: {', '.join(not_set)}"
log.error(error)
raise ValueError(error)
@staticmethod
def get_name():
""" Module name """
return "gosec"
@staticmethod
def get_description():
""" Module description or help message """
return "Golang Security Checker"
| [
"noreply@github.com"
] | noreply@github.com |
133911539ddc4f7e9dc1505e0990830e78bccf81 | 1c29d8ed7faeb4fb244ee31c9125c515b715fafb | /dealer.py | a98f0d5441ba7bc58823911bed44385774886288 | [] | no_license | Atinanoraa/Motorcycle-Dealer | a717f9cbdae4d0422ffca1a484d4421bce863edc | 3da031e358beb580d850f84d42a7b4c2c2b4fbe7 | refs/heads/master | 2022-12-24T08:49:59.511150 | 2020-09-17T08:32:32 | 2020-09-17T08:32:32 | 296,258,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,713 | py | import mysql.connector
import connect
db=connect.koneksi()
#menambahkan data baru ke dalam table dealer
def add(data):
    """Insert a new (nama, motor) row into the dealer table and commit."""
    query = """INSERT INTO dealer(nama,motor)VALUES(%s,%s)"""
    cur = db.cursor()
    cur.execute(query, data)
    db.commit()
    print('{}Data pembeli berhasil ditambahkan!'.format(cur.rowcount))
#menampilkan seluruh data dari tabel dealer
def show():
    """Print every row of the dealer table as a simple ASCII table."""
    cur = db.cursor()
    cur.execute("""SELECT*FROM dealer""")
    rows = cur.fetchall()
    divider = "--------------------------------------"
    print(divider)
    print("|ID|NAMA\t\t|MOTOR\t\t|")
    print(divider)
    for row in rows:
        print("|", row[0], "|", row[1], "\t\t|", row[2], "\t\t|")
#mengubah data per record berdasarkan id pada table dealer
def edit(data):
    """Update the nama/motor columns of one dealer row, selected by id.

    ``data`` must be a 3-tuple ``(nama, motor, id)`` matching the placeholder
    order in the statement.
    """
    cursor = db.cursor()
    # Bug fix: the original statement read "motor=%sWHERE id=%s" -- the
    # missing space before WHERE made the SQL invalid, so every update
    # failed with a syntax error from MySQL.
    sql = """UPDATE dealer SET nama=%s, motor=%s WHERE id=%s"""
    cursor.execute(sql, data)
    db.commit()
    print('{}Data pembeli berhasil diubah!'.format(cursor.rowcount))
#menghapus data dari tabel dealer
def delete(data):
    """Remove the dealer row whose id matches ``data`` and commit."""
    query = """DELETE FROM dealer WHERE id=%s"""
    cur = db.cursor()
    cur.execute(query, data)
    db.commit()
    print('{}Data pembeli berhasil dihapus!'.format(cur.rowcount))
#mencari data dari tabel dealer
def search(id_dealer):
    """Look up dealer rows by id and print them as an ASCII table."""
    cur = db.cursor()
    cur.execute("""SELECT*FROM dealer WHERE id=%s""", id_dealer)
    rows = cur.fetchall()
    divider = "--------------------------------------"
    print(divider)
    print("|ID|NAMA\t\t|MOTOR\t\t|")
    print(divider)
    for row in rows:
        print("|", row[0], "|", row[1], "\t\t|", row[2], "\t\t|")
    print(divider)
| [
"atinanhy@gmail.com"
] | atinanhy@gmail.com |
98f4e7acd30637d80c64b6e77a2ee86abcc026b2 | 60165cc59bb3b3cad7d315a6018cc85496e04ff8 | /MapSuiteGisEditor/WpfDesktopExtension/DlrLanguages/CodeTemplates/PythonFilterFunction.py | d7010aaab09c9b60b9b5c63e53999e606d7679a0 | [
"Apache-2.0"
] | permissive | hnjm/GIS-Editor | 0e031e640642c07a08c0b5aed84c7eca21f702c0 | 1f0be7f6944650c04317ff63cc35398369a3ecc4 | refs/heads/master | 2022-10-14T09:23:28.135007 | 2020-04-06T17:42:56 | 2020-04-06T17:42:56 | 272,882,016 | 1 | 0 | Apache-2.0 | 2020-06-17T05:01:07 | 2020-06-17T05:01:06 | null | UTF-8 | Python | false | false | 345 | py | import clr
clr.AddReference("mscorlib")
clr.AddReference("ThinkGeo.MapSuite")
from System.Collections.ObjectModel import *
from ThinkGeo.MapSuite import *
def match(feature, features):
[expression]
resultFeatures = Collection[Feature]()
for f in Features:
try:
if match(f, Features): resultFeatures.Add(f)
except:
pass
resultFeatures | [
"howardch@outlook.com"
] | howardch@outlook.com |
17b0c3efb04efec5a6d635005649370d3c085113 | 3e3bf98840d133e56f0d0eb16ba85678ddd6ca45 | /.history/iss_20200102103033.py | 9612bd296d926015d41b8fbb1e476f1266919608 | [] | no_license | Imraj423/backend-iss-location-assessment | a05d3cc229a5fc4857483ae466348c1f8c23c234 | b0565c089a445ccffcb8d0aab3c0be3bb0c1d5b8 | refs/heads/master | 2020-12-03T17:04:58.512124 | 2020-06-24T16:02:02 | 2020-06-24T16:02:02 | 231,400,854 | 0 | 0 | null | 2020-06-24T16:02:04 | 2020-01-02T14:43:44 | null | UTF-8 | Python | false | false | 273 | py | #!/usr/bin/env python3
__author__ = 'Imraj423'
import requests
import turtle
r = requests.get('http://api.open-notify.org/astros.json')
print(r.text)
s = requests.get('http://api.open-notify.org/iss-now.json')
print(s.text)
# if __name__ == '__main__':
# main()
| [
"dahqniss@gmail.com"
] | dahqniss@gmail.com |
a00f4cde9d2bf4e5616e7ef2af441f62039f190c | 0dc24aef69a9bc12c7454d25db90e464d2dd947f | /4/e4.py | 294862352437bdf184eb8a353e40e2d20486703c | [
"MIT"
] | permissive | gorbunov-dmitry/rucode-spring-2021-d | 6e63896de8856daa79f2da36a878224ba3a86ff8 | 6930895b62458f48fda3a9c99f00dd13b1a6b75c | refs/heads/main | 2023-05-26T08:46:40.679839 | 2021-06-15T13:00:15 | 2021-06-15T13:00:15 | 376,492,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | from math import gcd
from random import randint
def f(x, n):
    """Pollard's rho iteration step: (x^2 + 1) mod n."""
    return (x * x + 1) % n
def find_simple(n):
    """Trial division: return the smallest prime factor p of n with
    p * p <= n, or 1 if there is none (n prime, n <= 3, or n < 1).

    Contract is identical to the original loop over every p >= 2; the only
    change is that even candidates after 2 are skipped, roughly halving the
    work for large prime inputs.
    """
    # Handle the single even candidate (p == 2) up front.
    if n >= 4 and n % 2 == 0:
        return 2
    # Remaining candidates need only be odd.
    p = 3
    while p * p <= n:
        if n % p == 0:
            return p
        p += 2
    return 1
def find_rho(n):
    """Pollard's rho with Floyd's tortoise-and-hare cycle detection.

    Returns a (possibly trivial, i.e. 1 or n) divisor of n; retrying on a
    trivial result is the caller's responsibility.  Randomized start point.
    """
    hare = randint(1, n - 2)
    tortoise = hare
    budget = int(n ** (1 / 4)) * 3
    while abs(gcd(n, hare - tortoise)) in [1, n] and budget > 0:
        hare = f(f(hare, n), n)    # hare advances two steps per iteration
        tortoise = f(tortoise, n)  # tortoise advances one step
        budget -= 1
    return abs(gcd(n, hare - tortoise))
# Entry point: read an integer n and print a nontrivial factorization
# "a b" (with a * b == n), or IMPOSSIBLE if none was found (n prime, etc.).
n = int(input())
# Trial division is fast enough below 1e9; otherwise fall back to
# Pollard's rho (probabilistic, may still miss a factor).
if n < 10**9:
    ans = find_simple(n)
else:
    ans = find_rho(n)
# A divisor of 1 or n is trivial -> no proper factorization found.
if abs(ans) in [1, n]:
    print('IMPOSSIBLE')
else:
    ans = abs(ans)
    print(ans, n // ans)
| [
"gorbunov.dmitry.1999@gmail.com"
] | gorbunov.dmitry.1999@gmail.com |
c6e19072060391328f872ea19f17b658ffc82705 | 776c0c9ced44af2b056cd116d9bbba1d9e771e02 | /app/main/admin.py | c04ee4202fd9a6650ff6640881a5f786ca566a5e | [] | no_license | thulasiraman9468/Rama | 4cee00c5dfbe49ee79b63bc83d272d2b21bc76fb | 0cffef4a893f364126d7b55d1f134e5426d30292 | refs/heads/main | 2023-07-10T08:24:19.414659 | 2021-08-19T08:10:20 | 2021-08-19T08:10:20 | 392,018,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 212 | py | from django.contrib import admin
# Register your models here.
from .models import booking
class bookingAdmin(admin.ModelAdmin):
    """Admin configuration for the booking model."""
    # NOTE(review): ModelAdmin does not read an inner Meta.model -- this
    # inner class is ignored by Django and was presumably copied from a
    # ModelForm; confirm before relying on it.
    class Meta:
        model = booking
admin.site.register(booking, bookingAdmin)
"thulasiraman9468@gmail.com"
] | thulasiraman9468@gmail.com |
0d64a8c636f9f2e49653c5f639d440261262932e | fe8ea0cc7e6b850975affa42970723d774dd8d72 | /main.py | e6e6e552b706fda1afb077005a96a73f3b6e3d02 | [
"MIT"
] | permissive | christophschnabl/lewansoul-lx16a | 2b345b6030fab2d1ca5c056500879166a0046d01 | fc92dc3470bc12353576bae00f6bc0873c753a7a | refs/heads/master | 2020-04-19T00:25:02.982773 | 2019-10-25T00:11:04 | 2019-10-25T00:11:04 | 167,846,051 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 472 | py | import serial
import lewansoul_lx16a
SERIAL_PORT = '/dev/ttyUSB0'
controller = lewansoul_lx16a.ServoController(
serial.Serial(SERIAL_PORT, 115200, timeout=1),
)
# control servos directly
#controller.move(1, 100) # move servo ID=1 to position 100
# or through proxy objects
servo1 = controller.servo(1)
#servo2 = controller.servo(2)
servo1.move(0)
# synchronous move of multiple servos
#servo1.move_prepare(300)
#servo2.move_prepare(600)
#controller.move_start()
| [
"christoph.schnabl.cs@gmail.com"
] | christoph.schnabl.cs@gmail.com |
1d959f7ffb45e5bd71979204e4f0c6b34379fcf2 | 55883f5c70f634b4341b2368ad3c6eccbe13e7e5 | /CEPNetworks.py | 30de48a2acd99ea226f70ff8ba9a0a7fe1d56710 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | kaihami/pycsa | e93fcceb4b7fed1c0b4ae23fbebd0a5bc3e2bf44 | a85594526a4d10d2e8097b6e90f5b93b44a8236f | refs/heads/master | 2021-01-23T02:40:29.036635 | 2016-04-29T17:21:11 | 2016-04-29T17:21:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,029 | py | """
CEPNetworks.py
This module is used to control all of the pipeline flow and do
the reshuffling, etc. It works with the CEPAlgorithms module which can
easily be adapted to include more algorithms. The main pipeline is initialized
with all of the information for the rest of the project.
@author: Kevin S. Brown (University of Connecticut), Christopher A. Brown (Palomidez LLC)
This source code is provided under the BSD-3 license, duplicated as follows:
Copyright (c) 2014, Kevin S. Brown and Christopher A. Brown
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or other
materials provided with the distribution.
3. Neither the name of the University of Connecticut nor the names of its contributors
may be used to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys, os, unittest, scipy.stats, re, copy, networkx
from numpy import array
from networkx import Graph as nxGraph
from pycsa.CEPLogging import LogPipeline
# decorator function to be used for logging purposes
log_function_call = LogPipeline.log_function_call
class CEPGraph(nxGraph):
    """This is the main graph class (subclassed from networkx.Graph) for computing statistics on networks
    Note: all of the functions below assume a weighted graph (setup in __init__)"""
    # NOTE(review): this module targets Python 2 era networkx (nodes_iter,
    # neighbors_iter); the iterator methods below do not exist in networkx 2.x.
    def __init__(self,nwkFile=None):
        """Initialize with a network file from the pipeline"""
        try:
            # initialize the networkx.Graph class first
            super(CEPGraph,self).__init__()
            self.read_network(nwkFile)
            if self.is_weighted():
                self.weighted = True
            else:
                raise CEPGraphWeightException
        except IOError:
            raise CEPGraphIOException(nwkFile)
        except TypeError:
            # nwkFile=None: fall back to an empty graph (open(None) -> TypeError).
            super(CEPGraph,self).__init__()
    def is_weighted(self):
        # True as soon as any edge carries a 'weight' attribute.
        edges = {'weight':False}
        for v1,v2 in self.edges():
            if 'weight' in self[v1][v2]:
                edges['weight'] = True
                break
        return edges['weight']
    def read_network(self,nwkFile):
        """Read in a network file to the current graph object"""
        nwkFile = open(nwkFile,'r')
        network = nwkFile.readlines()
        nwkFile.close()
        # add edges to self (note: (+)int nodes, (+/-)float edges, (+)float p-values)
        for edge in network:
            link = re.search('(\d+)\t(\d+)\t(-?\d+\.\d+)\t(\d+\.\d+)',edge)
            self.add_edge(int(link.group(1)), int(link.group(2)), weight=float(link.group(3)),pvalue=float(link.group(4)))
    def compute_node_degrees(self):
        """Computes the node degree (weighted sum if applicable) for a graph"""
        degrees = {}
        for node in self.nodes_iter():
            knode = 0.0
            for neighbor in self.neighbors_iter(node):
                knode += self.get_edge_data(node,neighbor)['weight']
            degrees[node] = knode
        # get half sum of node degrees as well
        halfDegreeSum = 0.5*(array(degrees.values()).sum())
        return degrees, halfDegreeSum
    def prune_graph(self, threshold):
        """Removes all weighted edges below a certain threshold along with any nodes
        that have been orphaned (no neighbors) by the pruning process"""
        for v1,v2 in self.edges():
            if self[v1][v2]['weight'] < threshold:
                self.remove_edge(v1,v2)
        # Second pass: drop nodes left with no neighbors after edge removal.
        for n in self.nodes():
            if len(self.neighbors(n)) < 1:
                self.remove_node(n)
    def calculate_pvalue(self,number=None):
        """Removes edges that aren't significant given their p-values (p > 0.05)
        Note: all MHT corrections, etc. should be taken care of in the method and
        not here (see CEPAlgorithms)"""
        edges = self.edges()
        for v1,v2 in edges:
            if self[v1][v2]['pvalue'] > 0.05:
                self.remove_edge(v1,v2)
    def calculate_mst(self,number=None):
        """Calculates a maximal spanning tree mapping large weights to small weights"""
        # Invert weights (maxWeight - w) so a minimum spanning tree on the
        # copy corresponds to a maximal spanning tree on self.
        graph = copy.deepcopy(self)
        maxWeight = max([self[v[0]][v[1]]['weight'] for v in self.edges()])
        for v1,v2 in self.edges():
            graph[v1][v2]['weight'] = maxWeight - self[v1][v2]['weight']
        edges = self.edges()
        tree = networkx.minimum_spanning_tree(graph,weight='weight')
        # Keep only edges that appear in the spanning tree.
        for v1,v2 in edges:
            if (v1,v2) in tree.edges():
                pass
            else:
                self.remove_edge(v1,v2)
    def calculate_top_n(self,number):
        """Removes edges except for those with the n-largest weights (n = number)"""
        weights = [(self[v[0]][v[1]]['weight'],v[0],v[1]) for v in self.edges()]
        weights.sort()
        weights.reverse()
        # only keep n-largest vertex pairs
        weights = [(v[1],v[2]) for v in weights[:number]]
        edges = self.edges()
        for v1,v2 in edges:
            if (v1,v2) in weights:
                pass
            else:
                self.remove_edge(v1,v2)
    def calculate_bottom_n(self,number):
        """Removes edges except for those with the n-smallest weights (n = number)"""
        weights = [(self[v[0]][v[1]]['weight'],v[0],v[1]) for v in self.edges()]
        weights.sort()
        # only keep n-smallest vertex pairs
        weights = [(v[1],v[2]) for v in weights[:number]]
        edges = self.edges()
        for v1,v2 in edges:
            if (v1,v2) in weights:
                pass
            else:
                self.remove_edge(v1,v2)
    def compute_jaccard_index(self,graph):
        """Computes the Jaccard index for edges between self and another graph.
        note: Jaccard index = edge intersection divided by edge union"""
        union = frozenset(self.edges()).union(graph.edges())
        intersection = frozenset(self.edges()).intersection(graph.edges())
        try:
            jaccard = float(len(intersection))/len(union)
        except ZeroDivisionError:
            # Both graphs empty: define the index as 0.
            jaccard = 0.0
        return jaccard
class CEPGraphIOException(IOError):
    """Raised when the supplied network file cannot be read."""
    @log_function_call('ERROR : Network File Input')
    def __init__(self,nwkFile):
        # print(...) with a single argument behaves identically under
        # Python 2 and 3; the original bare print statement was a syntax
        # error under Python 3.
        print("The network file you have provided, '%s', does not exist. Please check your file selection."%(nwkFile))
class CEPGraphWeightException(Exception):
    """Raised when a graph carries no edge weights (binary graph)."""
    @log_function_call('ERROR : Graph Not Weighted')
    def __init__(self):
        # print(...) form is valid in both Python 2 and 3; the original
        # bare print statement broke under Python 3.
        print("The graph you have provided is not a weighted graph. Most of the methods provided are pointless for binary graphs.")
class CEPNetworksTests(unittest.TestCase):
    """Placeholder test case for the CEPNetworks module."""

    def setUp(self):
        # TODO add unit tests to CEPNetworks
        pass
# Run the (placeholder) test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"kevin.s.brown@uconn.edu"
] | kevin.s.brown@uconn.edu |
01503584d16b7c4a0cb75a7cc758eb2adfac62e9 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/cb96f8a25034763e9f45f371afae4ca021d26a68-<process_state>-bug.py | 6773a85f0e7bd66ac286e39e30972daccc1daef3 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 756 | py | def process_state(self):
try:
dvs_host_states = {
'absent': {
'present': self.state_destroy_dvs_host,
'absent': self.state_exit_unchanged,
},
'present': {
'update': self.state_update_dvs_host,
'present': self.state_exit_unchanged,
'absent': self.state_create_dvs_host,
},
}
dvs_host_states[self.state][self.check_dvs_host_state()]()
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
self.module.fail_json(msg=method_fault.msg)
except Exception as e:
self.module.fail_json(msg=str(e)) | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
5f589a00f44a392774b00c5be087bfbd0071b735 | 40b6fc12bea094e81deb2ac992c4314477f6a35f | /django/mathias/api/slack.py | 2b99b7e6cb3bd38ad538f562f7cf24b8d6b0bc5c | [] | no_license | mateusflavio/mathias | d2ea11638f62182230a780dc0350e5dcc97b2772 | cf6cf9e541d51a67237dd3236b73b1899e76b255 | refs/heads/master | 2021-01-11T08:16:45.945002 | 2017-10-10T18:41:57 | 2017-10-10T18:41:57 | 68,975,603 | 3 | 2 | null | 2017-10-10T18:41:58 | 2016-09-23T01:09:50 | Python | UTF-8 | Python | false | false | 1,577 | py | # -*- coding: utf-8 -*-
import http.client
import requests
from django.conf import settings
from django.utils.six import BytesIO
from rest_framework.parsers import JSONParser
class SlackApi:
    """Thin wrapper around the Slack Web API (chat.postMessage, users.list)."""
    # API base URL and auth token come from Django settings.
    host = settings.SLACK['host']
    token = settings.SLACK['token']
    # NOTE(review): both methods are declared @staticmethod yet take a
    # `self` parameter, so callers must pass an extra first argument
    # (e.g. None). Confirm call sites before changing the signatures.
    @staticmethod
    def send_message(self, channel, username, icon_url, attachments):
        # Build the query string; values are not URL-encoded here --
        # presumably callers pre-encode them. TODO confirm.
        url = '?token=' + SlackApi.token + '&channel=' + channel + '&username=' + username + \
            '&icon_url=' + icon_url + '&attachments=' + attachments
        try:
            res = requests.post(
                SlackApi.host + 'chat.postMessage' + url
            )
        except Exception as e:
            raise Exception('Error in request slack api (http://slack.com/api/chat.postMessage) ' + str(e))
        if res.status_code == http.client.OK:
            # Parse the JSON response body via DRF's JSONParser.
            stream = BytesIO(res.content)
            data = JSONParser().parse(stream)
            return data
        else:
            raise Exception('Could not be to send message')
    @staticmethod
    def get_users(self):
        url = '?token=' + SlackApi.token
        try:
            res = requests.get(
                SlackApi.host + 'users.list' + url,
            )
        except Exception as e:
            raise Exception('Error in request slack api (https://slack.com/api/users.list) ' + str(e))
        if res.status_code == http.client.OK:
            stream = BytesIO(res.content)
            data = JSONParser().parse(stream)
            # users.list returns the user objects under the 'members' key.
            return data['members']
        else:
            raise Exception('Users not found')
| [
"mateusflavio@gmail.com"
] | mateusflavio@gmail.com |
f251afa3b7e3e955386ac164c6b65104d166d613 | 0c1a586af2040fdc955c0ebde15639cf5a3c01f9 | /install/cms.py | a2ef055d38d05994b28a535c50f9d0d979f55a79 | [] | no_license | akali/easy_cms | 5640017ee63b975b0dad40c0d021cb44f9e80044 | 6a56bbe2b28865f5e237830b812adaaa87c240d4 | refs/heads/master | 2021-04-28T20:21:25.754528 | 2018-01-20T11:45:24 | 2018-01-20T11:45:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 883 | py |
import subprocess
import config
def install(ip):
    """Install CMS v1.3.rc0 on the Ubuntu host at ``ip`` over SSH.

    Step 1: download and unpack the release tarball remotely.
    Step 2: copy the locally generated config files onto the host.
    Step 3: install prerequisites and the package itself (Python 2 toolchain).
    """
    commands = ""
    commands += "wget https://github.com/cms-dev/cms/releases/download/v1.3.rc0/v1.3.rc0.tar.gz;"
    commands += "tar -xvf v1.3.rc0.tar.gz;"
    subprocess.call(config.commands.ssh + ["ubuntu@{}".format(ip), commands])
    # Push the generated configuration files onto the remote host.
    subprocess.call(config.commands.scp + ["./config/generated/cms.conf", "ubuntu@{}:~/cms/config/".format(ip)])
    subprocess.call(config.commands.scp + ["./config/generated/cms.ranking.conf", "ubuntu@{}:~/cms/config/".format(ip)])
    commands = ""
    commands += "cd cms;"
    commands += "sudo ./prerequisites.py install -y;"
    commands += "sudo pip2 install -r requirements.txt;"
    commands += "sudo python2 setup.py install;"
    subprocess.call(config.commands.ssh + ["ubuntu@{}".format(ip), commands])
    # NOTE(review): the install command runs twice -- possibly a deliberate
    # retry for flaky first runs; confirm before removing the duplicate.
    subprocess.call(config.commands.ssh + ["ubuntu@{}".format(ip), commands])
| [
"erel3.69@gmail.com"
] | erel3.69@gmail.com |
de6406c817df44c40d6f9af3cabf9e0a1ae90cfd | 5f4ea8b9d32eb399736f44d39bc7b3d7c233b271 | /django_movie/urls.py | 773d580aff7008fafd32c63f44e0786115e74674 | [] | no_license | SmartBoY10/django_movie | 15208e4ac45e148ebcb2639cb92a062b8f411849 | 5c8d43b69cb7201c36e3ce086eccc8a75b661c71 | refs/heads/master | 2023-04-30T01:14:20.880132 | 2021-05-17T17:59:48 | 2021-05-17T17:59:48 | 367,028,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,392 | py | """django_movie URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
from django.urls import path, include
# Non-localized URL patterns (admin, editor uploads, language switcher).
urlpatterns = [
    path('admin/', admin.site.urls),
    path('ckeditor/', include('ckeditor_uploader.urls')),
    path('i18n/', include('django.conf.urls.i18n')),
    #path('silk/', include('silk.urls', namespace='silk')),
]
# Language-prefixed (i18n) URL patterns for the site's main apps.
urlpatterns += i18n_patterns(
    path('accounts/', include('allauth.urls')),
    path('pages/', include('django.contrib.flatpages.urls')),
    path('contact/', include("contact.urls")),
    path("", include("movies.urls")),
)
# Serve uploaded media through Django only during development.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"qurol.abduajalilov99@gmail.com"
] | qurol.abduajalilov99@gmail.com |
208e9e694fe4431263cb5db015d442871a25daaa | eced0633e98865c735dcbe40fb8ce12ece27f48f | /models/bonet.py | fbff7084b536ee851280c265ba599fee74827d9f | [
"MIT"
] | permissive | nichakornchaisuwan/Bonet | 448f231af7a2cb2e538f31c3cebba43a3626086b | d167b66a02c03439f201e9e5e461bc6f9468a916 | refs/heads/master | 2023-03-18T16:29:28.479775 | 2021-01-23T02:51:33 | 2021-01-23T02:51:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,313 | py | # -*- coding: utf-8 -*-
"""
Bone Age Assessment Network (BoNet) PyTorch implementation.
"""
import re
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from collections import OrderedDict
import torch.utils.model_zoo as model_zoo
from torch.utils.model_zoo import load_url as load_state_dict_from_url
# Local imports
from .backbone.inception_v3 import InceptionV3
class BoNet(nn.Module):
    """Bone Age Assessment Network: InceptionV3 backbone plus a gender branch.

    Image features and the embedded gender input are concatenated and fed
    through a small fully connected head that regresses one scalar value.
    """
    def __init__(self, transform_input=False):
        # NOTE(review): transform_input is accepted but never used here.
        super(BoNet, self).__init__()
        # Backbone
        self.inception_v3 = InceptionV3()
        # Gender
        # Embeds the single gender value into 32 features.
        self.gender = DenseLayer(1, 32)
        # 100384 = backbone feature size + 32 gender features -- presumably;
        # confirm against InceptionV3's flattened output size.
        self.fc_1 = DenseLayer(100384, 1000)
        self.fc_2 = DenseLayer(1000, 1000)
        self.fc_3 = nn.Linear(1000, 1)
    def forward(self, x, y):
        # x: image batch; y: gender batch (assumed (batch, 1) -- TODO confirm).
        x = self.inception_v3(x)
        y = self.gender(y)
        # Concatenate image and gender features along the feature dimension.
        x = self.fc_1(torch.cat([x, y], 1))
        x = self.fc_2(x)
        x = self.fc_3(x)
        return x
class DenseLayer(nn.Module):
    """Fully connected layer followed by an in-place ReLU activation."""

    def __init__(self, in_channels, out_channels):
        super(DenseLayer, self).__init__()
        self.linear = nn.Linear(in_channels, out_channels)

    def forward(self, x):
        # Affine transform, then clamp negatives to zero in place.
        return F.relu(self.linear(x), inplace=True)
| [
"mc.escobar11@uniandes.edu.co"
] | mc.escobar11@uniandes.edu.co |
3b2de14b58dc4026e326e6c37c57ada52e92a0b9 | 9d48227b24770b4142be2696a4315f01e0fe0bd7 | /Docx_anon/server/venv/Lib/site-packages/razdel/tests/partition.py | 0cb9715c93bb8e8d16f884691495aab122d81f0e | [] | no_license | AbbaVaR/1191b_Abbazov_mobile | ea65c00207f2644b20a998d0dabc35611980882b | 05f7b74286b2f001765a359ff18c5a7127b0c633 | refs/heads/main | 2023-04-25T18:07:52.832196 | 2021-06-07T13:18:25 | 2021-06-07T13:18:25 | 337,994,551 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,898 | py |
import re
from razdel.record import Record
from razdel.substring import Substring
# Filler character inserted between substrings when rebuilding a partition.
FILL = ' '
# Matches chunks made entirely of whitespace, i.e. filler chunks.
FILL_PATTERN = re.compile('^\s*$')
class Partition(Record):
    """A text split into chunks, where whitespace-only chunks are fillers.

    Non-filler chunks correspond to substrings of the full text.
    """

    __attributes__ = ['chunks']
    # A chunk is a filler if it consists solely of whitespace.
    is_fill = FILL_PATTERN.match

    def __init__(self, chunks):
        self.chunks = chunks

    @property
    def text(self):
        """The full text, i.e. the concatenation of all chunks."""
        return ''.join(self.chunks)

    @property
    def substrings(self):
        """Yield a Substring for every non-filler chunk, with offsets."""
        offset = 0
        for chunk in self.chunks:
            end = offset + len(chunk)
            if not self.is_fill(chunk):
                yield Substring(offset, end, chunk)
            offset = end

    @classmethod
    def from_substrings(cls, substrings):
        """Build a partition from substrings, inserting filler chunks."""
        return cls(list(substring_chunks(substrings)))
def substring_chunks(substrings, fill=FILL):
    """Yield chunk texts, emitting filler runs for gaps between substrings."""
    previous_stop = 0
    for position, substring in enumerate(substrings):
        gap = substring.start - previous_stop
        # No filler is emitted before the very first substring.
        if position > 0 and gap:
            yield fill * gap
        yield substring.text
        previous_stop = substring.stop
# Separator between chunks in the serialized one-line format.
SEP = '|'
# (raw, escaped) replacement pairs applied when serializing chunk text.
ESCAPE = [
    (SEP, r'\|'),
    ('\n', r'\n')
]
def escape_chunk(chunk):
    """Escape separator and newline characters inside a chunk."""
    for raw, escaped in ESCAPE:
        chunk = chunk.replace(raw, escaped)
    return chunk
def parse_partition(line):
    """Build a Partition from a single '|'-separated line."""
    return Partition(line.split(SEP))
def parse_partitions(lines):
    """Lazily parse each line into a Partition."""
    return (parse_partition(line) for line in lines)
def format_partition(partition):
    """Serialize a Partition back into a '|'-separated line."""
    escaped = [escape_chunk(chunk) for chunk in partition.chunks]
    return SEP.join(escaped)
def format_partitions(partitions):
    """Lazily serialize each Partition to its line form."""
    return (format_partition(partition) for partition in partitions)
def update_partition(partition, segment):
    """Re-segment the partition's text and rebuild it from the result."""
    substrings = segment(partition.text)
    return Partition.from_substrings(substrings)
def update_partitions(partitions, segment):
    """Lazily apply update_partition to every partition."""
    return (
        update_partition(partition, segment)
        for partition in partitions
    )
| [
"asr77734@gmail.com"
] | asr77734@gmail.com |
7e5707c253c5dd0752d89832311a101d4f2ddd7f | 4ae7a930ca6aa629aa57df7764665358ee70ffac | /cflearn/data/blocks/ml/__init__.py | 004f79c534726862b6d1c4593e1b88dbc9ee2142 | [
"MIT"
] | permissive | carefree0910/carefree-learn | 0ecc7046ef0ab44a642ff0a72a181c4cb5037571 | 554bf15c5ce6e3b4ee6a219f348d416e71d3972f | refs/heads/dev | 2023-08-23T07:09:56.712338 | 2023-08-23T02:49:10 | 2023-08-23T02:49:10 | 273,041,593 | 451 | 38 | MIT | 2021-01-05T10:49:46 | 2020-06-17T17:44:17 | Python | UTF-8 | Python | false | false | 169 | py | from .schema import *
from .file import *
from .nan_handler import *
from .recognizer import *
from .preprocessor import *
from .splitter import *
from .gather import *
| [
"syameimaru.saki@gmail.com"
] | syameimaru.saki@gmail.com |
cdbeb9fa3d7154cdaf2a4181e90676a14873159b | 6080843c9d68a285ccb8b37a65c921dcb6ca994f | /seo_link/migrations/0001_initial.py | 7c41cb945fc3ac0ecdf4545f7f2abe7db44a0427 | [
"BSD-3-Clause"
] | permissive | lubosvr/django-seo-link | 161f4ba6147b943d4a1128866f6fb29b1febcc38 | 48df353152ac066437892bd478d8186bdb59464e | refs/heads/master | 2020-12-26T04:05:51.104081 | 2011-03-16T16:07:02 | 2011-03-16T16:07:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,149 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: initial tables for the seo_link app."""
    def forwards(self, orm):
        """Create all seo_link tables, uniqueness constraints and the M2M join table."""
        # Adding model 'MatchType'
        db.create_table('seo_link_matchtype', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ))
        db.send_create_signal('seo_link', ['MatchType'])
        # Adding model 'OperatingPath'
        db.create_table('seo_link_operatingpath', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['seo_link.MatchType'])),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('pattern', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('is_include', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('seo_link', ['OperatingPath'])
        # Adding unique constraint on 'OperatingPath', fields ['type', 'pattern']
        db.create_unique('seo_link_operatingpath', ['type_id', 'pattern'])
        # Adding model 'ReplacementTemplate'
        db.create_table('seo_link_replacementtemplate', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('template_filename', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ))
        db.send_create_signal('seo_link', ['ReplacementTemplate'])
        # Adding model 'TargetPath'
        db.create_table('seo_link_targetpath', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('path', self.gf('django.db.models.fields.CharField')(max_length=1024)),
            ('is_external', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('seo_link', ['TargetPath'])
        # Adding model 'Term'
        db.create_table('seo_link_term', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('words', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('word_count', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
            ('replacement_template', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['seo_link.ReplacementTemplate'])),
            ('target_path', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['seo_link.TargetPath'], null=True, blank=True)),
            ('is_case_sensitive', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('is_active', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('seo_link', ['Term'])
        # Adding unique constraint on 'Term', fields ['words', 'word_count']
        db.create_unique('seo_link_term', ['words', 'word_count'])
        # Adding M2M table for field operating_path on 'Term'
        db.create_table('seo_link_term_operating_path', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('term', models.ForeignKey(orm['seo_link.term'], null=False)),
            ('operatingpath', models.ForeignKey(orm['seo_link.operatingpath'], null=False))
        ))
        db.create_unique('seo_link_term_operating_path', ['term_id', 'operatingpath_id'])
        # Adding model 'TestUrl'
        db.create_table('seo_link_testurl', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('test_url', self.gf('django.db.models.fields.CharField')(max_length=1024)),
            ('created_at', self.gf('django.db.models.fields.DateTimeField')()),
            ('tested_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
        ))
        db.send_create_signal('seo_link', ['TestUrl'])
        # Adding model 'TestResult'
        db.create_table('seo_link_testresult', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('page_url', self.gf('django.db.models.fields.related.ForeignKey')(related_name='test_urls', to=orm['seo_link.TestUrl'])),
            ('page_title', self.gf('django.db.models.fields.CharField')(max_length=1024)),
            ('link_url', self.gf('django.db.models.fields.CharField')(max_length=1024)),
            ('link_text', self.gf('django.db.models.fields.CharField')(max_length=1024)),
            ('is_injected', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('created_at', self.gf('django.db.models.fields.DateTimeField')()),
        ))
        db.send_create_signal('seo_link', ['TestResult'])
        # Adding model 'CacheKey'
        db.create_table('seo_link_cachekey', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('language', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('site', self.gf('django.db.models.fields.PositiveIntegerField')()),
            ('key', self.gf('django.db.models.fields.CharField')(max_length=1024)),
        ))
        db.send_create_signal('seo_link', ['CacheKey'])
    def backwards(self, orm):
        """Reverse the migration: drop constraints first, then every table."""
        # Removing unique constraint on 'Term', fields ['words', 'word_count']
        db.delete_unique('seo_link_term', ['words', 'word_count'])
        # Removing unique constraint on 'OperatingPath', fields ['type', 'pattern']
        db.delete_unique('seo_link_operatingpath', ['type_id', 'pattern'])
        # Deleting model 'MatchType'
        db.delete_table('seo_link_matchtype')
        # Deleting model 'OperatingPath'
        db.delete_table('seo_link_operatingpath')
        # Deleting model 'ReplacementTemplate'
        db.delete_table('seo_link_replacementtemplate')
        # Deleting model 'TargetPath'
        db.delete_table('seo_link_targetpath')
        # Deleting model 'Term'
        db.delete_table('seo_link_term')
        # Removing M2M table for field operating_path on 'Term'
        db.delete_table('seo_link_term_operating_path')
        # Deleting model 'TestUrl'
        db.delete_table('seo_link_testurl')
        # Deleting model 'TestResult'
        db.delete_table('seo_link_testresult')
        # Deleting model 'CacheKey'
        db.delete_table('seo_link_cachekey')
    # Frozen ORM state used by South when running this migration.
    models = {
        'seo_link.cachekey': {
            'Meta': {'object_name': 'CacheKey'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'site': ('django.db.models.fields.PositiveIntegerField', [], {})
        },
        'seo_link.matchtype': {
            'Meta': {'object_name': 'MatchType'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'seo_link.operatingpath': {
            'Meta': {'unique_together': "(('type', 'pattern'),)", 'object_name': 'OperatingPath'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_include': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'pattern': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['seo_link.MatchType']"})
        },
        'seo_link.replacementtemplate': {
            'Meta': {'object_name': 'ReplacementTemplate'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'template_filename': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'seo_link.targetpath': {
            'Meta': {'object_name': 'TargetPath'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_external': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
        },
        'seo_link.term': {
            'Meta': {'unique_together': "(('words', 'word_count'),)", 'object_name': 'Term'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_case_sensitive': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'operating_path': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['seo_link.OperatingPath']", 'symmetrical': 'False', 'blank': 'True'}),
            'replacement_template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['seo_link.ReplacementTemplate']"}),
            'target_path': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['seo_link.TargetPath']", 'null': 'True', 'blank': 'True'}),
            'word_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'words': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'seo_link.testresult': {
            'Meta': {'object_name': 'TestResult'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_injected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'link_text': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
            'link_url': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
            'page_title': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
            'page_url': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'test_urls'", 'to': "orm['seo_link.TestUrl']"})
        },
        'seo_link.testurl': {
            'Meta': {'object_name': 'TestUrl'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'test_url': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
            'tested_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
        }
    }
    complete_apps = ['seo_link']
| [
"frank.bieniek@produktlaunch.de"
] | frank.bieniek@produktlaunch.de |
c457235b9a8c39152cc8605e92397937890b35f0 | d30a41cd32c48a779d473f02c87119ab23ebf18a | /src/stk/molecular/topology_graphs/macrocycle/macrocycle.py | ac2f50e4f5b66a65bebaad61fe5100a2808c1b02 | [
"MIT"
] | permissive | pk-organics/stk | 7c69e97a40b1e0195a24c7d2921f8200e91a9fa8 | 4dc93ddcccf0ff96f8ecc1110fcc42bd5719b2da | refs/heads/master | 2023-03-25T06:28:08.818537 | 2021-03-09T09:47:00 | 2021-03-09T09:47:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,669 | py | """
Macrocycle
==========
"""
import numpy as np
from .vertices import _CycleVertex
from ..topology_graph import TopologyGraph, NullOptimizer, Edge
from ...reactions import GenericReactionFactory
class Macrocycle(TopologyGraph):
    """
    Represents a macrocycle topology graph.
    Building blocks with two functional groups are required
    for this topology.
    Examples
    --------
    *Construction*
    This topology graph essentially makes a polymer chain and joins
    the ends, hence the constructor parameters allows you to specify
    the chain
    .. code-block:: python
        import stk
        macrocycle = stk.ConstructedMolecule(
            topology_graph=stk.macrocycle.Macrocycle(
                building_blocks=(
                    stk.BuildingBlock('BrCCBr', [stk.BromoFactory()]),
                    stk.BuildingBlock('BrCNCBr', [stk.BromoFactory()]),
                ),
                repeating_unit='AB',
                num_repeating_units=5,
            ),
        )
    *Suggested Optimization*
    For :class:`.Macrocycle` topologies, it is recommended to use the
    :class:`.MCHammer` optimizer.
    .. code-block:: python
        macrocycle = stk.ConstructedMolecule(
            topology_graph=stk.macrocycle.Macrocycle(
                building_blocks=(
                    stk.BuildingBlock('BrCCBr', [stk.BromoFactory()]),
                    stk.BuildingBlock('BrCNCBr', [stk.BromoFactory()]),
                ),
                repeating_unit='AB',
                num_repeating_units=5,
                optimizer=stk.MCHammer(),
            ),
        )
    *Defining the Orientation of Each Building Block*
    The `orientations` parameter allows the direction of each building
    block along to the chain to be flipped
    .. code-block:: python
        bb4 = stk.BuildingBlock('BrCOCCBr', [stk.BromoFactory()])
        c3 = stk.ConstructedMolecule(
            topology_graph=stk.macrocycle.Macrocycle(
                building_blocks=(bb2, bb4),
                repeating_unit='AB',
                num_repeating_units=5,
                orientations=(1, 0.5),
            ),
        )
    In the above example, ``bb2`` is guaranteed to be flipped,
    ``bb4`` has a 50% chance of being flipped, each time it is placed
    on a node.
    Note that whether a building block will be flipped or not
    is decided during the initialization of :class:`.Macrocycle`
    .. code-block:: python
        # cycle will always construct the same macrocycle.
        cycle = stk.macrocycle.Macrocycle(
            building_blocks=(bb2, bb4),
            repeating_unit='AB',
            num_repeating_units=5,
            orientations=(0.65, 0.45),
        )
        # c4 and c5 are guaranteed to be the same as they used the same
        # topology graph.
        c4 = stk.ConstructedMolecule(cycle)
        c5 = stk.ConstructedMolecule(cycle)
        # cycle2 may lead to a different polymer than chain, despite
        # being initialized with the same parameters.
        cycle2 = stk.macrocycle.Macrocycle(
            building_blocks=(bb2, bb4),
            repeating_unit='AB',
            num_repeating_units=5,
            orientations=(0.65, 0.45)
        )
        # c6 and c7 are guaranteed to be the same because they used
        # the same topology graph. However, they may be different to
        # c4 and c5.
        c6 = stk.ConstructedMolecule(cycle2)
        c7 = stk.ConstructedMolecule(cycle2)
    The `random_seed` parameter can be used to get reproducible results
    .. code-block:: python
        # c8 and c9 are guaranteed to be the same, because cycle3 and
        # cycle4 used the same random seed.
        cycle3 = stk.macrocycle.Macrocycle(
            building_blocks=(bb2, bb4),
            repeating_unit='AB',
            num_repeating_units=5,
            orientations=(0.65, 0.45),
            random_seed=4,
        )
        c8 = stk.ConstructedMolecule(cycle3)
        cycle4 = stk.macrocycle.Macrocycle(
            building_blocks=(bb2, bb4),
            repeating_unit='AB',
            num_repeating_units=5,
            orientations=(0.65, 0.45),
            random_seed=4,
        )
        c9 = stk.ConstructedMolecule(cycle4)
    *Using Numbers to Define the Repeating Unit*
    The repeating unit can also be specified through the indices of
    the building blocks
    .. code-block:: python
        bb1 = stk.BuildingBlock('BrCCBr', [stk.BromoFactory()])
        bb2 = stk.BuildingBlock('BrCNCBr', [stk.BromoFactory()])
        bb3 = stk.BuildingBlock('BrCNNCBr', [stk.BromoFactory()])
        # c1 and c2 are different ways to write the same thing.
        c1 = stk.ConstructedMolecule(
            topology_graph=stk.macrocycle.Macrocycle(
                building_blocks=(bb1, bb2, bb3),
                repeating_unit='ACB',
                num_repeating_units=3
            )
        )
        c2 = stk.ConstructedMolecule(
            topology_graph=stk.macrocycle.Macrocycle(
                building_blocks=(bb1, bb2, bb3),
                repeating_unit=(0, 2, 1),
                num_repeating_units=3,
            )
        )
    """
    # NOTE(review): the default reaction_factory / optimizer instances are
    # created once at class definition time and shared across calls; this
    # is safe only while those objects are stateless — assumed, confirm.
    def __init__(
        self,
        building_blocks,
        repeating_unit,
        num_repeating_units,
        orientations=None,
        random_seed=None,
        reaction_factory=GenericReactionFactory(),
        num_processes=1,
        optimizer=NullOptimizer(),
    ):
        """
        Initialize a :class:`Macrocycle` instance.
        Parameters
        ----------
        building_blocks : :class:`tuple` of :class:`.BuildingBlock`
            The building blocks of the macrocycle.
        repeating_unit : :class:`str` or :class:`tuple` of :class:`int`
            A string specifying the repeating unit of the macrocycle.
            For example, ``'AB'`` or ``'ABB'``. The first building
            block passed to `building_blocks` is ``'A'`` and so on.
            The repeating unit can also be specified by the indices of
            `building_blocks`, for example ``'ABB'`` can be
            written as ``(0, 1, 1)``.
        num_repeating_units : :class:`int`
            The number of repeating units which are used to make the
            macrocycle.
        orientations : :class:`tuple` of :class:`float`, optional
            For each character in the repeating unit, a value
            between ``0`` and ``1`` (both inclusive) must be given in
            a :class:`tuple`. It indicates the probability that each
            monomer will have its orientation along the chain flipped.
            If ``0`` then the monomer is guaranteed not to flip. If
            ``1`` it is guaranteed to flip. This allows the user to
            create head-to-head or head-to-tail chains, as well as
            chain with a preference for head-to-head or head-to-tail if
            a number between ``0`` and ``1`` is chosen. If ``None``,
            then ``0`` is picked in every case.
            It is also possible to supply an orientation for every
            vertex in the final topology graph. In this case, the
            length of `orientations` must be equal to
            ``len(repeating_unit)*num_repeating_units``.
        random_seed : :class:`int`, optional
            The random seed to use when choosing random orientations.
        num_processes : :class:`int`, optional
            The number of parallel processes to create during
            :meth:`construct`.
        optimizer : :class:`.Optimizer`, optional
            Used to optimize the structure of the constructed
            molecule.
        Raises
        ------
        :class:`ValueError`
            If the length of `orientations` is not equal in length to
            `repeating_unit` or to the total number of vertices.
        """
        if orientations is None:
            orientations = tuple(
                0. for i in range(len(repeating_unit))
            )
        # A per-repeating-unit tuple is tiled to cover the whole cycle.
        if len(orientations) == len(repeating_unit):
            orientations = orientations*num_repeating_units
        chain_length = len(repeating_unit)*num_repeating_units
        if len(orientations) != chain_length:
            raise ValueError(
                'The length of orientations must match either '
                'the length of repeating_unit or the '
                'total number of vertices.'
            )
        generator = np.random.RandomState(random_seed)
        # Keep these for __repr__.
        self._repeating_unit = self._normalize_repeating_unit(
            repeating_unit=repeating_unit
        )
        self._num_repeating_units = num_repeating_units
        # Each monomer in the macrocycle is separated by angle_diff.
        angle_diff = (2*np.pi)/chain_length
        vertices = []
        edges = []
        choices = [True, False]
        # Place the vertices evenly on a unit circle in the xy plane and
        # connect each vertex to its predecessor.
        for vertex_id, flip_chance in enumerate(orientations):
            theta = vertex_id*angle_diff
            vertices.append(
                _CycleVertex(
                    id=vertex_id,
                    position=[np.cos(theta), np.sin(theta), 0],
                    flip=generator.choice(
                        choices,
                        p=[flip_chance, 1-flip_chance],
                    ),
                    angle=theta,
                )
            )
            if vertex_id > 0:
                edges.append(
                    Edge(
                        id=len(edges),
                        vertex1=vertices[vertex_id-1],
                        vertex2=vertices[vertex_id],
                    )
                )
        # Save the chosen orientations for __repr__.
        self._orientations = tuple(
            int(vertex.get_flip()) for vertex in vertices
        )
        # Close the ring: join the last vertex back to the first.
        edges.append(Edge(len(edges), vertices[0], vertices[-1]))
        super().__init__(
            building_block_vertices=self._get_building_block_vertices(
                building_blocks=building_blocks,
                vertices=vertices,
            ),
            edges=tuple(edges),
            reaction_factory=reaction_factory,
            construction_stages=(),
            num_processes=num_processes,
            optimizer=optimizer,
            edge_groups=None,
        )
    def clone(self):
        """Return a clone, copying the macrocycle-specific attributes."""
        clone = super().clone()
        clone._repeating_unit = self._repeating_unit
        clone._num_repeating_units = self._num_repeating_units
        clone._orientations = self._orientations
        return clone
    @staticmethod
    def _normalize_repeating_unit(repeating_unit):
        """Convert a string repeating unit (e.g. 'AB') to index form (0, 1)."""
        if isinstance(repeating_unit, tuple):
            return repeating_unit
        base = ord('A')
        return tuple(ord(letter)-base for letter in repeating_unit)
    def _get_building_block_vertices(self, building_blocks, vertices):
        """Map each building block to the vertices it is placed on."""
        polymer = self._repeating_unit*self._num_repeating_units
        building_block_vertices = {}
        for bb_index, vertex in zip(polymer, vertices):
            bb = building_blocks[bb_index]
            building_block_vertices[bb] = (
                building_block_vertices.get(bb, [])
            )
            building_block_vertices[bb].append(vertex)
        return building_block_vertices
    def _get_scale(self, building_block_vertices):
        """Return the scale applied to vertex positions during construction."""
        length = len(self._repeating_unit)*self._num_repeating_units
        return length*0.25*max(
            bb.get_maximum_diameter()
            for bb in building_block_vertices
        )
    def __repr__(self):
        """Return a reproducible textual representation of the graph."""
        return (
            f'macrocycle.Macrocycle({self._repeating_unit!r}, '
            f'{self._num_repeating_units!r}, '
            f'{self._orientations!r})'
        )
| [
"noreply@github.com"
] | noreply@github.com |
9eb9374124783aaea8cce51a1e158d0970a5cda4 | 653224ae441821ed95e7fb9c26d6f24425b550cb | /udemy-curso-python-projetos-reais-django/djangotemplates/blog/views.py | 9285cd7100bf97aa825ef9cb27b5e7f5d8d3e29b | [
"MIT"
] | permissive | herculeshssj/python | d68432e2cf38622bbba3d50be48810ceeaea6694 | a6b9dedf81162467a4d99ae61ca001bd4b691819 | refs/heads/master | 2023-09-05T00:18:55.126544 | 2023-08-18T18:07:02 | 2023-08-18T18:07:02 | 230,921,078 | 0 | 0 | MIT | 2023-07-06T01:07:08 | 2019-12-30T13:29:34 | Python | UTF-8 | Python | false | false | 131 | py | from django.shortcuts import render
# Create your views here.
def index(request):
    """Render the blog landing page."""
    template_name = 'blog/index.html'
    return render(request, template_name)
| [
"hercules_hssj@outlook.com"
] | hercules_hssj@outlook.com |
222d7d7d0a8181630d15e2fec554272d154b8807 | 65e78f7069e68de8e3ad49dc267a5343933b4ca3 | /plist2txt.py | 4304b7190d4adc198bf3a68f6d45b2464ebfe7d8 | [] | no_license | ZuyingWo/contrib | e5315a33eb02d9172ce2701c13edd1993902f2fb | fa3316135b015f711b863b9cb13dc1ce1f54303f | refs/heads/master | 2021-01-17T19:25:56.858627 | 2016-06-28T14:22:04 | 2016-06-28T14:23:07 | 64,099,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,316 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# plist2txt.py: convert a Info.plist file to a text list
# Copyright (C) 2016 Ludovic Rousseau
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Usage:
# ./plist2txt.py /usr/lib/pcsc/drivers/ifd-ccid.bundle/Contents/Info.plist
import plistlib
import sys
def convert(filename):
root = plistlib.readPlist(filename)
# for key in root:
# print key
zipped = zip(root['ifdVendorID'], root['ifdProductID'],
root['ifdFriendlyName'])
for elt in sorted(zipped):
print ":".join(elt)
if __name__ == "__main__":
    # CLI entry point: first argument is the path to the Info.plist file.
    convert(sys.argv[1])
| [
"ludovic.rousseau@free.fr"
] | ludovic.rousseau@free.fr |
bb979d8fa0d21924647fd592b6f4a360ffb3851c | db46d617df684974ba774ca672ad74aa90decdf7 | /images/migrations/0023_auto_20200505_1750.py | a753f1eb6e5fcdbb53a9c6ffe1bf313c0193a214 | [] | no_license | kylebruder/kbruder | 664d86b7d5c35dec67efa6fe85805a325d29acdb | d50aa09bb33cc53ae3b7b3e28b5a2a1b24c4880e | refs/heads/master | 2021-07-16T09:03:37.755443 | 2020-06-10T18:50:43 | 2020-06-10T18:50:43 | 245,283,002 | 0 | 0 | null | 2021-04-08T19:51:47 | 2020-03-05T22:45:54 | Python | UTF-8 | Python | false | false | 693 | py | # Generated by Django 3.0.3 on 2020-05-05 17:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Relax 'piece' fields: the currency FK and price become optional."""
    dependencies = [
        ('metrics', '0001_initial'),
        ('images', '0022_auto_20200505_1744'),
    ]
    operations = [
        # Allow pieces without an associated currency.
        migrations.AlterField(
            model_name='piece',
            name='currency',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='metrics.Currency'),
        ),
        # Allow pieces without a price.
        migrations.AlterField(
            model_name='piece',
            name='price',
            field=models.FloatField(blank=True, null=True),
        ),
    ]
"kylebruder@pm.me"
] | kylebruder@pm.me |
bb4870710eb00f3e5b229972f2c6af8a5271ca74 | b21d7531d5f873e35515f4d39575e6ed93668509 | /Server.py | ae80794677de3ec26c6b9bcb9c7d639dc8531edd | [] | no_license | GadiTab/DevOps-Puppet-for-Linux | 399752f2ec94086570096fe50eefbf0cf33f2ff8 | ca22a67f96e5fb62d0641ae14fb83fa172a051d3 | refs/heads/master | 2021-04-30T17:40:01.112914 | 2017-01-27T19:43:47 | 2017-01-27T19:43:47 | 80,240,218 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,185 | py | #!/usr/bin/python
# Import Libraries
from time import sleep
from os import system, path
from datetime import datetime
from socket import *
import fcntl, struct, subprocess, shlex, threading
# Extended Library
try:
    from netaddr import *
except ImportError:
    # Fix: the original bare 'except:' swallowed *every* exception raised
    # during the import (including KeyboardInterrupt / SystemExit), not
    # just a missing module. Only a failed import should trigger the
    # best-effort system install, after which the import is retried.
    system("apt-get install python-netaddr -y")
    from netaddr import *
# *START* Network calculations *START*
#--------------------------------------------------------------------------------------------------------------------
strs = subprocess.check_output(shlex.split('ip r l')) # Get Default Gateway
gateway = strs.split('default via')[-1].split()[0] # Get Default Gateway
hostIP = strs.split('src')[-1].split()[0] # Get Server IP Address
iface = "eth0" # Get Subnet Mask
# 35099 == 0x891b, the Linux SIOCGIFNETMASK ioctl; bytes 20:24 of the
# reply buffer carry the packed netmask for the interface.
mask=inet_ntoa(fcntl.ioctl(socket(AF_INET, SOCK_DGRAM), 35099, struct.pack('256s', iface))[20:24]) # Get Subnet Mask
segment = str(IPNetwork(('{}/{}').format(hostIP,mask)).cidr) # Calculate Segment
server_dns = getfqdn() # Server DNS name
port = 51234 # TCP port the server listens on
#---------------------------------------------------------------------------------------------------------------------
# *END* Network Calculations *END*
# Set global clients connections lists
clients_connections = [] # live client sockets (kept parallel to clients_addresses)
clients_addresses = [] # (ip, port) tuples, one per socket above
# Open & bind socket
def open_socket():
global sock
try:
sock = socket()
sock.bind((hostIP, port))
sock.listen(5)
accept_connections()
close_socket()
except error as message:
print "Socket error: " + str(message)
sleep(5)
close_socket()
# Accept connections thread job
def accept_connections():
while True:
try:
client_conn, client_address = sock.accept()
client_conn.setblocking(1)
clients_connections.append(client_conn)
clients_addresses.append(client_address)
print "\nConnections has been established: " + client_address[0]
except:
print "Error accepting connections"
close_socket()
break
# Check clients connections
def check_clients():
global clients
clients = []
for i, client_conn in enumerate(clients_connections):
try:
client_conn.send("hostname")
hostname = client_conn.recv(1024)
except:
print "Connection lost with: " + clients_addresses[i][0]
del clients_connections[i]
del clients_addresses[i]
continue
clients.append([clients_addresses[i][0],hostname])
# Print Clients
def print_clients():
global clients
i = 0
print "Clients:"
for client in clients:
i += 1
print "{}. {} - {}".format(i, client[0], client[1])
# Creating a thread to open and accept connections
def threads():
    """Spawn the daemon thread that owns the listening socket."""
    listener = threading.Thread(target=open_socket)
    listener.setDaemon(True)   # don't block interpreter shutdown
    listener.start()
# (1) Show clients menu
def show_clients():
    """Menu (1): refresh the client list a few times, then display it."""
    system("clear")
    for _ in range(3):
        check_clients()
    print_clients()
    enter_to_continue()
# (2) Command the clients menu
def command_clients_menu():
system("clear")
global client_pick
print "Send commands to a client [client - number or 0 to all] (Type 'back' to return)\n"
for i in range (3):
check_clients()
print_clients()
client_pick = raw_input("\nClient Number: ")
while len(client_pick) == 0:
client_pick = raw_input("Client Number: ")
if client_pick == 'back':
print_menu()
elif client_pick == '0':
command_all_clients()
else:
single_client_to_command()
# (2) Pick a client to send commands.
def single_client_to_command():
global client_pick
try:
client_pick = int(client_pick)
i = 0
for client_con in clients_connections:
i += 1
if client_pick == i:
print "Type commands to the client (Type 'back' to return):"
command_a_single_client(client_con)
return
if client_pick > i:
print "There is no such a client."
raw_input("Press enter to continue...\n")
command_clients_menu()
except:
print "You can't input a character"
raw_input("Press enter to continue...\n")
command_clients_menu()
# (2) Send commands to a single client.
def command_a_single_client(client_con):
while True:
client_con.send("Cmd")
cmd = raw_input("> ")
while len(cmd) == 0:
cmd = raw_input("> ")
if cmd == "back":
client_con.send("back")
command_clients_menu()
return
else:
try:
client_con.send(cmd)
client_response = client_con.recv(1024)
print "\n{}\n".format(client_response)
except:
print "connection lost with the client"
command_clients_menu()
return
# (2) Send commands to all clients ('0' at command_clients_menu)
def command_all_clients():
print "Type commands to all clients (Type 'back' to return):"
while True:
cmd = raw_input("> ")
if cmd == "back":
command_clients_menu()
return
elif len(cmd) == 0:
continue
else:
for i, client_con in enumerate(clients_connections):
client_con.send("Cmd")
sleep(0.1)
try:
client_con.send(cmd)
print clients_addresses[i][0] + " - Success"
except:
print clients_connections[i][0] + " - Failed to receive."
print ""
# (3) Send files menu
def send_file_menu():
    """Menu: pick a client (or 0 = all) to receive a file."""
    system("clear")
    global client_pick
    print "Send a file to a client [client - number or 0 to all] (Type 'back' to return):\n"
    for i in range (3):  # refresh the live-client list a few times before printing it
        check_clients()
    print_clients()
    client_pick = raw_input("\nClient Number: ")
    while len(client_pick) == 0:
        client_pick = raw_input("\nClient Number: ")
    if client_pick == 'back':
        print_menu()
        return
    elif client_pick == '0':
        send_to_all_clients_mode()
    else:
        send_file_to_single_client()
# (3) Pick a client to transfer files.
def send_file_to_single_client():
    """Resolve the typed client number and start the SC-File handshake."""
    global client_pick
    try:
        client_pick = int(client_pick)  # ValueError here lands in the except below
        i = 0
        for client_con in clients_connections:
            i += 1
            if client_pick == i:
                print "Type the destination path (Type 'back' to return):"
                client_con.send("SC-File")
                send_file_destination(client_con)
                return
        if client_pick > i:
            print "There is no such a client"
            raw_input("\nPress enter to continue...\n")
            send_file_menu()
    except:
        print "You can't input a character"
        raw_input("\nPress enter to continue...\n")
        send_file_menu()
# (3) Choose a destination path
def send_file_destination(client_con):
    """Ask for a remote destination path until the client confirms it exists."""
    check_path_answer = "Path is NOT existed"
    dst_file_path = raw_input("Destination Path > ")
    if dst_file_path == "back":
        client_con.send("back")
        send_file_menu()
        return
    while len(dst_file_path) == 0:
        dst_file_path = raw_input("Destination Path > ")
    sleep(0.1)
    while check_path_answer == "Path is NOT existed":
        client_con.send(dst_file_path)
        check_path_answer = client_con.recv(1024)
        print "client says: {}".format(check_path_answer)
        if check_path_answer == "Path is NOT existed":
            send_file_destination(client_con)  # NOTE(review): recursion re-prompts; the outer loop then re-sends the old path
    send_file_source(client_con)
# (3) Choose a source file
def send_file_source(client_con):
    """Stream a local file to one client in 1024-byte chunks."""
    while True:
        src_file_path = raw_input("Source File > ")
        if src_file_path == "back":
            client_con.send("back")
            send_file_destination(client_con)
            return
        while len(src_file_path) == 0:
            src_file_path = raw_input("Source File > ")
        if not path.isfile(src_file_path):
            print "There is no such a file."
            continue
        src_file_name = path.basename(src_file_path)
        client_con.send(src_file_name)
        sleep(0.1)  # crude framing: pause so name/size/data arrive as separate recv()s
        client_con.send(str(path.getsize(src_file_path)))
        sleep(0.1)
        try:
            with open(src_file_path, 'rb') as chosen_file:
                bytes_to_send = chosen_file.read(1024)
                client_con.send(bytes_to_send)
                while len(bytes_to_send) > 0:
                    sleep(0.1)
                    bytes_to_send = chosen_file.read(1024)
                    client_con.send(bytes_to_send)
                result = sock.recv(1024)  # NOTE(review): reads from the global listening socket, not client_con — verify
                print "Client says: {}".format(result)
        except:
            print "Sending file failed. Probably internet connection has lost."
# (3) Tell all clients to get in multi clients file transfer mode.
def send_to_all_clients_mode():
    """Switch every connected client into multi-client file-transfer mode."""
    for i, client_con in enumerate(clients_connections):
        client_con.send("MC-File")
    dst_file_to_all()
# (3) Choose destination path in multi clients mode.
def dst_file_to_all():
    """Ask for one destination path and broadcast it to every client."""
    print "Type the destination path (Type 'back' to return):"
    dst_file_path = raw_input("Destination Path > ")
    while len(dst_file_path) == 0:
        dst_file_path = raw_input("Destination Path > ")
    for i, client_con in enumerate(clients_connections):
        if dst_file_path == "back":
            client_con.send("back")
            continue
        else:
            client_con.send(dst_file_path)
            check_path_answer = client_con.recv(1024)
            print "{} says: {}".format(clients_addresses[i][0], check_path_answer)
    if dst_file_path == "back":
        send_file_menu()
        return
    else:
        source_file_to_all()
        return
# (3) Choose a file source in multi clients mode.
def source_file_to_all():
    """Stream one local file to every client that accepted the destination."""
    while True:
        src_file_path = raw_input("Source File > ")
        while len(src_file_path) == 0:
            src_file_path = raw_input("Source File > ")
        if not path.isfile(src_file_path) and src_file_path != "back":
            print "There is no such a file."
            continue
        for i, client_con in enumerate(clients_connections):
            if src_file_path == "back":
                client_con.send("back")
                client_con.recv(1024)  # drain the client's acknowledgement
                continue
            else:
                src_file_name = path.basename(src_file_path)
                client_con.send(src_file_name)
                relevant_check = client_con.recv(1024)
                if relevant_check == "Out of scope":  # client declined this file
                    continue
                else:
                    client_con.send(str(path.getsize(src_file_path)))
                    sleep(0.1)
                    try:
                        with open(src_file_path, 'rb') as chosen_file:
                            bytes_to_send = chosen_file.read(1024)
                            client_con.send(bytes_to_send)
                            while len(bytes_to_send) > 0:
                                sleep(0.1)
                                bytes_to_send = chosen_file.read(1024)
                                client_con.send(bytes_to_send)
                            print "{} says: File transferring completed".format(clients_addresses[i][0])
                    except:
                        print "Sending file to {} has failed. Probably internet connection has lost.".format(clients_addresses[i][0])
        if src_file_path == "back":
            dst_file_to_all()  # NOTE(review): "back" loops to the destination prompt rather than the menu
            return
# (4) Remote install menu.
def remote_install_menu():
    """Menu: pick a client (or 0 = all) for a remote apt-get install."""
    system("clear")
    global target_client
    print "Install a package to a client [client - number or 0 to all] (Type 'back' to return):\n"
    for i in range (3):
        check_clients()
    print_clients()
    target_client = raw_input("\nClient number: ")
    while len(target_client) == 0:
        target_client = raw_input("Client number: ")
    if target_client == "back":
        print_menu()
        return
    elif target_client == "0":
        remote_install_all()
    else:
        single_client_remote_install()
# (4) Pick a client for remote install.
def single_client_remote_install():
    """Resolve the typed client number and start the Install handshake."""
    global target_client
    try:
        target_client = int(target_client)
        i = 0
        for client_con in clients_connections:
            i += 1
            if target_client == i:
                client_con.send("Install")
                remote_install_command(client_con)
                return
        if target_client > i:
            print "There is no such a client"
            remote_install_menu()
    except:
        print "You can't input a character"
        raw_input("\nPress enter to continue...\n")
        remote_install_menu()
# (4) Remote install command
def remote_install_command(client_con):
    """Send 'apt-get install <pkg> -y' commands to one client until 'back'."""
    while True:
        package_name = raw_input("Type the package name you would like to install remotely (Type 'back' to return): ")
        if package_name == "back":
            client_con.send("back")
            remote_install_menu()
            return
        cmd = "apt-get install {} -y".format(package_name)  # NOTE(review): package_name is not validated/escaped before being run remotely
        client_con.send(cmd)
# (4) Remote install to all clients.
def remote_install_all():
    """Broadcast an apt-get install command to every client until 'back'."""
    while True:
        package_name = raw_input("Type the package name you would like to install remotely (Type 'back' to return): ")
        for client_con in clients_connections:
            client_con.send("Install")
            sleep(0.1)
            if package_name == "back":
                client_con.send("back")
            cmd = "apt-get install {} -y".format(package_name)  # NOTE(review): still sent after "back" (no else) — clients presumably ignore it; verify
            client_con.send(cmd)
        if package_name == "back":
            remote_install_menu()
            return
# (5) Remote uninstall menu
def remote_uninstall_menu():
    """Menu: pick a client (or 0 = all) for a remote apt-get remove."""
    system("clear")
    global victim_client
    print "Remove a package from a client [client - number or 0 to all] (Type 'back' to return)\n"
    for i in range(3):
        check_clients()
    print_clients()
    victim_client = raw_input("\nClient number: ")
    while len(victim_client) == 0:
        victim_client = raw_input("Client number: ")
    if victim_client == "back":
        print_menu()
        return
    elif victim_client == "0":
        remote_uninstall_all()
    else:
        single_client_remote_uninstall()
# (5) Pick a client for remote uninstall
def single_client_remote_uninstall():
    """Resolve the typed client number and start the UnInstall handshake."""
    global victim_client
    try:
        victim_client = int(victim_client)
        i = 0
        for client_con in clients_connections:
            i += 1
            if victim_client == i:
                client_con.send("UnInstall")
                remote_uninstall_command(client_con)
                return
        if victim_client > i:
            print "There is no such a client"
            remote_uninstall_menu()
    except:
        print "You can't input a character"
        raw_input("\nPress enter to continue...\n")
        remote_uninstall_menu()
# (5) Remote uninstall command.
def remote_uninstall_command(client_con):
    """Send remove + purge commands for one package to one client until 'back'."""
    while True:
        package_name = raw_input("Type the package name you would like to remove remotely (Type 'back' to return): ")
        if package_name == "back":
            client_con.send("back")
            remote_uninstall_menu()
            return
        cmd = "apt-get remove {} -y".format(package_name)
        client_con.send(cmd)
        client_con.recv(1024)  # wait for the client to finish the first remove
        cmd = "apt-get remove --purge {} -y".format(package_name)
        client_con.send(cmd)
# (5) Remote uninstall to all clients
def remote_uninstall_all():
    """Broadcast remove + purge commands to every client until 'back'."""
    while True:
        package_name = raw_input("Type the package name you would like to remove remotely (Type 'back' to return): ")
        for client_con in clients_connections:
            client_con.send("UnInstall")
            sleep(0.1)
            if package_name == "back":
                client_con.send("back")
            cmd = "apt-get remove {} -y".format(package_name)  # NOTE(review): still sent after "back" (no else), mirroring remote_install_all
            client_con.send(cmd)
            client_con.recv(1024)
            cmd = "apt-get remove --purge {} -y".format(package_name)
            client_con.send(cmd)
        if package_name == "back":
            remote_uninstall_menu()
            return
# (6) Netcat menu
def netcat_menu():
    """Menu: pick a client to chat with (or 0 = broadcast-only chat)."""
    system("clear")
    global chat_mate
    print "Chat with a client [client - number or 0 to SEND ONLY to all] (Type 'back' to return)\n"
    for i in range(3):
        check_clients()
    print_clients()
    chat_mate = raw_input("\nClient number: ")
    while len(chat_mate) == 0:
        chat_mate = raw_input("Client number: ")
    if chat_mate == "back":
        print_menu()
        return
    elif chat_mate == "0":
        netcat_all()
    else:
        netcat_single()
# (6) Pick a client for netcat
def netcat_single():
    """Resolve the typed client number and start a NetCat session with it."""
    global chat_mate
    try:
        chat_mate = int(chat_mate)
        i = 0
        for client_con in clients_connections:
            i += 1
            if chat_mate == i:
                client_con.send("NetCat")
                netcat_chat_single()
                return
        if chat_mate > i:
            print "There is no such a client"
            netcat_menu()
    except:
        print "You can't input a character"
        raw_input("\nPress enter to continue...\n")
        netcat_menu()
# (6) Start netcat chat with the chosen client.
def netcat_chat_single():
    """Block in an external 'nc -lvp 54321' listener until the operator hits Ctrl+C."""
    print "Opening a Netcan session with the client, please Hold patiently. To stop chatting type Ctrl+C."
    cmd = "nc -lvp 54321"  # requires netcat on the server host; port is hard-coded
    system(cmd)
    netcat_menu()
# (6) Send chat (messages) to all connected clients.
def netcat_all():
    """Broadcast typed messages to every client until 'stop chat'."""
    print "You are now sending messages to all connected clients. Type 'stop chat' to exit."
    you_said = " "
    for client_con in clients_connections:
        client_con.send("NetCatAll")
    while you_said != "stop chat":
        you_said = raw_input("> ")
        print "You: {}".format(you_said)
        for client_con in clients_connections:
            client_con.send(you_said)
            if you_said == "stop chat":
                client_con.send("stop chat")  # NOTE(review): "stop chat" is sent twice per client
    sleep(0.5)
    netcat_menu()
# Exit
def exit_program():
    """Notify all clients, close the listening socket and quit."""
    close_socket()
    print "Master of Puppets Server is... Closed :)"
# User typed an unexisting option
def invalid_option():
    """Report an invalid menu choice and return to the menu."""
    print "Invalid option"
    enter_to_continue()
# Getting back to main menu
def enter_to_continue():
    """Pause for the operator, then redraw the main menu."""
    raw_input("\nPress enter to continue...\n")
    print_menu()
# Closing the socket
def close_socket():
    """Send "close" to every client, then close the server socket."""
    global sock
    for connection in clients_connections:
        connection.send("close")
    sock.close()
# Main menu INPUT
def select_menu():
    """Dispatch the operator's main-menu choice via a lookup table."""
    user_selection = raw_input("Action: ")
    menu_options = {
        "1": show_clients,
        "2": command_clients_menu,
        "3": send_file_menu,
        "4": remote_install_menu,
        "5": remote_uninstall_menu,
        "6": netcat_menu,
        "7": exit_program,
    }
    menu_options.get(user_selection, invalid_option)()  # unknown keys fall back to invalid_option
# Main menu OUTPUT
def print_menu():
    """Clear the screen, draw the main menu banner and hand off to select_menu()."""
    system("clear")
    time = datetime.now().strftime('%H:%M')
    print """
    ##############################################################################
                        Python 102 Name: Gadi Tabak {}
    ##############################################################################
    Segment: {}
    Your DNS: {}
    Your gateway: {}
    #####################################
    Menu
    ----
    1. Show all clients
    2. Send commands to all clients
    3. Transfer files to all clients
    4. Install something on all clients
    5. Remove something from all clients
    6. Open Netcat (Chat) Session.
    7. Exit
    """.format(time, segment, server_dns, gateway)
    select_menu()
# Main function
def main():
    """Start the background listener threads, then enter the interactive menu."""
    threads()
    print_menu()
# Let's start (and make sure the socket will be closed in case of an error)
try:
    main()
except:
    print "Something wrong happend and the program had to be closed."
    for client_conn in clients_connections:
        client_conn.send("close")
    close_socket()  # NOTE(review): close_socket() sends "close" a second time to each client
| [
"slipkno5@hotmail.com"
] | slipkno5@hotmail.com |
cec36baa612c60f379002a0c6efbe3bfb85d5786 | 13e588e2e656caee69bfc31106dd8d52f5a64b7a | /lib/refresh_view.py | a21f698cc7d2d7f0988e39843f6326fa80a3c771 | [] | no_license | fbraza/DSTI-SQL_VIEW | cdf6a875e85b76f672f203e6c10aafba53c34880 | a49337b9c2c42145304552a21014ca88a5837fa8 | refs/heads/main | 2022-12-27T19:32:10.328407 | 2020-10-06T17:43:18 | 2020-10-06T17:43:18 | 301,807,665 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,395 | py | from __future__ import annotations
import os
import filecmp
import pandas as pd
from .connection import Connection
FILE_NAME = "Survey_updated_view.csv"
TEMP_NAME = "temp_view.csv"
def is_folder_empty(path_directory: str) -> bool:
    """Report whether the directory at *path_directory* has no entries.

    Parameters
    ----------
    - path_directory: string of the path directory

    Return
    ------
    - boolean

    Raises
    ------
    - ValueError when the path does not point at an existing directory.
    """
    if not os.path.isdir(path_directory):
        raise ValueError("Enter a valid folder path.\n")
    entries = os.listdir(path_directory)
    return len(entries) == 0
def has_view_csv(path_directory: str) -> bool:
    """
    Function designed to determine if a view is already present in the folder

    Parameter
    ---------
    - path_directory: string of the path directory

    Return
    ------
    - boolean (True when FILE_NAME exists in the folder, False otherwise)

    Raises ValueError when the path is not an existing directory.
    """
    if not os.path.isdir(path_directory):
        raise ValueError("Enter a valid folder path.\n")
    # Fix: the original had no `return False` path and returned None when the
    # view file was absent, silently relying on None being falsy at call sites.
    return FILE_NAME in os.listdir(path_directory)
def is_view_updated(file_path_1: str, file_path_2: str) -> bool:
    """
    Function designed to determine if the generated view is different
    from the last one present in the folder.

    Parameters
    ----------
    - file_path_1: string path of the first file
    - file_path_2: string path of the second file

    Return
    ------
    - boolean: True when the files' contents differ

    Raises ValueError when either path is not an existing file.
    """
    if not (os.path.isfile(file_path_1) and os.path.isfile(file_path_2)):
        raise ValueError("Check if the files path is correct\n")
    # Fix: shallow=False forces a byte-by-byte comparison.  The default
    # shallow mode compares os.stat() signatures (size + mtime) first, so two
    # same-sized views written within the same timestamp could be reported as
    # equal even when their contents differ.
    return not filecmp.cmp(file_path_1, file_path_2, shallow=False)
def trigger_sql_view_procedure(sql_procedure: str, db_connection: Connection) -> pd.DataFrame:
    """
    Function designed to return the view as a pandas DataFrame

    Parameters
    ----------
    - sql_procedure: string of the SQL procedure obtained using the get_all_data function
    - db_connection: Connection object to talk to the MSSQL Database server

    Return
    ------
    - pd.DataFrame
    """
    # Thin wrapper: pandas executes the procedure on the given connection and
    # materialises the entire result set in memory.
    return pd.read_sql(sql_procedure, db_connection)
def database_view_refresh(sql_procedure: str, path_directory: str, db_connection: Connection) -> None:
    """
    Function designed to generate the view, save it immediately if folder is empty, if not check whether
    the view has been updated before saving it definitively.

    Parameters
    ----------
    - sql_procedure: string of the SQL procedure obtained using the get_all_data function
    - path_directory: string of the path directory
    - db_connection: Connection object to talk to the MSSQL Database server

    Return
    ------
    - None
    """
    # NOTE(review): output paths are hard-coded to the "Views" folder even
    # though the caller passes path_directory — confirm they are the same dir.
    current_view_path = os.path.join("Views", FILE_NAME)
    temp_view_path = os.path.join("Views", TEMP_NAME)
    if is_folder_empty(path_directory):
        # First run: no previous view to compare against, save directly.
        trigger_sql_view_procedure(sql_procedure, db_connection).to_csv(path_or_buf=current_view_path, na_rep="-1")
        print("View saved\n")
    elif has_view_csv(path_directory):
        # Write to a temporary file so the old/new comparison is done on disk.
        trigger_sql_view_procedure(sql_procedure, db_connection).to_csv(path_or_buf=temp_view_path, na_rep="-1")
        if is_view_updated(current_view_path, temp_view_path):
            # Replace the stale view atomically-ish: remove then rename.
            os.remove(current_view_path)
            os.rename(temp_view_path, current_view_path)
            print("View has been updated.")
        else:
            os.remove(temp_view_path)
            print("Data is up to date. No update of the view.")
    # NOTE(review): when the folder is non-empty but contains no view CSV,
    # this function silently does nothing.
| [
"fbraza@tutanota.com"
] | fbraza@tutanota.com |
67d0ba7db3b094445dde8bc9fe3cbec390cc5a2e | dcc97cf781a72b07020c87a1d8413de9eb29191e | /data/velopass_data/load_inout.py | bbbcf10f46cfdad13c86955b23e4843e7c06bb6b | [] | no_license | loicgasser/bikestats | 0a234d8de9666ae65b0ca52026d3cbafc82a0239 | 28b90b15681323c0101ed31628f6909b8857aa0a | refs/heads/master | 2020-04-15T00:06:37.417438 | 2015-05-18T09:12:41 | 2015-05-18T09:12:41 | 31,642,944 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,708 | py | import csv
import psycopg2
'''conn = psycopg2.connect("dbname=velopass user=postgres password=")
SELECT TIMESTAMP '1999-01-08 04:05:06'
cur = conn.cursor()
cur.execute("SELECT * from public.velopass")
results = cur.fetchall()
INSERT INTO transactions_aggregated
(
no,
date,
"in",
"out",
diff,
weekday,
precipitation,
temperature,
wind,
sun
)
VALUES
(
101,
TIMESTAMP '1999-01-08 04:05:06',
2,
2,
3,
'Mardi',
0.2,
0.2,
0.2,
0.3
)
print results'''
with open('../../shared/DesignProject/v4m.csv', 'rb') as csvfile:
file_csv = csv.reader(csvfile, delimiter=';')
table_entries = list()
headers = list()
counter = 0
for row in file_csv:
if counter == 0:
for entry in row:
headers.append(entry)
else:
weekday = str(row[0])
date = str(row[1]).replace('.','-')
hour = str(row[2]) if len(row[2]) == 2 else '0' + str(row[2])
temperature = row[len(headers)-4]
precipitation = row[len(headers)-3]
wind = row[len(headers)-2]
sun = row[len(headers)-1]
i = 3
for no in headers[3:len(headers)-4]:
in_out_diff = no.split('in')
if len(in_out_diff) == 2:
temp = dict()
temp['no'] = in_out_diff[1]
temp['in'] = row[i]
in_out_diff = no.split('out')
if len(in_out_diff) == 2:
temp['out'] = row[i]
in_out_diff = no.split('diff')
if len(in_out_diff) == 2:
temp['diff'] = row[i]
try:
int(sun)
except:
sun = 0
temp['date'] = date.replace('.','-') + ' ' + hour + ':00:00'
temp['weekday'] = str(weekday)
temp['temperature'] = float(temperature)
temp['precipitation'] = float(precipitation)
temp['wind'] = float(wind)
temp['sun'] = int(sun)
table_entries.append(temp)
i += 1
counter += 1
## Update DB table already created
conn = psycopg2.connect("dbname=velopass user=postgres password=")
cur = conn.cursor()
for row in table_entries:
query = ('''INSERT INTO transactions_aggregated (no, date, "in", "out", diff, weekday, precipitation, temperature, wind, sun) VALUES (%s, TIMESTAMP '%s', %s, %s, %s, '%s', %s, %s, %s, %s)''') % (row['no'], row['date'], row['in'], row['out'], row['diff'], row['weekday'], row['precipitation'], row['temperature'], row['wind'], row['sun'])
cur.execute(query)
conn.commit()
| [
"vishal.chandra.sood@gmail.com"
] | vishal.chandra.sood@gmail.com |
58a1e44faa93679374aa5383c8cda2d3e4ae08e2 | 2a304da07857054b1c52b9aa0f1d6c06fd37d16f | /CSVFileReader.py | edca375c773113d9c3e9c172b9aada42f7761a84 | [] | no_license | adamacampbell/PythonCSVFileReader | 532067877349c2e616f406cc4e6d1888b335e0f0 | ecff47b60392cfe29cb282c3500e1373bf3572b5 | refs/heads/master | 2020-04-15T16:24:17.786834 | 2019-01-09T10:51:52 | 2019-01-09T10:51:52 | 164,833,556 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,567 | py | import csv
import os
def choosefile():
    """Prompt until the user supplies a path to an existing file; return it.

    Fix: the original recursed on invalid input but discarded the recursive
    call's return value, so any retry ultimately returned None.  A loop
    makes every accepted path reach the caller.
    """
    while True:
        file = input("Please enter file name or directory\n")
        if os.path.isfile(file):
            return file
        print("File directory not found")
with open(choosefile(), 'r') as file:
csv_reader = csv.reader(file, delimiter=' ')
packetsReceived = 0
packetsDropped = 0
packetsQueued = 0
packetsDequeued = 0
ackPackets = 0
nackPackets = 0
for line in csv_reader:
if line[4] == 'ack':
ackPackets += 1
if line[4] == 'nack':
nackPackets += 1
else:
if line[0] == 'r':
packetsReceived += 1
if line[0] == 'd':
packetsDropped += 1
if line[0] == '+':
packetsQueued += 1
if line[0] == '-':
packetsDequeued += 1
throughput = (packetsReceived / packetsQueued) * 100
lostPackets = packetsQueued - packetsReceived
print("Throughput: %s" % throughput)
print("Dropped Packets: %s" % packetsDropped)
print("Lost Packets: %s" % lostPackets)
print("Packets Received: %s" % packetsReceived)
print("Packets Queued: %s" % packetsQueued)
with open(input("Please enter new file name, file will end in .csv\n") + '.csv', 'w') as newFile:
csv_writer = csv.writer(newFile)
csv_writer.writerow(['Throughput', 'Dropped Packets', 'Lost Packets', 'Packets Received', 'Packets Queued'])
csv_writer.writerow([throughput, packetsDropped, lostPackets, packetsReceived, packetsQueued])
| [
"noreply@github.com"
] | noreply@github.com |
f0ae000ed1eb0a79a633644c810f5132c954dd92 | 3883736fc8742282552287159bff1ef209243779 | /alejandra/punto_2.py | 9e68801240887d5ec9ed082c76e0fb19ffc64a04 | [] | no_license | riyanhax/Control_Venta_Jugos | 9c161cc6f43bf008bdf4c9fa95b87208ea6636b3 | efa4204e97f500f522f42400ad1f95d0375987c2 | refs/heads/main | 2023-08-11T00:35:17.415866 | 2021-10-01T01:19:46 | 2021-10-01T01:19:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,751 | py | def invertir(vec1):
vecinv = []
x = len(vec1)-1
while x >= 0:
vecinv.append(vec1[x])
x -= 1
return vecinv
def destruir(num):
a = []
while num > 0:
if num >= 100000:
quita = num % 100
a.append(quita)
num //= 100
if num < 100000:
quita = num % 10
a.append(quita)
num //= 10
if num < 1000:
quita = num % 10
a.append(quita)
num //= 10
if num < 100:
quita = num % 100
a.append(quita)
num //= 100
aux = invertir(a)
return aux
def porcien(x, y, matri):
des = 0
info = 0
if x < 5:
if y > 10:
des = (35 * matri) / 100
info = 35
elif (y > 6 and y < 9):
des = (25 * matri) / 100
info = 25
elif (x > 5 and x < 10):
if y > 10:
des = (28 * matri) / 100
info = 28
elif (y > 6 and y < 9):
des = (9 * matri) / 100
info = 9
return des, info
def calMatricula(dato,matri):
valor = 0
desc = 0
info = 0
reca = 0
if dato[1] == 1:
desc = matri
if dato[1] == 2:
desc = matri/2
valor = matri - desc
else:
if dato[3] > 4:
reca = (12 * matri) / 100
r, info = porcien(dato[4], dato[0], matri)
desc = r
valor = (matri - desc) + reca
elif dato[3] <= 4:
r, info = porcien(dato[4],dato[0], matri)
desc = r
valor = matri - desc
return valor, desc, info, reca
def bonosis(x, y):
bono = 0
if x < 5:
if y == 1:
bono = 230000
elif (y == 2 or y == 3):
bono = 200000
elif (x > 5 and x < 10):
if y == 1:
bono = 180000
elif (y == 2 or y == 3):
bono = 100000
return bono
matri = int(input(f'¿Cual es el valor de la matricula? -->'))
while True:
num = int(input(f'Digite el codigo --> '))
if (num >= 1000000 and num <= 9999999):
break
else:
print(f'Codigo no valido!')
r = destruir(num)
rta, des, info, reca = calMatricula(r, matri)
bono = bonosis(r[4], r[2])
print(f'Familia --> {r[0]}.\n'
f'Sisben --> {r[1]}.\n'
f'Estracto --> {r[2]}.\n'
f'Carrera --> {r[3]}.\n'
f'Puesto --> {r[4]}.\n'
f'El descuento es del {info}% --> ${des}.\n'
f'Con el recargo del 12% --> {reca}.\n'
f'El estudiante tiene un bono de --> ${bono}.\n'
f'El valor a pagar por la matricula es de --> ${rta-bono}.') | [
"nadroy11720011022@gmail.com"
] | nadroy11720011022@gmail.com |
d03b154d78c9833bf938cf54367d34ddaaaed477 | 9d0195aa83cc594a8c61f334b90375961e62d4fe | /JTTest/SL7/CMSSW_10_2_15/src/dataRunA/nano4144.py | 437f1315e60150f35c8d478d47df71dfcc8adc9d | [] | no_license | rsk146/CMS | 4e49592fc64f6438051544c5de18598db36ed985 | 5f8dab8c59ae556598b9747b52b88205fffc4dbe | refs/heads/master | 2022-12-01T03:57:12.126113 | 2020-08-04T03:29:27 | 2020-08-04T03:29:27 | 284,863,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,293 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: nanoAOD_jetToolbox_cff -s NANO --data --eventcontent NANOAOD --datatier NANOAOD --no_exec --conditions 102X_dataRun2_Sep2018Rereco_v1 --era Run2_2018,run2_nanoAOD_102Xv1 --customise_commands=process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False))) --customise JMEAnalysis/JetToolbox/nanoAOD_jetToolbox_cff.nanoJTB_customizeMC --filein /users/h2/rsk146/JTTest/SL7/CMSSW_10_6_12/src/ttbarCutTest/dataReprocessing/0004A5E9-9F18-6B42-B31D-4206406CE423.root --fileout file:jetToolbox_nano_datatest.root
# Auto-generated cmsDriver.py configuration (see header comment); edit with care.
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('NANO',eras.Run2_2018,eras.run2_nanoAOD_102Xv1)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
# -1 = process every event in the input file.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring('file:root://cms-xrd-global.cern.ch//store/data/Run2018A/EGamma/MINIAOD/17Sep2018-v2/110000/B64A9634-E3CA-084C-9EA2-18FC8A77FE70.root'),
    secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(

)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
    annotation = cms.untracked.string('nanoAOD_jetToolbox_cff nevts:1'),
    name = cms.untracked.string('Applications'),
    version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.NANOAODoutput = cms.OutputModule("NanoAODOutputModule",
    compressionAlgorithm = cms.untracked.string('LZMA'),
    compressionLevel = cms.untracked.int32(9),
    dataset = cms.untracked.PSet(
        dataTier = cms.untracked.string('NANOAOD'),
        filterName = cms.untracked.string('')
    ),
    fileName = cms.untracked.string('file:jetToolbox_nano_datatest4144.root'),
    outputCommands = process.NANOAODEventContent.outputCommands
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_dataRun2_Sep2018Rereco_v1', '')
# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequence)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODoutput_step = cms.EndPath(process.NANOAODoutput)
# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# customisation of the process.
# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeData
#call to customisation function nanoAOD_customizeData imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeData(process)
# Automatic addition of the customisation function from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff import nanoJTB_customizeMC
#call to customisation function nanoJTB_customizeMC imported from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
process = nanoJTB_customizeMC(process)
# End of customisation functions
# Customisation from command line
# Disable ROOT implicit multithreading, as requested on the cmsDriver command line.
process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False)))
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
"rsk146@scarletmail.rutgers.edu"
] | rsk146@scarletmail.rutgers.edu |
9b5c2cbf6056ca51b45eea33164c9c2990e8a7e8 | 8f6e1fb537cf5a0d9800a63aa20abb596e5aff8f | /Basics/Anagrams finder/find-anagrams.py | c92bf383851c977d603ee160af449a4cd7bb9ab9 | [] | no_license | georgiev-ventsi/my-python | 827407ec83b3470a42659791fc76e1825f8a73ad | 581603f6634c80d348a7a1aebf15c6162b1e127b | refs/heads/main | 2023-08-17T18:03:39.448656 | 2021-10-19T07:15:52 | 2021-10-19T07:15:52 | 417,171,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | def find_anagrams(str1, str2):
sorted_lower_str1 = sorted(str1.lower())
sorted_lower_str2 = sorted(str2.lower())
print(sorted_lower_str1, sorted_lower_str2)
if len(sorted_lower_str1) == len(sorted_lower_str2):
if sorted_lower_str1 == sorted_lower_str2:
print('These two strings are anagrams')
else:
print('These two strings are not anagrams')
else:
print('These two strings are not anagrams')
find_anagrams('HIVes', 'Menhit')
find_anagrams('HIVes', 'Eshiv')
find_anagrams('HIVes', 'Eshivs') | [
"vgeorgiev@AGROLAND.BG"
] | vgeorgiev@AGROLAND.BG |
f7af00972670b3961fb2a93450be13284126ec84 | 1af379b14252defebfe9ba55154770a021d040f9 | /2018-09-26/attract/gen/limiti.py | a3f48e45a0f7f19d52915b6b1f0cd1afd65e549c | [] | no_license | romeorizzi/esami-algo-TA | ee8cc12731963ae0d6b46a026b3ef8782539a73b | 57af9679a95ee76c302cae1c5daf5cd9fbc9afd3 | refs/heads/master | 2020-06-24T14:39:03.361825 | 2019-08-02T07:22:02 | 2019-08-02T07:22:02 | 198,989,340 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 28 | py | MAXN = 200000
MAXM = 500000
| [
"romeo.rizzi@univr.it"
] | romeo.rizzi@univr.it |
0288094a09c412b85518605c28634aa250b5342a | d7527f2be1a03ed29147fc0a8afc98bfa63afb6c | /RunningCalc.py | 67d9dd2bddd64a5dfe5f4a9160e24c85f0a066c9 | [] | no_license | sakobrin/running-calculator | 05551c226a51362565177c3118be0a828e037081 | d723c81cdd1c569ca8b613a4dc70e316ea60c7b3 | refs/heads/master | 2021-01-21T16:44:55.923915 | 2017-04-30T05:02:23 | 2017-04-30T05:02:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,020 | py | from tkinter import *
from tkinter import ttk
main = Tk()
content = ttk.Frame(main)
main.title('Running Calculator')
frame = ttk.Frame(content, borderwidth=5, relief="sunken", width = 500, height = 300)
timelbl = ttk.Label(content, text = "Time")
time = ttk.Entry(content)
distancelbl = ttk.Label(content, text = "Distance")
distance = ttk.Entry(content)
pacelbl = ttk.Label(content, text = "Pace")
pace = ttk.Entry(content)
viewSplits = BooleanVar()
viewSplits.set(False)
splits = ttk.Checkbutton(content, text="View Splits?", variable = viewSplits, onvalue= True)
content.grid(column = 0, row = 0)
frame.grid(column = 0, row = 0, columnspan = 1, rowspan = 9)
timelbl.grid(column = 0, row = 0, columnspan = 1)
time.grid(column = 0, row = 1, columnspan = 1)
distancelbl.grid(column = 0, row = 3, columnspan = 1)
distance.grid(column = 0, row = 4, columnspan = 1)
pacelbl.grid(column = 0, row = 6, columnspan = 1)
pace.grid(column = 0, row = 7, columnspan = 1)
splits.grid(column = 0, row = 9)
main.mainloop() | [
"spencerhusen@gmail.com"
] | spencerhusen@gmail.com |
8d0d1313e78693d985b4e4c39f8e046404d8c52c | 67542fb4ac2e6eedd660e0966363676751bbbd0b | /DB/DataBase.py | fd1df602b354b89b7d315b4af6b8f90e1c9dc6c6 | [] | no_license | PnP4/IoTLink | a389b1fd88e8a0876eea4025ab753a575b111633 | 495b20343166b153bb7e214db1a385648a5f35cc | refs/heads/master | 2020-06-10T04:53:37.211881 | 2016-12-31T13:16:39 | 2016-12-31T13:16:39 | 76,085,466 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,367 | py | import sqlite3
class SQLDB:
def __init__(self):
try:
self.getCon()
except:
pass
try:
self.CreateTable()
except:
pass
try:
self.insertInitData()
except Exception as e:
print e
pass
def getCon(self):
self.dbconnection = sqlite3.connect('/tmp/globalpnp.db')
def CreateTable(self):
cursor = self.dbconnection.cursor()
cursor.execute('''CREATE TABLE metmetadata(name text primary key,ip text, port int,type text)''')
cursor.execute('''CREATE TABLE snapdata(keyname text primary key,data text)''')
self.dbconnection.commit()
self.dbconnection.close()
def insertInitData(self):
self.getCon()
cursor = self.dbconnection.cursor()
cursor.execute("Insert INTO metmetadata(name,port) values('inp',8200)")
cursor.execute("Insert INTO metmetadata(name,port,ip) values('out',8500,'192.168.100.1')")
cursor.execute("Insert INTO metmetadata(name,port) values('cont',8100)")
cursor.execute("Insert INTO snapdata(keyname,data) values('status','av')")
cursor.execute("Insert INTO snapdata(keyname,data) values('prgid','01')")
cursor.execute("Insert INTO snapdata(keyname,data) values('programs','a|b|c|d')")
cursor.execute("Insert INTO snapdata(keyname) values('parent')")
cursor.execute("Insert INTO snapdata(keyname) values('myname')") #whether a b c d
cursor.execute("Insert INTO snapdata(keyname) values('controljson')")
cursor.execute("Insert INTO snapdata(keyname) values('seqjson')")
cursor.execute("Insert INTO snapdata(keyname) values('myprj')")
self.dbconnection.commit()
self.dbconnection.close()
def getInputDaemonPort(self):
self.getCon()
cursor = self.dbconnection.cursor()
cursor.execute("SELECT port FROM metmetadata WHERE name = 'inp'")
return cursor.fetchone()[0]
    def getOutputDaemonPort(self):
        # Return the output daemon's port from metmetadata.
        # NOTE(review): the connection opened here is never closed (leaked).
        self.getCon()
        cursor = self.dbconnection.cursor()
        cursor.execute("SELECT port FROM metmetadata WHERE name = 'out'")
        return cursor.fetchone()[0]
    def getOutputDaemonIP(self):
        # Return the output daemon's IP address from metmetadata.
        # NOTE(review): the connection opened here is never closed (leaked).
        self.getCon()
        cursor = self.dbconnection.cursor()
        cursor.execute("SELECT ip FROM metmetadata WHERE name = 'out'")
        return cursor.fetchone()[0]
    def getControlDaemonPort(self):
        # Return the control daemon's port from metmetadata.
        # NOTE(review): the connection opened here is never closed (leaked).
        self.getCon()
        cursor = self.dbconnection.cursor()
        cursor.execute("SELECT port FROM metmetadata WHERE name = 'cont'")
        return cursor.fetchone()[0]
    def getStatus(self):
        # Return the stored availability flag: 'av' or 'nav' (see setStatus).
        # NOTE(review): the connection opened here is never closed (leaked).
        self.getCon()
        cursor = self.dbconnection.cursor()
        cursor.execute("SELECT data FROM snapdata WHERE keyname = 'status'")
        return cursor.fetchone()[0]
    def getprgid(self):
        # Return the current program id (string, seeded as '01').
        # NOTE(review): the connection opened here is never closed (leaked).
        self.getCon()
        cursor = self.dbconnection.cursor()
        cursor.execute("SELECT data FROM snapdata WHERE keyname = 'prgid'")
        return cursor.fetchone()[0]
    def getprograms(self):
        # Return the '|'-separated list of program names (e.g. 'a|b|c|d').
        # NOTE(review): the connection opened here is never closed (leaked).
        self.getCon()
        cursor = self.dbconnection.cursor()
        cursor.execute("SELECT data FROM snapdata WHERE keyname = 'programs'")
        return cursor.fetchone()[0]
def isPrgInMe(self,prg):
prs=self.getprograms()
prs=prs.split("|")
if (prg in prs):
return True
return False
def setPrgID(self,prgid):
self.getCon()
cursor = self.dbconnection.cursor()
cursor.execute('''UPDATE snapdata SET data = ? WHERE keyname = 'prgid' ''',(prgid,))
self.dbconnection.commit()
self.dbconnection.close()
def setStatus(self,status):
self.getCon()
cursor = self.dbconnection.cursor()
if(status):
cursor.execute('''UPDATE snapdata SET data = 'av' WHERE keyname = 'status' ''')
else:
cursor.execute('''UPDATE snapdata SET data = 'nav' WHERE keyname = 'status' ''')
self.dbconnection.commit()
self.dbconnection.close()
def setParent(self, parentIP):
self.getCon()
cursor = self.dbconnection.cursor()
cursor.execute('''UPDATE snapdata SET data = ? WHERE keyname = 'parent' ''', (parentIP,))
self.dbconnection.commit()
self.dbconnection.close()
print "PARENIP:- ",parentIP
def setMyName(self, name):
self.getCon()
cursor = self.dbconnection.cursor()
cursor.execute('''UPDATE snapdata SET data = ? WHERE keyname = 'myname' ''', (name,))
self.dbconnection.commit()
self.dbconnection.close()
def getMyName(self):
try:
self.getCon()
cursor = self.dbconnection.cursor()
cursor.execute("SELECT data FROM snapdata WHERE keyname = 'myname'")
return cursor.fetchone()[0]
except:
return None
def getParent(self):
try:
self.getCon()
cursor = self.dbconnection.cursor()
cursor.execute("SELECT data FROM snapdata WHERE keyname = 'parent'")
return cursor.fetchone()[0]
except:
return None
def setControlJson(self,json):
self.getCon()
cursor = self.dbconnection.cursor()
cursor.execute('''UPDATE snapdata SET data = ? WHERE keyname = 'controljson' ''', (json,))
self.dbconnection.commit()
self.dbconnection.close()
def setSeqJson(self,json):
self.getCon()
cursor = self.dbconnection.cursor()
cursor.execute('''UPDATE snapdata SET data = ? WHERE keyname = 'seqjson' ''', (json,))
self.dbconnection.commit()
self.dbconnection.close()
def getControlJson(self):
try:
self.getCon()
cursor = self.dbconnection.cursor()
cursor.execute("SELECT data FROM snapdata WHERE keyname = 'controljson'")
return cursor.fetchone()[0]
except:
return None
def getSeqJson(self):
try:
self.getCon()
cursor = self.dbconnection.cursor()
cursor.execute("SELECT data FROM snapdata WHERE keyname = 'seqjson'")
return cursor.fetchone()[0]
except:
return None
def setOutputIp(self, ip):
self.getCon()
cursor = self.dbconnection.cursor()
cursor.execute('''UPDATE metmetadata SET ip = ? WHERE name = 'out' ''', (ip,))
self.dbconnection.commit()
self.dbconnection.close()
def setOutputPort(self, port):
self.getCon()
cursor = self.dbconnection.cursor()
cursor.execute('''UPDATE metmetadata SET port = ? WHERE name = 'out' ''', (port,))
self.dbconnection.commit()
self.dbconnection.close()
def setMyProgram(self,myprj):
self.getCon()
cursor = self.dbconnection.cursor()
cursor.execute('''UPDATE snapdata SET data = ? WHERE keyname = 'myprj' ''', (myprj,))
self.dbconnection.commit()
self.dbconnection.close()
# a=SQLDB()
# print a.getOutputDaemonPort()
# print a.getInputDaemonPort()
# print a.getprograms()
# print a.getControlDaemonPort()
# print a.getOutputDaemonIP()
# print a.isPrgInMe("a")
# print a.isPrgInMe("z")
# print a.getprgid()
# print a.getStatus()
# print a.getMyName()
# print a.getParent()
| [
"nrv.rox.u@gmail.com"
] | nrv.rox.u@gmail.com |
8f1d7509d6740417abcd8905bddf1141f2fc7058 | 87b249bde7c729800f2c2530a0ee35c101e3d363 | /game/models.py | 0182d4d21bfdffb62927c088fda77e2b58ca7a54 | [] | no_license | infsolution/prehistory | 9c909230cdc114dfdbc97875df27de23262befcc | 70bab6b9c8352612bd46572b63330c1187dd869e | refs/heads/master | 2020-09-12T12:14:39.358521 | 2019-11-29T23:59:43 | 2019-11-29T23:59:43 | 222,422,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,041 | py | from django.contrib.auth.models import User
from django.contrib.auth import *
from django.db import models
class Knowing(models.Model):
name = models.CharField(max_length=30)
force = models. IntegerField(default=0)
defense = models.IntegerField(default=0)
allowed_level = models.IntegerField(default=0)
def __str__(self):
return self.name
class Item(models.Model):
name = models.CharField(max_length=40)
force = models. IntegerField(default=0)
defense = models. IntegerField(default=0)
allowed_level = models. IntegerField(default=0)
def __str__(self):
return self.name
class Avatar(models.Model):
owner = models.OneToOneField('auth.User', related_name='avatar_owner', on_delete=models.CASCADE)
name = models.CharField(max_length=12)
force = models. IntegerField(default=100)
defense = models.IntegerField(default=80)
life = models.IntegerField(default=1000)
level = models.IntegerField(default=0)
knowing = models.ManyToManyField(Knowing)
item = models.ManyToManyField(Item)
def __str__(self):
return self.name
| [
"clsinfsolution@gmail.com"
] | clsinfsolution@gmail.com |
e4e53d12e0453f00ec4986d03ad123974f9e5a3b | e055d7bac58de449f57cf4077c7d64a154868af8 | /rbvm/lib/auth.py | cffc289ccf4ccd89e55eb683641e867d5abf67f2 | [] | no_license | redbrick/rbvm | 4b7d60971229596f1eda3cffdfb0e3582ace976b | db36ff9287a6e36f8673b959799b01432ad8454d | refs/heads/master | 2021-01-22T06:18:27.662294 | 2017-05-26T19:56:25 | 2017-05-26T19:56:25 | 92,534,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,083 | py | import cherrypy
from rbvm.model.database import *
import rbvm.lib.sqlalchemy_tool as database
import rbvm.config as config
def get_user():
"""
Returns the currently logged in user
"""
if cherrypy.session.get('authenticated') != True:
return None
else:
username = cherrypy.session.get('username')
user = database.session.query(User).filter(User.username == username).first()
return user
def require_login(func):
"""
Decorator to require a user to be logged in
"""
def wrapper(*args, **kwargs):
if cherrypy.session.get('authenticated') == True:
return func(*args, **kwargs)
else:
raise cherrypy.HTTPRedirect(config.SITE_ADDRESS + 'login')
return wrapper
def require_nologin(func):
"""
Decorator to ensure that a user is not logged in
"""
def wrapper(*args, **kwargs):
if cherrypy.session.get('authenticated') == True:
raise cherrypy.HTTPRedirect(config.SITE_ADDRESS)
else:
return func(*args,**kwargs)
return wrapper
def verify_token(func):
"""
Verifies that the current action token is valid.
"""
def wrapper(*args, **kwargs):
user = get_user()
if 'token' not in kwargs:
raise cherrypy.HTTPRedirect(config.SITE_ADDRESS + 'tokenerror')
token_object = database.session.query(OneTimeToken).filter(OneTimeToken.token==kwargs['token']).first()
if token_object is None or token_object.check_and_expire(user) is True:
raise cherrypy.HTTPRedirect(config.SITE_ADDRESS + 'tokenerror')
else:
return func(*args, **kwargs)
return wrapper
def is_administrator():
"""
Finds the Administrator group, and checks if the current user is a member.
"""
user = get_user()
admin_group = database.session.query(Group).filter(Group.name=='Admins').first()
if admin_group is None:
return False
if admin_group in user.groups:
return True
return False
| [
"werdz@users.noreply.github.com"
] | werdz@users.noreply.github.com |
b63687b59f50e081926916d0bcc2589d6d2e1d3c | 1d5bc6615a1d870d36efd8033f048279f58b4258 | /puppycompanyblog/users/forms.py | 0136658377a71ff784d2e25ad1280de8b6adb840 | [] | no_license | Dzenis-Pepic/Flask-Aplication | b502e455bbe56bc6c115444d32acd072ebfae10a | 42b6495f687283c96308b0d518a0479f983abaeb | refs/heads/master | 2023-07-18T11:27:36.099347 | 2021-09-07T08:57:49 | 2021-09-07T08:57:49 | 398,843,215 | 0 | 0 | null | 2021-09-07T08:57:49 | 2021-08-22T16:12:45 | Python | UTF-8 | Python | false | false | 1,950 | py |
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import DataRequired,Email,EqualTo
from wtforms import ValidationError
from flask_wtf.file import FileField, FileAllowed
from flask_login import current_user
from puppycompanyblog.models import User
class LoginForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
submit = SubmitField('Log In')
class RegistrationForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(),Email()])
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired(), EqualTo('pass_confirm', message='Passwords Must Match!')])
pass_confirm = PasswordField('Confirm password', validators=[DataRequired()])
submit = SubmitField('Register!')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first():
raise ValidationError('Your email has been registered already!')
def validate_username(self, field):
if User.query.filter_by(username=field.data).first():
raise ValidationError('Sorry, that username is taken!')
class UpdateUserForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(),Email()])
username = StringField('Username', validators=[DataRequired()])
picture = FileField('Update Profile Picture', validators=[FileAllowed(['jpg', 'png'])])
submit = SubmitField('Update')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first():
raise ValidationError('Your email has been registered already!')
def validate_username(self, field):
if User.query.filter_by(username=field.data).first():
raise ValidationError('Sorry, that username is taken!')
| [
"dzenisinedz@gmail.com"
] | dzenisinedz@gmail.com |
ddd97e76402b836d715d4a083b77c3284909e904 | 712d76224c59b5f46f02bd480562582645e1b517 | /test3.py | 98a3e742571aec2a94d1870f54bda85756e35f66 | [] | no_license | xinghudamowang/iForestTest | 8f2e1e777f197a564841909a3f2dcbe5e506ba48 | 9ea2b6621f8cc49f7f60fdfb57ccf5db02aac9e3 | refs/heads/master | 2021-01-21T18:11:10.646267 | 2017-03-10T10:13:37 | 2017-03-10T10:13:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,515 | py | # -*- coding: utf-8 -*-
### the simple version for existing customers
def getTransMatrix0(customers, products):
## get trans matrix
prods = set(products)
prods = list(prods)
prodsList = []
for item in prods:
tmp = [customers[ii] for ii, x in enumerate(products) if x == item]
tmp = list(set(tmp))
prodsList.append(tmp)
"""
prods = []
n_prod = len(set(products))
prodsList = [None] * n_prod
for i in range(len(customers)):
if products[i] not in prods:
prods.append(products[i])
if (prodsList[prods.index(products[i])]==None):
prodsList[prods.index(products[i])] = customers[i]
else:
prodsList[prods.index(products[i])] = prodsList[prods.index(products[i])] + customers[i]
"""
n_prod = len(prods)
transM = 1.0*np.arange(n_prod*n_prod).reshape(n_prod, n_prod)
for i in range(n_prod):
for j in range(n_prod):
tmp1 = len(set(prodsList[i]).intersection(set(prodsList[j])))
tmp2 = len(set(prodsList[i]).union(set(prodsList[j])))
transM[i,j] = 1.0*tmp1/tmp2
return(transM, prodsList, prods)
def getCustomerVec0(transM, customers, products, prods, prodsList, cusList, unicus, alpha=0.99):
import numpy as np
## get all feature vectors
n_prod = len(prods)
n_cus = len(unicus)
numii = np.array([0.0]*n_cus)
for i in range(n_cus):
numii[i] = customers.count(unicus[i])
X = 1.0*np.arange(n_prod*n_cus).reshape(n_cus, n_prod)
for i in range(n_cus):
for j in range(n_prod):
if unicus[i] in prodsList[j]:
X[i,j] = alpha * prodsList[j].count(unicus[i])/numii[i]
else:
#item = cusList[i][0]
#X[i,j] = alpha*(prodsList[prods.index(item)].count(unicus[i])/numii[i] )*transM[prods.index(item),j]
X[i,j] = 0
for item in set(cusList[i]):
X[i,j] = X[i,j] + alpha*(prodsList[prods.index(item)].count(unicus[i])/numii[i] )*transM[prods.index(item),j]
if X[i,j] == 0:
X[i,j] = np.random.uniform(0,1.0-alpha)
return(X)
def recommendList(X, unicus, prods):
rdList = []
for i in range(len(prods)):
onelist = sorted(zip(unicus, map(lambda x: round(x, 10), -1*X[:,i])))
rdList.append(onelist)
return(rdList)
def customerList0(customers, products):
ncus = len(set(customers))
cusList = [None] * ncus
unicus = []
for i in range(len(customers)):
if customers[i] not in unicus:
unicus.append(customers[i])
if (cusList[unicus.index(customers[i])]==None):
cusList[unicus.index(customers[i])] = [products[i]]
else:
cusList[unicus.index(customers[i])].append(products[i])
return(cusList, unicus)
def filterRdList(rdList, dutime, customers, products, dates, unicus, prods):
import datetime
import numpy as np
strptime, strftime = datetime.datetime.strptime, datetime.datetime.strftime
fmat = "%Y-%m-%d"
customers = np.array(customers)
products = np.array(products)
#dates = np.array(dates)
targeT = '2017-10-01'
dropList = [None] * len(rdList)
for i in range(len(rdList)):
for j in range(len(rdList[i])):
cus1 = rdList[i][j][0]
ind1 = np.where(customers==cus1)
ind2 = np.where(products==prods[i])
inInd = np.intersect1d(ind1,ind2)
if len(inInd)>0:
amax = max(inInd)
endtime = strftime(strptime(dates[amax], fmat) + datetime.timedelta(dutime[i]), fmat)
if strptime(endtime, fmat) > strptime(targeT,fmat):
if (dropList[i]==None):
dropList[i] = [j]
else:
dropList[i].append(j)
rdList1 = rdList
for k in range(len(rdList)):
if not (dropList[k]==None):
rdList1[k] = [i for j, i in enumerate(rdList[k]) if j not in dropList[k]]
return(rdList1)
### test section ==============================================================
from metrics_classifer import *
from transMatrix import *
import numpy as np
import timeit
customers, products, money, dates = readFile('test_data2.csv')
customers = customers[:5000]
products = products[:5000]
dates = dates[:5000]
t1 = timeit.default_timer()
transM, prodsList, prods = getTransMatrix0(customers, products)
t2 = timeit.default_timer()
cusList, unicus = customerList0(customers, products)
#cusList, unicus = customerList(customers, products)
t3 = timeit.default_timer()
X = getCustomerVec0(transM, customers, products, prods, prodsList, cusList, unicus, alpha=0.9)
print X.shape
t4 = timeit.default_timer()
rdList = recommendList(X, unicus, prods)
t5 = timeit.default_timer()
print "Escaping time is: %f" % (t2-t1)
print "Escaping time is: %f" % (t3-t2)
print "Escaping time is: %f" % (t4-t3)
print "Escaping time is: %f" % (t5-t4)
#print rdList[0][1][0]
## filter by 产品有效截止日
import random
n_prod = len(prods)
dutime = [ii*40 for ii in range(1,n_prod+1)]
random.shuffle(dutime)
t5 = timeit.default_timer()
rdList1 = filterRdList(rdList, dutime, customers, products, dates, unicus, prods)
## filters by only buying !!!!!!
t6 = timeit.default_timer()
print "drop buying Escaping time is: %f" % (t6-t5)
| [
"h510493094@qq.com"
] | h510493094@qq.com |
49ddb28a6f3660f534a2fe170d8242a86a905300 | 6864a19974eb513074b384b189dd28efebc660a2 | /tests/unit/test_interact.py | 6f3254ca41e7d28390fe75737a90bcce978d27aa | [
"MIT"
] | permissive | EdenInTheEast/zig | 1c8bdb4139f22779edeeae7b94b7a7c46900e661 | 7e0c0b9fb1ae9e9191445310f77c2dff7cc11fea | refs/heads/main | 2023-06-29T18:39:47.272339 | 2021-07-14T16:26:56 | 2021-07-14T16:26:56 | 364,528,761 | 1 | 1 | MIT | 2021-07-14T16:26:57 | 2021-05-05T09:44:43 | JavaScript | UTF-8 | Python | false | false | 8,484 | py | import pytest
from zig.interact import Interact, InteractOut
from zig.interact.interact_in import InteractIn
from zig.html_components import *
class TestInteractBlackBox:
"""
Basic methods:
0. Able to create Interact object with specs [IMPT!]
1. Store all input elements and their specs
2. Store all output elements and their specs
3. Store callback function
4. Create API point
5. Render Interaction as JSON [IMPT!]
REST methods:
1. update input value
2. generate output value
3. Handle GET Request
4. Handle POST/PUT Request
"""
@pytest.fixture
def fixture_normal_parameters(self):
# SETUP testing parameters for Mock InteractIn and InteractOut
para_dict = ["id", "value", "attribute", "dom_type"]
# input 1: HtmlElement of type Input, value, id='456'
input1 = ["456", 3, "value", Input().dom_type]
input1_dict = {i:x for i, x in zip(para_dict, input1)}
# input 2: HtmlElement of type Div, content, id='123'
input2 = ["123", "test_value", "content", Div().dom_type]
input2_dict = {i:x for i, x in zip(para_dict, input2)}
# ouput 3: HtmlElement of type P, content, id='678'
output = ["678", "", "content", P().dom_type]
output_dict = {i:x for i, x in zip(para_dict, output)}
# just a placeholder
callback = lambda x, y: x * y
# NOTE: Might implement diff. format for diff. subclasses
expected_api_point = "/interact/{interaction_id}"
return {"in1":input1_dict, "in2":input2_dict,
"out":output_dict, "callback": callback,
"api_point": expected_api_point, "parameters":para_dict}
@pytest.fixture
def fixture_normal_ini(self, mocker, fixture_normal_parameters):
# TEST initialization of Interact
input1, input2, output, callback, \
api_point_format, para_dict = fixture_normal_parameters.values()
mock_input1 = mocker.patch("zig.interact.InteractIn",
spec=True,
identity=input1["id"],
attribute=input1["attribute"],
value=input1["value"],
dom_type=input1["dom_type"],
key=None)
mock_input2 = mocker.Mock(spec=InteractIn,
identity=input2["id"],
attribute=input2["attribute"],
value=input2["value"],
dom_type=input2["dom_type"],
key=None)
mock_output = mocker.patch("zig.interact.InteractOut",
spec=True,
identity=output["id"],
attribute=output["attribute"],
value=output["value"],
dom_type=output["dom_type"])
interaction = Interact( [mock_input1, mock_input2],
mock_output,
callback)
return interaction
# blackbox: doesn't need to know how inputs are stored
def test_input_args(self, fixture_normal_ini, fixture_normal_parameters):
# TEST that all input elements and their specifications are stored,
# and can be retrieved. CHECK: inputs_args
interaction = fixture_normal_ini
expected_arguments = [ fixture_normal_parameters["in1"]["value"], fixture_normal_parameters["in2"]["value"] ]
assert interaction.inputs_args == expected_arguments
def test_input_keyword_args(self, fixture_normal_ini):
# TEST that all input elements can be stored along with their keywords
# and can be retrieved as such. CHECK: inputs_kwargs
pass
def test_output_elements(self, fixture_normal_ini, fixture_normal_parameters):
# TEST that all output elements and their specs are stored,
# and can be retrieved. CHECK: output_dict
interaction = fixture_normal_ini
output = fixture_normal_parameters["out"]
expected_output_data = None
# NOTE: might implement diff. format for diff. subclass
expected_output_elements = {output["id"]: {
"attribute": output["attribute"],
"dom_type": output["dom_type"],
"data": expected_output_data
} }
# it produces an Ordered Dict
assert dict(interaction.output_dict) == expected_output_elements
def test_generate_api_point(self, fixture_normal_ini, fixture_normal_parameters):
# TEST that it will auto-generate a url for api
# CHECK: api_point
api_point_format = fixture_normal_parameters["api_point"]
interaction = fixture_normal_ini
expected_api_point = api_point_format.format(interaction_id=interaction.id)
assert interaction.api_point == expected_api_point
# NOTE: IMPT it combines a few of the previous tests
def test_rendering(self, fixture_normal_ini, fixture_normal_parameters):
# given in and out, able to render into dict object (json-like)
interaction = fixture_normal_ini
inputs = [fixture_normal_parameters["in1"], fixture_normal_parameters["in2"]]
output = [fixture_normal_parameters["out"]]
input_dict = {i:{"id":v["id"] , "dom_type":v["dom_type"], "attribute":v["attribute"]}
for i,v in enumerate(inputs) }
output_dict = {i:{"id":v["id"] , "dom_type":v["dom_type"], "attribute":v["attribute"]}
for i,v in enumerate(output) }
test_generate_api_point = fixture_normal_parameters["api_point"] \
.format(interaction_id=interaction.id)
expected_json = {"input": input_dict, "output": output_dict, "api_point": test_generate_api_point}
#NOTE: Interact Object will trigger render on each In and Out Object
assert interaction.render() == expected_json
def test_update_single_input(self, fixture_normal_ini, fixture_normal_parameters):
# TEST update of single input value from input 2
interaction = fixture_normal_ini
# input 2: HtmlElement of type Div, content, id='123'
new_value = "this is a test value"
new_input2 = ["123", "content", new_value, "div"]
input2_dict = {i:x for i, x in zip(fixture_normal_parameters["parameters"], new_input2)}
expected_input_values = [fixture_normal_parameters["in1"]["value"], new_input2[2]]
#TODO: this is incomplete. Need a better way to push update
assert interaction._update_single_input(*new_input2[:3]) is True
assert interaction.inputs_args == expected_input_values
def test_process_inputs_generate_output(self, fixture_normal_ini, fixture_normal_parameters):
# NOTE: NEED TO change test
# TEST that it is able to generate new output value from callback with input values
interaction = fixture_normal_ini
expected_output = fixture_normal_parameters["in1"]["value"] * fixture_normal_parameters["in2"]["value"]
result = interaction._process_inputs()
assert result == expected_output
def test_update_output_values(self):
# TEST output is generated and updated:
pass
def test_update_via_put(self, fixture_normal_ini, fixture_normal_parameters):
# TEST update of put response
interaction = fixture_normal_ini
# given dictionary response from external source for input 1
updated_value = "Hello world!"
json_response = { 0:{"type":"div" ,"id":"123" , "data":updated_value, "attribute":"content"}}
expected_args = [fixture_normal_parameters["in1"]["value"], updated_value]
# api-point will trigger this function
interaction.put_response(json_response)
assert interaction.__compile_args() == expected_args
| [
"alterrxalter@gmail.com"
] | alterrxalter@gmail.com |
7f175dcfcf3289d553a65601a42e1f53cf9b8c94 | bd497b739c49eefd96ea62a1bb9aebec7f7cebc0 | /data_processing/utils.py | 11910bef1b0ed9c694da85684350fdc6a1aa98fb | [
"MIT"
] | permissive | gcalmettes/fitApp-fullstack | 3908d82da41f679d5fc57444502c6ee5c47a3066 | 42c5f40f52e2f19127ca37638617ef39445a13f0 | refs/heads/master | 2022-12-21T13:44:06.858334 | 2020-08-08T08:36:42 | 2020-08-08T08:36:42 | 152,452,977 | 1 | 1 | MIT | 2022-12-13T11:52:45 | 2018-10-10T16:13:22 | JavaScript | UTF-8 | Python | false | false | 2,164 | py | import io
import base64
import pandas as pd
import json
def convertUrlToBytesStream(url):
# isolate base64 encoded data part
data = url.split(',')[-1]
# decode base64 to bytes
bytes = base64.urlsafe_b64decode(data)
return bytes
def csvUrlToDf(url):
bytes = convertUrlToBytesStream(url)
df = pd.read_csv(io.BytesIO(bytes))
return df
def xlsUrlToDf(url):
bytes = convertUrlToBytesStream(url)
df = pd.read_excel(io.BytesIO(bytes))
return df
flatten = lambda l: [item for sublist in l for item in sublist]
def getDfInfo(df):
nCols = df.shape[1]
nTraces = (nCols-1)/3
if nTraces%1 == 0:
return {"type": "all", "nTraces": int(nTraces)}
elif (nCols%2 == 0):
return {"type": "processed", "nTraces": int(nCols/2)}
def getColumnNames(df, dfInfo):
if dfInfo['type'] == "all":
names = ["time"]+flatten([
[f"yfp{i}", f"cfp{i}", f"ratio{i}"] for i in range(dfInfo["nTraces"])])
elif dfInfo['type'] == 'processed':
names = flatten([[f"time{i}", f"ratio{i}"] for i in range(dfInfo["nTraces"])])
return names
def getCleanedDf(df, fmt='all'):
if fmt=='all':
df = df[[col for ratio in df.filter(regex='ratio') for col in ['time', ratio]]]
elif fmt == 'processed':
pass
return df
def convertToJsonObject(df, nTraces):
df.columns = flatten([['x', 'y'] for i in range(nTraces)])
dataObject = {'size': nTraces,
'data': {}}
for i in range(nTraces):
dataObject['data'][f'trace{i}'] = json.loads(
df.iloc[:, i*2:i*2+2].to_json(orient='records'))
return dataObject
def processFile(fileUrl, fmt='xls'):
if fmt == 'xls':
df = xlsUrlToDf(fileUrl).dropna()
elif fmt == 'csv':
df = csvUrlToDf(fileUrl).dropna()
dfInfo = getDfInfo(df)
df.columns = getColumnNames(df, dfInfo)
df = getCleanedDf(df, fmt=dfInfo["type"])
json = convertToJsonObject(df, dfInfo["nTraces"])
return json
def convertJSONtoDF(data):
try:
converted = pd.read_json(data, orient='records')
except:
converted = pd.DataFrame(data)
return converted
| [
"guillaume.calmettes@gmail.com"
] | guillaume.calmettes@gmail.com |
fab339050b992558c77da577c671a699a7775d41 | 6ab67facf12280fedf7cc47c61ae91da0bcf7339 | /service/yowsup/yowsup/layers/auth/protocolentities/failure.py | 12d1d8ed0f8ee7267a59a9cbb7493f6c5c879d4d | [
"MIT",
"GPL-3.0-only",
"GPL-3.0-or-later"
] | permissive | PuneethReddyHC/whatsapp-rest-webservice | 2f035a08a506431c40b9ff0f333953b855f9c461 | 822dfc46b80e7a26eb553e5a10e723dda5a9f77d | refs/heads/master | 2022-09-17T14:31:17.273339 | 2017-11-27T11:16:43 | 2017-11-27T11:16:43 | 278,612,537 | 0 | 1 | MIT | 2020-07-10T11:04:42 | 2020-07-10T11:04:41 | null | UTF-8 | Python | false | false | 684 | py | from yowsup.structs import ProtocolEntity, ProtocolTreeNode
class FailureProtocolEntity(ProtocolEntity):
def __init__(self, reason):
super(FailureProtocolEntity, self).__init__("failure")
self.reason = reason
def __str__(self):
out = "Failure:\n"
out += "Reason: %s\n" % self.reason
return out
def getReason(self):
return self.reason
def toProtocolTreeNode(self):
reasonNode = ProtocolTreeNode(self.reason, {})
return self._createProtocolTreeNode({}, children = [reasonNode])
@staticmethod
def fromProtocolTreeNode(node):
return FailureProtocolEntity( node.getAllChildren()[0].tag ) | [
"svub@x900.svub.net"
] | svub@x900.svub.net |
a2305f410c636a1f73e9eb03c70036300b797b07 | 76938f270e6165514162856b2ed33c78e3c3bcb5 | /lib/coginvasion/shop/ItemType.py | e96efa1c25c3578ab1b279372a20d21679db23fd | [] | no_license | coginvasion/src | 9a5ec682845cc4c9c013fcc35e9b379bd4360b6c | 2d7fcdb0cd073050250cb51292ee48300a9fe19f | refs/heads/master | 2021-01-19T06:50:11.786112 | 2015-11-08T12:28:52 | 2015-11-08T12:28:52 | 61,545,543 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | # Embedded file name: lib.coginvasion.shop.ItemType
"""
Filename: ItemType.py
Created by: DecodedLogic (13Jul15)
"""
class ItemType:
GAG, UPGRADE, HEAL = range(3) | [
"ttarchive@yandex.com"
] | ttarchive@yandex.com |
7b54810242d09c7362950443e7fb95afecc7c011 | 368be25e37bafa8cc795f7c9f34e4585e017091f | /.history/app_fav_books/models_20201115165700.py | 0eac60d92b9d9ae7be453f6bd85a6871087bd4aa | [] | no_license | steven-halla/fav_books_proj | ebcfbfda0e7f3cdc49d592c86c633b1d331da513 | 512005deb84ac906c9f24d4ab0939bd0db096716 | refs/heads/master | 2023-03-30T09:37:38.016063 | 2021-04-02T20:27:22 | 2021-04-02T20:27:22 | 354,125,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,666 | py | from django.db import models
import re
class UserManager(models.Manager):
def user_registration_validator(self, post_data):
errors = {}
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
if len(post_data['first_name']) < 3:
errors['first_name'] = "First name must be 3 characters"
if post_data['first_name'].isalpha() == False:
errors['first_name'] = "letters only"
if len(post_data['last_name']) < 3:
errors['last_name'] = "Last name must be 3 characters"
if post_data['last_name'].isalpha() == False:
errors['last_name'] = "letters only"
if len(post_data['email']) < 8:
errors['email'] = "Email must contain 8 characters"
if post_data['email'].find("@") == -1:
errors['email'] = "email must contain @ and .com"
if post_data['email'].find(".com") == -1:
errors['email'] = "email must contain @ and .com"
# test whether a field matches the pattern
if not EMAIL_REGEX.match(post_data['email']):
errors['email'] = "Invalid email address!"
if post_data['password'] != post_data['confirm_password']:
errors['pass_match'] = "password must match confirm password"
if len(post_data['password']) < 8:
errors['pass_length'] = "password must be longer than 8 characters"
return errors
# Create your models here.
class User(models.Model):
first_name = models.CharField(max_length=20)
last_name = models.CharField(max_length=20)
email = models.CharField(max_length=20)
password = models.CharField(max_length=20)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
objects = UserManager()
class EditManager(models.Manager):
def edit
class BooksManager(models.Manager):
def add_book_validator(self, post_data):
errors = {}
if len(post_data['title']) < 1:
errors['title'] = "title name must be 1 characters"
if len(post_data['desc']) < 5:
errors['desc'] = "Description must be 5 characters"
return errors
class Books(models.Model):
title = models.CharField(max_length=20)
desc = models.CharField(max_length=40)
uploaded_by = models.ForeignKey(User, related_name="books_uploaded", on_delete=models.CASCADE)
users_who_favorite = models.ManyToManyField(User, related_name="liked_books")
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
objects=BooksManager()
| [
"69405488+steven-halla@users.noreply.github.com"
] | 69405488+steven-halla@users.noreply.github.com |
88c76b97606d527768fb95a39096714fdae6701c | 534b4e4d173df16cd8c9a198dfe83b6aefb48e55 | /train/train_svm.py | 5f80ef52c6169643b032ad75ceb92ba9e19fc632 | [] | no_license | fatzh/data-science-titanic | 652cff4477af62f83daa4bdd56814c25339903d8 | e05772d22482e481d292df9d6a368884e586aa14 | refs/heads/master | 2021-01-10T12:12:34.062245 | 2015-10-21T16:38:32 | 2015-10-21T16:38:32 | 43,350,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,954 | py | # -*- coding: utf-8 -*-
import pandas as pd
from sklearn import cross_validation, svm, tree
from sklearn.ensemble import RandomForestClassifier
from sklearn.externals import joblib
import argparse
import sys
import os
from distutils.util import strtobool
def user_yes_no_query(question):
print('%s [y/n]\n' % question)
while True:
try:
return strtobool(raw_input().lower())
except ValueError:
print('Please respond with \'y\' or \'n\'.\n')
def main():
# widen pd output for debugging
pd.set_option('display.width', 1000)
parser = argparse.ArgumentParser()
parser.add_argument("--input", help="Input file to parse (CSV)")
args = parser.parse_args()
if args.input is None:
parser.print_help()
sys.exit()
input_file = args.input
# load data
df = pd.read_csv(input_file, sep=',')
# split X and y
y = df['Survived']
X = df.drop('Survived', 1)
loop = True
while(loop):
# split training set for cross validation
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y)
# SVM
score_svm = 0
while score_svm < 0.84:
model_svm = svm.SVC(kernel='poly', C=15)
model_svm = model_svm.fit(X, y)
score_svm = model_svm.score(X_test, y_test)
print 'SVM - R-squared:', score_svm
if user_yes_no_query("Save models to disk ? "):
# round scores... and make strings
score_svm = str(round(score_svm * 100, 4))
# create dir for models
if not os.path.exists(os.path.join('./models/svm', score_svm)):
os.makedirs(os.path.join('./models/svm', score_svm))
joblib.dump(model_svm, os.path.join('.', 'models', 'svm', score_svm, 'model_svm.pkl'))
if not user_yes_no_query("Try again ? "):
loop = False
if __name__ == '__main__':
main()
| [
"fabrice.tz@gmail.com"
] | fabrice.tz@gmail.com |
8f37672040f9e295c8762f408ad6bd4bb41e491a | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/model_control/detailed/transf_Anscombe/model_control_one_enabled_Anscombe_Lag1Trend_NoCycle_MLP.py | 46db9aa8f60fd81599dfc2f183a3cf62b5ad1657 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 149 | py | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Anscombe'] , ['Lag1Trend'] , ['NoCycle'] , ['MLP'] ); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
96b5cb6d0cc6d595ec025397e6fba91ee28c1540 | 48433e782befb20935d2272c63299c41ce41a999 | /similarity.py | ce3468575404d7e1962b629ecd73867867d04c20 | [] | no_license | marcielbp/softwareConflitoInteresses | e7827f088681e8c69af9a735f55ba17f177bbc13 | e462352f05d5797c87d0e724ce8f9ba18bdf46c8 | refs/heads/master | 2020-05-27T10:24:59.562976 | 2019-05-25T15:58:30 | 2019-05-25T15:58:30 | 188,582,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | __author__ = 'andremeireles'
from difflib import SequenceMatcher
def similar(a, b):
return SequenceMatcher(None, a, b).ratio()
# print similar('Andre Andrade', 'Andre Souza Andrade') | [
"noreply@github.com"
] | noreply@github.com |
217093a5295f3f4271c4c94eec44528a6ce7ad70 | 0c24fcbf802ed1881abdfbf14b867d4545237602 | /students/y2331/practical_works/Levashova_Nastya/lesson_11022020/lesson_11022020/urls.py | 2767832730380fdde8745167eccc8a0042a4f937 | [] | no_license | TonikX/ITMO_FSPO_Web_Django_2020 | d435d2984778d668f6d04c86d78c991b3f390c1a | 267141d6e4ee231ca689c8929f3db25fef15a5fd | refs/heads/master | 2023-01-18T17:38:05.964240 | 2020-11-20T18:08:57 | 2020-11-20T18:08:57 | 245,081,715 | 2 | 139 | null | 2023-04-25T18:54:50 | 2020-03-05T05:59:54 | Python | UTF-8 | Python | false | false | 810 | py | """lesson_11022020 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('Levashova_app.urls'))
]
| [
"62665598+tommmatojuice@users.noreply.github.com"
] | 62665598+tommmatojuice@users.noreply.github.com |
803b42aa13d6faa1c2502eba19d4b72a915cf30b | a63264a25fe18a94d9e2cba2632d7d59262e36a1 | /app/handlers/converttopopulation.py | d1a9eb7efeab1d636cec5909a9a923a2ef1a3ffd | [
"BSD-3-Clause"
] | permissive | bbbales2/stochss | 4f3ac8f909f52aaba0c863ffed91c2ae70b2fe27 | afddf9ad8936993a5b17d1d4130677eb42afa439 | refs/heads/master | 2021-01-15T23:11:40.983003 | 2013-11-27T19:27:23 | 2013-11-27T19:32:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | try:
import json
except ImportError:
from django.utils import simplejson as json
import traceback
from collections import OrderedDict
from stochssapp import *
from stochss.model import *
class ConvertToPopulationPage(BaseHandler):
def authentication_required(self):
return True
def get(self):
model_edited = self.get_session_property('model_edited')
self.render_response('modeleditor/convert_modeleditor.html', modelName = model_edited.name)
| [
"bbbales2@gmail.com"
] | bbbales2@gmail.com |
b7517557feafba0275822f0660ff6499f13f43cb | ef9c6abc94ee95dbf59dbc2a2272d59687c4ea61 | /src/restaurants/migrations/0023_auto_20200924_0405.py | 1e10e8cd9913cb74ba71d7ec08991139fa3e2285 | [] | no_license | arturocastillo95/Localito | 0ab7a6d07b533d444b59e4b6788286f40703c42e | 5391f8fd9ca780dc22516998674a63bf11cd1e84 | refs/heads/master | 2023-09-01T03:56:58.797047 | 2021-10-07T22:16:26 | 2021-10-07T22:16:26 | 290,685,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 836 | py | # Generated by Django 3.1 on 2020-09-24 04:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('restaurants', '0022_restaurant_qr_code'),
]
operations = [
migrations.AlterField(
model_name='restaurant',
name='address',
field=models.CharField(blank=True, max_length=80, verbose_name='Dirección'),
),
migrations.AlterField(
model_name='restaurant',
name='name',
field=models.CharField(max_length=60, verbose_name='Nombre de tienda'),
),
migrations.AlterField(
model_name='restaurant',
name='qr_code',
field=models.ImageField(blank=True, null=True, upload_to='qr-codes', verbose_name='Código QR'),
),
]
| [
"arturguay@gmail.com"
] | arturguay@gmail.com |
2cb23a3c1884756723095a7f604218153b7ae32c | a7e9720068f00de9c64ba5807894070f77e5420c | /faster_rcnn/function/test_rcnn.py | 39d5f2a8e6e831a1a84639c3959f654b66cac9fc | [] | no_license | novageno/novageno | af2808a46d720ea44a71b5e10c34039b3ddea554 | 16ffe13dcaa463cd2f66c26232e7893f914153e4 | refs/heads/master | 2021-01-16T19:08:46.676527 | 2017-08-19T06:13:49 | 2017-08-19T06:13:49 | 100,146,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,778 | py | # --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2016 by Contributors
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Modified by Guodong Zhang
# --------------------------------------------------------
import argparse
import pprint
import logging
import time
import os
import mxnet as mx
from symbols import *
from dataset import *
from core.loader import TestLoader
from core.tester import Predictor, pred_eval
from utils.load_model import load_param
def test_rcnn(cfg, dataset, image_set, root_path, dataset_path,fold,
ctx, prefix, epoch,
vis, ignore_cache, shuffle, has_rpn, proposal, thresh, logger=None, output_path=None):
if not logger:
assert False, 'require a logger'
# print cfg
pprint.pprint(cfg)
logger.info('testing cfg:{}\n'.format(pprint.pformat(cfg)))
# load symbol and testing data
if has_rpn:
sym_instance = eval(cfg.symbol + '.' + cfg.symbol)()
sym = sym_instance.get_symbol(cfg, is_train=False)
imdb = eval(dataset)(image_set, root_path, dataset_path,fold,result_path=output_path)
roidb = imdb.gt_roidb()
else:
sym_instance = eval(cfg.symbol + '.' + cfg.symbol)()
sym = sym_instance.get_symbol_rcnn(cfg, is_train=False)
imdb = eval(dataset)(image_set, root_path, dataset_path, result_path=output_path)
gt_roidb = imdb.gt_roidb()
roidb = eval('imdb.' + proposal + '_roidb')(gt_roidb)
# get test data iter
test_data = TestLoader(roidb, cfg, batch_size=len(ctx), shuffle=shuffle, has_rpn=has_rpn)
# load model
arg_params, aux_params = load_param(prefix, epoch, process=True)
# infer shape
data_shape_dict = dict(test_data.provide_data_single)
sym_instance.infer_shape(data_shape_dict)
sym_instance.check_parameter_shapes(arg_params, aux_params, data_shape_dict, is_train=False)
# decide maximum shape
data_names = [k[0] for k in test_data.provide_data_single]
label_names = None
max_data_shape = [[('data', (1, 3, max([v[0] for v in cfg.SCALES]), max([v[1] for v in cfg.SCALES])))]]
if not has_rpn:
max_data_shape.append(('rois', (cfg.TEST.PROPOSAL_POST_NMS_TOP_N + 30, 5)))
# create predictor
predictor = Predictor(sym, data_names, label_names,
context=ctx, max_data_shapes=max_data_shape,
provide_data=test_data.provide_data, provide_label=test_data.provide_label,
arg_params=arg_params, aux_params=aux_params)
# start detection
pred_eval(predictor, test_data, imdb, cfg, vis=vis, ignore_cache=ignore_cache, thresh=thresh, logger=logger)
| [
"lilhope@163.com"
] | lilhope@163.com |
73dd2273af55639c6c3c75288dacb97133f8bd7c | 104d98cda5885b70fd294fbf2de5afda83bff478 | /load_ohlcv.py | f5c0123d8af5fdc32efdc531fa2f984736eff231 | [] | no_license | harishkumar92/EMAIndicators | 98a21c1738ccd00beb549af4e696203eae9963e4 | ca59d6e6e18a6ad5053bb7c701ea49d9ff26a8e3 | refs/heads/master | 2020-12-09T17:02:49.381232 | 2020-01-12T09:19:09 | 2020-01-12T09:19:09 | 233,366,428 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,537 | py | import requests, sys
from datetime import datetime
import pandas as pd
def get_coin_list(n):
req_url = 'https://min-api.cryptocompare.com/data/top/mktcapfull?limit={0}&tsym=USD'.format(n)
result = requests.get(req_url)
coins = [x['CoinInfo']['Name'] for x in result.json()['Data']]
return coins
def make_request(coin, toTs=None):
ohlcv_url = 'https://min-api.cryptocompare.com/data/histohour?fsym={0}&tsym={1}&limit={2}'
quote_pair = 'BTC' if coin != 'BTC' else 'USD'
req_url = ohlcv_url.format(coin, quote_pair, 2000)
if toTs:
req_url = req_url + '&toTs=' + str(toTs)
result = requests.get(req_url)
return result
def make_requests(coin):
results = []
results.append(make_request(coin, None))
for i in range(20):
print ('Getting batch {0}/20'.format(i+1))
toTs = results[-1].json()['TimeFrom']
toTs = toTs - (1*60*60)
curr_result = make_request(coin, toTs)
results.append(curr_result)
results = results[::-1]
return results
def process_result(result):
if result.json()['Response'] != 'Success':
print (result['Response'])
data = pd.DataFrame(result.json()['Data'])
data['date'] = pd.to_datetime(data.time.apply(lambda x: datetime.utcfromtimestamp(x).strftime('%Y-%m-%d %H:%M:%S')))
data = data.set_index('date')
data = data.drop(['time'], axis=1)
data = data[~((data.close==0) & (data.high==0) & (data.low==0) & (data.open==0))]
return data
def process_results(results):
data_df = pd.concat(map(process_result, results))
return data_df
def save_df(coin, df):
output_file = 'data/{0}.csv'.format(coin)
df.to_csv(output_file)
def update_with_latest(coin):
csv_file = 'data/{0}.csv'.format(coin)
dateparse = lambda x: pd.datetime.strptime(x, '%Y-%m-%d %H:%M:%S')
ohlcv_df = pd.read_csv(csv_file, index_col='date', parse_dates=True, date_parser=dateparse)
latest_df = process_result(make_request(coin))
first_time = latest_df.index.min()
ohlcv_df = ohlcv_df[(ohlcv_df.index < first_time)]
ohlcv_df = ohlcv_df.append(latest_df)
save_df(coin, ohlcv_df)
def update_historical(coins):
if coins == None:
coins = get_coin_list(num_coins)
for coin in coins:
print ("Getting {0}...".format(coin))
results = make_requests(coin)
df = process_results(results)
save_df(coin, df)
if __name__ == '__main__':
update_with_latest(coin='BTC')
#update_historical(coins=['BTC'])
pass
| [
"noreply@github.com"
] | noreply@github.com |
249f9ba65f1a6d2c88a635746d6ef863b1611db8 | 26dfaaa5c084832122a45242b4f15ba4aead229c | /Projects/trilobot/trilo_remote_control.py | 8ccb1336988e6ab843297fb518d237a3b2f407e8 | [
"MIT"
] | permissive | pkbullock/RaspberryPi | aa622b641031bf49c6a3d19e90a030071accc51a | a1312b0732d2cc7e1b79f26bd2adb44443734135 | refs/heads/master | 2023-07-20T12:16:26.677502 | 2023-07-16T13:58:43 | 2023-07-16T13:58:43 | 83,076,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,886 | py | #!/usr/bin/env python3
import time
import math
import sys
from mappings import *
from mappings import custom_controller_mappings
"""
An advanced example of how Trilobot can be remote controlled using a controller or gamepad.
This will require one of the supported controllers to already be paired to your Trilobot.
At startup a list of supported controllers will be shown, with you being asked to select one.
The program will then attempt to connect to the controller, and if successful Trilobot's
underlights will illuminate with a rainbow pattern.
If your controller becomes disconnected Trilobot will stop moving and show a slow red
pulsing animation on its underlights. Simply reconnect your controller and after 10 to 20
seconds, the program should find your controller again and start up again.
This is highly customised to the 8BitDo Zero 2
Press CTRL + C to exit.
"""
print("Trilobot Example: Remote Control\n")
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
tbot = Trilobot()
# Presents the user with an option of what controller to use
controller = custom_controller_mappings.choose_controller(0)
# Attempt to connect to the created controller
controller.connect()
# Run an amination on the underlights to show a controller has been selected
for led in range(NUM_UNDERLIGHTS):
tbot.clear_underlighting(show=False)
tbot.set_underlight(led, RED)
time.sleep(0.1)
tbot.clear_underlighting(show=False)
tbot.set_underlight(led, GREEN)
time.sleep(0.1)
tbot.clear_underlighting(show=False)
tbot.set_underlight(led, BLUE)
time.sleep(0.1)
tbot.clear_underlighting()
h = 0
v = 0
spacing = 1.0 / NUM_UNDERLIGHTS
tank_steer = False
underlighting_on = True
# The main loop
while True:
try:
if not controller.is_connected():
# Attempt to reconnect to the controller if 10 seconds have passed since the last attempt
controller.reconnect(10, True)
try:
# Get the latest information from the controller. This will throw a RuntimeError if the controller connection is lost
controller.update()
except RuntimeError:
# Lost contact with the controller, so disable the motors to stop Trilobot if it was moving
tbot.disable_motors()
if controller.is_connected():
# Read the controller bumpers to see if the tank steer mode has been enabled or disabled
try:
if controller.read_button("L1") and tank_steer:
tank_steer = False
print("Tank Steering Disabled")
if controller.read_button("R1") and not tank_steer:
tank_steer = True
print("Tank Steering Enabled")
except ValueError:
# Cannot find 'L1' or 'R1' on this controller
print("Tank Steering Not Available")
try:
if tank_steer:
# Have the left stick's Y axis control the left motor, and the right stick's Y axis control the right motor
ly = controller.read_axis("LY")
ry = controller.read_axis("RY")
print("Tank Steer LY:", round(ly), "RY:", round(ry))
tbot.set_left_speed(-ly)
tbot.set_right_speed(-ry)
else:
# Have the left stick control both motors
# Up RY = 2.984/3
# Idle RY = -509.016/-509
# Down RY = -1021.00/-1021
# Idle LY = 515.031/515
# Left LY = -1/-1
# Right = 1031.047/1031
ly = round(controller.read_axis("LY"))
ry = round(controller.read_axis("RY"))
if ly == -509 and ry == 515:
print("Stopped")
tbot.set_left_speed(0)
tbot.set_right_speed(0)
tbot.disable_motors()
# Forward
if ry == 3:
print("Forward")
tbot.set_left_speed(1)
tbot.set_right_speed(1)
# Backward
if ry == -1021:
print("Backward")
tbot.set_left_speed(-1)
tbot.set_right_speed(-1)
# Left
if ly == -1:
print("Left")
tbot.set_left_speed(1)
tbot.set_right_speed(-1)
# Right
if ly == 1031:
print("Right")
tbot.set_left_speed(-1)
tbot.set_right_speed(1)
except ValueError:
# Cannot find 'LX', 'LY', or 'RY' on this controller
tbot.disable_motors()
print("Tank steer value error")
if controller.read_button("Y"):
tbot.disable_motors()
tbot.set_left_speed(0)
tbot.set_right_speed(0)
if controller.read_button("X"):
if underlighting_on:
underlighting_on = False
else:
underlighting_on = True
time.sleep(0.2)
# Run a rotating rainbow effect on the RGB underlights
for led in range(NUM_UNDERLIGHTS):
led_h = h + (led * spacing)
if led_h >= 1.0:
led_h -= 1.0
try:
if controller.read_button("A"):
tbot.set_underlight_hsv(led, 0.0, 0.0, 0.7, show=False)
else:
tbot.set_underlight_hsv(led, led_h, show=False)
except ValueError:
# Cannot find 'A' on this controller
tbot.set_underlight_hsv(led, led_h, show=False)
if underlighting_on:
tbot.show_underlighting()
else:
tbot.clear_underlighting()
# Advance the rotating rainbow effect
h += 0.5 / 360
if h >= 1.0:
h -= 1.0
else:
# Run a slow red pulsing animation to show there is no controller connected
val = (math.sin(v) / 2.0) + 0.5
tbot.fill_underlighting(val * 127, 0, 0)
v += math.pi / 200
time.sleep(0.01)
# Exit cleanly
except KeyboardInterrupt:
sys.exit(0)
| [
"paul.bullock@capacreative.co.uk"
] | paul.bullock@capacreative.co.uk |
15ca578cab22380a15803413284472c6b9776021 | 4e5510dbc74a8ff1b817df2d25f8ada529f7fdee | /mysite/settings.py | 33b7223aa4aaa76815f5c2eaf3106516b8435eee | [] | no_license | danaitabay/my-first-blog | 9de5c24688e5dad111bca4d349d9f1c009db897f | 1ba5421dfe7a1d6e021a00bb2d7659a7fcad7ce6 | refs/heads/master | 2021-07-01T04:10:37.459551 | 2017-09-18T14:40:51 | 2017-09-18T14:40:51 | 103,775,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,270 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'h87q7rg&_f!(#k86blhsz1xl)q^y&3g(w5pt-r!z%z8(clj%sb'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS =[
'127.0.0.1',
'danaitabay.pythonanywhere.com',
'myfirstworkspace-danaitabay.c9users.io'
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"db.tsegaye@gmail.com"
] | db.tsegaye@gmail.com |
ca2c59cf13ee879f9813dc695189e615fcc25a53 | 7020c7833efc19f79731356604e8008831bb2a29 | /product/migrations/0001_initial.py | f9b5cc008a1f2bbeda7eb107f84fbab03339593c | [] | no_license | alladini0/task2 | 1673581495b532fabaeadf84bf17434a86900a51 | f46000bf75c505c4683ee7bf413863caeb84a451 | refs/heads/master | 2023-07-27T13:35:36.136500 | 2021-09-10T11:53:18 | 2021-09-10T11:53:18 | 405,063,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,287 | py | # Generated by Django 3.2.7 on 2021-09-10 10:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('name', models.CharField(max_length=255)),
('slug', models.SlugField(primary_key=True, serialize=False)),
],
options={
'verbose_name': 'Категория',
},
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50, verbose_name='Название')),
('text', models.TextField(verbose_name='Описание')),
('price', models.DecimalField(decimal_places=3, max_digits=10)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product.category', verbose_name='Категория')),
],
options={
'verbose_name': 'Товар',
},
),
]
| [
"mankeypanda@gmail.com"
] | mankeypanda@gmail.com |
b39568cfb41bd202d421f08638b802aa3c233233 | adcd8b283755697c5e58276f9682b768c03dba81 | /dache/backends/redis.py | 28b6c611c618f77bb353db14d518ad3ea0dc43fd | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | eliangcs/dache | 4cfd87d06feee75f0acf567e4e5985b63767a5bd | 7499942a0b0cb21acc6e31cde3678cdda66519b6 | refs/heads/master | 2023-04-15T11:20:43.664066 | 2014-11-08T23:26:12 | 2014-11-08T23:26:12 | 25,982,568 | 0 | 0 | NOASSERTION | 2023-03-31T14:26:36 | 2014-10-30T17:43:44 | Python | UTF-8 | Python | false | false | 1,567 | py | from __future__ import absolute_import
import redis
from six.moves import cPickle as pickle
from .base import BaseCache, DEFAULT_TIMEOUT
DEFAULT_PORT = 6379
class RedisCache(BaseCache):
def __init__(self, url, **options):
super(RedisCache, self).__init__(**options)
port = url.port or DEFAULT_PORT
db = int(url.path[1:] or 0)
self.redis = redis.StrictRedis(host=url.hostname, port=port, db=db,
password=url.password)
def get(self, key, default=None, version=None):
key = self._get_redis_key(key, version)
value = self.redis.get(key)
if not value:
return default
value = pickle.loads(value)
return value
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self._get_redis_key(key, version)
value = pickle.dumps(value)
if timeout == DEFAULT_TIMEOUT:
timeout = self.default_timeout
pipeline = self.redis.pipeline(transaction=True)
pipeline.set(key, value)
if timeout is not None:
pipeline.expire(key, int(timeout))
pipeline.execute()
def delete(self, key, version=None):
key = self._get_redis_key(key, version)
self.redis.delete(key)
def clear(self):
self.redis.flushdb()
def _delete(self, redis_key):
self.redis.delete(redis_key)
def _get_redis_key(self, key, version=None):
key = self.make_key(key, version)
self.validate_key(key)
return key
| [
"cliang@machinezone.com"
] | cliang@machinezone.com |
4bdc54ae579efbe9fc29fa19f160057551d83d98 | 4ed9dd49d6dc87d03258c7019274b1dfd1a90e9b | /manager.py | 84e7cd121b37975bafc7b649d34def1e54a3803d | [] | no_license | OSP123/Python_exercises | 824945c296bae5bb4db2ed2db3192d200c3907a2 | 6a1406ae8851f1908892d4a88f893521b3deacbc | refs/heads/master | 2021-01-22T18:14:44.734799 | 2015-07-22T15:55:53 | 2015-07-22T15:55:53 | 39,515,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,325 | py |
from employee import Employee
"""
An object of this class represents a manager with the attributes inherited
from Employee and the additional data members: title and annual bonus.
"""
class Manager (Employee):
"""
An object of this class stores a title and annual bonues of a manager, along with
the attributes from employee. The only methods exclusive to manager include
a string output method and a constructor.
"""
def __init__(self, title, annualBonus):
"""
Sets the employee default values, title, and annual bonus. It also
checks to see if values are valid and sets them to default values if they
aren't.
"""
Employee.__init__(self, "Generic First Name", "Generic Last Name", 123456789, 60000)
if type(title) == str:
self.title = title
else:
self.title = "Head Manager"
if type(annualBonus) == int or type(annualBonus) == float:
self.annualBonus = annualBonus
else:
self.annualBonus = 5000.0
def __str__(self):
"""
String output method with Employee object string method combined with Manager data.
"""
return Employee.__str__(self) + "\nThe title of the manager is %s and the annual bonus is %d" % (self.title, self.annualBonus)
| [
"omarpatel123@gmail.com"
] | omarpatel123@gmail.com |
3ad2dd87828157dd063f2b7458571a524017263a | 833ae432c07ff3c33812b4847c247aa730a2549b | /glue/_plugin_helpers.py | 341fbd440fc91bad6628fe8c03f9dba38fea1a07 | [
"BSD-3-Clause"
] | permissive | scalet98/glue | 3e4bc49ac53766d4e1927da3434ad02410d93486 | ff949ad52e205c20561f48c05f870b2abb39e0b0 | refs/heads/master | 2020-11-25T07:50:11.278074 | 2019-10-29T09:23:19 | 2019-10-29T09:23:19 | 228,563,694 | 1 | 0 | NOASSERTION | 2019-12-17T07:58:31 | 2019-12-17T07:58:31 | null | UTF-8 | Python | false | false | 2,776 | py | # The following function is a thin wrapper around iter_entry_points. The reason it
# is in this separate file is that when making the Mac app, py2app doesn't
# support entry points, so we replace this function with a version that has the
# entry points we want hardcoded. If this function was in glue/main.py, the
# reference to the iter_plugin_entry_points function in load_plugin would be
# evaluated at compile time rather than at runtime, so the patched version
# wouldn't be used.
from __future__ import absolute_import, division, print_function
import os
from collections import defaultdict
def iter_plugin_entry_points():
from pkg_resources import iter_entry_points
return iter_entry_points(group='glue.plugins', name=None)
class PluginConfig(object):
def __init__(self, plugins={}):
self.plugins = defaultdict(lambda: True)
self.plugins.update(plugins)
def __str__(self):
string = ""
for plugin in sorted(self.plugins):
string += "{0}: {1}\n".format(plugin, self.plugins[plugin])
return string
@classmethod
def load(cls):
# Import at runtime because some tests change this value. We also don't
# just import the variable directly otherwise it is cached.
from glue import config
cfg_dir = config.CFG_DIR
plugin_cfg = os.path.join(cfg_dir, 'plugins.cfg')
from glue.external.six.moves import configparser
config = configparser.ConfigParser()
read = config.read(plugin_cfg)
if len(read) == 0 or not config.has_section('plugins'):
return cls()
plugins = {}
for name, enabled in config.items('plugins'):
plugins[name] = bool(int(enabled))
self = cls(plugins=plugins)
return self
def save(self):
# Import at runtime because some tests change this value. We also don't
# just import the variable directly otherwise it is cached.
from glue import config
cfg_dir = config.CFG_DIR
plugin_cfg = os.path.join(cfg_dir, 'plugins.cfg')
from glue.external.six.moves import configparser
config = configparser.ConfigParser()
config.add_section('plugins')
for key in sorted(self.plugins):
config.set('plugins', key, value=str(int(self.plugins[key])))
if not os.path.exists(cfg_dir):
os.mkdir(cfg_dir)
with open(plugin_cfg, 'w') as fout:
config.write(fout)
def filter(self, keep):
"""
Keep only certain plugins.
This is used to filter out plugins that are not installed.
"""
for key in list(self.plugins.keys())[:]:
if key not in keep:
self.plugins.pop(key)
| [
"thomas.robitaille@gmail.com"
] | thomas.robitaille@gmail.com |
c51601e1b9ee338db02f4c1f497e0198df94460b | 29a47609913b7cec54266f230f19611b60b2080a | /examples/light_head_rcnn/train_coco_multi.py | f5b126c6e8072cf5f6af9613f1d78ddc18fbfdf1 | [
"MIT"
] | permissive | xiaolonghao/chainercv | ec89ec711eb05f4a9a62c7d535d0c42c49500883 | 1db5572aa4bac6c40fd811c51dd63f1b6ad57dcf | refs/heads/master | 2020-12-05T21:20:49.118214 | 2019-12-24T06:01:34 | 2019-12-24T06:01:34 | 232,251,009 | 1 | 0 | MIT | 2020-01-07T05:41:06 | 2020-01-07T05:41:05 | null | UTF-8 | Python | false | false | 8,388 | py | from __future__ import division
import argparse
import functools
import multiprocessing
import numpy as np
import random
import six
import chainer
from chainer.dataset.convert import _concat_arrays
from chainer.dataset.convert import to_device
import chainer.links as L
from chainer.training import extensions
from chainercv.chainer_experimental.datasets.sliceable \
import TransformDataset
from chainercv.chainer_experimental.training.extensions import make_shift
from chainercv.datasets import coco_bbox_label_names
from chainercv.datasets import COCOBboxDataset
from chainercv.links.model.light_head_rcnn import LightHeadRCNNResNet101
from chainercv.links.model.light_head_rcnn import LightHeadRCNNTrainChain
from chainercv.links.model.ssd import GradientScaling
from chainercv import transforms
import chainermn
# https://docs.chainer.org/en/stable/tips.html#my-training-process-gets-stuck-when-using-multiprocessiterator
try:
import cv2
cv2.setNumThreads(0)
except ImportError:
pass
def concat_examples(batch, device=None, padding=None,
indices_concat=None, indices_to_device=None):
if len(batch) == 0:
raise ValueError('batch is empty')
first_elem = batch[0]
elem_size = len(first_elem)
if indices_concat is None:
indices_concat = range(elem_size)
if indices_to_device is None:
indices_to_device = range(elem_size)
result = []
if not isinstance(padding, tuple):
padding = [padding] * elem_size
for i in six.moves.range(elem_size):
res = [example[i] for example in batch]
if i in indices_concat:
res = _concat_arrays(res, padding[i])
if i in indices_to_device:
if i in indices_concat:
res = to_device(device, res)
else:
res = [to_device(device, r) for r in res]
result.append(res)
return tuple(result)
class Transform(object):
def __init__(self, light_head_rcnn):
self.light_head_rcnn = light_head_rcnn
def __call__(self, in_data):
img, bbox, label = in_data
_, H, W = img.shape
img = self.light_head_rcnn.prepare(img)
_, o_H, o_W = img.shape
scale = o_H / H
bbox = transforms.resize_bbox(bbox, (H, W), (o_H, o_W))
# horizontally flip
img, params = transforms.random_flip(
img, x_random=True, return_param=True)
bbox = transforms.flip_bbox(
bbox, (o_H, o_W), x_flip=params['x_flip'])
return img, bbox, label, scale
def main():
parser = argparse.ArgumentParser(
description='ChainerCV training example: LightHeadRCNN')
parser.add_argument('--out', '-o', default='result',
help='Output directory')
parser.add_argument('--seed', '-s', type=int, default=1234)
parser.add_argument('--batchsize', '-b', type=int, default=8)
parser.add_argument('--epoch', type=int, default=30)
parser.add_argument('--step-epoch', type=int, nargs='*', default=[19, 25])
args = parser.parse_args()
# https://docs.chainer.org/en/stable/chainermn/tutorial/tips_faqs.html#using-multiprocessiterator
if hasattr(multiprocessing, 'set_start_method'):
multiprocessing.set_start_method('forkserver')
p = multiprocessing.Process()
p.start()
p.join()
# chainermn
comm = chainermn.create_communicator('pure_nccl')
device = comm.intra_rank
np.random.seed(args.seed)
random.seed(args.seed)
# model
light_head_rcnn = LightHeadRCNNResNet101(
pretrained_model='imagenet',
n_fg_class=len(coco_bbox_label_names))
light_head_rcnn.use_preset('evaluate')
model = LightHeadRCNNTrainChain(light_head_rcnn)
chainer.cuda.get_device_from_id(device).use()
model.to_gpu()
# train dataset
train_dataset = COCOBboxDataset(
year='2017', split='train')
# filter non-annotated data
train_indices = np.array(
[i for i, label in enumerate(train_dataset.slice[:, ['label']])
if len(label[0]) > 0],
dtype=np.int32)
train_dataset = train_dataset.slice[train_indices]
train_dataset = TransformDataset(
train_dataset, ('img', 'bbox', 'label', 'scale'),
Transform(model.light_head_rcnn))
if comm.rank == 0:
indices = np.arange(len(train_dataset))
else:
indices = None
indices = chainermn.scatter_dataset(indices, comm, shuffle=True)
train_dataset = train_dataset.slice[indices]
train_iter = chainer.iterators.SerialIterator(
train_dataset, batch_size=args.batchsize // comm.size)
optimizer = chainermn.create_multi_node_optimizer(
chainer.optimizers.MomentumSGD(momentum=0.9), comm)
optimizer.setup(model)
global_context_module = model.light_head_rcnn.head.global_context_module
global_context_module.col_max.W.update_rule.add_hook(GradientScaling(3.0))
global_context_module.col_max.b.update_rule.add_hook(GradientScaling(3.0))
global_context_module.col.W.update_rule.add_hook(GradientScaling(3.0))
global_context_module.col.b.update_rule.add_hook(GradientScaling(3.0))
global_context_module.row_max.W.update_rule.add_hook(GradientScaling(3.0))
global_context_module.row_max.b.update_rule.add_hook(GradientScaling(3.0))
global_context_module.row.W.update_rule.add_hook(GradientScaling(3.0))
global_context_module.row.b.update_rule.add_hook(GradientScaling(3.0))
optimizer.add_hook(chainer.optimizer.WeightDecay(rate=0.0001))
model.light_head_rcnn.extractor.conv1.disable_update()
model.light_head_rcnn.extractor.res2.disable_update()
for link in model.links():
if isinstance(link, L.BatchNormalization):
link.disable_update()
converter = functools.partial(
concat_examples, padding=0,
# img, bboxes, labels, scales
indices_concat=[0, 2, 3], # img, _, labels, scales
indices_to_device=[0], # img
)
updater = chainer.training.updater.StandardUpdater(
train_iter, optimizer, converter=converter,
device=device)
trainer = chainer.training.Trainer(
updater, (args.epoch, 'epoch'), out=args.out)
@make_shift('lr')
def lr_scheduler(trainer):
    """Return the learning rate for the current trainer state.

    Linear warm-up from ``warm_up_rate`` to 1 over the first
    ``warm_up_duration`` iterations, then a step decay: the rate is
    multiplied by 0.1 for every entry of ``args.step_epoch`` that the
    current epoch has passed.
    """
    base_lr = 0.0005 * 1.25 * args.batchsize
    warm_up_duration = 500
    warm_up_rate = 1 / 3
    iteration = trainer.updater.iteration
    epoch = trainer.updater.epoch
    if iteration < warm_up_duration:
        rate = warm_up_rate \
            + (1 - warm_up_rate) * iteration / warm_up_duration
    else:
        # BUG FIX: `rate` was previously never initialized on this branch,
        # so `rate *= 0.1` (or the final return) raised UnboundLocalError
        # as soon as warm-up finished. Start from the full rate and decay.
        rate = 1.0
        for step in args.step_epoch:
            if epoch > step:
                rate *= 0.1
    return rate * base_lr
trainer.extend(lr_scheduler)
if comm.rank == 0:
# interval
log_interval = 100, 'iteration'
plot_interval = 3000, 'iteration'
print_interval = 20, 'iteration'
# training extensions
model_name = model.light_head_rcnn.__class__.__name__
trainer.extend(
chainer.training.extensions.snapshot_object(
model.light_head_rcnn,
filename='%s_model_iter_{.updater.iteration}.npz'
% model_name),
trigger=(1, 'epoch'))
trainer.extend(
extensions.observe_lr(),
trigger=log_interval)
trainer.extend(
extensions.LogReport(log_name='log.json', trigger=log_interval))
report_items = [
'iteration', 'epoch', 'elapsed_time', 'lr',
'main/loss',
'main/rpn_loc_loss',
'main/rpn_cls_loss',
'main/roi_loc_loss',
'main/roi_cls_loss',
'validation/main/map/iou=0.50:0.95/area=all/max_dets=100',
]
trainer.extend(
extensions.PrintReport(report_items), trigger=print_interval)
trainer.extend(
extensions.ProgressBar(update_interval=10))
if extensions.PlotReport.available():
trainer.extend(
extensions.PlotReport(
['main/loss'],
file_name='loss.png', trigger=plot_interval),
trigger=plot_interval)
trainer.extend(extensions.dump_graph('main/loss'))
trainer.run()
# Standard script entry point: run training only when this file is executed
# directly, not when it is imported as a module.
if __name__ == '__main__':
    main()
| [
"shingogo@hotmail.co.jp"
] | shingogo@hotmail.co.jp |
a08d641b179f3b456201c7309f9916d1711d11c8 | c0193e3918f855fb06046446d16e6b5c8c0bf5ae | /students/tests/test_context_processors.py | e5b82ba497ecb510e4e222aa1acc8807a76f0086 | [] | no_license | e-krasyuk/studentsdb | c1dcd6e76ebb2c3fc75f54af38ed8ad0d67ecea1 | 2e8944477b7973e4d00c46957bd3cfdeba8aeeb7 | refs/heads/master | 2021-06-10T08:28:26.399337 | 2017-02-20T19:07:59 | 2017-02-20T19:07:59 | 67,284,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 573 | py | from django.test import TestCase
from django.http import HttpRequest
from students.context_processors import groups_processor
class ContextProcessorsTest(TestCase):
    """Tests for the ``groups_processor`` template context processor."""

    # Preload two known groups so the processor has data to expose.
    fixtures = ['students_test_data.json']

    def test_groups_processor(self):
        """The processor must expose both fixture groups under ``GROUPS``."""
        context = groups_processor(HttpRequest())
        groups = context['GROUPS']
        self.assertEqual(len(groups), 2)
        self.assertEqual([g['title'] for g in groups],
                         [u'Group1', u'Group2'])
"evgeniykrasyuk@gmail.com"
] | evgeniykrasyuk@gmail.com |
af2b6b930dbd7322f60e7441c3a68f82d72a1a83 | f5611d9af6a6d8ab3e66164f804a395ac394a4dd | /boss_list/serializers.py | eecb59dd352b2dc260ac1e5f428bf242504ec7e8 | [] | no_license | Lux-Sales/absolute_boss_list_API | cd3bafd06d69df10a9993827af355ba4ba0867f4 | 8b867d5f7996a7140f992cd95a65c0a634cac640 | refs/heads/master | 2023-03-16T11:09:37.075269 | 2021-02-26T18:02:58 | 2021-02-26T18:02:58 | 338,630,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,927 | py | from rest_framework import serializers
from players.models import Player
from .models import BossList
from players.serializers import PlayersSerializer
class AddPlayerSerializer(serializers.Serializer):
    """Validates the payload for adding a player to the current boss list."""

    name = serializers.CharField(max_length=255, required=True)
    vocation = serializers.CharField(max_length=255, required=True)
    team = serializers.CharField(max_length=255, required=True)

    def create(self, validated_data):
        """Create the player attached to the most recently created boss list."""
        current_list = BossList.objects.last()
        return Player.objects.create(boss_list=current_list, **validated_data)
class DeletePlayerSerializer(serializers.Serializer):
    """Validates the payload identifying a player to delete.

    Declares the same three identifying fields as AddPlayerSerializer;
    the actual lookup/deletion is performed by the view, not here.
    """
    name = serializers.CharField(max_length=255, required=True)
    vocation = serializers.CharField(max_length=255, required=True)
    team = serializers.CharField(max_length=255, required=True)
class BossListSerializer(serializers.ModelSerializer):
    """Serializes the (single) BossList together with its nested players.

    Both ``create`` and ``update`` operate on ``BossList.objects.last()``,
    so the app effectively maintains one active boss list guarded by a
    write-only ``secret_key``.
    """
    # Nested read-only list of the players belonging to the boss list.
    players = PlayersSerializer(many=True, read_only=True)
    class Meta:
        model = BossList
        fields = ['title','players','secret_key']
        # The key is accepted as input but never echoed back in responses.
        extra_kwargs = {'secret_key':{'write_only':True}}
    def create(self, validated_data):
        """Rename the existing boss list after verifying the secret key.

        Despite the name, this does not create a new row: it mutates the
        latest BossList's title.  Raises ValidationError on a key mismatch.
        """
        secret_key = validated_data.get('secret_key')
        boss_list = BossList.objects.last()
        if boss_list.secret_key != secret_key:
            raise serializers.ValidationError({'secret_key':'invalid secret key'})
        # NOTE(review): re-fetching the same row is redundant; the object
        # above could be reused.
        boss_list = BossList.objects.last()
        boss_list.title = validated_data['title']
        boss_list.save()
        return boss_list
    def update(self, validated_data):
        """Rotate the boss list's secret key after verifying the old one.

        NOTE(review): DRF's contract is ``update(self, instance,
        validated_data)`` — this signature will break if DRF calls it
        directly; confirm it is only invoked manually from a view.
        NOTE(review): ``new_secret_key`` is not a declared serializer field,
        so it must arrive via the raw payload; verify against the caller.
        NOTE(review): no value is returned here (possibly truncated source);
        DRF expects the updated instance to be returned.
        """
        secret_key = validated_data.get('secret_key')
        boss_list = BossList.objects.last()
        if boss_list.secret_key != secret_key:
            raise serializers.ValidationError({'secret_key':'invalid secret key'})
        boss_list = BossList.objects.last()
        boss_list.secret_key = validated_data['new_secret_key']
        boss_list.save()
| [
"sales_l@hotmail.com"
] | sales_l@hotmail.com |
075bab59ba6d1149112ae36f9a6a72d45b6ef270 | 439872cbc12a9a73ba70e4ff2162681bfbad2a00 | /exercises/lesson 13 locator examples/find_element_by_id.py | da5f33890d8048c74da805e68d3357a5b16e9a6c | [] | no_license | Qola-Learning/Selenium | 9182780dd33be741a38b99a836c62423ecdf54cb | f93deb7787c7f6a4b238222ceca1f0bfa79d2ec9 | refs/heads/master | 2022-11-29T23:39:13.287720 | 2020-08-05T10:00:31 | 2020-08-05T10:00:31 | 273,363,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | # find element by id
from selenium import webdriver
# invoking chromedriver to open chrome browser
driver = webdriver.Chrome(executable_path="C:\\chromedriver.exe")
# get method to to open the url
driver.get("https://katalon-demo-cura.herokuapp.com/")
# find element by id
driver.find_element_by_id("btn-make-appointment").click()
# close driver
driver.quit()
| [
"noreply@github.com"
] | noreply@github.com |
9a4313fa0814d9397f41b20ed086b55e7abb6250 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /QgAwpaFWD2jtxZ2wG_4.py | 58922c617047ba45a395b810841f6329c9e8ff3b | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py |
def sum_digits(n):
    """Return the number of decimal digits in the non-negative integer *n*.

    (The name is kept for backward compatibility; the function counts
    digits, it does not sum them.)  ``n == 0`` has one digit; negative
    inputs return 0, matching the original behavior.
    """
    if n == 0:
        return 1
    count = 0
    remaining = n
    while remaining > 0:
        count += 1
        # BUG FIX: the original used float division (``j /= 10``), which
        # loses precision and can miscount for large integers.  Floor
        # division keeps everything in exact integer arithmetic.
        remaining //= 10
    return count
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
9da54dc53cf6b17761258afa02f01e041e98a274 | 19dfccf1567cb93d57b7c526e1d5a15b6c8d9098 | /minecraftControl/controllers.py | 000ff8b2b69e1c1e0a08fdcba2487e622788b59c | [] | no_license | AndyLamperski/minecraftControl | 7cd4a8d2cb5092b7197708975f9588be82ad2d44 | 3077f8b643742780f008c6694aa3a3814bc004f5 | refs/heads/master | 2020-06-28T02:20:44.235329 | 2019-09-06T18:54:36 | 2019-09-06T18:54:36 | 200,117,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,804 | py | import numpy as np
import scipy.linalg as la
class moveSphereTo:
    """
    controller = moveSphereTo(target_x,target_y,Kp,Kd,tolerance)
    PD controller that drives a sphere toward a 2-D target point.
    target_x - desired x value
    target_y - desired y value
    Kp - optional proportional gain
    Kd - optional derivative gain
    tolerance - optional error tolerance to declare when goal is reached.
    """
    def __init__(self, target_x, target_y, Kp=3, Kd=7, tolerance=0.1):
        """Store the target point, the PD gains, and the stopping tolerance."""
        self.target = np.array([target_x, target_y])
        self.Kp = Kp
        self.Kd = Kd
        self.tolerance = tolerance
        self.Done = False
        # Error terms refreshed by each measurement.
        self.posErr = np.zeros(2)
        self.velErr = np.zeros(2)

    def update(self, measurement):
        """Refresh the error terms from a (position, velocity) pair and
        flag completion once the position error is within tolerance."""
        position, velocity = measurement
        self.posErr = position - self.target
        self.velErr = velocity
        if la.norm(self.posErr) < self.tolerance:
            self.Done = True

    def value(self):
        """Return the PD control input for the current error terms."""
        return -(self.Kp * self.posErr + self.Kd * self.velErr)
class controllerSequence:
    """Chains a list of controllers, running them one after another.

    The active controller receives every measurement; when it reports
    ``Done`` the sequence advances to the next one.  The sequence itself
    reports ``Done`` once its final controller does.
    """
    def __init__(self, controllers):
        self.controllers = controllers
        self.NumControllers = len(controllers)
        self.index = 0
        self.Done = False

    def update(self, measurement):
        """Feed the measurement to the active controller, advancing first
        if the current one has already finished."""
        last = self.NumControllers - 1
        if self.controllers[self.index].Done and self.index < last:
            self.index += 1
        self.controllers[self.index].update(measurement)
        if self.index == last and self.controllers[self.index].Done:
            self.Done = True

    def value(self):
        """Return the control input of the active controller."""
        return self.controllers[self.index].value()
class turnCar:
    """PD controller that rotates a car to a desired heading angle."""

    def __init__(self, target_angle, Kp=3, Kd=7, tolerance=0.05):
        self.target = target_angle
        self.Kp = Kp
        self.Kd = Kd
        self.tol = tolerance
        self.Done = False

    def update(self, measurement):
        """Compute heading/rate errors from (x, y, theta, v, omega) and
        flag completion once both are within tolerance."""
        _, _, theta, _, omega = measurement
        # Wrap the heading error into [-pi, pi) so the car takes the
        # shortest rotation toward the target.
        self.posError = ((theta - self.target + np.pi) % (2 * np.pi)) - np.pi
        self.velError = omega
        if np.abs(self.posError) < self.tol and np.abs(self.velError) < self.tol:
            self.Done = True

    def value(self):
        """Return [forward, turn-rate] input; only the turn rate is driven."""
        turn_rate = -(self.Kp * self.posError + self.Kd * self.velError)
        return np.array([0., turn_rate])
class carForward:
    """PD controller that drives a car straight ahead by a fixed distance.

    The goal point is fixed on the first measurement: ``distance`` ahead
    of the car along its heading at that instant.  Progress is measured
    by projecting the displacement onto the start-to-goal direction.
    """
    def __init__(self, distance, Kp=3, Kd=7, Kp_ang=.01, Kd_ang=.01, tolerance=.1):
        # Distance Must be positive
        self.distance = np.abs(distance)
        self.Kp = Kp
        self.Kd = Kd
        self.Kp_ang = Kp_ang
        self.Kd_ang = Kd_ang
        self.tol = tolerance
        self.Done = False
        # Lazily initialised from the first measurement.
        self.startPosition = None
        self.goalPosition = None

    def update(self, measurement):
        """Update distance/speed/heading errors from (x, y, theta, v, omega)."""
        x, y, theta, v, omega = measurement
        here = np.array([x, y])
        if self.goalPosition is None:
            # First call: fix the goal `distance` ahead along the current heading.
            heading = np.array([np.cos(theta), np.sin(theta)])
            self.goalPosition = here + self.distance * heading
            self.startPosition = np.copy(here)
            self.goalAngle = theta
            # Unit vector from start to goal, used for scalar projections.
            self.projector = (self.goalPosition - self.startPosition) / self.distance
        # Signed distance remaining (negative until the goal is reached).
        self.d_err = np.dot(here - self.startPosition, self.projector) - self.distance
        # Velocity component along the travel direction.
        self.v_err = np.dot(np.array([v * np.cos(theta), v * np.sin(theta)]),
                            self.projector)
        # Heading drift, wrapped into [-pi, pi).
        self.theta_err = ((theta - self.goalAngle + np.pi) % (2 * np.pi)) - np.pi
        self.omega_err = omega
        if np.abs(self.d_err) < self.tol and np.abs(self.v_err) < self.tol:
            self.Done = True

    def value(self):
        """Return [forward, steering] PD control inputs."""
        forward = -(self.Kp * self.d_err + self.Kd * self.v_err)
        steer = -(self.Kp_ang * self.theta_err + self.Kd_ang * self.omega_err)
        return np.array([forward, steer])
| [
"alampers@umn.edu"
] | alampers@umn.edu |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.