blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
23e939695280694183dd3992d3ac76614554c4fc | db54ed7753ff8d80f08125d18eb445d8dd4991e9 | /mixins/tests.py | 8ce75dbc68bbad1f867b64f21e886c580caf19bd | [] | no_license | bennettl/roket | 731cc97bbc5c89238aec6469824664a544aef524 | 39775889636709e992e444d6228982898960745a | refs/heads/master | 2020-04-09T10:38:44.071663 | 2015-02-23T00:15:12 | 2015-02-23T00:15:12 | 31,735,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,888 | py | import json as json_reader
DATA_STRING = 'data'  # key under which payload objects live in the JSON envelope

# Test Functions
# Element-level predicates used as building blocks for the schema checks below.
# NOTE: `unicode` exists only under Python 2; these are Py2-only as written.
is_str = lambda x: issubclass(x.__class__, (str, unicode))
# real ints, or strings made purely of digits, both count as ints
is_int = lambda x: issubclass(x.__class__, int) or (issubclass(x.__class__, str) and x.isdigit())
# real bools, or the literal strings 'true'/'false'
is_bool = lambda x: issubclass(x.__class__, bool) or x == 'true' or x == 'false'
is_float = lambda x: issubclass(x.__class__, float)
is_date = lambda x: is_int(x)  # dates travel as integer timestamps, presumably -- verify against the API
# Wrappers
def is_none_or(fn):
    """Wrap predicate *fn* so that None (or the literal string 'None') passes."""
    def accepts_none(value):
        if value is None or value == 'None':
            return True
        return fn(value)
    return accepts_none
def is_array(verify_fn):
    """Lift an element-level validator into a validator over an iterable."""
    def every_element(data):
        results = [verify_fn(item) for item in data]
        return all(results)
    return every_element
def verify_json(fn):
    """Guard decorator: only invoke *fn* on parsed JSON containers.

    Anything that is not dict-like or list-like raises immediately, with
    the offending value embedded in the message.
    """
    def checked(json, *args, **kwargs):
        if issubclass(json.__class__, (dict, list)):
            return fn(json, *args, **kwargs)
        raise Exception('''Verify JSON could not find JSON object. Found:\n\n %s''' % (json,))
    return checked
# Decorators
def get_data(fn):
    """Decorator: parse a JSON string and pass its 'data' payload to *fn*.

    The wrapped function receives the raw JSON text, parses it with
    json_reader.loads, and calls ``fn(parsed, objects, ...)`` with the
    truthy 'data' payload.  When the payload is missing or empty, None is
    returned explicitly (the original fell through implicitly, which pylint
    flags as inconsistent-return-statements).
    """
    def wrapped(json, *args, **kwargs):
        json = json_reader.loads(json)
        objects = get_objects(json)
        if not objects:
            # Nothing to hand over -- absent or empty 'data' payload.
            return None
        return fn(json, objects, *args, **kwargs)
    return wrapped
def capture_exception(fn):
    """Decorator for two-dict comparison callables.

    Any exception raised by *fn* is re-raised as an Exception whose message
    carries the original error, both operands, and the keys present in only
    one of the two dicts.  Fixes: the Python-2-only ``except Exception, e``
    syntax (a SyntaxError on Python 3) now uses ``as``; the dump shows the
    full *against* dict, mirroring *obj*, instead of only its keys.
    """
    def wrapper(obj, against):
        try:
            return fn(obj, against)
        except Exception as e:
            # Keys the spec expects but the object lacks / has in surplus.
            missing = [key for key in against.keys() if not key in obj.keys()]
            extra = [key for key in obj.keys() if not key in against.keys()]
            raise Exception('''%s
            When comparing:
            %s
            ---- against ---
            %s
            -------
            Extra: %s
            Missing: %s
            ''' % (e, obj, against, extra, missing))
    return wrapper
def get_objects(json):
    """Pull the 'data' payload out of a parsed JSON dict.

    Returns False when the key is absent; raises a descriptive Exception
    when *json* has no ``.get`` (i.e. is not dict-like).
    """
    try:
        payload = json.get(DATA_STRING, False)
    except AttributeError:
        raise Exception('Invalid JSON = "%s"' % json)
    return payload
# Functions to read datasets
@get_data
@verify_json
def obj_is(json, data, verify_fn):
    """Apply *verify_fn* to the whole 'data' payload of a JSON string."""
    return verify_fn(data)
@get_data
@verify_json
def objs_are(json, data, verify_fn):
    """True when every object in the 'data' payload passes *verify_fn*."""
    return all([verify_fn(obj) for obj in data])
@verify_json
def objs_count(json):
    """Number of objects in the 'data' payload of an already-parsed JSON dict.

    Note: unlike the two above, this takes parsed JSON (no @get_data).
    """
    return len(get_objects(json))
@capture_exception
@verify_json
def verify_json_object(obj, against):
    """Validate *obj* against a spec dict mapping key -> predicate.

    Both dicts must have exactly the same key set, and every value in *obj*
    must satisfy the corresponding predicate in *against*.  Raises on any
    mismatch; returns True otherwise.

    Fixes two defects in the mismatch branch: the set differences for
    missing/extra keys were swapped, and the format string contained no
    ``%s`` placeholders, so the raise itself crashed with a TypeError
    ("not all arguments converted") instead of reporting the key diff.
    """
    obj_keys = set(obj.keys())
    against_keys = set(against.keys())
    if obj_keys != against_keys:
        # Keys the spec expects but the object lacks are "missing";
        # keys the object has that the spec does not are "extra".
        missing_keys = against_keys - obj_keys
        extra_keys = obj_keys - against_keys
        raise Exception('''Keys were mismatched.
        Missing Keys: %s
        Extra Keys: %s
        ''' % (', '.join(missing_keys), ', '.join(extra_keys)))
    for key in obj.keys():
        lam = against[key]
        val = obj[key]
        if not lam(val):
            raise Exception('''Key error for "%s". Value was "%s"''' % (key, val))
    return True
| [
"zacktanner@gmail.com"
] | zacktanner@gmail.com |
b40d5e3395c3ba2781336eb7732ee927d96b1c76 | 0d3f78e5a3fe3f8173965e0e44e9c3b8969e84f2 | /demo.py | 5c4d267d94ef8b61efc87149ed891efb4ead5d1b | [] | no_license | zxltmj/text_simhash | f1bc6aeee87770358107b3f21b77489a7a02a0db | ad39b236b61ffb82a248a2f24324f611dbc93a2e | refs/heads/master | 2021-06-28T22:21:29.439741 | 2017-09-22T07:11:27 | 2017-09-22T07:11:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# step 1 import
from similarity import *
# step 2: init the similarity engine (including its config file)
sy = similarity('similarity.cfg')
def test():
    """Generate and print the simhash fingerprint of a fixed Chinese sample sentence."""
    # NOTE(review): `str` shadows the builtin; left unchanged here.
    str = "我是南翔技工拖拉机学院手扶拖拉机专业的。不用多久,我就会升职加薪,当上总经理,出任CEO,走上人生巅峰。,清华大"
    # step 3: call the simhash-code generator on the text
    sh = sy.text_genhash(str)
    print sh
if __name__ == '__main__':
    # Run the hashing demo ten times in a row.
    num = 10
    while(num):
        test()
        num=num-1
| [
"943574281@qq.com"
] | 943574281@qq.com |
2c10a7380fd7f6b5d8c0c03591ad1cc75fd1fb16 | 2889d4456a3e7735873c622203b65e6394195186 | /analysis/deeplearning/sjr/socket/__init__.py | d14ea641a794c4e14cf54b069e2ce8926fbb40c5 | [] | no_license | aneda/socketear2.0 | 1ecac639fc20f56df1135aeeb50c6539f37dc7db | 7717cbc567ec03996689fb3bd8f600af5be82968 | refs/heads/master | 2020-12-30T16:29:02.639329 | 2017-05-12T21:27:23 | 2017-05-12T21:27:23 | 88,665,558 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 30 | py | from .model import SocketSJR
| [
"nedaa@clemex.com"
] | nedaa@clemex.com |
2dd439d58458740b50e4feda77f75a1a525730cb | 902aef0f2cde6c73a70c1833bec2c6f4fa1bc0b6 | /StimControl/LightStim/SweepController.py | dfdc9fcab16fdba8a78080dca9d2fd1147ce7c89 | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | chrox/RealTimeElectrophy | 2b0c88b28cbeeb4967db5630f3dfa35764c27f54 | e1a331b23d0a034894a0185324de235091e54bf0 | refs/heads/master | 2020-04-14T12:29:47.958693 | 2013-10-07T14:12:04 | 2013-10-07T14:12:04 | 1,662,847 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,196 | py | # This module contains the base class of StimulusController.
#
# Copyright (C) 2010-2011 Huang Xin
#
# See LICENSE.TXT that came with this file.
import itertools
import logging
import VisionEgg.FlowControl
import VisionEgg.ParameterTypes as ve_types
from VisionEgg.FlowControl import ONCE,TRANSITIONS,NOT_DURING_GO,NOT_BETWEEN_GO
from SweepStamp import RSTART_EVT,DAQStampTrigger
class StimulusController(VisionEgg.FlowControl.Controller):
    """ Base class for real time stimulus parameter controller.
        For stimulus in viewport.

        The *_go_eval hooks are invoked by the VisionEgg flow-control
        machinery; subclasses override them to mutate stimulus parameters.
    """
    def __init__(self,stimulus,
                 return_type=ve_types.NoneType,
                 eval_frequency=VisionEgg.FlowControl.Controller.EVERY_FRAME):
        # Evaluated every frame by default.
        VisionEgg.FlowControl.Controller.__init__(self,
                                                  return_type=return_type,
                                                  eval_frequency=eval_frequency)
        self.logger = logging.getLogger('LightStim.SweepController')
        # Cache frequently used handles of the controlled stimulus.
        self.stimulus = stimulus
        self.viewport = stimulus.viewport
        self.p = stimulus.parameters  # shortcut to the live parameter object
    def set_viewport(self, viewport):
        # Re-target the controller at a different viewport.
        self.viewport = viewport
    def during_go_eval(self):
        # No-op by default; subclasses update parameters here.
        pass
    def between_go_eval(self):
        pass
class ViewportController(StimulusController):
    """ Dummy class used to show that the controller is viewport sensitive.
        SEE LightStim.ManStimulus
    """
    def __init__(self,stimulus,viewport=None,*args,**kwargs):
        super(ViewportController,self).__init__(stimulus,*args,**kwargs)
        # Override the stimulus' own viewport when one is given explicitly.
        if viewport:
            self.set_viewport(viewport)
class SweepSequeStimulusController(StimulusController):
    """Walks a stimulus through its SweepSeque, one parameter value per vsync."""
    def __init__(self,stimulus):
        super(SweepSequeStimulusController,self).__init__(stimulus)
        self.sweepseq = stimulus.sweepseq
        # Number of vsyncs each sweep entry is held on screen.
        repeat = int(self.sweepseq.sweep_duration * self.viewport.refresh_rate)
        # frame and sweep are confusing names sometimes. Most of the time a sweep corresponse a vsync in screen sweeping.
        # but in this line sweep means a frame defined in sweepseque.
        if self.sweepseq.sequence_list is not None:
            # Finite sequence: expand it so total length/duration can be reported.
            vsyncseque = [vsync for sweep in self.sweepseq.sequence_list for vsync in itertools.repeat(sweep,repeat)]
            self.vsync_list = list(itertools.chain.from_iterable(vsyncseque))
            self.sequence_iter = itertools.chain.from_iterable(vsyncseque)
            self.sequence_indices = iter(range(len(self.vsync_list)))
        elif self.sweepseq.sequence_iter is not None:
            # Open-ended generator: total length is unknown (NaN below).
            self.vsync_list = None
            self.sequence_iter = self.sweepseq.sequence_iter
            self.sequence_indices = None
    def next_param(self):
        # Python-2 iterator protocol (.next()); flags completion on exhaustion.
        try:
            return self.sequence_iter.next()
        except StopIteration:
            self.stimulus.sweep_completed = True
            return None
    def next_index(self):
        try:
            return self.sequence_indices.next()
        except StopIteration:
            return None
    def get_sweeps_num(self):
        # NaN for open-ended sequences whose length cannot be known.
        if self.vsync_list is None:
            return float('nan')
        else:
            return len(self.vsync_list)
    def get_estimated_duration(self):
        # Estimated seconds on screen; NaN for open-ended sequences.
        if self.vsync_list is None:
            return float('nan')
        else:
            return len(self.vsync_list) / self.viewport.refresh_rate
class SweepSequeTriggerController(SweepSequeStimulusController):
    """ DAQStampTrigger for SweepSeque stimulus
    """
    def __init__(self,*args,**kwargs):
        SweepSequeStimulusController.__init__(self,*args,**kwargs)
        self.stamp_trigger = DAQStampTrigger()
    def post_stamp(self, postval):
        # Forward the stamp value to the DAQ trigger hardware wrapper.
        self.stamp_trigger.post_stamp(postval)
class RemoteStartController(VisionEgg.FlowControl.Controller):
    """ Sending a START event
    """
    def __init__(self):
        # ONCE|TRANSITIONS|NOT_BETWEEN_GO: evaluated on the transition into
        # a go loop, not while between loops.
        VisionEgg.FlowControl.Controller.__init__(self,
                                                  return_type=ve_types.NoneType,
                                                  eval_frequency=ONCE|TRANSITIONS|NOT_BETWEEN_GO)
        self.stamp_trigger = DAQStampTrigger()
    def during_go_eval(self):
        #print 'set bits: %d' %RSTART_EVT
        self.stamp_trigger.post_stamp(RSTART_EVT, 'start')
    def between_go_eval(self):
        pass
class RemoteStopController(VisionEgg.FlowControl.Controller):
    """ Sending a STOP event
    """
    def __init__(self):
        # NOT_DURING_GO: the stop stamp is posted between go loops instead.
        VisionEgg.FlowControl.Controller.__init__(self,
                                                  return_type=ve_types.NoneType,
                                                  eval_frequency=ONCE|TRANSITIONS|NOT_DURING_GO)
        self.stamp_trigger = DAQStampTrigger()
    def during_go_eval(self):
        pass
    def between_go_eval(self):
        #print 'clear bits: %d' %RSTART_EVT
        self.stamp_trigger.post_stamp(RSTART_EVT, 'stop')
class SaveParamsController(SweepSequeStimulusController):
    """ Use Every_Frame evaluation controller in case of real time sweep table modification
    """
    def __init__(self,stimulus,file_prefix):
        super(SaveParamsController, self).__init__(stimulus)
        self.savedpost = []
        self.file_prefix = file_prefix
        import time,os
        # Build a timestamped target path: ./params/<prefix>YYYYMMDD_HHMMSS
        (year,month,day,hour24,_min,sec) = time.localtime(time.time())[:6]
        trial_time_str = "%04d%02d%02d_%02d%02d%02d"%(year,month,day,hour24,_min,sec)
        save_dir = os.path.abspath(os.curdir)+ os.path.sep + 'params'
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        self.file_name = save_dir + os.path.sep + self.file_prefix + trial_time_str
    def during_go_eval(self):
        pass
    def between_go_eval(self):
        pass
class SweepController(VisionEgg.FlowControl.Controller):
""" Base sweep controller
"""
def __init__(self, framesweep):
VisionEgg.FlowControl.Controller.__init__(self,
return_type=ve_types.NoneType,
eval_frequency=VisionEgg.FlowControl.Controller.EVERY_FRAME)
self.framesweep = framesweep
def during_go_eval(self):
pass
def between_go_eval(self):
pass | [
"chrox.huang@gmail.com"
] | chrox.huang@gmail.com |
b9fb2c97b6dc9d41c3ed596f9709428f229ddac1 | bb0878f6fe1b998ba78dc741ca9d014b430aad39 | /cesar.py | 3ea2684dd50ae14b7e62fa1255142a212e0a8f8b | [] | no_license | raulexramos/Python | c5d41901343008602c7b536300c281e6133916e6 | 4fbe4846696556e4acee28184dfb81d6af2c2c46 | refs/heads/main | 2023-08-12T23:53:26.567328 | 2021-10-12T21:12:09 | 2021-10-12T21:12:09 | 380,391,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | alfabeto = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
chave = 3  # Caesar shift amount

# Mensagem a ser criptografada #
mensagem = "raulex esta mensagem será criptografada."

n = len(alfabeto)
cifrada = ""
for letra in mensagem:
    # Shift letters of the cipher alphabet by `chave`, wrapping past 'z'.
    # Characters outside the alphabet (spaces, accented letters,
    # punctuation) previously crashed alfabeto.index() with ValueError;
    # pass them through unchanged instead.
    if letra in alfabeto:
        indice = alfabeto.index(letra)
        letra = alfabeto[(indice + chave)%n]
    cifrada = cifrada + letra
print(mensagem)
print(cifrada)
| [
"86503819+raulexramos@users.noreply.github.com"
] | 86503819+raulexramos@users.noreply.github.com |
c303cf21bf6d1ff3eeb3773c71c758ca5533d3e5 | b4c93bad8ccc9007a7d3e7e1d1d4eb8388f6e988 | /ph_locations/migrations/0002_auto_20210319_1358.py | 6de5bb1e1c5891f630460b5a245aa21ef859f2f2 | [] | no_license | flashdreiv/fis | 39b60c010d0d989a34c01b39ea88f7fc3be0a87d | b93277785d6ad113a90a011f7c43b1e3e9209ec5 | refs/heads/main | 2023-04-02T12:46:32.249800 | 2021-03-31T00:27:29 | 2021-03-31T00:27:29 | 343,431,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 802 | py | # Generated by Django 3.1.7 on 2021-03-19 05:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django schema migration: adds name/province fields
    # after the initial ph_locations tables were created.

    dependencies = [
        ('ph_locations', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='city',
            name='name',
            field=models.CharField(max_length=80, null=True),
        ),
        migrations.AddField(
            model_name='city',
            name='province',
            # SET_NULL keeps cities around when their province is deleted.
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='ph_locations.province'),
        ),
        migrations.AddField(
            model_name='province',
            name='name',
            field=models.CharField(max_length=80, null=True),
        ),
    ]
| [
"dreivan.orprecio@gmail.com"
] | dreivan.orprecio@gmail.com |
c5b436ee14c6aab175507e4473ddd97b7b492707 | 1d4434f43eef9b7340483f39faf22242a27300f5 | /tests/test_operations.py | e8c454d6c77d219f5305de1750aba98b5dc9bb4c | [] | no_license | Eskalot/Appium_hw | 2ad38b45a804407b9ac5364afb2b45dc896b54c3 | c5beb1b6988db2a170801f25a270d83be868cf5d | refs/heads/master | 2023-04-13T21:50:05.029028 | 2021-04-09T14:39:32 | 2021-04-09T14:39:32 | 354,136,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,451 | py |
class TestCalculator:
    """UI tests for a calculator app; `calculator` is a page-object fixture
    supplied externally (not visible in this file)."""

    def test_add_value(self, calculator):
        calculator.add_values(1, 2)
        result = calculator.get_result()
        assert int(result) == 3
        # (Polish, translated): "you can also shorten this and assert directly
        # instead of creating a `result` variable."
        """ można też zastosować krótszy kod i zamiast tworzyć zmienną result od razu zastosować asercję:
        assert int(calculator.get_result()) == 3
        """

    def test_divide_value(self, calculator):
        calculator.divide_values(1, 2)
        result = calculator.get_result()
        assert float(result) == 0.5

    def test_sub_value_positive(self, calculator):
        calculator.sub_values(5, 1)
        result = calculator.get_result()
        assert result == "4"

    def test_sub_value_negative(self, calculator):
        calculator.sub_values(1, 5)
        result = calculator.get_result()
        assert result == "-4"

    def test_divide_by_zero(self, calculator):
        calculator.divide_values(4, 0)
        result = calculator.get_result_if_0()
        assert result == "Can't divide by 0"

    def test_clear_result(self,calculator):
        # Smoke test: only checks that the action does not raise.
        calculator.clear_result()

    def test_open_panel_expert(self, calculator):
        calculator.open_expert_panel()

    def test_close_panel_expert(self, calculator):
        calculator.close_expert_panel()

    def test_calculate_sinus(self,calculator):
        calculator.calculate_sinus(4,5)
        result = calculator.get_result()
        # NOTE(review): exact == on a float sine value is brittle; presumably
        # the app truncates to these digits -- confirm, or use a tolerance.
        assert float(result) == 0.7071067811
| [
"piotr.markowski@vp.pl"
] | piotr.markowski@vp.pl |
bac9c303768aac1ba089fbd8763dfa8f33be6259 | 7c6b84a1657160b7cd5229f012a4e83e3f0f5468 | /Calcy.py | 9cdfa05514c6ee4aeab300f9fb57297b4f850c81 | [] | no_license | Akshitha1992/BranchingExercise | 9664b855e2fa122cd0b5d899ea29d01df7ec3dbd | f2e2f34535454cb967ed8815fdcdbc080d481db7 | refs/heads/master | 2023-06-17T02:20:10.463561 | 2021-07-01T13:15:38 | 2021-07-01T13:15:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 56 | py | Framework for calcy project
Adding divide functionality
| [
"akshitha.g@hcl.com"
] | akshitha.g@hcl.com |
fe79a3c72b6692fcebb8c57bf030b56313de611e | 84dac01338877f5a0a6d2501305d9491031379df | /programmers/level_1/64061/main.py | 87b6082f4edc862fa32ba46409eca7ffc2cdae17 | [] | no_license | gitdog01/AlgoPratice | 7e0623000d31df27913f7c577929d42e91e44379 | f0b42308dfdf24c74cd93bb06c58fc810d32df8b | refs/heads/master | 2023-05-26T20:23:48.968328 | 2023-05-14T05:02:57 | 2023-05-14T05:02:57 | 253,978,167 | 0 | 0 | null | 2021-01-15T16:24:03 | 2020-04-08T03:42:27 | Python | UTF-8 | Python | false | false | 591 | py | def solution(board, moves):
answer = 0
stack = []
for move in moves :
print(move)
for y in range(len(board[0])) :
if board[y][move-1] != 0 :
if len(stack)!=0 and stack[-1] == board[y][move-1] :
stack.pop()
answer = answer + 2
else :
stack.append(board[y][move-1])
board[y][move-1] = 0
break
return answer
# Demo invocation (return value is discarded; `board` is mutated in place).
board = [[0,0,0,0,0],[0,0,1,0,3],[0,2,5,0,1],[4,2,4,4,2],[3,5,1,3,1]]
moves = [1,5,3,5,1,2,1,4]
solution(board,moves)
"hostdog01@naver.com"
] | hostdog01@naver.com |
45ea805373e3cf8cd7963ff9ba3833888f7d2b32 | 9aed9de2cdf4b93d22036b983b1bf5be04aef301 | /user_profile/migrations/0003_alter_userprofile_phone.py | a44959ea36a692ccdf0c115181ede23292c52ec5 | [] | no_license | kp-786/Hospital-Management | 12dd06ba97d4498bd48191b5ebeff7c0b030cecc | 1a3eda1de88378e1963f403368668b79c0fbadc1 | refs/heads/master | 2023-05-27T17:23:31.824090 | 2021-05-29T05:53:05 | 2021-05-29T05:53:05 | 370,890,445 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | # Generated by Django 3.2.2 on 2021-05-26 05:34
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: re-declares the phone field with a
    # regex validator (optional leading 0, then 10 digits).

    dependencies = [
        ('user_profile', '0002_alter_userprofile_phone'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='phone',
            field=models.CharField(blank=True, max_length=17, validators=[django.core.validators.RegexValidator(regex='^[0][1-9]\\d{9}$|^[1-9]\\d{9}$')]),
        ),
    ]
| [
"kundanpurbia786@gmail.com"
] | kundanpurbia786@gmail.com |
6b172bfdaa735bf76829cc5489e174778ff42719 | 0910e259a9bd252300f19b2ff22049d640f19b1a | /keras1/keras29_LSTM_ensemble2_differ.py | d841844016d8c7af269a3cb1dde3aa105b767905 | [] | no_license | kimtaeuk-AI/Study | c7259a0ed1770f249b78f096ad853be7424a1c8e | bad5a0ea72a0117035b5e45652819a3f7206c66f | refs/heads/master | 2023-05-05T12:34:52.471831 | 2021-05-22T16:16:12 | 2021-05-22T16:16:12 | 368,745,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,932 | py | #2개의 모델을 하나는 LSTM, 하나는 DENSE로
#앙상블로 구현
# 29_1번 과 성능 비교
import numpy as np
import tensorflow as tf
# Two-branch ensemble: x1 feeds an LSTM branch, x2 feeds a Dense branch.
x1 = np.array([[1,2,3], [2,3,4], [3,4,5], [4,5,6], [5,6,7], [6,7,8], [7,8,9], [8,9,10], [9,10,11], [10,11,12], [20,30,40], [30,40,50], [40,50,60]])
x2 = np.array([[10,20,30],[20,30,40],[30,40,50],[40,50,60],[50,60,70],[60,70,80],[70,80,90],[80,90,100],[90,100,110],[100,110,120],[2,3,4],[3,4,5],[4,5,6]])
y = np.array([4,5,6,7,8,9,10,11,12,13,50,60,70])
x1_predict = np.array([55,65,75])
x2_predict = np.array([65,75,85])
print(x1.shape) #(13,3)
print(x2.shape) #(13,3)
print(y.shape) #(13,)
print(x1_predict.shape) #(3,)
print(x2_predict.shape) #(3,)
# NOTE(review): x1_LSTM / x2_LSTM are computed but never used below --
# model.fit is fed the un-reshaped x1_train/x2_train; presumably the
# reshaped arrays were meant to feed the (3,1) LSTM input.  Verify.
x1_LSTM=x1.reshape(x1.shape[0],x1.shape[1],1)
x2_LSTM=x2.reshape(x2.shape[0],x1.shape[1],1)
# x1_predict = x1_predict.reshape(1, 3,1)
from sklearn.model_selection import train_test_split
# Same random_state keeps the x1 and x2 row splits aligned with each other.
x1_train, x1_test, y_train, y_test = train_test_split(x1, y, train_size=0.8, shuffle=True, random_state=66)
x2_train, x2_test, y_train, y_test = train_test_split(x2, y, train_size=0.8, shuffle=True, random_state=66)
# from sklearn.preprocessing import MinMaxScaler
# scaler = MinMaxScaler()
# scaler.fit(x1_train)
# # scaler.fit(x2_train)
# # scaler.fit(x1_test)
# # scaler.fit(x2_test)
# x1_train = scaler.transform(x1_train)
# x1_train = scaler.transform(x1_train)
# x1_test = scaler.transform(x1_test)
# x2_test = scaler.transform(x2_test)
# from tensorflow.keras.callbacks import EarlyStopping
# early_stopping = EarlyStopping(monitor='loss',patience=20, mode='min')
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Input, concatenate, LSTM
# Branch 1: LSTM over (timesteps=3, features=1).
input1 = Input(shape=(3,1))
dense1 = LSTM(10, activation='relu')(input1)
dense1 = Dense(10)(dense1)
# Branch 2: plain Dense over a flat 3-vector.
input2 = Input(shape=(3))
dense2 = Dense(10, activation='relu')(input2)
dense2 = Dense(10)(dense2)
# Merge the two branches and regress a single value.
merge1 = concatenate([dense1, dense2])
# middle1 = Dense(10, activation='relu')(merge1)
# middle1 = Dense(10)(middle1) # (translated) middle layer not needed
output1 = Dense(10)(merge1)
output1 = Dense(30)(output1)
output1 = Dense(1)(output1)
# output2 = Dense(10)(middle1)
# output2 = Dense(1)(output2)
model = Model(inputs=[input1, input2], outputs=output1)
# NOTE(review): Keras normally expects metrics as a list (['mae']) -- confirm.
model.compile(loss = 'mse', optimizer='adam', metrics='mae')
model.fit([x1_train,x2_train], y_train, epochs=500, validation_split=0.2, batch_size=1)
loss = model.evaluate([x1_test,x2_test], y_test)
x1_pred= x1_predict.reshape(1,3,1) # (3,) -> (1, 3)(dense) ->(1, 3, 1)(LSTM)
x2_pred= x2_predict.reshape(1, 3, 1) # (3,) -> (1, 3)(dense) ->(1, 3, 1)(LSTM)
y1_predict = model.predict([x1_pred,x2_pred])
print('loss = ', loss)
print('result : ', y1_predict)
# Recorded runs:
# loss = [5.709522724151611, 1.6373800039291382] - (translated) left run used LSTM; the right-hand one is better
# result : [[94.837204]]
# loss = [2.0639169216156006, 1.1473256349563599]
# result : [[78.38083]] - train_test_split
"ki3123.93123@gmail.com"
] | ki3123.93123@gmail.com |
c0e5aeb86f7114841d4b51b721ba36e09212c8fa | c13564041bdea68e36c3612e8c3c56f289211f41 | /test/mount_efs_test/test_helper_function.py | 4bc61875fd31d84a4709ecac0747a2f057ad789c | [
"MIT"
] | permissive | glennm1801/efs-utils | 2ff0580c1351d0a0f147318d5e9962263a535d95 | f3bb4a228a40bcf2e82fc28ab898d2b44ed8b05e | refs/heads/master | 2023-07-03T04:59:31.667346 | 2021-06-17T15:05:48 | 2021-06-17T15:05:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,814 | py | #
# Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved.
#
# Licensed under the MIT License. See the LICENSE accompanying this file
# for the specific language governing permissions and limitations under
# the License.
#
import logging
import mount_efs
import pytest
from .. import utils
from botocore.exceptions import ProfileNotFound
from collections import namedtuple
from mock import MagicMock
try:
import ConfigParser
except ImportError:
from configparser import ConfigParser
try:
from urllib2 import URLError, HTTPError
except ImportError:
from urllib.error import URLError, HTTPError
DEFAULT_REGION = 'us-east-1'

# Deliberately fake credential values used by the credential-helper tests.
ACCESS_KEY_ID_VAL = 'FAKE_AWS_ACCESS_KEY_ID'
SECRET_ACCESS_KEY_VAL = 'FAKE_AWS_SECRET_ACCESS_KEY'
SESSION_TOKEN_VAL = 'FAKE_SESSION_TOKEN'


def get_config(config_section=mount_efs.CONFIG_SECTION, config_item=None, config_item_value=None):
    """Build a ConfigParser fixture, optionally pre-populated with one item.

    SafeConfigParser exists only on Python 2's ConfigParser module; the
    AttributeError fallback covers Python 3, where the imported name is the
    ConfigParser class itself.
    """
    try:
        config = ConfigParser.SafeConfigParser()
    except AttributeError:
        config = ConfigParser()

    if config_section:
        config.add_section(config_section)
        if config_item and config_item_value is not None:
            config.set(config_section, config_item, config_item_value)
    return config
def test_is_instance_metadata_url_helper():
    """Only EC2 IMDS URLs count as instance-metadata URLs; ECS task metadata does not."""
    assert False == mount_efs.is_instance_metadata_url(mount_efs.ECS_TASK_METADATA_API)
    assert True == mount_efs.is_instance_metadata_url(mount_efs.INSTANCE_METADATA_TOKEN_URL)
    assert True == mount_efs.is_instance_metadata_url(mount_efs.INSTANCE_METADATA_SERVICE_URL)
    assert True == mount_efs.is_instance_metadata_url(mount_efs.INSTANCE_IAM_URL)


# Shared helper (leading underscore keeps pytest from collecting it directly).
def _test_get_boolean_config_item_in_config_file_helper(config, config_section, config_item, default_value, expected_value):
    assert expected_value == mount_efs.get_boolean_config_item_value(config, config_section, config_item, default_value)


def test_get_true_boolean_config_item_in_config_file():
    """An explicit 'true' in the config file wins over either default."""
    config_section = mount_efs.CONFIG_SECTION
    config_item = mount_efs.FALLBACK_TO_MOUNT_TARGET_IP_ADDRESS_ITEM
    config = get_config(config_section, config_item, 'true')
    assert True == mount_efs.get_boolean_config_item_value(config, config_section, config_item, True)
    assert True == mount_efs.get_boolean_config_item_value(config, config_section, config_item, False)


def test_get_false_boolean_config_item_in_config_file():
    """An explicit 'false' in the config file wins over either default."""
    config_section = mount_efs.CONFIG_SECTION
    config_item = mount_efs.FALLBACK_TO_MOUNT_TARGET_IP_ADDRESS_ITEM
    config = get_config(config_section, config_item, 'false')
    assert False == mount_efs.get_boolean_config_item_value(config, config_section, config_item, True)
    assert False == mount_efs.get_boolean_config_item_value(config, config_section, config_item, False)
def test_get_default_boolean_config_item_not_in_config_file(capsys):
    """Missing item: the passed-in default is returned, with a warning on
    stdout unless emit_warning_message=False."""
    config_section = mount_efs.CONFIG_SECTION
    config_item = mount_efs.FALLBACK_TO_MOUNT_TARGET_IP_ADDRESS_ITEM
    config = get_config()
    assert True == mount_efs.get_boolean_config_item_value(config, config_section, config_item, True)
    out, _ = capsys.readouterr()
    assert 'does not have' in out
    assert 'item in section' in out
    assert False == mount_efs.get_boolean_config_item_value(config, config_section, config_item, False)
    out, _ = capsys.readouterr()
    assert 'does not have' in out
    assert 'item in section' in out
    # With warnings suppressed, nothing should reach stdout.
    assert True == mount_efs.get_boolean_config_item_value(config, config_section, config_item, True,
                                                           emit_warning_message=False)
    out, _ = capsys.readouterr()
    assert 'does not have' not in out
    assert 'item in section' not in out
    assert False == mount_efs.get_boolean_config_item_value(config, config_section, config_item, False,
                                                            emit_warning_message=False)
    out, _ = capsys.readouterr()
    assert 'does not have' not in out
    assert 'item in section' not in out


def test_get_default_boolean_config_section_not_in_config_file(capsys):
    """Missing section: same default/warning contract, different message text."""
    config_section = 'random'
    config_item = mount_efs.FALLBACK_TO_MOUNT_TARGET_IP_ADDRESS_ITEM
    config = get_config()
    assert True == mount_efs.get_boolean_config_item_value(config, config_section, config_item, True)
    out, _ = capsys.readouterr()
    assert 'does not have section' in out
    assert False == mount_efs.get_boolean_config_item_value(config, config_section, config_item, False)
    out, _ = capsys.readouterr()
    assert 'does not have section' in out
    assert True == mount_efs.get_boolean_config_item_value(config, config_section, config_item, True,
                                                           emit_warning_message=False)
    out, _ = capsys.readouterr()
    assert 'does not have section' not in out
    assert False == mount_efs.get_boolean_config_item_value(config, config_section, config_item, False,
                                                            emit_warning_message=False)
    out, _ = capsys.readouterr()
    assert 'does not have section' not in out


def test_fetch_ec2_metadata_token_disabled_default_value():
    """Token fetching is enabled (i.e. not disabled) when nothing is configured."""
    config = get_config()
    assert False == mount_efs.fetch_ec2_metadata_token_disabled(config)
def test_url_request_helper_does_not_fetch_metadata_token_due_to_token_fetch_disabled_in_config_file(mocker):
    """IMDS URL but token fetch disabled in config: no token call, plain urlopen."""
    config_section = mount_efs.CONFIG_SECTION
    config_item = mount_efs.DISABLE_FETCH_EC2_METADATA_TOKEN_ITEM
    config = get_config(config_section, config_item, 'true')
    get_aws_ec2_metadata_token_mock = mocker.patch('mount_efs.get_aws_ec2_metadata_token')
    url_open_mock = mocker.patch('mount_efs.urlopen')
    mount_efs.url_request_helper(config, mount_efs.INSTANCE_METADATA_SERVICE_URL, '', '')

    utils.assert_not_called(get_aws_ec2_metadata_token_mock)
    utils.assert_called(url_open_mock)


def test_url_request_helper_does_not_fetch_metadata_token_due_to_url_not_instance_metadata_service(mocker):
    """Non-IMDS URL (ECS task metadata): the token helper must not be invoked."""
    config_section = mount_efs.CONFIG_SECTION
    config_item = mount_efs.DISABLE_FETCH_EC2_METADATA_TOKEN_ITEM
    config = get_config(config_section, config_item, 'false')
    get_aws_ec2_metadata_token_mock = mocker.patch('mount_efs.get_aws_ec2_metadata_token')
    url_open_mock = mocker.patch('mount_efs.urlopen')
    mount_efs.url_request_helper(config, mount_efs.ECS_TASK_METADATA_API, '', '')

    utils.assert_not_called(get_aws_ec2_metadata_token_mock)
    utils.assert_called(url_open_mock)


def test_url_request_helper_fetch_metadata_token_config_item_present(mocker):
    """IMDS URL with token fetch explicitly enabled: token helper is called."""
    config_section = mount_efs.CONFIG_SECTION
    config_item = mount_efs.DISABLE_FETCH_EC2_METADATA_TOKEN_ITEM
    config = get_config(config_section, config_item, 'false')
    get_aws_ec2_metadata_token_mock = mocker.patch('mount_efs.get_aws_ec2_metadata_token', return_value='ABCDEFG=')
    url_open_mock = mocker.patch('mount_efs.urlopen')
    mount_efs.url_request_helper(config, mount_efs.INSTANCE_METADATA_SERVICE_URL, '', '')

    utils.assert_called(get_aws_ec2_metadata_token_mock)
    utils.assert_called(url_open_mock)


def test_url_request_helper_fetch_metadata_token_config_item_not_present(mocker):
    """No config item at all: fetching the token is the default behavior."""
    config = get_config()
    get_aws_ec2_metadata_token_mock = mocker.patch('mount_efs.get_aws_ec2_metadata_token', return_value='ABCDEFG=')
    url_open_mock = mocker.patch('mount_efs.urlopen')
    mount_efs.url_request_helper(config, mount_efs.INSTANCE_METADATA_SERVICE_URL, '', '')

    utils.assert_called(get_aws_ec2_metadata_token_mock)
    utils.assert_called(url_open_mock)


def test_url_request_helper_unauthorized_error(mocker, caplog):
    """HTTP 401 with token fetch disabled: returns None and logs a warning
    mentioning that the ec2 metadata token is disabled."""
    caplog.set_level(logging.WARNING)
    config_section = mount_efs.CONFIG_SECTION
    config_item = mount_efs.DISABLE_FETCH_EC2_METADATA_TOKEN_ITEM
    config = get_config(config_section, config_item, 'true')
    get_aws_ec2_metadata_token_mock = mocker.patch('mount_efs.get_aws_ec2_metadata_token')
    url_open_mock = mocker.patch('mount_efs.urlopen', side_effect=HTTPError('url', 401, 'Unauthorized', None, None))
    resp = mount_efs.url_request_helper(config, mount_efs.INSTANCE_METADATA_SERVICE_URL, '', '')

    assert None == resp
    utils.assert_called(url_open_mock)
    utils.assert_not_called(get_aws_ec2_metadata_token_mock)

    assert 'Unauthorized request' in [rec.message for rec in caplog.records][0]
    assert 'ec2 metadata token is disabled' in [rec.message for rec in caplog.records][0]
def test_get_botocore_client_use_awsprofile(mocker):
    """awsprofile option: the profile is set on the session before creating the client."""
    config = get_config()
    get_target_region_mock = mocker.patch('mount_efs.get_target_region', return_value=DEFAULT_REGION)
    mount_efs.BOTOCORE_PRESENT = True
    boto_session_mock = MagicMock()
    boto_session_mock.set_config_variable.return_value = None
    boto_session_mock.create_client.return_value = 'fake-client'
    mocker.patch('botocore.session.get_session', return_value=boto_session_mock)

    client = mount_efs.get_botocore_client(config, 'efs', {'awsprofile': 'test'})
    assert client == 'fake-client'

    boto_session_mock.set_config_variable.assert_called_once()
    utils.assert_called(get_target_region_mock)


def test_get_botocore_client_use_awsprofile_profile_not_found(mocker, capsys):
    """Unknown profile: exits non-zero and reports 'could not be found' on stderr."""
    config = get_config()
    get_target_region_mock = mocker.patch('mount_efs.get_target_region', return_value=DEFAULT_REGION)
    mount_efs.BOTOCORE_PRESENT = True
    boto_session_mock = MagicMock()
    boto_session_mock.set_config_variable.return_value = None
    boto_session_mock.create_client.side_effect = [ProfileNotFound(profile='test_profile')]
    mocker.patch('botocore.session.get_session', return_value=boto_session_mock)

    with pytest.raises(SystemExit) as ex:
        mount_efs.get_botocore_client(config, 'efs', {'awsprofile': 'test-profile'})

    assert 0 != ex.value.code

    out, err = capsys.readouterr()
    assert 'could not be found' in err

    boto_session_mock.set_config_variable.assert_called_once()
    utils.assert_called(get_target_region_mock)


def test_get_botocore_client_botocore_not_present(mocker):
    """Without botocore installed the helper returns None and touches nothing."""
    config = get_config()
    get_target_region_mock = mocker.patch('mount_efs.get_target_region', return_value=DEFAULT_REGION)
    mount_efs.BOTOCORE_PRESENT = False
    boto_session_mock = MagicMock()
    mocker.patch('botocore.session.get_session', return_value=boto_session_mock)

    client = mount_efs.get_botocore_client(config, 'efs', {})
    assert client == None

    boto_session_mock.assert_not_called()
    utils.assert_not_called(get_target_region_mock)


def test_get_botocore_client_botocore_present(mocker):
    """No awsprofile: client is created without setting a profile variable."""
    config = get_config()
    get_target_region_mock = mocker.patch('mount_efs.get_target_region', return_value=DEFAULT_REGION)
    mount_efs.BOTOCORE_PRESENT = True
    boto_session_mock = MagicMock()
    boto_session_mock.set_config_variable.return_value = None
    boto_session_mock.create_client.return_value = 'fake-client'
    mocker.patch('botocore.session.get_session', return_value=boto_session_mock)

    client = mount_efs.get_botocore_client(config, 'efs', {})
    assert client == 'fake-client'

    boto_session_mock.set_config_variable.assert_not_called()
    boto_session_mock.create_client.assert_called_once()
    utils.assert_called(get_target_region_mock)
def test_get_assumed_profile_credentials_via_botocore_botocore_not_present(mocker):
expected_credentials = {'AccessKeyId': None, 'SecretAccessKey': None, 'Token': None}
mount_efs.BOTOCORE_PRESENT = False
boto_session_mock = MagicMock()
boto_session_mock.set_config_variable.return_value = None
mocker.patch('botocore.session.get_session', return_value=boto_session_mock)
credentials = mount_efs.botocore_credentials_helper('test_profile')
assert credentials == expected_credentials
boto_session_mock.assert_not_called()
def test_get_assumed_profile_credentials_via_botocore_botocore_present(mocker):
    """With botocore present, the helper thaws the session's frozen
    credentials into an AccessKeyId/SecretAccessKey/Token dict."""
    mount_efs.BOTOCORE_PRESENT = True
    session_mock = MagicMock()
    session_mock.set_config_variable.return_value = None
    # Mimic botocore's frozen-credentials shape.
    FrozenCreds = namedtuple('ReadOnlyCredentials',
                             ['access_key', 'secret_key', 'token'])
    creds_mock = MagicMock()
    creds_mock.get_frozen_credentials.return_value = FrozenCreds(
        ACCESS_KEY_ID_VAL, SECRET_ACCESS_KEY_VAL, SESSION_TOKEN_VAL)
    session_mock.get_credentials.return_value = creds_mock
    mocker.patch('botocore.session.get_session', return_value=session_mock)
    credentials = mount_efs.botocore_credentials_helper('test_profile')
    assert credentials == {'AccessKeyId': ACCESS_KEY_ID_VAL,
                           'SecretAccessKey': SECRET_ACCESS_KEY_VAL,
                           'Token': SESSION_TOKEN_VAL}
    session_mock.set_config_variable.assert_called_once()
    session_mock.get_credentials.assert_called_once()
    creds_mock.get_frozen_credentials.assert_called_once()
def test_get_assumed_profile_credentials_via_botocore_botocore_present_profile_not_found(mocker, capsys):
    """An unknown profile makes the helper exit non-zero and report the
    missing profile on stderr."""
    mount_efs.BOTOCORE_PRESENT = True
    session_mock = MagicMock()
    session_mock.set_config_variable.return_value = None
    session_mock.get_credentials.side_effect = [ProfileNotFound(profile='test_profile')]
    mocker.patch('botocore.session.get_session', return_value=session_mock)
    with pytest.raises(SystemExit) as ex:
        mount_efs.botocore_credentials_helper('test_profile')
    assert ex.value.code != 0
    _, err = capsys.readouterr()
    assert 'could not be found' in err
    session_mock.set_config_variable.assert_called_once()
    session_mock.get_credentials.assert_called_once()
| [
"ygaochn@amazon.com"
] | ygaochn@amazon.com |
8aa93dfa835ec3cb06e3867216f4f73df6106212 | b0e66db67b34b88e7884aa9b4a7b7607bbe9651b | /data/codec/codec.py | 3ac7e325f53e8dd3ed1092e07be8bbdf47665358 | [] | no_license | cole-brown/veredi-code | 15cf47c688c909b27ad2f2f3518df72862bd17bc | 8c9fc1170ceac335985686571568eebf08b0db7a | refs/heads/master | 2023-04-22T03:21:10.506392 | 2021-05-01T19:05:10 | 2021-05-01T19:05:10 | 296,949,870 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49,719 | py | # coding: utf-8
'''
Class for Encoding/Decoding the Encodables.
'''
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from typing import (TYPE_CHECKING,
Optional, Union, Any, Type, NewType,
Dict, List, Iterable, Mapping, Tuple)
from veredi.base.null import NullNoneOr, null_or_none
if TYPE_CHECKING:
from veredi.base.context import VerediContext
from veredi.data.config.context import ConfigContext
import collections
from datetime import datetime
import enum as py_enum
from veredi.logs import log
from veredi.logs.mixin import LogMixin
from veredi.base import numbers
from veredi.base.const import SimpleTypes, SimpleTypesTuple
from veredi.base.strings import label, pretty
from veredi.base.strings.mixin import NamesMixin
from veredi import time
from veredi.data import background
from veredi.data.context import DataOperation
from veredi.data.registration import codec as registrar
from ..exceptions import EncodableError
from .const import (EncodeNull,
EncodeAsIs,
EncodedComplex,
EncodedSimple,
EncodedSimpleTuple,
EncodedEither,
Encoding)
from .encodable import Encodable
from . import enum
# -----------------------------------------------------------------------------
# Constants
# -----------------------------------------------------------------------------
# Everything `Codec.encode()` accepts: Null/None, already-encoded
# pass-through values, Encodables, mappings, and enums (both the
# Encodable-wrapped kind and plain py_enum.Enum).
EncodableAny = NewType('EncodeInput',
                       Union[EncodeNull,
                             EncodeAsIs,
                             Encodable,
                             Mapping,
                             enum.EnumEncode,
                             py_enum.Enum])
# The subset of types usable as a decode `target`: Encodables and enums.
EncodableTypes = NewType('EncodableTypes',
                         Union[Encodable,
                               enum.EnumEncode,
                               py_enum.Enum])
# -----------------------------------------------------------------------------
# Code
# -----------------------------------------------------------------------------
class Codec(LogMixin, NamesMixin,
name_dotted='veredi.codec.codec',
name_string='codec'):
'''
Coder/Decoder for Encodables and the EncodableRegistry.
Repository gets data from storage to Veredi.
Serdes gets data from storage format to Python simple data types.
Codec gets data from simple data types to Veredi classes.
And the backwards version for saving.
'''
# -------------------------------------------------------------------------
# Constants
# -------------------------------------------------------------------------
# ------------------------------
# Logging
# ------------------------------
_LOG_INIT: List[log.Group] = [
log.Group.START_UP,
log.Group.DATA_PROCESSING
]
'''
Group of logs we use a lot for log.group_multi().
'''
# -------------------------------------------------------------------------
# Initialization
# -------------------------------------------------------------------------
def _define_vars(self) -> None:
'''
Instance variable definitions, type hinting, doc strings, etc.
'''
self._bg: Dict[Any, Any] = {}
'''Our background context data that is shared to the background.'''
def __init__(self,
config_context: Optional['ConfigContext'] = None,
codec_name: Optional[str] = None) -> None:
'''
`codec_name` should be short and will be lowercased. It should be
equivalent to the Serdes names of 'json', 'yaml'... It will be 'codec'
if not supplied.
`config_context` is the context being used to set us up.
'''
self._define_vars()
if codec_name:
self.name = codec_name.lower()
# ---
# Set-Up LogMixin before _configure() so we have logging.
# ---
self._log_config(self.dotted)
self._log_group_multi(self._LOG_INIT,
self.dotted,
f"Codec ({self.klass}) init...")
self._configure(config_context)
self._log_group_multi(self._LOG_INIT,
self.dotted,
f"Done with Codec init.")
def _configure(self,
context: Optional['ConfigContext']) -> None:
'''
Do whatever configuration we can as the base class; sub-classes should
finish up whatever is needed to set up themselves.
'''
self._log_group_multi(self._LOG_INIT,
self.dotted,
f"Codec ({self.klass}) "
"configure...")
# Set up our background for when it gets pulled in.
self._make_background()
self._log_group_multi(self._LOG_INIT,
self.dotted,
"Done with Codec configuration.")
# -------------------------------------------------------------------------
# Context Properties/Methods
# -------------------------------------------------------------------------
@property
def background(self) -> Tuple[Dict[str, str], background.Ownership]:
'''
Data for the Veredi Background context.
Returns: (data, background.Ownership)
'''
return self._bg, background.Ownership.SHARE
def _make_background(self) -> Dict[str, str]:
'''
Start of the background data.
'''
self._bg = {
'dotted': self.dotted,
'type': self.name,
}
return self._bg
def _context_data(self,
context: 'VerediContext',
action: DataOperation) -> 'VerediContext':
'''
Inject our codec data into the context.
'''
key = str(background.Name.CODEC)
meta, _ = self.background
context[key] = {
# Push our context data into our sub-context key.
'meta': meta,
# And add any extra info.
'action': action,
}
return context
# -------------------------------------------------------------------------
# API Encoding
# -------------------------------------------------------------------------
def encode(self,
target: EncodableAny,
in_progress: Optional[EncodedComplex] = None,
with_reg_field: bool = True) -> EncodedEither:
'''
Encode `target`, depending on target's type and encoding settings. See
typing of `target` for all encodable types.
If target is Null or None:
- returns None
If target is an Encodable or enum with registered Encodable wrapper:
- Encodes using Encodable functionality. See `_encode_encodable()`
for details.
If target is a Mapping:
- Encodes as a dictionary.
If target is a non-Encodable py_enum.Enum:
- Encodes target.value.
If target is an EncodeAsIs type:
- Returns as-is; already 'encoded'.
Else raises an EncodableError.
'''
# log.debug(f"{self.klass}.encode: {target}")
encoded = None
if null_or_none(target):
# Null/None encode to None.
return encoded
# Translate enum to its wrapper if needed/supported.
if enum.is_encodable(target):
input_enum = target
target = enum.wrap(target)
self._log_data_processing(self.dotted,
"codec.encode: enum found. "
"enum: {} -wrap-> {}",
input_enum, target)
if isinstance(target, Encodable):
# Encode via its function.
encoded = self._encode_encodable(target,
in_progress,
with_reg_field)
elif isinstance(target, collections.abc.Mapping):
# Encode via our map helper.
encoded = self.encode_map(target)
elif isinstance(target, py_enum.Enum):
# Assume, if it's a py_enum.Enum (that isn't an Encodable), that
# just value is fine. If that isn't fine, the enum can make itself
# an Encodable.
encoded = target.value
elif (isinstance(target, time.DateTypesTuple)
or isinstance(target, SimpleTypesTuple)):
encoded = self._encode_simple_types(target)
else:
msg = (f"Do not know how to encode type '{type(target)}'.")
error = EncodableError(msg,
data={
'target': target,
'in_progress': in_progress,
'with_reg_field': with_reg_field,
})
raise self._log_exception(error, msg)
# log.debug(f"{self.klass}.encode: Done. {encoded}")
return encoded
    def _encode_encodable(self,
                          target: Optional[Encodable],
                          in_progress: Optional[EncodedComplex] = None,
                          with_reg_field: bool = True) -> EncodedEither:
        '''
        Encode `target` as a simple or complex encoding, depending on
        `target`.encoding().

        If `target`.encoding() is SIMPLE, encodes to a string/number.

        Otherwise:
          - If `encode_in_progress` is provided, encodes this to a sub-field
            under `target.type_field()`.
          - Else encodes this to a dict and provides `target.type_field()` as
            the value of `target.TYPE_FIELD_NAME`.

        If `with_reg_field` is True, returns:
          An output dict with key/values:
            - ENCODABLE_REG_FIELD: `target.dotted`
            - ENCODABLE_PAYLOAD_FIELD: `target` encoded data

        Else returns:
          `target` encoded data
        '''
        # TODO v://future/2021-03-14T12:27:54
        self._log_data_processing(self.dotted,
                                  '_encode_encodable(): target: {}, '
                                  'in_progress: {}, with_reg_field: {}',
                                  target,
                                  in_progress,
                                  with_reg_field)
        encoded = None
        if null_or_none(target):
            # Null/None encode to None.
            return encoded
        # The Encodable encodes itself and reports which Encoding
        # (SIMPLE/COMPLEX) it actually used.
        encoding, encoded = target.encode(self)
        # TODO v://future/2021-03-14T12:27:54
        self._log_data_processing(self.dotted,
                                  '_encode_encodable(): Encoded.\n'
                                  '  encoding: {}\n'
                                  '      data: {}',
                                  encoding,
                                  encoded)
        # ---
        # Encoding.SIMPLE
        # ---
        # If we encoded it simply, we're basically done.
        if encoding.has(Encoding.SIMPLE):
            # If there's an in_progress that's been pass in, and we just
            # encoded ourtarget to a string... That's a bit awkward. But I
            # guess do this. Will make weird-ish looking stuff like: 'v.mid':
            # 'v.mid:1'
            if in_progress is not None:
                in_progress[target.type_field()] = encoded
                # TODO v://future/2021-03-14T12:27:54
                self._log_data_processing(
                    self.dotted,
                    '_encode_encodable(): Simple encoding was inserted into '
                    '`in_progress` data and is complete.\n'
                    '        field: {}\n'
                    '  in_progress: {}',
                    target.type_field(),
                    in_progress)
                return in_progress
            # TODO v://future/2021-03-14T12:27:54
            self._log_data_processing(
                self.dotted,
                '_encode_encodable(): Simple encoding is complete.\n'
                '  encoded: {}',
                encoded)
            return encoded
        # ---
        # Encoding.COMPLEX
        # ---
        # Put the type somewhere and return encoded data.
        if in_progress is not None:
            # Encode as a sub-field in the provided data.
            in_progress[target.type_field()] = encoded
            # TODO v://future/2021-03-14T12:27:54
            self._log_data_processing(
                self.dotted,
                '_encode_encodable(): Complex encoding inserted into '
                '`in_progress` data.\n'
                '        field: {}\n'
                '  in_progress: {}',
                target.type_field(),
                in_progress)
            return in_progress
        # Embed the type field so decode can identify the payload.
        encoded[target.TYPE_FIELD_NAME] = target.type_field()
        # TODO v://future/2021-03-14T12:27:54
        self._log_data_processing(
            self.dotted,
            '_encode_encodable(): Complex encoding had type-field '
            'added to its data.\n'
            '  field: {}\n'
            '   data: {}',
            target.type_field(),
            encoded)
        # Encode with reg/payload fields if requested.
        if with_reg_field:
            enc_with_reg = {
                Encodable.ENCODABLE_REG_FIELD: target.dotted,
                Encodable.ENCODABLE_PAYLOAD_FIELD: encoded,
            }
            # TODO v://future/2021-03-14T12:27:54
            self._log_data_processing(
                self.dotted,
                '_encode_encodable(): Complex encoding had reg-field '
                'added to its data and is complete.\n'
                '      reg: {}\n'
                '    value: {}\n'
                '  payload: {}\n'
                '    value: {}\n'
                '  encoded: {}',
                Encodable.ENCODABLE_REG_FIELD,
                target.dotted,
                Encodable.ENCODABLE_PAYLOAD_FIELD,
                encoded,
                enc_with_reg)
            return enc_with_reg
        # Or just return the encoded data.
        # TODO v://future/2021-03-14T12:27:54
        self._log_data_processing(
            self.dotted,
            '_encode_encodable(): Complex encoding is complete.\n'
            '  encoded: {}',
            encoded)
        return encoded
def encode_map(self,
encode_from: Mapping,
encode_to: Optional[Mapping] = None,
) -> Mapping[str, Union[str, numbers.NumberTypes, None]]:
'''
If `encode_to` is supplied, use that. Else create an empty `encode_to`
dictionary. Get values in `encode_from` dict, encode them, and put them
in `encode_to` under an encoded key.
Returns `encode_to` instance (either the new one we created or the
existing updated one).
'''
if null_or_none(encode_from):
# Null/None encode to None.
return None
if encode_to is None:
encode_to = {}
# log.debug(f"\n\nlogging.encode_map: {encode_from}\n\n")
for key, value in encode_from.items():
field = self._encode_key(key)
node = self._encode_value(value)
encode_to[field] = node
# log.debug(f"\n\n done.\nencode_map: {encode_to}\n\n")
return encode_to
def _encode_key(self, key: Any) -> str:
'''
Encode a dict key.
'''
# log.debug(f"\n\nlogging._encode_key: {key}\n\n")
field = None
if enum.is_encodable(key):
input_enum = key
key = enum.wrap(key)
self._log_data_processing(self.dotted,
"codec._encode_key: enum found. "
"enum: {} -wrap-> {}",
input_enum, key)
# If key is an encodable, can it encode into a key?
if isinstance(key, Encodable):
if key.encoding().has(Encoding.SIMPLE):
field = self._encode_encodable(key)
else:
msg = (f"{self.klass}._encode_key: Encodable "
f"'{key}' cannot be encoded into a key value for "
"a dict - only Encoding.SIMPLE can be used here.")
error = EncodableError(msg,
data={
'key': key,
})
raise log.exception(error, msg)
# Is key something simple?
elif isinstance(key, SimpleTypesTuple):
field = self._encode_simple_types(key)
# If key is an enum that is not an Encodable, use it's value, I guess?
elif isinstance(key, py_enum.Enum):
field = self._encode_simple_types(key.value)
# If key is a just a number, just use it.
elif isinstance(key, numbers.NumberTypesTuple):
field = numbers.serialize(key)
# No idea... error on it.
else:
# # Final guess: stringify it.
# field = str(key)
msg = (f"{self.klass}._encode_key: Key of type "
f"'{type(key)}' is not currently supported for encoding "
" into a field for an encoded dictionary.")
error = EncodableError(msg,
data={
'key': key,
})
raise log.exception(error, msg)
# log.debug(f"\n\n done._encode_key: {field}\n\n")
return field
def _encode_simple_types(self,
value: SimpleTypes) -> Union[str, int, float]:
'''
Encode a simple type.
'''
encoded = value
if isinstance(value, numbers.NumberTypesTuple):
# Numbers have to serialize their Decimals.
encoded = numbers.serialize(value)
elif isinstance(value, str):
encoded = value
else:
msg = (f"{self.klass}._encode_simple_types: "
f"'{type(value)}' is not a member of "
"SimpleTypes and cannot be encoded this way.")
error = EncodableError(msg, value)
raise log.exception(error, msg)
return encoded
def _encode_value(self, value: Any) -> str:
'''
Encode a dict value.
If value is:
- dict or encodable: Step into them for encoding.
- enum: Use the enum's value.
Else assume it is already encoded.
'''
# log.debug(f"\n\nlogging._encode_value: {value}\n\n")
node = None
if isinstance(value, dict):
node = self.encode_map(value)
elif (isinstance(value, Encodable)
or enum.is_encodable(value)):
# Encode it with its registry field so we can
# know what it was encoded as during decoding.
node = self.encode(value, with_reg_field=True)
elif isinstance(value, py_enum.Enum):
# An enum that isn't registered as Encodable - just use value.
node = value.value
else:
node = value
# log.debug(f"\n\n done._encode_value: {node}\n\n")
return node
# -------------------------------------------------------------------------
# Decoding
# -------------------------------------------------------------------------
def _decode_enum_check(self,
target: Optional[EncodableTypes]
) -> Type[Encodable]:
'''
Convert `target` to its Encodable EnumWrap if it is an enum.
Return as-is otherwise.
'''
# Allow target of either the Enum type or the EnumWrap type
# as target.
if enum.is_encodable(target): # Given the enum type as the target?
# Target is enum; translate to its EnumWrap class for decoding.
input_enum = target
target = enum.wrap(target)
self._log_data_processing(self.dotted,
"codec._decode_enum_check: enum found. "
"enum: {} -wrap-> {}",
input_enum, target)
elif enum.is_decodable(target): # Given the EnumWrap as target?
# EnumWrap is what we need - no change.
self._log_data_processing(self.dotted,
"codec._decode_enum_check: EnumWrap found. "
"EnumWrap: {} - Use as-is.",
target)
return target
def _decode_finalize(self, result: Any) -> Any:
'''
Finalize decoding by e.g. converting EnumWrap to its enum value.
'''
finalized = result
if result and enum.needs_unwrapped(result):
finalized = enum.unwrap(result)
self._log_data_processing(self.dotted,
"codec._decode_finalize: "
"{} -> {}",
result, finalized)
return finalized
    def decode(self,
               target: Optional[EncodableTypes],
               data: EncodedEither,
               error_squelch: bool = False,
               reg_find_dotted: Optional[str] = None,
               reg_find_types: Optional[Type[Encodable]] = None,
               map_expected: Iterable[Type[EncodableTypes]] = None,
               fallback: Optional[Type[Any]] = None
               ) -> Optional[Any]:
        '''
        Decode simple or complex `data` input, using it to build an
        instance of the `target` class.

        If `target` is known, it is used to decode and return a new
        `target` instance or None.

        If `target` is unknown (and therefore None), `data` must exist and
        have keys:
          - Encodable.ENCODABLE_REG_FIELD
          - Encodable.ENCODABLE_PAYLOAD_FIELD
        Raises KeyError if not present.

        Takes EncodedComplex `data` input, and uses
        `Encodable.ENCODABLE_REG_FIELD` key to find registered Encodable to
        decode `data[Encodable.ENCODABLE_PAYLOAD_FIELD]`.

        These keyword args are used for getting Encodables from the
        EncodableRegistry:
          - reg_find_dotted: Encodable's dotted registry string to use for
            searching for the encodable that can decode the data.
          - reg_find_types: Search for Encodables of this class or its
            subclasses that can decode the data.
          - fallback: Thing to return if no valid Encodable found for
            decoding.

        If data is a map with several expected Encodables in it, supply
        those in `map_expected` or just use `decode_map()`.

        `error_squelch` will try to only raise the exception, instead of
        raising it through log.exception().
        '''
        # NOTE: The strategies below are tried strictly in order; each
        # successful path ends in `_decode_finalize()` (which unwraps
        # EnumWraps back to real enums).
        # ---
        # Decode at all?
        # ---
        if null_or_none(data):
            # Can't decode nothing; return nothing.
            self._log_data_processing(
                self.dotted,
                "decode: Cannot decode nothing:\n"
                "  data: {}",
                data)
            return self._decode_finalize(None)
        # ---
        # Decode target already known?
        # ---
        if target:
            self._log_data_processing(
                self.dotted,
                "decode: Attempting to decode via Encodable:\n"
                "  data: {}",
                data)
            decoded = self._decode_encodable(target, data)
            self._log_data_processing(
                self.dotted,
                "decode: Decode via Encodable returned:\n"
                "  decoded: {}",
                decoded)
            return self._decode_finalize(decoded)
        self._log_data_processing(
            self.dotted,
            "decode: No target...\n"
            "  type: {}\n"
            "  data: {}",
            type(data),
            data)
        # ---
        # Is it an Encoding.SIMPLE?
        # ---
        if isinstance(data, EncodedSimpleTuple):
            self._log_data_processing(
                self.dotted,
                "decode: Attempting to decode simply encoded data...\n"
                "  data: {}",
                data)
            decoded = self._decode_simple(data, None)
            if decoded:
                self._log_data_processing(
                    self.dotted,
                    "decode: Decoded simply encoded data to:\n"
                    "  decoded: {}",
                    decoded)
                return self._decode_finalize(decoded)
            # Else: not this... Keep looking.
            self._log_data_processing(
                self.dotted,
                "decode: Data is not Encoding.SIMPLE. Continuing...")
        # ---
        # Does the EncodableRegistry know about it?
        # ---
        try:
            self._log_data_processing(
                self.dotted,
                "decode: Attempting to decode with registry...\n"
                "  reg_find_dotted: {}\n"
                "   reg_find_types: {}\n"
                "    error_squelch: {}\n\n"
                "  data:\n{}\n"
                "  fallback:\n{}\n",
                reg_find_dotted,
                reg_find_types,
                error_squelch,
                log.format_pretty(data, prefix='    '),
                log.format_pretty(fallback, prefix='    '))
            decoded = self._decode_with_registry(data,
                                                 dotted=reg_find_dotted,
                                                 data_types=reg_find_types,
                                                 error_squelch=error_squelch,
                                                 fallback=fallback)
            if self._log_will_output(log.Group.DATA_PROCESSING):
                self._log_data_processing(
                    self.dotted,
                    "decode: Decode with registry returned:\n"
                    "  type: {}\n"
                    "{}",
                    type(decoded),
                    log.format_pretty(decoded,
                                      prefix='    '))
            return self._decode_finalize(decoded)
        except (KeyError, ValueError, TypeError):
            # Expected exceptions from `_decode_with_registry`...
            # Try more things?
            pass
        # ---
        # Mapping?
        # ---
        if isinstance(data, dict):
            self._log_data_processing(
                self.dotted,
                "decode: Attempting to decode mapping...\n"
                "  map_expected: {}\n"
                "          data: {}",
                map_expected,
                data)
            # Decode via our map helper.
            decoded = self.decode_map(data, expected=map_expected)
            self._log_data_processing(
                self.dotted,
                "decode: Decoded mapping returned:\n"
                "  decoded: {}",
                decoded)
            return self._decode_finalize(decoded)
        # ---
        # Something Basic?
        # ---
        try:
            self._log_data_processing(
                self.dotted,
                "decode: Attempting to decode basic data type...\n"
                "  data: {}",
                data)
            decoded = self._decode_basic_types(data)
            self._log_data_processing(
                self.dotted,
                "decode: Decoded basic data to:\n"
                "  decoded: {}",
                decoded)
            return self._decode_finalize(decoded)
        except EncodableError:
            # Not this either...
            pass
        # ---
        # Ran out of options... Return fallback or error out.
        # ---
        if fallback:
            self._log_data_processing(
                self.dotted,
                "decode: No decoding known for data; returning fallback:\n"
                "  fallback: {}",
                fallback)
            return self._decode_finalize(fallback)
        msg = (f"{self.klass}.decode: unknown "
               f"type of data {type(data)}. Cannot decode.")
        error = EncodableError(msg,
                               data={
                                   'target': target,
                                   'data': data,
                                   'error_squelch': error_squelch,
                                   'reg_find_dotted': reg_find_dotted,
                                   'reg_find_types': reg_find_types,
                                   'fallback': fallback,
                               })
        raise self._log_exception(error, msg)
    def _decode_encodable(self,
                          target: Optional[Type[Encodable]],
                          data: EncodedEither,
                          ) -> Union[Encodable,
                                     enum.EnumEncode,
                                     None]:
        '''
        Decode simple or complex `data` input, using it to build an
        instance of the `target` class.

        If `target` is known, it is used to decode and return a new
        `target` instance or None.

        Raises (via `target.error_for_claim()`) when `data` is not something
        `target` can decode.
        '''
        self._log_data_processing(
            self.dotted,
            "_decode_encodable will be decoding data to target '{}'.\n"
            "  data: {}",
            target,
            data)
        # ---
        # Wrong data for target?
        # ---
        # Translate enum -> EnumWrap first; then let the target verify it
        # actually claims this data (raises otherwise).
        target = self._decode_enum_check(target)
        target.error_for_claim(data)
        # ---
        # Decode it.
        # ---
        encoding, decoded = target.decode(data, self, None)
        self._log_data_processing(
            self.dotted,
            "_decode_encodable decoded target '{}' to: {}",
            target,
            decoded)
        # EnumWrap results get unwrapped to their real enum here.
        if enum.needs_unwrapped(decoded):
            self._log_data_processing(
                self.dotted,
                "_decode_encodable unwrapped decoded EnumWrap "
                "'{}' to: {} {}",
                decoded,
                type(decoded.enum),
                decoded.enum)
            decoded = decoded.enum
        # Right now, same from here on for SIMPLE vs COMPLEX.
        # Keeping split up for parity with `_encode_encodable`, clarity,
        # and such.
        # ---
        # Decode Simply?
        # ---
        if encoding == Encoding.SIMPLE:
            self._log_data_processing(
                self.dotted,
                "_decode_encodable {} decoding completed.\n"
                "   type(): {}\n"
                "  decoded: {}",
                encoding,
                type(decoded),
                decoded)
            return decoded
        # ---
        # Decode Complexly?
        # ---
        self._log_data_processing(
            self.dotted,
            "_decode_encodable {} decoding completed.\n"
            "   type(): {}\n"
            "  decoded: {}",
            encoding,
            type(decoded),
            decoded)
        return decoded
    def _decode_simple(self,
                       data: EncodedSimple,
                       data_type: Optional[Type[Encodable]]
                       ) -> Optional[Encodable]:
        '''
        Input data must be a string.

        Will look for an Encodable that can claim the simple encoding, and
        then use that to decode it if found.

        Will return None if no Encodable target is found (or if `data` is
        falsy - nothing to decode).
        '''
        self._log_data_processing(self.dotted,
                                  "_decode_simple:\n"
                                  "  data_type: {}\n"
                                  "       data: {}",
                                  data_type, data)
        if not data:
            self._log_data_processing(self.dotted,
                                      "_decode_simple:\n"
                                      "  no data; returning None")
            return None
        # Registry search: which Encodable (optionally restricted to
        # `data_type`) claims this simply-encoded string?
        target = registrar.codec.simple(data,
                                        data_type=data_type)
        if not target:
            self._log_data_processing(self.dotted,
                                      "_decode_simple: "
                                      "codec.simple() found no target for:\n"
                                      "  data_type: {}\n"
                                      "       data: {}",
                                      data_type, data)
            return None
        self._log_data_processing(self.dotted,
                                  "_decode_simple: "
                                  "codec.simple() found target {}; "
                                  "decoding...",
                                  target)
        return self._decode_encodable(target, data)
def _decode_with_registry(self,
data: EncodedComplex,
dotted: Optional[str] = None,
data_types: Optional[Type[Encodable]] = None,
error_squelch: bool = False,
fallback: Optional[Type[Encodable]] = None,
) -> Optional[Encodable]:
'''
Input `data` must have keys:
- Encodable.ENCODABLE_REG_FIELD
- Encodable.ENCODABLE_PAYLOAD_FIELD
Raises KeyError if not present.
Takes EncodedComplex `data` input, and uses
`Encodable.ENCODABLE_REG_FIELD` key to find registered Encodable to
decode `data[Encodable.ENCODABLE_PAYLOAD_FIELD]`.
All the keyword args are forwarded to EncodableRegistry.get() (e.g.
'data_types').
Return a new `target` instance.
'''
# ------------------------------
# Fallback early.
# ------------------------------
if data is None:
# No data at all. Use either fallback or None.
if fallback:
self._log_data_processing(
self.dotted,
"decode_with_registry: data is None; using "
"fallback.\n"
" data: {}\n"
" fallback:\n"
"{}",
data,
log.format_pretty(fallback, prefix=' '))
return fallback
# `None` is an acceptable enough value for us... Lots of things are
# optional. Errors for unexpectedly None things should happen in
# the caller.
self._log_data_processing(
self.dotted,
"decode_with_registry: data is None; no "
"fallback. Returning None.",
data, fallback)
return None
self._log_data_processing(
self.dotted,
"decode_with_registry: Decoding...\n"
" dotted: {}\n"
" data_types: {}\n"
" error_squelch: {}\n"
" data:\n{}\n"
" fallback:\n{}\n",
dotted,
data_types,
error_squelch,
log.format_pretty(data, prefix=' '),
log.format_pretty(fallback, prefix=' '))
# When no ENCODABLE_REG_FIELD, we can't do anything since we don't
# know how to decode. But only deal with fallback case here. If they
# don't have a fallback, let it error soon (but not here).
if (fallback
and Encodable.ENCODABLE_REG_FIELD not in data):
# No hint as to what data is - use fallback.
self._log_data_processing(
self.dotted,
"decode_with_registry: No {} in data; using fallback. "
"data: {}, fallback: {}",
Encodable.ENCODABLE_REG_FIELD,
data, fallback,
log_minimum=log.Level.WARNING)
return fallback
# ------------------------------
# Better KeyError exceptions.
# ------------------------------
if not dotted:
try:
dotted = data[Encodable.ENCODABLE_REG_FIELD]
self._log_data_processing(
self.dotted,
"decode_with_registry: No 'dotted' provided; "
"got dotted from data: {}",
dotted)
except KeyError:
# Error on the missing decoding hint.
self._log_data_processing(
self.dotted,
"decode_with_registry: No 'dotted' provided "
"and none in data!")
# Don't `self._log_exception()`... exceptions expected.
raise KeyError(Encodable.ENCODABLE_REG_FIELD,
("decode_with_registry: data has no "
f"'{Encodable.ENCODABLE_REG_FIELD}' key."),
data)
except TypeError:
# Error on the missing decoding hint.
self._log_data_processing(
self.dotted,
"decode_with_registry: No 'dotted' provided. "
"...and data is not a dict type?\n"
" type: {}",
type(data))
# Don't `self._log_exception()`... exceptions expected.
raise TypeError(Encodable.ENCODABLE_REG_FIELD,
("decode_with_registry: data is not dict "
"type? Cannot check for "
f"'{Encodable.ENCODABLE_REG_FIELD}' key."),
data)
try:
encoded_data = data[Encodable.ENCODABLE_PAYLOAD_FIELD]
self._log_data_processing(
self.dotted,
"decode_with_registry: Pulled encoded data field from data.\n"
" encoded_data: {}",
encoded_data)
except KeyError:
self._log_data_processing(
self.dotted,
"decode_with_registry: No payload in encoded_data?\n"
" payload field name: {}\n"
" encoded_data: {}",
Encodable.ENCODABLE_PAYLOAD_FIELD,
encoded_data)
# Don't `self._log_exception()`... exceptions expected.
raise KeyError(Encodable.ENCODABLE_PAYLOAD_FIELD,
("decode_with_registry: data has no "
f"'{Encodable.ENCODABLE_PAYLOAD_FIELD}' key. "
"Cannot decode data.."),
data)
except TypeError:
self._log_data_processing(
self.dotted,
"decode_with_registry: ...encoded_data is not a dict type?\n"
" type: {}"
" encoded_data: {}",
type(encoded_data),
encoded_data)
# Don't `self._log_exception()`... exceptions expected.
raise KeyError(Encodable.ENCODABLE_PAYLOAD_FIELD,
("decode_with_registry: data is not dict type? "
f"Cannot find '{Encodable.ENCODABLE_PAYLOAD_FIELD}' "
"key."),
data)
# ------------------------------
# Now decode it.
# ------------------------------
self._log_data_processing(
self.dotted,
"decode_with_registry: Checking registrar for:"
" dotted: {}\n"
" data_type: {}\n"
" encoded_data: {}",
dotted,
data_types,
encoded_data)
target = registrar.codec.get(encoded_data,
dotted=dotted,
data_type=data_types,
error_squelch=error_squelch,
fallback=fallback)
self._log_data_processing(
self.dotted,
"decode_with_registry: registrar returned target: {}",
target)
decoded = self._decode_encodable(target, data)
return decoded
    def decode_map(self,
                   mapping: NullNoneOr[Mapping],
                   expected: Iterable[Type[Encodable]] = None
                   ) -> Mapping[str, Any]:
        '''
        Decode a mapping into a new dict: each key goes through
        `_decode_key()` and each value through `_decode_value()`.

        `expected` hints at the Encodable types entries may decode to.

        Returns None for Null/None input.
        '''
        if null_or_none(mapping):
            self._log_data_processing(
                self.dotted,
                "decode_map: Cannot decode nothing.\n"
                "  expecting: {}\n"
                "    mapping: {}",
                expected,
                log.format_pretty(mapping,
                                  prefix='    '))
            return None
        self._log_data_processing(
            self.dotted,
            "decode_map: Decoding...\n"
            "  expecting: {}\n"
            "{}",
            expected,
            log.format_pretty(mapping,
                              prefix='    '))
        # ---
        # Decode the Base Level
        # ---
        decoded = {}
        for key, value in mapping.items():
            field = self._decode_key(key, expected)
            node = self._decode_value(value, expected)
            decoded[field] = node
        self._log_data_processing(
            self.dotted,
            "decode_map: Decoded to:\n"
            "  decoded: {}",
            decoded)
        return decoded
    def _decode_key(self,
                    key: Any,
                    expected: Iterable[Type[Encodable]] = None) -> str:
        '''
        Decode a mapping's key.

        First tries each `expected` Encodable (the first one that claims the
        key decodes it); otherwise plain strings pass through unchanged.

        Encodable is pretty stupid. string is only supported type. Override or
        smart-ify if you need support for more key types.

        Raises EncodableError for any other key type.
        '''
        self._log_data_processing(
            self.dotted,
            "decode_key:\n"
            "  expecting: {}\n"
            "        key: {}",
            expected,
            key)
        field = None
        # Can we decode to a specified Encodable?
        if expected:
            for encodable in expected:
                # Enums must be translated to their EnumWrap to claim/decode.
                encodable = self._decode_enum_check(encodable)
                # Does this encodable want the key?
                claiming, claim, _ = encodable.claim(key)
                if not claiming:
                    continue
                self._log_data_processing(
                    self.dotted,
                    "decode_key:\n"
                    "  Found expected to use as target: {}",
                    encodable)
                # Yeah - get it to decode it then.
                field = self.decode(encodable, claim,
                                    map_expected=expected)
                self._log_data_processing(
                    self.dotted,
                    "decode_key:\n"
                    "  Decoded key to: {}",
                    field)
                # And we are done; give the decoded field back.
                return field
        # Not an Encodable (or none supplied as expected). Not sure what to do
        # past here, really... Use the string or error, and let the next guy
        # along update it (hello Future Me, probably).
        if isinstance(key, str):
            field = key
            self._log_data_processing(
                self.dotted,
                "decode_key:\n"
                "  Key is string; use as-is: {}",
                field)
        else:
            self._log_data_processing(
                self.dotted,
                "decode_key:\n"
                "  Key is un-decodable?\n"
                "  type: {}\n"
                "   key: {}",
                type(key),
                key)
            raise EncodableError(f"Don't know how to decode key: {key}",
                                 None,
                                 data={
                                     'key': key,
                                     'expected': expected,
                                 })
        return field
def _decode_value(self,
value: Any,
expected: Iterable[Type[Encodable]] = None
) -> str:
'''
Decode a mapping's value.
Passes `expected` along for continuing the decoding.
'''
self._log_data_processing(
self.dotted,
"decode_value:\n"
" Decoding...\n"
" expecting: {}\n"
" value: {}",
expected,
value)
node = self.decode(None, value,
map_expected=expected)
self._log_data_processing(
self.dotted,
"decode_value:\n"
" Decoded value to:\n"
" {}",
node)
return node
def _decode_basic_types(self,
value: Union[datetime, str, int, float]
) -> SimpleTypes:
'''
'Decode' a basic type. Generally as itself.
Returns the simple type or raises an EncodableError.
'''
self._log_data_processing(
self.dotted,
"decode_basic_types:\n"
" Decoding...\n"
" type: {}\n"
" value: {}",
type(value),
value)
if time.deserialize_claim(value):
decoded = time.deserialize(value)
self._log_data_processing(
self.dotted,
"decode_basic_types:\n"
" Decoded via time."
" type: {}\n"
" decoded: {}",
type(decoded),
decoded)
return decoded
# Give numbers a shot at Decimals saved as strings before we deal with
# other kinds of strings.
try:
decoded = numbers.deserialize(value)
self._log_data_processing(
self.dotted,
"decode_basic_types:\n"
" Decoded via numbers."
" type: {}\n"
" decoded: {}",
type(decoded),
decoded)
return decoded
except Exception as error:
self._log_exception(
error,
"decode_basic_types:\n"
" `numbers.deserialize()` raised exception on:"
" type: {}\n"
" decoded: {}",
type(decoded),
decoded)
raise
if decoded:
return decoded
if isinstance(value, str):
decoded = value
self._log_data_processing(
self.dotted,
"decode_basic_types:\n"
" Is a string; return as-is."
" type: {}\n"
" decoded: '{}'",
type(decoded),
decoded)
return decoded
self._log_data_processing(
self.dotted,
"decode_basic_types:\n"
" Unknown/unclaimed value; erroring."
" type: {}\n"
" value: {}",
type(value),
value)
msg = (f"{self.klass}.decode_basic_types: "
f"'{type(value)}' is not a known 'basic' type "
"and cannot be decoded.")
error = EncodableError(msg, value)
raise self._log_exception(error, msg)
| [
"code@brown.dev"
] | code@brown.dev |
545886cf83b5b9b4486d5345dfd76c02b06da4a2 | 878e244de48354cc4cfb3baba482bada58f90075 | /src/django/teeth/web/server.py | 4d0ae01516b58b4d11ab06394294530c09e19b60 | [] | no_license | ccdump/TeethAR | 47c97e80601a03a25aa57589fa003c8ad5ebdcfa | a13bbfc1fd6ad76da213f8784a5b1075dbad4ae3 | refs/heads/master | 2020-04-08T02:14:22.490618 | 2018-03-04T17:34:28 | 2018-03-04T17:34:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 851 | py | import socket
import sys
# NOTE(review): Python 2 syntax (print statements / print >>); this script
# will not run under Python 3 without conversion.
# Create a TCP/IP socket.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind the socket to the port (all interfaces, port 2121).
server_address = ('0.0.0.0', 2121)
print >>sys.stderr, 'starting up on %s port %s' % server_address
sock.bind(server_address)
# Accept at most one queued connection at a time.
sock.listen(1)
# Serve forever, one client at a time; everything received is dumped into
# "data.dat" (the file is re-opened and truncated for each new client).
while True:
    # Wait for a connection
    print >>sys.stderr, 'waiting for a connection'
    connection, client_address = sock.accept()
    try:
        print >>sys.stderr, 'connection from', client_address
        # Receive the data in chunks (up to 8 MiB per recv) and log/save it.
        f=open("data.dat","w")
        while True:
            data = connection.recv(8388608)
            if data:
                print "%s" % data
                f.write("%s" % data)
            else:
                # Empty recv() result means the peer closed the connection.
                print >>sys.stderr, 'no more data from', client_address
                break
        # NOTE(review): f is only closed on the clean path; an exception
        # during recv/write leaks the file handle (only the socket is closed
        # in the finally block).
        f.close()
    finally:
        # Clean up the connection
        connection.close()
"tony92511@hanmail.net"
] | tony92511@hanmail.net |
f28c4218e7a5ee09e2d20f40aede1acc20d15eff | b5accd2d51d3b7d004fa62898d928afbb2bfa81e | /src/tweets/urls.py | 6cfd784030b5939cd52500b85ab6ff28a6fe88e7 | [] | no_license | sasili-adetunji/tweetme | 77406545622badd5eb4195777e7f2446676df75f | 43b3d4d227e1cd970cdd2b2dd9dded6ddc175ab0 | refs/heads/master | 2020-03-17T23:30:11.332524 | 2018-04-29T20:20:17 | 2018-04-29T20:20:17 | 134,048,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 707 | py | from django.views.generic.base import RedirectView
from .views import (
TweetDetailView,
TweetListView,
TweetCreateView,
TweetUpdateView,
TweetDeleteView
)
from django.conf.urls import url
# URL routes for the tweet app; `name=` values are used for reverse() lookups.
urlpatterns = [
    url(r'^$', RedirectView.as_view(url='/')), # /tweet -> redirect to site root
    url(r'^search/$', TweetListView.as_view(), name='list'), # /tweet/search/
    url(r'^create/$', TweetCreateView.as_view(), name='create'), # /tweet/create/
    url(r'^(?P<pk>\d+)/$', TweetDetailView.as_view(), name='detail'), # /tweet/1/
    url(r'^(?P<pk>\d+)/update/$', TweetUpdateView.as_view(), name='update'), # /tweet/1/update/
    url(r'^(?P<pk>\d+)/delete/$', TweetDeleteView.as_view(), name='delete'), # /tweet/1/delete/
]
| [
"sasil.adetunji@gmail.com"
] | sasil.adetunji@gmail.com |
8da873495ddad3ce0479646a6258152033221886 | a18e0e6cdf46e90e43f01e67343a74c39b326c73 | /server/env/bin/django-admin.py | a7e2ed52725cce6598a88ede6d1f74679c77d1ec | [] | no_license | Froznar/AdministradorBiblioteca | 82cc2bf7145e701f82aa00a14ae829cf99985cba | 62e6de6a03734ce51718a3a3fd62fe5f9a374fee | refs/heads/master | 2021-06-26T04:05:20.936911 | 2019-10-20T23:48:08 | 2019-10-20T23:48:08 | 216,438,548 | 0 | 0 | null | 2021-03-12T09:51:29 | 2019-10-20T23:00:01 | JavaScript | UTF-8 | Python | false | false | 163 | py | #!/home/froznar/Francisco/Library/server/env/bin/python3
from django.core import management
if __name__ == "__main__":
    # Entry point: delegate to Django's management CLI (like manage.py, but
    # without a default settings module pre-configured).
    management.execute_from_command_line()
| [
"francisco.marin@ucsp.edu.pe"
] | francisco.marin@ucsp.edu.pe |
d4d3776eb15f2dc0fdb9379bf7c6820e9f5b5190 | e216d6c640e5b95b612ecd2f132f04a6abeb86aa | /dangdangspider/pipelines.py | 2fe82d710ebb02ad8c8d454f334d1e3c7e0c79d2 | [] | no_license | zwlazsn/dangdangspider | c51b031edded5e915bdb4e438a7a3754da975e39 | 1f8f7957f644619548c6ad3b33e9e531a60451b5 | refs/heads/master | 2020-07-01T17:08:34.290351 | 2019-08-08T10:27:32 | 2019-08-08T10:27:32 | 201,235,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
class DangdangspiderPipeline(object):
    """Scrapy pipeline that persists scraped items into a local MongoDB."""

    def __init__(self):
        # Running count of processed items (starts at 1, pre-increment style).
        self.count = 1
        self.db = self.conn_mongo()

    def conn_mongo(self):
        """Connect to the local MongoDB server and return the `dangdang` database."""
        client = pymongo.MongoClient(host="localhost", port=27017)
        print(client)
        return client.dangdang

    def process_item(self, item, spider):
        """Store one item in the `dangdang` collection and pass it through."""
        # Fix: Collection.insert() was deprecated in PyMongo 3 and removed in
        # PyMongo 4; insert_one() is the supported single-document API (the
        # return value was unused, so this is behavior-compatible).
        self.db.dangdang.insert_one(dict(item))
        self.count += 1
        return item
| [
"1647383326@qq.com"
] | 1647383326@qq.com |
19c5bac30dc1812153c6ada47917d8a1ad43f1cf | a4cfe8b47d3da97d335b210994fe03f8aa5b2f77 | /vint/linting/config/config_project_source.py | 0869f9ee8a02f71819a20b3062f265425bec19e2 | [
"MIT"
] | permissive | blueyed/vint | e6e7bbbf43a7b337f60d05d768d424fe400d40d8 | 9ae019d6e7863a4c9634faa39b9b75111dd2ad36 | refs/heads/master | 2021-01-13T16:40:04.962921 | 2016-12-22T12:00:45 | 2016-12-22T12:00:45 | 78,246,540 | 0 | 0 | null | 2017-01-06T23:28:35 | 2017-01-06T23:28:35 | null | UTF-8 | Python | false | false | 805 | py | from pathlib import Path
from vint.asset import get_asset_path
from vint.linting.config.config_file_source import ConfigFileSource
# Config file basenames searched for in each directory, in priority order.
PROJECT_CONFIG_FILENAMES = [
    '.vintrc.yaml',
    '.vintrc.yml',
    '.vintrc',
]

# Fallback: an empty config shipped with the package, used when no project
# config file can be found anywhere up the directory tree.
VOID_CONFIG_PATH = get_asset_path('void_config.yaml')
class ConfigProjectSource(ConfigFileSource):
    """Config source for a project-local .vintrc file.

    Searches the current working directory first, then each ancestor
    directory, and uses the first (i.e. nearest) config file found.
    """

    def get_file_path(self, env):
        """Return the nearest project config path, or the packaged void config.

        env: dict with at least 'cwd', the directory to start searching from.

        Fix: the original kept scanning ancestor directories after a match,
        so a config file in an outer directory (e.g. $HOME, an ancestor of
        the project) silently shadowed the project's own .vintrc. Returning
        on the first hit gives the nearest file precedence.
        """
        path_list_to_search = [Path(env['cwd'])] + list(Path(env['cwd']).parents)
        for project_path in path_list_to_search:
            for basename in PROJECT_CONFIG_FILENAMES:
                candidate = project_path / basename
                if candidate.is_file():
                    return candidate
        # No project config anywhere up the tree: fall back to the empty
        # config bundled with the package.
        return VOID_CONFIG_PATH
| [
"yuki.kokubun@mixi.co.jp"
] | yuki.kokubun@mixi.co.jp |
dcfafc8b9f6be8e2dae30223eb2652ab844e2ca4 | 77908c030864e8310bdb04eb401e92290a98f42e | /envs/simulator.py | 6b25a08cd503b4d65fecf53c296dfdbe754d64f7 | [] | no_license | dhfromkorea/mighty-rl | 9837d5d220f77d63fc8e202321bf8b9cf5ac893f | 44d911db66a4b3b8ed437f905a28a2d7ac7f7b2c | refs/heads/master | 2022-01-24T23:17:26.364299 | 2019-06-26T01:05:38 | 2019-06-26T01:05:38 | 126,102,600 | 3 | 0 | null | 2018-04-05T12:54:24 | 2018-03-21T00:58:36 | Jupyter Notebook | UTF-8 | Python | false | false | 2,221 | py | from collections import namedtuple
from itertools import count
import sys
import numpy as np
import logging
import plotting
T = namedtuple("Transition", ["s", "a", "r", "s_next", "done"])
class Simulator(object):
    """Rolls out a policy in a Gym-style environment and collects transitions."""

    def __init__(self, env, state_dim, action_dim):
        """Store the environment and its space dimensions.

        Parameters
        ----------
        env : object
            Environment exposing Gym-style reset() and step(action).
        state_dim : int
            Dimensionality of the observation space (stored, unused here).
        action_dim : int
            Dimensionality of the action space (stored, unused here).
        """
        self._env = env
        self._state_dim = state_dim
        self._action_dim = action_dim

    def simulate(self, pi, n_trial, n_episode, return_stats=False):
        """Run `pi` for n_trial x n_episode episodes and collect trajectories.

        Parameters
        ----------
        pi : behavior policy with a choose_action(state) method
        n_trial : number of independent trials
        n_episode : episodes per trial
        return_stats : when True, also return the EpisodeStats record

        Returns
        -------
        D : list of trajectories, one per episode across all trials; each
            trajectory is a list of Transition namedtuples.
            NOTE(review): the stats arrays are sized per-episode only, so
            rewards/lengths accumulate across trials into the same slots.
        """
        stats = plotting.EpisodeStats(
            episode_lengths=np.zeros(n_episode),
            episode_rewards=np.zeros(n_episode))
        D = []
        env = self._env
        for trial_i in range(n_trial):
            for epi_i in range(n_episode):
                # Reward of the previous episode (wraps to index -1 on the
                # first episode, i.e. the last array slot).
                last_reward = stats.episode_rewards[epi_i - 1]
                sys.stdout.flush()
                traj = []
                s = env.reset()
                for t in count():
                    a = pi.choose_action(s)
                    s_next, r, done, _ = env.step(a)
                    stats.episode_rewards[epi_i] += r
                    stats.episode_lengths[epi_i] = t
                    logging.debug("s {} a {} s_next {} r {} done {}".format(s, a, r, s_next, done))
                    transition = T(s=s, a=a, r=r, s_next=s_next, done=done)
                    traj.append(transition)
                    s = s_next
                    if done:
                        logging.debug("done after {} steps".format(t))
                        break
                print("\rStep {} @ Episode {}/{} ({})".format(t, epi_i + 1, n_episode, last_reward), end="")
                D.append(traj)
        if return_stats:
            return D, stats
        else:
            return D
| [
"dhfromkorea@gmail.com"
] | dhfromkorea@gmail.com |
f173bbc961827cbbdfb2fca0bb12a314f0c87692 | 86adf136169bc4ab5bdfec5a32d7532f05b80e92 | /chat/models.py | 89a756672677374f56769e879f4815654d13f409 | [] | no_license | idfinternship/project-c | 57865cb80a2a929ec7525194a479d92ce74ac1af | a8509f4f43d91df9e7d144f2bc507e024a92918b | refs/heads/master | 2022-09-21T11:27:22.618524 | 2020-06-04T14:28:14 | 2020-06-04T14:28:14 | 240,582,349 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,303 | py | from django.db import models
from django.conf import settings
from django.db import models
from django.db.models import Q
class ThreadManager(models.Manager):
    """Manager with lookup/creation helpers for two-person chat Threads."""

    def by_user(self, user):
        """Return every Thread `user` participates in, excluding self-threads."""
        qlookup = Q(first=user) | Q(second=user)
        # Guard against threads where the same user fills both slots.
        qlookup2 = Q(first=user) & Q(second=user)
        qs = self.get_queryset().filter(qlookup).exclude(qlookup2).distinct()
        return qs

    def get_or_new(self, user, other_username): # get_or_create
        """Return (thread, created_flag) for the pair (user, other_username).

        Creates the Thread when it does not exist yet.
        NOTE(review): returns bare None (not a 2-tuple) when the two usernames
        are the same -- callers that unpack will raise on self-chat.
        """
        username = user.username
        if username == other_username:
            return None
        # Match the thread regardless of which participant is stored first.
        qlookup1 = Q(first__username=username) & Q(second__username=other_username)
        qlookup2 = Q(first__username=other_username) & Q(second__username=username)
        qs = self.get_queryset().filter(qlookup1 | qlookup2).distinct()
        if qs.count() == 1:
            return qs.first(), False
        elif qs.count() > 1:
            # Duplicate threads exist: prefer the oldest one.
            return qs.order_by('timestamp').first(), False
        else:
            Klass = user.__class__
            user2 = Klass.objects.get(username=other_username)
            if user != user2:
                obj = self.model(
                    first=user,
                    second=user2
                )
                obj.save()
                return obj, True
            return None, False
class Thread(models.Model):
    """A private chat between exactly two users."""
    first = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='chat_thread_first')
    second = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='chat_thread_second')
    updated = models.DateTimeField(auto_now=True)
    timestamp = models.DateTimeField(auto_now_add=True)
    objects = ThreadManager()

    @property
    def room_group_name(self):
        # Channel-layer group name for this thread's websocket consumers.
        return f'chat_{self.id}'

    def broadcast(self, msg=None):
        """Send `msg` to this thread's channel group; return True if sent."""
        if msg is not None:
            # NOTE(review): broadcast_msg_to_chat is neither defined nor
            # imported in this module -- calling broadcast() with a message
            # will raise NameError; confirm the missing import.
            broadcast_msg_to_chat(msg, group_name=self.room_group_name, user='admin')
            return True
        return False
class ChatMessage(models.Model):
    """One message posted by `user` into a Thread."""
    # SET_NULL keeps messages around if their thread is deleted.
    thread = models.ForeignKey(Thread, null=True, blank=True, on_delete=models.SET_NULL)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name='sender', on_delete=models.CASCADE)
    message = models.TextField()
    timestamp = models.DateTimeField(auto_now_add=True)
"simas0315@gmail.com"
] | simas0315@gmail.com |
d0cdb49ea36d7d5c4b84a72c9441aaf50b46334e | b5420dcdf32618938e544f50f31d16c74a06db0d | /minesweeper.py | acf87689a2e1b827dd4477bb35ef37fa4df539d1 | [] | no_license | jreans/SHAPEMinesweeper | 9d3e4a17ea6bcd7bbdc91f93246380f3e15c2b0e | 61380562463e94b7bbcb610680e51850a26d5303 | refs/heads/main | 2022-12-20T12:55:16.825853 | 2020-10-07T01:48:56 | 2020-10-07T01:48:56 | 301,899,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,594 | py | """
Ebow and Jeannie
July 2017
SHAPE CS Session 1
"""
from tkinter import *
from tkinter import simpledialog
import random
import math
####################################
# Helper Functions
####################################
def getCellBounds(row, col, data):
    """Map a grid (row, col) to its (x0, y0, x1, y1) canvas bounding box.

    "Model to View" conversion: the playable grid occupies the window minus
    a `data.margin` border on every side.
    """
    gridW = data.width - 2 * data.margin
    gridH = data.height - 2 * data.margin
    x0 = data.margin + gridW * col / data.cols
    x1 = data.margin + gridW * (col + 1) / data.cols
    y0 = data.margin + gridH * row / data.rows
    y1 = data.margin + gridH * (row + 1) / data.rows
    return (x0, y0, x1, y1)
def withInCell(x, y, data):
    """Map canvas pixel coordinates onto the (row, col) of the 50px cell hit.

    "View to Model" conversion.  NOTE(review): the first component comes from
    `x` and the second from `y`, i.e. the grid is indexed transposed relative
    to screen rows/columns -- but drawBoard() and the click handlers all use
    this same convention, so the program is self-consistent.
    """
    # Cell size is hard-coded at 50px to match drawBoard(); `data` is unused.
    return (int(x / 50), int(y / 50))
####################################
# Model
####################################
def init(data):
    """Initialize/reset all game state on the shared `data` struct."""
    data.rows=10
    data.cols=10
    data.margin=20
    data.time=0
    data.timerDelay=1000
    data.gameOver=False
    data.paused=False
    data.win=False
    # Board model: None = unrevealed, -1 = mine, 0-8 = revealed neighbor count.
    data.info=[[None for x in range(data.rows)] for y in range (data.cols)]
    data.minesleft=0
    # Mirrors the board with the right-click flag state of each cell.
    data.flagged=[[False for x in range(data.rows)] for y in range (data.cols)]
    buryMines(data)
    data.leaderboard=[]
    data.sortedleader=[]
    # Guards against prompting for the winner's name more than once.
    data.windowbool=False
    return
def buryMines(data):
    """Randomly bury one or two mines in every row of the grid.

    Two random column picks are made per row; when both land on the same
    cell only one mine exists.  Fix: the original incremented data.minesleft
    unconditionally, over-counting in that collision case -- the counter now
    only advances when a new mine is actually placed, so it always equals
    the number of mines on the board.  Loop bounds are also taken from
    data.rows/data.cols instead of being hard-coded to 10.
    """
    for row in range(data.rows):
        for _ in range(2):
            col = random.randint(0, data.cols - 1)
            if data.info[row][col] != -1:
                data.info[row][col] = -1
                data.minesleft += 1
    return
def recursiveShow(row, col, data):
    """Reveal the cell at (row, col), flood-filling across zero-count cells.

    A revealed cell whose 3x3 neighborhood contains no mines recursively
    reveals all eight neighbors (classic Minesweeper behavior).  Coordinates
    outside the board and already-revealed cells are ignored.
    """
    if not (0 <= row < data.rows and 0 <= col < data.cols):
        return
    if data.info[row][col] is not None:
        # Already revealed (or a mine): nothing to do.
        return
    mines_nearby = countAround(row, col, data)
    data.info[row][col] = mines_nearby
    if mines_nearby == 0:
        # No mines adjacent: spread the reveal to every neighbor.
        for dr in (-1, 0, 1):
            for dc in (-1, 0, 1):
                if dr or dc:
                    recursiveShow(row + dr, col + dc, data)
    return
def countAround(row, col, data):
    """Return the number of mines (-1 cells) in the 3x3 block around (row, col).

    Out-of-bounds neighbors are skipped.  The center cell itself is included
    in the scan, which is harmless because callers only use the count for
    non-mine cells.
    """
    total = 0
    for r in range(row - 1, row + 2):
        for c in range(col - 1, col + 2):
            if 0 <= r < data.rows and 0 <= c < data.cols and data.info[r][c] == -1:
                total += 1
    return total
####################################
# Controller
####################################
def leftMousePressed(event, data):
    """Reveal the clicked cell: a mine ends the game, otherwise flood-reveal."""
    if data.gameOver==False and data.paused==False:
        x=event.x
        y=event.y
        row,col=withInCell(x,y,data)
        if data.info[row][col]==-1:
            # Clicked a mine: immediate loss.
            data.gameOver=True
            data.win=False
        else:
            # Safe cell: reveal it (and flood outward if its count is zero).
            recursiveShow(row,col,data)
    return
def rightMousePressed(event,data):
    """Toggle a flag on the clicked cell and adjust the displayed mine count."""
    if data.gameOver==False:
        x=event.x
        y=event.y
        row,col=withInCell(x,y,data)
        if data.flagged[row][col]==False:
            data.flagged[row][col]=True
            data.minesleft-=1
        else:
            data.flagged[row][col]=False
            data.minesleft+=1
    # NOTE(review): data.gameOver cannot have changed above, so this early
    # return is dead code; kept byte-for-byte for fidelity.
    if data.gameOver==True:
        return
    return
def keyPressed(event, data):
    """Keyboard controls while the game is live: 'p' toggles pause, 'r' restarts."""
    if data.gameOver==False:
        if event.keysym=="p":
            if data.paused==True:
                data.paused=False
            else:
                data.paused=True
        if event.keysym=="r":
            # Full reset of all game state (also re-buries the mines).
            init(data)
    return
def timerFired(data):
    """Advance the clock one tick (when live) and detect a win.

    The board counts as won once no cell is None: mines are -1 from setup,
    so an all-non-None grid means every safe cell has been revealed.

    Returns the current elapsed time.  Fix: the original's early-out inside
    the scan was a bare `return` (None); the return value is now data.time
    on every path (the tkinter timer wrapper ignores it, so this is
    backward-compatible).
    """
    if data.gameOver == False and data.paused == False:
        data.time += 1
    for i in range(0, 10):
        for j in range(0, 10):
            if data.info[j][i] == None:
                # Still at least one unrevealed safe cell: not a win yet.
                data.win = False
                return data.time
    data.win = True
    data.gameOver = True
    return data.time
####################################
# View
####################################
def drawBoard(canvas, data):
    """Draw the full static scene: status bar, leaderboard panel, and grid."""
    # Gray status bar under the 500x500 grid: elapsed time, mines left, keys.
    canvas.create_rectangle(0,500,500,575,fill="#606060")
    canvas.create_text(100,525, fill="white", font="Arial 18 bold", text="Time elapsed: ")
    canvas.create_text(200,525, fill="white", font="Arial 18 bold", text=str(data.time))
    canvas.create_text(150,550, fill="white", font="Arial 12 bold", text="press 'p' to pause")
    canvas.create_text(375,525, fill="white", font="Arial 18 bold", text="Mines left: ")
    canvas.create_text(460,525, fill="white", font="Arial 18 bold", text=str(data.minesleft))
    canvas.create_text(350,550, fill="white", font="Arial 12 bold", text="press 'r' to restart")
    # Leaderboard panel on the right-hand side (x >= 500).
    canvas.create_rectangle(500,0,750,70,fill="#9ffcf3")
    canvas.create_rectangle(500,70,750,575,fill="#cef29f")
    canvas.create_text(625,35,fill="black", font="Arial 18 bold", text="LEADERBOARD")
    # Sort (name, time) entries by time ascending and show the top 10.
    data.sortedleader=sorted(data.leaderboard,key=lambda x: x[1])
    print(data.sortedleader)
    window=0
    while window+1<=len(data.sortedleader) and window<10:
        canvas.create_text(610,100+50*window, fill="black", font="Arial 18", text=data.sortedleader[window])
        window+=1
    # Cell backgrounds: white = hidden, pale yellow = revealed non-mine.
    for i in range (0,10):
        for j in range(0,10):
            canvas.create_rectangle(0+50*j,0+50*i,50+50*j,50+50*i,fill='white')
            if (data.info[j][i]!=None and data.info[j][i]!=-1):
                canvas.create_rectangle(0+50*j,0+50*i,50+50*j,50+50*i,fill='#f8ff82')
    # Neighbor counts (color-coded 1-6) and flag markers.  NOTE(review): the
    # grid is indexed info[x-cell][y-cell] throughout -- see withInCell().
    for row in range (data.rows):
        for col in range (data.cols):
            if data.info[row][col]!=-1:
                if data.info[row][col]==1:
                    canvas.create_text(row*50+50/2, col*50+50/2,fill="#9242f4", font="Arial 18 bold", text=str(data.info[row][col]))
                if data.info[row][col]==2:
                    canvas.create_text(row*50+50/2,col*50+50/2, fill="green", font="Arial 18 bold", text=str(data.info[row][col]))
                if data.info[row][col]==3:
                    canvas.create_text(row*50+50/2,col*50+50/2, fill="#42b3f4", font="Arial 18 bold", text=str(data.info[row][col]))
                if data.info[row][col]==4:
                    canvas.create_text(row*50+50/2,col*50+50/2, fill="magenta", font="Arial 18 bold", text=str(data.info[row][col]))
                if data.info[row][col]==5:
                    canvas.create_text(row*50+50/2,col*50+50/2, fill="red", font="Arial 18 bold", text=str(data.info[row][col]))
                if data.info[row][col]==6:
                    canvas.create_text(row*50+50/2,col*50+50/2, fill="#e88420", font="Arial 18 bold", text=str(data.info[row][col]))
            if data.flagged[row][col]==True:
                # Blue pennant on a black pole marks a flagged cell.
                canvas.create_polygon((row*50+10,col*50+10),(row*50+40,col*50+25),(row*50+10,col*50+40), fill="blue")
                canvas.create_rectangle(row*50+10,col*50+10,row*50+15,col*50+47,fill="black")
    return
def drawPaused(canvas, data):
    """Overlay a centered "PAUSED" banner while the game is paused."""
    if not data.paused:
        return
    canvas.create_text(250, 250, fill="black", font="Arial 45 bold",
                       text="PAUSED")
    return
def drawGameOver(canvas, data):
    """Reveal mines, show the win/lose banner, and record the winner once.

    On a win the player is prompted for a name a single time (guarded by
    data.windowbool), seed entries are appended so the board is never empty,
    and the sorted leaderboard is written to minesweeperleaderboard.txt.
    """
    if data.gameOver==True:
        # Reveal every mine as a red circle.
        for i in range (0,10):
            for j in range(0,10):
                if data.info[j][i]==-1:
                    canvas.create_oval(10+50*j,10+50*i,40+50*j,40+50*i,fill='red')
    if data.win==True:
        canvas.create_text(250,250,fill="darkblue", font="Arial 26 bold",
            text="CONGRATS! YOU WIN!")
    elif data.win==False and data.gameOver==True:
        canvas.create_text(250,250,fill="#721313", font="Arial 26 bold",
            text="YOU LOSE! TRY AGAIN!")
    if data.win==True and data.windowbool==False:
        userName=simpledialog.askstring("Question","What is your name?")
        data.leaderboard.append((userName,data.time))
        # Seed entries so the leaderboard is never empty.
        data.leaderboard.append(("Jeannie",25))
        data.leaderboard.append(("Kate", 50))
        data.leaderboard.append(("Maria",45))
        data.leaderboard.append(("Ebow", 30))
        data.sortedleader=sorted(data.leaderboard,key=lambda x: x[1])
        data.windowbool=True
        # Persist the sorted board, one "(name, time)" tuple per line.
        winnerfile=open("minesweeperleaderboard.txt","w+")
        for i in range (0,len(data.sortedleader)):
            winnerfile.write(str(data.sortedleader[i]))
            winnerfile.write("\n")
        winnerfile.close()
    return
def redrawAll(canvas, data):
    """Redraw the full scene: board first, then pause and game-over overlays."""
    drawBoard(canvas, data)
    drawPaused(canvas,data)
    drawGameOver(canvas, data)
####################################
# Use the run function as-is
####################################
def run(width=500, height=500):
    """Create the Tk window, wire up all event handlers, and start the loop."""
    # Run function adapted from David Kosbie's
    # snake-demo.py for 15-112 (CarpeDiem!)
    def redrawAllWrapper(canvas, data):
        canvas.delete(ALL)
        redrawAll(canvas, data)
        canvas.update()
    def leftMousePressedWrapper(event, canvas, data):
        leftMousePressed(event, data)
        redrawAllWrapper(canvas, data)
    def rightMousePressedWrapper(event, canvas, data):
        rightMousePressed(event, data)
        redrawAllWrapper(canvas, data)
    def keyPressedWrapper(event, canvas, data):
        keyPressed(event, data)
        redrawAllWrapper(canvas, data)
    def timerFiredWrapper(canvas, data):
        timerFired(data)
        redrawAllWrapper(canvas, data)
        # pause, then call timerFired again
        canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)
    # Set up data and call init
    class Struct(object): pass
    data = Struct()
    data.width = width
    data.height = height
    data.timerDelay = 1000 # milliseconds
    init(data)
    # create the root and the canvas
    root = Tk()
    canvas = Canvas(root, width=data.width, height=data.height)
    canvas.pack()
    # set up events (left click, right click, keyboard)
    root.bind("<Button-1>", lambda event:
                            leftMousePressedWrapper(event, canvas, data))
    root.bind("<Button-3>", lambda event:
                            rightMousePressedWrapper(event, canvas, data))
    root.bind("<Key>", lambda event:
                            keyPressedWrapper(event, canvas, data))
    timerFiredWrapper(canvas, data)
    # and launch the app
    root.mainloop()  # blocks until window is closed
    print("bye!")
print("bye!")
run(750, 575) | [
"noreply@github.com"
] | jreans.noreply@github.com |
0fd555135878bb32f10aaf05686f2f1dd263548e | 2885797602ca3ccc19c8694f304b9ba778235a84 | /custom_resource/sagemaker_data_capture.py | f34078c72ed64f19f6ab584c083dc3df83b04585 | [
"MIT-0"
] | permissive | mlvats/amazon-sagemaker-safe-deployment-pipeline | adc138b82d32ff9430948e1b980d0c53238d1fbf | 426b6e7ef9f52dc0a36949f92de5536a693d3cca | refs/heads/master | 2022-12-02T02:18:58.085447 | 2020-07-28T06:38:10 | 2020-07-28T06:38:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,181 | py | import logging
import boto3
import botocore
from botocore.exceptions import ClientError
from crhelper import CfnResource
logger = logging.getLogger(__name__)
sm = boto3.client("sagemaker")
# cfnhelper makes it easier to implement a CloudFormation custom resource
helper = CfnResource()
# CFN Handlers
def lambda_handler(event, context):
    # Entry point: crhelper dispatches the CloudFormation event to the
    # registered create/update/delete and poll handlers below.
    helper(event, context)
@helper.create
@helper.update
def create_handler(event, context):
    """
    Called when the CloudFormation custom resource sends the create or
    update event.  Returns the new endpoint-config ARN.
    """
    return update_endpoint(event)
@helper.delete
def delete_handler(event, context):
    """
    Called when the CloudFormation custom resource sends the delete event.
    Removes the endpoint config this resource created.
    """
    delete_endpoint_config(event)
@helper.poll_create
@helper.poll_update
def poll_create(event, context):
    """
    Return true if the resource has been created and false otherwise so
    CloudFormation polls again.
    """
    endpoint_name = get_endpoint_name(event)
    logger.info("Polling for update of endpoint: %s", endpoint_name)
    return is_endpoint_ready(endpoint_name)
# Helper Functions
def get_endpoint_name(event):
    """Pull the SageMaker endpoint name out of the custom-resource properties."""
    properties = event["ResourceProperties"]
    return properties["EndpointName"]
def is_endpoint_ready(endpoint_name):
    """Return True when the endpoint is InService, False while it is Updating.

    Any other status (e.g. Failed) raises, aborting the custom-resource
    operation instead of polling forever.
    """
    is_ready = False
    endpoint = sm.describe_endpoint(EndpointName=endpoint_name)
    status = endpoint["EndpointStatus"]
    if status == "InService":
        logger.info("Endpoint (%s) is ready", endpoint_name)
        is_ready = True
    elif status == "Updating":
        logger.info(
            "Endpoint (%s) still updating, waiting and polling again...", endpoint_name
        )
    else:
        raise Exception(
            "Endpoint ({}) has unexpected status: {}".format(endpoint_name, status)
        )
    return is_ready
def update_endpoint(event):
    """Create an endpoint config with data capture enabled and deploy it.

    Copies the production variants (and KmsKeyId, if any) from the endpoint's
    current config, attaches a DataCaptureConfig built from the resource
    properties, then updates the endpoint in place.  Returns the new config's
    ARN; poll_create() waits for the update to finish.
    """
    props = event["ResourceProperties"]
    endpoint_name = get_endpoint_name(event)
    # Fetch the endpoint and validate it is currently in service.
    endpoint = sm.describe_endpoint(EndpointName=endpoint_name)
    status = endpoint["EndpointStatus"]
    if status != "InService":
        raise Exception(
            "Endpoint ({}) has unexpected status: {}".format(endpoint_name, status)
        )
    # Get the current endpoint config
    endpoint_config_name = endpoint["EndpointConfigName"]
    endpoint_config = sm.describe_endpoint_config(
        EndpointConfigName=endpoint_config_name
    )
    # Build the data capture config dict (captures request and response for
    # CSV and JSON payloads; sampling percentage defaults to 100%).
    data_capture_config_dict = {
        "EnableCapture": True,
        "DestinationS3Uri": props["DataCaptureUri"],
        "InitialSamplingPercentage": int(props.get("InitialSamplingPercentage", 100)),
        "CaptureOptions": [{"CaptureMode": "Input"}, {"CaptureMode": "Output"}],
        "CaptureContentTypeHeader": {
            "CsvContentTypes": ["text/csv"],
            "JsonContentTypes": ["application/json"],
        },
    }
    # Add the KmsKeyId to data capture if provided
    if props.get("KmsKeyId") is not None:
        data_capture_config_dict["KmsKeyId"] = props["KmsKeyId"]
    new_config_name = props["EndpointConfigName"]
    # Assemble the create_endpoint_config request.
    request = {
        "EndpointConfigName": new_config_name,
        "ProductionVariants": endpoint_config["ProductionVariants"],
        "DataCaptureConfig": data_capture_config_dict,
        "Tags": [],  # Don't copy aws:* tags from original
    }
    # Copy KmsKeyId if provided
    if endpoint_config.get("KmsKeyId") is not None:
        request["KmsKeyId"] = endpoint_config.get("KmsKeyId")
    try:
        # Create the endpoint config
        logger.info("Create endpoint config: %s", new_config_name)
        response = sm.create_endpoint_config(**request)
        helper.Data["EndpointName"] = endpoint_name
        helper.Data["DataCaptureEndpointUri"] = "{}/{}/{}".format(
            props["DataCaptureUri"],
            endpoint_name,
            props.get("VariantName", "AllTraffic"),
        )
        helper.Data["Arn"] = response["EndpointConfigArn"]
        # Update endpoint to point to new config
        logger.info("Update endpoint: %s", endpoint_name)
        sm.update_endpoint(
            EndpointName=endpoint_name, EndpointConfigName=new_config_name
        )
        # Return the new endpoint config name
        return helper.Data["Arn"]
    except ClientError as e:
        if e.response["Error"]["Code"] == "ValidationException":
            # NOTE(review): a ValidationException is logged but swallowed,
            # so the handler returns None in that case -- confirm this
            # best-effort behavior is intended.
            logger.error(
                "Error creating new config: %s", e.response["Error"]["Message"]
            )
        else:
            logger.error("Unexpected error while trying to create endpoint config")
            raise e
def delete_endpoint_config(event):
    """Delete the endpoint config this custom resource created (best effort)."""
    new_config_name = event["ResourceProperties"]["EndpointConfigName"]
    logger.info("Deleting endpoint config: %s", new_config_name)
    try:
        sm.delete_endpoint_config(EndpointConfigName=new_config_name)
    except ClientError as e:
        # NOTE(review): some SageMaker APIs report a missing resource as
        # "ValidationException" rather than "ResourceNotFound" -- confirm the
        # error code actually raised here, otherwise deletes of already-gone
        # configs will re-raise instead of being ignored.
        if e.response["Error"]["Code"] == "ResourceNotFound":
            logger.info("Resource not found, nothing to delete")
        else:
            logger.error("Unexpected error while trying to delete endpoint config")
            raise e
| [
"brightsparc@gmail.com"
] | brightsparc@gmail.com |
c1aaf2884f69b654897f07ef3bb94e0e6ac0e31c | 5f5d8a7d1461ff85d2282bba77d137980c43cb7c | /NAS/single-path-one-shot/src/cifar100/train_fair_way.py | 7af80218d80035a0310d2676809a43aef7db8624 | [
"Apache-2.0"
] | permissive | ahai-code/SimpleCVReproduction | 268d92667ea55e42b8a8f3c9972c486bb32766a2 | 00bdce21e9048d82b836b64ac05c99713e90e056 | refs/heads/master | 2023-04-15T00:31:24.495143 | 2021-04-11T01:03:46 | 2021-04-11T01:03:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,849 | py | import argparse
import copy
import functools
import os
import pickle
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
from torch import nn
from torch.autograd import Variable
from torch.optim import lr_scheduler
from torch.utils.tensorboard import SummaryWriter
from datasets.cifar100_dataset import get_train_loader, get_val_loader
from model.slimmable_resnet20 import mutableResNet20
from utils.utils import (ArchLoader, AvgrageMeter, CrossEntropyLabelSmooth,
DataIterator, accuracy, bn_calibration_init,
reduce_tensor, save_checkpoint)
# Make every print() flush immediately (useful under distributed launchers).
print = functools.partial(print, flush=True)

# Dataset sizes used to derive per-epoch / validation iteration counts.
CIFAR100_TRAINING_SET_SIZE = 50000
CIFAR100_TEST_SET_SIZE = 10000

# NOTE(review): the parser is titled "ImageNet" but the script trains on
# CIFAR-100 -- likely a leftover from a copied template.
parser = argparse.ArgumentParser("ImageNet")
parser.add_argument('--local_rank', type=int, default=None,
                    help='local rank for distributed training')
parser.add_argument('--batch_size', type=int, default=16384, help='batch size')
parser.add_argument('--learning_rate', type=float,
                    default=0.894, help='init learning rate')
parser.add_argument('--num_workers', type=int,
                    default=6, help='num of workers')
# hyper parameters
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float,
                    default=4e-5, help='weight decay')
parser.add_argument('--report_freq', type=float,
                    default=100, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--epochs', type=int, default=30000,
                    help='num of training epochs')
parser.add_argument('--total_iters', type=int,
                    default=300000, help='total iters')
parser.add_argument('--classes', type=int, default=100,
                    help='number of classes')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--grad_clip', type=float,
                    default=5, help='gradient clipping')
parser.add_argument('--label_smooth', type=float,
                    default=0.1, help='label smoothing')
args = parser.parse_args()

# Iterations per training epoch and per validation pass at this batch size.
per_epoch_iters = CIFAR100_TRAINING_SET_SIZE // args.batch_size
val_iters = CIFAR100_TEST_SET_SIZE // 200
def main():
    """Distributed supernet training entry point (one process per GPU)."""
    if not torch.cuda.is_available():
        print('no gpu device available')
        sys.exit(1)
    writer = None
    num_gpus = torch.cuda.device_count()
    # Seed everything for reproducibility; pick this process's GPU by rank.
    np.random.seed(args.seed)
    args.gpu = args.local_rank % num_gpus
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    cudnn.deterministic = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    print('gpu device = %d' % args.gpu)
    print("args = %s", args)
    torch.distributed.init_process_group(
        backend='nccl', init_method='env://')
    args.world_size = torch.distributed.get_world_size()
    # The configured batch size is global: split it evenly across processes.
    args.batch_size = args.batch_size // args.world_size
    criterion_smooth = CrossEntropyLabelSmooth(args.classes, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()
    model = mutableResNet20()
    model = model.cuda(args.gpu)
    model = torch.nn.parallel.DistributedDataParallel(
        model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    # (A commented-out variant with separate weight-decay parameter groups
    # was removed here; a single SGD group is used instead.)
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    args.total_iters = args.epochs * per_epoch_iters  # optionally // 16 (16 = number of candidate sub-networks), currently disabled
    # Linear learning-rate decay from the initial LR down to zero.
    scheduler = torch.optim.lr_scheduler.LambdaLR(
        optimizer, lambda step: (1.0-step/args.total_iters), last_epoch=-1)
    if args.local_rank == 0:
        # Only rank 0 writes TensorBoard logs.
        writer = SummaryWriter("./runs/%s-%05d" %
                               (time.strftime("%m-%d", time.localtime()), random.randint(0, 100)))
    # Prepare data
    train_loader = get_train_loader(
        args.batch_size, args.local_rank, args.num_workers, args.total_iters)
    train_dataprovider = DataIterator(train_loader)
    val_loader = get_val_loader(args.batch_size, args.num_workers)
    val_dataprovider = DataIterator(val_loader)
    archloader = ArchLoader("data/Track1_final_archs.json")
    train(train_dataprovider, val_dataprovider, optimizer, scheduler,
          model, archloader, criterion_smooth, args, val_iters, args.seed, writer)
def train(train_dataprovider, val_dataprovider, optimizer, scheduler, model, archloader, criterion, args, val_iters, seed, writer=None):
    """Train the supernet with fair architecture sampling.

    For every optimisation step a batch of architectures is drawn
    (``generate_niu_fair_batch``); gradients from all of them are
    accumulated before a single optimizer step (FairNAS-style training).
    Logging/validation/checkpointing happen on rank 0 only.

    NOTE(review): indentation was reconstructed from a flattened dump —
    the placement of clip/step after the architecture loop follows the
    usual fair-sampling pattern; confirm against the original repo.
    """
    objs, top1 = AvgrageMeter(), AvgrageMeter()
    # Pre-allocate zero gradients so accumulation across sampled
    # architectures starts from a defined state.
    for p in model.parameters():
        p.grad = torch.zeros_like(p)
    for step in range(args.total_iters):
        model.train()
        t0 = time.time()
        image, target = train_dataprovider.next()
        datatime = time.time() - t0  # time spent waiting on the data pipeline
        n = image.size(0)
        optimizer.zero_grad()
        image = Variable(image, requires_grad=False).cuda(args.gpu)
        target = Variable(target, requires_grad=False).cuda(args.gpu)
        # Fair Sampling: one forward/backward per sampled architecture,
        # gradients accumulate in-place.
        fair_arc_list = archloader.generate_niu_fair_batch()
        for ii, arc in enumerate(fair_arc_list):
            logits = model(image, archloader.convert_list_arc_str(arc))
            loss = criterion(logits, target)
            # Average the loss across ranks for logging purposes only.
            loss_reduce = reduce_tensor(loss, 0, args.world_size)
            loss.backward()
        nn.utils.clip_grad_value_(model.parameters(), args.grad_clip)
        optimizer.step()
        scheduler.step()
        # Accuracy is computed from the last sampled architecture's logits.
        prec1, _ = accuracy(logits, target, topk=(1, 5))
        objs.update(loss_reduce.data.item(), n)
        top1.update(prec1.data.item(), n)
        if step % args.report_freq == 0 and args.local_rank == 0:
            now = time.strftime('%Y-%m-%d %H:%M:%S',
                                time.localtime(time.time()))
            print('{} |=> train: {} / {}, lr={}, loss={:.2f}, acc={:.2f}, datatime={:.2f}, seed={}'
                  .format(now, step, args.total_iters, scheduler.get_lr()[0], objs.avg, top1.avg, float(datatime), seed))
        # TensorBoard scalars, rank 0 only.
        if args.local_rank == 0 and step % 5 == 0 and writer is not None:
            writer.add_scalar("Train/loss", objs.avg, step)
            writer.add_scalar("Train/acc1", top1.avg, step)
        # Periodic validation + checkpoint, rank 0 only (uses the
        # unwrapped model via .module since model is DDP-wrapped).
        if args.local_rank == 0 and step % args.report_freq == 0:
            top1_val, objs_val = infer(val_dataprovider, model.module, criterion,
                                       fair_arc_list, val_iters, archloader)
            if writer is not None:
                writer.add_scalar("Val/loss", objs_val, step)
                writer.add_scalar("Val/acc1", top1_val, step)
            save_checkpoint({'state_dict': model.state_dict(),}, step)
def infer(val_dataprovider, model, criterion, fair_arc_list, val_iters, archloader):
    """Evaluate one architecture (the first of ``fair_arc_list``) on the
    validation stream for ``val_iters`` batches.

    Returns
    -------
    (float, float)
        Average top-1 accuracy and average loss over the evaluated batches.
    """
    objs = AvgrageMeter()
    top1 = AvgrageMeter()
    model.eval()
    now = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    print('{} |=> Test rng = {}'.format(now, fair_arc_list[0]))
    with torch.no_grad():
        for step in range(val_iters):
            t0 = time.time()
            image, target = val_dataprovider.next()
            datatime = time.time() - t0  # data-loading latency for this batch
            image = Variable(image, requires_grad=False).cuda()
            target = Variable(target, requires_grad=False).cuda()
            # Only the first sampled architecture is validated.
            logits = model(
                image, archloader.convert_list_arc_str(fair_arc_list[0]))
            loss = criterion(logits, target)
            prec1, _ = accuracy(logits, target, topk=(1, 5))
            n = image.size(0)
            objs.update(loss.data.item(), n)
            top1.update(prec1.data.item(), n)
        # Summary line after the loop (datatime refers to the last batch).
        now = time.strftime('%Y-%m-%d %H:%M:%S',
                            time.localtime(time.time()))
        print('{} |=> valid: step={}, loss={:.2f}, acc={:.2f}, datatime={:.2f}'.format(
            now, step, objs.avg, top1.avg, datatime))
    return top1.avg, objs.avg
if __name__ == '__main__':
    # Entry point: launch distributed supernet training (main() is defined above).
    main()
| [
"1115957667@qq.com"
] | 1115957667@qq.com |
ca666a974d6401381cd73528686f07a81dec9262 | a9d83530b82817c9b997a9f9d838f1764b05cbba | /django_project/lovely/views.py | 37706331d9d874907c4ed25e4fdca77337133683 | [] | no_license | geronimo03/front-end-curriculum | 561f07a9b8bea840b0841374397dc1f4dbeabdf4 | 905a602d992bc0a0885e8832239a98f51088f820 | refs/heads/master | 2022-11-19T01:06:50.134083 | 2020-07-17T10:26:19 | 2020-07-17T10:26:19 | 260,956,266 | 0 | 0 | null | 2020-05-03T15:16:18 | 2020-05-03T15:16:18 | null | UTF-8 | Python | false | false | 1,495 | py | from django.shortcuts import render, redirect
from .models import Post
from .forms import PostForm
import pdb
def first(request):
    """Render the static landing page."""
    template_name = 'lovely/first.html'
    return render(request, template_name)
def second(request):
    """List every Post on the 'second' page."""
    posts = Post.objects.all()
    return render(request, 'lovely/second.html', {'posts': posts})
def third(request):
    """Render the static third page."""
    template_name = 'lovely/third.html'
    return render(request, template_name)
def new(request):
    """Show an empty form for creating a Post."""
    form = PostForm()
    return render(request, 'lovely/new.html', {'form': form})
def create(request):
    """Persist a new Post submitted from the 'new' form.

    Only a valid POST saves anything; invalid submissions and non-POST
    requests fall through to the listing redirect.  The unconditional
    return fixes the original defect where those paths returned None,
    which makes Django raise ValueError ("view didn't return an
    HttpResponse").
    """
    if request.method == "POST":
        form = PostForm(request.POST)
        if form.is_valid():
            form.save()
    # Always hand back a response.
    return redirect('second')
def view(request, post_id):
    """Display a single Post identified by its primary key."""
    post = Post.objects.get(pk=post_id)
    return render(request, 'lovely/view.html', {'post': post})
def edit(request, post_id):
    """Show the edit form pre-populated with an existing Post."""
    post = Post.objects.get(pk=post_id)
    form = PostForm(instance=post)
    return render(request, 'lovely/edit.html', {'post': post, 'form': form})
def update(request, post_id):
    """Apply edits from the 'edit' form to an existing Post.

    The unconditional return fixes the original defect where a GET or an
    invalid form returned None, making Django raise ValueError instead of
    serving a response.
    """
    if request.method == "POST":
        post = Post.objects.get(pk=post_id)
        form = PostForm(request.POST, instance=post)
        if form.is_valid():
            form.save()
    # Always return to the detail page, whatever happened above.
    return redirect('lovely:view', post_id)
def delete(request, post_id):
    """Delete the Post with pk=post_id, then return to the listing.

    Deletion only happens on POST; the unconditional return fixes the
    original defect where a GET returned None (Django raises ValueError
    for views that do not return an HttpResponse).
    """
    if request.method == "POST":
        post = Post.objects.get(pk=post_id)
        post.delete()
    return redirect('second')
| [
"christiefan03@gmail.com"
] | christiefan03@gmail.com |
0eea1f221c0a6316a2eed2457dffd111f15c8a0b | 16e69196886254bc0fe9d8dc919ebcfa844f326a | /edc/core/bhp_content_type_map/migrations/0005_update_module_name.py | 67a886c53b822966a1fc216991741f16ccb05bd3 | [] | no_license | botswana-harvard/edc | b54edc305e7f4f6b193b4498c59080a902a6aeee | 4f75336ff572babd39d431185677a65bece9e524 | refs/heads/master | 2021-01-23T19:15:08.070350 | 2015-12-07T09:36:41 | 2015-12-07T09:36:41 | 35,820,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,009 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
    """South data migration: copy each ContentTypeMap row's ``model`` value
    into the (then newly added) ``module_name`` column."""

    def forwards(self, orm):
        "Write your forwards methods here."
        # Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
        ContentTypeMap = orm['bhp_content_type_map.ContentTypeMap']
        for obj in ContentTypeMap.objects.all():
            obj.module_name = obj.model
            obj.save()
            print (obj.app_label, obj.module_name)

    def backwards(self, orm):
        "Write your backwards methods here."
        # Intentionally a no-op: module_name values are simply left in place.
        pass

    # Frozen ORM snapshot auto-generated by South; do not edit by hand.
    models = {
        'bhp_content_type_map.contenttypemap': {
            'Meta': {'ordering': "['name']", 'unique_together': "(['app_label', 'model'],)", 'object_name': 'ContentTypeMap'},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'mac.local'", 'max_length': '50', 'db_index': 'True', 'blank': 'True'}),
            'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'mac.local'", 'max_length': '50', 'db_index': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'module_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
            'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'db_index': 'True'}),
            'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'db_index': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['bhp_content_type_map']
    symmetrical = True
| [
"ew2789@gmail.com"
] | ew2789@gmail.com |
d7bdeb211ad293ec25a6cd5c0651169bf707cf41 | efb01c5a5f00e918780009d8e870c080ece8cdc5 | /tcapy/vis/sessionmanager.py | 2aaa853615f7ca3d9776036187eb762a0a27eca4 | [
"Apache-2.0"
] | permissive | PontusHultkrantz/tcapy | 0525af2b260377a3a5479112a5a8991efc581e7d | 3699c70031c95943f70a732849a1a6dac26760e9 | refs/heads/master | 2022-10-05T15:00:41.192500 | 2020-06-05T18:25:44 | 2020-06-05T18:25:44 | 269,728,925 | 0 | 0 | Apache-2.0 | 2020-06-05T18:00:02 | 2020-06-05T18:00:01 | null | UTF-8 | Python | false | false | 12,191 | py | from __future__ import print_function
__author__ = 'saeedamen' # Saeed Amen / saeed@cuemacro.com
#
# Copyright 2018 Cuemacro Ltd. - http//www.cuemacro.com / @cuemacro
#
# See the License for the specific language governing permissions and limitations under the License.
#
import uuid
from flask import session
from dash.dependencies import Output, Input, State
from tcapy.conf.constants import Constants
from tcapy.util.utilfunc import UtilFunc
constants = Constants()
class CallbackManager(object):
    """This class creates the appropriate Input and Output objects to wrap around dash components. It abstracts away
    some of the complexity of dash, by allowing the user simplify to specify the string of the dash component.

    It will then work out the dash type from the component name. Users need to be careful to name the dash components
    with the correct names. For examples, all plots, must have 'plot' within their name.
    """

    def input_callback(self, page, input):
        """Create input callbacks for Dash components, which can be used to trigger callbacks. We can have multiple
        input callbacks for methods.

        Parameters
        ----------
        page : str
            Name of the page (eg. 'detailed')

        input : str (list)
            Dash components where we wish to add dash based input callbacks

        Returns
        -------
        dash.dependencies.Input (list)
        """
        if not (isinstance(input, list)):
            input = [input]

        # Component ids are '<page>-<name>'; an explicit ':type' suffix is
        # stripped here and resolved by _find_type.
        input_list = [Input(page + '-' + i.split(':')[0], self._find_type(i)) for i in input]

        return input_list

    def output_callback(self, page, output):
        """Create a output callback for a Dash component, which can be used to trigger callbacks. Note, that we can
        only have a single output callback for each method.

        Parameters
        ----------
        page : str
            Name of the page (eg. 'detailed')

        output : str
            Dash component where we wish to add dash based output callbacks

        Returns
        -------
        dash.dependencies.Output (list)
        """
        return Output(page + '-' + output.split(':')[0], self._find_type(output))

    def state_callback(self, page, state):
        """Create state callbacks for Dash components, which can be used to trigger callbacks. We can have multiple
        state callbacks for methods.

        Parameters
        ----------
        page : str
            Name of the page (eg. 'detailed')

        state : str (list)
            Dash components where we wish to add dash based state callbacks

        Returns
        -------
        dash.dependencies.State (list)
        """
        if not (isinstance(state, list)):
            state= [state]

        state_list = [State(page + '-' + s.split(':')[0], self._find_type(s)) for s in state]

        return state_list

    def _find_type(self, tag):
        """Returns the dash type for a dash component.

        Tags may carry an explicit type after a colon (eg. 'foo:children');
        otherwise the type is inferred from naming conventions.  Returns
        None implicitly for tags matching no rule.

        Parameters
        ----------
        tag : str
            Tag for a Dash component

        Returns
        -------
        str
        """
        # Explicit override: 'name:type' wins over every convention below.
        if ":" in tag:
            return tag.split(":")[1]

        # datepicker
        if 'dtpicker' in tag:
            return 'date'

        # HTML links
        if 'link' in tag:
            return 'href'

        # table like objects
        if 'table' in tag:
            if constants.gui_table_type == 'dash':
                return 'data'

            return 'children'

        # labels
        if 'status' in tag:
            return 'children'

        # plotly objects (but 'val' takes precedence, checked below)
        if 'plot' in tag and 'val' not in tag:
            return 'figure'

        # drop down values
        if 'val' in tag:
            return 'value'

        # HTML ordinary buttons ('upbutt' does not match this branch first
        # because 'button' is checked on the full tag string)
        if 'button' in tag:
            return 'n_clicks'

        # HTML upload buttons
        if 'upbutt' in tag:
            return 'contents'

        if 'uploadbox' in tag:
            return 'contents'
import base64
import flask
class SessionManager(object):
    """Manages the caching of properties for a user's session. We use this extensively, to identify users and also to
    store variables relating to users on the server side.

    It is used for example, for keeping track of which lines have plotted, user's zoom actions, whether tcapy has already
    plotted a particular dataset etc.
    """

    def __init__(self):
        # Helper with list-flattening utilities used by several methods below.
        self._util_func = UtilFunc()

    # session ID management functions

    def get_session_id(self):
        """Gets the current user's session ID and generates a unique one if necessary.

        A fresh ID is a UUID4, suffixed with '_<username>' when HTTP basic
        auth credentials are present.

        Returns
        -------
        str
        """
        if 'id' not in session:
            id = str(uuid.uuid4())

            username = self.get_username()

            if username is not None:
                username = '_' + username
            else:
                username = ''

            # NOTE(review): the stored value includes the username suffix but
            # the freshly generated branch returns the bare UUID — confirm
            # this asymmetry is intended.
            session['id'] = id + username
        else:
            id = session['id']

        # Normalise bytes (legacy sessions) to str.
        if not isinstance(id, str):
            id = id.decode("utf-8")

        return id

    def get_username(self):
        """Extract the username from an HTTP Basic Authorization header, or
        None when no header is present."""
        header = flask.request.headers.get('Authorization', None)

        if not header:
            return None

        # Header format: 'Basic <base64(user:pass)>'
        username_password = base64.b64decode(header.split('Basic ')[1])
        username_password_utf8 = username_password.decode('utf-8')
        username, password = username_password_utf8.split(':')

        return username

    def set_session_flag(self, tag, value=None):
        """Sets a value with a specific tag in the session dictionary, which is essentially unique for every user.

        Parameters
        ----------
        tag : str (dict)
            The "hash key" for our variable

        value : str
            What to set the value in our hash table

        Returns
        -------

        """
        if isinstance(tag, str):
            tag = [tag]

        # A dict maps tag -> value pairs; recurse once per entry.
        if isinstance(tag, dict):
            for t in tag:
                self.set_session_flag(t, value=tag[t])

            return

        tag = self._util_func.flatten_list_of_lists(tag)

        for t in tag:
            session[t] = value

    def get_session_flag(self, tag):
        """Gets the value of a tag in the user's session

        Booleans are returned as-is; every other stored value is coerced to
        str.  Missing tags yield None.

        Parameters
        ----------
        tag : str
            Tag to be fetched

        Returns
        -------
        str
        """
        if tag in session:
            if isinstance(session[tag], bool):
                return session[tag]

            return str(session[tag])

        return None

    ##### these methods are for keeping track of which lines, user zooms have been plotted for each chart in the user's
    ##### session object

    def check_lines_plotted(self, lines_to_plot, tag):
        """Checks if the lines have been plotted for a particular user, by checking the plot's tag in their user session

        Comparison is set-based, so ordering of the line names is ignored.

        Parameters
        ----------
        lines_to_plot : str (list)
            Lines to be plotted

        tag : str
            Tag of plotted lines

        Returns
        -------
        bool
        """
        if tag in session:
            lines_plotted = session[tag]

            if set(lines_to_plot) == set(lines_plotted):
                return True

        return False

    def check_relayoutData_plotted(self, relayoutData, tag):
        """Checks if the relayout data (ie. related to user's clicks, such as when they zoom in) has already been plotted.

        Parameters
        ----------
        relayoutData : dict

        tag : str
            Tag referring to a particular plot

        Returns
        -------

        """
        if tag in session:
            # relayoutDataSet = None
            # sessionTagSet = None
            #
            # if relayoutData is not None:
            #     relayoutDataSet = set(relayoutData)
            #
            # if session[tag] is not None:
            #     sessionTagSet = set(session[tag])

            # if relayoutData is None:
            #    return False

            if relayoutData == session[tag]:
                return True

        return False

    def set_lines_plotted(self, lines_to_plot, tag):
        """Sets the lines plotted for a particular chart tag in the user's session

        Parameters
        ----------
        lines_to_plot : str (list)
            Lines plotted

        tag : str
            Tag of the plot

        Returns
        -------

        """
        session[tag] = lines_to_plot

    def set_relayoutData_plotted(self, relayoutData, tag):
        """Sets the user's clicks (typically for zooming into charts) for a particular chart

        Parameters
        ----------
        relayoutData : dict
            Details a user's click on the chart

        tag : str
            Tag referring to the plot

        Returns
        -------

        """
        session[tag] = relayoutData

    def set_username(self, username):
        # Cache the authenticated username in the session.
        session['username'] = username

    ##### We identify when a user has "clicked" a button by change in the number of clicks (Dash documentation recommends
    ##### this to handle user clicks)

    def get_session_clicks(self, tag):
        """Gets the number of clicks for the tag. If doesn't exist, we automatically set the tag as 0.

        Parameters
        ----------
        tag : str
            The tag for which we want to return the number of clicks

        Returns
        -------
        Number of clicks by current user
        """
        if tag not in session:
            return 0

        return session[tag]

    def set_session_clicks(self, tag, n_clicks, old_clicks=None):
        """Sets the number of clicks in the current user's session

        When ``old_clicks`` is given, the stored count is only overwritten
        if it would decrease (i.e. the counter was reset client-side).

        Parameters
        ----------
        tag : str
            Tag to store the user's clicks under

        n_clicks : int
            Number of clicks to set

        Returns
        -------

        """
        if old_clicks is None:
            session[tag] = n_clicks
        elif old_clicks > n_clicks:
            session[tag] = n_clicks

    def check_session_tag(self, tag):
        """Checks if a tag exists in the user's session, and if so returns the value of that tag in the user's session

        Parameters
        ----------
        tag : str
            Tag to check

        Returns
        -------
        str or bool
        """
        if tag in session:
            return session[tag]

        return False

    def exists_session_tag(self, tag):
        """Does a tag exist in the current user session?

        Parameters
        ----------
        tag : str

        Returns
        -------
        bool
        """
        return tag in session

    def check_session_reset_tag(self, tag):
        """Checks if a tag is in session (if that tag exists already and is "True", then we reset it to "False"), otherwise
        return "False"

        Parameters
        ----------
        tag : str
            Tags to check

        Returns
        -------
        bool
        """
        if tag in session:
            old_tag = session[tag]

            if old_tag:
                # Consume the flag: reset to False and report it was set.
                session[tag] = False

                return True

            return False

        return False

    def create_calculated_flags(self, prefix, lst=None, lst2=None):
        """Creates a list for a combination of prefix and list elements.

        Either ``prefix`` or ``lst`` may be the list side; when ``lst2`` is
        supplied, the cross product '<prefix-item>-<lst2-item>' is returned.

        Parameters
        ----------
        prefix : str
            Prefix (typically a page name like 'detailed')

        lst : str (list)
            Tags will contain these

        lst2 : str (list)
            Tags will contain these

        Returns
        -------
        str (list)
        """
        if isinstance(prefix, list):
            prefix = self._util_func.flatten_list_of_lists(prefix)

            lst = [x + '-' + lst for x in prefix]
        elif isinstance(lst, list):
            lst = self._util_func.flatten_list_of_lists(lst)

            lst = [prefix + '-' + x for x in lst]

        if lst2 is None:
            return lst

        # Cross product with the secondary suffix list.
        lst3 = []

        for i in lst2:
            for j in lst:
                lst3.append(j + '-' + i)

        return lst3
"saeedamen@hotmail.com"
] | saeedamen@hotmail.com |
102b5df4584e139c55b62ed13926113f62656f79 | 0d230b383dbee273251fb48365d01896e3cee1c6 | /neural-network/nn_helloworld.py | cd942aa6abade08ba9798aa99a07496132a1d8cf | [] | no_license | tmpbook/machine-learning-notebook | 139e622915ef16777e5ccfc89a3a93480394d0f6 | 00c1f450e165bd382833f4c21015ae8421e0dfc1 | refs/heads/master | 2019-03-08T21:27:16.011115 | 2017-11-04T12:34:12 | 2017-11-04T12:34:12 | 100,873,749 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,486 | py | #coding:utf8
from numpy import exp, array, random, dot
class NeuralNetwork():
    """Single-neuron network trained by gradient descent (Python 2 code)."""

    def __init__(self):
        # Seed the random number generator so every run gives the same result.
        random.seed(1)

        # Model a single neuron with 3 input connections and 1 output connection.
        # Assign random weights to a 3 x 1 matrix, range -1..1, mean 0.
        self.synaptic_weights = 2 * random.random((3, 1)) - 1

    # Sigmoid function, an S-shaped curve.
    # Normalises the weighted sum of the inputs into the range 0..1.
    def __sigmoid(self, x):
        return 1 / (1 + exp(-x))

    # Derivative of the sigmoid function (the gradient of the S-curve);
    # expresses how confident we are about the current weights.
    def __sigmoid_derivative(self, x):
        return x * (1 - x)

    # Train the network through a process of trial and error,
    # adjusting the synaptic weights on every iteration.
    def train(self, training_set_inputs, training_set_outputs, number_of_training_iterations):
        for iteration in xrange(number_of_training_iterations):  # xrange: Python 2
            # Feed the training set through the network.
            output = self.think(training_set_inputs)

            # Compute the error (difference between desired and actual output).
            error = training_set_outputs - output

            # Multiply the error by the input and by the sigmoid gradient.
            # Less-confident weights are adjusted more strongly;
            # zero-valued inputs do not change their weights.
            adjustment = dot(training_set_inputs.T, error * self.__sigmoid_derivative(output))

            # Adjust the weights.
            self.synaptic_weights += adjustment

    # The network "thinks": forward-pass the inputs through the neuron.
    def think(self, inputs):
        return self.__sigmoid(dot(inputs, self.synaptic_weights))
if __name__ == "__main__":
    # Initialise the neural network.
    neural_network = NeuralNetwork()

    print "随机的初始突触权重:"
    print neural_network.synaptic_weights

    # Training set: four samples, each with 3 inputs and 1 output.
    training_set_inputs = array([[0, 0, 1], [1, 1, 1], [1, 0, 1], [0, 1, 1]])
    training_set_outputs = array([[0, 1, 1, 0]]).T

    # Train the network on the set, repeating 10000 times and making a
    # small adjustment each pass.
    neural_network.train(training_set_inputs, training_set_outputs, 10000)

    print "训练后的突触权重:"
    print neural_network.synaptic_weights

    # Test the network against a new, unseen input.
    print "考虑新的形势 [1, 0, 0] -> ?: "
    print neural_network.think(array([1, 0, 0]))
"nauy2011@126.com"
] | nauy2011@126.com |
fa918a4f187dcdded51b5f7c1612e5a01c064faa | 6d247ab87ad016c1ed1bba2b1e8e2e99f154f619 | /level-0/dictionnary_challenge/dictionnary_challenge.py | 458593bf1be9df49cb787ce1e17fb088ba469bd1 | [] | no_license | pcardotatgit/python_challenges | 089e75c2a971caf8568bea67ba38dd1d02ad6875 | b66a381f95c0e400bd4e86d7f79908d389bcc74b | refs/heads/master | 2023-05-12T06:39:37.750921 | 2023-05-01T10:36:38 | 2023-05-01T10:36:38 | 237,676,073 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,197 | py | dict_data=[
{
"albumId": 1,
"id": 1,
"title": "accusamus beatae ad facilis cum similique qui sunt",
"url": "https://via.placeholder.com/600/92c952",
"thumbnailUrl": "https://via.placeholder.com/150/92c952"
},
{
"albumId": 1,
"id": 2,
"title": "reprehenderit est deserunt velit ipsam",
"url": "https://via.placeholder.com/600/771796",
"thumbnailUrl": "https://via.placeholder.com/150/771796"
},
{
"albumId": 1,
"id": 3,
"title": "officia porro iure quia iusto qui ipsa ut modi",
"url": "https://via.placeholder.com/600/24f355",
"thumbnailUrl2": "https://via.placeholder.com/150/24f355"
},
{
"albumId": 1,
"id": 4,
"title": "culpa odio esse rerum omnis laboriosam voluptate repudiandae",
"url": "https://via.placeholder.com/600/d32776",
"thumbnailUrl": "https://via.placeholder.com/150/d32776"
},
{
"albumId": 1,
"id": 5,
"title": "natus nisi omnis corporis facere molestiae rerum in",
"url": "https://via.placeholder.com/600/f66b97",
"thumbnailUrl": "https://via.placeholder.com/150/f66b97"
}
]
for item in dict_data:
print(item['title'],item['thumbnailUrl'])
print('BINGO') | [
"patrick.cardot@gmail.com"
] | patrick.cardot@gmail.com |
19099c9d651d594c277ba49bff0c679d160f9bfc | 17853d2708dde8882703b694fadd8708b89a50ff | /Quiz 8.py | c54a6d7b68b0c4e1fd6faab9571978fe4a334b84 | [] | no_license | vuhoang131/Quiz-8 | e7e8ba7a54f50de808f31bdc33c25a7efdabaa12 | 55b6c3a6e87f53bfca63c5b99c11fce2e34dba58 | refs/heads/master | 2022-04-15T19:21:20.159954 | 2020-04-14T20:50:17 | 2020-04-14T20:50:17 | 255,697,426 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,905 | py | from tkinter import *
from math import *
root = Tk()
root.wm_title("Quiz 8")
class Application(Frame):
    """Simple two-operand calculator GUI built on tkinter."""

    # The class body declares a/b as module globals so the *assignments* in
    # the class body bind at module level, letting the methods below read
    # the Entry widgets as plain globals.
    global a, b

    # create the box for user input
    frame1 = Frame(root)
    frame1.pack()

    # box for number a
    Label(frame1, text="Number A").grid(row=0, column=0, sticky=W)
    a = IntVar()
    # NOTE: 'a' is immediately rebound to the Entry widget; the IntVar is
    # only used as its textvariable.
    a = Entry(frame1, textvariable = a )
    a.grid(row=0, column=1, sticky=W)

    # box for number b
    Label(frame1, text="Number B").grid(row=1, column=0, sticky=W)
    b = IntVar()
    b = Entry(frame1, textvariable = b)
    b.grid(row=1, column=1, sticky=W)

    # calculation function: each handler appends a result label to the root
    # window showing the formatted expression and its value.
    def multiplication(self):
        output = StringVar()
        output.set(a.get() + " * " + b.get() + " = " \
            + str(int(a.get()) * int(b.get())))  # set output for label statement below
        label = Label(root, textvariable = output)  # textvariable will print variable output
        label.pack()

    def division(self):
        output = StringVar()
        output.set(a.get() + " / " + b.get() + " = " \
            + str(int(a.get()) / int(b.get())))
        label = Label(root, textvariable = output)
        label.pack()

    def summation(self):
        output = StringVar()
        output.set(a.get() + " + " + b.get() + " = " \
            + str(int(a.get()) + int(b.get())))
        label = Label(root, textvariable = output)
        label.pack()

    def subtraction(self):
        output = StringVar()
        output.set(a.get() + " - " + b.get() + " = " \
            + str(int(a.get()) - int(b.get())))
        label = Label(root, textvariable = output)
        label.pack()

    def squareroot(self):
        # Shows the square root of each operand on its own label.
        output1 = StringVar()
        output1.set("Square root of " + a.get() + " = "\
            + str(sqrt(int(a.get()))))
        label1 = Label(root, textvariable = output1)
        label1.pack()

        output2 = StringVar()
        output2.set("Square root of " + b.get() + " = "\
            + str(sqrt(int(b.get()))))
        label2 = Label(root, textvariable = output2)
        label2.pack()

    # create the button option for user
    def createWidgets(self):
        # multiply button
        self.multiply = Button(self)
        self.multiply["text"] = "Multiply"
        self.multiply["fg"] = "blue"
        self.multiply["command"] = self.multiplication
        self.multiply.pack({"side": "left"})

        # divide button
        self.divide = Button(self)
        self.divide["text"] = "Divide"
        self.divide["fg"] = "blue"
        self.divide["command"] = self.division
        self.divide.pack({"side": "left"})

        # addition button
        self.add = Button(self)
        self.add["text"] = "Add"
        self.add["fg"] = "blue"
        self.add["command"] = self.summation
        self.add.pack({"side": "left"})

        # subtract button
        self.subtract = Button(self)
        self.subtract["text"] = "Subtract"
        self.subtract["fg"] = "blue"
        self.subtract["command"] = self.subtraction
        self.subtract.pack({"side": "left"})

        # sqrt button
        self.sqrt = Button(self)
        self.sqrt["text"] = "Sqrt"
        self.sqrt["fg"] = "blue"
        self.sqrt["command"] = self.squareroot
        self.sqrt.pack({"side": "left"})

        # quit button
        self.QUIT = Button(self)
        self.QUIT["text"] = "QUIT"
        self.QUIT["fg"] = "red"
        self.QUIT["command"] = self.quit
        self.QUIT.pack({"side": "left"})

    def __init__(self, master=None):
        Frame.__init__(self, master)
        self.pack()
        self.createWidgets()
# name and main window
if __name__ == "__main__":
    # Build the calculator UI and hand control to tkinter's event loop;
    # tear the window down once the loop exits.
    app = Application(master = root)
    app.mainloop()
    root.destroy()
"noreply@github.com"
] | vuhoang131.noreply@github.com |
ab678be4644de8b214559ca3c3adf8d6a0ad72ee | 9e9919bd87dcf4ba2f958d1f96689751503f6527 | /todo/migrations/0002_auto_20200712_2316.py | dcb6509b73c0ffc60a01818efe7d96efc7fbbb39 | [] | no_license | Zeitzew32/Django_native_auth | 711b2bfb559ae33ae9c1d67c2750b293e1366cea | 1f807cd0342a61ee2dfb782b92e7a811ff1495e3 | refs/heads/master | 2022-11-26T02:07:29.213148 | 2020-07-28T19:27:22 | 2020-07-28T19:27:22 | 279,180,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | # Generated by Django 2.1.4 on 2020-07-12 23:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make Todo.datecompleted optional (NULL/blank allowed)."""

    dependencies = [
        ('todo', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='todo',
            name='datecompleted',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
| [
"zeitzew32@gmail.com"
] | zeitzew32@gmail.com |
f4363a8d8c2b5f17ccd9a2161aa5ba90eb73b75f | daab79e8ac4d4450ae0afd7892d605d7b3fece1d | /change_to_label_date.py | 27df3ee0a85d08a33d78f61e167dbe362c5f4694 | [] | no_license | johndeyrup/isodate-format | d21d57082aa1e2f1063df02923d63fec3a16b821 | 6c62e5bdd09cd8770f2af92b348f3850c5f9869a | refs/heads/master | 2021-01-18T14:49:26.771645 | 2015-03-16T02:32:04 | 2015-03-16T02:32:04 | 32,294,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,667 | py | '''
Created on Feb 20, 2015
Opens a csv, splits the dates, converts the month
to a roman numeral, and then puts the date back into
the format day.month.year
@author: John Deyrup
'''
import csv
def read_csv(csv_name):
    """Open a CSV file and return its first row as a list of strings.

    Returns None for an empty file (same as the original
    loop-and-return-on-first-iteration behaviour).
    """
    with open(csv_name, 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        return next(reader, None)
def write_csv(csv_name, table):
    """Write a single row (``table``) to the file ``csv_name``.

    lineterminator='\\n' keeps the csv module from emitting its default
    '\\r\\n' endings, which would otherwise show up as extra blank lines
    on some platforms.
    """
    with open(csv_name, 'w') as out_file:
        writer = csv.writer(out_file, delimiter=',', lineterminator='\n')
        writer.writerow(table)
def convert_dates(table):
    """Convert ISO-style dates to 'day.month.year' with Roman-numeral months.

    'YYYY-MM-DD' becomes 'D.roman.YYYY' (the day loses any leading zero)
    and 'YYYY-MM' becomes 'roman.YYYY'.  Entries with any other number of
    '-'-separated parts are reported on stdout and skipped.
    """
    romans = ["i", "ii", "iii", "iv", "v", "vi",
              "vii", "viii", "ix", "x", "xi", "xii"]
    month_dic = {"%02d" % (m + 1): numeral for m, numeral in enumerate(romans)}

    converted = []
    for entry in table:
        parts = entry.split("-")
        if len(parts) == 3:
            year, month, day = parts
            converted.append("%d.%s.%s" % (int(day), month_dic[month], year.strip()))
        elif len(parts) == 2:
            year, month = parts
            converted.append("%s.%s" % (month_dic[month], year.strip()))
        else:
            print("unknown string length: " + entry)
    return converted
# Script entry point: read the first row of isodate.csv, convert its dates
# to the day.roman-month.year format and write them to change_date.csv.
csv_table = read_csv('isodate.csv')
write_csv('change_date.csv',convert_dates(csv_table))
| [
"john.deyrup@gmail.com"
] | john.deyrup@gmail.com |
4404bba47db646d9416036a3aa8e535334e7902f | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/222/users/4060/codes/1684_1100.py | 47661cdbda903bedb36e8d7ab2f89e70b5985e55 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | # Teste seu código aos poucos.
# Não teste tudo no final, pois fica mais difícil de identificar erros.
# Ao testar sua solução, não se limite ao caso de exemplo.
a=int(input("Entrada: "))
print("Entrada:",a)
if((a!=2)and(a!=5)and(a!=10)and(a!=20)and(a!=50)and(a!=100)):
print("Animal: Invalido")
elif(a==2):
print("Animal: Tartarura")
elif(a==5):
print("Animal: Garca")
elif(a==10):
print("Animal: Arara")
elif(a==20):
print("Animal: Mico-leao-dourado")
elif(a==50):
print("Animal: Onca-pintada")
else:
print("Animal: Garoupa") | [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
bd965b2182f8c2a95a85e7249a98f5862315cae4 | bbec6289cad1263babb654ba82ed6eb639bd7879 | /filter_fixed_differences.05_07_18.py | e1c861fe09ae4e9651697fd25bfe4406773a9fd2 | [] | no_license | abwolf/NeandSeq_scripts | a4273ef1da7cf188dd8b8bfcb4b58b8952ee45c1 | cda3f5261cf1533ff4c93b69346aebfd371797a3 | refs/heads/master | 2021-07-11T23:31:25.828017 | 2020-06-10T18:31:40 | 2020-06-10T18:31:40 | 144,879,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,909 | py | from __future__ import print_function
import sys
import gzip
# infile : altai_neand_filtered_vcfs.all.allele_info.gz --> allele info (1kG, Ancestral/Derived, and Altai) taken from altai_neand_vcf
# Filter Altai-Neanderthal / 1000 Genomes allele records down to sites that
# look like fixed differences between the Neanderthal genotype and African
# populations (written for Python 2: gzip lines are compared to str).
#
# infile : altai_neand_filtered_vcfs.all.allele_info.gz --> allele info (1kG, Ancestral/Derived, and Altai) taken from altai_neand_vcf
with gzip.open(sys.argv[1], 'rb') as infile:
    for line in infile:
        if "CHROM" in line:
            # Pass the header row straight through.
            print(line.strip(), file=sys.stdout)
        else:
            line_list = line.strip().split('\t')
            CHROM = line_list[0]
            POS = line_list[1]
            REF = line_list[2]        # Reference allele
            ALT = line_list[3]        # Alternative allele
            ALT_1kG = line_list[4]    # Alternative allele in 1kG
            CAnc = line_list[5]       # Chimp Ancestral allele ('?' means missing)
            CAnc = 'NA' if CAnc == '?' else CAnc
            ###############
            # Alternative-allele frequencies; '?' means missing -> 'NA'.
            AF_1kG = line_list[6]     # Alternative allele freq in 1kG
            AF_1kG = 'NA' if AF_1kG == '?' else float(AF_1kG)
            AF_AMR = line_list[7]     # Alternative allele freq in American Indians (1kG)
            AF_AMR = 'NA' if AF_AMR == '?' else float(AF_AMR)
            AF_ASN = line_list[8]     # Alternative allele freq in Asians (1kG)
            AF_ASN = 'NA' if AF_ASN == '?' else float(AF_ASN)
            AF_AFR = line_list[9]     # Alternative allele freq in Africans (1kG)
            AF_AFR = 'NA' if AF_AFR == '?' else float(AF_AFR)
            AF_EUR = line_list[10]    # Alternative allele freq in Europeans (1kG)
            AF_EUR = 'NA' if AF_EUR == '?' else float(AF_EUR)
            ###############
            AC_Neand = line_list[11]  # Alt allele count in genotypes for Neand

            # Set 'print' FLAG to False, switch this later if you want to print the line
            FLAG = False

            # Only consider biallelic SNVs where the Neanderthal is homozygous.
            if REF in ['A', 'C', 'T', 'G'] and ALT in ['A', 'C', 'T', 'G'] and ALT_1kG in ['A', 'C', 'T', 'G'] and AC_Neand in ['0', '2']:
                # Check ALT_1kG matches either REF or ALT alleles
                if ALT_1kG in [REF, ALT]:
                    # If ALT_1kG matches the noted ALT allele
                    if ALT_1kG == ALT:
                        # Keep sites where Neand is fixed for ALT and AFR is fixed for REF.
                        # FIX: test 'NA' before the numeric comparison — the original
                        # order ("AF_AFR <= 0.02 or AF_AFR == 'NA'") compares a str to a
                        # float first, which raises TypeError under Python 3.
                        if AC_Neand == '2':
                            if AF_AFR == 'NA' or AF_AFR <= 0.02:
                                FLAG = True
                        # Keep sites where Neand is fixed for REF and AFR is fixed for ALT.
                        # NOTE(review): on Python 2 an AF_AFR of 'NA' compares greater
                        # than 0.98 and therefore passes — confirm that is intended.
                        if AC_Neand == '0':
                            if AF_AFR >= 0.98:
                                FLAG = True
                    # If ALT_1kG matches the noted REF allele
                    if ALT_1kG == REF:
                        # Keep sites where Neand is fixed for ALT, and AFR is fixed for REF (high ALT_1kG freq)
                        if AC_Neand == '2':
                            if AF_AFR >= 0.98:
                                FLAG = True
                        # Keep sites where Neand is fixed for REF, and AFR is fixed for ALT (low ALT_1kG freq)
                        if AC_Neand == '0':
                            if AF_AFR == 'NA' or AF_AFR <= 0.02:
                                FLAG = True

            # If the line passed the criteria and FLAG has been set to True, print the line
            if FLAG == True:
                line_str = '\t'.join(
                    [CHROM, POS, REF, ALT, ALT_1kG, CAnc,
                     str(AF_1kG), str(AF_AMR), str(AF_ASN),
                     str(AF_AFR), str(AF_EUR), AC_Neand]
                )
                print(line_str, file=sys.stdout)
            else:
                continue
"abwolf@uw.edu"
] | abwolf@uw.edu |
b1f66ccbeda4ec7ce122b2479876cad28296bf24 | dec218b849667741e017c6fb79393a83b98ca130 | /app/decorators.py | 1a3f5bb9dfa3906b20bfaf53b63f5dfc5214dfef | [
"MIT"
] | permissive | kokizzu/lxd-api-gateway | a29629fe0162860dad89c3ff0ca9f2423f2c1061 | d1eb6798635749c4440dfb8916b2f9185f6efe35 | refs/heads/master | 2023-04-23T21:36:15.086932 | 2021-05-10T19:52:08 | 2021-05-10T19:52:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,649 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from functools import wraps
from werkzeug.exceptions import Forbidden
from .models import *
# from flask_jwt_extended.view_decorators import _decode_jwt_from_request
from flask_jwt_extended import verify_jwt_in_request, get_jwt_claims
from lgw import lxd_api_get
def import_user():
    """Resolve the currently authenticated user from the JSON web token.

    :return: the User row whose primary key matches the JWT identity
    """
    try:
        from flask_jwt_extended import get_jwt_identity
        return User.query.get(int(get_jwt_identity()))
    except ImportError:
        raise ImportError(
            'User argument not passed')
def populate_instances_table():
    """
    Synchronise the local Instance table with what each LXD server reports:
    delete rows for instances that disappeared and insert rows for new ones.
    """
    database_lxdservers_list = Server.query.all()
    for lxdserver in database_lxdservers_list:
        # NOTE(review): `all` shadows the builtin of the same name.
        all = []
        try:
            res = lxd_api_get(lxdserver, 'instances')
            for c in res.json()['metadata']:
                all.append(c[15:]) # drop the leading API URL prefix to keep just the instance name
        except Exception as e:
            # Best-effort: an unreachable server simply leaves `all` empty.
            print(e)
        current_instances_list = tuple(all)
        database_instances_list = Instance.query.filter_by(location=lxdserver.name)
        database_instances_list_names = [str(i.name) for i in database_instances_list]
        # Removing old instances from database
        for inst in database_instances_list:
            if not inst.name in current_instances_list:
                db.session.delete(inst)
                db.session.commit()
            # NOTE(review): this second branch can delete a row that was
            # already deleted just above (name missing AND no servers) --
            # confirm this double-delete is intended.
            if len(inst.servers) == 0:
                db.session.delete(inst)
                db.session.commit()
        # Adding new instances to database
        for cinst in current_instances_list:
            if not cinst in database_instances_list_names:
                instance = Instance()
                instance.name = cinst
                instance.location = lxdserver.name
                db.session.add(instance)
                db.session.commit()
                lxdserver.instances.append(instance.id)
                db.session.commit()
    db.session.commit()
def user_has(ability, get_user=import_user):
    """Decorator factory: restrict a view to users that hold *ability*.

    Admins always pass; every other user must belong to a group that grants
    the named ability, otherwise 403 Forbidden is raised.

    :param ability: name of the required Ability
    :param get_user: callable resolving the current user (testing hook)
    :return: the decorating wrapper
    """
    def wrapper(func):
        @wraps(func)
        def inner(*args, **kwargs):
            wanted = Ability.query.filter_by(name=ability).first()
            identity = get_user()
            granted = []
            for grp in identity._groups:
                granted.extend(grp.abilities)
            if not (identity.admin or wanted.id in granted):
                raise Forbidden("You do not have access")
            return func(*args, **kwargs)
        return inner
    return wrapper
def otp_confirmed(fn):
    """Allow the wrapped view only when the request's JWT has OTP confirmed.

    Verifies that a valid access JWT is present, then rejects the request
    with 403 Forbidden when the token's `otp_confirmed` claim equals False.

    :param fn: The view function to decorate
    """
    @wraps(fn)
    def wrapper(*args, **kwargs):
        verify_jwt_in_request()
        claims = get_jwt_claims()
        # Original comparison kept verbatim: only a claim equal to False
        # (which in Python also matches 0) is rejected.
        if claims['otp_confirmed'] == False:
            raise Forbidden("You do not have access")
        return fn(*args, **kwargs)
    return wrapper
| [
"kolarik@skhosting.eu"
] | kolarik@skhosting.eu |
3d315aa3a5b6cb527301df0d7b37db5e3eaa4728 | cbd18a485d7a4f31235a38fabd1d414a14160dbe | /monster.py | 0773d7fe0cd309fb6a63e78bd7167dc68babefc8 | [] | no_license | illoh-py/MonsterGame | 845873bafd08190069768bf3a57d604b07814d4a | dcc9828cf79191fd7c7c8e303db2732a0c89bb73 | refs/heads/main | 2023-07-12T06:10:41.693977 | 2021-08-18T01:01:37 | 2021-08-18T01:01:37 | 364,103,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,803 | py | #Mark Boady and Matthew Burlick
#Drexel University 2020
#CS 172
#This class defines a generic monster
#It doesn't actually DO anything.
#It just gives you a template for how a monster works.
#We can make any number of monsters and have them fight
#they just all need to INHERIT from this one so that they work the same way
#Since this class is not intended to be used
#none of the methods do anything
#This class cannot be used by itself.
from abc import ABC, abstractmethod
### DO NOT CHANGE ANYTHING BELOW IN THIS monster CLASS ####
class Monster(ABC):
    """Abstract template describing the interface every monster must implement.

    Concrete monsters inherit from this class and implement every abstract
    method; none of the methods here do anything themselves.
    """
    #Methods that need to be implemented
    #The description is printed at the start to give additional details
    #The constructor. In order for parameters to have whatever name they want they must be provided in the order shown.
    #The parameters are (in this order, with their expected type in parentheses):
    #n (str) is the name
    #health (int) is the starting health of the monster
    #description (str)
    #basicAttackDamage (int)
    #specialAttackDamage (int)
    #defenseDamage (int)
    #basicAttackName (str)
    #specialAttackName (str)
    #defenseName (str)
    @abstractmethod
    def __init__(self, name, health=100, description = 'Fast and Furious Fur Ball', basicAttackDamage=7,specialAttackDamage=10, defenseDamage=1, basicAttackName='scratch', specialAttackName='special scratch', defenseName='duck'):
        pass
    #String representation of the Monster.
    @abstractmethod
    def __str__(self):
        pass
    #Name the monster we are fighting
    #The description is printed at the start to give
    #additional details
    @abstractmethod
    def getName(self):
        pass
    @abstractmethod
    def getDescription(self):
        pass
    #Basic Attack Move
    #This will be the most common attack the monster makes
    #You are passed the monster you are fighting
    @abstractmethod
    def basicAttack(self,enemy):
        pass
    #Print the name of the attack used
    @abstractmethod
    def getBasicName(self):
        pass
    #Get the basic attack damage amount.
    @abstractmethod
    def getBasicAttackDamage(self):
        pass
    #Defense Move
    #This move is used less frequently to
    #let the monster defend itself
    @abstractmethod
    def defenseAttack(self,enemy):
        pass
    #Print out the name of the attack used
    @abstractmethod
    def getDefenseName(self):
        pass
    #Get the defense attack damage amount
    @abstractmethod
    def getDefenseAttackDamage(self):
        pass
    #Special Attack
    #This move is used less frequently
    #but is the most powerful move the monster has
    @abstractmethod
    def specialAttack(self,enemy):
        pass
    #get the special attack's name
    @abstractmethod
    def getSpecialName(self):
        pass
    #get the special attack damage amount
    @abstractmethod
    def getSpecialAttackDamage(self):
        pass
    #Health Management
    #A monster at health <= 0 is unconscious
    #This returns the current health level
    @abstractmethod
    def getHealth(self):
        pass
    #This returns the maximum health set at creation.
    @abstractmethod
    def getMaximumHealth(self):
        pass
    #This function is used by the other monster to
    #either do damage (positive int) or heal (negative int)
    @abstractmethod
    def doDamage(self,damage):
        pass
    #Reset Health for next match
    @abstractmethod
    def resetHealth(self):
        pass
### DO NOT CHANGE ANYTHING ABOVE IN THIS monster CLASS ####
## TODO: Create a CustomMonster class that inherits the generic monster class.
class CustomMonster(Monster):
    """Concrete, fully configurable implementation of the Monster template."""

    def __init__(self, n, health=100, description='Fast and Furious Fur Ball', basicAttackDamage=7,
                 specialAttackDamage=10, defenseDamage=1, basicAttackName='scratch',
                 specialAttackName='special scratch', basicDefenseName='duck'):
        # Current health is tracked separately from the maximum so it can be
        # reset between matches.
        self.__name = n
        self.__maximumHealth = int(health)
        self.__health = int(health)
        self.__description = description
        self.__basicAttackDamage = basicAttackDamage
        self.__specialAttackDamage = specialAttackDamage
        self.__defenseDamage = defenseDamage
        self.__basicAttackName = basicAttackName
        self.__specialAttackName = specialAttackName
        self.__basicDefenseName = basicDefenseName

    def __str__(self):
        return f"{self.__description}"

    def getName(self):
        return f"{self.__name}"

    def getDescription(self):
        return f"{self.__description}"

    def getBasicName(self):
        return f"{self.__basicAttackName}"

    def getBasicAttackDamage(self):
        return self.__basicAttackDamage

    def getDefenseName(self):
        return f"{self.__basicDefenseName}"

    def getDefenseAttackDamage(self):
        return self.__defenseDamage

    def getSpecialName(self):
        return f"{self.__specialAttackName}"

    def getSpecialAttackDamage(self):
        return self.__specialAttackDamage

    def getHealth(self):
        return self.__health

    def getMaximumHealth(self):
        return self.__maximumHealth

    def doDamage(self, damage):
        # Positive damage hurts; a negative amount heals.
        self.__health -= damage

    def basicAttack(self, enemy):
        enemy.doDamage(self.getBasicAttackDamage())

    def defenseAttack(self, enemy):
        enemy.doDamage(self.getDefenseAttackDamage())

    def specialAttack(self, enemy):
        enemy.doDamage(self.getSpecialAttackDamage())

    def resetHealth(self):
        self.__health = self.__maximumHealth
| [
"noreply@github.com"
] | illoh-py.noreply@github.com |
97e8faa1c969c61eecd8ea2b0c2d73d0b950829d | 825aca806721b8e639e5d704e43df1f1f74aebd8 | /projects/venv/bin/symilar | 1a7c4da8cdc73894bac3e6dc188b93aacddea7b3 | [] | no_license | eahnivy8/Sparta | 79c5797a2043c7fc82ff9827b6dd37a29ff4e354 | 6d0e99bbeed7088e480dd412cc211d60127964b6 | refs/heads/master | 2022-11-07T07:19:28.858937 | 2020-06-26T04:39:00 | 2020-06-26T04:39:00 | 264,405,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | #!/Users/edwardahn/Desktop/Sparta/projects/venv/bin/python3
# -*- coding: utf-8 -*-
# Auto-generated console-script shim for pylint's `symilar` entry point:
# normalises argv[0] and delegates to pylint's run_symilar().
import re
import sys
from pylint import run_symilar
if __name__ == '__main__':
    # Strip any installer-added wrapper suffix (-script.py / .exe) so
    # argv[0] looks like the plain command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run_symilar())
| [
"eahnivy8@gmail.com"
] | eahnivy8@gmail.com | |
fc1fa4990f3eb7c426991f2e920afe5ac67e8b2a | 150d9e4cee92be00251625b7f9ff231cc8306e9f | /ReverseWordsIII.py | 35bceae2ed4be0c631984cf4c703decb182946b7 | [] | no_license | JerinPaulS/Python-Programs | 0d3724ce277794be597104d9e8f8becb67282cb0 | d0778178d89d39a93ddb9b95ca18706554eb7655 | refs/heads/master | 2022-05-12T02:18:12.599648 | 2022-04-20T18:02:15 | 2022-04-20T18:02:15 | 216,547,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,277 | py | '''
557. Reverse Words in a String III
Given a string s, reverse the order of characters in each word within a sentence while still preserving whitespace and initial word order.
Example 1:
Input: s = "Let's take LeetCode contest"
Output: "s'teL ekat edoCteeL tsetnoc"
Example 2:
Input: s = "God Ding"
Output: "doG gniD"
Constraints:
1 <= s.length <= 5 * 104
s contains printable ASCII characters.
s does not contain any leading or trailing spaces.
There is at least one word in s.
All the words in s are separated by a single space.
'''
class Solution(object):
    def reverseWords(self, s):
        """
        Reverse the characters of each whitespace-separated word in *s*
        while keeping the words in their original order.

        :type s: str
        :rtype: str
        """
        # The original built per-word character lists and swapped characters
        # with an index loop; slicing with [::-1] and join is the idiomatic
        # equivalent.  The problem guarantees single-space separators, so
        # split()/join(' ') reproduces the layout exactly.
        return ' '.join(word[::-1] for word in s.split())
obj = Solution()  # demo solver used by the print() on the following line
print(obj.reverseWords("Let's take LeetCode contest")) | [
"jerinsprograms@gmail.com"
] | jerinsprograms@gmail.com |
76edb0c378f88473fcb9cb8e696f1c249bd9e8bd | 68e9838cf70085630dcbf08a02adf2fee41a538c | /078_subsets.py | 5193664fefdab7f3ad0ab8889eb0191d832ca704 | [] | no_license | harrifeng/leet-in-python | b8dcb880eae7de843f5115f353be8bae642410ff | ba8014e6fa3b0dc4fdc33e1cf23c98076a105225 | refs/heads/master | 2020-04-06T13:30:09.852368 | 2016-09-22T04:43:58 | 2016-09-22T04:43:58 | 52,344,704 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,227 | py | """
Given a set of distinct integers, nums, return all possible subsets.
Note:
Elements in a subset must be in non-descending order.
The solution set must not contain duplicate subsets.
For example,
If nums = [1,2,3], a solution is:
[
[3],
[1],
[2],
[1,2,3],
[1,3],
[2,3],
[1,2],
[]
]
"""
import unittest
class MyTest(unittest.TestCase):
    def test(self):
        """subsets([1, 2, 3]) must yield all eight subsets (order ignored)."""
        expected = [[3], [1], [2], [1, 2, 3], [1, 3], [2, 3], [1, 2], []]
        actual = Solution().subsets([1, 2, 3])
        self.assertEqual(sorted(expected), sorted(actual))
class Solution(object):
    def subsets(self, nums):
        """
        Return every subset of *nums*; elements inside each subset appear in
        non-descending order.

        :type nums: List[int]
        :rtype: List[List[int]]
        """
        ordered = sorted(nums)
        result = []

        def dfs(start, picked):
            # Record the current combination, then branch on every later element.
            result.append(picked[:])
            for idx in range(start, len(ordered)):
                picked.append(ordered[idx])
                dfs(idx + 1, picked)
                picked.pop()

        dfs(0, [])
        return result
| [
"haoran.feng@sap.com"
] | haoran.feng@sap.com |
87dc46c97d4047d73908836fa9cea55aafcbdcd3 | 24fac945c7825c502f9fb2e6df06db26b80b5676 | /blogdjangogirls/urls.py | 230fe7f82e3aea6d486403578d1fc5bd5bdbd700 | [] | no_license | mario220696/my-first-blog | 8f0a9883c4ecde58a8c3483e5ad0823b81a4db7c | 72ec03a579965c984a770f0c4cca0a3eca28aa46 | refs/heads/master | 2020-03-17T11:48:03.954834 | 2018-05-15T19:28:36 | 2018-05-15T19:28:36 | 133,564,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 757 | py | """blogdjangogirls URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
    # Only the Django admin is routed at project level so far.
    path('admin/', admin.site.urls),
]
| [
"vagrant@vagrant.vm"
] | vagrant@vagrant.vm |
f57f9d108db31cdcd32ba1693fbc41e5d0df608d | 8c7bb28ac12f5003074abea78bba147332d011ea | /main.py | 616a73f4b9d3765ba7bd85a60d886118fe374273 | [] | no_license | SunZhigang7/Visual-Scene-Understanding-and-Tracking-from-Traffic-Cameras | cc3bd5eb9abd6c22825a8af20885e81eab51ae19 | 74051fa0896c79bd7cdd51892c3ef52030911bdc | refs/heads/main | 2023-07-05T02:27:24.568540 | 2021-08-30T21:32:07 | 2021-08-30T21:32:07 | 398,634,252 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,082 | py | from multi_object_tracking import mot
from keypoint_orientation_detection import keypoint_detector
from auto_camera_calibrator import camera_calibrator
from transfer_coordinates import transfer_coordinates
from speed_estimation import speed_estimation
from crawl_videos import get_videos
from select_video import select_video, scale_video
import torch
# Procedure 0 : Hardware Detection
print('Procedure 0 : Hardware Detection')
print('----------------------------------------------------------------------')
print('The device using in this pipeline')
print(torch.cuda.get_device_name())  # raises if no CUDA device is present
print()
# Procedure 1 : Get Videos from London Traffic Cameras
print('Procedure 1 : Get Videos from London Traffic Cameras')
print('----------------------------------------------------------------------')
# NOTE(review): eval() on raw user input executes arbitrary code; int() would be safer.
get_videos_number = eval(input('please enter the number of video files that you want to get : '))
get_videos(get_videos_number)
print('Finished Getting Videos\n')
# Procedure 2 : Select the video that to be analysed
print('Procedure 2 : Select the video that to be analysed')
print('----------------------------------------------------------------------')
select_video_number = input('video : ')
select_video(select_video_number)
print('if there is no vehicle in the vide, please select other video.')
select_video_decision = input('whether to select other video (y/n): ')
# Let the user keep re-picking until they answer anything other than 'y'.
while select_video_decision == 'y':
    select_video_number = input('video : ')
    select_video(select_video_number)
    select_video_decision = input('whether to select other video (y/n): ')
scale_decision = input('whether to scale the video (y/n): ')
if scale_decision == 'y':
    scale_number = input('scale number: ')
    scale_video(scale_number)
    print('Finished scaling')
print('Finished Selecting Video\n')
# Procedure 3 : Multi-Object Tracking
print('Procedure 3 : Multi-Object Tracking')
print('----------------------------------------------------------------------')
# NOTE(review): eval() on raw user input executes arbitrary code; float() would be safer.
iou = eval(input('set iou threshold (0.4 recommended) :'))
score = eval(input('set score threshold (0.5 recommended) :'))
# Bug fix: the thresholds entered above were previously discarded -- the
# call was hard-coded as mot(0.4, 0.5).
mot(iou, score)
print('Finished Multi-Object Tracking\n')
# Procedure 4 : Key_points and Orientation detection
print('Procedure 4 : Key_points and Orientation detection')
print('----------------------------------------------------------------------')
keypoint_detector()  # entry point from keypoint_orientation_detection
print('Finished Key_points and Orientation detection\n')
# Procedure 5 : auto_camera_calibrator
print('Procedure 5 : auto_camera_calibrator')
print('----------------------------------------------------------------------')
camera_calibrator()  # entry point from auto_camera_calibrator
print('Finished auto_camera_calibrator\n')
# Procedure 6 : transfer_coordinates
print('Procedure 6 : transfer_coordinates')
print('----------------------------------------------------------------------')
transfer_coordinates()  # entry point from transfer_coordinates
print('Finished transfer_coordinates\n')
# Procedure 7 : speed_estimation
print('Procedure 7 : speed_estimation')
print('----------------------------------------------------------------------')
speed_estimation()  # final stage, entry point from speed_estimation
print('Finished speed_estimation\n')
| [
"noreply@github.com"
] | SunZhigang7.noreply@github.com |
5972e9b3b763273f9a652d66f3d080b66c693961 | 6dedbcff0af848aa979574426ad9fa3936be5c4a | /cengal/parallel_execution/coroutines/coro_standard_services/remote_nodes/versions/v_0/request_class_info.py | fb2ff414d2289c06f43f46ac97d35a61e59d0cfe | [
"Apache-2.0"
] | permissive | FI-Mihej/Cengal | 558d13541865e22006431bd1a1410ad57261484a | d36c05f4c90dfdac7296e87cf682df2f4d367e4b | refs/heads/master | 2023-06-08T00:39:39.414352 | 2023-06-05T21:35:50 | 2023-06-05T21:35:50 | 68,829,562 | 10 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,043 | py | #!/usr/bin/env python
# coding=utf-8
# Copyright © 2012-2023 ButenkoMS. All rights reserved. Contacts: <gtalk@butenkoms.space>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module Docstring
Docstrings: http://www.python.org/dev/peps/pep-0257/
"""
# Package metadata consumed by packaging / introspection tooling.
__author__ = "ButenkoMS <gtalk@butenkoms.space>"
__copyright__ = "Copyright © 2012-2023 ButenkoMS. All rights reserved. Contacts: <gtalk@butenkoms.space>"
__credits__ = ["ButenkoMS <gtalk@butenkoms.space>", ]
__license__ = "Apache License, Version 2.0"
__version__ = "3.2.6"
__maintainer__ = "ButenkoMS <gtalk@butenkoms.space>"
__email__ = "gtalk@butenkoms.space"
# __status__ = "Prototype"
__status__ = "Development"
# __status__ = "Production"
from enum import Enum
from cengal.parallel_execution.coroutines.coro_scheduler import *
from cengal.parallel_execution.coroutines.coro_tools.await_coro import *
from cengal.parallel_execution.coroutines.coro_standard_services.asyncio_loop import *
from cengal.parallel_execution.coroutines.coro_standard_services.loop_yield import CoroPriority
from cengal.parallel_execution.coroutines.coro_standard_services.put_coro import *
from cengal.parallel_execution.coroutines.coro_standard_services.timer_func_runner import *
from cengal.file_system.file_manager import path_relative_to_current_dir
from cengal.time_management.load_best_timer import perf_counter
from cengal.data_manipulation.serialization import *
from typing import Hashable, Tuple, List, Any, Dict, Callable, Type
from cengal.introspection.inspect import get_exception, entity_owning_module_importable_str, entity_owning_module_info_and_owning_path, entity_properties
from cengal.io.core.memory_management import IOCoreMemoryManagement
from cengal.parallel_execution.asyncio.efficient_streams import StreamManagerIOCoreMemoryManagement, TcpStreamManager, UdpStreamManager, StreamManagerAbstract
from cengal.code_flow_control.smart_values import ValueExistence
from cengal.io.named_connections.named_connections_manager import NamedConnectionsManager
from cengal.code_flow_control.args_manager import number_of_provided_args
from cengal.data_manipulation.serialization import Serializer, Serializers, best_serializer
from cengal.code_flow_control.args_manager import find_arg_position_and_value, UnknownArgumentError
from cengal.data_generation.id_generator import IDGenerator, GeneratorType
from cengal.system import PLATFORM_NAME, PYTHON_VERSION
from importlib import import_module
import sys
import os
import asyncio
import lmdb
from .exceptions import *
from .commands import *
from .class_info import *
class LocalRequestClassInfo(LocalClassInfo):
    """Wire-format descriptor for a locally registered Request subclass.

    Assigns every property of the request a small numeric id so instances
    can travel as (id, value) pairs instead of full property names.
    """
    def __init__(self, local_id: Hashable, request: Request) -> None:
        super().__init__(local_id, type(request))
        # property name -> numeric id, in entity_properties() enumeration order
        names = entity_properties(request)
        self._properties: Dict[str, Hashable] = {name: pos for pos, name in enumerate(names)}
        self._properties_tuple: Tuple[Tuple[str, Hashable]] = tuple(self._properties.items())
    def __call__(self) -> Type:
        """Payload declaring this request class to the remote side."""
        fields = CommandDataFieldsDeclareServiceRequestClass
        return {
            fields.local_id.value: self._local_id,
            fields.class_name.value: self._class_name,
            fields.module_importable_str.value: self._module_importable_str,
            fields.properties_tuple.value: self._properties_tuple,
        }
    @property
    def properties(self):
        return self._properties
    @property
    def properties_tuple(self):
        return self._properties_tuple
    def request_to_data(self, request: Request) -> Dict:
        """Serialise *request* as its class id plus (property id, value) pairs."""
        fields = CommandDataFieldsServiceRequestWithRequestClass
        values = tuple(
            (prop_id, getattr(request, prop_name))
            for prop_name, prop_id in self._properties_tuple
        )
        return {
            fields.request_class_id.value: self._local_id,
            fields.properties_tuple.value: values,
        }
class RemoteRequestClassInfo(RemoteClassInfo):
    """Mirror of a request class declared by a remote node.

    Rebuilds request instances from the (property id, value) pairs produced
    by the remote side's LocalRequestClassInfo.
    """
    def __init__(self, local_id: Hashable, class_name: str, module_importable_str: str, properties_tuple: Tuple[Tuple[str, Hashable]]) -> None:
        super().__init__(local_id, class_name, module_importable_str)
        self._properties_tuple: Tuple[Tuple[str, Hashable]] = properties_tuple
        self._properties: Dict[Hashable, str] = {index: property_name for property_name, index in properties_tuple} # key: property id; value: property name
    @classmethod
    def from_data(cls, data: Dict[Hashable, Any]) -> 'RemoteRequestClassInfo':
        """Build an instance from a decoded declare-request-class payload."""
        local_id: Hashable = data[CommandDataFieldsDeclareServiceRequestClass.local_id.value]
        class_name: str = data[CommandDataFieldsDeclareServiceRequestClass.class_name.value]
        module_importable_str: str = data[CommandDataFieldsDeclareServiceRequestClass.module_importable_str.value]
        properties_tuple: Tuple[Tuple[str, Hashable]] = data[CommandDataFieldsDeclareServiceRequestClass.properties_tuple.value]
        return cls(local_id, class_name, module_importable_str, properties_tuple)
    def __call__(self, data: Dict) -> Request:
        # Instantiate the request class and restore its properties from *data*.
        # NOTE(review): this reads properties_tuple under the
        # CommandDataFieldsDeclareServiceRequestClass enum while
        # LocalRequestClassInfo.request_to_data writes it under
        # CommandDataFieldsServiceRequestWithRequestClass -- verify the two
        # enum members share the same value.
        request: Request = self.class_type()
        properties_tuple: Tuple[Tuple[Hashable, Any]] = data[CommandDataFieldsDeclareServiceRequestClass.properties_tuple.value]
        for index, value in properties_tuple:
            name: str = self._properties[index]
            setattr(request, name, value)
        return request
| [
"gtalk@butenkoms.space"
] | gtalk@butenkoms.space |
a71747d0f56f0068b0d80949bd981bb05ad8169c | ad8b24a89fefd32b3f97865fb8de3ed421f4d418 | /Drawing Fractal or Recursion tree.py | 191cfa519ad2a4a6a0825299392db7d66b8ca118 | [] | no_license | Jaya-Chand/Fractals | 375f2f5563bfc0e82bd69b831018cf76114c3534 | 02e77a6eea7e83f8b1cfcbd888f01ea778f5181e | refs/heads/master | 2022-12-10T09:21:41.503480 | 2020-09-16T16:45:53 | 2020-09-16T16:45:53 | 296,092,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | #Jaya Chand
#16/09/2020
"""
Drawing Fractal or Recursion tree
Using turtle to plot fractal and recursion trees
"""
import turtle ##using turtle to plot output
hr = turtle.Turtle()
hr.left(90)    # point the pen upward so the trunk grows vertically
hr.speed(150)  # turtle treats out-of-range speed values as "fastest"
def tree(i):
    """Recursively draw a fractal-tree branch of length *i* with turtle `hr`.

    Each branch spawns two children at 3/4 of its length; recursion stops
    once a branch would be shorter than 10.
    """
    if i < 10:
        # Bug fix: the original had the bare expression `exit` here, which
        # evaluates the builtin and does nothing (and would kill the
        # interpreter if actually called).  `return` states the intent.
        return
    hr.forward(i)
    hr.left(30)
    tree(3 * i / 4)   # left sub-branch
    hr.right(60)
    tree(3 * i / 4)   # right sub-branch
    hr.left(30)       # restore the original heading
    hr.backward(i)    # retreat to the branch's base
tree(50)
"noreply@github.com"
] | Jaya-Chand.noreply@github.com |
a36aab1dcb0aafff6ce134f82d406e4872c78a5e | 48367cd37f284f42f04457509710d1482dc4b95e | /Achy Breaky Newlines/achy_breaky_newlines.py | 58a7f0292f4e611140d66db78e9fb682683747a6 | [] | no_license | JTMaher2/RunCode-Solutions | 0bf2a7affd24c571404872795f95081662a83ccd | 1d8c0d79c4181132c004937ddcd9ccfaa088213f | refs/heads/master | 2020-04-28T03:03:43.835011 | 2019-05-06T01:15:11 | 2019-05-06T01:15:11 | 174,921,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | #!/usr/bin/env python3.6
import sys
# Echo the file named on the command line to stdout, dropping lines that
# consist solely of whitespace.
with open(sys.argv[1]) as infile:  # `infile` avoids shadowing the builtin `input`
    for row in infile:
        if not row.isspace():  # idiomatic form of `row.isspace() == False`
            sys.stdout.write(row)
"jtmaher2@gmail.com"
] | jtmaher2@gmail.com |
a6557fe99e4b8165db996f484d03ffb3b86fe81a | b1ebe84e1af4315a20948cede8664fa075c1258f | /102-Binary-Tree-Level-Order-Traversal.py | 8ae112da941bb3b5739e4454bedda5460016a0d2 | [] | no_license | qcl643062/leetcode | b99ab00da3844be0e1b3caf6a34bc7b694a9c1bf | 575bf539ccc6a0b302eca8139840c421691bc8f0 | refs/heads/master | 2021-01-10T04:49:26.856676 | 2016-04-06T07:42:39 | 2016-04-06T07:42:39 | 54,179,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,328 | py | #coding=utf-8
"""
Given a binary tree, return the level order traversal of its nodes' values. (ie, from left to right, level by level).
For example:
Given binary tree {3,9,20,#,#,15,7},
3
/ \
9 20
/ \
15 7
return its level order traversal as:
[
[3],
[9,20],
[15,7]
]
"""
class TreeNode(object):
    """Binary-tree node: a payload value plus optional left/right children."""

    def __init__(self, x, left=None, right=None):
        self.val = x
        self.left = left
        self.right = right
class Solution(object):
    def levelOrder(self, root):
        """
        Breadth-first traversal collecting node values one level at a time,
        left to right.

        :type root: TreeNode
        :rtype: List[List[int]]
        """
        if root is None:
            return []
        levels = []
        frontier = [root]
        while frontier:
            values = []
            next_frontier = []
            for node in frontier:
                values.append(node.val)
                if node.left:
                    next_frontier.append(node.left)
                if node.right:
                    next_frontier.append(node.right)
            levels.append(values)
            frontier = next_frontier
        return levels
s = Solution()
print s.levelOrder(TreeNode(0, TreeNode(1, TreeNode(2)))) | [
"howtogetin@126.com"
] | howtogetin@126.com |
c7be3da8472cb8def0f76e0cac71b79a7063ba14 | c829275111b9025dcccc9ac1b92d8dc51adbb71d | /photo/urls.py | 4fec7ab88e9eb5f629a088b997138a2b641ed5cb | [
"MIT"
] | permissive | Ken-mbira/PHOTO_BOOK | f1bd1bd65af228b0600bf69da12840897eb109ad | d47cd8dabd4b92e3befdafe2d99db266be31ffff | refs/heads/master | 2023-08-19T06:55:07.309342 | 2021-10-12T11:05:00 | 2021-10-12T11:05:00 | 414,297,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from . import views
urlpatterns = [
    # Photo-app routes; the `name=` values are used for reverse() lookups.
    path('',views.index,name = 'home'),
    path('images/',views.images,name = 'images'),
    path('images/<int:pk>',views.image_spec,name = 'image'),
    path('category/<int:pk>',views.image_category,name = 'category'),
    path('search',views.search_images, name="search")
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT) | [
"ken.mbira@student.moringaschool.com"
] | ken.mbira@student.moringaschool.com |
122973b22e084187725dc3a591d114310bbd301f | 1a8a224652a4e73f25137d1c5cc09f7aeded047a | /15.py | 02abe746a3ee7c3df8b80f4330b9cc2d853ec82a | [] | no_license | BigShow1949/Python100- | 1d9892be4981b8d04f4774e1d1b54c02d460f05d | 79db05e5673a326377c468e7cd2844befdca9c8c | refs/heads/master | 2021-01-19T23:20:50.219611 | 2017-05-09T02:43:01 | 2017-05-09T02:43:01 | 88,966,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Problem: use nested conditional expressions to grade a score:
# >= 90 is an 'A', 60-89 is a 'B', below 60 is a 'C'.
# Analysis: (a > b) ? a : b is the classic conditional-operator example.
# NOTE(review): Python 2 syntax (raw_input, print statement).
score = int(raw_input('input score:\n'))
if score >= 90:
    grade = 'A'
elif score >= 60:
    grade = 'B'
else:
    grade = 'C'
print '%d is %s' % (score, grade)
| [
"1029883589@qq.com"
] | 1029883589@qq.com |
1e5550569eedf5ace9cd8a5dd1e818c31298eae3 | 5338ba4f3cd24ffcec50349d23f672287e046047 | /ch05/vartest.py | 6dbb7b59827c129937e7d7165527d701bd6e0866 | [] | no_license | a1012p/Python | 85a337c35bac7ec9d8ff39a9441c52fd13e8d567 | 7178c7c132dcd9ac2a955be5e0758affc41db993 | refs/heads/main | 2023-06-13T21:29:25.679757 | 2021-07-08T00:27:57 | 2021-07-08T00:27:57 | 378,030,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | # vartest.py
a = 1 # global variable
def vartest(a): # the parameter rebinds locally; the module-level `a` is untouched
    #global a   (uncommenting this would make the increment affect the global `a`)
    a += 1 # `a` here is the local variable (the parameter)
vartest(a)
print(a)  # prints 1: vartest only changed its own local copy
| [
"a1027p@gmail.com"
] | a1027p@gmail.com |
20cf30291dd3e1ce42e9eac92e98cb83666fbc14 | 303bac96502e5b1666c05afd6c2e85cf33f19d8c | /solutions/python3/946.py | 08e5a7f4e305aa8dfddd5a773566d9bdd70744e5 | [
"MIT"
] | permissive | jxhangithub/leetcode | 5e82f4aeee1bf201e93e889e5c4ded2fcda90437 | 0de1af607557d95856f0e4c2a12a56c8c57d731d | refs/heads/master | 2022-05-22T12:57:54.251281 | 2022-03-09T22:36:20 | 2022-03-09T22:36:20 | 370,508,127 | 1 | 0 | MIT | 2022-03-09T22:36:20 | 2021-05-24T23:16:10 | null | UTF-8 | Python | false | false | 392 | py | class Solution:
def validateStackSequences(self, pushed, popped):
"""
:type pushed: List[int]
:type popped: List[int]
:rtype: bool
"""
arr, i = [], 0
for num in pushed:
arr.append(num)
while arr and arr[-1] == popped[i]:
i += 1
arr.pop()
return arr == popped[i:][::-1] | [
"cenkay.arapsagolu@gmail.com"
] | cenkay.arapsagolu@gmail.com |
c6ca79410b6aea00242ffdd41c948eaa87534edc | bd1c0e9d4ec4e5df9ed1902448c8c0532ad21ed8 | /master/tutorials/30-days-of-code/30-sorting/solution.py | 360a3b3c704231a93a2d2b5acedbd0044b644ce4 | [
"MIT"
] | permissive | bitnot/hackerrank-solutions | dda93bcba0ea254cdeaa500d0ea39511ea6f67dc | cb54af3e80f9aa188a7aadf357c5d9bcc0715249 | refs/heads/master | 2020-03-10T14:18:05.135333 | 2019-01-27T14:12:42 | 2019-01-27T14:12:42 | 129,422,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | #!/bin/python3
import sys
n = int(input().strip())
a = [int(a_temp) for a_temp in input().strip().split(' ')]
numberOfSwaps = 0  # total swaps across the whole sort, reported below
for i in range(0, n):
    swapsThisPass = 0
    for j in range(0, n - 1):
        if a[j] > a[j + 1]:
            a[j], a[j + 1] = a[j + 1], a[j]
            swapsThisPass += 1
    numberOfSwaps += swapsThisPass
    # Bug fix: the original tested the *cumulative* counter here, so the
    # early exit only ever fired when the array was sorted to begin with.
    # A full pass with zero swaps means the array is sorted; stop.  The
    # printed totals and the final array are unchanged by this fix.
    if swapsThisPass == 0:
        break
print("Array is sorted in {} swaps.".format(numberOfSwaps))
print("First Element: {}".format(a[0]))
print("Last Element: {}".format(a[n-1]))
"pavel.kiper@gmail.com"
] | pavel.kiper@gmail.com |
538ee26ee137dacb452213d880cb7d19ce78d4ed | f2bbca06f941cb3b2c9519bef8c8964ba168de16 | /GUI/demo2.py | ef5806872b6db852cecceee797f225a1429f5409 | [] | no_license | superman-wrdh/python-application | 76c9651f698ecbc8552cfc55ee27a1d83b16a2f5 | 1f318b4467fae819b50edb25de2cd28a8dd783a0 | refs/heads/master | 2021-06-06T00:36:18.257009 | 2020-07-21T15:02:10 | 2020-07-21T15:02:10 | 110,427,063 | 19 | 3 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | # -*- encoding: utf-8 -*-
import PySimpleGUI as sg
layout = [[sg.Text('Persistent window')],
[sg.Input(do_not_clear=True)],
[sg.Button('Read'), sg.Exit()]]
window = sg.Window('Window that stays open').Layout(layout)
while True:
event, values = window.Read()
if event is None or event == 'Exit':
break
print(event, values)
window.Close() | [
"13419655360@163.com"
] | 13419655360@163.com |
f2a2c09d102ebb4c12b5678990d4b07e6fa71280 | 16eaa90eec58137c7cf0e429e574499d00ee21f2 | /apps/manga/models/manga.py | 325ffa9c350f2a247de6aad14b844a1d38c33887 | [
"MIT"
] | permissive | eliezer-borde-globant/lemanga | 53c48f91f5df4671c1653ab927acab3c95097468 | 57c799804754f6a91fd214faac84d9cd017fc0c4 | refs/heads/master | 2023-02-16T23:25:49.889702 | 2020-12-28T17:27:49 | 2020-12-28T17:27:49 | 322,420,102 | 0 | 0 | MIT | 2020-12-17T23:10:32 | 2020-12-17T21:43:56 | null | UTF-8 | Python | false | false | 748 | py | from __future__ import unicode_literals
import uuid
from django.core.urlresolvers import reverse_lazy
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from autoslug import AutoSlugField
@python_2_unicode_compatible
class Manga(models.Model):
    """A manga series; its URL slug is auto-derived from the unique name."""
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    name = models.CharField(max_length=200, unique=True)
    # always_update=True regenerates the slug whenever `name` changes.
    slug = AutoSlugField(populate_from='name', unique=True, always_update=True)
    class Meta:
        verbose_name = "Manga"
        verbose_name_plural = "Mangas"
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        # Detail route is looked up by slug, not by primary key.
        return reverse_lazy('detail-manga', kwargs={"name": self.slug})
| [
"leonardoorozcop@gmail.com"
] | leonardoorozcop@gmail.com |
528f4f027317f1d22c63b7a145d3182c87daa77f | 86fc644c327a8d6ea66fd045d94c7733c22df48c | /scripts/managed_cpe_services/customer/single_cpe_dual_wan_site/single_cpe_dual_wan_site_services/cpe_primary_wan/end_points/bgp_peers/service_customization.py | 7e389e7294aa0bde9644faa5fec5bf5a73b91948 | [] | no_license | lucabrasi83/anutacpedeployment | bfe703657fbcf0375c92bcbe7560051817f1a526 | 96de3a4fd4adbbc0d443620f0c53f397823a1cad | refs/heads/master | 2021-09-24T16:44:05.305313 | 2018-10-12T02:41:18 | 2018-10-12T02:41:18 | 95,190,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,162 | py | #
# This computer program is the confidential information and proprietary trade
# secret of Anuta Networks, Inc. Possessions and use of this program must
# conform strictly to the license agreement between the user and
# Anuta Networks, Inc., and receipt or possession does not convey any rights
# to divulge, reproduce, or allow others to use this program without specific
# written authorization of Anuta Networks, Inc.
#
# Copyright (c) 2015-2016 Anuta Networks, Inc. All Rights Reserved.
#
#
#ALL THE CUSTOMIZATIONS REGARDING DATAPROCESSING SHOULD BE WRITTEN INTO THIS FILE
#
"""
Tree Structure of Handled XPATH:
services
|
managed-cpe-services
|
customer
|
single-cpe-dual-wan-site
|
single-cpe-dual-wan-site-services
|
cpe-primary-wan
|
end-points
|
bgp-peers
Schema Representation:
/services/managed-cpe-services/customer/single-cpe-dual-wan-site/single-cpe-dual-wan-site-services/cpe-primary-wan/end-points/bgp-peers
"""
"""
Names of Leafs for this Yang Entity
BGP-peer-name
peer-ip
peer-description
remote-as
password
import-route-map
export-route-map
next-hop-self
soft-reconfiguration
default-originate
default-originate-route-map
send-community
encrypted-password
advertisement-interval
time-in-sec
timers
keepalive-interval
holdtime
"""
from servicemodel import util
from servicemodel import yang
from servicemodel import devicemgr
from cpedeployment.cpedeployment_lib import getLocalObject
from cpedeployment.cpedeployment_lib import getDeviceObject
from cpedeployment.cpedeployment_lib import getCurrentObjectConfig
from cpedeployment.cpedeployment_lib import ServiceModelContext
from cpedeployment.cpedeployment_lib import getParentObject
from cpedeployment.cpedeployment_lib import log
from cpedeployment.bgppeer_lib import bgp_peer
from cpedeployment.bgppeer_lib import update_bgp_peer
class ServiceDataCustomization:
    """Lifecycle callbacks invoked by the CPE deployment framework for the
    BGP-peers service (create / device-binding / update / delete).

    NOTE: Python 2 codebase (uses dict.iteritems()).
    """
    @staticmethod
    def process_service_create_data(smodelctx, sdata, dev, **kwargs):
        """ Custom API to modify the inputs"""
        # Local switch to enable/disable the customisation below.
        modify = True
        if modify and kwargs is not None:
            for key, value in kwargs.iteritems():
                log("%s == %s" %(key,value))
        if modify:
            config = kwargs['config']
            inputdict = kwargs['inputdict']
    @staticmethod
    def process_service_device_bindings(smodelctx, sdata, dev, **kwargs):
        """ Custom API to modify the device bindings or Call the Business Login Handlers"""
        modify = True
        if modify and kwargs is not None:
            for key, value in kwargs.iteritems():
                log("%s == %s" %(key,value))
        if modify:
            config = kwargs['config']
            inputdict = kwargs['inputdict']
            devbindobjs = kwargs['devbindobjs']
        # Configure the BGP peer on every device bound to this service.
        for device in util.convert_to_list(dev):
            bgp_peer('cpe_dual', 'cpe_primary_dual', smodelctx, sdata, device, **kwargs)
    @staticmethod
    def process_service_update_data(smodelctx, sdata, **kwargs):
        """callback called for update operation"""
        modify = True
        if modify and kwargs is not None:
            for key, value in kwargs.iteritems():
                log("%s == %s" %(key,value))
        dev = kwargs['dev']
        for device in util.convert_to_list(dev):
            update_bgp_peer('cpe_dual', 'cpe_primary_dual', smodelctx, sdata, device, **kwargs)
    @staticmethod
    def process_service_delete_data(smodelctx, sdata, **kwargs):
        """callback called for delete operation"""
        # modify=False: delete currently performs no extra customisation.
        modify = False
        if modify and kwargs is not None:
            for key, value in kwargs.iteritems():
                log("%s == %s" %(key,value))
class DeletePreProcessor(yang.SessionPreProcessor):
    """Hook run before a delete session is reserved; placeholder for move ops."""
    def processBeforeReserve(self, session):
        operations = session.getOperations()
        # The bare string below is a no-op note left by the original authors.
        """Add any move operations for Deletion"""
        log('operations: %s' % (operations))
class CreatePreProcessor(yang.SessionPreProcessor):
    """Hook run before a create session is reserved; placeholder for move ops."""
    def processBeforeReserve(self, session):
        operations = session.getOperations()
        # The bare string below is a no-op note left by the original authors.
        """Add any move operations for creation"""
        log('operations: %s' % (operations))
| [
"sebastien.pouplin@tatacommunications.com"
] | sebastien.pouplin@tatacommunications.com |
5b2629b550625d93f6d81cce8067f59834f2dfb9 | 2588687aebbe7c8c4c54e7d7a1456a4d35d63271 | /tensorflow/numpy-func.py | 19c9ac92768fc86629bce4f72b718807ffdef322 | [] | no_license | MrHjf/python | ec0648a017a0ccb0a1ff426040a83f0bfb332d5f | bbf5c980c189f4daa10d57bafdd1c5f0733ebbe3 | refs/heads/master | 2020-05-15T02:36:41.068948 | 2019-04-18T08:51:46 | 2019-04-18T08:51:46 | 182,051,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | import numpy as np
# Scratch experiments with numpy slicing/reshaping; most prints are disabled.
x = np.arange(4)
y = 3
#print(x)
#print(x[1:y])
#print(x[1:y:4])
#arr_slice = x[1:3]
#print(arr_slice)
#print(arr_slice[:])
xx = x.reshape(2, 2)
#print('xx', xx, xx.shape)
y = np.arange(35).reshape(5, 7)
#print(y)
# Builds throwaway 1x1 arrays per iteration (training-feed style demo).
for i in range(100):
    xs = np.array([[i]])
    ys = np.array([[2*i]])
    #print('xs:', xs)
    #print('ys:', ys)
def testFun():
    # Late-binding closure demo kept on purpose: every lambda closes over the
    # SAME `i`, which is 3 once the comprehension finishes, so each returns 3*x.
    temp = [lambda x : i*x for i in range(4)]
    return temp
# Prints the function object itself (no call) — presumably intentional demo.
print(testFun)
# All four lambdas print 6 (3*2) because of the late-binding behaviour above.
for everyLambda in testFun():
    print(everyLambda(2))
foo = range(1,4)
print([i for i in foo])
print(foo) | [
"huangjfc@yonyou.com"
] | huangjfc@yonyou.com |
f6ab2a32056e94cd7e41c934646a127dfdfb143c | 743910213873cfc51201f4543140dbe514757574 | /data/groups.py | 17de9102520248f4cc6bc91df07adb83d1d71fa0 | [
"Apache-2.0"
] | permissive | rgurevych/python_for_testers | b6a3fb0b5befbffa54546e03de34fb064f139626 | 04023a5d6ea480f7828aa56e8a4094b744e05721 | refs/heads/master | 2020-06-09T14:34:38.868533 | 2017-02-17T23:32:02 | 2017-02-17T23:32:02 | 76,032,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 105 | py |
from models.group import Group
# Single-group fixture consumed by the group tests.
testdata = [Group(name="Name_1", header="Header_1", footer="Footer_1")]
| [
"rostikg@gmail.com"
] | rostikg@gmail.com |
3504c60da601297fcb54f37851d0a3411903c267 | 5d7794b373a0888f100197a8ac0fffbd0472780e | /TEST_RUN/run.py | 837079116efddd7af3de9bcf2ba693c6b9048f39 | [] | no_license | michalbali256/emida | 3e4db08c56d2c1e68698194f37c2e1c75bd10915 | b1ea798d6f8253fef98a2906d4aa8ca704ba4e5f | refs/heads/master | 2021-07-06T03:38:27.080749 | 2021-01-27T13:15:01 | 2021-01-27T13:15:01 | 223,661,168 | 0 | 1 | null | 2020-04-13T18:14:31 | 2019-11-23T22:26:11 | Cuda | UTF-8 | Python | false | false | 1,309 | py | #!/usr/bin/python3
from math import sqrt
import numpy as np
import time
import subprocess
def hex_pos(size, step):
    """Yield (x, y) centres of a hexagonal lattice covering a size-by-size square.

    Rows are sqrt(0.75)*step apart; every odd row is shifted right by half a
    step and holds one point fewer than an even row.
    """
    row_count = int(size / step / sqrt(0.75))
    col_count = int(size / step)
    for row in range(row_count + 1):
        # Odd rows: one fewer column, offset by half a step.
        for col in range(col_count - row % 2 + 1):
            yield (col + row % 2 / 2) * step, row * sqrt(0.75) * step
if __name__ == "__main__":
    # Reference (undeformed) image and filename pattern for the deformed tiles.
    ref = "../test_data/initial/INITIAL_x0y0.tif"
    fmt = "../test_data/deformed/DEFORMED_x{x:d}y{y:d}.tif"
    roi = "roi-cryst.txt"
    work = "def.txt"
    #set the size of processed rectangle here
    it = hex_pos(1200, 60)
    #it = hex_pos(7000, 60) #the size of original data
    # Write one "x y deformed-file" job line per hexagonal grid position.
    with open(work, "w") as fh:
        for x,y in it:
            print(x, y, fmt.format(x=int(x), y=int(y)), file=fh)
    executable_path = "../build/x64-Release/bin/emida"
    #executable_path = "../build/bin/emida"
    started = time.time()
    # Run emida over the job list, capturing its stdout; then report wall time.
    with open("out-emida.txt","w") as fh:
        subprocess.call([
            "../build/x64-Release/bin/emida",
            "-d", work,
            "-i", ref,
            "-b", roi,
            "--batchsize", "7",
            "--crosspolicy", "fft",
            "--precision","float",
            "--loadworkers", "5",
            "-f","3",
            "-a"
        ], stdout=fh)
    print(time.time()-started)
| [
"michalbali32@gmail.com"
] | michalbali32@gmail.com |
4a76ff7570018db1f0f93953187fb24eb1386f8f | 206e1931b71e5970a946c4adf7d15daf71b970dd | /py/gs_townlands.py | fb29f6f8bb8cd0a4355b795a5e3ada688e7d7d38 | [
"MIT"
] | permissive | FellowsFreiesWissen/Ogham | 7185eb3dafb835fb3c882b999d1b581a3720c64c | 3e74e6531985b0e9ba2e34bf1f6a1224cc7b52f3 | refs/heads/main | 2023-05-28T23:47:46.760982 | 2021-06-12T14:40:23 | 2021-06-12T14:40:23 | 305,460,994 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 7,075 | py | __author__ = "Florian Thiery"
# Module metadata (author/licence bookkeeping only; not used by the script).
__copyright__ = "MIT Licence 2021, Florian Thiery"
__credits__ = ["Florian Thiery"]
__license__ = "MIT"
__version__ = "beta"
__maintainer__ = "Florian Thiery"
__email__ = "mail@fthiery.de"
__status__ = "beta"
__update__ = "2021-05-11"
# import dependencies
import uuid
import requests
import io
import pandas as pd
import os
import codecs
import datetime
import importlib
import sys
import hashlib
import _config
# set UTF8 as default
importlib.reload(sys)
print("*****************************************")
# set starttime
starttime = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
# set paths
file_name = "gs_townlands_centroidpoint"
dir_path = os.path.dirname(os.path.realpath(__file__))
file_in = dir_path.replace("\\py", "\\data_v1\\csv\\geodata") + "\\" + file_name + ".csv"
# read csv file
data = pd.read_csv(
file_in,
encoding='utf-8',
sep=',',
usecols=['id', 'label', 'label_ga', 'wkt', 'osm_id', 'logainm_id', 'tie_url'],
na_values=['.', '??', 'NULL'] # take any '.' or '??' values as NA
)
print(data.info())
# create triples from dataframe: one batch of Turtle statements per townland row
lineNo = 2
outStr = ""
lines = []
for index, row in data.iterrows():
    # print(lineNo)
    tmpno = lineNo - 2
    # Progress ping every 1000 rows.
    if tmpno % 1000 == 0:
        print(tmpno)
    lineNo += 1
    # info
    lines.append("ogham:GSD" + str(row['id']) + " " + "rdf:type" + " oghamonto:Townland .")
    lines.append("ogham:GSD" + str(row['id']) + " " + "rdf:type" + " <http://www.opengis.net/ont/geosparql#Feature> .")
    lines.append("ogham:GSD" + str(row['id']) + " " + "rdf:type" + " <http://ontologies.geohive.ie/osi#Townland> .")
    lines.append("ogham:GSD" + str(row['id']) + " " + "rdfs:label" + " " + "'" + str(row['label']) + "'@en" + ".")
    lines.append("ogham:GSD" + str(row['id']) + " " + "rdfs:label" + " " + "'" + str(row['label_ga']) + "'@ga" + ".")
    lines.append("ogham:GSD" + str(row['id']) + " " + "oghamonto:LogainmMatch" + " <http://data.logainm.ie/place/" + str(row['logainm_id']) + "> .")
    lines.append("ogham:GSD" + str(row['id']) + " " + "oghamonto:OpenStreetMapMatch" + " '" + str(row['osm_id']) + "' .")
    lines.append("ogham:GSD" + str(row['id']) + " " + "oghamonto:TownlandsIEMatch" + " <" + str(row['tie_url']) + "> .")
    # geom
    lines.append("ogham:GSD" + str(row['id']) + " " + "geosparql:hasGeometry" + " ogham:GSD" + str(row['id']) + "_geom .")
    lines.append("ogham:GSD" + str(row['id']) + "_geom " + "rdf:type" + " sf:Point .")
    geom = "\"" + str(row['wkt']) + "\"^^geosparql:wktLiteral"
    lines.append("ogham:GSD" + str(row['id']) + "_geom " + "geosparql:asWKT " + geom + ".")
    lines.append("ogham:GSD" + str(row['id']) + "_geom " + "oghamonto:hasEPSG " + "<http://www.opengis.net/def/crs/EPSG/0/4326>" + ".")
    # license
    lines.append("ogham:GSD" + str(row['id']) + " " + "dct:license" + " <" + "https://creativecommons.org/licenses/by-sa/4.0/deed.de" + "> .")
    lines.append("ogham:GSD" + str(row['id']) + " " + "dct:license" + " <" + "http://www.nationalarchives.gov.uk/doc/open-government-licence/version/3/" + "> .")
    lines.append("ogham:GSD" + str(row['id']) + " " + "dct:creator" + " <" + "https://orcid.org/0000-0002-3246-3531" + "> .")
    lines.append("ogham:GSD" + str(row['id']) + " " + "dct:rightsHolder" + " wd:Q3355441 .") # OSi
    lines.append("ogham:GSD" + str(row['id']) + " " + "dct:rightsHolder" + " wd:Q7100893 .") # OSNI
    # prov-o
    lines.append("ogham:GSD" + str(row['id']) + " " + "prov:wasAttributedTo" + " ogham:PythonStonesCIIC .")
    lines.append("ogham:GSD" + str(row['id']) + " " + "prov:wasDerivedFrom" + " <https://github.com/ogi-ogham/ogham-datav1/blob/main/csv/geodata/" + file_name + ".csv> .")
    lines.append("<https://github.com/ogi-ogham/ogham-datav1/blob/main/csv/geodata/" + file_name + ".csv> " + "prov:wasDerivedFrom" + " <https://www.opendatani.gov.uk/dataset/osni-open-data-50k-boundaries-townlands> .")
    lines.append("<https://github.com/ogi-ogham/ogham-datav1/blob/main/csv/geodata/" + file_name + ".csv> " + "prov:wasDerivedFrom" + " <https://data-osi.opendata.arcgis.com/datasets/townlands-osi-national-statutory-boundaries-generalised-20m?geometry=-29.136%2C51.106%2C12.678%2C55.686> .")
    lines.append("<https://github.com/ogi-ogham/ogham-datav1/blob/main/csv/geodata/" + file_name + ".csv> " + "prov:wasAttributedTo" + " wd:Q3355441 .") # OSNI
    lines.append("<https://github.com/ogi-ogham/ogham-datav1/blob/main/csv/geodata/" + file_name + ".csv> " + "prov:wasAttributedTo" + " wd:Q3355441 .") # OSi
    lines.append("ogham:GSD" + str(row['id']) + " " + "prov:wasGeneratedBy" + " ogham:GSD" + str(row['id']) + "_activity .")
    lines.append("ogham:GSD" + str(row['id']) + "_activity " + "rdf:type" + " prov:Activity .")
    lines.append("ogham:GSD" + str(row['id']) + "_activity " + "prov:startedAtTime '" + starttime + "'^^xsd:dateTime .")
    lines.append("ogham:GSD" + str(row['id']) + "_activity " + "prov:endedAtTime '" + datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ") + "'^^xsd:dateTime .")
    lines.append("ogham:GSD" + str(row['id']) + "_activity " + "prov:wasAssociatedWith" + " ogham:PythonStonesCIIC .")
    lines.append("")
# Output is split into files of 100000 triples each.
files = (len(lines) / 100000) + 1
print("triples", len(lines), "files", int(files))
thiscount = len(lines)
_config.count(thiscount)
# write output files: Turtle prefix header + a 100000-triple slice per file
f = 0
step = 100000
fileprefix = file_name + "_"
prefixes = ""
prefixes += "@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .\r\n"
prefixes += "@prefix owl: <http://www.w3.org/2002/07/owl#> .\r\n"
prefixes += "@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .\r\n"
prefixes += "@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .\r\n"
prefixes += "@prefix geosparql: <http://www.opengis.net/ont/geosparql#> .\r\n"
prefixes += "@prefix dc: <http://purl.org/dc/elements/1.1/> .\r\n"
prefixes += "@prefix dct: <http://purl.org/dc/terms/> .\r\n"
prefixes += "@prefix sf: <http://www.opengis.net/ont/sf#> .\r\n"
prefixes += "@prefix prov: <http://www.w3.org/ns/prov#> .\r\n"
prefixes += "@prefix oghamonto: <http://ontology.ogham.link/> .\r\n"
prefixes += "@prefix ogham: <http://lod.ogham.link/data/> .\r\n"
prefixes += "@prefix skos: <http://www.w3.org/2004/02/skos/core#> .\r\n"
prefixes += "@prefix wd: <http://www.wikidata.org/entity/> .\r\n"
prefixes += "\r\n"
for x in range(1, int(files) + 1):
    strX = str(x)
    filename = dir_path.replace("\\py", "\\data_v1\\rdf\\geodata") + "\\" + fileprefix + strX + ".ttl"
    file = codecs.open(filename, "w", "utf-8")
    file.write("# create triples from " + file_name + ".csv \r\n")
    file.write("# on " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M") + "\r\n\r\n")
    file.write(prefixes)
    i = f
    # Write only the slice [f, f+step) of the accumulated triples.
    for i, line in enumerate(lines):
        if (i > f - 1 and i < f + step):
            file.write(line)
            file.write("\r\n")
    f = f + step
    print(" > " + fileprefix + strX + ".ttl")
    file.close()
print("*****************************************")
print("SUCCESS: closing script")
print("*****************************************")
| [
"web@florian-thiery.de"
] | web@florian-thiery.de |
0f027c2827c530aad18df15779751a60eea682d4 | c9d99f8a8e2f41bba4605c6564329eaa35d93857 | /test/day1test2.py | 470e6743f8ded34655f1c70a1678130db825a531 | [] | no_license | rhflocef521/myproject | c86be05b5029ffe9e8a11ae660a3205253a51449 | 996ddee17b86f8378a4ebeb4ca14c13f66e3900f | refs/heads/master | 2020-04-29T06:52:50.709035 | 2019-03-16T06:11:51 | 2019-03-16T06:11:51 | 175,928,043 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | if __name__ == '__main__':
num=int(input("请输入一个数字:"))
result=num**(1/2)
print(result) | [
"997828925@qq.com"
] | 997828925@qq.com |
33342edc351835d96fc30b2229c64f36d1195aa5 | 0e25538b2f24f1bc002b19a61391017c17667d3d | /storefront/win_sfstore.py | 182c135f774ac6bf02adb5b401eac608aa296006 | [] | no_license | trondhindenes/Ansible-Auto-Generated-Modules | 725fae6ba9b0eef00c9fdc21179e2500dfd6725f | efa6ac8cd2b545116f24c1929936eb8cc5c8d337 | refs/heads/master | 2020-04-06T09:21:00.756651 | 2016-10-07T07:08:29 | 2016-10-07T07:08:29 | 36,883,816 | 12 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,533 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# <COPYRIGHT>
# <CODEGENMETA>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION = '''
---
module: win_sfstore
version_added:
short_description: Generated from DSC module storefront version 0.9.4 at 07.10.2016 01.23.53
description:
- The Storefront DSC resources can automate the deployment and configuration of Citrix Storefront 3.5. These DSC resources are provided AS IS, and are not supported through any means.
options:
AuthenticationServiceVirtualPath:
description:
-
required: True
default:
aliases: []
VirtualPath:
description:
-
required: True
default:
aliases: []
Ensure:
description:
-
required: False
default:
aliases: []
choices:
- Absent
- Present
FriendlyName:
description:
-
required: False
default:
aliases: []
PsDscRunAsCredential_username:
description:
-
required: False
default:
aliases: []
PsDscRunAsCredential_password:
description:
-
required: False
default:
aliases: []
SiteId:
description:
-
required: False
default:
aliases: []
AutoInstallModule:
description:
- If true, the required dsc resource/module will be auto-installed using the Powershell package manager
required: False
default: false
aliases: []
choices:
- true
- false
AutoConfigureLcm:
description:
- If true, LCM will be auto-configured for directly invoking DSC resources (which is a one-time requirement for Ansible DSC modules)
required: False
default: false
aliases: []
choices:
- true
- false
| [
"trond@hindenes.com"
] | trond@hindenes.com |
b5401689dd990ba41cdff67c1c3460e1a63c59f4 | 383b45c4d2bc4f91d070235336bfcdb74612d2eb | /persimmon/view/blocks/csvoutblock.py | 08a44a4d5423bc24d10d60c34524642ea8970d9d | [
"MIT"
] | permissive | code4days/Persimmon | 0a3e0c5900cada4c91402c9020dcb0639a0cb9aa | da08ed854dd0305d7e4684e97ee828acffd76b4d | refs/heads/master | 2020-05-25T07:20:00.821385 | 2017-06-30T09:59:09 | 2017-06-30T09:59:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,120 | py | from persimmon.view.pins import InputPin
from persimmon.view.util import FileDialog
from persimmon.view.blocks.block import Block
from kivy.properties import ObjectProperty, StringProperty
from kivy.lang import Builder
import numpy as np
import pandas as pd
Builder.load_file('persimmon/view/blocks/csvoutblock.kv')
class CSVOutBlock(Block):
    """Pipeline block that writes its single input to a user-chosen CSV file."""

    in_1 = ObjectProperty()
    path = StringProperty()
    file_dialog = ObjectProperty()

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.file_dialog = FileDialog(dir='~', filters=['*.csv'],
                                      size_hint=(0.8, 0.8))
        # Keep self.path in sync with whatever file the dialog picks.
        self.file_dialog.bind(file_chosen=self.setter('path'))
        # Block stays tainted (not runnable) until a .csv path is chosen.
        self.tainted = True
        self.tainted_msg = 'File not chosen in block {}!'.format(self.title)

    def function(self):
        """Write the incoming value to ``self.path`` as CSV (no index column)."""
        # isinstance instead of a `type(...) ==` comparison: idiomatic, and
        # also accepts ndarray subclasses.
        if isinstance(self.in_1.val, np.ndarray):
            self.in_1.val = pd.DataFrame(self.in_1.val)
        self.in_1.val.to_csv(path_or_buf=self.path, index=False)

    def on_path(self, instance, value):
        """Kivy property observer: un-taint the block once a .csv path is set."""
        self.tainted = not value.endswith('.csv')
| [
"alvaro.garcia95@hotmail.com"
] | alvaro.garcia95@hotmail.com |
c333fff2357c728767dfef696ac9b4747c0c0333 | bdd6c0e9b57042e286d3b87995ee9a75c1c72242 | /jts/backend/college/admin.py | de545aa2aa1533cb4fd661844fdf3e5947d6b05e | [
"MIT"
] | permissive | goupaz/babylon-hackathon | 7851e4e8b72c1e951180e124ad389c137657d8c7 | 4e638d02705469061e563fec349676d8faa9f648 | refs/heads/main | 2022-12-27T06:04:01.009978 | 2020-10-12T06:53:41 | 2020-10-12T06:53:41 | 302,469,675 | 1 | 0 | MIT | 2020-10-11T22:24:41 | 2020-10-08T21:50:36 | null | UTF-8 | Python | false | false | 1,101 | py | from django.contrib import admin
# Register your models here.
from college.models import College, CollegeCoach
from .models import HomePage, HomePageVideo, LandingPage
@admin.register(HomePage)
class HomePageAdmin(admin.ModelAdmin):
    """Admin list config for HomePage: show and filter by college and id."""
    list_display = ("college", "id")
    list_filter = ("college", "id")
@admin.register(LandingPage)
class LandingPageAdmin(admin.ModelAdmin):
    """Admin list config for LandingPage: show and filter by college and id."""
    list_display = ("college", "id")
    list_filter = ("college", "id")
@admin.register(HomePageVideo)
class HomePageVideoAdmin(admin.ModelAdmin):
    """Admin list config for HomePageVideo: college, id and video title."""
    # Quote style normalised to double quotes to match the sibling admins.
    list_display = ("college", "id", "title")
    list_filter = ("college", "id", "title")
@admin.register(College)
class CollegeAdmin(admin.ModelAdmin):
    """Admin list config for College: identifying and location fields."""
    list_display = ("name", "short_name", "country", "alpha_two_code", "state_province")
    list_filter = ("name", "short_name", "country", "alpha_two_code", "state_province")
@admin.register(CollegeCoach)
class CollegeCoachAdmin(admin.ModelAdmin):
    """Admin list config for CollegeCoach: coach identity and publish state."""
    list_display = ("college", "first_name", "last_name", "title", "is_publish")
    list_filter = ("college", "first_name", "last_name", "title", "is_publish")
| [
"sako@opengov.com"
] | sako@opengov.com |
d42e36a884dc6a3f7f4831c59a8908c381811f33 | 0de56aed5714b04f2236300b2ba8252d9a0bf71a | /2016_11_Python/higher/simpleShell/func/exit.py | 0ce89b58161669a6686118de25f9a735516ba2a1 | [] | no_license | JasonatWang/LearnToProgram | fb5d6a0ade9732312cf8d257d70537af76fcb891 | 677872a940bfe635901460385d22d4ee45818c08 | refs/heads/master | 2020-12-03T05:21:00.315712 | 2016-12-23T06:12:58 | 2016-12-23T06:13:17 | 68,612,446 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20 | py | def exit():
pass | [
"jason_wangxiao@163.com"
] | jason_wangxiao@163.com |
a2f1fc3e3b51597b382062c492eba1b6d4546b1c | 8d06c8d8e4fb09045ece6910a4069a7d15817e82 | /tutorial/tutorial/spiders/quotes_spider.py | 5e407b5f7e2644f770683ac1ff8f4caa5a7d7461 | [] | no_license | AshDyh1999/MySpider | 512ad21f7be1413a5671a7d383f923cc72aa60e3 | 6cdea5736b05aeb06d93d73b9cee27fb296bdc2f | refs/heads/master | 2021-05-19T05:10:57.997281 | 2020-03-31T09:02:57 | 2020-03-31T09:02:57 | 251,541,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | import scrapy
class QuotesSpider(scrapy.Spider):
    """Spider that downloads the first two quotes.toscrape.com pages to disk."""
    name = "quotes"
    def start_requests(self):
        # Seed requests for pages 1 and 2; responses are handled by parse().
        urls = [
            'http://quotes.toscrape.com/page/1/',
            'http://quotes.toscrape.com/page/2/',
        ]
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)
    def parse(self, response):
        # The page number is the second-to-last path segment of the URL.
        page = response.url.split("/")[-2]
        filename = 'quotes-%s.html'% page
        # Write the raw response body as binary.
        with open(filename, 'wb') as f:
            f.write(response.body)
self.log('save file %s'% filename) | [
"759045809@qq.com"
] | 759045809@qq.com |
abf55a6e89c418a0d6cb8142f1025f77d7a05d97 | 3879d1ca43c573c209f962182cd1e7f7fe978fbf | /binarysearch/Generate-Primes/Generate-Primes.py | 732a67fc393a9520ae82b86615fcb9d57bfa042b | [] | no_license | DoctorLai/ACM | 34a5600a5adf22660c5d81b2d8b7a358be537ecf | aefa170f74c55c1230eb6f352770512b1e3f469e | refs/heads/master | 2023-09-01T02:13:01.604508 | 2023-08-31T15:42:07 | 2023-08-31T15:42:07 | 146,173,024 | 62 | 18 | null | 2020-10-11T13:19:57 | 2018-08-26T11:00:36 | C++ | UTF-8 | Python | false | false | 538 | py | # https://helloacm.com/teaching-kids-programmaing-generate-prime-numbers-using-sieve-of-eratosthenes-algorithms/
# https://binarysearch.com/problems/Generate-Primes
# EASY, MATH
class Solution:
    def solve(self, n):
        """Return every prime in [1, n], found with the Sieve of Eratosthenes."""
        # sieve[v] stays True while v is a prime candidate; 0 and 1 never are.
        sieve = [False, False] + [True] * (n - 1)
        candidate = 2
        while candidate * candidate <= n:
            if sieve[candidate]:
                # Strike out every multiple of the prime; starting at
                # candidate**2 suffices — smaller multiples were already
                # struck by smaller primes.
                for multiple in range(candidate * candidate, n + 1, candidate):
                    sieve[multiple] = False
            candidate += 1
        return [value for value in range(1, n + 1) if sieve[value]]
| [
"noreply@github.com"
] | DoctorLai.noreply@github.com |
599987b7dbb124e5d64f330120f7e207cbc195a1 | e5dc6e88ed0db261ab6f4e13b16fe27b9908621d | /pybbox/bboxConstant.py | ea44c7269860db1931af5b56f41f9b5c9d2e7bcf | [
"MIT"
] | permissive | c10h22/pybbox | 21431adf19a86d95647270739976abfc36e0c777 | bedcdccab5d18d36890ef8bf414845f2dec18b5c | refs/heads/master | 2021-12-11T01:39:14.273669 | 2016-10-12T07:59:58 | 2016-10-12T07:59:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 493 | py | class BboxConstant:
    # HTTP verbs used when talking to the Bbox REST API.
    HTTP_METHOD_GET = "get"
    HTTP_METHOD_PUT = "put"
    HTTP_METHOD_POST = "post"
    # Factory defaults for reaching the router (LAN IP / remote HTTPS port).
    DEFAULT_LOCAL_IP = "192.168.1.254"
    DEFAULT_REMOTE_PORT = "443"
    # REST resource names.
    API_DEVICE = "device"
    API_HOSTS = "hosts"
    API_WAN = "wan"
    # Authentication modes: from the LAN vs. remote access.
    AUTHENTICATION_TYPE_LOCAL = 0
    AUTHENTICATION_TYPE_REMOTE = 1
    # Privilege levels attached to API endpoints.
    AUTHENTICATION_LEVEL_PUBLIC = 2
    AUTHENTICATION_LEVEL_PRIVATE = 1
    AUTHENTICATION_LEVEL_NONE = 0
    # Session cookie set by the router after a successful login.
    COOKIE_BBOX_ID = "BBOX_ID"
    def __init__(self):
        # Constants-only holder; nothing to initialise.
        pass
| [
"hydreliox@gmail.com"
] | hydreliox@gmail.com |
0b775362d2b90d3cd366835e7a89454975dee6dc | 7e65cd7bdf826419fa16d5b2910b1435f62a1a9f | /scrapy_olx/modules/cautos.py | 80a237553222cdfea92eddc90c2c00490a4708a8 | [] | no_license | vladiH/olx_scrap | 9f94b6e46ea44dc5e46ebdc3657e98fa88a48f91 | 2004c812642e2d876622de9c20134452dc4ed660 | refs/heads/main | 2023-01-22T06:01:36.836511 | 2020-11-26T18:58:20 | 2020-11-26T18:58:20 | 316,312,546 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py |
# coding: utf-8
# In[ ]:
class AutosOLX():
    """Value object holding one scraped OLX car listing."""

    def __init__(self, pMarca, pModelo, pAño, pKilometraje, pCondicion,
                 pCombustible, pColor, pTransmision, pTipoVendedor, pPrecio,
                 pLugar, pDiaPublicacion, pImage=""):
        """Store every listing field on a matching private attribute."""
        # Tuple unpacking keeps the field/attribute pairing visible at a glance.
        (self._marca, self._modelo, self._año, self._kilometraje,
         self._condicion, self._combustible, self._color, self._transmision,
         self._tipoVendedor, self._precio, self._lugar, self._diaPublicacion,
         self._image) = (pMarca, pModelo, pAño, pKilometraje, pCondicion,
                         pCombustible, pColor, pTransmision, pTipoVendedor,
                         pPrecio, pLugar, pDiaPublicacion, pImage)
| [
"noreply@github.com"
] | vladiH.noreply@github.com |
ccd42e064c82d2ddf378a6eb3ee187a30e951d07 | 4cbf6c2693fceb7f56fc84222fd0d0b1671ed53b | /clientes/models.py | 4f2ec0aa63d11bbfdf5535c5015f1c22c6569dab | [] | no_license | rodrigoctoledo/SerUNivesp | 02802fdf95e927e42bf9151eb8d71f9bc1375e0c | 7878f2d6cf2b81885d1cd73c0190969402a5e5d3 | refs/heads/main | 2023-08-29T13:51:04.697922 | 2021-10-23T01:29:48 | 2021-10-23T01:29:48 | 420,285,169 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | from django.db import models
# Create your models here.
class Cliente(models.Model):
    """Client record; field names and choice labels are Portuguese."""
    # Choices for the single-letter `sexo` field below.
    SEXO_CHOICES = (
        ("F", "Feminino"),
        ("M", "Masculino"),
        ("N", "Nenhuma das opções")
    )
    nome = models.CharField(max_length=100, null=False, blank=False)
    problema = models.CharField(max_length=100, null=False, blank=False)
    data_nascimento = models.DateField(null=False, blank=False)
    email = models.EmailField(null=False, blank=False)
    profissao = models.CharField(max_length=50, null=False, blank=False)
    sexo = models.CharField(max_length=1, choices=SEXO_CHOICES, blank=False, null=False)
    def __str__(self):
        # Admin and shell display show the client's name.
        return self.nome
| [
"rodrigo_toledo@outlook.com"
] | rodrigo_toledo@outlook.com |
0f45e9b018c09a1ea0bceb0cc5a473f025a73c35 | cb36b696e7d4128d9b234239b1b27e845193a81e | /specification/webapps/urls.py.bak | ab4a65a1a728901c374f0561067eeb77789d68fd | [] | no_license | js8383/Plaza | 8682922e5c36b72da83346058d7920b2a6f64a38 | 15e09db12369dfa7c275a9df43c80ad6d260eccb | refs/heads/master | 2021-01-20T14:03:38.489840 | 2017-02-22T03:27:56 | 2017-02-22T03:27:56 | 82,732,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 737 | bak | """webapps URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include,url
# Delegate every request path to the plaza app's urlconf.
urlpatterns = [
    url(r'^', include('plaza.urls'))
]
| [
"mrigesh@cmu.edu"
] | mrigesh@cmu.edu |
7e9bc8c8ada0baa06ab47fa561af1ba9a1656353 | 4e96f383d4703ad8ee58869ed91a0c8432c8a051 | /Cura/Uranium/UM/Scene/GroupDecorator.py | 683f4d0b12d57e068187742b233f7f8283baa708 | [
"GPL-3.0-only",
"LGPL-3.0-only"
] | permissive | flight7788/3d-printing-with-moveo-1 | b2dba26010c4fa31815bc1d2d0966161a8600081 | 7fcb9c6b5da9245d54ac917de8c2a7f5148e42b0 | refs/heads/Feature_Marlin_with_AlanBoy | 2022-08-30T18:36:44.785058 | 2020-05-30T07:52:58 | 2020-05-30T07:52:58 | 212,583,912 | 0 | 0 | MIT | 2020-05-16T07:39:47 | 2019-10-03T13:13:01 | C | UTF-8 | Python | false | false | 1,777 | py | from UM.Scene.SceneNodeDecorator import SceneNodeDecorator
from UM.Scene.Selection import Selection
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from UM.Scene.SceneNode import SceneNode
class GroupDecorator(SceneNodeDecorator):
    """Marks a SceneNode as a group; an empty group may remove itself from the
    scene and re-attach to its old parent once it gains a child again."""
    def __init__(self, remove_when_empty: bool = True) -> None:
        super().__init__()
        # Used to keep track of previous parent when an empty group removes itself from the scene.
        # We keep this option so that it's possible to undo it.
        self._old_parent = None  # type: Optional[SceneNode]
        self._remove_when_empty = remove_when_empty
    def setNode(self, node: "SceneNode") -> None:
        super().setNode(node)
        if self._node is not None:
            # Watch membership changes so the group can dissolve/revive itself.
            self._node.childrenChanged.connect(self._onChildrenChanged)
    def isGroup(self) -> bool:
        return True
    def getOldParent(self) -> Optional["SceneNode"]:
        # Parent the group had before removing itself (used for undo).
        return self._old_parent
    def _onChildrenChanged(self, node: "SceneNode") -> None:
        if self._node is None:
            return
        if not self._remove_when_empty:
            return
        if not self._node.hasChildren():
            # A group that no longer has children may remove itself from the scene
            self._old_parent = self._node.getParent()
            self._node.setParent(None)
            Selection.remove(self._node)
        else:
            # A group that has removed itself from the scene because it had no children may add itself back to the scene
            # when a child is added to it.
            if not self._node.getParent() and self._old_parent:
                self._node.setParent(self._old_parent)
                self._old_parent = None
    # NOTE(review): the copy below creates a fresh decorator and so ignores
    # self._remove_when_empty — deep copies always fall back to the default
    # (True). Confirm this is intended.
    def __deepcopy__(self, memo):
return GroupDecorator() | [
"t106360212@ntut.org.tw"
] | t106360212@ntut.org.tw |
321dbc18ce72b16a04c2a6863716f27a3cf2afd6 | d3b0941251ae665d1abef0ab9556f55d5b1fa866 | /code/10-django/project1/templalte_test/logining/views.py | 0eb95e9096e91ab1a4e21bfc587386aea6a4d912 | [] | no_license | yujingjingwudi/python_0_to_1 | 371d8c6f327e7184489b5cfa10f30f6d3546e6c6 | d2f8e9f2c530740a0c0cd30038468c489d4d34d5 | refs/heads/main | 2023-02-12T09:29:47.383328 | 2021-01-09T15:14:08 | 2021-01-09T15:14:08 | 302,399,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,727 | py | from django.shortcuts import render,redirect
from django.http import response,HttpResponse
from django.urls import reverse
# Create your views here.
def login_required(func_login):
    """Decorator: run the wrapped view only when the session marks the user
    as logged in; otherwise redirect to the login page.

    :param func_login: the view function to protect.
    :return: the wrapping view.
    """
    def draw(require,*args,**kwargs):
        # `in` replaces the Python-2-only / deprecated has_key() call; modern
        # Django sessions (and plain dicts) support membership via __contains__.
        if 'islogin' in require.session:
            return func_login(require,*args,**kwargs)
        else:
            return redirect('/login/login')
    return draw
@login_required
def index(request):
    # Landing page; @login_required bounces anonymous users to /login/login.
    return render(request, 'login/index.html')
def login(request):
    """Render the login page, or go straight to the index when the session is
    already authenticated. Pre-fills the username field from the 'username'
    cookie when present.
    """
    # `in` replaces the Python-2-only / deprecated has_key() call.
    if 'islogin' in request.session:
        return redirect('/login/index')
    else:
        if 'username' in request.COOKIES:
            username = request.COOKIES['username']
        else:
            username = ''
        return render(request,'login/loginpage.html',{'username':username})
def login_check(request):
username = request.POST.get('user')
password = request.POST.get('pwd')
remember = request.POST.get('rem')
print(username + password)
if username == "yujingjing" and password == "19970806":
response = redirect('/login/index')
if remember == "on":
response.set_cookie('username', username, max_age=7 * 24 * 3600)
request.session['islogin'] = True
return response
else:
response = redirect('/login/login')
return response
def index2(request):
return render(request,'login/index2.html')
def url_reverse(request):
return render(request,'login/url_reverse.html')
def show_args(request,a,b):
return HttpResponse(a+':'+b)
def show_kwargs(request,c,d):
return HttpResponse(c+ ':' + d)
def test_redirect(request):
url = reverse('logining:show_kwargs',kwargs={'c':3,'d':4})
return redirect(url) | [
"1561871629@qq.com"
] | 1561871629@qq.com |
ce22cb65150d2169f449c5ca93eb7758b2bee038 | e8b87b6ae9c7d479b66f82e17cb9f96423a9c4a3 | /conf.py | be5168ebceb1d51159f39615f15085966978466b | [
"MIT"
] | permissive | tmarktaylor/phmdoctest | d6c612d698c8e4c17e25fa5d245df9524b5d649b | bbb8a4fb4678a7147d16bbbd87edb3c638fc1d71 | refs/heads/master | 2022-06-26T14:24:12.377691 | 2022-03-22T15:34:24 | 2022-03-22T15:34:24 | 248,006,245 | 21 | 4 | MIT | 2022-03-22T15:32:41 | 2020-03-17T15:31:54 | Python | UTF-8 | Python | false | false | 2,635 | py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
py_sources_path = os.path.abspath("./src")
sys.path.insert(0, py_sources_path)
# -- Project information -----------------------------------------------------
# This file is placed in the project root directory rather than /doc.
# Configuration for Sphinx
project = "phmdoctest"
copyright = "2021, Mark Taylor"
author = "Mark Taylor"
# The full version, including alpha/beta/rc tags
release = "1.4.0"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx_rtd_theme",
"myst_parser",
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
]
myst_heading_anchors = 2
source_suffix = {
".rst": "restructuredtext",
".md": "markdown",
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# tmarktaylor: The documentation sources are at the project root.
# Any .md, .rst, or folders at the project root that don't
# belong in the documentation should be listed here.
#
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
"tests",
"src",
".tox",
"env",
".pytest_cache",
"_build",
"Thumbs.db",
".DS_Store",
# for personal dev environments
".export*",
]
master_doc = "index"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# html_theme = "alabaster"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = [] # ['_static']
| [
"24257134+tmarktaylor@users.noreply.github.com"
] | 24257134+tmarktaylor@users.noreply.github.com |
2f9bce858147dcf1996bd5661690506c4d32d259 | d7fe33ef0959cf8d319db5e8c9d08b22ac100f50 | /04_tavli/main/iso.py | 0702a8fc0b4e479c0242e8db766984c6c5095ffb | [
"MIT"
] | permissive | georstef/GoogleAppEngine | 79aaa3a969457ea318c4d5e50258d7b424dff7cc | 008845ec768926513b1e5219267ea12e184cf3be | refs/heads/master | 2020-04-20T21:34:29.654551 | 2014-08-03T11:07:04 | 2014-08-03T11:07:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,179 | py | # coding: utf-8
ISO_3166 = {
'AF': 'Afghanistan',
'AX': 'Åland Islands',
'AL': 'Albania',
'DZ': 'Algeria',
'AS': 'American Samoa',
'AD': 'Andorra',
'AO': 'Angola',
'AI': 'Anguilla',
'AQ': 'Antarctica',
'AG': 'Antigua and Barbuda',
'AR': 'Argentina',
'AM': 'Armenia',
'AW': 'Aruba',
'AU': 'Australia',
'AT': 'Austria',
'AZ': 'Azerbaijan',
'BS': 'Bahamas',
'BH': 'Bahrain',
'BD': 'Bangladesh',
'BB': 'Barbados',
'BY': 'Belarus',
'BE': 'Belgium',
'BZ': 'Belize',
'BJ': 'Benin',
'BM': 'Bermuda',
'BT': 'Bhutan',
'BO': 'Bolivia',
'BQ': 'Bonaire',
'BA': 'Bosnia and Herzegovina',
'BW': 'Botswana',
'BV': 'Bouvet Island',
'BR': 'Brazil',
'IO': 'British Indian Ocean Territory',
'BN': 'Brunei Darussalam',
'BG': 'Bulgaria',
'BF': 'Burkina Faso',
'BI': 'Burundi',
'KH': 'Cambodia',
'CM': 'Cameroon',
'CA': 'Canada',
'CV': 'Cape Verde',
'KY': 'Cayman Islands',
'CF': 'Central African Republic',
'TD': 'Chad',
'CL': 'Chile',
'CN': 'China',
'CX': 'Christmas Island',
'CC': 'Cocos (Keeling) Islands',
'CO': 'Colombia',
'KM': 'Comoros',
'CG': 'Congo',
'CD': 'Congo, the Democratic Republic of the',
'CK': 'Cook Islands',
'CR': 'Costa Rica',
'CI': "Côte d'Ivoire",
'HR': 'Croatia',
'CU': 'Cuba',
'CW': 'Curaçao',
'CY': 'Cyprus',
'CZ': 'Czech Republic',
'DK': 'Denmark',
'DJ': 'Djibouti',
'DM': 'Dominica',
'DO': 'Dominican Republic',
'EC': 'Ecuador',
'EG': 'Egypt',
'SV': 'El Salvador',
'GQ': 'Equatorial Guinea',
'ER': 'Eritrea',
'EE': 'Estonia',
'ET': 'Ethiopia',
'FK': 'Falkland Islands (Malvinas)',
'FO': 'Faroe Islands',
'FJ': 'Fiji',
'FI': 'Finland',
'FR': 'France',
'GF': 'French Guiana',
'PF': 'French Polynesia',
'TF': 'French Southern Territories',
'GA': 'Gabon',
'GM': 'Gambia',
'GE': 'Georgia',
'DE': 'Germany',
'GH': 'Ghana',
'GI': 'Gibraltar',
'GR': 'Greece',
'GL': 'Greenland',
'GD': 'Grenada',
'GP': 'Guadeloupe',
'GU': 'Guam',
'GT': 'Guatemala',
'GG': 'Guernsey',
'GN': 'Guinea',
'GW': 'Guinea-Bissau',
'GY': 'Guyana',
'HT': 'Haiti',
'HM': 'Heard Island and McDonald Islands',
'VA': 'Holy See (Vatican City State)',
'HN': 'Honduras',
'HK': 'Hong Kong',
'HU': 'Hungary',
'IS': 'Iceland',
'IN': 'India',
'ID': 'Indonesia',
'IR': 'Iran',
'IQ': 'Iraq',
'IE': 'Ireland',
'IM': 'Isle of Man',
'IL': 'Israel',
'IT': 'Italy',
'JM': 'Jamaica',
'JP': 'Japan',
'JE': 'Jersey',
'JO': 'Jordan',
'KZ': 'Kazakhstan',
'KE': 'Kenya',
'KI': 'Kiribati',
'KP': 'North Korea',
'KR': 'South Korea',
'KW': 'Kuwait',
'KG': 'Kyrgyzstan',
'LA': "Lao People's Democratic Republic",
'LV': 'Latvia',
'LB': 'Lebanon',
'LS': 'Lesotho',
'LR': 'Liberia',
'LY': 'Libya',
'LI': 'Liechtenstein',
'LT': 'Lithuania',
'LU': 'Luxembourg',
'MO': 'Macao',
'MK': 'Macedonia, the former Yugoslav Republic of',
'MG': 'Madagascar',
'MW': 'Malawi',
'MY': 'Malaysia',
'MV': 'Maldives',
'ML': 'Mali',
'MT': 'Malta',
'MH': 'Marshall Islands',
'MQ': 'Martinique',
'MR': 'Mauritania',
'MU': 'Mauritius',
'YT': 'Mayotte',
'MX': 'Mexico',
'FM': 'Micronesia, Federated States of',
'MD': 'Moldova',
'MC': 'Monaco',
'MN': 'Mongolia',
'ME': 'Montenegro',
'MS': 'Montserrat',
'MA': 'Morocco',
'MZ': 'Mozambique',
'MM': 'Myanmar',
'NA': 'Namibia',
'NR': 'Nauru',
'NP': 'Nepal',
'NL': 'Netherlands',
'NC': 'New Caledonia',
'NZ': 'New Zealand',
'NI': 'Nicaragua',
'NE': 'Niger',
'NG': 'Nigeria',
'NU': 'Niue',
'NF': 'Norfolk Island',
'MP': 'Northern Mariana Islands',
'NO': 'Norway',
'OM': 'Oman',
'PK': 'Pakistan',
'PW': 'Palau',
'PS': 'Palestine, State of',
'PA': 'Panama',
'PG': 'Papua New Guinea',
'PY': 'Paraguay',
'PE': 'Peru',
'PH': 'Philippines',
'PN': 'Pitcairn',
'PL': 'Poland',
'PT': 'Portugal',
'PR': 'Puerto Rico',
'QA': 'Qatar',
'RE': 'Réunion',
'RO': 'Romania',
'RU': 'Russia',
'RW': 'Rwanda',
'BL': 'Saint Barthélemy',
'SH': 'Saint Helena, Ascension and Tristan da Cunha',
'KN': 'Saint Kitts and Nevis',
'LC': 'Saint Lucia',
'MF': 'Saint Martin (French part)',
'PM': 'Saint Pierre and Miquelon',
'VC': 'Saint Vincent and the Grenadines',
'WS': 'Samoa',
'SM': 'San Marino',
'ST': 'Sao Tome and Principe',
'SA': 'Saudi Arabia',
'SN': 'Senegal',
'RS': 'Serbia',
'SC': 'Seychelles',
'SL': 'Sierra Leone',
'SG': 'Singapore',
'SX': 'Sint Maarten (Dutch part)',
'SK': 'Slovakia',
'SI': 'Slovenia',
'SB': 'Solomon Islands',
'SO': 'Somalia',
'ZA': 'South Africa',
'GS': 'South Georgia and the South Sandwich Islands',
'SS': 'South Sudan',
'ES': 'Spain',
'LK': 'Sri Lanka',
'SD': 'Sudan',
'SR': 'Suriname',
'SJ': 'Svalbard and Jan Mayen',
'SZ': 'Swaziland',
'SE': 'Sweden',
'CH': 'Switzerland',
'SY': 'Syrian Arab Republic',
'TW': 'Taiwan',
'TJ': 'Tajikistan',
'TZ': 'Tanzania, United Republic of',
'TH': 'Thailand',
'TL': 'Timor-Leste',
'TG': 'Togo',
'TK': 'Tokelau',
'TO': 'Tonga',
'TT': 'Trinidad and Tobago',
'TN': 'Tunisia',
'TR': 'Turkey',
'TM': 'Turkmenistan',
'TC': 'Turks and Caicos Islands',
'TV': 'Tuvalu',
'UG': 'Uganda',
'UA': 'Ukraine',
'AE': 'United Arab Emirates',
'GB': 'United Kingdom',
'US': 'United States',
'UM': 'United States Minor Outlying Islands',
'UY': 'Uruguay',
'UZ': 'Uzbekistan',
'VU': 'Vanuatu',
'VE': 'Venezuela',
'VN': 'Vietnam',
'VG': 'Virgin Islands, British',
'VI': 'Virgin Islands, U.S.',
'WF': 'Wallis and Futuna',
'EH': 'Western Sahara',
'YE': 'Yemen',
'ZM': 'Zambia',
'ZW': 'Zimbabwe',
}
| [
"georstef@gmail.com"
] | georstef@gmail.com |
4ed166a20d9979c64bea9b3a6e8eb9f78c5a8259 | 440462ee8f021c5a81b7f08b95a79e91c1d14ae8 | /python/test_turtletree.py | 2085f3dc8107dce57e4544517fadfca058992e0a | [] | no_license | rickruan/my_git | 4c9b668618ae21c831aac71608411269da19b36f | 3f7161a10609e7739cb3f1edfdec7b3335488ad0 | refs/heads/master | 2020-04-11T02:54:52.971722 | 2018-12-28T06:49:20 | 2018-12-28T06:49:20 | 161,461,174 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__='Rick Ruan'
from turtle import *
colormode(255)
lt(90)
lv = 14
l = 120
s = 45
width(45)
r = 0
g = 0
b = 0
pencolor(r,g,b)
penup()
bk(1)
pendown()
fd(1)
def draw_tree(l,level):
global r, g, b
w = width()
width(w * 3.0 / 4.0)
r = r + 1
g = g + 2
b = b + 3
pencolor(r % 200,g % 200,b % 200)
l = 3.0 / 4.0 * l
lt(s)
fd(l)
if level < lv:
draw_tree(l, level + 1)
bk(l)
rt(2*s)
fd(l)
if level < lv:
draw_tree(l, level + 1)
bk(l)
lt(s)
# restore the previous pen width
width(w)
speed("fastest")
draw_tree(l, 1)
done() | [
"ruanchenghua@hotmail.com"
] | ruanchenghua@hotmail.com |
5ea7260fd32549cedb655f25f0203c46a9cf9fc4 | d9749c17d7177f4ee9478296270e2aec8bc9326e | /mysite2/polls/migrations/0001_initial.py | fbec2eddd4e3a9b3556fb01965a2ee65e733457e | [] | no_license | haoii/django_practice | 7ed4e1da9f055f68a468b1c13da1e7054c01e236 | bb930f2603ea7356365f3932d4e06dc3d5663f0b | refs/heads/master | 2020-04-20T18:47:40.616898 | 2019-05-29T06:49:23 | 2019-05-29T06:49:23 | 169,030,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,165 | py | # Generated by Django 2.1.5 on 2019-02-02 09:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
('votes', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
],
),
migrations.AddField(
model_name='choice',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question'),
),
]
| [
"gf_hao@126.com"
] | gf_hao@126.com |
c25b86b53b2e5695b7796c87743d2cf18ea88a94 | 167fa45edcf1e681102c0d908cb50bd5d130c7cf | /blog/settings.py | bf645230ebbcdda62f9e93505f1b98b61cf8c8b3 | [] | no_license | snahmd/django-rest | 3b790e190a3d06083f26f42c471cb25d0eccecbb | 11cb58cf47422e120a8b0541286762588bb17fc3 | refs/heads/master | 2021-01-16T12:27:53.544670 | 2020-05-14T21:58:34 | 2020-05-14T21:58:34 | 243,120,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,219 | py | """
Django settings for blog project.
Generated by 'django-admin startproject' using Django 2.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&(!%4-7&p7l!2^o1gvg0nw-p!%y!$x+8_sb8g@lnzw95ml!f60'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'post',
'comment',
'favourite',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
| [
"sanahmed7@gmail.com"
] | sanahmed7@gmail.com |
38b82490a29e0b7e8f7e27fc7c93ad47b1c58978 | 0105fca7815ca339dd4bf41761d2441f69aa178c | /detox/__init__.py | a506fc880f2c34e3950adc92cf52b8080bf44012 | [
"MIT"
] | permissive | tdhopper/detox | c6ffce7a895b847cf1cb6b6d797b0c3fb52bb5b1 | c1eb45830a31af1e86bf1fc5c088e04d5db3a99e | refs/heads/master | 2020-04-05T00:33:14.340978 | 2018-11-04T17:08:23 | 2018-11-04T17:08:23 | 156,401,355 | 0 | 0 | MIT | 2018-11-06T15:05:04 | 2018-11-06T15:05:03 | null | UTF-8 | Python | false | false | 21 | py | __version__ = "0.18"
| [
"oliver.bestwalter@avira.com"
] | oliver.bestwalter@avira.com |
c8644a92cf46aee0b53de559ec154935c6fe5d33 | 95f179faa843361ae72f5126926bf05d51925a24 | /parser/state_machine_test.py | c87ba718eb2921056f03c9e4ffff2a6b10e148dc | [] | no_license | ReliableDragon/NestedGenerator | e2597fd72c85e26bf0534d9e35e5365a7fb214d2 | 848f9589bbf4447eb3282b458db5ebbd0e827385 | refs/heads/master | 2022-12-25T19:32:42.832712 | 2020-10-05T07:12:47 | 2020-10-05T07:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,091 | py | import unittest
import logging
import sys
from state_machine import StateMachine, StateRecord, State, Edge, START_STATE, FINAL_STATE
logging.basicConfig(level=logging.DEBUG)
def gen_test_state_record(id_, start, end):
return StateRecord(0, id_, start, end)
class StateMethodsTestCase(unittest.TestCase):
def test_clone(self):
original_state = State('a', [Edge('a', 'a_state'), Edge('b', 'b_state')], is_automata = True)
cloned_state = original_state.clone()
self.assertTrue(cloned_state.id == 'a')
self.assertTrue(cloned_state.is_automata == True)
self.assertTrue(cloned_state.edges[0].input == 'a')
self.assertTrue(cloned_state.edges[0].dest == 'a_state')
self.assertTrue(cloned_state.edges[1].input == 'b')
self.assertTrue(cloned_state.edges[1].dest == 'b_state')
cloned_state.edges[0].input = 'aaaaaaaaaaaaaaa'
self.assertTrue(original_state.edges[0].input == 'a')
class SimpleMachinesTestCase(unittest.TestCase):
def test_state_record_equality(self):
state_record_1 = StateRecord('a', START_STATE, 0, 0)
state_record_2 = StateRecord('a', START_STATE, 0, 0)
self.assertEqual(state_record_1, state_record_2)
# For accepting raw tokens
def test_accepts_single_symbol(self):
machine = StateMachine(states=[State(START_STATE, Edge('abc', FINAL_STATE)), State(FINAL_STATE)])
self.assertTrue(machine.accepts('abc')[0])
# For accepting [optional] tokens
def test_accepts_optional_symbols(self):
machine = StateMachine(
states=[
State(START_STATE, [Edge('a', FINAL_STATE), Edge('a', 'b')]),
State('b', Edge('b', FINAL_STATE)),
State(FINAL_STATE)])
self.assertTrue(machine.accepts('a')[0])
machine.reset()
self.assertTrue(machine.accepts('ab')[0])
machine.reset()
self.assertFalse(machine.accepts('b')[0])
# For accepting repeated tokens
def test_accepts_repeated_symbols(self):
machine = StateMachine(states=[State(START_STATE, Edge('a', FINAL_STATE)), State(FINAL_STATE, Edge('', START_STATE))])
self.assertTrue(machine.accepts('a')[0])
machine.reset()
self.assertTrue(machine.accepts('aa')[0])
machine.reset()
self.assertTrue(machine.accepts('aaa')[0])
# For accepting N repeated tokens
def test_accepts_N_repeated_symbols(self):
machine = StateMachine(
states=[
State(START_STATE, Edge('a', 'a1')),
State('a1', Edge('a', FINAL_STATE)),
State(FINAL_STATE)
])
self.assertFalse(machine.accepts('a')[0])
machine.reset()
self.assertTrue(machine.accepts('aa')[0])
machine.reset()
self.assertFalse(machine.accepts('aaa')[0])
# For accepting N* repeated tokens
def test_accepts_N_star_repeated_symbols(self):
machine = StateMachine(
states=[
State(START_STATE, Edge('a', 'a1')),
State('a1', Edge('a', 'a2')),
State('a2', [Edge('a', FINAL_STATE), Edge('a', 'a2')]),
State(FINAL_STATE),
])
self.assertFalse(machine.accepts('a')[0])
machine.reset()
self.assertFalse(machine.accepts('aa')[0])
machine.reset()
self.assertTrue(machine.accepts('aaa')[0])
machine.reset()
self.assertTrue(machine.accepts('aaaa')[0])
# For accepting *N repeated tokens
def test_accepts_star_N_repeated_symbols(self):
machine = StateMachine(
states=[
State(START_STATE, [Edge('a', 'a1'), Edge('a', FINAL_STATE)]),
State('a1', [Edge('a', 'a2'), Edge('a', FINAL_STATE)]),
State('a2', Edge('a', FINAL_STATE)),
State(FINAL_STATE)
])
self.assertTrue(machine.accepts('a')[0])
machine.reset()
self.assertTrue(machine.accepts('aa')[0])
machine.reset()
self.assertTrue(machine.accepts('aaa')[0])
machine.reset()
self.assertFalse(machine.accepts('aaaa')[0])
# For accepting A / B tokens
def test_accepts_both_optional_paths(self):
machine = StateMachine(
states=[
State(START_STATE, [Edge('a', 'b'), Edge('a', 'c')]),
State('b', Edge('b', FINAL_STATE)),
State('c', Edge('c', FINAL_STATE)),
State(FINAL_STATE)
])
self.assertTrue(machine.accepts('ab')[0])
machine.reset()
self.assertTrue(machine.accepts('ac')[0])
class CharacterClassesTestCase(unittest.TestCase):
# For accepting raw tokens
def test_accepts_single_digit(self):
machine = StateMachine(states=[State(START_STATE, Edge('DIGIT', FINAL_STATE, True)), State(FINAL_STATE)])
self.assertTrue(machine.accepts('1')[0])
machine.reset()
self.assertTrue(machine.accepts('2')[0])
machine.reset()
self.assertTrue(machine.accepts('3')[0])
machine.reset()
self.assertFalse(machine.accepts('A')[0])
machine.reset()
self.assertFalse(machine.accepts('a')[0])
# For accepting raw tokens
def test_accepts_single_alpha(self):
machine = StateMachine(states=[State(START_STATE, Edge('ALPHA', FINAL_STATE, True)), State(FINAL_STATE)])
self.assertTrue(machine.accepts('a')[0])
machine.reset()
self.assertTrue(machine.accepts('A')[0])
machine.reset()
self.assertTrue(machine.accepts('z')[0])
machine.reset()
self.assertTrue(machine.accepts('Z')[0])
machine.reset()
self.assertFalse(machine.accepts('0')[0])
# For accepting raw tokens
def test_accepts_single_char(self):
machine = StateMachine(states=[State(START_STATE, Edge('CHAR', FINAL_STATE, True)), State(FINAL_STATE)])
self.assertTrue(machine.accepts('a')[0])
machine.reset()
self.assertTrue(machine.accepts('A')[0])
machine.reset()
self.assertTrue(machine.accepts('1')[0])
machine.reset()
self.assertTrue(machine.accepts('.')[0])
machine.reset()
self.assertTrue(machine.accepts('*')[0])
machine.reset()
self.assertTrue(machine.accepts('[')[0])
machine.reset()
self.assertFalse(machine.accepts('aa')[0])
# For accepting repeated tokens
def test_accepts_repeated_digit(self):
machine = StateMachine(states=[State(START_STATE, Edge('DIGIT', FINAL_STATE, True)), State(FINAL_STATE, Edge('', START_STATE))])
self.assertTrue(machine.accepts('1')[0])
machine.reset()
self.assertTrue(machine.accepts('12')[0])
machine.reset()
self.assertTrue(machine.accepts('123')[0])
machine.reset()
self.assertFalse(machine.accepts('1a3')[0])
# For accepting repeated tokens
def test_accepts_repeated_alpha(self):
machine = StateMachine(states=[State(START_STATE, Edge('ALPHA', FINAL_STATE, True)), State(FINAL_STATE, Edge('', START_STATE))])
self.assertTrue(machine.accepts('a')[0])
machine.reset()
self.assertTrue(machine.accepts('ab')[0])
machine.reset()
self.assertTrue(machine.accepts('abc')[0])
machine.reset()
self.assertFalse(machine.accepts('a2c')[0])
# For accepting repeated tokens
def test_accepts_repeated_char(self):
machine = StateMachine(states=[State(START_STATE, Edge('CHAR', FINAL_STATE, True)), State(FINAL_STATE, Edge('', START_STATE))])
self.assertTrue(machine.accepts('a')[0])
machine.reset()
self.assertTrue(machine.accepts('ab')[0])
machine.reset()
self.assertTrue(machine.accepts('abc')[0])
machine.reset()
self.assertTrue(machine.accepts('1')[0])
machine.reset()
self.assertTrue(machine.accepts('12')[0])
machine.reset()
self.assertTrue(machine.accepts('123')[0])
machine.reset()
self.assertTrue(machine.accepts('1a3')[0])
machine.reset()
self.assertTrue(machine.accepts('a2c')[0])
# For accepting repeated tokens
def test_accepts_mixed_character_classes(self):
machine = StateMachine(states=[
State(START_STATE, Edge('ALPHA', '1', True)),
State('1', Edge('DIGIT', '2', True)),
State('2', Edge('CHAR', FINAL_STATE, True)),
State(FINAL_STATE, Edge('', START_STATE))])
self.assertTrue(machine.accepts('a1a')[0])
machine.reset()
self.assertTrue(machine.accepts('b22')[0])
machine.reset()
self.assertTrue(machine.accepts('Z0&')[0])
machine.reset()
self.assertTrue(machine.accepts('p0)m3<')[0])
machine.reset()
self.assertTrue(machine.accepts('a1!b2@c3#d4$e5%')[0])
machine.reset()
self.assertFalse(machine.accepts('1a&')[0])
machine.reset()
self.assertFalse(machine.accepts('aaa')[0])
machine.reset()
self.assertFalse(machine.accepts('111')[0])
machine.reset()
self.assertFalse(machine.accepts('a1')[0])
class ContinuationTestCases(unittest.TestCase):
# For allowing repeated calls to sub-automata
def test_continuation_works(self):
machine = StateMachine(states=[State(START_STATE, Edge('a', FINAL_STATE)), State(FINAL_STATE, Edge('', START_STATE))])
start_record = gen_test_state_record(START_STATE, 0, 1)
state_records = [start_record]
result = (True, state_records, 1)
self.assertEqual(machine.accepts_partial('aaa'), result)
end_record = gen_test_state_record(FINAL_STATE, 1, 1)
start_record = gen_test_state_record(START_STATE, 0, 1)
start_record2 = gen_test_state_record(START_STATE, 1, 2)
state_records = [start_record, end_record, start_record2]
result = (True, state_records, 2)
self.assertEqual(machine.accepts_partial('aaa'), result)
start_record = gen_test_state_record(START_STATE, 0, 1)
end_record = gen_test_state_record(FINAL_STATE, 1, 1)
start_record2 = gen_test_state_record(START_STATE, 1, 2)
end_record2 = gen_test_state_record(FINAL_STATE, 2, 2)
start_record3 = gen_test_state_record(START_STATE, 2, 3)
state_records = [start_record, end_record, start_record2, end_record2, start_record3]
result = (True, state_records, 3)
self.assertEqual(machine.accepts_partial('aaa'), result)
self.assertEqual(machine.accepts_partial('aaa'), (False, None, -1))
# For allowing repeated calls to sub-automata
def test_continuation_with_full_automata_repetition(self):
machine = StateMachine(states=[
State(START_STATE, Edge('', 'a')),
State('a', Edge('a', FINAL_STATE)),
State(FINAL_STATE, Edge('', START_STATE))])
start_record = gen_test_state_record(START_STATE, 0, 0)
a_record = gen_test_state_record('a', 0, 1)
state_records = [start_record, a_record]
result = (True, state_records, 1)
self.assertEqual(machine.accepts_partial('aab'), result)
start_record = gen_test_state_record(START_STATE, 0, 0)
a_record = gen_test_state_record('a', 0, 1)
end_record = gen_test_state_record(FINAL_STATE, 1, 1)
start_record_2 = gen_test_state_record(START_STATE, 1, 1)
a_record_2 = gen_test_state_record('a', 1, 2)
state_records = [start_record, a_record, end_record, start_record_2, a_record_2]
result = (True, state_records, 2)
self.assertEqual(machine.accepts_partial('aab'), result)
self.assertEqual(machine.accepts_partial('aab'), (False, None, -1))
# For allowing repeated calls to sub-automata
def test_continuation_with_internal_repetition(self):
machine = StateMachine(states=[
State(START_STATE, Edge('', 'a')),
State('a', [Edge('a', 'a'), Edge('', FINAL_STATE)]),
State(FINAL_STATE)])
start_record = gen_test_state_record(START_STATE, 0, 0)
a_record = gen_test_state_record('a', 0, 0)
state_records = [start_record, a_record]
result = (True, state_records, 0)
self.assertEqual(machine.accepts_partial('aab'), result)
start_record = gen_test_state_record(START_STATE, 0, 0)
a_record = gen_test_state_record('a', 0, 1)
a_record_2 = gen_test_state_record('a', 1, 1)
state_records = [start_record, a_record, a_record_2]
result = (True, state_records, 1)
self.assertEqual(machine.accepts_partial('aab'), result)
start_record = gen_test_state_record(START_STATE, 0, 0)
a_record = gen_test_state_record('a', 0, 1)
a_record_2 = gen_test_state_record('a', 1, 2)
a_record_3 = gen_test_state_record('a', 2, 2)
state_records = [start_record, a_record, a_record_2, a_record_3]
result = (True, state_records, 2)
self.assertEqual(machine.accepts_partial('aab'), result)
self.assertEqual(machine.accepts_partial('aab'), (False, None, -1))
# For testing the ordering of edge traversal. Since traversal is via DFS,
# the internal loop should be preferred over the outside loop. Looks very
# similar to the previous test, but validates something different.
def test_edge_traversal_order(self):
machine = StateMachine(states=[
State(START_STATE, Edge('', 'a')),
State('a', [Edge('a', 'a'), Edge('', FINAL_STATE)]),
State(FINAL_STATE)])
start_record = gen_test_state_record(START_STATE, 0, 0)
a_record = gen_test_state_record('a', 0, 0)
end_record = gen_test_state_record(FINAL_STATE, 0, 0)
state_records = [start_record, a_record]
result = (True, state_records, 0)
self.assertEqual(machine.accepts_partial('aab'), result)
start_record = gen_test_state_record(START_STATE, 0, 0)
a_record = gen_test_state_record('a', 0, 1)
a_record_2 = gen_test_state_record('a', 1, 1)
state_records = [start_record, a_record, a_record_2]
result = (True, state_records, 1)
self.assertEqual(machine.accepts_partial('aab'), result)
start_record = gen_test_state_record(START_STATE, 0, 0)
a_record = gen_test_state_record('a', 0, 1)
a_record_2 = gen_test_state_record('a', 1, 2)
a_record_3 = gen_test_state_record('a', 2, 2)
state_records = [start_record, a_record, a_record_2, a_record_3]
result = (True, state_records, 2)
self.assertEqual(machine.accepts_partial('aab'), result)
self.assertEqual(machine.accepts_partial('aab'), (False, None, -1))
class SubMachineTestCases(unittest.TestCase):
# For ensuring that sub-automata register and run properly.
def test_sub_machines(self):
machine = StateMachine(id_='a', states=[
State(START_STATE, Edge('', 'b')),
State('b', Edge('a', FINAL_STATE), is_automata=True),
State(FINAL_STATE, Edge('', START_STATE))
])
submachine_b = StateMachine(id_='b', states=[
State(START_STATE, Edge('b', FINAL_STATE)),
State(FINAL_STATE, Edge('', START_STATE))
])
machine.register_automata(submachine_b)
start_record = StateRecord('a', START_STATE, 0, 0)
nested_start_record = StateRecord('b', START_STATE, 0, 1)
submachine_record = StateRecord('a', 'b_internal', 0, 1, [nested_start_record])
b_record = StateRecord('a', 'b', 1, 2)
state_records = [start_record, submachine_record, b_record]
result = (True, state_records, 2)
self.assertEqual(machine.accepts_partial('ba'), result)
# For ensuring that sub-automata handle repetition.
def test_repeated_sub_machines(self):
machine = StateMachine(id_='b', states=[
State(START_STATE, Edge('', 'a')),
State('a', Edge('b', FINAL_STATE), is_automata=True),
State(FINAL_STATE, Edge('', START_STATE))
])
submachine_b = StateMachine(id_='a', states=[
State(START_STATE, Edge('a', FINAL_STATE)),
State(FINAL_STATE, Edge('', START_STATE))
])
machine.register_automata(submachine_b)
start_record = StateRecord('b', START_STATE, 0, 0)
nested_start_record = StateRecord('a', START_STATE, 0, 1)
submachine_record = StateRecord('a', 'a_internal', 0, 1, [nested_start_record])
b_record = StateRecord('b', 'a', 1, 2)
end_record = StateRecord('b', FINAL_STATE, 2, 2)
start_record2 = StateRecord('b', START_STATE, 2, 2)
nested_start_record2 = StateRecord('a', START_STATE, 2, 3)
submachine_record2 = StateRecord('a', 'a_internal', 2, 3, [nested_start_record2])
b_record2 = StateRecord('b', 'a', 3, 4)
state_records = [start_record, submachine_record, b_record]
result = (True, state_records, 2)
self.assertEqual(machine.accepts_partial('abab'), result)
state_records = [start_record, submachine_record, b_record, end_record, start_record2, submachine_record2, b_record2]
result = (True, state_records, 4)
self.assertEqual(machine.accepts_partial('abab'), result)
#For ensuring that nested sub-automata register and run properly.
def test_double_nested_sub_machines(self):
machine = StateMachine(id_='c', states=[
State(START_STATE, Edge('', 'b')),
State('b', Edge('c', FINAL_STATE), is_automata=True),
State(FINAL_STATE)
])
submachine_b = StateMachine(id_='b', states=[
State(START_STATE, Edge('', 'a')),
State('a', Edge('b', FINAL_STATE), is_automata=True),
State(FINAL_STATE)
])
submachine_a = StateMachine(id_='a', states=[
State(START_STATE, Edge('a', FINAL_STATE)),
State(FINAL_STATE)
])
submachine_b.register_automata(submachine_a)
machine.register_automata(submachine_b)
start_record = StateRecord('c', START_STATE, 0, 0)
b_start_record = StateRecord('b', START_STATE, 0, 0)
a_record = StateRecord('a', START_STATE, 0, 1)
b_machine_record = StateRecord('b', 'a_internal', 0, 1, [a_record])
b_record = StateRecord('b', 'a', 1, 2)
c_machine_record = StateRecord('c', 'b_internal', 0, 2, [b_start_record, b_machine_record, b_record])
c_record = StateRecord('c', 'b', 2, 3)
state_records = [start_record, c_machine_record, c_record]
result = (True, state_records, 3)
self.assertEqual(machine.accepts_partial('abc'), result)
class AStarBRepeatedStateMachineTestCase(unittest.TestCase):
    """Tests for a machine recognising (a+b)* -- one or more 'a's followed
    by a single 'b', with the whole group repeated zero or more times."""

    def setUp(self):
        # Build a machine for (a+b)*: FINAL epsilon-loops back to START so
        # the a+b group can repeat indefinitely (and the empty string works).
        state_0 = State(START_STATE, [Edge('a', "a*"), Edge('', FINAL_STATE)])
        state_1 = State("a*", [Edge("a", "a*"), Edge("b", FINAL_STATE)])
        state_2 = State(FINAL_STATE, [Edge('', START_STATE)])
        self.machine = StateMachine(states=[state_0, state_1, state_2])

    def test_accepts_empty_string(self):
        self.assertTrue(self.machine.accepts("")[0])

    def test_does_not_accept_a(self):
        self.assertFalse(self.machine.accepts("a")[0])

    def test_does_not_accept_b(self):
        self.assertFalse(self.machine.accepts("b")[0])

    def test_does_not_accept_c(self):
        self.assertFalse(self.machine.accepts("c")[0])

    def test_accepts_ab(self):
        self.assertTrue(self.machine.accepts("ab")[0])

    def test_does_not_accept_ba(self):
        self.assertFalse(self.machine.accepts("ba")[0])

    def test_does_not_accept_abc(self):
        self.assertFalse(self.machine.accepts("abc")[0])

    def test_does_not_accept_aaa(self):
        self.assertFalse(self.machine.accepts("aaa")[0])

    def test_does_not_accept_bbb(self):
        self.assertFalse(self.machine.accepts("bbb")[0])

    # BUG FIX: this method was also named test_accepts_aaabaaab, so the
    # later definition shadowed it and "aaaaaaab" was never actually run.
    def test_accepts_aaaaaaab(self):
        self.assertTrue(self.machine.accepts("aaaaaaab")[0])

    def test_accepts_aaabaaab(self):
        self.assertTrue(self.machine.accepts("aaabaaab")[0])

    def test_does_not_accept_aaabbaaab(self):
        self.assertFalse(self.machine.accepts("aaabbaaab")[0])
class AorBCorDEFFiveTimesStateMachineTestCase(unittest.TestCase):
    """Tests for a machine recognising exactly five repetitions of the
    tokens "a", "bc" or "def" (ABNF: 5("a" / "bc" / "def"))."""

    def setUp(self):
        # Build a machine for 5("a" / "bc" / "def"): five chained layers,
        # each offering the three token edges, then an epsilon into FINAL.
        def gen_edges(i):
            return [Edge('a', f'a{i}'), Edge('bc', f'bc{i}'), Edge('def', f'def{i}')]

        states = [State(START_STATE, gen_edges(0))]
        for i in range(4):
            states += [
                State(f'a{i}', gen_edges(i + 1)),
                State(f'bc{i}', gen_edges(i + 1)),
                State(f'def{i}', gen_edges(i + 1)),
            ]
        # Fifth-layer states feed FINAL; the f-prefixes were dropped from
        # these literals since they contain no placeholders.
        # NOTE(review): State('FINAL') looks like it should be
        # State(FINAL_STATE); left as-is to preserve behaviour.
        states += [
            State('a4', Edge('', FINAL_STATE)),
            State('bc4', Edge('', FINAL_STATE)),
            State('def4', Edge('', FINAL_STATE)),
            State('FINAL')
        ]
        self.machine = StateMachine(states=states)

    def test_does_not_accept_empty_string(self):
        self.assertFalse(self.machine.accepts("")[0])

    def test_does_not_accept_a(self):
        self.assertFalse(self.machine.accepts("a")[0])

    def test_does_not_accept_b(self):
        self.assertFalse(self.machine.accepts("b")[0])

    def test_does_not_accept_bc(self):
        self.assertFalse(self.machine.accepts("bc")[0])

    def test_does_not_accept_c(self):
        self.assertFalse(self.machine.accepts("c")[0])

    def test_does_not_accept_cd(self):
        self.assertFalse(self.machine.accepts("cd")[0])

    def test_does_not_accept_cde(self):
        self.assertFalse(self.machine.accepts("cde")[0])

    def test_accepts_abcdefabc(self):
        self.assertTrue(self.machine.accepts("abcdefabc")[0])

    def test_does_not_accept_aaaa(self):
        self.assertFalse(self.machine.accepts("aaaa")[0])

    def test_accepts_aaaaa(self):
        self.assertTrue(self.machine.accepts("aaaaa")[0])

    def test_does_not_accept_aaaaaa(self):
        self.assertFalse(self.machine.accepts("aaaaaa")[0])

    def test_does_not_accept_cdecdecdecde(self):
        self.assertFalse(self.machine.accepts("cdecdecdecde")[0])

    # BUG FIX: was named test_accepts_... although it asserts rejection;
    # 'cde' is not one of the machine's tokens, so even five repeats fail.
    def test_does_not_accept_cdecdecdecdecde(self):
        self.assertFalse(self.machine.accepts("cdecdecdecdecde")[0])

    # BUG FIX: was a duplicate of test_does_not_accept_cdecdecdecde, which
    # shadowed the four-repeat test above and hid this six-repeat case.
    def test_does_not_accept_cdecdecdecdecdecde(self):
        self.assertFalse(self.machine.accepts("cdecdecdecdecdecde")[0])

    def test_accepts_defabcaa(self):
        self.assertTrue(self.machine.accepts("defabcaa")[0])
# Allow the suite to be executed directly as a script.
if __name__ == '__main__':
    unittest.main()
| [
"gabe@databaseguy.com"
] | gabe@databaseguy.com |
90765918c7b59d144b6a9249b8e43d2b68bd6f18 | 385f55a278b8ee1f02c8948273a1fea377c1196a | /sports/migrations/0006_auto_20190425_1614.py | e880492ed59188df8252e36cfbb93b91035cb690 | [] | no_license | pedromendes96/CMS | 0b23030cd776b95febb26690347a2f7429547ebd | f3d502bec6bb76053a120932d6a9ff6657fedff9 | refs/heads/master | 2020-05-19T13:06:39.843634 | 2019-05-16T19:23:02 | 2019-05-16T19:23:02 | 185,022,077 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,283 | py | # Generated by Django 2.1.7 on 2019-04-25 16:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (2019-04-25).

    Extends the API-Football models: adds an external API code to
    ApiFootballCountry, and flag/logo URLs, a season foreign key and a
    standings flag to ApiFootballLeague.
    """

    dependencies = [
        # Must run after the migration that created ApiFootballSeason
        # (the historical migration name carries the "seasson" typo).
        ('sports', '0005_apifootballseasson'),
    ]

    operations = [
        migrations.AddField(
            model_name='apifootballcountry',
            name='api_code',
            field=models.CharField(default=1, max_length=256),
            # default=1 only back-fills existing rows during the migration.
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='apifootballleague',
            name='flag',
            field=models.URLField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='apifootballleague',
            name='logo',
            field=models.URLField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='apifootballleague',
            name='season',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='sports.ApiFootballSeason'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='apifootballleague',
            name='standings',
            field=models.BooleanField(default=1),
            preserve_default=False,
        ),
    ]
| [
"pedro.trabalho.uma@gmail.com"
] | pedro.trabalho.uma@gmail.com |
e924f88ace813dfebc38b2db2ca355be065de2e3 | 8ce4bd8367d6a5f47c6243133b62f7f87021374f | /neutronpy/instrument/chopper.py | 24a1f054b5c3c8ad3bbf192528298f37c3ffdf74 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | me2d09/neutronpy | 7900517cbb4bad61d41c714eae7c79fbf6247555 | 03dbb058d50118c7a9fe5a1fc8b28eaed82932ab | refs/heads/master | 2022-04-29T03:49:20.268581 | 2022-03-09T05:22:03 | 2022-03-09T05:22:03 | 90,857,694 | 0 | 0 | null | 2017-05-10T11:40:44 | 2017-05-10T11:40:44 | null | UTF-8 | Python | false | false | 3,804 | py | # -*- coding: utf-8 -*-
r"""Chopper class for Time of Flight instrument
"""
import numpy as np
from .exceptions import ChopperError
class Chopper(object):
    r"""Class defining a chopper object for Time of Flight spectrometer
    resolution calculations.

    Parameters
    ----------
    distance : float
        Distance of the chopper from the source in cm.

    speed : float
        Speed of the chopper in Hz.

    width : float
        Width of the beam at the chopper in cm.

    chopper_type : string
        The type of chopper: 'disk' or 'fermi'.

    acceptance : float
        If chopper_type == 'disk', angular acceptance of the chopper in
        degrees, unless `radius` is defined, in which case `acceptance`
        is the size of the opening in the disk in cm. If chopper_type ==
        'fermi', distance between chopper blades in cm.

    counter_rot : bool, optional
        If the disk chopper consists of two counter rotating choppers, set
        to True (Default: False).

    radius : float, optional
        Radius of the chopper in cm. If defined, and chopper_type == 'disk',
        then `acceptance` is assumed to be in units of cm.

    depth : float, optional
        The depth of the Fermi Chopper blades, to calculate angular
        acceptance. Required if chopper_type == 'fermi'.

    tau : float, optional
        Custom value of the resolution of the chopper in standard deviation
        in units of microseconds. Overrides the automatic calculation.
    """

    def __init__(self, distance, speed, width, chopper_type, acceptance, counter_rot=False, radius=None, depth=None, tau=None):
        self.distance = distance
        self.speed = speed
        self.width = width
        self.chopper_type = chopper_type
        self.acceptance = acceptance
        # Two counter-rotating disks double the effective chopping speed.
        if counter_rot:
            self.counter_rot = 2.0
        else:
            self.counter_rot = 1.0
        # Optional attributes are only created when supplied, so the tau
        # property can dispatch on hasattr().
        if radius is not None:
            self.radius = radius
        if depth is not None:
            self.depth = depth
        if tau is not None:
            self.tau_override = tau

    def __repr__(self):
        # Positional arguments, always present.
        args = ', '.join(
            str(getattr(self, key)) for key in ['distance', 'speed', 'width', 'chopper_type', 'acceptance'])
        # BUG FIX: the format string '{0}={1}' was called with a single
        # argument (IndexError whenever depth/tau were set), and 'tau'
        # probed the tau *property* rather than the optional override.
        kwargs = ', '.join(
            '{0}={1}'.format(key, getattr(self, key)) for key in ['radius', 'depth', 'tau_override']
            if getattr(self, key, None) is not None)
        # Drop the empty kwargs piece to avoid a trailing ", ".
        pieces = [piece for piece in (args, kwargs) if piece]
        return "Chopper({0})".format(', '.join(pieces))

    @property
    def tau(self):
        """Calculate the time resolution of the chopper.

        Returns
        -------
        tau : float
            The resolution of the chopper in standard deviation in units of
            microseconds.

        Raises
        ------
        ChopperError
            If chopper_type is invalid, or 'fermi' was requested without a
            `depth`.
        """
        if hasattr(self, 'tau_override'):
            # User-supplied resolution takes precedence.
            return self.tau_override
        elif self.chopper_type == 'disk' and hasattr(self, 'radius'):
            # Disk chopper with an opening of `acceptance` cm on a disk of
            # `radius` cm; convert FWHM to standard deviation.
            return self.acceptance / (self.radius * self.speed * self.counter_rot) / np.sqrt(8 * np.log(2))
        elif self.chopper_type == 'disk' and not hasattr(self, 'radius'):
            # BUG FIX: was `~hasattr(...)` -- bitwise NOT yields -2/-1,
            # both truthy, so the attribute was never actually tested.
            return 1e6 / (self.speed * self.acceptance * self.counter_rot) / 360.0
        elif self.chopper_type == 'fermi':
            try:
                return 1e6 / (self.speed * 2.0 * np.arctan(self.acceptance / self.depth)) / 360.
            except AttributeError:
                raise ChopperError("'depth' not specified, and is a required value for a Fermi Chopper.")
        else:
            # BUG FIX: the message previously formatted the literal string
            # 'chopper_type' instead of the offending value.
            raise ChopperError("'{0}' is an invalid chopper_type. Choose 'disk' or 'fermi', or specify custom tau \
via `tau_override` attribute".format(self.chopper_type))
| [
"noreply@github.com"
] | me2d09.noreply@github.com |
5194f8f07da3f6076e1d822c8540eeca734e3ed5 | 043f47483e53dcef563d24298783fb00ed3b4f40 | /2018/aoc_2018_15.py | 781fe3ed6a2ef0a9ace0204bfbb940119123ae14 | [] | no_license | leppyr64/advent_of_code_python | bd2ed1fb4ea182cd63c5345fc9a0ccd1f45425e5 | 5354271b4874cd9b6fe66b441a911b26cdc90579 | refs/heads/master | 2022-02-03T22:30:10.019039 | 2022-01-05T03:07:41 | 2022-01-05T03:07:41 | 225,717,127 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,220 | py | from operator import itemgetter, attrgetter
from collections import deque
f = open("./2019/2018_15.txt")
inp = f.read().splitlines()
f.close()
M = [[c for c in a] for a in inp]
P = []
class Player(object):
    """One combat unit (goblin 'G' or elf 'E') for the AoC 2018 day-15 sim.

    Operates on the module-level grid ``M`` (list of rows of characters)
    and the module-level unit list ``P``; both are read and mutated in
    place by the methods below.
    """

    # Neighbour offsets: up, left, right, down (reading order).
    dr = [-1, 0, 0, 1]
    dc = [0, -1, 1, 0]

    def __init__(self, row, col, team):
        self.hp = 200        # every unit starts with 200 hit points
        self.r = row
        self.c = col
        self.t = team        # 'G' or 'E'
        self.target = None   # chosen enemy, set by can_attack()

    def __str__(self):
        return str(self.t) + ' (' + str(self.r) + ', ' + str(self.c) + ') ' + str(self.hp)

    def other_team(self):
        """Return the opposing team's letter."""
        if self.t == 'G':
            return 'E'
        return 'G'

    def can_attack(self):
        """Select the adjacent enemy with the fewest hit points, if any.

        Sets self.target and returns True when an enemy occupies one of the
        four neighbouring squares, else False.
        """
        self.target = None
        for i in range(4):
            for x in P:
                if self.c + self.dc[i] == x.c and self.r + self.dr[i] == x.r and self.t != x.t:
                    if self.target is None:
                        self.target = x
                    elif self.target.hp > x.hp:
                        # Prefer the weakest adjacent enemy.
                        self.target = x
        return self.target is not None

    def can_move(self):
        """Take one step along a shortest path toward the nearest enemy.

        Relaxes distances over open ('.') squares, picks the closest square
        adjacent to any enemy (ties broken by reading order), then walks the
        predecessor chain back to make a single step. Returns False when no
        enemy-adjacent square is reachable.
        """
        D = [[100000 for a in b] for b in M]              # best-known distances
        PREV = [[[10000, 10000] for a in b] for b in M]   # predecessor squares
        q = deque()
        q.append([0, self.r, self.c])
        D[self.r][self.c] = 0
        while len(q) != 0:
            x = q.pop()
            r = x[1]
            c = x[2]
            d = x[0]
            for i in range(4):
                newr = r + self.dr[i]
                newc = c + self.dc[i]
                if M[newr][newc] == '.' and D[newr][newc] > d + 1:
                    D[newr][newc] = d + 1
                    PREV[newr][newc] = [r, c]
                    q.append([d + 1, newr, newc])
                elif M[newr][newc] == '.' and D[newr][newc] == d + 1 and (r < PREV[newr][newc][0] or (r == PREV[newr][newc][0] and c < PREV[newr][newc][1])):
                    # BUG FIX: the equal-row tie-break compared r to the
                    # whole [row, col] predecessor list (always False);
                    # it must compare row to row.
                    PREV[newr][newc] = [r, c]
        # Among all squares adjacent to an enemy, pick the reachable one
        # with the smallest distance, breaking ties in reading order.
        bestd = 100000
        bestr = 0
        bestc = 0
        for p in P:
            if p.t != self.t:
                for i in range(4):
                    newr = p.r + self.dr[i]
                    newc = p.c + self.dc[i]
                    if D[newr][newc] < bestd or (D[newr][newc] == bestd and (newr < bestr or (newr == bestr and newc < bestc))):
                        bestd = D[newr][newc]
                        bestr = newr
                        bestc = newc
        if bestd == 100000:
            return False
        print(self.r, self.c, bestr, bestc)
        # Walk the predecessor chain back until one step from our square.
        while PREV[bestr][bestc] != [self.r, self.c]:
            nextr = PREV[bestr][bestc][0]
            nextc = PREV[bestr][bestc][1]
            bestr = nextr
            bestc = nextc
        print(bestr, bestc)
        # Move on the grid: vacate the old square, occupy the new one.
        M[self.r][self.c] = '.'
        self.r = bestr
        self.c = bestc
        M[self.r][self.c] = self.t
        return True

    def take_turn(self):
        """Attack if possible; otherwise move, then attack if now in range."""
        if self.can_attack():
            self.target.hp -= 3
            if self.target.hp < 0:
                M[self.target.r][self.target.c] = '.'
        else:
            self.can_move()
            if self.can_attack():
                self.target.hp -= 3
                if self.target.hp < 0:
                    # BUG FIX: was G[...] -- indexing the goblin *list* by
                    # row would crash; the grid M must be cleared, exactly
                    # as in the direct-attack branch above.
                    M[self.target.r][self.target.c] = '.'
def print_system():
    """Print the battle grid, one line per row, then every unit's status."""
    width = len(M[0])
    for row in M:
        print(''.join(row[col] for col in range(width)))
    for unit in P:
        print(unit)
    print(' ')
# Scan the parsed grid and create a Player for every unit glyph found.
for r in range(len(M)):
    for c in range(len(M[r])):
        if M[r][c] == 'G':
            P.append(Player(r, c, 'G'))
        elif M[r][c] == 'E':
            P.append(Player(r, c, 'E'))
# Per-team views (rebuilt after every round below).
G = [x for x in P if x.t == 'G']
E = [x for x in P if x.t == 'E']
rounds = 0
# Units act in reading order: top-to-bottom, then left-to-right.
P = sorted(P, key=attrgetter('r', 'c'))
print_system()
# Run full rounds until one team has been wiped out.
while len(G) > 0 and len(E) > 0:
    rounds += 1
    for x in P:
        if x.hp > 0:  # skip units killed earlier in this round
            x.take_turn()
    # Drop the dead, re-sort into reading order, refresh the team views.
    P = [x for x in P if x.hp > 0]
    P = sorted(P, key=attrgetter('r', 'c'))
    G = [x for x in P if x.t == 'G']
    E = [x for x in P if x.t == 'E']
    print_system()
# Outcome: completed rounds times total remaining hit points.
# NOTE(review): `sum` shadows the builtin here -- consider renaming.
sum = 0
for p in P:
    sum += p.hp
print (rounds, sum, rounds * sum) | [
"jlepack@gmail.com"
] | jlepack@gmail.com |
d6162670ef8447a5eee238b3fae3ecbc47b7bba6 | c523eff326b8bc6c0c903bf7fe16ec3b98605bff | /choieungi/lv3_네트워크.py | 3041ada5899f6fef4751fee1dbc7020fb06ded27 | [] | no_license | IgoAlgo/Problem-Solving | c76fc157c4dd2afeeb72a7e4a1833b730a0b441d | 5cc57d532b2887cf4eec8591dafc5ef611c3c409 | refs/heads/master | 2023-06-26T05:12:02.449706 | 2021-07-14T06:57:00 | 2021-07-14T06:57:00 | 328,959,557 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 769 | py | from collections import deque
def solution(n, computers):
    """Count the connected components ("networks") among n computers.

    Parameters
    ----------
    n : int
        Number of computers.
    computers : list[list[int]]
        n x n adjacency matrix; computers[i][j] is truthy when computers i
        and j are directly connected (the diagonal is 1).

    Returns
    -------
    int
        The number of connected components, found by running a BFS flood
        fill from every not-yet-visited computer.
    """
    visited = [False] * n

    def _bfs(start):
        # Queue-based flood fill over the adjacency matrix.
        visited[start] = True
        queue = deque([start])
        while queue:
            node = queue.popleft()
            for nxt in range(n):
                if not visited[nxt] and computers[node][nxt]:
                    visited[nxt] = True
                    queue.append(nxt)

    # The original also defined a recursive dfs() that was never called;
    # the dead code has been removed.
    answer = 0
    for node in range(n):
        if not visited[node]:
            _bfs(node)
            answer += 1
    return answer
print(solution(3, [[1, 1, 0], [1, 1, 0], [0, 0, 1]] )) | [
"choieungi@gm.gist.ac.kr"
] | choieungi@gm.gist.ac.kr |
adaf1da2130a33488620aa75caf973fd545999c8 | 3365e4d4fc67bbefe4e8c755af289c535437c6f4 | /.history/src/core/dialogs/waterfall_dialog_20170816111635.py | a04802520a5bea5ee054bf9af320a163c394bcc6 | [] | no_license | kiranhegde/OncoPlotter | f3ab9cdf193e87c7be78b16501ad295ac8f7d2f1 | b79ac6aa9c6c2ca8173bc8992ba3230aa3880636 | refs/heads/master | 2021-05-21T16:23:45.087035 | 2017-09-07T01:13:16 | 2017-09-07T01:13:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,845 | py | '''
Refs:
Embedding plot: https://sukhbinder.wordpress.com/2013/12/16/simple-pyqt-and-matplotlib-example-with-zoompan/
'''
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
from PyQt5.QtWidgets import (QColorDialog, QHeaderView, QApplication, QDialog, QWidget, QPushButton, QVBoxLayout, QTreeWidget, QTreeWidgetItem, QComboBox)
from PyQt5 import QtCore, QtGui
import core.gui.waterfall as waterfall
import numpy as np
import shelve
from pprint import pprint
class CustomCombo(QComboBox):
    """Drop-down of response keys, each rendered with a colour-swatch icon,
    defaulting to the entry matching the patient's own response type."""

    def __init__(self, parent, bar_keys_colors, response_type):
        # NOTE(review): super(QComboBox, self) starts the MRO lookup *past*
        # QComboBox; super(CustomCombo, self) is probably what was intended.
        super(QComboBox, self).__init__(parent)
        # keys is a dictionary: {'key description': color, ...}
        self.dict_of_keys = bar_keys_colors
        self.response_type = response_type
        self.populate()

    def populate(self):
        '''Fill the combo with one icon+text item per response key, then
        select the item matching this patient's response type.'''
        for key in list(self.dict_of_keys.keys()):
            # Solid-colour pixmap used as the item's icon.
            self.pixmap = QtGui.QPixmap(20, 20)
            self.pixmap.fill(QtGui.QColor(self.dict_of_keys[key]))
            self.color_icon = QtGui.QIcon(self.pixmap)
            self.addItem(self.color_icon, key)
        # Default selection: the entry containing this patient's response type.
        self.setCurrentIndex(self.findText(self.response_type, flags=QtCore.Qt.MatchContains))
class Waterfall(QWidget, waterfall.Ui_Waterfall):
    """Settings/side panel for the waterfall plot.

    Collects general plot options from the UI, shows each plotted bar in an
    editable tree, and broadcasts the settings to the plotting widget.
    """

    # Emitted with the list of general plotting parameters (see send_settings).
    plot_settings_signal = QtCore.pyqtSignal(list)
    # Emitted with the list of updated bar artists for redrawing.
    updated_rectangles_signal = QtCore.pyqtSignal(list)

    def __init__(self, parent):
        super(Waterfall, self).__init__(parent)
        self.setupUi(self)
        self.get_settings()
        self.send_settings()
        # Button functions
        self.btn_apply_general_settings.clicked.connect(self.send_settings)
        self.btn_apply_keys_and_colors_settings.clicked.connect(self.send_settings)
        self.patient_tree = self.create_patient_tree()
        self.data_viewer_container.addWidget(self.patient_tree)
        self.btn_color_test.clicked.connect(self.get_color)

    def get_color(self):
        """Prompt the user for a colour and remember the choice."""
        self.color = QColorDialog.getColor()  # returns a QColor object
        # BUG FIX: previously printed the undefined name `color`, raising
        # NameError whenever the colour picker was used.
        print(self.color)

    def get_settings(self):
        """Load the response-key -> colour mapping from the shelve store,
        creating and persisting defaults on first run (or on any failure)."""
        try:
            with shelve.open('WaterfallSettings') as shelfFile:
                self.keys_and_colors = shelfFile['keys_and_colors']
        except Exception:
            # Fall back to (and persist) the default response colours.
            self.keys_and_colors = {
                'CR': '#03945D',
                'PR': '#B1EE97',
                'PD': '#FF6F69',
                'SD': '#707070'}
            with shelve.open('WaterfallSettings') as shelfFile:
                shelfFile['keys_and_colors'] = self.keys_and_colors

    def on_waterfall_data_signal(self, signal):
        """Receive the patient data frame from the data-entry widget."""
        self.waterfall_data = signal['waterfall_data']  # pandas dataframe

    def on_generated_rectangles_signal(self, signal):
        """Receive the plotted bar artists and refresh the data tree."""
        self.rectangles_received = signal[0]
        self.add_items()  # display in table

    def send_settings(self):
        '''
        Emit both general plot settings, and color labeling settings. These
        are the settings to be used when the plot is created.
        '''
        self.general_settings = [
            self.plot_title.text(),
            self.x_label.text(),
            self.y_label.text(),
            # Which horizontal guide lines to draw: +20%, -30%, 0%.
            [self.twenty_percent_line.isChecked(),
             self.thirty_percent_line.isChecked(),
             self.zero_percent_line.isChecked()],
            # How to display responses: text labels, colour-coded, or not at all.
            [self.display_responses_as_text.isChecked(),
             self.display_responses_as_color.isChecked(),
             self.display_no_responses.isChecked()],
            self.include_table.isChecked()
        ]
        self.plot_settings_signal.emit(self.general_settings)

    def create_patient_tree(self):
        '''
        Create QTreeWidget populated with a patient's data for the DataEntry
        dialog.
        '''
        self.tree = QTreeWidget()
        self.root = self.tree.invisibleRootItem()
        self.headers = [
            'Patient #',
            'Best response %',
            'Response',
            'Cancer',
            'Color key',
        ]
        self.headers_item = QTreeWidgetItem(self.headers)
        self.tree.setColumnCount(len(self.headers))
        self.tree.setHeaderItem(self.headers_item)
        self.root.setExpanded(True)
        self.tree.header().setSectionResizeMode(QHeaderView.ResizeToContents)
        self.tree.header().setStretchLastSection(False)
        return self.tree

    def add_items(self):
        '''
        Populate the viewing tree with one row per plotted bar.
        '''
        self.tree.clear()  # clear prior to entering items, prevent aggregation
        i = 0
        for rect in self.rectangles_received:
            # populate editable tree with rect data
            self.rect_item = QTreeWidgetItem(self.root)
            self.rect_params = [
                self.waterfall_data['Patient number'][i],
                rect.get_height(),
                self.waterfall_data['Overall response'][i],
                self.waterfall_data['Cancer'][i]
            ]
            for col in range(0, 4):
                self.rect_item.setText(col, str(self.rect_params[col]))
                self.rect_item.setTextAlignment(col, 4)
            # Last column: colour-key combo defaulting to this response.
            self.tree.setItemWidget(self.rect_item, 4, CustomCombo(self, self.keys_and_colors, self.waterfall_data['Overall response'][i]))
            self.rect_item.setFlags(self.rect_item.flags() | QtCore.Qt.ItemIsEditable)
            i += 1

    def on_updated_tree_item(self):
        # TODO: update the rectangle which was edited
        pass
class WaterfallPlotter(QWidget):
    """Matplotlib canvas widget that draws the waterfall bar chart."""

    # Emitted with [list of bar artists] after each (re)plot so the side
    # panel can populate its data tree.
    generated_rectangles_signal = QtCore.pyqtSignal(list)

    def __init__(self, parent):
        super(WaterfallPlotter, self).__init__(parent)
        self.get_settings()
        self.settings_update = False  # becomes True once settings arrive
        self.figure = plt.figure()
        self.canvas = FigureCanvas(self.figure)
        self.toolbar = NavigationToolbar(self.canvas, self)
        self.btn_plot = QPushButton('Default Plot')
        self.btn_plot.clicked.connect(self.default_plot)
        self.layout = QVBoxLayout()
        self.layout.addWidget(self.toolbar)
        self.layout.addWidget(self.canvas)
        self.layout.addWidget(self.btn_plot)
        self.setLayout(self.layout)

    def on_waterfall_data_signal(self, signal):
        """Receive the patient data frame and enable plotting."""
        self.waterfall_data = signal['waterfall_data']  # pandas dataframe
        self.btn_plot.setEnabled(True)
        # NOTE(review): btn_apply_general_settings is created on the
        # Waterfall settings panel, not on this widget; guard so this
        # handler cannot raise AttributeError.
        if hasattr(self, 'btn_apply_general_settings'):
            self.btn_apply_general_settings.setEnabled(True)

    def get_settings(self):
        """Load the response-key -> colour mapping, persisting defaults on
        first use (mirrors Waterfall.get_settings)."""
        try:
            with shelve.open('WaterfallSettings') as shelfFile:
                self.keys_and_colors = shelfFile['keys_and_colors']
        except Exception:
            # set and use default settings
            self.keys_and_colors = {
                'CR': '#03945D',
                'PR': '#B1EE97',
                'PD': '#FF6F69',
                'SD': '#707070'}
            with shelve.open('WaterfallSettings') as shelfFile:
                shelfFile['keys_and_colors'] = self.keys_and_colors

    def on_general_settings_signal(self, signal):
        """Apply freshly received general settings, or replot from scratch
        when no axes exist yet."""
        self.gen_settings = signal
        self.settings_update = True
        try:
            # Raises AttributeError before the first plot (no self.ax yet).
            self.ax.set_title(self.gen_settings[0])
            self.ax.set_xlabel(self.gen_settings[1])
            self.ax.set_ylabel(self.gen_settings[2])
            self.canvas.draw()
        except Exception as e:
            print(e)
            self.default_plot()

    def bar_colors(self, responses):
        """Map each overall response to its configured bar colour."""
        return [self.keys_and_colors[x] for x in responses]

    def default_plot(self):
        '''
        Plot waterfall data
        '''
        self.figure.clear()
        self.rect_locations = np.arange(len(self.waterfall_data['Best response percent change']))
        self.ax = self.figure.add_subplot(111)
        if self.settings_update == False:
            # No custom settings yet: draw every guide line and colour the
            # bars by response.
            self.ax.axhline(y=20, linestyle='--', c='k', alpha=0.5, lw=2.0, label='twenty_percent')
            self.ax.axhline(y=-30, linestyle='--', c='k', alpha=0.5, lw=2.0, label='thirty_percent')
            self.ax.axhline(y=0, c='k', alpha=1, lw=2.0, label='zero_percent')
            self.ax.grid(color='k', axis='y', alpha=0.25)
            # BUG FIX: the colour list used to be assigned to
            # self.bar_colors, clobbering the bar_colors() method and
            # making every subsequent replot raise TypeError.
            colors = self.bar_colors(self.waterfall_data['Overall response'])
            self.rects = self.ax.bar(self.rect_locations, self.waterfall_data['Best response percent change'], color=colors)
        else:
            # Settings were updated; they are stored in self.gen_settings.
            if self.gen_settings[3][0]:
                self.ax.axhline(y=20, linestyle='--', c='k', alpha=0.5, lw=2.0, label='twenty_percent')
            if self.gen_settings[3][1]:
                self.ax.axhline(y=-30, linestyle='--', c='k', alpha=0.5, lw=2.0, label='thirty_percent')
            if self.gen_settings[3][2]:
                self.ax.axhline(y=0, c='k', alpha=1, lw=2.0, label='zero_percent')
            if self.gen_settings[4][0]:
                # Show responses as text labels on default-coloured bars.
                self.rects = self.ax.bar(self.rect_locations, self.waterfall_data['Best response percent change'])
                self.auto_label_responses(self.ax, self.rects, self.waterfall_data)
            elif self.gen_settings[4][1]:
                # Colour-code the bars by response (same BUG FIX as above).
                colors = self.bar_colors(self.waterfall_data['Overall response'])
                self.rects = self.ax.bar(self.rect_locations, self.waterfall_data['Best response percent change'], color=colors)
            else:
                self.rects = self.ax.bar(self.rect_locations, self.waterfall_data['Best response percent change'])
            if self.gen_settings[5]:
                self.plot_table()
            self.ax.grid(color='k', axis='y', alpha=0.25)
        self.canvas.draw()
        self.ax.hold(False)  # rewrite the plot when plot() called
        self.generated_rectangles_signal.emit([self.rects])

    def plot_table(self):
        """Render the extra data columns as a table beneath the x axis."""
        rows = ['%s' % x for x in self.waterfall_data.keys()]
        rows = rows[4:]  # skip the 4 standard headers; the rest are table rows
        columns = self.waterfall_data['Patient number']  # patient numbers
        cell_text = []
        for row in rows:
            cell_text_temp = []
            for col in range(len(columns)):
                cell_text_temp.append(self.waterfall_data[row][col])
            cell_text.append(cell_text_temp)
        the_table = plt.table(cellText=cell_text, rowLabels=rows, colLabels=columns, loc='bottom', cellLoc='center')
        plt.subplots_adjust(bottom=0.15, left=0.5)
        self.ax.set_xlim(-0.5, len(columns) - 0.5)
        # Hide x-axis ticks/labels; the table supplies the column headers.
        plt.tick_params(
            axis='x',           # changes apply to the x-axis
            which='both',       # both major and minor ticks are affected
            bottom='off',       # ticks along the bottom edge are off
            top='off',          # ticks along the top edge are off
            labelbottom='off'
        )                       # labels along the bottom edge are off

    def update_plot(self):
        '''
        TODO: redraw in place instead of a full default_plot().
        '''
        pass

    def auto_label_responses(self, ax, rects, waterfall_data):
        '''Add labels above/below bars'''
        i = 0
        for rect in rects:
            height = rect.get_height()
            # Place the label above positive bars and below negative ones.
            if height >= 0:
                valign = 'bottom'
            else:
                valign = 'top'
            ax.text(rect.get_x() + rect.get_width() / 2., height,
                    '%s' % waterfall_data['Overall response'][i], ha='center', va=valign)
            i += 1
| [
"ngoyal95@terpmail.umd.edu"
] | ngoyal95@terpmail.umd.edu |
605d0e2113aa05eb58ff1969f93d682fdfc83470 | 8733637c89965bc886bbe2085062e8cb9efaea30 | /hello.py | c2e16806b1d21a99259411c1f3c96f4196fd14ae | [] | no_license | yanglei9211/boostpython | bf638285f9d9cf23a2e60e19655d0e34e66dc9db | cf96408cc7713503de0ba00ab560d8f5dabcedad | refs/heads/master | 2021-01-17T04:52:22.708988 | 2017-02-24T08:17:46 | 2017-02-24T08:17:46 | 83,017,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | #!/usr/bin/env python
# encoding: utf-8
# Smoke test for the Boost.Python extension: import the compiled `hello`
# module and print its greeting (Python 2 `print` statement syntax).
import hello
print hello.greet()
| [
"yanglei040@hotmail.com"
] | yanglei040@hotmail.com |
def genome_path(patterns):
    """Reconstruct the genome spelled by a path of overlapping k-mers.

    Each consecutive pattern overlaps the previous one by k-1 characters,
    so the genome is the first pattern followed by the last character of
    every subsequent pattern.

    BUG FIX: the original dropped the last character of patterns[0] and
    appended the whole last *pattern* (patterns[-1]) on every iteration,
    instead of each pattern's final character.
    """
    return patterns[0] + ''.join(pattern[-1] for pattern in patterns[1:])
| [
"bel.sglee@gmail.com"
] | bel.sglee@gmail.com |
7a45c79e0a2bfce9f24d71e8468e8b1011943dea | 81d2b0b43bf0be14cd98358d47e64a933fab412d | /tutorial/latency_predictor_driver.py | e9987310ee95b90e0bc7bb235b537964c34927e6 | [
"Apache-2.0"
] | permissive | OFANAS/OFANAS_PerformanceEstimation | 28fc26d2f62bec6d53adaa272229c9b1e2b167f7 | 55ac5b54252830f99227fb97108549a5e2569dbe | refs/heads/main | 2023-03-06T01:09:21.655464 | 2021-02-25T23:23:00 | 2021-02-25T23:23:00 | 342,403,013 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,903 | py | import torch
import numpy as np
import time
import random
import os
from accuracy_predictor import AccuracyPredictor
from latency_table import LatencyTable
from evolution_finder import EvolutionFinder
import csv
import sys
sys.path.append("")
from ofa.model_zoo import ofa_net
sample_child_arch = {'wid': None, 'ks': [5, 5, 3, 3, 5, 3, 5, 7, 3, 3, 5, 7, 3, 5, 5, 7, 5, 7, 5, 5],
'e': [6, 6, 6, 6, 4, 4, 4, 4, 4, 4, 4, 4, 6, 6, 6, 6, 6, 6, 6, 6], 'd': [4, 3, 3, 4, 4],
'r': [176]}
sample_latency = 37.044331933251044
# set random seed
random_seed = 10291284
random.seed(random_seed)
np.random.seed(random_seed)
torch.manual_seed(random_seed)
print('Successfully imported all packages and configured random seed to %d!'%random_seed)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
cuda_available = torch.cuda.is_available()
if cuda_available:
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
torch.cuda.manual_seed(random_seed)
print('Using GPU.')
else:
print('Using CPU.')
ofa_network = ofa_net('ofa_mbv3_d234_e346_k357_w1.0', pretrained=True)
print('The OFA Network is ready.')
data_loader = None
# Accuracy Predictor
accuracy_predictor = AccuracyPredictor(
pretrained=True,
device='cuda:0' if cuda_available else 'cpu'
)
print('The accuracy predictor is ready!')
print(accuracy_predictor.model)
# Latency Predictor
target_hardware = 'gpu'
latency_table = LatencyTable(device=target_hardware, use_latency_table=False)
print('The Latency lookup table on %s is ready!' % target_hardware)
""" Hyper-parameters for the evolutionary search process
You can modify these hyper-parameters to see how they influence the final ImageNet accuracy of the search sub-net.
"""
latency_constraint = 25 # ms, suggested range [15, 33] ms
P = 100 # The size of population in each generation
N = 500 # How many generations of population to be searched
r = 0.25 # The ratio of networks that are used as parents for next generation
params = {
'constraint_type': target_hardware, # Let's do FLOPs-constrained search
'efficiency_constraint': latency_constraint,
'mutate_prob': 0.1, # The probability of mutation in evolutionary search
'mutation_ratio': 0.5, # The ratio of networks that are generated through mutation in generation n >= 2.
'efficiency_predictor': latency_table, # To use a predefined efficiency predictor.
'accuracy_predictor': accuracy_predictor, # To use a predefined accuracy_predictor predictor.
'population_size': P,
'max_time_budget': N,
'parent_ratio': r,
'arch': 'compofa', ## change
}
# build the evolution finder
finder = EvolutionFinder(**params)
# import latency_predictor model
if '/latency_predictor' not in os.getcwd():
os.chdir('./latency_predictor')
from latency_predictor import LatencyPredictor
def create_latency_dataset():
    """Sample sub-networks from the OFA search space, measure each one's
    latency via the module-level `finder`, and dump (child_arch, latency)
    rows to a CSV file for training the latency predictor.
    """
    # time to create dataset
    start = time.time()
    # create latency dataset
    number_of_datapoints = 10000
    latency_dataset = finder.create_latency_dataset(search_space='ofa', num_of_samples=number_of_datapoints)
    # create dataset csv file; other hardware names kept for convenience
    curr_hardware = 'Tesla_P40_GPU' #'Tesla_P40_GPU' #'note10_lookuptable' #'Intel_Xeon_CPU' #RTX_2080_Ti_GPU'
    filename = 'latency_predictor/datasets/' + curr_hardware + '_ofa.csv'
    with open(filename, 'w') as csv_file:
        w = csv.writer(csv_file)
        w.writerow(['child_arch', 'latency'])
        # One CSV row per sampled architecture.
        for i in range(len(latency_dataset['child_arch'])):
            child_arch = latency_dataset['child_arch'][i]
            latency = latency_dataset['latency'][i]
            #accuracy = latency_dataset['accuracy'][i]
            w.writerow([child_arch, latency])
    end = time.time()
    print('Wrote Latency Dataset to File: {}'.format(filename))
    print('Time to Create Dataset of {} points: {}'.format(number_of_datapoints, end-start))
def test_new_inference_time(model_path):
    """Load a trained latency-predictor checkpoint and report its average
    per-architecture inference time over 1000 calls.

    Uses the module-level `device` and `sample_child_arch`. The RTX_*
    variable names are historical -- the checkpoint path passed in by the
    caller selects the actual hardware model.
    """
    RTX_checkpt = torch.load(model_path)
    RTX_model = LatencyPredictor().to(device)
    RTX_model.load_state_dict(RTX_checkpt)
    times = []
    # Average over many calls to smooth out timer noise.
    for i in range(1000):
        start = time.time()
        prediction = RTX_model.predict_efficiency(sample_child_arch)
        end = time.time()
        model_time = end - start
        times.append(model_time)
    print('New Inference Time: {} seconds'.format(sum(times) / len(times)))
def test_old_inference_time():
    """Time a single latency lookup through the baseline predictor
    (finder.efficiency_predictor, i.e. the latency table).

    NOTE(review): this measures one call, whereas test_new_inference_time
    averages 1000 -- the two numbers are not strictly comparable.
    """
    start = time.time()
    prediction = finder.efficiency_predictor.predict_efficiency(sample_child_arch)
    end = time.time()
    lookup_time = end - start
    print('Old Inference Time: {} seconds'.format(lookup_time))
# Entry point: benchmark the learned predictor against the lookup-table
# predictor. Uncomment create_latency_dataset() to regenerate the CSV.
if __name__ == '__main__':
    test_new_inference_time('../checkpoints/latency_prediction_model/checkpoint_note10_ofa.pt')#RTX_2080_Ti_GPU_ofa.pt')
    test_old_inference_time()
    # create_latency_dataset()
| [
"ofanas2021@gmail.com"
] | ofanas2021@gmail.com |
8494c0c3f9e66a99da55112d74c3aa5ee4a579e9 | 8dedd2fab474a598c9796e86c7b3a85765e5dd5a | /apps/registro_hora_extra/migrations/0001_initial.py | 99df080cce4f4141a245a1456d7cc177e50ec9fb | [] | no_license | silvio-fullstack/gestao_rh | 82f0082e0f73f7d6809c1253d95645da3c1c3831 | e4a674e3a1fbff2d6237a1f31031ae7054881410 | refs/heads/main | 2023-01-23T07:15:56.285348 | 2020-11-18T07:27:27 | 2020-11-18T07:27:27 | 312,456,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 533 | py | # Generated by Django 3.1.3 on 2020-11-14 03:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial auto-generated migration for the registro_hora_extra app:
    creates the RegistroHoraExtra (overtime record) table."""

    # First migration for this app; nothing to depend on.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='RegistroHoraExtra',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Reason for the overtime ("motivo da hora extra").
                ('motivo', models.CharField(help_text='Motivo da hora extra', max_length=100)),
            ],
        ),
    ]
| [
"sbalves@bol.com.br"
] | sbalves@bol.com.br |
4540a99ba97998f5e018d10fb451d577ef767abb | abf04257eee53cd309801812b5af18fb8193bd7d | /FisicaComputacional/CuartaPractica/2.py | 8fd88969d1fcfc8075207d754978a5963d6ea7da | [] | no_license | Antszz/TrabajosUniversidad | 8614024743232b6cd9bd2a54e2cb79f720d4f293 | 006978f852868eb54c322e5b4bb005ba97e3485d | refs/heads/master | 2023-08-31T08:54:53.336991 | 2021-09-15T17:18:32 | 2021-09-15T17:18:32 | 295,745,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,356 | py | from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
# Damped, driven harmonic oscillator: m x'' = -k x - c x' + f0 cos(w t),
# integrated with a leapfrog-style scheme (velocity half-step kick).
k = 0.1     # spring constant
m = 0.2     # mass
f0 = 0      # driving amplitude (0 -> free oscillation)
c = 0.05    # damping coefficient
w = 0       # driving angular frequency
h = 0.01    # integration time step
t = 40      # total simulated time
ax = 0      # acceleration
x = 0       # initial position
vx = -2     # initial velocity

# Per-step histories. BUG FIX: these lists were originally named
# pos/velocidad/aceleracion/U/K/E while every plotting call below used
# px/pv/pa/pu/pk/pe, so the script crashed with NameError.
px = []     # position
pv = []     # velocity
pa = []     # acceleration
pu = []     # potential energy
pk = []     # kinetic energy
pe = []     # total energy

pt = np.arange(0, t, h)

# Initial half-step kick so velocity is staggered relative to position.
ax = -k*x/m - c*vx/m + f0*np.cos(w*t)/m
vx = vx + ax*h/2

for t in pt:
    ax = -k*x/m - c*vx/m + f0*np.cos(w*t)/m
    vx = vx + ax*h
    x = x + vx*h
    px.append(x)
    pv.append(vx)
    pa.append(ax)
    pu.append(k*(x*x)/2)
    pk.append(m*(vx*vx)/2)
    # BUG FIX: was `u[-1] + K[-1]` -- the name `u` was never defined.
    pe.append(pu[-1] + pk[-1])

plt.subplot(2, 2, 1)
plt.plot(pt, px)
plt.xlabel('tiempo')
plt.ylabel('espacio')
plt.title('X-T')
plt.grid(True)

plt.subplot(2, 2, 2)
plt.plot(pt, pv)
plt.xlabel('tiempo')
plt.ylabel('velocidad')
plt.title('V-T')
plt.grid(True)

plt.subplot(2, 2, 3)
plt.plot(pt, pa)
plt.xlabel('tiempo')
plt.ylabel('aceleración')
plt.title('A-T')
plt.grid(True)

# NOTE(review): the x axis here is position and the y axis velocity, so
# the axis labels look swapped; kept as-is to preserve the output.
plt.subplot(2, 2, 4)
plt.plot(px, pv)
plt.xlabel('velocidad')
plt.ylabel('espacio')
plt.title('espacio de fases')
plt.grid(True)

'''
plt.subplot(2,3,5)
plt.plot(px,pu,px,pk,px,pe)
plt.xlabel('velocidad')
plt.ylabel('espacio')
plt.title('espacio de fases')
plt.grid(True)
'''

plt.show()

# 3D phase-space trajectory: position vs velocity vs time.
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.scatter3D(px, pv, pt)
plt.show()

# Energies versus position: potential, kinetic and total.
plt.plot(px, pu, px, pk, px, pe)
"esanchezh@unsa.edu.pe"
] | esanchezh@unsa.edu.pe |
927dc67e29b0c300e49333b1c9ccbfb09671ddbd | 7b6f78ea6903132cd27821e49025fc8156b4629e | /Demo/webadmins3/webadmins3/wsgi.py | 0187e8855e40204c27adeccd5e89b73d8cc57ea3 | [] | no_license | xiaolangshou/Python_inventory | d5890b0c5dbdb8fd6cba60477a62f6c68b6dfc20 | 87cab79a79bd410a16749fc8ebf7657df8cadccd | refs/heads/master | 2021-07-21T05:44:35.128294 | 2020-07-09T11:50:18 | 2020-07-09T11:50:18 | 193,176,402 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
WSGI config for webadmins3 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Default the settings module; a pre-set environment variable wins.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "webadmins3.settings")
# Module-level WSGI callable picked up by gunicorn/uWSGI/mod_wsgi.
application = get_wsgi_application()
| [
"xiaolangshou@foxmail.com"
] | xiaolangshou@foxmail.com |
f5499c03d79d8f556272f201849ddbe9e5f1f2dd | 360f94dc20a01f0bfb1f14ccbf3f28501588f666 | /crypto-009/week_1/explore.py | eaad3b032dcf1282b48bd1d894727d426593ad19 | [] | no_license | rocco66/courses | deb8818162b68e7a4645b1572edca8066e9562f4 | 1a423fbc785605d751e1ba60024294682c0f6ff0 | refs/heads/master | 2021-01-19T06:27:17.254292 | 2014-01-14T19:57:58 | 2014-01-14T19:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py |
import string
from itertools import combinations
from origin import strxor
MESSAGES_FILE = "messages.txt"
messages = map(lambda s: s[:-1].decode("hex"), open(MESSAGES_FILE).readlines())
alphabet = string.ascii_lowercase + string.ascii_uppercase
key_len = max(map(len, messages))
key = " " * key_len
enum_messasges = enumerate(messages)
combinations = combinations(enum_messasges, 2)
for ((first_num, first), (second_num, second)) in combinations:
# print "*" * 10
# print "{} ^ {} \n".format(first_num, second_num)
for (char_position, check_char) in enumerate(strxor(first, second)):
if check_char in
| [
"rocco66max@gmail.com"
] | rocco66max@gmail.com |
85566a279360d8fee75c2ed3b6a5c4fe6426afc1 | 30d360f965253167c99f9b4cd41001491aed08af | /PTFE_code/integrate_profile.py | 4ba0587d6305af16574d6b5b2d36c2e9a6d5dba3 | [] | no_license | petervanya/PhDcode | d2d9f7170f201d6175fec9c3d4094617a5427fb5 | 891e6812a2699025d26b901c95d0c46a706b0c96 | refs/heads/master | 2020-05-22T06:43:47.293134 | 2018-01-29T12:59:42 | 2018-01-29T12:59:42 | 64,495,043 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,856 | py | #!/usr/bin/env python
"""Usage: integrate_profile.py <profile> <d> [--L <L>]
[AD HOC] Load 1d water profile and integrate
volume of water in polymer and in electrodes.
Arguments:
<file> Water profile, columns [r, f]
<d> Slab width in nm
Options:
--L <L> Box size in DPD units [default: 40]
09/11/16
"""
import numpy as np
from scipy.integrate import simps
import sys
from docopt import docopt
rc = 8.14e-10
if __name__ == "__main__":
args = docopt(__doc__)
L = float(args["--L"])
d_nm = float(args["<d>"])
d = d_nm * 1e-9 / rc
try:
A = np.loadtxt(args["<profile>"])
except FileNotFoundError:
sys.exit("No file found: %s." % args["<profile>"])
r, f = A[:, 0], A[:, 1]
if d < 0.0 or d > L:
sys.exit("Slab width larger than box size.")
print("===== Integrating water profile =====")
print("L: %.2f | slab width: %.2f (%.2f nm)" % (L, d, d_nm))
dr = r[1] - r[0]
re1 = r[r < (L-d)/2]
re2 = r[r > (L+d)/2]
rm = r[(r >= (L-d)/2) & (r <= (L+d)/2)]
fe1 = f[r < (L-d)/2]
fe2 = f[r > (L+d)/2]
fm = f[(r >= (L-d)/2) & (r <= (L+d)/2)]
water_film = simps(fm, dx=dr)
water_elec = simps(fe1, dx=dr) + simps(fe2, dx=dr)
water_tot = simps(f, dx=dr)
print("Total water: %.2f" % water_tot)
print("Electrodes: %.2f | Film: %.2f | mat / el: %.2f" % \
(water_elec, water_film, water_film / water_elec))
R = water_film / (water_film + water_elec)
print("Ratio of water in the film: %.2f" % R)
# water_film = np.sum(fm) * dr
# water_elec = (np.sum(fe1) + np.sum(fe2)) * dr
# water_tot = np.sum(f) * dr
#
# print("Naive quadrature | Total water: %.2f" % water_tot)
# print("Electrodes: %.2f | Matrix: %.2f | mat / el: %.2f" % \
# (water_elec, water_film, water_film / water_elec))
| [
"peter.vanya@gmail.com"
] | peter.vanya@gmail.com |
7f69f24e1c997e33a56d0d3ec14b50c19403efbf | cba54caf11475dc9471a35e9abbf28d5300128ca | /itp/ITP1_4_A.py | fbbf713a74c9c2331618aec687909df01a1f5d67 | [
"MIT"
] | permissive | sacredgift/aoj | 7d64e8b067ee6452afb4afb8a92a16671c927d7c | f9dd0274db04e9b876343cf1e631578833c56ded | refs/heads/master | 2020-05-19T17:05:00.521806 | 2019-05-21T01:12:44 | 2019-05-21T01:12:44 | 185,125,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | # -*- coding:utf-8 -*-
def main():
    """Read 'a b' from stdin and print the integer quotient, the remainder
    and the real quotient to five decimal places (AOJ ITP1_4_A)."""
    tokens = input().split()
    a, b = int(tokens[0]), int(tokens[1])
    print('{} {} {:.5f}'.format(a // b, a % b, a / b))


if __name__ == '__main__':
    main()
| [
"ponkotsu@ponkotsu.jp"
] | ponkotsu@ponkotsu.jp |
4b6ea1dff52c5e44c60ca5842cf5289c2167aee3 | c68393315be71d59054cffdbff8e4e3c3f6d76e3 | /beginnerlevel/countnoofchars.py | f37fb4128568c230098befcb4974b05d3c060258 | [] | no_license | vkavin18/pythonprogramming | d3f2eac9eac2f9995051fcafa90ae37e4624fd7e | 38e05367d4412f2b2c0de25e67730cbb56d1db3a | refs/heads/master | 2021-09-13T10:17:54.490731 | 2018-04-28T04:17:52 | 2018-04-28T04:17:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48 | py | n=str(raw_input(""))
# Number of non-space characters in the line read above (Python 2 script).
print(len(n)-n.count(' '))
| [
"noreply@github.com"
] | vkavin18.noreply@github.com |
f7d9157b72650dbd4b305a29c4086f10d6093177 | ba712db29e8bf09b506ec4c1adbc2b3eeb359098 | /InputOutputOperations/Bigwriter.py | 312b199bbc992775ef66c1639b3b32ca9f4c3962 | [] | no_license | Prog-keith/Learning_Python | a7c20b6e42fb59282d0aba109bc70dc0bb8bf553 | c949862c8817e6ac73bb41ff8ad7a7811a432870 | refs/heads/main | 2023-08-27T21:37:49.107754 | 2021-10-14T11:39:38 | 2021-10-14T11:39:38 | 416,299,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | def main():
buffersize = 100000
infile = open('bigfile.txt', 'r')
outfile = open('newbig.txt', 'w')
buffer = infile.read(buffersize)
while len(buffer):
outfile.write(buffer)
print('.', end='')
buffer= infile.read(buffersize)
print()
print('Done.')
main() | [
"noreply@github.com"
] | Prog-keith.noreply@github.com |
25707c6d54fb09934c8c58a16a93821399a2947b | 54365ee95eea9679bca6dad5eeb82ab9a54f7803 | /src/plots.py | 5d76de25df984ec7096a7401fa437f48ca7c0684 | [] | no_license | afcarl/COMP3740-Learning-Machine-Learning | e1ee446e4679ba357e781025a27c48c82fb05ef8 | 7343d9b04132b58371a83b197fd07420e9e3423e | refs/heads/master | 2020-03-19T10:21:02.222874 | 2015-05-18T14:09:09 | 2015-05-18T14:09:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,459 | py | from read_data_file import *
import pylab
def show_experiment(rules, show_times=True, epochs=100000):
    # Plot one error-rate curve per experiment matching `rules`, against
    # wall time (show_times=True) or epoch index, printing a summary line
    # (hyper-parameters, mean epoch time, final error) for each.
    stuff = get_experiments(rules)
    for thing in stuff:
        experimental_data = thing["data"]
        # Per-epoch records: x[3] is elapsed time, x[2] the error rate
        # (matching the axis labels below).
        times = [x[3] for x in experimental_data][:epochs]
        accuracies = [x[2] for x in experimental_data][:epochs]
        print thing["hiddens"], thing["learning_rate"], thing["corruption"],
        print times[-1]/len(times), accuracies[-1]
        if show_times:
            pylab.plot(times, accuracies)
            pylab.xlabel("time (s)")
        else:
            pylab.plot(accuracies)
            pylab.xlabel("epoch")
    pylab.ylabel("error rate (%)")
    pylab.show()
def everything_experiment():
    # Show every recorded experiment, plotted against wall time.
    show_experiment({}, True)
def show_hidden_size_experiment():
    # Compare hidden-layer sizes at fixed learning rate / corruption over
    # the first 20 epochs, plotted against epoch index.
    show_experiment({"learning_rate": 0.1,
                     "corruption": 0.3,
                     "experiment_type":
                     "denoising autoencoder, full feedback"},
                    False,
                    20)
def show_hidden_size_experiment_2():
    # Same hidden-size comparison, but averaging runs per size and
    # plotting against wall time.
    show_experiment_averaging({"learning_rate": 0.1,
                               "corruption": 0.3,
                               "experiment_type":
                               "denoising autoencoder, full feedback"},
                              "hiddens", True, 20)
def show_learning_rate_experiment():
    # Compare learning rates at a fixed hidden size of 600, averaged over
    # runs, plotted against epoch index.
    show_experiment_averaging({"hiddens": 600,
                               "corruption": 0.3,
                               "experiment_type":
                               "denoising autoencoder, full feedback"},
                              "learning_rate", False, 20)
def show_experiment_averaging(rules, dependent, show_times = True, epochs = 10000):
    # Like show_experiment, but groups the matching runs by their value of
    # the `dependent` key and plots one averaged curve per group, with a
    # legend labelling each value.
    stuff = get_experiments(rules)
    x_values = sorted(list(set(test[dependent] for test in stuff)))
    for x_value in x_values:
        relevant_data = [run["data"] for run in stuff if run[dependent] == x_value]
        # Average the time (x[3]) and error-rate (x[2]) series across runs.
        times = average_of_transpose([[x[3] for x in run] for run in relevant_data])[:epochs]
        print "average time: ", x_value, times[-1] / len(times)
        accuracies = average_of_transpose([[x[2] for x in run] for run in relevant_data])[:epochs]
        if show_times:
            pylab.plot(times, accuracies, label="%s = %s"%(dependent, str(x_value)))
        else:
            pylab.plot(accuracies, label="%s = %s"%(dependent, str(x_value)))
    pylab.xlabel("time (s)" if show_times else "epoch")
    pylab.ylabel("error rate (%)")
    pylab.legend(loc = "upper center")
    pylab.show()
def average_of_transpose(list_of_lists):
    """Column-wise mean: averages the i-th elements of every inner list."""
    averages = []
    for column in zip(*list_of_lists):
        averages.append(sum(column) / len(column))
    return averages
if __name__ == "__main__":
    # Demo entry point; guarded so importing this module doesn't plot.
    show_learning_rate_experiment()
"bshlegeris@gmail.com"
] | bshlegeris@gmail.com |
37a03683132fde1a31b463e6130f1b868c096526 | 45d090a32e5411e5d43238e94cf854cf3eccb1ff | /BrightPi/brightpi-test.py | 6a848f2001474c6c357d71d2427196dd717f8e1a | [] | no_license | LeoWhite/RaspberryPi | 2f73399bcf936de760a532af9145e3a8f8524298 | dc6d6c54d3fa0bae28648120cefa83bacd85eedb | refs/heads/master | 2021-10-22T08:30:28.915498 | 2019-03-09T15:15:03 | 2019-03-09T15:15:03 | 103,762,795 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | import smbus
import time
# Select the bus to use.. This will be 0 for revision 1 boards
bus = smbus.SMBus(1)
# BrightPi's i2c address
address = 0x70
visibleLEDs=[1,3,4,6]
IRLEDs=[0,2,5,7]
# Select the visible LEDs and set to 50% brightness (Range 0-50)
ActiveLEDs=0
for x in visibleLEDs:
bus.write_byte_data(address, x + 1, 25)
ActiveLEDs |= (1 << x)
# And turn them on
bus.write_byte_data(address, 0x00, ActiveLEDs)
# Wait 5 seconds
time.sleep(5)
# Now switch over to the IR LEDs
ActiveLEDs = 0
for x in IRLEDs:
bus.write_byte_data(address, x + 1, 25)
ActiveLEDs |= (1 << x)
# And turn them on
bus.write_byte_data(address, 0x00, ActiveLEDs)
# Wait another 5 seconds
time.sleep(5)
# Turn off all the LEDS
bus.write_byte_data(address, 0x00, 0x00)
| [
"leo.white@mybigideas.co.uk"
] | leo.white@mybigideas.co.uk |
1395182661d8e80720d1fe9c0bd1a7db56ac3f02 | 6ef5df9bef318b34b2b18ca8d85696d0ee8a7da1 | /djadminapp/urls.py | c9f0c81312f863886d6eff60547c08bfd6fd034d | [
"MIT"
] | permissive | mr--dev/djadminapp | 18c44d387f6c9ab588d3d09e5e6df4b5ff866d6a | aca8bb7e603bd3e43befd6c25505ede99da17bbf | refs/heads/master | 2021-01-18T13:54:56.025000 | 2014-02-16T18:59:24 | 2014-02-16T18:59:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | from django.conf.urls import patterns, include, url
from django.contrib import admin
# Populate the admin registry from every installed app's admin module.
admin.autodiscover()
# NOTE(review): the first entry includes appmanager.urls under r'^$', and
# the second includes the same urlconf again under r'^appmanager/' —
# presumably appmanager is meant to serve the site root; verify.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'djadminapp.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^$', include('appmanager.urls')),
    url(r'^appmanager/', include('appmanager.urls')),
    url(r'^admin/', include(admin.site.urls)),
)
| [
"marco.risi87@gmail.com"
] | marco.risi87@gmail.com |
103f0737858e2fb2bf87cc04c04cdb0a9e167dbd | 98d48bb6d1f8b4b5714ede42641c5de2e86f374d | /generator/contact.py | a23383e46aaa049d2161c040e4ab882dfb5b5f1d | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | aakulich/python_training | f2524a0d5de04ce3296dedee4deab113e63c3fd1 | d2f258f66b07d2b3f74f69a575db9bcc3e089243 | refs/heads/main | 2023-05-06T10:36:51.035274 | 2021-05-28T15:18:32 | 2021-05-28T15:18:32 | 347,194,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | from model.contact import Contact
import random
import string
import os.path
import jsonpickle
import getopt
import sys
# Parse command-line options: -n <count> contacts into -f <path>.
try:
    opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of contacts", "file"])
except getopt.GetoptError as err:
    # BUG FIX: the getopt module has no usage() function, so the old
    # error path raised AttributeError; report the parse error instead.
    print(err)
    sys.exit(2)
# Defaults: generate 5 contacts into data/contacts.json.
n = 5
f = "data/contacts.json"
for o, a in opts:
    if o == "-n":       # number of random contacts to generate
        n = int(a)
    elif o == "-f":     # output file, relative to the repo root
        f = a
def random_string(prefix, maxlen):
    """Return `prefix` followed by up to maxlen-1 random printable chars."""
    pool = string.ascii_letters + string.digits + string.punctuation + " " * 10
    # Same RNG call order as before: one randrange, then one choice per char.
    length = random.randrange(maxlen)
    suffix = "".join(random.choice(pool) for _ in range(length))
    return prefix + suffix
# One deliberately empty contact plus `n` randomly named ones.
testdata = [Contact(firstname='', lastname='')] + [
    Contact(firstname=random_string("firstname", 10), lastname=random_string("lastname", 20))
    for i in range(n)
]
# Output path is resolved relative to this script's parent directory.
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, "w") as out:
    jsonpickle.set_encoder_options("json", indent=2)
    out.write(jsonpickle.encode(testdata))
"kryaelena85@gmail.com"
] | kryaelena85@gmail.com |
dcc38f789aa96c2644691f126485291feaf2b441 | 3cc3b3efa625f612dc2cacc5fbb71187730effc7 | /Machine Learning A-Z/Part 2 - Regression/Section 6 - Polynomial Regression/polynomial_regression_nk.py | f14545bc6b3790c30fa9f2b223b106962f88f03a | [] | no_license | naresh-upadhyay/Machine-learning | 18c981021359bb2ed40f43744b8d79ca0dd25b2b | 38ff771734abcd22cb47f43910409df2e749b23f | refs/heads/master | 2020-05-22T21:01:19.093334 | 2019-05-14T01:17:15 | 2019-05-14T01:17:15 | 186,520,130 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,524 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 9 22:19:41 2019
@author: king
"""
# importing libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures

# import dataset: column 1 is the position level, column 2 the salary
dataset = pd.read_csv('Position_Salaries.csv')
x = dataset.iloc[:, 1:2].values   # 1:2 keeps the 2-D (n, 1) shape sklearn wants
y = dataset.iloc[:, 2].values

# simple linear regressor
lin_reg1 = LinearRegression()
lin_reg1.fit(x, y)

# polynomial regressor: expand x into [1, x, x^2, ..., x^8]
poly = PolynomialFeatures(degree=8)
x_poly = poly.fit_transform(x)
lin_reg2 = LinearRegression().fit(x_poly, y)

# testing our prediction
y_pred = lin_reg2.predict(x_poly)

# plotting the graph for the simple regressor
plt.scatter(x, y, color='red')
plt.plot(x, lin_reg1.predict(x), color='blue')
plt.title('simple regressor predicted vs actual')
plt.xlabel('level')
plt.ylabel('salary')
plt.show()

# plotting the polynomial regressor on a fine grid for a smooth curve
x_grid = np.arange(min(x), max(x), 0.05)
x_grid = x_grid.reshape(len(x_grid), 1)
plt.scatter(x, y, color='red')
plt.plot(x_grid, lin_reg2.predict(poly.fit_transform(x_grid)), color='blue')
plt.title('Polynomial regressor predicted vs actual')
plt.xlabel('level')
plt.ylabel('salary')
plt.show()

# BUG FIX: the final predictions were computed and silently discarded;
# print them so the script actually reports its results.
# prediction using simple regressor model
print(lin_reg1.predict([[6.5]]))
# prediction using polynomial regressor model
print(lin_reg2.predict(poly.fit_transform([[13]])))
| [
"naresh.king88898@gmail.com"
] | naresh.king88898@gmail.com |
0bea389e510b7977e448170db9a97655fd4abd53 | 7b4e9342d42be2b55af5dc23a8abedd672d68e99 | /MobileApps/libs/flows/mac/smart/screens/printersettings/printer_from_other_devices.py | 96ab7c3b1a277a1e82cb0c00364ddd13f515ba52 | [] | no_license | Amal548/QAMA | af5bb335c92a90b461f1ee9a3870435d83d46802 | b5230c51d3bc7bb04b3448d1a1fe5a076d8898d5 | refs/heads/master | 2023-07-12T09:17:04.624677 | 2021-08-06T08:01:11 | 2021-08-06T08:01:11 | 389,595,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,596 | py | # encoding: utf-8
'''
Description: It defines the operations of element and verification methods on
the printer from other devices screen.
@author: Sophia
@create_date: Sep 18, 2019
'''
import logging
from MobileApps.libs.flows.mac.smart.screens.smart_screens import SmartScreens
from MobileApps.libs.flows.mac.smart.screens.printersettings.printer_setting_scroll import PrinterSettingScroll
class PrinterFromOtherDevices(PrinterSettingScroll, SmartScreens):
    # Identifiers the framework uses to locate this screen's element
    # definitions (folder/flow of the UI map).
    folder_name = "printersettings"
    flow_name = "print_from_other_devices"
    def __init__(self, driver):
        '''
        Initialize the screen object with the shared automation driver.
        :parameter: driver -- UI-automation driver used to locate/act on elements
        :return: None
        '''
        super(PrinterFromOtherDevices, self).__init__(driver)
    # -------------------------------Operate Elements------------------------------
    def wait_for_screen_load(self, timeout=30, raise_e=True):
        '''
        Wait until the "print from other devices" screen has loaded, using
        the send-link button as the readiness marker.
        :parameter: timeout -- seconds to wait for the button to appear
                    raise_e -- raise on timeout instead of returning falsy
        :return: the located "send_link_btn" object (falsy when not found)
        '''
        logging.debug("[PrinterFromOtherDevices]:[wait_for_screen_load]-Wait for screen loading... ")
        return self.driver.wait_for_object("send_link_btn", timeout=timeout, raise_e=raise_e)
    def click_send_link_btn(self):
        '''
        Click the "send link" button on this screen.
        :parameter:
        :return: None
        '''
        logging.debug("[PrinterFromOtherDevices]:[click_send_link_btn]-Click send link button... ")
        self.driver.click("send_link_btn")
    # -------------------------------Verification Methods--------------------------
| [
"amal.muthiah@hp.com"
] | amal.muthiah@hp.com |
70f4df3ace3aaca74de3e2057c6526f040a775ff | a1eef11d4255490b9f5444a8524a060fe0010196 | /_python/OOP/usersBankAccounts.py | 1985a7f85840939fe5b21afb1306c4a0407006ca | [] | no_license | ahmadmalbzoor/python_stack-1 | c830eac58a747f748bb96f60cdfe51e31e8639cf | ff17f29dfc927e31034a6e850983f6b535b5e10a | refs/heads/main | 2023-05-31T02:29:19.541819 | 2021-06-12T17:30:40 | 2021-06-12T17:30:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,533 | py | class BankAccount:
def __init__(self, interest_rate, balance):
self.interest_rate = interest_rate
self.balance = balance
def deposit(self, amount):
self.balance += amount
return self
def withdraw(self, amount):
if self.balance > amount:
self.balance -= amount
else:
print("Insufficient funds: Charging a $5 fee")
self.balance -= 5
return self
def display_aaccount_info(self):
print(f"Balance: {self.balance}")
return self
def yield_interest(self):
self.balance = self.balance + self.balance * self.interest_rate
return self
class User:
    """A bank customer owning one or more BankAccount instances."""
    def __init__(self, name, email):
        self.name = name
        self.email = email
        # BUG FIX: BankAccount's parameter is `interest_rate`, not
        # `int_rate`; the old keyword raised TypeError at construction.
        self.accounts = [BankAccount(interest_rate=0.02, balance=0)]
    def makeAnotherAccount(self):
        """Open another account with the default rate and zero balance."""
        self.accounts.append(BankAccount(interest_rate=0.02, balance=0))
        return self
    def deleteAccount(self):
        """Close the most recently opened account (now chainable too)."""
        self.accounts.pop()
        return self
    def deposit(self, amount, account_index):
        """Deposit into the account at `account_index`."""
        self.accounts[account_index].deposit(amount)
        return self
    def withdraw(self, amount, account_index):
        """Withdraw from the account at `account_index`."""
        self.accounts[account_index].withdraw(amount)
        return self
    def display_aaccount_info(self, account_index):
        """Print the balance of the account at `account_index`."""
        self.accounts[account_index].display_aaccount_info()
        return self
    def yield_interest(self, account_index):
        """Apply interest to the account at `account_index`."""
        self.accounts[account_index].yield_interest()
        return self
| [
"khalilayyash@gmail.com"
] | khalilayyash@gmail.com |
884cfec3f2df2a65abd66c68a17cf407eb050282 | 7a69041bb1d7593e10b912d506050fc17389ccba | /impl/python/network/BroadcastSender.py | 650ed0f15b3c8a4918a077fcaf6fb11e4050a9f9 | [] | no_license | mswiders1/multitalk | bcdf195ebb8590d262530eb142c6f24e0c4a1dc5 | 7b24b7805e20191fab9949c67aed4cbf00788538 | refs/heads/master | 2021-01-10T06:47:51.618772 | 2011-01-21T12:40:01 | 2011-01-21T12:40:01 | 51,404,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,542 | py | # -*- coding: utf-8 -*-
from twisted.internet.protocol import DatagramProtocol
from socket import SOL_SOCKET, SO_BROADCAST
import appVar
BROADCAST_COUNT = 3
BROADCAST_TIME_BETWEEN_IN_SEC = 1
BROADCAST_TIME_AFTER_IN_SEC =2
class BroadcastSender(DatagramProtocol):
    # UDP protocol that announces this Multitalk node on the local network.
    # Magic payload identifying a Multitalk discovery broadcast.
    MSG = u'MULTITALK_5387132'
    def __init__(self, reactor, port = 3554):
        # UDP port the peers listen on for discovery datagrams.
        self.__port = port
        self.__reactor = reactor
    def startProtocol(self):
        # Called by twisted once the transport is ready: enable broadcast
        # on the socket, send one announcement now and schedule the
        # remaining ones at one-per-delay-second intervals.
        print "BC: start protokołu"
        self.transport.socket.setsockopt(SOL_SOCKET, SO_BROADCAST, True)
        self.sendDatagram()
        for delay in range(1, BROADCAST_COUNT * BROADCAST_TIME_BETWEEN_IN_SEC):
            self.__reactor.callLater(delay, self.sendDatagram)
    def sendDatagram(self):
        # Send one discovery datagram to the LAN broadcast address.
        print "BC: wysyłam rozgłoszenie"
        self.transport.write(self.getPacket(), ("<broadcast>", self.__port))
    def getPacket(self):
        # Payload of a discovery datagram.
        return BroadcastSender.MSG
def startSender(reactor):
    # Create the broadcast protocol, bind it to an ephemeral UDP port and
    # start the discovery-progress timers; returns the protocol instance.
    broadcastSender = BroadcastSender(reactor)
    reactor.listenUDP(0, broadcastSender)
    __startTimers(reactor)
    return broadcastSender
def __startTimers(reactor):
    # Drive the discovery progress indicator from 0 to ~100% over the whole
    # broadcast window (the send phase plus the trailing wait).
    appVar.coreInstance.broadcastProgress(0)
    time = BROADCAST_COUNT * BROADCAST_TIME_BETWEEN_IN_SEC + BROADCAST_TIME_AFTER_IN_SEC
    for delay in range(1, time + 1):
        # NOTE(review): Python 2 integer division — 100/time truncates, so
        # the final callback reports time*(100//time), not exactly 100.
        procentage = delay * (100/time)
        reactor.callLater(delay, _handleDiscoveryProgress, procentage)
def _handleDiscoveryProgress(procentage):
    # Timer callback: forward the progress percentage to the core.
    appVar.coreInstance.broadcastProgress(procentage)
| [
"soldier.kam@186253f6-d097-5331-538c-8b5206fdb8b3"
] | soldier.kam@186253f6-d097-5331-538c-8b5206fdb8b3 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.