blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0d0efeb97f3ace99264220f2eaf6af63002d710c | 98f7aecacd75c1145360a600c010df7d2496f74b | /Contents/scripts/sisidebar/common.py | e6bac884e1329a605e4693538fb03af99d7bdcaf | [
"MIT"
] | permissive | HoL1124/SISideBar | 13811578bb607282982aaeb4828c0922f94511df | a9e51558bbef71c98e027f88a3ec5f06097ffe14 | refs/heads/master | 2021-08-16T09:51:23.559373 | 2017-11-19T15:14:47 | 2017-11-19T15:14:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,668 | py | # -*- coding: utf-8 -*-
from maya import cmds
from maya import mel
import pymel.core as pm
import re
import functools
import datetime
import os
import locale
import datetime as dt
import json
def search_polygon_mesh(object, serchChildeNode=False, fullPath=False):
    '''
    Return the transform nodes from *object* that carry a polygon mesh shape.

    object          -- a node name, or a list of node names, to inspect.
    serchChildeNode -- when True, also search every descendant transform.
    fullPath        -- passed to listRelatives when collecting children.

    Returns a list of transform names, or None when no mesh was found
    (callers rely on the None return, so it is kept).
    '''
    # Normalize a single node into a list.
    if not isinstance(object, list):
        object = [object]
    polygonMesh = []
    # Optionally extend the search set with all descendant transforms.
    if serchChildeNode is True:
        # Iterate over a snapshot; `object` grows inside the loop.
        for node in list(object):
            # Bind before the try: previously an exception on the first node
            # left `nodes` unbound, so the check below raised NameError (or
            # silently reused the previous iteration's result).
            nodes = None
            try:
                nodes = cmds.listRelatives(node, ad=True, c=True, typ='transform', fullPath=fullPath, s=False)
            except Exception:
                pass
            if nodes is not None:
                object = object + nodes
    # Keep every node that has at least one mesh shape below it.
    for node in object:
        try:
            meshnode = cmds.listRelatives(node, s=True, pa=True, type='mesh', fullPath=True)
            if meshnode:
                polygonMesh.append(node)
        except Exception:
            pass
    if len(polygonMesh) != 0:
        return polygonMesh
    else:
        return
class TemporaryReparent():
    '''
    Temporarily parks the children of a node under a dummy locator and
    reparents them back afterwards.  Used around operations (skin weights,
    UV-set edits, ...) that break when a parent/child hierarchy is present.

    main() dispatches on mode: 'create' | 'delete' | 'cut' | 'parent'.
    'create' returns the dummy parent locator, which must be passed back in
    as dummyParent for the other modes.  (The original docstring called the
    reattach mode 'reparent'; the code actually checks for 'parent'.)
    '''
    # Node types that are considered reparentable children.
    node_list = ['transform', 'joint', 'KTG_ModelRoot', 'KTG_SSCTransform']
    def main(self, objects='', dummyParent='', mode='cut'):
        """Dispatch one reparenting step.

        objects     -- node name or list of node names to operate on.
        dummyParent -- the dummy locator (needed for 'delete'/'cut'/'parent').
        mode        -- 'create' | 'delete' | 'cut' | 'parent'.

        'create' returns the new dummy locator; other modes return None.
        """
        self.objects = objects
        self.dummyParent = dummyParent
        # Normalize a single node into a list.
        if not isinstance(self.objects, list):
            temp = self.objects
            self.objects = []
            self.objects.append(temp)
        # NOTE(review): every branch returns inside the first loop iteration,
        # so only the first entry of self.objects is ever processed — confirm
        # whether multi-node input is expected to work here.
        for self.node in self.objects:
            if mode == 'create':
                self.dummyParent = cmds.spaceLocator(name='dummyLocatorForParent')
                return self.dummyParent
            elif mode == 'delete':
                cmds.delete(self.dummyParent)
                return
            elif mode == 'cut':
                self.cutChildNode()
                return
            elif mode == 'parent':
                self.reparentNode()
                return
def cutChildNode(self):
# 処理ノードの親子を取得しておく
nodeChildren = cmds.listRelatives(self.node, children=True, fullPath=True)
for child in nodeChildren:
# 子のノードがトランスフォームならダミーに親子付けして退避
if cmds.nodeType(child) in self.node_list:
cmds.parent(child, self.dummyParent)
    def reparentNode(self):
        """Move the children parked under the dummy locator back under self.node."""
        # NOTE(review): listRelatives may return None when the dummy has no
        # children, which would make this loop raise — confirm callers always
        # pair this with a preceding successful cut.
        dummyChildren = cmds.listRelatives(self.dummyParent, children=True, fullPath=True)
        for child in dummyChildren:
            if cmds.nodeType(child) in self.node_list:
                cmds.parent(child, self.node) | [
"bs45yh@yahoo.co.jp"
] | bs45yh@yahoo.co.jp |
f26347b28fb06519c7f754fa9909bc22ae20270a | f2bffa5992cd133cf11fd4287adc20a2d21a9471 | /workflows/tianchi_main/tianchi_executor.py | 56c40ca40e76da2601dd6109db5cda95d8deb6c1 | [] | no_license | lawson-source/tianchi_aiflow | 107e9c2abd4c4514974be32945f7e0ab40138777 | 7926922f7d3235ee6f24300f5c517496ace8c45a | refs/heads/master | 2023-07-18T15:03:40.972833 | 2021-08-24T00:11:28 | 2021-08-24T00:11:28 | 397,904,466 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,308 | py | import time
from typing import List
import ai_flow as af
from ai_flow_plugins.job_plugins import python, flink
from pyflink.table import Table, ScalarFunction, DataTypes
from pyflink.table.udf import udf
from kafka import KafkaProducer, KafkaAdminClient, KafkaConsumer
from kafka.admin import NewTopic
from tf_main import train
from subprocess import Popen
import json
import sys, getopt
from notification_service.client import NotificationClient
from notification_service.base_notification import EventWatcher, BaseEvent
def get_model_path():
    """Root directory where trained model artifacts are persisted on the host volume."""
    return '/host'
def get_data_path():
    """Directory where the competition mounts the input datasets."""
    return '/tcdata'
def get_dependencies_path():
    """Directory holding the bundled jar/runtime dependencies."""
    return "/opt"
class TrainModel(python.PythonProcessor):
    """Python job processor: trains the base TF model and registers it with AIFlow."""

    def process(self, execution_context: python.python_processor.ExecutionContext, input_list: List) -> List:
        """Train from /tcdata/train.csv, save under /host/model/base_model,
        then register the artifact as a new version of the configured model."""
        training_csv = get_data_path() + '/train.csv'
        checkpoint_dir = get_model_path() + '/model/base_model'
        train(training_csv, checkpoint_dir, 'base_model')
        # Registration must happen after training so the path exists on disk.
        af.register_model_version(model=execution_context.config['model_info'],
                                  model_path=checkpoint_dir)
        return []
class Source(flink.FlinkPythonProcessor):
    """Flink job source: configures the table environment, registers the
    Analytics-Zoo cluster-serving UDF, and declares the Kafka input/output
    tables.  Returns the input table for downstream processors."""
    def __init__(self, input_topic, output_topic) -> None:
        super().__init__()
        # Kafka topics for raw samples (in) and predictions (out).
        self.input_topic = input_topic
        self.output_topic = output_topic
    def process(self, execution_context: flink.ExecutionContext, input_list: List[Table] = None) -> List[Table]:
        """Wire up the environment; all paths are fixed to the container layout."""
        print("### {} setup done2 for {}".format(self.__class__.__name__, "sads"))
        t_env = execution_context.table_env
        t_env.get_config().set_python_executable('/opt/python-occlum/bin/python3.7')
        print("Source(flink.FlinkPythonProcessor)")
        print(t_env.get_config().get_configuration().to_dict())
        t_env.get_config().get_configuration().set_boolean("python.fn-execution.memory.managed", True)
        # Passes the frozen-model path to the Java ClusterServingFunction via
        # global job parameters (doubled quotes are Flink's escaping).
        t_env.get_config().get_configuration().set_string('pipeline.global-job-parameters',
                                                          '"modelPath:""{}/model/base_model/frozen_model"""'
                                                          .format(get_model_path()))
        # Serving jar + Kafka SQL connector must be on the pipeline classpath.
        t_env.get_config().get_configuration().set_string("pipeline.classpaths",
                                                          "file://{}/analytics-zoo-bigdl_0.12.2-spark_2.4.3-0.10.0-serving.jar;file://{}/flink-sql-connector-kafka_2.11-1.11.2.jar"
                                                          .format(get_dependencies_path(), get_dependencies_path()))
        t_env.get_config().get_configuration().set_string("classloader.resolve-order", "parent-first")
        # Bundle size 1 keeps per-record latency low for online scoring.
        t_env.get_config().get_configuration().set_integer("python.fn-execution.bundle.size", 1)
        t_env.register_java_function("cluster_serving",
                                     "com.intel.analytics.zoo.serving.operator.ClusterServingFunction")
        # Input: CSV records from Kafka, read from the earliest offset.
        t_env.execute_sql('''
            CREATE TABLE input_table (
                uuid STRING,
                visit_time STRING,
                user_id STRING,
                item_id STRING,
                features STRING
            ) WITH (
                'connector' = 'kafka',
                'topic' = '{}',
                'properties.bootstrap.servers' = '127.0.0.1:9092',
                'properties.group.id' = 'testGroup',
                'format' = 'csv',
                'scan.startup.mode' = 'earliest-offset'
            )
        '''.format(self.input_topic))
        # Output: predictions written back to Kafka (legacy connector syntax).
        t_env.execute_sql('''
            CREATE TABLE write_example (
                uuid STRING,
                data STRING
            ) WITH (
                'connector.type' = 'kafka',
                'connector.version' = 'universal',
                'connector.topic' = '{}',
                'connector.properties.zookeeper.connect' = '127.0.0.1:2181',
                'connector.properties.bootstrap.servers' = '127.0.0.1:9092',
                'connector.properties.group.id' = 'testGroup',
                'connector.properties.batch.size' = '1',
                'connector.properties.linger.ms' = '1',
                'format.type' = 'csv'
            )
        '''.format(self.output_topic))
        t_env.from_path('input_table').print_schema()
        return [t_env.from_path('input_table')]
class Transformer(flink.FlinkPythonProcessor):
    """Applies the registered cluster_serving UDF to every input row."""
    def __init__(self):
        super().__init__()
        # Name of the model to serve; filled from the job config in setup().
        self.model_name = None
    def setup(self, execution_context: flink.ExecutionContext):
        self.model_name = execution_context.config['model_info']
    def process(self, execution_context: flink.ExecutionContext, input_list: List[Table] = None) -> List[Table]:
        # Keep the row uuid and append the model prediction for its features.
        result_table = input_list[0].select('uuid, cluster_serving(uuid, features)')
        return [result_table]
class Sink(flink.FlinkPythonProcessor):
    """Inserts predictions into the Kafka output table and signals readiness."""
    def process(self, execution_context: flink.ExecutionContext, input_list: List[Table] = None) -> List[Table]:
        print("### {} setup done".format(self.__class__.__name__))
        execution_context.statement_set.add_insert("write_example", input_list[0])
        # Notify listeners (via the AIFlow notification service) that the
        # serving pipeline is wired up and the model is in use.
        notification_client = NotificationClient('127.0.0.1:50051', default_namespace="default")
        notification_client.send_event(BaseEvent(key='KafkaWatcher', value='model_registered'))
        return []
| [
"liuyiyuan2007@icloud.com"
] | liuyiyuan2007@icloud.com |
2d88cb43b0a88fd154048f4cf70f5790dd9a57b5 | 5d91c54eaba1a3608d4fa1c06760a628059f984b | /data_processing/h36mwalking_to_windows.py | 147341c875a4167aae175d948b05e5382845d346 | [] | no_license | jejay/human-pose-2d-to-3d | d7bfbb7e1f342a9d689386316b369739cb6e7327 | f7e7bfa1adf583e4ad59c3833063a1640f2c36eb | refs/heads/master | 2020-03-20T23:00:07.600536 | 2018-08-12T22:49:22 | 2018-08-12T22:49:22 | 137,824,650 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,006 | py | import sys
sys.path.append('../libs/')
import numpy as np
import h5py
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import scipy.io as io
import scipy.ndimage.filters as filters
from Quaternions import Quaternions
from Pivots import Pivots
import glob
import matplotlib.animation as animation
import matplotlib.colors as colors
from matplotlib.animation import ArtistAnimation
import matplotlib.patheffects as pe
def preprocess(data, window=256, window_step=128):
    """Turn one Human3.6M '3D_positions' take into overlapping feature windows.

    data: h5py dataset of flattened joint coordinates; transposed/reshaped
    below into (frames, 32 joints, xyz).  Per frame the output feature is
    24 kept joints * 3 coords + a constant 1.0 column + root velocity (3)
    + root rotational velocity (1) = 77 values.  Returns a list of
    (window, 77) arrays sampled every window_step frames (short tails are
    zero-padded to the full window).
    """
    original_positions = np.array(data).T.reshape(-1, 32, 3)#[0:10]
    # Keep 24 informative joints; commented-out indices are duplicates /
    # end-effector stubs in the H36M skeleton.
    positions = original_positions[:, np.array([
        #0,
        1,
        2,
        3,
        4,
        5,
        6,
        7,
        8,
        9,
        10,
        #11,#
        12,
        13,
        14,
        15,
        #16,#
        17,
        18,
        #19,#
        20,
        21,
        22,
        #23,#
        #24,#
        25,
        26,
        #27,#
        28,
        29,
        30,
        #31#
    ])]#[0:10]
    """ Add Reference Joint """
    #trajectory_filterwidth = 3
    #reference = original_positions[:,0] * np.array([1,1,1])
    #reference = filters.gaussian_filter1d(reference, trajectory_filterwidth, axis=0, mode='nearest')
    #positions = np.concatenate([reference[:,np.newaxis], positions], axis=1)
    """ Get Root Velocity """
    # Per-frame displacement of the root joint (index 0 of the full skeleton).
    velocity = original_positions[1:,0:1] - original_positions[:-1,0:1]
    """ Remove Translation """
    # Express all joints relative to the root position.
    positions[:,:] = positions[:,:] - original_positions[:,0:1]
    """ Get Forward Direction """
    # Indices into the REDUCED 24-joint array, not original H36M joint ids.
    spine, hip_l, hip_r = 10, 5, 0
    normal = np.cross(positions[:,hip_l] - positions[:,spine], positions[:,hip_r] - positions[:,hip_l])
    normal = normal / np.sqrt((normal**2).sum(axis=-1))[...,np.newaxis]
    """ Remove Z Rotation """
    # Rotate each frame so the body's horizontal facing aligns with +X.
    lever = np.cross(normal, np.array([[0,0,1]]))
    lever = lever / np.sqrt((lever**2).sum(axis=-1))[...,np.newaxis]
    target = np.array([[1,0,0]]).repeat(len(lever), axis=0)
    rotation = Quaternions.between(lever, target)[:,np.newaxis]
    positions = rotation * positions
    """ Get Root Rotation """
    velocity = rotation[1:] * velocity
    #rvelocity = (rotation[1:] * -rotation[:-1]).euler()
    # Frame-to-frame root rotation expressed as a scalar pivot angle.
    rvelocity = Pivots.from_quaternions(rotation[1:] * -rotation[:-1], forward="y", plane="xy").ps
    """ Add Velocity, RVelocity, Foot Contacts to vector """
    # Drop the last frame so positions align with the velocity differences.
    positions = positions[:-1]
    positions = positions.reshape(len(positions), -1)
    # NOTE(review): this ones column sits where the original pipeline stored
    # foot contacts (see section string) — here it is just a constant 1.0.
    positions = np.concatenate([positions, np.ones(shape=(len(positions), 1))], axis=-1)
    positions = np.concatenate([positions, velocity.reshape(-1, 3)], axis=-1)
    positions = np.concatenate([positions, rvelocity.reshape(-1, 1)], axis=-1)
    """ Slide over windows """
    windows = []
    for j in range(0, len(positions)-window//8, window_step):
        """ If slice too small pad out by repeating start and end poses """
        slice = positions[j:j+window]
        if len(slice) < window:
            # NOTE(review): despite the section string above, short slices are
            # centered between ZERO frames, not repeated poses.
            left = np.zeros(shape=slice[:1].shape).repeat((window-len(slice))//2 + (window-len(slice))%2, axis=0)
            right = np.zeros(shape=slice[:1].shape).repeat((window-len(slice))//2, axis=0)
            slice = np.concatenate([left, slice, right], axis=0)
            if len(slice) != window: raise Exception()
        windows.append(slice)
    return windows
# Build fixed-length training windows from all S1 "Walking" takes of H36M.
files = glob.glob("../data/raw/h36m/S1/MyPoses/3D_positions/Walking*.h5")
windows = []
for file in files:
    windows += preprocess(h5py.File(file)["3D_positions"])
X = np.array(windows)
np.random.shuffle(X)
# NOTE(review): only 32 windows form the training split; the remainder is
# validation — confirm this small train split is intentional.
Xtrain = X[0:32]
Xvalid = X[32:]
# Normalize both splits with training-set statistics only.
Xmean = np.mean(Xtrain, axis=(0,1))
Xstd = np.std(Xtrain, axis=(0,1))
Xtrain = (Xtrain - Xmean) / Xstd
Xvalid = (Xvalid - Xmean) / Xstd
# Persist float32 splits plus the stats needed to de-normalize later.
np.savez("../data/windows/h36mwalking.npz",
         Xtrain=Xtrain.astype(np.float32),
         Xvalid=Xvalid.astype(np.float32),
         Xmean=Xmean.astype(np.float32),
         Xstd=Xstd.astype(np.float32)
        ) | [
"julian.habekost@googlemail.com"
] | julian.habekost@googlemail.com |
29be14e09570b86ffcab98f451db67bf498470dd | a57ac48fffd6aab0930861b9e08495437eb3d239 | /FoxDot/Troop/multiplayer_jam_17.py | 3449914985371b4d0aa8f6fa323f0da7c8b1602a | [] | no_license | diggleweb/algorithmic-music | aa4aee5214ef99be6ea9a9ee9c3fb9bfc084eb82 | 523e6f4c7157ff61e6feebe2b89b5b1764c51803 | refs/heads/master | 2023-07-30T16:10:04.242291 | 2021-07-15T22:48:19 | 2021-07-15T22:48:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,884 | py | Clock.meter = (3,4)
print(Clock.meter)
print(SynthDefs)
print(Samples)
print(Attributes)
we are down
sc >> play('sound {check}', dur=[0.25,1,2], rate=PStep(13,-1*PWhite(0.2,0.25),PWhite(0.2,0.25)), room=1, mix=0.2, pan=(linvar([-1,1],32), sinvar([-1,1],55)))
ns >> noise(
room=0.5,
octave=(5,6),
mix=[0.25, 0.1, 0.25, 0.25],
formant=P*[[0.25,1.25], 2.25, 1.2, 3.5],
dur=PDur([3,5],8),
lpf=[1050, 1200, 1500, 750], lpr=[0.5, 0.75, 0.75, 0.4],
amp=0.8,
pan=linvar([-1,1],24),
amplify=var([1,0],[2,[6,14,30]]),
echo=linvar([0.25,0.8],24),
echotime=4,
slide=2,
slidedelay=0.7
).stop()
a1 >> sawbass((PSine(64)*0.2,PSine(45)*0.2), oct=(3,4,[5,6]), lpf=5800,lpr=0.45, sus=a1.dur*linvar([0.7,1.5],[64,0]), dur=4).unison(3, linvar([0.1,0.25],128)).stop()
rs >> play('I', room=1, mix=0.25, dur=var([16, 8, 4, 2], [128, 64, 64, [32, 64]]), amp=1.25)
h1 >> play('~', rate=[var([1.05, 0], [[128, 64], [32, 16]]), 1, 1, 1], sample=var([0, 1], [256, [64, 128]]), pan=PWhite(-0.5, 0.5), dur=0.5, amp=var([1.5, 0], [[256, 128], [[64, 32, 128], [32, 64, 128]]]))
Group(rs, h1).stop()
h2 >> play('-', room=0.85, mix=0.2, sample=2, dur=1, pan=(-0.7,0.7), delay=PWhite(0.48,0.52), amp=var([PWhite(0.9,2), 0], [[64, 128, 256], [32, 64, 128]]))
4), rate=4, pan=[-1,1])
h3 >> play('-', sample=var([1, 0], [64, 32, 32, 128, 16, 16]), dur=0.25, amp=expvar([1, 0.1], 1/3) * expvar([0, 1.75, 1.75], [128, [32, 64, 128], 0]) * 1.5)
h4 >> play('=', room=0.5, mix=0.25, pan=PWhite(-0.5, 0.5), dur=0.25, amp=expvar([0, 0, 2.25], [[64, 64, 32], [128, 64], 0]))
z8 >> sawbass(var.cho[0], dur=PDur([8,[7,5,3]],8), lpf=0, cutoff=PRand(200,500), rq=linvar([0.1,0.3],24)).stop()
z8.room = 0.9
z8.mix=0.5
var.cho = var([I,III], 8)
from .Chords import * # :o
os >> play('s', room=1, mix=0.35, sample=1, dur=1, delay=0.5, amp=var([0, 1.25], [[32, 128, 64], [64, 64, 32]]) * var([1, 0], [[512, 256, 128], [128, 128, 64]]))
ss >> play('S', dur=1, room=0.75, mix=0.35, amp=expvar([0, 2], [[128, 256, 32, 32], 0]))
tt >> play('m', dur=0.25, delay=0.5, amp=P[0, 0.15, 0.25, 0.5] * var([2.5, 0], [[128, 64, [64, 32]], [64, 32, [32, 64]]]))
bl >> sawbass(dur=0.25,
cut=[var([0.75, 1, 0.85, 1], [[32, 64, 128], [16, 16, 32]]), var([1, 0.5], 64), 1, 1],
hpf=expvar([150, [1300, 400]], [[32, 64], [16, 8, 8]]),
hpr=expvar([0.4, 0.15], [[64, 8, 8], 32, 32]),
amp=expvar([0, 1], [0, 512])
)
dc >> play('*', dur=2, delay=0.75, amp=var([0.9, 0], [[64, 64, 128, 256, 64], [32, 64, 32, 32]]))
var.brk = var([1, 0], [[31, 28, 32], [1, 4, 0]])
Master().rate = [[[-1, -1, -1, -2], 1], 1, 1, 1]
Master().rate = 1
kd >> play('X', sample=1, dur=1, amp=var([1, 0], [28, 4]) * var([1, 0], [[128, 64, 256], [64, 32, 128]]) * 1.25 * var.brk)
sk >> play('X', sample=1, dur=1,
delay=[0.5, [0.5, 0.75], 0.5, [0.5, 0.5, 0.75]],
amp=P[
var([0, 1], [[256, 128, 64, 32], [64, 32]]), var([0, 0.75, 0, 1], [64, 64, 32, 8, 8]), [var([1, 0], 8), 0], 0,
var([0, 1], 64), var([0, 1], 64), 0, 0
] * var([0.75, 0], [[256, 64], [64, 32], [128, 32], [32, 16]]) * kd.amp
)
Group(bd, b2).stop()
#Group(kd, sk).stop()
bd >> play('V', sample=var([1, 0, 2], [128, 64, 64]), dur=1, lpf=1800, hpf=40, amp=1 * var.brk)
b2 >> play('V', sample=bd.sample, dur=1, delay=0.5, hpf=40, lpf=7500, amp=P[0.85, var([0, 0.85], [128, 64]), 0, var([0, 0.85, [256, 64, 32, 32]])] * bd.amp)
b2.amp = P[var([0.85, 0.5], 32), [0, 0.15], var([0.5, 0], 128), var([0, 0.5], 128)] * 1.25
Master().hpf = [100, 150, 120, 120]
bb >> gong(dur=[1,2], echo=P*[0.25,0.5], tremolo=[2, 2, var([2, 4], 64), 4], amp=0.85, rate=0.2, room=0.7, mix=0.4).unison(5,0.75)
b3 >> blip(dur=1, delay=0.5, amp=[0.35, [0, [0.15, 0.5]], 0, var([0, 0.5], [64, 32, 16, 16, 8, 8])], rate=PWhite(0.2,2)).unison(3,0.25)
sr >> play('u', pan=PWhite(-0.75, 0.75), dur=0.25, amp=P[PWhite(0.1, 0.75), [0.5, 0.25, 0.15, 0.75], 1, [0.75, 0.25]] * expvar([0, 1.15], [128, 0]))
s2 >> play('O', pan=PWhite(-0.75, 0.75), dur=0.25, amp=P[[0.25, 0.5], 0.15, [0.85, 0.15, 0.5], [1, 0.25, 0.5]] * var([0, 1], [[28, 56], [4, 8]]) * 0.75)
z5 >> play("M", amplify=var([0,1],[14,2]), lpf=800, rate=PWhite(0.7,1.5), pan=[-1,1], dur=P*[1,2,1/4,1/2,1/4,1/2,1/8,1/8])
fl >> feel(dur=16, tremolo=8, amp=1)
b2.amp = [[0.85, 0], 0, 0, 0]
z7 >> dbass(dur=PDur([3,5],8), oct=(linvar([4,6],[24,0]),linvar([7,4],[58,0])), slide=PStep(16,PwRand([PRand(2,6),-0.5],[60,40]),0), slidedealy=PWhite(0.7,0.9), amp=db.amp==0, rate=1).unison(3).stop()
sh >> play('s', sample=var([0, 1, 2], 128), dur=0.25, amp=expvar([1, 0], 4) * var([1, 0], [[4, 4, 8], [8, 8, 4, 4]]) * 2).stop()
ml >> play('T', room=1, mix=0.5, dur=15, delay=0.75, lpf=500, amp=0.35).stop()
# StageLimiter.activate(2)
clean everything , too muc lag ok
# 5 min for me
# K
# hi
# let's start over?
# I'm AFK for a cig
#Ok I will have one too
| [
"vladislav.burzakovskij@satoshilabs.com"
] | vladislav.burzakovskij@satoshilabs.com |
29577dd61fc0b2ab4e46b2f21f32670e2dc1af00 | 1bc7053e7582e43bdd4b943c5700677e07449d3c | /pytsite/tpl/_error.py | 4bb04363d32ffdb9ef93b8c458e31aa365c0ac61 | [
"MIT"
] | permissive | pytsite/pytsite | f92eaa041d85b245fbfcdff44224b24da5d9b73a | e4896722709607bda88b4a69400dcde4bf7e5f0a | refs/heads/master | 2021-01-18T01:06:12.357397 | 2019-08-03T02:56:48 | 2019-08-03T02:56:48 | 34,899,242 | 12 | 2 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | """PytSite Templates Support Errors
"""
__author__ = 'Oleksandr Shepetko'
__email__ = 'a@shepetko.com'
__license__ = 'MIT'
import jinja2 as _jinja
class TemplateNotFound(_jinja.TemplateNotFound):
    """Raised when a requested template cannot be located.

    Subclasses Jinja2's TemplateNotFound so existing except-clauses that
    catch the Jinja exception keep working.
    """
    pass
| [
"a@shepetko.com"
] | a@shepetko.com |
4538d7e62dd4c9c9193ce83bf061350a67f1d919 | 55ffd013c48314e3b5fa53ce813a4b97bfa5e5d2 | /Day 14/day14.py | 08d72f6671c6e7ba4ff1465198948504ba23cdb8 | [] | no_license | MarynaLongnickel/AdventOfCode2018 | 4f93b8adcb24e44c2eb8fe352c13fc7c46748940 | 0abb6b4f508d9d7dd68157a24f1abb6f870c1c05 | refs/heads/master | 2020-04-09T14:01:36.048194 | 2018-12-28T19:48:28 | 2018-12-28T19:48:28 | 160,385,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 978 | py | class Node():
    def __init__(self, v = None, next = None):
        # v: the recipe digit stored in this node; next: the following node
        # in the circular singly linked list of recipes.
        self.v = v
        self.next = next
# Advent of Code 2018, day 14 (chocolate recipe scoreboard) on a circular
# linked list.  e1/e2 are the two elves' current recipes; `num` is the
# puzzle input.  Part 2 answer prints inside the loop; part 1 prints when
# `count` passes int(num) + 10 recipes.
e1, e2 = Node(3), Node(7)
e1.next = e2
e2.next = e1
start = e1
cur = e2
num = '409551'
n = 0           # how many digits of `num` are currently matched at the tail
count = 2       # total recipes on the scoreboard
recepies = []
done = False
while not done:
    # Digits of the sum of the two current recipes become new recipes.
    r = list(map(int,str(e1.v + e2.v)))
    for i in r:
        new = Node(i)
        cur.next = new
        cur = new
        count += 1
        # Track the longest suffix matching `num` (part 2).
        if cur.v == int(num[n]):
            recepies.append(cur.v)
            n += 1
            if n == len(str(num)):
                done = True
                # Part 2: recipes to the left of the matched sequence.
                print(count - len(num))
                break
        elif cur.v == int(num[0]):
            # Mismatch, but this digit restarts a potential match.
            recepies = [cur.v]
            n = 1
        else:
            recepies = []
            n = 0
    if done: break
    # Close the circle, then each elf steps forward 1 + current score.
    cur.next = start
    for _ in range(1 + e1.v): e1 = e1.next
    for _ in range(1 + e2.v): e2 = e2.next
    # Part 1: print the 10 scores after the first int(num) recipes.
    # NOTE(review): this equality check can be skipped when two recipes are
    # appended in one iteration (count jumps by 2) — confirm for this input.
    if count == int(num) + 10 + 1:
        arr = []
        x = start
        for z in range(count):
            if z >= int(num) and len(arr) < 10:
                arr.append(x.v)
            x = x.next
        print(arr)
| [
"noreply@github.com"
] | MarynaLongnickel.noreply@github.com |
7b36d7a5065fd89df2b5bc334d8040071b76cf75 | eb1a9a8a2b735a9f9eb0e8182e92897f6a2b0d2c | /linearReg_manual.py | 40aaa19107c292c38ef93f99003a7ba5cce6bb39 | [] | no_license | prateekpanwar/machineLearningModels | 0a12d69faa793809c64c55132d9c7f032ae51a54 | 04c4bebf323c667897185b593009c5bb470bf2e4 | refs/heads/master | 2021-07-23T03:42:27.993303 | 2020-04-15T19:46:35 | 2020-04-15T19:46:35 | 137,018,595 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,647 | py | #writing linear reg algo
from statistics import mean
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
style.use('fivethirtyeight')
##################################define simple dataset#####################################
xs = np.array([1,2,3,4,5,6], dtype=np.float64)
ys = np.array([5,4,6,5,6,7], dtype=np.float64)
###################################Linear regression functions#############################
def best_fit_slope_intercept(xs, ys):
    """Least-squares slope and intercept for the line y = m*x + b.

    Expects numpy arrays so that xs*ys and xs**2 are elementwise.
    """
    x_bar = mean(xs)
    y_bar = mean(ys)
    # Closed-form simple linear regression.
    m = (x_bar * y_bar - mean(xs * ys)) / (x_bar ** 2 - mean(xs ** 2))
    b = y_bar - m * x_bar
    return m, b
def squared_error(ys_org, ys_line):
    """Sum of squared residuals between the data and the fitted line."""
    residuals = ys_line - ys_org
    return sum(residuals ** 2)
def coff_of_determination(ys_org, ys_line):
    """Coefficient of determination (r^2) of the fit against the mean model."""
    y_bar = mean(ys_org)
    y_mean_line = [y_bar] * len(ys_org)
    # r^2 = 1 - SSE(fit) / SSE(mean baseline)
    return 1 - (squared_error(ys_org, ys_line) / squared_error(ys_org, y_mean_line))
#################### Fit: slope, intercept, and the regression line ####################
m, b = best_fit_slope_intercept(xs, ys)
print(m, b)
regression_line = [ (m*x)+b for x in xs]
################################### prediction #########################################
predict_x = 8 # predict y where x = 8
predict_y = (m*predict_x) + b
# r^2 against the mean model; closer to 1 means a better fit.
r_squared = coff_of_determination(ys, regression_line)
print(r_squared) # accuracy (r^2)
#################################### plotting ##########################################
plt.scatter(xs, ys)
plt.scatter(predict_x, predict_y, color='g')
plt.plot(xs, regression_line)
plt.show()
| [
"100605231@oncampus.local"
] | 100605231@oncampus.local |
cf5162cac66fc297d23a734d6bb7a8848f53e50b | 9e780f17eb49171d1f234944563225ca22b3c286 | /postgresqleu/confsponsor/management/commands/sponsor_generate_discount_invoices.py | ab716d6862d691c73452bd021d108291aeca7354 | [
"MIT"
] | permissive | pgeu/pgeu-system | e5216d5e90eec6c72770b88a5af4b3fd565cda59 | 885cfdcdadd4a721f72b699a39f26c94d1f636e0 | refs/heads/master | 2023-08-06T13:03:55.606562 | 2023-08-03T12:47:37 | 2023-08-03T12:47:37 | 161,434,221 | 15 | 27 | MIT | 2023-05-30T11:21:24 | 2018-12-12T04:48:14 | Python | UTF-8 | Python | false | false | 6,001 | py | # Generate invoices for discount codes. That is, sponsors that have ordered discount codes,
# that have now either expired or been used fully.
#
from django.core.management.base import BaseCommand
from django.utils import timezone
from django.db import transaction
from django.conf import settings
from datetime import timedelta, time
from django.db.models import Q, F, Count
from postgresqleu.confreg.models import DiscountCode
from postgresqleu.confreg.util import send_conference_mail
from postgresqleu.confsponsor.util import send_conference_sponsor_notification, send_sponsor_manager_email
from postgresqleu.invoices.util import InvoiceManager, InvoiceWrapper
from postgresqleu.util.time import today_global
class Command(BaseCommand):
    """Invoice sponsors for discount codes that have expired or are used up.

    Runs as a scheduled job; each qualifying code either gets marked as
    invoiced with a notification (zero uses) or gets a one-off invoice
    emailed to the sponsor rep.  The whole run is one transaction.
    """
    help = 'Generate invoices for discount codes'
    class ScheduledJob:
        # Once a day, only when there is at least one un-invoiced sponsor code.
        scheduled_times = [time(5, 19), ]
        internal = True
        @classmethod
        def should_run(self):
            return DiscountCode.objects.filter(sponsor__isnull=False, is_invoiced=False).exists()
    @transaction.atomic
    def handle(self, *args, **options):
        """Process all closed (expired or fully used) sponsor discount codes."""
        # We're always going to process all conferences, since most will not have any
        # open discount codes.
        filt = Q(sponsor__isnull=False, is_invoiced=False) & (Q(validuntil__lte=today_global()) | Q(num_uses__gte=F('maxuses')))
        codes = DiscountCode.objects.annotate(num_uses=Count('registrations')).filter(filt)
        for code in codes:
            # Either the code has expired, or it is fully used by now. Time to generate the invoice. We'll also
            # send an email to the sponsor (and the admins) to inform them of what's happening.
            # The invoice will be a one-off one, we don't need a registered manager for it since the
            # discounts have already been given out.
            # NOTE(review): `code.count` is presumably a model property mirroring
            # the annotated num_uses — confirm on the DiscountCode model.
            if code.count == 0:
                # In case there is not a single user, we just notify the user of this and set it to
                # invoiced in the system so we don't try again.
                code.is_invoiced = True
                code.save()
                send_conference_sponsor_notification(
                    code.conference,
                    "[{0}] Discount code expired".format(code.conference),
                    "Discount code {0} has expired without any uses.".format(code.code),
                )
                send_sponsor_manager_email(
                    code.sponsor,
                    "Discount code {0} expired".format(code.code),
                    'confsponsor/mail/discount_expired.txt',
                    {
                        'code': code,
                        'sponsor': code.sponsor,
                        'conference': code.conference,
                    },
                )
            else:
                # At least one use, so we generate the invoice
                invoicerows = []
                for r in code.registrations.all():
                    if code.discountamount:
                        # Fixed amount discount. Always apply
                        discountvalue = code.discountamount
                    else:
                        # Percentage discount, so we need to calculate it. Ordered discount codes will
                        # only support a registration-only style discount code, so only count it
                        # against that.
                        discountvalue = r.regtype.cost * code.discountpercentage / 100
                    invoicerows.append(['Attendee "{0}"'.format(r.fullname), 1, discountvalue, r.conference.vat_registrations])
                # All invoices are always due immediately
                manager = InvoiceManager()
                code.invoice = manager.create_invoice(
                    code.sponsor_rep,
                    code.sponsor_rep.email,
                    "{0} {1}".format(code.sponsor_rep.first_name, code.sponsor_rep.last_name),
                    '%s\n%s' % (code.sponsor.name, code.sponsor.invoiceaddr),
                    '{0} discount code {1}'.format(code.conference, code.code),
                    timezone.now(),
                    timezone.now() + timedelta(days=1),
                    invoicerows,
                    accounting_account=settings.ACCOUNTING_CONFREG_ACCOUNT,
                    accounting_object=code.conference.accounting_object,
                    paymentmethods=code.conference.paymentmethods.all(),
                )
                code.invoice.save()
                code.is_invoiced = True
                code.save()
                # Email the invoice itself to the sponsor rep.
                wrapper = InvoiceWrapper(code.invoice)
                wrapper.email_invoice()
                # Now also fire off emails, both to the admins and to all the managers of the sponsor
                # (so they know where the invoice was sent).
                send_conference_sponsor_notification(
                    code.conference,
                    "[{0}] Discount code {1} has been invoiced".format(code.conference, code.code),
                    "The discount code {0} has been closed,\nand an invoice has been sent to {1}.\n\nA total of {2} registrations used this code, and the total amount was {3}.\n".format(
                        code.code,
                        code.sponsor,
                        len(invoicerows),
                        code.invoice.total_amount,
                    ),
                )
                send_sponsor_manager_email(
                    code.sponsor,
                    "Discount code {0} has been invoiced".format(code.code),
                    'confsponsor/mail/discount_invoiced.txt',
                    {
                        'code': code,
                        'conference': code.conference,
                        'sponsor': code.sponsor,
                        'invoice': code.invoice,
                        'curr': settings.CURRENCY_ABBREV,
                        'expired_time': code.validuntil < today_global(),
                    },
                )
| [
"magnus@hagander.net"
] | magnus@hagander.net |
ee7756fe0f1dd8c92351b2653d9fa416284cdd6e | de919b727799fa3054386769466dc3e9b2f30322 | /utils.py | 88444485f7b3cb032377ab3132993c6bbc71a9d7 | [] | no_license | dengxiaotian123/EMN | 79471cd26e0055202f5566721b1bc30961f90adc | f4ecdf615b3f5f7e4b60c104d026a43756217b61 | refs/heads/master | 2023-01-02T20:17:16.933831 | 2020-10-31T08:44:48 | 2020-10-31T08:44:48 | 305,112,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,725 | py | #coding=utf-8
import concurrent.futures
import pickle
import numpy as np
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import text_to_word_sequence
def build_data(lines, word_dict, tid=0):
    """Convert raw tab-separated dialogue lines into word-id sequences.

    Each line is "<label>\t<context utterances joined by ###>\t<response>".
    Returns (history, true_utt): per line, the list of tokenized context
    utterances and the tokenized response, both mapped through word_dict.
    tid only tags the periodic progress output of this worker.
    """
    def word2id(token):
        # Out-of-vocabulary tokens fall back to id 0.
        return word_dict.get(token, 0)

    history = []
    true_utt = []
    for cnt, line in enumerate(lines, start=1):
        fields = line.rstrip().lower().split('\t')
        context_utterances = fields[1].split('###')
        history.append([[word2id(w) for w in text_to_word_sequence(utt)] for utt in context_utterances])
        true_utt.append([word2id(w) for w in text_to_word_sequence(fields[2])])
        if cnt % 10000 == 0:
            print(tid, cnt)
    return history, true_utt
def build_evaluate_data(lines, tid=0):
    """Tokenize evaluation lines into word-id sequences.

    Like build_data, but loads the vocabulary from worddata/word_dict.pkl and
    reads the response from the FIRST field and the ###-joined context from
    the LAST field of each tab-separated line.  Returns (history, true_utt).
    """
    with open('worddata/word_dict.pkl', 'rb') as f:
        word_dict = pickle.load(f)
    def word2id(c):
        # Out-of-vocabulary tokens map to id 0.
        if c in word_dict:
            return word_dict[c]
        else:
            return 0
    cnt = 0
    history = []
    true_utt = []
    for line in lines:
        fields = line.rstrip().lower().split('\t')
        utterance = fields[-1].split('###')
        history.append([list(map(word2id, text_to_word_sequence(each_utt))) for each_utt in utterance])
        true_utt.append(list(map(word2id, text_to_word_sequence(fields[0]))))
        cnt += 1
        # Progress heartbeat when running inside a worker process.
        if cnt % 10000 == 0:
            print(tid, cnt)
    return history, true_utt
def multi_sequences_padding(all_sequences, max_sentence_len=50):
    """Pad each dialogue to exactly 10 utterances of max_sentence_len tokens.

    all_sequences: list of dialogues, each a list of token-id sequences.
    Short dialogues are padded with all-zero utterances (length 0); long ones
    keep only their LAST 10 utterances.  Returns (padded_sequences,
    sequences_length) where lengths are clamped at max_sentence_len.
    """
    max_num_utterance = 10
    PAD_SEQUENCE = [0] * max_sentence_len
    padded_sequences = []
    sequences_length = []
    for sequences in all_sequences:
        sequences_len = len(sequences)
        sequences_length.append(get_sequences_length(sequences, maxlen=max_sentence_len))
        if sequences_len < max_num_utterance:
            # NOTE(review): += mutates the caller's inner list in place —
            # confirm callers do not reuse all_sequences afterwards.
            sequences += [PAD_SEQUENCE] * (max_num_utterance - sequences_len)
            sequences_length[-1] += [0] * (max_num_utterance - sequences_len)
        else:
            sequences = sequences[-max_num_utterance:]
            sequences_length[-1] = sequences_length[-1][-max_num_utterance:]
        sequences = pad_sequences(sequences, padding='post', maxlen=max_sentence_len)
        padded_sequences.append(sequences)
    return padded_sequences, sequences_length
def get_sequences_length(sequences, maxlen):
    """Length of each sequence, capped at maxlen."""
    return [maxlen if len(seq) > maxlen else len(seq) for seq in sequences]
def load_data(total_words):
    """Build the training pickles: embeddings, tokenized dialogues, actions.

    Reads the GloVe vectors (keeping roughly the first total_words entries),
    tokenizes data/biglearn_train.old.txt across 10 worker processes, and
    dumps embedding_matrix.pkl, train.pkl and actions_embeddings.pkl under
    worddata/.
    """
    process_num = 10
    # Process pool so the large training file is parsed in parallel.
    executor = concurrent.futures.ProcessPoolExecutor(process_num)
    base = 0
    results = []
    history = []
    true_utt = []
    word_dict = dict()
    vectors = []
    with open('data/glove.twitter.27B.200d.txt', encoding='utf8') as f:
        lines = f.readlines()
        for i, line in enumerate(lines):
            line = line.split(' ')
            # word_dict maps word -> row index into the embedding matrix.
            # NOTE(review): index 0 (the first GloVe word) collides with the
            # out-of-vocabulary fallback id 0 used by word2id — confirm.
            word_dict[line[0]] = i
            vectors.append(line[1:])
            # Keep only the most frequent words (GloVe files are ordered by
            # frequency); `> total_words` keeps total_words + 2 entries.
            if i > total_words:
                break
    with open('worddata/embedding_matrix.pkl', "wb") as f:
        pickle.dump(vectors, f)
    with open("data/biglearn_train.old.txt", encoding="utf8") as f:
        lines = f.readlines()
    total_num = 1000000
    print(total_num)
    low = 0
    step = total_num // process_num
    print(step)
    # Fan the line ranges out to the pool; `base` tags each worker's output.
    while True:
        if low < total_num:
            results.append(executor.submit(build_data, lines[low:low + step], word_dict, base))
        else:
            break
        base += 1
        low += step
    # Collect in submission order so the dataset stays aligned with the file.
    for result in results:
        h, t = result.result()
        history += h
        true_utt += t
    print(len(history))
    print(len(true_utt))
    pickle.dump([history, true_utt], open("worddata/train.pkl", "wb"))
    # Tokenize the candidate action/response list with the same vocabulary.
    actions_id = []
    with open('emb/actions.txt', encoding='utf8') as f:
        actions = f.readlines()
    def word2id(c):
        # In-vocabulary words return their index; everything else returns 0.
        if c in word_dict:
            return word_dict[c]
        else:
            return 0
    for action in actions:
        actions_id.append([word2id(word) for word in text_to_word_sequence(action)])
    with open('worddata/actions_embeddings.pkl', 'wb') as f:
        pickle.dump(actions_id, f)
def evaluate(test_file, sess, actions, actions_len, max_sentence_len, utterance_ph, all_utterance_len_ph,
             response_ph, response_len, y_pred):
    """Rank each true response against all candidate actions and report accuracy.

    For every test dialogue the true response's score is compared against the
    scores of all candidate ``actions``; prints top-1..top-10 accuracy, the
    average rank, and top-3 accuracy.

    :param test_file: path to the raw evaluation file
    :param sess: live TensorFlow session holding the trained model
    :param actions: padded candidate responses (id matrix)
    :param actions_len: lengths matching ``actions``
    :param max_sentence_len: padding length for every utterance
    :param utterance_ph, all_utterance_len_ph, response_ph, response_len:
        model placeholders fed below
    :param y_pred: model output tensor; column 1 is the "match" probability
    """
    # Candidates are scored in 3 batches to bound memory use.
    each_test_run = len(actions) // 3
    acc1 = [0.0] * 10
    rank1 = 0.0
    cnt = 0
    print('evaluating')
    with open(test_file, encoding="utf8") as f:
        lines = f.readlines()
        low = 0
        history, true_utt = build_evaluate_data(lines)
        history, history_len = multi_sequences_padding(history, max_sentence_len)
        true_utt_len = np.array(get_sequences_length(true_utt, maxlen=max_sentence_len))
        true_utt = np.array(pad_sequences(true_utt, padding='post', maxlen=max_sentence_len))
        history, history_len = np.array(history), np.array(history_len)
        # Score every (history, true response) pair in one pass.
        feed_dict = {utterance_ph: history,
                     all_utterance_len_ph: history_len,
                     response_ph: true_utt,
                     response_len: true_utt_len
                     }
        true_scores = sess.run(y_pred, feed_dict=feed_dict)
        true_scores = true_scores[:, 1]
        for i in range(true_scores.shape[0]):
            all_candidate_scores = []
            # Score the same history against every candidate action,
            # one third of the candidates per run.
            for j in range(3):
                feed_dict = {utterance_ph: np.concatenate([history[low:low + 1]] * each_test_run, axis=0),
                             all_utterance_len_ph: np.concatenate([history_len[low:low + 1]] * each_test_run, axis=0),
                             response_ph: actions[each_test_run * j:each_test_run * (j + 1)],
                             response_len: actions_len[each_test_run * j:each_test_run * (j + 1)]
                             }
                candidate_scores = sess.run(y_pred, feed_dict=feed_dict)
                all_candidate_scores.append(candidate_scores[:, 1])
            all_candidate_scores = np.concatenate(all_candidate_scores, axis=0)
            # Rank of the true response = number of candidates scoring above
            # it (the 1e-8 epsilon breaks float ties in favor of the truth).
            pos1 = np.sum(true_scores[i] + 1e-8 < all_candidate_scores)
            if pos1 < 10:
                acc1[pos1] += 1
            rank1 += pos1
            low += 1
        cnt += true_scores.shape[0]
        print([a / cnt for a in acc1])  # rank top 1 to top 10 acc
        print(rank1 / cnt)  # average rank
        print(np.sum(acc1[:3]) * 1.0 / cnt)  # top 3 acc
# Script entry point: build and pickle the training data using a
# 500000-word GloVe vocabulary.
if __name__ == '__main__':
    load_data(500000)
| [
"dhnl2016@163.com"
] | dhnl2016@163.com |
4a9de25ca11e00c67d838a2589d66d6418e577d4 | 900b8dbfbd8a9a7899b3da3a0b24c03721b1ac49 | /daopilot/daophot.py | 280b7078aad4aa18760038ee3bb987b5c8993b98 | [
"BSD-3-Clause"
] | permissive | jonathansick/daopilot | e9735956b43428fe38db0656b06e246546e09efc | d1757f3df61b715606d2027bea0d71c85b8fab07 | refs/heads/master | 2021-01-10T18:40:17.811411 | 2014-05-30T23:39:30 | 2014-05-30T23:39:30 | 6,196,022 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 15,474 | py | #!/usr/bin/env python
# encoding: utf-8
"""
Class wrapper to daophot.
2012-05-05 - Created by Jonathan Sick
"""
import os
import sys
import pexpect
class Daophot(object):
"""Object-oriented interface to drive daophot.
:param inputImagePath: is the path to the FITS image that will be measured.
This is a real (filesystem) path. All paths should be supplied, and
will be returned to the user as filesystem paths. The class internally
converts these into shortened (symlinked) paths.
:type inputImagePath: str
:param shell: name of the shell that `daophot` will run in
:type shell: str (optional)
:param cmd: name of the `daophot` executable
:type shell: str (optional)
"""
def __init__(self, inputImagePath, shell="/bin/zsh", cmd="daophot"):
super(Daophot, self).__init__()
self.inputImagePath = inputImagePath
self.cmd = cmd
self.shell = shell
self._workDir = os.path.dirname(self.inputImagePath)
# a pexpect process running daophot, None when shutdown
self._daophot = None
# Cache for paths; two levels of dictionaries. First level is keyed
# to the types of files (represented by file extension strings). Second
# level is keyed by the path names themselves
self._pathCache = {'fits': {},
'coo': {}, 'lst': {}, 'ap': {}, 'psf': {}, 'nei': {}}
self._pathCache['fits']['input_image'] \
= os.path.basename(self.inputImagePath)
self._pathCache['fits']['last'] \
= os.path.basename(self.inputImagePath)
self._startup()
def _startup(self):
"""Start a daophot session and attaches the inputImagePath's image.
Automatically called by :meth:`__init__`.
"""
# We start daophot from the working directory (the directory of the
# input image.) All output will be placed in this directory. From
# the user's perspective, the returned paths will still be relative
# to the pipeline's base directory.
startupCommand = '/bin/tcsh -c "cd %s;daophot"' % self._workDir
self._daophot = pexpect.spawn(startupCommand)
self._daophot.logfile = sys.stdout # DEBUG
self._daophot.expect("Command:")
# print self._daophot.before
self.set_option("WA", "-2") # turn off extraneous printing
self.attach('input_image')
def shutdown(self):
"""Shutdown the daophot process."""
self._daophot.sendline("exit")
self._daophot = None
def set_option(self, name, value):
"""Set the named option in daophot to a given value."""
self._daophot.sendline("OPTION")
self._daophot.expect(":") # asks for the file with parameter values
self._daophot.sendline("") # accept the defaults
self._daophot.expect("OPT>")
self._daophot.sendline("%s=%s" % (name, value))
self._daophot.expect("OPT>")
self._daophot.sendline("")
self._daophot.expect("Command:")
print self._daophot.before
def attach(self, image):
"""Attaches the given image to daophot. *image* will be resolved
either as a name in the imageCache, or as a path. (Runs daophot
*ATTACH*)
By default, the attached image will be the last one attached (or the
inputImagePath on the first run). But if *image* is specified, then
it will be resolved in two steps
1. If a name in the imageCache, that path will be used
2. If not in the imageCache, then it will be used as a path itself
"""
imagePath = self._resolve_path(image, 'fits')
self._set_last_path(imagePath, 'fits')
command = "ATTACH %s" % imagePath
self._daophot.sendline(command)
self._daophot.expect("Command:")
def find(self, nAvg=1, nSum=1, cooName=None, cooPath=None):
"""Runs the *FIND* command on the previously attached image.
:param cooName: Set to have the coordinate path cached under this name.
:type cooName: str (optional)
:param cooPath: Set as the filepath for the output coordinate file,
otherwise a default path is made.
:type cooPath: str (optional)
"""
cooPath = self._make_output_path(cooPath, cooName, "coo")
self._name_path(cooName, cooPath, 'coo')
self._set_last_path(cooPath, 'coo')
self._daophot.sendline("FIND")
# asks 'Number of frames averaged, summed:'
self._daophot.expect(":")
self._daophot.sendline("%i,%i" % (nAvg, nSum))
# asks 'File for positions (default ???.coo):'
self._daophot.expect(":")
self._daophot.sendline(cooPath)
self._daophot.expect("Are you happy with this?", timeout=60 * 20)
# print self._daophot.before
self._daophot.sendline("Y")
self._daophot.expect("Command:")
def apphot(self, coordinates, apRadPath=None, photOutputPath=None,
photOutputName=None, options=None):
"""Run aperture photometry routine *PHOTOMETRY* in daophot.
:param coordinates: refers to coordinates of stars from the find
method; it is a string to be resolved either into a name in the
path cache, a filepath itself.
:type coordinates: str
:param apRadPath: is path to the aperture radii options file. This file
must be in the working diretory. If None, then the default of
'photo.opt' is assumed.
:type apRadPath: str (optional)
:param photOutputPath: Set as the filepath of output .ap file.
:param photOutputName: Set to have .ap path cached.
:param options: Sequence of `(optionName, optionValue)` pairs (both str
values) passed to the PHOTOMETRY sub routine.
"""
self._daophot.sendline("PHOTOMETRY")
# asks for 'File with aperture radii (default photo.opt)'
self._daophot.expect(":")
if apRadPath is not None:
self._daophot.sendline(os.path.basename(apRadPath))
else:
self._daophot.sendline("") # assume default photo.opt file
self._daophot.expect("PHO>")
if options is not None:
for optionName, optionValue in options.iteritems():
self._daophot.sendline(optionName + "=" + optionValue)
self._daophot.expect("PHO>")
self._daophot.sendline("")
# asks 'Input position file (default source/sky28k.coo):'
self._daophot.expect(":")
cooPath = self._resolve_path(coordinates, 'coo')
self._daophot.sendline(cooPath)
# asks 'Output file (default source/sky28k.ap):'
self._daophot.expect(":")
photOutputPath = self._make_output_path(photOutputPath,
photOutputName, "ap")
self._name_path(photOutputName, photOutputPath, 'ap')
self._set_last_path(photOutputPath, 'ap')
self._daophot.sendline(photOutputPath)
self._daophot.expect("Command:", timeout=60 * 20)
def pick_psf_stars(self, nStars, apPhot, starListPath=None,
starListName=None, magLimit=99):
"""Picks *nStars* number of stars from the aperture photometry list
that will be used as prototypes for making a PSF model;
runs daophot *PICK*.
:param apPhot: points to the aperture photometry list (made by
apphot()). It is resolved into a name in apCache or a filepath to
the .ap file
:param nStars: is the number of stars to select, can be a str or int.
:param starListPath: and starListName and the path/name that may
be specified for the .lst file that lists the prototype psf stars.
:param magLimit: is the limiting instrumental magnitude that can be
used as a PSF prototype. Can be a str object.
"""
magLimit = str(magLimit)
nStars = str(int(nStars))
apPhotPath = self._resolve_path(apPhot, 'ap')
starListPath = self._make_output_path(starListPath,
starListName, 'lst')
self._name_path(starListName, starListPath, 'lst')
self._set_last_path(starListPath, 'lst')
self._daophot.sendline("PICK")
# ask for input file name to .ap file
self._daophot.expect(":")
self._daophot.sendline(apPhotPath)
# asks for 'Desired number of stars, faintest magnitude:'
self._daophot.expect(":")
self._daophot.sendline(",".join((nStars, magLimit)))
# asks for output file path, .lst
self._daophot.expect(":")
# TODO implement output filepath
self._daophot.sendline("")
self._daophot.expect("Command:", timeout=60 * 10)
def make_psf(self, apPhot, starList, psfPath=None, psfName=None):
"""Computes a PSF model with the daophot *PSF* command.
:param apPhot: points to the aperture photometry list (made by
apphot()). It is resolved into a name in apCache or a filepath
to the .ap file
:param starList: points to the psf prototype star list.
:return: text output of fitting routine, path to the psf file and path
to the neighbours file
"""
apPhotPath = self._resolve_path(apPhot, 'ap')
starListPath = self._resolve_path(starList, 'lst')
psfPath = self._make_output_path(psfPath, psfName, 'psf')
self._name_path(psfName, psfPath, 'psf')
self._set_last_path(psfPath, 'psf')
# make with the neighbours file name (.nei).
# It always follows this form:
fileRoot = os.path.splitext(psfPath)[0]
neiPath = ".".join((fileRoot, 'nei'))
self._set_last_path(neiPath, 'nei')
if os.path.exists(neiPath):
os.remove(neiPath)
self._daophot.sendline("PSF")
# asks for file with aperture phot results
self._daophot.expect(":")
self._daophot.sendline(apPhotPath)
# asks for file with PSF prototype star list
self._daophot.expect(":")
self._daophot.sendline(starListPath)
# asks for file for the psf output file
self._daophot.expect(":")
self._daophot.sendline(psfPath)
# funny hack; pexpect has trouble here, but works
# self._daophot.expect(".nei", timeout=120)
# send a CR to make sure we're clean before leaving
# self._daophot.sendline("")
result = self._daophot.expect(["nei", "Failed to converge.",
"Command:"], timeout=60 * 10)
# save daophot's output of fit quality
fittingText = self._daophot.before
if result == 1 or result == 2:
# failed to converge
print "didn't converge. now what?"
# raise PSFNotConverged
return None, None, None
# otherwise we should have good convergence
print result,
print "Ok convergence?"
self._daophot.sendline("")
self._daophot.expect("Command:")
return fittingText, os.path.join(self._workDir, psfPath), \
os.path.join(self._workDir, neiPath)
def substar(self, substarList, psf, outputPath, keepers=None):
"""Subtracts stars in `substarList` from the attached image using the
`psf` model.
:param substarList: is a **path* to a photometry file of all stars
that should be subtracted out of the image.
:type substarList: str
:param psf: is a name/path resolved into a path to a PSF model.
:param outputPath: is a **path** where the star-subtracted FITS image
will be placed. Any existing file will be deleted.
:param keepers: is a **path** (not a resolvable file name) to a
listing stars that should be kept in the subtracted image.
If `None`, then no stars are kept.
:return: outputPath, relative to the pipeline.
"""
psfPath = self._resolve_path(psf, 'psf')
if os.path.exists(outputPath):
os.remove(outputPath)
self._daophot.sendline("SUBSTAR")
self._daophot.expect(":") # File with the PSF (*)
self._daophot.sendline(os.path.basename(psfPath))
self._daophot.expect(":") # File with photometry (*)
self._daophot.sendline(os.path.basename(substarList))
# print "send substarList"
# print self._daophot.interact()
self._daophot.expect("in\?") # Do you have stars to leave in
if keepers is not None:
self._daophot.sendline("Y")
# print self._daophot.before
self._daophot.expect(":") # File with star list (*)
self._daophot.sendline(os.path.basename(keepers))
else:
self._daophot.sendline("N")
self._daophot.expect(":") # Name for subtracted image (*)
self._daophot.sendline(os.path.basename(outputPath))
self._daophot.expect("Command:", timeout=60 * 10)
return outputPath
def get_path(self, name, ext):
"""Returns the named path of type ext. The path will be relative
to the pipeline's base... as the user would expect."""
return os.path.join(self._workDir, self._resolve_path(name, ext))
def _resolve_path(self, path, ext):
"""Resolves path into a path to the given type (via ext extension)
of file if it is name. Or if it is a path already, that
path will be passed through. The returned path is relative to the
workDir (working directory) of this Daophot.
"""
print path,
print ext
try:
resolvedPath = self._pathCache[ext][path]
except:
print "This is a path"
print path
resolvedPath = os.path.basename(path)
return resolvedPath
def _name_path(self, name, path, ext):
"""Adds the path of type ext(ention) to its cache under given name,
if the name is not None.
"""
if name is not None:
self._pathCache[ext][name] = path
def _set_last_path(self, path, ext):
"""Makes the path be filed under 'last' in its type's cache."""
self._pathCache[ext]['last'] = path
def _make_output_path(self, path, name, ext):
"""Forms an output file path. If path is None, then a path is made
using the name. If both path and name are None, then a path is formed
from the inputImagePath and the filename extension *ext*.
The path is force to be relative to `workDir`.
"""
if path is None:
# make a default ap photometry output file path
fileRoot = os.path.splitext(
os.path.basename(self.inputImagePath))[0]
if name is not None:
fileRoot = "_".join((fileRoot, name))
path = ".".join((fileRoot, ext))
else:
path = os.path.basename(path)
fullpath = os.path.join(self._workDir, path)
if os.path.exists(fullpath):
print "removing existing %s" % fullpath
os.remove(fullpath)
return path
| [
"jonathansick@mac.com"
] | jonathansick@mac.com |
db1e4cd907367ea490f246ec7b48230d612edf58 | f913913b9f248f5a5d38500f34980a1e81d9e23c | /authenticate.py | 9b0cc5df899687f0dfe880564126acbbc5ecb11e | [
"MIT"
] | permissive | ericpotvin/RaspberryEcho | f65ab73be453d5ba890d65c07c17599f4030a3b9 | bc49f7c48fcef849f096e746dd7920bd349bf51a | refs/heads/master | 2021-01-12T21:07:17.008669 | 2016-09-22T04:28:58 | 2016-09-22T04:28:58 | 68,559,965 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,515 | py | # -*- coding: UTF-8 -*-
""" Authenticate the user credentials
"""
from lib.AlexaService import AlexaService
from lib.Config import Config
import cherrypy
import os
import requests
import json
import urllib
class Start(object):
    """ The Web object

    CherryPy application implementing the two-step Amazon
    Login-with-Amazon flow used to obtain an Alexa Voice Service
    refresh token.
    """
    def index(self):
        """ The main page

        Redirects the browser to Amazon's authorization endpoint,
        requesting the ``alexa:all`` scope for this product/device.
        """
        product_id = Config.get_config(Config.FIELD_PRODUCT_ID)
        client_id = Config.get_config(Config.FIELD_CLIENT_ID)
        scope_data = json.dumps(
            {"alexa:all": {
                "productID": product_id,
                "productInstanceAttributes": {
                    "deviceSerialNumber": "001"}
            }}
        )
        # Amazon redirects back to this /code URL with ?code=... on success.
        callback = cherrypy.url() + "code"
        payload = {
            "client_id": client_id,
            "scope": "alexa:all",
            "scope_data": scope_data,
            "response_type": "code",
            "redirect_uri": callback
        }
        # The request is only prepared (never sent) to build the full
        # authorization URL with encoded query parameters.
        req = requests.Request(
            'GET', AlexaService.AMAZON_BASE_URL, params=payload
        )
        raise cherrypy.HTTPRedirect(req.prepare().url)

    def code(self, var=None, **params):
        """ The code page

        OAuth callback: exchanges the authorization code for tokens and
        stores the refresh token in the credentials file.
        """
        client_id = Config.get_config(Config.FIELD_CLIENT_ID)
        client_secret = Config.get_config(Config.FIELD_CLIENT_SECRET)
        code = urllib.quote(cherrypy.request.params['code'])
        callback = cherrypy.url()
        payload = {
            "client_id" : client_id,
            "client_secret" : client_secret,
            "code" : code,
            "grant_type" : "authorization_code",
            "redirect_uri" : callback
        }
        result = requests.post(AlexaService.AMAZON_TOKEN_URL, data=payload)
        result = result.json()
        # Save the refresh token and reset access token
        Config.save_config(
            Config.FIELD_REFRESH_TOKEN,
            format(result['refresh_token'])
        )
        Config.save_config(Config.FIELD_ACCESS_TOKEN, "")
        html = "<b>Success!</b><br/>"
        html += "Refresh token has been added to your credentials file.<br/>"
        html += "You may now reboot the Pi or restart the service.<br/>"
        html += "Your token: %s" % result['refresh_token']
        return html

    # Expose both handlers to CherryPy's URL dispatcher.
    index.exposed = True
    code.exposed = True
if __name__ == "__main__":
    # Listen on all interfaces so a browser on another machine can reach
    # the Pi during setup.
    cherrypy.config.update(
        {'server.socket_host': '0.0.0.0'}
    )
    # Port comes from the PORT environment variable (default 5000).
    cherrypy.config.update(
        {'server.socket_port': int(os.environ.get('PORT', '5000'))}
    )
    cherrypy.quickstart(Start())
| [
"ericpotvin@users.noreply.github.com"
] | ericpotvin@users.noreply.github.com |
173ed8d2231ae8369216186c5c54ca5160d57e3f | 263f99fe094e4a6ad65fdd61a43fd3c2a10cd433 | /Python/OpenCVFaceRecognize/faceDetect/filters.py | 6438f25cb800548748577bd78a67487c94af2580 | [] | no_license | 6769/m14kabing | f08934efda1ba96ff6cfaf3e2ffbb1f1d6e15f4a | 5bcfc10d95ae0d3cb6dc8a7d958fa1605559bf0a | refs/heads/master | 2021-01-19T02:02:19.845158 | 2020-04-29T15:17:02 | 2020-04-29T15:17:02 | 18,762,879 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,824 | py | import cv2
import numpy
import utils
def recolorRC(src, dst):
    """Simulate conversion from BGR to RC (red, cyan).

    Both `src` and `dst` must be BGR images. Blues and greens are
    replaced with cyans, similar to Technicolor Process 2 (early color
    movies) and CGA Palette 3 (early color PCs).

    Pseudocode:
    dst.b = dst.g = 0.5 * (src.b + src.g)
    dst.r = src.r
    """
    blue, green, red = cv2.split(src)
    # Average blue and green in place to form the shared cyan channel.
    cv2.addWeighted(blue, 0.5, green, 0.5, 0, blue)
    cv2.merge((blue, blue, red), dst)
def recolorRGV(src, dst):
    """Simulate conversion from BGR to RGV (red, green, value).

    Both `src` and `dst` must be BGR images. Blues are desaturated,
    similar to Technicolor Process 1 (early color movies).

    Pseudocode:
    dst.b = min(src.b, src.g, src.r)
    dst.g = src.g
    dst.r = src.r
    """
    blue, green, red = cv2.split(src)
    # Desaturate blue: it becomes the per-pixel minimum of B, G and R.
    cv2.min(blue, green, blue)
    cv2.min(blue, red, blue)
    cv2.merge((blue, green, red), dst)
def recolorCMV(src, dst):
    """Simulate conversion from BGR to CMV (cyan, magenta, value).

    Both `src` and `dst` must be BGR images. Yellows are desaturated,
    similar to CGA Palette 1 (early color PCs).

    Pseudocode:
    dst.b = max(src.b, src.g, src.r)
    dst.g = src.g
    dst.r = src.r
    """
    blue, green, red = cv2.split(src)
    # Desaturate yellow: blue becomes the per-pixel maximum of B, G and R.
    cv2.max(blue, green, blue)
    cv2.max(blue, red, blue)
    cv2.merge((blue, green, red), dst)
def blend(foregroundSrc, backgroundSrc, dst, alphaMask):
    """Alpha-blend `foregroundSrc` over `backgroundSrc` into `dst`.

    `alphaMask` is an integer mask whose dtype's maximum value means
    fully opaque foreground; it is normalized to [0, 1] before blending.
    """
    # Normalize the mask to [0, 1] using the dtype's full range.
    maxAlpha = numpy.iinfo(alphaMask.dtype).max
    alpha = (1.0 / maxAlpha) * alphaMask
    inverseAlpha = numpy.ones_like(alpha)
    inverseAlpha[:] = inverseAlpha - alpha

    foregroundChannels = cv2.split(foregroundSrc)
    backgroundChannels = cv2.split(backgroundSrc)

    # Blend channel by channel; assigning into the background channel
    # in place also casts the float result back to the channel dtype.
    for fgChannel, bgChannel in zip(foregroundChannels, backgroundChannels):
        bgChannel[:] = alpha * fgChannel + inverseAlpha * bgChannel

    cv2.merge(backgroundChannels, dst)
def strokeEdges(src, dst, blurKsize = 7, edgeKsize = 5):
    """Darken edges of `src` into `dst` (a stroked/inked look).

    The image is optionally median-blurred (when `blurKsize` >= 3) to
    suppress noise, Laplacian-filtered to find edges, and each color
    channel is multiplied by the inverted edge strength.
    """
    if blurKsize < 3:
        gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
    else:
        gray = cv2.cvtColor(cv2.medianBlur(src, blurKsize),
                            cv2.COLOR_BGR2GRAY)
    # In-place Laplacian; cv2.cv.CV_8U is the legacy (pre-3.x) depth flag.
    cv2.Laplacian(gray, cv2.cv.CV_8U, gray, ksize = edgeKsize)
    # Strong edges -> alpha near 0, flat areas -> alpha near 1.
    normalizedInverseAlpha = (1.0 / 255) * (255 - gray)
    channels = cv2.split(src)
    for channel in channels:
        channel[:] = channel * normalizedInverseAlpha
    cv2.merge(channels, dst)
class VFuncFilter(object):
    """A filter that maps a function over V (or over all of BGR)."""

    def __init__(self, vFunc = None, dtype = numpy.uint8):
        # One lookup-table entry per representable channel value.
        length = numpy.iinfo(dtype).max + 1
        self._vLookupArray = utils.createLookupArray(vFunc, length)

    def apply(self, src, dst):
        """Apply the filter with a BGR or gray source/destination."""
        utils.applyLookupArray(self._vLookupArray,
                               utils.flatView(src),
                               utils.flatView(dst))
class VCurveFilter(VFuncFilter):
    """A filter that applies a control-point curve to V (or all of BGR)."""

    def __init__(self, vPoints, dtype = numpy.uint8):
        super(VCurveFilter, self).__init__(
            utils.createCurveFunc(vPoints), dtype)
class BGRFuncFilter(object):
    """A filter that applies a separate function to each of B, G and R."""

    def __init__(self, vFunc = None, bFunc = None, gFunc = None,
                 rFunc = None, dtype = numpy.uint8):
        length = numpy.iinfo(dtype).max + 1

        def makeLookup(channelFunc):
            # Each channel's lookup table composes the per-channel
            # function with the shared value function.
            return utils.createLookupArray(
                utils.createCompositeFunc(channelFunc, vFunc), length)

        self._bLookupArray = makeLookup(bFunc)
        self._gLookupArray = makeLookup(gFunc)
        self._rLookupArray = makeLookup(rFunc)

    def apply(self, src, dst):
        """Apply the filter with a BGR source/destination."""
        blue, green, red = cv2.split(src)
        utils.applyLookupArray(self._bLookupArray, blue, blue)
        utils.applyLookupArray(self._gLookupArray, green, green)
        utils.applyLookupArray(self._rLookupArray, red, red)
        cv2.merge([blue, green, red], dst)
class BGRCurveFilter(BGRFuncFilter):
    """A filter that applies a control-point curve to each of B, G and R."""

    def __init__(self, vPoints = None, bPoints = None,
                 gPoints = None, rPoints = None, dtype = numpy.uint8):
        super(BGRCurveFilter, self).__init__(
            vFunc = utils.createCurveFunc(vPoints),
            bFunc = utils.createCurveFunc(bPoints),
            gFunc = utils.createCurveFunc(gPoints),
            rFunc = utils.createCurveFunc(rPoints),
            dtype = dtype)
class BGRCrossProcessCurveFilter(BGRCurveFilter):
    """Emulates cross-processed film by bending the B, G and R curves."""

    def __init__(self, dtype = numpy.uint8):
        super(BGRCrossProcessCurveFilter, self).__init__(
            bPoints = [(0, 20), (255, 235)],
            gPoints = [(0, 0), (56, 39), (208, 226), (255, 255)],
            rPoints = [(0, 0), (56, 22), (211, 255), (255, 255)],
            dtype = dtype)
class BGRPortraCurveFilter(BGRCurveFilter):
    """Emulates Kodak Portra film by bending the V, B, G and R curves."""

    def __init__(self, dtype = numpy.uint8):
        super(BGRPortraCurveFilter, self).__init__(
            vPoints = [(0, 0), (23, 20), (157, 173), (255, 255)],
            bPoints = [(0, 0), (41, 46), (231, 228), (255, 255)],
            gPoints = [(0, 0), (52, 47), (189, 196), (255, 255)],
            rPoints = [(0, 0), (69, 69), (213, 218), (255, 255)],
            dtype = dtype)
class BGRProviaCurveFilter(BGRCurveFilter):
    """Emulates Fuji Provia film by bending the B, G and R curves."""

    def __init__(self, dtype = numpy.uint8):
        super(BGRProviaCurveFilter, self).__init__(
            bPoints = [(0, 0), (35, 25), (205, 227), (255, 255)],
            gPoints = [(0, 0), (27, 21), (196, 207), (255, 255)],
            rPoints = [(0, 0), (59, 54), (202, 210), (255, 255)],
            dtype = dtype)
class BGRVelviaCurveFilter(BGRCurveFilter):
    """Emulates Fuji Velvia film by bending the V, B, G and R curves."""

    def __init__(self, dtype = numpy.uint8):
        super(BGRVelviaCurveFilter, self).__init__(
            vPoints = [(0, 0), (128, 118), (221, 215), (255, 255)],
            bPoints = [(0, 0), (25, 21), (122, 153), (165, 206), (255, 255)],
            gPoints = [(0, 0), (25, 21), (95, 102), (181, 208), (255, 255)],
            rPoints = [(0, 0), (41, 28), (183, 209), (255, 255)],
            dtype = dtype)
class VConvolutionFilter(object):
    """A filter that applies a convolution to V (or all of BGR).

    Base class for the kernel filters below; subclasses supply the kernel.
    """
    def __init__(self, kernel):
        # 2-D weight array passed straight to cv2.filter2D.
        self._kernel = kernel
    def apply(self, src, dst):
        """Apply the filter with a BGR or gray source/destination."""
        # ddepth=-1 keeps the destination depth equal to the source depth.
        cv2.filter2D(src, -1, self._kernel, dst)
class BlurFilter(VConvolutionFilter):
    """A blur filter with a 2-pixel radius."""

    def __init__(self):
        # Uniform 5x5 averaging kernel; each weight is 1/25 = 0.04.
        kernel = numpy.full((5, 5), 0.04)
        VConvolutionFilter.__init__(self, kernel)
class SharpenFilter(VConvolutionFilter):
    """A sharpen filter with a 1-pixel radius."""

    def __init__(self):
        # Neighbors weighted -1 with a center of 9; weights sum to 1 so
        # overall brightness is preserved.
        kernel = numpy.full((3, 3), -1)
        kernel[1, 1] = 9
        VConvolutionFilter.__init__(self, kernel)
class FindEdgesFilter(VConvolutionFilter):
    """An edge-finding filter with a 1-pixel radius."""

    def __init__(self):
        # Neighbors weighted -1 with a center of 8; weights sum to 0 so
        # flat regions go to black and only edges remain.
        kernel = numpy.full((3, 3), -1)
        kernel[1, 1] = 8
        VConvolutionFilter.__init__(self, kernel)
class EmbossFilter(VConvolutionFilter):
    """An emboss filter with a 1-pixel radius."""

    def __init__(self):
        # Antisymmetric kernel: negative weights above/left and positive
        # weights below/right produce the embossed relief look.
        # (Removed stray corruption tokens that had been appended to the
        # final line and made the class a syntax error.)
        kernel = numpy.array([[-2, -1, 0],
                              [-1, 1, 1],
                              [0, 1, 2]])
        VConvolutionFilter.__init__(self, kernel)
"5pipitk@gmail.com"
] | 5pipitk@gmail.com |
a46a35d0aa6bca0111fdad98c350780534df69cd | d088b4edfa6c4211c31f681947ad942e7a4974aa | /test.py | 7f4b252ecc9a22cdfd09511d1b4085f761a3201c | [] | no_license | leftshoe/cython-example | b5dfc875e85c19faa3a997741d8647ea4f5ab3ae | 3d181422b1a5e2e788cf045991acfffd0e880d6a | refs/heads/master | 2016-09-10T21:37:07.870489 | 2013-03-07T07:27:28 | 2013-03-07T07:27:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | import numpy as np
import heap
#import heap_original as heap
import time
# Make an initial heap: a sorted array is a valid min-heap.
data = np.sort(np.random.rand(100000))

sTime = time.time()
# Do some processing: repeatedly perturb the root (sift-up call left
# disabled for comparison against the original implementation).
for i in range(100000):
    data[0] += 0.1*np.random.rand()
    #heap.siftup(data,0)
eTime = time.time()

# Parenthesized single-argument print works identically under Python 2
# and Python 3; the original bare py2 print also had stray corruption
# tokens appended, which are removed here.
print("Took: %1.2f seconds" % (eTime-sTime))
"aaron.defazio@gmail.com"
] | aaron.defazio@gmail.com |
3fd932279804d8d778f6c54d1e6481c85aea76df | 9cd180fc7594eb018c41f0bf0b54548741fd33ba | /sdk/python/pulumi_azure_nextgen/containerservice/latest/outputs.py | 98aae238eabc64dcdf8cf187997aef870b01307b | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | MisinformedDNA/pulumi-azure-nextgen | c71971359450d03f13a53645171f621e200fe82d | f0022686b655c2b0744a9f47915aadaa183eed3b | refs/heads/master | 2022-12-17T22:27:37.916546 | 2020-09-28T16:03:59 | 2020-09-28T16:03:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98,356 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'AgentPoolUpgradeSettingsResponse',
'ContainerServiceAgentPoolProfileResponse',
'ContainerServiceCustomProfileResponse',
'ContainerServiceDiagnosticsProfileResponse',
'ContainerServiceLinuxProfileResponse',
'ContainerServiceMasterProfileResponse',
'ContainerServiceNetworkProfileResponse',
'ContainerServiceOrchestratorProfileResponse',
'ContainerServiceServicePrincipalProfileResponse',
'ContainerServiceSshConfigurationResponse',
'ContainerServiceSshPublicKeyResponse',
'ContainerServiceVMDiagnosticsResponse',
'ContainerServiceWindowsProfileResponse',
'CredentialResultResponseResult',
'KeyVaultSecretRefResponse',
'ManagedClusterAADProfileResponse',
'ManagedClusterAPIServerAccessProfileResponse',
'ManagedClusterAddonProfileResponse',
'ManagedClusterAddonProfileResponseIdentity',
'ManagedClusterAgentPoolProfileResponse',
'ManagedClusterIdentityResponse',
'ManagedClusterIdentityResponseUserAssignedIdentities',
'ManagedClusterLoadBalancerProfileResponse',
'ManagedClusterLoadBalancerProfileResponseManagedOutboundIPs',
'ManagedClusterLoadBalancerProfileResponseOutboundIPPrefixes',
'ManagedClusterLoadBalancerProfileResponseOutboundIPs',
'ManagedClusterPropertiesResponseAutoScalerProfile',
'ManagedClusterPropertiesResponseIdentityProfile',
'ManagedClusterSKUResponse',
'ManagedClusterServicePrincipalProfileResponse',
'ManagedClusterWindowsProfileResponse',
'NetworkProfileResponse',
'OpenShiftManagedClusterAADIdentityProviderResponse',
'OpenShiftManagedClusterAgentPoolProfileResponse',
'OpenShiftManagedClusterAuthProfileResponse',
'OpenShiftManagedClusterIdentityProviderResponse',
'OpenShiftManagedClusterMasterPoolProfileResponse',
'OpenShiftRouterProfileResponse',
'PowerStateResponse',
'PrivateEndpointResponse',
'PrivateLinkServiceConnectionStateResponse',
'PurchasePlanResponse',
'ResourceReferenceResponse',
]
# NOTE: auto-generated by the Pulumi SDK Generator; change the generator,
# not this class, or edits will be lost on regeneration.
@pulumi.output_type
class AgentPoolUpgradeSettingsResponse(dict):
    """
    Settings for upgrading an agentpool
    """
    def __init__(__self__, *,
                 max_surge: Optional[str] = None):
        """
        Settings for upgrading an agentpool
        :param str max_surge: Count or percentage of additional nodes to be added during upgrade. If empty uses AKS default
        """
        if max_surge is not None:
            pulumi.set(__self__, "max_surge", max_surge)

    @property
    @pulumi.getter(name="maxSurge")
    def max_surge(self) -> Optional[str]:
        """
        Count or percentage of additional nodes to be added during upgrade. If empty uses AKS default
        """
        return pulumi.get(self, "max_surge")

    def _translate_property(self, prop):
        # Map camelCase wire names back to snake_case Python attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# NOTE: auto-generated by the Pulumi SDK Generator; change the generator,
# not this class, or edits will be lost on regeneration.
@pulumi.output_type
class ContainerServiceAgentPoolProfileResponse(dict):
    """
    Profile for the container service agent pool.
    """
    def __init__(__self__, *,
                 fqdn: str,
                 name: str,
                 vm_size: str,
                 count: Optional[int] = None,
                 dns_prefix: Optional[str] = None,
                 os_disk_size_gb: Optional[int] = None,
                 os_type: Optional[str] = None,
                 ports: Optional[Sequence[int]] = None,
                 storage_profile: Optional[str] = None,
                 vnet_subnet_id: Optional[str] = None):
        """
        Profile for the container service agent pool.
        :param str fqdn: FQDN for the agent pool.
        :param str name: Unique name of the agent pool profile in the context of the subscription and resource group.
        :param str vm_size: Size of agent VMs.
        :param int count: Number of agents (VMs) to host docker containers. Allowed values must be in the range of 1 to 100 (inclusive). The default value is 1.
        :param str dns_prefix: DNS prefix to be used to create the FQDN for the agent pool.
        :param int os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every machine in this master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified.
        :param str os_type: OsType to be used to specify os type. Choose from Linux and Windows. Default to Linux.
        :param Sequence[int] ports: Ports number array used to expose on this agent pool. The default opened ports are different based on your choice of orchestrator.
        :param str storage_profile: Storage profile specifies what kind of storage used. Choose from StorageAccount and ManagedDisks. Leave it empty, we will choose for you based on the orchestrator choice.
        :param str vnet_subnet_id: VNet SubnetID specifies the VNet's subnet identifier.
        """
        # Required properties are always set; optional ones only when given.
        pulumi.set(__self__, "fqdn", fqdn)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "vm_size", vm_size)
        if count is not None:
            pulumi.set(__self__, "count", count)
        if dns_prefix is not None:
            pulumi.set(__self__, "dns_prefix", dns_prefix)
        if os_disk_size_gb is not None:
            pulumi.set(__self__, "os_disk_size_gb", os_disk_size_gb)
        if os_type is not None:
            pulumi.set(__self__, "os_type", os_type)
        if ports is not None:
            pulumi.set(__self__, "ports", ports)
        if storage_profile is not None:
            pulumi.set(__self__, "storage_profile", storage_profile)
        if vnet_subnet_id is not None:
            pulumi.set(__self__, "vnet_subnet_id", vnet_subnet_id)

    @property
    @pulumi.getter
    def fqdn(self) -> str:
        """
        FQDN for the agent pool.
        """
        return pulumi.get(self, "fqdn")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Unique name of the agent pool profile in the context of the subscription and resource group.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> str:
        """
        Size of agent VMs.
        """
        return pulumi.get(self, "vm_size")

    @property
    @pulumi.getter
    def count(self) -> Optional[int]:
        """
        Number of agents (VMs) to host docker containers. Allowed values must be in the range of 1 to 100 (inclusive). The default value is 1.
        """
        return pulumi.get(self, "count")

    @property
    @pulumi.getter(name="dnsPrefix")
    def dns_prefix(self) -> Optional[str]:
        """
        DNS prefix to be used to create the FQDN for the agent pool.
        """
        return pulumi.get(self, "dns_prefix")

    @property
    @pulumi.getter(name="osDiskSizeGB")
    def os_disk_size_gb(self) -> Optional[int]:
        """
        OS Disk Size in GB to be used to specify the disk size for every machine in this master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified.
        """
        return pulumi.get(self, "os_disk_size_gb")

    @property
    @pulumi.getter(name="osType")
    def os_type(self) -> Optional[str]:
        """
        OsType to be used to specify os type. Choose from Linux and Windows. Default to Linux.
        """
        return pulumi.get(self, "os_type")

    @property
    @pulumi.getter
    def ports(self) -> Optional[Sequence[int]]:
        """
        Ports number array used to expose on this agent pool. The default opened ports are different based on your choice of orchestrator.
        """
        return pulumi.get(self, "ports")

    @property
    @pulumi.getter(name="storageProfile")
    def storage_profile(self) -> Optional[str]:
        """
        Storage profile specifies what kind of storage used. Choose from StorageAccount and ManagedDisks. Leave it empty, we will choose for you based on the orchestrator choice.
        """
        return pulumi.get(self, "storage_profile")

    @property
    @pulumi.getter(name="vnetSubnetID")
    def vnet_subnet_id(self) -> Optional[str]:
        """
        VNet SubnetID specifies the VNet's subnet identifier.
        """
        return pulumi.get(self, "vnet_subnet_id")

    def _translate_property(self, prop):
        # Map camelCase wire names back to snake_case Python attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ContainerServiceCustomProfileResponse(dict):
    """
    Properties to configure a custom container service cluster.
    """
    def __init__(__self__, *,
                 orchestrator: str):
        """
        Properties to configure a custom container service cluster.
        :param str orchestrator: The name of the custom orchestrator to use.
        """
        pulumi.set(__self__, "orchestrator", orchestrator)

    @property
    @pulumi.getter
    def orchestrator(self) -> str:
        """
        The name of the custom orchestrator to use.
        """
        return pulumi.get(self, "orchestrator")

    def _translate_property(self, prop):
        # Map camelCase wire property names to this class's snake_case names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ContainerServiceDiagnosticsProfileResponse(dict):
    """
    Profile for diagnostics on the container service cluster.
    """
    def __init__(__self__, *,
                 vm_diagnostics: 'outputs.ContainerServiceVMDiagnosticsResponse'):
        """
        Profile for diagnostics on the container service cluster.
        :param 'ContainerServiceVMDiagnosticsResponseArgs' vm_diagnostics: Profile for diagnostics on the container service VMs.
        """
        pulumi.set(__self__, "vm_diagnostics", vm_diagnostics)

    @property
    @pulumi.getter(name="vmDiagnostics")
    def vm_diagnostics(self) -> 'outputs.ContainerServiceVMDiagnosticsResponse':
        """
        Profile for diagnostics on the container service VMs.
        """
        return pulumi.get(self, "vm_diagnostics")

    def _translate_property(self, prop):
        # Map camelCase wire property names to this class's snake_case names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ContainerServiceLinuxProfileResponse(dict):
    """
    Profile for Linux VMs in the container service cluster.
    """
    def __init__(__self__, *,
                 admin_username: str,
                 ssh: 'outputs.ContainerServiceSshConfigurationResponse'):
        """
        Profile for Linux VMs in the container service cluster.
        :param str admin_username: The administrator username to use for Linux VMs.
        :param 'ContainerServiceSshConfigurationResponseArgs' ssh: SSH configuration for Linux-based VMs running on Azure.
        """
        pulumi.set(__self__, "admin_username", admin_username)
        pulumi.set(__self__, "ssh", ssh)

    @property
    @pulumi.getter(name="adminUsername")
    def admin_username(self) -> str:
        """
        The administrator username to use for Linux VMs.
        """
        return pulumi.get(self, "admin_username")

    @property
    @pulumi.getter
    def ssh(self) -> 'outputs.ContainerServiceSshConfigurationResponse':
        """
        SSH configuration for Linux-based VMs running on Azure.
        """
        return pulumi.get(self, "ssh")

    def _translate_property(self, prop):
        # Map camelCase wire property names to this class's snake_case names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ContainerServiceMasterProfileResponse(dict):
    """
    Profile for the container service master.
    """
    def __init__(__self__, *,
                 dns_prefix: str,
                 fqdn: str,
                 vm_size: str,
                 count: Optional[int] = None,
                 first_consecutive_static_ip: Optional[str] = None,
                 os_disk_size_gb: Optional[int] = None,
                 storage_profile: Optional[str] = None,
                 vnet_subnet_id: Optional[str] = None):
        """
        Profile for the container service master.
        :param str dns_prefix: DNS prefix to be used to create the FQDN for the master pool.
        :param str fqdn: FQDN for the master pool.
        :param str vm_size: Size of agent VMs.
        :param int count: Number of masters (VMs) in the container service cluster. Allowed values are 1, 3, and 5. The default value is 1.
        :param str first_consecutive_static_ip: FirstConsecutiveStaticIP used to specify the first static ip of masters.
        :param int os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every machine in this master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified.
        :param str storage_profile: Storage profile specifies what kind of storage used. Choose from StorageAccount and ManagedDisks. Leave it empty, we will choose for you based on the orchestrator choice.
        :param str vnet_subnet_id: VNet SubnetID specifies the VNet's subnet identifier.
        """
        pulumi.set(__self__, "dns_prefix", dns_prefix)
        pulumi.set(__self__, "fqdn", fqdn)
        pulumi.set(__self__, "vm_size", vm_size)
        # Optional fields are only recorded when explicitly provided.
        if count is not None:
            pulumi.set(__self__, "count", count)
        if first_consecutive_static_ip is not None:
            pulumi.set(__self__, "first_consecutive_static_ip", first_consecutive_static_ip)
        if os_disk_size_gb is not None:
            pulumi.set(__self__, "os_disk_size_gb", os_disk_size_gb)
        if storage_profile is not None:
            pulumi.set(__self__, "storage_profile", storage_profile)
        if vnet_subnet_id is not None:
            pulumi.set(__self__, "vnet_subnet_id", vnet_subnet_id)

    @property
    @pulumi.getter(name="dnsPrefix")
    def dns_prefix(self) -> str:
        """
        DNS prefix to be used to create the FQDN for the master pool.
        """
        return pulumi.get(self, "dns_prefix")

    @property
    @pulumi.getter
    def fqdn(self) -> str:
        """
        FQDN for the master pool.
        """
        return pulumi.get(self, "fqdn")

    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> str:
        """
        Size of agent VMs.
        """
        return pulumi.get(self, "vm_size")

    @property
    @pulumi.getter
    def count(self) -> Optional[int]:
        """
        Number of masters (VMs) in the container service cluster. Allowed values are 1, 3, and 5. The default value is 1.
        """
        return pulumi.get(self, "count")

    @property
    @pulumi.getter(name="firstConsecutiveStaticIP")
    def first_consecutive_static_ip(self) -> Optional[str]:
        """
        FirstConsecutiveStaticIP used to specify the first static ip of masters.
        """
        return pulumi.get(self, "first_consecutive_static_ip")

    @property
    @pulumi.getter(name="osDiskSizeGB")
    def os_disk_size_gb(self) -> Optional[int]:
        """
        OS Disk Size in GB to be used to specify the disk size for every machine in this master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified.
        """
        return pulumi.get(self, "os_disk_size_gb")

    @property
    @pulumi.getter(name="storageProfile")
    def storage_profile(self) -> Optional[str]:
        """
        Storage profile specifies what kind of storage used. Choose from StorageAccount and ManagedDisks. Leave it empty, we will choose for you based on the orchestrator choice.
        """
        return pulumi.get(self, "storage_profile")

    @property
    @pulumi.getter(name="vnetSubnetID")
    def vnet_subnet_id(self) -> Optional[str]:
        """
        VNet SubnetID specifies the VNet's subnet identifier.
        """
        return pulumi.get(self, "vnet_subnet_id")

    def _translate_property(self, prop):
        # Map camelCase wire property names to this class's snake_case names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ContainerServiceNetworkProfileResponse(dict):
    """
    Profile of network configuration.
    """
    def __init__(__self__, *,
                 dns_service_ip: Optional[str] = None,
                 docker_bridge_cidr: Optional[str] = None,
                 load_balancer_profile: Optional['outputs.ManagedClusterLoadBalancerProfileResponse'] = None,
                 load_balancer_sku: Optional[str] = None,
                 network_mode: Optional[str] = None,
                 network_plugin: Optional[str] = None,
                 network_policy: Optional[str] = None,
                 outbound_type: Optional[str] = None,
                 pod_cidr: Optional[str] = None,
                 service_cidr: Optional[str] = None):
        """
        Profile of network configuration.
        :param str dns_service_ip: An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes service address range specified in serviceCidr.
        :param str docker_bridge_cidr: A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes service address range.
        :param 'ManagedClusterLoadBalancerProfileResponseArgs' load_balancer_profile: Profile of the cluster load balancer.
        :param str load_balancer_sku: The load balancer sku for the managed cluster.
        :param str network_mode: Network mode used for building Kubernetes network.
        :param str network_plugin: Network plugin used for building Kubernetes network.
        :param str network_policy: Network policy used for building Kubernetes network.
        :param str outbound_type: The outbound (egress) routing method.
        :param str pod_cidr: A CIDR notation IP range from which to assign pod IPs when kubenet is used.
        :param str service_cidr: A CIDR notation IP range from which to assign service cluster IPs. It must not overlap with any Subnet IP ranges.
        """
        # Every field is optional; only those explicitly provided are recorded.
        if dns_service_ip is not None:
            pulumi.set(__self__, "dns_service_ip", dns_service_ip)
        if docker_bridge_cidr is not None:
            pulumi.set(__self__, "docker_bridge_cidr", docker_bridge_cidr)
        if load_balancer_profile is not None:
            pulumi.set(__self__, "load_balancer_profile", load_balancer_profile)
        if load_balancer_sku is not None:
            pulumi.set(__self__, "load_balancer_sku", load_balancer_sku)
        if network_mode is not None:
            pulumi.set(__self__, "network_mode", network_mode)
        if network_plugin is not None:
            pulumi.set(__self__, "network_plugin", network_plugin)
        if network_policy is not None:
            pulumi.set(__self__, "network_policy", network_policy)
        if outbound_type is not None:
            pulumi.set(__self__, "outbound_type", outbound_type)
        if pod_cidr is not None:
            pulumi.set(__self__, "pod_cidr", pod_cidr)
        if service_cidr is not None:
            pulumi.set(__self__, "service_cidr", service_cidr)

    @property
    @pulumi.getter(name="dnsServiceIP")
    def dns_service_ip(self) -> Optional[str]:
        """
        An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes service address range specified in serviceCidr.
        """
        return pulumi.get(self, "dns_service_ip")

    @property
    @pulumi.getter(name="dockerBridgeCidr")
    def docker_bridge_cidr(self) -> Optional[str]:
        """
        A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes service address range.
        """
        return pulumi.get(self, "docker_bridge_cidr")

    @property
    @pulumi.getter(name="loadBalancerProfile")
    def load_balancer_profile(self) -> Optional['outputs.ManagedClusterLoadBalancerProfileResponse']:
        """
        Profile of the cluster load balancer.
        """
        return pulumi.get(self, "load_balancer_profile")

    @property
    @pulumi.getter(name="loadBalancerSku")
    def load_balancer_sku(self) -> Optional[str]:
        """
        The load balancer sku for the managed cluster.
        """
        return pulumi.get(self, "load_balancer_sku")

    @property
    @pulumi.getter(name="networkMode")
    def network_mode(self) -> Optional[str]:
        """
        Network mode used for building Kubernetes network.
        """
        return pulumi.get(self, "network_mode")

    @property
    @pulumi.getter(name="networkPlugin")
    def network_plugin(self) -> Optional[str]:
        """
        Network plugin used for building Kubernetes network.
        """
        return pulumi.get(self, "network_plugin")

    @property
    @pulumi.getter(name="networkPolicy")
    def network_policy(self) -> Optional[str]:
        """
        Network policy used for building Kubernetes network.
        """
        return pulumi.get(self, "network_policy")

    @property
    @pulumi.getter(name="outboundType")
    def outbound_type(self) -> Optional[str]:
        """
        The outbound (egress) routing method.
        """
        return pulumi.get(self, "outbound_type")

    @property
    @pulumi.getter(name="podCidr")
    def pod_cidr(self) -> Optional[str]:
        """
        A CIDR notation IP range from which to assign pod IPs when kubenet is used.
        """
        return pulumi.get(self, "pod_cidr")

    @property
    @pulumi.getter(name="serviceCidr")
    def service_cidr(self) -> Optional[str]:
        """
        A CIDR notation IP range from which to assign service cluster IPs. It must not overlap with any Subnet IP ranges.
        """
        return pulumi.get(self, "service_cidr")

    def _translate_property(self, prop):
        # Map camelCase wire property names to this class's snake_case names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ContainerServiceOrchestratorProfileResponse(dict):
    """
    Profile for the container service orchestrator.
    """
    def __init__(__self__, *,
                 orchestrator_type: str,
                 orchestrator_version: Optional[str] = None):
        """
        Profile for the container service orchestrator.
        :param str orchestrator_type: The orchestrator to use to manage container service cluster resources. Valid values are Kubernetes, Swarm, DCOS, DockerCE and Custom.
        :param str orchestrator_version: The version of the orchestrator to use. You can specify the major.minor.patch part of the actual version.For example, you can specify version as "1.6.11".
        """
        pulumi.set(__self__, "orchestrator_type", orchestrator_type)
        # The version is optional and only recorded when explicitly provided.
        if orchestrator_version is not None:
            pulumi.set(__self__, "orchestrator_version", orchestrator_version)

    @property
    @pulumi.getter(name="orchestratorType")
    def orchestrator_type(self) -> str:
        """
        The orchestrator to use to manage container service cluster resources. Valid values are Kubernetes, Swarm, DCOS, DockerCE and Custom.
        """
        return pulumi.get(self, "orchestrator_type")

    @property
    @pulumi.getter(name="orchestratorVersion")
    def orchestrator_version(self) -> Optional[str]:
        """
        The version of the orchestrator to use. You can specify the major.minor.patch part of the actual version.For example, you can specify version as "1.6.11".
        """
        return pulumi.get(self, "orchestrator_version")

    def _translate_property(self, prop):
        # Map camelCase wire property names to this class's snake_case names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ContainerServiceServicePrincipalProfileResponse(dict):
    """
    Information about a service principal identity for the cluster to use for manipulating Azure APIs. Either secret or keyVaultSecretRef must be specified.
    """
    def __init__(__self__, *,
                 client_id: str,
                 key_vault_secret_ref: Optional['outputs.KeyVaultSecretRefResponse'] = None,
                 secret: Optional[str] = None):
        """
        Information about a service principal identity for the cluster to use for manipulating Azure APIs. Either secret or keyVaultSecretRef must be specified.
        :param str client_id: The ID for the service principal.
        :param 'KeyVaultSecretRefResponseArgs' key_vault_secret_ref: Reference to a secret stored in Azure Key Vault.
        :param str secret: The secret password associated with the service principal in plain text.
        """
        pulumi.set(__self__, "client_id", client_id)
        # Optional fields are only recorded when explicitly provided.
        if key_vault_secret_ref is not None:
            pulumi.set(__self__, "key_vault_secret_ref", key_vault_secret_ref)
        if secret is not None:
            pulumi.set(__self__, "secret", secret)

    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> str:
        """
        The ID for the service principal.
        """
        return pulumi.get(self, "client_id")

    @property
    @pulumi.getter(name="keyVaultSecretRef")
    def key_vault_secret_ref(self) -> Optional['outputs.KeyVaultSecretRefResponse']:
        """
        Reference to a secret stored in Azure Key Vault.
        """
        return pulumi.get(self, "key_vault_secret_ref")

    @property
    @pulumi.getter
    def secret(self) -> Optional[str]:
        """
        The secret password associated with the service principal in plain text.
        """
        return pulumi.get(self, "secret")

    def _translate_property(self, prop):
        # Map camelCase wire property names to this class's snake_case names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ContainerServiceSshConfigurationResponse(dict):
    """
    SSH configuration for Linux-based VMs running on Azure.
    """
    def __init__(__self__, *,
                 public_keys: Sequence['outputs.ContainerServiceSshPublicKeyResponse']):
        """
        SSH configuration for Linux-based VMs running on Azure.
        :param Sequence['ContainerServiceSshPublicKeyResponseArgs'] public_keys: The list of SSH public keys used to authenticate with Linux-based VMs. Only expect one key specified.
        """
        pulumi.set(__self__, "public_keys", public_keys)

    @property
    @pulumi.getter(name="publicKeys")
    def public_keys(self) -> Sequence['outputs.ContainerServiceSshPublicKeyResponse']:
        """
        The list of SSH public keys used to authenticate with Linux-based VMs. Only expect one key specified.
        """
        return pulumi.get(self, "public_keys")

    def _translate_property(self, prop):
        # Map camelCase wire property names to this class's snake_case names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ContainerServiceSshPublicKeyResponse(dict):
    """
    Contains information about SSH certificate public key data.
    """
    def __init__(__self__, *,
                 key_data: str):
        """
        Contains information about SSH certificate public key data.
        :param str key_data: Certificate public key used to authenticate with VMs through SSH. The certificate must be in PEM format with or without headers.
        """
        pulumi.set(__self__, "key_data", key_data)

    @property
    @pulumi.getter(name="keyData")
    def key_data(self) -> str:
        """
        Certificate public key used to authenticate with VMs through SSH. The certificate must be in PEM format with or without headers.
        """
        return pulumi.get(self, "key_data")

    def _translate_property(self, prop):
        # Map camelCase wire property names to this class's snake_case names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ContainerServiceVMDiagnosticsResponse(dict):
    """
    Profile for diagnostics on the container service VMs.
    """
    def __init__(__self__, *,
                 enabled: bool,
                 storage_uri: str):
        """
        Profile for diagnostics on the container service VMs.
        :param bool enabled: Whether the VM diagnostic agent is provisioned on the VM.
        :param str storage_uri: The URI of the storage account where diagnostics are stored.
        """
        pulumi.set(__self__, "enabled", enabled)
        pulumi.set(__self__, "storage_uri", storage_uri)

    @property
    @pulumi.getter
    def enabled(self) -> bool:
        """
        Whether the VM diagnostic agent is provisioned on the VM.
        """
        return pulumi.get(self, "enabled")

    @property
    @pulumi.getter(name="storageUri")
    def storage_uri(self) -> str:
        """
        The URI of the storage account where diagnostics are stored.
        """
        return pulumi.get(self, "storage_uri")

    def _translate_property(self, prop):
        # Map camelCase wire property names to this class's snake_case names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ContainerServiceWindowsProfileResponse(dict):
    """
    Profile for Windows VMs in the container service cluster.
    """
    def __init__(__self__, *,
                 admin_password: str,
                 admin_username: str):
        """
        Profile for Windows VMs in the container service cluster.
        :param str admin_password: The administrator password to use for Windows VMs.
        :param str admin_username: The administrator username to use for Windows VMs.
        """
        pulumi.set(__self__, "admin_password", admin_password)
        pulumi.set(__self__, "admin_username", admin_username)

    @property
    @pulumi.getter(name="adminPassword")
    def admin_password(self) -> str:
        """
        The administrator password to use for Windows VMs.
        """
        return pulumi.get(self, "admin_password")

    @property
    @pulumi.getter(name="adminUsername")
    def admin_username(self) -> str:
        """
        The administrator username to use for Windows VMs.
        """
        return pulumi.get(self, "admin_username")

    def _translate_property(self, prop):
        # Map camelCase wire property names to this class's snake_case names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class CredentialResultResponseResult(dict):
    """
    The credential result response.
    """
    def __init__(__self__, *,
                 name: str,
                 value: str):
        """
        The credential result response.
        :param str name: The name of the credential.
        :param str value: Base64-encoded Kubernetes configuration file.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the credential.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def value(self) -> str:
        """
        Base64-encoded Kubernetes configuration file.
        """
        return pulumi.get(self, "value")
@pulumi.output_type
class KeyVaultSecretRefResponse(dict):
    """
    Reference to a secret stored in Azure Key Vault.
    """
    def __init__(__self__, *,
                 secret_name: str,
                 vault_id: str,
                 version: Optional[str] = None):
        """
        Reference to a secret stored in Azure Key Vault.
        :param str secret_name: The secret name.
        :param str vault_id: Key vault identifier.
        :param str version: The secret version.
        """
        pulumi.set(__self__, "secret_name", secret_name)
        pulumi.set(__self__, "vault_id", vault_id)
        # The version is optional and only recorded when explicitly provided.
        if version is not None:
            pulumi.set(__self__, "version", version)

    @property
    @pulumi.getter(name="secretName")
    def secret_name(self) -> str:
        """
        The secret name.
        """
        return pulumi.get(self, "secret_name")

    @property
    @pulumi.getter(name="vaultID")
    def vault_id(self) -> str:
        """
        Key vault identifier.
        """
        return pulumi.get(self, "vault_id")

    @property
    @pulumi.getter
    def version(self) -> Optional[str]:
        """
        The secret version.
        """
        return pulumi.get(self, "version")

    def _translate_property(self, prop):
        # Map camelCase wire property names to this class's snake_case names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ManagedClusterAADProfileResponse(dict):
    """
    AADProfile specifies attributes for Azure Active Directory integration.
    """
    def __init__(__self__, *,
                 admin_group_object_ids: Optional[Sequence[str]] = None,
                 client_app_id: Optional[str] = None,
                 enable_azure_rbac: Optional[bool] = None,
                 managed: Optional[bool] = None,
                 server_app_id: Optional[str] = None,
                 server_app_secret: Optional[str] = None,
                 tenant_id: Optional[str] = None):
        """
        AADProfile specifies attributes for Azure Active Directory integration.
        :param Sequence[str] admin_group_object_ids: AAD group object IDs that will have admin role of the cluster.
        :param str client_app_id: The client AAD application ID.
        :param bool enable_azure_rbac: Whether to enable Azure RBAC for Kubernetes authorization.
        :param bool managed: Whether to enable managed AAD.
        :param str server_app_id: The server AAD application ID.
        :param str server_app_secret: The server AAD application secret.
        :param str tenant_id: The AAD tenant ID to use for authentication. If not specified, will use the tenant of the deployment subscription.
        """
        # Every field is optional; only those explicitly provided are recorded.
        if admin_group_object_ids is not None:
            pulumi.set(__self__, "admin_group_object_ids", admin_group_object_ids)
        if client_app_id is not None:
            pulumi.set(__self__, "client_app_id", client_app_id)
        if enable_azure_rbac is not None:
            pulumi.set(__self__, "enable_azure_rbac", enable_azure_rbac)
        if managed is not None:
            pulumi.set(__self__, "managed", managed)
        if server_app_id is not None:
            pulumi.set(__self__, "server_app_id", server_app_id)
        if server_app_secret is not None:
            pulumi.set(__self__, "server_app_secret", server_app_secret)
        if tenant_id is not None:
            pulumi.set(__self__, "tenant_id", tenant_id)

    @property
    @pulumi.getter(name="adminGroupObjectIDs")
    def admin_group_object_ids(self) -> Optional[Sequence[str]]:
        """
        AAD group object IDs that will have admin role of the cluster.
        """
        return pulumi.get(self, "admin_group_object_ids")

    @property
    @pulumi.getter(name="clientAppID")
    def client_app_id(self) -> Optional[str]:
        """
        The client AAD application ID.
        """
        return pulumi.get(self, "client_app_id")

    @property
    @pulumi.getter(name="enableAzureRBAC")
    def enable_azure_rbac(self) -> Optional[bool]:
        """
        Whether to enable Azure RBAC for Kubernetes authorization.
        """
        return pulumi.get(self, "enable_azure_rbac")

    @property
    @pulumi.getter
    def managed(self) -> Optional[bool]:
        """
        Whether to enable managed AAD.
        """
        return pulumi.get(self, "managed")

    @property
    @pulumi.getter(name="serverAppID")
    def server_app_id(self) -> Optional[str]:
        """
        The server AAD application ID.
        """
        return pulumi.get(self, "server_app_id")

    @property
    @pulumi.getter(name="serverAppSecret")
    def server_app_secret(self) -> Optional[str]:
        """
        The server AAD application secret.
        """
        return pulumi.get(self, "server_app_secret")

    @property
    @pulumi.getter(name="tenantID")
    def tenant_id(self) -> Optional[str]:
        """
        The AAD tenant ID to use for authentication. If not specified, will use the tenant of the deployment subscription.
        """
        return pulumi.get(self, "tenant_id")

    def _translate_property(self, prop):
        # Map camelCase wire property names to this class's snake_case names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ManagedClusterAPIServerAccessProfileResponse(dict):
    """
    Access profile for managed cluster API server.
    """
    def __init__(__self__, *,
                 authorized_ip_ranges: Optional[Sequence[str]] = None,
                 enable_private_cluster: Optional[bool] = None):
        """
        Access profile for managed cluster API server.
        :param Sequence[str] authorized_ip_ranges: Authorized IP Ranges to kubernetes API server.
        :param bool enable_private_cluster: Whether to create the cluster as a private cluster or not.
        """
        # Every field is optional; only those explicitly provided are recorded.
        if authorized_ip_ranges is not None:
            pulumi.set(__self__, "authorized_ip_ranges", authorized_ip_ranges)
        if enable_private_cluster is not None:
            pulumi.set(__self__, "enable_private_cluster", enable_private_cluster)

    @property
    @pulumi.getter(name="authorizedIPRanges")
    def authorized_ip_ranges(self) -> Optional[Sequence[str]]:
        """
        Authorized IP Ranges to kubernetes API server.
        """
        return pulumi.get(self, "authorized_ip_ranges")

    @property
    @pulumi.getter(name="enablePrivateCluster")
    def enable_private_cluster(self) -> Optional[bool]:
        """
        Whether to create the cluster as a private cluster or not.
        """
        return pulumi.get(self, "enable_private_cluster")

    def _translate_property(self, prop):
        # Map camelCase wire property names to this class's snake_case names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ManagedClusterAddonProfileResponse(dict):
    """
    A Kubernetes add-on profile for a managed cluster.
    """
    def __init__(__self__, *,
                 enabled: bool,
                 identity: 'outputs.ManagedClusterAddonProfileResponseIdentity',
                 config: Optional[Mapping[str, str]] = None):
        """
        A Kubernetes add-on profile for a managed cluster.
        :param bool enabled: Whether the add-on is enabled or not.
        :param 'ManagedClusterAddonProfileResponseIdentityArgs' identity: Information of user assigned identity used by this add-on.
        :param Mapping[str, str] config: Key-value pairs for configuring an add-on.
        """
        pulumi.set(__self__, "enabled", enabled)
        pulumi.set(__self__, "identity", identity)
        # config is optional and only recorded when explicitly provided.
        if config is not None:
            pulumi.set(__self__, "config", config)

    @property
    @pulumi.getter
    def enabled(self) -> bool:
        """
        Whether the add-on is enabled or not.
        """
        return pulumi.get(self, "enabled")

    @property
    @pulumi.getter
    def identity(self) -> 'outputs.ManagedClusterAddonProfileResponseIdentity':
        """
        Information of user assigned identity used by this add-on.
        """
        return pulumi.get(self, "identity")

    @property
    @pulumi.getter
    def config(self) -> Optional[Mapping[str, str]]:
        """
        Key-value pairs for configuring an add-on.
        """
        return pulumi.get(self, "config")

    def _translate_property(self, prop):
        # Map camelCase wire property names to this class's snake_case names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ManagedClusterAddonProfileResponseIdentity(dict):
    """
    Information of user assigned identity used by this add-on.
    """
    def __init__(__self__, *,
                 client_id: Optional[str] = None,
                 object_id: Optional[str] = None,
                 resource_id: Optional[str] = None):
        """
        Information of user assigned identity used by this add-on.
        :param str client_id: The client id of the user assigned identity.
        :param str object_id: The object id of the user assigned identity.
        :param str resource_id: The resource id of the user assigned identity.
        """
        # Every field is optional; only those explicitly provided are recorded.
        if client_id is not None:
            pulumi.set(__self__, "client_id", client_id)
        if object_id is not None:
            pulumi.set(__self__, "object_id", object_id)
        if resource_id is not None:
            pulumi.set(__self__, "resource_id", resource_id)

    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> Optional[str]:
        """
        The client id of the user assigned identity.
        """
        return pulumi.get(self, "client_id")

    @property
    @pulumi.getter(name="objectId")
    def object_id(self) -> Optional[str]:
        """
        The object id of the user assigned identity.
        """
        return pulumi.get(self, "object_id")

    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[str]:
        """
        The resource id of the user assigned identity.
        """
        return pulumi.get(self, "resource_id")

    def _translate_property(self, prop):
        # Map camelCase wire property names to this class's snake_case names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ManagedClusterAgentPoolProfileResponse(dict):
    """
    Profile for the container service agent pool.
    """
    def __init__(__self__, *,
                 name: str,
                 node_image_version: str,
                 power_state: 'outputs.PowerStateResponse',
                 provisioning_state: str,
                 availability_zones: Optional[Sequence[str]] = None,
                 count: Optional[int] = None,
                 enable_auto_scaling: Optional[bool] = None,
                 enable_node_public_ip: Optional[bool] = None,
                 max_count: Optional[int] = None,
                 max_pods: Optional[int] = None,
                 min_count: Optional[int] = None,
                 mode: Optional[str] = None,
                 node_labels: Optional[Mapping[str, str]] = None,
                 node_taints: Optional[Sequence[str]] = None,
                 orchestrator_version: Optional[str] = None,
                 os_disk_size_gb: Optional[int] = None,
                 os_disk_type: Optional[str] = None,
                 os_type: Optional[str] = None,
                 proximity_placement_group_id: Optional[str] = None,
                 scale_set_eviction_policy: Optional[str] = None,
                 scale_set_priority: Optional[str] = None,
                 spot_max_price: Optional[float] = None,
                 tags: Optional[Mapping[str, str]] = None,
                 type: Optional[str] = None,
                 upgrade_settings: Optional['outputs.AgentPoolUpgradeSettingsResponse'] = None,
                 vm_size: Optional[str] = None,
                 vnet_subnet_id: Optional[str] = None):
        """
        Profile for the container service agent pool.
        :param str name: Unique name of the agent pool profile in the context of the subscription and resource group.
        :param str node_image_version: Version of node image
        :param 'PowerStateResponseArgs' power_state: Describes whether the Agent Pool is Running or Stopped
        :param str provisioning_state: The current deployment or provisioning state, which only appears in the response.
        :param Sequence[str] availability_zones: Availability zones for nodes. Must use VirtualMachineScaleSets AgentPoolType.
        :param int count: Number of agents (VMs) to host docker containers. Allowed values must be in the range of 0 to 100 (inclusive) for user pools and in the range of 1 to 100 (inclusive) for system pools. The default value is 1.
        :param bool enable_auto_scaling: Whether to enable auto-scaler
        :param bool enable_node_public_ip: Enable public IP for nodes
        :param int max_count: Maximum number of nodes for auto-scaling
        :param int max_pods: Maximum number of pods that can run on a node.
        :param int min_count: Minimum number of nodes for auto-scaling
        :param str mode: AgentPoolMode represents mode of an agent pool
        :param Mapping[str, str] node_labels: Agent pool node labels to be persisted across all nodes in agent pool.
        :param Sequence[str] node_taints: Taints added to new nodes during node pool create and scale. For example, key=value:NoSchedule.
        :param str orchestrator_version: Version of orchestrator specified when creating the managed cluster.
        :param int os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every machine in this master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified.
        :param str os_disk_type: OS disk type to be used for machines in a given agent pool. Allowed values are 'Ephemeral' and 'Managed'. Defaults to 'Managed'. May not be changed after creation.
        :param str os_type: OsType to be used to specify os type. Choose from Linux and Windows. Default to Linux.
        :param str proximity_placement_group_id: The ID for Proximity Placement Group.
        :param str scale_set_eviction_policy: ScaleSetEvictionPolicy to be used to specify eviction policy for Spot virtual machine scale set. Default to Delete.
        :param str scale_set_priority: ScaleSetPriority to be used to specify virtual machine scale set priority. Default to regular.
        :param float spot_max_price: SpotMaxPrice to be used to specify the maximum price you are willing to pay in US Dollars. Possible values are any decimal value greater than zero or -1 which indicates default price to be up-to on-demand.
        :param Mapping[str, str] tags: Agent pool tags to be persisted on the agent pool virtual machine scale set.
        :param str type: AgentPoolType represents types of an agent pool
        :param 'AgentPoolUpgradeSettingsResponseArgs' upgrade_settings: Settings for upgrading the agentpool
        :param str vm_size: Size of agent VMs.
        :param str vnet_subnet_id: VNet SubnetID specifies the VNet's subnet identifier.
        """
        # Required fields are always stored; every optional field is stored
        # only when provided, so absent values never become dict keys.
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "node_image_version", node_image_version)
        pulumi.set(__self__, "power_state", power_state)
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if availability_zones is not None:
            pulumi.set(__self__, "availability_zones", availability_zones)
        if count is not None:
            pulumi.set(__self__, "count", count)
        if enable_auto_scaling is not None:
            pulumi.set(__self__, "enable_auto_scaling", enable_auto_scaling)
        if enable_node_public_ip is not None:
            pulumi.set(__self__, "enable_node_public_ip", enable_node_public_ip)
        if max_count is not None:
            pulumi.set(__self__, "max_count", max_count)
        if max_pods is not None:
            pulumi.set(__self__, "max_pods", max_pods)
        if min_count is not None:
            pulumi.set(__self__, "min_count", min_count)
        if mode is not None:
            pulumi.set(__self__, "mode", mode)
        if node_labels is not None:
            pulumi.set(__self__, "node_labels", node_labels)
        if node_taints is not None:
            pulumi.set(__self__, "node_taints", node_taints)
        if orchestrator_version is not None:
            pulumi.set(__self__, "orchestrator_version", orchestrator_version)
        if os_disk_size_gb is not None:
            pulumi.set(__self__, "os_disk_size_gb", os_disk_size_gb)
        if os_disk_type is not None:
            pulumi.set(__self__, "os_disk_type", os_disk_type)
        if os_type is not None:
            pulumi.set(__self__, "os_type", os_type)
        if proximity_placement_group_id is not None:
            pulumi.set(__self__, "proximity_placement_group_id", proximity_placement_group_id)
        if scale_set_eviction_policy is not None:
            pulumi.set(__self__, "scale_set_eviction_policy", scale_set_eviction_policy)
        if scale_set_priority is not None:
            pulumi.set(__self__, "scale_set_priority", scale_set_priority)
        if spot_max_price is not None:
            pulumi.set(__self__, "spot_max_price", spot_max_price)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if type is not None:
            pulumi.set(__self__, "type", type)
        if upgrade_settings is not None:
            pulumi.set(__self__, "upgrade_settings", upgrade_settings)
        if vm_size is not None:
            pulumi.set(__self__, "vm_size", vm_size)
        if vnet_subnet_id is not None:
            pulumi.set(__self__, "vnet_subnet_id", vnet_subnet_id)
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Unique name of the agent pool profile in the context of the subscription and resource group.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="nodeImageVersion")
    def node_image_version(self) -> str:
        """
        Version of node image
        """
        return pulumi.get(self, "node_image_version")
    @property
    @pulumi.getter(name="powerState")
    def power_state(self) -> 'outputs.PowerStateResponse':
        """
        Describes whether the Agent Pool is Running or Stopped
        """
        return pulumi.get(self, "power_state")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The current deployment or provisioning state, which only appears in the response.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="availabilityZones")
    def availability_zones(self) -> Optional[Sequence[str]]:
        """
        Availability zones for nodes. Must use VirtualMachineScaleSets AgentPoolType.
        """
        return pulumi.get(self, "availability_zones")
    @property
    @pulumi.getter
    def count(self) -> Optional[int]:
        """
        Number of agents (VMs) to host docker containers. Allowed values must be in the range of 0 to 100 (inclusive) for user pools and in the range of 1 to 100 (inclusive) for system pools. The default value is 1.
        """
        return pulumi.get(self, "count")
    @property
    @pulumi.getter(name="enableAutoScaling")
    def enable_auto_scaling(self) -> Optional[bool]:
        """
        Whether to enable auto-scaler
        """
        return pulumi.get(self, "enable_auto_scaling")
    @property
    @pulumi.getter(name="enableNodePublicIP")
    def enable_node_public_ip(self) -> Optional[bool]:
        """
        Enable public IP for nodes
        """
        return pulumi.get(self, "enable_node_public_ip")
    @property
    @pulumi.getter(name="maxCount")
    def max_count(self) -> Optional[int]:
        """
        Maximum number of nodes for auto-scaling
        """
        return pulumi.get(self, "max_count")
    @property
    @pulumi.getter(name="maxPods")
    def max_pods(self) -> Optional[int]:
        """
        Maximum number of pods that can run on a node.
        """
        return pulumi.get(self, "max_pods")
    @property
    @pulumi.getter(name="minCount")
    def min_count(self) -> Optional[int]:
        """
        Minimum number of nodes for auto-scaling
        """
        return pulumi.get(self, "min_count")
    @property
    @pulumi.getter
    def mode(self) -> Optional[str]:
        """
        AgentPoolMode represents mode of an agent pool
        """
        return pulumi.get(self, "mode")
    @property
    @pulumi.getter(name="nodeLabels")
    def node_labels(self) -> Optional[Mapping[str, str]]:
        """
        Agent pool node labels to be persisted across all nodes in agent pool.
        """
        return pulumi.get(self, "node_labels")
    @property
    @pulumi.getter(name="nodeTaints")
    def node_taints(self) -> Optional[Sequence[str]]:
        """
        Taints added to new nodes during node pool create and scale. For example, key=value:NoSchedule.
        """
        return pulumi.get(self, "node_taints")
    @property
    @pulumi.getter(name="orchestratorVersion")
    def orchestrator_version(self) -> Optional[str]:
        """
        Version of orchestrator specified when creating the managed cluster.
        """
        return pulumi.get(self, "orchestrator_version")
    @property
    @pulumi.getter(name="osDiskSizeGB")
    def os_disk_size_gb(self) -> Optional[int]:
        """
        OS Disk Size in GB to be used to specify the disk size for every machine in this master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified.
        """
        return pulumi.get(self, "os_disk_size_gb")
    @property
    @pulumi.getter(name="osDiskType")
    def os_disk_type(self) -> Optional[str]:
        """
        OS disk type to be used for machines in a given agent pool. Allowed values are 'Ephemeral' and 'Managed'. Defaults to 'Managed'. May not be changed after creation.
        """
        return pulumi.get(self, "os_disk_type")
    @property
    @pulumi.getter(name="osType")
    def os_type(self) -> Optional[str]:
        """
        OsType to be used to specify os type. Choose from Linux and Windows. Default to Linux.
        """
        return pulumi.get(self, "os_type")
    @property
    @pulumi.getter(name="proximityPlacementGroupID")
    def proximity_placement_group_id(self) -> Optional[str]:
        """
        The ID for Proximity Placement Group.
        """
        return pulumi.get(self, "proximity_placement_group_id")
    @property
    @pulumi.getter(name="scaleSetEvictionPolicy")
    def scale_set_eviction_policy(self) -> Optional[str]:
        """
        ScaleSetEvictionPolicy to be used to specify eviction policy for Spot virtual machine scale set. Default to Delete.
        """
        return pulumi.get(self, "scale_set_eviction_policy")
    @property
    @pulumi.getter(name="scaleSetPriority")
    def scale_set_priority(self) -> Optional[str]:
        """
        ScaleSetPriority to be used to specify virtual machine scale set priority. Default to regular.
        """
        return pulumi.get(self, "scale_set_priority")
    @property
    @pulumi.getter(name="spotMaxPrice")
    def spot_max_price(self) -> Optional[float]:
        """
        SpotMaxPrice to be used to specify the maximum price you are willing to pay in US Dollars. Possible values are any decimal value greater than zero or -1 which indicates default price to be up-to on-demand.
        """
        return pulumi.get(self, "spot_max_price")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Agent pool tags to be persisted on the agent pool virtual machine scale set.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """
        AgentPoolType represents types of an agent pool
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="upgradeSettings")
    def upgrade_settings(self) -> Optional['outputs.AgentPoolUpgradeSettingsResponse']:
        """
        Settings for upgrading the agentpool
        """
        return pulumi.get(self, "upgrade_settings")
    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> Optional[str]:
        """
        Size of agent VMs.
        """
        return pulumi.get(self, "vm_size")
    @property
    @pulumi.getter(name="vnetSubnetID")
    def vnet_subnet_id(self) -> Optional[str]:
        """
        VNet SubnetID specifies the VNet's subnet identifier.
        """
        return pulumi.get(self, "vnet_subnet_id")
    def _translate_property(self, prop: str) -> str:
        # Map a camelCase wire-format property name to its snake_case Python
        # attribute; names without a table entry pass through unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ManagedClusterIdentityResponse(dict):
    """
    Identity for the managed cluster.
    """
    def __init__(__self__, *,
                 principal_id: str,
                 tenant_id: str,
                 type: Optional[str] = None,
                 user_assigned_identities: Optional[Mapping[str, 'outputs.ManagedClusterIdentityResponseUserAssignedIdentities']] = None):
        """
        Identity for the managed cluster.
        :param str principal_id: The principal id of the system assigned identity which is used by master components.
        :param str tenant_id: The tenant id of the system assigned identity which is used by master components.
        :param str type: The type of identity used for the managed cluster. Type 'SystemAssigned' will use an implicitly created identity in master components and an auto-created user assigned identity in MC_ resource group in agent nodes. Type 'None' will not use MSI for the managed cluster, service principal will be used instead.
        :param Mapping[str, 'ManagedClusterIdentityResponseUserAssignedIdentitiesArgs'] user_assigned_identities: The user identity associated with the managed cluster. This identity will be used in control plane and only one user assigned identity is allowed. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
        """
        # principal_id and tenant_id are required; type and
        # user_assigned_identities are stored only when provided.
        pulumi.set(__self__, "principal_id", principal_id)
        pulumi.set(__self__, "tenant_id", tenant_id)
        if type is not None:
            pulumi.set(__self__, "type", type)
        if user_assigned_identities is not None:
            pulumi.set(__self__, "user_assigned_identities", user_assigned_identities)
    @property
    @pulumi.getter(name="principalId")
    def principal_id(self) -> str:
        """
        The principal id of the system assigned identity which is used by master components.
        """
        return pulumi.get(self, "principal_id")
    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> str:
        """
        The tenant id of the system assigned identity which is used by master components.
        """
        return pulumi.get(self, "tenant_id")
    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """
        The type of identity used for the managed cluster. Type 'SystemAssigned' will use an implicitly created identity in master components and an auto-created user assigned identity in MC_ resource group in agent nodes. Type 'None' will not use MSI for the managed cluster, service principal will be used instead.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="userAssignedIdentities")
    def user_assigned_identities(self) -> Optional[Mapping[str, 'outputs.ManagedClusterIdentityResponseUserAssignedIdentities']]:
        """
        The user identity associated with the managed cluster. This identity will be used in control plane and only one user assigned identity is allowed. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
        """
        return pulumi.get(self, "user_assigned_identities")
    def _translate_property(self, prop: str) -> str:
        # Map a camelCase wire-format property name to its snake_case Python
        # attribute; names without a table entry pass through unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ManagedClusterIdentityResponseUserAssignedIdentities(dict):
    """
    A user assigned identity associated with the managed cluster, described
    by its client id and principal id.
    """
    def __init__(__self__, *,
                 client_id: str,
                 principal_id: str):
        """
        :param str client_id: The client id of user assigned identity.
        :param str principal_id: The principal id of user assigned identity.
        """
        # Both fields are required and always stored.
        pulumi.set(__self__, "client_id", client_id)
        pulumi.set(__self__, "principal_id", principal_id)
    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> str:
        """
        The client id of user assigned identity.
        """
        return pulumi.get(self, "client_id")
    @property
    @pulumi.getter(name="principalId")
    def principal_id(self) -> str:
        """
        The principal id of user assigned identity.
        """
        return pulumi.get(self, "principal_id")
    def _translate_property(self, prop: str) -> str:
        # Map a camelCase wire-format property name to its snake_case Python
        # attribute; names without a table entry pass through unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ManagedClusterLoadBalancerProfileResponse(dict):
    """
    Profile of the managed cluster load balancer.
    """
    def __init__(__self__, *,
                 allocated_outbound_ports: Optional[int] = None,
                 effective_outbound_ips: Optional[Sequence['outputs.ResourceReferenceResponse']] = None,
                 idle_timeout_in_minutes: Optional[int] = None,
                 managed_outbound_ips: Optional['outputs.ManagedClusterLoadBalancerProfileResponseManagedOutboundIPs'] = None,
                 outbound_ip_prefixes: Optional['outputs.ManagedClusterLoadBalancerProfileResponseOutboundIPPrefixes'] = None,
                 outbound_ips: Optional['outputs.ManagedClusterLoadBalancerProfileResponseOutboundIPs'] = None):
        """
        Profile of the managed cluster load balancer.
        :param int allocated_outbound_ports: Desired number of allocated SNAT ports per VM. Allowed values must be in the range of 0 to 64000 (inclusive). The default value is 0 which results in Azure dynamically allocating ports.
        :param Sequence['ResourceReferenceResponseArgs'] effective_outbound_ips: The effective outbound IP resources of the cluster load balancer.
        :param int idle_timeout_in_minutes: Desired outbound flow idle timeout in minutes. Allowed values must be in the range of 4 to 120 (inclusive). The default value is 30 minutes.
        :param 'ManagedClusterLoadBalancerProfileResponseManagedOutboundIPsArgs' managed_outbound_ips: Desired managed outbound IPs for the cluster load balancer.
        :param 'ManagedClusterLoadBalancerProfileResponseOutboundIPPrefixesArgs' outbound_ip_prefixes: Desired outbound IP Prefix resources for the cluster load balancer.
        :param 'ManagedClusterLoadBalancerProfileResponseOutboundIPsArgs' outbound_ips: Desired outbound IP resources for the cluster load balancer.
        """
        # Every field is optional; only provided values become dict keys.
        if allocated_outbound_ports is not None:
            pulumi.set(__self__, "allocated_outbound_ports", allocated_outbound_ports)
        if effective_outbound_ips is not None:
            pulumi.set(__self__, "effective_outbound_ips", effective_outbound_ips)
        if idle_timeout_in_minutes is not None:
            pulumi.set(__self__, "idle_timeout_in_minutes", idle_timeout_in_minutes)
        if managed_outbound_ips is not None:
            pulumi.set(__self__, "managed_outbound_ips", managed_outbound_ips)
        if outbound_ip_prefixes is not None:
            pulumi.set(__self__, "outbound_ip_prefixes", outbound_ip_prefixes)
        if outbound_ips is not None:
            pulumi.set(__self__, "outbound_ips", outbound_ips)
    @property
    @pulumi.getter(name="allocatedOutboundPorts")
    def allocated_outbound_ports(self) -> Optional[int]:
        """
        Desired number of allocated SNAT ports per VM. Allowed values must be in the range of 0 to 64000 (inclusive). The default value is 0 which results in Azure dynamically allocating ports.
        """
        return pulumi.get(self, "allocated_outbound_ports")
    @property
    @pulumi.getter(name="effectiveOutboundIPs")
    def effective_outbound_ips(self) -> Optional[Sequence['outputs.ResourceReferenceResponse']]:
        """
        The effective outbound IP resources of the cluster load balancer.
        """
        return pulumi.get(self, "effective_outbound_ips")
    @property
    @pulumi.getter(name="idleTimeoutInMinutes")
    def idle_timeout_in_minutes(self) -> Optional[int]:
        """
        Desired outbound flow idle timeout in minutes. Allowed values must be in the range of 4 to 120 (inclusive). The default value is 30 minutes.
        """
        return pulumi.get(self, "idle_timeout_in_minutes")
    @property
    @pulumi.getter(name="managedOutboundIPs")
    def managed_outbound_ips(self) -> Optional['outputs.ManagedClusterLoadBalancerProfileResponseManagedOutboundIPs']:
        """
        Desired managed outbound IPs for the cluster load balancer.
        """
        return pulumi.get(self, "managed_outbound_ips")
    @property
    @pulumi.getter(name="outboundIPPrefixes")
    def outbound_ip_prefixes(self) -> Optional['outputs.ManagedClusterLoadBalancerProfileResponseOutboundIPPrefixes']:
        """
        Desired outbound IP Prefix resources for the cluster load balancer.
        """
        return pulumi.get(self, "outbound_ip_prefixes")
    @property
    @pulumi.getter(name="outboundIPs")
    def outbound_ips(self) -> Optional['outputs.ManagedClusterLoadBalancerProfileResponseOutboundIPs']:
        """
        Desired outbound IP resources for the cluster load balancer.
        """
        return pulumi.get(self, "outbound_ips")
    def _translate_property(self, prop: str) -> str:
        # Map a camelCase wire-format property name to its snake_case Python
        # attribute; names without a table entry pass through unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ManagedClusterLoadBalancerProfileResponseManagedOutboundIPs(dict):
    """
    Desired managed outbound IPs for the cluster load balancer.
    """
    def __init__(__self__, *,
                 count: Optional[int] = None):
        """
        Desired managed outbound IPs for the cluster load balancer.
        :param int count: Desired number of outbound IP created/managed by Azure for the cluster load balancer. Allowed values must be in the range of 1 to 100 (inclusive). The default value is 1.
        """
        # Stored only when provided, so an absent count is not a dict key.
        if count is not None:
            pulumi.set(__self__, "count", count)
    @property
    @pulumi.getter
    def count(self) -> Optional[int]:
        """
        Desired number of outbound IP created/managed by Azure for the cluster load balancer. Allowed values must be in the range of 1 to 100 (inclusive). The default value is 1.
        """
        return pulumi.get(self, "count")
    def _translate_property(self, prop: str) -> str:
        # Map a camelCase wire-format property name to its snake_case Python
        # attribute; names without a table entry pass through unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ManagedClusterLoadBalancerProfileResponseOutboundIPPrefixes(dict):
    """
    Desired outbound IP Prefix resources for the cluster load balancer.
    """
    def __init__(__self__, *,
                 public_ip_prefixes: Optional[Sequence['outputs.ResourceReferenceResponse']] = None):
        """
        Desired outbound IP Prefix resources for the cluster load balancer.
        :param Sequence['ResourceReferenceResponseArgs'] public_ip_prefixes: A list of public IP prefix resources.
        """
        # Stored only when provided, so an absent list is not a dict key.
        if public_ip_prefixes is not None:
            pulumi.set(__self__, "public_ip_prefixes", public_ip_prefixes)
    @property
    @pulumi.getter(name="publicIPPrefixes")
    def public_ip_prefixes(self) -> Optional[Sequence['outputs.ResourceReferenceResponse']]:
        """
        A list of public IP prefix resources.
        """
        return pulumi.get(self, "public_ip_prefixes")
    def _translate_property(self, prop: str) -> str:
        # Map a camelCase wire-format property name to its snake_case Python
        # attribute; names without a table entry pass through unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ManagedClusterLoadBalancerProfileResponseOutboundIPs(dict):
    """
    Desired outbound IP resources for the cluster load balancer.
    """
    def __init__(__self__, *,
                 public_ips: Optional[Sequence['outputs.ResourceReferenceResponse']] = None):
        """
        Desired outbound IP resources for the cluster load balancer.
        :param Sequence['ResourceReferenceResponseArgs'] public_ips: A list of public IP resources.
        """
        # Stored only when provided, so an absent list is not a dict key.
        if public_ips is not None:
            pulumi.set(__self__, "public_ips", public_ips)
    @property
    @pulumi.getter(name="publicIPs")
    def public_ips(self) -> Optional[Sequence['outputs.ResourceReferenceResponse']]:
        """
        A list of public IP resources.
        """
        return pulumi.get(self, "public_ips")
    def _translate_property(self, prop: str) -> str:
        # Map a camelCase wire-format property name to its snake_case Python
        # attribute; names without a table entry pass through unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ManagedClusterPropertiesResponseAutoScalerProfile(dict):
    """
    Parameters to be applied to the cluster-autoscaler when enabled.

    All values are carried as strings; every field is optional and is stored
    only when provided.
    """
    def __init__(__self__, *,
                 balance_similar_node_groups: Optional[str] = None,
                 expander: Optional[str] = None,
                 max_empty_bulk_delete: Optional[str] = None,
                 max_graceful_termination_sec: Optional[str] = None,
                 max_total_unready_percentage: Optional[str] = None,
                 new_pod_scale_up_delay: Optional[str] = None,
                 ok_total_unready_count: Optional[str] = None,
                 scale_down_delay_after_add: Optional[str] = None,
                 scale_down_delay_after_delete: Optional[str] = None,
                 scale_down_delay_after_failure: Optional[str] = None,
                 scale_down_unneeded_time: Optional[str] = None,
                 scale_down_unready_time: Optional[str] = None,
                 scale_down_utilization_threshold: Optional[str] = None,
                 scan_interval: Optional[str] = None,
                 skip_nodes_with_local_storage: Optional[str] = None,
                 skip_nodes_with_system_pods: Optional[str] = None):
        """
        Parameters to be applied to the cluster-autoscaler when enabled
        """
        if balance_similar_node_groups is not None:
            pulumi.set(__self__, "balance_similar_node_groups", balance_similar_node_groups)
        if expander is not None:
            pulumi.set(__self__, "expander", expander)
        if max_empty_bulk_delete is not None:
            pulumi.set(__self__, "max_empty_bulk_delete", max_empty_bulk_delete)
        if max_graceful_termination_sec is not None:
            pulumi.set(__self__, "max_graceful_termination_sec", max_graceful_termination_sec)
        if max_total_unready_percentage is not None:
            pulumi.set(__self__, "max_total_unready_percentage", max_total_unready_percentage)
        if new_pod_scale_up_delay is not None:
            pulumi.set(__self__, "new_pod_scale_up_delay", new_pod_scale_up_delay)
        if ok_total_unready_count is not None:
            pulumi.set(__self__, "ok_total_unready_count", ok_total_unready_count)
        if scale_down_delay_after_add is not None:
            pulumi.set(__self__, "scale_down_delay_after_add", scale_down_delay_after_add)
        if scale_down_delay_after_delete is not None:
            pulumi.set(__self__, "scale_down_delay_after_delete", scale_down_delay_after_delete)
        if scale_down_delay_after_failure is not None:
            pulumi.set(__self__, "scale_down_delay_after_failure", scale_down_delay_after_failure)
        if scale_down_unneeded_time is not None:
            pulumi.set(__self__, "scale_down_unneeded_time", scale_down_unneeded_time)
        if scale_down_unready_time is not None:
            pulumi.set(__self__, "scale_down_unready_time", scale_down_unready_time)
        if scale_down_utilization_threshold is not None:
            pulumi.set(__self__, "scale_down_utilization_threshold", scale_down_utilization_threshold)
        if scan_interval is not None:
            pulumi.set(__self__, "scan_interval", scan_interval)
        if skip_nodes_with_local_storage is not None:
            pulumi.set(__self__, "skip_nodes_with_local_storage", skip_nodes_with_local_storage)
        if skip_nodes_with_system_pods is not None:
            pulumi.set(__self__, "skip_nodes_with_system_pods", skip_nodes_with_system_pods)
    @property
    @pulumi.getter(name="balanceSimilarNodeGroups")
    def balance_similar_node_groups(self) -> Optional[str]:
        """The cluster-autoscaler 'balance-similar-node-groups' setting, as a string."""
        return pulumi.get(self, "balance_similar_node_groups")
    @property
    @pulumi.getter
    def expander(self) -> Optional[str]:
        """The cluster-autoscaler 'expander' setting, as a string."""
        return pulumi.get(self, "expander")
    @property
    @pulumi.getter(name="maxEmptyBulkDelete")
    def max_empty_bulk_delete(self) -> Optional[str]:
        """The cluster-autoscaler 'max-empty-bulk-delete' setting, as a string."""
        return pulumi.get(self, "max_empty_bulk_delete")
    @property
    @pulumi.getter(name="maxGracefulTerminationSec")
    def max_graceful_termination_sec(self) -> Optional[str]:
        """The cluster-autoscaler 'max-graceful-termination-sec' setting, as a string."""
        return pulumi.get(self, "max_graceful_termination_sec")
    @property
    @pulumi.getter(name="maxTotalUnreadyPercentage")
    def max_total_unready_percentage(self) -> Optional[str]:
        """The cluster-autoscaler 'max-total-unready-percentage' setting, as a string."""
        return pulumi.get(self, "max_total_unready_percentage")
    @property
    @pulumi.getter(name="newPodScaleUpDelay")
    def new_pod_scale_up_delay(self) -> Optional[str]:
        """The cluster-autoscaler 'new-pod-scale-up-delay' setting, as a string."""
        return pulumi.get(self, "new_pod_scale_up_delay")
    @property
    @pulumi.getter(name="okTotalUnreadyCount")
    def ok_total_unready_count(self) -> Optional[str]:
        """The cluster-autoscaler 'ok-total-unready-count' setting, as a string."""
        return pulumi.get(self, "ok_total_unready_count")
    @property
    @pulumi.getter(name="scaleDownDelayAfterAdd")
    def scale_down_delay_after_add(self) -> Optional[str]:
        """The cluster-autoscaler 'scale-down-delay-after-add' setting, as a string."""
        return pulumi.get(self, "scale_down_delay_after_add")
    @property
    @pulumi.getter(name="scaleDownDelayAfterDelete")
    def scale_down_delay_after_delete(self) -> Optional[str]:
        """The cluster-autoscaler 'scale-down-delay-after-delete' setting, as a string."""
        return pulumi.get(self, "scale_down_delay_after_delete")
    @property
    @pulumi.getter(name="scaleDownDelayAfterFailure")
    def scale_down_delay_after_failure(self) -> Optional[str]:
        """The cluster-autoscaler 'scale-down-delay-after-failure' setting, as a string."""
        return pulumi.get(self, "scale_down_delay_after_failure")
    @property
    @pulumi.getter(name="scaleDownUnneededTime")
    def scale_down_unneeded_time(self) -> Optional[str]:
        """The cluster-autoscaler 'scale-down-unneeded-time' setting, as a string."""
        return pulumi.get(self, "scale_down_unneeded_time")
    @property
    @pulumi.getter(name="scaleDownUnreadyTime")
    def scale_down_unready_time(self) -> Optional[str]:
        """The cluster-autoscaler 'scale-down-unready-time' setting, as a string."""
        return pulumi.get(self, "scale_down_unready_time")
    @property
    @pulumi.getter(name="scaleDownUtilizationThreshold")
    def scale_down_utilization_threshold(self) -> Optional[str]:
        """The cluster-autoscaler 'scale-down-utilization-threshold' setting, as a string."""
        return pulumi.get(self, "scale_down_utilization_threshold")
    @property
    @pulumi.getter(name="scanInterval")
    def scan_interval(self) -> Optional[str]:
        """The cluster-autoscaler 'scan-interval' setting, as a string."""
        return pulumi.get(self, "scan_interval")
    @property
    @pulumi.getter(name="skipNodesWithLocalStorage")
    def skip_nodes_with_local_storage(self) -> Optional[str]:
        """The cluster-autoscaler 'skip-nodes-with-local-storage' setting, as a string."""
        return pulumi.get(self, "skip_nodes_with_local_storage")
    @property
    @pulumi.getter(name="skipNodesWithSystemPods")
    def skip_nodes_with_system_pods(self) -> Optional[str]:
        """The cluster-autoscaler 'skip-nodes-with-system-pods' setting, as a string."""
        return pulumi.get(self, "skip_nodes_with_system_pods")
    def _translate_property(self, prop: str) -> str:
        # Map a camelCase wire-format property name to its snake_case Python
        # attribute; names without a table entry pass through unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ManagedClusterPropertiesResponseIdentityProfile(dict):
    """
    A user assigned identity referenced from the managed cluster properties,
    described by its client, object and resource ids.
    """
    def __init__(__self__, *,
                 client_id: Optional[str] = None,
                 object_id: Optional[str] = None,
                 resource_id: Optional[str] = None):
        """
        :param str client_id: The client id of the user assigned identity.
        :param str object_id: The object id of the user assigned identity.
        :param str resource_id: The resource id of the user assigned identity.
        """
        # Every field is optional; only provided values become dict keys.
        if client_id is not None:
            pulumi.set(__self__, "client_id", client_id)
        if object_id is not None:
            pulumi.set(__self__, "object_id", object_id)
        if resource_id is not None:
            pulumi.set(__self__, "resource_id", resource_id)
    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> Optional[str]:
        """
        The client id of the user assigned identity.
        """
        return pulumi.get(self, "client_id")
    @property
    @pulumi.getter(name="objectId")
    def object_id(self) -> Optional[str]:
        """
        The object id of the user assigned identity.
        """
        return pulumi.get(self, "object_id")
    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[str]:
        """
        The resource id of the user assigned identity.
        """
        return pulumi.get(self, "resource_id")
    def _translate_property(self, prop: str) -> str:
        # Map a camelCase wire-format property name to its snake_case Python
        # attribute; names without a table entry pass through unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ManagedClusterSKUResponse(dict):
    """
    SKU of a managed cluster, given by its name and tier.
    """
    def __init__(__self__, *,
                 name: Optional[str] = None,
                 tier: Optional[str] = None):
        """
        :param str name: Name of a managed cluster SKU.
        :param str tier: Tier of a managed cluster SKU.
        """
        # Both fields are optional; only provided values become dict keys.
        if name is not None:
            pulumi.set(__self__, "name", name)
        if tier is not None:
            pulumi.set(__self__, "tier", tier)
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        Name of a managed cluster SKU.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def tier(self) -> Optional[str]:
        """
        Tier of a managed cluster SKU.
        """
        return pulumi.get(self, "tier")
    def _translate_property(self, prop: str) -> str:
        # Map a camelCase wire-format property name to its snake_case Python
        # attribute; names without a table entry pass through unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ManagedClusterServicePrincipalProfileResponse(dict):
    """
    Information about a service principal identity for the cluster to use for manipulating Azure APIs.
    """
    def __init__(__self__, *,
                 client_id: str,
                 secret: Optional[str] = None):
        """
        Information about a service principal identity for the cluster to use for manipulating Azure APIs.
        :param str client_id: The ID for the service principal.
        :param str secret: The secret password associated with the service principal in plain text.
        """
        # client_id is required; the secret is stored only when provided.
        pulumi.set(__self__, "client_id", client_id)
        if secret is not None:
            pulumi.set(__self__, "secret", secret)
    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> str:
        """
        The ID for the service principal.
        """
        return pulumi.get(self, "client_id")
    @property
    @pulumi.getter
    def secret(self) -> Optional[str]:
        """
        The secret password associated with the service principal in plain text.
        """
        return pulumi.get(self, "secret")
    def _translate_property(self, prop: str) -> str:
        # Map a camelCase wire-format property name to its snake_case Python
        # attribute; names without a table entry pass through unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ManagedClusterWindowsProfileResponse(dict):
    """
    Profile for Windows VMs in the container service cluster.
    """
    def __init__(__self__, *,
                 admin_username: str,
                 admin_password: Optional[str] = None,
                 license_type: Optional[str] = None):
        """
        Profile for Windows VMs in the container service cluster.
        :param str admin_username: The administrator username to use for Windows VMs.
        :param str admin_password: The administrator password to use for Windows VMs.
        :param str license_type: The licenseType to use for Windows VMs. Windows_Server is used to enable Azure Hybrid User Benefits for Windows VMs.
        """
        # admin_username is required; the other fields are stored only when
        # provided.
        pulumi.set(__self__, "admin_username", admin_username)
        if admin_password is not None:
            pulumi.set(__self__, "admin_password", admin_password)
        if license_type is not None:
            pulumi.set(__self__, "license_type", license_type)
    @property
    @pulumi.getter(name="adminUsername")
    def admin_username(self) -> str:
        """
        The administrator username to use for Windows VMs.
        """
        return pulumi.get(self, "admin_username")
    @property
    @pulumi.getter(name="adminPassword")
    def admin_password(self) -> Optional[str]:
        """
        The administrator password to use for Windows VMs.
        """
        return pulumi.get(self, "admin_password")
    @property
    @pulumi.getter(name="licenseType")
    def license_type(self) -> Optional[str]:
        """
        The licenseType to use for Windows VMs. Windows_Server is used to enable Azure Hybrid User Benefits for Windows VMs.
        """
        return pulumi.get(self, "license_type")
    def _translate_property(self, prop: str) -> str:
        # Map a camelCase wire-format property name to its snake_case Python
        # attribute; names without a table entry pass through unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class NetworkProfileResponse(dict):
    """
    Represents the OpenShift networking configuration
    """
    def __init__(__self__, *,
                 peer_vnet_id: Optional[str] = None,
                 vnet_cidr: Optional[str] = None,
                 vnet_id: Optional[str] = None):
        """
        Represents the OpenShift networking configuration
        :param str peer_vnet_id: CIDR of the Vnet to peer.
        :param str vnet_cidr: CIDR for the OpenShift Vnet.
        :param str vnet_id: ID of the Vnet created for OSA cluster.
        """
        # NOTE(review): the generated docstring calls peer_vnet_id a "CIDR" although
        # the field name suggests a resource ID — verify against the Azure API spec.
        if peer_vnet_id is not None:
            pulumi.set(__self__, "peer_vnet_id", peer_vnet_id)
        if vnet_cidr is not None:
            pulumi.set(__self__, "vnet_cidr", vnet_cidr)
        if vnet_id is not None:
            pulumi.set(__self__, "vnet_id", vnet_id)

    @property
    @pulumi.getter(name="peerVnetId")
    def peer_vnet_id(self) -> Optional[str]:
        """
        CIDR of the Vnet to peer.
        """
        return pulumi.get(self, "peer_vnet_id")

    @property
    @pulumi.getter(name="vnetCidr")
    def vnet_cidr(self) -> Optional[str]:
        """
        CIDR for the OpenShift Vnet.
        """
        return pulumi.get(self, "vnet_cidr")

    @property
    @pulumi.getter(name="vnetId")
    def vnet_id(self) -> Optional[str]:
        """
        ID of the Vnet created for OSA cluster.
        """
        return pulumi.get(self, "vnet_id")

    def _translate_property(self, prop):
        # SDK-internal: map a camelCase wire name to its snake_case attribute name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class OpenShiftManagedClusterAADIdentityProviderResponse(dict):
    """
    Defines the Identity provider for MS AAD.
    """
    def __init__(__self__, *,
                 kind: str,
                 client_id: Optional[str] = None,
                 customer_admin_group_id: Optional[str] = None,
                 secret: Optional[str] = None,
                 tenant_id: Optional[str] = None):
        """
        Defines the Identity provider for MS AAD.
        :param str kind: The kind of the provider.
        :param str client_id: The clientId password associated with the provider.
        :param str customer_admin_group_id: The groupId to be granted cluster admin role.
        :param str secret: The secret password associated with the provider.
        :param str tenant_id: The tenantId associated with the provider.
        """
        # The discriminator is hard-wired: 'AADIdentityProvider' is stored
        # regardless of the `kind` argument's value.
        pulumi.set(__self__, "kind", 'AADIdentityProvider')
        if client_id is not None:
            pulumi.set(__self__, "client_id", client_id)
        if customer_admin_group_id is not None:
            pulumi.set(__self__, "customer_admin_group_id", customer_admin_group_id)
        if secret is not None:
            pulumi.set(__self__, "secret", secret)
        if tenant_id is not None:
            pulumi.set(__self__, "tenant_id", tenant_id)

    @property
    @pulumi.getter
    def kind(self) -> str:
        """
        The kind of the provider.
        """
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> Optional[str]:
        """
        The clientId password associated with the provider.
        """
        return pulumi.get(self, "client_id")

    @property
    @pulumi.getter(name="customerAdminGroupId")
    def customer_admin_group_id(self) -> Optional[str]:
        """
        The groupId to be granted cluster admin role.
        """
        return pulumi.get(self, "customer_admin_group_id")

    @property
    @pulumi.getter
    def secret(self) -> Optional[str]:
        """
        The secret password associated with the provider.
        """
        return pulumi.get(self, "secret")

    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> Optional[str]:
        """
        The tenantId associated with the provider.
        """
        return pulumi.get(self, "tenant_id")

    def _translate_property(self, prop):
        # SDK-internal: map a camelCase wire name to its snake_case attribute name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class OpenShiftManagedClusterAgentPoolProfileResponse(dict):
    """
    Defines the configuration of the OpenShift cluster VMs.
    """
    def __init__(__self__, *,
                 count: int,
                 name: str,
                 vm_size: str,
                 os_type: Optional[str] = None,
                 role: Optional[str] = None,
                 subnet_cidr: Optional[str] = None):
        """
        Defines the configuration of the OpenShift cluster VMs.
        :param int count: Number of agents (VMs) to host docker containers.
        :param str name: Unique name of the pool profile in the context of the subscription and resource group.
        :param str vm_size: Size of agent VMs.
        :param str os_type: OsType to be used to specify os type. Choose from Linux and Windows. Default to Linux.
        :param str role: Define the role of the AgentPoolProfile.
        :param str subnet_cidr: Subnet CIDR for the peering.
        """
        pulumi.set(__self__, "count", count)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "vm_size", vm_size)
        # Optional fields are stored only when supplied.
        if os_type is not None:
            pulumi.set(__self__, "os_type", os_type)
        if role is not None:
            pulumi.set(__self__, "role", role)
        if subnet_cidr is not None:
            pulumi.set(__self__, "subnet_cidr", subnet_cidr)

    @property
    @pulumi.getter
    def count(self) -> int:
        """
        Number of agents (VMs) to host docker containers.
        """
        return pulumi.get(self, "count")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Unique name of the pool profile in the context of the subscription and resource group.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> str:
        """
        Size of agent VMs.
        """
        return pulumi.get(self, "vm_size")

    @property
    @pulumi.getter(name="osType")
    def os_type(self) -> Optional[str]:
        """
        OsType to be used to specify os type. Choose from Linux and Windows. Default to Linux.
        """
        return pulumi.get(self, "os_type")

    @property
    @pulumi.getter
    def role(self) -> Optional[str]:
        """
        Define the role of the AgentPoolProfile.
        """
        return pulumi.get(self, "role")

    @property
    @pulumi.getter(name="subnetCidr")
    def subnet_cidr(self) -> Optional[str]:
        """
        Subnet CIDR for the peering.
        """
        return pulumi.get(self, "subnet_cidr")

    def _translate_property(self, prop):
        # SDK-internal: map a camelCase wire name to its snake_case attribute name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class OpenShiftManagedClusterAuthProfileResponse(dict):
    """
    Defines all possible authentication profiles for the OpenShift cluster.
    """
    def __init__(__self__, *,
                 identity_providers: Optional[Sequence['outputs.OpenShiftManagedClusterIdentityProviderResponse']] = None):
        """
        Defines all possible authentication profiles for the OpenShift cluster.
        :param Sequence['OpenShiftManagedClusterIdentityProviderResponseArgs'] identity_providers: Type of authentication profile to use.
        """
        # Optional fields are stored only when supplied.
        if identity_providers is not None:
            pulumi.set(__self__, "identity_providers", identity_providers)

    @property
    @pulumi.getter(name="identityProviders")
    def identity_providers(self) -> Optional[Sequence['outputs.OpenShiftManagedClusterIdentityProviderResponse']]:
        """
        Type of authentication profile to use.
        """
        return pulumi.get(self, "identity_providers")

    def _translate_property(self, prop):
        # SDK-internal: map a camelCase wire name to its snake_case attribute name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class OpenShiftManagedClusterIdentityProviderResponse(dict):
    """
    Defines the configuration of the identity providers to be used in the OpenShift cluster.
    """
    def __init__(__self__, *,
                 name: Optional[str] = None,
                 provider: Optional['outputs.OpenShiftManagedClusterAADIdentityProviderResponse'] = None):
        """
        Defines the configuration of the identity providers to be used in the OpenShift cluster.
        :param str name: Name of the provider.
        :param 'OpenShiftManagedClusterAADIdentityProviderResponseArgs' provider: Configuration of the provider.
        """
        # Optional fields are stored only when supplied.
        if name is not None:
            pulumi.set(__self__, "name", name)
        if provider is not None:
            pulumi.set(__self__, "provider", provider)

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        Name of the provider.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def provider(self) -> Optional['outputs.OpenShiftManagedClusterAADIdentityProviderResponse']:
        """
        Configuration of the provider.
        """
        return pulumi.get(self, "provider")

    def _translate_property(self, prop):
        # SDK-internal: map a camelCase wire name to its snake_case attribute name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class OpenShiftManagedClusterMasterPoolProfileResponse(dict):
    """
    OpenShiftManagedClusterMaterPoolProfile contains configuration for OpenShift master VMs.
    """
    def __init__(__self__, *,
                 count: int,
                 vm_size: str,
                 name: Optional[str] = None,
                 os_type: Optional[str] = None,
                 subnet_cidr: Optional[str] = None):
        """
        OpenShiftManagedClusterMaterPoolProfile contains configuration for OpenShift master VMs.
        :param int count: Number of masters (VMs) to host docker containers. The default value is 3.
        :param str vm_size: Size of agent VMs.
        :param str name: Unique name of the master pool profile in the context of the subscription and resource group.
        :param str os_type: OsType to be used to specify os type. Choose from Linux and Windows. Default to Linux.
        :param str subnet_cidr: Subnet CIDR for the peering.
        """
        pulumi.set(__self__, "count", count)
        pulumi.set(__self__, "vm_size", vm_size)
        # Optional fields are stored only when supplied.
        if name is not None:
            pulumi.set(__self__, "name", name)
        if os_type is not None:
            pulumi.set(__self__, "os_type", os_type)
        if subnet_cidr is not None:
            pulumi.set(__self__, "subnet_cidr", subnet_cidr)

    @property
    @pulumi.getter
    def count(self) -> int:
        """
        Number of masters (VMs) to host docker containers. The default value is 3.
        """
        return pulumi.get(self, "count")

    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> str:
        """
        Size of agent VMs.
        """
        return pulumi.get(self, "vm_size")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        Unique name of the master pool profile in the context of the subscription and resource group.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="osType")
    def os_type(self) -> Optional[str]:
        """
        OsType to be used to specify os type. Choose from Linux and Windows. Default to Linux.
        """
        return pulumi.get(self, "os_type")

    @property
    @pulumi.getter(name="subnetCidr")
    def subnet_cidr(self) -> Optional[str]:
        """
        Subnet CIDR for the peering.
        """
        return pulumi.get(self, "subnet_cidr")

    def _translate_property(self, prop):
        # SDK-internal: map a camelCase wire name to its snake_case attribute name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class OpenShiftRouterProfileResponse(dict):
    """
    Represents an OpenShift router
    """
    def __init__(__self__, *,
                 fqdn: str,
                 public_subdomain: str,
                 name: Optional[str] = None):
        """
        Represents an OpenShift router
        :param str fqdn: Auto-allocated FQDN for the OpenShift router.
        :param str public_subdomain: DNS subdomain for OpenShift router.
        :param str name: Name of the router profile.
        """
        pulumi.set(__self__, "fqdn", fqdn)
        pulumi.set(__self__, "public_subdomain", public_subdomain)
        # Optional fields are stored only when supplied.
        if name is not None:
            pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def fqdn(self) -> str:
        """
        Auto-allocated FQDN for the OpenShift router.
        """
        return pulumi.get(self, "fqdn")

    @property
    @pulumi.getter(name="publicSubdomain")
    def public_subdomain(self) -> str:
        """
        DNS subdomain for OpenShift router.
        """
        return pulumi.get(self, "public_subdomain")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        Name of the router profile.
        """
        return pulumi.get(self, "name")

    def _translate_property(self, prop):
        # SDK-internal: map a camelCase wire name to its snake_case attribute name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class PowerStateResponse(dict):
    """
    Describes the Power State of the cluster
    """
    def __init__(__self__, *,
                 code: Optional[str] = None):
        """
        Describes the Power State of the cluster
        :param str code: Tells whether the cluster is Running or Stopped
        """
        # Optional field is stored only when supplied.
        if code is not None:
            pulumi.set(__self__, "code", code)

    @property
    @pulumi.getter
    def code(self) -> Optional[str]:
        """
        Tells whether the cluster is Running or Stopped
        """
        return pulumi.get(self, "code")

    def _translate_property(self, prop):
        # SDK-internal: map a camelCase wire name to its snake_case attribute name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class PrivateEndpointResponse(dict):
    """
    Private endpoint which a connection belongs to.
    """
    def __init__(__self__, *,
                 id: Optional[str] = None):
        """
        Private endpoint which a connection belongs to.
        :param str id: The resource Id for private endpoint
        """
        # Optional field is stored only when supplied.
        if id is not None:
            pulumi.set(__self__, "id", id)

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        The resource Id for private endpoint
        """
        return pulumi.get(self, "id")

    def _translate_property(self, prop):
        # SDK-internal: map a camelCase wire name to its snake_case attribute name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class PrivateLinkServiceConnectionStateResponse(dict):
    """
    The state of a private link service connection.
    """
    def __init__(__self__, *,
                 description: Optional[str] = None,
                 status: Optional[str] = None):
        """
        The state of a private link service connection.
        :param str description: The private link service connection description.
        :param str status: The private link service connection status.
        """
        # Optional fields are stored only when supplied.
        if description is not None:
            pulumi.set(__self__, "description", description)
        if status is not None:
            pulumi.set(__self__, "status", status)

    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """
        The private link service connection description.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter
    def status(self) -> Optional[str]:
        """
        The private link service connection status.
        """
        return pulumi.get(self, "status")

    def _translate_property(self, prop):
        # SDK-internal: map a camelCase wire name to its snake_case attribute name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class PurchasePlanResponse(dict):
    """
    Used for establishing the purchase context of any 3rd Party artifact through MarketPlace.
    """
    def __init__(__self__, *,
                 name: Optional[str] = None,
                 product: Optional[str] = None,
                 promotion_code: Optional[str] = None,
                 publisher: Optional[str] = None):
        """
        Used for establishing the purchase context of any 3rd Party artifact through MarketPlace.
        :param str name: The plan ID.
        :param str product: Specifies the product of the image from the marketplace. This is the same value as Offer under the imageReference element.
        :param str promotion_code: The promotion code.
        :param str publisher: The plan ID.
        """
        # NOTE(review): the generated docstring describes `publisher` as "The plan ID",
        # which looks like an upstream codegen copy/paste — verify against the API spec.
        if name is not None:
            pulumi.set(__self__, "name", name)
        if product is not None:
            pulumi.set(__self__, "product", product)
        if promotion_code is not None:
            pulumi.set(__self__, "promotion_code", promotion_code)
        if publisher is not None:
            pulumi.set(__self__, "publisher", publisher)

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The plan ID.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def product(self) -> Optional[str]:
        """
        Specifies the product of the image from the marketplace. This is the same value as Offer under the imageReference element.
        """
        return pulumi.get(self, "product")

    @property
    @pulumi.getter(name="promotionCode")
    def promotion_code(self) -> Optional[str]:
        """
        The promotion code.
        """
        return pulumi.get(self, "promotion_code")

    @property
    @pulumi.getter
    def publisher(self) -> Optional[str]:
        """
        The plan ID.
        """
        return pulumi.get(self, "publisher")

    def _translate_property(self, prop):
        # SDK-internal: map a camelCase wire name to its snake_case attribute name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ResourceReferenceResponse(dict):
    """
    A reference to an Azure resource.
    """
    def __init__(__self__, *,
                 id: Optional[str] = None):
        """
        A reference to an Azure resource.
        :param str id: The fully qualified Azure resource id.
        """
        # Optional field is stored only when supplied.
        if id is not None:
            pulumi.set(__self__, "id", id)

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        The fully qualified Azure resource id.
        """
        return pulumi.get(self, "id")

    def _translate_property(self, prop):
        # SDK-internal: map a camelCase wire name to its snake_case attribute name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
6f9598807bcbe723a47dac6ba58bb3cbe8d37e39 | 9d259b0fdfae72b1af15ec0419590c735bff6578 | /SQLite_example4a.py | b870f673f9e3a56550fe7819ff5f5deafb0fa4a6 | [] | no_license | petermooney/datamining | 7a0c68d44471434aa01a8e7b6dbe97bb76bc9f72 | 51812a0033a9dd0ec213c475484f0576a70d60e6 | refs/heads/master | 2021-01-10T21:29:43.280421 | 2013-12-04T22:15:15 | 2013-12-04T22:15:15 | 14,603,360 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,097 | py | ### This is source code used for an invited lecture on Data Mining using Python for
### the Institute of Technology at Blanchardstown, Dublin, Ireland
### Lecturer and presenter: Dr. Peter Mooney
### email: peter.mooney@nuim.ie
### Date: November 2013
###
### The purpose of this lecture is to provide students with an easily accessible overview, with working
### examples of how Python can be used as a tool for data mining.
### For those using these notes and sample code: This code is provided as a means of showing some basic ideas around
### data extraction, data manipulation, and data visualisation with Python.
### The code provided could be written in many different ways as is the Python way. However I have tried to keep things simple and practical so that students can get an understanding of the process of data mining rather than this being a programming course in Python.
###
### If you use this code - please give me a little citation with a link back to the GitHub Repo where you found this piece of code: https://github.com/petermooney/datamining
import sqlite3 as sqlite
# we need to import sqlite.
def main():
    """Query all pubs whose name contains "Tavern" and dump them to a CSV file.

    Writes Pubs_BritanIreland_Tavern.csv with a header row and one
    "name",longitude,latitude line per matching amenity.
    """
    chosenAmenityType = "pub"
    # This time we supply part of the name of the pub we are looking for.
    pubName = "Tavern"
    results = doAnSQLiteQuerySpecificAmenityTypeLikeOperator(
        "OSM_BritishIsles_Amenities.sqlite", chosenAmenityType, pubName)
    # The query helper returns None when the query failed; the original code
    # then crashed on len(None). Guard and bail out instead.
    if results is None:
        print("Query failed; no CSV written.")
        return
    print("Number of rows {}".format(len(results)))
    # NOTE: the 'Britan' typo in the filename is kept for backward compatibility.
    with open("Pubs_BritanIreland_Tavern.csv", "w") as outputCSVFile:
        outputCSVFile.write("PubName,Longitude,Latitude\n")
        for amenity in results:
            # Row columns: AmenityName, Amenity, Longitude, Latitude.
            # The amenity type column is not written out -- we already know
            # every row is a pub for this example.
            name = amenity[0]
            longitude = amenity[2]
            latitude = amenity[3]
            outputCSVFile.write("\"{}\",{},{}\n".format(name, longitude, latitude))
# We could write this in a few different ways. For simplicity in terms of the
# lecture material we simply keep the SQLite query inside the method itself.
# The method returns the rows which are the result of the query - they can then
# be processed after this method has finished.
#
# Query purpose:
#   Find all amenities of the given type whose name CONTAINS `pubName`.
#   The LIKE pattern is "%" || pubName || "%", so the substring may appear
#   anywhere in AmenityName (e.g. pubName = "Tavern" matches both
#   "The Old Tavern" and "Tavern House").  The previous comment claiming the
#   match had to be at the END of the name was wrong for this pattern.
#   Rows with an empty AmenityName (length 0) are excluded.
#   Columns returned: AmenityName, Amenity, Longitude, Latitude.
def doAnSQLiteQuerySpecificAmenityTypeLikeOperator(databaseName, amenityType, pubName):
    """Return matching amenity rows from *databaseName*, or None on query failure."""
    queryResults = None
    con = sqlite.connect(databaseName)
    con.text_factory = str
    try:
        with con:
            # Set before creating the cursor so result rows are indexable by
            # column name as well as position.
            con.row_factory = sqlite.Row
            cur = con.cursor()
            cur.execute(
                'SELECT AmenityName,Amenity,Longitude,Latitude from amenities '
                'where amenity = ? and AmenityName LIKE "%" || ? || "%" '
                'and length(AmenityName) > 0',
                (amenityType, pubName,))
            queryResults = cur.fetchall()
    except sqlite.IntegrityError as e:
        print("An error occurred:", e.args[0])
    finally:
        # Close the handle even when an unexpected exception propagates;
        # the original skipped close() in that case and leaked the connection.
        con.close()
    return queryResults
main()
| [
"peter.mooney@nuim.ie"
] | peter.mooney@nuim.ie |
d44457e6bef9c78c814f71e2fa50950b6724d850 | 94c5dc7af2762d1ca30cda3ebc962085036a54d4 | /main.py | 13b309ce0fa80f7defeaa538d34e3a7fe69f0bbc | [] | no_license | sreeram79/trail_medical_survey | 71d9dc0ef3e93be35d332cc66d15d1413de49dc3 | 3610743fc30336d6e41257f69fb78616152dacd7 | refs/heads/master | 2021-08-28T15:00:10.386308 | 2017-12-12T14:24:44 | 2017-12-12T14:24:44 | 113,998,748 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,276 | py | import os, sys, traceback
from trialsurvey.csv_file_process import CSVFileReader
from trialsurvey.prob_trail import ProbabilityRecord
from trialsurvey.ra_input import RawInputCommand
def main():
    """Run the interactive trial-eligibility survey (Python 2 script).

    Reads historical answers from trialsurvey.csv, builds one
    ProbabilityRecord per survey column, then prompts the user per
    question and accumulates an eligibility `metrics` score.
    """
    try:
        filename = 'trialsurvey.csv'
        csvfile = CSVFileReader(filename)
        header_list = csvfile.get_headerlist()
        # One probability tracker per survey column/question.
        prob_list = [ProbabilityRecord(value) for value in header_list]
        # Feed every historical answer into its column's tracker.
        # NOTE(review): `iter` shadows the builtin; rename when next touched.
        for iter in csvfile.get_data(csvfile.get_row_count()):
            for item, value in iter.items():
                try:
                    prob_list[header_list.index(item)].increment(value)
                except:
                    # NOTE(review): bare except silently drops unknown columns
                    # AND any other error -- consider narrowing.
                    pass
        new_record = {}
        # Starting score is the historical acceptance rate.
        metrics = csvfile.get_acceptancerate()
        all_false = True
        for prob_iter in prob_list:
            ra = RawInputCommand()
            # Prompt the user for this question; answer_value is 'F' for "false".
            answer_value = ra.run_command(prob_iter.get_name(), csvfile.get_acceptancerate(), metrics)
            new_record.update({prob_iter.get_name(): answer_value})
            #TODO need to do a metric class to do more better computational
            metrics += prob_iter.get_metrics_based_on_boolean_fact(answer_value)
            # all_false stays True only while every answer so far is 'F'.
            if answer_value == 'F' and all_false:
                all_false = True
            else:
                all_false = False
        if all_false:
            print '+'*100
            print '+'*100
            print 'Based on information provided you will be invited for the Trials'
            print '+'*100
            print '+'*100
        else:
            print '+'*100
            print '+'*100
            print ("Based on information provided your score for eligibility is around {}%").format(metrics)
            print '+'*100
            print '+'*100
        # also the new candidate record should be added to the existing file which logic is completely missing herel
    except KeyboardInterrupt:
        print "Shutdown requested...exiting"
    except Exception:
        traceback.print_exc(file=sys.stdout)
    finally:
        #TODO how to handle the exception cases which is not currently handled. I should have recalculated the probability as well
        #the logic incase of failure case should be completely different as well.
        # also the new candidate record should be added to the existing file which logic is completely missing here
        # NOTE(review): sys.exit(0) in `finally` always reports success, even
        # after an exception was printed above -- consider a non-zero code there.
        sys.exit(0)

if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | sreeram79.noreply@github.com |
685fe8a95e27c5d1242463044fe21807bea841f3 | 050694c84d3688958fa56c8f1e7a43e018c1552d | /resources/lib/utils.py | 652bb762b3debdd10f2f15062ec8084bd52f915f | [
"MIT"
] | permissive | dersphere/xbmcbackup | 69b472ef5737c4d5837153f8e967442f38382b67 | 2fd7b130c73bc6ef91cf2758417f368ef428c602 | refs/heads/master | 2020-04-08T04:06:09.982648 | 2012-09-13T14:52:17 | 2012-09-13T14:52:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 732 | py | import xbmc
import xbmcaddon
__addon_id__= 'script.xbmcbackup'
__Addon = xbmcaddon.Addon(__addon_id__)
def data_dir():
    """Return this add-on's profile (user data) directory path."""
    return __Addon.getAddonInfo('profile')
def log(message, loglevel=xbmc.LOGNOTICE):
    """Write *message* to the XBMC log, prefixed with this add-on's id."""
    xbmc.log(encode(__addon_id__ + ": " + message), level=loglevel)
def showNotification(message):
    """Pop up a 4-second on-screen XBMC notification showing *message* with the add-on icon."""
    xbmc.executebuiltin("Notification(" + getString(30010) + "," + message + ",4000," + xbmc.translatePath(__Addon.getAddonInfo('path') + "/icon.png") + ")")
def getSetting(name):
    """Return the add-on setting *name* (always a string, per the xbmcaddon API)."""
    return __Addon.getSetting(name)
def setSetting(name, value):
    """Persist add-on setting *name* = *value*."""
    __Addon.setSetting(name, value)
def getString(string_id):
    """Return the localized string for numeric id *string_id* from the add-on's language files."""
    return __Addon.getLocalizedString(string_id)
def encode(string):
    """Encode *string* as UTF-8, substituting a replacement marker for
    any character that cannot be encoded."""
    return string.encode("UTF-8", errors="replace")
| [
"robweberjr@gmail.com"
] | robweberjr@gmail.com |
7f8393fdeb7bf16bcdd2c3cacfde79321df3b658 | 7a7ccb87862fa51b83d231d265f0a73202c48703 | /lista03-geral/FibonacciNaoFoi.py | cf4048cc5c4e659cb2e8bb95150654d8861fab5a | [] | no_license | dlavinia/uri | 5fb962e9bb72e89724f3164b7459f53823573dff | 1ec7b5e867e59499f13acef98ba76e3c4881b486 | refs/heads/master | 2023-08-18T08:53:26.725048 | 2021-09-27T00:10:30 | 2021-09-27T00:10:30 | 395,132,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | #ta certo mas deu limite de tempo :C
def fibo(n):
if n==1:
return 0
elif n==2:
return 1
else:
return fibo(n-1) + fibo(n-2)
n = int(input())
lista = []
while n > 0:
lista.append(fibo(n))
n= n-1
lista_ok = lista.reverse()
print(*lista, " ")
| [
"dlavinia2003@gmail.com"
] | dlavinia2003@gmail.com |
2c5bf2bf8d23b2a78ba019cbb2fe8bfc6ee0c3ca | 205640a2a8681d74afd290846231fe5136617fff | /accounts/views.py | 441aa8d569cbab09bae77dd3f041368f4f147a39 | [] | no_license | devgel/FullStack-PythonDjango | 69244802958bd9a3f62d481fae6b44458381fa8a | 712493f239b9ffc966edf4ef83e7304890e832e3 | refs/heads/master | 2020-04-07T05:16:52.671511 | 2018-11-18T14:30:11 | 2018-11-18T14:30:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,206 | py | from django.shortcuts import render,redirect
from django.contrib.auth.forms import UserCreationForm,AuthenticationForm
from django.contrib.auth import login,logout
# Create your views here.
def signup_view(request):
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
user = form.save()
#log the user in
login(request,user)
return redirect('articles:list')
else:
form = UserCreationForm()
return render(request, 'accounts/signup.html',{'form': form })
def login_view(request):
    """Authenticate a user; honour an optional 'next' POST field as the
    post-login redirect target, defaulting to the article list."""
    if request.method == 'POST':
        form = AuthenticationForm(data=request.POST)
        if form.is_valid():
            authenticated_user = form.get_user()
            login(request, authenticated_user)
            if 'next' in request.POST:
                return redirect(request.POST.get('next'))
            return redirect('articles:list')
    else:
        form = AuthenticationForm()
    # GET, or POST with bad credentials: re-render the form.
    return render(request, 'accounts/login.html', {'form': form})
return render(request, 'accounts/login.html', {'form':form})
def logout_view(request):
if request.method == 'POST':
logout(request)
return redirect('articles:list')
| [
"argelpolicarpio@gmail.com"
] | argelpolicarpio@gmail.com |
e3ac7e428dfe0d3753d93c0d46206d6b3856e8e8 | 398b08d7a035825056dc6ff65188a9daf51cfa46 | /sample.py | 53d0bd68444bce2f70c66bdb999e580237ec91db | [] | no_license | uxdx/CryptoIntoNN | 5e4c0a276e9ef8ebf02170a47bb55fc63e53c4c7 | 0b5273f50b37e9df717d9e234931746f54c4c185 | refs/heads/master | 2023-07-24T16:45:07.808344 | 2021-09-05T07:48:26 | 2021-09-05T07:48:26 | 396,058,647 | 0 | 0 | null | 2021-08-29T12:33:23 | 2021-08-14T16:04:06 | Python | UTF-8 | Python | false | false | 8,878 | py | import numpy as np
import pandas as pd
SAMPLE_DATA= [
[32569.00, 32700.00, 32530.96, 32673.91, 726.200571],
[32674.35, 32870.00, 32579.43, 32790.08, 1284.616543],
[32791.71, 32887.90, 32630.00, 32775.89, 819.642472],
[32775.90, 32856.91, 32693.29, 32817.77, 531.816098],
[32817.77, 32956.26, 32742.49, 32745.15, 708.443560],
[32745.16, 32799.90, 32631.26, 32772.98, 626.851759],
[32768.73, 32768.74, 32628.61, 32660.99, 524.817194],
[32660.99, 32690.00, 32470.64, 32479.99, 816.279277],
[32480.00, 32631.22, 32237.45, 32320.65, 1625.239175],
[32320.65, 32428.35, 32231.42, 32309.69, 1154.412007],
[32309.69, 32493.92, 32202.25, 32485.47, 984.259666],
[32485.47, 32559.05, 32400.00, 32507.07, 618.460264],
[32509.23, 32646.09, 32400.21, 32494.29, 617.198233],
[32494.30, 32610.65, 32494.29, 32589.11, 512.629923],
[32589.11, 32630.00, 32536.85, 32622.34, 456.832836],
[32622.33, 32627.01, 32426.88, 32582.69, 560.485786],
[32582.69, 32750.00, 32522.30, 32687.99, 521.764493],
[32687.99, 32746.00, 32538.37, 32729.77, 439.116931],
[32729.12, 32807.41, 32580.00, 32621.06, 803.661683],
[32621.06, 32675.94, 32351.00, 32421.69, 1023.405062],
[32421.69, 32475.24, 32251.31, 32348.37, 1018.655307],
[32348.36, 32497.99, 32265.00, 32376.20, 894.482977],
[32374.07, 32525.65, 32342.91, 32467.62, 617.065006],
[32467.61, 32570.96, 32422.05, 32545.43, 399.201299],
[32545.42, 32583.99, 31666.66, 31836.83, 2963.425163],
[31836.83, 31899.98, 31650.61, 31830.01, 1683.348082],
[31830.01, 32015.98, 31782.00, 31953.03, 986.319516],
[31958.16, 31986.94, 31875.95, 31930.12, 728.020831],
[31932.74, 31993.61, 31858.97, 31912.52, 723.940217],
[31912.52, 31937.42, 31655.83, 31807.01, 1510.713271],
[31807.01, 31899.85, 31754.55, 31843.50, 807.870297],
[31843.50, 31942.35, 31800.00, 31835.47, 824.933513],
[31829.03, 31920.98, 31825.56, 31872.94, 632.339241],
[31872.94, 32000.00, 31853.21, 31920.01, 819.690314],
[31920.01, 31992.68, 31839.44, 31992.67, 668.685473],
[31991.85, 31996.00, 31702.92, 31751.48, 968.418755],
[31751.48, 32500.00, 31550.00, 32351.82, 4023.953250],
[32351.81, 32401.76, 32258.99, 32327.70, 1176.645234],
[32330.00, 32689.70, 32260.83, 32595.99, 1456.773651],
[32595.99, 32707.27, 32391.79, 32427.58, 1302.998242],
[32427.59, 32562.48, 32423.09, 32504.58, 756.985916],
[32504.57, 32529.94, 32366.34, 32451.99, 639.559352],
[32450.95, 32517.00, 32307.76, 32364.06, 783.497397],
[32364.06, 32429.97, 32280.00, 32313.18, 691.868460],
[32313.18, 32899.99, 32313.18, 32721.69, 1909.045487],
[32721.69, 32891.54, 32678.03, 32784.00, 1405.017117],
[32784.00, 32959.21, 32700.00, 32763.13, 1824.577277],
[32763.13, 32958.11, 32760.00, 32863.07, 874.690254],
[32863.07, 32938.99, 32792.00, 32792.01, 723.286084],
[32792.00, 32874.27, 32733.83, 32786.00, 1076.767214],
[32786.01, 32861.31, 32710.01, 32831.05, 674.529695],
[32832.85, 32902.89, 32665.14, 32833.88, 793.717717],
[32838.26, 32911.79, 32801.00, 32809.25, 604.061829],
[32809.25, 32880.30, 32774.99, 32832.99, 445.615310],
[32832.99, 32933.00, 32807.50, 32892.54, 562.765845],
[32890.29, 33114.03, 32671.03, 32743.96, 1634.508784],
[32743.96, 32777.81, 32564.44, 32684.06, 822.818274],
[32684.05, 32798.62, 32681.30, 32729.76, 454.555934],
[32729.77, 32846.48, 32708.00, 32839.40, 425.185367],
[32839.39, 32869.58, 32741.73, 32800.41, 450.306575],
[32800.41, 32940.00, 32800.40, 32873.98, 589.404875],
[32873.97, 32925.34, 32830.99, 32852.23, 314.907070],
[32852.24, 32965.00, 32739.28, 32886.37, 597.635905],
[32887.99, 33063.82, 32887.99, 32959.96, 641.213931],
[32959.97, 32974.44, 32780.00, 32790.10, 509.468768],
[32790.09, 32858.18, 32681.00, 32820.02, 537.286663],
[32820.03, 32881.00, 32651.26, 32814.95, 636.686947],
[32814.95, 33046.86, 32761.45, 32951.87, 780.539975],
[32952.60, 33185.25, 32847.00, 32905.42, 1408.630270],
[32905.41, 32908.59, 32762.50, 32784.33, 625.069262],
[32784.33, 32911.03, 32727.90, 32779.12, 757.391868],
[32779.13, 32787.79, 32690.59, 32760.00, 617.704013],
[32760.00, 32785.23, 32580.12, 32780.01, 777.795091],
[32780.01, 32780.01, 32635.46, 32641.65, 544.985848],
[32641.66, 32691.42, 32441.86, 32629.01, 1160.049055],
[32629.01, 32681.36, 32534.79, 32665.53, 546.015507],
[32665.53, 32665.53, 32550.00, 32622.92, 426.512628],
[32622.92, 32658.98, 32473.72, 32583.90, 497.126870],
[32582.71, 32650.00, 32430.83, 32480.01, 490.484033],
[32480.00, 32592.26, 32366.12, 32483.00, 914.144960],
[32483.00, 32600.00, 32381.81, 32535.14, 817.657322],
[32535.14, 32535.14, 32359.65, 32469.50, 858.046470],
[32468.30, 32551.99, 32340.00, 32415.00, 1022.621040],
[32415.00, 32500.02, 32316.45, 32376.51, 954.496890],
[32376.50, 32620.31, 32332.54, 32501.09, 1061.302333],
[32501.10, 32561.31, 32433.99, 32505.00, 884.606329],
[32505.00, 32579.33, 32112.00, 32174.02, 2052.800737],
[32170.44, 32170.44, 31816.06, 31866.76, 2513.094331],
[31866.76, 31949.99, 31800.00, 31836.14, 1177.888720],
[31836.15, 31992.58, 31820.03, 31880.50, 899.400595],
[31880.50, 32152.97, 31612.34, 31755.06, 2341.310305],
[31755.06, 31824.12, 31605.84, 31723.25, 1239.329527],
[31723.25, 31922.29, 31455.00, 31758.29, 2402.474065],
[31758.28, 31923.35, 31642.20, 31828.82, 1587.243611],
[31828.86, 31957.77, 31744.01, 31754.87, 1183.121733],
[31754.87, 31888.20, 31675.50, 31771.83, 791.678978],
]
# OHLCV column names for the sample candles above.
SAMPLE_COLUMN = [
    'open', 'high', 'low', 'close', 'volume'
]
# 96 half-hour timestamps covering 2021-07-14 00:00:00 .. 2021-07-15 23:30:00.
# Generated instead of hand-listing 96 literals; the produced strings are
# byte-identical to the original list ('%Y-%m-%d %H:%M:%S' at 30-minute steps).
SAMPLE_INDEX = [
    (datetime(2021, 7, 14) + timedelta(minutes=30 * i)).strftime('%Y-%m-%d %H:%M:%S')
    for i in range(96)
]
# Sample candles as a pandas DataFrame indexed by the timestamp strings above.
SAMPLE_DATAFRAME = pd.DataFrame(data=np.array(SAMPLE_DATA), index=SAMPLE_INDEX, columns=SAMPLE_COLUMN)
| [
"uxdx@naver.com"
] | uxdx@naver.com |
00a18079b169ce59a3eecd93c7f89b7c002ec17f | 00669f6d510768fee3a4837c9d3a0b70f5e6e3b3 | /gen.py | e4f2b4e439ae89cf9d8106c951f149b6627be4d5 | [] | no_license | Jacuos/linParallel | 48a81730434c4325115d1c37e216f423983e1490 | 2d83a347d33c3a4045791bbf042c19b7e9d5043a | refs/heads/master | 2021-01-20T19:57:31.376733 | 2016-06-20T19:07:44 | 2016-06-20T19:07:44 | 61,137,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | import sys,random
def main(argv):
    """Write a random dim x (dim+1) augmented matrix to a file.

    argv mirrors sys.argv: argv[1] is the output path and argv[2] the
    dimension.  Each of the `dim` output rows holds dim+1 space-separated
    floats drawn uniformly from [-1000, 1000].
    """
    dim = int(argv[2])
    # Build the matrix directly; the original pre-filled it with the `int`
    # type object as a placeholder and then overwrote every cell.
    A = [[random.uniform(-1000, 1000) for _ in range(dim + 1)] for _ in range(dim)]
    # Context manager guarantees the file is closed (the original leaked the
    # handle because it never called f.close()).
    with open(argv[1], 'w') as f:
        for row in A:
            for value in row:
                f.write(str(value) + " ")
            f.write("\n")
# CLI entry point: python gen.py <output-file> <dimension>
if __name__ =="__main__":
main(sys.argv) | [
"jacek.kozieja@gmail.com"
] | jacek.kozieja@gmail.com |
b7868249902bfe1fb69ee6e3267b9e1aab3b8417 | 6b247e365d97951ae7137bb8140447fe72100ff6 | /app/urls.py | c942d1ca3e5b888307b6d9ccafa4f13c869944b5 | [] | no_license | tharcissie/Discussion_Board | 27f251875218174b3285a48b5d1de58653930e5a | 42b3c14b9993a906dc6bfa142dab0d3ddfac66b8 | refs/heads/master | 2023-02-27T18:03:36.251799 | 2021-02-10T15:57:33 | 2021-02-10T15:57:33 | 336,992,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 714 | py | from django.urls import path
from .views import home, topics, new_topic, signup, topic_detail,reply_topic, profile, delete_topic,update_topic
urlpatterns = [
    # Board overview.
    path('', home, name='home'),
    # Topic listing and creation for board <id>.
    path('topics/<id>', topics, name='topics'),
    path('topics/<id>/create_topic', new_topic, name='create_topic'),
    path('signup/', signup, name='signup'),
    # Single-topic detail page and its actions (reply / delete / edit).
    path('topic_detail/<id>', topic_detail, name='topic_detail'),
    path('topic_detail/<id>/reply_topic', reply_topic , name='reply_topic'),
    path('profile/<username>', profile, name='profile'),
    path('topic_detail/<id>/delete', delete_topic , name='delete_topic'),
    path('topic_detail/<id>/update_topic', update_topic , name='update_topic'),
] | [
"tharcissieidufashe@gmail.com"
] | tharcissieidufashe@gmail.com |
c160b2fd652c4d0736f0a665cc390380549bddf8 | 5a0697981ec5af415bc907ce25bd52965b46234f | /community/views.py | c8cf09bf1599a1cf1d7e851f32e4df4f1c95109b | [] | no_license | Zeelcon/comm | 3398ce6d6d2a7749bc624f22d114db112e7affab | b93551896b00bde6222028b72cc345fa624c0741 | refs/heads/master | 2021-05-21T13:30:15.200492 | 2020-04-08T13:42:33 | 2020-04-08T13:42:33 | 252,667,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,224 | py | from django.shortcuts import render, redirect
from django.http import HttpResponse, request
from .forms import CreateUserForm
from django.contrib.auth.forms import UserCreationForm
from django.contrib import messages
from django.contrib.auth import authenticate,login,logout
from django.contrib.auth.decorators import login_required
# Create your views here.
#account: register / login / forgot-password views
def register(request):
    """Render the signup form and create a new account on valid POST.

    Logged-in users are sent straight to the home page.  On successful
    registration the user is redirected to the login page; otherwise the
    form (with its validation errors) is re-rendered.
    """
    if request.user.is_authenticated:
        return redirect('home_page')

    form = CreateUserForm()
    if request.method == 'POST':
        form = CreateUserForm(request.POST)
        if form.is_valid():
            form.save()
            # BUG FIX: the attribute is `cleaned_data`, not `clean_data`
            # (the original raised AttributeError after saving the user).
            user = form.cleaned_data.get('username')
            messages.success(request, 'Account was created for' + user)
            # BUG FIX: `return redirect` returned the function object itself;
            # redirect must be called with a target route.
            return redirect('login_page')
    context = {'form': form}
    return render(request, 'account/register_page.html', context)
def loginUser(request):
    """Authenticate the posted credentials and start a session."""
    # Already signed in: nothing to do here.
    if request.user.is_authenticated:
        return redirect('home_page')

    if request.method == 'POST':
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = authenticate(request, username=username, password=password)
        if user is not None:
            login(request, user)
            return redirect('home_page')
        messages.info(request, 'Username Or password is wrong!')
    # GET request or failed login both land on the login page.
    return render(request, 'account/login_page.html', {})
def logoutUser(request):
    """Log the current user out and send them to the login page."""
    # BUG FIX: Django passes the request object to every view; the original
    # signature took no arguments, so `request` below raised NameError.
    logout(request)
    return redirect('login_page')
def forget(request):
    """Render the "forgot password" page."""
    return render(request, 'account/forget_page.html', {})
# @login_required(login_url='login_page')
#blog: home page / user info / article views
def home(request):
    """Render the community home page."""
    return render(request, 'commusic/home_page.html', {})
# @login_required(login_url='login_page')
def users(request):
    """Render the user-information page."""
    return render(request, 'commusic/users_page.html', {})
# @login_required(login_url='login_page')
def article(request):
    """Render the article listing page."""
    context = {}
return render(request, 'commusic/article_page.html', context) | [
"15217458846@163.com"
] | 15217458846@163.com |
f100b72745d582e380b0e88a97e27aa8d8d7f373 | 389e5d7ae4f604a3b2e8e0b4d8b33f8828b3ab3e | /tcp_psh_ack.py | fb61ab3b562a75b3533fb8eb8ae6d40193e2c2ac | [] | no_license | rizkyghosto/gighosto | 959163205bca3429786e2565d8796817dd4c0bca | e4d8ddc2dbd1559bd2fadf78da9eeb4f9afa0089 | refs/heads/main | 2023-05-22T16:13:15.146017 | 2021-06-13T10:29:53 | 2021-06-13T10:29:53 | 376,510,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,314 | py | import time
import socket, sys
from struct import *
# checksum functions needed for calculation checksum
def checksum(msg):
    """Compute the 16-bit one's-complement Internet checksum (RFC 1071).

    msg is a byte string (Python 2 str).  The sum is taken over 16-bit
    little-endian words; an odd-length message is padded with a zero high
    byte, as the checksum is defined over 16-bit words.  Returns an int
    in the range 0..0xffff.
    """
    s = 0
    # loop taking 2 characters at a time
    # BUG FIX: stop one short of the end so an odd-length msg no longer
    # raises IndexError on msg[i+1].
    for i in range(0, len(msg) - 1, 2):
        w = ord(msg[i]) + (ord(msg[i+1]) << 8 )
        s = s + w
    # Odd-length message: fold in the final byte, padded with a zero byte.
    if len(msg) % 2:
        s = s + ord(msg[-1])
    # Fold the carries above 16 bits back into the low word (twice suffices).
    s = (s>>16) + (s & 0xffff);
    s = s + (s >> 16);
    #complement and mask to 4 byte short
    s = ~s & 0xffff
    return s
#create a raw socket
# NOTE: SOCK_RAW + IPPROTO_RAW requires root privileges; this script is
# Python 2 (print statements and the `except socket.error , msg` syntax).
try:
    s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
except socket.error , msg:
    print 'Socket could not be created. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
    sys.exit()
print "success"
# tell kernel not to put in headers, since we are providing it, when using IPPROTO_RAW this is not necessary
# s.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
# now start constructing the packet
packet = '';
source_ip = '10.1.10.1'
dest_ip = '10.1.12.173' # or socket.gethostbyname('www.google.com')
# ip header fields
# 20-byte IPv4 header (IHL = 5 words), fields in RFC 791 order.
ip_ihl = 5
ip_ver = 4
ip_tos = 0
ip_tot_len = 0 # kernel will fill the correct total length
ip_id = 54321 #Id of this packet
ip_frag_off = 0
ip_ttl = 255
ip_proto = socket.IPPROTO_TCP
ip_check = 0 # kernel will fill the correct checksum
ip_saddr = socket.inet_aton ( source_ip ) #Spoof the source ip address if you want to
ip_daddr = socket.inet_aton ( dest_ip )
# Version and IHL share one byte: high nibble = version, low nibble = IHL.
ip_ihl_ver = (ip_ver << 4) + ip_ihl
# the ! in the pack format string means network order
ip_header = pack('!BBHHHBBH4s4s' , ip_ihl_ver, ip_tos, ip_tot_len, ip_id, ip_frag_off, ip_ttl, ip_proto, ip_check, ip_saddr, ip_daddr)
# tcp header fields
tcp_source = 1234 # source port
tcp_dest = 80 # destination port
tcp_seq = 454
tcp_ack_seq = 0
tcp_doff = 5 #4 bit field, size of tcp header, 5 * 4 = 20 bytes
#tcp flags
# NOTE(review): only SYN is set below, although the file name suggests
# PSH/ACK — confirm which flag combination is actually intended.
tcp_fin = 0
tcp_syn = 1
tcp_rst = 0
tcp_psh = 0
tcp_ack = 0
tcp_urg = 0
tcp_window = socket.htons (5840) # maximum allowed window size
tcp_check = 0
tcp_urg_ptr = 0
# Data offset occupies the high nibble; the reserved bits stay zero.
tcp_offset_res = (tcp_doff << 4) + 0
tcp_flags = tcp_fin + (tcp_syn << 1) + (tcp_rst << 2) + (tcp_psh <<3) + (tcp_ack << 4) + (tcp_urg << 5)
# the ! in the pack format string means network order
tcp_header = pack('!HHLLBBHHH' , tcp_source, tcp_dest, tcp_seq, tcp_ack_seq, tcp_offset_res, tcp_flags, tcp_window, tcp_check, tcp_urg_ptr)
user_data = 'Hello, how are you'
# pseudo header fields
# The TCP checksum is computed over this RFC 793 pseudo header plus the
# TCP header and payload, with the checksum field itself still zero.
source_address = socket.inet_aton( source_ip )
dest_address = socket.inet_aton(dest_ip)
placeholder = 0
protocol = socket.IPPROTO_TCP
tcp_length = len(tcp_header) + len(user_data)
psh = pack('!4s4sBBH' , source_address , dest_address , placeholder , protocol , tcp_length);
psh = psh + tcp_header + user_data;
tcp_check = checksum(psh)
#print tcp_checksum
# make the tcp header again and fill the correct checksum - remember checksum is NOT in network byte order
tcp_header = pack('!HHLLBBH' , tcp_source, tcp_dest, tcp_seq, tcp_ack_seq, tcp_offset_res, tcp_flags, tcp_window) + pack('H' , tcp_check) + pack('!H' , tcp_urg_ptr)
# final full packet - syn packets dont have any data
packet = ip_header + tcp_header + user_data
#Send the packet finally - the port specified has no effect
st = time.time()
# Flood the destination with this single packet for 60 seconds, then exit.
while(1):
    end = time.time()
    if(end-st <60):
        s.sendto(packet, (dest_ip , 0 ))
    else:
exit() | [
"noreply@github.com"
] | rizkyghosto.noreply@github.com |
ec041b2621b0dc49c1d05fa7ab066b9e1ca2feaa | a2705dca5db55b112b7a690962bd8aa256ed7564 | /back-end/pyworkflow/pyworkflow/nodes/__init__.py | 23853aa31688f5e047927fd93943fc682863b64e | [
"MIT"
] | permissive | wjhrdy/visual-programming | 5ea1ada48f17a596a89dba4eb2246e0bfab83571 | 8e8c6edafd98c42ad24967b8e0f1ee97be81819b | refs/heads/master | 2022-12-26T05:22:58.529184 | 2020-10-09T12:49:39 | 2020-10-09T12:49:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | from .flow_control import *
from .io import *
from .manipulation import *
from .visualization import *
| [
"matthew_thomas@hms.harvard.edu"
] | matthew_thomas@hms.harvard.edu |
15229c083ab6f2fca69c227fee1c7675b44be211 | 1e93545d9cc0ee2aef2f68a9fdd99bd64edeae3d | /Implement-6-Small-APP/2.lucky-color.py | 942f5e07836dba41b77c8916a0379e9cf0ebb51f | [] | no_license | spike688023/10-Real-World-Application | 3e7d6a15a76e9c2384a42d24235e6831bfbeb2c2 | 770c841ab3011d4875a88dc2d04a38ac417bdb8b | refs/heads/master | 2021-01-19T17:39:40.288635 | 2017-09-16T03:53:04 | 2017-09-16T03:53:04 | 101,082,748 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 701 | py | """
choice(seq) method of random.Random instance
Choose a random element from a non-empty sequence.
Each time a colour is guessed wrong, the wrong guess is removed from the list.
"""
import random
colors = ['red', 'blue', 'green', 'purple', 'yellow']
luckyColor = random.choice(colors)
# Give the player three attempts; every wrong guess is removed from the pool.
for i in range(3):
    print('There are {} colors'.format(colors))
    guess = input('Guess your lucky color: ')
    if guess != luckyColor:
        print('Seems like {} is not your lucky color:('.format(guess))
        # BUG FIX: list.remove() raises ValueError when the guess is not in
        # the list (a typo or a repeated wrong guess) and crashed the game.
        if guess in colors:
            colors.remove(guess)
    else:
        break
if guess == luckyColor:
    print('Great! {} is your lucky color!'.format(luckyColor))
else:
    print('Actually, {} is your lucky color!'.format(luckyColor))
| [
"jack_6880hotmail.com"
] | jack_6880hotmail.com |
335643303130079ae5cb0edd6ad8a699c6391fe5 | 81a0a97e66cbcf86aa0c040fc3ccd06552ab179c | /accounts/views.py | 80ed108012bccd1a393ad7d1b1887c4c26cf612e | [] | no_license | ayush-karn/Quiz_app | 3de62264dd55c9fad466552fea3344a9c3268f88 | eda04e86e8b8c850472254fb443a4142753cb84b | refs/heads/main | 2023-06-06T12:03:16.750630 | 2021-06-26T21:18:15 | 2021-06-26T21:18:15 | 380,597,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,850 | py | from django.shortcuts import render,redirect
from django.contrib.auth.models import User
# Create your views here.
from django.contrib.auth import authenticate,login
from django.contrib.auth import logout
def login_attempt(request):
    """Log a user in with the email/password posted from the login form.

    GET simply renders the login page.  POST looks the account up by
    email (usernames are stored as the email by register_attempt below),
    then authenticates; failures re-render the page with an error.
    """
    if request.method == "POST":
        email = request.POST.get('email')
        password = request.POST.get('password')
        # FIX: removed the debug print() calls that leaked the submitted
        # email and the authentication result to stdout / server logs.
        account = User.objects.filter(email=email).first()
        if not account:
            context = {'error': 'user does not exists'}
            return render(request, 'auth/login.html', context)
        user = authenticate(username=email, password=password)
        if user is not None:
            login(request, user)
            return redirect('/')
        context = {'error': 'invalid credentials'}
        return render(request, 'auth/login.html', context)
    return render(request, 'auth/login.html')
def register_attempt(request):
    """Create a new account from the posted registration form."""
    # Anything but a POST just shows the empty registration page.
    if request.method != 'POST':
        return render(request, 'auth/register.html')

    f_name = request.POST.get('f_name')
    l_name = request.POST.get('l_name')
    email = request.POST.get('email')
    password = request.POST.get('password')

    existing = User.objects.filter(email=email).first()
    if existing:
        context = {'error': 'user already exists'}
        return render(request, 'auth/register.html', context)

    # The email doubles as the username for this site.
    account = User(first_name=f_name, last_name=l_name, email=email, username=email)
    account.set_password(password)
    account.save()
    return render(request, 'auth/register.html')
def logout_attempt(request):
    """Terminate the current session and return to the home page."""
    logout(request)
return redirect('/') | [
"ayushkarn2127@gmail.com"
] | ayushkarn2127@gmail.com |
d23690b10700d834432702a5b133c61e359439af | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-sql/azure/mgmt/sql/models/restorable_dropped_managed_database_paged.py | d6e432c2c35188bdfa6c829f58d7c60fe70a9ab3 | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 1,036 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class RestorableDroppedManagedDatabasePaged(Paged):
    """
    A paging container for iterating over a list of :class:`RestorableDroppedManagedDatabase <azure.mgmt.sql.models.RestorableDroppedManagedDatabase>` object
    """
    # AutoRest-generated (see file header): maps the REST response's
    # `nextLink`/`value` fields onto msrest's Paged iterator protocol.
    _attribute_map = {
        'next_link': {'key': 'nextLink', 'type': 'str'},
        'current_page': {'key': 'value', 'type': '[RestorableDroppedManagedDatabase]'}
    }
    def __init__(self, *args, **kwargs):
        # All iteration/paging behaviour is inherited from msrest's Paged.
        super(RestorableDroppedManagedDatabasePaged, self).__init__(*args, **kwargs)
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
a42b874d58734949d3b054019a599a8224df6ec5 | 216e6e4957e02780129ab4e917e94cfb975dbfcb | /chapter_6/es148.py | 22764021b34b3dcfda7b4d40ffc5d138be098555 | [] | no_license | DamManc/workbook | d7e72fd1ed098bd7bccb23fa5fd9a102cfff10db | 2103dbdc8a6635ffd6a1b16b581c98800c9f21a2 | refs/heads/master | 2023-04-19T21:26:23.940011 | 2021-05-23T22:37:27 | 2021-05-23T22:37:27 | 335,064,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,080 | py | # Exercise 148: Play Bingo
from es147 import *
import copy
def main(trials=1000):
    """Simulate `trials` complete bingo games on one card and report the
    min / max / average number of losing calls before each win.

    Generalized: the trial count is now a parameter (default keeps the
    original behaviour) and the average uses len(n_calls) instead of a
    duplicated magic number.
    """
    print('Welcome to the Bingo Game!!')
    print('------ Your card ------')
    card = bingo_card()
    print_bingo_card(card)
    n_calls = []
    for i in range(0, trials):
        copy_card = copy.deepcopy(card)   # fresh, unmarked copy per game
        gamble = False
        count = 0                         # losing calls before the win
        while not gamble:
            # Draw 5 distinct numbers in 1..75 for this call.
            numbers = []
            while len(numbers) < 5:
                r = random.randint(1, 75)
                if r not in numbers:
                    numbers.append(r)
            gamble = check_card(copy_card, numbers)
            if gamble:
                print('Your call:', end='\t')
                print(f'{numbers} *****************---> WIN {gamble}')
                print(f'tot calls: {count}')
                n_calls.append(count)
            else:
                count += 1
    print(f'The minimum number of calls is {min(n_calls)}')
    print(f'The maximum number of calls is {max(n_calls)}')
    print(f'The average number of calls is {sum(n_calls) / len(n_calls)}')
# Run the full simulation when executed as a script.
if __name__ == '__main__':
    main()
| [
"damiano.mancini1@gmail.com"
] | damiano.mancini1@gmail.com |
35f62ae64a0ab7a5879238d77477d64c1beae21f | f10982b909f1c298d4a44caea022c346fdc3321f | /fibo.py | 0db54ba1364d8615d82a32d6884b859e1976daca | [] | no_license | Vidika/Python | 4cc8a91e9caf633d4e778ac0289b1e9e536103e3 | 22da0657c8154ae932464ccb6a390fbeca296879 | refs/heads/master | 2020-03-23T09:15:23.862167 | 2018-07-18T03:42:13 | 2018-07-18T03:42:13 | 141,376,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 970 | py | def fibonnaci_numbers_upto_n(n):
    # Standard Fibonacci recurrence seeded with (1, 1); `current` takes the
    # values 1, 2, 3, 5, 8, ... and every value <= n is collected in order.
    previous=1
    current=1
    numbers=[]
    while current<=n:
        numbers.append(current)
        previous,current=current,previous+current
    return numbers
def encode(n):
    """Fibonacci-code n: greedy Zeckendorf bits (least significant first)
    followed by a single '1' terminator."""
    # Fibonacci numbers 1, 2, 3, 5, 8, ... up to n (helper inlined).
    fibs = []
    a, b = 1, 1
    while b <= n:
        fibs.append(b)
        a, b = b, a + b
    # One data bit per Fibonacci number, plus the trailing terminator bit.
    bits = ['0'] * len(fibs) + ['1']
    remainder = n
    # Greedy pass from the largest Fibonacci number downwards.
    i = len(fibs) - 1
    while i > -len(fibs) and remainder != 0:
        if remainder >= fibs[i]:
            bits[i] = '1'
            remainder -= fibs[i]
        i -= 1
    return ''.join(bits)
def decode(code):
    """Decode a Fibonacci-coded bit string (LSB-first, '11'-terminated).

    Returns the encoded positive integer, or 0 if `code` is not a valid
    codeword (too short, missing terminator, or adjacent data '1' bits).
    """
    # BUG FIX: the guard was `len(code) <= 2`, which wrongly rejected "11" —
    # the valid codeword that encode(1) produces.
    if len(code) < 2 or code[-2:] != '11':
        return 0
    previous = 1
    current = 1          # Fibonacci value of the current bit position
    n = 0
    previous_bit = False
    # The final character is the terminator; everything before it is data.
    for bit in (int(c) for c in code[:-1]):
        if bit:
            if previous_bit:
                # Adjacent 1s never occur inside a Zeckendorf representation.
                return 0
            n += current
            previous_bit = True
        else:
            previous_bit = False
        previous, current = current, previous + current
    return n
| [
"vidikab@gmail.com"
] | vidikab@gmail.com |
a6a07450d50bc0b680aeccd879e4ad8909edd2f0 | 8bd6a9181e05ece2a607f40194a1c7f74e87682f | /section1/homework/day1/bai3.py | 7ef7f90542908a17bbb523a7a8c22c2888af0501 | [] | no_license | shadowstep666/phamminhhoang-fundamental-c4e25 | b9669c0ccecc3c1b027cdfaca6d0d6bae6f5bcc1 | 3d1d6cd239ca8963204e3b23ae01ea3aecd1178e | refs/heads/master | 2020-04-13T12:14:44.544845 | 2019-01-04T20:53:56 | 2019-01-04T20:53:56 | 163,196,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | c=int(input("enter the celsius temperature :"))
# Convert the Celsius reading above to Fahrenheit.
f= c *1.8+32
print ( c , "(C) =" , f , "(F)") | [
"noreply@github.com"
] | shadowstep666.noreply@github.com |
7efc8a8438108ce02035f70090cb8888400ff463 | 9d278285f2bc899ac93ec887b1c31880ed39bf56 | /ondoc/notification/migrations/0004_auto_20180716_1216.py | 0191eb5e80b1b65fec501f3020d06bea0bfa08f5 | [] | no_license | ronit29/docprime | 945c21f8787387b99e4916cb3ba1618bc2a85034 | 60d4caf6c52a8b70174a1f654bc792d825ba1054 | refs/heads/master | 2023-04-01T14:54:10.811765 | 2020-04-07T18:57:34 | 2020-04-07T18:57:34 | 353,953,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,821 | py | # Generated by Django 2.0.5 on 2018-07-16 06:46
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: widens the `notification_type` choices on all
    # four notification models with invoice types (10: Doctor, 11: Lab).
    # Do not edit by hand; create a new migration for further changes.
    dependencies = [
        ('notification', '0003_auto_20180613_1301'),
    ]
    operations = [
        migrations.AlterField(
            model_name='appnotification',
            name='notification_type',
            field=models.PositiveIntegerField(choices=[(1, 'Appointment Accepted'), (2, 'Appointment Cancelled'), (3, 'Appointment Rescheduled by Patient'), (4, 'Appointment Rescheduled by Doctor'), (5, 'Appointment Booked'), (10, 'Doctor Invoice'), (11, 'Lab Invoice')]),
        ),
        migrations.AlterField(
            model_name='emailnotification',
            name='notification_type',
            field=models.PositiveIntegerField(choices=[(1, 'Appointment Accepted'), (2, 'Appointment Cancelled'), (3, 'Appointment Rescheduled by Patient'), (4, 'Appointment Rescheduled by Doctor'), (5, 'Appointment Booked'), (10, 'Doctor Invoice'), (11, 'Lab Invoice')]),
        ),
        migrations.AlterField(
            model_name='pushnotification',
            name='notification_type',
            field=models.PositiveIntegerField(choices=[(1, 'Appointment Accepted'), (2, 'Appointment Cancelled'), (3, 'Appointment Rescheduled by Patient'), (4, 'Appointment Rescheduled by Doctor'), (5, 'Appointment Booked'), (10, 'Doctor Invoice'), (11, 'Lab Invoice')]),
        ),
        migrations.AlterField(
            model_name='smsnotification',
            name='notification_type',
            field=models.PositiveIntegerField(choices=[(1, 'Appointment Accepted'), (2, 'Appointment Cancelled'), (3, 'Appointment Rescheduled by Patient'), (4, 'Appointment Rescheduled by Doctor'), (5, 'Appointment Booked'), (10, 'Doctor Invoice'), (11, 'Lab Invoice')]),
        ),
    ]
| [
"kanhaiyalal@policybazaar.com"
] | kanhaiyalal@policybazaar.com |
fb5fd8b8750934092164597d06bd43e67d19e4c4 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_97/768.py | 789102ddeb40b07f44f85d54dd152798feab35b8 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,576 | py | import sys
def nbValidPermutation(i, maxi):
    """Count the distinct digit-rotations of i that are > i and <= maxi."""
    digits = str(i)
    seen = []
    for cut in range(len(digits) - 1):
        # Rotate: move the leading `cut+1` digits to the back.
        rotated = int(digits[cut + 1:] + digits[:cut + 1])
        if i < rotated <= maxi and rotated not in seen:
            seen.append(rotated)
    return len(seen)
def buildKelem(i, maxi):
    """Return the distinct digit-rotations of i in (i, maxi], sorted
    in descending order."""
    digits = str(i)
    found = []
    for cut in range(len(digits) - 1):
        candidate = int(digits[cut + 1:] + digits[:cut + 1])
        if i < candidate <= maxi and candidate not in found:
            found.append(candidate)
    return sorted(found, reverse=True)
def buildK():
    """Precompute buildKelem(i, 2000000) for every i in [0, 2000000)."""
    return [buildKelem(i, 2000000) for i in range(2000000)]
def computeSolKno(mini, maxi, kno):
    """Sum, over i in [mini, maxi-2], how many precomputed rotations of i
    are <= maxi.  Each kno[i] is sorted descending, so the entries that
    exceed maxi form a prefix we can discount."""
    total = 0
    for i in range(mini, maxi - 1):
        rotations = kno[i]
        kept = len(rotations)
        for value in rotations:
            if value <= maxi:
                break
            kept -= 1
        total += kept
    return total
def computeSol(mini, maxi):
    """Directly sum the valid rotation counts for every i in [mini, maxi-2]
    (the non-precomputed counterpart of computeSolKno)."""
    return sum(nbValidPermutation(i, maxi) for i in range(mini, maxi - 1))
def solve(pathI,pathOut):
    """Read a Code Jam input file and write 'Case #i: <answer>' lines.

    Python 2 only (print statements and the removed `file()` builtin).
    The first input line (the case count) is skipped; every remaining
    line holds `mini maxi`, answered via the precomputed rotation table.
    """
    kno = buildK()
    print 'ok, kno'
    counter = 1
    fI = file(pathI,'rU')
    fO = file(pathOut,'w')
    lines = fI.readlines()
    for line in lines[1:]:
        print line
        elem = line.split()
        mini = int(elem[0])
        maxi = int(elem[1])
        sol = computeSolKno(mini,maxi,kno)
        fO.write('Case #')
        fO.write(str(counter))
        fO.write(': ')
        fO.write(str(sol))
        fO.write('\n')
        counter+=1
    fI.close()
    fO.close()
def main():
    # CLI: python <script> <input-file> <output-file>
    args = sys.argv[1:]
    solve(args[0],args[1])
main()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
83e19f4583eacef328f16c8bd2557547c4102e72 | 9e49244b88b0d49e735bbd8f33e53194fd29c054 | /ShoppingCart/models.py | 4857be945e4aa66822fbb0ca60150edfe46851ab | [] | no_license | stanyu2013/apps | 24c7edad58e09f9c292868ade05b01aa0309336b | f1123aafaab9f58ec2755a3a6027746c0f947f1e | refs/heads/main | 2023-03-18T15:48:07.868364 | 2021-03-12T22:12:57 | 2021-03-12T22:12:57 | 347,206,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,115 | py | from django.conf import settings
from django.db import models
from django.utils.text import slugify
'''SIZES = (
('XXS', 'Extra Extra Small'),
('XS', 'Extra Small'),
('S', 'Small'),
('M', 'Medium'),
('L', 'Large'),
('XL', 'Extra Large'),
('XXL', 'Extra Extra Large'),
('XXXL', 'Triple Extra Large')
)
STORAGES = (
('1GB', '1GB'),
('4GB', '4GB'),
('8GB', '8GB'),
('16GB', '16GB'),
('32GB', '32GB'),
('64GB', '64GB'),
('128GB', '128GB'),
('256GB', '256GB'),
('512GB','512GB')
)'''
class Category(models.Model):
    # Product grouping; `slug` is always regenerated from `name` on save.
    name = models.CharField(max_length=100)
    #description = models.TextField(nullable=True)
    #image = models.ImageField(upload_to="categories", nullable=True)
    slug = models.SlugField()
    def save(self, *args, **kwargs):
        # Derive the URL slug from the (possibly changed) name before saving.
        self.slug = slugify(self.name)
        super(Category, self).save(*args, **kwargs)
    class Meta:
        verbose_name_plural = "Categories"
    def __str__(self):
        return "{0}".format(self.name)
class Product(models.Model):
    # A sellable item; save() mirrors Category's slug-from-name behaviour.
    name = models.CharField(max_length=100)
    category = models.ForeignKey(Category, on_delete=models.CASCADE)
    description = models.TextField()
    price = models.FloatField()
    image = models.ImageField(upload_to="inventory")
    # Presumably storefront flags ("new" / "on sale") — confirm in templates.
    recently_added = models.BooleanField(default=False)
    reduced_price = models.BooleanField(default=False)
    '''is_apparel = models.BooleanField(default=False)
    size = models.CharField(max_length=4, choices=SIZES, default='', null=True)
    is_electronics = models.BooleanField(default=False)
    storage = models.CharField(max_length=4, choices=STORAGES, default='', null=True)'''
    slug = models.SlugField()
    def save(self, *args, **kwargs):
        # Derive the URL slug from the product name before saving.
        self.slug = slugify(self.name)
        super(Product, self).save(*args, **kwargs)
    def __str__(self):
        return "{0}".format(self.name)
class Rating(models.Model):
    # A star rating attached to a product (no user link or review text yet).
    product = models.ForeignKey(Product, on_delete=models.CASCADE)
    stars = models.FloatField()
    #review = models.TextField()
    def __str__(self):
        return "{0}: {1} Stars".format(self.product.name, self.stars)
class OrderProduct(models.Model):
    # A cart/order line item: `amount` units of `product` for `user`.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    product = models.ForeignKey(Product, on_delete=models.CASCADE)
    amount = models.IntegerField(default=0)
    def __str__(self):
        return "User {0} Product {1} Amount {2}".format(self.user, self.product.name, self.amount)
class Order(models.Model):
    # A user's order: a set of OrderProduct line items plus lifecycle dates.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    products = models.ManyToManyField(OrderProduct)
    start_date = models.DateTimeField(auto_now_add=True)
    ordered_date = models.DateTimeField()
    ordered = models.BooleanField(default=False)
    def __str__(self):
        # BUG(review): `self.products` is a ManyToMany manager and has no
        # `.name` attribute, so the .format() call below raises
        # AttributeError whenever this model is stringified; it should
        # iterate self.products.all() instead.
        return "User {0} Products {1} Start Date {2} Ordered Date {3} Ordered {4}"\
.format(self.user, self.products.name, self.start_date, self.ordered_date, self.ordered) | [
"noreply@github.com"
] | stanyu2013.noreply@github.com |
799385a40aad50c59c9dbece7f0c6505f3b5acd8 | b7f6cfda4fe68bde9d3896c5f29190c496b426d4 | /c11/a4/A4/pca/pca.py | fac16ec9745549148b21ef66d70e18deed3f52b0 | [] | no_license | aribshaikh/UofT-Projects | bca513e153fa30860c832fe9af48409ac79a9433 | 97c5b3ed782da53e4ce49c7526a6e47ad5bfc5ee | refs/heads/main | 2023-03-06T20:06:59.927356 | 2021-02-16T19:08:25 | 2021-02-16T19:08:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,164 | py | """
CSCC11 - Introduction to Machine Learning, Fall 2020, Assignment 4
B. Chan, S. Wei, D. Fleet
"""
import matplotlib.pyplot as plt
import numpy as np
import os
class PCA:
    def __init__(self, Y):
        """ This class represents PCA with components and mean given by data.

        For the following:
        - N: Number of samples.
        - D: Dimension of observation features.
        - K: Dimension of state features.
        NOTE: K >= 1

        Args:
        - Y (ndarray (shape: (D, N))): A DxN matrix consisting N D-dimensional observation data.
        """
        self.D = Y.shape[0]
        # Mean of each feature (row), kept as a column vector, shape: (D, 1).
        self.mean = np.mean(Y, axis=1, keepdims=True)
        self.V, self.w = self._compute_components(Y)

    def _compute_components(self, Y):
        """ This method computes the PCA directions (one per column) given data.

        Args:
        - Y (ndarray (shape: (D, N))): A DxN matrix consisting N D-dimensional observation data.

        Output:
        - V (ndarray (shape: (D, D))): The matrix of PCA directions (one per column) sorted in descending order.
        - w (ndarray (shape: (D, ))): The vector of eigenvalues corresponding to the eigenvectors.
        """
        assert len(Y.shape) == 2, f"Y must be a DxN matrix. Got: {Y.shape}"
        (D, N) = Y.shape
        data_shifted = Y - self.mean
        data_cov = np.cov(data_shifted)
        # Numpy collapses the ndarray into a scalar when the output size is 1
        # (fixed the truncated comment); re-wrap so eigh sees a 1x1 matrix.
        if D == 1:
            data_cov = np.array([[data_cov]])
        # eigh returns eigenvalues in ascending order; flip values and the
        # matching eigenvector columns to get descending order.
        w, V = np.linalg.eigh(data_cov)
        w = np.flip(w)
        V = np.flip(V, axis=1)
        assert V.shape == (D, D), f"V shape mismatch. Expected: {(D, D)}. Got: {V.shape}"
        return V, w

    def inference(self, Y, K):
        """ This method estimates state data X from observation data Y using the precomputed mean and components.

        Args:
        - Y (ndarray (shape: (D, N))): A DxN matrix consisting N D-dimensional observation data.
        - K (int): Number of dimensions for the state data.

        Output:
        - X (ndarray (shape: (K, N))): The estimated state data.
        """
        assert len(Y.shape) == 2, f"Y must be a DxN matrix. Got: {Y.shape}"
        (D, N) = Y.shape
        assert D > 0, f"dimensionality of observation representation must be at least 1. Got: {D}"
        assert K > 0, f"dimensionality of state representation must be at least 1. Got: {K}"
        # Project the centered data onto the top-K principal directions.
        X = self.V[:, :K].T @ (Y - self.mean)
        assert X.shape == (K, N), f"X shape mismatch. Expected: {(K, N)}. Got: {X.shape}"
        return X

    def reconstruct(self, X):
        """ This method estimates observation data Y from state data X using the precomputed mean and components.
        NOTE: The K is implicitly defined by X.

        Args:
        - X (ndarray (shape: (K, N))): A KxN matrix consisting N K-dimensional state (subspace) data.

        Output:
        - Y (ndarray (shape: (D, N))): A DxN matrix consisting N D-dimensional reconstructed observation data.
        """
        assert len(X.shape) == 2, f"X must be a KxN matrix. Got: {X.shape}"
        (K, N) = X.shape
        assert K > 0, f"dimensionality of state representation must be at least 1. Got: {K}"
        D = self.mean.shape[0]
        # Map back through the top-K directions and undo the centering.
        Y = self.V[:, :K] @ X + self.mean
        assert Y.shape == (D, N), f"Y shape mismatch. Expected: {(D, N)}. Got: {Y.shape}"
        return Y

    def plot_eigenvalues(self, savefig=False):
        """ This function plots the eigenvalues captured by each subspace dimension from 1 to D.

        Output:
        - eigenvalues (ndarray (shape: (D,))): D-column vector corresponding to the eigenvalues captured by each subspace dimension.
        """
        # Eigenvalues are already sorted in descending order by rank.
        eigenvalues = self.w
        plt.plot(eigenvalues)
        plt.ylabel('Value')
        plt.xlabel('Rank')
        plt.title("Eigenvalues")
        if savefig:
            if not os.path.isdir("results"):
                os.mkdir("results")
            plt.savefig(f"results/eigenvalues.eps", format="eps")
        else:
            plt.show()
        plt.clf()
        assert eigenvalues.shape == (self.D,), f"eigenvalues shape mismatch. Expected: {(self.D,)}. Got: {eigenvalues.shape}"
        return eigenvalues

    def plot_subspace_variance(self, savefig=False):
        """ This function plots the fractions of the total variance in the data from 1 to D.
        NOTE: Include the case when K=0.

        Output:
        - fractions (ndarray (shape: (D + 1,))): vector of the cumulative fraction of total variance captured by the first K components, for K = 0..D.
          (Docstring fixed: the original claimed shape (D,) while the assert below requires (D + 1,).)
        """
        # Vectorized with cumsum (replaces the manual Python loop); the
        # leading 0 covers the K=0 case.
        fractions = np.concatenate(([0.0], np.cumsum(self.w) / np.sum(self.w)))
        plt.plot(fractions)
        plt.ylabel('Fraction')
        plt.xlabel('K')
        plt.title("Fractions of Total Variance")
        if savefig:
            if not os.path.isdir("results"):
                os.mkdir("results")
            plt.savefig(f"results/fraction_variance.eps", format="eps")
        else:
            plt.show()
        plt.clf()
        assert fractions.shape == (self.D + 1,), f"fractions shape mismatch. Expected: {(self.D + 1,)}. Got: {fractions.shape}"
        return fractions
if __name__ == "__main__":
    # Smoke test: three identical copies of the ramp -5..5, so the data is
    # rank 1 and a single principal component reconstructs it exactly.
    Y = np.arange(11)[None, :] - 5
    Y = np.vstack((Y, Y, Y))
    print(f"Original observations: \n{Y}")
    test_pca = PCA(Y)
    print(f"V: \n{test_pca.V}")
    est_X = test_pca.inference(Y, 1)
    print(f"Estimated states: \n{est_X}")
    est_Y = test_pca.reconstruct(est_X)
    print(f"Estimated observations from estimated states: \n{est_Y}")
| [
"jeffersonli.li@mail.utoronto.ca"
] | jeffersonli.li@mail.utoronto.ca |
b3a0e01023c3bbe4e2be9de0b8979ca581c9ca10 | c6e9160522cc0014198320b0ccabbd11c9b55fe8 | /ex12.py | 25f0c415a166e80879db1eb06ea2f6a8ff22117d | [] | no_license | Girbons/LearnPythonTheHardWay | 256c45d094853e1ada0e664d8833ad1fab5bc1a5 | 77d1598a1d36d2ebf503eaeb4ed249c995b592fc | refs/heads/master | 2020-04-11T13:53:20.856074 | 2014-07-02T13:42:48 | 2014-07-02T13:42:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 212 | py | from sys import argv
script, first, second, third = argv
# Echo the script name and its three required CLI arguments (Python 2;
# the unpacking above raises ValueError unless exactly three are given).
print "The script is called:",script
print "your first variable is:",first
print "your second variable is:",second
print "your third variable is:",third
| [
"alessandrodea22@gmail.com"
] | alessandrodea22@gmail.com |
062f91b723b8c7772435b58db9b06d79e8559fab | f387405b9158701e1d5dabdd252ad9efaf108e5e | /tasks/__init__.py | bca084a6157dfea9c6ed8f30172efaf43a4efae2 | [] | no_license | ivov160/diary_parser | bdea5ca2cba306279cfe63f7400439c62d34d3cb | bfddc3d63f0e88dd306ba2956a81c88865c3ff68 | refs/heads/master | 2021-01-22T11:29:25.291687 | 2017-06-03T09:35:00 | 2017-06-03T09:35:00 | 92,704,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | import tasks.diary_posts
import tasks.file_writer
import tasks.post_filter
import tasks.data_extractor
import tasks.bruteforce_user_id
| [
"ivov160@gmail.com"
] | ivov160@gmail.com |
7937ed53104fe047714bf6e587ccd85bf22f019c | 0437ec3526cc39af1d8d87c2e3c0928b9740e7b9 | /Node.py | 19436774365bebf7a66124db81ab1caa08a93e7e | [] | no_license | wkcn/Flow | b5b2c15a72e2407fcce3e8d2535705acf9f11bb1 | 461b8c181b8bca68c41bb69d20e2a0083596cef9 | refs/heads/master | 2021-06-07T07:37:49.633042 | 2016-09-11T14:55:19 | 2016-09-11T14:55:19 | 67,905,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | #coding=utf-8
class Node:
    """A 2-D graph node with a draw offset, adjacency lists and a colour flag."""
    def __init__(self, x, y):
        # Position and per-node drawing offset.
        self.x, self.y = x, y
        self.ox, self.oy = 0, 0
        # Adjacency bookkeeping, populated after construction.
        self.edges = []
        self.neighbors = []
        # Two-colour (red/green) toggle, off by default.
        self.redgreen = False
| [
"wkcn@live.cn"
] | wkcn@live.cn |
56c8e38291e2283bbfa53a777a7f7bb3664739d2 | 2924267d4cd143ba4638cb0234b0ad4ec1551ae3 | /lib/tournament-handler/sns.py | 49c607031c99321eea99b97471487e3b6cc520d0 | [] | no_license | joshjiang/smashgg-offline-tourney-notifier | d61c42deb78d290251d4fc092b676e68832e747d | aa1aa4643e0510615794cf331ad2c3fca8d76e86 | refs/heads/master | 2023-07-04T18:59:41.029916 | 2021-08-26T15:01:29 | 2021-08-26T15:01:29 | 390,794,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 994 | py | class SnsTopic(object):
    def __init__(self, sns_topic):
        # `sns_topic` is expected to expose list_topics()/publish() —
        # presumably a boto3 SNS client; confirm against the caller.
        self._topic = sns_topic
        self._phone = '8287354503'  # NOTE(review): unused in this class; hard-coded number.
def publish_message(self, tournament_data, tournament_link):
"""Get the topic from the AWS account."""
# Return an error if we don't have topics
try:
topics = self._topic.list_topics()
response = self._topic.publish(
TopicArn='arn:aws:sns:us-east-1:476815464521:SmashggOfflineTourneyNotifierStack-TournamentTopic664DE0FD-1MLFLG4XAC31M',
Message=f'smash.gg link: {tournament_link}',
Subject='New NYC Tournament Added',
MessageStructure='string',
MessageAttributes={
'tournament': {
'DataType': 'String',
'StringValue': 'true'
}
})
return response
except Exception as e:
print(e)
return e
| [
"josh.jiang.dbf@gmail.com"
] | josh.jiang.dbf@gmail.com |
deec09e0baf2531114f192fdb1aba714d03af881 | 2a266dda00578ea177b231e8f0dfd14a1824d2e6 | /pw_ls/pw_ls_AB/test_decompress.py | 1ad25097f68c7ea5778186d55d9da1735b9235dd | [] | no_license | sanskrit-lexicon/PWK | fbb51c19d9169e4c28d5c9056484c4a53def78eb | 57d07725b828a95b22b859422287474bfd858ffe | refs/heads/master | 2023-08-17T04:32:37.387691 | 2023-08-15T18:34:46 | 2023-08-15T18:34:46 | 15,903,957 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | #-*- coding:utf-8 -*-
"""make_numberchange2b.py
"""
from __future__ import print_function
import sys, re,codecs
from make_numberchange2b import lsnumstr_to_intseq, decompress
if __name__=="__main__":
x = sys.argv[1]
seq,flag = lsnumstr_to_intseq(x)
print(flag,seq)
if flag:
d,flag1 = decompress(seq)
print(flag1,d)
| [
"funderburkjim@gmail.com"
] | funderburkjim@gmail.com |
4a68e36b7f42290f625877fa13ce183047161721 | 303e1025fa6b5a7ae9b13285d82fbd1cc1d67c23 | /noweirds_list/settings.py | 2ba5f51178d74b74d1cec98ac9b1920888684eef | [] | no_license | technoweirds/craigslist-clone | 65d501bc892ee40fde8b585871d2c80258bfa06e | b9eb93bb55b0146c22063c2cc91742d9cb261499 | refs/heads/master | 2023-03-08T01:11:30.756298 | 2021-02-17T17:23:35 | 2021-02-17T17:23:35 | 298,990,537 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,415 | py | """
Django settings for noweirds_list project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR = os.path.join(BASE_DIR,'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'pegwbxfinps42v(%u2*m_eh41i6solc(b-r-8uv7r8f8637!fu'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'my_app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'noweirds_list.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'noweirds_list.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(os.path.dirname(__file__), 'test.db'),
'TEST_NAME': os.path.join(os.path.dirname(__file__), 'test.db'),
},
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTHENTICATION_BACKENDS = (
('django.contrib.auth.backends.ModelBackend'),
)
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR,'static'),)
| [
"technoweirds@gmail.com"
] | technoweirds@gmail.com |
3c6efaa9740b328d1508fc75df89820d4fa4ed29 | 7c01cd1df700a68965a22a041fcf0425fb5b8d2e | /api/tacticalrmm/apiv3/urls.py | 934da836b1079809c5346d405b12aac2207b14af | [
"MIT"
] | permissive | socmap/tacticalrmm | 61de15244c61edfb343314bd9e7d832b473df38e | 72d55a010b8a55583a955daf5546b21273e5a5f0 | refs/heads/master | 2023-03-17T23:50:37.565735 | 2021-03-05T23:05:17 | 2021-03-05T23:05:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 983 | py | from django.urls import path
from . import views
urlpatterns = [
path("checkrunner/", views.CheckRunner.as_view()),
path("<str:agentid>/checkrunner/", views.CheckRunner.as_view()),
path("<str:agentid>/checkinterval/", views.CheckRunnerInterval.as_view()),
path("<int:pk>/<str:agentid>/taskrunner/", views.TaskRunner.as_view()),
path("meshexe/", views.MeshExe.as_view()),
path("sysinfo/", views.SysInfo.as_view()),
path("newagent/", views.NewAgent.as_view()),
path("software/", views.Software.as_view()),
path("installer/", views.Installer.as_view()),
path("checkin/", views.CheckIn.as_view()),
path("syncmesh/", views.SyncMeshNodeID.as_view()),
path("choco/", views.Choco.as_view()),
path("winupdates/", views.WinUpdates.as_view()),
path("superseded/", views.SupersededWinUpdate.as_view()),
path("<int:pk>/chocoresult/", views.ChocoResult.as_view()),
path("<str:agentid>/recovery/", views.AgentRecovery.as_view()),
]
| [
"dcparsi@gmail.com"
] | dcparsi@gmail.com |
21d471ed05699c506dff30c3733bd9c9159ab7a8 | 3e99efdbcf1a3839677bc4f444d46791e4efc408 | /main/migrations/0007_auto__del_error__add_errortype__add_field_uidstatus_error__add_field_u.py | 56e59123e9a914c266f45f4da72c9f044ff1a82b | [] | no_license | policy-innovations/survey-tracker | 226865b6e8ba320f1df71e04466390697628c42c | 78dea7b588da00a817e225679e4b844f55633b4b | refs/heads/master | 2020-06-04T04:29:54.222541 | 2011-07-20T13:53:55 | 2011-07-20T13:53:55 | 1,813,228 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,443 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Error'
db.delete_table('main_error')
# Adding model 'ErrorType'
db.create_table('main_errortype', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('parent', self.gf('mptt.fields.TreeForeignKey')(blank=True, related_name='suberrors', null=True, to=orm['main.ErrorType'])),
('lft', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('rght', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('tree_id', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('level', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
))
db.send_create_signal('main', ['ErrorType'])
# Adding field 'UIDStatus.error'
db.add_column('main_uidstatus', 'error', self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['main.ErrorType'], blank=True), keep_default=False)
# Adding field 'UIDStatus.details'
db.add_column('main_uidstatus', 'details', self.gf('django.db.models.fields.TextField')(default='', blank=True), keep_default=False)
def backwards(self, orm):
# Adding model 'Error'
db.create_table('main_error', (
('lft', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('details', self.gf('django.db.models.fields.TextField')(blank=True)),
('parent', self.gf('mptt.fields.TreeForeignKey')(related_name='suberrors', null=True, to=orm['main.Error'], blank=True)),
('level', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('tree_id', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('rght', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal('main', ['Error'])
# Deleting model 'ErrorType'
db.delete_table('main_errortype')
# Deleting field 'UIDStatus.error'
db.delete_column('main_uidstatus', 'error_id')
# Deleting field 'UIDStatus.details'
db.delete_column('main_uidstatus', 'details')
models = {
'main.errortype': {
'Meta': {'object_name': 'ErrorType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'suberrors'", 'null': 'True', 'to': "orm['main.ErrorType']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'main.project': {
'Meta': {'object_name': 'Project'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'main.role': {
'Meta': {'object_name': 'Role'},
'head': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'subordinate'", 'null': 'True', 'to': "orm['main.Role']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Project']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'main.uidstatus': {
'Meta': {'object_name': 'UIDStatus'},
'details': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'error': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.ErrorType']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Project']"}),
'responsibles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Role']", 'symmetrical': 'False'}),
'uid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
}
}
complete_apps = ['main']
| [
"crodjer@gmail.com"
] | crodjer@gmail.com |
9e216c94f1b8abb2b98946061ab509f95cc8bb88 | aa8507007b4b1a8055da9161596714ed1f9c431c | /broad_crawler/broad/settings.py | 4c9710eb38fb48c262b7e26373350f8b1f28d60e | [] | no_license | chensian/Broad_Crawler | 7044ee599fd6d3c81454b1c5393405243ad33d2c | dffeb25485dc8e301c970c9688567214b0040fce | refs/heads/master | 2021-05-25T09:18:04.716241 | 2020-10-20T04:28:54 | 2020-10-20T04:28:54 | 126,963,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,037 | py | # -*- coding: utf-8 -*-
# Scrapy settings for broad project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'broad'
SPIDER_MODULES = ['broad.spiders']
NEWSPIDER_MODULE = 'broad.spiders'
# Enables scheduling storing requests queue in redis.
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
# Ensure all spiders share same duplicates filter through redis.
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
# REDIS_HOST = '192.168.5.203'
# REDIS_PORT = 6379
REDIS_URL = 'redis://192.168.5.203:6379'
FILTER_URL = 'redis://192.168.5.203:6379'
# FILTER_HOST = 'localhost'
# FILTER_PORT = 6379
"""
这是去重队列的Redis信息。
原先的REDIS_HOST、REDIS_PORT只负责种子队列;由此种子队列和去重队列可以分布在不同的机器上。
"""
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
DOWNLOAD_HANDLERS = {'S3': None,}
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 100
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 5
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)COOKIES_ENABLED = False
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'broad.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
# 'broad.middlewares.MyCustomDownloaderMiddleware': 543,
'broad.middleware.UserAgentMiddleware': 200,
}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
# 'broad.pipelines.MySQLStorePipeline': 300,
# 'broad.pipelines.SQLiteStorePipeline': 300,
# 'scrapy_redis.pipelines.RedisPipeline': 300,
# 'broad.pipelines.BroadPipeline': 300,
# 'broad.pipelines.BroadImagesPipeline': 400,
# 'scrapy.pipelines.images.ImagesPipeline': 1,
"broad.pipelines.MongoDBPipeline": 403,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# Broad Crawl Setting
CONCURRENT_REQUESTS = 100
REACTOR_THREADPOOL_MEAXSIZE = 20
LOG_LEVEL = 'INFO'
COOKIES_ENABLED = False
RETRY_ENABLED = False
DOWNLOAD_TIMEOUT = 15
# REDIRECT_ENABLED = False
AJAXCRAWL_ENABLED = True
# project customs setting
#!/usr/bin/env bash
# redis-cli -h 192.168.5.203 -n 2 lpush start_url https://finance.sina.com.cn
# start_url
# https://www.itjuzi.com/ 1
# https://finance.sina.com.cn 2
# https://finance.yahoo.com/ 3
# http://www.eastmoney.com/ 4
# https://xueqiu.com/ 5
# http://www.p5w.net/ 6
REDIS_PARAMS = {
# 'password' : 'xxxxxx',
# 'db': 1
'db': 2
# 'db': 3
# 'db': 4
# 'db': 6
}
FILTER_DB = 2
# POSTIFX = 'itjuzi.com'
POSTIFX = 'finance.sina.com.cn'
# POSTIFX = 'guba.sina.com.cn'
# POSTIFX = 'finance.yahoo.com'
# POSTIFX = 'eastmoney.com'
# POSTIFX = 'p5w.net'
# MONGO_DB_NAME = 'ITJUZI'
MONGO_DB_NAME = 'SINA'
# MONGO_DB_NAME = 'SINA_GUBA'
# MONGO_DB_NAME = 'YAHOO'
# MONGO_DB_NAME = 'EASTMONEY'
# MONGO_DB_NAME = 'P5W' | [
"1217873870@qq.com"
] | 1217873870@qq.com |
b3010fd68667fb943f916bb0bc8359acdf954995 | b607957ffc579922db21e28aa7dc909088493e1e | /tools_python2.py | e873c02470c47cdef5bf14f3f1a4f35ce9b07609 | [] | no_license | Hyyudu/PolyGen | f4a13f667e37e8bc844f43e13fc33e15bbee320f | 32e2d03696612fce7196f0641cc546fb296b7515 | refs/heads/master | 2021-01-10T17:47:46.761692 | 2016-10-01T19:54:09 | 2016-10-01T19:54:09 | 48,236,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37 | py | # coding=utf-8
from tools import *
| [
"hyyudu@gmail.com"
] | hyyudu@gmail.com |
f49e1dbe8c810a08a570e388f162a8d1c59fb1ad | ec4b8410109f5b5c327bd1e38ca72af3642e28f4 | /tf-Faster-RCNN/Networks/proposal_layer.py | 84d679302bf5fbd6a880100cbe8c8328c7cde2aa | [] | no_license | David-webb/Faster-Rcnn | c0d5ac4fd4c5a2a42f84f9bfbf51c802e8aa61b7 | 34721a8f19fdd39835611603e826ae2ccb7b8f37 | refs/heads/master | 2022-10-01T00:03:47.146419 | 2022-08-05T03:16:34 | 2022-08-05T03:16:34 | 72,858,901 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,701 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 2 19:25:41 2017
@author: Kevin Liang (modifications)
Proposal Layer: Applies the Region Proposal Network's (RPN) predicted deltas to
each of the anchors, removes unsuitable boxes, and then ranks them by their
"objectness" scores. Non-maximimum suppression removes proposals of the same
object, and the top proposals are returned.
Adapted from the official Faster R-CNN repo:
https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/rpn/proposal_layer.py
"""
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------
import numpy as np
import tensorflow as tf
from Lib.bbox_transform import bbox_transform_inv, clip_boxes
from Lib.faster_rcnn_config import cfg
from Lib.generate_anchors import generate_anchors
from Lib.nms_wrapper import nms
def proposal_layer(rpn_bbox_cls_prob, rpn_bbox_pred, im_dims, cfg_key, _feat_stride, anchor_scales):
return tf.reshape(tf.py_func(_proposal_layer_py,[rpn_bbox_cls_prob, rpn_bbox_pred, im_dims[0], cfg_key, _feat_stride, anchor_scales], [tf.float32]),[-1,5])
def _proposal_layer_py(rpn_bbox_cls_prob, rpn_bbox_pred, im_dims, cfg_key, _feat_stride, anchor_scales):
'''
# Algorithm:
#
# for each (H, W) location i
# generate A anchor boxes centered on cell i
# apply predicted bbox deltas at cell i to each of the A anchors
# clip predicted boxes to image
# remove predicted boxes with either height or width < threshold
# sort all (proposal, score) pairs by score from highest to lowest
# take top pre_nms_topN proposals before NMS
# apply NMS with threshold 0.7 to remaining proposals
# take after_nms_topN proposals after NMS
# return the top proposals (-> RoIs top, scores top)
'''
_anchors = generate_anchors(scales=np.array(anchor_scales))
_num_anchors = _anchors.shape[0]
rpn_bbox_cls_prob = np.transpose(rpn_bbox_cls_prob,[0,3,1,2]) # shape = (n,18,h,w,)
rpn_bbox_pred = np.transpose(rpn_bbox_pred,[0,3,1,2]) # shape = (n,36,h,w)
# Only minibatch of 1 supported
assert rpn_bbox_cls_prob.shape[0] == 1, \
'Only single item batches are supported'
if cfg_key == 'TRAIN':
pre_nms_topN = cfg.TRAIN.RPN_PRE_NMS_TOP_N
post_nms_topN = cfg.TRAIN.RPN_POST_NMS_TOP_N
nms_thresh = cfg.TRAIN.RPN_NMS_THRESH
min_size = cfg.TRAIN.RPN_MIN_SIZE
else: # cfg_key == 'TEST':
pre_nms_topN = cfg.TEST.RPN_PRE_NMS_TOP_N
post_nms_topN = cfg.TEST.RPN_POST_NMS_TOP_N
nms_thresh = cfg.TEST.RPN_NMS_THRESH
min_size = cfg.TEST.RPN_MIN_SIZE
# the first set of _num_anchors channels are bg probs
# the second set are the fg probs, which we want
# rpn_box_cls_prob,shape = (n,18,h,w), 也就是softmax之后的特征图各像素点的anchors的打分矩阵,其第二维18的前
# 9个元素是记录的是背景图的概率,后9个元素记录的是前景图的概率???? 这里没有详细理解。。。。。。??????
scores = rpn_bbox_cls_prob[:, _num_anchors:, :, :] # shape = (n,9,h,w)
bbox_deltas = rpn_bbox_pred
# 1. Generate proposals from bbox deltas and shifted anchors
height, width = scores.shape[-2:]
# Enumerate all shifts
shift_x = np.arange(0, width) * _feat_stride
shift_y = np.arange(0, height) * _feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel())).transpose()
# Enumerate all shifted anchors:
#
# add A anchors (1, A, 4) to
# cell K shifts (K, 1, 4) to get
# shift anchors (K, A, 4)
# reshape to (K*A, 4) shifted anchors
A = _num_anchors
K = shifts.shape[0]
anchors = _anchors.reshape((1, A, 4)) + \
shifts.reshape((1, K, 4)).transpose((1, 0, 2))
anchors = anchors.reshape((K * A, 4))
# Transpose and reshape predicted bbox transformations to get them
# into the same order as the anchors:
#
# bbox deltas will be (1, 4 * A, H, W) format
# transpose to (1, H, W, 4 * A)
# reshape to (1 * H * W * A, 4) where rows are ordered by (h, w, a)
# in slowest to fastest order
bbox_deltas = bbox_deltas.transpose((0, 2, 3, 1)).reshape((-1, 4))
# Same story for the scores:
#
# scores are (1, A, H, W) format
# transpose to (1, H, W, A)
# reshape to (1 * H * W * A, 1) where rows are ordered by (h, w, a)
scores = scores.transpose((0, 2, 3, 1)).reshape((-1, 1))
# Convert anchors into proposals via bbox transformations
# 使用回归量对anchors进行初步修正
proposals = bbox_transform_inv(anchors, bbox_deltas)
# 2. clip predicted boxes to image
# 对proposals进行裁剪修正,确定都在原图的边界内
proposals = clip_boxes(proposals, im_dims)
# 3. remove predicted boxes with either height or width < threshold
keep = _filter_boxes(proposals, min_size)
proposals = proposals[keep, :]
scores = scores[keep]
# 4. sort all (proposal, score) pairs by score from highest to lowest
# 5. take top pre_nms_topN (e.g. 6000)
order = scores.ravel().argsort()[::-1] # argsort返回的升序排序的元素索引,后面的[::-1]实现逆序,也就是降序排序
if pre_nms_topN > 0:
order = order[:pre_nms_topN]
proposals = proposals[order, :]
scores = scores[order]
# 6. apply nms (e.g. threshold = 0.7)
# 7. take after_nms_topN (e.g. 300)
# 8. return the top proposals (-> RoIs top)
# 使用非极大值抑制(NMS)对proposals进行进一步筛选
keep = nms(np.hstack((proposals, scores)), nms_thresh)
if post_nms_topN > 0:
keep = keep[:post_nms_topN]
proposals = proposals[keep, :]
scores = scores[keep]
# Output rois blob
# Our RPN implementation only supports a single input image, so all
# batch inds are 0
batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)
blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))
return blob
def _filter_boxes(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
ws = boxes[:, 2] - boxes[:, 0] + 1
hs = boxes[:, 3] - boxes[:, 1] + 1
keep = np.where((ws >= min_size) & (hs >= min_size))[0]
return keep
| [
"471435549@qq.com"
] | 471435549@qq.com |
a27ff143e1c2637535d9b42770bf576b53aaed96 | f2f03da38885dc63e61b516e9fa5d3bd2ddd123a | /CO1/leapyr2.py | 1c1b146ca1df96ec1e67de16032eef66f34942dd | [] | no_license | shahanavp/python | 441ff7168a63626633d1ca84e7cfe64d4a8ac818 | 305cf3c91e14ea8913edefb68770bf238d6916ab | refs/heads/main | 2023-03-25T14:48:26.767215 | 2021-03-24T17:16:14 | 2021-03-24T17:16:14 | 347,891,733 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | yr1=int(input("enter the current cyear"))
yrn=int(input("enter last year"))
for i in range(yr1,yrn+1):
if (i%4==0 or i%100==0 or i%400==0):
print(i)
i=i+1
| [
"noreply@github.com"
] | shahanavp.noreply@github.com |
b6f4646277645361f9fc6e70434c49499d8d65c8 | d76b83351c578ae62f67d2005264e6f963b8ef9a | /defUFO.py | f12d2acfc801d5aac632edfbe2aec34119d3cfbc | [] | no_license | Eze-NoirFenix/Help | 8397c0003675bbebf07ae979d3bab3f96b002675 | e308ab516e62eaee139d861f09f477723b6ee360 | refs/heads/main | 2023-04-26T23:55:48.602625 | 2021-05-31T13:20:27 | 2021-05-31T13:20:27 | 372,400,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,241 | py | # nave enemiga
import pygame
from pygame.sprite import Sprite # Pygame sprites must have a self.image and a self.rect
class ufoShip(Sprite): # class que representa los enemigos
def __init__(self, defSettings, Image): # deja la nave en su posicion inicial
super(ufoShip, self).__init__()
self.img = Image
self.defSettings = defSettings
# carga la imagen del enemigo recto
self.image = pygame.image.load('image/defUFO.bmp')
self.rect = self.image.get_rect()
# inicia el enemigo arriba
self.rect.x = self.rect.width
self.rect.y = self.rect.height
# almacena el enemigo en una posicion exacta
self.x = float(self.rect.x)
def blitme(self):
# dibuja el enemigo
self.img.blit(self.image, self.rect)
def chkEdge(self):
# vuelve al enemigo al borde de pantalla
rImage = self.image.get_rect()
if self.rect.right >= rImage.right:
return True
elif self.rect.left <= 0:
return True
def update(self):
# move UFO derecha o izquierda
self.x += (self.defSettings.dspfleetUFO *
self.defSettings.dirfleetUFO)
self.rect.x = self.x
| [
"noreply@github.com"
] | Eze-NoirFenix.noreply@github.com |
32370305956bdaa9a3226650e42697ee227b1f90 | 9ac405635f3ac9332e02d0c7803df757417b7fee | /cotizaciones/migrations/0042_auto_20191019_0954.py | 236d57369a78000a98643a41cf309646161b8d74 | [] | no_license | odecsarrollo/07_intranet_proyectos | 80af5de8da5faeb40807dd7df3a4f55f432ff4c0 | 524aeebb140bda9b1bf7a09b60e54a02f56fec9f | refs/heads/master | 2023-01-08T04:59:57.617626 | 2020-09-25T18:01:09 | 2020-09-25T18:01:09 | 187,250,667 | 0 | 0 | null | 2022-12-30T09:36:37 | 2019-05-17T16:41:35 | JavaScript | UTF-8 | Python | false | false | 838 | py | # Generated by Django 2.2.6 on 2019-10-19 14:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cotizaciones', '0041_cotizacion_revisada'),
]
operations = [
migrations.AlterField(
model_name='cotizacion',
name='estado',
field=models.CharField(choices=[('Cita/Generación Interés', 'Cita/Generación Interés'), ('Configurando Propuesta', 'Configurando Propuesta'), ('Cotización Enviada', 'Cotización Enviada'), ('Evaluación Técnica y Económica', 'Evaluación Técnica y Económica'), ('Aceptación de Terminos y Condiciones', 'Aceptación de Terminos y Condiciones'), ('Cierre (Aprobado)', 'Cierre (Aprobado)'), ('Aplazado', 'Aplazado'), ('Cancelado', 'Cancelado')], max_length=200, null=True),
),
]
| [
"fabio.garcia.sanchez@gmail.com"
] | fabio.garcia.sanchez@gmail.com |
e2533c04d73ecafbadbef614614cc79f9af8fafb | 25541c41f3d0ee71f44c8a0c917b790a077c144f | /bibliography/migrations/0009_auto_20170224_2035.py | 7016b00211fb427cf1e5463078c106b607ae1a6d | [] | no_license | gorarakelyan/armtreebank | 4da98d126a0e95ec7f4a04f70e0bbefea0bb3ed2 | 798b824969962083b82593c9d0bdfe59259fd2b6 | refs/heads/master | 2021-01-19T20:41:54.562374 | 2017-06-10T15:21:44 | 2017-06-10T15:21:44 | 88,538,174 | 0 | 0 | null | 2017-04-17T18:32:34 | 2017-04-17T18:32:34 | null | UTF-8 | Python | false | false | 1,703 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-24 16:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bibliography', '0008_press_link'),
]
operations = [
migrations.AlterField(
model_name='author',
name='birth_date',
field=models.DateField(blank=True, verbose_name=['%Y-%m-%d', '%Y-%m', '%Y', '%m']),
),
migrations.AlterField(
model_name='author',
name='death_date',
field=models.DateField(blank=True, verbose_name=['%Y-%m-%d', '%Y-%m', '%Y', '%m']),
),
migrations.AlterField(
model_name='fiction',
name='text_creation_date',
field=models.DateField(verbose_name=['%Y-%m-%d', '%Y-%m', '%Y', '%m']),
),
migrations.AlterField(
model_name='fiction',
name='text_publication_date',
field=models.DateField(verbose_name=['%Y-%m-%d', '%Y-%m', '%Y', '%m']),
),
migrations.AlterField(
model_name='press',
name='text_publication_date',
field=models.DateField(verbose_name=['%Y-%m-%d', '%Y-%m', '%Y', '%m']),
),
migrations.AlterField(
model_name='textbook',
name='text_creation_date',
field=models.DateField(verbose_name=['%Y-%m-%d', '%Y-%m', '%Y', '%m']),
),
migrations.AlterField(
model_name='textbook',
name='text_publication_date',
field=models.DateField(verbose_name=['%Y-%m-%d', '%Y-%m', '%Y', '%m']),
),
]
| [
"gor19973010@gmail.com"
] | gor19973010@gmail.com |
017d19b97fd8f6aab8a08babe66bec2918da227a | 233928d206e13e068cf8cb5ff7888c9a2d84ad61 | /swea/D5/swea_1242_암호코드스캔.py | 36d01ac84b27da5410b011fd26f7544b5e741c33 | [] | no_license | Jinwoongma/Algorithm | 7f6daa2d3c2c361059c09fb4fe287b1cce4863e2 | 78803f4572f1416451a9f4f31f53b7d653f74d4a | refs/heads/master | 2022-10-07T22:53:20.333329 | 2020-06-07T13:27:47 | 2020-06-07T13:27:47 | 237,114,107 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,455 | py | hcode = {'0':'0000', '1':'0001', '2':'0010', '3':'0011',
'4':'0100', '5':'0101', '6':'0110', '7':'0111',
'8':'1000', '9':'1001', 'A':'1010', 'B':'1011',
'C':'1100', 'D':'1101', 'E':'1110', 'F':'1111'}
scode = {211:0, 221:1, 122:2, 411:3, 132:4, 231:5, 114:6, 312:7, 213:8, 112:9}
TC = int(input())
for tc in range(TC):
R, C = map(int, input().split())
data = [input() for _ in range(R)]
answer = 0
mat = [''] * R
for i in range(R):
for j in range(C):
mat[i] += hcode[data[i][j]]
for i in range(1, len(mat) - 6):
j = C * 4 - 1
while j > 56:
if mat[i][j] == '1' and mat[i - 1][j] == '0':
c = [0] * 8
for k in range(7, -1, -1):
c1, c2, c3 = 0, 0, 0
while mat[i][j] == '1': c3 += 1; j -= 1
while mat[i][j] == '0': c2 += 1; j -= 1
while mat[i][j] == '1': c1 += 1; j -= 1
while mat[i][j] == '0' and k: j -= 1
MIN = min(c1, c2, c3)
c1, c2, c3 = c1 // MIN, c2 // MIN, c3 // MIN
c[k] = scode[100 * c1 + 10 * c2 + c3]
t = 3 * (c[0] + c[2] + c[4] + c[6]) + c[1] + c[3] + c[5] + c[7]
if t % 10 == 0:
answer += sum(c)
j -= 1
print('#{} {}'.format(tc + 1, answer))
| [
"jinwoongma@gmail.com"
] | jinwoongma@gmail.com |
7302f79820ed48a59c38e2c5c86e570ebdcea90f | 9128fa598cc7a3e1d494243b7da26adaed098412 | /distributed_gp/modified_files/run_distributed_gp_test.py | e878b7c48de4f571a005edb70185ccd5004583c8 | [] | no_license | daifanxiang/CityBeat | ff45967f48fc7a65337300fc32cf9f8088471fe3 | 6b7bbb4fc50446f7718dd456e6cd4fcd8082fca3 | refs/heads/master | 2021-01-15T21:19:35.069155 | 2013-04-11T02:27:43 | 2013-04-11T02:27:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,160 | py | import os
from rq import Queue, Connection
#from test import haha
from do_gp import Predict
from redis import Redis
import time
from random import randrange
from data_process import find_photos_given_region
from datetime import timedelta
from datetime import datetime
import pymongo
import calendar
def read_regions():
    """Read region centers from number.csv.

    Each line is expected to carry at least two comma-separated fields
    (latitude, longitude).  Returns the (lat, lng) string pairs in
    reverse file order, as the rest of the pipeline expects.
    """
    regions = []
    # 'with' guarantees the handle is closed (the original leaked it),
    # and strip() removes the trailing newline that used to be left on
    # the longitude field.
    with open('number.csv', 'r') as f:
        for line in f:
            parts = line.strip().split(',')
            regions.append((parts[0], parts[1]))
    regions.reverse()
    return regions
def process_ts(ts):
    """return two results; the first is the start datetime, the second is the list of training data"""
    timestamps = ts.index
    origin = timestamps[0]
    pairs = []
    for stamp in timestamps:
        elapsed = stamp - origin
        # fractional days since the first timestamp
        offset_days = elapsed.days + elapsed.seconds / (24 * 3600.0)
        pairs.append((offset_days, ts[stamp]))
    return origin, pairs
def get_testing(model_update_time, start_time, predict_days):
    """Build hourly prediction points for `predict_days` days after the model update.

    Returns (offsets, align):
    - offsets: fractional days of each hourly point measured from `start_time`;
    - align:   the matching absolute datetimes (model_update_time + 1h, 2h, ...).

    Fixes vs. the original: uses `timedelta.total_seconds()` instead of the
    hand-rolled `seconds + days*86400`, and drops the dead commented-out
    variant that followed the function.
    """
    offsets = []
    align = []
    for i in range(24 * predict_days):
        stamp = model_update_time + timedelta(seconds=3600 * (i + 1))
        delta = stamp - start_time
        offsets.append(delta.total_seconds() / (3600.0 * 24))
        align.append(stamp)
    return offsets, align
def save_to_mongo(result, region, model_update_time):
    """Insert one (time, mu, var) prediction document per entry of `result`
    into the `predict.prediction` collection on host `grande`.

    `result` rows are (datetime, mu, var); `region` is a (lat, lng) pair.
    """
    # NOTE(review): a new connection is opened on every call and never
    # explicitly closed; `pymongo.Connection` is also a long-deprecated API.
    # Consider reusing a single client — confirm against the installed pymongo.
    mongo = pymongo.Connection("grande",27017)
    mongo_db = mongo['predict']
    mongo_collection = mongo_db.prediction
    for r in result:
        t = {'time':r[0], 'mu':float(r[1]), 'var':float(r[2]), 'mid_lat':str(region[0]), 'mid_lng':str(region[1]), 'model_update_time':model_update_time}
        mongo_collection.insert(t)
"""
def fix_time(model_update_time, result_list):
res = []
for i in range(24*predict_days):
res.append( (((cur_utc + timedelta(seconds=3600*(i+1))) - model_update_time).seconds)*1.0/(3600*24) )
"""
def fix_time(start, result_list):
    """re-align the time: turn (day-offset, mu, var) rows into (datetime, mu, var)."""
    return [
        (start + timedelta(days=float(row[0])), float(row[1]), float(row[2]))
        for row in result_list
    ]
def do_align(align, result):
    """Replace each result row's time column with the matching `align` timestamp."""
    return [(stamp, row[1], row[2]) for stamp, row in zip(align, result)]
def main():
    """Fan out one GP-prediction job per region to an rq worker pool, then
    poll for results and persist them to MongoDB.

    Python 2 script. Side effects: Redis queue on host 'tall4', MongoDB
    writes via save_to_mongo, and debug text files under /grad/users/....
    """
    predict_days = 1
    regions = read_regions()
    redis_conn = Redis('tall4')
    q = Queue(connection=redis_conn)
    cnt = 0
    async_results = {}   # job index -> rq AsyncResult
    start_time = []      # per-region series start datetime
    model_update_time = datetime.utcnow()
    for region in regions:
        par = cnt  # NOTE(review): unused
        try:
            ts = find_photos_given_region(region[0], region[1])
        except Exception as e:
            print e
            continue
        start, training = process_ts(ts)
        start_time.append(start)
        #testing = get_testing(model_update_time, ( ts.index[len(ts)-1] - start).days, predict_days)
        testing, align= get_testing(model_update_time, start, predict_days)
        print 'start is ',start
        print 'model_update_time is ',model_update_time
        print 'testing is ',testing
        # enqueue the GP fit/predict job; result kept forever (result_ttl=-1)
        async_results[cnt] = q.enqueue_call( Predict, args = ( training,testing, cnt,), timeout=1720000, result_ttl=-1 )
        # Only for temporal test
        fileName = '/grad/users/kx19/xia/test_tmp/training_time_' + str(cnt) + '.txt'
        fout = open(fileName, 'w')
        fout.write('Before Distributed GP:\n')
        for t in testing:
            fout.write(str(t))
            fout.write('\n')
        fout.close()
        # end for test
        cnt+=1
        # Only for temporal test use
        # NOTE(review): this `break` limits the run to the first region only.
        break
        # end for test
    done = False
    begin_time = time.time()
    time.sleep(2)
    saved_flag = [0]*len(async_results)  # 1 once a job's result is persisted
    while not done:
        print "Time elapsed : ",time.time()-begin_time
        done = True
        for x in range(cnt):
            #print 'checking ',x
            result = async_results[x].return_value
            #print 'check done'
            #print 'res is ',result
            if result is None:
                # job not finished yet -> keep polling
                done = False
                continue
            if saved_flag[x] == 0:
                #result = fix_time(start_time[x], result)
                result = do_align(align, result)
                save_to_mongo(result, regions[x], model_update_time)
                saved_flag[x] = 1
                # Only for temporal test
                fileName = '/grad/users/kx19/xia/test_tmp/training_time_' +str(x) + '.txt'
                fout = open(fileName, 'a')
                fout.write('After Test:\n')
                for i in xrange(len(result)):
                    fout.write(str(result[i][0]))
                    fout.write('\n')
                fout.close()
                # end for test
        time.sleep(0.2)
main()
| [
"oeddyo@gmail.com"
] | oeddyo@gmail.com |
cb2c58b73f1b5eed4677513e52d9a302b7685bc1 | 34cf8f3eb624d7ba65da8ef3099fea0c5d1b42b9 | /control/Client.py | 6d358ab5d0b24bd4198ce3f459b06454b933c50c | [] | no_license | jfwang213/graduate_demo | 7d09058d37da750f3efa02f57d595e5208e5b0a0 | 74dd3ab60b84c5a71a21e2a706926187621dd94a | refs/heads/master | 2020-12-30T10:50:24.244570 | 2011-12-15T08:59:41 | 2011-12-15T08:59:41 | 2,035,956 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,421 | py | #!/usr/bin/env python
from Constants import Constants
from gnuradio import gr, modulation_utils
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import usrp_receive_path, usrp_transmit_path
from transceiver import my_top_block
import struct
class Client(object):
    """GNU Radio USRP client: sends frequency requests and handles replies.

    Python 2 code. Builds a transmit/receive flow graph on construction and
    starts it immediately; packets carry a 2-byte sequence number followed
    by a (src, dst, command) header.
    """
    def __init__(self):
        # Fixed MAC address of this client on the radio link.
        self.macAddr = 1
        # Simulated command-line arguments: 2.4 GHz, RX/TX on side B.
        args = ["-f", "2.4G", "-R", "B", "-T", "B"]
        demods = modulation_utils.type_1_demods()
        mods = modulation_utils.type_1_mods()
        parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
        expert_grp = parser.add_option_group("Expert")
        parser.add_option("-m", "--modulation", type="choice", choices=mods.keys(),
                          default='gmsk',
                          help="Select modulation from: %s [default=%%default]"
                          % (', '.join(mods.keys()),))
        usrp_transmit_path.add_options(parser, expert_grp)
        usrp_receive_path.add_options(parser, expert_grp)
        # Let every available mod/demod register its expert options.
        for mod in mods.values():
            mod.add_options(expert_grp)
        for demod in demods.values():
            demod.add_options(expert_grp)
        (options, args) = parser.parse_args(args)
        # Flow graph with our callback invoked for each received packet.
        self.tb = my_top_block(demods[options.modulation], mods[options.modulation], self.callback, options)
        r = gr.enable_realtime_scheduling()
        if r != gr.RT_OK:
            print 'Warning: Failed to enable realtime scheduling.'
        self.tb.start()
        # Receive statistics and outgoing sequence counters.
        self.n_rcvd = 0
        self.n_right = 0
        self.pkt_no = 0
        self.reqId = 0
        print "client init ok!"
    def callback(self, ok, payload):
        """Per-packet receive hook: count packets and dispatch valid ones."""
        # First 2 bytes are the big-endian packet number.
        (pktno,) = struct.unpack('!H', payload[0:2])
        self.n_rcvd += 1
        if ok:
            self.n_right += 1
            self.dealWithCommand(payload[2:])
        print "ok = %5s pktno = %4d n_rcvd = %4d n_right = %4d" % (
            ok, pktno, self.n_rcvd, self.n_right)
    def dealWithCommand(self, commandContent):
        """Parse a (src, dst, type) command header and act on FreqAssign."""
        srcMac = struct.unpack('!B', commandContent[0:1])[0]
        dstMac = struct.unpack('!B', commandContent[1:2])[0]
        if dstMac != self.macAddr:
            # Not addressed to us — ignore.
            print 'not my package'
            return
        commandType = struct.unpack('!B', commandContent[2:3])[0]
        if commandType == Constants.FreqAssign:
            # 4-byte assigned bandwidth follows the header.
            width = struct.unpack('!I', commandContent[3:7])[0]
            print 'get freqAssign width:', width
    def sendReqPackage(self, width):
        """Send a FreqReq command asking for `width` with a fresh request id."""
        payload = ''
        payload += struct.pack('!BBB', self.macAddr, 0, Constants.FreqReq)
        payload += struct.pack('!II', width, self.reqId)
        self.send_pkt(payload)
        self.reqId += 1
    def send_pkt(self, payload='', eof=False):
        """Prefix the next sequence number and hand the frame to the TX path."""
        self.pkt_no += 1
        payload = struct.pack('!H', self.pkt_no) + payload
        self.tb.txpath.send_pkt(payload, eof)
    def wait(self):
        # Block until the flow graph terminates.
        self.tb.wait()
if __name__ == '__main__':
client = Client()
content = raw_input("input command!\n")
while content != 'E':
if content[0:1] == 'R':
try:
width = int(content[2:])
except ValueError as e:
width = None
if width != None:
client.sendReqPackage(width)
else:
print 'invalid number'
content = raw_input()
server.send_pkt('', True)
print 'send end!'
server.wait()
| [
"jfwang213@gmail.com"
] | jfwang213@gmail.com |
7444b56e9b7ed51e1b6516cfeff32c7b8bda3da1 | 997f6ee474c95a90af7af6294583102fc8c4bf14 | /preprocessing/simplepreprocessor.py | e7bfb9903dea364e8169e70631c08d3ed4a93f00 | [] | no_license | ABnoLecture/Vehicle-registration-number | f2b15fb01593560b2532fe34f20a0f35738229d1 | d3799c73d8cc3c9ab360a8e9204c581d321c7424 | refs/heads/master | 2021-04-06T02:34:40.768441 | 2018-03-26T21:45:38 | 2018-03-26T21:45:38 | 124,590,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | import cv2
class SimplePreprocessor:
    """Resize images to a fixed (width, height), ignoring the aspect ratio."""

    def __init__(self, width, heigth, inter=cv2.INTER_AREA):
        # Store the target size and the OpenCV interpolation method.
        # (parameter name `heigth` kept as-is: it is part of the public API)
        self.width = width
        self.height = heigth
        self.inter = inter

    def preprocess(self, image):
        """Return `image` resized to the configured size; a None input yields None."""
        if image is None:
            return None
        return cv2.resize(image, (self.width, self.height),
                          interpolation=self.inter)
| [
"noreply@github.com"
] | ABnoLecture.noreply@github.com |
f68850fab00b0d8d341b87258702a748a0dddf37 | 458f75da364b2e3cefb945b3a7cdaf2bc12ec356 | /config.py | a27b5c0c402790ea61179ec4a046b3b9abd0f2e4 | [] | no_license | kuruoky/TestProj1 | 32cbaa53a757a74bab21f8b5dc49cb97cb6695c9 | 10b3a8449fd97af18c101d2dff902048f9ad24e4 | refs/heads/master | 2020-04-23T16:43:00.144416 | 2019-02-18T16:04:55 | 2019-02-18T16:04:55 | 171,307,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | #data
import os
# Output / data locations.
logdir = './log'
dataroot = './data' #./data/train/img ./data/train/gt
path_to_train_lmdb_dir = os.path.join(dataroot, 'train.lmdb')
path_to_val_lmdb_dir = os.path.join(dataroot, 'val.lmdb')
path_to_log_dir = logdir
test_img_path = './data/test/img'
result = './result'
# Optimizer / hardware settings.
lr = 0.0001
gpu_ids = [0]
gpu = 1
init_type = 'xavier'  # weight initialization scheme
# Checkpoint resumption: when resume is True, `checkpoint` must point to a file.
resume = False
checkpoint = ''# should be file
train_batch_size_per_gpu = 14
num_workers = 1
# Logging / evaluation cadence (in iterations) and training length.
print_freq = 1
eval_iteration = 50
save_iteration = 50
max_epochs = 1000000
"noreply@github.com"
] | kuruoky.noreply@github.com |
86e9d0a7a6b0e7dc34028e374c0ad1fcaaabcd29 | c80b21fc4ed23d95b4a7a53f16c94411332e0504 | /email_slicer.py | 485bae57b5f4697402c3c3683d3721cc126982cf | [] | no_license | gbanerje/MyRepository | 380fdeec5bfd176568322a1b37a187b8106378ff | 1cb9453f6f78c2d07ee2a3a5d469bb8280602dee | refs/heads/master | 2020-04-11T01:24:14.065048 | 2018-12-12T07:16:48 | 2018-12-12T07:16:48 | 161,413,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | #get email id
# Ask the user for an address and strip surrounding whitespace.
email = input("what is your email address?:" ).strip()
# Locate the separator once; raises ValueError if '@' is missing,
# exactly as the original double-index version did.
at_pos = email.index("@")
# Everything before '@' is the username.
username = email[:at_pos]
print(username)
# Everything after '@' is the domain.
domain = email[at_pos + 1:]
# Assemble and display the summary message.
output = "Your username is {} and domain name is {}".format(username, domain)
print(output)
| [
"gautam.banerjee.ext@nokia.com"
] | gautam.banerjee.ext@nokia.com |
755726a3c93aceffe88779b448d2cf3a2e35ae45 | 9d209c5f2c93a512e83bd0eaee9d8c29750e13d5 | /models/__init__.py | e8229d2b6f7f0915b8abfef6863e8f14964f9c77 | [
"MIT"
] | permissive | SwithinHwong/AUNets-Aff-Wild2 | 396fa51b6541971bdb01e3ff62691958f30c1d7a | c9f99a06c455db249be72acd08b4cc4601259ed3 | refs/heads/master | 2020-12-03T22:11:14.443712 | 2020-01-03T06:00:45 | 2020-01-03T06:00:45 | 231,502,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | # import sys
# sys.path.append('..')
# from ops import *
# from utils import *
| [
"rv.andres10@uniandes.edu.co"
] | rv.andres10@uniandes.edu.co |
a8d0151455b9f1aa60672a576787ac831af8eb55 | e417efe334ec78ee0f079e3e8eb0d2347c3e847b | /config/urls.py | b19f72817c476a566402ef00336fed57078493ce | [] | no_license | TISB-Social-Good/core-server | 7db018c7ac381de6b18d69b4a2c6e5f4a14e729d | d0904695ebee709ec4b4ce43c9955e7f19c6b9ac | refs/heads/main | 2023-08-25T22:29:22.664773 | 2021-11-11T22:08:05 | 2021-11-11T22:08:05 | 421,803,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | from django.urls import path
from config import views
urlpatterns = [
path('signup/', views.signup, name="signup"),
path('activate/<uidb64>/<token>/',views.activate, name='activate'),
] | [
"40730714+sarafraghav@users.noreply.github.com"
] | 40730714+sarafraghav@users.noreply.github.com |
bd44b70dd13d7fac8ccc49cc076a5e40e6a79434 | 591f2520913a1ef99bb7e375e447107978f082d7 | /orm/pracownicy_orm/zapytania_orm.py | f9be1c5847b3ab8b4b216ebb5c61ffb3c29930d9 | [] | no_license | Mery18/gittest | 95805211fe70e3dfee7ac93251911155a13f16bd | ddf9dbd7b0853856c2ba86293cd77e4e26c257ea | refs/heads/master | 2021-01-20T15:07:28.007493 | 2018-04-17T12:10:30 | 2018-04-17T12:10:30 | 82,796,323 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,930 | py | # -*- coding: utf-8 -*-
from peewee import *
# SQLite database file backing all models (use ':memory:' for an in-memory DB).
baza_plik = "pracownicy.sqlite3"
baza = SqliteDatabase(baza_plik) # ':memory:'
class BazaModel(Model): # base class binding every model to the SQLite database
    class Meta:
        database = baza
class Dzial(BazaModel):
    # Department: name and location (siedziba).
    id = IntegerField(primary_key=True)
    nazwa = CharField(null=False)
    siedziba = CharField(null=False)
class Premia(BazaModel):
    # Bonus rate per position; the id doubles as the position name/key.
    id = CharField(primary_key=True)
    premia = DecimalField()
class Pracownik(BazaModel):
    # Employee: links to a position (Premia) and a department (Dzial).
    id = CharField(primary_key=True)
    nazwisko = CharField(null=False)   # surname
    imie = CharField(null=False)       # first name
    stanowisko = ForeignKeyField(Premia, related_name='pracownicy')
    data_zatr = DateField(null=False)  # hire date
    placa = DecimalField(decimal_places=2)
    premia = DecimalField(decimal_places=2, default=0)
    id_dzial = ForeignKeyField(Dzial, related_name='pracownicy')
baza.connect() # nawiązujemy połączenie z bazą
def kwerenda_c():
    """Total salary per department location, ascending by the total.

    Fixes vs. the original:
    - `group_by(dzial.siedziba)` referenced an undefined lowercase name
      (NameError) — the model class is `Dzial`;
    - `.order_by('place').asc()` called `.asc()` on the query object
      (AttributeError) — ascending order belongs on the expression.
    """
    query = (Dzial
             .select(Dzial.siedziba, fn.Sum(Pracownik.placa).alias('place'))
             .join(Pracownik)
             .group_by(Dzial.siedziba)
             .order_by(fn.Sum(Pracownik.placa).asc())
             )
    for obj in query:
        print(obj.siedziba, obj.place)
def kwerenda_d():
    """Department id/name with each of its employees, ordered by department name.

    BUG FIX: `.order_by(Dzial.nazwa).asc()` called `.asc()` on the query
    object (AttributeError); ascending order belongs on the field.
    """
    query = (Pracownik
             .select(Dzial.id, Dzial.nazwa, Pracownik.imie, Pracownik.nazwisko)
             .join(Dzial)
             .order_by(Dzial.nazwa.asc())
             )
    for obj in query:
        print(obj.id_dzial.id, obj.id_dzial.nazwa, obj.imie, obj.nazwisko)
def kwerenda_e():
    """Print each employee with the bonus amount implied by their position's rate."""
    employees = Pracownik.select().join(Premia)
    for emp in employees:
        bonus = emp.placa * emp.stanowisko.premia
        print(emp.imie, emp.nazwisko, emp.stanowisko.id, bonus)
def kwerenda_f():
    """Average salary, grouped by whether the first name ends in 'a'
    (a common Polish-language proxy for gender) — prints two averages.
    """
    query = (Pracownik
             .select(fn.Avg(Pracownik.placa).alias('srednia'))
             .group_by(Pracownik.imie.endswith('a'))
             )
    for obj in query:
        print(obj.srednia)
def kwerenda_g():
    """Print each employee's name, position id and (whole-year) tenure,
    computed as current year minus the hire year extracted in SQL.
    """
    from datetime import datetime
    query = (Pracownik
             .select(Pracownik.imie, Pracownik.nazwisko, Pracownik.stanowisko, Pracownik.data_zatr.year.alias('rok'))
             .join(Premia)
             )
    for obj in query:
        print(obj.imie, obj.nazwisko, obj.stanowisko.id, datetime.now().year - int(obj.rok))
def kwerenda_h():
    """Print each employee's name, position id and department location."""
    rows = Pracownik.select().join(Premia)
    for row in rows:
        print(row.imie, row.nazwisko, row.stanowisko.id, row.id_dzial.siedziba)
kwerenda_h()
def kwerenda_i():
    """Count the number of employees in each department location.

    BUG FIX: the original selected only the aggregate `fn.count(...)` yet
    printed per-employee fields (imie, nazwisko, ...) that are not part of
    the query, and never printed the count it computed. Now the location is
    selected alongside the count and both are printed.
    """
    query = (Pracownik
             .select(Dzial.siedziba, fn.Count(Pracownik.id).alias('ilu'))
             .join(Dzial)
             .group_by(Dzial.siedziba)
             )
    for obj in query:
        print(obj.id_dzial.siedziba, obj.ilu)
#kwerenda_i()
| [
"kl3ag1@komp05.lo1cg.org"
] | kl3ag1@komp05.lo1cg.org |
2d81f3ed38bf8b5a19078da061350ef870f8b0de | 2d9a627b85154a05df5610f5776a8b0874f9febe | /sg/framer.py | b87be04bd4ee1f3deca312656b5af45cb003b9b3 | [] | no_license | LiamBindle/sg-restart-regridder | 6dc76f2cbda1e610e4c1855d89e7d126e128119e | 3b9c13fc344b917f28c3683fc09096e9c82dca85 | refs/heads/master | 2021-07-24T17:26:05.826469 | 2020-01-12T19:59:36 | 2020-01-12T19:59:36 | 215,572,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | import matplotlib.pyplot as plt
import cartopy.crs as ccrs
from sg.figure_axes import FigureAxes
from sg.experiment import Experiment
def plate_carree(experiment: Experiment, coastlines=None):
    """Create a global PlateCarree axes with coastlines, wrapped in FigureAxes.

    `coastlines` holds keyword options forwarded to `ax.coastlines`
    (default: 0.8 pt lines). Fixes the mutable-dict default argument.
    `experiment` is accepted for signature parity with the other framers
    but is not used here.
    """
    if coastlines is None:
        coastlines = {'linewidth': 0.8}
    proj = ccrs.PlateCarree()
    ax = plt.subplot(1, 1, 1, projection=proj)
    ax.set_global()
    ax.coastlines(**coastlines)
    return FigureAxes(ax, proj)
def nearside_perspective(experiment: Experiment):
    """Create a NearsidePerspective axes centered on the experiment grid's
    target point, with coastlines, wrapped in FigureAxes.
    """
    proj = ccrs.NearsidePerspective(experiment.grid.target_lon, experiment.grid.target_lat)
    ax = plt.subplot(1, 1, 1, projection=proj)
    ax.set_global()
    # NOTE(review): linewidth is hard-coded here, unlike plate_carree's
    # `coastlines` parameter — consider making these consistent.
    ax.coastlines(linewidth=0.8)
    figax = FigureAxes(ax, proj)
return figax | [
"liambindle@gmail.com"
] | liambindle@gmail.com |
86bc3e6b0f90f98530e34516fecb380a33c107ad | ffd71350156319a276b16ee47e1b15c26c574f6c | /tests/rule_based_profiler/domain_builder/test_domain_builder.py | 6b873901894c7a9d74b0e31087eb32df6553469f | [
"Apache-2.0"
] | permissive | Tianguistengo/great_expectations | 48a54645e965e8499fbb72946e80478a564961a4 | 17d499f802a01cd2bd1e4a6baa01a4d5ff9c154d | refs/heads/main | 2023-06-09T07:47:50.065818 | 2021-06-23T18:48:27 | 2021-06-23T18:48:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,103 | py | from typing import List, Optional
from ruamel.yaml import YAML
from great_expectations import DataContext
from great_expectations.rule_based_profiler.domain_builder import (
ColumnDomainBuilder,
DomainBuilder,
SimpleSemanticTypeColumnDomainBuilder,
TableDomainBuilder,
)
from great_expectations.rule_based_profiler.domain_builder.domain import Domain
from great_expectations.rule_based_profiler.parameter_builder.parameter_container import (
ParameterContainer,
build_parameter_container_for_variables,
)
yaml = YAML()
# noinspection PyPep8Naming
def test_table_domain_builder(
    alice_columnar_table_single_batch_context,
    table_Users_domain,
):
    """A TableDomainBuilder should yield exactly one table-type Domain."""
    data_context: DataContext = alice_columnar_table_single_batch_context
    domain_builder: DomainBuilder = TableDomainBuilder(
        data_context=data_context,
        batch_request=None,
    )
    domains: List[Domain] = domain_builder.get_domains()
    assert len(domains) == 1
    assert domains == [
        {
            "domain_type": "table",
        }
    ]
    domain: Domain = domains[0]
    # Assert Domain object equivalence.
    assert domain == table_Users_domain
    # Also test that the dot notation is supported properly throughout the dictionary fields of the Domain object.
    assert domain.domain_kwargs.batch_id is None
# noinspection PyPep8Naming
def test_column_domain_builder(
    alice_columnar_table_single_batch_context,
    alice_columnar_table_single_batch,
    column_Age_domain,
    column_Date_domain,
    column_Description_domain,
):
    """A ColumnDomainBuilder should yield one column-type Domain per column
    of the Alice single-batch table (7 columns, fixed batch_id).
    """
    data_context: DataContext = alice_columnar_table_single_batch_context
    # Build profiler variables from the fixture's YAML profiler config.
    profiler_config: str = alice_columnar_table_single_batch["profiler_config"]
    full_profiler_config_dict: dict = yaml.load(profiler_config)
    variables_configs: dict = full_profiler_config_dict.get("variables")
    variables: Optional[ParameterContainer] = None
    if variables_configs:
        variables = build_parameter_container_for_variables(
            variables_configs=variables_configs
        )
    batch_request: dict = {
        "datasource_name": "alice_columnar_table_single_batch_datasource",
        "data_connector_name": "alice_columnar_table_single_batch_data_connector",
        "data_asset_name": "alice_columnar_table_single_batch_data_asset",
    }
    domain_builder: DomainBuilder = ColumnDomainBuilder(
        data_context=data_context,
        batch_request=batch_request,
    )
    domains: List[Domain] = domain_builder.get_domains(variables=variables)
    assert len(domains) == 7
    # One Domain per table column, in table order.
    assert domains == [
        {
            "domain_type": "column",
            "domain_kwargs": {
                "column": "id",
                "batch_id": "cf28d8229c247275c8cc0f41b4ceb62d",
            },
            "details": {},
        },
        {
            "domain_type": "column",
            "domain_kwargs": {
                "column": "event_type",
                "batch_id": "cf28d8229c247275c8cc0f41b4ceb62d",
            },
            "details": {},
        },
        {
            "domain_type": "column",
            "domain_kwargs": {
                "column": "user_id",
                "batch_id": "cf28d8229c247275c8cc0f41b4ceb62d",
            },
            "details": {},
        },
        {
            "domain_type": "column",
            "domain_kwargs": {
                "column": "event_ts",
                "batch_id": "cf28d8229c247275c8cc0f41b4ceb62d",
            },
            "details": {},
        },
        {
            "domain_type": "column",
            "domain_kwargs": {
                "column": "server_ts",
                "batch_id": "cf28d8229c247275c8cc0f41b4ceb62d",
            },
            "details": {},
        },
        {
            "domain_type": "column",
            "domain_kwargs": {
                "column": "device_ts",
                "batch_id": "cf28d8229c247275c8cc0f41b4ceb62d",
            },
            "details": {},
        },
        {
            "domain_type": "column",
            "domain_kwargs": {
                "column": "user_agent",
                "batch_id": "cf28d8229c247275c8cc0f41b4ceb62d",
            },
            "details": {},
        },
    ]
# noinspection PyPep8Naming
def test_simple_semantic_type_column_domain_builder(
    alice_columnar_table_single_batch_context,
    alice_columnar_table_single_batch,
    column_Age_domain,
    column_Description_domain,
):
    """Restricting the builder to the 'numeric' semantic type should yield
    Domains only for the two numeric columns, with the inferred type recorded
    in `details`.
    """
    data_context: DataContext = alice_columnar_table_single_batch_context
    # Build profiler variables from the fixture's YAML profiler config.
    profiler_config: str = alice_columnar_table_single_batch["profiler_config"]
    full_profiler_config_dict: dict = yaml.load(profiler_config)
    variables_configs: dict = full_profiler_config_dict.get("variables")
    variables: Optional[ParameterContainer] = None
    if variables_configs:
        variables = build_parameter_container_for_variables(
            variables_configs=variables_configs
        )
    batch_request: dict = {
        "datasource_name": "alice_columnar_table_single_batch_datasource",
        "data_connector_name": "alice_columnar_table_single_batch_data_connector",
        "data_asset_name": "alice_columnar_table_single_batch_data_asset",
    }
    domain_builder: DomainBuilder = SimpleSemanticTypeColumnDomainBuilder(
        data_context=data_context,
        batch_request=batch_request,
        semantic_types=[
            "numeric",
        ],
    )
    domains: List[Domain] = domain_builder.get_domains(variables=variables)
    assert len(domains) == 2
    assert domains == [
        {
            "domain_type": "column",
            "domain_kwargs": {
                "column": "event_type",
                "batch_id": "cf28d8229c247275c8cc0f41b4ceb62d",
            },
            "details": {"inferred_semantic_domain_type": "numeric"},
        },
        {
            "domain_type": "column",
            "domain_kwargs": {
                "column": "user_id",
                "batch_id": "cf28d8229c247275c8cc0f41b4ceb62d",
            },
            "details": {"inferred_semantic_domain_type": "numeric"},
        },
    ]
| [
"noreply@github.com"
] | Tianguistengo.noreply@github.com |
ae689e1c15c82bffc24fbe2af3e17ee0844003d4 | 7fb9bfe9473f9d85a4eaa775d0dc2c270ab5313e | /simulation_results/ppe_090_new_strain_sens_low/ppe/simulations/plot_all_excel_files_2.py | 5d9080a44642c25e026d72477abd101a59310512 | [] | no_license | htahir2/covid_intra-hospital_model | cfbe86d175ecd950a285e913c8c7c658f396917e | ab2ec3452303bff6fbf5e084cbc8fc80bd97c287 | refs/heads/main | 2023-06-19T16:40:59.681356 | 2021-07-20T06:55:13 | 2021-07-20T06:55:13 | 328,629,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42,719 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 12 16:09:14 2019
@author: hannantahir
Adapted by Thi Mui Pham
"""
import networkx as nx
import pandas as pd
pd.options.mode.chained_assignment = None
import numpy as np
import datetime as dt
from datetime import timedelta
import seaborn as sns
import matplotlib.pyplot as plt
import glob
import os
import math # For using math.floor
#import re
pd.set_option('display.max_columns',30)
# Absolute paths to the simulation outputs and aggregated-result folder.
# NOTE(review): `dir` shadows the `dir` builtin — consider renaming.
dir = '/Users/tm-pham/PhD/covid-19/abm/data/Final_simulations_20210204_results/ppe_090_new_strain_sens_low/'
indir = dir + 'ppe/simulations/'
resultdir = dir + 'ppe/results/'
print("Running plot_all_excel_files_2.py")
print("Current folder:", indir)
#### ---------------------------------------------------------------------- ####
#### Occupied beds by symptomatic patients
#### ---------------------------------------------------------------------- ####
# Aggregate the per-run 'occupied_bed' time series into a mean and a 95%
# percentile interval across simulation runs.
df_occ_beds = pd.DataFrame(columns=['mean', 'ci_lower', 'ci_upper'])
sum_covid_wards = pd.DataFrame()
pt_wards_files = glob.glob(indir + 'occupied_beds_0???.csv')
dfs_pat_wards = {}
for f in pt_wards_files:
    dfs_pat_wards[os.path.splitext(os.path.basename(f))[0]] = pd.read_csv(f)
# One column per simulation run. (Removed an unused `i = 0` counter.)
# NOTE(review): the original comment said "Sum up w1 till w8", but this only
# copies a single 'occupied_bed' column per run — presumably the CSV is
# already summed over the COVID wards; confirm against the simulation output.
for name in sorted(dfs_pat_wards):
    sum_covid_wards[name] = dfs_pat_wards[name]['occupied_bed']
df_occ_beds['mean'] = sum_covid_wards.mean(axis=1)
df_occ_beds['ci_lower'] = sum_covid_wards.apply(lambda x: np.percentile(x, 2.5), axis=1)
df_occ_beds['ci_upper'] = sum_covid_wards.apply(lambda x: np.percentile(x, 97.5), axis=1)
df_occ_beds.to_csv(resultdir + 'occupied_beds_covid_wards.csv')
#### ---------------------------------------------------------------------- ####
#### Transmission counts
#### ---------------------------------------------------------------------- ####
# Collect one row per simulation run with the first-row value of each count
# column, then derive per-route percentages of the total transmissions.
df_trans_route = pd.DataFrame()
trans_files = glob.glob(indir + 'transmission_routes_contribution_count_0???.csv')
dfs_trans = {}
for f in trans_files:
    dfs_trans[os.path.splitext(os.path.basename(f))[0]] = pd.read_csv(f)

# (output column, source column) pairs, preserving the original output order.
_trans_cols = [
    ('Total_transmission', 'Total_transmission'),
    ('P-N_count', 'P-N'),
    ('P-HC_count', 'P-HC'),
    ('N-P_count', 'N-P'),
    ('N-HC_count', 'N-HC'),
    ('N-N_count', 'N-N'),
    ('HC-P_count', 'HC-P'),
    ('HC-N_count', 'HC-N'),
    ('HC-HC_count', 'HC-HC'),
    ('HCW_community_trans_count', 'HCW_community_trans_count'),
    ('Total_trans_non_covid_wards', 'Total_trans_non_covid_wards'),
    ('Total_trans_covid_wards', 'Total_trans_covid_wards'),
    ('Asympt_patient_admission_count', 'Asympt_patient_admission_count'),
    ('Exposed_patient_admission_count', 'Exposed_patient_admission_count'),
    ('Total_patients_admitted', 'Total_patients_admitted'),
    ('Num_susceptible_patients', 'Num_susceptible_patients'),
    ('num_replacement_hcw', 'num_replacement_hcw'),
    ('trans_counts_from_pre_symptomatic', 'trans_counts_from_pre_symptomatic'),
    ('trans_counts_from_symptomatic', 'trans_counts_from_symptomatic'),
    ('trans_counts_from_assymptomatic', 'trans_counts_from_assymptomatic'),
]
for i, name in enumerate(sorted(dfs_trans)):
    first_row = dfs_trans[name].iloc[0]
    for out_col, src_col in _trans_cols:
        df_trans_route.loc[i, out_col] = first_row[src_col]
    # Total transmissions to patients: from nurses plus from physicians (HC).
    df_trans_route.loc[i, 'Trans_count_patients'] = first_row['N-P'] + first_row['HC-P']

df_trans_route = df_trans_route.astype(float)
# Percentage contribution of each route to the total transmission count.
for route in ['P-N', 'P-HC', 'N-P', 'N-HC', 'N-N', 'HC-P', 'HC-N', 'HC-HC']:
    df_trans_route[route] = df_trans_route[route + '_count'] * 100 / df_trans_route['Total_transmission']
df_trans_route.to_csv(resultdir + 'transmission_route.csv')
#### ---------------------------------------------------------------------- ####
#### Total number of patients and HCWs over time
#### ---------------------------------------------------------------------- ####
# Build, per simulation run, the daily total head-count (patients + nurses +
# physicians) in df_total and the daily count of infection-positive
# individuals in df_pos. Both dicts are consumed by the contact-tracing
# section further below in this script.
patient_files = glob.glob(indir+'patients_by_state_per_day_0???.csv')
nurse_files = glob.glob(indir+'nurses_by_state_per_day_0???.csv')
physician_files = glob.glob(indir+'physicians_by_state_per_day_0???.csv')
dfs_patients = {}
dfs_nurses = {}
dfs_physicians = {}
# The CSVs carry an unlabeled leading column (pandas calls it 'Unnamed: 0')
# that holds the day number; rename it to 'day'.
for p in patient_files:
    dfs_patients[os.path.splitext(os.path.basename(p))[0]] = pd.read_csv(p)
    dfs_patients[os.path.splitext(os.path.basename(p))[0]].rename(columns = {'Unnamed: 0': 'day'}, inplace = True)
for n in nurse_files:
    dfs_nurses[os.path.splitext(os.path.basename(n))[0]] = pd.read_csv(n)
    dfs_nurses[os.path.splitext(os.path.basename(n))[0]].rename(columns = {'Unnamed: 0': 'day'}, inplace = True)
for d in physician_files:
    dfs_physicians[os.path.splitext(os.path.basename(d))[0]] = pd.read_csv(d)
    dfs_physicians[os.path.splitext(os.path.basename(d))[0]].rename(columns = {'Unnamed: 0': 'day'}, inplace = True)
df_total_patients= {} # Total number of patients
df_total_nurses = {} # Total number of nurses
df_total_physicians= {} # Total number of physicians
df_total = {} # Total number of individuals
df_pos = {} # Total number of positive patients (exposed, mild, severe, asymptomatics)
for name in sorted(dfs_patients):
    df_total[name] = pd.DataFrame(columns=['day'])
    df_pos[name] = pd.DataFrame(columns=['day'])
    # Positional drop of columns 1..7 keeps only the leading 'day' column
    # (assumes each file has exactly 8 columns -- TODO confirm).
    df_total_patients[name] = dfs_patients[name].drop(dfs_patients[name].columns[[1,2,3,4,5,6,7]],axis=1)
    # df_total_patients[name] = dfs_patients[name]['day']
    df_total[name]['day'] = df_total_patients[name]['day']
    df_pos[name]['day'] = df_total_patients[name]['day']
    # Daily head-count: sum of every non-'day' column for this run.
    df_total_patients[name]['n_total'] = dfs_patients[name].iloc[:, dfs_patients[name].columns!='day'].sum(axis=1)
    df_total[name]['n_total'] = df_total_patients[name]['n_total']
    # Positional drop of columns [0,1,4,6] presumably removes 'day',
    # SUSCEPTIBLE and the non-infected states, leaving the "positive"
    # states -- NOTE(review): confirm against the CSV column order.
    df_pos[name]['n_pos'] = (dfs_patients[name].drop(dfs_patients[name].columns[[0,1,4,6]],axis=1)).sum(axis=1)
i=0
# Add nurse counts into the per-run totals. The pairing relies on sorted()
# enumerating dfs_nurses and df_total/df_pos in the same run order --
# NOTE(review): confirm the file numbering is consistent across file types.
for name in sorted(dfs_nurses):
    df_total_nurses[name] = dfs_nurses[name].drop(dfs_nurses[name].columns[[1,2,3,4,5,6,7]],axis=1)
    df_total_nurses[name]['n_total'] = dfs_nurses[name].iloc[:, dfs_nurses[name].columns!='day'].sum(axis=1)
    df_total[sorted(df_total)[i]]['n_total'] = df_total[sorted(df_total)[i]]['n_total'] + df_total_nurses[name]['n_total']
    df_pos[sorted(df_pos)[i]]['n_pos'] = df_pos[sorted(df_pos)[i]]['n_pos'] + (dfs_nurses[name].drop(dfs_nurses[name].columns[[0,1,4,6]],axis=1)).sum(axis=1)
    i+=1
i=0
# Same index-paired accumulation for physicians.
for name in sorted(dfs_physicians):
    df_total_physicians[name] = dfs_physicians[name].drop(dfs_physicians[name].columns[[1,2,3,4,5,6,7]],axis=1)
    df_total_physicians[name]['n_total'] = dfs_physicians[name].iloc[:, dfs_physicians[name].columns!='day'].sum(axis=1)
    df_total[sorted(df_total)[i]]['n_total'] = df_total[sorted(df_total)[i]]['n_total'] + df_total_physicians[name]['n_total']
    df_pos[sorted(df_pos)[i]]['n_pos'] = df_pos[sorted(df_pos)[i]]['n_pos'] + (dfs_physicians[name].drop(dfs_physicians[name].columns[[0,1,4,6]],axis=1)).sum(axis=1)
    i+=1
#### ---------------------------------------------------------------------- ####
#### This calculates positivity rates for contact tracing
#### ---------------------------------------------------------------------- ####
# One row per simulation run: how many contacts were traced in total and how
# many of them were positive, then the overall positivity percentage.
df_contact = pd.DataFrame()
df_contact_max = pd.DataFrame()  # per-run peak positivity, filled further below
# (sic: the 'tracinng' typo matches the simulator's output file names)
cont_files = glob.glob(indir + 'contact_tracinng_counts_0???.csv')
dfs_contact = {}
for path in cont_files:
    key = os.path.splitext(os.path.basename(path))[0]
    dfs_contact[key] = pd.read_csv(path, index_col=False)
for row, key in enumerate(sorted(dfs_contact)):
    run = dfs_contact[key]
    df_contact.loc[row, 'n_contacts_traced'] = run['num_contacts_traced'].sum()
    df_contact.loc[row, 'n_pos_contacts'] = run['num_positive_contacts'].sum()
df_contact['positivity_rate'] = df_contact['n_pos_contacts'] * 100 / df_contact['n_contacts_traced']
df_contact.to_csv(resultdir + 'contact_tracing.csv')
###### This calculates positivity rates for contact tracing and includes time of contact tracing
# For every contact-tracing event: the traced/positive counts, the
# (fractional) event time, and -- looked up by day -- the hospital-wide
# prevalence plus the untraced head-count and untraced positives at that
# moment.
df_contact_trace = pd.DataFrame(columns =['num_contacts_traced', 'num_positive_contacts','contact_tracing_time'])
cont_files_1 = glob.glob(indir+'contact_tracinng_counts_0???.csv')
prev_files = glob.glob(indir+'prev_full_hosp_0???.csv')
dfs_contact = {}
dfs_prev = {}
for f in cont_files_1:
    dfs_contact[os.path.splitext(os.path.basename(f))[0]] = pd.read_csv(f, index_col = False)
for f in prev_files:
    dfs_prev[os.path.splitext(os.path.basename(f))[0]] = pd.read_csv(f, index_col = False)
_trace_frames = [df_contact_trace]  # template first so the declared columns lead
for i in range(len(dfs_contact)):
    key = sorted(dfs_contact)[i]
    df_1 = dfs_contact[key][['num_contacts_traced','num_positive_contacts','contact_tracing_time','num_sympt_patients_traced','hcw_time_of_infection','hcw_current_ward', 'num_sympt_hcws_traced']].copy()
    df_1['day'] = dfs_contact[key]['contact_tracing_time'].apply(math.floor)
    df_1['sim'] = i  # index of the simulation run
    # Per-day totals built earlier for this run; assumes all the dicts sort
    # to the same run order -- NOTE(review): confirm the file numbering is
    # consistent across file types.
    df_temp_total = df_total[sorted(df_total)[i]]
    df_temp_pos = df_pos[sorted(df_pos)[i]]
    df_temp = dfs_prev[sorted(dfs_prev)[i]].drop(dfs_prev[sorted(dfs_prev)[i]].columns[[2]], axis=1)
    df_temp['day'] = df_temp.iloc[:, 0]
    # .values yields a one-element array per matching day; .str.get(0)
    # unwraps it (NaN when the day has no matching row).
    df_1['prevalence'] = (df_1['day'].apply(lambda x: df_temp.loc[df_temp['day']==x,:]['total_prev'].values)).str.get(0)
    df_1['n_total'] = df_1['day'].apply(lambda x: df_temp_total.loc[df_temp_total['day']==x,:]['n_total'].values)
    # Exclude the traced contacts and the index case from the denominator.
    df_1['n_total'] = df_1['n_total'].str.get(0) - df_1['num_contacts_traced'] - 1
    df_1['n_pos'] = df_1['day'].apply(lambda x: df_temp_pos.loc[df_temp_pos['day']==x,:]['n_pos'].values)
    df_1['n_pos'] = df_1['n_pos'].str.get(0) - df_1['num_positive_contacts'] - 1
    _trace_frames.append(df_1)
    del df_1
# FIX: pandas.DataFrame.append is deprecated (removed in pandas 2.0);
# accumulate the per-run frames and concatenate once.
df_contact_trace = pd.concat(_trace_frames, ignore_index=True, sort=False)
df_contact_trace = df_contact_trace[df_contact_trace['num_contacts_traced'] !=0] ## to check if there is no 0 otherwise we will get division by zero error
df_contact_trace['pos_rate'] = 100*df_contact_trace['num_positive_contacts'].divide(df_contact_trace['num_contacts_traced'])
df_contact_trace.to_csv(resultdir+'contact_tracing_with_time_data_appended.csv')
#### Contact tracing positivity rate per simulation
# Positivity rate of every tracing event, one column per run, plus each
# run's peak value and standard deviation.
df_contact_over_time = pd.DataFrame()
for col, key in enumerate(sorted(dfs_contact)):
    run = dfs_contact[key]
    df_contact_over_time.loc[:, col] = run['num_positive_contacts'] * 100 / run['num_contacts_traced']
    stored = df_contact_over_time.loc[:, col]
    df_contact_max.loc[col, 'max'] = stored.max()
    df_contact_max.loc[col, 'std'] = stored.std()
df_contact_over_time.to_csv(resultdir + 'contact_tracing_per_sim.csv')
df_contact_max.to_csv(resultdir + 'contact_tracing_max.csv')
#### ---------------------------------------------------------------------- ####
#### This calculates positivity rates for screening
#### ---------------------------------------------------------------------- ####
# Per-run screening totals restricted to the outbreak period, plus the
# screening positivity rate over time (mean with 95% percentile band).
df_screen = pd.DataFrame(columns=['total_screened','positive_detected'])
pos_rate_scr = pd.DataFrame()
screen_files = glob.glob(indir+'screening_counts_0???.csv')
dfs_screen = {}
for scr in screen_files:
    dfs_screen[os.path.splitext(os.path.basename(scr))[0]] = pd.read_csv(scr)
    dfs_screen[os.path.splitext(os.path.basename(scr))[0]].rename(columns = {'Day': 'day'}, inplace = True)
i = 0
for name in sorted(dfs_screen):
    ### new code for positivity rate over time
    pos_rate_scr[name] = (dfs_screen[name].positive_detected)*100/(dfs_screen[name].total_screened)
    df1 = pd.DataFrame()
    # The number of screening events identifies the screening interval.
    # NOTE(review): runs with 30..50 rows match neither branch and are
    # silently skipped -- confirm no scenario produces such a row count.
    if len(dfs_screen[name]) > 50: ## this means that screening was performed every 3 days
        df1 = dfs_screen[name][3:32] ## day from 10 - 91 will be selected
        df_screen.loc[i,'total_screened'] = df1.total_screened.sum()
        df_screen.loc[i,'positive_detected'] = df1.positive_detected.sum()
    elif len(dfs_screen[name]) < 30: ## this means this was screening weekly
        df1 = dfs_screen[name][1:14] ## 13 weeks will be selected
        df_screen.loc[i,'total_screened'] = df1.total_screened.sum()
        df_screen.loc[i,'positive_detected'] = df1.positive_detected.sum()
    i += 1
# Drop runs with no screening so the division below cannot hit zero.
df_screen =df_screen[df_screen['total_screened'] !=0]
df_screen['positivity_rate'] = df_screen.positive_detected * 100/df_screen['total_screened']
# df_screening.loc[len(df_screening), :] = df_screen['positivity_rate'].mean(), df_screen['positivity_rate'].std()
df_screen.to_csv(resultdir+'screening_outbreak_period.csv')
# Time-dependent mean and 95% percentile band across runs (only when at
# least one screening file was found).
if len(pos_rate_scr) > 0:
    scr_pos_rate_over_time = pd.DataFrame()
    scr_pos_rate_over_time['mean'] = pos_rate_scr.mean(axis = 1)
    scr_pos_rate_over_time['ci_lower'] = pos_rate_scr.apply(lambda x: np.percentile(x, 2.5), axis=1)
    scr_pos_rate_over_time['ci_upper'] = pos_rate_scr.apply(lambda x: np.percentile(x, 97.5), axis=1)
    scr_pos_rate_over_time.to_csv(resultdir+'screening_data_time_dependant.csv')
# Screening with day and prevalence appended
# Join each screening event's counts with the hospital-wide prevalence of
# that day, across every run, then compute per-event positivity.
df_screening_3 = pd.DataFrame(columns=['day','total_screened','positive_detected','prevalence','positivity_rate'])
_scr3_frames = [df_screening_3]  # template first so the declared columns lead
for i in range(len(dfs_screen)):
    key = sorted(dfs_screen)[i]
    df_s = dfs_screen[key].drop(dfs_screen[key].columns[[0]], axis=1)
    df_s['day'] = dfs_screen[key]['day']
    # Assumes dfs_prev sorts to the same run order as dfs_screen --
    # NOTE(review): confirm the file numbering is consistent.
    prev_key = sorted(dfs_prev)[i]
    df_temp = dfs_prev[prev_key].drop(dfs_prev[prev_key].columns[[2]], axis=1)
    df_temp['day'] = df_temp.iloc[:, 0]
    # .values yields a one-element array per matching day; .str.get(0)
    # unwraps it (NaN when the day has no prevalence row).
    df_s['prevalence'] = df_s['day'].apply(lambda x: df_temp.loc[df_temp['day']==x,:]['total_prev'].values)
    df_s['prevalence'] = df_s['prevalence'].str.get(0)
    _scr3_frames.append(df_s)
    del df_s
# FIX: pandas.DataFrame.append is deprecated (removed in pandas 2.0);
# accumulate frames and concatenate once.
df_screening_3 = pd.concat(_scr3_frames, ignore_index=True, sort=False)
df_screening_3 =df_screening_3[df_screening_3['total_screened'] !=0] ## to check if there is no 0 otherwise we will get division by zero error
df_screening_3['positivity_rate'] = 100*df_screening_3['positive_detected'].divide(df_screening_3['total_screened'])
df_screening_3.to_csv(resultdir+'screening_with_time_data_appended.csv')
# Re-read the screening files into dfs_screen. NOTE(review): this re-read
# appears redundant (the dict was not modified above) -- confirm before
# removing it.
for scr in screen_files:
    dfs_screen[os.path.splitext(os.path.basename(scr))[0]] = pd.read_csv(scr)
    dfs_screen[os.path.splitext(os.path.basename(scr))[0]].rename(columns = {'Day': 'day'}, inplace = True)
# For screening every 7 days
df_screening_7 = pd.DataFrame(columns=['day','total_screened','positive_detected','prevalence','positivity_rate'])
_scr7_frames = [df_screening_7]  # template first so the declared columns lead
for i in range(len(dfs_screen)):
    key = sorted(dfs_screen)[i]
    df_s7 = dfs_screen[key].drop(dfs_screen[key].columns[[0]], axis=1)
    df_s7['day'] = dfs_screen[key]['day']
    prev_key = sorted(dfs_prev)[i]  # assumes matching run order -- confirm
    df_temp7 = dfs_prev[prev_key].drop(dfs_prev[prev_key].columns[[2]], axis=1)
    df_temp7['day'] = df_temp7.iloc[:, 0]
    # One-element array per matching day; .str.get(0) unwraps (NaN if none).
    df_s7['prevalence'] = df_s7['day'].apply(lambda x: df_temp7.loc[df_temp7['day']==x,:]['total_prev'].values)
    df_s7['prevalence'] = df_s7['prevalence'].str.get(0)
    _scr7_frames.append(df_s7)
    del df_s7
# FIX: pandas.DataFrame.append is deprecated (removed in pandas 2.0);
# accumulate frames and concatenate once.
df_screening_7 = pd.concat(_scr7_frames, ignore_index=True, sort=False)
df_screening_7 =df_screening_7[df_screening_7['total_screened'] !=0] ## to check if there is no 0 otherwise we will get division by zero error
df_screening_7['positivity_rate'] = 100*df_screening_7['positive_detected'].divide(df_screening_7['total_screened'])
df_screening_7.to_csv(resultdir+'screening_7days_with_time_data_appended.csv')
#### ---------------------------------------------------------------------- ####
#### daily covid19 patients discharged to community
#### ---------------------------------------------------------------------- ####
# Daily counts of COVID-19 patients discharged to the community, one column
# per run, summarised as mean/median with a 95% percentile interval.
df_discharge = pd.DataFrame()
disch_files = glob.glob(indir+'covid19_patients_Discharge_count_0???.csv')
dfs_discharge = {}
for dsc in disch_files:
    dfs_discharge[os.path.splitext(os.path.basename(dsc))[0]] = pd.read_csv(dsc)
for name in sorted(dfs_discharge):
    df_discharge[name] = dfs_discharge[name]['count']
# FIX: snapshot the per-run columns before adding summaries. Previously each
# summary was computed over a frame that already contained the earlier ones
# (median included 'mean'; the CIs included 'mean'/'median'), biasing them.
_disch_sim_cols = list(df_discharge.columns)
df_discharge['mean'] = df_discharge[_disch_sim_cols].mean(axis=1)
df_discharge['median'] = df_discharge[_disch_sim_cols].median(axis=1)
df_discharge['ci_lower'] = df_discharge[_disch_sim_cols].apply(lambda r: np.percentile(r, 2.5), axis=1)
df_discharge['ci_upper'] = df_discharge[_disch_sim_cols].apply(lambda r: np.percentile(r, 97.5), axis=1)
df_discharge.to_csv(resultdir+'COVID-19_patients_discharged_to_community.csv', mode='w', columns=['mean','median','ci_lower','ci_upper'])
#### ---------------------------------------------------------------------- ####
#### daily absent hcws
#### ---------------------------------------------------------------------- ####
# Daily number of absent HCWs per run, converted to a percentage of the
# workforce and summarised as mean with a 95% percentile interval.
df_abs_hcw = pd.DataFrame()
abs_hcw_proportions = pd.DataFrame()
abs_hcw_mean_CI = pd.DataFrame()
abs_hcw_files = glob.glob(indir+'daily_absent_hcw_count_0???.csv')
dfs_abs_hcw = {}
for dsc in abs_hcw_files:
    dfs_abs_hcw[os.path.splitext(os.path.basename(dsc))[0]] = pd.read_csv(dsc)
for name in sorted(dfs_abs_hcw):
    df_abs_hcw[name] = dfs_abs_hcw[name]['Daily_absent_hcw']
# NOTE(review): 870 is presumably the total HCW head-count (the same
# constant appears in the percent-recovered calculation elsewhere in this
# script) -- confirm and consider hoisting it into a named constant.
abs_hcw_proportions = df_abs_hcw*100/870 ## this will calculate proportions over time from every simulation
abs_hcw_mean_CI['mean'] = abs_hcw_proportions.mean(axis = 1)
abs_hcw_mean_CI['ci_lower'] = abs_hcw_proportions.apply(lambda x: np.percentile(x, 2.5), axis=1)
abs_hcw_mean_CI['ci_upper'] = abs_hcw_proportions.apply(lambda x: np.percentile(x, 97.5), axis=1)
abs_hcw_mean_CI.to_csv(resultdir+'daily_absent_hcw.csv')
#### ---------------------------------------------------------------------- ####
#### daily infected hcws
#### ---------------------------------------------------------------------- ####
# Daily infected-HCW counts per run, mean with a 95% percentile interval.
df_infected_hcw = pd.DataFrame()
inf_hcw_files = glob.glob(indir+'daily_infected_hcw_count_0???.csv')
dfs_inf_hcw = {}
for dsc in inf_hcw_files:
    dfs_inf_hcw[os.path.splitext(os.path.basename(dsc))[0]] = pd.read_csv(dsc)
for name in sorted(dfs_inf_hcw):
    df_infected_hcw[name] = dfs_inf_hcw[name]['Daily_infected_hcw']
# FIX: snapshot the per-run columns before adding summaries. Previously the
# CI columns were computed over a frame that already contained 'mean' (and
# then 'ci_lower'), biasing the interval.
_inf_sim_cols = list(df_infected_hcw.columns)
df_infected_hcw['mean'] = df_infected_hcw[_inf_sim_cols].mean(axis=1)
df_infected_hcw['ci_lower'] = df_infected_hcw[_inf_sim_cols].apply(lambda r: np.percentile(r, 2.5), axis=1)
df_infected_hcw['ci_upper'] = df_infected_hcw[_inf_sim_cols].apply(lambda r: np.percentile(r, 97.5), axis=1)
df_infected_hcw.to_csv(resultdir+'daily_infected_hcw.csv', mode='w', columns=['mean','ci_lower','ci_upper'])
#### ---------------------------------------------------------------------- ####
#### daily transmission counts
#### ---------------------------------------------------------------------- ####
# Daily transmission counts (total / to patients / to HCWs) per run,
# summarised as mean with a 95% percentile interval, plus per-run peaks.
df_trans_total_count = pd.DataFrame()
peak_transm_count_pat = pd.DataFrame(columns = ['peak transmission'])
peak_transm_count_hcw = pd.DataFrame(columns = ['peak transmission'])
df_trans_pat_count = pd.DataFrame()
df_trans_hcw_count = pd.DataFrame()
trans_files = glob.glob(indir+'daily_transmissions_count_0???.csv')
dfs_trans_count = {}
for dsc in trans_files:
    dfs_trans_count[os.path.splitext(os.path.basename(dsc))[0]] = pd.read_csv(dsc)
for i, name in enumerate(sorted(dfs_trans_count)):
    df_trans_total_count[name] = dfs_trans_count[name]['Total_Transmission_counts']
    df_trans_pat_count[name] = dfs_trans_count[name]['Patient_transmission_counts']
    df_trans_hcw_count[name] = dfs_trans_count[name]['hcw_transmission_count']
    peak_transm_count_pat.loc[i,'peak transmission'] = dfs_trans_count[name]['Patient_transmission_counts'].max()
    peak_transm_count_hcw.loc[i,'peak transmission'] = dfs_trans_count[name]['hcw_transmission_count'].max()
# FIX 1: ci_upper for the total counts used np.percentile(x, 2.5); it is now
# the 97.5th percentile like the patient/HCW frames.
# FIX 2: summaries are computed from a snapshot of the per-run columns;
# previously the CI columns included the already-added 'mean' column.
for frame, outfile in ((df_trans_total_count, 'daily_total_transmission_count.csv'),
                       (df_trans_pat_count, 'daily_patient_transmission_count.csv'),
                       (df_trans_hcw_count, 'daily_hcw_transmission_count.csv')):
    sim_cols = list(frame.columns)
    frame['mean'] = frame[sim_cols].mean(axis=1)
    frame['ci_lower'] = frame[sim_cols].apply(lambda r: np.percentile(r, 2.5), axis=1)
    frame['ci_upper'] = frame[sim_cols].apply(lambda r: np.percentile(r, 97.5), axis=1)
    frame.to_csv(resultdir+outfile, mode='w', columns=['mean','ci_lower','ci_upper'])
peak_transm_count_pat.to_csv(resultdir+'peak transmission patients.csv')
peak_transm_count_hcw.to_csv(resultdir+'peak transmission hcws.csv')
##### data_total_prev_per_ward
# One empty accumulator DataFrame per hospital ward (wards 1..28). They are
# only initialised here; presumably filled later in the script.
(df_ward1, df_ward2, df_ward3, df_ward4, df_ward5, df_ward6, df_ward7,
 df_ward8, df_ward9, df_ward10, df_ward11, df_ward12, df_ward13, df_ward14,
 df_ward15, df_ward16, df_ward17, df_ward18, df_ward19, df_ward20,
 df_ward21, df_ward22, df_ward23, df_ward24, df_ward25, df_ward26,
 df_ward27, df_ward28) = (pd.DataFrame() for _ in range(28))
#### ---------------------------------------------------------------------- ####
#### nurses_by_state_per_day_hospital_transmissions_only
#### ---------------------------------------------------------------------- ####
# Daily nurse counts per disease state, hospital-acquired transmissions only,
# averaged across simulation runs.
df_susc = pd.DataFrame()
df_expo = pd.DataFrame()
df_mild = pd.DataFrame()
df_seve = pd.DataFrame()
df_reco = pd.DataFrame()
df_asym = pd.DataFrame()
df_nurse_by_state_tranms = pd.DataFrame()
files_nur = glob.glob(indir + 'nurses_by_state_per_day_hospital_transmissions_only_0???.csv')
dfs_nur = {}
for path in files_nur:
    dfs_nur[os.path.splitext(os.path.basename(path))[0]] = pd.read_csv(path)
# (accumulator frame, source column, output label), in output-column order.
_nur_states = [
    (df_susc, 'SUSCEPTIBLE', 'susceptible'),
    (df_expo, 'EXPOSED', 'exposed'),
    (df_mild, 'MILD', 'mild'),
    (df_seve, 'SEVERE', 'severe'),
    (df_reco, 'RECOVERED', 'recovered'),
    (df_asym, 'ASYMPTOMATIC', 'asymptomatic'),
]
for key in sorted(dfs_nur):
    run = dfs_nur[key]
    for frame, col, _ in _nur_states:
        frame[key] = run[col]
for frame, _, label in _nur_states:
    frame['avg'] = frame.mean(axis=1)
    df_nurse_by_state_tranms[label] = frame['avg']
df_nurse_by_state_tranms.to_csv(resultdir + 'nurse_by_state_transmission.csv')
#### ---------------------------------------------------------------------- ####
#### HC Specialists_by_state_per_day_hospital_transmissions_only
#### ---------------------------------------------------------------------- ####
# Daily HC-specialist (physician) counts per disease state, hospital-acquired
# transmissions only, averaged across simulation runs.
df_susc_hc = pd.DataFrame()
df_expo_hc = pd.DataFrame()
df_mild_hc = pd.DataFrame()
df_seve_hc = pd.DataFrame()
df_reco_hc = pd.DataFrame()
df_asym_hc = pd.DataFrame()
df_hc_by_state_tranms = pd.DataFrame()
files_hc = glob.glob(indir + 'physicians_by_state_per_day_hospital_transmissions_only_0???.csv')
dfs_hc = {}
for path in files_hc:
    dfs_hc[os.path.splitext(os.path.basename(path))[0]] = pd.read_csv(path)
# (accumulator frame, source column, output label), in output-column order.
_hc_states = [
    (df_susc_hc, 'SUSCEPTIBLE', 'susceptible'),
    (df_expo_hc, 'EXPOSED', 'exposed'),
    (df_mild_hc, 'MILD', 'mild'),
    (df_seve_hc, 'SEVERE', 'severe'),
    (df_reco_hc, 'RECOVERED', 'recovered'),
    (df_asym_hc, 'ASYMPTOMATIC', 'asymptomatic'),
]
for key in sorted(dfs_hc):
    run = dfs_hc[key]
    for frame, col, _ in _hc_states:
        frame[key] = run[col]
for frame, _, label in _hc_states:
    frame['avg'] = frame.mean(axis=1)
    df_hc_by_state_tranms[label] = frame['avg']
df_hc_by_state_tranms.to_csv(resultdir + 'HCspecialists_by_state_transmission.csv')
####patients_by_state_per_day_hospital_transmissions_only
# Daily patient counts per disease state, hospital-acquired transmissions
# only, averaged across simulation runs.
df_susc_pat = pd.DataFrame()
df_expo_pat = pd.DataFrame()
df_mild_pat = pd.DataFrame()
df_seve_pat = pd.DataFrame()
df_reco_pat = pd.DataFrame()
df_asym_pat = pd.DataFrame()
df_pat_by_state_tranms = pd.DataFrame()
files_pat = glob.glob(indir + 'patients_by_state_per_day_hospital_transmissions_only_0???.csv')
dfs_pat = {}
for path in files_pat:
    dfs_pat[os.path.splitext(os.path.basename(path))[0]] = pd.read_csv(path)
# (accumulator frame, source column, output label), in output-column order.
_pat_states = [
    (df_susc_pat, 'SUSCEPTIBLE', 'susceptible'),
    (df_expo_pat, 'EXPOSED', 'exposed'),
    (df_mild_pat, 'MILD', 'mild'),
    (df_seve_pat, 'SEVERE', 'severe'),
    (df_reco_pat, 'RECOVERED', 'recovered'),
    (df_asym_pat, 'ASYMPTOMATIC', 'asymptomatic'),
]
for key in sorted(dfs_pat):
    run = dfs_pat[key]
    for frame, col, _ in _pat_states:
        frame[key] = run[col]
for frame, _, label in _pat_states:
    frame['avg'] = frame.mean(axis=1)
    df_pat_by_state_tranms[label] = frame['avg']
df_pat_by_state_tranms.to_csv(resultdir + 'patients_by_state_transmission.csv')
#### ---------------------------------------------------------------------- ####
#### patients_by_state_per_day_hospital_total
#### ---------------------------------------------------------------------- ####
# Daily patient counts per disease state across ALL patients (not only
# hospital-acquired), with a mean and 95% percentile interval per state.
df_susc_pat_tot = pd.DataFrame()
df_expo_pat_tot = pd.DataFrame()
df_mild_pat_tot = pd.DataFrame()
df_seve_pat_tot = pd.DataFrame()
df_reco_pat_tot = pd.DataFrame()
df_asym_pat_tot = pd.DataFrame()
df_sympt_pat_tot = pd.DataFrame()
df_pat_tot_by_state = pd.DataFrame()
files_pat_tot = glob.glob(indir + 'patients_by_state_per_day_0???.csv')
dfs_pat_tot = {}
for path in files_pat_tot:
    dfs_pat_tot[os.path.splitext(os.path.basename(path))[0]] = pd.read_csv(path)
for key in sorted(dfs_pat_tot):
    run = dfs_pat_tot[key]
    df_susc_pat_tot[key] = run['SUSCEPTIBLE']
    df_expo_pat_tot[key] = run['EXPOSED']
    df_mild_pat_tot[key] = run['MILD']
    df_seve_pat_tot[key] = run['SEVERE']
    df_reco_pat_tot[key] = run['RECOVERED']
    df_asym_pat_tot[key] = run['ASYMPTOMATIC']
    df_sympt_pat_tot[key] = run['MILD'] + run['SEVERE']  # symptomatic = mild + severe
_pat_tot_states = [
    ('susceptible', df_susc_pat_tot),
    ('exposed', df_expo_pat_tot),
    ('mild', df_mild_pat_tot),
    ('severe', df_seve_pat_tot),
    ('recovered', df_reco_pat_tot),
    ('asymptomatic', df_asym_pat_tot),
    ('symptomatic', df_sympt_pat_tot),
]
# CSV column order: all means first, then all lower bounds, then all upper
# bounds, each group in state order.
for label, frame in _pat_tot_states:
    df_pat_tot_by_state[label + '_mean'] = frame.mean(axis=1)
for label, frame in _pat_tot_states:
    df_pat_tot_by_state[label + '_ci_lower'] = frame.apply(lambda r: np.percentile(r, 2.5), axis=1)
for label, frame in _pat_tot_states:
    df_pat_tot_by_state[label + '_ci_upper'] = frame.apply(lambda r: np.percentile(r, 97.5), axis=1)
df_pat_tot_by_state.to_csv(resultdir + 'patients_by_state_per_day.csv')
#### ---------------------------------------------------------------------- ####
#### nurses_by_state_per_day_hospital_total
#### ---------------------------------------------------------------------- ####
# Daily nurse counts per disease state across all infection sources,
# averaged over simulation runs.
df_susc_nur_tot = pd.DataFrame()
df_expo_nur_tot = pd.DataFrame()
df_mild_nur_tot = pd.DataFrame()
df_seve_nur_tot = pd.DataFrame()
df_reco_nur_tot = pd.DataFrame()
df_asym_nur_tot = pd.DataFrame()
df_nur_tot_by_state = pd.DataFrame()
files_nur_tot = glob.glob(indir + 'nurses_by_state_per_day_0???.csv')
dfs_nur_tot = {}
for path in files_nur_tot:
    dfs_nur_tot[os.path.splitext(os.path.basename(path))[0]] = pd.read_csv(path)
# (accumulator frame, source column, output label), in output-column order.
_nur_tot_states = [
    (df_susc_nur_tot, 'SUSCEPTIBLE', 'susceptible'),
    (df_expo_nur_tot, 'EXPOSED', 'exposed'),
    (df_mild_nur_tot, 'MILD', 'mild'),
    (df_seve_nur_tot, 'SEVERE', 'severe'),
    (df_reco_nur_tot, 'RECOVERED', 'recovered'),
    (df_asym_nur_tot, 'ASYMPTOMATIC', 'asymptomatic'),
]
for key in sorted(dfs_nur_tot):
    run = dfs_nur_tot[key]
    for frame, col, _ in _nur_tot_states:
        frame[key] = run[col]
for frame, _, label in _nur_tot_states:
    frame['avg'] = frame.mean(axis=1)
    df_nur_tot_by_state[label] = frame['avg']
df_nur_tot_by_state.to_csv(resultdir + 'nurses_by_state_per_day.csv')
####hc_specialist_by_state_per_day_hospital_total
# Daily HC-specialist counts per disease state across all infection sources,
# averaged over simulation runs.
df_susc_hc_tot = pd.DataFrame()
df_expo_hc_tot = pd.DataFrame()
df_mild_hc_tot = pd.DataFrame()
df_seve_hc_tot = pd.DataFrame()
df_reco_hc_tot = pd.DataFrame()
df_asym_hc_tot = pd.DataFrame()
df_hc_tot_by_state = pd.DataFrame()
files_hc_tot = glob.glob(indir + 'physicians_by_state_per_day_0???.csv')
dfs_hc_tot = {}
for path in files_hc_tot:
    dfs_hc_tot[os.path.splitext(os.path.basename(path))[0]] = pd.read_csv(path)
# (accumulator frame, source column, output label), in output-column order.
_hc_tot_states = [
    (df_susc_hc_tot, 'SUSCEPTIBLE', 'susceptible'),
    (df_expo_hc_tot, 'EXPOSED', 'exposed'),
    (df_mild_hc_tot, 'MILD', 'mild'),
    (df_seve_hc_tot, 'SEVERE', 'severe'),
    (df_reco_hc_tot, 'RECOVERED', 'recovered'),
    (df_asym_hc_tot, 'ASYMPTOMATIC', 'asymptomatic'),
]
for key in sorted(dfs_hc_tot):
    run = dfs_hc_tot[key]
    for frame, col, _ in _hc_tot_states:
        frame[key] = run[col]
for frame, _, label in _hc_tot_states:
    frame['avg'] = frame.mean(axis=1)
    df_hc_tot_by_state[label] = frame['avg']
df_hc_tot_by_state.to_csv(resultdir + 'HCspecialists_by_state_per_day.csv')
#### prev_full_hosp
# Hospital-wide prevalence over time: total and nosocomial (hospital-acquired)
# prevalence, mean with a 95% percentile interval across runs.
df_tot_prev = pd.DataFrame()
df_trans_prev = pd.DataFrame()
df_total_prevalence = pd.DataFrame()
df_trans_prevalence = pd.DataFrame()
files_prev = glob.glob(indir + 'prev_full_hosp_0???.csv')
dfs_prev = {}
for path in files_prev:
    dfs_prev[os.path.splitext(os.path.basename(path))[0]] = pd.read_csv(path)
for key in sorted(dfs_prev):
    df_tot_prev[key] = dfs_prev[key]['total_prev']
    df_trans_prev[key] = dfs_prev[key]['nosocomial_prev']
df_total_prevalence['total_prev'] = df_tot_prev.mean(axis=1)
df_trans_prevalence['trans_prev'] = df_trans_prev.mean(axis=1)
for stats, source in ((df_total_prevalence, df_tot_prev),
                      (df_trans_prevalence, df_trans_prev)):
    stats['ci_lower'] = source.apply(lambda r: np.percentile(r, 2.5), axis=1)
    stats['ci_upper'] = source.apply(lambda r: np.percentile(r, 97.5), axis=1)
df_total_prevalence.to_csv(resultdir + 'prevalence_total.csv')
df_trans_prevalence.to_csv(resultdir + 'prevalence_nosocomial.csv')
# Nurses: recovered counts plus per-state series reused by the combined HCW
# summaries further below in this script.
df_reco_nur_tot = pd.DataFrame()
nur_by_state_expos = pd.DataFrame()
nur_by_state_sympt = pd.DataFrame()
nur_by_state_asympt = pd.DataFrame()
files_nur_tot = glob.glob(indir + 'nurses_by_state_per_day_0???.csv')
dfs_nur_tot = {}
for path in files_nur_tot:
    dfs_nur_tot[os.path.splitext(os.path.basename(path))[0]] = pd.read_csv(path)
for key in sorted(dfs_nur_tot):
    run = dfs_nur_tot[key]
    df_reco_nur_tot[key] = run['RECOVERED']
    nur_by_state_expos[key] = run['EXPOSED']
    nur_by_state_sympt[key] = run['MILD'] + run['SEVERE']
    nur_by_state_asympt[key] = run['ASYMPTOMATIC']
# Recovered nurses on row label 189 -- presumably the final simulated day;
# confirm against the simulation length. One row per run after the reshape.
reco_nur_final = df_reco_nur_tot.loc[189, :].to_frame().reset_index()
reco_nur_final = reco_nur_final.drop(reco_nur_final.columns[[0]], axis=1)
reco_nur_final.rename(columns={189: 'Mean_recovered_nurses'}, inplace=True)
# HC specialists: recovered counts plus per-state series reused by the
# combined HCW summaries further below in this script.
df_reco_hc_tot = pd.DataFrame()
hc_by_state_expos = pd.DataFrame()
hc_by_state_sympt = pd.DataFrame()
hc_by_state_asympt = pd.DataFrame()
files_hc_tot = glob.glob(indir + 'physicians_by_state_per_day_0???.csv')
dfs_hc_tot = {}
for path in files_hc_tot:
    dfs_hc_tot[os.path.splitext(os.path.basename(path))[0]] = pd.read_csv(path)
for key in sorted(dfs_hc_tot):
    run = dfs_hc_tot[key]
    df_reco_hc_tot[key] = run['RECOVERED']
    hc_by_state_expos[key] = run['EXPOSED']
    hc_by_state_sympt[key] = run['MILD'] + run['SEVERE']
    hc_by_state_asympt[key] = run['ASYMPTOMATIC']
# Recovered HC specialists on row label 189 -- presumably the final simulated
# day; confirm against the simulation length.
reco_hc_final = df_reco_hc_tot.loc[189, :].to_frame().reset_index()
reco_hc_final = reco_hc_final.drop(reco_hc_final.columns[[0]], axis=1)
reco_hc_final.rename(columns={189: 'Mean_recovered_hc'}, inplace=True)
total_recovered = pd.DataFrame()
# NOTE(review): 870 is presumably the total HCW head-count (nurses +
# physicians) -- confirm; the same constant is used for absent-HCW rates.
total_recovered['Recovered_percentage'] = (reco_nur_final['Mean_recovered_nurses'] + reco_hc_final['Mean_recovered_hc']) * 100 / 870
total_recovered.to_csv(resultdir + 'precent_recovered_hcws.csv', index=False)
#raise Exception('exit')
expo_hcws = pd.DataFrame()
symp_hcws = pd.DataFrame()
asympt_hcws = pd.DataFrame()
for i in range(hc_by_state_expos.shape[1]):
expo_hcws[i] = hc_by_state_expos.iloc[:,i] + nur_by_state_expos.iloc[:,i]
symp_hcws[i] = hc_by_state_sympt.iloc[:,i] + nur_by_state_sympt.iloc[:,i]
asympt_hcws[i] = hc_by_state_asympt.iloc[:,i] + nur_by_state_asympt.iloc[:,i]
disease_state_hcws = pd.DataFrame() ## nurse and hc specialist added together
disease_state_hcws['exposed mean'] = expo_hcws.mean(axis = 1)
disease_state_hcws['exposed stdv'] = expo_hcws.std(axis = 1)
disease_state_hcws['symptomatic mean'] = symp_hcws.mean(axis = 1)
disease_state_hcws['symptomatic stdv'] = symp_hcws.std(axis = 1)
disease_state_hcws['asymptomatic mean'] = asympt_hcws.mean(axis = 1)
disease_state_hcws['saymptomatic stdv'] = asympt_hcws.std(axis = 1)
disease_state_hcws.to_csv(resultdir+'hcws counts in disease states.csv', index = False)
#### ---------------------------------------------------------------------- ####
#### average secondary transmission counts
#### ---------------------------------------------------------------------- ####
### patients
pat_second_counts_mean = pd.DataFrame(columns=['patient_symptomatic','patient_asymptomatic'])
pat_second_counts_sum = pd.DataFrame(columns=['patient_symptomatic_sum','patient_asymptomatic_sum', 'patient_nrow'])
pat_secon_trans_files = glob.glob(indir+'patient_seco_trans_count_0???.csv')
dfs_pat_secon = {}
for f in pat_secon_trans_files:
# dfs_pat_secon[os.path.splitext(os.path.basename(f))[0]] = pd.read_csv(f, index_col = False, usecols=['infect_symptomatic','infect_asymptomatic','trans_counts_to_pat','trans_counts_to_hcw'])
dfs_pat_secon[os.path.splitext(os.path.basename(f))[0]] = pd.read_csv(f, index_col = False, usecols=[1,2])
i = 0
for name in sorted(dfs_pat_secon):
pat_second_counts_mean.loc[i,'patient_symptomatic'] = dfs_pat_secon[name].infect_symptomatic.mean(skipna = True)
pat_second_counts_mean.loc[i,'patient_asymptomatic'] = dfs_pat_secon[name].infect_asymptomatic.mean(skipna = True)
# second_counts[name]['patient'] = dfs_pat_secon[name].fillna(0)['infect_symptomatic'] + dfs_pat_secon[name].fillna(0)['infect_asymptomatic']
# pat_second_counts_mean.loc[i, 'patient'] = second_counts[name].patient.mean(skipna = True)
pat_second_counts_sum.loc[i,'patient_symptomatic_sum'] = dfs_pat_secon[name].infect_symptomatic.sum(skipna = True)
pat_second_counts_sum.loc[i,'patient_asymptomatic_sum'] = dfs_pat_secon[name].infect_asymptomatic.sum(skipna = True)
pat_second_counts_sum.loc[i,'patient_nrow'] = len(dfs_pat_secon[name].index)
i += 1
#pat_second_counts_mean.to_csv(resultdir+'average_patient_second_counts_per_simulation_run.csv')
#### HCWs
hcw_second_counts_mean = pd.DataFrame(columns=['hcw_symptomatic','hcw_asymptomatic'])
hcw_second_counts_sum = pd.DataFrame(columns=['hcw_symptomatic_sum','hcw_asymptomatic_sum', 'hcw_nrow'])
hcw_secon_trans_files = glob.glob(indir+'hcw_seco_trans_count_0???.csv')
dfs_hcw_secon = {}
for f in hcw_secon_trans_files:
dfs_hcw_secon[os.path.splitext(os.path.basename(f))[0]] = pd.read_csv(f, index_col = False, usecols=[1,2])
i = 0
for name in sorted(dfs_hcw_secon):
hcw_second_counts_mean.loc[i,'hcw_symptomatic'] = dfs_hcw_secon[name].infect_symptomatic.mean(skipna = True)
hcw_second_counts_mean.loc[i,'hcw_asymptomatic'] = dfs_hcw_secon[name].infect_asymptomatic.mean(skipna = True)
# second_counts[name]['hcw'] = dfs_hcw_secon[name].fillna(0)['infect_symptomatic'] + dfs_hcw_secon[name].fillna(0)['infect_asymptomatic']
# hcw_second_counts_mean.loc[i, 'hcw'] = second_counts[name].hcw.mean(skipna = True)
hcw_second_counts_sum.loc[i,'hcw_symptomatic_sum'] = dfs_hcw_secon[name].infect_symptomatic.sum(skipna = True)
hcw_second_counts_sum.loc[i,'hcw_asymptomatic_sum'] = dfs_hcw_secon[name].infect_asymptomatic.sum(skipna = True)
hcw_second_counts_sum.loc[i,'hcw_nrow'] = len(dfs_hcw_secon[name].index)
i += 1
#hcw_second_counts_mean.to_csv(resultdir+'average_hcw_second_counts_per_simulation_run.csv')
second_counts_sum = pd.DataFrame(columns=['total_counts'])
second_counts_sum['total_counts'] = pat_second_counts_sum['patient_symptomatic_sum'] + pat_second_counts_sum['patient_asymptomatic_sum'] + hcw_second_counts_sum['hcw_symptomatic_sum'] + hcw_second_counts_sum['hcw_asymptomatic_sum']
### combined mean secondary transmission counts
average_second_trans_count = pd.concat([pat_second_counts_mean,hcw_second_counts_mean, pat_second_counts_sum, hcw_second_counts_sum, second_counts_sum], axis = 1)
average_second_trans_count.to_csv(resultdir+'average_second_trans_counts_per_simulation_run.csv')
| [
"thi.mui.pham@posteo.de"
] | thi.mui.pham@posteo.de |
08705d72ea243e75252f62cd0fc1ae2a2944a1bc | 84cbf37a81740c3bdd7c4dcf1a0afaf020ba0402 | /doc/conf.py | 68ccb1aa7dba38fd554f13850c279029c01c44f1 | [
"MIT"
] | permissive | realizeme/blinker-study | 396fc9ab8fc90b382e9da1057d44c59231fff90f | cb687d38b178aacd19bcc9e800e178948c6b7e9b | refs/heads/master | 2020-03-22T07:18:50.977690 | 2018-07-04T10:02:08 | 2018-07-04T10:02:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,065 | py | # -*- coding: utf-8 -*-
#
# app documentation build configuration file.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'app'
copyright = u'2018, hsboee'
author = u'hsboee'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0.dev0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0.dev0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'appdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'app.tex', u'app Documentation',
u'hsboee', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'projectname', u'app Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'app', u'app Documentation',
author, 'app', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| [
"hsboee@omnious.com"
] | hsboee@omnious.com |
b9b92335eba3bef772db5c6bf7d209beeab3bc9b | a7e38225a9cba48eafae6c9de303cb1de8339416 | /server/config.py | d159dedeae492c3f470bebc71328c21ed55b6193 | [] | no_license | TamirTapiro/Flask_Task_Api_with_Repl | 052767c5271b1617dff2c6b71bf402223e39b8f5 | 40c8881921327ffa70c0e26d27cb5b5a3545c43b | refs/heads/master | 2023-06-17T02:00:00.674117 | 2021-07-02T10:09:29 | 2021-07-02T10:09:29 | 381,181,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | # config.py
DEBUG = True
SECRET_KEY = 'TheMostSecretKeyInTheWrold'
SESSION_DURATION = 120
MONGO_URI = "***********************************************"
MY_DB = "todo_database"
TASKS_COLLECTION = "tasks"
USERS_COLLECTION = "users" | [
"tapirotamirsh@gmail.com"
] | tapirotamirsh@gmail.com |
1c5624a6732baf17a13c3731ad4d34cdcf59756b | 85af8d0e88196de1d316d6f3c7b214642bfe0951 | /python/rawToPOS.py | a5f56d07842250c0f07115b41a74748e51b7269f | [] | no_license | dileepkamath/Projects | 719c0796f2eddabe3afc788be24a345ab4d33105 | 92e6a1a2e4a4c281a57ce53cb480fe9da02240af | refs/heads/master | 2020-01-23T21:08:42.339435 | 2016-01-07T02:44:09 | 2016-01-07T02:44:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | import nltk, re, pprint
from nltk import word_tokenize
def ie_preprocess(document):
sentences = nltk.sent_tokenize(document)
sentences = [nltk.word_tokenize(sent) for sent in sentences]
sentences = [nltk.pos_tag(sent) for sent in sentences]
return sentences
source = "My grandfather is going to the hospital. Does she have illness?"
result = ie_preprocess(source)
print(result)
| [
"5884seiya@gmail.com"
] | 5884seiya@gmail.com |
43a5dfb42d3699d30d9b9899baa6db5097933efe | ce866aa2edc54a0666b9a84719335295fca3155e | /demos/demo.py | 56ed94a2e5a1ead1b7ad8be0cbc421af9b3ceb56 | [
"MIT"
] | permissive | Aboghazala/AwesomeTkinter | 8e9a3b0e20ce4b5469c08efd46d320fa2db4cc4a | 73f638ac432bafbbd4296588a3d20f27f8570577 | refs/heads/master | 2023-09-03T18:46:40.435405 | 2021-11-07T22:57:06 | 2021-11-07T22:57:06 | 295,693,212 | 96 | 12 | null | null | null | null | UTF-8 | Python | false | false | 1,332 | py | import tkinter as tk
from tkinter import ttk
import awesometkinter as atk
# our root
root = tk.Tk()
root.config(background=atk.DEFAULT_COLOR)
# select tkinter theme required for things to be right on windows,
# only 'alt', 'default', or 'classic' can work fine on windows 10
s = ttk.Style()
s.theme_use('default')
# 3d frame
f1 = atk.Frame3d(root)
f1.pack(side='left', expand=True, fill='both', padx=3, pady=3)
# 3d progressbar
bar = atk.RadialProgressbar3d(f1, fg='cyan', size=120)
bar.pack(padx=20, pady=20)
bar.start()
# 3d button
atk.Button3d(f1, text='3D Button').pack(pady=10)
f2 = atk.Frame3d(root)
f2.pack(side='left', expand=True, fill='both', padx=3, pady=3)
# flat radial progressbar
bar = atk.RadialProgressbar(f2, fg='green')
bar.pack(padx=30, pady=30)
bar.start()
atk.Button3d(f2, text='Pressed Button').pack(pady=10)
f3 = atk.Frame3d(root)
f3.pack(side='left', expand=True, fill='both', padx=3, pady=3)
atk.Radiobutton(f3, text="Radiobutton 1").pack(padx=20, pady=(20, 5))
atk.Radiobutton(f3, text="Radiobutton 2", ind_outline_color='white', ind_bg='yellow',
ind_mark_color='red').pack(padx=20, pady=5)
atk.Checkbutton(f3, text=" Checkbutton 1", check_mark_color='red', size=12).pack(padx=20, pady=(20, 5))
atk.Checkbutton(f3, text=" Checkbutton 2").pack(padx=20, pady=5)
root.mainloop()
| [
"mahmoud_elshahhat@yahoo.com"
] | mahmoud_elshahhat@yahoo.com |
a2ea39c1e9140dfea655dbbdad6828663edf2923 | bd6bf14c92de71a50d782b999b2e8471902c4b5d | /AE/ProblemInstances/HCSP-3O-MPE/generador_workload.py | 67e07b6f75b2ed3141cb32b637366aa2f97397e7 | [] | no_license | sudhanshupatra/hcsp-chc | 0e3f8b3757223af665fdaf49d44a5d5e2ff203da | 591a1746f6e85347057b16c0ebf26717d036cd53 | refs/heads/master | 2020-12-24T15:14:18.750958 | 2013-04-07T05:42:49 | 2013-04-07T05:42:49 | 35,659,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,787 | py | #encoding: utf-8
'''
Created on Oct 3, 2011
@author: santiago
'''
import sys
import random
# Todas las unidades de tiempo son segundos.
TO_min = 60 # 1 minuto.
TO_max = 20*60*60 # 20 horas.
# Intel Xeon E5440: cores=4, ssj_ops=150,979, E_IDLE=76.9, E_MAX=131.8
TO_default_ssj = int(150.979 / 4)
TO_min_ssj = float(TO_default_ssj) * float(TO_min)
TO_max_ssj = float(TO_default_ssj) * float(TO_max)
AO_lo = (5,20)
AO_med = (5,35)
AO_hi = (5,45)
if __name__ == '__main__':
argc = len(sys.argv)
if argc != 5:
print "Modo de uso: python %s <cant_tareas> <cant_maquinas> <heterogeneidad> <seed>" % sys.argv[0]
print " heterogeneidad: NONE=0, LOW=1, MEDIUM=2, HIGH=3"
exit(0)
cantidad_tareas = int(sys.argv[1])
cantidad_maquinas = int(sys.argv[2])
heterogeneidad = int(sys.argv[3])
current_seed = int(sys.argv[4])
# Configuro la heterogeneidad seleccionada.
if heterogeneidad == 1:
AO_hetero = AO_lo
elif heterogeneidad == 2:
AO_hetero = AO_med
elif heterogeneidad == 3:
AO_hetero = AO_hi
else:
AO_hetero = (0,0)
random.seed(current_seed)
for task in range(cantidad_tareas):
# Calculo el costo independiente de la máquina.
TO_current = long(random.uniform(TO_min_ssj, TO_max_ssj))
for machine in range(cantidad_maquinas):
# Calculo el costo del overhead adicional para cada posible máquina.
if heterogeneidad == 0:
AO_current = 0
else:
AO_current = random.randint(AO_hetero[0], AO_hetero[1])
#print(AO_current)
# Calculo TO * (1 + AO).
print long(TO_current * ((AO_current / 100.0) + 1))
| [
"santiago.iturriaga@2f77c51a-9e79-da1f-09b5-e1d637586647"
] | santiago.iturriaga@2f77c51a-9e79-da1f-09b5-e1d637586647 |
2cf1eca6b8cf431c5a069121e31fdf491999f772 | b559cf7ed26cde4289bc5d54cef8b5c6e3777c5d | /10-rest/server.py | 06dde7590f86c6fcaf5b8f771a91f7361f0f9696 | [] | no_license | mreichl-tgm/sew-5 | d2f5cb3f8a0a7b3351b6d34d6fb1c779c46ff594 | 32cf164349a6345a68ed2b019bf1cc9a54c64ed9 | refs/heads/master | 2021-01-23T20:05:18.787292 | 2018-06-27T09:19:55 | 2018-06-27T09:19:55 | 102,846,310 | 0 | 1 | null | 2018-03-20T19:43:26 | 2017-09-08T09:48:48 | HTML | UTF-8 | Python | false | false | 65 | py | class Server:
pass
if __name__ == "__main__":
Server()
| [
"markus@re1.at"
] | markus@re1.at |
0db94add5b7e49358560f7ec98edbab68fd92c66 | 1c338f83e97ea83a94cdc8fa1685885f2be96323 | /author_id_adaboost/nb_author_id.py | af629c6902da8805624f630f6f8a1751154778eb | [] | no_license | Lakshadeep/machine_learning | 5b175245856109e5360a3c7444eb8a87cb66edb2 | 622925011cea9990c5790d75781e6322429bec2a | refs/heads/master | 2020-12-24T06:51:16.276552 | 2016-08-03T18:26:05 | 2016-08-03T18:26:05 | 63,354,580 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,342 | py | #!/usr/bin/python
"""
This is the code to accompany the Lesson 1 (Naive Bayes) mini-project.
Use a Naive Bayes Classifier to identify emails by their authors
authors and labels:
Sara has label 0
Chris has label 1
"""
import sys
from time import time
from email_preprocess import preprocess
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()
# print features_test
#########################################################
### your code goes here ###
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
clf = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1), algorithm="SAMME", n_estimators=200)
t0 = time()
classifier = clf.fit(features_train, labels_train)
print "Training Time:", round(time()-t0, 3), "s"
t0 = time()
output = clf.predict(features_test)
print "Prediction Time:", round(time()-t0, 3), "s"
# print output
wrong_count = 0
for i in range(len(output)):
if(output[i] != labels_test[i]):
wrong_count = wrong_count + 1
print "Accuracy: " + str((1 - wrong_count / 250.0) * 100)
#########################################################
| [
"lakshadeep.naik@gmail.com"
] | lakshadeep.naik@gmail.com |
1d8d5b62fb8428f787d89e9d89d9b41d6c5cab39 | 91a20a1eefcf0549e6bb4fa1d3cc40a5fcc1bf7a | /mojprojekt/sklep/migrations/0001_initial.py | bd5f10a13fe020501b556ecf80434e421874109c | [] | no_license | Saiter711/Django-Case | 092f030aa336c62d2e7bd6988c8045f31622003e | f4846a9db1e67ccfc27b3cc89165061e0fae24e2 | refs/heads/master | 2020-12-01T21:37:13.205690 | 2020-03-05T14:23:38 | 2020-03-05T14:23:38 | 230,778,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 721 | py | # Generated by Django 2.2.7 on 2019-11-28 16:30
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('price', models.FloatField(default=0)),
('desc', models.CharField(max_length=500)),
('autor', models.CharField(max_length=40)),
('rok', models.IntegerField(default=0)),
],
),
]
| [
"P.Lempio@stud.elka.pw.edu.pl"
] | P.Lempio@stud.elka.pw.edu.pl |
6f9f8130e409b273fe17ea85e9f237087d3bd4de | a5e0453f56773229e2cfe6698d4a57b13b920bfe | /fixture/db.py | 2bad60e1c029ded2966c1e326612276c9cac8c69 | [
"Apache-2.0"
] | permissive | baowyld/Task-1.1 | cf1a602670ab1a8d7de136526a7acddfe6c1c5aa | 6fdce8203fb0ccf4edb35213b916026b609e496d | refs/heads/master | 2021-05-08T19:47:52.796637 | 2018-04-05T22:46:54 | 2018-04-05T22:46:54 | 119,579,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,423 | py | import pymysql.cursors
from model.contact import Contact
from model.group import Group
class DbFixture:
def __init__(self, host, name, user, password):
self.host = host
self.name = name
self.user = user
self.password = password
self.connection = pymysql.connect(host=host, database=name, user=user, password=password, autocommit=True)
self.connection.autocommit = True
def get_group_list(self):
list = []
cursor = self.connection.cursor()
try:
cursor.execute("select group_id, group_name, group_header, group_footer from group_list")
for row in cursor:
(id, name, header, footer) = row
list.append(Group(id=str(id), name=name, header=header, footer=footer))
finally:
cursor.close()
return list
def get_contact_list(self):
list = []
cursor = self.connection.cursor()
try:
#cursor.execute("select id, firstname, lastname from addressbook where deprecated='0000-00-00 00:00:00'")
cursor.execute("select id, firstname, middlename, lastname, nickname, title, company, address, home, mobile,"
" work, fax, email, email2, email3, homepage, address2, phone2 from addressbook"
" where deprecated='0000-00-00 00:00:00'")
for row in cursor:
# (id, firstname, lastname) = row
# list.append(Contact(id=str(id), firstname=firstname, lastname=lastname))
(id, firstname, middlename, lastname, nickname, title, company, address, homephone, mobilephone,
workphone, fax, email, email2, email3, homepage, secondaryaddress, secondaryphone) = row
list.append(Contact(id=str(id), firstname=firstname, middlename=middlename, lastname=lastname,
nickname=nickname, title=title, company=company, address=address,
homephone=homephone, mobilephone=mobilephone, workphone=workphone, fax=fax,
email=email, email2=email2, email3=email3, homepage=homepage,
secondaryaddress=secondaryaddress, secondaryphone=secondaryphone))
finally:
cursor.close()
return list
def destroy(self):
self.connection.close()
| [
"bao.wyld@gmail.com"
] | bao.wyld@gmail.com |
308de907bdf4c8d38504cf22864287ba066e8e21 | 49644566d1d421afcf31da9b374516216b448e67 | /spider/qiubai.py | e0b5dcccb623289a669c03e3517e03d15c241eb5 | [] | no_license | goosling/PythonTest | 6ba687decf19a415a110d764e3c2bdafe417a190 | 09bd6135cb5871abcc251fa0fc2ffc18600a4574 | refs/heads/master | 2021-01-10T10:48:01.436065 | 2016-01-09T00:47:43 | 2016-01-09T00:47:43 | 48,020,432 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,260 | py | __author__ = 'joe'
# -*- coding: utf-8 -*-
import urllib
import re
import thread
import time
import urllib2
class Spider:
def __init__(self):
self.page = 1
self.pages = []
self.enable = False
# 将所有的段子都抠出来,添加到列表中并返回列表
def GetPage(self, page):
myUrl = 'http://m.qiushibaike.com/hot/page/'+page
user_agent = user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
headers = {'User-Agent': user_agent}
req = urllib2.Request(myUrl, headers=headers)
response = urllib2.urlopen(req)
myPage = response.read()
unicodePage = myPage.decode('utf-8')
# 找出所有class='content'的div标记
myItems = re.findall('<div.*?class="content".*?title="(.*?)">(.*?)</div>', unicodePage, re.S)
items = []
for item in myItems:
# item中第一个div是标题,即时间
# item中第二个div是内容
items.append([item[0].replace('\n', ''), item[1].replace('\n', '')])
return items
#用于加载新的段子
def loadPage(self):
# 如果用户未输入quit则一直运行
while self.enable:
if len(self.pages) < 2:
try:
myPage = self.getPage(str(self.page))
self.page += 1
self.pages.append(myPage)
except:
print '无法连接到糗百'
else:
time.sleep(1)
def showPage(self, nowPage, page):
for items in nowPage:
print u'第%d页' % page, items[0], items[1]
myInput = raw_input()
if myInput == 'quit':
self.enable = False
break
def start(self):
self.enable = True
page = self.page
print u'正在加载请稍候。。。。'
thread.start_new_thread(self.loadPage, ())
while self.enable:
if self.pages:
nowPage = self.pages[0]
del self.pages[0]
self.showPage(nowPage, page)
page += 1
print u'请按下回车浏览今日内容'
raw_input(' ')
myModel = Spider()
myModel.start()
| [
"joehuang920@gmail.com"
] | joehuang920@gmail.com |
1c11ede580dfd0e97ca7791608ac08e13a0fb46d | 58c29a2e7d0000a4bc4781dc35ea5a6f693ff9b4 | /ch13-keras.py | 2811a268cec278446ad765827e934504cd181cdc | [] | no_license | Najah-lshanableh/python-ML-book-Raschka | b251b86ce66ea175e7c493d7a06ac07aa8a85d8e | 3e69c6f9ee8514888b45e8a882c25bafafd7f3d5 | refs/heads/master | 2020-07-01T05:04:24.064308 | 2016-10-12T08:02:37 | 2016-10-12T08:02:37 | 74,093,587 | 2 | 1 | null | 2016-11-18T03:54:55 | 2016-11-18T03:54:55 | null | UTF-8 | Python | false | false | 2,719 | py | import os
import struct
import numpy as np
def load_mnist(path, kind='train'):
"""Load MNIST data from `path`"""
labels_path = os.path.join(path,
'%s-labels.idx1-ubyte'
% kind)
images_path = os.path.join(path,
'%s-images.idx3-ubyte'
% kind)
with open(labels_path, 'rb') as lbpath:
magic, n = struct.unpack('>II',
lbpath.read(8))
labels = np.fromfile(lbpath,
dtype=np.uint8)
with open(images_path, 'rb') as imgpath:
magic, num, rows, cols = struct.unpack(">IIII",
imgpath.read(16))
images = np.fromfile(imgpath,
dtype=np.uint8).reshape(len(labels), 784)
return images, labels
X_train, y_train = load_mnist('mnist', kind='train')
print('Rows: %d, columns: %d' % (X_train.shape[0], X_train.shape[1]))
X_test, y_test = load_mnist('mnist', kind='t10k')
print('Rows: %d, columns: %d' % (X_test.shape[0], X_test.shape[1]))
import theano
theano.config.floatX = 'float32'
X_train = X_train.astype(theano.config.floatX)
X_test = X_test.astype(theano.config.floatX)
from keras.utils import np_utils
print('First 3 labels: ', y_train[:3])
y_train_ohe = np_utils.to_categorical(y_train)
print('\nFirst 3 labels (one-hot):\n', y_train_ohe[:3])
from keras.models import Sequential
from keras.layers.core import Dense
from keras.optimizers import SGD
np.random.seed(1)
model = Sequential()
model.add(Dense(input_dim=X_train.shape[1],
output_dim=50,
init='uniform',
activation='tanh'))
model.add(Dense(input_dim=50,
output_dim=50,
init='uniform',
activation='tanh'))
model.add(Dense(input_dim=50,
output_dim=y_train_ohe.shape[1],
init='uniform',
activation='softmax'))
sgd = SGD(lr=0.001, decay=1e-7, momentum=.9)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
model.fit(X_train, y_train_ohe,
nb_epoch=50,
batch_size=300,
verbose=1,
validation_split=0.1,
show_accuracy=True)
y_train_pred = model.predict_classes(X_train, verbose=0)
print('First 3 predictions: ', y_train_pred[:3])
train_acc = np.sum(y_train == y_train_pred, axis=0) / X_train.shape[0]
print('Training accuracy: %.2f%%' % (train_acc * 100))
y_test_pred = model.predict_classes(X_test, verbose=0)
test_acc = np.sum(y_test == y_test_pred, axis=0) / X_test.shape[0]
print('Test accuracy: %.2f%%' % (test_acc * 100))
| [
"robbie@soha.io"
] | robbie@soha.io |
7f18b56489ef36f4e2391878671a569f4252027d | 1ac9f756c5bab3ae8ae2df8daa596b6fc55b63d1 | /backend/accounts/views.py | c3104129fe20c9ad274477dc8f541600ce56fc03 | [] | no_license | woorud/facebook_clone | 6520adbf5e5aaeb3f517abe7920a0b90096e4f89 | a5b96f215c74e2960465cd2a96568e57db92043c | refs/heads/master | 2022-12-11T18:26:07.648768 | 2020-08-29T14:45:43 | 2020-08-29T14:45:43 | 277,793,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,907 | py | from django.shortcuts import render, redirect
from .models import *
from django.contrib.auth import authenticate, login
from django.contrib.auth import logout as django_logout
from .forms import SignupForm, LoginForm
from django.shortcuts import get_object_or_404
from django.contrib.auth import get_user_model
from django.http import HttpResponse
import json
def signup(request):
if request.method == 'POST':
form = SignupForm(request.POST, request.FILES)
if form.is_valid():
user = form.save()
return redirect('accounts:login')
else:
form = SignupForm()
return render(request, 'accounts/signup.html', {
'form':form,
})
def login_check(request):
if request.method == 'POST':
form = LoginForm(request.POST)
name = request.POST.get('username')
pwd = request.POST.get('password')
user = authenticate(username = name, password = pwd)
if user is not None:
login(request, user)
return redirect('/')
else:
form = LoginForm()
return render(request, 'accounts/login.html', {
'form':form
})
def logout(request):
django_logout(request)
return redirect('/')
def create_friend_request(request):
    """Create a friend request from the logged-in user to the user whose
    primary key is POSTed as ``pk``; respond with a small JSON status body."""
    target_pk = request.POST.get('pk', None)
    sender = request.user
    receiver = get_object_or_404(get_user_model(), pk=target_pk)
    try:
        sender.friend_requests.create(from_user=sender, to_user=receiver)
        # NOTE: the 'succes' spelling is preserved -- client code may match it.
        context = {'result': 'succes'}
    except Exception as ex:
        print('에러가 발생했습니다', ex)  # ex is the raised exception instance
        context = {'result': 'error'}
    return HttpResponse(json.dumps(context), content_type="application/json")
def accept_friend_request(request):
    """Accept a pending FriendRequest: create the two reciprocal Friend rows
    (sharing one chat room) and delete the request.

    Responds with JSON ``{'result': 'success'}`` or ``{'result': 'error'}``.
    """
    friend_request_id = request.POST.get('pk', None)
    # The request being accepted
    friend_request = FriendRequest.objects.get(pk=friend_request_id)
    # Requesting user
    from_user = friend_request.from_user
    # Target user
    to_user = friend_request.to_user
    try:
        # Bug fix: the Room creation below was commented out, leaving ``room``
        # undefined, so every accept raised NameError and fell into the except
        # branch.  Recreate the shared chat room before linking the friends.
        # (Room is expected to come from the ``from .models import *`` at the
        # top of this module -- TODO confirm.)
        room_name = "{},{}".format(from_user.username, to_user.username)
        room = Room.objects.create(room_name=room_name)
        # Friendship is symmetric: store one row per direction.
        Friend.objects.create(user=from_user, current_user=to_user, room=room)
        Friend.objects.create(user=to_user, current_user=from_user, room=room)
        # The request is fulfilled, so remove it.
        friend_request.delete()
        context = {
            'result': 'success',
        }
    except Exception as ex:
        print('에러가 발생했습니다', ex)
        context = {
            'result': 'error',
        }
    return HttpResponse(json.dumps(context), content_type="application/json")
| [
"woorud96@gmail.com"
] | woorud96@gmail.com |
59b2d6adf466bc8e3c788a1859a533dc0a502e2e | f5ca0a9f4e68c4b0d0986e074a22ee7de3fec085 | /api/config.py | a13eaf294d6bc09a5d8a1cdd0ee5dbbe3bdb783a | [] | no_license | awaris123/gym-notes | 80800bdd91c5b3300fbc4a4d78af2b3c32a0607c | 7a23f6824d1d02ca1c53a8e8d24efcca770d7cf1 | refs/heads/master | 2022-12-12T10:38:09.748999 | 2019-11-08T22:07:33 | 2019-11-08T22:07:33 | 190,944,746 | 0 | 0 | null | 2022-12-08T06:15:55 | 2019-06-08T23:30:14 | Dart | UTF-8 | Python | false | false | 231 | py | import firebase_admin
from firebase_admin import credentials, auth
import json
# Load the service-account key and initialise the Firebase Admin SDK once at
# import time.  Bug fix: the original kept the key file open forever (and
# bound it to ``file``, shadowing the py2 builtin); ``with`` closes it.
with open('keys.json', 'r') as key_file:
    keys = json.load(key_file)
cred = credentials.Certificate(keys['firebase-admin-key'])
firebase_admin.initialize_app(cred)
| [
"awaris@hawk.iit.edu"
] | awaris@hawk.iit.edu |
f06a21f022b3d3742cee8df6c8048fcc34022202 | a51854991671a4389902945578288da34845f8d9 | /libs/UserInterface/TestPages/LampHolderTest.py | e9567659c28b0e4822d07ddbb3702556f7e9276b | [] | no_license | wuyou1102/DFM_B2 | 9210b4b8d47977c50d92ea77791f477fa77e5f83 | 69ace461b9b1b18a2269568110cb324c04ad4266 | refs/heads/master | 2020-04-13T18:54:20.045734 | 2019-06-17T12:46:23 | 2019-06-17T12:46:23 | 163,387,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,838 | py | # -*- encoding:UTF-8 -*-
import wx
import logging
import Base
from libs import Utility
from libs.Config import Font
from libs.Config import Color
from libs.Config import String
logger = logging.getLogger(__name__)
class LampHolder(Base.TestPage):
    """Fixture-LED ("lamp holder") test page.

    The operator presses a button that switches the fixture LEDs on, then
    judges the result by eye; per the on-screen instructions, all indicator
    lights lit means PASS and anything else FAIL.
    """
    def __init__(self, parent, type):
        # ``type`` shadows the builtin but matches the Base.TestPage
        # constructor signature used throughout the suite.
        Base.TestPage.__init__(self, parent=parent, type=type)
        self.count = 0  # initialised but never read in this class -- purpose unclear (TODO confirm)
    def init_test_sizer(self):
        """Build the page body: a large "turn on LED" button above a
        read-only instruction box (Chinese text) with the pass/fail rules.
        Returns the populated vertical sizer."""
        sizer = wx.BoxSizer(wx.VERTICAL)
        turn_on_button = wx.Button(self, wx.ID_ANY, u"开启LED", wx.DefaultPosition, (-1, 60), 0)
        turn_on_button.SetFont(Font.NORMAL_20_BOLD)
        turn_on_button.Bind(wx.EVT_BUTTON, self.on_button_click)
        output = wx.TextCtrl(self, -1, "", style=wx.TE_MULTILINE | wx.TE_READONLY)
        output.AppendText(u"请检查治具上的指示灯是否全亮\n")
        output.AppendText(u"\n")
        output.AppendText(u"判断条件:\n")
        output.AppendText(u"    指示灯全亮   PASS\n")
        output.AppendText(u"    其他情况     FAIL\n")
        output.SetInsertionPointEnd()
        output.SetBackgroundColour(Color.LightSkyBlue1)
        output.SetFont(Font.DESC)
        sizer.Add(turn_on_button, 0, wx.EXPAND | wx.ALL, 1)
        sizer.Add(output, 1, wx.EXPAND | wx.ALL, 1)
        return sizer
    def before_test(self):
        # No preparation needed before this purely visual test.
        pass
    def on_button_click(self, event):
        """Unload the protocol stack; on success show a 3-second countdown
        dialog while the LEDs come on."""
        comm = self.get_communicate()
        if comm.unload_protocol_stack():
            dlg = Utility.Alert.CountdownDialog(u"正在开启LED灯")
            dlg.Countdown(3)
    def start_test(self):
        self.FormatPrint(info="Started")
    def stop_test(self):
        self.FormatPrint(info="Stop")
    @staticmethod
    def GetName():
        # Display name of this page ("lamp holder test").
        return u"灯座测试"
    @staticmethod
    def GetFlag(t):
        # Implicitly returns None for any type other than "PCBA" --
        # presumably only PCBA runs this test (TODO confirm).
        if t == "PCBA":
            return String.LAMP_HOLDER_PCBA
| [
"jotey@qq.com"
] | jotey@qq.com |
a22ffc16dfff771c3f037f2cf3410d17066bbd79 | 1f080333f1714ba88d4f41d6ce2676f0b299e05e | /.venv/bin/maf_extract_ranges_indexed.py | 011751629233c72c0d998a7fdd8de77cfa72ed42 | [] | no_license | venice-juanillas/EIB-hackathon | b66bf128144dcef893c91af84dc28ff48be08e1b | 6b73babff2b88dccbd5ec2e74bd5737ff0a4270b | refs/heads/master | 2022-11-17T23:52:24.365210 | 2018-04-05T01:56:17 | 2018-04-05T01:56:17 | 120,545,413 | 0 | 1 | null | 2022-10-25T18:54:52 | 2018-02-07T01:19:48 | Python | UTF-8 | Python | false | false | 4,702 | py | #!/home/galaxy/data/galaxy_17.09/.venv/bin/python2.7
"""
Reads a list of intervals and a maf. Produces a new maf containing the
blocks or parts of blocks in the original that overlapped the intervals.
It is assumed that each file `maf_fname` has a corresponding `maf_fname`.index
file.
NOTE: If two intervals overlap the same block it will be written twice. With
non-overlapping intervals and --chop this is never a problem.
NOTE: Intervals are origin-zero, half-open. For example, the interval 100,150
is 50 bases long, and there are 100 bases to its left in the sequence.
NOTE: Intervals are relative to the + strand, regardless of the strands in
the alignments.
WARNING: bz2/bz2t support and file cache support are new and not as well
tested.
usage: %prog maf_fname1 maf_fname2 ... [options] < interval_file
-m, --mincols=0: Minimum length (columns) required for alignment to be output
-c, --chop: Should blocks be chopped to only portion overlapping (no by default)
-s, --src=s: Use this src for all intervals
-p, --prefix=p: Prepend this to each src before lookup
-d, --dir=d: Write each interval as a separate file in this directory
-S, --strand: Strand is included as an additional column, and the blocks are reverse complemented (if necessary) so that they are always on that strand w/r/t the src species.
-C, --usecache: Use a cache that keeps blocks of the MAF files in memory (requires ~20MB per MAF)
"""
import psyco_full
from bx.cookbook import doc_optparse
import bx.align.maf
from bx import misc
import os
import sys
def main():
    """Extract MAF blocks overlapping the stdin intervals (see module docstring).

    Bug fix vs. the original: the final ``out.close()`` is now done only in
    single-output mode.  With ``--dir`` each per-interval writer is already
    closed inside the loop, and the unconditional close raised NameError when
    no intervals were read at all (and double-closed the last writer otherwise).
    """
    # Parse Command Line
    options, args = doc_optparse.parse( __doc__ )
    try:
        maf_files = args
        if options.mincols: mincols = int( options.mincols )
        else: mincols = 0
        if options.src: fixed_src = options.src
        else: fixed_src = None
        if options.prefix: prefix = options.prefix
        else: prefix = None
        if options.dir: dir = options.dir
        else: dir = None
        chop = bool( options.chop )
        do_strand = bool( options.strand )
        use_cache = bool( options.usecache )
    except Exception:
        # Narrowed from a bare ``except:`` so Ctrl-C is not turned into a
        # usage message; any malformed option still prints usage and exits.
        doc_optparse.exit()
    # Open indexed access to mafs (each maf_fname needs a maf_fname.index)
    index = bx.align.maf.MultiIndexed( maf_files, keep_open=True,
                                       parse_e_rows=True,
                                       use_cache=use_cache )
    # Start MAF on stdout unless writing one file per interval
    if dir is None:
        out = bx.align.maf.Writer( sys.stdout )
    # Iterate over input ranges
    for line in sys.stdin:
        strand = None
        fields = line.split()
        if fixed_src:
            # With --src the input lines are just "start end [strand]"
            src, start, end = fixed_src, int( fields[0] ), int( fields[1] )
            if do_strand: strand = fields[2]
        else:
            src, start, end = fields[0], int( fields[1] ), int( fields[2] )
            if do_strand: strand = fields[3]
        if prefix: src = prefix + src
        # Find overlap with reference component
        blocks = index.get( src, start, end )
        # Open a per-interval output file if needed
        if dir:
            out = bx.align.maf.Writer( open( os.path.join( dir, "%s:%09d-%09d.maf" % ( src, start, end ) ), 'w' ) )
        # Write each intersecting block
        if chop:
            for block in blocks:
                for ref in block.get_components_by_src( src ):
                    # Trim the block to just the overlapping columns
                    slice_start = max( start, ref.get_forward_strand_start() )
                    slice_end = min( end, ref.get_forward_strand_end() )
                    if (slice_end <= slice_start): continue
                    sliced = block.slice_by_component( ref, slice_start, slice_end )
                    # If the block is shorter than the minimum allowed size, skip it
                    if mincols and ( sliced.text_size < mincols ):
                        continue
                    # If the reference component is empty, don't write the block
                    if sliced.get_component_by_src( src ).size < 1:
                        continue
                    # Keep only components that are not empty
                    sliced.components = [ c for c in sliced.components if c.size > 0 ]
                    # Reverse complement if needed
                    if ( strand != None ) and ( ref.strand != strand ):
                        sliced = sliced.reverse_complement()
                    # Write the block
                    out.write( sliced )
        else:
            for block in blocks:
                out.write( block )
        if dir:
            out.close()
    # Close the stdout writer only in single-output mode; per-interval
    # writers were closed inside the loop above.
    if dir is None:
        out.close()
    index.close()

if __name__ == "__main__":
    main()
| [
"v.juanillas@irri.org"
] | v.juanillas@irri.org |
e8aef7a1e7a43d8d224a77625deb5fe27c1db496 | 05225e9eacafad865fd31f305711c6dbfde316ec | /actmoi.py | c312feba7c799ae0e0526cea9491e28217b7d285 | [] | no_license | borisenglebert/SVVA23 | c521167e5c1540b86943c5ee9207d342e18594d4 | 6ed0636c33691a0336e6441e24e8da5d45ae60d2 | refs/heads/master | 2020-04-23T11:40:05.486926 | 2019-03-02T14:44:26 | 2019-03-02T14:44:26 | 171,143,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,065 | py | ##------------------ACTUAL MOI----------
##This script finds the actual moments of inertia Iz'z' and Iy'y' of the cross section, based on the
##stiffener point areas (that only contribute due to the Steiner terms) and the angled thin wall
##rectangular sections and the thin-walled semicircle. These moments of inertia can then be used
##to find the sigma ratios and as such the simplified section. All units in meters.
#-------FUNCTION INPUTS-------
##This module requires for both the actual centroid and the cross section geometry to be imported
def actualmoi(m):
    """Return (Izztot, Iyytot): the cross-section's actual moments of inertia
    about the centroidal axes, summing the two angled skin panels, the
    semicircular leading edge, the spar, and the stiffener point areas
    (Steiner terms only).  All units in metres / m^4.

    m -- number of stiffeners, used only for the spacing ``phi`` below.
    """
    from math import sqrt, pi
    from cs import crosssec
    from actcent import centactual
    c = 0.515 # aileron chord
    h = 0.248 # aileron height
    r = h / 2 # leading edge section radius
    le = sqrt((c - r) ** 2 + r ** 2) # length of linear section
    circ = pi * r + 2 * le # circumference of cross section
    phi = circ /m # stiffener spacing -- NOTE(review): computed but never used below
    Astiff = 5.4*10**(-5) #Stiffener point area (m^2)
    tsk = 0.0011 #skin thickness
    tsp = 0.0022 #spar thickness
    # NOTE(review): the helpers are called with the literal 11 rather than the
    # parameter ``m`` -- confirm whether that is intentional.
    zc = centactual(11)
    nstiff, zpos, ypos = crosssec(11)
    #-----Thin walled sections Izz-----
    Izz1 = le*tsk*r**2/12+le*tsk*(0.5*(c-r)-zc)**2 #Izz of linear sections
    Izz2 = pi*r**3*tsk/2+pi*r*tsk*((c-r)+2*r/pi-zc)**2 #Izz of semicircle
    Izzsp = tsp*h**3/12 #Izz of spar
    #-----Thin walled section Iyy-----
    # NOTE(review): the Steiner term (0.5*r)**2 has no area factor (compare
    # Izz1 above) -- looks like a missing le*tsk multiplier; confirm.
    Iyy1 = le*tsk*(c-r)**2/12+(0.5*r)**2 #Iyy of linear section
    # NOTE(review): the second term lacks a squared offset (compare Izz2) --
    # confirm against the derivation.
    Iyy2 = pi*r**3*tsk/2+pi*r*tsk*((c-r)) #Iyy of semicircle
    Iyysp = tsp*h*((c-r)-zc)**2
    Izzst = []
    Iyyst = []
    # NOTE(review): ``break`` stops the whole loop at index 4, so stiffeners
    # 5..n are never counted; skipping indices 4 and 8 (spar positions?)
    # would need ``continue`` instead -- confirm intent.
    for n in range(len(zpos)):
        if n==4 or n==8:
            break
        else:
            izz = Astiff*(zpos[n]-zc)**2
            Izzst.append(izz)
    # Same ``break``-vs-``continue`` concern as the loop above.
    for k in range(len(zpos)):
        if k==4 or k==8:
            break
        else:
            iyy = Astiff*(ypos[k])**2
            Iyyst.append(iyy)
    Izztot = sum(Izzst)+2*Izz1+Izz2+Izzsp
    Iyytot = sum(Iyyst)+2*Iyy1+Iyy2+Iyysp
    return Izztot, Iyytot
| [
"noreply@github.com"
] | borisenglebert.noreply@github.com |
3d01caa59ff32f00f9e86dd757e073b4f4a17609 | cac2c04fa7e0a9bc1fbd4fca5be00030f07e01c0 | /fundamentals/trie/trie.py | 8887f2c2eacc8c0c35ff18091f291db2d656047c | [
"Apache-2.0"
] | permissive | davjohnst/fundamentals | b5d27286531f54eb082a98d30dfa5c138bf3dc51 | f8aff4621432c3187305dd04563425f54ea08495 | refs/heads/master | 2021-01-01T16:39:35.315050 | 2015-11-15T00:32:53 | 2015-11-15T00:32:53 | 32,647,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,060 | py | #!/usr/bin/env python
class TrieNode(object):
    """One trie node: a letter, an ``is_terminal`` flag marking word ends,
    a dict of children keyed by letter, and an optional stored value."""
    def __init__(self, letter, is_terminal=True, children=None, value=None):
        # Bug fix: the default used to be the mutable literal ``{}``, which
        # is evaluated once at definition time -- every node built with the
        # default (notably every Trie's head) shared one children dict.
        self.letter = letter
        self.is_terminal = is_terminal
        self.children = {} if children is None else children
        self.value = value

class Trie(object):
    """Prefix tree mapping words to optional values."""
    def __init__(self):
        # Pass an explicit fresh dict so independent Trie instances never
        # share children, even if TrieNode's default is left unfixed.
        self.head = TrieNode("", is_terminal=False, children={})
    def contains(self, word):
        """Return True iff *word* was previously stored with put()."""
        current = self.head
        for letter in word:
            if letter not in current.children:
                return False
            current = current.children[letter]
        return current.is_terminal
    def get(self, word):
        """Return the value stored for *word*, or None if absent."""
        current = self.head
        for letter in word:
            if letter not in current.children:
                return None
            current = current.children[letter]
        return current.value
    def put(self, word, value=None):
        """Insert *word* (with optional *value*), creating nodes as needed."""
        current = self.head
        for letter in word:
            if letter not in current.children:
                current.children[letter] = TrieNode(letter, is_terminal=False, children={})
            current = current.children[letter]
        current.is_terminal = True
        current.value = value
    def num_nodes(self):
        """Total node count, including the root."""
        return self._num_nodes_under(self.head)
    def _num_nodes_under(self, node):
        """Count *node* plus every node beneath it (recursive)."""
        if not node.children:
            return 1
        return 1 + sum(self._num_nodes_under(c) for c in node.children.values())
def main():
    """Load the system word list into a trie and print compression stats:
    total characters stored, node count, and characters per node."""
    trie = Trie()
    total_chars = 0
    with open("/usr/share/dict/words", "r") as words:
        for raw_line in words:
            word = raw_line.lower().strip()
            trie.put(word)
            total_chars += len(word)
    print(total_chars)
    print(trie.num_nodes())
    print(total_chars / float(trie.num_nodes()))

if __name__ == "__main__":
    main()
"dajohnston@ucdavis.edu"
] | dajohnston@ucdavis.edu |
dcda4ae98e5ceea8422c2a9d5b281462addc5b6e | 4047b91585245c3ee5ea6c50a620dadf74636bc3 | /phylobot/phylobot/admin.py | e38df56d65328e8a83b088332fe4a4404c4facb6 | [] | no_license | httang12/phylobot-django | fd371cc870f444cf94179d6a3cc6d23e9895186c | b535edfd1ee09dab02421ba22d96d48b3f611dad | refs/heads/master | 2020-04-15T12:53:13.349661 | 2018-02-15T08:46:08 | 2018-02-15T08:46:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | from django.contrib import admin
# Make sure every installed app's admin module is loaded before registering.
admin.autodiscover()
from phylobot.models import *
from phylobot.models_aws import *
# NOTE(review): leftover debug output -- printed on every import of this module.
print "\n\n\n phylobot admin\n\n\n"
# Expose the PhyloBot models in the Django admin site.
admin.site.register(UserProfile)
admin.site.register(AncestralLibrary)
admin.site.register(AWSConfiguration)
admin.site.register(ViewingPrefs)
admin.site.register(AncestralLibrarySourceJob)
| [
"victorhansonsmith@gmail.com"
] | victorhansonsmith@gmail.com |
0970a2abc0d9d65f4b605e3d42e8da253566a347 | aa35f2dcdcb2abddddeb0635eb2bfbe40d8eeaff | /main.py | 88f2dae6ff0b8752c25c803bb57ed22ede3e56ff | [] | no_license | dminiotas05/FromZero | 9654e9ea1e7dc14fee184f28ac0ee803feb4c2fa | 57130b91749239b1ff560d79f5a8607c6ddffcf4 | refs/heads/master | 2022-10-27T09:20:02.714674 | 2020-06-16T12:20:39 | 2020-06-16T12:20:39 | 260,192,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | import statistics
import numpy as np
def std_deviation(sarasas):
    """Return the sample standard deviation of the numbers in *sarasas*."""
    return statistics.stdev(sarasas)
def mean(sarasas):
    """Return the arithmetic mean of *sarasas* (computed via NumPy)."""
    return np.mean(sarasas)
def list_sum(sarasas):
    """Return the sum of all elements in *sarasas*."""
    return sum(sarasas)
# Interactive driver: read ``n`` integers from stdin, then report their
# standard deviation, mean, and sum.
sarasas = []
n = int(input("Enter number of elements : "))
for i in range(n):
    skaicius = int(input())
    sarasas.append(skaicius)
print("Std deviation:", std_deviation(sarasas))
print("Mean:", mean(sarasas))
print("Sum:", list_sum(sarasas))
| [
"dariusm@neurotechnology.com"
] | dariusm@neurotechnology.com |
8a4459a7d7e37d05862cfd7f202f778254a52089 | a351f3aaad20b2e4706621e9c8ae5857680e4ff4 | /xunfei/xunfei_client.py | 1b688dca580910caea604c4da58dbbbd1c15cfc5 | [
"MIT"
] | permissive | wangjinyu124419/long-audio-asr | 6c8bca371152f8cdaa6522f7da47352e4d5d24ac | d8dabf6cb10b282e3bd4981207c4a0f478977c9b | refs/heads/master | 2020-05-19T05:59:32.753429 | 2019-05-04T06:37:24 | 2019-05-04T06:37:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,926 | py | import time
from xunfei.weblfasr_python3_demo import RequestApi
import json
# Xunfei LFASR API credentials.
# NOTE(review): credentials are hard-coded in source; move them to an
# environment variable or config file before sharing/publishing this code.
appid="5c458f95"
secret_key="8c6cc2043040a13ff36c5ead9349c530"
def get_result(file_path):
    """Upload *file_path* to the Xunfei LFASR service and return the full
    transcript: the 'onebest' text of every returned segment, joined in order."""
    api = RequestApi(appid=appid, secret_key=secret_key, upload_file_path=file_path)
    response = api.all_api_request()
    # 'data' is a JSON-encoded list of segment dicts.
    segments = json.loads(response.get('data'))
    return ''.join(segment.get('onebest') for segment in segments)
if __name__ == '__main__':
    import os

    # Transcribe every .wav file under ``files_path`` and append the results
    # to xunfei.txt, one "name:transcript" line per file.
    files_path = '/home/wangjinyu/workproject/long_audio_asr/mp3_audio'
    # Bug fix: the output file handle was opened with open() and never
    # closed; the ``with`` block guarantees it is flushed and closed.
    with open(os.path.join(files_path, 'xunfei.txt'), 'a') as out_file:
        for name in os.listdir(files_path):
            if not name.endswith('wav'):
                continue
            file_path = os.path.join(files_path, name)
            print(file_path)
            res = get_result(file_path)
            print(res)
            out_file.write(name + ':' + res + '\n')
            time.sleep(2)  # throttle requests to the API
| [
"wangjinyu@deeplycurious.ai"
] | wangjinyu@deeplycurious.ai |
f5a1122f866bed45225e696274ecc24d0f763dec | 0f3d71597610a7a2dfb92a105feec894c4b664f2 | /encodecode.py | 8933d7010a07e4567395e2ce4fd2b3161ef496d9 | [] | no_license | phin01/encoder | e893c364cd9beec662ae6c21c1d5a1dec535b675 | 8322bbde74a802de4ba21ae3c77780874aaf04b0 | refs/heads/master | 2022-11-25T10:26:23.412044 | 2020-08-01T17:35:18 | 2020-08-01T17:35:18 | 281,481,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,966 | py | import pandas as pd
from cryptography.fernet import Fernet
import base64
class EncoDeco():
    """Encode/decode the rows of a headerless CSV file.

    Each row is pipe-joined into one string and transformed with one of three
    schemes: Fernet, a Vigenere-style character shift, or a scrambled base64
    variant (the default).  The keys are fixed in source, so this provides
    obfuscation, not real secrecy.
    """
    def __init__(self):
        # Hard-coded keys: acceptable for obfuscation, not for security.
        self._fernet_key = '7AbQpZWYYi96g1nmNTcYxFxg04Qi5Rfmd7drVqhL1t8='
        self._vigenere_key = 'knrStW0PJDgn3e1PaQw3QXxq5oqAzCKJ7XwrnHLWkcihd_7'
    # ------------------------------------------------
    # Handle CSV files
    # ------------------------------------------------
    def load_csv(self, filename: str, separator: str) -> pd.DataFrame:
        """Read a headerless CSV into a DataFrame; return None on any failure."""
        try:
            df = pd.read_csv(filename, sep=separator, header=None)
        except Exception:
            # Narrowed from a bare ``except:`` (which also swallowed Ctrl-C).
            df = None
        return df
    def store_csv(self, df: pd.DataFrame, filename: str, separator: str) -> bool:
        """Write *df* to CSV without header or index; True on success."""
        try:
            df.to_csv(filename, sep=separator, header=None, index=None)
            return True
        except Exception:
            return False
    # ------------------------------------------------
    # Encode/Decode functions
    # ------------------------------------------------
    def encode(self, df: pd.DataFrame, method="base64") -> pd.DataFrame:
        """Append a column named after *method* holding each encoded row."""
        # Join every cell of a row with '|' so each row encodes as one string.
        df['concat'] = ['|'.join(row) for row in df[df.columns[0:]].astype(str).values]
        if method == "fernet":
            df['fernet'] = [self._fernet_encode(row.encode('utf-8')) for row in df['concat'].values]
        if method == "vigenere":
            df['vigenere'] = [self._vigenere_encode(row) for row in df['concat'].values]
        if method == "base64":
            df['base64'] = [self._scramble64(row) for row in df['concat'].values]
        df = df.drop(columns=['concat'])
        return df
    def decode(self, df: pd.DataFrame, method="base64") -> pd.DataFrame:
        """Decode column 0 of *df* and return the '|'-split fields, plus the
        original encoded string in an 'encoded' column."""
        # Bug fix: removed a leftover debug print ("oxe") that ran on every call.
        if method == "fernet":
            df['converted'] = [str(self._fernet_decode(row)) for row in df[0].values]
        if method == "vigenere":
            df['converted'] = [str(self._vigenere_decode(row)) for row in df[0].values]
        if method == "base64":
            df['converted'] = [str(self._unscramble64(row)) for row in df[0].values]
        df_split = df['converted'].str.split('|', expand=True)
        df_split['encoded'] = df[0]
        return df_split
    # ------------------------------------------------
    # Fernet encode/decode helper functions
    # ------------------------------------------------
    def _fernet_encode(self, message: bytes) -> bytes:
        """Encrypt *message* with the fixed Fernet key."""
        return Fernet(self._fernet_key).encrypt(message)
    def _fernet_decode(self, token: bytes) -> bytes:
        """Decrypt a Fernet *token* produced by _fernet_encode."""
        return Fernet(self._fernet_key).decrypt(token)
    # ------------------------------------------------
    # Vigenere encode/decode helper functions
    # https://gist.github.com/gowhari/fea9c559f08a310e5cfd62978bc86a1a
    # ------------------------------------------------
    def _vigenere_encode(self, string: str) -> str:
        """Shift each char of *string* by the matching key char (mod 256)."""
        key = self._vigenere_key
        encoded_chars = []
        for i in range(len(string)):
            key_c = key[i % len(key)]
            encoded_c = chr(ord(string[i]) + ord(key_c) % 256)
            encoded_chars.append(encoded_c)
        encoded_string = ''.join(encoded_chars)
        return encoded_string
    def _vigenere_decode(self, string: str) -> str:
        """Inverse of _vigenere_encode."""
        key = self._vigenere_key
        encoded_chars = []
        for i in range(len(string)):
            key_c = key[i % len(key)]
            encoded_c = chr((ord(string[i]) - ord(key_c) + 256) % 256)
            encoded_chars.append(encoded_c)
        decoded_string = ''.join(encoded_chars)
        return decoded_string
    # ------------------------------------------------
    # Personal Base64 encoding
    # String is initially encoded using base64.
    # Each char is then shifted by an offset derived from the b64 string's
    # length (1..8); the shift direction alternates every other char, and
    # the offset digit is prepended so decoding is self-contained.
    # ------------------------------------------------
    def _scramble64(self, string: str) -> str:
        """Encode *string* as scrambled base64; '' on any failure."""
        try:
            scrambled = ''
            b64 = base64.b64encode(string.encode('utf-8'))
            b64_string = str(b64)[2:-1]
            offset = len(b64_string) % 8 + 1
            for x in range(0, len(b64_string)):
                delta = offset * -1 if x % 2 == 0 else offset
                char = chr(ord(b64_string[x]) - delta)
                scrambled += str(char)
            return str(offset) + scrambled
        except Exception:
            return ''
    def _unscramble64(self, string: str) -> str:
        """Inverse of _scramble64; '' when *string* is not a valid token."""
        try:
            unscrambled = ''
            offset = int(string[0])
            string = string[1:]
            for x in range(0, len(string)):
                delta = offset * -1 if x % 2 == 0 else offset
                char = chr(ord(string[x]) + delta)
                unscrambled += str(char)
            return str(base64.b64decode(unscrambled.encode('utf-8')))[2:-1]
        except Exception:
            # Bug fix: this handler was commented out in the original, so the
            # decode path crashed where the encode path returned ''.
            return ''
"phin@uol.com.br"
] | phin@uol.com.br |
1f629729c97c9040289082b5c88b8013e5fc7310 | a2b8733718640ddc5719b06589baf4f6a8bfcd5d | /fabfile.py | bd0bb65a24177f744e603deda6351fc694a50a47 | [] | no_license | fndjjx/blog | dc3bcf9cfc34ea3e41af63dbdc57980425175bfb | 527526737f16d5c71f3fcb2a323ab3d3c022d38a | refs/heads/master | 2021-01-25T08:00:53.029282 | 2017-07-25T06:02:57 | 2017-07-25T06:02:57 | 93,696,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | true | false | 377 | py | #!/usr/bin/env python
# encoding: utf-8
from fabric.api import local,cd,run,env
# Fabric deploy target: the blog server, reached over SSH as ``bloger``.
env.hosts=['bloger@106.14.24.66:22',]
def update_remote():
    """On the remote host: pull the latest code, then restart the app by
    shutting down supervisord and starting it again with the same config."""
    print("remote update")
    with cd('~/git_repo/blog'):
        run('git pull --rebase')
        run('supervisorctl -c supervisord.conf shutdown')
        run('supervisord -c supervisord.conf')
def update():
    """Public entry point (``fab update``): delegate to update_remote()."""
    update_remote()
| [
"yi.lei@unidt.com"
] | yi.lei@unidt.com |
5dd9789f49b6bf5e26968ad8d2ac344ebc993ed3 | fcca7ebb332ae400b82f7d75d424ace30e35963c | /apps/elasticity/stegoton/plot_comparison.py | 6f3eaab6264e7dee56852f1672d4f2d87a7f8564 | [] | no_license | clawpack/sharpclaw | 5d2812149b28a09bfb626daf057fd27e4ab2f6a5 | 7c9782d932a449b92c875ff341a16bf00f0cc630 | refs/heads/master | 2021-01-04T14:06:42.001372 | 2013-11-28T15:19:26 | 2013-11-28T15:19:26 | 1,613,567 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,147 | py | from pyclaw.data import ClawPlotData
from pyclaw.plotting import plotframe
# Overplot frame 100 of the stegoton solution computed by SharpClaw (output
# in this directory) against ClawPack reference output, for visual comparison.
plotdata = ClawPlotData()
plotdata.outdir = '.'
# Figure:
plotfigure = plotdata.new_plotfigure(name='Solution', figno=1)
plotfigure.kwargs = {'figsize':[5,3]}
# Axes:
plotaxes = plotfigure.new_plotaxes(name='Strain')
#plotaxes.xlim = [73,79]
# SharpClaw result: blue squares.
plotitem = plotaxes.new_plotitem(name='SharpClaw 3600', plot_type='1d')
plotitem.plot_var = 0 # component 0 (strain, per the axes name); NOTE(review): the old comment said "q[2] is the stress" -- confirm
plotitem.plotstyle = 's'
plotitem.color = 'b' # could use 'r' or 'red' or '[1,0,0]'
plotitem.kwargs = {'linewidth':3,'markersize':10}
# ClawPack reference at the same resolution: red circles.
plotitem = plotaxes.new_plotitem(name='ClawPack 3600', plot_type='1d')
plotitem.outdir = '/users/ketch/research/claw42/fwave2/3600'
plotitem.plot_var = 0 # component 0, as above
plotitem.plotstyle = 'o'
plotitem.color = 'r'
plotitem.kwargs = {'linewidth':3,'markersize':10}
#plotitem = plotaxes.new_plotitem(name='ClawPack 28800', plot_type='1d')
#plotitem.outdir = '/users/ketch/research/claw42/fwave2/'
#plotitem.plot_var = 0 # q[2] is the stress
#plotitem.plotstyle = '-'
#plotitem.color = 'k'
#plotitem.kwargs = {'linewidth':3}
# Render frame 100.
plotdata.plotframe(100)
| [
"dketch@gmail.com"
] | dketch@gmail.com |
3e73c8eff7b111466a253dd49996cec3d1474aab | 67458c986797100fcf0ddf3352d5a359e8375fb2 | /equazioni_secondo_grado.py | 2de8d295b1e05ae1e844ab0e2af7f09be881ea6d | [] | no_license | giacomotampella/second_degree_equations_py | 3d53bb6f028193fd8be6a7028de0b88fe2fc2077 | 79ff99152a2e3da01b27fe7fe1cad9db703ef68c | refs/heads/main | 2023-02-13T08:08:33.656522 | 2021-01-12T17:19:43 | 2021-01-12T17:19:43 | 329,058,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | import math
import os
# Solve the quadratic equation a*x**2 + b*x + c = 0 from user input.
a = float(input("a = "))
b = float(input("b = "))
c = float(input("c = "))
# Bug fix: math.sqrt raised ValueError when the discriminant was negative;
# report that case instead of crashing.  (Assumes a != 0, as the original did.)
delta = b**2 - 4*a*c
if delta >= 0:
    x1 = (-b - math.sqrt(delta)) / (2*a)
    x2 = (-b + math.sqrt(delta)) / (2*a)
    print("x1 = ", x1)
    print("x2 = ", x2)
else:
    print("No real solutions (discriminant < 0)")
os.system("pause")
| [
"noreply@github.com"
] | giacomotampella.noreply@github.com |
fead5b51476cb0ee7d01cbd4d92adfe47ece5082 | 32a6ac6cbec63296ba68838ad4699b995810c6cd | /compiled/construct/debug_enum_name.py | f557f7c82a5e810c80400f8ac4c1aa17e88d975e | [
"MIT"
] | permissive | smarek/ci_targets | a33696ddaa97daa77c0aecbdfb20c67546c729bc | c5edee7b0901fd8e7f75f85245ea4209b38e0cb3 | refs/heads/master | 2022-12-01T22:54:38.478115 | 2020-08-10T13:36:36 | 2020-08-19T07:12:14 | 286,483,420 | 0 | 0 | MIT | 2020-08-10T13:30:22 | 2020-08-10T13:30:21 | null | UTF-8 | Python | false | false | 914 | py | from construct import *
from construct.lib import *
# Auto-generated `construct` runtime for the Kaitai Struct "debug_enum_name"
# test spec.  Do not edit by hand; fix the .ksy source and regenerate.
# NOTE(review): KaitaiStream and DebugEnumName are referenced below but not
# imported here -- presumably provided by the surrounding test harness.
def debug_enum_name__test_subtype__inner_enum1(subcon):
    return Enum(subcon,
        enum_value_67=67,
    )

def debug_enum_name__test_subtype__inner_enum2(subcon):
    return Enum(subcon,
        enum_value_11=11,
    )

debug_enum_name__test_subtype = Struct(
    'field1' / debug_enum_name__test_subtype__inner_enum1(Int8ub),
    'field2' / Int8ub,
    'instance_field' / Computed(lambda this: KaitaiStream.resolve_enum(DebugEnumName.TestSubtype.InnerEnum2, (this.field2 & 15))),
)

def debug_enum_name__test_enum1(subcon):
    return Enum(subcon,
        enum_value_80=80,
    )

def debug_enum_name__test_enum2(subcon):
    return Enum(subcon,
        enum_value_65=65,
    )

debug_enum_name = Struct(
    'one' / debug_enum_name__test_enum1(Int8ub),
    'array_of_ints' / Array(1, debug_enum_name__test_enum2(Int8ub)),
    'test_type' / LazyBound(lambda: debug_enum_name__test_subtype),
)

_schema = debug_enum_name
| [
"kaitai-bot@kaitai.io"
] | kaitai-bot@kaitai.io |
ddd0a104b3c015f0bb272d19126416861184bd20 | af1ec234c00c2f2f4ac713162597be5f718b5457 | /venv/bin/conch | 13180b6f221cea4ec23d399ba905147285c86bff | [] | no_license | weasleyqi/douban_demo | 5fbf2dbd7435e6e06324abb2a7e978151a830236 | d3d258eaf049d66758fc2c96b27a299bae1d2259 | refs/heads/master | 2020-03-27T00:57:23.234705 | 2018-08-22T06:45:00 | 2018-08-22T06:45:00 | 145,669,733 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | #!/Users/weasleyqi/Documents/projects/douban/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from twisted.conch.scripts.conch import run
# Console-script shim generated by setuptools: normalise argv[0] (strip any
# "-script.py"/".exe" suffix) and hand control to Twisted's conch entry point.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run())
| [
"weasleyqi@gmail.com"
] | weasleyqi@gmail.com | |
5c26c84bf3ddf673fee060ddd328581300a404e1 | 2fed3b92a7c9378d2e891e38c22fb82b2919f654 | /myrobogals/rgconf/admin.py | da00816db8a6bafcfa32618c29cdde134c888dc3 | [] | no_license | bagzcode/myrobogals | 41c41aab4416e6bf7e23a75cc2bafd5bf0e85308 | 0707962f684dcd9b627ffa1428795db3b8ff5ca9 | refs/heads/master | 2021-01-22T14:25:36.222700 | 2016-12-17T18:38:27 | 2016-12-17T18:38:27 | 28,613,660 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | from myrobogals.rgconf.models import Conference, ConferencePart, ConferenceAttendee, ConferencePayment, ConferenceCurrency
from myrobogals import admin
# Admin configuration for the conference app: list columns / filters per model.
class ConferenceAdmin(admin.ModelAdmin):
	list_display = ('name', 'start_date', 'end_date')

class ConferencePartAdmin(admin.ModelAdmin):
	list_display = ('conference', 'title', 'cost_formatted')
	list_filter = ('conference',)

class ConferenceAttendeeAdmin(admin.ModelAdmin):
	list_display = ('conference', 'first_name', 'last_name', 'chapter', 'mobile', 'total_cost_formatted', 'balance_owing_formatted')
	list_filter = ('conference',)

class ConferencePaymentAdmin(admin.ModelAdmin):
	list_display = ('date', 'attendee_name', 'conference', 'amount_formatted', 'payment_method')

# Register each model with its customised admin (ConferenceCurrency uses the default).
admin.site.register(Conference, ConferenceAdmin)
admin.site.register(ConferencePart, ConferencePartAdmin)
admin.site.register(ConferenceAttendee, ConferenceAttendeeAdmin)
admin.site.register(ConferencePayment, ConferencePaymentAdmin)
admin.site.register(ConferenceCurrency)
"me@markparncutt.com"
] | me@markparncutt.com |
76f7a15c5c2ab5e66f7256f28ae3d5da36b3368c | 3283ebfcaa36e798f34b61669f15dfb8cd6b436f | /mainapp/admin.py | 1304470214485ad0db15151c6c27569442ed4cc4 | [] | no_license | Mixiz/django_study | 3fcb2c0f1f89ee333a710a36a27f6185e48b04bb | 21f220678298138add5330da67ec181bd8358e9e | refs/heads/master | 2022-12-07T14:50:56.533943 | 2020-08-30T19:12:55 | 2020-08-30T19:12:55 | 282,625,289 | 0 | 0 | null | 2020-08-30T19:12:56 | 2020-07-26T10:20:49 | Python | UTF-8 | Python | false | false | 219 | py | from django.contrib import admin
from mainapp.models import Product, ProductCategory, Contact
# Register your models here.
# Expose the shop models in the Django admin with the default ModelAdmin.
admin.site.register(Product)
admin.site.register(ProductCategory)
admin.site.register(Contact)
| [
"lavrikov.denis@gmail.com"
] | lavrikov.denis@gmail.com |
5804c617c68c836e76ad0aaedd6606d7be05daa6 | 1a14b473ffb319cd4814d32c217f44505ee6e11c | /lesson2_homework/home_task8.py | 487910610d78ef23046eb36b4ef9f186e16e743e | [] | no_license | Mayfryn/my_first_repa | 12d4ccf8013210a076f49eb8d928ae175a32eaff | 849498076df81420e5f71f5a65e316c7e8577eb2 | refs/heads/master | 2023-06-02T20:00:45.707058 | 2021-06-09T07:50:08 | 2021-06-09T07:50:08 | 293,090,029 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | a = float(input("Enter a first number: "))
# Report how many of the three entered numbers are equal:
# 3 -> all three equal, 2 -> exactly one pair equal, 0 -> all distinct.
b = float(input("Enter a second number: "))
c = float(input("Enter a third number: "))
# BUG FIX: the branches must be exclusive.  With three plain `if`s the
# all-equal case matched both the first and second test and printed
# both 3 and 2.  `elif`/`else` prints exactly one answer.
if a == b == c:
    print(3)
elif a == b or b == c or a == c:
    print(2)
else:
    print(0)
"mayfryn@gmail.com"
] | mayfryn@gmail.com |
d94ce76d8de3d9ac32d2f73c501fa86ff5c6d658 | 647c4a2396be693b7395aa9412644491bb062a43 | /conf/config.py | 36ad87b1318ce7c7da4f23fd332feae149edb3d7 | [] | no_license | xdf020168/qa_platform | c85c5d4c5d0748f00a302bea423f1232f1bd04e2 | e7de790e84b7749260aba7eb6832ca565d8c1a33 | refs/heads/master | 2023-03-16T04:31:09.163407 | 2020-09-10T10:11:47 | 2020-09-10T10:11:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,135 | py | # -*- coding=utf-8 -*-
# Author: BoLin Chen
# @Date : 2020-08-10
# Django DATABASES-style connection settings keyed by environment name
# ('production', 'test', 'local').  Which environment is active is decided
# elsewhere (presumably settings.py reads MYSQL[env]) -- confirm.
# SECURITY NOTE(review): live credentials are committed in plain text here;
# move them to environment variables / a secrets store and rotate the
# exposed passwords.
MYSQL = {
    'production': {
        'default': {
            'ENGINE': 'django.db.backends.mysql',
            'HOST': "10.50.255.161",
            'PORT': 3306,
            'USER': "root",
            'PASSWORD': "261090dong",
            'NAME': "qa_platform",
            # Options Django applies only when creating the *test* database.
            'TEST': {
                'CHARSET': 'utf8',
                'COLLATION': 'utf8_general_ci'
            }
        }
    },
    # 'test' and 'local' currently point at the same server and account.
    "test": {
        'default': {
            'ENGINE': 'django.db.backends.mysql',
            'HOST': "10.50.255.105",
            'PORT': 3306,
            'USER': "qa_platform_test",
            'PASSWORD': "fPaOw44UgXdWdoCA",
            'NAME': "qa_platform",
            'TEST': {
                'CHARSET': 'utf8',
                'COLLATION': 'utf8_general_ci'
            }
        }
    },
    "local": {
        'default': {
            'ENGINE': 'django.db.backends.mysql',
            'HOST': "10.50.255.105",
            'PORT': 3306,
            'USER': "qa_platform_test",
            'PASSWORD': "fPaOw44UgXdWdoCA",
            'NAME': "qa_platform",
            'TEST': {
                'CHARSET': 'utf8',
                'COLLATION': 'utf8_general_ci'
            }
        }
    }
}

# Direct (non-Django) MySQL connection info for raw client access to the
# 'newsonar' database.
db_mysql = {
    'master': {
        'host': "10.50.255.161",
        'port': 3306,
        'user': "root",
        'password': "261090dong",
        'database': "newsonar"
    }
}
"chenwenjian@fangdd.com"
] | chenwenjian@fangdd.com |
39f1ab98e67afeba433bba71016769cf604ee099 | b7e6cdf094baaee9d6e5034c2355641fbf9138d7 | /824. 山羊拉丁文.py | fcd4e950c0e3c85ebf53f970708644e021c0f2ce | [] | no_license | heshibo1994/leetcode-python-2 | 04296c66cd6d1fe58880062aeafdbe9d474b7d2e | 3ea32f03bd453743b9b81de9871fad7ac67ced90 | refs/heads/master | 2020-05-23T21:49:01.367969 | 2019-09-30T03:31:27 | 2019-09-30T03:31:27 | 186,961,972 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,333 | py | # 给定一个由空格分割单词的句子 S。每个单词只包含大写或小写字母。
#
# 我们要将句子转换为 “Goat Latin”(一种类似于 猪拉丁文 - Pig Latin 的虚构语言)。
#
# 山羊拉丁文的规则如下:
#
# 如果单词以元音开头(a, e, i, o, u),在单词后添加"ma"。
# 例如,单词"apple"变为"applema"。
#
# 如果单词以辅音字母开头(即非元音字母),移除第一个字符并将它放到末尾,之后再添加"ma"。
# 例如,单词"goat"变为"oatgma"。
#
# 根据单词在句子中的索引,在单词最后添加与索引相同数量的字母'a',索引从1开始。
# 例如,在第一个单词后添加"a",在第二个单词后添加"aa",以此类推。
#
# 返回将 S 转换为山羊拉丁文后的句子。
# 输入: "I speak Goat Latin"
# 输出: "Imaa peaksmaaa oatGmaaaa atinLmaaaaa
class Solution:
    def toGoatLatin(self, S):
        """Convert sentence *S* to "Goat Latin" (LeetCode 824).

        Rules: a word starting with a vowel gets "ma" appended; otherwise
        its first letter moves to the end before "ma"; the i-th word
        (1-based) additionally gets i copies of "a".

        Note: reproduces the original behaviour of appending a space after
        every word, so the result carries a trailing space.
        """
        # Collect pieces and join once instead of the original quadratic
        # `ans = ans + ...` concatenation; leftover debug prints removed.
        pieces = []
        for idx, word in enumerate(S.split(" "), start=1):
            if word[0] in "aeiouAEIOU":
                goat = word + "ma"
            else:
                goat = word[1:] + word[0] + "ma"
            pieces.append(goat + "a" * idx + " ")
        return "".join(pieces)

s = Solution()
print(s.toGoatLatin("I speak Goat Latin"))
| [
"csuheshibo@163.com"
] | csuheshibo@163.com |
fee28c811bd8f5f28bb0e07872b2ad4480581370 | 13b576aa9c6abc54634479a2602c83d8b06c9bc0 | /snipsroku/snipsroku.py | 2de27e0c08ccee5e519f0fa953d34cd9f38a256a | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | msgpo/snips-skill-roku | fa1b2a0ef5a4938db80871075b1a1a9cad686559 | 506c03e9e1ffa270005c1d8e2a7f63550d5b744c | refs/heads/master | 2022-02-24T16:16:56.778005 | 2018-04-04T13:28:36 | 2018-04-04T13:28:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,760 | py | #!/usr/local/bin/python
# -*-: coding utf-8 -*-
import requests
import re
import xml.etree.ElementTree as ET
class SnipsRoku:
    """Thin client for the Roku External Control Protocol (ECP).

    Every command is a plain HTTP request against port 8060 of the Roku
    device (see the Roku ECP documentation).
    """

    def __init__(self, roku_device_ip=None, locale=None):
        """Store the device IP and initialise the app cache.

        :param roku_device_ip: IP address of the Roku device (required).
        :param locale: unused; kept for interface compatibility.
        :raises ValueError: if no device IP is given.
        """
        if roku_device_ip is None:
            raise ValueError('You need to provide a Roku device IP')
        self.roku_device_ip = roku_device_ip
        # Lower-cased app name -> Roku app id, filled by set_available_apps().
        self.apps = {}
        # Comma-separated app names, the format the ECP 'provider' search
        # parameter expects.
        self.apps_string_list = ""

    def set_available_apps(self):
        """Query the device for installed channels and refresh the cache."""
        r = requests.get(
            "http://{}:8060/query/apps".format(self.roku_device_ip))
        parsed_data = ET.fromstring(r.content)
        apps_array = []
        for app in parsed_data:
            self.apps[app.text.lower()] = app.attrib['id']
            apps_array.append(app.text)
        # Comma-separated list of providers to use when automatically
        # launching content.
        self.apps_string_list = ",".join(apps_array)

    def get_apps(self):
        """Return the cached {lower-cased app name: app id} mapping."""
        return self.apps

    def launch_app(self, app_id):
        """Launch the channel identified by *app_id* on the device."""
        requests.post(
            "http://{}:8060/launch/{}".format(self.roku_device_ip, app_id))

    def get_app_id(self, app_name):
        """Return the Roku app id for *app_name* (case-insensitive).

        Raises KeyError if the app is not installed on the device.
        """
        # Refresh every time in case new apps have been installed.
        self.set_available_apps()
        return self.apps[app_name.lower()]

    def search_content(self, content_type, keyword=None, title=None, launch=False, provider=None,
                       season=None):
        """Search (and optionally auto-launch) content on the device.

        :param content_type: tv-show, movie, person, channel or game.
        :param keyword: keyword contained in the title/name to search for.
        :param title: exact content title/name; case sensitive.  One of
            *keyword*/*title* is required (*title* wins when both given).
        :param launch: when truthy, automatically launch the first match.
        :param provider: provider name to launch the content on; when
            omitted, all installed channels are offered.
        :param season: season number for series content.
        :raises ValueError: if neither keyword nor title is provided.
        """
        payload = {'type': content_type, 'launch': SnipsRoku.bool2string(launch),
                   'season': season}
        # When launching, pick the first content and provider available if
        # none was specified.
        if launch:
            payload['match-any'] = 'true'
            if provider is None:
                # Refresh in case new apps have been installed.
                self.set_available_apps()
                payload['provider'] = self.apps_string_list
            else:
                payload['provider'] = provider
        if title is not None:
            payload['title'] = title
        elif keyword is not None:
            payload['keyword'] = keyword
        else:
            raise ValueError('Either keyword or title need to be specified')
        requests.post(
            "http://{}:8060/search/browse?".format(self.roku_device_ip), params=payload)

    def play(self):
        """Send the Play keypress to the device."""
        requests.post(
            "http://{}:8060/keypress/Play".format(self.roku_device_ip))

    def home_screen(self):
        """Send the Home keypress (return to the home screen)."""
        requests.post(
            "http://{}:8060/keypress/Home".format(self.roku_device_ip))

    @staticmethod
    def parse_season(season_string):
        """Return the season number from a string like 'season 10'.

        Returns the first integer found, or None when there is none.
        """
        # Raw string fixes the invalid-escape-sequence warning of the
        # original '\d+' literal.
        match = re.findall(r'\d+', season_string)
        if match:
            return int(match[0])
        return None

    @staticmethod
    def bool2string(boolean):
        """Return 'true' for truthy values, 'false' otherwise."""
        # Collapses the original redundant three-way branch (False and the
        # final else both returned 'false').
        return 'true' if boolean else 'false'
| [
"pau.fabregat.p@gmail.com"
] | pau.fabregat.p@gmail.com |
16ebbdc2d377e53ed1986ad88b7fd27221354491 | de4b5fb7323ec97f2edb6cec3022b450f96cc796 | /project.py | f8bf8736fd15a291c603369375ff7c1ac9926755 | [] | no_license | vibhormehta07/Python-with-Data-Science-Project-Search-Engine-Optimization | ba8116bc7843e6ccc266ed9729a5fae9d2d28006 | 8f55c689a0a80bd4b15cba5845ccfe5df3c04ac5 | refs/heads/master | 2020-04-01T09:06:35.236490 | 2018-10-15T05:59:30 | 2018-10-15T05:59:30 | 153,060,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | #Project in Python
#Search Engine Optimization
from urllib.request import urlopen
import re
from bs4 import BeautifulSoup
import sys
# Map all supplementary-plane (non-BMP) code points to U+FFFD so scraped
# text could be printed on narrow consoles.
# NOTE(review): defined but never applied below (str.translate is not
# called) -- presumably leftover from an earlier version; confirm.
non_bmp_map = dict.fromkeys(range(0x10000, sys.maxunicode + 1), 0xfffd)

ip=input("Enter the search keyword:")
resultdict={}

# url.txt holds one query-URL prefix per line; the keyword is appended to
# each prefix to build the full request URL.
fo=open("url.txt","r+")
fz=fo.read()
urllists=fz.split('\n')
print(urllists)
fo.close()

for url in urllists:
    res=url+ip
    print(res)
    # NOTE(review): the HTTP handle is never closed and there is no error
    # handling for unreachable URLs or blank lines in url.txt.
    file_handle=urlopen(res)
    html=file_handle.read()
    soup=BeautifulSoup(html,"html.parser")
    # Drop script/style elements so only human-visible text is counted.
    for script in soup(["script","style"]):
        script.extract()
    text=soup.get_text().lower()
    List1=[]
    List1.append(text.lower().split())
    # Count exact whole-word occurrences of the keyword on the page.
    i=0
    for x in List1:
        for a in x:
            if ip==a:
                i=i+1
    resultdict.update({res:i})

# Write one "<url> The no of hits of the keyword is <count>" line per URL.
fo=open("results.txt","w")
for d,k in resultdict.items():
    fo.write("%s "%d)
    fo.write("The no of hits of the keyword is ")
    fo.write("%d \n" %k)
fo.close()
| [
"vibhormehta20@gmail.com"
] | vibhormehta20@gmail.com |
31347f55ce459008b3bfe89e6d7eeff0ce2b2db5 | b7b521e1a0e3cdb3bca5ffaf457ad0caad76cb95 | /DeployManager/webmanager/views.py | f607102378a938a0593b4356b681517ad072f4f3 | [] | no_license | BroSobek/FlowNative | 483dec0a2b25fd30782d23728635df5474fba3d1 | 64a8577a97346f4787c58e5a2428939bcf3c4018 | refs/heads/master | 2022-09-26T21:15:54.773852 | 2020-06-05T15:59:38 | 2020-06-05T15:59:38 | 267,108,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | from django.shortcuts import render
from django.http import HttpResponse
from django.views import generic
from django.template import loader
# Create your views here.
class IndexView(generic.ListView):
    """Landing page: renders webmanager/index.html."""
    template_name = 'webmanager/index.html'

    def get_queryset(self):
        # BUG FIX: the original returned the integer 0, which is not an
        # iterable/queryset and breaks ListView's context building and
        # pagination.  Return an empty list until the real queryset is
        # wired in.  TODO(review): return the intended model queryset.
        return []
"adi.roth2323@gmail.com"
] | adi.roth2323@gmail.com |
c74363ec7f3ffb330ff7eb6cc99754b2dfbc69e4 | 0e7be557833f38fef17b5eaa57c331a96148ad5e | /Assets/Python/StrategyOnly/Heroes.py | 895ee42ac8234f910d9e6bebc4e54df85577387d | [] | no_license | Thunderbrd/Caveman2Cosmos | 9f38961c638b82099b0601c22f8e90a1c98daa1e | b99aca8e56fb2a1fae48abd424dc0060a1d1fc1a | refs/heads/master | 2022-01-12T19:40:32.586456 | 2019-07-21T22:00:09 | 2019-07-21T22:00:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,886 | py | ## By StrategyOnly converted to BUG by Dancing Hoskuld
from CvPythonExtensions import *
import CvEventInterface
import CvUtil
import BugUtil
import PyHelpers
import Popup as PyPopup
import SdToolKit as SDTK
# Shared engine singletons and convenience aliases.
gc = CyGlobalContext()
localText = CyTranslator()
PyPlayer = PyHelpers.PyPlayer
PyInfo = PyHelpers.PyInfo

# Unit ids cached by init(); -1 until init() has run.
giSparticus = -1
giGladiator = -1
def init():
    """Cache the unit-class/unit-type ids used by the event handlers below.

    Must run after game data is loaded; until then the module globals
    stay at their -1 sentinels.
    """
    global giSparticus, giGladiator
    giSparticus = gc.getInfoTypeForString('UNITCLASS_SPARTACUS')
    giGladiator = CvUtil.findInfoTypeNum(gc.getUnitInfo,gc.getNumUnitInfos(),'UNIT_GLADIATOR')
def onUnitBuilt(self, argsList):
    'Unit Completed'
    # Event hook fired when a city finishes a unit;
    # argsList is (city, unit).
    city = argsList[0]
    unit = argsList[1]
    player = PyPlayer(city.getOwner())

    # NOTE(review): CvAdvisorUtils is not imported in this chunk --
    # presumably imported elsewhere in the file; verify.
    CvAdvisorUtils.unitBuiltFeats(city, unit)

    ## Hero Movies ##
    # Show the hero movie popup only in single player, only for the active
    # (human) player, and only for world-unique ("hero") unit classes.
    if not CyGame().isNetworkMultiPlayer() and city.getOwner() == CyGame().getActivePlayer() and isWorldUnitClass(unit.getUnitClassType()):
        popupInfo = CyPopupInfo()
        popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON_SCREEN)
        popupInfo.setData1(unit.getUnitType())
        popupInfo.setData2(city.getID())
        popupInfo.setData3(4)
        popupInfo.setText(u"showWonderMovie")
        popupInfo.addPopup(city.getOwner())
    ## Hero Movies ##
def onCombatResult(argsList):
    'Combat Result'
    # Event hook fired after each resolved combat;
    # argsList is (winning unit, losing unit).
    pWinner,pLoser = argsList
    # NOTE(review): playerX/unitX/playerY/unitY are computed but never
    # used below -- presumably leftovers from the event template.
    playerX = PyPlayer(pWinner.getOwner())
    unitX = PyInfo.UnitInfo(pWinner.getUnitType())
    playerY = PyPlayer(pLoser.getOwner())
    unitY = PyInfo.UnitInfo(pLoser.getUnitType())
    pPlayer = gc.getPlayer(pWinner.getOwner())

    ## BTS HEROS - Spartacus Capture Event Start ##
    # When Spartacus wins, 1-in-4 chance to "capture" the loser: the loser
    # is destroyed and the winner's owner gains a Gladiator on the loser's
    # plot (if it can be vacated) or on the winner's plot otherwise.
    if pWinner.getUnitClassType() == giSparticus:
        ## Capture % Random # 0 to 3 or 25% ##
        iNewGladiatorNumber = getRandomNumber( 3 )
        if iNewGladiatorNumber == 0:
            pClearPlot = findClearPlot(pLoser)
            # Move the loser aside first when it is alone and a clear
            # neighbouring plot exists, so the Gladiator spawns there.
            if (pLoser.plot().getNumUnits() == 1 and pClearPlot != -1):
                pPlot = pLoser.plot()
                pLoser.setXY(pClearPlot.getX(), pClearPlot.getY(), False, True, True)
            else:
                pPlot = pWinner.plot()
            pPID = pPlayer.getID()
            newUnit = pPlayer.initUnit(giGladiator, pPlot.getX(), pPlot.getY(), UnitAITypes.NO_UNITAI, DirectionTypes.DIRECTION_NORTH)
            # Kill the loser outright (damage far beyond max HP).
            pLoser.setDamage(100000, False)
            ## newUnit.convert(pLoser)
            ## pLoser.setDamage(100, False)
            newUnit.finishMoves()
            iXa = pLoser.getX()
            iYa = pLoser.getY()
            CyInterface().addMessage(pPID,False,15,CyTranslator().getText("TXT_KEY_SPARTACUS_CAPTURE_SUCCESS",()),'',0,',Art/Interface/Buttons/Units/ICBM.dds,Art/Interface/Buttons/Warlords_Atlas_1.dds,3,11',ColorTypes(44), iXa, iYa, True,True)
    ## BTS HEROS - Spartacus Capture End ##

    ## Field Medic Start ##
    # Winners with the Retinue Messenger promotion have a 1-in-10 chance
    # to fully heal, at most once per game turn (tracked via SdToolKit).
    if pWinner.isHasPromotion(gc.getInfoTypeForString('PROMOTION_RETINUE_MESSENGER')):
        iHealChance = getRandomNumber( 9 )
        if iHealChance == 0:
            if ( not SDTK.sdObjectExists('Heroes', pWinner) ) :
                iHealTurn = -1
            else :
                iHealTurn = SDTK.sdObjectGetVal( 'Heroes', pWinner, 'HealTurn' )
            if( iHealTurn == None or gc.getGame().getGameTurn() > iHealTurn ) :
                pWinner.setDamage(0, False)
                if ( not SDTK.sdObjectExists('Heroes', pWinner) ) :
                    SDTK.sdObjectInit('Heroes', pWinner, {})
                SDTK.sdObjectSetVal( 'Heroes', pWinner, 'HealTurn', gc.getGame().getGameTurn() )
    ## Field Medic End ##
def findClearPlot(pUnit):
    """Pick a plot in the 3x3 neighbourhood of *pUnit* for relocation.

    Empty plots score 5 plus a random 0-4 tiebreaker; the best-scoring
    plot is returned, or -1 when none was selected.
    NOTE(review): the comparison is `>=` against an initial best of 0, so
    an occupied (score 0) plot can still be returned -- confirm whether
    that is intended.
    """
    BestPlot = -1
    iBestPlot = 0
    pOldPlot = pUnit.plot()
    iX = pOldPlot.getX()
    iY = pOldPlot.getY()
    # Scan the 3x3 block centred on the unit (includes its own plot).
    for iiX in range(iX-1, iX+2, 1):
        for iiY in range(iY-1, iY+2, 1):
            iCurrentPlot = 0
            pPlot = CyMap().plot(iiX,iiY)
            if pPlot.getNumUnits() == 0:
                iCurrentPlot = iCurrentPlot + 5
            # Random tiebreaker among empty plots (sync-safe game RNG).
            if iCurrentPlot >= 1:
                iCurrentPlot = iCurrentPlot + CyGame().getSorenRandNum(5, "findClearPlot")
            if iCurrentPlot >= iBestPlot:
                BestPlot = pPlot
                iBestPlot = iCurrentPlot
    return BestPlot
def getRandomNumber(limit):
    """Return a multiplayer-synchronised random int in [0, limit).

    The parameter was renamed from `int`, which shadowed the builtin;
    every call site in this file passes it positionally.
    """
    return CyGame().getSorenRandNum(limit, "Gods")
| [
"raxo2222@8bbd16b5-4c62-4656-ae41-5efa6c748c97"
] | raxo2222@8bbd16b5-4c62-4656-ae41-5efa6c748c97 |
75dcbceb4dc651a37e410ccb56b96e08c9a8777c | ea8ca822bcbe5fccdd40b849115ffcd8d3ab44bc | /16/ulli.py | a2c398d94ab071e201a35608c60941382632ef03 | [] | no_license | langqy/webdriver_manual | 64aebb13537c097ef0fa1a6535246c547409567d | 8599121512fd19de0ef589c13195fa00ceaf0624 | refs/heads/master | 2021-01-12T13:40:31.298087 | 2013-12-10T17:07:07 | 2013-12-10T17:07:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | # -*- coding: utf-8 -*-
from selenium import webdriver
import time
import os
dr = webdriver.Firefox()
file_path = 'file:///' + os.path.abspath('uili.html')
dr.get(file_path)
# 获得其父层级
for link in dr.find_element_by_class_name('ultest').find_elements_by_tag_name('a'):
print link.text
# 获取当前层级
# 由于页面上可能有很多class为active的元素
# 所以使用层级定位最为保险
print dr.find_element_by_class_name('ultest').find_element_by_class_name('active').text
dr.quit()
| [
"yueye-22@163.com"
] | yueye-22@163.com |
f7e3e4d0eb43e1b66081962b0ee6cdd9d6a3694b | 39c80306080defbde999f1af05ae5993f22d7fd7 | /oxford_astrazeneca/tests/q_calc_efficiency.py | 07076b1e2e93744fc876d8399910625d47330256 | [] | no_license | uob-cfd/spe | 47931d724792fbe812de49ac489a7e88cca52e1d | f7c76b766bffec71b80febd0dbc79e12aec3a11c | refs/heads/master | 2023-02-04T20:45:44.411481 | 2020-12-27T19:03:52 | 2020-12-27T19:03:52 | 321,508,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,259 | py | test = {
# okpy autograder spec: doctest-style cases, all visible (hidden=False),
# scored as a single suite.  The case strings are raw doctest sources and
# must be left byte-for-byte intact.
'name': 'Question calc_efficiency',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> # You need to define the function 'calc_efficiency'
>>> 'calc_efficiency' in vars()
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> # calc_efficiency should be a function.
>>> callable(calc_efficiency)
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> # Oops, have you deleted 'ox_vax'?
>>> 'ox_vax' in vars()
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> # Oops, have you deleted 'vax_eff'?
>>> 'vax_eff' in vars()
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> calc_efficiency(ox_vax) == vax_eff
True
""",
'hidden': False,
'locked': False
},
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
"matthew.brett@gmail.com"
] | matthew.brett@gmail.com |
985bbe1a7ecae1798320c35977effa768cc2eb63 | 5b1ca29d02ba32911c71a3dcb142d3ce070369d7 | /Web/Web/LoginManager.py | 5e115357f55cf557227f097ee78f45e2d55b3e1a | [
"MIT"
] | permissive | Bideau/SmartForrest | a52f57f86a2f4e03cd83ed1887a865b240c19786 | 5fe58593b18ac4e5e285d271a8ec5e9292093409 | refs/heads/master | 2016-09-01T06:56:56.953919 | 2016-04-01T06:37:09 | 2016-04-01T06:37:09 | 51,444,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,166 | py | #! /usr/bin/env python
import sys
import MySQLdb as mdb
from email.parser import Parser
from email.mime.text import MIMEText
import smtplib
import string
from random import sample, choice
import md5
error=0
HOST='srvmysql.imerir.com'
DB='SmartForest'
PASSWORD='LjcX7vWRMs84jJ3h'
USER='SmartForest'
# Generation mot de passe
def genPass(length):
retour=""
chars = string.letters + string.digits
retour=''.join(choice(chars) for _ in range(length))
return retour
# Verification du login
def isLogin(login):
valid = False
try:
con = mdb.connect(HOST, USER, PASSWORD, DB)
with con:
cur = con.cursor()
cur.execute("SELECT COUNT(*) FROM connection where c_login=\'%s\'" % login)
rows = cur.fetchall()
for row in rows:
if (row[0] == 1):
valid = True
except mdb.Error as e:
print("Error %d: %s") % (e.args[0], e.args[1])
return 1001
finally:
con.close()
return valid
# Verification du mot de passe
def isPass(login, password):
valid=False
try:
con = mdb.connect(HOST, USER, PASSWORD, DB)
with con:
cur = con.cursor()
cur.execute("SELECT c_password, c_adminKey FROM connection where c_login=\'%s\'" % login)
rows = cur.fetchall()
for row in rows:
if (row[0] == password):
valid=True
except mdb.Error as e:
print("Error %d: %s") % (e.args[0], e.args[1])
return 1001
finally:
con.close()
return valid
# Verification acces admin
def isAdmin(login):
valid=False
try:
con = mdb.connect(HOST, USER, PASSWORD, DB)
with con:
cur = con.cursor()
cur.execute("SELECT c_adminKey FROM connection where c_login=\'%s\'" % login)
rows = cur.fetchall()
for row in rows:
if (row[0] == 1):
valid=True
except mdb.Error as e:
print("Error %d: %s") % (e.args[0], e.args[1])
return 1001
finally:
con.close()
return valid
#Verification mot de passe temporaire
def isTemp(login):
valid=False
try:
con = mdb.connect(HOST, USER, PASSWORD, DB)
with con:
cur = con.cursor()
cur.execute("SELECT c_tempPassword FROM connection where c_login=\'%s\'" % login)
rows = cur.fetchall()
for row in rows:
if (row[0] == 1):
valid=True
if(valid==True):
newPass=md5.new(genPass(12)).hexdigest()
cur.execute("UPDATE connection SET c_password=\'"+str(newPass)+"\',c_tempPassword=0 where c_login=\'"
+str(login)+"\'")
except mdb.Error as e:
print("Error %d: %s") % (e.args[0], e.args[1])
return 1001
finally:
con.close()
return valid
# Verification du mot de passe
def changePass(login,newPassword):
error=200
try:
con = mdb.connect(HOST, USER, PASSWORD, DB)
with con:
cur = con.cursor()
cur.execute("UPDATE connection SET c_password=\'"+newPassword+"\' where c_login=\'"+login+"\'")
except mdb.Error as e:
print("Error %d: %s") % (e.args[0], e.args[1])
return 1001
finally:
con.close()
return error
# Insert un utilisateur et un login dans la BDD
def insertUser(login, password, nom, prenom, desc,mail):
global UserId
try:
con = mdb.connect(HOST, USER, PASSWORD, DB)
with con:
cur = con.cursor()
cur.execute("INSERT INTO user (u_id,u_lastName,u_firstName,u_description,u_mail) values (NULL,\'"
+nom+"\',\'"+prenom+"\',\'"+desc+"\',\'"+mail+"\')")
cur.execute("SELECT u_id FROM user where u_lastName=\'"+nom+"\' AND u_firstName=\'"
+prenom+"\' AND u_description=\'"+desc+"\' AND u_mail=\'"+mail+"\'")
UserId=0
rows = cur.fetchall()
for row in rows:
UserId = row[0]
cur.execute("INSERT INTO connection (c_id,u_id,c_login,c_password,c_adminKey,c_tempPassword)"+
" values (NULL,\'"+str(UserId)+"\',\'"+login+"\',\'"+password+"\',False,False)")
except mdb.Error as e:
print("Error %d: %s") % (e.args[0], e.args[1])
return 1001
except Exception as e:
# si une erreur de format retour erreur 1000
print(e)
return 1000
finally:
con.close()
return 200
# retourne les information de l'utilisateur du login
def userInfo(login):
tmp={"nom":"toto","prenom":"toto","description":"toto","login":login}
try:
con = mdb.connect(HOST, USER, PASSWORD, DB)
with con:
cur = con.cursor()
cur.execute("SELECT u.u_lastName,u.u_firstName,u.u_description,u.u_mail FROM connection c INNER JOIN user u "+
"ON u.u_id=c.u_id where c.c_login=\'"+str(login)+"\' ")
rows = cur.fetchone()
nom=rows[0]
prenom=rows[1]
desc=rows[2]
mail=rows[3]
tmp["nom"]=nom
tmp["prenom"]=prenom
tmp["description"]=desc
tmp["isAdmin"]=isAdmin(login)
tmp["motDePasseUnique"]=isTemp(login)
tmp["mail"]=mail
except mdb.Error as e:
print("Error %d: %s") % (e.args[0], e.args[1])
error=1001
except Exception as e:
# si une erreur de format retour erreur 1000
print(e)
error=1000
finally:
con.close()
return tmp
# retourne les information de l'utilisateur du login
def userSuppr(login):
error=200
try:
con = mdb.connect(HOST, USER, PASSWORD, DB)
with con:
cur = con.cursor()
cur.execute("DELETE FROM connection where c_login=\'"+str(login)+"\' ")
except mdb.Error as e:
print("Error %d: %s") % (e.args[0], e.args[1])
error=1001
except Exception as e:
# si une erreur de format retour erreur 1000
print(e)
error=1000
finally:
con.close()
return error
# retourne les information de l'utilisateur du login
def descModif(login,desc):
error=200
try:
con = mdb.connect(HOST, USER, PASSWORD, DB)
with con:
cur = con.cursor()
cur.execute("UPDATE user u,connection c SET u.u_description=\'"+str(desc)+"\' where c.c_login=\'"+str(login)+"\' AND u.u_id=c.u_id ")
except mdb.Error as e:
print("Error %d: %s") % (e.args[0], e.args[1])
error=1001
except Exception as e:
# si une erreur de format retour erreur 1000
print(e)
error=1000
finally:
con.close()
return error
# retourne les information de l'utilisateur du login
def mailModif(login,mail):
error=200
try:
con = mdb.connect(HOST, USER, PASSWORD, DB)
with con:
cur = con.cursor()
cur.execute("UPDATE user u,connection c SET u.u_mail=\'"+str(mail)+"\' where c.c_login=\'"+str(login)+"\' AND u.u_id=c.u_id")
except mdb.Error as e:
print("Error %d: %s") % (e.args[0], e.args[1])
error=1001
except Exception as e:
# si une erreur de format retour erreur 1000
print(e)
error=1000
finally:
con.close()
return error
# retourne les information d'acces de l'utilisateur du login
def userAccess(login,capteurId):
valid=False
try:
con = mdb.connect(HOST, USER, PASSWORD, DB)
with con:
cur = con.cursor()
cur.execute("SELECT Count(sta.sta_id) FROM station sta "+
"INNER JOIN stationAccess staa ON staa.sta_id=sta.sta_id "+
"INNER JOIN user u ON u.u_id=staa.u_id "+
"INNER JOIN connection c ON u.u_id=c.u_id "+
"where c.c_login=\'"+str(login)+"\' AND sta.sta_id=\'"+str(capteurId)+"\' ")
#rows = cur.fetchone()
rows = cur.fetchall()
for row in rows:
if (row[0] == 1):
valid = True
except mdb.Error as e:
print("Error %d: %s") % (e.args[0], e.args[1])
error=1001
except Exception as e:
# si une erreur de format retour erreur 1000
print(e)
error=1000
finally:
con.close()
return valid
# Return the list of all users (original comment wrongly said "return
# the user's information for a login").
def userList():
    """Return a list of {"nom", "prenom", "login"} dicts, one per login.

    NOTE(review): the local `error` variable is set on failure but never
    returned -- callers always get the (possibly partial) list.
    """
    myArray=[]
    try:
        con = mdb.connect(HOST, USER, PASSWORD, DB)
        with con:
            cur = con.cursor()
            # No user input reaches this query, so no injection risk here.
            cur.execute("SELECT u.u_lastName,u.u_firstName,c.c_login FROM connection c INNER JOIN user u "+
                "ON u.u_id=c.u_id")
            rows = cur.fetchall()
            for row in rows:
                tmp={"nom":"toto","prenom":"toto","login":"toto"}
                nom=row[0]
                prenom=row[1]
                login=row[2]
                tmp["nom"]=nom
                tmp["prenom"]=prenom
                tmp["login"]=login
                myArray.append(tmp)
    except mdb.Error as e:
        # NOTE(review): `print(...) % (...)` only works under Python 2.
        print("Error %d: %s") % (e.args[0], e.args[1])
        error=1001
    except Exception as e:
        print(e)
        error=1000
    finally:
        con.close()
    return myArray
# "Forgot password" flow: mail a one-shot temporary password.
def forgetPassword(login, mail):
    """If *mail* matches *login*'s stored address, generate a temporary
    password, store its MD5 hash with the temp flag set, and e-mail the
    clear-text password to the user via Gmail SMTP.

    Returns 200 on success (or silently when the mail does not match),
    1001 on MySQL error, 1000 on any other error.
    SECURITY NOTE(review): `login`/`userId` are concatenated into the SQL
    strings (SQL injection); Gmail credentials are hard-coded below; MD5
    is a weak password hash.  All should be fixed when this file is
    modernised.
    """
    error=200
    try:
        con = mdb.connect(HOST, USER, PASSWORD, DB)
        with con:
            userId=0
            tmpMail=""
            # Hard-coded sender account used for the notification mail.
            sender="smartforest66@gmail.com"
            password='guilhem1'
            cur = con.cursor()
            cur.execute("SELECT u.u_mail,u.u_id FROM connection c INNER JOIN user u "+
                "ON u.u_id=c.u_id WHERE c.c_login=\'"+str(login)+"\'")
            rows = cur.fetchall()
            for row in rows:
                tmpMail=row[0]
                userId=row[1]
            # Only proceed when the caller-supplied mail matches the one
            # on record (simple identity confirmation).
            if tmpMail == mail:
                tmpPass=genPass(10)
                headers = "From: <"+sender+">\n"+"To: <"+mail+">\n"+"Subject: Changement de mot passe\n"+\
                    "\nVotre nouveau mot de passe temporaire est : " + tmpPass + " \n"
                newPass=md5.new(tmpPass).hexdigest()
                cur.execute("UPDATE connection SET c_password=\'"+str(newPass)+"\',c_tempPassword=1 where c_login=\'"
                    +str(login)+"\' AND u_id=\'"+str(userId)+"\'")
                # Send the clear-text temporary password over Gmail SMTP
                # with STARTTLS.
                server = smtplib.SMTP('smtp.gmail.com:587')
                server.ehlo()
                server.starttls()
                server.login(sender,password)
                server.sendmail(sender, mail, headers)
                server.quit()
    except mdb.Error as e:
        # NOTE(review): `print(...) % (...)` only works under Python 2.
        print("Error %d: %s") % (e.args[0], e.args[1])
        error=1001
    except Exception as e:
        print(e)
        error=1000
    finally:
        con.close()
    return error
| [
"arnaud.bes66@hotmail"
] | arnaud.bes66@hotmail |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.