blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c41762f2c2e70227c1da7a3d72edb9ac29b73f67 | 1d7ae7f6e7a0df98d92f9ec5f277752d14924a94 | /fake-very-small-test/wrong_case/pytorch_bike_ddqn_test-small-with-former-a-trick.py | 8a858ccaca83a7784c6175faa9f8337e5f56be9b | [] | no_license | lindsaymorgan/Mobike-Bike-Sharing-System-Dispatch-Optimization-Using-Reinforcement-Learning | 1e6b1aa3c64d2ff2e31b5d9dcc4abdc11e10679c | 6c8a329fae5c2ac8db45a3d8c55b308aae8ad804 | refs/heads/master | 2023-05-02T07:39:49.089459 | 2021-05-23T02:26:14 | 2021-05-23T02:26:14 | 279,467,461 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,869 | py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
import matplotlib.pyplot as plt
import pandas as pd
import random
import time
# hyper parameters
EPSILON = 0.85
GAMMA = 0.99
LR = 0.001
MEMORY_CAPACITY = 2000
Q_NETWORK_ITERATION = 1000
BATCH_SIZE = 128
EPISODES = 3000
need = pd.read_csv('../fake_4region_trip_20170510.csv')
ts=int(time.time())
class Env(object):
def __init__(self, region_num, move_amount_limit, eps_num):
self.region_num = region_num
self.move_amount_limit = move_amount_limit
self.action_dim = region_num * (2 * move_amount_limit + 1)
self.obs_dim = 2 * region_num + 1
self.episode_num = eps_num
self.start_region = need.groupby('start_region')
self.end_region = need.groupby('end_region')
self.t_index = {i: str(i) for i in range(eps_num)}
self.out_nums = np.array([self.start_region[str(i)].agg(np.sum) for i in range(eps_num)])
self.in_nums = np.array([self.end_region[str(i)].agg(np.sum) for i in range(eps_num)])
self.t = 0
self.obs_init = np.array([15, 15, 15, 15, 0, 0,0,0,15, 15, 15, 15, 0, 0]) # 各方格单车量+货车位置+货车上的单车量
self.obs_init[-self.region_num-2:-2] -= self.out_nums[0, ]
def init(self):
self.obs = self.obs_init.copy()
self.t = 0
return np.append(self.obs, self.t)
def step(self, action):
# 更新时间状态
self.t += 1
if self.t == self.episode_num-1:
done = True
else:
done = False
self.obs[:self.region_num+2]=self.obs[-self.region_num-2:] #更新状态
region = int(np.floor(action / (2 * self.move_amount_limit + 1)))
move = action % (2 * self.move_amount_limit + 1) - self.move_amount_limit
# 更新单车分布状态
# 处理上时段骑入
self.obs[-self.region_num-2:-2] += self.in_nums[self.t - 1, ]
reward = 0
# 筛选不合理情况 若合理 按照推算移动车辆 更新货车状态 若不合理则不采取任何操作
if move + self.obs[-self.region_num-2+region] >= 0 and move <= self.obs[-1] \
and (self.obs[-self.region_num-2+region]- self.out_nums[self.t,region])*move<=0:
self.obs[-self.region_num-2+region] += move
# 更新货车状态
self.obs[-1] -= move # 更新货车上的单车数
self.obs[-2] = region # 更新货车位置
# 更新之前的动作历史
self.obs[-self.region_num-2-1] = move # 搬动的单车数
self.obs[-self.region_num-2-2] = region # 货车位置
self.obs[-self.region_num-2:-2] -= self.out_nums[self.t, ]
reward = np.sum(self.obs[-self.region_num-2:-2][self.obs [-self.region_num-2:-2]< 0])
self.obs[-self.region_num-2:-2][self.obs [-self.region_num-2:-2]< 0] = 0
return np.append(self.obs, self.t), reward, done
class Net(nn.Module):
def __init__(self, NUM_STATES):
super(Net, self).__init__()
EMB_SIZE = 10
OTHER_SIZE = NUM_STATES-2 # fixme: update this value based on the input
self.fc1 = nn.Linear(OTHER_SIZE + EMB_SIZE * 4, 256).cuda()
self.fc1.weight.data.normal_(0, 0.1)
self.fc2 = nn.Linear(256, 64).cuda()
self.fc2.weight.data.normal_(0, 0.1)
self.fc3 = nn.Linear(64, 1).cuda()
self.fc3.weight.data.normal_(0, 0.1)
self.emb = nn.Embedding(NUM_STATES, EMB_SIZE).cuda()
def forward(self, x: torch.cuda.FloatTensor, stations: torch.cuda.LongTensor):
emb = self.emb(stations).flatten(start_dim=1)
x = torch.cat([x, emb], 1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
class Dqn():
def __init__(self, NUM_STATES, NUM_ACTIONS, region_num,move_amount_limit, eps_num):
self.eval_net, self.target_net = Net(NUM_STATES), Net(NUM_STATES)
self.memory = np.zeros((MEMORY_CAPACITY, NUM_STATES * 2 + 2))
# state, action ,reward and next state
self.memory_counter = 0
self.learn_counter = 0
self.optimizer = optim.Adam(self.eval_net.parameters(), LR)
self.loss = nn.MSELoss()
self.NUM_ACTIONS = NUM_ACTIONS
self.NUM_STATES = NUM_STATES
self.move_amount_limit = move_amount_limit
self.region_num=region_num
self.fig, self.ax = plt.subplots()
self.start_region = need.groupby('start_region')
self.end_region = need.groupby('end_region')
self.t_index = {i: str(i) for i in range(eps_num)}
self.out_nums = np.array([self.start_region[str(i)].agg(np.sum) for i in range(eps_num)])
def store_trans(self, state, action, reward, next_state):
if self.memory_counter % 10 == 0:
print("The experience pool collects {} time experience".format(self.memory_counter))
index = self.memory_counter % MEMORY_CAPACITY
trans = np.hstack((state, [action], [reward], next_state))
self.memory[index,] = trans
self.memory_counter += 1
def choose_action(self, state):
# notation that the function return the action's index nor the real action
# EPSILON
# state = torch.unsqueeze(torch.FloatTensor(state) ,0)
# feasible action
if random.random() <= EPSILON:
action=self.predict(state)
else:
feasible_action = list()
for action in range(self.NUM_ACTIONS):
move = action % (2 * self.move_amount_limit + 1) - self.move_amount_limit
region = int(np.floor(action / (2 * self.move_amount_limit + 1)))
if move + state[-self.region_num - 2 + region] >= 0 and move <= state[-2] and \
(state[-self.region_num-2+region]- self.out_nums[state[-1],region])*move<=0:
feasible_action.append(action)
action = random.choice(feasible_action)
return action
def predict(self, state):
# notation that the function return the action's index nor the real action
# EPSILON
# feasible action
feasible_action = list()
state_1 = [j for i, j in enumerate(state) if
i not in [self.region_num, self.region_num + 2, 2 * self.region_num + 4]]
state_2 = [j for i, j in enumerate(state) if
i in [self.region_num, self.region_num + 2, 2 * self.region_num + 4]]
tmp_x = list()
tmp_y = list()
for action in range(self.NUM_ACTIONS):
move = action % (2 * self.move_amount_limit + 1) - self.move_amount_limit
region = int(np.floor(action / (2 * self.move_amount_limit + 1)))
if move + state[-self.region_num - 2 + region] >= 0 and move <= state[-2]\
and (state[-self.region_num-2+region]- self.out_nums[state[-1],region])*move<=0:
feasible_action.append(action)
tmp_x.append(np.concatenate([state_1, np.array([move])]))
tmp_y.append(np.concatenate([state_2, np.array([region])]))
x = torch.FloatTensor(tmp_x).cuda()
station = torch.LongTensor(tmp_y).cuda()
action_val = self.target_net.forward(x, station)
max_indice = [i for i, j in enumerate([i[0] for i in action_val]) if
j == np.max([i[0] for i in action_val])] # 找最大index
action = feasible_action[random.choice(max_indice)] # 如果有多个index随机选一个,获得对应action
return action
def plot(self, ax, x):
ax.cla()
ax.set_xlabel("episode")
ax.set_ylabel("total reward")
ax.plot(x, 'b-')
plt.pause(0.000000000000001)
def learn(self):
# learn 100 times then the target network update
if self.learn_counter % Q_NETWORK_ITERATION == 0:
self.target_net.load_state_dict(self.eval_net.state_dict())
self.learn_counter += 1
if self.learn_counter % 50 == 0:
test_x=torch.FloatTensor([[11,12,12,7,0,0,5,5,3,0,0,1,-5],[5,5,3,0,0,0,10,11,0,3,0,2,-10],
[11,12,12,7,0,-1,4,5,3,0,1,1,-5],[10,8,0,3,3,3,8,9,0,0,0,3,-9]]).cuda()
test_station=torch.LongTensor([[0,3,3,0],[3,0,0,0],[0,0,0,0],[1,3,3,0]]).cuda()
action_val = self.target_net.forward(test_x, test_station)
print(np.mean(action_val.cpu().detach().numpy()), file=open(f"result_history/ddqn_output_action_value_{ts}.txt", "a"))
sample_index = np.random.choice(MEMORY_CAPACITY, BATCH_SIZE)
# 切取sars切片
batch_memory = self.memory[sample_index, :]
batch_reward = torch.FloatTensor(batch_memory[:, self.NUM_STATES + 1: self.NUM_STATES + 2]).cuda()
x=torch.FloatTensor(np.delete(batch_memory[:, :self.NUM_STATES],
[self.region_num,self.region_num+2,self.region_num*2+4], 1)).cuda()
move = torch.FloatTensor([[i[0] % (2 * self.move_amount_limit + 1) - self.move_amount_limit] for i in
batch_memory[:, self.NUM_STATES:self.NUM_STATES + 1]]).cuda()
x = torch.cat((x, move), 1)
y=torch.LongTensor(batch_memory[:, [self.region_num,self.region_num+2,self.region_num*2+4]]).cuda()
region = torch.LongTensor([[int(np.floor(i[0] / (2 * self.move_amount_limit + 1)))] for i in
batch_memory[:, self.NUM_STATES:self.NUM_STATES + 1]]).cuda()
y = torch.cat((y, region), 1)
q_eval = self.eval_net(x, y)
tmp_q_next = list()
for state in batch_memory[:, -self.NUM_STATES:]:
feasible_action = list()
m_r_list=list()
for action in range(self.NUM_ACTIONS):
move = action % (2 * self.move_amount_limit + 1) - self.move_amount_limit
region = int(np.floor(action / (2 * self.move_amount_limit + 1)))
if move + state[-self.region_num-2+region] >= 0 and move <= state[-2]\
and (state[-self.region_num-2+region]- self.out_nums[int(state[-1]),region])*move<=0:
feasible_action.append(action)
m_r_list.append((move,region))
tmp_x = list()
tmp_y = list()
# 对每个feasible action算value
state_1 = [j for i, j in enumerate(state) if
i not in [self.region_num, self.region_num + 2, 2 * self.region_num + 4]]
state_2 = [j for i, j in enumerate(state) if
i in [self.region_num, self.region_num + 2, 2 * self.region_num + 4]]
for move,region in m_r_list:
# move = action % (2 * self.move_amount_limit + 1) - self.move_amount_limit
# region = int(np.floor(action / (2 * self.move_amount_limit + 1)))
tmp_x.append(np.concatenate([state_1, np.array([move])]))
tmp_y.append(np.concatenate([state_2, np.array([region])]))
x = torch.FloatTensor(tmp_x).cuda()
station = torch.LongTensor(tmp_y).cuda()
# action_val = self.target_net.forward(x, station)
current_action_val = self.eval_net.forward(x, station)
values, indices = current_action_val.max(1)[0].max(0)
action_val = self.target_net.forward(x, station)
tmp_q_next.append([float(action_val[indices].cpu().detach().numpy())])
q_next = torch.FloatTensor(tmp_q_next).cuda()
# q_target = batch_reward + GAMMA*q_next.max(1)[0].view(BATCH_SIZE, 1)
q_target = batch_reward + GAMMA * q_next
loss = self.loss(q_eval, q_target)
print(loss.item(), file=open(f"result_history/ddqn_output_loss_{ts}.txt", "a"))
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# 评估 agent, 跑 5 个episode,总reward求平均
def evaluate(self, env, render=False):
eval_reward = []
for i in range(1):
obs = env.init()
episode_reward = 0
while True:
action = self.predict(obs) # 预测动作,只选最优动作
obs, reward, done = env.step(action)
episode_reward += reward
print(f"obs:{obs[:-1]} action:{action} reward:{reward} reward_sum:{episode_reward} t:{obs[-1]}")
print(
f"obs:{obs[:-1]} t:{obs[-1]} region:{int(np.floor(action / (2 * self.move_amount_limit + 1)))} "
f"move:{action % (2 * self.move_amount_limit + 1) - self.move_amount_limit} reward:{reward} "
f"reward_sum:{episode_reward}",
file=open(f"result_action/ddqn_output_action_{ts}.txt", "a"))
# if render:
# env.render()
if done:
break
eval_reward.append(episode_reward)
return np.mean(eval_reward)
def main():
eps_num = 5
env = Env(region_num=4, move_amount_limit=10, eps_num=eps_num)
NUM_ACTIONS = (2 * env.move_amount_limit + 1) * env.region_num # [-500,500]*4个方块
NUM_STATES = 2*env.region_num + 7 # MountainCar-v0: (2,)
net = Dqn(NUM_STATES, NUM_ACTIONS, env.region_num, env.move_amount_limit, eps_num)
print("The DQN is collecting experience...")
step_counter_list = []
for episode in range(EPISODES):
state = env.init()
step_counter = 0
reward_sum = 0
while True:
step_counter += 1
# env.render()
action = net.choose_action(state)
# print("the action is {}".format(action))
next_state, reward, done = env.step(action)
net.store_trans(state, action, reward, next_state)
reward_sum += reward
if net.memory_counter >= MEMORY_CAPACITY:
net.learn()
if done:
print("episode {}, the reward is {}".format(episode, round(reward_sum, 3)))
print(f"{round(reward_sum, 3)}", file=open(f"result_history/ddqn_output_result_{ts}.txt", "a"))
if done:
step_counter_list.append(step_counter)
net.plot(net.ax, step_counter_list)
break
state = next_state
print(net.evaluate(env))
if __name__ == '__main__':
main()
| [
"lindsaymarymorgan@gmail.com"
] | lindsaymarymorgan@gmail.com |
4d3ad13d97346c629af9713a9ec75583d934cf79 | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/web/v20181101/list_web_app_sync_function_triggers.py | f1ecce2911774db5bc639f91d0d5aee728445211 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,108 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'ListWebAppSyncFunctionTriggersResult',
'AwaitableListWebAppSyncFunctionTriggersResult',
'list_web_app_sync_function_triggers',
]
@pulumi.output_type
class ListWebAppSyncFunctionTriggersResult:
"""
Function secrets.
"""
def __init__(__self__, id=None, key=None, kind=None, name=None, trigger_url=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if key and not isinstance(key, str):
raise TypeError("Expected argument 'key' to be a str")
pulumi.set(__self__, "key", key)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if trigger_url and not isinstance(trigger_url, str):
raise TypeError("Expected argument 'trigger_url' to be a str")
pulumi.set(__self__, "trigger_url", trigger_url)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def key(self) -> Optional[str]:
"""
Secret key.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="triggerUrl")
def trigger_url(self) -> Optional[str]:
"""
Trigger URL.
"""
return pulumi.get(self, "trigger_url")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableListWebAppSyncFunctionTriggersResult(ListWebAppSyncFunctionTriggersResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListWebAppSyncFunctionTriggersResult(
id=self.id,
key=self.key,
kind=self.kind,
name=self.name,
trigger_url=self.trigger_url,
type=self.type)
def list_web_app_sync_function_triggers(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListWebAppSyncFunctionTriggersResult:
"""
Function secrets.
:param str name: Name of the app.
:param str resource_group_name: Name of the resource group to which the resource belongs.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:web/v20181101:listWebAppSyncFunctionTriggers', __args__, opts=opts, typ=ListWebAppSyncFunctionTriggersResult).value
return AwaitableListWebAppSyncFunctionTriggersResult(
id=__ret__.id,
key=__ret__.key,
kind=__ret__.kind,
name=__ret__.name,
trigger_url=__ret__.trigger_url,
type=__ret__.type)
| [
"noreply@github.com"
] | MisinformedDNA.noreply@github.com |
a1f7f5b65197ee1efee0da35f8bdcdf5b748aa55 | f60b11b2fd19fe23b966fdd667f2ad96fcb46456 | /common/utils/email_utils.py | 6cf06b07bf7e3b2e76341d511d3ae16cc88f7c52 | [
"MIT"
] | permissive | gurodriguez/backendtest | 87e9b31c55be018ac14f655d7efe7171d1b26e2f | dec5bf8e55180cbf918ddc6de5b3703e2aa0f9eb | refs/heads/main | 2023-06-02T11:43:33.033705 | 2021-06-17T21:49:24 | 2021-06-17T21:49:24 | 357,601,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 876 | py | from django.core.mail import send_mail
from smtplib import SMTPException
from django.conf import settings
from django.template import Context
from django.template.loader import render_to_string
from django.utils.html import strip_tags
def send_product_notification(product, subject):
""" Send email when there is a product's change"""
try:
html_message = render_to_string('product_notifications.html', {
'subject': subject, 'product': product})
plain_message = strip_tags(html_message)
send_mail(
subject,
plain_message,
settings.DEFAULT_FROM_EMAIL,
settings.SYSADMIN,
html_message=html_message
)
except SMTPException as e:
print('There was an error sending an email.' + e)
except:
print("Mail Sending Failed!")
| [
"gabriel.gurodriguez@gmail.com"
] | gabriel.gurodriguez@gmail.com |
b60c9bda4543a9d1eea9e2dcb1aaa232ff3f30c5 | 568d1f31c7660521aaa1d61c757dec1fabc0e519 | /A2/demoMissing.py | 5c52ef35b663e91663ac3755b84dcd5dbcf57d51 | [] | no_license | andrewklinkman/PubPol590 | 6f12cd7c9dd07fd86385a9c3588411ae3b969e32 | d6041fadd9e10d85ca02ffec75e669eab5b4ca65 | refs/heads/master | 2016-08-04T12:02:24.757497 | 2015-04-20T21:56:15 | 2015-04-20T21:56:15 | 29,268,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,179 | py | from pandas import Series, DataFrame
import pandas as pd
import numpy as np
#file paths
main_dir = "/Users/andrewklinkman/Dropbox/Duke/School Stuff 14-15/Spring 2015/PUBPOL 590"
git_dir = "/Users/andrewklinkman/GitHub/PubPol590/A2"
csv_dir = "/sample_missing.csv"
#IMPORTING DATA (setting missing values = sentinels)-------------------
df = pd.read_csv(git_dir + csv_dir)
df.head(10) #top 10 values - default = 5
df[:10]
df.tail(10)
df['consump'].head(10).apply(type) #apply function 'type' to all records
#we don't want string data. periods are common missing data placeholders in some languages
#so, we need to create new sentinels to adjust for this. na_values = use sentinels
missing = ['.', 'NA', 'NULL', '']#vector of potential missing symbols
df = pd.read_csv(git_dir + csv_dir, na_values = missing)
#NaN = not a number
df['consump'].head(10).apply(type)
#MISSING DATA (using smaller dataframe) -----------------------------
#quick tip: repeat lists by multiplication
[1,2,3] * 3 #makes a new list, repeating this list x times
#types of missing data:
None #standard
np.nan #from numpy
type(None)
type(np.nan) #missing value that is considered numeric
#create a small sample dataset
zip1 = zip([2,4,8], [np.nan, 5, 7], [np.nan, np.nan, 22])
df1 = DataFrame(zip1, columns = ['a', 'b', 'c'])
#search for missing data using
df1.isnull() #pandas method to find missing data
#FIND METHODS FOR ANY OBJ
#type object, period, then hit tab
df1.isnull() #pandas
np.isnan(df1) #numpy
#subset of columns
cols = ['a', 'c']
df1[cols].isnull() #.notnull = opposite
df1['b'].isnull() #creates a vector -> more useful
# FILLING IN/DROPPING MISSING VALUES ----------------------
#pandas method 'fillna'
df2 = df1.fillna(999)
#dropping = slightly more usual
df1.dropna() #drops ROWS with any missing values
df1.dropna(axis = 0, how = 'any') #drop rows with any missing values
df1.dropna(axis = 1, how = 'any') #drop cols
df1.dropna(axis = 0, how = 'all') #only drop where whole row is empty
df.dropna(axis = 0, how = 'all')
#SEEING ROWS WITH MISSING DATA --------------------------
df3 = df.dropna(how = 'all')
df3.head(10)
rows = df3['consump'].isnull()
df3[rows]
#
| [
"andrew.klinkman@duke.edu"
] | andrew.klinkman@duke.edu |
4df91054408fa95d2297a9f28ec8497e8ca715fd | 21124f2534dfb9ce7cfb19b3a9f17e7641da9de6 | /xpaxos/__init__.py | b73c4c91d02ed0f1cfe98fba3d920fb5636dfd6a | [] | no_license | wgr523/proj4 | c44a40b1fbfbb35129385ec4b8dbbf6064cd0f91 | 4ae6ed332e326c5c867de72c2a21509603b98e2c | refs/heads/master | 2020-03-29T19:44:07.450438 | 2016-06-22T06:45:51 | 2016-06-22T06:45:51 | 61,082,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51 | py | import xpaxos.MessageHandler
import xpaxos.MyPaxos
| [
"wgr54@126.com"
] | wgr54@126.com |
0ba62c7b2098a01217928c521e4ce0dd255959f4 | 2d9c91e9cd2329962a2e0186e4174ff1c8f02c63 | /BtStream/python_scripts/aAccelDaccel10Hz.py | 3bcec9c652ab544fc96a114e2787c9501b723836 | [] | no_license | mixtoism/shimmer3 | 1370aca1ae5b734ad983c3408ada5010a7428f41 | 08502485ba45926a70826780a37354e27b698331 | refs/heads/master | 2021-08-29T08:42:02.943197 | 2017-12-13T15:51:55 | 2017-12-13T15:51:55 | 114,138,811 | 0 | 0 | null | 2017-12-13T15:52:20 | 2017-12-13T15:43:07 | C | UTF-8 | Python | false | false | 1,988 | py | #!/usr/bin/python
import sys, struct, serial
def wait_for_ack():
ddata = ""
ack = struct.pack('B', 0xff)
while ddata != ack:
ddata = ser.read(1)
return
if len(sys.argv) < 2:
print "no device specified"
print "You need to specify the serial port of the device you wish to connect to"
print "example:"
print " analogaccelDaccel10Hz.py Com12"
print "or"
print " analogaccelDaccel10Hz.py /dev/rfcomm0"
else:
ser = serial.Serial(sys.argv[1], 115200)
ser.flushInput()
# send the set sensors command
ser.write(struct.pack('BBBB', 0x08, 0x80, 0x10, 0x00)) #analog accel, digi accel
wait_for_ack()
# send the set sampling rate command
ser.write(struct.pack('BBB', 0x05, 0x80, 0x0C)) #10.24Hz (3200 (0xC80)). Has to be done like this for alignment reasons
wait_for_ack()
# send start streaming command
ser.write(struct.pack('B', 0x07))
wait_for_ack()
# read incoming data
ddata = ""
numbytes = 0
framesize = 15 # 1byte packet type + 2byte timestamp + 3x2byte Analog Accel + 3x2byte Accel
print "Packet Type,Timestamp,Analog Accel X,Analog Accel Y,Analog Accel ZDigital Accel X,Digital Accel Y,Digital Accel Z"
try:
while True:
while numbytes < framesize:
ddata += ser.read(framesize)
numbytes = len(ddata)
data = ddata[0:framesize]
ddata = ddata[framesize:]
numbytes = len(ddata)
(packettype) = struct.unpack('B', data[0:1])
(timestamp, analogaccelx, analogaccely, analogaccelz, digiaccelx, digiaccely, digiaccelz) = struct.unpack('HHHHhhh', data[1:framesize])
print "0x%02x,%5d,\t%4d,%4d,%4d,\t\t%4d,%4d,%4d " % (packettype[0], timestamp, analogaccelx, analogaccely, analogaccelz, digiaccelx, digiaccely, digiaccelz)
except KeyboardInterrupt:
#send stop streaming command
ser.write(struct.pack('B', 0x20))
wait_for_ack()
#close serial port
ser.close()
print
print "All done"
| [
"somahony@shimmersensing.com"
] | somahony@shimmersensing.com |
0090ca9595236b1767fcd7dcda4cfb9410f8c38b | 25d2588ae29778a2d9ea06f6c45db6aa2ea873ad | /min_points/reducer.py | e977e1837afd50a7c0c5a2f54ab057779e0b25e2 | [] | no_license | s523286/Wine-Reviews-By-Country | b6c6b39eba0fbbe42bdbcf82118453ff2457b37d | dab8fb4ad512341e543cccd32a394754afbecdbf | refs/heads/master | 2020-04-05T22:13:58.363697 | 2018-12-04T20:41:54 | 2018-12-04T20:41:54 | 157,249,468 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 818 | py | # opens the file to read and write from
s = open("s.txt","r")
r = open("r.txt", "w")
# setting thisKey and min to 0
thisKey = ""
min = 0.0
# It is going to read every line in and strip and split it
for line in s:
data = line.strip().split("\t")
# if the data isnt in key value pairs then it will continue
if len(data) != 2:
continue
# Setting country and points to data
country, points = data
if country != thisKey:
if thisKey:
# output the key value pair result
r.write(thisKey + '\t' + str(min)+'\n')
# start over when changing keys
thisKey = country
min = points
# statment to find the minimum points
if int(points) < min:
min = int(points)
# output the final entry when done
r.write(thisKey + '\t' + str(min)+'\n')
# closes the files
s.close()
r.close() | [
"S529039@nwmissouri.edu"
] | S529039@nwmissouri.edu |
29d146957929d2f90f9f0d84b7c4eef2260958af | 4db6ec888fb7e6a20ce283c1c32046f599cd4f13 | /CLJ_app/forms.py | 435d75c6f21413ee4fae44ec6fd7b077d3ec0519 | [] | no_license | jcrawfo4/AssessmentFour | 113aa11487381c66d52b5a98b5ec354a69256e12 | 1d480251d360bf70b3bc502e32e987891be71b97 | refs/heads/main | 2023-07-10T21:05:50.200228 | 2021-08-20T19:38:54 | 2021-08-20T19:38:54 | 359,334,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | from django import forms
from django.db import models
from django.forms import fields
from .models import Category, Post
class CategoryForm(forms.ModelForm):
class Meta:
model = Category
fields = ['name']
class PostForm(forms.ModelForm):
    """ModelForm for the project's Post model; exposes ``category`` and ``title``."""
    class Meta:
        model = Post
        fields = ['category','title']
"jkbcrawford@gmail.com"
] | jkbcrawford@gmail.com |
5ba8920e5965fadab6f22ea7e8b6789c0bfdec20 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/CIG-CLIENTS-MIB.py | 6f8d0dcca8db283b66f74bf6098d29473eb995d6 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 33,163 | py | #
# PySNMP MIB module CIG-CLIENTS-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CIG-CLIENTS-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:49:34 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint", "ConstraintsIntersection")
InetAddressType, InetAddress = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddressType", "InetAddress")
ObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "NotificationGroup", "ModuleCompliance")
IpAddress, Integer32, TimeTicks, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32, Unsigned32, MibIdentifier, Gauge32, enterprises, Bits, NotificationType, ObjectIdentity, iso, Counter64, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "IpAddress", "Integer32", "TimeTicks", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter32", "Unsigned32", "MibIdentifier", "Gauge32", "enterprises", "Bits", "NotificationType", "ObjectIdentity", "iso", "Counter64", "ModuleIdentity")
DisplayString, TextualConvention, TimeInterval, DateAndTime, RowStatus, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention", "TimeInterval", "DateAndTime", "RowStatus", "TruthValue")
cigClients = ModuleIdentity((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17))
if mibBuilder.loadTexts: cigClients.setLastUpdated('200910251352Z')
if mibBuilder.loadTexts: cigClients.setOrganization('Avaya')
if mibBuilder.loadTexts: cigClients.setContactInfo(' Avaya Customer Services Postal: Avaya, Inc. 211 Mt Airy Rd. Basking Ridge, NJ 07920 USA Tel: +1 908 953 6000 WWW: http://www.avaya.com ')
if mibBuilder.loadTexts: cigClients.setDescription(" This module defines the cig Clients. The first client in the DHCP client used for getting dynamic IP address and configuration parameters from DHCP server.The DHCP client will be a function of interfaces. The second client is DNS resolver client. Copyright notice: This AVAYA SNMP Management Information Base Specification (Specification) embodies AVAYA confidential and Proprietary intellectual property. AVAYA retains all Title and ownership in the Specification, including any revisionsIt is AVAYA's intent to encourage the widespread use of this Specification in connection with the management of AVAYA products. AVAYA grants vendors, end-users, and other interested parties a non-exclusive license to use this Specification in connection with the management of AVAYA products.This Specification is supplied 'as is', and AVAYA makes no warranty, either express or implied, as to the use, operation, condition, or performance of the Specification. ")
avaya = MibIdentifier((1, 3, 6, 1, 4, 1, 6889))
mibs = MibIdentifier((1, 3, 6, 1, 4, 1, 6889, 2))
lsg = MibIdentifier((1, 3, 6, 1, 4, 1, 6889, 2, 1))
cigDhcpClients = MibIdentifier((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 1))
cigDnsResolver = MibIdentifier((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 2))
cigDhcpClientsNotification = MibIdentifier((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 1, 0))
cigDhcpClientsConflictDetectionTrap = NotificationType((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 1, 0, 1)).setObjects(("CIG-CLIENTS-MIB", "cigDhcpClientsIfAlias"), ("CIG-CLIENTS-MIB", "cigDhcpClientsIPAddress"), ("CIG-CLIENTS-MIB", "cigDhcpClientsHostName"), ("CIG-CLIENTS-MIB", "cigDhcpClientsClientId"), ("CIG-CLIENTS-MIB", "cigDhcpClientsServerIpAddr"))
if mibBuilder.loadTexts: cigDhcpClientsConflictDetectionTrap.setStatus('current')
if mibBuilder.loadTexts: cigDhcpClientsConflictDetectionTrap.setDescription('DHCP IP address conflict detection. This trap is based on: The Client Decline Packets after the client sent gratuitous Arp and get response which implies that another station use this IP address.')
cigDhcpClientsTable = MibTable((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 1, 1), )
if mibBuilder.loadTexts: cigDhcpClientsTable.setStatus('current')
if mibBuilder.loadTexts: cigDhcpClientsTable.setDescription('cigDhcpClientsTable parameters. Used for DHCP clients which gathered dynamic IP addresses and configuration from DHCP servers.')
cigDhcpClientsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 1, 1, 1), ).setIndexNames((0, "CIG-CLIENTS-MIB", "cigDhcpClientsIfIndex"))
if mibBuilder.loadTexts: cigDhcpClientsEntry.setStatus('current')
if mibBuilder.loadTexts: cigDhcpClientsEntry.setDescription('cigDhcpClients Entry Parameters.')
cigDhcpClientsIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 1, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cigDhcpClientsIfIndex.setStatus('current')
if mibBuilder.loadTexts: cigDhcpClientsIfIndex.setDescription('A unique value for each interface. This value is same as ifIndex from ifTable. The DHCP client can be enabled on Vlan and on WAN Fast Ethernet interfaces. ')
cigDhcpClientsRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 1, 1, 1, 2), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cigDhcpClientsRowStatus.setStatus('current')
if mibBuilder.loadTexts: cigDhcpClientsRowStatus.setDescription(' This is a row status mib item For adding and deleting DHCP clients interfaces. For configuring only DHCP client parameters use create and wait for specific interface. When activating the DHCP client, activate the row. Activating the row will create a new row in the ipDynamic Table in CROUTE-MIB that presents dynamic ipInterface. Deactivating the DHCP client will cause to erasing the row in the ipDynamic Mib table and deactivating the DHCP client row in order to keep the specific DHCP parameters which already were configured. ')
cigDhcpClientsIfAlias = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 1, 1, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cigDhcpClientsIfAlias.setStatus('current')
if mibBuilder.loadTexts: cigDhcpClientsIfAlias.setDescription("This object is an 'alias' name for the lower-layer interface on top of which this IP interface is defined. It is the same as the ifAlias from the standard interface table.")
cigDhcpClientsIPAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 1, 1, 1, 4), IpAddress().clone(hexValue="00000000")).setMaxAccess("readonly")
if mibBuilder.loadTexts: cigDhcpClientsIPAddress.setStatus('current')
if mibBuilder.loadTexts: cigDhcpClientsIPAddress.setDescription('The IP address received from remote peer during the DHCP session. This is the same MIB item as ipInterface table in CROUTE-MIB.')
cigDhcpClientsSubnetMask = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 1, 1, 1, 5), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cigDhcpClientsSubnetMask.setStatus('current')
if mibBuilder.loadTexts: cigDhcpClientsSubnetMask.setDescription('The subnet mask associated with the IP address of this entry. The value of the mask is an IP address with all the network bits set to 1 and all the hosts bits set to 0. In case of DHCP client this is DHCP option 1 == Subnet Mask. This is the same MIB item as ipInterface table in CROUTE-MIB.')
cigDhcpClientsClientId = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 1, 1, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cigDhcpClientsClientId.setStatus('current')
if mibBuilder.loadTexts: cigDhcpClientsClientId.setDescription("Client identifier used for manual leased. The structure of the client identifier is as follows: combination of TYPE and Data. Where Type = 01 if for Ethernet MACs and Data for this case if Ethernet MAC address. The user can use also other client identifiers as strings. Default: '' - 01:Interface MAC address.")
cigDhcpClientsHostName = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 1, 1, 1, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cigDhcpClientsHostName.setStatus('current')
if mibBuilder.loadTexts: cigDhcpClientsHostName.setDescription("Host name is sent by the DHCP client as Dhcp option 12. It is specifically used by part of DHCP servers (MS2000) to identify the client in the binding file (more user intuitive). This option is used as well by part of the DHCP server with the support of DHCP client (sending option 81) to update the DNS server with the new IP for specific host. The format of the string can be: 'usernameA'. The hostname shall comply to RFC 1035 naming convention with the following exceptions: a. allow digits at the start of the name. b. Do not check length constraints of 63 bytes. The default of the host name used by the DHCP client is the device host name.")
cigDhcpClientsRequestLeaseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 1, 1, 1, 8), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cigDhcpClientsRequestLeaseTime.setStatus('current')
if mibBuilder.loadTexts: cigDhcpClientsRequestLeaseTime.setDescription('The Requested leased time in seconds. Infinite is defined as 0xffffffff. 0 - used to present that no lease time requested from the DHCP server. I.e. option 51 will not be sent by the client. Default 0 seconds.')
cigDhcpClientsReceiveLeaseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 1, 1, 1, 9), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cigDhcpClientsReceiveLeaseTime.setStatus('current')
if mibBuilder.loadTexts: cigDhcpClientsReceiveLeaseTime.setDescription('The received leased time from the DHCP server in seconds. Infinite is defined as 0xffffffff. Default 0 seconds.')
cigDhcpClientsRemainLeaseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 1, 1, 1, 10), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cigDhcpClientsRemainLeaseTime.setStatus('current')
if mibBuilder.loadTexts: cigDhcpClientsRemainLeaseTime.setDescription('The remains leased time in seconds . Infinite is defined as 0xffffffff. Default 0 seconds.')
cigDhcpClientsRenewLeaseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 1, 1, 1, 11), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cigDhcpClientsRenewLeaseTime.setStatus('current')
if mibBuilder.loadTexts: cigDhcpClientsRenewLeaseTime.setDescription('The time defined for renewing phase in seconds . Infinite is defined as 0xffffffff. It is defined as 0.5 of the receive lease. Default 0 seconds.')
cigDhcpClientsRebindLeaseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 1, 1, 1, 12), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cigDhcpClientsRebindLeaseTime.setStatus('current')
if mibBuilder.loadTexts: cigDhcpClientsRebindLeaseTime.setDescription('The time defined for rebinding phase in seconds . Infinite is defined as 0xffffffff. It is defined as 0.875 of the receive lease. Default 0 seconds.')
cigDhcpClientsDefaultGatewayList = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 1, 1, 1, 13), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cigDhcpClientsDefaultGatewayList.setStatus('current')
if mibBuilder.loadTexts: cigDhcpClientsDefaultGatewayList.setDescription("Default Gateway is Dhcp option 3. Default = '' = Null String. The format of the string can be up to 8 IP addresses in the list, for example: '121.23.1.12 135.64.13.190 192.123.242.230'")
cigDhcpClientsDnsServerList = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 1, 1, 1, 14), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cigDhcpClientsDnsServerList.setStatus('current')
if mibBuilder.loadTexts: cigDhcpClientsDnsServerList.setDescription("DNS Server is Dhcp option 6. Default = '' = Null String. The format of the string can be up to 8 IP addresses in the list, for exapmle: '121.23.1.12 135.64.13.190 192.123.242.230'")
cigDhcpClientsDomainName = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 1, 1, 1, 15), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cigDhcpClientsDomainName.setStatus('current')
if mibBuilder.loadTexts: cigDhcpClientsDomainName.setDescription("DNS Server is Dhcp option 15. Default = Null String The format of the string can be: 'avaya' There is no ability to configure this option in the general option table. ")
cigDhcpClientsServerIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 1, 1, 1, 16), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cigDhcpClientsServerIpAddr.setStatus('current')
if mibBuilder.loadTexts: cigDhcpClientsServerIpAddr.setDescription('The DHCP server IP address that the client gets from it the IP address allocation and keep requesting for lease extension from it. In case no IP address is gathered from DHCP server then value is 0.0.0.0. Default 0.0.0.0.')
cigDhcpClientsOperations = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 1, 1, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("idle", 1), ("release", 2), ("renew", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cigDhcpClientsOperations.setStatus('current')
if mibBuilder.loadTexts: cigDhcpClientsOperations.setDescription('Used for instruct the DHCP client to perfrom release or renew operations.')
cigDhcpClientsStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 1, 1, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 255))).clone(namedValues=NamedValues(("select", 1), ("request", 2), ("bound", 3), ("rebind", 4), ("renew", 5), ("release", 6), ("decline", 7), ("reboot", 8), ("idle", 9), ("notSupported", 255)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cigDhcpClientsStatus.setStatus('current')
if mibBuilder.loadTexts: cigDhcpClientsStatus.setDescription('The status of the DHCP client. Select - Sending DISCOVER packets Request - Requesting IP address after selecting from all the offer packets. Bound - Receiving ACK packet and there is no conflict detection by sending ARP packets. Rebind - After 0.875 out of the lease time the renewal phase enter to rebind. Renew - After 0.5 out of the lease time the device try to renew the lease until 0.875 of the lease time. Release - The client release the lease. Decline - The client detect IP conflict by sending ARP packet. Idle - The client did not get IP address and keep trying after an Idle time (~40 seconds) to reach new IP address. ')
cigDhcpClientsRequestBitmap = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 1, 1, 1, 19), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cigDhcpClientsRequestBitmap.setStatus('current')
if mibBuilder.loadTexts: cigDhcpClientsRequestBitmap.setDescription(" Each bit of this value set to '1' indicates a specific requested DHCP option (Bitmap of 255 options represented by 32Bytes). When the bit is set to '1' then specific DHCP option is requested by the client. Subnet mask bit 1 - is always requested. Bits are numbered from the most-significant leftmost bit in the octet (bit 0) to the least-significant rightmost bit (bit 7). Numbering continues across octets, so that octet 1 bit 0 is called bit 8. Default: '0x5201' which equals to: bit 1 - Subnet mask (Always requested!) Bit 3 - Default routers Bit 6 - DNS servers Bit 15 - Domain Name")
cigDhcpClientsDefaultRouterTrackId = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 1, 1, 1, 20), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cigDhcpClientsDefaultRouterTrackId.setStatus('current')
if mibBuilder.loadTexts: cigDhcpClientsDefaultRouterTrackId.setDescription('Bind the status of the DHCP default router to an object-tracker by specifying the ID of the object-tracker (avstrTrackerId in AVAYA-SAA-TRACK-MIB). A value of 0 means that the default router is not bound to any object-tracker. A value of 0xffffffff is notSupported feature for the relevant default router. ')
cigDnsResolverGenConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 2, 1))
cigDnsResolverMode = MibScalar((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 2, 1, 1), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cigDnsResolverMode.setStatus('current')
if mibBuilder.loadTexts: cigDnsResolverMode.setDescription('Enable the DNS resolver in the device. Default = True (Enable).')
cigDnsResolverRetry = MibScalar((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 2, 1, 2), Unsigned32().clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cigDnsResolverRetry.setStatus('current')
if mibBuilder.loadTexts: cigDnsResolverRetry.setDescription('The number of retries for each query for specific DNS server when there is timeout. The range is 0-100.')
cigDnsResolverTimeout = MibScalar((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 2, 1, 3), Unsigned32().clone(3)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cigDnsResolverTimeout.setStatus('current')
if mibBuilder.loadTexts: cigDnsResolverTimeout.setDescription('The timeout in seconds for each query for specific DNS server. The range is 1-3600 seconds.')
cigDnsResolverOperations = MibScalar((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("idle", 1), ("clearDynamicServers", 2), ("clearDNSCache", 3))).clone('idle')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cigDnsResolverOperations.setStatus('current')
if mibBuilder.loadTexts: cigDnsResolverOperations.setDescription('Used for specific operations for DNS server. clearDynamicServers(2) will clear all the dynamic servers (learnt from PPP interfaces and DHCP clients) from DNS server list. clearDNSCache(3) - Clear the DNS Resolver cache. ')
cigDnsResolverDnsServersListTable = MibTable((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 2, 2), )
if mibBuilder.loadTexts: cigDnsResolverDnsServersListTable.setStatus('current')
if mibBuilder.loadTexts: cigDnsResolverDnsServersListTable.setDescription('Lists of DNS servers. The default list(1) includes all the static DNS server and the dynamic DNS servers from the DHCP clients/PPP interfaces as PPPoE and Dial out modem. ')
cigDnsResolverDnsServersListEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 2, 2, 1), ).setIndexNames((0, "CIG-CLIENTS-MIB", "cigDnsResolverDnsServersListIndex"))
if mibBuilder.loadTexts: cigDnsResolverDnsServersListEntry.setStatus('current')
if mibBuilder.loadTexts: cigDnsResolverDnsServersListEntry.setDescription('The DNS servers list. Lists of DNS servers.')
cigDnsResolverDnsServersListIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 2, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cigDnsResolverDnsServersListIndex.setStatus('current')
if mibBuilder.loadTexts: cigDnsResolverDnsServersListIndex.setDescription('The index of the list of DNS servers. Default list index is 1. The user will not be able to create lists other then default list at the first release.')
cigDnsResolverDnsServersListDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 2, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cigDnsResolverDnsServersListDescription.setStatus('current')
if mibBuilder.loadTexts: cigDnsResolverDnsServersListDescription.setDescription("This is the list description. Default for DNS list #1: 'DNS list #1'")
cigDnsResolverDnsServersListRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 2, 2, 1, 3), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cigDnsResolverDnsServersListRowStatus.setStatus('current')
if mibBuilder.loadTexts: cigDnsResolverDnsServersListRowStatus.setDescription('This is a row status mib item For adding and deleting DNS server lists. The user will not be able to create lists other then list 1 at the first release.')
cigDnsResolverDnsServerTable = MibTable((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 2, 3), )
if mibBuilder.loadTexts: cigDnsResolverDnsServerTable.setStatus('current')
if mibBuilder.loadTexts: cigDnsResolverDnsServerTable.setDescription('DNS servers. The user can configure up to 6 static servers located in places 1-6. The other indexes are of dynamic servers which can not be created (learnt automatically by the device) but can be erased. The indexes 1-99 are for static servers (only the first 6 are configurable). The indexes 100-199 are for dynamic servers. ')
cigDnsResolverDnsServerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 2, 3, 1), ).setIndexNames((0, "CIG-CLIENTS-MIB", "cigDnsResolverDnsServerListIndex"), (0, "CIG-CLIENTS-MIB", "cigDnsResolverDnsServerIndex"))
if mibBuilder.loadTexts: cigDnsResolverDnsServerEntry.setStatus('current')
if mibBuilder.loadTexts: cigDnsResolverDnsServerEntry.setDescription('The DNS server entry. The user can configure up to 6 static servers located in places 1-6. The other indexes are of dynamic servers which can not be created (learnt automatically by the device) but can be erased. The indexes 1-99 are for static servers (only the first 6 are configurable). The indexes 100-199 are for dynamic servers.')
cigDnsResolverDnsServerListIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 2, 3, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cigDnsResolverDnsServerListIndex.setStatus('current')
if mibBuilder.loadTexts: cigDnsResolverDnsServerListIndex.setDescription('The index of the list of DNS servers. Default list index is 1. The user will not be able to create lists other then default list at the first release.')
cigDnsResolverDnsServerIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 2, 3, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cigDnsResolverDnsServerIndex.setStatus('current')
if mibBuilder.loadTexts: cigDnsResolverDnsServerIndex.setDescription('The index of DNS server in the list of DNS servers. The index presents the preference of the server. The lower the index the higher priority to use it.')
cigDnsResolverDnsServerIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 2, 3, 1, 3), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cigDnsResolverDnsServerIpAddress.setStatus('current')
if mibBuilder.loadTexts: cigDnsResolverDnsServerIpAddress.setDescription('The DNS server IP address.')
cigDnsResolverDnsServerIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 2, 3, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cigDnsResolverDnsServerIfIndex.setStatus('current')
if mibBuilder.loadTexts: cigDnsResolverDnsServerIfIndex.setDescription('A unique value for each interface. This value is same as ifIndex from ifTable. This field is used when the DNS server is learnt from interface running DHCP client or PPP negotiated (PPPoE/Dial Out modem). This field has no valid value when the type of the DNS server is static.')
cigDnsResolverDnsServerType = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 2, 3, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("static", 1), ("dynamic-dhcp", 2), ("dynamic-ppp", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cigDnsResolverDnsServerType.setStatus('current')
if mibBuilder.loadTexts: cigDnsResolverDnsServerType.setDescription('This is the server type. The user can create new row only when the server type is static. dynamic-dhcp represents DNS server learnt from DHCP client. dynamic-ppp represents DNS server learnt from ppp interface (PPPoE/Dial out modem).')
cigDnsResolverDnsServerRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 2, 3, 1, 6), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cigDnsResolverDnsServerRowStatus.setStatus('current')
if mibBuilder.loadTexts: cigDnsResolverDnsServerRowStatus.setDescription(' This is a row status mib item For adding and deleting DNS servers. The user can add only static servers in locations 1-6. For static servers the operation suppported is CreateAndWait and dynamic servers are automatically created by the device.')
cigDnsResolverDnsServerInetAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 2, 3, 1, 7), InetAddressType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cigDnsResolverDnsServerInetAddressType.setStatus('current')
if mibBuilder.loadTexts: cigDnsResolverDnsServerInetAddressType.setDescription('The address type of cigDnsResolverDnsServerInetAddress. ')
cigDnsResolverDnsServerInetAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 2, 3, 1, 8), InetAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cigDnsResolverDnsServerInetAddress.setStatus('current')
if mibBuilder.loadTexts: cigDnsResolverDnsServerInetAddress.setDescription('The Inet Address of the dns server. 1. The address type of this object is specified in cigDnsResolverDnsServerInetAddressType. 2. Management applications can use cigDnsResolverDnsServerIpAddress or cigDnsResolverDnsServerInetAddressType/cigDnsResolverDnsServerInetAddress to set IPv4 server address and only cigDnsResolverDnsServerInetAddressType/cigDnsResolverDnsServerInetAddress to set IPv6 server address. 3. The cigDnsResolverDnsServerInetAddressType must always been set before the cigDnsResolverDnsServerInetAddress is set (the avGenOpServerInetAddress must conform to the cigDnsResolverDnsServerInetAddressType). 4. Setting cigDnsResolverDnsServerIpAddress address mib item will change cigDnsResolverDnsServerInetAddressType to IPv4(1) and cigDnsResolverDnsServerInetAddress to IPv4 address configured. 5. Setting cigDnsResolverDnsServerInetAddress as IPv4 address will also set cigDnsResolverDnsServerIpAddress. 6. Setting cigDnsResolverDnsServerInetAddress as IPv6 address will also set cigDnsResolverDnsServerIpAddress to 0.0.0.0. 7. Default of cigDnsResolverDnsServerInetAddressType is unknown(0) and cigDnsResolverDnsServerInetAddress is zero length. 8. Setting cigDnsResolverDnsServerInetAddressType to IPv4 will return cigDnsResolverDnsServerIpAddress and cigDnsResolverDnsServerInetAddressType to default value of 0.0.0.0. Setting genOpServerInetAddressType to value other than IPv4 return cigDnsResolverDnsServerIpAddress to default value of 0.0.0.0 and cigDnsResolverDnsServerInetAddressType to default value of null string. Row status can not be active if cigDnsResolverDnsServerIpAddress or cigDnsResolverDnsServerInetAddressType/cigDnsResolverDnsServerInetAddress are not configured. ')
cigDnsResolverDomainTable = MibTable((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 2, 4), )
if mibBuilder.loadTexts: cigDnsResolverDomainTable.setStatus('current')
if mibBuilder.loadTexts: cigDnsResolverDomainTable.setDescription('The domains used by the DNS resolver to add to non full qualified names. I.e. when the host name is not a full host name then these domains are added in order to resolve the non full qulified name. an example for domain is : avaya.com. an example for non qualified name is lantis. Therefore when trying to resolve lantis the suffix avaya.com will be added to create the name lantis.avaya.com')
cigDnsResolverDomainEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 2, 4, 1), ).setIndexNames((0, "CIG-CLIENTS-MIB", "cigDnsResolverDomainIndex"))
if mibBuilder.loadTexts: cigDnsResolverDomainEntry.setStatus('current')
if mibBuilder.loadTexts: cigDnsResolverDomainEntry.setDescription('The Domain entry. The domains used by the DNS resolver to add to non full qualified names. I.e. when the host name is not a full host name then these domains are added in order to resolve the non full qulified name. an example for domain is : avaya.com. an example for non qualified name is lantis. Therefore when trying to resolve lantis the suffix avaya.com will be added to create the name lantis.avaya.com')
cigDnsResolverDomainIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 2, 4, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cigDnsResolverDomainIndex.setStatus('current')
if mibBuilder.loadTexts: cigDnsResolverDomainIndex.setDescription('The index of the domain. The index presents the precedence of the domain. I.e. the lower the index the higher priority this domain is used.')
cigDnsResolverDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 2, 4, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cigDnsResolverDomain.setStatus('current')
if mibBuilder.loadTexts: cigDnsResolverDomain.setDescription('The Domain shall follow RFC 1035 Host name convention. The following syntax will result in fewer problems with many applications that use domain names (e.g., mail, TELNET). Note that while upper and lower case letters are allowed in domain names, no significance is attached to the case. That is, two names with the same spelling but different case are to be treated as if identical. The labels must follow the rules for ARPANET host names. They must start with a letter, end with a letter or digit, and have as interior characters only letters, digits, and hyphen. There are also some restrictions on the length. Labels must be 63 characters or less. For example, the following strings identify hosts in the Internet: A.ISI.EDU XX.LCS.MIT.EDU SRI-NIC.ARPA ')
cigDnsResolverDomainRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6889, 2, 1, 17, 2, 4, 1, 3), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cigDnsResolverDomainRowStatus.setStatus('current')
if mibBuilder.loadTexts: cigDnsResolverDomainRowStatus.setDescription('This is a row status mib item For adding and deleting domains. The row is created by CreateAndWait. ')
mibBuilder.exportSymbols("CIG-CLIENTS-MIB", cigDnsResolverDnsServerRowStatus=cigDnsResolverDnsServerRowStatus, cigDhcpClients=cigDhcpClients, cigDnsResolverDnsServerIfIndex=cigDnsResolverDnsServerIfIndex, cigDhcpClientsDefaultRouterTrackId=cigDhcpClientsDefaultRouterTrackId, cigDnsResolver=cigDnsResolver, cigDhcpClientsClientId=cigDhcpClientsClientId, cigDhcpClientsDnsServerList=cigDhcpClientsDnsServerList, cigDhcpClientsRequestLeaseTime=cigDhcpClientsRequestLeaseTime, cigDnsResolverDomainTable=cigDnsResolverDomainTable, cigDhcpClientsIPAddress=cigDhcpClientsIPAddress, cigDhcpClientsNotification=cigDhcpClientsNotification, cigDnsResolverMode=cigDnsResolverMode, cigDnsResolverDomainIndex=cigDnsResolverDomainIndex, cigDhcpClientsDefaultGatewayList=cigDhcpClientsDefaultGatewayList, cigDnsResolverDnsServerEntry=cigDnsResolverDnsServerEntry, cigDhcpClientsRenewLeaseTime=cigDhcpClientsRenewLeaseTime, lsg=lsg, cigDhcpClientsServerIpAddr=cigDhcpClientsServerIpAddr, mibs=mibs, cigDnsResolverDomainEntry=cigDnsResolverDomainEntry, cigDnsResolverDnsServersListTable=cigDnsResolverDnsServersListTable, cigDnsResolverDnsServersListIndex=cigDnsResolverDnsServersListIndex, cigDhcpClientsIfAlias=cigDhcpClientsIfAlias, cigDhcpClientsDomainName=cigDhcpClientsDomainName, cigDhcpClientsRowStatus=cigDhcpClientsRowStatus, cigDnsResolverOperations=cigDnsResolverOperations, cigDhcpClientsTable=cigDhcpClientsTable, cigDhcpClientsSubnetMask=cigDhcpClientsSubnetMask, avaya=avaya, cigDhcpClientsStatus=cigDhcpClientsStatus, cigDhcpClientsEntry=cigDhcpClientsEntry, cigDhcpClientsRemainLeaseTime=cigDhcpClientsRemainLeaseTime, cigDhcpClientsRebindLeaseTime=cigDhcpClientsRebindLeaseTime, cigDnsResolverDomain=cigDnsResolverDomain, cigDnsResolverDnsServersListDescription=cigDnsResolverDnsServersListDescription, cigDnsResolverRetry=cigDnsResolverRetry, cigDnsResolverDnsServerListIndex=cigDnsResolverDnsServerListIndex, cigDnsResolverGenConfig=cigDnsResolverGenConfig, 
cigDnsResolverDomainRowStatus=cigDnsResolverDomainRowStatus, cigDnsResolverDnsServerTable=cigDnsResolverDnsServerTable, cigDhcpClientsOperations=cigDhcpClientsOperations, cigDnsResolverDnsServerIpAddress=cigDnsResolverDnsServerIpAddress, cigDhcpClientsReceiveLeaseTime=cigDhcpClientsReceiveLeaseTime, cigDnsResolverTimeout=cigDnsResolverTimeout, PYSNMP_MODULE_ID=cigClients, cigDhcpClientsIfIndex=cigDhcpClientsIfIndex, cigDnsResolverDnsServersListRowStatus=cigDnsResolverDnsServersListRowStatus, cigDhcpClientsRequestBitmap=cigDhcpClientsRequestBitmap, cigDnsResolverDnsServerIndex=cigDnsResolverDnsServerIndex, cigDhcpClientsHostName=cigDhcpClientsHostName, cigDnsResolverDnsServerType=cigDnsResolverDnsServerType, cigDnsResolverDnsServersListEntry=cigDnsResolverDnsServersListEntry, cigDnsResolverDnsServerInetAddressType=cigDnsResolverDnsServerInetAddressType, cigDnsResolverDnsServerInetAddress=cigDnsResolverDnsServerInetAddress, cigDhcpClientsConflictDetectionTrap=cigDhcpClientsConflictDetectionTrap, cigClients=cigClients)
| [
"dcwangmit01@gmail.com"
] | dcwangmit01@gmail.com |
8bc0e253bcff105a4ede5e931ee1a5b8c496b7a1 | 5d0edf31b17c5375faf6126c1a7be8e79bfe2ab8 | /buildout-cache/eggs/plone.app.versioningbehavior-1.2.0-py2.7.egg/plone/app/versioningbehavior/testing.py | b6817a3b3f16fd765884b5ed7e997bd55a8acf8f | [] | no_license | renansfs/Plone_SP | 27cba32ebd9fc03dae3941ec23cf1bf0a7b6667a | 8a7bdbdb98c3f9fc1073c6061cd2d3a0ec80caf5 | refs/heads/master | 2021-01-15T15:32:43.138965 | 2016-08-24T15:30:19 | 2016-08-24T15:30:19 | 65,313,812 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,740 | py | # -*- coding: utf-8 -*-
from plone.app.testing import applyProfile
from plone.app.testing import FunctionalTesting
from plone.app.testing import PLONE_FIXTURE
from plone.app.testing import PloneSandboxLayer
from plone.dexterity.fti import DexterityFTI
from Products.CMFCore.utils import getToolByName
from Products.CMFDiffTool.TextDiff import TextDiff
from Products.PloneTestCase.layer import onteardown
from zope.configuration import xmlconfig
# Make it work with plone.protect < 3.0.0 where the `auto` module is not available.
# This is necessary for Plone 4.3.x compatibility.
try:
    from plone.protect import auto as protect_auto
except ImportError:
    # Minimal stand-in exposing the only attribute this module toggles
    # (testSetUp/testTearDown flip CSRF_DISABLED), so the rest of the
    # code can run unchanged on plone.protect < 3.0.0.
    class DummyAuto(object):
        CSRF_DISABLED = True
    protect_auto = DummyAuto()
def fix_plonetestcase_mess():
    """Registers a Products.PloneTestCase cleanup.
    It is a nested teardown so that we can make sure that it is executed
    as the last tear down function.
    """
    def reset_zope2():
        """Testing.ZopeTestCase.layer.ZopeLite does not support tearing down.
        This results in a partially teared down Zope2 instance.
        This function resets the Zope2 initialization state so that we can
        initialize another Zope2 instance with p.a.testing.
        """
        import Zope2
        # Clearing this private flag lets Zope2.startup run again.
        Zope2._began_startup = 0
    onteardown(reset_zope2)()
# Register the fix itself as a teardown at import time.
onteardown(fix_plonetestcase_mess)()
# Dexterity FTI id registered by VersioningLayer for the throwaway test type.
TEST_CONTENT_TYPE_ID = 'TestContentType'
# CMFEditions policies enabled for that type (auto-version on edit, revert support).
DEFAULT_POLICIES = ('at_edit_autoversion', 'version_on_revert',)
class VersioningLayer(PloneSandboxLayer):
    """Test layer that wires up plone.app.versioningbehavior and registers
    a versionable Dexterity test content type."""
    defaultBases = (PLONE_FIXTURE,)
    def setUpZope(self, app, configurationContext):
        # Load this package's ZCML so its behaviors are registered.
        import plone.app.versioningbehavior
        xmlconfig.file('configure.zcml', plone.app.versioningbehavior,
                       context=configurationContext)
    def setUpPloneSite(self, portal):
        applyProfile(portal, 'plone.app.versioningbehavior:default')
        self.registerVersionedDocumentFTI(portal)
    def registerVersionedDocumentFTI(self, portal):
        """Register the test FTI, its diff config and versioning policies."""
        types_tool = getToolByName(portal, 'portal_types')
        fti = DexterityFTI(
            TEST_CONTENT_TYPE_ID,
            global_allow=True,
            behaviors=(
                'plone.app.versioningbehavior.behaviors.IVersionable',
                'plone.app.dexterity.behaviors.metadata.IBasic',
            ),
            model_source="""
                <model xmlns="http://namespaces.plone.org/supermodel/schema">
                    <schema>
                        <field name="text" type="zope.schema.Text">
                            <title>Text</title>
                            <required>False</required>
                        </field>
                    </schema>
                </model>
            """)
        types_tool._setObject(TEST_CONTENT_TYPE_ID, fti)
        # Use a plain text diff for the "text" field when comparing versions.
        diff_tool = getToolByName(portal, 'portal_diff')
        diff_tool.setDiffForPortalType(
            TEST_CONTENT_TYPE_ID, {'text': TextDiff.meta_type})
        portal_repository = getToolByName(portal, 'portal_repository')
        portal_repository.setVersionableContentTypes(
            list(portal_repository.getVersionableContentTypes()) +
            [TEST_CONTENT_TYPE_ID])
        for policy_id in DEFAULT_POLICIES:
            portal_repository.addPolicyForContentType(
                TEST_CONTENT_TYPE_ID, policy_id)
    def testSetUp(self):
        # Disable CSRF protection per test; restored in testTearDown.
        self.CSRF_DISABLED_ORIGINAL = protect_auto.CSRF_DISABLED
        protect_auto.CSRF_DISABLED = True
    def testTearDown(self):
        protect_auto.CSRF_DISABLED = self.CSRF_DISABLED_ORIGINAL
# Module-level layer instances importable by test modules.
VERSIONING_FIXTURE = VersioningLayer()
VERSIONING_FUNCTIONAL_TESTING = FunctionalTesting(
    bases=(VERSIONING_FIXTURE,),
    name='plone.app.versioningbehavior:functional')
| [
"renansfs@gmail.com"
] | renansfs@gmail.com |
edbf95ca896c420c5aa1967b99099de42c02de70 | 05cc20aae84fe7df767b31003ed550133c5efabb | /gui.py | b112fadf8c0be4db6a96af6e3b3ee2646be44cfb | [] | no_license | watercolorfish/rec_dip | 9a88e13e9afe2b6021c8b96afe738cebc0480d4d | 91154412265e88997a2d4cdb72f93dd225e94dcc | refs/heads/main | 2023-06-09T08:02:03.201040 | 2021-06-14T06:54:39 | 2021-06-14T06:54:39 | 376,723,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,724 | py | from tkinter import *
from tkinter.filedialog import askopenfilename, askdirectory
from main import main_neuro
import pickle
def check_paths(path):
    """Return the value stored under *path* (e.g. "lyricsPath") in the
    pickled settings file, or None when the key is absent."""
    with open('path_data.pickle', 'rb') as settings_file:
        stored_settings = pickle.load(settings_file)
    return stored_settings.get(path)
def choose_file():
    """Open the native file-selection dialog and return the chosen path."""
    return askopenfilename()
def choose_directory():
    """Open the native directory-selection dialog and return the chosen path."""
    selected_directory = askdirectory()
    return selected_directory
def choose_lyrics_path():
    """Ask for the lyrics file, persist all settings, update the label and
    start generation (main_neuro) if a file was chosen."""
    global lyricsPath
    lyricsPath = choose_file()
    # Persist the full settings dict so other runs see the new choice.
    with open('path_data.pickle', 'wb') as f:
        pickle.dump({"lyricsPath": lyricsPath, "savePath": savePath, "trainPath": trainPath, "preTrain": preTrain.get()}, f)
    chosen_lyrics_path_label['text'] = lyricsPath
    # Empty string means the dialog was cancelled -- skip generation then.
    if (lyricsPath):
        main_neuro()
def choose_save_path():
    """Ask for the output directory, persist settings and update the label."""
    global savePath
    savePath = choose_directory()
    with open('path_data.pickle', 'wb') as f:
        pickle.dump({"lyricsPath": lyricsPath, "savePath": savePath, "trainPath": trainPath, "preTrain": preTrain.get()}, f)
    chosen_save_path_label['text'] = savePath
def choose_train_path():
    """Ask for the training-data directory, persist settings and update the label."""
    global trainPath
    trainPath = choose_directory()
    with open('path_data.pickle', 'wb') as f:
        pickle.dump({"lyricsPath": lyricsPath, "savePath": savePath, "trainPath": trainPath, "preTrain": preTrain.get()}, f)
    chosen_train_path_label['text'] = trainPath
def choose_is_train():
    """Checkbox callback: persist the current "train before generation" flag."""
    # NOTE(review): `global` is only needed for rebinding; preTrain is merely
    # read here, so this declaration looks redundant -- confirm before removing.
    global preTrain
    with open('path_data.pickle', 'wb') as f:
        pickle.dump({"lyricsPath": lyricsPath, "savePath": savePath, "trainPath": trainPath, "preTrain": preTrain.get()}, f)
# Restore the previously saved settings (None when never saved).
lyricsPath = check_paths("lyricsPath")
savePath = check_paths("savePath")
trainPath = check_paths("trainPath")
root = Tk()
# Center a fixed 500x360 window on the screen.
root.geometry('500x360+{}+{}'.format(root.winfo_screenwidth()//2 - 410, root.winfo_screenheight()//2 - 250))
# NOTE(review): this creates and hides a *second* Tk root -- presumably meant
# to suppress an extra window from the file dialogs; confirm it is needed.
Tk().withdraw()
preTrain = IntVar()
preTrain.set(check_paths("preTrain"))
chosen_lyrics_path_label = Label()
chosen_save_path_label = Label(text=check_paths("savePath"))
chosen_train_path_label = Label(text=check_paths("trainPath"))
# Empty Label().pack() calls are used as vertical spacers below.
Label().pack()
Label(text="Choose file to add lyrics").pack()
Checkbutton(text="Train before generation", variable=preTrain, command=choose_is_train).pack()
Button(text="Choose file", width=20, command=choose_lyrics_path).pack()
Label().pack()
Label(text="You chose file:").pack()
chosen_lyrics_path_label.pack()
Label().pack()
Label(text="Save path:").pack()
chosen_save_path_label.pack()
Button(text="Change save path", width=20, command=choose_save_path).pack()
Label().pack()
Label(text="Path to train network:").pack()
chosen_train_path_label.pack()
Button(text="Change train path", width=20, command=choose_train_path).pack()
def on_closing():
    """Stop the event loop and destroy the window when the user closes it."""
    root.quit()
    root.destroy()
# Route the window-manager close button through on_closing, then block
# in the Tk event loop until the window is closed.
root.protocol("WM_DELETE_WINDOW", on_closing)
root.mainloop()
"orliet@yandex.ru"
] | orliet@yandex.ru |
92c0e09476c89293544c2680a6d1bb74a148fe62 | 27691e5ef8e49fb29189b01dd76a1dc3720e7ae8 | /AC/ABC-TDD/057/c.py | 33fa121771e3149b137159983a0a5ae01e6fbf1c | [] | no_license | oshou/procon | 61e5f5bc819e0fe5ab29749fc2f894fe6f3b1d07 | 3d000c64b5917c65b51ed7da5b90cb79892d5909 | refs/heads/master | 2023-05-10T23:56:50.861468 | 2021-09-23T06:07:29 | 2021-09-23T06:07:29 | 116,886,484 | 1 | 0 | null | 2023-05-05T02:28:41 | 2018-01-10T00:21:38 | Go | UTF-8 | Python | false | false | 350 | py | import math
def f(a, b: int) -> int:
    """Return the base-10 digit count of whichever of *a*, *b* has more digits."""
    return max(len(str(a)), len(str(b)))
# Read n and print the minimum of F(a, b) over all divisor pairs a * b == n,
# where F(a, b) is the digit count of the larger of the two factors.
n = int(input())
# Every divisor pair (i, n // i) has its smaller member <= sqrt(n).
limit = int(math.sqrt(n))  # renamed from `max`, which shadowed the builtin
# The pair (1, n) always exists, so F(1, n) == digit count of n is an upper bound.
fmin = len(str(n))
for i in range(1, limit + 1):
    if n % i == 0:
        # Use floor division: the original's int(n / i) round-trips through a
        # float and can lose precision for very large n; n // i is exact.
        fmin = min(fmin, f(i, n // i))
print(fmin)
| [
"adf1985adf@gmail.com"
] | adf1985adf@gmail.com |
898186647682626d768515a7302411ae98519e75 | 8623b42717f487071ae826b7bce7cfc25ac16e9e | /course/models.py | 4e8529094b8c1eacc373f650dabc3314c9e0d979 | [] | no_license | wudaown/evian | 0fb865917e3712773d56186c015e2ef36771efd9 | 4e8f55fe62670dee86e5178d5e997260588dbfd2 | refs/heads/master | 2020-08-01T11:33:20.586418 | 2019-10-09T09:15:22 | 2019-10-09T09:15:22 | 210,983,447 | 0 | 0 | null | 2019-12-05T00:21:44 | 2019-09-26T02:36:11 | Python | UTF-8 | Python | false | false | 2,459 | py | from django.db import models
from login.models import User
# Weekday choices; 0 == Monday, matching datetime.date.weekday().
DAYS=[
    (0,'Monday'),
    (1,'Tuesday'),
    (2,'Wednesday'),
    (3,'Thursday'),
    (4,'Friday'),
    (5,'Saturday'),
    (6,'Sunday')
]
# Kinds of scheduled sessions a course index can have.
CLASS_TYPE = [
    ('lecture','Lecture'),
    ('tutorial','Tutorial'),
    ('lab','Lab')
]
# Attendance outcomes; 'mc' presumably means "medical certificate".
STATUS = [
    ('present','Present'),
    ('absent','Absent'),
    ('mc','MC')
]
# Create your models here.
class Course(models.Model):
    """A course offering identified by its code (e.g. a module code)."""
    course_code = models.CharField(max_length=10)
    course_name = models.CharField(max_length=100)
    def __str__(self):
        return self.course_code
class CourseIndex(models.Model):
    """A specific index/group of a course that students register into."""
    course = models.ForeignKey(Course, on_delete=models.CASCADE)
    index = models.CharField(max_length=10)
    group = models.CharField(max_length=10)
    def __str__(self):
        return f'{str(self.course)}-{self.index}'
    class Meta:
        verbose_name_plural = "Course Indexes"
class CourseIndexType(models.Model):
    """Weekly schedule slot (lecture/tutorial/lab) for a course index."""
    course_index = models.ForeignKey(CourseIndex, on_delete=models.CASCADE)
    class_type = models.CharField(max_length=10, choices=CLASS_TYPE)
    day = models.IntegerField(choices=DAYS)
    time = models.TimeField('class time')
    duration = models.DurationField()
    def __str__(self):
        return f'{str(self.course_index)}-{self.class_type}'
class Class(models.Model):
    """A concrete dated session of a scheduled slot."""
    course_index_type = models.ForeignKey(CourseIndexType, on_delete=models.CASCADE)
    datetime = models.DateTimeField('class datetime')
    def __str__(self):
        return f'{str(self.course_index_type)}-{self.datetime.date()}-{self.datetime.time()}'
    class Meta:
        verbose_name_plural = "Classes"
class ClassTaker(models.Model):
    """Enrollment of a student (a User) in a course index."""
    course_index = models.ForeignKey(CourseIndex, on_delete=models.CASCADE)
    student = models.ForeignKey(User, null=True, on_delete=models.SET_NULL)
    # matric_no = models.CharField(max_length=20)
    def __str__(self):
        return f'{str(self.course_index)}-{str(self.student)}'
class ClassInstructor(models.Model):
    """Assignment of a staff member (a User) to teach a course index."""
    course_index = models.ForeignKey(CourseIndex, on_delete=models.CASCADE)
    staff = models.ForeignKey(User, null=True, on_delete=models.SET_NULL)
    def __str__(self):
        return f'{str(self.course_index)}-{str(self.staff)}'
class Attendance(models.Model):
    """A student's attendance record for one class session."""
    class_session = models.ForeignKey(Class, on_delete=models.CASCADE)
    student = models.ForeignKey(User, null=True, on_delete=models.SET_NULL)
    status = models.CharField(max_length=10, choices=STATUS)
    # Null/blank when the student never checked in.
    attendance_time = models.TimeField('attendance time', null=True, blank=True)
    def __str__(self):
        return f'{str(self.class_session)}-{str(self.student)}'
| [
"jeko.lonardo@gmail.com"
] | jeko.lonardo@gmail.com |
4aa5de904d5db17eee1ffaeb1ac77ed83ca86551 | 3eae9c14c119ee2d6a7d02ef1ba5d61420959e3c | /modules/core/mgmt/rwuagent/test/utframework/testtasklet.py | e8a7ca66d397981331bc610e8777f2b7cffb1434 | [
"Apache-2.0"
] | permissive | RIFTIO/RIFT.ware | 94d3a34836a04546ea02ec0576dae78d566dabb3 | 4ade66a5bccbeb4c5ed5b56fed8841e46e2639b0 | refs/heads/RIFT.ware-4.4.1 | 2020-05-21T14:07:31.092287 | 2017-06-05T16:02:48 | 2017-06-05T16:02:48 | 52,545,688 | 9 | 8 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | # RIFT_IO_STANDARD_CMAKE_COPYRIGHT_HEADER(BEGIN)
# Creation Date: 3/20/2016
# RIFT_IO_STANDARD_CMAKE_COPYRIGHT_HEADER(END)
# Workaround RIFT-6485 - rpmbuild defaults to python2 for
# anything not in a site-packages directory so we have to
# install the plugin implementation in site-packages and then
# import it from the actual plugin.
import rift.tasklets.uagenttbed
class Tasklet(rift.tasklets.uagenttbed.TestTasklet):
    """Thin alias for the real tasklet implementation (see the RIFT-6485
    workaround note above); adds no behavior of its own."""
    pass
| [
"Leslie.Giles@riftio.com"
] | Leslie.Giles@riftio.com |
c9d556d6af0129defb58122bfdbef32661347adb | febab2cb43852451ec5ef568d52392ba28a0246e | /example/ws_test.py | 8fadc8c52f7e37c5100594e16f898a17d3dccb5f | [
"MIT"
] | permissive | wusenyangzju/dorna2-python | 99c7dc7e03de129db0b2124790e68883a0b56b93 | 680cd9cb3fadf69d646e8faa1769c982a2556bd4 | refs/heads/master | 2023-02-04T10:57:43.334149 | 2020-12-22T20:36:48 | 2020-12-22T20:36:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | from __future__ import print_function
import sys
sys.path.append('..')
from dorna2 import dorna
import time
def main(ip, port):
    """Stress-test the websocket link: connect to the robot at ip:port,
    fire 600 status commands synchronously and print the elapsed time."""
    start = time.time()
    robot = dorna()
    robot.connect(ip, port)
    # 100 rounds over the six status commands == 600 requests total.
    for cmd in 100 * ["alarm", "toollength", "input", "output", "pwm", "adc"]:
        arg = {"cmd": cmd, "id": robot.rand_id()}
        print(arg)
        robot.play(**arg)
        # Block until the robot acknowledges this command id.
        robot.wait_id(arg["id"])
    robot.close()
    print(time.time()-start)
if __name__ == '__main__':
    main("192.168.1.10", 443)
| [
"hussein.ty@gmail.com"
] | hussein.ty@gmail.com |
081e45ac958434a2c52e931cbcff053a5838f406 | c98e84766da320a73eded9ba6a476fe79468ef77 | /__init__.py | bdc75c5b3a0111d3f46fc0b96471fcce8dc5934a | [
"MIT"
] | permissive | Koensw/APT-Peaks | e30d99bde134e7f5d02a445f0278328b9fa9f30b | 3da775438e69aa7faa2120ed4ed16518dbafcd93 | refs/heads/master | 2021-05-01T11:40:31.212125 | 2018-10-26T18:32:32 | 2018-10-26T18:32:32 | 67,690,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | """
Peak finding library for APT
"""
# Submodules exported via `from <package> import *`.
__all__ = ["read", "prepare", "peaks", "main"]
| [
"koen@bluejayeindhoven.nl"
] | koen@bluejayeindhoven.nl |
ab83d16458b910bc88e253f102e63c942329746f | 6d710128035e3c284743b8dbd8164701ee0e81c8 | /ForLoop.py | 17a62cca2fd36660c7fb8ae73ced033da124b575 | [] | no_license | amittyyy/DSA_WebScrapingPython | 60f62ce5c7e864dda0730e6e8d8e94a66d1ea433 | 4d04740ac610d2b6141682cd24442dc2c79f5d2f | refs/heads/master | 2022-12-19T08:19:48.835623 | 2020-09-18T01:11:08 | 2020-09-18T01:11:08 | 296,477,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | # print("My Name is ")
# for i in range(5):
# print("Jimmy Five Times" + str(i))
| [
"amittyyy@gmail.com"
] | amittyyy@gmail.com |
0fc3725fc4710a20b5d88d2c95441a91cda6c601 | 07a46167a48d330cc0fa41d1cea69bf9a67e799d | /src/robot_dynamics_files/rigid_body_dynamics/scripts/spawn_elements.py | 458d1c7c44dd01b6be632272b719e9c92c1dc19b | [] | no_license | Moises981/RrbotControl | 30cefe9658bc4d26a35c8982566947da4dd01527 | 8b56371a1ef600ae5a30911eae686677790d8f41 | refs/heads/master | 2023-05-05T00:40:13.111761 | 2021-05-11T02:43:56 | 2021-05-11T02:43:56 | 365,592,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | #!/usr/bin/env python
from gazebo_utils.spawner import Spawner
from geometry_msgs.msg import Pose, Point
spawner = Spawner()
spawner.spawn('table', 'table',
Pose(position=Point(0,0,0)))
spawner.spawn('cone', 'cone',
Pose(position=Point(0,0,1.064)))
spawner.spawn('coke_can', 'coke_can',
Pose(position=Point(0.2,0,1.0128)))
spawner.spawn('tennis_ball', 'tennis_ball',
Pose(position=Point(-0.2,0,1.05096)))
| [
"jesus.castillo.j@tecsup.edu.pe"
] | jesus.castillo.j@tecsup.edu.pe |
7d1281d398b5ca587744f224b7f48f4e79b8a012 | 98828b82c4bd057b657b9e4f8343a72a44522d9c | /process_nouns.py | 28c448ef879eab7f620d485b7c97d1137eb4f1f0 | [] | no_license | RimiChen/Story-Assistant | 86a6bb81ccabe9408db65b3a6aea7236f81ca10c | b49854ad3b4efe81fef5260b5a6d6dcca22aa4ea | refs/heads/master | 2020-04-05T11:46:28.588006 | 2017-09-25T22:17:32 | 2017-09-25T22:17:32 | 81,164,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,183 | py | import nltk
import math
import json
import sys
from nltk.corpus import gutenberg
from nltk.tokenize import PunktSentenceTokenizer
def preprocessing_text_file(input_file):
    """Load *input_file* from the NLTK Gutenberg corpus, sentence-tokenize
    it, extract noun/verb pairs and dump the result to a JSON file."""
    # Same raw text is used both to train the Punkt tokenizer and as input.
    train_text = gutenberg.raw(input_file)
    sample_text = gutenberg.raw(input_file)
    custom_sent_tokenizer = PunktSentenceTokenizer(train_text)
    tokenized = custom_sent_tokenizer.tokenize(sample_text)
    print("\n\n\n")
    print("====combine with verb============")
    name_list = process_content_find_verb(10, tokenized, sample_text)
    process_json(10, name_list, input_file)
def process_content(tokenized, sample_text):
    """Debug helper: POS-tag the first two sentences, chunk proper-noun
    phrases with a regexp grammar and print each chunk whose first noun
    starts the phrase. Exceptions are caught and printed, not raised."""
    try:
        for i in tokenized[:2]:
            words = nltk.word_tokenize(i)
            tagged = nltk.pos_tag(words)
            # Grammar: optional adverbs/verbs followed by proper nouns.
            chunkGram = r"""Chunk: {<RB.?>*<VB.?>*<NNP>+<NN>?}"""
            chunkParser = nltk.RegexpParser(chunkGram)
            chunked = chunkParser.parse(tagged)
            count = 0;
            for j in chunked:
                countNoun = 0
                for item in chunked[count]:
                    if 'NNP' in item or 'NN' in item:
                        # only print nnp
                        currentWord = chunked[count][countNoun][0]
                        currentWordCount = sample_text.count(currentWord)
                        #print("--{}".format(sample_text.count(currentWord)))
                        #if countNoun == 0 and currentWordCount > 100:
                        if countNoun == 0:
                            print("{}".format(chunked[count]))
                        # current word = print(chunked[count][countNoun][0])
                        # if it is nnp add to list and get count
                        #print(chunked[count].pos)
                        #else:
                        # print("{}".format(chunked[count]))
                    countNoun= countNoun+1
                count=count+1
    except Exception as e:
        print(str(e))
def process_content_filter_freqeuncy(tokenized, sample_text):
    """Like process_content, but over the first 50 sentences and keeping only
    nouns whose raw occurrence count in *sample_text* exceeds the
    noun_frequency threshold. Prints each surviving chunk and its joined
    noun string. (Name typo "freqeuncy" kept -- referenced elsewhere.)"""
    try:
        #how many words in this range
        # create a dictionary to filter out duplicate things
        constant = 10
        print("frequency {}".format(noun_frequency(constant, len(tokenized))))
        for i in tokenized[:50]:
            words = nltk.word_tokenize(i)
            tagged = nltk.pos_tag(words)
            chunkGram = r"""Chunk: {<RB.?>*<VB.?>*<NNP>+<NN>?}"""
            chunkParser = nltk.RegexpParser(chunkGram)
            chunked = chunkParser.parse(tagged)
            count = 0;
            for j in chunked:
                countNoun = 0
                currentChunk = ""
                targetString = ""
                for item in chunked[count]:
                    if 'NNP' in item or 'NN' in item:
                        # only print nnp
                        currentWord = chunked[count][countNoun][0]
                        currentWordCount = sample_text.count(currentWord)
                        #print("--{}".format(sample_text.count(currentWord)))
                        if currentWordCount > noun_frequency(constant, len(tokenized)):
                            #if countNoun == 0:
                            if countNoun == 0:
                                #print("{}".format(chunked[count]))
                                currentChunk = chunked[count]
                                # Words shorter than 3 chars are ignored.
                                if len(currentWord) >2:
                                    targetString = targetString+" "+currentWord
                                #print("--{}".format(currentWord))
                            #current_word = chunked[count][countNoun][0]
                            # if it is nnp add to list and get count
                            #print(chunked[count].pos)
                            if len(currentWord) >2 and targetString != "" and countNoun != 0:
                                targetString = targetString+" "+currentWord
                    countNoun= countNoun+1
                if currentChunk != "":
                    print("-------------")
                    print("Chunk: {}".format(currentChunk))
                    print("String: {}".format(targetString))
                # save the target string
                count=count+1
    except Exception as e:
        print(str(e))
def noun_frequency(constant, storyLength):
    """Occurrence threshold for "important" nouns.

    Computed as floor(constant * log10(storyLength)) so the bar rises
    slowly with the number of sentences in the story.
    """
    threshold = math.log10(storyLength) * constant
    return math.floor(threshold)
def process_content_find_verb(json_length, tokenized, sample_text):
    """Scan up to 1500 sentences for frequent noun phrases and the verb(s)
    that follow them. Returns a dict mapping each noun string to a list of
    [occurrence-count, verb, ...]; also prints up to *json_length* entries.
    Exceptions are caught and printed, not raised."""
    try:
        #how many words in this range
        # create a dictionary to filter out duplicate things
        constant = 10
        dictionary_verb = {};
        print("frequency {}".format(noun_frequency(constant, len(tokenized))))
        for i in tokenized[:1500]:
            words = nltk.word_tokenize(i)
            tagged = nltk.pos_tag(words)
            chunkGram = r"""Chunk: {<RB.?>*<VB.?>*<NNP>+<NN>?}"""
            chunkParser = nltk.RegexpParser(chunkGram)
            chunked = chunkParser.parse(tagged)
            shouldGetVerb = False;
            count = 0;
            targetVerb = ""
            targetString = ""
            for j in chunked:
                countNoun = 0
                currentChunk = ""
                #targetString = ""
                for item in chunked[count]:
                    if 'NNP' in item or 'NN' in item:
                        # only print nnp
                        currentWord = chunked[count][countNoun][0]
                        currentWordCount = sample_text.count(currentWord)
                        targetWordCount = sample_text.count(targetString)
                        # A new noun ends the pending (noun, verb) pair.
                        if shouldGetVerb == True:
                            test = str(targetWordCount);
                            dictionary_verb.setdefault(targetString,[test]).append(targetVerb);
                            #print(dictionary_verb);
                            #print("\"{}\": [\"{}\",\"{}\"],".format(targetString, targetVerb, targetWordCount))
                            shouldGetVerb = False
                            targetVerb = ""
                            targetString = ""
                            #print("Stop get action {}".format(targetString))
                        if currentWordCount > noun_frequency(constant, len(tokenized)):
                            if countNoun == 0:
                                currentChunk = chunked[count]
                                if len(currentWord) >2:
                                    targetString = currentWord
                            if len(currentWord) >2 and targetString != "" and countNoun != 0:
                                targetString = targetString+" "+currentWord
                    countNoun= countNoun+1
                if currentChunk != "":
                    #print("-------------")
                    #print("Chunk: {}".format(currentChunk))
                    #print("String: {}".format(targetString))
                    ## we get a noun now, and want to find its verb = action
                    shouldGetVerb = True
                    #print("Need to get action for {}".format(targetString))
                # save the target string
                count=count+1
                if count < 3:
                    if shouldGetVerb == True:
                        if 'VB' in chunked[count] or 'VBD' in chunked[count] or 'VBG' in chunked[count] or\
                            'VBN' in chunked[count] or 'VBN' in chunked[count]:
                            #print("Verb: {}".format(chunked[count]))
                            targetVerb = targetVerb+" "+chunked[count][0]
                            #print("action pair: ({}, {})".format(targetString, chunked[count][0]))
        #print(dictionary_verb);
        control_json_limit = 0;
        print(type(dictionary_verb))
        # Print at most json_length entries of the collected dictionary.
        for x in dictionary_verb:
            if control_json_limit < json_length:
                print(repr(x),":",dictionary_verb[x], ",")
                control_json_limit = control_json_limit +1
    except Exception as e:
        print(str(e))
    return dictionary_verb
def process_json(print_limit, noun_list, file_name):
    """Serialize *noun_list* as pretty-printed JSON to
    ./Static/text_sample/<file_name without .txt>.json.

    ``print_limit`` is accepted for interface compatibility but unused.
    """
    new_json_file = "./Static/text_sample/"+str(file_name).replace(".txt","")+".json"
    #print ( sys.argv[1])
    # Context manager flushes and closes the handle; the original leaked
    # the open file object.
    with open(new_json_file, 'w') as target:
        target.write(json.dumps(noun_list, sort_keys=False, indent=4, separators=(',', ': ')))
#print (new_json_file)
#print ()
#print("\n\n\n")
#print("====get all Noun phrase============")
#process_content()
#print("\n\n\n")
#print("====filter by frequency============")
#process_content_filter_freqeuncy()
#print("\n\n\n")
#print("====combine with verb============")
#name_list = process_content_find_verb(10)
#process_json(10, name_list) | [
"ychen74@ncsu.edu"
] | ychen74@ncsu.edu |
e864c3c1a1724cf778f97ad67157ca72f97139ae | 419d0cf3e47951cc0eae24c5578524a7a86d11a7 | /unittests/stellaropnav/test_stellaropnav.py | d076a30ff4f67e699f0f6401c54a1b0dce1e59f7 | [
"LicenseRef-scancode-us-govt-public-domain",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | nasa/giant | 509a3376bee30d070673e9d919a120e72f7f5570 | 58a5699dacd240583e9b9b2f13168697acf4b3b2 | refs/heads/main | 2023-05-23T16:39:56.153889 | 2023-03-08T17:05:02 | 2023-03-08T17:05:02 | 404,409,646 | 19 | 4 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | from unittest import TestCase
import numpy as np
from giant import stellar_opnav as sopnav
class TestStellarOpnav(TestCase):
    """Placeholder test case for giant.stellar_opnav; exercises nothing yet."""
    def test_init(self):
        # TODO: construct a StellarOpNav instance and assert on its state.
        pass
| [
"andrew.j.liounis@nasa.gov"
] | andrew.j.liounis@nasa.gov |
b9459e9ff5bdd141c5f22653c1f854167a3a9ddc | 6be1990abf99c85ef886b49dcea1824aabb648d3 | /weixinofneolocal/weixinofneolocal/zinnia/management/commands/zinnia2wp.py | ed19b9f8955b229141d7afe8a566d52eb21ec5cb | [] | no_license | neoguojing/cloudServer | b53ae205efe52cf0aea28dbb9e6c16c20caf991f | 7c19101789b0c46474269e4c8fe00e92203e9cd7 | refs/heads/master | 2020-12-04T23:02:23.551479 | 2017-09-22T03:08:35 | 2017-09-22T03:08:35 | 67,382,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,230 | py | """Zinnia to WordPress command module"""
from django.conf import settings
from django.utils.encoding import smart_str
from django.contrib.sites.models import Site
from django.template.loader import render_to_string
from django.core.management.base import NoArgsCommand
from utils.tagging.models import Tag
from zinnia import __version__
from zinnia.settings import PROTOCOL
from zinnia.models.entry import Entry
from zinnia.models.category import Category
class Command(NoArgsCommand):
    """Command object for exporting a Zinnia blog
    into WordPress via a WordPress eXtended RSS (WXR) file."""
    # NOTE(review): NoArgsCommand was removed in later Django releases --
    # presumably this targets an older Django; verify before upgrading.
    help = 'Export Zinnia to WXR file.'
    def handle_noargs(self, **options):
        site = Site.objects.get_current()
        # Everything the WXR template needs to render the full export.
        blog_context = {'entries': Entry.objects.all(),
                        'categories': Category.objects.all(),
                        'tags': Tag.objects.usage_for_model(Entry),
                        'version': __version__,
                        'language': settings.LANGUAGE_CODE,
                        'site': site,
                        'site_url': '%s://%s' % (PROTOCOL, site.domain)}
        export = render_to_string('zinnia/wxr.xml', blog_context)
        # The XML is written to stdout; redirect to a file to save it.
        print(smart_str(export))
| [
"guojing_neo@163.com"
] | guojing_neo@163.com |
6f31f90aa02e69640449dce547cee92ace26127a | e521c1bd7f0d7f30d3c666ded047e605ab34124f | /donate_post/migrations/0001_initial.py | e2a7c327d8a0f55e86576da2800fd400fab9fd4b | [] | no_license | mahadihanif/Web_-project_feedseek | ab222964ff039ef0c37d134cfb96f7ce23664412 | 0baedeb4117978d90e36d67620e1678471f840fa | refs/heads/main | 2023-01-29T15:27:56.251217 | 2020-12-14T13:22:05 | 2020-12-14T13:22:05 | 321,301,276 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,186 | py | # Generated by Django 3.1.4 on 2020-12-10 19:07
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the donate_post Post model.
    Do not hand-edit field definitions -- they must match the model state."""
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('food_name', models.CharField(max_length=50)),
                ('organization_name', models.CharField(max_length=50)),
                ('how_many_people', models.IntegerField(blank=True)),
                ('date', models.DateField(blank=True)),
                ('location', models.CharField(max_length=70)),
                ('add_note', models.TextField(max_length=500)),
                ('date_posted', models.DateTimeField(default=django.utils.timezone.now)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"mahadihanif27@gmail.com"
] | mahadihanif27@gmail.com |
afaf11274b56e03d619c3cd51304f965f83de1bc | db138753958d4f25fe8889a85ffc5984ad8f69c5 | /regex.py | b3da3be9a063309e0b2e04ca0287056a6e3295fa | [] | no_license | noob20000405/mywxbot | 042e8487e0aebfafd35fa29d4e2bc31505ddf1db | 32f257e06d7e4529e82cba9778ce9df437c0b6cc | refs/heads/master | 2022-12-08T09:33:54.169838 | 2020-09-01T17:49:24 | 2020-09-01T17:49:24 | 291,354,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | import re
def is_question(text):
    """Return True when *text* contains an interrogative marker.

    The markers are regex patterns (Chinese question particles plus escaped
    question marks), so ``re.search`` is used rather than substring tests.
    """
    word_list = ['请问', '问', '吗', '么', '啥', '哪', '\?', '\?']
    # any() short-circuits on the first match; `is not None` replaces the
    # non-idiomatic `!= None` comparison of the original.
    return any(re.search(word, text) is not None for word in word_list)
"""
text = "我想你是逼"
if is_question(text):
print("True")
else:
print("False")
""" | [
"a13349916261@gmail.com"
] | a13349916261@gmail.com |
f79f71a27c4cd6b11ba6f783fac029d3aa97fd4c | a14907600c8f00a25a0ba162939459361fb41d00 | /jx_api/testcases/test_05_epcstock.py | 7c6a23a4539924434a8c647c20774fdc8fb5b28d | [] | no_license | paopao-xiang/JX_API | b9c2785e5c6d9a7e467e7e2dbdfbc575f75a0933 | b201c3ad2c72f498ff451e5dbecce95c24545cd9 | refs/heads/master | 2023-04-17T09:39:12.522383 | 2021-02-22T11:20:31 | 2021-02-22T11:20:31 | 326,924,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,439 | py | # -*- coding:utf-8 _*-
"""
@author:paopao
@time: 2020/11/6:17:11
@email:1332170414@qq.com
@function:
"""
import unittest
import string, random, json
from ddt import ddt, data
from jx_api.common import contants,logger
from jx_api.common.config import config
from jx_api.common import do_excel
from jx_api.common import do_mysql
from jx_api.common import context
from jx_api.common.http_request import HTTPRequest2
@ddt
class EpcstockTest(unittest.TestCase):
    """Data-driven API tests for the stock-management (epcstock) endpoints.

    Cases are read from the 'epcstock' sheet of the configured Excel file at
    import time; each case is replayed against the service and the result is
    written back to the sheet.
    """
    excel = do_excel.DoExcel(config.get('case', 'case_file'), 'epcstock')
    cases = excel.get_cases()
    @classmethod
    def setUpClass(cls):
        logger.logger.info("开始库存管理相关接口")
        cls.http_request = HTTPRequest2()
        cls.mysql = do_mysql.DoMysql()
        params = config.get('data', 'data')
        resp = cls.http_request.request('POST', '/user/login', 'json', jsons=params)
        # Put the login token into the headers shared by all later requests.
        cls.headers = {"content-type": "application/json", "Connection": "keep-alive", 'token': resp.json()['data']['token']}
    @data(*cases)
    def test_epcstock(self, case):
        case.data=context.replace(case.data)
        logger.logger.info("开始测试:{0},发送的请求是:{1},请求类型:{2}".format(case.title, case.data, type(case.data)))
        # Generate a random warehouse name and attach it to the Context class
        # so later cases can reference it as a parameter.
        random_str = ''.join(random.sample(string.ascii_letters, 6))
        if case.data.find('warehouse_name') != -1:
            case.data = case.data.replace('warehouse_name', random_str.lower())
            setattr(context.Context, 'warehouse_name', random_str)
        resp = self.http_request.request(case.method, case.url, case.type, headers=self.headers,jsons=case.data)
        logger.logger.info("{0},返回是:{1},返回类型:{2}".format(case.title, resp.text, type(resp.text)))
        print("请求的内容:{}".format(case.title))
        if 'message' in json.loads(resp.text).keys():
            # The response carries a 'message' field: compare it with expected.
            try:
                self.assertEqual(str(case.expected), json.loads(resp.text)['message'])
                self.excel.write_result(case.case_id + 1, json.loads(resp.text)['message'], 'PASS')
                logger.logger.info('{0}接口测试通过'.format(case.title))
                if case.case_id == 15:
                    # Store the new stocktaking task id in Context for the
                    # next request's parameters.
                    taskId=json.loads(resp.text)['data']['taskId']
                    setattr(context.Context, 'taskId', taskId)
            except AssertionError as e:
                self.excel.write_result(case.case_id + 1, resp.text, 'FAIL')
                logger.logger.error("报错了,{0}".format(e))
                raise e
        else:
            # No 'message' field: only check the payload parses as a dict.
            try:
                self.assertIsInstance(json.loads(resp.text),dict)
                self.excel.write_result(case.case_id + 1, resp.text, 'PASS')
                logger.logger.info('{0}接口测试通过'.format(case.title))
            except AssertionError as e:
                self.excel.write_result(case.case_id + 1, resp.text, 'FAIL')
                logger.logger.error("报错了,{0}".format(e))
                raise e
    @classmethod
    def tearDownClass(cls):
        logger.logger.info('库存查询模块接口测试完毕')
        cls.http_request.close()
| [
"871758195@qq.com"
] | 871758195@qq.com |
bf571d699897e360caed7a431074956d110d5e1f | 3338fea5ade9474e9bd2e1c49995466773bec659 | /tutorial/static/code-snippets/orientation_model_init.py | 3693278848f6bf2c34db5df677ace7599d370bbd | [
"MIT"
] | permissive | Livingstone99/pytorch-for-information-extraction | 716f631294467a1cdcdddcdf9bb761e713defcab | 4e8eea92140df806c2a82c8fa3db4afaf13e66db | refs/heads/master | 2023-01-24T08:00:04.281128 | 2020-12-01T04:53:10 | 2020-12-01T04:53:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | def get_orientation_model(num_classes, model_state_dict=None):
orientation_model = OrientationModel(num_classes)
# initialize model state dictionary if specified
if not model_state_dict == None: orientation_model.load_state_dict(model_state_dict)
return orientation_model | [
"mbassijaphet@gmail.com"
] | mbassijaphet@gmail.com |
a6492367edc20853e84511db1b05e10bf5cfc989 | 82aee3211216f55392d5a757eb57f02c859e9a28 | /Easy/172_factorailTrailingZeroes.py | 05ea5253c0407b74dbb6ecfb30dd4f8afb340ace | [] | no_license | Yucheng7713/CodingPracticeByYuch | 505d18095d4b9a35c1f3b23632a90a76d811b64a | 1461b10b8910fa90a311939c6df9082a8526f9b1 | refs/heads/master | 2022-05-01T11:51:00.612603 | 2022-04-18T09:46:55 | 2022-04-18T09:46:55 | 198,961,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | class Solution:
def trailingZeroes(self, n):
if n == 0:
return 0
return n // 5 + self.trailingZeroes(n // 5) | [
"yuchengh@usc.edu"
] | yuchengh@usc.edu |
5e20cae19098110c1259b6331ea2761525706dec | 014e01ab4fee577cd3058f71b6bbb71cf9fd7156 | /rango/migrations/0016_auto_20150126_0917.py | e2c72c1a180f8a916da5eb13847580660e9e1803 | [] | no_license | enzoroiz/tangowithdjango | c993ad9b881d8e6816463fc49154bfe7ae3e845b | 66ee507216660153a1444861e8698a56fc12707d | refs/heads/master | 2021-01-18T12:54:13.154508 | 2015-02-24T14:02:28 | 2015-02-24T14:02:28 | 29,796,441 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated: drops the length limit customization on Category.slug
    by redefining it as a plain SlugField. Do not hand-edit."""
    dependencies = [
        ('rango', '0015_auto_20150126_0916'),
    ]
    operations = [
        migrations.AlterField(
            model_name='category',
            name='slug',
            field=models.SlugField(),
        ),
    ]
| [
"enzoroiz@gmail.com"
] | enzoroiz@gmail.com |
5f8bb830c9edcefa4d7bf2f6a11a374e5aa1c534 | 718115f4d405edcff2984f22367428f66725f00e | /user_application/pyxis-kafka-producer/pyxis-kafka-producer/pyxis-kafka-producer.py | c82f9a1262685e9cab1e285ab93af6ac89ee9307 | [] | no_license | Alb3niz/tornado-app | e909698a6b92c907fcd1fd1aea4dcec717654155 | bcbe0471df91ccec8a21adbd37331a06db1c4128 | refs/heads/master | 2020-03-09T07:07:01.127772 | 2018-06-01T07:58:19 | 2018-06-01T07:58:19 | 128,657,110 | 0 | 0 | null | 2018-04-08T15:49:44 | 2018-04-08T15:49:44 | null | UTF-8 | Python | false | false | 2,692 | py | import tweepy
import json
import os
from kafka import KafkaProducer
import logging
LOGGER = logging.getLogger(__name__)
class ExploiterTwitterStreamListener(tweepy.StreamListener):
    """Tweepy stream listener that forwards every raw tweet payload to a
    Kafka topic; broker address and topic come from the environment."""
    def __init__(self):
        super(ExploiterTwitterStreamListener, self).__init__()
        bootstrap_server = os.environ.get("KAFKA_BOOTSTRAP_SERVER", "kafka-server:9092")
        self.kafka_topic = os.environ.get("KAFKA_TOPIC", "default-token")
        # NOTE(review): both env lookups above supply defaults, so these
        # None checks can never trigger -- presumably leftovers; confirm.
        if bootstrap_server is None:
            LOGGER.error("There is no KAFKA_BOOTSTRAP_SERVER present in environment")
            return None
        if self.kafka_topic is None:
            LOGGER.error("There is no KAFKA_TOPIC present in environment")
            self.kafka_topic = "empty_topic"
        print(self.kafka_topic)
        # Messages are JSON-encoded to UTF-8 bytes before publishing.
        self.producer = KafkaProducer(bootstrap_servers=bootstrap_server,\
            value_serializer=lambda v: json.dumps(v).encode('utf-8'))
    def on_data(self, data):
        # Called by tweepy with the raw JSON string of each tweet.
        print(" -> I am going to write on topic {topic}".format(topic=self.kafka_topic))
        self.producer.send(self.kafka_topic, data)
        # Flushing per message trades throughput for delivery guarantees.
        self.producer.flush()
        return True
    def on_status(self, status):
        print(status.text)
def generate_auth_object():
"""Use twitter credentials to generate auth object"""
collector_api_token = os.environ.get\
("TWITTER_CONSUMER_API_KEY", "qUBED8JONS1rdOXMGXxJw3KDK")
collector_api_secret = os.environ.get\
("TWITTER_CONSUMER_API_SECRET", "DUI0ICvIXTYE4SPxdBSRVlq3xEw1UDpcy6mZG2qWE1yyX3nH2M")
collector_access_token = os.environ.get\
("TWITTER_CONSUMER_TOKEN", "245605482-rajqw4klordPOid8izXvAHBc8DhU8QliOFraCFqM")
collector_access_secret = os.environ.get\
("TWITTER_CONSUMER_SECRET", "kYalUO9SmnLvcjXPIrRE0dSEDd2LhQBSBMPD57UgLvzse")
auth = tweepy.OAuthHandler(collector_api_token, collector_api_secret)
auth.set_access_token(collector_access_token, collector_access_secret)
return auth
def run_application():
"""Start application"""
auth = generate_auth_object()
stream_listener = ExploiterTwitterStreamListener()
words_to_track = os.environ.get("WORDS_TO_TRACK", "bigdata, machinelearning").split(',')
# The hashtag is not allowed for now, so it is set before each word
words_to_track = [str('#') + word for word in words_to_track]
print(words_to_track)
stream = tweepy.Stream(auth=auth, listener=stream_listener)
# Goal is to keep this process always going
while True:
try:
stream.filter(track=words_to_track)
except Exception as error:
print("###Error###")
print(error)
if __name__ == '__main__':
run_application()
| [
"javier_albeniz@hotmail.com"
] | javier_albeniz@hotmail.com |
46bebe2411bfc00bfe220153fd705c51a4d73f1e | cf341fcdd4eaf058a79703be67db005c7967da78 | /Friend.py | 9dea47870168e97125d808ab30f01d1edfa8b4d8 | [] | no_license | aunghtet008900/Friend | 7abc269111c5a19093d7ce030508182a0baaec1c | 8999ff21200128c6ac252c1b5666cb476a36ea60 | refs/heads/master | 2020-08-12T02:25:47.955822 | 2019-05-17T14:20:58 | 2019-05-17T14:20:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,914 | py | #!/usr/bin/env python
# Created By Benelhaj_younes
#Libraries#
try:
from os import system as sy, path; import requests,socket,optparse
sy("")
except ImportError:
print(unknown2 +"Error Please Install [Requests] Module !!!")
print(unknown6 +"Use This Command > pip install requests")
exit(1)
# Check Internet Connection #
def cnet():
try:
ip = socket.gethostbyname("www.google.com")
con = socket.create_connection((ip, 80), 2)
return True
except socket.error:
pass
return False
#Check-Email-Function#
#
def Friend(email):
try:
data = {"email": email}
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.24 (KHTML, like Gecko) RockMelt/0.9.58.494 Chrome/11.0.696.71 Safari/534.24'}
response = requests.post("https://verifyemailaddress.com/result", headers=headers, data=data).text
if "is valid" in response:
print(unknown9+"["+unknown9+"+"+unknown9+"] Email["+unknown8+email+unknown8+"] STATUS["+unknown5+" Found "+unknown5+"]")
else:
print(Red+"["+Red+"-"+Red+"] Email["+Grey+email+Grey+"] STATUS["+cyan+" Not Found "+cyan+"]"+cyan)
except(KeyboardInterrupt,EOFError):
print(cyan+" ")
exit(1)
##############################3
Green="\033[1;33m"
Blue="\033[1;34m"
Grey="\033[1;30m"
Reset="\033[0m"
yellow="\033[1;36m"
Red="\033[1;31m"
purple="\033[35m"
Light="\033[95m"
cyan="\033[96m"
stong="\033[39m"
unknown="\033[38;5;82m"
unknown2="\033[38;5;198m"
unknown3="\033[38;5;208m"
unknown4="\033[38;5;167m"
unknown5="\033[38;5;91m"
unknown6="\033[38;5;210m"
unknown7="\033[38;5;165m"
unknown8="\033[38;5;49m"
unknown9="\033[38;5;160m"
unknown10="\033[38;5;51m"
unknown11="\033[38;5;13m"
unknown12="\033[38;5;162m"
unknown13="\033[38;5;203m"
unknown14="\033[38;5;113m"
unknown15="\033[38;5;14m"
###############################3
print(" "+unknown+"MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM")
print(" "+unknown2+"MMMMMMMMMMNKWMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM")
print(" "+unknown3+"MMMMMMMMMNc.dWMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM")
print(" "+unknown4+"MMMMMMMMWd. .kWMMMMMMMMMMMMMMMMMMMMMMW0KMMMMMMMMMM")
print(" "+unknown15+"MMMMMMMMk:;. 'OMMMMMMMMMMMMMMMMMMMMMWx.,0MMMMMMMMM")
print(" "+unknown14+"MMMMMMMK:ok. ,0MMMMMMMMMMMMMMMMMMMWO. .cXMMMMMMMM")
print(" "+unknown13+"MMMMMMNl:KO. ;KWNXK00O0000KXNWMMWO' .c;dWMMMMMMM")
print(" "+unknown12+"MMMMMMx,xNk. .;'... ....';:l:. ,0l,0MMMMMMM")
print(" "+unknown11+"MMMMMK;,l;. .,:cc:;. .dx,lWMMMMMM")
print(" "+unknown10+"MMMMWo ,dKWMMMMWXk:. .cdkOOxo,. ...OMMMMMM")
print(" "+unknown5+"MMMM0' cXMMWKxood0WWk. .lkONMMNOOXO, lWMMMMM")
print(" "+unknown2+"MMMWl ;XMMNo. .lXWd. .dWk;;dd;;kWM0' '0MMMMM")
print(" "+unknown9+"kxko. lWMMO. .kMO. .OMMK; .kMMMNc oWMMMM")
print(" "+unknown8+"X0k:. ;KMMXc :XWo .dW0c,lo;;xNMK, 'xkkk0")
print(" "+unknown10+"kko' :KMMNkl::lkNNd. .dkdKWMNOkXO, .lOKNW")
print(" "+unknown15+"0Kk:. .lOXWMMWN0d, 'lxO0Oko;. .ckkOO")
print(" "+unknown4+"kkkdodo;. .,;;;'. .:ooc. . ...ck0XN")
print(" "+unknown8+"0XWMMMMWKxc'. ;dxc. .,cxKK0OkkOO")
print(" "+unknown+"MMMMMMMMMMMN0d:'. .' .l' .;lxKWMMMMMMMMMN")
print(" "+unknown2+"MMMMMMMMMMMMMMMN0xo0O:,;;;;;;xN0xOXWMMMMMMMMMMMMMM")
print(" "+unknown14+"MMMMMMMMMMMMMMMMMMMMMMWWWWWMMMMMMMMMMMMMMMMMMMMMMM")
print(" "+unknown6+"MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM")
print(" "+unknown7+"MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM")
print(" "+unknown8+"MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM")
print(" "+unknown9+"MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM")
print(" "+Blue+" "+unknown2+"["+unknown14+"Friend"+unknown5+"]"+unknown+" ")
print(" "+purple+" "+unknown13+"["+unknown8+"Created By Benelhaj_younes"+unknown2+"]"+unknown10+" "+Reset+"\n")
parse = optparse.OptionParser(unknown +"""
Usage : python friend.py [Option]
Options:
|-------------------------------------------------------------------------|
| -s --single [Email] :> Check Single Email |
|-------------------------------------------------------------------------|
| -m --many [Email,Email2,etc] :> Check Many Emails |
|-------------------------------------------------------------------------|
| -f --file [File Contain Emails] :> Check All Email From File|
|-------------------------------------------------------------------------|
|----------------------------[Examples]-----------------------------------|
|-------------------------------------------------------------------------|
| --------python friend.py -s younes.brovi@gmail.com -------------------- |
|-------------------------------------------------------------------------|
|-------python friend.py -m younes.brovi@gmail.com,doophack@gmail.com-----|
|-------------------------------------------------------------------------|
|--------python friend.py -f emails.txt-----------------------------------|
|-------------------------------------------------------------------------|
""",version="1.0")
def Main():
parse.add_option('-s','-S','--single','--SINGLE', dest="Smail",type="string")
parse.add_option('-m','-m','--many','--MANY', dest="Mmail",type="string")
parse.add_option('-f','-F','--file','--FILE', dest="Fmail",type="string")
(opt,args) = parse.parse_args()
if opt.Smail !=None:
if cnet() !=True:
print(Red +"[!]"+unknown5+"Error Check Your Connection")
exit(1)
email = opt.Smail
if not email.strip() or "@" not in email:
print(Red+"\n["+Red+"!"+yellow+"] Invalid Email["+Red+email+yellow+"] STATUS["+Red+" SKIPPED "+yellow+"]"+cyan)
exit(1)
email = email.strip()
print(unknown6+"["+unknown4+"~"+unknown3+"]"+unknown2+" Checking....\n"+cyan)
Friend(email)
elif opt.Mmail !=None:
if cnet() !=True:
print(Red+"\n["+unknown8+"!"+unknown9+"]"+unknown10+" Error: Please Check Your Internet Connection "+unknown12+"!!!"+cyan)
exit(1)
many_email = opt.Mmail
print(Red+"["+yellow+"~"+Green+"]"+Red+" Checking....\n"+cyan)
if ',' in many_email:
emails = many_email.split(",")
else:
print(Red+"\n["+unknown15+"!"+unknown+"] Error: Please Use[ "+cyan+","+cyan+" ] To Split The Emails"+Red+" !!!"+cyan)
exit(1)
try:
for email in emails:
if not email.strip() or "@" not in email: continue
email = email.strip()
Friend(email)
except (KeyboardInterrupt,EOFError):
print(cyan+" ")
exit(1)
elif opt.Fmail !=None:
emails_file = opt.Fmail
print(unknown+"["+unknown2+"~"+unknown3+"]"+unknown4+" Checking....\n"+cyan)
if not path.isfile(emails_file):
print(yellow+"\n["+Red+"!"+Green+"]"+Grey+" Error:"+purple+" No Such File: [ "+Light+emails_file+Light+" ]"+Red+" !!!"+cyan)
print(cyan+"["+Red+"!"+cyan+"]"+yellow+" Please:"+cyan+" Check Your Emails File Path."+Grey+"!"+cyan)
exit(1)
try:
with open(emails_file) as fop:
for email in fop:
if not email.strip() or "@" not in email: continue
email = email.strip()
Friend(email)
fop.close()
except (KeyboardInterrupt,EOFError):
print(wi+" ")
exit(1)
else:
print(parse.usage)
exit(1)
if __name__=="__main__":
Main()
# Done!
| [
"noreply@github.com"
] | aunghtet008900.noreply@github.com |
d91badb267be5fee30fca3fe04414407e5995d0d | 07892596dead30c7446a32bbe9fac25ddeec1713 | /fake_hotel/core/models.py | 91231ebe961461a7aa7a68051941656a5b4ed967 | [] | no_license | joaosr/fake_hotel | 569820e5292ec5d732497d1c98a65ec1cc441bb8 | d3dd4d84d2ad41daf07637d8a2bb432a9a7ff713 | refs/heads/master | 2021-06-19T06:24:33.508261 | 2019-11-05T13:53:01 | 2019-11-05T13:53:01 | 216,972,182 | 0 | 0 | null | 2021-06-09T18:43:52 | 2019-10-23T05:05:25 | Python | UTF-8 | Python | false | false | 1,716 | py | from django.db import models
class Reservation(models.Model):
date_in = models.DateField('Date In', blank=False, null=False)
date_out = models.DateField('Date Out', blank=False, null=False)
guest_number = models.IntegerField('Number of guests', blank=False, null=False, default=1)
created_at = models.DateTimeField('Created at', auto_now_add=True)
def __str__(self):
return "{0};{1}".format(self.date_in,self.date_out)
class RoomType(models.Model):
description = models.TextField('Description', blank=True, null=True)
max_capacity = models.IntegerField('Max Capacity', blank=False, null=False, default=1)
created_at = models.DateTimeField('Created at', auto_now_add=True)
def __str__(self):
return self.description
class Room(models.Model):
OCCUPIED = 'Occupied'
VACANT = 'Vacant'
ROOM_STATUS = (
(OCCUPIED, 'Occupied'),
(VACANT, 'Vacant')
)
number = models.IntegerField('Number', blank=False, null=False)
status = models.CharField('Status', max_length=20, choices=ROOM_STATUS)
room_type = models.ForeignKey(RoomType, on_delete=models.CASCADE)
created_at = models.DateTimeField('Created at', auto_now_add=True)
def __str__(self):
return str(self.number)
class OccupiedRoom(models.Model):
check_in = models.DateField('Check In', blank=False, null=False)
check_out = models.DateField('Check Out', blank=False, null=False)
room = models.ForeignKey(Room, on_delete=models.CASCADE)
reservation = models.ForeignKey(Reservation, on_delete=models.CASCADE)
created_at = models.DateTimeField('Created at', auto_now_add=True)
def __str__(self):
return str(self.room.number)
| [
"eng.jmsoares@gmail.com"
] | eng.jmsoares@gmail.com |
9f20f19fa04dd03b903adfa8d2bb8ba3f51fd0b9 | ac66632767f9643050a2b96b0a9000e9a97d0ebc | /SampleSurvey/Survey1/models.py | 0597153a6d375495fcc2ca3473171f266aba1452 | [] | no_license | shrayarora8/ICTD_Free_Basics_Survey | b13c4bd53d75abbb268b8e5f0c5e3f95ed5b055a | ab11d8afcf760704e3d3f4729c7c23398cb96fe8 | refs/heads/master | 2020-03-28T23:00:37.712982 | 2018-05-22T16:36:11 | 2018-05-22T16:36:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,338 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# This model recreated the first step in a LabintheWild study: getting confirmation that the user
# agrees to be in the study and gathering information about them. For more information, see
# SampleSurvey/SampleSurvey/templates/basicInfo.html or confirmation.html
class Entry(models.Model):
# For basic info form
CHOICES = (('', ''), ('1', '1 - Not at all'), ('2','2'), ('3','3'), ('4', '4'), ('5', '5 - very much'), )
learn = models.CharField(max_length=10, choices=CHOICES, default='')
fun = models.CharField(max_length=10, choices=CHOICES, default='')
bored = models.CharField(max_length=10, choices=CHOICES, default='')
science = models.CharField(max_length=10, choices=CHOICES, default='')
compare = models.CharField(max_length=10, choices=CHOICES, default='')
# For confirmation form
confirmation = models.BooleanField()
# This model is used to record a user's evaluation of the colorfulness of a web page that is flashed on
# their screen. See SampleSurvey/SampleSurvey/templates/selection.html
class Data(models.Model):
CHOICES = (('1', '1 - Not colorful at all'), ('2', '2'), ('3', '3'), ('4', '4'), ('5', '5'), ('6', '6'),
('7', '7'), ('8', '8'), ('9', '9'), ('10', '10 - Very colorful'),)
answer = models.PositiveSmallIntegerField(choices=CHOICES)
userId = models.IntegerField()
# This model is used to record a user's answer to which two of three words are associated. See
# SampleSurvey/SampleSurvey/templates/question.html
class Question(models.Model):
CHOICES = ((None, ''), ('a', 'A'), ('b', 'B'), ('c', 'C'),)
word1 = models.CharField(max_length=1, default='', choices=CHOICES)
word2 = models.CharField(max_length=1, default='', choices=CHOICES)
userId = models.IntegerField()
questionId = models.IntegerField()
# This model has the same purpose as Question but demonstrates a different form of data entry.
# See SampleSurvey/SampleSurvey/templates/question1.html
class Question1(models.Model):
A = models.BooleanField(null=False, default=False)
B = models.BooleanField(null=False, default=False)
C = models.BooleanField(null=False, default=False)
userId = models.IntegerField()
questionId = models.IntegerField()
| [
"sfoslund@gmail.com"
] | sfoslund@gmail.com |
fb8b3bf2e25eecf648aea3af50545bb8c47eea65 | ab9b527b246c63e4d932a3ef9e9dfabf2900858d | /venv/Lib/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distro.py | 88f747da7a800c8cef59743b13a15926dd87a89e | [] | no_license | UzJu/ShtermReUserExpireTime | 56f8f8f829877366774cd0be33df7b71a3031149 | 255eb6c18ca8ee6cbfa39c541b1c40b2de2de17d | refs/heads/master | 2023-08-14T18:51:09.958087 | 2021-10-12T02:17:40 | 2021-10-12T02:17:40 | 416,148,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42,455 | py | # Copyright 2015,2016,2017 Nir Cohen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The ``distro`` package (``distro`` stands for Linux Distribution) provides
information about the Linux distribution it runs on, such as a reliable
machine-readable distro ID, or version information.
It is a renewed alternative implementation for Python's original
:py:func:`platform.linux_distribution` function, but it provides much more
functionality. An alternative implementation became necessary because Python
3.5 deprecated this function, and Python 3.7 is expected to remove it
altogether. Its predecessor function :py:func:`platform.dist` was already
deprecated since Python 2.6 and is also expected to be removed in Python 3.7.
Still, there are many cases in which access to OS distribution information
is needed. See `Python issue 1322 <https://bugs.python.org/issue1322>`_ for
more information.
"""
import os
import re
import sys
import json
import shlex
import logging
import argparse
import subprocess
# Base directory searched for os-release / distro release files. Overridable
# via the UNIXCONFDIR environment variable (presumably to ease testing
# against fixture trees — TODO confirm against the lookup code below).
_UNIXCONFDIR = os.environ.get('UNIXCONFDIR', '/etc')
_OS_RELEASE_BASENAME = 'os-release'

#: Translation table for normalizing the "ID" attribute defined in os-release
#: files, for use by the :func:`distro.id` method.
#:
#: * Key: Value as defined in the os-release file, translated to lower case,
#:   with blanks translated to underscores.
#:
#: * Value: Normalized value.
NORMALIZED_OS_ID = {}

#: Translation table for normalizing the "Distributor ID" attribute returned by
#: the lsb_release command, for use by the :func:`distro.id` method.
#:
#: * Key: Value as returned by the lsb_release command, translated to lower
#:   case, with blanks translated to underscores.
#:
#: * Value: Normalized value.
NORMALIZED_LSB_ID = {
    'enterpriseenterprise': 'oracle',  # Oracle Enterprise Linux
    'redhatenterpriseworkstation': 'rhel',  # RHEL 6, 7 Workstation
    'redhatenterpriseserver': 'rhel',  # RHEL 6, 7 Server
}

#: Translation table for normalizing the distro ID derived from the file name
#: of distro release files, for use by the :func:`distro.id` method.
#:
#: * Key: Value as derived from the file name of a distro release file,
#:   translated to lower case, with blanks translated to underscores.
#:
#: * Value: Normalized value.
NORMALIZED_DISTRO_ID = {
    'redhat': 'rhel',  # RHEL 6.x, 7.x
}

# Pattern for content of distro release file (reversed).
# The literals are spelled backwards on purpose ('esaeler' == 'release',
# 'STL' == 'LTS'), which indicates the line is reversed before matching —
# presumably so the trailing version/codename anchors the match; confirm
# against the parsing code that applies this pattern.
_DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile(
    r'(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)')

# Pattern for base file name of distro release file, e.g. "centos-release"
# or "fedora_version"; group 1 captures the distro ID part.
_DISTRO_RELEASE_BASENAME_PATTERN = re.compile(
    r'(\w+)[-_](release|version)$')

# Base file names to be ignored when searching for distro release file
# (these are either handled by dedicated data sources or are not
# distro-identifying).
_DISTRO_RELEASE_IGNORE_BASENAMES = (
    'debian_version',
    'lsb-release',
    'oem-release',
    _OS_RELEASE_BASENAME,
    'system-release'
)
def linux_distribution(full_distribution_name=True):
    """
    Return a ``(id_name, version, codename)`` tuple describing the current
    OS distribution.

    ``id_name`` is the result of :func:`distro.name` when
    *full_distribution_name* is true, otherwise of :func:`distro.id`;
    ``version`` and ``codename`` are the results of :func:`distro.version`
    and :func:`distro.codename`.

    The interface is compatible with a subset of the parameters of the
    original :py:func:`platform.linux_distribution` function, but the
    returned data may differ, because more data sources are consulted and
    the distro ID is normalized for a number of popular distributions.
    """
    # Delegate to the module-global LinuxDistribution instance.
    return _distro.linux_distribution(full_distribution_name)
def id():
    """
    Return the distro ID of the current distribution as a
    machine-readable string.

    For a number of popular distributions the returned ID is *reliable*:
    documented and stable across releases, e.g. ``"ubuntu"``, ``"debian"``,
    ``"rhel"``, ``"centos"``, ``"fedora"``, ``"sles"``, ``"opensuse"``,
    ``"amazon"``, ``"arch"``, ``"cloudlinux"``, ``"exherbo"``, ``"gentoo"``,
    ``"ibm_powerkvm"``, ``"kvmibm"``, ``"linuxmint"``, ``"mageia"``,
    ``"mandriva"``, ``"parallels"``, ``"pidora"``, ``"raspbian"``,
    ``"oracle"``, ``"scientific"``, ``"slackware"``, ``"xenserver"``,
    ``"openbsd"``, ``"netbsd"``, ``"freebsd"``. If an ID is missing from
    this set or wrong, please report it in the `distro issue tracker`_.

    Lookup hierarchy: the first available, non-empty value of

    * the "ID" attribute of the os-release file,
    * the "Distributor ID" attribute returned by the lsb_release command,
    * the first part of the file name of the distro release file.

    The value so obtained is then lower-cased, has blanks replaced by
    underscores, and is normalized using the module's
    `normalization tables`_, so that the ID stays reliable even across
    incompatible changes in a distribution's data sources (such as the
    later addition of an os-release file or of the lsb_release command).
    """
    return _distro.id()
def name(pretty=False):
    """
    Return the human-readable name of the current OS distribution.

    With *pretty* false, the bare name without version or codename is
    returned (e.g. "CentOS Linux"); with *pretty* true, version and
    codename are appended (e.g. "CentOS Linux 7.1.1503 (core)").

    The first available, non-empty value wins, in this order:

    * *pretty* false: os-release "NAME", lsb_release "Distributor ID",
      then the "<name>" field of the distro release file;
    * *pretty* true: os-release "PRETTY_NAME", lsb_release "Description",
      then the distro release file's "<name>" field with the pretty
      version ("<version_id>" and "<codename>" fields) appended, if
      available.
    """
    return _distro.name(pretty)
def version(pretty=False, best=False):
    """
    Return the version of the current OS distribution as a
    human-readable string.

    With *pretty* false the plain version is returned (e.g. "7.0"); with
    *pretty* true a non-empty codename is appended in parentheses (e.g.
    "7.0 (Maipo)").

    The examined sources are, in priority order:

    * the "VERSION_ID" attribute of the os-release file,
    * the "Release" attribute returned by the lsb_release command,
    * the version parsed from the "<version_id>" field of the first line
      of the distro release file,
    * the version parsed from the os-release "PRETTY_NAME" attribute, if
      it follows the distro release file format,
    * the version parsed from the lsb_release "Description" attribute, if
      it follows the distro release file format.

    Because different sources may carry different precisions (e.g. for
    Debian 8.2 or CentOS 7.1), *best* selects the approach: false returns
    the first non-empty version in the priority order above; true returns
    the most precise version found across all sources.
    """
    return _distro.version(pretty, best)
def version_parts(best=False):
    """
    Return the distro version as a ``(major, minor, build_number)`` tuple,
    the items being the results of :func:`distro.major_version`,
    :func:`distro.minor_version` and :func:`distro.build_number`.

    See :func:`distro.version` for the meaning of *best*.
    """
    return _distro.version_parts(best)
def major_version(best=False):
    """
    Return the major version of the current OS distribution as a string,
    or the empty string when it is not provided. The major version is the
    first part of the dot-separated version string.

    See :func:`distro.version` for the meaning of *best*.
    """
    return _distro.major_version(best)
def minor_version(best=False):
    """
    Return the minor version of the current OS distribution as a string,
    or the empty string when it is not provided. The minor version is the
    second part of the dot-separated version string.

    See :func:`distro.version` for the meaning of *best*.
    """
    return _distro.minor_version(best)
def build_number(best=False):
    """
    Return the build number of the current OS distribution as a string,
    or the empty string when it is not provided. The build number is the
    third part of the dot-separated version string.

    See :func:`distro.version` for the meaning of *best*.
    """
    return _distro.build_number(best)
def like():
    """
    Return a space-separated list of distro IDs of distributions that are
    closely related to the current one in regards to packaging and
    programming interfaces — for example, the distros it derives from.

    Only the os-release file provides this item; for details, see the
    "ID_LIKE" attribute in the `os-release man page
    <http://www.freedesktop.org/software/systemd/man/os-release.html>`_.
    """
    return _distro.like()
def codename():
    """
    Return the codename of the current OS distribution's release, or the
    empty string when the distribution has none.

    Note that the returned value is not always a true codename — openSUSE,
    for instance, reports "x86_64". Whatever string is found is returned
    unchanged, without special-casing.

    Lookup hierarchy: the codename within the os-release "VERSION"
    attribute (if provided), the "Codename" attribute returned by the
    lsb_release command, then the "<codename>" field of the distro
    release file.
    """
    return _distro.codename()
def info(pretty=False, best=False):
    """
    Return machine-readable information items about the current OS
    distribution as a dictionary with a fixed structure, regardless of
    which items the underlying data sources provide, e.g.::

        {
            'id': 'rhel',
            'version': '7.0',
            'version_parts': {
                'major': '7',
                'minor': '0',
                'build_number': ''
            },
            'like': 'fedora',
            'codename': 'Maipo'
        }

    The values are the results of :func:`distro.id`,
    :func:`distro.version`, :func:`distro.major_version`,
    :func:`distro.minor_version`, :func:`distro.build_number`,
    :func:`distro.like` and :func:`distro.codename`, respectively.

    See :func:`distro.version` for the meaning of *pretty* and *best*.
    """
    return _distro.info(pretty, best)
def os_release_info():
    """
    Return a dictionary of the key-value pairs obtained from the
    os-release file data source of the current OS distribution.

    See `os-release file`_ for details about these information items.
    """
    return _distro.os_release_info()
def lsb_release_info():
    """
    Return a dictionary of the key-value pairs obtained from the
    lsb_release command data source of the current OS distribution.

    See `lsb_release command output`_ for details about these
    information items.
    """
    return _distro.lsb_release_info()
def distro_release_info():
    """
    Return a dictionary of the key-value pairs obtained from the distro
    release file data source of the current OS distribution.

    See `distro release file`_ for details about these information items.
    """
    return _distro.distro_release_info()
def uname_info():
    """
    Return a dictionary of the key-value pairs obtained from the uname
    command data source of the current OS distribution.
    """
    return _distro.uname_info()
def os_release_attr(attribute):
    """
    Return one named information item from the os-release file data
    source of the current OS distribution.

    Parameters:

    * ``attribute`` (string): Key of the information item.

    Returns the item's value as a string, or the empty string when the
    item does not exist. See `os-release file`_ for details.
    """
    return _distro.os_release_attr(attribute)
def lsb_release_attr(attribute):
    """
    Return one named information item from the lsb_release command output
    data source of the current OS distribution.

    Parameters:

    * ``attribute`` (string): Key of the information item.

    Returns the item's value as a string, or the empty string when the
    item does not exist. See `lsb_release command output`_ for details.
    """
    return _distro.lsb_release_attr(attribute)
def distro_release_attr(attribute):
    """
    Return one named information item from the distro release file data
    source of the current OS distribution.

    Parameters:

    * ``attribute`` (string): Key of the information item.

    Returns the item's value as a string, or the empty string when the
    item does not exist. See `distro release file`_ for details.
    """
    return _distro.distro_release_attr(attribute)
def uname_attr(attribute):
    """
    Return one named information item from the uname command data source
    of the current OS distribution.

    Parameters:

    * ``attribute`` (string): Key of the information item.

    Returns the item's value as a string, or the empty string when the
    item does not exist.
    """
    return _distro.uname_attr(attribute)
class cached_property(object):
    """A version of @property which caches the value. On access, it calls the
    underlying function and sets the value in `__dict__` so future accesses
    will not re-call the property (i.e. a non-data descriptor that is
    shadowed by the instance attribute after the first lookup).
    """

    def __init__(self, f):
        # The function's name doubles as the __dict__ key under which the
        # computed value is cached.
        self._fname = f.__name__
        self._f = f
        # Preserve the wrapped function's docstring for help()/introspection.
        self.__doc__ = getattr(f, '__doc__', None)

    def __get__(self, obj, owner):
        if obj is None:
            # Accessed on the class rather than an instance (e.g. by
            # help() or other introspection): return the descriptor
            # itself, like the built-in property does. The previous
            # `assert obj is not None` raised AssertionError here and,
            # under `python -O`, degraded to an AttributeError on
            # None.__dict__.
            return self
        # Compute once and cache on the instance; subsequent attribute
        # lookups find the value in obj.__dict__ and never reach this
        # descriptor again.
        ret = obj.__dict__[self._fname] = self._f(obj)
        return ret
class LinuxDistribution(object):
"""
Provides information about a OS distribution.
This package creates a private module-global instance of this class with
default initialization arguments, that is used by the
`consolidated accessor functions`_ and `single source accessor functions`_.
By using default initialization arguments, that module-global instance
returns data about the current OS distribution (i.e. the distro this
package runs on).
Normally, it is not necessary to create additional instances of this class.
However, in situations where control is needed over the exact data sources
that are used, instances of this class can be created with a specific
distro release file, or a specific os-release file, or without invoking the
lsb_release command.
"""
def __init__(self,
include_lsb=True,
os_release_file='',
distro_release_file='',
include_uname=True):
"""
The initialization method of this class gathers information from the
available data sources, and stores that in private instance attributes.
Subsequent access to the information items uses these private instance
attributes, so that the data sources are read only once.
Parameters:
* ``include_lsb`` (bool): Controls whether the
`lsb_release command output`_ is included as a data source.
If the lsb_release command is not available in the program execution
path, the data source for the lsb_release command will be empty.
* ``os_release_file`` (string): The path name of the
`os-release file`_ that is to be used as a data source.
An empty string (the default) will cause the default path name to
be used (see `os-release file`_ for details).
If the specified or defaulted os-release file does not exist, the
data source for the os-release file will be empty.
* ``distro_release_file`` (string): The path name of the
`distro release file`_ that is to be used as a data source.
An empty string (the default) will cause a default search algorithm
to be used (see `distro release file`_ for details).
If the specified distro release file does not exist, or if no default
distro release file can be found, the data source for the distro
release file will be empty.
* ``include_name`` (bool): Controls whether uname command output is
included as a data source. If the uname command is not available in
the program execution path the data source for the uname command will
be empty.
Public instance attributes:
* ``os_release_file`` (string): The path name of the
`os-release file`_ that is actually used as a data source. The
empty string if no distro release file is used as a data source.
* ``distro_release_file`` (string): The path name of the
`distro release file`_ that is actually used as a data source. The
empty string if no distro release file is used as a data source.
* ``include_lsb`` (bool): The result of the ``include_lsb`` parameter.
This controls whether the lsb information will be loaded.
* ``include_uname`` (bool): The result of the ``include_uname``
parameter. This controls whether the uname information will
be loaded.
Raises:
* :py:exc:`IOError`: Some I/O issue with an os-release file or distro
release file.
* :py:exc:`subprocess.CalledProcessError`: The lsb_release command had
some issue (other than not being available in the program execution
path).
* :py:exc:`UnicodeError`: A data source has unexpected characters or
uses an unexpected encoding.
"""
self.os_release_file = os_release_file or \
os.path.join(_UNIXCONFDIR, _OS_RELEASE_BASENAME)
self.distro_release_file = distro_release_file or '' # updated later
self.include_lsb = include_lsb
self.include_uname = include_uname
def __repr__(self):
"""Return repr of all info
"""
return \
"LinuxDistribution(" \
"os_release_file={self.os_release_file!r}, " \
"distro_release_file={self.distro_release_file!r}, " \
"include_lsb={self.include_lsb!r}, " \
"include_uname={self.include_uname!r}, " \
"_os_release_info={self._os_release_info!r}, " \
"_lsb_release_info={self._lsb_release_info!r}, " \
"_distro_release_info={self._distro_release_info!r}, " \
"_uname_info={self._uname_info!r})".format(
self=self)
def linux_distribution(self, full_distribution_name=True):
"""
Return information about the OS distribution that is compatible
with Python's :func:`platform.linux_distribution`, supporting a subset
of its parameters.
For details, see :func:`distro.linux_distribution`.
"""
return (
self.name() if full_distribution_name else self.id(),
self.version(),
self.codename()
)
def id(self):
"""Return the distro ID of the OS distribution, as a string.
For details, see :func:`distro.id`.
"""
def normalize(distro_id, table):
distro_id = distro_id.lower().replace(' ', '_')
return table.get(distro_id, distro_id)
distro_id = self.os_release_attr('id')
if distro_id:
return normalize(distro_id, NORMALIZED_OS_ID)
distro_id = self.lsb_release_attr('distributor_id')
if distro_id:
return normalize(distro_id, NORMALIZED_LSB_ID)
distro_id = self.distro_release_attr('id')
if distro_id:
return normalize(distro_id, NORMALIZED_DISTRO_ID)
distro_id = self.uname_attr('id')
if distro_id:
return normalize(distro_id, NORMALIZED_DISTRO_ID)
return ''
def name(self, pretty=False):
"""
Return the name of the OS distribution, as a string.
For details, see :func:`distro.name`.
"""
name = self.os_release_attr('name') \
or self.lsb_release_attr('distributor_id') \
or self.distro_release_attr('name') \
or self.uname_attr('name')
if pretty:
name = self.os_release_attr('pretty_name') \
or self.lsb_release_attr('description')
if not name:
name = self.distro_release_attr('name') \
or self.uname_attr('name')
version = self.version(pretty=True)
if version:
name = name + ' ' + version
return name or ''
    def version(self, pretty=False, best=False):
        """
        Return the version of the OS distribution, as a string.
        With ``pretty`` the codename is appended in parentheses; with
        ``best`` the most precise (most dotted) candidate wins instead of
        the first non-empty one.
        For details, see :func:`distro.version`.
        """
        # Candidate version strings from every data source, in priority
        # order; an entry is '' when that source has no version info.
        versions = [
            self.os_release_attr('version_id'),
            self.lsb_release_attr('release'),
            self.distro_release_attr('version_id'),
            self._parse_distro_release_content(
                self.os_release_attr('pretty_name')).get('version_id', ''),
            self._parse_distro_release_content(
                self.lsb_release_attr('description')).get('version_id', ''),
            self.uname_attr('release')
        ]
        version = ''
        if best:
            # This algorithm uses the last version in priority order that has
            # the best precision. If the versions are not in conflict, that
            # does not matter; otherwise, using the last one instead of the
            # first one might be considered a surprise.
            for v in versions:
                if v.count(".") > version.count(".") or version == '':
                    version = v
        else:
            # Without 'best': simply the first non-empty candidate.
            for v in versions:
                if v != '':
                    version = v
                    break
        if pretty and version and self.codename():
            # e.g. u'16.04 (xenial)'
            version = u'{0} ({1})'.format(version, self.codename())
        return version
def version_parts(self, best=False):
"""
Return the version of the OS distribution, as a tuple of version
numbers.
For details, see :func:`distro.version_parts`.
"""
version_str = self.version(best=best)
if version_str:
version_regex = re.compile(r'(\d+)\.?(\d+)?\.?(\d+)?')
matches = version_regex.match(version_str)
if matches:
major, minor, build_number = matches.groups()
return major, minor or '', build_number or ''
return '', '', ''
def major_version(self, best=False):
"""
Return the major version number of the current distribution.
For details, see :func:`distro.major_version`.
"""
return self.version_parts(best)[0]
def minor_version(self, best=False):
"""
Return the minor version number of the current distribution.
For details, see :func:`distro.minor_version`.
"""
return self.version_parts(best)[1]
def build_number(self, best=False):
"""
Return the build number of the current distribution.
For details, see :func:`distro.build_number`.
"""
return self.version_parts(best)[2]
def like(self):
"""
Return the IDs of distributions that are like the OS distribution.
For details, see :func:`distro.like`.
"""
return self.os_release_attr('id_like') or ''
def codename(self):
"""
Return the codename of the OS distribution.
For details, see :func:`distro.codename`.
"""
return self.os_release_attr('codename') \
or self.lsb_release_attr('codename') \
or self.distro_release_attr('codename') \
or ''
def info(self, pretty=False, best=False):
"""
Return certain machine-readable information about the OS
distribution.
For details, see :func:`distro.info`.
"""
return dict(
id=self.id(),
version=self.version(pretty, best),
version_parts=dict(
major=self.major_version(best),
minor=self.minor_version(best),
build_number=self.build_number(best)
),
like=self.like(),
codename=self.codename(),
)
def os_release_info(self):
"""
Return a dictionary containing key-value pairs for the information
items from the os-release file data source of the OS distribution.
For details, see :func:`distro.os_release_info`.
"""
return self._os_release_info
def lsb_release_info(self):
"""
Return a dictionary containing key-value pairs for the information
items from the lsb_release command data source of the OS
distribution.
For details, see :func:`distro.lsb_release_info`.
"""
return self._lsb_release_info
def distro_release_info(self):
"""
Return a dictionary containing key-value pairs for the information
items from the distro release file data source of the OS
distribution.
For details, see :func:`distro.distro_release_info`.
"""
return self._distro_release_info
def uname_info(self):
"""
Return a dictionary containing key-value pairs for the information
items from the uname command data source of the OS distribution.
For details, see :func:`distro.uname_info`.
"""
def os_release_attr(self, attribute):
"""
Return a single named information item from the os-release file data
source of the OS distribution.
For details, see :func:`distro.os_release_attr`.
"""
return self._os_release_info.get(attribute, '')
def lsb_release_attr(self, attribute):
"""
Return a single named information item from the lsb_release command
output data source of the OS distribution.
For details, see :func:`distro.lsb_release_attr`.
"""
return self._lsb_release_info.get(attribute, '')
def distro_release_attr(self, attribute):
"""
Return a single named information item from the distro release file
data source of the OS distribution.
For details, see :func:`distro.distro_release_attr`.
"""
return self._distro_release_info.get(attribute, '')
    def uname_attr(self, attribute):
        """
        Return a single named information item from the uname command
        output data source of the OS distribution.
        For details, see :func:`distro.uname_attr`.
        """
        return self._uname_info.get(attribute, '')
@cached_property
def _os_release_info(self):
"""
Get the information items from the specified os-release file.
Returns:
A dictionary containing all information items.
"""
if os.path.isfile(self.os_release_file):
with open(self.os_release_file) as release_file:
return self._parse_os_release_content(release_file)
return {}
    @staticmethod
    def _parse_os_release_content(lines):
        """
        Parse the lines of an os-release file (shell-like KEY=value
        assignments) into a dict with lower-cased keys. A 'codename' key
        is derived from the VERSION field when possible.
        Parameters:
        * lines: Iterable through the lines in the os-release file.
          Each line must be a unicode string or a UTF-8 encoded byte
          string.
        Returns:
            A dictionary containing all information items.
        """
        props = {}
        # shlex handles the shell-style quoting/escaping of os-release.
        lexer = shlex.shlex(lines, posix=True)
        lexer.whitespace_split = True
        # The shlex module defines its `wordchars` variable using literals,
        # making it dependent on the encoding of the Python source file.
        # In Python 2.6 and 2.7, the shlex source file is encoded in
        # 'iso-8859-1', and the `wordchars` variable is defined as a byte
        # string. This causes a UnicodeDecodeError to be raised when the
        # parsed content is a unicode object. The following fix resolves that
        # (... but it should be fixed in shlex...):
        if sys.version_info[0] == 2 and isinstance(lexer.wordchars, bytes):
            lexer.wordchars = lexer.wordchars.decode('iso-8859-1')
        tokens = list(lexer)
        for token in tokens:
            # At this point, all shell-like parsing has been done (i.e.
            # comments processed, quotes and backslash escape sequences
            # processed, multi-line values assembled, trailing newlines
            # stripped, etc.), so the tokens are now either:
            # * variable assignments: var=value
            # * commands or their arguments (not allowed in os-release)
            if '=' in token:
                k, v = token.split('=', 1)
                if isinstance(v, bytes):
                    v = v.decode('utf-8')
                props[k.lower()] = v
                if k == 'VERSION':
                    # this handles cases in which the codename is in
                    # the `(CODENAME)` (rhel, centos, fedora) format
                    # or in the `, CODENAME` format (Ubuntu).
                    codename = re.search(r'(\(\D+\))|,(\s+)?\D+', v)
                    if codename:
                        codename = codename.group()
                        codename = codename.strip('()')
                        codename = codename.strip(',')
                        codename = codename.strip()
                        # codename appears within parentheses.
                        props['codename'] = codename
                    else:
                        props['codename'] = ''
            else:
                # Ignore any tokens that are not variable assignments
                pass
        return props
@cached_property
def _lsb_release_info(self):
"""
Get the information items from the lsb_release command output.
Returns:
A dictionary containing all information items.
"""
if not self.include_lsb:
return {}
with open(os.devnull, 'w') as devnull:
try:
cmd = ('lsb_release', '-a')
stdout = subprocess.check_output(cmd, stderr=devnull)
except OSError: # Command not found
return {}
content = stdout.decode(sys.getfilesystemencoding()).splitlines()
return self._parse_lsb_release_content(content)
@staticmethod
def _parse_lsb_release_content(lines):
"""
Parse the output of the lsb_release command.
Parameters:
* lines: Iterable through the lines of the lsb_release output.
Each line must be a unicode string or a UTF-8 encoded byte
string.
Returns:
A dictionary containing all information items.
"""
props = {}
for line in lines:
kv = line.strip('\n').split(':', 1)
if len(kv) != 2:
# Ignore lines without colon.
continue
k, v = kv
props.update({k.replace(' ', '_').lower(): v.strip()})
return props
@cached_property
def _uname_info(self):
with open(os.devnull, 'w') as devnull:
try:
cmd = ('uname', '-rs')
stdout = subprocess.check_output(cmd, stderr=devnull)
except OSError:
return {}
content = stdout.decode(sys.getfilesystemencoding()).splitlines()
return self._parse_uname_content(content)
@staticmethod
def _parse_uname_content(lines):
props = {}
match = re.search(r'^([^\s]+)\s+([\d\.]+)', lines[0].strip())
if match:
name, version = match.groups()
# This is to prevent the Linux kernel version from
# appearing as the 'best' version on otherwise
# identifiable distributions.
if name == 'Linux':
return {}
props['id'] = name.lower()
props['name'] = name
props['release'] = version
return props
    @cached_property
    def _distro_release_info(self):
        """
        Get the information items from the specified distro release file,
        or — when no file was specified — from the first matching
        ``*-release`` style file found under the Unix config directory.
        Returns:
            A dictionary containing all information items.
        """
        if self.distro_release_file:
            # If it was specified, we use it and parse what we can, even if
            # its file name or content does not match the expected pattern.
            distro_info = self._parse_distro_release_file(
                self.distro_release_file)
            basename = os.path.basename(self.distro_release_file)
            # The file name pattern for user-specified distro release files
            # is somewhat more tolerant (compared to when searching for the
            # file), because we want to use what was specified as best as
            # possible.
            match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
            if match:
                distro_info['id'] = match.group(1)
            return distro_info
        else:
            try:
                basenames = os.listdir(_UNIXCONFDIR)
                # We sort for repeatability in cases where there are multiple
                # distro specific files; e.g. CentOS, Oracle, Enterprise all
                # containing `redhat-release` on top of their own.
                basenames.sort()
            except OSError:
                # This may occur when /etc is not readable but we can't be
                # sure about the *-release files. Check common entries of
                # /etc for information. If they turn out to not be there the
                # error is handled in `_parse_distro_release_file()`.
                basenames = ['SuSE-release',
                             'arch-release',
                             'base-release',
                             'centos-release',
                             'fedora-release',
                             'gentoo-release',
                             'mageia-release',
                             'mandrake-release',
                             'mandriva-release',
                             'mandrivalinux-release',
                             'manjaro-release',
                             'oracle-release',
                             'redhat-release',
                             'sl-release',
                             'slackware-version']
            for basename in basenames:
                if basename in _DISTRO_RELEASE_IGNORE_BASENAMES:
                    continue
                match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
                if match:
                    filepath = os.path.join(_UNIXCONFDIR, basename)
                    distro_info = self._parse_distro_release_file(filepath)
                    if 'name' in distro_info:
                        # The name is always present if the pattern matches.
                        # Remember which file was used so that accessors can
                        # report it later.
                        self.distro_release_file = filepath
                        distro_info['id'] = match.group(1)
                        return distro_info
            return {}
def _parse_distro_release_file(self, filepath):
"""
Parse a distro release file.
Parameters:
* filepath: Path name of the distro release file.
Returns:
A dictionary containing all information items.
"""
try:
with open(filepath) as fp:
# Only parse the first line. For instance, on SLES there
# are multiple lines. We don't want them...
return self._parse_distro_release_content(fp.readline())
except (OSError, IOError):
# Ignore not being able to read a specific, seemingly version
# related file.
# See https://github.com/nir0s/distro/issues/162
return {}
    @staticmethod
    def _parse_distro_release_content(line):
        """
        Parse a line from a distro release file, typically of the form
        "<name> <version_id> (<codename>)".
        Parameters:
        * line: Line from the distro release file. Must be a unicode string
                or a UTF-8 encoded byte string.
        Returns:
            A dictionary containing all information items.
        """
        if isinstance(line, bytes):
            line = line.decode('utf-8')
        # The pattern is matched against the *reversed* line so that the
        # trailing "version (codename)" suffix can be captured with a
        # simple regex; each captured group is reversed back below.
        matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(
            line.strip()[::-1])
        distro_info = {}
        if matches:
            # regexp ensures non-None
            distro_info['name'] = matches.group(3)[::-1]
            if matches.group(2):
                distro_info['version_id'] = matches.group(2)[::-1]
            if matches.group(1):
                distro_info['codename'] = matches.group(1)[::-1]
        elif line:
            # No recognizable structure; use the whole line as the name.
            distro_info['name'] = line.strip()
        return distro_info
_distro = LinuxDistribution()
def main():
    """Command-line entry point: print the distro information, either
    human-readable or (with --json) machine-readable."""
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    logger.addHandler(logging.StreamHandler(sys.stdout))

    parser = argparse.ArgumentParser(description="OS distro info tool")
    parser.add_argument(
        '--json',
        '-j',
        help="Output in machine readable format",
        action="store_true")
    args = parser.parse_args()

    if args.json:
        logger.info(json.dumps(info(), indent=4, sort_keys=True))
        return
    logger.info('Name: %s', name(pretty=True))
    logger.info('Version: %s', version(pretty=True))
    logger.info('Codename: %s', codename())


if __name__ == '__main__':
    main()
| [
"uzjuer@163.com"
] | uzjuer@163.com |
17209aba0ca3c937eb00693870bae23548aaa218 | 26e646d2ba356e70cb6206132fcfa62feb5db9cf | /restdemo/restdemo/myapp/models.py | fbc777d4a38fe1e8eeaa8094d315fece43261605 | [] | no_license | panzailiang/restdemo | b3922cf5fab38174495d729d622b94909cb66361 | 5f2de6b1a67428d01a7ab6a9a41c6019279ee91d | refs/heads/master | 2022-12-14T21:35:14.323076 | 2018-07-10T03:31:48 | 2018-07-10T03:31:48 | 140,371,655 | 0 | 0 | null | 2021-06-10T20:37:59 | 2018-07-10T03:23:26 | Python | UTF-8 | Python | false | false | 335 | py | from django.db import models
# Create your models here.
class Place(models.Model):
    """A physical location identified by its street address."""
    address = models.CharField(max_length=20)
class Club(Place):
    """A named club at a place (multi-table inheritance from Place)."""
    name = models.CharField(max_length=20)
class Resouce(models.Model):
    """A named resource that belongs to a Place.

    NOTE(review): the class name is misspelled ('Resouce' vs 'Resource');
    renaming would require a schema migration and caller updates, so it
    is deliberately left unchanged here.
    """
    place = models.ForeignKey(Place, on_delete=models.CASCADE)
    res_name = models.CharField(max_length=20)
"ppzliang@gmail.com"
] | ppzliang@gmail.com |
9b8f60e10101426cf4085bb322847c4502a0f1ce | 0360055002ec6b98c489ed8cf432bba220da1746 | /learning/learning_model.py | e4f47fa7e23f610c91bf98d6aa1f8a294a7023d9 | [] | no_license | BrandonCChan/texture_based_segmentation | 55a43312f033bd078e34c3bda3f3f1d3aeaff1c7 | d84e6afae2f67d8e38e087ede10277abbf1472d4 | refs/heads/master | 2021-01-21T08:15:31.718025 | 2017-03-28T00:58:15 | 2017-03-28T00:58:15 | 83,341,126 | 0 | 1 | null | 2017-03-28T00:58:16 | 2017-02-27T18:08:12 | Python | UTF-8 | Python | false | false | 2,214 | py | # Implementation of neural network machine learning for classification of tissue segmentation based on texture
# This uses Conda, Sci-Kit Learn for building models, and Numpy
# Please make sure these dependencies are installed to run the code.
#
# This is a test function to ensure that everything is installed properly.
#
# With a 4-class classification problem based on 3 attributes, aim to use a 3 layered perceptron to predict tissue type.
# This model will train using backpropagation.
# The final model will need to be unsupervised since data is not labelled, rather it aims to make a prediction without
# knowing the correct answer.
#
# Nuwan Perera, Brandon Chan, Mareena Mallory
import numpy as np
import pickle
from sklearn.neural_network import MLPClassifier
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
# Import file data
data_file = open('Manual_Segment_Short.csv')
data_file.readline()
data = np.loadtxt(data_file, delimiter=',')
# Split labels and training data
data_labels = data[:, 0]
RGB_data = data[:, 1:4]
# Normalize RGB data using L-2 (Euclidean) normalization across axis 1 (row based)
RGB_data = preprocessing.normalize(RGB_data, norm='l2', axis=1)
# Randomly sample dataset
train_data, test_data, train_labels, test_labels = train_test_split(RGB_data, data_labels, test_size=0.4, random_state=0)
# Instantiate MLP with stochastic gradient descent optimization methods as a solver, 3 layers - 9 nodes per layer and train model
clf = MLPClassifier(solver='sgd', alpha=1e-5, hidden_layer_sizes=(9,9,9), random_state=1, learning_rate='adaptive', max_iter=1000)
clf.fit(train_data, train_labels)
# Test model based on randomly sampled data
test_results = clf.predict(test_data)
# Calculate score accuracy to validate results of the model
score = 0
for i in range(len(test_data)):
if test_results[i] == test_labels[i]:
score += 1
# Calculate final score based on % = (# correct classifications) / (sample size)
score /= float(test_results.size)
# Convert to percentage
score *= 100
# Consistently returning ~87.7 %
print "Score:", score, "%"
# Serialize model using Pickle
with open('neuralnetwork.pkl', 'wb') as f:
pickle.dump(clf, f)
| [
"nuwan.perera@queensu.ca"
] | nuwan.perera@queensu.ca |
c1bd6b0e66ca3ab2c00311bb3fc0b6bb59ec1e37 | 0b8392c3fc6bcc004f750b194f6d8dc3a8985391 | /build/catkin_generated/order_packages.py | 0f5bc89301b89cda4f0bbbe2c7f2e0f8151eaaec | [] | no_license | knuros5/catin_joy_turtlebot | 94970f4302079c847d7e90bf2a1b2fd2d1154dc9 | c85877e512591e84f97f4f3d852cf0e4646a994a | refs/heads/master | 2020-06-04T06:51:12.386194 | 2019-06-17T11:22:18 | 2019-06-17T11:22:18 | 191,912,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | # generated from catkin/cmake/template/order_packages.context.py.in
# Machine-generated catkin build context: any hand edits are overwritten
# when the workspace is reconfigured.
source_root_dir = "/home/knuros5/catkin_ws/src"
# Semicolon-separated package lists; an empty string means "no filter".
whitelisted_packages = "".split(';') if "" != "" else []
blacklisted_packages = "".split(';') if "" != "" else []
# Underlying workspaces this one extends (devel space + installed ROS).
underlay_workspaces = "/home/knuros5/catkin_ws/devel;/opt/ros/kinetic".split(';') if "/home/knuros5/catkin_ws/devel;/opt/ros/kinetic" != "" else []
| [
"alswjd4047@naver.com"
] | alswjd4047@naver.com |
74da1d816b3b75bf23ae1d2dbf93e52b8e288b0f | 821135cfb5827f338d542d6824a75faaba47426f | /training_task_4_-_starts_with_s.py | fbc6a3bc2cb29a728dc5de1fce5d7b3c820906c8 | [] | no_license | LockeShots/Learning-Code | bd4617fa4129b98060d199b7290d191c8453798d | ae261449461c187c8e7168be14746a8902045525 | refs/heads/master | 2023-07-12T12:19:34.919379 | 2021-08-05T14:21:18 | 2021-08-05T14:21:18 | 287,728,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | #Ask user for statement
#If statement begins with 's', print to file
def print_input(text=None):
    """Append *text* (default: the module-level ``statement``) to
    s-statement.txt, one statement per line."""
    if text is None:
        text = statement
    # Context manager closes the file even if write() raises; the
    # original left the handle open.
    with open("s-statement.txt", 'a') as f:
        f.write(text + "\n")


statement = input("Enter Statement: ")
# Record only statements that start with a lowercase 's'.
if statement.startswith('s'):
    print_input()
# Otherwise simply fall through: the original `else: exit` was a no-op
# expression statement (the builtin was referenced, never called).
"MattJones@harmonyinautumn.com"
] | MattJones@harmonyinautumn.com |
b2aed38c3ec221cfd3950f19fc92013cbbd8fc5f | d22bafecf93428eacad417144d261c4afd86df8c | /magic_functions.py | 01d46d13daef635ef84334d3da04219f33cfe31b | [] | no_license | noamtoeg/21_06_19 | 89104f2668d0b93a9bb56897b735d42e8c48472d | 8d7d472a9a1a93a2284e6da931ec63dc668d5b96 | refs/heads/master | 2020-06-07T15:38:08.294081 | 2019-10-04T09:58:42 | 2019-10-04T09:58:42 | 193,050,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,204 | py | '''
class Person():
def __init__(self , name = 'default' ):
self.name = name
noam = Person()
itay = Person()
noam.name= 'Noam'
itay.name = 'Itay'
print(noam.name)
print(itay.name)
class Point():
def __init__(self , x=0, y=0 ):
self.x = x
self.y = y
p1 = Point(3.5,9.30)
print(p1.x)
'''
class MobilePhone():
    """A phone identified by brand and model, with a price.

    Demonstrates operator overloading: ``==`` compares brand and model
    (price ignored), ``+`` sums prices, ``>`` compares prices.
    """
    def __init__(self, brand, model, price):
        self.brand = brand
        self.model = model
        self.price = price
    def __eq__(self, other):
        # Equal when both brand and model match; price is deliberately
        # not part of the identity.
        return self.model == other.model and self.brand == other.brand
    def __add__(self, other):
        """Return the combined price of the two phones."""
        return self.price + other.price
    def __gt__(self, other):
        # Fix: the original line read `def __gt__(self):__(self, other):`,
        # a syntax error; the intended signature takes `other`.
        return self.price > other.price
    def __repr__(self):
        return f'Phone({self.brand}, ' \
               f'{self.model})'
# Demo: equality ignores price; '+' sums prices.
Phone1 = MobilePhone('Samsung','S8',500)
Phone2 = MobilePhone('Samsung','A8',370)
Phone3 = MobilePhone('Samsung','S8',550)
print(Phone1 == Phone2)  # False: different model
print(Phone1 == Phone3)  # True: same brand and model, price ignored
print(Phone3 == Phone2)  # False: different model
print(Phone1 + Phone3)   # 1050: sum of the two prices
| [
"noreply@github.com"
] | noamtoeg.noreply@github.com |
bc9d969532d16be769e237943030facdb30ca407 | 731c17913b5ff61190f938909e1a74bae18285c9 | /tf_agents/agents/sac/tanh_normal_projection_network_test.py | 7e0a439c969d84cc822e3458cdbe3dff3e9e3f54 | [
"Apache-2.0"
] | permissive | isabella232/agents | d3055ca0a4d593e2251801264354fb7193c8f99f | b2ed02d20c43a4b789a4711f4653e8421f8ba526 | refs/heads/master | 2023-03-10T16:14:46.426288 | 2020-12-29T20:32:45 | 2020-12-29T20:33:06 | 326,205,625 | 0 | 0 | Apache-2.0 | 2021-02-24T00:52:40 | 2021-01-02T14:57:47 | null | UTF-8 | Python | false | false | 2,584 | py | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_agents.networks.normal_projection_network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
import tensorflow_probability as tfp
from tf_agents.agents.sac import tanh_normal_projection_network
from tf_agents.specs import tensor_spec
def _get_inputs(batch_size, num_input_dims):
  """Return a uniform random batch of shape [batch_size, num_input_dims]."""
  return tf.random.uniform(shape=[batch_size, num_input_dims])
class TanhNormalProjectionNetworkTest(tf.test.TestCase):
  """Unit tests for TanhNormalProjectionNetwork."""
  def testBuild(self):
    # Bounded action spec: two float actions in [0, 1].
    output_spec = tensor_spec.BoundedTensorSpec([2], tf.float32, 0, 1)
    network = tanh_normal_projection_network.TanhNormalProjectionNetwork(
        output_spec)
    inputs = _get_inputs(batch_size=3, num_input_dims=5)
    distribution, _ = network(inputs, outer_rank=1)
    self.evaluate(tf.compat.v1.global_variables_initializer())
    # The squashed distribution wraps a MultivariateNormalDiag base.
    self.assertEqual(tfp.distributions.MultivariateNormalDiag,
                     type(distribution.input_distribution))
    means = distribution.input_distribution.loc
    stds = distribution.input_distribution.scale
    # loc is [batch, dims]; scale's shape carries the spec shape twice
    # (a [batch, dims, dims] operator), hence the *2 suffix below.
    self.assertAllEqual(means.shape.as_list(),
                        [3] + output_spec.shape.as_list())
    self.assertAllEqual(stds.shape.as_list(),
                        [3] + output_spec.shape.as_list()*2)
  def testTrainableVariables(self):
    output_spec = tensor_spec.BoundedTensorSpec([2], tf.float32, 0, 1)
    network = tanh_normal_projection_network.TanhNormalProjectionNetwork(
        output_spec)
    inputs = _get_inputs(batch_size=3, num_input_dims=5)
    network(inputs, outer_rank=1)
    self.evaluate(tf.compat.v1.global_variables_initializer())
    # Dense kernel and bias.
    self.assertEqual(2, len(network.trainable_variables))
    # Kernel maps 5 input features to 4 outputs — presumably the mean and
    # std parameters for each of the 2 actions (TODO confirm).
    self.assertEqual((5, 4), network.trainable_variables[0].shape)
    self.assertEqual((4,), network.trainable_variables[1].shape)
self.assertEqual((4,), network.trainable_variables[1].shape)
if __name__ == '__main__':
tf.test.main()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
87244d22a378bbd81712fccb86963ebb2ea3da92 | f4b964329061217d2e07580724520906c6737999 | /edx/polygon.py | 760f7fd3a0eb3aefe7f0e59ad1a9e605a8419d67 | [] | no_license | gr3edo/program_learning | 0e2c5eb7ec06a66d4ed834084ee9244e1b868073 | a893bb8e4b049410cd53002bd6604f03d437d54b | refs/heads/master | 2021-01-16T18:31:37.759711 | 2016-11-04T11:43:59 | 2016-11-04T11:43:59 | 68,383,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | import math
def polysum(n,s):
area = (0.25 * n * (s**2)) / math.tan(math.pi/n)
perimeter = n * s
return round(area + (perimeter**2),4)
| [
"i.mitroshyn@synergetica.net"
] | i.mitroshyn@synergetica.net |
f258a219f8446de388d4977228e418e4f63b53f1 | 031b24455b953907a0f98778931ee8a03c3c4b6c | /pacman103/front/common/delay_projection_subedge.py | 20ba4ca1e9194c2bc870a136e70f2f2169b9f2a1 | [] | no_license | BRML/HBP-spinnaker-cerebellum | 7e5f69c05d0e51f79442635df58815768f20e6bc | 7fc3eb5c486df66720d227e0e422cbab65c08885 | refs/heads/master | 2020-12-25T23:47:09.416213 | 2015-06-26T09:45:31 | 2015-06-26T09:45:31 | 38,686,607 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,612 | py | from pacman103.front.common.projection_subedge import ProjectionSubedge
from pacman103.front.common.synaptic_list import SynapticList
from pacman103.front.common.synapse_row_info import SynapseRowInfo
import logging
logger = logging.getLogger(__name__)
class DelayProjectionSubedge(ProjectionSubedge):
def __init__(self, edge, presubvertex, postsubvertex):
super(DelayProjectionSubedge, self).__init__(edge, presubvertex,
postsubvertex)
self.synapse_sublist = None
self.synapse_delay_rows = None
def get_synapse_sublist(self):
"""
Gets the synapse list for this subedge
"""
if self.synapse_sublist is None:
synapse_sublist = self.edge.synapse_list.create_atom_sublist(
self.presubvertex.lo_atom, self.presubvertex.hi_atom,
self.postsubvertex.lo_atom, self.postsubvertex.hi_atom)
# if logger.isEnabledFor("debug"):
# logger.debug("Original Synapse List rows:")
# orig_list = synapse_sublist.get_rows()
# for i in range(len(orig_list)):
# logger.debug("{}: {}".format(i, orig_list[i]))
if synapse_sublist.get_n_rows() > 256:
raise Exception(
"Delay sub-vertices can only support up to"
+ " 256 incoming neurons!")
full_delay_list = list()
for i in range(0, self.edge.num_delay_stages):
min_delay = (i * self.edge.max_delay_per_neuron)
max_delay = min_delay + self.edge.max_delay_per_neuron
delay_list = synapse_sublist.get_delay_sublist(min_delay,
max_delay)
# if logger.isEnabledFor("debug"):
# logger.debug(" Rows for delays {} - {}:".format(
# min_delay, max_delay))
# for i in range(len(delay_list)):
# logger.debug("{}: {}".format(i, delay_list[i]))
full_delay_list.extend(delay_list)
# Add extra rows for the "missing" items, up to 256
if (i + 1) < self.edge.num_delay_stages:
for _ in range(0, 256 - len(delay_list)):
full_delay_list.append(SynapseRowInfo([], [], [], []))
self.synapse_sublist = SynapticList(full_delay_list)
self.synapse_delay_rows = len(full_delay_list)
return self.synapse_sublist
def get_synaptic_data(self, controller, delay_offset):
delay_list = self.postsubvertex.vertex.get_synaptic_data(controller,
self.presubvertex, self.synapse_delay_rows, self.postsubvertex,
self.edge.synapse_row_io).get_rows()
rows = list()
for pre_atom in range(0, self.presubvertex.n_atoms):
rows.append(SynapseRowInfo([], [], [], []))
for i in range(0, self.edge.num_delay_stages):
min_delay = (i * self.edge.max_delay_per_neuron) + delay_offset
list_offset = i * 256
for pre_atom in range(0, self.presubvertex.n_atoms):
row = delay_list[list_offset + pre_atom]
rows[pre_atom].append(row, min_delay=min_delay)
return SynapticList(rows)
def free_sublist(self):
"""
Indicates that the list will not be needed again
"""
self.synapse_sublist = None
| [
"dr.christoph.richter@gmail.com"
] | dr.christoph.richter@gmail.com |
261a7849b593ba0bb2e41586c884417a74cc6a8a | bf57265bb30e5d0d867427fa1bca78a93e9caf33 | /backend/application.py | 821f10b58824b1e1588e2900808b0a70d6ea8b41 | [] | no_license | eriytt/RoomWithAView | a89da501f3be96d7211d2b570bc6e6ce51ad9ae8 | c1fc767f763f5266ceea6918e749917c5fa36d5c | refs/heads/master | 2020-04-03T04:16:09.108589 | 2019-03-16T00:17:38 | 2019-03-16T00:17:38 | 155,008,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,142 | py | from os.path import join as pjoin
import subprocess
import json
import flask
from flask import Flask, request, Response
from flask_sockets import Sockets
import logging
logging.basicConfig(level=logging.DEBUG)
db_dir = "./db"
application = Flask(__name__, static_folder="static", static_url_path='')
sockets = Sockets(application)
socket = None
@application.route("/modelxml", methods=['GET'])
def get_model_mesh_xml():
return flask.send_from_directory('db', 'complex.xml')
@application.route("/model/sofa_seat.000.mesh.xml", methods=['GET'])
def get_model_sofa_mesh_xml():
return flask.send_from_directory('db', 'sofa_seat.000.mesh.xml')
@application.route("/model/sofa_seat.000.mesh.mesh", methods=['GET'])
def get_model_sofa_mesh_mesh():
return flask.send_from_directory('db', 'sofa_seat.000.mesh.mesh')
@application.route("/model/<string:object_name>/position", methods=['PUT'])
def put_object_position(object_name):
if not request.is_json:
return {"message": "Not JSON"}, 400
j = request.get_json()
global socket
if socket:
socket.send("update: " + json.dumps({object_name: {"position": j}}))
return ""
@application.route("/model/<string:object_name>/material", methods=['PUT'])
def put_object_material(object_name):
if not request.is_json:
return {"message": "Not JSON"}, 400
j = request.get_json()
global socket
if socket:
socket.send(
"update: " + json.dumps({object_name: {"materials": {object_name: j}}}))
return ""
@application.route("/model", methods=['GET', 'POST'])
def model():
if request.method == 'GET':
return get_model()
elif request.method == 'POST':
return post_model()
def get_model():
return flask.send_from_directory('db', 'complex.mesh')
def post_model():
if not request.is_json:
return {"message": "Not JSON"}, 400
xml = request.get_json()['model']
# TODO: validate XML?
xml_file = pjoin(db_dir, "model.xml")
mesh_file = pjoin(db_dir, "model.mesh")
convert_log_filename = pjoin(db_dir, "convert.log")
with open(xml_file, 'w') as f:
f.write(xml)
# TODO: stdout and stderr
# TODO: check error code
subprocess.call(['OgreXMLConverter',
'-E', 'big', # Always an ARM device?
'-log', convert_log_filename,
xml_file, mesh_file])
# TODO: return output if debug
return ""
@application.route("/model/meta", methods=['GET'])
def get_meta():
with open(pjoin(db_dir, "meta-complex.json")) as f:
return Response(f.read(), mimetype='application/json')
@application.route("/model/furniture", methods=['GET'])
def get_furniture():
with open(pjoin(db_dir, "furniture.json")) as f:
return Response(f.read(), mimetype='application/json')
@application.route("/model/furniture/<string:object_name>", methods=['GET'])
def get_furniture_files(object_name):
return flask.send_from_directory('db', object_name)
@application.route("/model/meta", methods=['POST'])
def post_meta():
if not request.is_json:
return {"message": "Not JSON"}, 400
print(request.get_data())
with open(pjoin(db_dir, "meta.json"), 'wb') as f:
f.write(request.get_data())
global socket
if socket:
socket.send('update')
return ""
@application.route('/')
def root():
return application.send_static_file('index.html')
@sockets.route('/notifications')
def notifications(ws):
global socket
print("Got websocket connection")
socket = ws
socket.send('update')
while not ws.closed:
message = ws.receive()
if message == None:
continue
socket = None
ws.close(code=1002, message='')
# print("Sending message:", message)
# ws.send(message)
if __name__ == "__main__":
from gevent import pywsgi
from geventwebsocket.handler import WebSocketHandler
server = pywsgi.WSGIServer(
('', 5000), application, handler_class=WebSocketHandler)
server.serve_forever()
#application.run(host = '0.0.0.0')
| [
"erik.ytterberg@learningwell.se"
] | erik.ytterberg@learningwell.se |
2404d2650ba67d68a7c6cb8c024c650b6e344eee | 5b43fd574d0b3a8c0a3ba8e98663f0313f6ea024 | /Regression/RegressionTemplate.py | dd726e6616e5c40a0421b43959484eb9ec3c5d19 | [] | no_license | datcn1212/Basic-Machine-Learning-Models | 55777cac0618153aeddd4b9ca24ab7642d153e7c | d2fc4eaa628a776743a65ad184dd0c6aa4831506 | refs/heads/master | 2023-03-21T07:29:32.017078 | 2019-12-13T14:39:20 | 2019-12-13T14:39:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,334 | py | # -*- coding: utf-8 -*-
"""
Created on Wed May 29 01:44:37 2019
@author: PALLAVI
"""
#importing libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
dataset=pd.read_csv('Position_Salaries.csv')
x=dataset.iloc[:,1:2].values
y=dataset.iloc[:,2].values
#splitting the dataset into the trainign and testing data
'''
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2,random_state=0)'''
'''#feature Scaling
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
x_train=sc.fit_transform(x_train)
x_test=sc.transform(x_test)'''
#fitting the regression model to the dataset
#create your regressor here
#predicting a new result with regression model
y_pred=regressor.predict(6.5)
#visalizing the Regression Results
plt.scatter(x,y,color='red')
plt.plot(x,regressor.predict(x),color='blue')
plt.title('truth or bluff( Regression model)')
plt.xlabel('level')
plt.ylabel('salary')
plt.show()
#visalizing the Regression Results (for higher resolution and smoother curve)
x_grid=np.arange(min(x),max(x),0.1)
x_grid=x_grid.reshape(len(x_grid),1)
plt.scatter(x,y,color='red')
plt.plot(x_grid,regressor.predict(x_grid),color='blue')
plt.title('truth or bluff(Regression Model)')
plt.xlabel('level')
plt.ylabel('salary')
plt.show()
| [
"wacky.pallavi@gmail.com"
] | wacky.pallavi@gmail.com |
9e76f74404193e488f418c3ebfdc5622d20f0621 | b52de591daf1300be16169717efef38ae29746e6 | /run.py | b293a510402e2a7f56daf5f32ac9dc4efb3aab0a | [] | no_license | belibak/bash-history-dates | 1e967cb550c8600de3a79a0c6e168797c2dd0dbf | 036d0562f38675978b7ac9e34971206c5231dc92 | refs/heads/master | 2021-01-01T04:08:52.079781 | 2017-07-20T08:49:19 | 2017-07-20T08:49:19 | 97,132,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | from datetime import datetime
def read_history_file(filename):
file = open(filename, 'r')
return file
def write_history_file(filename, lst):
with open(filename, 'w') as file:
for str in lst:
if str[0] == '#':
timestamp = int(str[1:])
dt = datetime.fromtimestamp(timestamp)
date_string = '%02d.%02d.%02d %02d:%02d\n' %(dt.day, dt.month, dt.year, dt.hour, dt.minute)
print('%s %s' %(timestamp, date_string))
file.write('#%s' %(date_string))
else:
file.write(str + '\n')
file.close()
def timestamps_rewrite(file_obj):
lst = [i for i in file_obj.readlines()]
file_obj.close()
write_history_file('/tmp/bash_history', lst)
if __name__ == "__main__":
timestamps_rewrite(read_history_file('/home/white/tmp/mikepero/root_bash_history'))
| [
"alex.rus@core-tech.ru"
] | alex.rus@core-tech.ru |
ad900fadf5913b6929a626b995320f911c90ae0b | e825640a5d087fb0e14267111dda4fe010224a3a | /multiapp/pages/instructions.py | e6770c94ad147a578d9720624af175bab345853a | [] | no_license | Nhiemth1985/PyPortfolioAnalytics | c2ee64a47fb58654528353cb8796a96eb6ed8d60 | 7f210c98b1dcc6f2a157372b34664d8d163a2874 | refs/heads/master | 2023-09-03T15:22:18.493060 | 2021-10-17T18:22:58 | 2021-10-17T18:22:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 62 | py | import streamlit as st
def app():
st.write('Coming soon') | [
"simone.precicchiani@gmail.com"
] | simone.precicchiani@gmail.com |
ace5b07e131d2d71c023e4c8ac26ec80e2edf7e9 | 212d720d3493b4a9b02d8215f0e99bec014a3c86 | /rxpy-example/2.15_operator_to_dict.py | 0a6bc60fa6d8e30c35a4c8a35248f8af7d86f460 | [] | no_license | yqfang/rx-libs | 519c4dc19f1fcca34e1279db29f8b6db9c99bf9b | 8562e9f9d50b6678c789d64e768091cd57be7bcb | refs/heads/master | 2020-12-02T07:53:41.995863 | 2017-09-13T08:29:30 | 2017-09-13T08:29:30 | 96,743,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | from __future__ import print_function
from rx import Observable, Observer
Observable.from_(["Alpha", "Beta", "Gamma", "Delta", "Epsilon"]) \
.to_dict(lambda s: s[0]) \
.subscribe(lambda i: print(i))
Observable.from_(["Alpha", "Beta", "Gamma", "Delta", "Epsilon"]) \
.to_dict(lambda s: s[0], lambda s: len(s)) \
.subscribe(lambda i: print(i))
| [
"yqfang@sysnew.com"
] | yqfang@sysnew.com |
33ce0b91644687a6e35b39f5b187a5528cf6c711 | 725bc937d3c82d45344e0baea5d3c1705bdaa2d1 | /test/regression/NOX_QS/NOX_QS_Newton_NoPrec_np4/NOX_QS_Newton_NoPrec.py | d7913bbf948fa197b9f8d07a7cdbcb511d835a26 | [] | no_license | pietrodantuono/peridigm | 2ce5ba747a4e9a661bf0a31db9e248d2e30993f6 | 78836ebefb7fe84b6b244e851b7a11c13d08b7a2 | refs/heads/master | 2022-01-02T22:38:56.063348 | 2021-12-15T16:14:42 | 2021-12-15T16:14:42 | 54,479,825 | 1 | 0 | null | 2016-03-22T14:04:20 | 2016-03-22T14:04:20 | null | UTF-8 | Python | false | false | 1,773 | py | #! /usr/bin/env python
import sys
import os
import re
import glob
from subprocess import Popen
test_dir = "NOX_QS/NOX_QS_Newton_NoPrec_np4"
base_name = "NOX_QS_Newton_NoPrec"
if __name__ == "__main__":
result = 0
# log file will be dumped if verbose option is given
verbose = False
if "-verbose" in sys.argv:
verbose = True
# change to the specified test directory
os.chdir(test_dir)
# open log file
log_file_name = base_name + ".log"
if os.path.exists(log_file_name):
os.remove(log_file_name)
logfile = open(log_file_name, 'w')
# remove old output files, if any
files_to_remove = glob.glob('*.e*')
for file in os.listdir(os.getcwd()):
if file in files_to_remove:
os.remove(file)
# run Peridigm
command = ["mpiexec", "-np", "4", "../../../../src/Peridigm", "../"+base_name+".peridigm"]
p = Popen(command, stdout=logfile, stderr=logfile)
return_code = p.wait()
if return_code != 0:
result = return_code
# compare output files against gold files
command = ["../../../../scripts/epu", "-p", "4", base_name]
p = Popen(command, stdout=logfile, stderr=logfile)
return_code = p.wait()
if return_code != 0:
result = return_code
command = ["../../../../scripts/exodiff", \
"-stat", \
"-f", \
"../NOX_QS.comp", \
base_name+".e", \
"../"+base_name+"_gold.e"]
p = Popen(command, stdout=logfile, stderr=logfile)
return_code = p.wait()
if return_code != 0:
result = return_code
logfile.close()
# dump the output if the user requested verbose
if verbose == True:
os.system("cat " + log_file_name)
sys.exit(result)
| [
"djlittl@sandia.gov"
] | djlittl@sandia.gov |
504dcd6b031b80b87d353d6fb8c1fe63157987f6 | f4a0b5a834b47bfee2f89c318e97b9f4ae11a968 | /lib/collision_detector.py | 47b488f9603004391d92b4af2a3d8ddc8ce1554c | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | Marcus-Peterson/ros | 5f0937dcd28198cd9be1025144d0bcc3cf69da31 | 77b1361e78f68f00ba2d3e3db908bb5ce0f973f5 | refs/heads/master | 2023-07-16T03:15:51.412658 | 2021-06-10T10:53:09 | 2021-06-10T10:53:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,551 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 by Murray Altheim. All rights reserved. This file is part
# of the Robot Operating System project, released under the MIT License. Please
# see the LICENSE file included as part of this package.
#
# author: altheim
# created: 2020-03-31
# modified: 2021-02-28
#
from colorama import init, Fore, Style
init()
try:
from gpiozero import DigitalInputDevice
print('successfully imported gpiozero.')
except Exception:
print('unable to import gpiozero.')
from lib.logger import Logger, Level
from lib.event import Event
from lib.message import Message
# ..............................................................................
class CollisionDetector():
'''
Uses an 15cm infrared sensor to scan for an imminent collision at the front
of the robot, particularly with the mast. This class may be extended to
include multiple sensors that send the same COLLISION_DETECT message.
'''
def __init__(self, config, message_factory, message_bus, level):
self._log = Logger('collision', Level.INFO)
if config is None:
raise ValueError('no configuration provided.')
self._message_factory = message_factory
self._message_bus = message_bus
_config = config['ros'].get('collision_detect')
_pin = _config.get('pin')
self._sensor = DigitalInputDevice(_pin, bounce_time=0.2, pull_up=False)
self._sensor.when_activated = self._activated
self._sensor.when_deactivated = self._deactivated
self._disabling = False
self._enabled = False
self._closed = False
# arm behaviour
self._arm_movement_degree_step = 5.0
self._arm_up_delay = 0.09
self._arm_down_delay = 0.04
self._log.info('ready.')
# ..........................................................................
def _activated(self):
'''
The default function called when the sensor is activated.
'''
if self._enabled:
self._log.info(Fore.YELLOW + 'detected mast sensor!')
self._message_bus.handle(self._message_factory.get_message(Event.COLLISION_DETECT, True))
else:
self._log.info('collision detection not enabled.')
# ..........................................................................
def _deactivated(self):
'''
The default function called when the sensor is deactivated.
'''
if self._enabled:
self._log.info('deactivated collision detection.')
else:
self._log.debug('collision detection not enabled.')
# ..........................................................................
def enable(self):
self._log.debug('enabling...')
if self._closed:
self._log.warning('cannot enable: closed.')
return
self._enabled = True
self._log.debug('enabled.')
# ..........................................................................
def disable(self):
if self._disabling:
self._log.warning('already disabling.')
else:
self._disabling = True
self._enabled = False
self._log.debug('disabling...')
self._disabling = False
self._log.debug('disabled.')
# ..........................................................................
def close(self):
self.disable()
self._closed = True
#EOF
| [
"ichiro.furusato@gmail.com"
] | ichiro.furusato@gmail.com |
6b86e44fb616e0cabf755125a793806ccccfa7c9 | 66e7866f3d23692c74076a25921e87340e21d50e | /src/preprocess/execute.py | c22ac69b24257d4d523828d434122b4b7dc10dcb | [
"MIT"
] | permissive | WSP-LAB/Montage | c729d5e57f81ab2d57549bb89f56da370203f90f | a3d7466bf1e540d227ed045abf3b5423318d237b | refs/heads/master | 2022-11-17T10:49:14.417239 | 2022-11-16T04:38:56 | 2022-11-16T04:38:56 | 210,642,275 | 87 | 14 | MIT | 2022-11-16T04:38:57 | 2019-09-24T15:55:38 | Python | UTF-8 | Python | false | false | 2,411 | py | import os
import threading
from subprocess import PIPE
from subprocess import Popen
from utils import kill_proc
from utils import list_dir
from utils import make_dir
from utils import pool_map
from utils import read
from utils import write
from utils.logger import print_msg
class Executor:
def __init__(self, conf):
self._conf = conf
def const_log_path(self, js_path, log_dir):
log_name = os.path.basename(js_path)
log_name = log_name.split('.')[0]
return os.path.join(log_dir, log_name)
def execute(self, proc, log_path, timeout):
timer = threading.Timer(timeout,
lambda p: kill_proc(p), [proc])
timer.start()
stdout, stderr = proc.communicate()
ret = proc.returncode
self.write_log(log_path, stdout, stderr, ret)
timer.cancel()
def run(self, js_path, cwd):
cmd = [self._conf.eng_path]
cmd += self._conf.opt
cmd += [js_path]
proc = Popen(cmd, cwd=cwd, stdout=PIPE, stderr=PIPE)
log_path = self.const_log_path(js_path,
self._conf.log_dir)
self.execute(proc, log_path, self._conf.timeout)
def write_log(self, log_path, stdout, stderr, ret):
log = b'\n============== STDOUT ===============\n'
log += stdout
log += b'\n============== STDERR ===============\n'
log += stderr
log += b'\nMONTAGE_RETURN: %d' % (ret)
write(log_path, log)
def exec_eng(js_path, conf):
tmp_js_path = rewrite_file(js_path, conf.data_dir)
executor = Executor(conf)
cwd = os.path.dirname(js_path)
executor.run(tmp_js_path, cwd)
os.remove(tmp_js_path)
def main(pool, conf):
make_dir(conf.log_dir)
js_list = []
for js in list_dir(conf.seed_dir):
if (js.endswith('.js') and
os.path.getsize(js) < 30 * 1024): # Excludes JS over 3KB
js_list += [js]
num_js = len(js_list)
msg = 'Start executing %d JS files' % (num_js)
print_msg(msg, 'INFO')
pool_map(pool, exec_eng, js_list, conf=conf)
def rewrite_file(js_path, tmp_dir):
dir_path = os.path.dirname(js_path)
PREFIX = b'if(typeof load == \'undefined\') load = function(js_path){WScript.LoadScriptFile(\'%s/\'.concat(js_path));};'
PREFIX = PREFIX % dir_path.encode('utf-8')
code = read(js_path)
code = b'\n'.join([PREFIX, code])
js_name = os.path.basename(js_path)
tmp_js_path = os.path.join(tmp_dir, js_name)
write(tmp_js_path, code)
return tmp_js_path
| [
"sevendays37@gmail.com"
] | sevendays37@gmail.com |
0d306e05c16e7699c515ab6dca5c332e859f20d9 | abd0caaed0affe1a98efe5de654846390cf0efc5 | /src/bit_description/bit_robotcar/scripts/hand_show/old/hand_mainwindow02.py | 92bd4a7d5d2d9ebd436b5762f43040a8d1dbbc1f | [] | no_license | GraceJary/bit_mbzirc | 35294f34f019370ee9c77fcc5fba5eeb9badbb81 | 6f56d2e1c0130da1131391407993af9ce904bc73 | refs/heads/master | 2020-08-30T02:31:55.647558 | 2019-10-29T01:13:35 | 2019-10-29T01:13:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | # -*- coding: utf-8 -*-
from old import hand_jt_angle02
from PyQt5 import QtWidgets
import sys
app=QtWidgets.QApplication(sys.argv) #主应用
MainWindow=QtWidgets.QMainWindow()#初始化一个主窗口
ui= hand_jt_angle02.Ui_MainWindow(MainWindow)#用界面里的类实例化一个窗口
MainWindow.show()
sys.exit(app.exec_())
| [
"1337875013@qq.com"
] | 1337875013@qq.com |
40a310c295297236c8f23003e4612414cc2c870e | 81abfe7b33a70561a1676bdefadaf5fb0aa1f163 | /django/django_intro/dojosurvey/dojosurveyApp/views.py | bacea2ae978a69f50a4ddbbf8a7bfffe90f31f1a | [] | no_license | patelbhoomi2204/python_stack | 83f19b0cd1f3f3960bf1ca0f899024f9b080343f | ee8e37ad94a6e4c20f3c146119a3917454df1540 | refs/heads/main | 2023-01-19T14:58:27.978103 | 2020-11-26T23:38:30 | 2020-11-26T23:38:30 | 307,217,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | from django.shortcuts import render, HttpResponse, redirect
def index(request):
print(request.method)
return render(request, "form.html")
def formInfo(request):
print(request.method)
print(request.POST)
print(request.POST['name'])
request.session['forminfo'] = request.POST
return redirect('/success')
def showResults(request):
print('request.post in the showResults function', request.POST)
context = {
'formDetails' : request.POST
}
return render(request, "result.html")
| [
"patel.bhoomi2204@gmail.com"
] | patel.bhoomi2204@gmail.com |
b0730a6fc86fe4d3fe52fa22c024ebe71dd5de75 | 1dfa0ed638e0ca7e2a03b40d4f5e1daef0df67fa | /vcf2structure.py | 4b0e224d6f5a03237256518cee9a721d4baa517e | [
"MIT"
] | permissive | swang8/Perl_scripts_misc | fe5baa0e0505fd1415cda9cdbadc8b171b54851b | f227e18f846f792d6e9aa757c42aa44fdf7cd7c0 | refs/heads/master | 2021-01-17T19:26:07.775672 | 2020-08-03T21:58:12 | 2020-08-03T21:58:12 | 40,141,579 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,374 | py | #!/usr/bin/env python2
#import modules
from __future__ import print_function
import argparse
import sys
import os
#third parties module
import vcf # pip install pyVCF
# this file convert a vcf file into a structure file
#FUNCTIONS
def errprint(*args, **kwargs):
''' print to stderr not stdout'''
print(*args, file=sys.stderr, **kwargs)
#parser
parser = argparse.ArgumentParser() # add the parser
parser.add_argument("input",help="input VCF file") # add the parser
parser.add_argument("output",help="output STRUCTURE DATA file") # add the parser
args = parser.parse_args()
dict_alleles = {"0/0":"11","0/1":"12","1/0":"12","1/1":"22","./.":"-9"}
input_vcf=vcf.Reader(fsock=None, filename=args.input, compressed=False, prepend_chr="False", strict_whitespace=False)#open the vcf parser
list_snps = []
nsites = 0
gen_dict = {ind:[] for ind in input_vcf.samples }
#store all the genotypes and loci names
for site in input_vcf:
list_snps.append( site.CHROM+"_"+str(site.POS)) # chr_pos
for i in range(len(gen_dict.keys())):
gen_dict[site.samples[i].sample].append(dict_alleles[site.samples[i]["GT"]])
#Write the strcture file
output = open(args.output,"w")
output.write("\t".join(list_snps)+"\n")
for ind in gen_dict.keys():
#print (gen_dict[ind])
#print (ind)
output.write("\t".join([ind]+gen_dict[ind])+"\n")
output.close()
| [
"shichen.wang@max.txgen.tamu.edu"
] | shichen.wang@max.txgen.tamu.edu |
aed7b4be1ccfaaebd86b703894655fde91911323 | ea66c1623163a95f518d115bf1e1f87a47b7d564 | /main.py | 58a2b5e72ca97d969d1ee18e33c833c801fabc5b | [] | no_license | tusqasi/circuits | f9ee83ec42b7da3f200a59c1006a38f2c0b63274 | 172770d12c473108db3a297d08c45e94741f81e4 | refs/heads/master | 2023-06-13T05:37:35.860082 | 2021-06-29T03:50:15 | 2021-06-29T03:50:15 | 380,531,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | from components import Battery, Resistor, Helpers
def main():
b = Battery(10)
r2 = Resistor(2)
r1 = Resistor(1, r2)
b.next_component = r1
r2.next_component = b
print(Helpers.calculate_current_drawn(b))
if __name__ == "__main__":
main()
| [
"2002.tushar.kuntawar@gmail.com"
] | 2002.tushar.kuntawar@gmail.com |
35c8bbadea7912a45f80c67b0bda1e2a9b39bce0 | 9ba30f939b79df5bc8ea8ab97196693e10d45605 | /airflow/contrib/operators/dataflow_operator.py | 8f61e18ab5d6a61a18b1dfc296650253e833f727 | [
"Apache-2.0"
] | permissive | suchenzang/incubator-airflow | 5d09f1d1dfaf3f668408bd162bc275e1c112fbb7 | 5d90d132af4b5a455c6f3bb43817f0e46195cf72 | refs/heads/master | 2021-01-18T07:06:06.616698 | 2016-07-19T06:33:47 | 2016-07-19T06:33:47 | 63,733,649 | 2 | 0 | null | 2016-07-19T23:07:06 | 2016-07-19T23:07:06 | null | UTF-8 | Python | false | false | 3,680 | py | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from airflow.contrib.hooks.gcp_dataflow_hook import DataFlowHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class DataFlowJavaOperator(BaseOperator):
"""
Start a Java Cloud DataFlow batch job. The parameters of the operation
will be passed to the job.
It's a good practice to define dataflow_* parameters in the default_args of the dag
like the project, zone and staging location.
```
default_args = {
'dataflow_default_options': {
'project': 'my-gcp-project',
'zone': 'europe-west1-d',
'stagingLocation': 'gs://my-staging-bucket/staging/'
}
}
```
You need to pass the path to your dataflow as a file reference with the ``jar``
parameter, the jar needs to be a self executing jar. Use ``options`` to pass on
options to your job.
```
t1 = DataFlowOperation(
task_id='datapflow_example',
jar='{{var.value.gcp_dataflow_base}}pipeline/build/libs/pipeline-example-1.0.jar',
options={
'autoscalingAlgorithm': 'BASIC',
'maxNumWorkers': '50',
'start': '{{ds}}',
'partitionType': 'DAY'
},
dag=my-dag)
```
Both ``jar`` and ``options`` are templated so you can use variables in them.
"""
template_fields = ['options', 'jar']
ui_color = '#0273d4'
@apply_defaults
def __init__(
self,
jar,
dataflow_default_options={},
options={},
gcp_conn_id='google_cloud_default',
delegate_to=None,
*args,
**kwargs):
"""
Create a new DataFlowJavaOperator.
For more detail on about job submission have a look at the reference:
https://cloud.google.com/dataflow/pipelines/specifying-exec-params
:param jar: The reference to a self executing DataFlow jar.
:type jar: string
:param dataflow_default_options: Map of default job options.
:type dataflow_default_options: dict
:param options: Map of job specific options.
:type options: dict
:param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
:type gcp_conn_id: string
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have domain-wide
delegation enabled.
:type delegate_to: string
"""
super(DataFlowJavaOperator, self).__init__(*args, **kwargs)
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.jar = jar
self.dataflow_default_options = dataflow_default_options
self.options = options
def execute(self, context):
hook = DataFlowHook(gcp_conn_id=self.gcp_conn_id, delegate_to=self.delegate_to)
dataflow_options = copy.copy(self.dataflow_default_options)
dataflow_options.update(self.options)
hook.start_java_dataflow(self.task_id, dataflow_options, self.jar)
| [
"bolke@xs4all.nl"
] | bolke@xs4all.nl |
f6f338f9ac0d2dd1cf4bb4fc1f662979d67521a0 | 3767600f0e16e2b7c78c1a9df646e0a99871bc4f | /Hello.py | ad2fab883638f1c3e18262f649128bbcba97ad4f | [] | no_license | MQ07/PyFu | 4b71a18f650518824681a6160165018beb383e38 | ebf6a90b7ea8e947d8dbd6fff6e29b5b002ab012 | refs/heads/master | 2020-05-24T23:54:19.981451 | 2019-05-19T20:22:06 | 2019-05-19T20:22:06 | 187,523,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32 | py | x = 20
y = 30
z = x + y
print(z) | [
"“csu2050@gmail.com”"
] | “csu2050@gmail.com” |
de368e39353b59b4218415c046ceacce2e5425a3 | c86ef7d2bdd16e2d8ef4a98d4de2d91627c9bdae | /Homeworks/helpers/data_generators.py | d055f33c00e644db8a66b16ad939ab8e9bfd7f99 | [] | no_license | anhnguyendepocen/MSDS_631 | 30ac3739799fef797c58065047b089b6f7d9c9f0 | 8b755766155d9ad6f3b42bd56d6ab7b6eb2172bf | refs/heads/master | 2023-02-15T05:12:12.575625 | 2019-05-09T01:55:15 | 2019-05-09T01:55:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35 | py | import pandas as pd
import random
| [
"js@a.ki"
] | js@a.ki |
15733591d6d33309c50f7054fd16fac3f9490298 | 626ae44f3a180b475864bc072d6bd6ce4c10817d | /Основы языка Python/Theme_2/Theme_2_part_3.py | 4c770e9a6dc864dd513db144838180c070290978 | [] | no_license | Aleksei741/Python_Lesson | 92d090fbd2c40c50addd5bd4954d1dc9862692c5 | c7edc088daedab42855f4d7c6d974c6acb0d5de0 | refs/heads/master | 2020-12-10T13:42:57.996658 | 2020-08-31T06:57:50 | 2020-08-31T06:57:50 | 233,609,183 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py |
len_input_list = int(input('Введите длину списка: '))
input_list = []
for i in range(len_input_list):
input_list.append(input('Введите {} элемент списка: '.format(i)))
#input_list = [2, 2, 5, 12, 8, 2, 12]
output_list = []
for i in input_list:
if input_list.count(i) == 1:
output_list.append(i)
print(output_list)
| [
"Aleksei.m74@yandex.ru"
] | Aleksei.m74@yandex.ru |
e7bec1127337c4b9de7eb01baddf78800bf1d1dd | c789ab0176a679140b865f12f0fddef99651a962 | /examples/gps.py | 2bb9cc8aa2da68f7f814d7b6d8dc2c438ce93601 | [] | no_license | plakna/raspberry-pi-gps-neo-6m | ee11fd3700881dc5ea3984927e531c0288cfafe5 | 302aa0fd1e1dbf3c1fc32e3a0c45f1244bfb1c1b | refs/heads/master | 2020-06-02T10:16:44.484787 | 2019-06-10T20:07:35 | 2019-06-10T20:07:35 | 191,125,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | mport serial
import pynmea2
def parse_gps(line):
msg = pynmea2.parse(line)
print((msg.timestamp, msg.latitude, msg.longitude))
serial_port = serial.Serial('/dev/ttyAMA0', 9600, timeout=5)
while True:
line = serial_port.readline().decode('unicode_escape')
if 'GGA' in line:
parse_gps(line) | [
"plakna@plakna.at"
] | plakna@plakna.at |
59dd0a777a2612f6ce678e0cf88e3444974c8c1e | 9ad08e5e973f600b6cf9e9f7c37986d31fdbae23 | /task1/server/chat.py | 17284c123ca03783e849fe2da5801e779d0e3203 | [
"Unlicense"
] | permissive | jcbecker/distcomputing | 215dd43c2bf0d076cb708e82c5e604932fc99ec3 | 0383815099b8b63a74c53a188c2052523173e688 | refs/heads/master | 2021-01-23T01:02:11.433539 | 2017-07-26T13:10:04 | 2017-07-26T13:10:04 | 85,868,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | from bottle import route, run, post, request
messages = []
@post('/chat/msg') # or @route('/login', method='POST')
def index():
message = request.forms.get('message')
nickname = request.forms.get('nickname')
messages.append ("<b>"+nickname + ": </b>" + message + "</br>")
return messages
run(host='localhost', port=8080, debug=True) | [
"joaocarlosbecker666@gmail.com"
] | joaocarlosbecker666@gmail.com |
58b418712c5e39707357a8a9817fcb43dc1a645a | 4c6aa885ce09744588d73ed4ce65981608937f13 | /inventrol_django (1)/settings (1).py | 372450f8580d38360e20e7495575fb30b92e5fd4 | [] | no_license | jaydbendre/Inventory-Management-System-using-Raspberry-Pi-UI | b03caeb06135d139d554b82e28d512e8d9cffe42 | f7e0d51a4d7491233fb9592da693b919b0a843a3 | refs/heads/master | 2020-09-19T19:10:34.277813 | 2019-11-26T19:45:07 | 2019-11-26T19:45:07 | 224,271,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,247 | py | """
Django settings for inventrol_django project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '66%*!%!uif!*=%-ff0k-q(z%vlctd6ndffm_r^)wt+px6hs7aj'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'user.apps.UserConfig',
'django.contrib.sites',
'social_django',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'inventrol_django.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'inventrol_django.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'inventory1',
'USER': 'root',
'PASSWORD': 'hakunamatata',
'HOST': 'localhost',
'PORT': '',
'OPTIONS': {
'init_command': "SET sql_mode='STRICT_TRANS_TABLES'"
},
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS=(
os.path.join(BASE_DIR,"src"),
)
AUTHENTICATION_BACKENDS=(
'social_core.backends.google.GoogleOpenId',
'social_core.backends.google.GoogleOAuth2',
'django.contrib.auth.backends.ModelBackend'
)
SITE_ID=1
SOCIAL_AUTH_URL_NAMESPACE='social'
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY='209044420267-7h8iuela48kmv1pddjdv9gt8srpbontf.apps.googleusercontent.com'
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET='UuhyDkQNIddlLzOZM1Nb-Lf5'
LOGIN_URL='login/google-oauth2/'
LOGIN_REDIRECT_URL="/login_check"
LOGOUT_REDIRECT_URL="/" | [
"noreply@github.com"
] | jaydbendre.noreply@github.com |
fa3ba3c76cbf2f3ee1ad458ed312b64456892e3a | cf7a7d30cd51e785e40b774209bdc1f24bef1011 | /customers/models.py | 7f62fcf269974d15923cc431ebdc47388976ddec | [] | no_license | sterhannzer/Futurebody | 110ad471fd1e26ed3731f2c87e4caad7b5c666ee | 56deac20853b7b80d0748507efa8db7f0daf3c86 | refs/heads/master | 2021-01-15T09:32:38.277393 | 2016-02-29T19:43:47 | 2016-02-29T19:43:47 | 25,246,810 | 0 | 0 | null | 2015-10-01T20:45:09 | 2014-10-15T08:59:12 | HTML | UTF-8 | Python | false | false | 280 | py | from django.db import models
class Customer(models.Model):
    """A store customer identified by name, surname and a scannable barcode."""

    name = models.CharField(max_length=32)
    surname = models.CharField(max_length=32)
    barcode = models.IntegerField()

    def __unicode__(self):
        # BUG FIX: the original ran name and surname together with no
        # separator ("JohnSmith 123"); insert a space between them.
        return self.name + " " + self.surname + " " + str(self.barcode)
| [
"lukasz.kanikula.fm@gmail.com"
] | lukasz.kanikula.fm@gmail.com |
d85a0d75f72090c037594dbe3a7bd571a917bf4f | 67e53afbb3f74df289309f9f1289c58e8d71f951 | /Pathrise_Project.py | 785b1e467f1be423857dd78139b18c2d80bf5494 | [] | no_license | SaloniGoradia/Pathrise-Dataset | 0f6f70a6bcfcf79dc09a9bfc0018fcc644a6a9fd | bfd26cf93b098ab21953535ab97bde521f161bcf | refs/heads/master | 2022-12-09T12:31:55.978982 | 2020-09-18T00:21:52 | 2020-09-18T00:21:52 | 296,232,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,986 | py | #!/usr/bin/env python
# coding: utf-8
#test
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
#from imblearn.under_sampling import TomekLinks
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
import pandas_profiling
import sys
import streamlit as st
#get_ipython().system('{sys.executable} -m pip install pandas-profiling')
import warnings
warnings.filterwarnings('ignore')
# --- Data loading and first-look visualisation (Streamlit page) -------------
st.title("Pathrise Data Project")
# NOTE(review): absolute local Windows path; the app only runs on this machine.
data = pd.read_csv(r"C:/Users/sgoradia/Desktop/Python/Pathrise project/data_Pathrise.csv")
st.header("How does data look like?")
st.write(data.head())
st.header("Pathrise Data with only Active and Placed candidates")
# Keep only candidates still in the program or already placed.
data= data[(data["pathrise_status"] == "Active") | (data["pathrise_status"] == "Placed")]
st.write(data.head())
#Remove Serial Number and Cohort tag
data.drop("id", axis=1, inplace=True)
data.drop("cohort_tag", axis=1, inplace=True)
data.drop("pathrise_status", axis=1, inplace=True)
#description of data
st.header("Description of data")
st.write(data.describe())
data.info()
#st.header("Data visualisation: Profile report")
#a=data.profile_report(title='data analysis at pathrise', progress_bar=False)
#st.write(a.html,unsafe_allow_html = True)
#pairplot
st.header("Pairplots")
# Pairwise feature plot coloured by placement outcome.
grid = sns.PairGrid(data= data, hue='placed')
grid = grid.map_upper(plt.scatter)
grid = grid.map_diag(sns.kdeplot, shade=True)
grid = grid.map_lower(sns.kdeplot)
#plt.title('Distribution of the features',loc='left')
st.pyplot(plt)
#stripping the column names
data.columns = data.columns.str.rstrip()
#plots for categorical variables
st.header("Barplots for categorical variables")
cat_feats = ['primary_track', 'employment_status', 'highest_level_of_education', 'length_of_job_search',
             'biggest_challenge_in_search', 'professional_experience','work_authorization_status','gender','race']
# 3x3 grid of count plots, one per categorical feature, split by 'placed'.
fig, axes = plt.subplots(3, 3, figsize=(20, 30))
sns.countplot(data.primary_track, hue=data.placed, ax=axes[0][0])
a=sns.countplot(data.employment_status, hue=data.placed, ax=axes[0][1])
a.set_xticklabels(a.get_xticklabels(), rotation=90)
b=sns.countplot(data.highest_level_of_education, hue=data.placed, ax=axes[0][2])
b.set_xticklabels(b.get_xticklabels(), rotation=90)
c=sns.countplot(data.length_of_job_search, hue=data.placed, ax=axes[1][0])
c.set_xticklabels(c.get_xticklabels(), rotation=90)
d=sns.countplot(data.biggest_challenge_in_search, hue=data.placed, ax=axes[1][1])
d.set_xticklabels(d.get_xticklabels(), rotation=90)
e=sns.countplot(data.professional_experience, hue=data.placed, ax=axes[1][2])
e.set_xticklabels(e.get_xticklabels(), rotation=90)
f=sns.countplot(data.work_authorization_status, hue=data.placed, ax=axes[2][0])
f.set_xticklabels(f.get_xticklabels(), rotation=90)
g=sns.countplot(data.gender, hue=data.placed, ax=axes[2][1])
g.set_xticklabels(g.get_xticklabels(), rotation=90)
h=sns.countplot(data.race, hue=data.placed, ax=axes[2][2])
h.set_xticklabels(h.get_xticklabels(), rotation=90)
st.pyplot(plt)
#boxplot
#st.header("Boxplot for numerical variables")
#sns.boxplot( x=data.placed, y=data.number_of_applications, width=0.5);
#plt.show()
#st.pyplot(plt)
#sns.boxplot( x=data.placed, y=data.number_of_interviews, width=0.5);
#plt.show()
#st.pyplot(plt)
##Data Preprocessing
##Feature encoding
st.header("Data Preprocessing: datatypes of data")
st.write(data.dtypes)
# count the number of missing values for each column
st.header("Number of missing values")
st.write(data[data.isnull().any(axis=1)])
##Missing values
data["gender"].isnull().sum()
##Imputing Missing values
# Fill NaNs with a domain default per column (mode-like choices picked by hand).
data["employment_status"].fillna("Unemployed", inplace=True)
data["highest_level_of_education"].fillna("Some High School", inplace=True)
data["length_of_job_search"].fillna("Less than one month", inplace=True)
data["biggest_challenge_in_search"].fillna("Resume gap", inplace=True)
data["professional_experience"].fillna("Less than one year", inplace=True)
data["work_authorization_status"].fillna("Other", inplace=True)
data["number_of_interviews"].fillna(0, inplace=True)
data["number_of_applications"].fillna(0, inplace=True)
data["gender"].fillna("Decline to self identity", inplace=True)
data["race"].fillna("Decline to self identity", inplace=True)
data["employment_status"].isnull().sum()
data.dtypes
st.header("Creating different dataset with object variables")
obj_df = data.select_dtypes(include=['object']).copy()
st.write(obj_df.head())
#converting object into category
obj_df["primary_track"] = obj_df["primary_track"].astype('category')
obj_df["employment_status"] = obj_df["employment_status"].astype('category')
obj_df["highest_level_of_education"] = obj_df["highest_level_of_education"].astype('category')
obj_df["length_of_job_search"] = obj_df["length_of_job_search"].astype('category')
obj_df["biggest_challenge_in_search"] = obj_df["biggest_challenge_in_search"].astype('category')
obj_df["professional_experience"] = obj_df["professional_experience"].astype('category')
obj_df["work_authorization_status"] = obj_df["work_authorization_status"].astype('category')
obj_df["race"] = obj_df["race"].astype('category')
obj_df["gender"] = obj_df["gender"].astype('category')
# Copy the category-typed columns back into the main frame.
data["primary_track"]=obj_df["primary_track"]
data["employment_status"]= obj_df["employment_status"]
data["highest_level_of_education"]=obj_df["highest_level_of_education"]
data["length_of_job_search"] =obj_df["length_of_job_search"]
data["biggest_challenge_in_search"] = obj_df["biggest_challenge_in_search"]
data["professional_experience"] = obj_df["professional_experience"]
data["work_authorization_status"] =obj_df["work_authorization_status"]
data["race"] = obj_df["race"]
data["gender"] =obj_df["gender"]
#data1 is copy of data, converting categories to int
data1= data.copy()
# Replace each categorical column with its integer category codes.
data1["primary_track"] = obj_df["primary_track"].cat.codes
data1["employment_status"] = obj_df["employment_status"].cat.codes
data1["highest_level_of_education"] = obj_df["highest_level_of_education"].cat.codes
data1["length_of_job_search"] = obj_df["length_of_job_search"].cat.codes
data1["biggest_challenge_in_search"] = obj_df["biggest_challenge_in_search"].cat.codes
data1["professional_experience"] = obj_df["professional_experience"].cat.codes
data1["work_authorization_status"] = obj_df["work_authorization_status"].cat.codes
data1["race"] = obj_df["race"].cat.codes
data1["gender"] = obj_df["gender"].cat.codes
st.header("Converting categorical variables to integer")
st.write(data1.head())
#Checking datatypes of latest data1
st.header("Datatypes of latest data")
st.write(data1.dtypes)
#for Classification
data_clf=data1.copy()
#for Regression
data_reg=data1.copy()
# --- Classification: predict 'placed' ---------------------------------------
# Separating Features and Target
st.header("Classification")
X = data_clf[['primary_track', 'employment_status', 'highest_level_of_education', 'length_of_job_search',
              'biggest_challenge_in_search', 'professional_experience', 'work_authorization_status','number_of_interviews',
              'number_of_applications', 'gender','race']]
st.write(X)
y = data_clf['placed']
st.write(y)
#Train Test Split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
#DecisionTreeClassifier
dtree = DecisionTreeClassifier(criterion='entropy')
dtree.fit(X_train, y_train)
y_pred = dtree.predict(X_test)
st.header("Accuracy for DecisionTree")
st.write(accuracy_score(y_test, y_pred))
st.write(classification_report(y_test, y_pred))
#Using Random Forest Algorithm
st.header("RandomForestClassifier")
random_forest = RandomForestClassifier(n_estimators=100)
st.write(random_forest.fit(X_train, y_train))
y_pred = random_forest.predict(X_test)
st.write("Prediction of target variable on X_test")
st.write(y_pred)
st.write("Accuracy for RandomForest model")
st.write(accuracy_score(y_test, y_pred))
# NOTE(review): print(...) returns None, so st.write receives None here; the
# report goes to stdout, not the page.
st.write(print(classification_report(y_test, y_pred)))
#Feature Importance
st.header("Feature Importance")
rows = list(X.columns)
# Two rows per feature (one per classifier) in a long-format frame.
imp = pd.DataFrame(np.zeros(6*len(rows)).reshape(2*len(rows), 3))
imp.columns = ["Classifier", "Feature", "Importance"]
#Add Rows
for index in range(0, 2*len(rows), 2):
    imp.iloc[index] = ["DecisionTree", rows[index//2], (100*dtree.feature_importances_[index//2])]
    imp.iloc[index + 1] = ["RandomForest", rows[index//2], (100*random_forest.feature_importances_[index//2])]
plt.figure(figsize=(15,5))
FI=sns.barplot("Feature", "Importance", hue="Classifier", data=imp)
FI.set_xticklabels(FI.get_xticklabels(), rotation=90)
plt.title("Computed Feature Importance")
st.pyplot(plt)
# Re-fit the tree on a reduced feature set (low-importance columns dropped).
# Separating Features and Target
X = data_clf[['employment_status', 'length_of_job_search',
              'biggest_challenge_in_search', 'professional_experience', 'work_authorization_status','number_of_interviews',
              'number_of_applications','gender','race']]
y = data_clf['placed']
#Train Test Split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
dtree = DecisionTreeClassifier(criterion='entropy')
dtree.fit(X_train, y_train)
y_pred = dtree.predict(X_test)
accuracy_score(y_test, y_pred)
##Binary Classification with Logistic Regression
# Separating Features and Target
st.header("Binary Classification with Logistic Regression")
X = data[['employment_status', 'highest_level_of_education', 'length_of_job_search',
          'biggest_challenge_in_search', 'professional_experience', 'work_authorization_status','number_of_interviews',
          'number_of_applications', 'gender','race']]
y = data['placed']
#One-Hot Encoding
X = pd.get_dummies(X)
colmunn_names = X.columns.to_list()
from sklearn.preprocessing import MinMaxScaler
# Scale every feature to [0, 1] before the linear model.
scaler = MinMaxScaler()
X_scaled = scaler.fit_transform(X)
#Train Test Split
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.3)
from sklearn.linear_model import LogisticRegression
logistic_reg = LogisticRegression()
logistic_reg.fit(X_train, y_train)
y_pred = logistic_reg.predict(X_test)
st.write("Prediction of target variable on X_test data")
st.write(y_pred)
st.write("Accuracy of LogisticRegression")
st.write(accuracy_score(y_test, y_pred))
print(classification_report(y_test, y_pred))
# Permutation importance of the logistic model on the held-out split.
import eli5
from eli5.sklearn import PermutationImportance
perm = PermutationImportance(logistic_reg).fit(X_test, y_test)
eli5.show_weights(perm)
#get_ipython().system('pip install shap')
#get_ipython().system('pip install lime')
#get_ipython().system('pip install eli5')
st.header("Feature Importance using LogisticRegression")
plt.figure(figsize=(30, 10))
plt.bar(colmunn_names , perm.feature_importances_std_ * 100)
#plt.show()
st.pyplot(plt)
# Imports for the regression-analysis section below.
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LinearRegression
import statsmodels.api as sm
from sklearn.metrics import mean_absolute_error, r2_score
##Regression Analysis
##Data preprocessing
#dropping NaNs (in Salary)
data_reg.dropna(inplace=True)
data_reg.drop("placed", axis=1, inplace=True)
#data_reg.drop("pathrise_status", axis=1, inplace=True)
st.header("Regression analysis for duration")
st.write(data_reg.head())
# Separating Dependent and Independent Variables
y = data_reg["program_duration_days"] #Dependent Variable
X = data_reg.drop("program_duration_days", axis=1)
column_names = X.columns.values
#One-Hot Encoding
X = pd.get_dummies(X)
colmunn_names = X.columns.to_list()
#Scaling between 0-1 (Normalization)
X_scaled = MinMaxScaler().fit_transform(X)
#PDF of program duration days
sns.kdeplot(y)
#plt.show()
st.pyplot(plt)
#Selecting outliers
y[y > 400]
# 11 records
#Removing these Records from data
X_scaled = X_scaled[y < 400]
y = y[y < 400]
#pip install mlxtend
# Sequential backward feature elimination with 10-fold CV on R^2.
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
from mlxtend.plotting import plot_sequential_feature_selection as plot_sfs
linreg = LinearRegression()
sfs = SFS(linreg, k_features=1, forward=False, scoring='r2',cv=10)
sfs = sfs.fit(X_scaled, y)
fig = plot_sfs(sfs.get_metric_dict(), kind='std_err')
plt.title('Sequential Backward Elimination')
plt.grid()
#plt.show()
st.pyplot(plt)
#From Plot its clear that, many features actually decrease the performance
# Lets see the top 5 most significant features
top_n = 5
sfs.get_metric_dict()[top_n]
#Top N Features
top_n_indices = list(sfs.get_metric_dict()[top_n]['feature_idx'])
# NOTE(review): column_names was captured before one-hot encoding, while the
# indices refer to the encoded matrix -- confirm the printed names line up.
print(f"Most Significant {top_n} Features:")
for col in column_names[top_n_indices]:
    print(col)
#Select these Features only
X_selected = X_scaled[: ,top_n_indices]
lin_reg = LinearRegression()
lin_reg.fit(X_selected, y)
y_pred = lin_reg.predict(X_selected)
print(f"R2 Score: {r2_score(y, y_pred)}")
print(f"MAE: {mean_absolute_error(y, y_pred)}")
##Converting to DF for as column names gives readability
# NOTE(review): X_scaled has the one-hot-encoded column count; passing the
# pre-encoding column_names here looks like a shape mismatch -- verify.
X_scaled = pd.DataFrame(X_scaled, columns=column_names)
y = y.values
# We must add a constants 1s for intercept before doing Linear Regression with statsmodel
X_scaled = sm.add_constant(X_scaled)
X_scaled.head()
#Constants 1 added for intercept term
# Step 1: With all Features
model = sm.OLS(y, X_scaled)
results = model.fit()
results.summary()
# Identify max P-value (P>|t|) column
# Feature ssc_p has 0.995
#drop ssc_p
# Manual backward elimination: drop the highest-p-value feature and re-fit.
X_scaled = X_scaled.drop('length_of_job_search', axis=1)
model = sm.OLS(y, X_scaled)
results = model.fit()
results.summary()
# In[221]:
# Identify max P-value (P>|t|) column
# Feature ssc_p has 0.995
#drop ssc_p
X_scaled = X_scaled.drop('highest_level_of_education', axis=1)
model = sm.OLS(y, X_scaled)
results = model.fit()
results.summary()
# In[222]:
# Identify max P-value (P>|t|) column
# Feature ssc_p has 0.995
#drop ssc_p
X_scaled = X_scaled.drop('professional_experience', axis=1)
model = sm.OLS(y, X_scaled)
results = model.fit()
results.summary()
# In[ ]:
# In[9]:
# --- Per-feature exploratory plots (notebook cells) -------------------------
#Feature-Gender
data.gender.value_counts()
# In[10]:
sns.countplot("gender", hue="placed", data=data)
plt.show()
# In[12]:
# Program-duration density split by gender.
sns.kdeplot(data.program_duration_days[ data.gender=="Male"])
sns.kdeplot(data.program_duration_days[ data.gender=="Female"])
plt.legend(["Male", "Female"])
plt.xlabel("program_duration_days")
plt.show()
# In[13]:
plt.figure(figsize =(18,6))
sns.boxplot("program_duration_days", "gender", data=data)
plt.show()
# In[14]:
#Feature-exp
data.professional_experience.value_counts()
# In[15]:
sns.countplot("professional_experience", hue="placed", data=data)
plt.show()
# In[16]:
sns.kdeplot(data.program_duration_days[ data.professional_experience=="1-2 years"])
sns.kdeplot(data.program_duration_days[ data.professional_experience=="Less than one year"])
sns.kdeplot(data.program_duration_days[ data.professional_experience=="3-4 years"])
sns.kdeplot(data.program_duration_days[ data.professional_experience=="5+ years"])
plt.legend(["1-2 years","Less than one year","3-4 years","5+ years"])
plt.xlabel("program_duration_days")
plt.show()
# In[18]:
#Feature-primary_track
data.primary_track.value_counts()
# In[19]:
sns.countplot("primary_track", hue="placed", data=data)
plt.show()
# In[21]:
sns.kdeplot(data.program_duration_days[ data.primary_track=="SWE"])
sns.kdeplot(data.program_duration_days[ data.primary_track=="PSO"])
sns.kdeplot(data.program_duration_days[ data.primary_track=="Design"])
sns.kdeplot(data.program_duration_days[ data.primary_track=="Data"])
plt.legend(["SWE","PSO","Design","Data"])
plt.xlabel("program_duration_days")
plt.show()
# In[22]:
plt.figure(figsize =(18,6))
sns.boxplot("program_duration_days", "primary_track", data=data)
plt.show()
# In[23]:
#Feature-highest_level_of_education
data.highest_level_of_education.value_counts()
# In[24]:
sns.countplot("highest_level_of_education", hue="placed", data=data)
plt.show()
# In[25]:
sns.kdeplot(data.program_duration_days[ data.highest_level_of_education=="Bachelor's Degree"])
sns.kdeplot(data.program_duration_days[ data.highest_level_of_education=="Master's Degree"])
sns.kdeplot(data.program_duration_days[ data.highest_level_of_education=="Some College, No Degree "])
sns.kdeplot(data.program_duration_days[ data.highest_level_of_education=="Doctorate or Professional Degree"])
plt.legend(["Bachelor's Degree","Master's Degree","Some College, No Degree ","Doctorate or Professional Degree"])
plt.xlabel("program_duration_days")
plt.show()
# In[26]:
sns.countplot("work_authorization_status", hue="placed", data=data)
plt.show()
# In[27]:
#abcd
#Kernel-Density Plot
# NOTE(review): 'placed' is compared against the *string* "1"/"0" here, while
# it is used directly as a numeric-looking hue above -- if the column is
# numeric these filters select nothing; confirm the dtype.
sns.kdeplot(data.number_of_applications[ data.placed=="1"])
sns.kdeplot(data.number_of_applications[ data.placed=="0"])
plt.legend(["Placed", "Not Placed"])
plt.xlabel("number_of_applications")
plt.show()
| [
"noreply@github.com"
] | SaloniGoradia.noreply@github.com |
136f50ecefd925362792a48b9ea8e42abaf88013 | 54cb06e3bbe63ee9e51063bb4deede1c7c9dd107 | /main2.py | e0bb5f609a06409b68c79a55e6f78d3803e15108 | [] | no_license | Stantes/lawyers | 50b833b0ee2a0cc4f1a5b115096814969cd87ead | 2cc3bdfb76923d579eb993c53065e535b7ba6a84 | refs/heads/master | 2023-02-21T23:54:29.640532 | 2021-01-18T07:43:57 | 2021-01-18T07:43:57 | 330,377,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | import sys
def read_input(file):
    """Lazily yield the whitespace-split tokens of each line of *file*."""
    return (line.split() for line in file)
# data = read_input(sys.stdin)
# BUG FIX: the file was opened with mode 'a' (append, write-only), so the
# generator returned by read_input raises io.UnsupportedOperation the moment
# it is iterated.  Open the CSV for reading instead.
# NOTE(review): `data` is a lazy generator; it is only printed below, never
# consumed -- confirm whether the rows were meant to be materialised.
with open('lawyers_cards.csv', 'r') as file:
    data = read_input(file)
print(data) | [
"stantes@yandex.ru"
] | stantes@yandex.ru |
72157102bb80ee0f8f2cacab349489faecb2d9a1 | 7097771e4359040dfe94be9c7894ebd1108e6fc2 | /placeAD3.py | 09b539501aef894eb97074793186a0a7d99e9b9a | [] | no_license | lyttonhao/UEProject | a7facc7635ce9054338644a7d375dedadb552d1c | 593b2584722a11592b365b839e96cd3a2ffc7e06 | refs/heads/master | 2021-01-18T14:48:59.612782 | 2015-07-28T01:52:48 | 2015-07-28T01:52:48 | 39,484,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,136 | py | import cv2
import matplotlib.pyplot as plt
import numpy as np
import math
# Haar cascade detectors shipped with OpenCV (paths assume a system install).
face_cascade = cv2.CascadeClassifier('/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml')
eye_cascade = cv2.CascadeClassifier('/usr/local/share/OpenCV/haarcascades/haarcascade_eye.xml')
threshd_value = 200   # gray level above which a pixel counts as "bright" (cal_exposure)
threshd_expo = 0.35   # max allowed fraction of bright pixels (whiten precondition)
threshd_skin = 130    # min mean distance-to-white of the skin region (whiten precondition)
def detect_faces( img ):
    '''
    Run the OpenCV frontal-face Haar cascade on a BGR image.

    Only faces at least a third of the image in each dimension are kept.
    Returns the raw (x, y, w, h) rectangles from detectMultiScale.
    '''
    min_size = (img.shape[0]/3, img.shape[1]/3)
    grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return face_cascade.detectMultiScale(grayscale, 1.1, 3, minSize=min_size)
def detectEye( img, rect ):
    '''
    Detect eyes inside the face rectangle `rect` of a BGR image.

    Returns a uint8 mask with the same height/width as `img`, set to 1
    over every detected eye box and 0 elsewhere.
    '''
    x, y, w, h = rect
    eye_mask = np.zeros(img.shape[:2], np.uint8)
    grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    for ex, ey, ew, eh in eye_cascade.detectMultiScale(grayscale[y:y+h, x:x+w]):
        # Eye coordinates are relative to the face crop; offset back to image space.
        eye_mask[y+ey:y+ey+eh, x+ex:x+ex+ew] = 1
    return eye_mask
def calMeanStd(x, l, r):
    '''
    Mean and std of the sorted values of 1-D array x restricted to the
    fractional position range [l, r] (e.g. l=0.05, r=0.95 trims 5% tails).
    '''
    y = np.sort(x)
    n = x.shape[0]
    # BUG FIX: the original sliced with float indices (y[l*n:r*n]), which is
    # a TypeError in modern NumPy; truncate to ints explicitly.
    lo, hi = int(l * n), int(r * n)
    return np.mean(y[lo:hi]), np.std(y[lo:hi])
def detect_skin( _img, _mask ):
    '''
    Estimate the skin region of _img from the YCrCb statistics of an
    initial mask.  Each channel's trimmed mean/std is taken over the masked
    pixels; pixels within (u - 2*sigma, u + 2*sigma) on all three channels
    are marked as skin.  Works on a half-resolution copy for speed and
    resizes the result back to the input size.
    '''
    scale = 2.0
    img = cv2.resize(_img, dsize = (0,0), fx=1.0/scale, fy=1.0/scale, interpolation=cv2.INTER_NEAREST)
    mask = cv2.resize(_mask, dsize = (0,0), fx=1.0/scale, fy=1.0/scale, interpolation=cv2.INTER_NEAREST)
    ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
    chan_y, chan_cr, chan_cb = ycrcb[:,:,0], ycrcb[:,:,1], ycrcb[:,:,2]
    u_y, s_y = calMeanStd(chan_y[mask == 1], 0.05, 0.95)
    u_cb, s_cb = calMeanStd(chan_cb[mask == 1], 0.05, 0.95)
    u_cr, s_cr = calMeanStd(chan_cr[mask == 1], 0.05, 0.95)

    def in_band(channel, u, s):
        # Strict two-sided band, identical to the original per-pixel test.
        return (channel > u - 2*s) & (channel < u + 2*s)

    skinMask = (in_band(chan_y, u_y, s_y)
                & in_band(chan_cb, u_cb, s_cb)
                & in_band(chan_cr, u_cr, s_cr)).astype(np.uint8)
    skinMask = cv2.resize(skinMask, dsize = (_img.shape[1], _img.shape[0]), interpolation=cv2.INTER_NEAREST)
    return skinMask
def cal_exposure( img ):
    '''
    Exposure ratio of img: the fraction of grayscale pixels brighter than
    threshd_value, computed from the cumulative histogram.
    '''
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    hist = cv2.calcHist( [gray], [0], None, [256], [0,256])
    chist = np.cumsum(hist, axis = 0)
    # (total - count below threshold) / total  ->  fraction of bright pixels.
    value = (chist[255,0] - chist[threshd_value,0]) / chist[255,0]
    # COMPAT FIX: `print value` is Python-2-only syntax; the parenthesised
    # form prints the same thing on Python 2 and is valid on Python 3.
    print(value)
    return value
def calAvgWhiten( img, mask ):
    '''
    Whiten level of the mask region of img: the mean Euclidean distance of
    the masked BGR pixels to pure white (255, 255, 255).  Smaller values
    mean the region is already closer to white.
    '''
    # int32 avoids uint8 underflow when subtracting from 255.
    d = 255-img[mask == 1, :].astype(np.int32)
    d = d*d
    value = np.mean( np.sqrt(d.sum(axis=1)) )
    # COMPAT FIX: `print value` is Python-2-only syntax; the parenthesised
    # form behaves identically on Python 2 and is valid on Python 3.
    print(value)
    return value
def grabcut( img, rect ):
    '''
    Foreground mask (uint8, 0/1) from OpenCV GrabCut seeded with `rect`,
    run for 4 iterations.  Definite/probable background becomes 0,
    everything else 1.
    '''
    bgd_model = np.zeros((1,65), np.float64)
    fgd_model = np.zeros((1,65), np.float64)
    gc_mask = np.zeros( img.shape[:2], np.uint8 )
    gc_mask, bgd_model, fgd_model = cv2.grabCut( img, gc_mask, rect, bgd_model, fgd_model, 4, cv2.GC_INIT_WITH_RECT )
    return np.where( (gc_mask==2)|(gc_mask==0), 0, 1 ).astype('uint8')
def gamma_correction(img, gamma = 1.5):
    '''
    Gamma-correct the lightness channel of a BGR image.

    A 256-entry lookup table maps each L value through
    (v/255) ** (1/gamma) * 255; casting the table to uint8 truncates
    exactly like the original per-pixel assignment did.
    '''
    lut = np.array([math.pow(v/255.0, 1.0/gamma)*255.0 for v in range(256)],
                   dtype=np.uint8)
    hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
    hls[:, :, 1] = lut[hls[:, :, 1]]
    return cv2.cvtColor(hls, cv2.COLOR_HLS2BGR)
def whiten( img ):
    '''
    Case 3: whiten face in the img

    Pipeline: require exactly one sufficiently large face, reject
    over-exposed or already-white shots, segment the skin, then blend a
    gamma-brightened image with a bilateral-smoothed copy over the skin
    region only.  Returns the input unchanged whenever a precondition fails.
    '''
    faces = detect_faces( img )
    # Exactly one face is required; anything else is ambiguous for retouching.
    if len(faces) > 1 or len(faces) == 0:
        print len(faces)
        print "more than one or no faces detected"
        return img
    # NOTE(review): OpenCV cascades return rectangles as (x, y, w, h); the
    # names h and w look swapped here, and rect below keeps that order --
    # confirm whether this is intentional.
    x,y,h,w = faces[0]
    if (h < img.shape[0]/2.5 and w < img.shape[1]/2.5):
        print "face is too small"
        return img
    # Skip images with too large a fraction of near-white pixels.
    if cal_exposure( img ) > threshd_expo:
        print "don't meet exposure condition"
        return img
    rect = (x,y,h,w)
    # Foreground (face) mask with the detected eye boxes removed.
    mask = grabcut( img, rect )
    eyemask = detectEye( img, rect )
    mask = mask & (1-eyemask)
    skinMask = detect_skin( img, mask )
    # A small mean distance-to-white means the skin is already nearly white.
    if calAvgWhiten( img, skinMask ) < threshd_skin:
        print "skin is too white"
        return img
    # im = img*mask[:,:,np.newaxis]
    # cv2.imshow("mask", im)
    # im = img*skinMask[:,:,np.newaxis]
    # cv2.imshow("skinmask", im)
    # Brighten globally, smooth a copy, and keep the smoothed pixels on skin only.
    ga_im = gamma_correction( img, 1.5 )
    bl_im = cv2.bilateralFilter(ga_im, 10, 30, 30)
    result = ga_im*(1-skinMask[:,:,np.newaxis]) + bl_im*skinMask[:,:,np.newaxis]
    # mask3 = cv2.cvtColor((1-skinMask)*255, cv2.COLOR_GRAY2BGR)
    # output = cv2.seamlessClone(ga_im, bl_im, mask3, (ga_im.shape[1]/2, ga_im.shape[0]/2), cv2.MIXED_CLONE)
    return result
if __name__ == '__main__':
    # Batch-process images3/image1.jpg .. image8.jpg and write the whitened
    # results next to them as images3/resultN.png.
    for i in range(1,9):
        print i
        im = cv2.imread('images3/image' +str(i) +'.jpg')
        result = whiten( im )
        # cv2.imshow("result",result)
        cv2.imwrite("images3/result" +str(i) + ".png", result)
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()
| [
"lyttonhao@gmail.com"
] | lyttonhao@gmail.com |
79e2ed636cd3e13acfb28777f0b5c940def5aa88 | 99a7e056918e40159168fe23097fe5d4cca9fe15 | /post_page.py | 29941b4a052465c9d9c437e1f69aa55fb30221cb | [] | no_license | apiusage/StartupIdeasEvaluator | 4fa984a685e07f69118444b102da6f7884234fcc | 3d5414c09b4a5e119bd266b23432615a1c8bea8d | refs/heads/master | 2023-04-15T10:45:30.799471 | 2021-04-15T11:20:04 | 2021-04-15T11:20:04 | 358,228,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,284 | py | import streamlit as st
from openpyxl import Workbook
from db import *
import pandas as pd
# Score choices offered by every rating radio button (stored as strings).
options = ["1", "2", "3"]
# Column headers used when rendering the product table from the database.
colOptions = ["Customer Problem", "Product Features", "Business Model", "Product",
              "Hair on Fire Factor", "Access to Market", "Day 1 Revenue", "Revenue Scalability", "Defensibility",
              "Lack of Competitors", "Personal Passion", "Unfair Advantage", "IP Creation", "Acquisition Potential"]
def run_startup_idea_page():
    """Render the startup-idea CRUD page: add, update, then delete sections."""
    addIdea()
    updateIdea()
    deleteIdea()
def addIdea():
    """Render the 'Add Product' expander and insert a new idea on submit."""
    # Business-model choices come from the first sheet of a bundled workbook.
    businessModelDF = pd.read_excel('data/Business Model.xlsx', engine='openpyxl')
    businessModelDF = businessModelDF['Business Models'].values.tolist()
    with st.beta_expander("Add Product"):
        customerProblem = st.text_area("Describe Customer Problem:")
        productFeatures = st.text_area("Product features (tackle problems):")
        business_model = st.selectbox("Choose a Business Model:", businessModelDF)
        product = st.text_input("Product Name:")
        # Ten 1-3 rating criteria, split over two columns.
        col1, col2 = st.beta_columns(2)
        with col1:
            hair_on_fire_factor = st.radio("Hair on Fire Factor:", options)
            access_to_market = st.radio("Access to Market:", options)
            day_1_revenue = st.radio("Day 1 Revenue:", options)
            revenueScalability = st.radio("Revenue Scalability:", options)
            defensibility = st.radio("Defensibility:", options)
        with col2:
            lackofCompetitors = st.radio("Lack of Competitors:", options)
            personal_Passion = st.radio("Personal Passion:", options)
            unfair_Advantage = st.radio("Unfair Advantage:", options)
            ipCreation = st.radio("IP Creation:", options)
            acquisition_Potential = st.radio("Acquisition Potential:", options)
        if st.button("Add Start Up Idea"):
            # Argument order must match the db.add_data column order.
            add_data(customerProblem, productFeatures, business_model, product,
                     hair_on_fire_factor, access_to_market, day_1_revenue, revenueScalability, defensibility,
                     lackofCompetitors, personal_Passion, unfair_Advantage, ipCreation, acquisition_Potential)
            st.success("Added {}".format(product))
def updateIdea():
    """Render the 'Update Product' expander pre-filled with the chosen product's stored values."""
    businessModelDF = pd.read_excel('data/Business Model.xlsx', engine='openpyxl')
    businessModelDF = businessModelDF['Business Models'].values.tolist()
    with st.beta_expander("Update Product"):
        list_of_product = [i[0] for i in view_all_product()]
        selected_product = st.selectbox("Choose a product to update: ", list_of_product)
        product_result = get_product_by_name(selected_product)
        if product_result:
            # Unpack the single result row positionally (db column order).
            customerProblem = product_result[0][0]
            productFeatures = product_result[0][1]
            business_model = product_result[0][2]
            product = product_result[0][3]
            hair_on_fire_factor = product_result[0][4]
            access_to_market = product_result[0][5]
            day_1_revenue = product_result[0][6]
            revenueScalability = product_result[0][7]
            defensibility = product_result[0][8]
            lackofCompetitors = product_result[0][9]
            personal_Passion = product_result[0][10]
            unfair_Advantage = product_result[0][11]
            ipCreation = product_result[0][12]
            acquisition_Potential = product_result[0][13]
            updateCol1, updateCol2, updateCol3 = st.beta_columns([3,1,1])
            with updateCol1:
                new_customerProblem = st.text_area("Update Customer Problem:", customerProblem)
                new_productFeatures = st.text_area("Update Product features:", productFeatures)
                new_business_model = st.selectbox("Update Business Model:", businessModelDF)
                new_product = st.text_input("Update Product Name:", product)
            # index=int(int(score)-1) preselects the stored 1-3 score in the
            # options list; assumes stored values are "1".."3" (or 1..3).
            with updateCol2:
                new_hair_on_fire_factor = st.radio("Update Hair on Fire Factor:", options, index=int(int(hair_on_fire_factor)-1))
                new_access_to_market = st.radio("Update Access to Market:", options, index=int(int(access_to_market)-1))
                new_day_1_revenue = st.radio("Update Day 1 Revenue:", options, index=int(int(day_1_revenue)-1))
                new_revenueScalability = st.radio("Update Revenue Scalability:", options, index=int(int(revenueScalability)-1))
                new_defensibility = st.radio("Update Defensibility:", options, index=int(int(defensibility)-1))
            with updateCol3:
                new_lackofCompetitors = st.radio("Update Lack of Competitors:", options, index=int(int(lackofCompetitors)-1))
                new_personal_Passion = st.radio("Update Personal Passion:", options, index=int(int(personal_Passion)-1))
                new_unfair_Advantage = st.radio("Update Unfair Advantage:", options, index=int(int(unfair_Advantage)-1))
                new_ipCreation = st.radio("Update IP Creation:", options, index=int(int(ipCreation)-1))
                new_acquisition_Potential = st.radio(" Update Acquisition Potential:", options, index=int(int(acquisition_Potential)-1))
            if st.button("Update Product"):
                # The original `product` name is the lookup key (last argument).
                edit_product_data(new_customerProblem, new_productFeatures, new_business_model, new_product,
                                  new_hair_on_fire_factor, new_access_to_market, new_day_1_revenue, new_revenueScalability, new_defensibility,
                                  new_lackofCompetitors, new_personal_Passion, new_unfair_Advantage, new_ipCreation, new_acquisition_Potential, product)
                st.success("Updated {}".format(product))
def deleteIdea():
    """Render the 'Delete Product' expander: show the table, delete the chosen row, re-show the table."""
    with st.beta_expander("Delete Product"):
        st.dataframe(pd.DataFrame(view_all_data(), columns=colOptions))
        product_names = [row[0] for row in view_all_product()]
        chosen_name = st.selectbox("Product to Delete", product_names)
        if st.button("Delete"):
            delete_data(chosen_name)
            st.warning("Deleted {}".format(chosen_name))
            # Re-query so the displayed table reflects the deletion.
            st.dataframe(pd.DataFrame(view_all_data(), columns=colOptions))
"apiusage8@gmail.com"
] | apiusage8@gmail.com |
7d43dc393642d3c39820832bea87292ba80cabc6 | a965b9a24a44b91732b4c445b5ccd36bd2b0daff | /Generador_Embeddings_V2/similarity.py | d4fd1eaeb0d8057c3b49181482f5b4617539d490 | [] | no_license | Antonio4132/Machine-Learning-ML-a-partir-de-Ontologias | 555e606df044acd33309df1f0f16c41a492b2abc | 56a433346fbd5e9caadca964c9a406f80133b3c0 | refs/heads/main | 2023-08-11T21:38:21.828804 | 2021-09-19T08:53:45 | 2021-09-19T08:53:45 | 408,074,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 872 | py | # -*- coding: utf-8 -*-
"""
Created on Sat May 22 19:22:01 2021
@author: anton
"""
import gensim
if __name__ == '__main__':
    # Load a saved gensim Word2Vec model and run an interactive
    # nearest-neighbour loop over user-supplied words.
    print("Introduce el nombre del archivo embeding:")
    modelo = str(input())
    print("--- Cargando Embedding... ---")
    model = gensim.models.Word2Vec.load(modelo)
    vocabulary = set(model.wv.vocab)
    print("--- Embedding Cargado ---")
    print(modelo)
    while True:
        print("Introduzca palabra clave para similitud:")
        palabra = str(input())
        # BUG FIX: most_similar raises KeyError on out-of-vocabulary words,
        # crashing the loop.  Use the (previously unused) vocabulary set as
        # a guard and keep prompting instead.
        if palabra in vocabulary:
            result = model.wv.most_similar(positive=[palabra])
            for r in result:
                print(r)
        else:
            print("Palabra fuera del vocabulario: " + palabra)
        print("Salir? [y/n] [y]")
        s = str(input())
        if s != "n":
            break
| [
"noreply@github.com"
] | Antonio4132.noreply@github.com |
5f44a17eaedbeb86d608fb3d6bf0a493dae1b079 | 868883523a33fe6db707f8e210666b6fe5ce1c64 | /app/core/admin.py | 16e677c981335993c199134b23429d6d3153ed7a | [
"MIT"
] | permissive | victorm-fj/recipe-app-api | 1d4874346c61b783d2402fd7ed24a91bc8b37d9b | 23793684d3660bc26893fc05acbae4f16e8d0aaf | refs/heads/master | 2020-04-13T20:52:45.094902 | 2019-01-02T00:49:19 | 2019-01-02T00:49:19 | 163,441,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,111 | py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.utils.translation import gettext as _
from core import models
class UserAdmin(BaseUserAdmin):
    """Admin configuration for the custom email-based User model."""

    ordering = ['id']
    list_display = ['email', 'name']
    # Field layout for the change (edit) page, grouped into titled sections.
    fieldsets = (
        (None, {
            'fields': (
                'email', 'password'
            ),
        }),
        (_('Personal Info'), {
            'fields': (
                'name',
            ),
        }),
        (_('Permissions'), {
            'fields': (
                'is_active', 'is_staff', 'is_superuser'
            ),
        }),
        (_('Important dates'), {
            'fields': (
                'last_login',
            ),
        })
    )
    # Field layout for the add page; the password is entered twice to confirm.
    add_fieldsets = (
        (None, {
            'classes': (
                'wide',
            ),
            'fields': (
                'email', 'password1', 'password2'
            ),
        }),
    )
# Register the custom user admin; the recipe-related models use the
# default ModelAdmin.
admin.site.register(models.User, UserAdmin)
admin.site.register(models.Tag)
admin.site.register(models.Ingredient)
admin.site.register(models.Recipe)
| [
"victorm.floresj@gmail.com"
] | victorm.floresj@gmail.com |
7b86956b25b2b8011d081a205726701643364717 | 8f6b4b6a95ee21051de9b58a96800aff920037a5 | /lib/pysubsonic/config.py | 855997ccfe3a3f681a1cdf5d79ee718efec5ba3b | [
"MIT"
] | permissive | vbatts/pysubsonic | a6d9e8feb6c969d1ef074df1baddb5767bf3a1f9 | df59fc40c7587c80647f0d01e504ec7210f92552 | refs/heads/master | 2021-01-18T13:54:04.948560 | 2011-12-13T15:31:14 | 2011-12-13T15:31:14 | 2,940,922 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,269 | py | '''
Copyright (c) 2011 Vincent Batts, Vienna, VA, USA
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import os
import logging
from ConfigParser import ConfigParser
from pysubsonic import const
log = logging.getLogger('pysubsonic.config')
def build_config(file):
    """Interactively create the config file at *file* (Python 2 code).

    Prompts on stdin for the Subsonic server URL, username and password;
    the password is hex-encoded (obfuscation only, not encryption) and
    everything is written to an ``[auth]`` section via ConfigParser.
    Returns True once the file has been written.
    """
    print 'Generating new configuration file "%s"' % file
    url = raw_input("Enter subsonic server url (i.e."
                    " http://subsonic.home.lan:4040/: ")
    username = raw_input("Enter username: ")
    password = raw_input("Enter password: ").encode("hex")
    sect = 'auth'
    config = ConfigParser()
    config.add_section(sect)
    config.set(sect, 'url', url)
    config.set(sect, 'username', username)
    config.set(sect, 'password', password)
    # 'wb' so ConfigParser.write controls the exact bytes on disk.
    with open(file, 'wb') as configfile:
        config.write(configfile)
    return True
def read_config(file = const.DEFAULT_CONF_FILE):
    """Read the configuration at *file* (defaults to const.DEFAULT_CONF_FILE).

    Expands ``~`` in the path and, when the file is missing, creates it
    interactively via build_config().  Returns the parsed contents as a
    dict of dicts (section -> {option: value}), or False if interactive
    creation failed.
    """
    configdict = dict()
    file = os.path.expanduser(file)
    if not os.path.exists(file):
        if not build_config(file):
            return False
    config = ConfigParser()
    config.read([file])
    # Flatten the ConfigParser object into plain nested dicts.
    for sect in config.sections():
        configdict[sect] = dict()
        for i in config.items(sect):
            (k,v) = i
            configdict[sect][k] = v
    log.debug(configdict)
    return configdict
| [
"vbatts@hashbangbash.com"
] | vbatts@hashbangbash.com |
9c6167865077eb377498163f9d50476fc3187c50 | 72d44a894f09e60c41d362cdd656b3f644e52b92 | /src/spectral/core/source/rect.py | 8283eaee1b9452eda42109f8cfead879e9e28242 | [] | no_license | antiface/spectral | d4de2eaaa3d2fecf39cd2699ee1233919d78654e | 6b8a6df5d76f88d9bb89c87236b3b1cac7f2532e | refs/heads/master | 2020-05-30T15:08:24.797242 | 2015-07-02T12:57:32 | 2015-07-02T12:57:32 | 45,659,486 | 1 | 0 | null | 2015-11-06T04:36:01 | 2015-11-06T04:36:00 | null | UTF-8 | Python | false | false | 1,447 | py | from .simulatedsource import SimulatedSource
import numpy as np
import scipy as sp
class Rect(SimulatedSource):
    """
    Signal representing a number of rectangles in the frequency domain.
    Args:
        frequencies: A list of center frequencies of the blocks.
        widths: A list of widths of the rectangles
        samp_freq: Sample frequency
        SNR: Signal to Noise ratio of the output signal
    """
    def __init__(self, frequencies, widths, samp_freq, SNR=None):
        super(Rect, self).__init__(frequencies, samp_freq, SNR=SNR)
        self.widths = widths
    def generate(self, no_samples):
        """
        Generate a signal whose spectrum contains the rectangular
        frequency components specified on object creation.
        Args:
            no_samples: Number of samples to generate
        Returns:
            Generated samples with a Signal to Noise ratio as specified on
            object creation.
        """
        # Time axis for the requested number of samples.
        t = np.arange(0, no_samples) / self.samp_freq
        duration = t[-1]
        signals = []
        for f, width in zip(self.frequencies, self.widths):
            # A sinc envelope in time gives a rectangle of this width in the
            # frequency domain; the sine carrier shifts it to frequency f.
            component = 2 * width * np.sinc(2 * width * (t - duration / 2))
            carrier = np.sin(2 * np.pi * f * t)
            signals.append(component * carrier)
        # NOTE(review): bare `reduce` exists only on Python 2; Python 3 would
        # need functools.reduce — confirm the targeted interpreter.
        signal = reduce(np.add, signals)
        # NOTE(review): `import scipy as sp` alone does not guarantee the
        # scipy.signal submodule is loaded — confirm it is imported elsewhere.
        signal *= sp.signal.hamming(len(signal))
        return self.white_gaussian_noise(self.SNR, signal)
| [
"willem.melching@gmail.com"
] | willem.melching@gmail.com |
2c3332c069479eb826d6525bee8c9d3190b55f5d | a17eec889363d466984fea50100c1c8e7baa0177 | /Fiction/Fiction/spiders/biquge5200.py | ba2924d7de124dc815c21ba9593132ddbd22edaf | [] | no_license | demonsheart/Web-Crawler | ec3bd42a4840183a811db89ec73759fa2efd4e63 | 24fb022407ec209dba72556fbe9d3624ce7ea92f | refs/heads/main | 2023-05-21T12:35:02.163756 | 2021-02-26T11:41:33 | 2021-02-26T11:41:33 | 333,744,328 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 846 | py | import scrapy
from Fiction.items import FictionItem
class Biquge5200Spider(scrapy.Spider):
    """Crawls novel chapters from biquge5200.com, following "next" links."""

    name = 'biquge5200'
    allowed_domains = ['biquge5200.com']
    start_urls = ['https://www.biquge5200.com/76_76490/147016121.html']

    def parse(self, response):
        # Chapter title and body paragraphs, joined with blank lines.
        title = response.xpath(
            '//div[@class="bookname"]/h1/text()').extract()[0]
        lines = response.xpath('//div[@id="content"]/p/text()').extract()
        content = ''.join(line + '\n\n' for line in lines)

        item = FictionItem()
        item['title'] = title
        item['content'] = content
        yield item

        # The fourth footer link is "next chapter"; stop once it no longer
        # points at a .html page (e.g. the table of contents at the end).
        next_url = response.xpath(
            '//div[@class="bottem2"]/a[4]/@href').extract_first()
        if next_url.find('.html') != -1:
            yield scrapy.Request(next_url, callback=self.parse)
| [
"2509875617@qq.com"
] | 2509875617@qq.com |
0c393c1bb6ac6f0c84684f0a59fa3935b38f86d9 | 99678f87eadc5519dca0b7045c13973ca8d8861d | /graficwidget.py | 0750c912a518a21a5a965de15af59544f81da088 | [] | no_license | PP-62/sql_viewer | 4235a43536d81b3b2892dd266d9ef2c087f5d80f | 12a9e8476d8d2bd641122c9dc1f394b1f0f37935 | refs/heads/main | 2023-04-26T22:32:43.745710 | 2021-05-24T13:27:03 | 2021-05-24T13:27:03 | 356,977,004 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | from PyQt5.QtWidgets import*
from matplotlib.backends.backend_qt5agg import FigureCanvas
from matplotlib.figure import Figure
class GraficWidget(QWidget):
    """Qt widget embedding a Matplotlib canvas with a single Axes.

    Plot through ``self.canvas.axes`` and call ``self.canvas.draw()``
    to refresh; the canvas fills the widget via a vertical layout.
    """

    def __init__(self, parent = None):
        QWidget.__init__(self, parent)
        # Canvas backed by an empty Figure; one subplot is attached below.
        self.canvas = FigureCanvas(Figure())
        vertical_layout = QVBoxLayout()
        vertical_layout.addWidget(self.canvas)
        self.canvas.axes = self.canvas.figure.add_subplot(111)
self.setLayout(vertical_layout) | [
"77794838+PP-62@users.noreply.github.com"
] | 77794838+PP-62@users.noreply.github.com |
0a7339c381f9b70c492c662908a64640078995c9 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/380/usersdata/332/101124/submittedfiles/principal.py | 1030f266c05bfbd1f491c8b15901e3fd2c180f5d | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | # -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
# Demo: print the list, sort it in place, then print it again (ascending).
L=[5,7,2,9,4,1,3]
print('lista=',L)
L.sort()
print('lista em ordem crescente',L) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
869c0c7fc8b49d963ba647cbb381872ee3e09495 | 755e5a3cee8b06688abe486fd462cccfd3fc0f4e | /tic_tac_toe.py | f27079b496a952b99cf7e6187452de3a1485dc7d | [] | no_license | RamlathYasar/Tic-Tac-Toe | efb0c8eb3ef5d1b3c3e7719fe5b498065a86e95e | abbbdcf615893f15bc9de98d233cea5776942429 | refs/heads/master | 2020-06-18T01:03:53.303711 | 2019-07-10T08:23:12 | 2019-07-10T08:23:12 | 196,117,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,574 | py | import random
# 3x3 board in row-major order; each cell holds its index until claimed
# by the player ('x') or the computer ('o').
board = [0,1,2,
        3,4,5,
        6,7,8]
def show():
    """Print the current 3x3 board (module-level `board`) to stdout."""
    print('\n')
    print('|',board[0],'|',board[1],'|',board[2],'|')
    print('---------------')
    print('|',board[3],'|',board[4],'|',board[5],'|')
    print('---------------')
    print('|',board[6],'|',board[7],'|',board[8],'|')
    print('\n')
def checkLine(char, spot1, spot2, spot3):
    """Return True when the three given cells of `board` all hold `char`."""
    cells = (board[spot1], board[spot2], board[spot3])
    return all(cell == char for cell in cells)
def checkAll(char):
    """Return True if `char` completes any row, column, or diagonal.

    Bug fix: the original evaluated bare `True` expressions inside each
    branch and never returned anything, so it always fell through and
    returned None (falsy) even for a winning board — no win was ever
    detected by the game loop.
    """
    lines = (
        (0, 1, 2), (3, 4, 5), (6, 7, 8),   # rows
        (0, 3, 6), (1, 4, 7), (2, 5, 8),   # columns
        (0, 4, 8), (2, 4, 6),              # diagonals
    )
    for a, b, c in lines:
        if checkLine(char, a, b, c):
            return True
    return False
# Main game loop: the human ('x') picks a cell, then the computer ('o')
# answers with a random free cell.  Repeats until a win is detected.
while True:
    choice = int(input('Select a spot:'))
    if board[choice] != 'x' and board[choice] != 'o':
        board[choice] = 'x'
        if checkAll('x') == True:
            print('----X Wins----')
            break
        # Computer: keep rolling random cells until a free one is found.
        while True:
            random.seed()
            opponent = random.randint(0,8)
            if board[opponent] != 'o' and board[opponent] != 'x':
                board[opponent] = 'o'
                # NOTE(review): this inner `break` only exits the computer's
                # loop; after "O Wins" the outer game loop keeps running —
                # confirm whether the game was meant to end here too.
                if checkAll('o') == True:
                    print('----O Wins----')
                    break
                break
    else:
        print('This spot is taken')
show() | [
"ramlathktg35@gmail.com"
] | ramlathktg35@gmail.com |
4fc0d89461eda5906b07a5f6de99cf319725d42e | ec12cf1875069bdda354e21869a1d99da95e2370 | /bot.py | b5aa1417a3710837a13f679b7e301ef118a8b324 | [] | no_license | googolmogol/FirsMyBot | 940f1e8e7302bdabebcc54b459fee88a934bb878 | eea21652295bfbc165e9dd3aeb18594ddbbfa60e | refs/heads/master | 2023-03-29T00:17:17.028543 | 2021-03-25T19:47:40 | 2021-03-25T19:47:40 | 351,557,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,205 | py | from datetime import datetime
from threading import Thread
import telebot
import schedule
import time
# NOTE(review): the bot token is hard-coded in source — move it to an
# environment variable or config file before publishing.
bot = telebot.TeleBot("801359509:AAHjuBl_1xRdDHHTTacpT3Q1TSiXl_qQiCw")
# Reply keyboard with two buttons; no handler in this file reacts to them.
keyboard1 = telebot.types.ReplyKeyboardMarkup(True, True)
keyboard1.row('Привет', 'Пока')
# Shared state: last chat seen, every chat that ran /start, and the
# even/odd-week flag plus its once-per-week toggle lock.
chat_id = ''
chat_id_list = []
user_var = None
week = False
lock_is = True
@bot.message_handler(commands=['start'])
def send_welcome(message1):
    """Handle /start: register the chat for lesson reminders and send a
    greeting with an inline link to the shared course-material folder."""
    global chat_id, user_var
    chat_id = message1.chat.id
    print(message1.from_user.first_name)
    # Remember every chat that started the bot; reminders go to all of them.
    if chat_id not in chat_id_list:
        chat_id_list.append(chat_id)
    markup = telebot.types.InlineKeyboardMarkup()
    button = telebot.types.InlineKeyboardButton(text="Хаб с материалами", url="https://drive.google.com/drive"
                                                                             "/folders"
                                                                             "/16c2M4x1JY1PdvjVngOBrNG29B5Pn5p0o"
                                                                             "?usp=sharing")
    markup.add(button)
    bot.send_message(message1.chat.id,
                     '<strong>Здарова студентам!</strong>\N{Victory Hand}\nЯ бот, который будет уведомлять о парах'
                     '\N{Robot Face}\n ', parse_mode="HTML", reply_markup=markup)
def job(day, time_lesson):
    """Send the reminder for the lesson scheduled at *day*/*time_lesson*.

    Picks the lesson card for the current week parity (module-level
    ``week``), warns every registered chat that the lesson starts in 10
    minutes, then sends the card with a link button via button_generation().
    """
    global lock_is
    lesson = ''
    # NOTE(review): `datetime.weekday == 0` compares the *method object* to
    # an int and is always False, so lock_is is never re-armed here —
    # presumably datetime.today().weekday() == 0 (Monday) was intended.
    if datetime.weekday == 0:
        lock_is = True
    if week:
        if day == 'monday':
            lesson = "<strong>Пара: </strong>Машинне навчання.\n\n<strong>Викладач: </strong>професор Романюк " \
                     "В.В.\n\nБажаю хорших знань!"
        elif day == 'tuesday' and time_lesson == "11:30":
            lesson = "<strong>Пара: </strong>Інженерія програмних систем для паралельних та розподілених " \
                     "систем.\n\n<strong>Викладач: </strong>доцент Вороной С.М.\n\nБажаю хорших знань! "
        elif day == 'tuesday' and time_lesson == "15:40":
            lesson = "<strong>Пара: </strong>Сучасні технології баз даних.\n\n<strong>Викладач: </strong>Гладких " \
                     "В.М.\n\nБажаю хорших знань! "
        elif day == 'wednesday':
            lesson = "<strong>Пара: </strong>Алгоритми та технології побудови рекомендаційних " \
                     "систем.\n\n<strong>Викладач: </strong>старший викладач Височіненко М.С.\n\nБажаю хорших знань!"
        elif day == 'thursday':
            lesson = "<strong>Вітаю, сьогодні у Вас вікно!!!</strong>"
        elif day == 'friday':
            lesson = "<strong>Пара: </strong>Методологія підтримки прийняття рішень в інженерії програмного " \
                     "забезпечення.\n\n<strong>Викладач: </strong>професор Романюк " \
                     "В.В.\n\nБажаю хорших знань!"
    else:
        if day == 'monday':
            lesson = "<strong>Вітаю, сьогодні у Вас вікно!!!</strong>"
        elif day == 'tuesday' and time_lesson == "13:15":
            lesson = "<strong>Пара: </strong>Інженерія програмних систем для паралельних та розподілених " \
                     "систем.\n\n<strong>Викладач: </strong>доцент Вороной С.М.\n\nБажаю хорших знань! "
        elif day == 'tuesday' and time_lesson == "15:40":
            lesson = "<strong>Пара: </strong>Методологія підтримки прийняття рішень в інженерії програмного " \
                     "забезпечення.\n\n<strong>Викладач: </strong>професор Романюк " \
                     "В.В.\n\nБажаю хорших знань!"
        elif day == 'wednesday' and time_lesson == "14:10":
            lesson = "<strong>Пара: </strong>Сучасні технології баз даних.\n\n<strong>Викладач: </strong>Гладких " \
                     "В.М.\n\nБажаю хорших знань! "
        elif day == 'wednesday' and time_lesson == "17:10":
            lesson = "<strong>Пара: </strong>Алгоритми та технології побудови рекомендаційних " \
                     "систем.\n\n<strong>Викладач: </strong>старший викладач Височіненко М.С.\n\nБажаю хорших знань! "
        elif day == 'thursday':
            lesson = "<strong>Пара: </strong>Моделювання та верифікація програмного забезпечення.\n\n<strong>" \
                     "Викладач: </strong>кандитат технічних наук, доцент Іларіонов О.Є.; доктор технічних наук " \
                     "професор Комарова Л.О.\n\nБажаю хорших знань!"
        elif day == 'friday':
            lesson = "<strong>Пара: </strong>Машинне навчання.\n\n<strong>Викладач: </strong>професор Романюк " \
                     "В.В.\n\nБажаю хорших знань!"
    print("Chat_id_list:", chat_id_list)
    # Warn every registered chat; chats that blocked the bot are dropped.
    # NOTE(review): removing from chat_id_list while iterating it can skip
    # the following entry — consider iterating over a copy.
    for k in chat_id_list:
        msg = "Вітаю, за 10 хвилин розпочннеться пара! Підготуйтеся, згодом надішлю посилання."
        try:
            bot.send_message(k, msg)
        except:
            print("The user has stopped the bot")
            chat_id_list.remove(k)
    print(chat_id)
    button_generation(lesson, "https://github.com/illuhakupchenko/Pybot/blob/master/bot.py")
def button_generation(text, url):
    """Send *text* (HTML) with one inline "link to the lesson" button to
    every registered chat; chats that blocked the bot are dropped."""
    markup = telebot.types.InlineKeyboardMarkup()
    button = telebot.types.InlineKeyboardButton(text="Посилання на пару", url=url)
    markup.add(button)
    # NOTE(review): removing from chat_id_list while iterating it can skip
    # the next member — consider iterating over a copy.
    for member in chat_id_list:
        try:
            bot.send_message(member, text, parse_mode="HTML", reply_markup=markup)
        except:
            print("The user has stopped the bot")
            chat_id_list.remove(member)
# https://qna.habr.com/q/394496
def checker_schedule():
    """Register this week's lesson reminders and run them forever.

    The timetable alternates between two weekly layouts selected by the
    module-level ``week`` flag; each registered job calls job() with the
    day name and start time.  The polling loop only fires jobs once at
    least one chat has run /start (``chat_id`` set).

    Bug fix: the original guard was ``datetime.weekday == 6``, which
    compares the unbound method object to an int and is always False, so
    the week parity never flipped.  It now calls
    ``datetime.today().weekday()`` (6 == Sunday).
    """
    global week, lock_is
    # On Sunday, flip the week parity exactly once per week (lock_is is
    # intended to be re-armed on Monday by job()).
    if datetime.today().weekday() == 6 and lock_is:
        week = not week
        lock_is = False
    if week:
        schedule.every().monday.at("18:40").do(job, "monday", "18:40")
        schedule.every().tuesday.at("11:30").do(job, "tuesday", "11:30")
        schedule.every().tuesday.at("15:40").do(job, "tuesday", "15:40")
        schedule.every().wednesday.at("14:10").do(job, "wednesday", "14:10")
        schedule.every().thursday.at("09:00").do(job, "thursday", "09:00")
        schedule.every().friday.at("15:40").do(job, "friday", "15:40")
    else:
        schedule.every().monday.at("09:00").do(job, "monday", "09:00")
        schedule.every().tuesday.at("13:15").do(job, "tuesday", "13:15")
        schedule.every().tuesday.at("14:10").do(job, "tuesday", "14:10")
        schedule.every().wednesday.at("14:10").do(job, "wednesday", "14:10")
        schedule.every().wednesday.at("17:10").do(job, "wednesday", "17:10")
        schedule.every().thursday.at("14:10").do(job, "thursday", "14:10")
        schedule.every().friday.at("15:40").do(job, "friday", "15:40")
    # Poll pending jobs forever once a chat has been registered.
    while True:
        if chat_id != '':
            schedule.run_pending()
        time.sleep(1)
# https://bit.ly/3dnzZbh
# A separate thread is required so the scheduler loop and the bot's
# polling loop do not block each other.
Thread(target=checker_schedule).start()
def start_bot():
    """Run the Telegram long-polling loop (blocking)."""
    bot.polling(none_stop=True)
Thread(target=start_bot).start()
| [
"googolmogolua@gmail.com"
] | googolmogolua@gmail.com |
c397663ffc3fa45e69cdc90864b43c49beda5c20 | 469812282890b7113be0716688c7c7cc5fb9ed95 | /stocks/cors.py | fe68db2c94e82c5b7e23fa08b65edc761b202b64 | [] | no_license | anubhav-1/rest | 69ed64ba9c450b09bb9523aa25c460250588d75b | b53fc9c43da8e2491b889df15bc872d659828a4c | refs/heads/master | 2020-04-17T03:29:30.505684 | 2019-01-17T08:05:56 | 2019-01-17T08:05:56 | 166,185,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | class CorsMiddleware(object):
def process_response(self, req, resp):
response["Access-Control-Allow-Origin"]= "*"
return response
| [
"anubhav.gupta@crypton.internal.utradesolutions.com"
] | anubhav.gupta@crypton.internal.utradesolutions.com |
ecd23f3889afb9f418bcdf0abd5061d9edd6c3c2 | cccf178ff9513d7f41202e46aabb4ce9c5704974 | /read_csv_from_s3.py | 72fa6d49a93d2b8788790f1e586a3779158f8c6c | [] | no_license | mistwire/aws_tag_changers | 9b06fb2e5ab5e4327b60fd0deb0ed538d079146e | f838d9cd343252ca85d01b3a54305edf5cfc84c0 | refs/heads/master | 2023-07-24T08:34:49.003198 | 2023-07-18T02:17:56 | 2023-07-18T02:17:56 | 217,740,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | import boto3
from csv import DictReader
# Bucket and key of the CSV to fetch (hard-coded for this experiment).
bucket = "test-csv-pull"
file_name = "csvloggingtest.csv"
s3 = boto3.client('s3')
# 's3' is a key word. create connection to S3 using default config and all buckets within S3
obj = s3.get_object(Bucket= bucket, Key= file_name)
# get object and file (key) from bucket
# NOTE(review): iterating the streaming body yields raw byte chunks, not CSV
# rows; DictReader is imported but unused — presumably the intent was to
# decode the body and parse it with DictReader.  Confirm.
for i in obj['Body']:
print(i) | [
"chrisfwilliams@gmail.com"
] | chrisfwilliams@gmail.com |
14ff0b9eaaa24fab20c4b9e88038e39cee101426 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_136/2374.py | 3bfb9aefbafc0afcf5a26e222d920922d38daec3 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,045 | py | # Google Codejam - Cookie Clicker Alpha
def ShouldInvest(C, F, X, Gradient):
    """True when buying one more farm (cost C, +F rate) pays off before
    reaching the X-cookie target at the current production Gradient."""
    break_even_rate = F * (X - C) / C
    return Gradient < break_even_rate
def MinimumTime(C, F, X):
    """Shortest time to accumulate X cookies, buying farms greedily.

    Production starts at 2 cookies/s; each farm costs C cookies and adds
    F to the rate.  Buying keeps paying off while the rate is below
    F * (X - C) / C (the ShouldInvest criterion, inlined here); after
    that, simply wait out the final X cookies.
    """
    rate = 2
    elapsed = 0
    while rate < F * (X - C) / C:
        # Save up C cookies at the current rate and buy one more farm.
        elapsed += C / rate
        rate += F
    # No farm is worth buying any more: wait for the target directly.
    return elapsed + X / rate
def main():
    """Read T test cases of (C, F, X) from cookie.in and print one
    'Case #n: <answer>' line per case with 7-decimal precision."""
    try:
        f = open('cookie.in','r')
    except:
        print('cannot open input file!')
    # NOTE(review): when open() fails, execution continues and the next line
    # raises NameError on `f` — a return/exit inside the except block was
    # probably intended.
    T = int(f.readline())
    for t in range(1, T + 1):
        print('Case #', t, ': ', end='', sep='')
        Line = f.readline()
        Line = Line.split(' ')
        C = float(Line[0])
        F = float(Line[1])
        X = float(Line[2])
        print('{0:.7f}'.format(MinimumTime(C, F, X)))
    f.close()
    return 0
if __name__=='__main__':
    main()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
511bdcc9a23d3de38bb8c6f63f195166aade0f76 | 50ab3aa62fc8d3589604c0b2e02b97fd78703a28 | /lab06/make_a_rocket.py | c6a3169e6ad0425fffca02b3ee07c5549cc5f765 | [] | no_license | clairefan816/PythonExcercises | ca70d38b1233c6e9950d5e714f2c284742a21a7b | f8a4f581723423f4ba0904d9d7ca5a1526f394d2 | refs/heads/main | 2023-01-23T09:57:53.066419 | 2020-11-23T05:44:34 | 2020-11-23T05:44:34 | 315,205,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,703 | py | import sys as s
import math as m
WIDTH_ARG_POSITION = 1
SEGMENT_ARG_POSITION = 2
STYLE_ARG_POSITION = 3
def cone(width):
    """
    Print the rocket's nose cone.

    A centred triangle of '*' with height ceil((width - 2) / 2); for an
    even width each row gets one extra '*' so the cone stays aligned
    with the fuselage printed below it.

    width -> None
    """
    height = m.ceil((width - 2) / 2)
    extra = 1 if width % 2 == 0 else 0
    for row in range(int(height)):
        stars = 2 * row + 1 + extra
        print(' ' * int(height - row) + '*' * stars)
def fulselage(width, style):
    """
    Print one fuselage segment: `width` rows of `width` characters.

    style 'striped' fills the top half with '_' and the bottom half with
    'x'; any other value is used verbatim as the fill character.

    width, style -> None
    """
    for row in range(width):
        if style == 'striped':
            fill = '_' if row < width / 2 else 'x'
        else:
            fill = style
        print(fill * width)
def base(width):
    """
    Print the base: fins widening from roughly half the rocket's width
    down to the full width, finished with one solid row.

    width -> None
    """
    # Fin width: width // 2, reduced by one when the remaining margin is
    # odd so the fins stay symmetric.
    fin_width = width // 2
    if (width - fin_width) % 2 != 0:
        fin_width -= 1
    rows = (width - fin_width) / 2 + 1
    row = 0
    while row < rows:
        pad = (width - fin_width) / 2 - row
        print(' ' * int(pad) + '*' * int(fin_width + 2 * row))
        row += 1
    print('*' * width)
def main(argv):
    """CLI entry: make_a_rocket.py <width> <segments> [style].

    Draws a cone, <segments> fuselage sections of the given style
    ('striped' or a literal fill character, default 'x'), then the base.
    """
    if not (3 <= len(argv) <= 4):
        print("Usage: python make_a_rocket.py <width> <segments> [style]")
        exit()
    width = int(argv[WIDTH_ARG_POSITION])
    sqr_segement = int(argv[SEGMENT_ARG_POSITION])
    style = argv[STYLE_ARG_POSITION] if len(argv) == 4 else 'x'
    cone(width)
    for _ in range(sqr_segement):
        fulselage(width, style)
    base(width)
main(s.argv)
| [
"fan.yu@husky.neu.edu"
] | fan.yu@husky.neu.edu |
d25ab67c89046696d29e31710c026ae71cc91feb | 9c60a733badfb8ceacbf7d0a1e0220d9d15de50d | /biolib/SeqFeatureIO/GFF3IO.py | ce9b0fcfe1056d93f987e43db88e64348dfca575 | [
"MIT"
] | permissive | nijibabulu/biolib | b01b1c5882760474ad99b574b8a1e0a817118a67 | 81b0f35c5f78b70b1054abecb56481029cb3660a | refs/heads/master | 2020-06-27T18:52:35.984876 | 2016-08-25T16:01:46 | 2016-08-25T16:01:46 | 66,572,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,953 | py | """Supplement the missing support for GFF3 from biopython.
Taken from FastaIO"""
from Bio.SeqFeature import SeqFeature,FeatureLocation
_gff3_strand_to_numeric = { '+': 1, '-': -1, '.': 0}
_numeric_to_gff3_strand = { '1': '+', '-1': '-', '0': '.' }
#This is a generator function!
def GFF3Iterator(handle):
    """Generator function to iterate over GFF3 feature lines, yielding
    SeqFeature objects (Python 2 code).

    handle - open GFF3 file; blank lines and '#' comment lines are
    skipped.  Coordinates are converted from GFF3's 1-based convention
    to SeqFeature's 0-based one, and the attribute column is exposed as
    the (non-standard) `attributes` dict on each yielded feature.
    """
    line_no = 0
    #Skip any text before the first record (e.g. blank lines, comments)
    for line in handle:
        line = line.strip()
        line_no += 1
        if len(line) == 0 or line[0] == "#":
            continue
        try:
            ref,source,type,start,end,score,strand,frame,attributes = \
                line.split("\t")
        except:
            raise ValueError, "Problem with line %d in %s. Line was\n%s" %\
                (line_no,handle.name,line)
        # Attribute column: "key=value" pairs separated by ';'.
        attr_pairs = attributes.strip(';').split(";")
        try:
            attr_dict = dict(map(lambda x: tuple(x.split("=")), attr_pairs))
        except:
            print attr_pairs
            raise
        # gff3=1-base SeqFeature=0-base
        result = SeqFeature(location=FeatureLocation(int(start)-1,int(end)-1),
            type=type,strand=_gff3_strand_to_numeric[strand],ref=ref,ref_db=source)
        result.id = attr_dict.get("ID",None)
        result.name = attr_dict.get("Name",None)
        result.attributes = attr_dict # not an official property of SeqFeature.
        yield result
class GFF3Writer:
    """Class to write GFF3 format files."""
    def __init__(self, handle):
        """Create a GFF3 writer.
        handle - Handle to an output file, e.g. as returned
        by open(filename, "w")
        The "##gff-version 3" header is emitted lazily: write_feature()
        calls write_header() automatically before the first feature.
        """
        self.handle = handle
        self._header_written = False
        self._feature_written = False
    def write_header(self):
        """Write the GFF3 version pragma (emitted at most once)."""
        self.handle.write("##gff-version 3\n")
        self._header_written = True
    def write_feature(self, feature):
        """Write a single GFF3 feature to the file."""
        if not self._header_written:
            self.write_header()
        self._feature_written = True
        # Column 2 (source): fall back to "Unknown" when ref_db is unset/empty.
        source = getattr(feature,"ref_db","Unknown") or 'Unknown'
        # Column 6 (score): integral values bare, floats with 3 decimals,
        # '.' when the feature has no score attribute.
        try:
            if int(feature.score) == feature.score:
                score = str(int(feature.score))
            elif isinstance(feature.score,float):
                score = '%0.3f' % feature.score
            else:
                score = str(feature.score)
        except AttributeError:
            score = '.'
        except TypeError:
            score = str(feature.score)
        self.handle.write("\t".join([
            feature.ref,
            source,
            feature.type,
            str(int(feature.location.nofuzzy_start+1)), # gff3=1-base SeqFeature=0-base
            str(int(feature.location.nofuzzy_end+1)),
            score,
            _numeric_to_gff3_strand[str(feature.strand)],
            getattr(feature,'phase','.') or '.',
            ]))
        self.handle.write("\t")
        # Column 9: "key=value;" pairs from the non-standard attributes dict.
        if hasattr(feature,"attributes"):
            for k,v in feature.attributes.items():
                self.handle.write("%s=%s;" % (k,v))
        self.handle.write("\n")
    def write_file(self, features): # should be in a superclass...
        """Write every feature in *features*; return the number written."""
        written = 0
        for feature in features:
            self.write_feature(feature)
            written += 1
        return written
| [
"robert.zimmermann@univie.ac.at"
] | robert.zimmermann@univie.ac.at |
27002e3299c4f583c1b03c101a7962e936bb3231 | d4c4407c86d0f987f0211dc3f40f6f1e241e9faa | /hanghui/experiment.py | e8d43b1808c5f5b4b17227d1314dbaa8150fd808 | [] | no_license | liruixl/CheckProcess | 31ef648a2ea4257b20da0d4855ac7f144764c642 | 2726817c91191e25e5b93c1f4ca21cb293c07c3f | refs/heads/master | 2023-03-18T05:28:25.327554 | 2021-03-16T14:15:19 | 2021-03-16T14:15:19 | 272,733,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,780 | py | import cv2
import numpy as np
def find_tuanhua_roi():
    """Locate the large UV-pattern region in a test image.

    Binarises the image with Otsu thresholding, fills outer contours whose
    area exceeds half the image into the `roi` mask, and collects bounding
    rectangles/areas of all outer contours.  Displays the intermediate
    images in OpenCV windows (blocks on key presses).
    """
    imFilename = r'img_tuanhua/tuanhua_uv_4.jpg'
    # imFilename = r'img_hanghui/zhongnong_uv.jpg'
    # imFilename = r'img_dect/tuanhua_1.jpg'  # tampered/forged sample
    print("Reading image to align : ", imFilename)
    im = cv2.imread(imFilename, cv2.IMREAD_COLOR)
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    # _, gray = cv2.threshold(gray, thre, 255, cv2.THRESH_BINARY)
    _, gray = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # Find only the outermost contours (RETR_EXTERNAL vs RETR_CCOMP).
    # NOTE(review): 3-value unpacking matches the OpenCV 3.x findContours
    # API; OpenCV 4 returns only (contours, hierarchy) — confirm the
    # pinned OpenCV version.
    _, contours, hierarchy = cv2.findContours(gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    H, W = gray.shape
    roi = np.zeros((H, W), dtype=np.uint8)
    # Draw all contours on the colour image (red, for visual inspection).
    cv2.drawContours(im, contours, -1, (0, 0, 255), 2, lineType=cv2.LINE_AA)
    for cnt in contours:
        if cv2.contourArea(cnt) > H*W//2:
            cv2.drawContours(roi, [cnt], 0, 255, -1, lineType=cv2.LINE_AA)
    # Smooth the contours (disabled)
    # median = cv2.medianBlur(gray, 5)
    # _, contours, hierarchy = cv2.findContours(gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    # roi_smooth = np.zeros((H, W), dtype=np.uint8)
    # cv2.drawContours(roi_smooth, contours, -1, 255, -1, lineType=cv2.LINE_AA)
    # Show the intermediate images.
    cv2.imshow('gray', gray)
    cv2.imshow('Contours', im)
    cv2.imshow('roi', roi)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    CONTOURS_1 = contours  # kept around in case it is needed later
    RECTS_1 = []
    AREA_1 = []
    for i in range(len(contours)):
        cnt = contours[i]
        x, y, w, h = cv2.boundingRect(cnt)
        area = cv2.contourArea(cnt)
        RECTS_1.append([x, y, x + w, y + h])
        AREA_1.append(area)
    cv2.waitKey(0)
if __name__ == '__main__':
find_tuanhua_roi() | [
"1176867093@qq.com"
] | 1176867093@qq.com |
1a2eb32ad0cc9c0b8701c6109716a32382c95ba0 | a690a23c322033e4946940099f124a1136964929 | /ardoq/task1/task1.py | 98c1be97eb28a9b99f0b0b4a3088e3e21ca94f74 | [] | no_license | erlendb/Kodeoppgaver-jobbintervju | 8cd11191eafd0f27770b634b0a03fe8eedad1c93 | bd6650b4bc11addf603cd32b209fb2a01d0afc37 | refs/heads/main | 2023-01-14T00:04:05.271368 | 2020-11-20T14:04:57 | 2020-11-20T14:04:57 | 314,567,465 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | from copy import copy
def multiply_n_highest_in_list(list, n):
    """Return the product of the n largest values in `list`.

    The input list is never modified.  When n exceeds the list length the
    whole list is used; when n <= 0 (or the list is empty) the result is
    the empty product, 1.
    """
    product = 1
    # sorted() builds a new descending copy, so the caller's list is safe;
    # the slice caps n at the list length and handles n <= 0.
    for value in sorted(list, reverse=True)[:max(n, 0)]:
        product *= value
    return product
if __name__ == '__main__':
    # Demo: product of the 3 largest values of the sample list.
    list = [1, 2, -3, 4, 5, -7]
    n = 3
    print(multiply_n_highest_in_list(list, n))
print(list) | [
"erlend@elendig.no"
] | erlend@elendig.no |
34123afb0b325b58f7a6061f42f343964cb69254 | 7f10b68de8672fdd9a2b50f3f9617397c7f1f7b2 | /utils.py | 845c6eb32137078c15aa9672c9b8921da7ebdc24 | [] | no_license | guy94/SearchEngine | 15c54e6f6476706a1388443e05f4563c4dcee8a4 | 26114fc2dfe3947a549469cb763d10f2d071d7b7 | refs/heads/master | 2023-03-06T19:24:34.829803 | 2021-02-16T13:40:38 | 2021-02-16T13:40:38 | 309,371,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 939 | py | import pickle
def save_obj(obj, name):
    """
    Pickle *obj* to ``<name>.pkl`` using the highest protocol.
    :param obj: object to serialise
    :param name: path of the pickle file, without the ``.pkl`` suffix
    :return: -
    """
    path = name + '.pkl'
    with open(path, 'wb') as handle:
        pickle.dump(obj, handle, protocol=pickle.HIGHEST_PROTOCOL)
def load_obj(name):
    """
    Unpickle and return the object stored in ``<name>.pkl``.
    :param name: path of the pickle file, without the ``.pkl`` suffix
    :return: the deserialised object
    """
    path = name + '.pkl'
    with open(path, 'rb') as handle:
        return pickle.load(handle)
def load_list(file):
    """
    Unpickle and return the object stored in an already-open file.
    :param file: open binary file object positioned at the pickle data
        (unlike the sibling helpers, this takes a file object, not a name)
    :return: the deserialised object
    """
    return pickle.load(file)
def load_inverted_index(name):
    """
    Unpickle and return the inverted index stored in ``<name>.pkl``.
    :param name: path of the inverted index file, without ``.pkl``
    :return: the deserialised inverted index
    """
    path = name + '.pkl'
    with open(path, 'rb') as handle:
        return pickle.load(handle)
| [
"guyklinger94@gmail.com"
] | guyklinger94@gmail.com |
6b9cce7d4954a71bc35a69a3320b8607b60ba58b | bd2e7913176193deff8db9253ea07765223c33e3 | /query.py | cca979929b132406bc7a2d24ed38355c434ff2b0 | [] | no_license | nan-li/restaurant-meetup-app | 638335cdba5b618cbc56873815356629ea758f35 | ee0c504d2c096e8d2e42b255bdfa1210dbaced84 | refs/heads/master | 2023-04-01T06:00:03.406691 | 2021-04-09T20:44:05 | 2021-04-09T20:44:05 | 337,905,243 | 4 | 0 | null | 2021-03-02T08:04:50 | 2021-02-11T01:53:21 | JavaScript | UTF-8 | Python | false | false | 910 | py | """Test queries"""
from model import db, User, Meetup, Restaurant, Message, Notification, Comment, favorites, user_meetups
import crud
def show_all():
    """Show all users and their restaurant and meetup information."""
    # TODO: make these queries more efficient
    # Eager-load favorites to avoid one extra query per user for that relation.
    users = User.query.options(db.joinedload('favorites')).all()
    for user in users:
        print(user.fname, user.lname)
        for rest in user.favorites:
            print("- ", rest.name)
        print("Hosted Meetups")
        for meetup in user.hosted_meetups:
            print("*** Host: ", meetup.restaurant)
        print("Attending Meetups")
        for meetup in user.meetups:
            print("  Meetup: ", meetup.restaurant)
        print("\n")
if __name__ == '__main__':
    # NOTE(review): imports kept local to the guard — presumably to avoid a
    # circular import at module load time; confirm.
    from server import app
    from model import connect_to_db
    connect_to_db(app, echo=False)
| [
"nanftw@gmail.com"
] | nanftw@gmail.com |
b4b47a4e29b3294d7da188d17225586ad465cec6 | 1a3362c92082ac8c65ebfcd0cbc548b7aab14514 | /tests/test_events/artifact_published/test_artifact_published.py | f5b771f37c8ea158db33c7dbd6f013915d0d4f0f | [
"Apache-2.0"
] | permissive | henning-roos/eiffel-graphql-api | a8829d010380fc13ea130b375f610e11997fa27e | 9bffe27478a088e5438762e0104d1901c7baab01 | refs/heads/master | 2022-10-10T05:19:00.377557 | 2020-02-05T13:23:22 | 2020-02-05T13:23:22 | 264,891,498 | 0 | 0 | Apache-2.0 | 2020-05-18T09:29:07 | 2020-05-18T09:29:06 | null | UTF-8 | Python | false | false | 5,084 | py | # Copyright 2019 Axis Communications AB.
#
# For a full list of individual contributors, please see the commit history.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
import pytest
import logging
from unittest import TestCase
from .event import *
from .queries import *
from tests.lib.query_handler import GraphQLQueryHandler
logging.basicConfig(
level=logging.DEBUG
)
class TestArtifactPublished(TestCase):
    """Integration tests for querying EiffelArtifactPublished events via the
    GraphQL API at http://127.0.0.1:12345/graphql; fixture events are
    inserted before and removed after each test."""
    @classmethod
    def setUpClass(cls):
        # One shared query handler and the two fixture events used by all tests.
        cls.query_handler = GraphQLQueryHandler("http://127.0.0.1:12345/graphql")
        cls.events = [
            eiffel_artifact_created_event(),
            eiffel_artifact_published_event()
        ]
        cls.logger = logging.getLogger("TestArtifactPublished")
    def setUp(self):
        self.logger.info("\n")
        for event in self.events:
            insert(event)
    def tearDown(self):
        for event in self.events:
            remove(event)
    def test_artifact_published_data(self):
        """Test that it is possible to query 'data' from artifact published.
        Approval criteria:
            - It shall be possible to query 'data' from graphql.
            - Data shall be:
                - locations: {"type": "OTHER", "uri": "http://anotherplace.com"}
        Test steps:
            1. Query 'data' from ArtifactPublished in Graphql.
            2. Verify that the response is correct.
        """
        self.logger.info("STEP: Query 'data' from ArtifactPublished in Graphql.")
        self.logger.debug(DATA_ONLY)
        response = self.query_handler.execute(DATA_ONLY)
        self.logger.debug(pretty(response))
        self.logger.info("STEP: Verify that the response is correct.")
        data = self.query_handler.get_node(response, "data")
        self.assertIsInstance(data, dict)
        self.assertGreater(len(data), 0)
        self.assertDictEqual(
            data,
            {
                "locations": [
                    {
                        "type": "OTHER",
                        "uri": "http://anotherplace.com"
                    }
                ]
            }
        )
    def test_artifact_published_artifact_link(self):
        """Test that it is possible to query a valid artifact link on artifact published.
        Approval criteria:
            - Graphql shall return an ArtifactCreated event when requesting Artifact.
        Test steps:
            1. Query 'links.Artifact' from ArtifactPublished in Graphql.
            2. Verify that the returned event is a ArtifactCreated.
        """
        self.logger.info("STEP: Query 'links.Artifact' from ArtifactPublished in Graphql.")
        self.logger.debug(LINKS_ONLY)
        response = self.query_handler.execute(LINKS_ONLY)
        self.logger.debug(pretty(response))
        self.logger.info("STEP: Verify that the returned event is a ArtifactCreated.")
        link_meta = self.query_handler.get_node(response, "meta")
        self.assertDictEqual(link_meta, {"id": "7c2b6c13-8dea-4c99-a337-0490269c374d",
                                         "type": "EiffelArtifactCreatedEvent"})
    def test_artifact_published_meta(self):
        """Test that it is possible to query 'meta' from artifact published.
        Approval criteria:
            - It shall be possible to query 'meta' from graphql.
            - Data returned shall be correct:
                - version: "3.0.0"
                - type   : "EiffelArtifactPublishedEvent"
                - id     : "031c2f9a-92f0-4cac-9320-e0113adafd7d"
                - time   : 1575981255471
        Test steps:
            1. Query 'meta' from ArtifactPublished in Graphql.
            2. Verify that the response is not empty.
            3. Verify that meta data returned correctly.
        """
        self.logger.info("STEP: Query 'meta' from ArtifactPublished in Graphql.")
        self.logger.debug(META_ONLY)
        response = self.query_handler.execute(META_ONLY)
        self.logger.debug(pretty(response))
        self.logger.info("STEP: Verify that response is not empty.")
        meta = self.query_handler.get_node(response, "meta")
        self.assertIsInstance(meta, dict)
        self.assertGreater(len(meta), 0)
        self.logger.info("STEP: Verify that meta data returned correctly.")
        self.assertEqual(meta.get("version"), "3.0.0")
        self.assertEqual(meta.get("type"), "EiffelArtifactPublishedEvent")
        self.assertEqual(meta.get("id"), "031c2f9a-92f0-4cac-9320-e0113adafd7d")
        self.assertEqual(meta.get("time"), "2019-12-10T13:34:15.471000")
| [
"tobiaspn@axis.com"
] | tobiaspn@axis.com |
4385449dcabdb8a7e0edd0efa6d13f5b22104d81 | 2be1a1249c7d5f7783d31098b176087667897c12 | /venv/bin/django-admin.py | 97e02a3349610dc305e31efafe1c84bb6ee10ce5 | [] | no_license | dmstyx/Django_RF_Ecommerce | 0691f116ec2a111891afe4b36d6199b7dd29c393 | 3d2584beec2300626af0586138cf7efc68f9e5ab | refs/heads/master | 2023-07-15T17:30:22.004035 | 2021-08-26T10:55:30 | 2021-08-26T10:55:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 708 | py | #!/Users/carlos/Desktop/PythonCourse/Django_RF_Ecommerce/venv/bin/python3
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| [
"synova_matare@hotmail.co.uk"
] | synova_matare@hotmail.co.uk |
7ff0eec880370f1067cbf023397bce134a271a40 | 43b8292d9d00cddb17484c1665e862963f962d6b | /advent_of_code_2020/day_10_test.py | 8c6a55e03654bb302a9358eb112028ccf673be12 | [] | no_license | acviana/advent-of-code-2020 | a85aab04426c3476cd6e90e61f793befaa6000bf | 4a92f0db321ff35b737b11a7fa0436fc259670b5 | refs/heads/main | 2023-03-20T21:15:35.267514 | 2021-03-06T12:22:59 | 2021-03-06T12:22:59 | 317,749,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 749 | py | from advent_of_code_2020.day_10 import (
calc_differences,
calc_all_paths,
parse_data,
)
TEST_DATA = """16
10
15
5
1
11
7
19
6
12
4"""
TEST_DATA = [item for item in TEST_DATA.split("\n")]
TEST_PARSED_DATA = [0, 1, 4, 5, 6, 7, 10, 11, 12, 15, 16, 19, 22]
TEST_DATA_2 = """28
33
18
42
31
14
46
20
48
47
24
23
49
45
19
38
39
11
1
32
25
35
8
17
7
9
4
2
34
10
3"""
TEST_DATA_2 = [item for item in TEST_DATA_2.split("\n")]
def test_parse_data():
assert parse_data(TEST_DATA) == TEST_PARSED_DATA
def test_calc_differences():
assert calc_differences(TEST_PARSED_DATA) == [1, 3, 1, 1, 1, 3, 1, 1, 3, 1, 3, 3]
def test_calc_options():
calc_all_paths(TEST_PARSED_DATA) == 8
calc_all_paths(parse_data(TEST_DATA_2)) == 19208
| [
"alexcostaviana@gmail.com"
] | alexcostaviana@gmail.com |
1287d939b80a8b381b0780f7e75ea61fee6bfb37 | 78859c9b76dbfc81de1be15d0916ce189d66f650 | /Python/Scripts/venv/bin/pip3.6 | 9325619779691158b1a639d9615bed094629637a | [] | no_license | wcbrown86/workspace | 8216d66b49fad70993699f6baa3b7b64355b79cc | 9f33c6f7e01b8d9af83f5481e0e755ab84ab4ffe | refs/heads/master | 2022-12-24T19:31:43.640744 | 2020-07-14T05:08:10 | 2020-07-14T05:08:10 | 138,726,896 | 0 | 0 | null | 2022-12-16T07:46:30 | 2018-06-26T11:12:57 | Python | UTF-8 | Python | false | false | 423 | 6 | #!/Users/williambrown/Documents/Scripts/Utlity_Scripts/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.6'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.6')()
)
| [
"brownwc86@gmail.com"
] | brownwc86@gmail.com |
96b6503aa91ab1f80512a325ee7bc4ebe1634202 | 2f6e97bf9b6835701a15c07d3e1a6d13f3485d5b | /internal/app/hb-parser/replay.py | e5d40951ecd12cc9c6012e83c7374ff961a5a8be | [
"MIT"
] | permissive | Purely-Imaginary/referee-go | 361ac49e55f79d89f3cd0b96919c7f6ee221062f | 07756665a60070594684e473332aa6d1c9ede929 | refs/heads/master | 2023-04-27T07:10:53.723554 | 2021-05-11T13:04:16 | 2021-05-11T13:04:16 | 279,971,506 | 1 | 0 | MIT | 2020-09-16T08:43:18 | 2020-07-15T20:30:20 | JavaScript | UTF-8 | Python | false | false | 2,434 | py | from enum import Enum
import struct, json
class Team(Enum):
Red = 0
Blue = 1
class State(Enum):
Menu = 0
Pause = 1
Warmup = 2
Game = 3
Goal = 4
class Input(Enum):
Up = 1
Down = 2
Left = 4
Right = 8
Kick = 16
def test(self, input_):
return input_ & self.value == self.value
class Disc:
def __init__(self, x, y, vx, vy):
self.x, self.y, self.vx, self.vy = x, y, vx, vy
class Player:
def __init__(self, id_, input_, kick, team, disc):
self.id, self.input, self.kick, self.team, self.disc = id_, input_, kick, team, disc
self.input = [bool(input_ & (1 << n)) for n in range(5)]
class Game:
def __init__(self, replayTime, state, gameTime, score, overtime, players, ball):
self.replayTime, self.state, self.gameTime, self.score, self.overtime, self.players, self.ball = replayTime, state, gameTime, score, overtime, players, ball
def unpack2(struct_, buffer, offset):
return struct_.unpack_from(buffer, offset), offset + struct_.size
_Disc_s = struct.Struct('>dddd')
def _Disc(buffer, offset):
(x, y, vx, vy), offset = unpack2(_Disc_s, buffer, offset)
return Disc(x, y, vx, vy), offset
_Player_s = struct.Struct('>BBBB')
def _Player(buffer, offset):
(id_, input_, kick_, team_), offset = unpack2(_Player_s, buffer, offset)
disc, offset = _Disc(buffer, offset)
return Player(id_, input_, bool(kick_), Team(team_), disc), offset
_Game_s1 = struct.Struct('>dB')
_Game_s2 = struct.Struct('>dBBBB')
def _Game(buffer, offset):
(replayTime, state_), offset = unpack2(_Game_s1, buffer, offset)
state = State(state_)
if state == State.Menu:
gameTime, score, overtime, ball, players = None, None, None, None, None
else:
(gameTime, redScore, blueScore, overtime_, playersCount), offset = unpack2(_Game_s2, buffer, offset)
overtime = bool(overtime_)
score = (redScore, blueScore)
players = []
for _ in range(playersCount):
player, offset = _Player(buffer, offset)
players.append(player)
ball, offset = _Disc(buffer, offset)
return Game(replayTime, state, gameTime, score, overtime, players, ball), offset
def Replay(buffer):
separator = b'\r\n\r\n'
offset = buffer.find(separator)
names = {int(k): v for k, v in json.loads(buffer[:offset].decode('utf8')).items()}
offset += len(separator)
games = []
while offset < len(buffer):
game, offset = _Game(buffer, offset)
games.append(game)
return names, games
| [
"ao@tksolutions.pl"
] | ao@tksolutions.pl |
e11f0dd605afd1a4880e850dee65ebfcfede424c | 75095e198e782d5c735d1a2bcfd2fc8ba714f6a3 | /def/def_demo.py | fe1f0a25dca1df1029660c3cf9b76d65953e1374 | [] | no_license | zhouxiaofeng12/PythonDemo | 81d8167bfc4682b93789c1cf6396e249dd4fb09d | 0004a695abbca922900b6732cf273ba50544b477 | refs/heads/master | 2020-12-03T08:17:57.236073 | 2017-09-07T12:49:35 | 2017-09-07T12:49:35 | 95,669,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | #!/user/bin/python
#-*- coding:UTF-8 -*-
##函数==
def printme(str):
'打印传入的字符串到标准显示设备上'
print str
return
printme('我要调用用户自定义函数')
printme('再次调用同一个数据')
##python传入不可变对象实例
def ChangeInt(a):
a=10
b=2
ChangeInt(2)
print b
def changeme(mylist):
mylist.append([2,3223,133]);
print "函数内取值",mylist
return
mylist=[10,23,23,2424];
changeme(mylist);
print '函数外取值',mylist
| [
"wangshijie@wangshijiedeMacBook-Pro.local"
] | wangshijie@wangshijiedeMacBook-Pro.local |
550c0de84f2fb3a9f49462534f551540f2a83251 | f47edebe28f491f94b4fd30c2e4d8b29e1abc2a1 | /src/models/configs/model_operation.py | f56635621bd7c1d91bc7c576904309f5abe043dd | [
"MIT"
] | permissive | Nardri/rbac-service | 4a2053a38597315b4ef1ce5b877cf031e64eae12 | c5cf6baf60e95a7790156c85e37c76c697efd585 | refs/heads/develop | 2022-10-04T06:17:30.766467 | 2020-01-31T11:06:03 | 2020-01-31T11:06:03 | 226,638,869 | 0 | 0 | MIT | 2022-09-16T18:19:46 | 2019-12-08T08:48:12 | Python | UTF-8 | Python | false | false | 1,072 | py | """Common model operations"""
from .database import database as db
class ModelOperationsMixin:
"""Mixin Model operation"""
__abstract__ = True
def save(self):
"""Save to the database."""
db.session.add(self)
db.session.commit()
return self
def delete(self):
"""Delete from database."""
db.session.delete(self)
db.session.commit()
return self
def update_(self, **kwargs):
"""Update entries.
Args:
**kwargs: kwargs to update
Returns:
object: Model Instance
"""
for field, value in kwargs.items():
setattr(self, field, value)
db.session.commit()
return self
@classmethod
def query_(cls, **kwargs):
"""
Args:
**kwargs:
Returns:
Object :
"""
if not kwargs:
instance = cls.query.filter_by().order_by(cls.created_at)
else:
instance = cls.query.filter_by(**kwargs)
return instance
| [
"nwokeochavictor@gmail.com"
] | nwokeochavictor@gmail.com |
312cb8289eff20e438b1f6bde7a166c75d60d7a6 | 2372e09f0d0c82f2dcff992dbb44671ddadceacb | /dne-security-code/intro-umbrella/mission/umbrellamission.py | 8836015edeb49fd919a6f9d7d2320d41383377ad | [] | no_license | YarikVznk/Python_cisco | 6c32556f3d0372b6dd8dde3e5729c3465480c3f5 | e969e6c463d03fb285ebdd0a288c709af907be6a | refs/heads/master | 2022-12-10T12:34:34.631662 | 2019-12-04T10:34:56 | 2019-12-04T10:34:56 | 225,835,492 | 0 | 0 | null | 2022-12-08T05:24:41 | 2019-12-04T10:02:10 | Python | UTF-8 | Python | false | false | 7,994 | py | #!/usr/bin/env python
"""Mission - Cisco Umbrella
This is your research step in the Zero-day Workflow.
Copyright (c) 2018-2019 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from datetime import datetime
import requests
import socket
import configparser
import json
import sys
from pathlib import Path
import webexteamssdk
from crayons import blue, green, red
from requests.packages.urllib3.exceptions import InsecureRequestWarning
# Locate the directory containing this file and the repository root.
# Temporarily add these directories to the system path so that we can import
# local files.
here = Path(__file__).parent.absolute()
repository_root = (here / ".." / "..").resolve()
sys.path.insert(0, str(repository_root))
sys.path.insert(0, str(repository_root))
from env_lab import UMBRELLA # noqa
from env_user import UMBRELLA_ENFORCEMENT_KEY
from env_user import UMBRELLA_INVESTIGATE_KEY # noqa
from env_user import WEBEX_TEAMS_ACCESS_TOKEN
from env_user import WEBEX_TEAMS_ROOM_ID
# Disable insecure request warnings
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def is_valid_ipv4_address(address):
try:
socket.inet_pton(socket.AF_INET, address)
except AttributeError: # no inet_pton here, sorry
try:
#print(address)
socket.inet_aton(address)
except socket.error:
return False
return address.count('.') == 3
except socket.error: # not a valid address
return False
return True
# Read the config file to get settings
enforcement_api_key = UMBRELLA_ENFORCEMENT_KEY
time = datetime.now().isoformat()
investigate_api_key = UMBRELLA_INVESTIGATE_KEY
# URL needed to do POST requests
event_url = UMBRELLA.get("en_url")
# URL needed for POST request
url_post = event_url + '?customerKey=' + enforcement_api_key
inv_u = UMBRELLA.get("inv_url")
#TODO: finish the URL to get the Status and Category of a domain!
env_lab.print_missing_mission_warn(env_lab.get_line())
#create header for authentication and set limit of sample return to 1
headers = {
'Authorization': 'Bearer ' + investigate_api_key,
'limit': '1'
}
#print(url_post)
def get_DomainStatus(getUrl, domain):
#print(getUrl)
req = requests.get(getUrl, headers=headers)
if(req.status_code == 200):
output = req.json()
domainOutput = output[domain]
domainStatus = domainOutput["status"]
if(domainStatus == -1):
print("SUCCESS: The domain %(domain)s is found MALICIOUS at %(time)s" %
{'domain': domain, 'time': time})
return "bad"
elif(domainStatus == 1):
#print("SUCCESS: The domain %(domain)s is found CLEAN at %(time)s" %
#{'domain': domain, 'time': time})
return "clean"
#TODO: check if else the domain status is risky
elif(domainStatus == 0):
print("SUCCESS: The domain %(domain)s is found UNDEFINED / RISKY at %(time)s" %
{'domain': domain, 'time': time})
return "risky"
else:
print("An error has ocurred with the following code %(error)s, please consult the following link: https://docs.umbrella.com/investigate-api/" %
{'error': req.status_code})
return "error"
def readIocsFile(filename):
with open (filename, 'r') as fp:
shalist = json.loads(fp.read())
return shalist
def write_risky_domains_for_firewall(filenamed, domainlist):
with open(filenamed, "w") as file:
json.dump(domainlist, file, indent=2)
def removeDups(list):
domain_list_r = []
domin_filter_ip = []
domain_final = []
for i in list:
if i not in domain_list_r:
domain_list_r.append(i)
domain_filter_ip = domain_list_r
print("We found dulicates and pruned the list :\n")
return domain_filter_ip
def handleDomains(filename):
try:
domain_list = readIocsFile(filename)
time = datetime.now().isoformat()
domain_list_f = []
#TODO: call the correct function to remove duplicate domains from the domain list
#domain_list = MISSION
env_lab.print_missing_mission_warn(env_lab.get_line())
#TODO: loop through every domain in the domain list HINT: for ... in ...:
for a in domain_list:
print(f"Working on {domain} .....")
get_url = investigate_url + domain + "?showLabels"
status = get_DomainStatus(get_url, domain)
if(status != "error"):
if ((status == "bad") or (status == "risky")):
post_Enforcement(domain)
domain_list_f.append(domain)
else:
print(f"Found clean domain, ignoring enforcement on {domain}")
else:
print("got error from Umbrella investigate")
#Let's save another file with Umbrella Disposition on Domains
# so that we block only bad & risky domains on firewalls
filenamed = repository_root / "mission-data/riskydomains.json"
write_risky_domains_for_firewall(filenamed, domain_list_f)
except KeyboardInterrupt:
print("\nExiting...\n")
def post_Enforcement(domdata):
data={
"alertTime": time + "Z",
"deviceId": "ba6a59f4-e692-4724-ba36-c28132c761de",
"deviceVersion": "13.7a",
"dstDomain": domdata,
"dstUrl": "http://" + domdata + "/",
"eventTime": time + "Z",
"protocolVersion": "1.0a",
"providerName": "Security Platform"
}
request_post = requests.post(url_post, data=json.dumps(data), headers={
'Content-type': 'application/json', 'Accept': 'application/json'})
if(request_post.status_code == 202):
print("\n")
print(f"SUCCESS: {domdata} BLOCKED!!")
print("\n")
# error handling
else:
print("An error has ocurred with the following code %(error)s, please consult the following link: https://docs.umbrella.com/investigate-api/" %
{'error': request_post.status_code})
if __name__ == "__main__":
# Save the MAC addresses of the endpoints where malware executed to a JSON
# file. In the ISE Mission we will read this file and quarantine these
# endpoints.sha256-list.json
domainlist_path = repository_root / "mission-data/domainlist.json"
handleDomains(domainlist_path)
#TODO: initialize the teams object with the webexteamssdk using your access token
teams.messages.create(
roomId=WEBEX_TEAMS_ROOM_ID,
markdown=f"**Umberlla Mission completed!!!** \n\n"
f"I checked the status of the domains generated by ThreatGrid using Umbrella Investigate! \n\n"
f"I also blocked DNS lookup using Umbrella Enforcement API for Malicious and Risky Domains"
)
print(green("Umbrella Mission Completed!!!"))
| [
"yarik.vznk@gmail.com"
] | yarik.vznk@gmail.com |
d9508c77ce6a5869b179fdf31d94ffeefa784cfd | 800b5166148d4e3cd03825d7d20e2900fbc6c789 | /report_form/migrations/0016_helperdataform_year_date.py | 42b2c55917bfd96ca6159e3ab7b4b2c5526990c0 | [] | no_license | JiSuPiaoYi/dawufupin | 4ffc979a93502eb576776673c98aaeb16021827e | 57756a501436fabe9b27ebca2e80e60932da30dc | refs/heads/master | 2020-04-07T11:37:35.728108 | 2018-11-20T09:09:50 | 2018-11-20T09:09:50 | 158,334,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2018-06-01 13:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('report_form', '0015_auto_20180531_1322'),
]
operations = [
migrations.AddField(
model_name='helperdataform',
name='year_date',
field=models.CharField(blank=True, db_column='year_date', max_length=10),
),
]
| [
"360510132@qq.com"
] | 360510132@qq.com |
08a9bc6f942bd23a66be1813fc2a38ef6adaad56 | eab2cc4e9c50cf23c158372d2ca2d11db48d4a00 | /untitled2/venv/bin/chardetect | 1ce0cb2702fc6a32e9ffd2f99943229db3dca2f2 | [] | no_license | DatRakumo/Project_Python_Basic | 2ee211352119fbd78aa01093b8013c509c52a0ed | 8275318363e2120afcfe6387df4661a745bfde35 | refs/heads/master | 2022-04-16T14:01:15.042750 | 2020-04-10T22:25:15 | 2020-04-10T22:25:15 | 254,747,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | #!/home/datnv/PycharmProjects/untitled2/venv/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"datnv@rakumo.vn"
] | datnv@rakumo.vn | |
5de256e32a402880bdf25291d2565540e40c7a5a | 47a8663886adfe3782fb0ac2d8818c5217300a14 | /setup.py | ee32b51fe99888f2bf39853fc8322648448df683 | [] | no_license | pjb-asl/de_tech_test_trial_run | a58f1251d5017683bc0d324ad65d81df7c56ceba | 8a1f18ccdbc19c270b9b68077edff738043c2da6 | refs/heads/master | 2022-11-30T13:16:33.332762 | 2020-08-14T10:09:48 | 2020-08-14T10:09:48 | 286,747,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | # setup.py
import setuptools
REQUIRED_PACKAGES = []
PACKAGE_NAME = 'my_package'
PACKAGE_VERSION = '0.0.1'
setuptools.setup(
name=PACKAGE_NAME,
version=PACKAGE_VERSION,
description='Example project',
install_requires=REQUIRED_PACKAGES,
packages=setuptools.find_packages(),
)
| [
"phillip@agentsoftware.net"
] | phillip@agentsoftware.net |
b4f56b1eea627129fcefd1fe93b9c6f1fdf811e5 | 04f7c2480bd25fedd7152e9ea9a754c77ce99f67 | /templates/vendor/apps.py | 9858a4b48b9651767c1d90455834fed619a82287 | [] | no_license | gabelatendai/realwedding | b14f43bc3d1e874d0267b3f6fc500958cfa5cd1d | 8f206a809afb044f95e72bd51ecfbf8d4389eee4 | refs/heads/master | 2020-12-15T17:59:31.376889 | 2020-01-20T21:46:26 | 2020-01-20T21:46:26 | 235,200,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | from django.apps import AppConfig
class VendorConfig(AppConfig):
name = 'templates.vendor'
| [
"gabela.musodza33@gmail.com"
] | gabela.musodza33@gmail.com |
3be99a44370990ebfa4eadf7d43901928505d70d | 1b7bc114f89f036d09fe32e0760cd1565fe63b5a | /Day 18/day18_2.py | be1872de4208eccdc81549b76b0be2a3b1a2993a | [] | no_license | Anshuman-UCSB/2015-Advent-of-Code | 86125f454c299a2aeed384108feab2b94a119a82 | b108a1bd925cbcb8129b673e3b25d0cc768e0f24 | refs/heads/master | 2020-12-02T03:12:33.915887 | 2020-01-01T00:23:10 | 2020-01-01T00:23:10 | 230,868,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,696 | py | with open("day18.txt") as file:
data = file.read().splitlines()
grid = []
for line in data:
grid.append([])
for val in line:
if val == ".":
grid[-1].append(0)
else:
grid[-1].append(1)
temp = [0 for i in range(len(grid))]
tgrid = [temp.copy() for i in range(len(grid))]
def printGrid():
for line in grid:
for val in line:
if val == 1:
print("#",end="")
else:
print(".",end="")
print()
def getNeighbors(x,y):
total = 0
for i in range(-1,2):
for j in range(-1,2):
try:
if i!=0 or j!=0:
if y+j != -1 and x+i != -1:
total+=grid[y+j][x+i]
except:
pass
return total
def update():
grid[0][0] = 1
grid[0][-1] = 1
grid[-1][0] = 1
grid[-1][-1] = 1
for x in range(len(grid)):
for y in range(len(grid)):
tgrid[y][x] = 0
if grid[y][x] == 1:
if getNeighbors(x, y) in [2,3]:
tgrid[y][x] = 1
else:
tgrid[y][x] = 0
elif grid[y][x] == 0:
if getNeighbors(x, y) == 3:
tgrid[y][x] = 1
else:
tgrid[y][x] = 0
tgrid[0][0] = 1
tgrid[-1][0] = 1
tgrid[0][-1] = 1
tgrid[-1][-1] = 1
for x in range(len(grid)):
for y in range(len(grid)):
grid[y][x] = tgrid[y][x]
printGrid()
count = 0
while count!=100:
count+=1
print(count)
update()
#printGrid()
total = 0
for row in grid:
total+=sum(row)
print(total)
| [
"anshuman@ucsb.edu"
] | anshuman@ucsb.edu |
603a26cce337d9dbecac369be965211011a3f6da | 9626aee30344961b261522e0fcb9ffdfd03665a0 | /Modelsim/multisim_parser/multisim_parser.py | 7fb8e312fb5f5695cb35540d9c2f12447438b26f | [] | no_license | AntonKudriavcev/DDSynthesis | 7c11ea8cfbdabf1d030b968ee5f6f82560d28aa8 | bbd74660d65a9cd05e339a82d9b3f56c118ff0a1 | refs/heads/master | 2023-01-29T02:58:45.001742 | 2020-12-08T14:31:55 | 2020-12-08T14:31:55 | 294,670,179 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,051 | py | ##-----------------------------------------------------------------------------
##-----------------------------------------------------------------------------
import matplotlib.pyplot as plt
import numpy as np
##-----------------------------------------------------------------------------
f_sampling = 13e9 ## Hz
f_carrier = 1.3e9 + 0 ## Hz
t_impulse = 1e-6 ##
vobulation = 1
num_of_imp = 3 ##
t_period_1 = 2e-6 ##
t_period_2 = 3e-6 ##
t_period_3 = 4e-6 ##
t_period_4 = 2e-6 ##
t_period_5 = 2e-6 ##
t_period_6 = 2e-6 ##
t_period_7 = 2e-6 ##
t_period_8 = 2e-6 ##
t_period_9 = 2e-6 ##
t_period_10 = 2e-6 ##
t_period_11 = 2e-6 ##
t_period_12 = 2e-6 ##
t_period_13 = 2e-6 ##
t_period_14 = 2e-6 ##
t_period_15 = 2e-6 ##
t_period_16 = 2e-6 ##
t_period_17 = 2e-6 ##
t_period_18 = 2e-6 ##
t_period_19 = 2e-6 ##
t_period_20 = 2e-6 ##
t_period_21 = 2e-6 ##
t_period_22 = 2e-6 ##
t_period_23 = 2e-6 ##
t_period_24 = 2e-6 ##
t_period_25 = 2e-6 ##
t_period_26 = 2e-6 ##
t_period_27 = 2e-6 ##
t_period_28 = 2e-6 ##
t_period_29 = 2e-6 ##
t_period_30 = 2e-6 ##
t_period_31 = 2e-6 ##
t_period_32 = 2e-6 ##
deviation = 3e6
mult_coef = 16384
accum_bit_deph = 12
DAC_bit_resolution = 12
M = (mult_coef * 2**accum_bit_deph * f_carrier)/f_sampling
print('Шаг =\t', M)
M = int(M)
print('Округленный шаг =\t', M)
f_out = (M * f_sampling)/(mult_coef * 2**accum_bit_deph)
print('Требуемая вых. частота =\t', f_carrier)
print('Смоделировнная вых. частота =\t', f_out)
delta = (f_sampling)/(mult_coef * 2**accum_bit_deph)
print('Допустимое отклонение частоты от требуемой = %.3f\t' %delta)
if vobulation:
time_of_simulation = (t_impulse +
(t_period_1 * (num_of_imp > 1)) +
(t_period_2 * (num_of_imp > 2)) +
(t_period_3 * (num_of_imp > 3)) +
(t_period_4 * (num_of_imp > 4)) +
(t_period_5 * (num_of_imp > 5)) +
(t_period_6 * (num_of_imp > 6)) +
(t_period_7 * (num_of_imp > 7)) +
(t_period_8 * (num_of_imp > 8)) +
(t_period_9 * (num_of_imp > 9)) +
(t_period_10 * (num_of_imp > 10)) +
(t_period_11 * (num_of_imp > 11)) +
(t_period_12 * (num_of_imp > 12)) +
(t_period_13 * (num_of_imp > 13)) +
(t_period_14 * (num_of_imp > 14)) +
(t_period_15 * (num_of_imp > 15)) +
(t_period_16 * (num_of_imp > 16)) +
(t_period_17 * (num_of_imp > 17)) +
(t_period_18 * (num_of_imp > 18)) +
(t_period_19 * (num_of_imp > 19)) +
(t_period_20 * (num_of_imp > 20)) +
(t_period_21 * (num_of_imp > 21)) +
(t_period_22 * (num_of_imp > 22)) +
(t_period_23 * (num_of_imp > 23)) +
(t_period_24 * (num_of_imp > 24)) +
(t_period_25 * (num_of_imp > 25)) +
(t_period_26 * (num_of_imp > 26)) +
(t_period_27 * (num_of_imp > 27)) +
(t_period_28 * (num_of_imp > 28)) +
(t_period_29 * (num_of_imp > 29)) +
(t_period_30 * (num_of_imp > 30)) +
(t_period_31 * (num_of_imp > 31)) +
(t_period_32 * (num_of_imp > 32)))
else:
time_of_simulation = (num_of_imp - 1) * t_period_1 + t_impulse
num_of_samples = time_of_simulation * f_sampling
print('Неокругленное количество отсчетов в сигнале\t=', num_of_samples)
num_of_samples = int(num_of_samples)
print('Oкругленное количество отсчетов в сигнале\t=', num_of_samples)
##-----------------------------------------------------------------------------
path = 'D:/study/6_year/diploma/Diploma/code/DDSynthesis/Modelsim/digital_synthesizer_v1.2.1/data/output_signal.txt'
with open(path) as file:
data = file.read().split()
signal = []
for i in range(len(data)):
if (data[i] == 'z'):
# data[i] = -1000
pass
else:
signal.append(int(data[i], 10))
# data[i] = int(data[i], 10)
signal = np.array(signal[0:num_of_samples])
m_tr = int(2**DAC_bit_resolution/2 - 1) ## требуемое значение матожидания выходного процесса
sigma_tr = int(m_tr/3) ## требуемое значение СКО выходного процесса
# x = np.linspace(0, 4095, 4096)
# w = 1/(np.sqrt(2*np.pi) * sigma_tr) * np.exp(-(x - m_tr)**2/(2*sigma_tr**2))
# plt.hist(signal, bins = 100, density = True)
# plt.plot(x, w)
# plt.show()
fig, (ax1, ax2) = plt.subplots(nrows = 2, ncols = 1)
time = np.linspace(0, 1, num_of_samples) * time_of_simulation
ax1.plot(time, signal[0:num_of_samples], linewidth = 0.5)
ax1.grid()
signal = signal/(2**DAC_bit_resolution - 1) - 0.5
spectrum = abs(np.fft.fft(signal, n = 1*len(signal), axis = -1))
spectrum/= spectrum.max()
frequ_points = np.linspace(0, 1, len(spectrum)) * f_sampling
ax2.plot(frequ_points, spectrum, linewidth = 0.5)
ax2.set_xlim(0, f_sampling/2)
ax2.grid()
plt.show()
simulated_freq = np.argmax(spectrum) * f_sampling/(len(spectrum)-1)
print('Частота смоделированного сигнала =\t', simulated_freq)
acf = abs(np.correlate(signal, signal, 'full'))
acf /= acf.max()
# acf = 10 * np.log10(acf)
plt.plot(acf, linewidth = 0.5)
plt.grid()
plt.show()
# print(4096*3000000/(13000000000*10/2*13000))
# print(2*3000000/(25390625*10*1625))
# print(4096/13000000000 * (1301500000 - 3000000/(10*))) | [
"anto.shker@yandex.ru"
] | anto.shker@yandex.ru |
8b8c5260d9ff6bece29de9e4737737178180c8ac | f3ceb18737110ea8d8f5c81d6ddff382018dc9d2 | /services/app/wsgi.py | 2077d9d94916ffdc082d8d5232be73014e1256a6 | [] | no_license | bcflock/SolutionChallenge_2020 | b74951b3d99087cd55be65579388499ed4903b31 | 603e19c8259a3728b645f41d884c0ec35f51bbbc | refs/heads/master | 2023-04-10T15:01:56.668926 | 2020-05-18T14:06:10 | 2020-05-18T14:06:10 | 243,390,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | from .web import app
if __name__ == '__main__':
app.run() | [
"bcflock@gmail.com"
] | bcflock@gmail.com |
bafa75244e5e14e02a4c02d998a42c51c676b107 | d49c9cb06ad040db48f4cd5e7a746b91218f591a | /powers.py | 09b496518eb036a7867314640ffd1a0c1d0b3dac | [] | no_license | naveen6797/Git_tutorials | 876ec6597517f74736fa686d43edba0a415b0116 | dc504e4929f4223f85458a86212a526588370a8f | refs/heads/master | 2023-07-03T16:28:54.882930 | 2021-08-24T03:20:29 | 2021-08-24T03:20:29 | 399,171,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63 | py | def
for i in range(1,10):
result = 2**i
print(result)
| [
"matetinaveen97@gmail.com"
] | matetinaveen97@gmail.com |
a6c64362796b4533fed6ec134c75d928ae42b834 | b973eb6c0aaac46cb5568ac3a29024ecfc5dbd3d | /initial/util/fautil.py | aedb9794eaff2a5a9c40154e2f9104bb6c52fe8f | [] | no_license | maurete/pfc | 0325ff3b21f8c6ba16f480b54dabad4c17bb316e | 45f3e79c8b989ef02547e80e79af7ad9479e6406 | refs/heads/master | 2021-01-18T23:14:32.453821 | 2019-02-27T18:49:10 | 2019-02-27T18:49:10 | 12,885,733 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,730 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import re
import sys
import random
import math
# formato de la linea de descripcion:
# * comienza con un > (opcionalmente precedido de espacios
# * (1) id: 3 letras + n { alfanum | _ | - }
# * se ignoran otros caracteres hasta el fin de linea
# >hsa-mir-123 Homo sapiens... etc
desc_fmt = r"^\s*>([a-zA-Z]{3}[\w_-]+)([\s|].+)?\s*$"
# formato de la linea de secuencia:
# * sólo se reconocen los caracteres GCAUTgcaut
# opcionalmente rodeados de espacios
# GCGCGAAUACUCUCCUAUAUAAACC... etc
seqn_fmt = r"^\s*([GCAUTgcaut]+)\s*$"
# formato de la linea de estructura secundaria:
# * idem anterior, pero con caracteres .()
# * puede estar terminada por un numero entre parentesis
# ...(((((.((((...(((.(((((.....))))))...)).).. etc
snds_fmt = r"^\s*([.()]+)(\s+\((\s*-?[0-9.]+)\s*\))?\s*$"
# formato de un string de estructura *con más de un loop*
# * no acepta otra cosa que .(), sin espacios, nada
# ....(((((.((..))))))..((((...))))).))... etc
mult_fmt = r"[.(]+\)[.()]*\([.)]+"
def load_file ( f ):
"""
Lee el archivo pasado como parámetro y lo guarda en un diccionario.
@param f: el archivo a leer.
@return: el diccionario con los items leídos del archivo.
@rtype: dict
"""
# en entries guardo cada entrada leida
entries = dict()
# si f es una lista de archivos
# hago recursion con cada elem
if type(f) is list:
for li in f:
entries.update(load_file(li))
else:
# variables auxiliares
lineno = 0
cur_dsc = None
cur_seq = ""
cur_st2 = ""
id_ = ""
# leo cada linea del archivo
for line in f:
lineno += 1
# si leo una linea de descripcion
if re.match(desc_fmt, line):
# si no es la primera iteracion
if cur_dsc:
# obtengo el id
id_ = re.split(desc_fmt, cur_dsc)[1]
# guardo la entrada en el dict
entries[id_] = (cur_dsc, cur_seq, cur_st2)
# asigno el valor actual a la
# linea de descripcion y reseteo las otras
lll = line.replace( '''
''', '')
cur_dsc = lll[:-1]
cur_seq = ""
cur_st2 = ""
# si leo una linea de secuencia
elif re.match(seqn_fmt, line):
# agrego el pedazo de secuencia
# al final de la variable cur_seq
cur_seq += re.split(seqn_fmt,line)[1]
# si leo una linea de estructura secundaria
elif re.match(snds_fmt, line):
# separo la linea segun la regexp
split = re.split(snds_fmt, line)
# guardo al parte de estruct secund al
# final de la var cur_st2
cur_st2 += split[1]
# si al final la linea contene la free energy
if split[3]:
# la agrego al final de la linea de descripcion
cur_dsc += " FREE_ENERGY {}".format(split[3])
# si no entiendo la linea, escribo una advertencia
else:
sys.stderr.write("WARNING: {}: ignoring line {:d}\n".format(
f.name,lineno))
# si lei algo del for anterior, me queda
# la ultima entrada sin guardar:
if cur_dsc:
# obtengo el id
id_ = re.split(desc_fmt, cur_dsc)[1]
# guardo la entrada en el dict
entries[id_] = (cur_dsc, cur_seq, cur_st2)
# asigno el valor actual a la
# linea de descripcion y reseteo las otras
cur_dsc = line[:-1]
cur_seq = ""
cur_st2 = ""
return entries
def strip ( infile, outfile, d=0 ):
    """
    Read the input file and write to the output file only those
    entries that do not contain multiple loops.

    @param infile: file(s) to read
    @param outfile: file where the output is written
    @param d: recursion helper, must be zero (unused here; kept for
        backward compatibility with the other commands)
    @rtype: None
    """
    # read the file
    seq = load_file(infile)
    # for each entry, keep it only when its secondary structure does
    # NOT match the multiple-loop pattern
    # (original used an `if match: pass / else:` anti-pattern)
    for val in seq.values():
        if not re.match(mult_fmt, val[2]):
            # write the entry to the output file
            outfile.write(val[0] + '\n' +
                          val[1] + '\n' +
                          val[2] + '\n')
def count ( infile, d=0 ):
    """
    Read the input file(s) and count the number of entries,
    detailing which ones carry a secondary structure.

    @param infile: file to read, or a list of such files
    @param d: recursion depth helper, must be zero on the initial call
    @return: tuple (description, sequence, secondary-structure) line
        counts for this call
    @rtype: tuple
    """
    # counters for this call
    num_desc_lines = 0
    num_seqn_lines = 0
    num_str2_lines = 0

    # if infile is a list of files, recurse on each element
    if isinstance(infile, list):
        for li in infile:
            # accumulate the counts returned by each recursive call
            # (original shadowed the parameter `d` here)
            (dd, s, s2) = count(li, d + 1)
            num_desc_lines += dd
            num_seqn_lines += s
            num_str2_lines += s2
    # a single file was passed
    else:
        # load the file
        seq = load_file(infile)
        # count the entries in the dictionary
        for val in seq.values():
            num_desc_lines += 1
            # check whether it has a sequence
            if val[1]:
                num_seqn_lines += 1
            # check whether it has a secondary structure
            if val[2]:
                num_str2_lines += 1
        # print the counts for the current file
        print('''{}:
{:>10d} description lines
{:>10d} sequence lines
{:>10d} secondary-structure lines
'''.format(infile.name,
           num_desc_lines,
           num_seqn_lines,
           num_str2_lines))
        return (num_desc_lines,
                num_seqn_lines,
                num_str2_lines)

    # print the grand totals (reached only in the list case)
    print('''
total:
{:>10d} description lines
{:>10d} sequence lines
{:>10d} secondary-structure lines
'''.format(num_desc_lines,
           num_seqn_lines,
           num_str2_lines))
    # BUGFIX: the list branch originally returned None, which crashed
    # the tuple unpacking above whenever lists were nested; return the
    # totals as well (backward compatible: callers previously got None).
    return (num_desc_lines,
            num_seqn_lines,
            num_str2_lines)
def compare( set1files, set2files ):
    """
    Read the input files belonging to sets 1 and 2 and print
    information comparing set1 with set2.

    Reads the module-level ``verbosity`` flag: > 0 lists entries unique
    to each set, > 1 additionally prints every differing entry pair.

    @param set1files: files of set 1
    @param set2files: files of set 2
    @rtype: None
    """
    set1 = dict()
    set2 = dict()
    # read the content of the files
    set1.update(load_file(set1files))
    set2.update(load_file(set2files))
    # set difference of the key sets gives the entries unique to each side
    entries_only_set1 = set(set1.keys()) - set(set2.keys())
    entries_only_set2 = set(set2.keys()) - set(set1.keys())
    # counters for sequence/structure differences
    differing_sequences = 0
    differing_secondary = 0
    # for every entry present in both sets
    for key in set(set1.keys()) & set(set2.keys()):
        if set1[key] != set2[key]:
            # count differing sequences
            if set1[key][1] != set2[key][1]:
                differing_sequences += 1
            # count differing secondary structures
            if set1[key][2] != set2[key][2]:
                differing_secondary += 1
            # super-verbose mode prints every differing pair.
            # BUGFIX: was [1:2], which compared only the sequence element;
            # [1:3] covers sequence AND secondary structure as intended.
            if verbosity > 1 and set1[key][1:3] != set2[key][1:3]:
                print( "set1: {}".format(set1[key]) )
                print( "set2: {}".format(set2[key]) )
    # verbose mode shows elements unique to each set
    if verbosity > 0:
        if len(entries_only_set1) > 0:
            print ("\nentries in set 1 but not in set 2:")
            for e in entries_only_set1:
                print ("{}".format(e))
        if len(entries_only_set2) > 0:
            print ("\nentries in set 2 but not in set 1:")
            for e in entries_only_set2:
                print ("{}".format(e))
    # print the gathered summary
    print('''{:>10d} entries read into set1
{:>10d} entries read into set2
{:>10d} entries have different sequences
{:>10d} entries have different secondary structure
{:>10d} entries in set1 not found in set2
{:>10d} entries in set2 not found in set1
'''.format(len(set1),
           len(set2),
           differing_sequences,
           differing_secondary,
           len(entries_only_set1),
           len(entries_only_set2)))
def triplet ( sequence, structure, normalize = True ):
    """
    Compute the 32-vector of triplet frequencies following the
    procedure described in Xue et al.

    @param sequence: sequence string (length N)
    @param structure: secondary-structure string (length N)
    @param normalize: divide each count by the total number of triplets
    @return: 32-vector, each element holds the count (or frequency) of a triplet
    @rtype: list
    @raise Exception: on length mismatch or unrecognizable hairpin structure
    """
    # lengths must match
    if len(sequence) != len(structure):
        raise Exception( "sequence and structure differ in length!" )
    # the 8 possible local structure patterns around a position
    patterns = ['...', '..(', '.(.', '.((', '(..', '(.(', '((.', '(((']
    # occur maps triplet -> number of occurrences, e.g. occur['G.(('] = 2
    occur = dict()
    for n in 'GCUA':
        for s in patterns:
            occur[n + s] = 0
    # locate the boundaries of the regions to consider (see paper)
    ll = structure.find('(')
    lr = structure.rfind('(')
    rl = structure.find(')')
    rr = structure.rfind(')')
    # without at least one paired region there is no hairpin to analyze;
    # fail explicitly instead of indexing with -1 and raising a cryptic KeyError
    if ll == -1 or rl == -1:
        raise Exception("couldn't guess hairpin structure (no paired region)")
    ll = max(1, ll)
    rr = min(len(structure) - 2, rr)
    # if lr > rl there is probably more than one loop
    if lr > rl:
        raise Exception("couldn't guess hairpin structure (more than one loop?)")
    # convert every parenthesis to '(' so both stem sides share patterns
    structure = structure.replace(')', '(')
    # walk both stems: for each triplet add 1 to the matching bucket
    total = 0
    for i in list(range(ll, lr + 1)) + list(range(rl, rr + 1)):
        occur[sequence[i] + structure[i - 1:i + 2]] += 1
        total += 1
    # guard against division by zero on degenerate (too short) stems
    if total == 0:
        raise Exception("couldn't guess hairpin structure (stems too short)")
    # finally build the occurrence vector in fixed GCUA x pattern order
    tot = float(total)
    vector = []
    for n in 'GCUA':
        for s in patterns:
            if normalize:
                vector.append(occur[n + s] / tot)
            else:
                vector.append(occur[n + s])
    return vector
def svmout ( infile, outfile, label ):
    """
    Generate the libsvm input file, tagging every element with
    the label 'label' (int).

    @param infile: input file(s) (FASTA format)
    @param outfile: output file (libsvm format)
    @param label: label for the output elements (int)
    @rtype: None
    """
    # read the input
    seq = load_file(infile)
    for val in seq.values():
        # skip multiloop entries
        if re.match(mult_fmt, val[2]):
            continue
        # compute the triplet-frequency vector
        v = triplet(val[1], val[2])
        # write the label to the output file
        outfile.write("{:d}".format(label))
        # libsvm sparse format: emit "index:value" only for non-zero features
        for i, x in enumerate(v):
            if x != 0:
                outfile.write( " {:d}:{:f}".format(i+1, x) )
        # end the record
        outfile.write('\n')
def fannout ( infile, outfile, label ):
    """
    Generate the libfann training-data file, tagging every element
    with the label 'label' (int).

    @param infile: input file(s) (FASTA format)
    @param outfile: output file (libfann format)
    @param label: label for the output elements (int)
    @rtype: None
    """
    # read the input
    seq = load_file(infile)
    # BUGFIX: filter multiloop entries BEFORE writing the header, so the
    # declared pattern count matches the number of patterns actually written
    # (previously the header used len(seq) while multiloop entries were
    # skipped afterwards, producing an inconsistent libfann file)
    singles = [val for val in seq.values() if not re.match(mult_fmt, val[2])]
    # header: number of patterns, inputs and outputs
    outfile.write("{:d} {:d} {:d}\n".format(len(singles), 32, 1))
    for val in singles:
        # compute the triplet-frequency vector
        v = triplet(val[1], val[2])
        # write the 32 inputs
        for i in v:
            outfile.write( "{:f} ".format(i) )
        # write the label (expected output) on the following line
        outfile.write("\n{:d}\n".format(label))
def partition ( infile, outfile1, outfile2, s1=None, s2=None, d=0 ):
    """
    Partition the input elements into two output files.
    The partitions are generated by random sampling and have no
    elements in common.

    @param infile: input file(s)
    @param outfile1: output file for partition 1
    @param outfile2: output file for partition 2
    @param s1: number of elements in partition 1 (default 70%)
    @param s2: number of elements in partition 2 (default 30%)
    @param d: recursion helper. Do not specify!
    @rtype: None
    """
    # read the entries
    entries = load_file(infile)
    random.seed()
    # default values for s1, s2
    if not s1:
        s1 = int(math.floor(len(entries) * 0.7))
    if not s2:
        s2 = int(math.ceil(len(entries) * 0.3))
    # BUGFIX: random.sample requires a sequence (dict views are rejected
    # since Python 3.11), so materialize the keys once
    keys = list(entries.keys())
    # sample elements for partition 1
    p1k = random.sample(keys, s1)
    # sample partition 2 from the remaining keys (set lookup keeps this O(n))
    chosen = set(p1k)
    p2k = random.sample([k for k in keys if k not in chosen], s2)
    # argparse may hand the output files in as one-element lists
    if type(outfile1) is list:
        outfile1 = outfile1[0]
    if type(outfile2) is list:
        outfile2 = outfile2[0]
    # write both output files
    for k in p1k:
        outfile1.write(entries[k][0] + '\n' +
                       entries[k][1] + '\n' +
                       entries[k][2] + '\n')
    for k in p2k:
        outfile2.write(entries[k][0] + '\n' +
                       entries[k][1] + '\n' +
                       entries[k][2] + '\n')
    outfile1.close()
    outfile2.close()
# Wrappers adapting the argparse Namespace (set via set_defaults(func=...))
# to each command function's positional arguments.
def wrap_strip (obj):
    strip(obj.file, obj.outfile)
def wrap_count(obj):
    count(obj.file)
def wrap_compare(obj):
    compare(obj.set1, obj.set2)
def wrap_svmout (obj):
    svmout(obj.file, obj.outfile, obj.label)
def wrap_fannout (obj):
    fannout(obj.file, obj.outfile, obj.label)
def wrap_part (obj):
    partition(obj.file, obj.outfile1, obj.outfile2, obj.size1, obj.size2)
parser = argparse.ArgumentParser( description='FASTA file format manipulation utility.',
prog='fautil')
parser.add_argument( '--verbose', '-v',
action='count',
help='increase verbosity level')
subp = parser.add_subparsers()
countp = subp.add_parser('count',
#aliases=['contar'],
description="count entries in input files")
cmppar = subp.add_parser('compare',
#aliases=['cmp', 'comparar'],
description="compare two sets of files")
strp = subp.add_parser('strip',
#aliases=['singloop'],
description="strip multiloop entries from input")
svmp = subp.add_parser('svm',
#aliases=['svmout'],
description="convert to libsvm format")
fannp = subp.add_parser('fann',
#aliases=['svmout'],
description="convert to libfann format")
partp = subp.add_parser('part',
#aliases=['partition'],
description="partition input file into two output sets")
countp.add_argument('file',
type=argparse.FileType('r'),
nargs='*',
default=sys.stdin,
help='input file(s)')
cmppar.add_argument('--set1', '-1',
type=argparse.FileType('r'),
nargs='+',
required=True,
help='file(s) to be considered as set 1')
cmppar.add_argument('--set2', '-2',
type=argparse.FileType('r'),
nargs='+',
required=True,
help='file(s) to be counted in the second set')
strp.add_argument ('file',
type=argparse.FileType('r'),
nargs='*',
default=sys.stdin,
help='file(s) to read from')
strp.add_argument ('--outfile', '-o',
type=argparse.FileType('w'),
nargs='?',
default=sys.stdout,
help="output file to write")
svmp.add_argument ('file',
type=argparse.FileType('r'),
nargs='*',
default=sys.stdin,
help='file(s) to read from')
svmp.add_argument ('--outfile', '-o',
type=argparse.FileType('w'),
nargs='?',
default=sys.stdout,
help="output file to write")
svmp.add_argument ('--label', '-l',
type=int,
nargs='?',
default=-1,
help="label for the dataset (default = -1)")
fannp.add_argument ('file',
type=argparse.FileType('r'),
nargs='*',
default=sys.stdin,
help='file(s) to read from')
fannp.add_argument ('--outfile', '-o',
type=argparse.FileType('w'),
nargs='?',
default=sys.stdout,
help="output file to write")
fannp.add_argument ('--label', '-l',
type=int,
nargs='?',
default=-1,
help="label for the dataset (default = -1)")
partp.add_argument ('file',
type=argparse.FileType('r'),
nargs='*',
default=sys.stdin,
help='file(s) to read from')
partp.add_argument ('--outfile1', '-o1', '-1',
type=argparse.FileType('w'),
nargs=1,
default=None,
help="output file for partition 1")
partp.add_argument ('--outfile2', '-o2', '-2',
type=argparse.FileType('w'),
nargs=1,
default=None,
help="output file for partition 2")
partp.add_argument ('--size1', '-s1',
type=int,
nargs='?',
default=None,
help="size of partition 1 [entries] (default = 70% of entries)")
partp.add_argument ('--size2', '-s2',
type=int,
nargs='?',
default=None,
help="size of partition 2 [entries] (default = 30% of entries)")
# Wire each sub-command to its wrapper so obj.func(obj) dispatches correctly.
strp.set_defaults(func=wrap_strip)
svmp.set_defaults(func=wrap_svmout)
fannp.set_defaults(func=wrap_fannout)
countp.set_defaults(func=wrap_count)
cmppar.set_defaults(func=wrap_compare)
partp.set_defaults(func=wrap_part)
if __name__ == "__main__":
    # module-level verbosity flag, read by compare()
    verbosity = 0
    obj = parser.parse_args()
    if obj.verbose is not None:
        verbosity = obj.verbose
    # dispatch to the handler chosen via set_defaults(func=...)
    obj.func(obj)
| [
"mauro@qom"
] | mauro@qom |
86b3b02b6f05cbe25bef31510464df38e907cde1 | 5c9c0d575d21c407aef155e49dda67b51cb9867c | /service_4/app.py | af045d2efb56adc1abbeac67569b1f8972ecfa78 | [] | no_license | kaziimtiaz29/project_2 | 5dcc937868738782accc1562f8b29e0d23fbf403 | f56cc0923bae98a8922e41b19cc1346230b4d96e | refs/heads/master | 2023-05-31T09:59:22.375425 | 2021-06-14T08:12:46 | 2021-06-14T08:12:46 | 375,376,758 | 0 | 0 | null | 2021-06-14T13:06:35 | 2021-06-09T14:04:58 | Python | UTF-8 | Python | false | false | 591 | py | from flask import Flask
from flask import redirect, url_for, request, Response, jsonify
import random, requests
app = Flask(__name__)
# NOTE(review): a SQLAlchemy database URI is configured but no
# flask_sqlalchemy extension is initialized in this file — confirm the
# config key is consumed elsewhere or remove it.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///data.db'
@app.route('/prize',methods=["GET","POST"])
def prize_gen():
    """Prize endpoint: returns "£50" when the drawn number is 15, else "0"."""
    # pull both fields from the JSON payload ('col' is read but unused —
    # presumably kept for payload validation; TODO confirm with caller)
    payload_number = request.json['num']
    payload_colour = request.json['col']
    app.logger.info(payload_number)
    # NOTE(review): 'num' appears to be a dict carrying a 'number' key — verify
    if payload_number['number'] == 15:
        return "£50"
    return "0"
if __name__ == "__main__":
    # listen on all interfaces; debug=True is for development only
    app.run(host="0.0.0.0", port=5003, debug=True)
"uddinimtiaz29@yahoo.com"
] | uddinimtiaz29@yahoo.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.