blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bdb1154ad002002f1aea81f229180159a5ca1049 | 5b14eb5c7d24b189846e0fdf71ca292a0e89f1d4 | /test/crab/crab3_Data_TimingScan2016_0T_RECO_ZB1.py | 0b8808bdef7d713e7189287f52f9c66f8d370de1 | [] | no_license | jkarancs/PixelTimingStudy | 6e1254e00c2df8394f7d084f94b87db3fa4aeca6 | b8a9306e4b9f3e3d034ba9bc0a65625a84e4e877 | refs/heads/master | 2021-04-22T06:37:59.388077 | 2017-03-22T13:12:33 | 2017-03-22T13:12:33 | 11,422,604 | 0 | 3 | null | 2015-12-09T22:22:15 | 2013-07-15T12:27:54 | C++ | UTF-8 | Python | false | false | 1,421 | py | import os
import glob
from WMCore.Configuration import Configuration
# CRAB3 job configuration: RECO ntuple production for the 2016 0T timing
# scan, running on the ZeroBias1 dataset.
config = Configuration()
# Path to the PixelTimingStudy package inside the local CMSSW checkout.
PTSdir = os.environ['CMSSW_BASE']+'/src/DPGAnalysis/PixelTimingStudy/'
# --- General: transfer job outputs/logs and name the request -------------
config.section_('General')
config.General.transferOutputs = True
config.General.transferLogs = True
config.General.requestName = 'TimingScan16_RECO_ZB1' #can be anything
# --- JobType: analysis job driven by the TimingStudy cmsRun config -------
config.section_('JobType')
config.JobType.allowUndistributedCMSSW = True
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = PTSdir+'test/TimingStudy_RunIIData_80X_cfg.py'
# Auxiliary input files shipped with every job.
config.JobType.inputFiles = [ PTSdir+'portcardmap.dat', PTSdir+'run_ls_instlumi_pileup_2015.txt' ]
config.JobType.outputFiles = ['Ntuple.root']
config.JobType.disableAutomaticOutputCollection = True
#config.JobType.priority = -1
# --- Data: input dataset, job splitting and output bookkeeping -----------
config.section_('Data')
config.Data.inputDataset = '/ZeroBias1/Run2016A-PromptReco-v2/RECO'
#config.Data.lumiMask = 'https://cms-service-dqm.web.cern.ch/cms-service-dqm/CAF/certification/Collisions15/13TeV/Reprocessing/Cert_13TeV_16Dec2015ReReco_Collisions15_25ns_JSON_MuonPhys.txt'
config.Data.outLFNDirBase = '/store/user/jkarancs/TimingStudy/Scans'
config.Data.outputDatasetTag = 'v3936_INC_SPL1_def_805p1_80X_dataRun2_Prompt_v6_TimingScan16_0T_RECO'
config.Data.splitting = 'LumiBased'
config.Data.unitsPerJob = 2
#config.Data.totalUnits = 2
# Runs taken during the timing scan.
config.Data.runRange = '271188,271191,271192,271193'
# --- Site: storage element for the produced files ------------------------
config.section_('Site')
config.Site.storageSite = 'T2_HU_Budapest'
| [
"janos.karancsi@cern.ch"
] | janos.karancsi@cern.ch |
788e5f42510d70310fec1a080d28c3bc5e7e33d6 | b77265a5b6a20277439c5f61d0dccf30ed71bed9 | /qqGroup.py | 1b6d797d493bc3ca85a1ed4e8485b5b27c023306 | [] | no_license | LightAndDarkNight/craw_qq_group_members | 2959dc8d83e2527db2d64297842a19327ba268e1 | 08f6965c1277a69343a9075f3934bdfe3a6a5b7b | refs/heads/master | 2021-06-27T19:00:08.249306 | 2021-03-08T07:37:52 | 2021-03-08T07:37:52 | 219,451,088 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,865 | py | import pandas as pd
import tkinter as tk
from Crawer import craw
str1='''
--------------------------------------------
欢迎来到由(^v^)制作的qq群成员的小程序
请根据以下注意事项爬取进行爬取:
1、本程序暂时只支持火狐浏览器
2、登录后请将成员表下拉到底后关闭浏览器。
3、本程序仅供学习。
--------------------------------------------
'''
# Main window: fixed-size (non-resizable) 300x300 Tk window.
windows =tk.Tk()
windows.resizable(False, False)
# Status text shown at the bottom of the window; starts as the help banner.
CurrentShow = tk.StringVar()
CurrentShow.set(str1)
# Window settings
windows.title("爬取qq群成员")
windows.geometry("300x300")
def getList(number,filename):
    """Crawl the member list of a QQ group and save it as an Excel file.

    number   -- tk.Entry holding the QQ group id
    filename -- tk.Entry holding the output file name (without extension)

    Writes result/<filename>.xls on success and updates the global
    ``CurrentShow`` status text with the outcome.
    """
    # Read the values typed into the entry widgets.
    n=number.get()
    f=str(filename.get())
    if n=="" or f=="":
        return
    # Crawl the web page
    menber_list=craw(n)
    if menber_list:
        # Transpose so members become rows before exporting to Excel.
        data=pd.DataFrame(menber_list).T
        data.to_excel('result/'+f+'.xls')
        CurrentShow.set("爬取成功")
    else:
        CurrentShow.set("爬取失败")
def start():
    """Build the widgets of the main window and enter the Tk main loop."""
    # Page layout
    qqNumber=tk.Entry(windows)
    qqNumber.place(x=50,y=50,width=200,height=30)
    qqNumber_label=tk.Label(windows,text="请输入qq群号:")
    qqNumber_label.place(x=75,y=20,width=150,height=20)
    fileName_label=tk.Label(windows,text="请输入保存的文件名:")
    fileName_label.place(x=75,y=85,width=150,height=20)
    fileName=tk.Entry(windows)
    fileName.place(x=50,y=105,width=200,height=30)
    # "Start crawling" button triggers getList with the two entry widgets.
    begin=tk.Button(text="开始爬取",command=lambda :getList(qqNumber,fileName))
    begin.place(x=50,y=150,width=80,height=40)
    myQuit=tk.Button(text="退出",command=quit)
    myQuit.place(x=170,y=150,width=80,height=40)
    # Status label bound to the global CurrentShow StringVar.
    craw_message=tk.Label(windows,textvariable=CurrentShow,bg='white',justify='left')
    craw_message.place(x=10,y=200,width=280,height=90)
    windows.mainloop()
if __name__=="__main__":
start() | [
"”2787819143@qq.com"
] | ”2787819143@qq.com |
62cb20767643e6e16dd23e0aead9b51ac4bab037 | 137b88f81a7b08aecc5ca0b624a6b68c4578585d | /baekjoon/DP/9095_123sum.py | f1339fcd89edf77060b395b7fbf62159cc69ebe9 | [] | no_license | mayblue9/Algorithm_Python | dbce59a9aefbcef5bbbcd1b250d2297f0e5fed0c | 42b65f8e9a118453d627d2a362493fcd50f9e35c | refs/heads/master | 2021-01-17T05:53:14.052432 | 2016-02-24T12:40:44 | 2016-02-24T12:40:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,328 | py | """
1, 2, 3 더하기 성공
한국어 원문 문제집
시간 제한 메모리 제한 제출 정답 맞은 사람 정답 비율
1 초 128 MB 1622 1137 856 68.866%
문제
정수 4를 1, 2, 3의 조합으로 나타내는 방법은 총 7가지가 있다.
1+1+1+1
1+1+2
1+2+1
2+1+1
2+2
1+3
3+1
정수 n이 주어졌을 때, n을 1,2,3의 합으로 나타내는 방법의 수를 구하는 프로그램을 작성하시오.
입력
첫쨰 줄에 테스트 케이스의 개수 T가 주어진다. 각 테스트 케이스는 한 줄로 이루어져 있고, 정수 n이 주어진다. n은 양수이며 11보다 작다.
출력
각 테스트 케이스마다, n을 1,2,3의 합으로 나타내는 방법의 수를 출력한다.
예제 입력 복사
3
4
7
10
예제 출력 복사
7
44
274
"""
# Baekjoon 9095: for each query n (1 <= n < 11), count the ordered ways to
# write n as a sum of 1, 2 and 3.  Memoized recursion over the recurrence
# f(n) = f(n-1) + f(n-2) + f(n-3).
case_total = int(input())
for _ in range(case_total):
    num = int(input())
    memo = {1: 1, 2: 2, 3: 4}  # base cases of the recurrence

    def cal(n):
        """Return the number of ordered 1/2/3-sums equal to n (memoized)."""
        if n <= 0:
            return 0
        if n not in memo:
            memo[n] = cal(n - 1) + cal(n - 2) + cal(n - 3)
        return memo[n]
print(cal(num)) | [
"alwaysfun2183@gmail.com"
] | alwaysfun2183@gmail.com |
8e8eb775dbbed0858f599e14b3f48a96ad069ad2 | 4e7d0dd207dda13c9b3c3ce86dca10f07aad0d74 | /HockeyFights/learning/rnn/cnnVideo.py | 66abbfabf23a034f69c4de5fe3e94c48afb8b910 | [
"MIT"
] | permissive | mletunov/MachineLearning | bf5ea39f7df2761e9e094e4f9257962503e768d5 | 051f21150b7404025dc45a0eb13b4aba891f6f2d | refs/heads/master | 2021-01-11T13:34:04.572473 | 2017-04-20T20:54:03 | 2017-04-20T20:54:03 | 81,570,629 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,617 | py | import numpy as np
import tensorflow as tf
import dataset.utils as utils
from learning import baseNetwork
class CnnModel(baseNetwork.BaseModel):
    """CNN + GRU video classifier built as a TensorFlow 1.x static graph.

    Three strided convolutions encode each frame of a clip, a GRU
    aggregates the per-frame features over ``num_steps`` frames, and a
    dense layer produces scores for 2 classes (fight / no fight).
    """
    def __init__(self, frame, norm_type, checkpoint_dir=None, seed=None):
        # frame: shape of one video frame; presumably (height, width, 3),
        # since conv1 below expects 3 input channels -- TODO confirm.
        self.frame = frame
        self.seed = seed
        return super().__init__(norm_type, checkpoint_dir)
    def build(self, rnn_state=20, num_steps=30, avg_result=False, batch_norm=False, dropout=False):
        """Construct the inference graph and return ``self``.

        :param rnn_state: size of the GRU hidden state
        :param num_steps: number of frames per clip fed to the RNN
        :param avg_result: if True, average dense scores over all time
            steps instead of using only the last RNN output
        :param batch_norm: insert batch normalization after conv1
        :param dropout: create a ``self.keep_prob`` placeholder and apply
            dropout to the flattened CNN features
        """
        self.graph = tf.Graph()
        with self.graph.as_default():
            self.num_classes = 2
            self.num_steps=num_steps
            if self.seed:
                tf.set_random_seed(self.seed)
            with tf.name_scope("input"):
                # num_steps sequence of frames: height x width x depth
                x = tf.placeholder(tf.float32, (None, num_steps, *self.frame))
                if dropout:
                    self.keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)
                # Fold the time axis into the batch so the CNN sees single frames.
                flatten_x = tf.reshape(x, (-1, *self.frame))
            with tf.name_scope("cnn"):
                with tf.name_scope("conv1"):
                    # 5x5 conv, 3 -> 16 channels, stride 2, no padding.
                    conv1_w = tf.Variable(self.initializer(shape=(5, 5, 3, 16)), tf.float32, name="w")
                    conv1_b = tf.Variable(tf.zeros(shape=(16)), name="b")
                    conv1 = tf.nn.relu(tf.nn.conv2d(flatten_x, conv1_w, strides=[1, 2, 2, 1], padding="VALID") + conv1_b, name="conv")
                if batch_norm:
                    with tf.name_scope("batch_norm"):
                        # Moments over the batch axis only.
                        batch_mean, batch_var = tf.nn.moments(conv1,[0])
                        scale = tf.Variable(tf.ones(tf.shape(batch_mean)))
                        beta = tf.Variable(tf.zeros(tf.shape(batch_mean)))
                        # Small epsilon value for the BN transform
                        epsilon = 1e-3
                        conv1 = tf.nn.batch_normalization(conv1, batch_mean, batch_var, beta, scale, epsilon)
                with tf.name_scope("conv2"):
                    # 7x7 conv, 16 -> 16 channels, stride 2.
                    conv2_w = tf.Variable(self.initializer(shape=(7, 7, 16, 16)), tf.float32, name="w")
                    conv2_b = tf.Variable(tf.zeros(shape=(16)), name="b")
                    conv2 = tf.nn.relu(tf.nn.conv2d(conv1, conv2_w, strides=[1, 2, 2, 1], padding="VALID") + conv2_b, name="conv")
                with tf.name_scope("conv3"):
                    # 9x9 conv, stride 2, followed by 3x3 max pooling.
                    conv3_w = tf.Variable(self.initializer(shape=(9, 9, 16, 16)), tf.float32, name="w")
                    conv3_b = tf.Variable(tf.zeros(shape=(16)), name="b")
                    conv3 = tf.nn.relu(tf.nn.conv2d(conv2, conv3_w, strides=[1, 2, 2, 1], padding="VALID") + conv3_b, name="conv")
                    pool3 = tf.nn.max_pool(conv3, ksize=[1, 3, 3, 1], strides=[1, 3, 3, 1], padding="VALID", name="pool")
            with tf.name_scope("flatten"):
                # Static feature size per frame after the conv stack.
                size = np.prod(pool3.get_shape().as_list()[1:])
                flatten_conv = tf.reshape(pool3, shape=(-1, size))
                if dropout:
                    flatten_conv = tf.nn.dropout(flatten_conv, self.keep_prob, name="dropout")
            with tf.name_scope("rnn"):
                # Restore the (batch, time, features) layout for the GRU.
                rnn_inputs = tf.reshape(flatten_conv, (-1, num_steps, size))
                cell = tf.contrib.rnn.GRUCell(rnn_state)
                init_state = cell.zero_state(tf.shape(x)[0], dtype=tf.float32)
                rnn_outputs, final_state = tf.nn.dynamic_rnn(cell, rnn_inputs, initial_state=init_state)
                # Either every time step (for averaging later) or only the last one.
                output = tf.reshape(rnn_outputs, shape=(-1, rnn_state)) if avg_result else rnn_outputs[:,-1,:]
            with tf.name_scope("dense"):
                dense_w = tf.Variable(self.initializer(shape=(rnn_state, self.num_classes)), tf.float32, name="w")
                dense_b = tf.Variable(tf.zeros([self.num_classes]), tf.float32, name = "b")
                dense = tf.add(tf.matmul(output, dense_w), dense_b, name="out")
            with tf.name_scope("prediction"):
                # With avg_result the per-step scores are averaged over time.
                score = tf.reduce_mean(tf.reshape(dense, shape=(-1, num_steps, self.num_classes)), axis = 1) if avg_result else dense
                prediction = tf.cast(tf.arg_max(score, dimension=1), tf.int32)
            self.input = x
            self.score = score
            self.prediction = prediction
            self.saver = tf.train.Saver(max_to_keep=5)
            self.init = tf.global_variables_initializer()
        return self
class CnnTrainer(baseNetwork.BaseTrainer):
    """Adds training ops (loss, accuracy, optimizer) on top of a CnnModel."""
    def __init__(self, model, **kwargs):
        return super().__init__(model, **kwargs)
    def build(self, learning_rate=1e-2, l2_loss=False):
        """Attach loss/accuracy/train ops to the model graph; return ``self``.

        :param learning_rate: Adam learning rate
        :param l2_loss: if True use L2 loss against one-hot labels instead
            of sparse softmax cross-entropy
        """
        with self.model.graph.as_default():
            with tf.name_scope("input"):
                # video class - fight (1) or not (0)
                y = tf.placeholder(tf.int32, (None,))
            with tf.name_scope("accuracy"):
                accuracy = tf.reduce_mean(tf.cast(tf.equal(self.model.prediction, y), tf.float32), name="accuracy")
                tf.summary.scalar("accuracy", accuracy)
            with tf.name_scope("train"):
                if l2_loss:
                    loss = tf.nn.l2_loss(self.model.score - tf.one_hot(y, depth=self.model.num_classes))
                else:
                    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.model.score, labels=y)
                total_loss = tf.reduce_mean(loss)
                tf.summary.scalar("loss", total_loss)
                train_step = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(total_loss)
            self.expected = y
            self.train_step = train_step
            self.loss = total_loss
            self.accuracy = accuracy
            # Only initialize variables created under the "train" scope
            # (e.g. Adam slot variables), leaving model weights untouched.
            self.init = [var.initializer for var in tf.global_variables() if var.name.startswith('train')]
        return self
    def train(self, dataset, epochs, batch_size=20, dropout=0.75):
        """Train for ``epochs`` epochs on a 9:1 train/test split of ``dataset``."""
        if self.model.seed:
            np.random.seed(self.model.seed)
        # split train and test datasets constantly during whole training 9:1
        train_names, test_names = utils.split(dataset.read_names(), frac = 0.9)
        # shuffle training dataset before each epoch
        train_dataset = lambda: dataset.gen_dataset(utils.shuffle(train_names), by_video=False, frames_count=self.model.num_steps, batch_size=batch_size)
        # it doesn't matter to shuffle test dataset because it will not take any impact on result
        test_dataset = lambda: dataset.gen_dataset(test_names, by_video=True, frames_count=self.model.num_steps, batch_size=batch_size)
return super()._train(epochs, train_dataset, test_dataset, dropout) | [
"maxim.letunov@artezio.com"
] | maxim.letunov@artezio.com |
8d4cb48582b4ba184f76bbd0fcb6b26d97663617 | 614c3b9a7b8d948f21c7a522f03028dc801e40c9 | /FunWithHashing.py | 25edb699ed8b98932091d356f9ef468333f8cd11 | [] | no_license | Clockwick/DataStructureP2 | ee049e8a92a06269ecb5574b40206901571e90ef | 323ca4a5fd8e5addfe3b371010048faa37a160b5 | refs/heads/master | 2023-01-13T05:33:21.929581 | 2020-11-22T05:25:52 | 2020-11-22T05:25:52 | 291,057,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,149 | py | class Data:
    def __init__(self, key, value):
        # One key/value pair stored in a hash-table slot.
        self.key = key
        self.value = value
    def __str__(self):
        # Render as "(key, value)" for the table printout.
        return "({0}, {1})".format(self.key, self.value)
class Hash:
    """Fixed-size hash table with quadratic probing.

    Keys are hashed by summing the character codes of the first word of
    the input string; collisions are resolved by probing
    ``(index + i**2) % size`` and reported via ``print``, giving up after
    ``max_col`` collisions.
    """
    def __init__(self, size, max_col):
        self.current_size = 0   # number of occupied slots
        self.size = size        # total number of slots
        self.max_col = max_col  # maximum collisions tolerated per insert
        self.table = [None] * size
    def __str__(self):
        # One line per slot: "#<1-based index> <Data or None>".
        ss = ""
        for i in range(1,self.size+1):
            ss += (f"#{i} {self.table[i-1]}\n")
        return ss[:-1]
    def add(self, x):
        """Insert a "<key> <value>" string; prints one line per collision."""
        first_word = x.split()[0]
        second_word = x.split()[1]
        size = self.size
        t = self.table
        # Hash: sum of the key's character codes, modulo the table size.
        asc = 0
        for d in first_word:
            asc += ord(d)
        indx = asc % size
        if t[indx] == None and self.current_size < size:
            t[indx] = Data(first_word, second_word)
            self.current_size += 1
        else:
            if self.current_size >= size:
                return
            else:
                # Quadratic probing: try (indx + i**2) % size.  The first
                # probe (i == 0) re-checks the home slot, so an occupied
                # home slot is reported as collision number 1.
                i = 0
                n = 1
                while (indx + pow(i,2)) % size < size:
                    hx = (indx + pow(i,2)) % size
                    if t[hx] == None:
                        t[hx] = Data(first_word, second_word)
                        self.current_size += 1
                        return
                    if n >= self.max_col:
                        print(f"collision number {n} at {hx}")
                        print("Max of collisionChain")
                        return
                    else:
                        print(f"collision number {n} at {hx}")
                        i += 1
                        n += 1
    def get_size(self):
        """Return the number of occupied slots."""
        return self.current_size
print(" ***** Fun with hashing *****")
inp = input("Enter Input : ").split("/")
table_size, max_col = list(map(int,inp[0].split()))
data = inp[1].split(",")
hash_table = Hash(table_size, max_col)
for item in data:
hash_table.add(item)
print(hash_table)
print("---------------------------")
if hash_table.get_size() >= table_size:
print("This table is full !!!!!!")
break
| [
"oclockth@gmail.com"
] | oclockth@gmail.com |
a4edb019c8bb611867382bd41f3e6b01771f81a0 | 1ac2594314c0de24528b171c8f9120566a3cd4be | /tests/test_api/test_application_auth.py | 0a6f5f693cb4951d759daff7ef217d31d039c1b5 | [
"MIT"
] | permissive | Zheaoli/huskar | a4c7e7b02bef301b5283519b1e1608489d79d95b | 395775c59c7da97c46efe9756365cad028b7c95a | refs/heads/master | 2022-07-11T06:54:34.810211 | 2020-01-01T08:00:57 | 2020-01-01T08:00:57 | 218,746,862 | 0 | 0 | MIT | 2019-10-31T11:06:11 | 2019-10-31T11:06:10 | null | UTF-8 | Python | false | false | 8,603 | py | from __future__ import absolute_import
from pytest import fixture, mark
from huskar_api.models.auth import User, ApplicationAuth, Authority
from ..utils import assert_response_ok
@fixture
def add_user(faker):
    """Factory fixture: create active normal users.

    Each entry is either a plain username (email defaults to
    ``<name>@example.com``) or a ``[name, email]`` pair.
    """
    def factory(names):
        for entry in names:
            if isinstance(entry, list):
                username, email = entry
            else:
                username = entry
                email = '%s@example.com' % entry
            User.create_normal(
                username, faker.password(), email=email, is_active=True)
    return factory
@fixture
def add_application_auth(db, test_application, test_application_token):
    """Factory fixture: grant ``username:authority`` pairs on the test app."""
    def factory(names):
        for spec in names:
            username, authority_name = spec.split(':', 1)
            user_id = db.query(User.id).filter_by(username=username).scalar()
            test_application.ensure_auth(Authority(authority_name), user_id)
    return factory
@fixture
def list_application_auth(db, test_application):
    """Fixture: generator of ``username:authority`` strings for human users."""
    def generator():
        auths = db.query(ApplicationAuth).filter_by(
            application_id=test_application.id).all()
        for auth in auths:
            owner = db.query(User).get(auth.user_id)
            if owner.is_application:
                continue  # skip application accounts, list humans only
            yield '%s:%s' % (owner.username, auth.authority)
    return generator
@fixture
def format_values(test_application):
    """Fixture: expand ``%(test_application)s`` placeholders in string values."""
    def factory(d):
        context = {'test_application': test_application.application_name}
        result = dict(d)
        result.update(
            (key, value % context)
            for key, value in d.items() if isinstance(value, basestring))
        return result
    return factory
@mark.xparametrize
def test_add_application_auth(
        last_audit_log, add_user, add_application_auth, list_application_auth,
        present_user, present_auth, request_auth, expected_resp, expected_auth,
        client, test_application, admin_token):
    """POST /api/auth/application/<name> grants an authority to a user.

    Table-driven (xparametrize): verifies the HTTP response, the resulting
    auth list, and that a GRANT_APPLICATION_AUTH audit log is written only
    on success.
    """
    add_user(present_user)
    add_application_auth(present_auth)
    username, authority = request_auth.split(':', 1)
    r = client.post(
        '/api/auth/application/%s' % test_application.application_name,
        data={'username': username, 'authority': authority},
        headers={'Authorization': admin_token})
    assert r.status_code == expected_resp['status_code']
    assert r.json == expected_resp['content']
    assert set(list_application_auth()) == set(expected_auth)
    audit_log = last_audit_log()
    if expected_resp['status_code'] == 200:
        assert audit_log.action_name == 'GRANT_APPLICATION_AUTH'
        assert audit_log.action_json['application_name'] == \
            test_application.application_name
        assert audit_log.action_json['username'] == username
        assert audit_log.action_json['authority'] == authority
    else:
        assert audit_log is None
def test_add_application_auth_to_invalid_application(
        db, client, faker, add_user, admin_token, last_audit_log,
        test_application):
    """Granting auth on a missing or archived application returns 404."""
    add_user(['foo'])
    name = faker.uuid4()
    # Archive the test application so it also counts as non-existent.
    test_application.archive()
    r = client.post(
        '/api/auth/application/%s' % name,
        data={'username': 'foo', 'authority': 'read'},
        headers={'Authorization': admin_token})
    assert r.status_code == 404
    assert r.json['status'] == 'NotFound'
    assert r.json['message'] == 'application %s does not exist' % name
    r = client.post(
        '/api/auth/application/%s' % test_application.application_name,
        data={'username': 'foo', 'authority': 'read'},
        headers={'Authorization': admin_token})
    assert r.status_code == 404
    assert r.json['status'] == 'NotFound'
    assert r.json['message'] == ('application %s does not exist' %
                                 test_application.application_name)
    assert last_audit_log() is None
def test_add_application_auth_to_invalid_user(
        client, faker, add_user, admin_token, last_audit_log,
        test_application):
    """Granting auth to a missing or archived user returns 400 BadRequest."""
    add_user(['foo'])
    # Archive the user so it counts as non-existent too.
    user = User.get_by_name('foo')
    user.archive()
    application_name = test_application.application_name
    unknow_user = faker.uuid4()[:6]
    r = client.post(
        '/api/auth/application/%s' % application_name,
        data={'username': unknow_user, 'authority': 'read'},
        headers={'Authorization': admin_token})
    assert r.status_code == 400
    assert r.json['status'] == 'BadRequest'
    assert r.json['message'] == 'user %s does not exist' % unknow_user
    r = client.post(
        '/api/auth/application/%s' % application_name,
        data={'username': 'foo', 'authority': 'read'},
        headers={'Authorization': admin_token})
    assert r.status_code == 400
    assert r.json['status'] == 'BadRequest'
    assert r.json['message'] == 'user foo does not exist'
    assert last_audit_log() is None
@mark.xparametrize
def test_delete_application_auth(
        add_user, add_application_auth, list_application_auth, format_values,
        present_user, present_auth, request_auth, expected_resp, expected_auth,
        client, test_application, admin_token, last_audit_log):
    """DELETE /api/auth/application/<name> dismisses a granted authority.

    Table-driven (xparametrize): verifies the HTTP response (with
    placeholder expansion via format_values), the remaining auth list, and
    that a DISMISS_APPLICATION_AUTH audit log is written only on success.
    """
    add_user(present_user)
    add_application_auth(present_auth)
    username, authority = request_auth.split(':', 1)
    r = client.delete(
        '/api/auth/application/%s' % test_application.application_name,
        data={'username': username, 'authority': authority},
        headers={'Authorization': admin_token})
    assert r.status_code == expected_resp['status_code']
    assert r.json == format_values(expected_resp['content'])
    assert set(list_application_auth()) == set(expected_auth)
    audit_log = last_audit_log()
    if expected_resp['status_code'] == 200:
        assert audit_log.action_name == 'DISMISS_APPLICATION_AUTH'
        assert audit_log.action_json['application_name'] == \
            test_application.application_name
        assert audit_log.action_json['username'] == username
        assert audit_log.action_json['authority'] == authority
    else:
        assert audit_log is None
@mark.xparametrize
def test_list_application_auth(
        add_user, add_application_auth, list_application_auth, format_values,
        present_user, present_auth, expected_data,
        client, test_application, admin_token):
    """GET /api/auth/application/<name> lists the granted authorities."""
    add_user(present_user)
    add_application_auth(present_auth)
    r = client.get(
        '/api/auth/application/%s' % test_application.application_name,
        headers={'Authorization': admin_token})
    assert_response_ok(r)
    for item, expected_item in zip(
            reversed(r.json['data']['application_auth']), # order by key desc
            expected_data['application_auth']):
        ex = format_values(expected_item)
        assert item['authority'] == ex['authority']
        assert item['user']['username'] == ex['username']
        assert item['user']['is_application'] == ex['is_application']
        assert item['user']['is_active'] is True
        assert item['user']['is_admin'] is False
def test_add_application_without_permission(
        client, test_user, test_token, test_application,
        list_application_auth, last_audit_log):
    """A non-admin token cannot grant auth: 400 NoAuthError, no side effects."""
    r = client.post(
        '/api/auth/application/%s' % test_application.application_name,
        data={'username': test_user.username, 'authority': 'read'},
        headers={'Authorization': test_token})
    assert r.status_code == 400
    assert r.json['status'] == 'NoAuthError'
    assert r.json['data'] is None
    assert set(list_application_auth()) == set([])
    assert last_audit_log() is None
@mark.parametrize('test_authority', ['unknow'])
def test_add_application_with_unknown_authority(
        client, test_user, test_application, test_authority, admin_token,
        list_application_auth, last_audit_log):
    """Granting an unrecognized authority name is rejected with 400."""
    r = client.post(
        '/api/auth/application/%s' % test_application.application_name,
        data={'username': test_user.username, 'authority': test_authority},
        headers={'Authorization': admin_token})
    assert r.status_code == 400
    assert r.json['status'] == 'BadRequest'
    assert r.json['data'] is None
    assert set(list_application_auth()) == set([])
    assert last_audit_log() is None
@mark.parametrize('test_authority', ['unknow'])
def test_delete_application_with_unknown_authority(
        client, test_user, test_application, test_authority, admin_token):
    """Dismissing an unrecognized authority name is rejected with 400."""
    r = client.delete(
        '/api/auth/application/%s' % test_application.application_name,
        data={'username': test_user.username, 'authority': test_authority},
        headers={'Authorization': admin_token})
    assert r.status_code == 400
    assert r.json['status'] == 'BadRequest'
    assert r.json['data'] is None
| [
"mozillazg101@gmail.com"
] | mozillazg101@gmail.com |
a9861cd77b3dc21961d3a34b43b9b498b8566623 | f90cfb75e75aef72fcb7c7b66c5775ecc712868b | /3.py | c60f6f79f822f2f3496947fa4761bdb3836e6558 | [] | no_license | 757306650/learnPythonTheHardWay | 5a73b0c40df7174f4b2b119166da986638b8a0fc | 9764b41c220570792aa9d4fa51d94ac224b95fd8 | refs/heads/master | 2020-08-09T00:07:56.496258 | 2015-03-05T03:33:33 | 2015-03-05T03:33:33 | 23,909,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | print "I will now count my chickens:"
print "Hens", 25 + 30 / 6
print "Roosters",100 -25 * 3 % 4
print "Now I will count the eggs:"
print 3 + 2 + 1 - 5 + 4 % 2 - 1 / 4.0 + 6
print "Is it true that 3 + 2 < 5 - 7?"
print 3 + 2 < 5 - 7
print "What is 3 +2?", 3 + 2
print "What is 5 - 7?", 5 - 7
print "Oh, that's why it's False."
print "How about some more."
print "Is it greater?", 5 > -2
print "Is it greater or equal", 5 >= -2
print "Is it less or equal?", 5 <= -2 | [
"757306650@qq.com"
] | 757306650@qq.com |
d4d08f73436f51abbf9249999f8bd5b6dce1cb2a | ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3 | /python/baiduads-sdk-auto/test/test_add_campaign_feed_response_wrapper_body.py | 2f27de5df0deda8130743c0c31f94739b41f8938 | [
"Apache-2.0"
] | permissive | baidu/baiduads-sdk | 24c36b5cf3da9362ec5c8ecd417ff280421198ff | 176363de5e8a4e98aaca039e4300703c3964c1c7 | refs/heads/main | 2023-06-08T15:40:24.787863 | 2023-05-20T03:40:51 | 2023-05-20T03:40:51 | 446,718,177 | 16 | 11 | Apache-2.0 | 2023-06-02T05:19:40 | 2022-01-11T07:23:17 | Python | UTF-8 | Python | false | false | 928 | py | """
dev2 api schema
'dev2.baidu.com' api schema # noqa: E501
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import baiduads
from baiduads.campaignfeed.model.campaign_feed_type import CampaignFeedType
globals()['CampaignFeedType'] = CampaignFeedType
from baiduads.campaignfeed.model.add_campaign_feed_response_wrapper_body import AddCampaignFeedResponseWrapperBody
class TestAddCampaignFeedResponseWrapperBody(unittest.TestCase):
    """Unit test stubs for the AddCampaignFeedResponseWrapperBody model."""

    def setUp(self):
        """No fixtures are required for these generated stubs."""

    def tearDown(self):
        """Nothing to clean up."""

    def testAddCampaignFeedResponseWrapperBody(self):
        """Test AddCampaignFeedResponseWrapperBody"""
        # FIXME: construct object with mandatory attributes with example values
        # model = AddCampaignFeedResponseWrapperBody() # noqa: E501
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| [
"tokimekiyxp@foxmail.com"
] | tokimekiyxp@foxmail.com |
5e9f5edc1885013a836356ef125492c6de7d6b52 | 8ddda8fb6e5853126dcdafa3281c75071ada45c1 | /vyperlogix/trees/BinarySearchTree/__init__.py | 9e650390474f68b25b15dfda4a0d59db47884e97 | [
"CC0-1.0"
] | permissive | raychorn/chrome_gui | a48f3f9d931922a018e894f891ccd952476cd1ee | f1fade70b61af12ee43c55c075aa9cfd32caa962 | refs/heads/master | 2022-12-19T19:46:04.656032 | 2020-10-08T14:45:14 | 2020-10-08T14:45:14 | 299,167,534 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,463 | py | __copyright__ = """\
(c). Copyright 2008-2020, Vyper Logix Corp., All Rights Reserved.
Published under Creative Commons License
(http://creativecommons.org/licenses/by-nc/3.0/)
restricted to non-commercial educational use only.,
http://www.VyperLogix.com for details
THE AUTHOR VYPER LOGIX CORP DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
USE AT YOUR OWN RISK.
"""
from vyperlogix.classes.CooperativeClass import Cooperative
class Node(Cooperative):
    # A single binary-tree node: key in ``value``, payload in ``data``,
    # child links in ``lchild``/``rchild`` (None means no child).
    def __init__(self, lchild=None, rchild=None, value=-1, data=None):
        self.lchild = lchild
        self.rchild = rchild
        self.value = value
        self.data = data
class BinaryTree(Cooperative):
    """Binary search tree mapping integer keys (``value``) to payloads (``data``).

    Public interface (unchanged): ``add`` returns 0 on success and -1 for a
    duplicate key, ``search`` returns the stored data or None, ``deleteNode``
    returns 1 when a node was removed and 0 otherwise, and ``sort`` prints
    the nodes in ascending key order.
    """
    def __init__(self):
        self.l = [] # Nodes (historically only ever records the first root)
        self.root = None

    def add(self, key, dt):
        """Insert (key, dt); return 0 on success, -1 if key already exists."""
        if self.root is None:
            self.root = Node(value=key, data=dt)
            self.l.append(self.root)
            return 0
        node = self.root
        while True:
            if node.value > key:
                if node.lchild is None:
                    node.lchild = Node(value=key, data=dt)
                    return 0 # Success
                node = node.lchild
            elif node.value == key:
                return -1 # Value already in tree
            else:
                if node.rchild is None:
                    node.rchild = Node(value=key, data=dt)
                    return 0 # Success
                node = node.rchild

    def search(self, key):
        """Search Tree for a key and return data; if not found return None."""
        node = self.root
        while node is not None:
            if node.value == key:
                return node.data
            node = node.lchild if node.value > key else node.rchild
        return None # Not found

    def deleteNode(self, key):
        """Delete node with value == key; return 1 if removed, 0 if absent.

        Bug fix: the previous implementation lost entire subtrees when the
        removed node (or the root) had two children, and crashed on an empty
        tree.  This version performs the standard BST deletion (inorder-
        successor replacement), so every other key stays reachable.
        """
        parent, node = None, self.root
        while node is not None and node.value != key:
            parent = node
            node = node.lchild if node.value > key else node.rchild
        if node is None:
            return 0 # Not found anything to delete
        if node.lchild is not None and node.rchild is not None:
            # Two children: copy the inorder successor's key/payload into
            # this node, then delete the successor (which has no left child).
            succ_parent, succ = node, node.rchild
            while succ.lchild is not None:
                succ_parent, succ = succ, succ.lchild
            node.value, node.data = succ.value, succ.data
            parent, node = succ_parent, succ
        # Now ``node`` has at most one child; splice it out of the tree.
        child = node.lchild if node.lchild is not None else node.rchild
        if parent is None:
            self.root = child
        elif parent.lchild is node:
            parent.lchild = child
        else:
            parent.rchild = child
        return 1

    def proceed(self, parent, delValue):
        """Legacy helper kept for backward compatibility.

        WARNING: when ``delValue`` has two children this returns only the
        right subtree and discards the left one, which is why ``deleteNode``
        no longer uses it.
        """
        if delValue.lchild == None and delValue.rchild == None:
            return None
        elif delValue.rchild == None:
            return delValue.lchild
        else:
            return delValue.rchild

    def sort(self):
        """Print all nodes inorder, i.e. in ascending key order."""
        self.__traverse__(self.root, mode=1)

    def __traverse__(self, v, mode=0):
        """Traverse in: preorder = 0, inorder = 1, postorder = 2."""
        if v is None:
            return
        if mode == 0:
            print (v.value, v.data)
            self.__traverse__(v.lchild)
            self.__traverse__(v.rchild)
        elif mode == 1:
            self.__traverse__(v.lchild, 1)
            print (v.value, v.data)
            self.__traverse__(v.rchild, 1)
        else:
            self.__traverse__(v.lchild, 2)
            self.__traverse__(v.rchild, 2)
            print (v.value, v.data)
if (__name__ == "__main__"):
import sys
print >>sys.stdout, __copyright__
print >>sys.stderr, __copyright__
tree = BinaryTree()
tree.add(4, "test1")
tree.add(10, "test2")
tree.add(23, "test3")
tree.add(1, "test4")
tree.add(3, "test5")
tree.add(2, "test6")
tree.sort()
print tree.search(3)
print tree.deleteNode(10)
print tree.deleteNode(23)
print tree.deleteNode(4)
print tree.search(3)
tree.sort()
| [
"raychorn@gmail.com"
] | raychorn@gmail.com |
5c7f43b343bb678692b6d73976eb01b710234f75 | ee8414ce8c0ea9439731e89e2e72e3759ad5d59b | /blog/migrations/0001_initial.py | cf36c2f89715075d097e5d8f50ab66bd33368009 | [] | no_license | louis6575/blog_project | cd38d06bfd0f448cbb886304fc3bbd4f32b210d0 | 452140512bc6f0b58c9bafad98d243db6084cc03 | refs/heads/master | 2020-07-25T19:53:28.237448 | 2019-09-14T08:25:50 | 2019-09-14T08:25:50 | 208,408,066 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,290 | py | # Generated by Django 2.1.7 on 2019-09-13 05:19
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
# Auto-generated by Django's ``makemigrations`` -- edit with care: the
# schema below must stay in sync with the app's models.py.
class Migration(migrations.Migration):
    # First migration of this app: builds the schema from scratch.
    initial = True
    # Must run after Django's auth migrations: the custom User model below
    # references auth's Group/Permission models.
    dependencies = [
        ('auth', '0009_alter_user_last_name_max_length'),
    ]
    operations = [
        # Custom user model: Django's standard auth fields plus profile
        # extras (avatar, qq, mobile, url).
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('avatar', models.ImageField(blank=True, default='avatar/default.png', max_length=200, null=True, upload_to='avatar/%Y/%m', verbose_name='用户头像')),
                ('qq', models.CharField(blank=True, max_length=20, null=True, verbose_name='QQ号码')),
                ('mobile', models.CharField(blank=True, max_length=11, null=True, unique=True, verbose_name='手机号码')),
                ('url', models.URLField(blank=True, max_length=100, null=True, verbose_name='个人网页地址')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': '用户',
                'verbose_name_plural': '用户',
                'ordering': ['-id'],
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
        # Advertisement banner: image plus optional click-through URL.
        migrations.CreateModel(
            name='Ad',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50, verbose_name='广告标题')),
                ('description', models.CharField(max_length=200, verbose_name='广告描述')),
                ('image_url', models.ImageField(upload_to='ad/%Y/%m', verbose_name='图片路径')),
                ('callback_url', models.URLField(blank=True, null=True, verbose_name='回调url')),
                ('date_publish', models.DateTimeField(auto_now_add=True, verbose_name='发布时间')),
                ('index', models.IntegerField(default=999, verbose_name='排列顺序(从小到大)')),
            ],
            options={
                'verbose_name': '广告',
                'verbose_name_plural': '广告',
                'ordering': ['index', 'id'],
            },
        ),
        # Blog article; its FKs/M2Ms to Category, Tag and User are added
        # by the AddField operations at the end of this migration.
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50, verbose_name='文章标题')),
                ('desc', models.CharField(max_length=50, verbose_name='文章描述')),
                ('content', models.TextField(verbose_name='文章内容')),
                ('click_count', models.IntegerField(default=0, verbose_name='点击次数')),
                ('is_recommend', models.BooleanField(default=False, verbose_name='是否推荐')),
                ('date_publish', models.DateTimeField(auto_now_add=True, verbose_name='发布时间')),
            ],
            options={
                'verbose_name': '文章',
                'verbose_name_plural': '文章',
                'ordering': ['-date_publish'],
            },
        ),
        # Article category, ordered by an explicit index.
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, verbose_name='分类名称')),
                ('index', models.IntegerField(default=999, verbose_name='分类的排序')),
            ],
            options={
                'verbose_name': '分类',
                'verbose_name_plural': '分类',
                'ordering': ['index', 'id'],
            },
        ),
        # Comment: may belong to a registered user or be anonymous
        # (username/email), and may reply to another comment via ``pid``.
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField(verbose_name='评论内容')),
                ('username', models.CharField(blank=True, max_length=30, null=True, verbose_name='用户名')),
                ('email', models.EmailField(blank=True, max_length=50, null=True, verbose_name='邮箱地址')),
                ('url', models.URLField(blank=True, max_length=100, null=True, verbose_name='个人网页地址')),
                ('date_publish', models.DateTimeField(auto_now_add=True, verbose_name='发布时间')),
                ('article', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='blog.Article', verbose_name='文章')),
                ('pid', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='blog.Comment', verbose_name='父级评论')),
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='用户')),
            ],
            options={
                'verbose_name': '评论',
                'verbose_name_plural': '评论',
            },
        ),
        # Friendly links shown on the site, ordered by index.
        migrations.CreateModel(
            name='Links',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50, verbose_name='标题')),
                ('description', models.CharField(max_length=200, verbose_name='友情链接描述')),
                ('callback_url', models.URLField(verbose_name='url地址')),
                ('date_publish', models.DateTimeField(auto_now_add=True, verbose_name='发布时间')),
                ('index', models.IntegerField(default=999, verbose_name='排列顺序(从小到大)')),
            ],
            options={
                'verbose_name': '友情链接',
                'verbose_name_plural': '友情链接',
                'ordering': ['index', 'id'],
            },
        ),
        # Free-form article tag.
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, verbose_name='标签名称')),
            ],
            options={
                'verbose_name': '标签',
                'verbose_name_plural': '标签',
            },
        ),
        # Relations added after all models exist to avoid ordering issues.
        migrations.AddField(
            model_name='article',
            name='category',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='blog.Category', verbose_name='分类'),
        ),
        migrations.AddField(
            model_name='article',
            name='tag',
            field=models.ManyToManyField(to='blog.Tag', verbose_name='标签'),
        ),
        migrations.AddField(
            model_name='article',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='用户'),
        ),
    ]
| [
"louis6575@laiyun.comouis6575@laiyun.com"
] | louis6575@laiyun.comouis6575@laiyun.com |
47193ec756fca771cdefe437d9cfd48ad786e116 | a9ac3c537fc778b34cb32d4528e2d1190e65e19e | /scripts/quantum_hall/plot_soi_vs_density.py | 1d23715075d28101edc610eb3beca90b6a499c9b | [
"MIT"
] | permissive | wms269/shabanipy | 9f770cfdf113ca8e8af69cd793be2f8bf9b0141a | 1e751631e031c528e18d5e0d8ff4fa1457f4107e | refs/heads/master | 2022-09-23T15:43:43.875608 | 2020-04-09T17:49:24 | 2020-04-09T17:49:24 | 265,638,022 | 1 | 0 | MIT | 2020-05-20T17:25:40 | 2020-05-20T17:25:39 | null | UTF-8 | Python | false | false | 2,360 | py | # -*- coding: utf-8 -*-
"""Plot Rashba and mobility vs density from extracted parameters.
The csv file to read is expected to have been generated by
extract_soi_from_wal.py
"""
# =============================================================================
# --- Parameters --------------------------------------------------------------
# =============================================================================
#: Path to the csv fild holding the data
PATH = ('/Users/mdartiailh/Documents/PostDocNYU/DataAnalysis/WAL/JS124/'
'average_rashba_only/JS138_124HB_BM003_004_wal_analysis_avg.csv')
#: Density column
DENSITY_COLUMN = 'Density (m^-2)'
#: Mobility column
MOBILITY_COLUMN = 'Mobility (m^2V^-1s^-1)'
#: SOI column
SOI_COLUMN = 'Rashba SOI (meV.A)'
#: Densities to mark on the SOI plot (in 1e12 cm^-2)
DENSITIES = [1.3, 1.9, 3.8]
#: Number of points on which to average the SOI strength to compute stderr
STDERR_COMPUTATION = 3
# =============================================================================
# --- Execution ---------------------------------------------------------------
# =============================================================================
import os
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 14})
plt.rcParams.update({'pdf.fonttype': 42})
data = pd.read_csv(PATH, comment='#')
density = np.array(data[DENSITY_COLUMN][1:])
mobility = np.array(data[MOBILITY_COLUMN][1:])
rashba = np.array(data[SOI_COLUMN][1:])
density = np.mean(density.reshape((-1, STDERR_COMPUTATION)), axis=1)
mean_mob = np.mean(mobility.reshape((-1, STDERR_COMPUTATION)), axis=1)
std_mob = np.std(mobility.reshape((-1, STDERR_COMPUTATION)), axis=1)
mean_soi = np.mean(rashba.reshape((-1, STDERR_COMPUTATION)), axis=1)
std_soi = np.std(rashba.reshape((-1, STDERR_COMPUTATION)), axis=1)
fig, (ax1, ax2) = plt.subplots(1, 2, sharex=True, constrained_layout=True, figsize=(15,4))
ax1.errorbar(density/1e4, mean_mob*1e4, std_mob*1e4, fmt='+', color='C2')
ax1.set_ylabel('Mobility (cm$^{-2}$V${^-1}$s$^{-1}$)')
ax1.set_xlabel('Density (cm$^{-2}$)')
ax2.errorbar(density/1e4, mean_soi, std_soi, fmt='+',)
ax2.set_ylabel('Rashba SOI (meV.A)')
ax2.set_xlabel('Density (cm$^{-2}$)')
for n in DENSITIES:
ax2.axvline(n*1e12, ymin=0.95, color='k')
plt.show()
| [
"marul@laposte.net"
] | marul@laposte.net |
34cc387898fdb3ccf60d8ab5773e76ac4b48c1a4 | bba5208fdaf32b36fddfeb918154646fdb750f3f | /FlaskApp/testFlaskMysql.py | f1583accdbcce1da54afc7c5396b3ad2680aff38 | [] | no_license | rwenor/python | ef3005fcbdb6ab288a2537689d90b0695e1afb1d | e17da86ce1cbbcae56a45ea4c13170cbf73ca14f | refs/heads/master | 2020-12-14T02:38:52.397482 | 2019-03-03T22:18:50 | 2019-03-03T22:18:50 | 31,793,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,392 | py | from flask import Flask, request
from flaskext.mysql import MySQL
mysql = MySQL()
app = Flask(__name__)
app.config['MYSQL_DATABASE_USER'] = 'webuser'
app.config['MYSQL_DATABASE_PASSWORD'] = 'webbruker'
app.config['MYSQL_DATABASE_DB'] = 'EmpData'
app.config['MYSQL_DATABASE_HOST'] = 'localhost'
mysql.init_app(app)
@app.route("/")
def hello():
return "Welcome to Python Flask App!"
@app.route("/Authenticate")
def Authenticate():
username = request.args.get('UserName')
password = request.args.get('Password')
cursor = mysql.connect().cursor()
cursor.execute("SELECT * from User where Username='" + username + "' and Password='" + password + "'")
data = cursor.fetchone()
if data is None:
return "Username or Password is wrong"
else:
return "Logged in successfully"
@app.route("/UserList")
def UserList():
cursor = mysql.connect().cursor()
cursor.execute("SELECT * from User")
data = cursor.fetchone()
if data is None:
return "Username or Password is wrong"
else:
s = ""
#s = data.keys()
s += "<table>"
# s += "<th><td></td>"
while data:
s += "<tr><td>" + str(data[0]) + "<td><td>" + str(data[1]) + "<td><tr>"
data = cursor.fetchone()
s += "</table>"
return s
if __name__ == "__main__":
app.run()
| [
"rwenor1@icloud.com"
] | rwenor1@icloud.com |
9c32089e5865258988d73e8474c68a70f34955e7 | 068d271e241d8cdb46dbf4243166e4b8ee7025b2 | /Django/进阶部分/day67课上代码两个项目哦/day67/mysite67/app01/urls.py | 59ca711f596bdc197689aaf3513219e0abe2620d | [] | no_license | caiqinxiong/python | f6e226e76cb62aac970bcfbcb6c8adfc64858b60 | 9029f6c528d2cb742b600af224e803baa74cbe6a | refs/heads/master | 2023-05-26T19:41:34.911885 | 2020-05-15T09:02:08 | 2020-05-15T09:02:08 | 195,261,757 | 1 | 0 | null | 2021-06-10T23:33:33 | 2019-07-04T15:01:42 | JavaScript | UTF-8 | Python | false | false | 338 | py | from django.conf.urls import url
from app01 import views
urlpatterns = [
url(r'^home/', views.home, {"age": 18}, name="home"),
# 位置参数
url(r'^book/([0-9]{2,4})/([a-zA-Z]{2})/$', views.book, name="book"),
# 关键字参数
# url(r'^book/(?P<year>[0-9]{2,4})/(?P<title>[a-zA-Z]{2})/$', views.book, name="book")
] | [
"13269469526@163.com"
] | 13269469526@163.com |
75fa7c2ad05614228581a1e25cce3711f66b50db | 4b694bb43cd5caa11391c6a1c5855367b2396a1b | /大三下多媒体系统/multimedia/trade/trade/wsgi.py | fc614da8677f2f87b3ee7b77612bea0feb48f375 | [] | no_license | Echo120799/homework-backup | 52bd379c0cee351d529626238b89f567ce4b453e | 0324e6cdaf2b7fc0d51c866ae8ede4dac592926b | refs/heads/master | 2023-02-10T04:31:23.257213 | 2021-01-07T13:15:08 | 2021-01-07T13:15:08 | 327,533,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | """
WSGI config for trade project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "trade.settings")
application = get_wsgi_application()
| [
"zyw_120799@163.com"
] | zyw_120799@163.com |
afc4f75c6fcc4bf51e815449ac88b7cfae427d06 | e23acd18bd00ba669ef3321fb7801cf83575ef6b | /scamp_catalog_elvis_ssos.py | 35e62b5f9a70b525fbdce09e0ab1b1954796ff70 | [] | no_license | sgongar/Euclid-SSOs-Pipeline-Tests | dfb986c008ca34d28b6e5ad4d155b2b88cf6d091 | fec2dcb33578d901cbe34df41558c03a345c9cd1 | refs/heads/master | 2020-03-26T14:13:20.181947 | 2018-10-19T15:58:14 | 2018-10-19T15:58:14 | 144,977,854 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,906 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
""" Creates a catalog populated of ssos from sextracted catalogs
of single CCDs images.
Versions:
- 0.1: Initial release.
Information:
-
Todo:
* Get out columns definition. Too long for a single function.
* Improve variable nomenclature.
* Explanations are still not so easy to understand.
* POO implementation?
* Unit testing.
*GNU Terry Pratchett*
"""
from math import cos, sin
from multiprocessing import Process
from sys import stdout
from astropy.units import degree
from astropy.coordinates import SkyCoord
from numpy import pi
from pandas import concat, DataFrame, read_csv
from images_management_elvis import get_borders
from misc_cats import extract_cats_d, create_full_cats, extract_ssos_df
from misc import extract_settings_elvis, check_distance, check_source
__author__ = "Samuel Góngora García"
__copyright__ = "Copyright 2018"
__credits__ = ["Samuel Góngora García"]
__version__ = "0.1"
__maintainer__ = "Samuel Góngora García"
__email__ = "sgongora@cab.inta-csic.es"
__status__ = "Development"
def create_output_catalog_dict():
    """Build an empty output-catalogue container.

    Returns a dict mapping every output-catalogue column name to a fresh,
    independent empty list, ready to be filled row by row.

    :return: cat_d
    """
    columns = ('IDX', 'SOURCE', 'DITHER', 'RA', 'DEC', 'VEL', 'ABMAG',
               'THETA', 'MAG_AUTO', 'A_IMAGE', 'B_IMAGE', 'THETA_IMAGE',
               'ERRA_IMAGE', 'ERRB_IMAGE', 'MAGERR_AUTO', 'ERRA_WORLD',
               'ERRB_WORLD', 'ERRTHETA_WORLD', 'CLASS_STAR', 'PM', 'PMERR')
    return {column: [] for column in columns}
def create_empty_catalog_dict():
    """Build an empty input-catalogue container.

    Returns a dict mapping each input-catalogue column name to a fresh,
    independent empty list.

    :return: cat_d
    """
    columns = ('IDX', 'SOURCE', 'DITHER', 'RA', 'DEC', 'VEL', 'ABMAG',
               'THETA')
    return {column: [] for column in columns}
def propagate_dithers():
    """Propagate each SSO's sky position across the four dither exposures.

    Starting from the input ephemerides (extract_ssos_df), computes an
    (RA, Dec) pair per source and dither by applying the proper motion
    (VEL, arcsec/hour along position angle THETA) over the exposure
    timeline.  Dither 1 is shifted by half of a 565 s exposure
    (presumably to centre the position at mid-exposure -- TODO confirm);
    subsequent dithers advance by 1003 s each (exposure + slew, presumably;
    TODO confirm against the survey timing).

    :return: DataFrame with one row per (source, dither) position.
    """
    ssos_d = create_empty_catalog_dict()
    ssos_df = extract_ssos_df()
    unique_sources = ssos_df['SOURCE']
    # Running row index over all (source, dither) pairs.
    idx = 0
    # Move over sources and dithers
    for idx_source, source_ in enumerate(unique_sources):
        source_df = ssos_df[ssos_df['SOURCE'].isin([source_])]
        # Dither 1
        ssos_d['IDX'].append(idx)
        # NOTE: the enumerate variable ``idx_source`` is overwritten here
        # with the catalogue SOURCE identifier (intentional reuse).
        idx_source = source_df['SOURCE'].iloc[0]
        ssos_d['SOURCE'].append(idx_source)
        dither_source = 1
        ssos_d['DITHER'].append(dither_source)
        pm_source = source_df['VEL'].iloc[0]
        ssos_d['VEL'].append(pm_source)
        pa_source = source_df['THETA'].iloc[0]
        ssos_d['THETA'].append(pa_source)
        mag_source = source_df['ABMAG'].iloc[0]
        ssos_d['ABMAG'].append(mag_source)
        # Half of a 565 s exposure, expressed as a fraction of an hour.
        dither_time = (565.0 / 2) / 3600.0
        alpha_source = source_df['RA'].iloc[0]
        # RA rate: proper-motion component along the position angle.
        alpha_increment_per_hour = cos(
            float(pa_source * pi / 180.0)) * float(pm_source)
        alpha_increment_per_dither = alpha_increment_per_hour * dither_time
        # Increment is in arcsec; /3600 converts it to degrees.
        alpha_source = alpha_source + (alpha_increment_per_dither / 3600.0)
        ssos_d['RA'].append(alpha_source)
        delta_source = source_df['DEC'].iloc[0]
        # Dec rate: orthogonal proper-motion component.
        delta_increment_per_hour = sin(
            float(pa_source * pi / 180.0)) * float(pm_source)
        delta_increment_per_dither = delta_increment_per_hour * dither_time
        delta_source = delta_source + (delta_increment_per_dither / 3600.0)
        ssos_d['DEC'].append(delta_source)
        # Dithers 2-4: advance the position cumulatively from dither 1.
        for dither_source in range(2, 5, 1):
            # dither_time is equal to fraction of hour
            dither_time = 1003.0/3600.0
            idx += 1
            alpha_increment_per_hour = cos(float(pa_source*pi/180.0)) * float(pm_source)
            alpha_increment_per_dither = alpha_increment_per_hour * dither_time
            alpha_source = alpha_source + (alpha_increment_per_dither / 3600.0)
            delta_increment_per_hour = sin(float(pa_source*pi/180.0)) * float(pm_source)
            delta_increment_per_dither = delta_increment_per_hour * dither_time
            delta_source = delta_source + (delta_increment_per_dither / 3600.0)
            ssos_d['IDX'].append(idx)
            ssos_d['SOURCE'].append(idx_source)
            ssos_d['DITHER'].append(dither_source)
            ssos_d['RA'].append(alpha_source)
            ssos_d['DEC'].append(delta_source)
            ssos_d['VEL'].append(pm_source)
            ssos_d['THETA'].append(pa_source)
            ssos_d['ABMAG'].append(mag_source)
        idx += 1
    sso_cat = DataFrame(ssos_d)
    return sso_cat
def filter_by_position(sso_df):
    """Keep only the propagated positions that fall on a CCD footprint.

    Each (source, dither) row is converted to barycentric true ecliptic
    coordinates and tested against the per-dither CCD borders returned by
    get_borders(); rows outside every CCD are dropped.

    sso_clean_df columns:
    - ABMAG
    - DEC
    - DITHER
    - IDX
    - RA
    - SOURCE
    - THETA
    - VEL

    :param sso_df: DataFrame of propagated SSO positions (see
        propagate_dithers).
    :return: sso_clean_df: the subset of *sso_df* visible on some CCD.
    """
    borders_d = get_borders()
    right_sources = []
    unique_sources = list(set(sso_df['SOURCE']))
    for idx_source_, source_ in enumerate(unique_sources):
        source_df = sso_df[sso_df['SOURCE'].isin([source_])]
        # NOTE(review): the dither number is taken from the row position
        # (enumerate starting at 1), which assumes each source's rows are
        # ordered dither 1..4 -- TODO confirm against propagate_dithers.
        for idx_dither_, row in enumerate(source_df.itertuples(), 1):
            alpha = row.RA
            delta = row.DEC
            # Equatorial -> ecliptic, matching the frame of the borders.
            source_coords = SkyCoord(ra=alpha * degree, dec=delta * degree,
                                     equinox='J2021.5')
            source_coords_ecliptic = source_coords.barycentrictrueecliptic
            lon_e = float(source_coords_ecliptic.lon.degree)
            lat_e = float(source_coords_ecliptic.lat.degree)
            # A position is kept if it lies inside any CCD of this dither.
            for ccd_ in borders_d[idx_dither_].keys():
                borders = borders_d[idx_dither_][ccd_]
                lon_comp = borders['below_lon'] < lon_e < borders['above_lon']
                lat_comp = borders['below_lat'] < lat_e < borders['above_lat']
                comp = lon_comp and lat_comp
                if comp:
                    right_sources.append(row.IDX)
    # Removes non visible sources
    sso_clean_df = sso_df[sso_df['IDX'].isin(right_sources)]
    return sso_clean_df
def create_catalog():
    """Cross-match propagated SSO positions against the CCD catalogues.

    Splits the visible SSO sources into ten roughly equal chunks, matches
    each chunk in a separate process (create_ssos_catalog_thread), then
    merges the ten partial CSVs into ``catalogues_detected/ssos.csv``.

    :return: the merged catalogue as a DataFrame.
    """
    cats_d = extract_cats_d()  # extracts dataframes from catalogues
    full_d = create_full_cats(cats_d)  # creates dataframe from CCDs catalogues
    ssos_df = propagate_dithers()
    ssos_clean_df = filter_by_position(ssos_df)
    unique_sources = list(set(ssos_clean_df['SOURCE'].tolist()))
    total_ssos = len(unique_sources)
    # Integer division: the original ``/`` yields a float under Python 3,
    # which then breaks the list slicing below with a TypeError.
    sub_list_size = total_ssos // 10
    sub_list_l = []
    for idx_sub_list in range(0, 10, 1):
        if idx_sub_list != (10 - 1):
            idx_down = sub_list_size * idx_sub_list
            idx_up = sub_list_size * (idx_sub_list + 1)
            sub_list_l.append(unique_sources[idx_down:idx_up])
        else:
            # The last chunk takes the remainder so no source is lost.
            idx_down = sub_list_size * idx_sub_list
            sub_list_l.append(unique_sources[idx_down:])
    areas_j = []
    for idx_l in range(0, 10, 1):
        areas_p = Process(target=create_ssos_catalog_thread,
                          args=(idx_l, sub_list_l[idx_l], ssos_clean_df,
                                full_d))
        areas_j.append(areas_p)
        areas_p.start()
    # Wait for every worker; join() blocks cleanly instead of the
    # busy-wait loop over is_alive() used before.
    for areas_p in areas_j:
        areas_p.join()
    # Merges the per-process partial catalogues.
    ssos_list = []
    for idx_csv in range(0, 10, 1):
        ssos_ = read_csv('tmp_ssos/ssos_{}.csv'.format(idx_csv),
                         index_col=0)
        ssos_list.append(ssos_)
    ssos_df = concat(ssos_list)
    ssos_df.to_csv('catalogues_detected/ssos.csv')
    return ssos_df
def create_ssos_catalog_thread(idx_l, sub_list, ssos_df, full_d):
    """Cross-match one chunk of SSO sources and dump a partial catalogue.

    Worker executed in a separate process (despite the 'thread' name).
    For each (source, dither) position it looks for the closest detection
    in the corresponding CCD catalogue and records both the input
    (simulated) quantities and the measured SExtractor ones.  Results are
    written to ``tmp_ssos/ssos_<idx_l>.csv`` for later merging by
    create_catalog().

    Fix: removed a leftover debug ``print(o_df.columns)`` followed by a
    bare ``raise Exception`` which aborted the worker on the very first
    successful match.

    :param idx_l: index of this worker, used to name the output CSV.
    :param sub_list: SOURCE identifiers handled by this worker.
    :param ssos_df: catalogue of propagated, visible SSO positions.
    :param full_d: dict of per-dither CCD detection catalogues.
    :return: None; output goes to disk.
    """
    keys = ['ALPHA_J2000', 'DELTA_J2000']
    # Creates an empty catalog for all detected sources
    cat_d = create_output_catalog_dict()
    total_thread = len(sub_list)
    stdout.write('total SSOs {} of thread {}\n'.format(total_thread, idx_l))
    for idx, sso in enumerate(sub_list):
        source_df = ssos_df[ssos_df['SOURCE'].isin([sso])]
        for dither_ in source_df['DITHER'].tolist():
            dither_df = source_df[source_df['DITHER'].isin([dither_])]
            # Gets alpha/delta of actual source
            alpha = float(dither_df['RA'].iloc[0])
            delta = float(dither_df['DEC'].iloc[0])
            o_df = check_source(full_d[dither_], alpha, delta, keys)
            if not o_df.empty:
                # Keep only the closest of the candidate detections.
                index = check_distance(o_df, alpha, delta)
                o_df = o_df.iloc[[index]]
                # Input (simulated) quantities for this source/dither.
                idx_ = int(dither_df['IDX'].iloc[0])
                cat_d['IDX'].append(idx_)
                source = int(dither_df['SOURCE'].iloc[0])
                cat_d['SOURCE'].append(source)
                cat_d['DITHER'].append(dither_)
                # Measured quantities from the matched detection.
                alpha_j2000 = float(o_df['ALPHA_J2000'].iloc[0])
                cat_d['RA'].append(alpha_j2000)
                delta_j2000 = float(o_df['DELTA_J2000'].iloc[0])
                cat_d['DEC'].append(delta_j2000)
                vel = float(dither_df['VEL'].iloc[0])
                cat_d['VEL'].append(vel)
                abmag = float(dither_df['ABMAG'].iloc[0])
                cat_d['ABMAG'].append(abmag)
                theta = float(dither_df['THETA'].iloc[0])
                cat_d['THETA'].append(theta)
                mag_auto = float(o_df['MAG_AUTO'].iloc[0])
                cat_d['MAG_AUTO'].append(mag_auto)
                a_image = float(o_df['A_IMAGE'].iloc[0])
                cat_d['A_IMAGE'].append(a_image)
                b_image = float(o_df['B_IMAGE'].iloc[0])
                cat_d['B_IMAGE'].append(b_image)
                theta_image = float(o_df['THETA_IMAGE'].iloc[0])
                cat_d['THETA_IMAGE'].append(theta_image)
                erra_image = float(o_df['ERRA_IMAGE'].iloc[0])
                cat_d['ERRA_IMAGE'].append(erra_image)
                errb_image = float(o_df['ERRB_IMAGE'].iloc[0])
                cat_d['ERRB_IMAGE'].append(errb_image)
                magerr_auto = float(o_df['MAGERR_AUTO'].iloc[0])
                cat_d['MAGERR_AUTO'].append(magerr_auto)
                erra_world = float(o_df['ERRA_WORLD'].iloc[0])
                cat_d['ERRA_WORLD'].append(erra_world)
                errb_world = float(o_df['ERRB_WORLD'].iloc[0])
                cat_d['ERRB_WORLD'].append(errb_world)
                errtheta_world = float(o_df['ERRTHETA_WORLD'].iloc[0])
                cat_d['ERRTHETA_WORLD'].append(errtheta_world)
                class_star = float(o_df['CLASS_STAR'].iloc[0])
                cat_d['CLASS_STAR'].append(class_star)
                pm = float(o_df['PM'].iloc[0])
                cat_d['PM'].append(pm)
                pmerr = float(o_df['PMERR'].iloc[0])
                cat_d['PMERR'].append(pmerr)
    cat_df = DataFrame(cat_d, columns=['IDX', 'SOURCE', 'DITHER', 'RA', 'DEC',
                                       'VEL', 'ABMAG', 'THETA', 'MAG_AUTO',
                                       'A_IMAGE', 'B_IMAGE', 'THETA_IMAGE',
                                       'ERRA_IMAGE', 'ERRB_IMAGE',
                                       'MAGERR_AUTO', 'ERRA_WORLD',
                                       'ERRB_WORLD', 'ERRTHETA_WORLD',
                                       'CLASS_STAR', 'PM', 'PMERR'])
    cat_df.to_csv('tmp_ssos/ssos_{}.csv'.format(idx_l))
if __name__ == "__main__":
prfs_dict = extract_settings_elvis()
catalogue = create_catalog()
| [
"s.gongoragarcia@gmail.com"
] | s.gongoragarcia@gmail.com |
49fc0cdc6dbac8ebbb8f5e66c1e4a477e42004de | e7558ffd73cbfafc742c989590d27526d2417f2f | /src/husky_highlevel_controller_ex3.py | e7d151443e825794b3eea583fb39f49e2db895ab | [] | no_license | gbrlb/husky_highlevel_controller | afdace7982e4c050aca8c6482214fac098c1deec | b230782325d92598a9e1313a3ab7ff9806fd4103 | refs/heads/master | 2023-02-10T21:38:15.379345 | 2021-01-11T20:28:52 | 2021-01-11T20:28:52 | 328,766,761 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,964 | py | #!/usr/bin/env python
import rospy
import tf
import tf2_ros
import numpy as np
from sensor_msgs.msg import LaserScan
from geometry_msgs.msg import Twist, Quaternion, Pose, Point, Vector3
from visualization_msgs.msg import Marker
def callback(data):
    """Laser-scan callback: publish a marker at the closest obstacle and
    drive the robot towards it with a proportional controller.

    Relies on module-level globals created in the ``__main__`` block:
    vel_publisher, cmd_vel_msg, kp_linear, kp_angular, marker_colum_pub,
    tfBuffer, marker_path and maker_Husky_path_pub.
    """
    # Closest return in the scan.  NOTE(review): assumes ``ranges`` holds
    # no NaN/inf entries; argmin would pick them up otherwise.
    dist_min_index = np.argmin(data.ranges)
    dist_min = data.ranges[dist_min_index]
    # NOTE(review): assumes the scan spans 360 beams so (360 - index)
    # converts the beam index into an angle -- confirm against the laser
    # configuration.
    angle = (360 - dist_min_index) * data.angle_increment
    angle_deg = np.rad2deg(angle)  # NOTE(review): computed but unused.
    # Cartesian coordinates of the closest point in the laser frame.
    dist_x = np.cos(angle) * dist_min
    dist_y = -np.sin(angle) * dist_min
    print("===============================================")
    rospy.loginfo('distancia minima index %s', dist_min_index)
    rospy.loginfo('distancia minima %s', dist_min)
    rospy.loginfo('angle %s, %s', angle, np.rad2deg(angle))
    rospy.loginfo('distancia x %s', dist_x)
    rospy.loginfo('distancia y %s', dist_y)
    # Publish a cylinder marker at the detected obstacle position.
    scale = Vector3(3,2,1)  # NOTE(review): unused; make_marker gets its own scale.
    colum_pose = Pose(Point(x=dist_x, y=dist_y, z=0), Quaternion(x=0, y=0, z=0, w=1))
    print(colum_pose)
    marker_colum_pub.publish(make_marker(Marker.CYLINDER, frame_id='/base_laser', pose=colum_pose, scale=[.5, .5, 1], color=[0, 1, 0, 1]))
    # Proportional controller towards the closest point.
    loop_rate = rospy.Rate(1000)
    print("===== go to ======")
    print('linear:', dist_min)
    print('angular:', np.rad2deg(angle))
    # rospy.loginfo("Moves from:[x={:.2f}, y={:.2f}] to Goal:[x={:.2f}, y={:.2f}]".format(x, y, x_goal, y_goal))
    if dist_min > data.range_min:
        # Valid reading: command velocities proportional to the error.
        print("range min:", data.range_min)
        cmd_vel_msg.linear.x = dist_min * kp_linear
        cmd_vel_msg.angular.z = angle * kp_angular
        print(cmd_vel_msg.linear.x)
        print(cmd_vel_msg.angular.z)
        vel_publisher.publish(cmd_vel_msg)
        loop_rate.sleep()
    else:
        # Closest reading at/below the sensor minimum: record the robot's
        # current odometry pose on the path marker instead of moving.
        print("estoy aqui!!!")
        path = tfBuffer.lookup_transform('odom', 'base_link', rospy.Time())
        marker_path.points.append(path.transform.translation)
        maker_Husky_path_pub.publish(marker_path)
def make_marker(marker_type=1,
                frame_id='world',
                pose=None,
                position=None,
                orientation=None,
                scale=[.1, .1, .1],
                color=[1, 1, 0, 1] # ['r','g','b','a']
                ):
    """Build and return an RViz visualization_msgs/Marker.

    ``pose`` (if given) is applied first; ``position`` and ``orientation``
    then override the matching part of it.  ``scale`` is [x, y, z] and
    ``color`` is [r, g, b, a].  NOTE: the mutable list defaults are never
    mutated here, so sharing them across calls is harmless.
    """
    marker = Marker()
    marker.header.frame_id = frame_id
    marker.header.stamp = rospy.Time.now()
    # One namespace per marker type; id is fixed, so publishing another
    # marker of the same type replaces the previous one.
    marker.ns = 'marker_%d' % marker_type
    marker.id = 0
    marker.type = marker_type
    marker.action = Marker.ADD
    # Full pose ['position','orientation'] first...
    if pose is not None:
        marker.pose = pose
    # ...then an explicit orientation ['x','y','z','w'] override...
    if orientation is not None:
        marker.pose.orientation = orientation
    # ...and an explicit position ['x','y','z'] override.
    if position is not None:
        marker.pose.position = position
    # Marker scale Vector3 ['x','y','z'].
    marker.scale.x = scale[0]
    marker.scale.y = scale[1]
    marker.scale.z = scale[2]
    # Marker color ColorRGBA ['r','g','b','a'].
    marker.color.r = color[0]
    marker.color.g = color[1]
    marker.color.b = color[2]
    marker.color.a = color[3] # Don't forget to set the alpha!
    return marker
def HuskyHighlevelController():
    """Subscribe to the laser scan and spin; all work happens in callback()."""
    rospy.Subscriber('/scan', LaserScan, callback)
    rospy.spin()
rospy.init_node('HuskyHighlevelController_node', anonymous=True)
topic = "/husky_velocity_controller/cmd_vel"
queue_size = 10
vel_publisher = rospy.Publisher(topic, Twist)
marker_colum_pub = rospy.Publisher('visualization_marker', Marker, queue_size=5)
cmd_vel_msg = Twist()
kp_linear = 10
kp_angular = 1
tfBuffer = tf2_ros.Buffer()
listener = tf2_ros.TransformListener(tfBuffer)
maker_Husky_path_pub = rospy.Publisher('Husky_path', Marker, queue_size=5)
marker_path = make_marker(Marker.LINE_STRIP,
frame_id='odom',
scale=[0.03, 0, 0],
color=[1, 1, 0, 1])
HuskyHighlevelController() | [
"user@email.com"
] | user@email.com |
4870c9e3db9c7c26afdfe0929fb1dfcf4a5bc6d3 | 199f1aefc6eac12a72a4097332c3f88020d7b1ef | /CovidCanada/backend/DateFormatter.py | ce76cd8abe460bc38be654b4c8423f9ea14f8a6a | [] | no_license | ghepburn/Python-Canadian-CoronaVirus-Dashboard | 8451fd46481dada1692a11a9abeb11bb82b1e2f1 | 7e0d886f0c39f4df2ac983f057c1a771b665c955 | refs/heads/master | 2021-04-21T01:50:05.191047 | 2020-03-24T15:14:52 | 2020-03-24T15:14:52 | 249,738,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | import datetime as dt
class DateFormatter():
    """Convert date strings into ISO ``YYYY-MM-DD`` form."""
    def reformat(self, date, currentFormat):
        """Parse *date* with *currentFormat* and return only its date
        part as an ISO ``YYYY-MM-DD`` string."""
        parsed = dt.datetime.strptime(date, currentFormat)
        return parsed.date().isoformat()
| [
"ghepburn@hotmail.ca"
] | ghepburn@hotmail.ca |
fb3f759c7e3ac455a1fcda37deb223b5a01ada9d | f0ba54fbfa0935f71d3f23b9f8379d159aa406ac | /aide_moi/urls.py | 84967579505c6bc14f3ed04c322d0762da07525b | [] | no_license | rruellepetel/django_project | 11473ebb309033fd5c5ee5a4048257c5b8240b4d | 7c28553f379a2d560d8cf6b686c25687352ef92c | refs/heads/master | 2022-12-14T15:29:00.891276 | 2017-06-02T14:48:31 | 2017-06-02T14:48:31 | 93,175,598 | 0 | 0 | null | 2022-12-07T23:57:46 | 2017-06-02T14:47:52 | Python | UTF-8 | Python | false | false | 765 | py | """aide_moi URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
# Project-level routing: only the Django admin site is exposed here.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
]
| [
"ruellepetelromain@gmail.com"
] | ruellepetelromain@gmail.com |
07d4e5cde570f5d256a407ac909fef297459f0d4 | fed717fba196d75a4f2cc9625b9edf0ebe30af41 | /cleanup/prettify.py | 07210656fec9e74f763d39a8b4cacd4b2d2b7f87 | [
"Apache-2.0"
] | permissive | swapniltamse/openstack-doc-tools | 1c551ddc6104165d470e025f7c13c98aa34ffe0c | efc4e36190f9ade8baa5ec1c975f98beacaa7373 | refs/heads/master | 2021-01-21T03:08:13.236216 | 2014-09-22T09:08:43 | 2014-09-22T09:08:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,246 | py | #!/usr/bin/env python
"""A script to prettify HTML and XML syntax.
Some examples of the prettified syntax are available
in the following changes:
* https://review.openstack.org/#/c/98652/
* https://review.openstack.org/#/c/98653/
* https://review.openstack.org/#/c/98655/
"""
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# author: Christian Berendt <berendt@b1-systems.de>
from __future__ import print_function
import argparse
import sys
from bs4 import BeautifulSoup
def parse_command_line_arguments():
    """Parse the command line arguments and return the namespace."""
    argument_parser = argparse.ArgumentParser()
    argument_parser.add_argument(
        "--write-changes", action="store_true", default=False,
        help="Write prettified XML or HTML syntax back to file.")
    argument_parser.add_argument(
        "file", type=str, default=None,
        help="A XML or HTML File to prettify.")
    return argument_parser.parse_args()
def main():
    """Entry point for this script.

    Prettifies the given file with BeautifulSoup and either writes the
    result back (--write-changes) or prints it to stdout.  Returns a
    process exit status: 0 on success, 1 on I/O failure.
    """
    args = parse_command_line_arguments()
    try:
        # NOTE(review): the handle passed to BeautifulSoup is never closed
        # explicitly, and no parser is named, so bs4 picks the best one
        # installed (output can differ between lxml and html.parser).
        soup = BeautifulSoup(open(args.file))
    except IOError as exception:
        print("ERROR: File '%s' could not be parsed: %s"
              % (args.file, exception))
        return 1
    if args.write_changes:
        try:
            # Overwrite the original file with the prettified bytes.
            with open(args.file, 'wb') as output:
                prettified = soup.prettify(encoding="utf8")
                output.write(prettified)
        except IOError as exception:
            print("ERROR: File '%s' could not be written: %s"
                  % (args.file, exception))
            return 1
    else:
        # Default mode: print the prettified markup to stdout.
        prettified = soup.prettify(encoding="utf8")
        print(prettified)
    return 0
sys.exit(main())
| [
"berendt@b1-systems.de"
] | berendt@b1-systems.de |
601584c7d405f3a1ac1a5d97c88af82afb9011f4 | 4ef7c87861ea33e2348afe88b8614fc4119c0a19 | /app.py | 1c46a62caa1f617f5588702e50dba65f2fb9f3b3 | [] | no_license | imnayakshubham/Quora-Question-Pairs | b1a3fc942731bd7a2ddc8f4d1398b7d33490ad19 | 656927385103f5988d66f36143fe36dc82b83704 | refs/heads/main | 2022-12-29T13:01:55.241008 | 2020-10-04T08:43:35 | 2020-10-04T08:43:35 | 300,828,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,992 | py | from flask import Flask,render_template,request
import re
import nltk
from nltk.stem import WordNetLemmatizer
lm = WordNetLemmatizer()  # shared lemmatizer used by the /predict view
import pickle
# English stopword list inlined (NLTK-style) so no corpus download is
# needed at runtime.
stopword = ['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', "you're", "you've", "you'll", "you'd", 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', "she's", 'her', 'hers', 'herself', 'it', "it's", 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', "that'll", 'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such']
# Pre-trained artefacts produced offline: TF-IDF vectoriser + classifier.
# NOTE(review): pickle.load executes arbitrary code -- these files must
# come from a trusted source.
tfidf = pickle.load(open('tfidf.pkl','rb'))
model = pickle.load(open('quora.pkl','rb'))
app = Flask(__name__)
@app.route('/')
def home():
    """Render the landing page with the question-entry form."""
    return render_template('index.html')
@app.route('/predict' ,methods = ['POST'])
def predict():
    """Clean the two submitted questions, vectorise them and predict."""
    if request.method == "POST":
        combined = request.form['t1'] + " " + request.form['t2']
        # Strip non-letters, lower-case, then lemmatise every token
        # that is not a stopword.
        tokens = re.sub('[^a-zA-Z]', ' ', combined).lower().split()
        cleaned = ' '.join(lm.lemmatize(tok) for tok in tokens if tok not in stopword)
        corpus = [cleaned]
        vector = tfidf.transform(corpus).toarray()
        prediction = model.predict(vector)
        return render_template('index.html', result = prediction)
# Start Flask's development server (debug mode) when run directly.
if __name__ == "__main__":
    app.run(debug=True)
| [
"noreply@github.com"
] | imnayakshubham.noreply@github.com |
53870faab9508764cebb4450d548559b848be407 | 4816a1942aece925334ce6a1acfdff2587871ad7 | /lecture-1.py | 623a48177fc7aefc0ce0e73bf26e5dc4714dd115 | [] | no_license | jahidshawon19/Advanced-Python | 3cf2cdeed968223c7aca076efc53c0ab589164f7 | fec6f5e19351b023c3005e0ddd1e82dfe1b28929 | refs/heads/master | 2023-06-29T00:01:46.307562 | 2021-07-25T06:17:57 | 2021-07-25T06:17:57 | 388,816,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,319 | py | ############################# DECORATORS ###################################
"""
definition: decorators are a way to change or modify
the behavior of any of your functions or methods
without directly changing any of the code
"""
# prog-1
# def div(x,y): # div function calculation division for two numbers
# if x < y: # issue is here we had to change in our function.
# x,y = y,x
# print(x/y)
# to fix this issue we can apply decorators below
def div(x, y):
    """Print the quotient of x divided by y."""
    quotient = x / y
    print(quotient)
def my_decorator(func):
    """Wrap *func* so the larger of its two arguments comes first."""
    def wrapper(x, y):
        # Swap so that x is always the larger value before delegating.
        if x < y:
            x, y = y, x
        return func(x, y)
    return wrapper
# Demo: wrap div so the larger argument is always the dividend.
if __name__ == "__main__":
    result = my_decorator(div)
    result(5,15)
############################# GENERATORS ###################################
'''
Generator functions allow you to declare a function that behaves like an iterator.
They allow programmers to make an iterator in a fast, easy, and clean way.
it is Saving memory space.
'''
#prog-1
def sqaure():
    """Yield the squares of 1 through 10, one at a time (generator)."""
    # yield turns this function into a generator: values are produced
    # lazily, saving memory compared with building a full list.
    for n in range(1, 11):
        yield n * n
# Consume the generator lazily; each square is produced on demand.
result = sqaure()
for i in result:
    print(i)
############################# CONTEXT MANAGER - FILE HANDLING ###################################
'''
context manager allows us to properly manage resources so that we can specify exactly what we want to set up.
'''
with open('employee.txt', 'r') as f:
    print(f.read()) # prints the whole file; the cursor is now at EOF
    print(f.readline()) # already at EOF, so this prints an empty line
    print(f.read(10)) # likewise returns '' because the file is exhausted
############################# MULTITHREADING ###################################
from time import sleep
from threading import *
class Airstrike(Thread):
    """Worker thread announcing five bomb drops, two seconds apart."""
    def run(self):
        for _ in range(5):
            print("Lunch Bomb")
            sleep(2)
class Artillery(Thread):
    """Worker thread announcing five rocket launches, two seconds apart."""
    def run(self):
        for _ in range(5):
            print("Throw Rocket")
            sleep(2)
captain_tahmid = Airstrike()
colonel_guljar = Artillery()
# Start the first thread, wait 3 s, then start the second so the two
# threads' messages interleave.
captain_tahmid.start()
sleep(3)
colonel_guljar.start()
# Block until both threads finish before printing the final message.
captain_tahmid.join()
colonel_guljar.join()
print("Destroyed Israel!")
| [
"jahidshawon1730@gmail.com"
] | jahidshawon1730@gmail.com |
c1ea7be5a424f44a18d28aa26aac27e24250d36d | 5fefad55c16c792a3dc0477c8385644fdc961cc5 | /domain/music.py | fcdd118d6beb25e122f5408ece5c3ef0f85ba5a2 | [] | no_license | Dorin07/project_sdb | a0674efc566b1abc5fccf70bcb071294c69c539b | 1ee421acd021492b1638485ee95859249aa50f7f | refs/heads/master | 2022-12-02T00:53:31.470724 | 2020-08-18T19:42:33 | 2020-08-18T19:42:33 | 272,493,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,631 | py | class Music:
    def __init__(self, id, name, artist, gen, duration):
        """Create a song with its id, name, artist, genre and duration."""
        self.__name = name
        self.__artist = artist
        self.__gen = gen
        self.__duration = duration
        self.__id = id
    def get_id(self):
        """Return the song's unique id."""
        return self.__id
    def set_id(self, value):
        """Replace the song's unique id."""
        self.__id = value
    def get_name(self):
        """Return the song's title."""
        return self.__name
    def get_artist(self):
        """Return the song's artist."""
        return self.__artist
    def get_gen(self):
        """Return the song's genre (field named 'gen' in this class)."""
        return self.__gen
    def get_duration(self):
        """Return the song's duration (units not specified here)."""
        return self.__duration
    def set_name(self, name):
        """Replace the song's title."""
        self.__name = name
    def set_artist(self, artist):
        """Replace the song's artist."""
        self.__artist = artist
    def set_gen(self, gen):
        """Replace the song's genre."""
        self.__gen = gen
    def set_duration(self, duration):
        """Replace the song's duration."""
        self.__duration = duration
def __repr__(self):
return "Id: {0}, Name: {1}, Artist: {2}, Gen: {3}, Duration: {4}".format(self.__id, self.__name, self.__artist,
self.__gen,
self.__duration)
def __str__(self):
return "Id: {0}, Name: {1}, Artist: {2}, Gen: {3}, Duration: {4}".format(self.__id, self.__name, self.__artist,
self.__gen,
self.__duration)
    def __eq__(self, other):
        """
        Two songs are considered equal when their ids match.
        :param other: object to compare with; must expose get_id()
        :return: True if the ids are equal, False otherwise
        """
        return other.get_id() == self.__id
| [
"47871786+Dorin07@users.noreply.github.com"
] | 47871786+Dorin07@users.noreply.github.com |
28630e10caac44c62f98b0f86af906f33d97d559 | b1f801f4f805467491c0b7c2db01c7806c10f4ea | /hockey/oilers.py | e86e659297f1c98fe3f825a975a73fb97d18d29d | [
"MIT"
] | permissive | Obliviatum/Trusty-cogs | 2fd00effade8cb45c139a85aac53b791d1a278f9 | f2297675f92b8cfc25993271b8ad6abccbec7230 | refs/heads/master | 2022-12-16T15:51:05.072770 | 2020-09-10T23:40:16 | 2020-09-10T23:40:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,178 | py | import asyncio
from phue import Bridge
import functools
class Oilers:
    """Flash Philips Hue lights on two bridges for Edmonton Oilers goals.
    Saves the current light state, strobes the lights five times, then
    restores the saved state.  All blocking phue calls are wrapped in
    functools.partial and run on the bot loop's executor so the event
    loop is never blocked.
    NOTE(review): the bridge IPs are hard-coded to a local network.
    """
    def __init__(self, bot):
        # bot is expected to expose an asyncio loop as bot.loop.
        self.bot = bot
        # Two separate Hue bridges; each exposes its own light list.
        self.bridge = Bridge("192.168.50.123")
        self.lights = self.bridge.lights
        self.bridge2 = Bridge("192.168.50.163")
        self.lights2 = self.bridge2.lights
        # Snapshots of light state keyed by light name, filled by
        # get_current_lights_setting() as [on, colortemp] pairs.
        self.cur_lights = {}
        self.cur_lights2 = {}
    def goal_lights(self):
        """Schedule the goal light show and return the created task.
        The coroutine snapshots the current settings, alternates all
        lights between xy colours (1.0, 1.0) and (0, 0) five times with
        0.5 s pauses, then restores the snapshot.  Each executor call is
        bounded by a 60 s timeout.
        """
        async def task():
            # Save the current on/colortemp state before flashing.
            task = functools.partial(self.get_current_lights_setting)
            task = self.bot.loop.run_in_executor(None, task)
            try:
                await asyncio.wait_for(task, timeout=60)
            except asyncio.TimeoutError:
                # If we cannot snapshot, abort rather than lose state.
                return
            for i in range(5):
                # Flash "on" colour.
                task = functools.partial(self.oilers_hex_set, x=1.0, y=1.0)
                task = self.bot.loop.run_in_executor(None, task)
                try:
                    await asyncio.wait_for(task, timeout=60)
                except asyncio.TimeoutError:
                    pass
                # await self.oilers_hex_set(1.0, 1.0)
                await asyncio.sleep(0.5)
                # Flash "off" colour.
                task = functools.partial(self.oilers_hex_set, x=0, y=0)
                task = self.bot.loop.run_in_executor(None, task)
                try:
                    await asyncio.wait_for(task, timeout=60)
                except asyncio.TimeoutError:
                    pass
                # await self.oilers_hex_set(0, 0)
                await asyncio.sleep(0.5)
            # Restore the snapshot taken at the start.
            task = functools.partial(self.reset_light_setting)
            task = self.bot.loop.run_in_executor(None, task)
            try:
                await asyncio.wait_for(task, timeout=60)
            except asyncio.TimeoutError:
                return
        return self.bot.loop.create_task(task())
    def reset_light_setting(self):
        """Restore colortemp and on/off state saved in the snapshots.
        The colortemp is clamped to 154..499; presumably the Hue mired
        range is 154-500 -- note values above 500 become 499, not 500
        (TODO confirm intended bound).
        """
        for light in self.lights:
            old_temp = self.cur_lights[light.name][1]
            if old_temp < 154:
                old_temp = 154
            if old_temp > 500:
                old_temp = 499
            light.colortemp = old_temp
            light.on = self.cur_lights[light.name][0]
        for light in self.lights2:
            old_temp = self.cur_lights2[light.name][1]
            if old_temp < 154:
                old_temp = 154
            if old_temp > 500:
                old_temp = 499
            light.colortemp = old_temp
            light.on = self.cur_lights2[light.name][0]
        return
    def get_current_lights_setting(self):
        """Snapshot [on, colortemp] for every light on both bridges."""
        for light in self.lights:
            self.cur_lights[light.name] = [light.on, light.colortemp]
        for light in self.lights2:
            self.cur_lights2[light.name] = [light.on, light.colortemp]
        return
    def oilers_hex_set(self, x: float, y: float):
        """Sets the colour for Oilers Goals.
        x and y are xy colour coordinates; any out-of-range value is
        forced to 1.0 (note: values below 0 also become 1.0, not 0).
        Lights that are off are switched on before colouring.
        """
        if x > 1.0 or x < 0.0:
            x = 1.0
        if y > 1.0 or y < 0.0:
            y = 1.0
        for light in self.lights:
            if not light.on:
                light.on = True
            light.xy = [x, y]
        for light in self.lights2:
            if not light.on:
                light.on = True
            light.xy = [x, y]
| [
"TrustyJAID@gmail.com"
] | TrustyJAID@gmail.com |
bad0060f6efaaa896c6b73ab19d0e3d274c6d7c3 | 7e1c3f30d14295e099014cc5ce05bacfbec029f1 | /djangoProject/djangoProject/demo/migrations/0008_article_kind.py | 9d4957e5062a9f7e7085ca48768a7ad4ae5e3687 | [] | no_license | CloyeeYH/SE-blog | 91adae86eab208aff6531e5262391d4771ea3040 | 96717b8d07849d6227958f8cd2bf8616f2f13723 | refs/heads/master | 2023-08-04T22:30:18.603869 | 2021-09-09T14:21:14 | 2021-09-09T14:21:14 | 404,715,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | # Generated by Django 3.2.6 on 2021-09-02 07:38
from django.db import migrations, models
# Auto-generated migration: adds an integer 'kind' column (default 0)
# to the demo app's Article model.
class Migration(migrations.Migration):
    dependencies = [
        ('demo', '0007_auto_20210901_1405'),
    ]
    operations = [
        migrations.AddField(
            model_name='article',
            name='kind',
            field=models.IntegerField(default=0),
        ),
    ]
| [
"yuhui0120@126.com"
] | yuhui0120@126.com |
ae660978c70bf156c4711f7b042186b8b7f5f25d | ff642c107f68787cae4fb3cf95e428e7e0856228 | /FAQ1/nlp_engine/extractors/Company_next.py | f902059eb5325d62b7b27e63bbc6d211a2e77542 | [] | no_license | riteshsharthi/botx | 62d0d86fdcde58e4222cbf6655e1315175d2d3d3 | dc6031ac1bb788bae6697eacbd5c2d942d7f9f95 | refs/heads/master | 2022-12-08T09:46:10.626671 | 2019-01-03T12:03:05 | 2019-01-03T12:03:05 | 163,968,682 | 0 | 0 | null | 2022-11-22T03:11:05 | 2019-01-03T12:12:58 | HTML | UTF-8 | Python | false | false | 995 | py | import re
class CompanyNameExtractor:
    """Extracts a company-like token from free-form text using regexes."""

    def __init__(self):
        pass

    def company_code_extract(self, input):
        """Return the first company-like token found in *input*.

        Hyphenated tokens (e.g. "ABC-123") are preferred; otherwise any
        token containing a digit is used.

        Raises:
            IndexError: if no candidate token is found (original
                behaviour preserved).
        """
        # Tokens containing a hyphen, e.g. "ABC-123".
        hyphenated = re.findall(r'[\w\.-]+-[\w\.-]+', input)
        if hyphenated:
            # BUG FIX: the original referenced 'number' before assignment
            # in this branch, raising NameError whenever a hyphenated
            # match existed.
            number = hyphenated
        else:
            # Fall back to any token that contains a digit.
            number = re.findall(r'[\w\.-]+\d[\w\.-]+', input)
        return number[0]
# Interactive smoke test: prompt for a name on stdin and run extraction
# (the result is discarded).
if __name__ == '__main__':
    user_input=input("Enter Company Name: ")
    obj=CompanyNameExtractor()
    obj.company_code_extract(user_input)
| [
"ritesh@gmail.com"
] | ritesh@gmail.com |
f1c28b9f2d8984912a425244146ed95fcf931182 | bcdc3d92655eb28246710344ec08b2d9c02521c3 | /dz6-2.py | 05fc43d524b51d135bacbd516541dce4781ccc33 | [] | no_license | sydekz/NetologyHomeWork | b1cc44b4483d6dd3ef38a1a894f2586573269216 | d753d75354105acd095ef345ff2582c8d8ce3a75 | refs/heads/master | 2023-05-08T21:38:46.108675 | 2021-05-24T16:40:01 | 2021-05-24T16:40:01 | 323,945,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,183 | py |
class Student:
    """A learner who can enrol in courses and grade lecturers."""

    def __init__(self, name, surname, gender):
        self.name = name
        self.surname = surname
        self.gender = gender
        self.finished_courses = []
        self.courses_in_progress = []
        self.grades = {}

    def rate_lecturer(self, lecturer, course, grade):
        """Append *grade* to *lecturer*'s record for *course*.

        Returns the string 'Ошибка' ("error") when the target is not a
        Lecturer or the course is not shared by both parties; returns
        None on success.
        """
        can_rate = (
            isinstance(lecturer, Lecturer)
            and course in lecturer.courses_attached
            and course in self.courses_in_progress
        )
        if not can_rate:
            return 'Ошибка'
        lecturer.grades.setdefault(course, []).append(grade)
class Mentor:
    """Base class for teaching staff: holds a name and attached courses."""
    def __init__(self, name, surname):
        self.name = name
        self.surname = surname
        self.courses_attached = []
class Lecturer(Mentor):
    """Mentor who lectures; students rate them via Student.rate_lecturer."""
    def __init__(self, name, surname):
        super().__init__(name, surname)
        self.grades = {}  # course name -> list of grades given by students
class Reviewer(Mentor):
    """Mentor who reviews and grades students' homework."""

    def rate_hw(self, student, course, grade):
        """Append *grade* to *student*'s record for *course*.

        Returns 'Ошибка' ("error") when the target is not a Student or
        the course is not shared by both parties; returns None on
        success.
        """
        can_rate = (
            isinstance(student, Student)
            and course in self.courses_attached
            and course in student.courses_in_progress
        )
        if not can_rate:
            return 'Ошибка'
        student.grades.setdefault(course, []).append(grade)
# Demo: create a lecturer and a reviewer, let a student rate the
# lecturer three times on the shared 'Python' course, then print the
# collected grades.
new_lecturer = Lecturer('Anton', 'Ivanov')
new_lecturer.courses_attached += ['Python']
print(f'{new_lecturer.name} {new_lecturer.surname} {new_lecturer.courses_attached}')
new_reviewer = Reviewer('Alex', 'Mitnik')
print(f'{new_reviewer.name} {new_reviewer.surname} {new_reviewer.courses_attached}')
best_student = Student('Ruoy', 'Eman', 'your_gender')
best_student.courses_in_progress += ['Python']
best_student.rate_lecturer(new_lecturer, 'Python', 10)
best_student.rate_lecturer(new_lecturer, 'Python', 9)
best_student.rate_lecturer(new_lecturer, 'Python', 10)
print(f'{new_lecturer.grades}')
#
# cool_mentor = Mentor('Some', 'Buddy')
# cool_mentor.courses_attached += ['Python']
#
# cool_mentor.rate_hw(best_student, 'Python', 10)
# cool_mentor.rate_hw(best_student, 'Python', 10)
# cool_mentor.rate_hw(best_student, 'Python', 10)
#
# print(best_student.grades) | [
"ladanov1@gmail.com"
] | ladanov1@gmail.com |
bed929c7f266d81eb3a9dadbc78e91464fd733ba | 3d3e4bb6c50b949f12714feb9b14842203df61be | /feature_extraction/tests/test_hog_extract_provided_images.py | cc0e8528c456076cbac716a3fd8d07a96674f9e2 | [] | no_license | Wubuntu88/CarND-Vehicle-Detection | 1d8c1683be4186193ecc1f868e9d57399c1402cc | 23e4e4b35ec60606b27cfea6ffe3a3019495a5cf | refs/heads/master | 2021-08-14T23:59:09.092897 | 2017-11-17T01:54:52 | 2017-11-17T01:54:52 | 110,314,332 | 0 | 0 | null | 2017-11-11T03:20:43 | 2017-11-11T03:20:43 | null | UTF-8 | Python | false | false | 719 | py | import feature_extraction.feature_extractor as fe
import data_loader.training_data_loader as tdl
import feature_extraction.hog_extractor as he
import matplotlib.pyplot as plt
import cv2
# OpenCV loads images in BGR channel order; convert to RGB for display.
# NOTE(review): cv2.imread returns None when the path is missing -- the
# relative path assumes this script is run from its own directory.
bgr_img = cv2.imread(filename='../../test_images/test1.jpg')
rgb_img = cv2.cvtColor(src=bgr_img, code=cv2.COLOR_BGR2RGB)
# junk1, hog_img_1 = he.get_single_channel_hog_features_and_images(img=rgb_img[:, :, 0], vis=True)
# junk2, hog_img_2 = he.get_single_channel_hog_features_and_images(img=rgb_img[:, :, 1], vis=True)
# Compute HOG on the blue channel (index 2 of the RGB image) and show it.
junk3, hog_img_3 = he.get_single_channel_hog_features_and_images(img=rgb_img[:, :, 2], vis=True)
plt.title("test1.jpg blue channel hog visualization", fontsize=20)
plt.imshow(hog_img_3, cmap='gray')
plt.show()
| [
"wgilles1@emich.edu"
] | wgilles1@emich.edu |
c1569e832b4ce1b84d1552812b7c073e05c446b1 | 74b91a49fafb871dd2fdfc5b80bd948bc960b9e6 | /mainsite/views.py | 81655a474530eff0e6dc34d2a3ebea118ffb0f10 | [] | no_license | jameerpathan111/jpathan-site | f39e4002464b81055959c426312b3ba1a0afac63 | ff37129c7a595133b6878229223e42e7835eb447 | refs/heads/main | 2022-12-27T01:13:19.923387 | 2020-10-11T13:33:19 | 2020-10-11T13:33:19 | 303,064,640 | 0 | 0 | null | 2020-10-11T13:17:02 | 2020-10-11T07:19:17 | Python | UTF-8 | Python | false | false | 126 | py | from django.shortcuts import render
def landing_page(request):
    """Render the site's landing page template with an empty context."""
    return render(request, 'mainsite/landing_page.html', {})
| [
"jameerpathan111@gmail.com"
] | jameerpathan111@gmail.com |
566782c6bc7bfe23bca51561bf47ecf9f8a6e789 | 22251d3abc44761f60bde4a7243c320983a4c777 | /python_scripts/main_leaderboard.py | 6b8795592b6756d6e00f6f9b251b07b42fec7c48 | [] | no_license | openpharma/openpharma.github.io | b8ebd4d2eece05d03d2f329599bd87a734425404 | 46e70910282c3f434ee45377f26d0416919576f9 | refs/heads/master | 2023-08-30T02:39:14.951348 | 2023-08-17T15:24:56 | 2023-08-17T15:24:56 | 95,364,409 | 9 | 3 | null | 2023-05-21T20:29:15 | 2017-06-25T14:21:59 | R | UTF-8 | Python | false | false | 2,631 | py | import os
import boto3
import pandas as pd
import gh_issues_graphql
import clean_leaderboard
"""
Env varibales for AWS S3 Bucket - data uploading there
"""
os.environ['AWS_ACCESS_KEY_ID'] = os.getenv('OPENPHARMA_AWS_ACCESS_KEY_ID')
os.environ['AWS_SECRET_ACCESS_KEY'] = os.getenv('OPENPHARMA_AWS_SECRET_ACCESS_KEY')
PATH_REPOS_CLEAN = "scratch/repos_clean.csv"
PATH_GH_LEADERBOARD = "scratch/gh_leaderboard.parquet"
PATH_GH_LEADERBOARD_PHARMAVERSE = "scratch/gh_leaderboard_pharmaverse.parquet"
PATH_PEOPLE = "scratch/people.csv"
PATH_PEOPLE_CLEAN = "scratch/people_clean.csv"
PATH_PEOPLE_CLEAN_PHARMAVERSE = "scratch/people_clean_pharmaverse.csv"
PATH_COMMIT = "scratch/commits.csv"
PATH_PHARMAVERSE_PACKAGES = "scratch/pharmaverse_packages.csv"
#scope all contributors of packages
df_repos_clean = pd.read_csv(PATH_REPOS_CLEAN)
df_gh_leaderboard = gh_issues_graphql.main_gh_issues(
df_repos_clean=df_repos_clean,
scope="all"
)
df_gh_leaderboard.to_parquet(PATH_GH_LEADERBOARD)
#pharmaverse scope
df_gh_leaderboard = gh_issues_graphql.main_gh_issues(
df_repos_clean=df_repos_clean,
scope="pharmaverse"
)
df_gh_leaderboard.to_parquet(PATH_GH_LEADERBOARD_PHARMAVERSE)
# We clean people csv here and not in main_clean becauze we need repos_clean (otherwise cycle in DAG)
#scope all contributors of packages
df_people_clean = clean_leaderboard.main_overall_metric(
path_people=PATH_PEOPLE,
path_gh_graphql=PATH_GH_LEADERBOARD,
path_commits=PATH_COMMIT,
scope="all"
)
df_people_clean.to_csv(PATH_PEOPLE_CLEAN, index=False)
#pharmaverse scope
df_people_clean = clean_leaderboard.main_overall_metric(
path_people=PATH_PEOPLE,
path_gh_graphql=PATH_GH_LEADERBOARD_PHARMAVERSE,
path_commits=PATH_COMMIT,
scope = "pharmaverse"
)
df_people_clean.to_csv(PATH_PEOPLE_CLEAN_PHARMAVERSE, index=False)
"""
AWS client
"""
client = boto3.client('s3',
aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID'),
aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY')
)
"""
Raw data in parquet format (bcz it preserves type contrarely to csv (dict and list))
"""
client.upload_file(Filename=PATH_GH_LEADERBOARD,
Bucket='openpharma',
Key='gh_leaderboard.parquet'
)
client.upload_file(Filename=PATH_GH_LEADERBOARD_PHARMAVERSE,
Bucket='openpharma',
Key='gh_leaderboard_pharmaverse.parquet'
)
"""
People clean data in csv format
"""
client.upload_file(Filename=PATH_PEOPLE_CLEAN,
Bucket='openpharma',
Key='people_clean.csv'
)
client.upload_file(Filename=PATH_PEOPLE_CLEAN_PHARMAVERSE,
Bucket='openpharma',
Key='people_clean_pharmaverse.csv'
)
| [
"mathieu.cayssol@gmail.com"
] | mathieu.cayssol@gmail.com |
aaab9495f26f7668c8d90c13c024a08cbcaee48b | 83adf05e3e2a20d1facce16981359f60568852b5 | /proxy/http/exception/base.py | 65138e87b756050ecaa88371391ac4547cf8c9fc | [
"BSD-3-Clause"
] | permissive | hash3liZer/proxy.py | e8cbb2e0b0f988312fb9080e7038d06429af5c69 | 29b88c5291a6193a438d92dcc5c025a5c07f6349 | refs/heads/develop | 2023-08-02T16:40:22.976904 | 2021-09-11T10:12:52 | 2021-09-11T10:12:52 | 399,802,139 | 3 | 0 | BSD-3-Clause | 2021-09-11T10:12:52 | 2021-08-25T12:00:29 | Python | UTF-8 | Python | false | false | 822 | py | # -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by Abhinav Singh and contributors.
:license: BSD, see LICENSE for more details.
"""
from typing import Optional
from ..parser import HttpParser
class HttpProtocolException(Exception):
    """Top level exception class for the HTTP protocol layer.
    All exceptions raised during execution of the HTTP request lifecycle
    MUST inherit from this base class.  Subclasses may override
    response() to optionally return a custom response to the client."""
    def response(self, request: HttpParser) -> Optional[memoryview]:
        # Default: no custom response is sent for this exception.
        return None  # pragma: no cover
| [
"noreply@github.com"
] | hash3liZer.noreply@github.com |
2b4d92d3292e81047c5230dabf58430a113fa1b0 | 453d2e699d218fdb3bc1e535a707988194ac6717 | /dash/render/renderer.py | 87d2dcebc49cfb1dfa4d0322c90249b714943d80 | [
"MIT"
] | permissive | defgsus/thegame | d54ffcd343c7e1805d2c11e24cd38b02243e73d4 | 38a627d9108f1418b94b08831fd640dd87fbba83 | refs/heads/master | 2023-07-23T06:32:40.297591 | 2022-04-11T12:02:32 | 2022-04-11T12:02:32 | 127,875,178 | 1 | 0 | MIT | 2023-07-06T22:07:07 | 2018-04-03T08:21:31 | Python | UTF-8 | Python | false | false | 3,349 | py | import time
import math
from typing import Optional
from pyglet import gl
import glm
from lib.opengl import *
from lib.math import FollowFilter
from .._path import ASSET_PATH
from ..game import Game
from .rs import GameRenderSettings
from .tilemap_node import TileMapNode
from .objects_node import ObjectsNode
from .object_debug_node import ObjectDebugNode
from .constraint_debug_node import ConstraintDebugNode
class GameRenderer:
    """Renders the game world via a render graph/pipeline.
    Keeps a smoothly-following 2D camera on the player and lazily builds
    the render graph (tile map + objects + optional debug overlay) on
    the first render() call.
    """
    def __init__(self, game: Game):
        self.game = game
        # Graph and pipeline are created lazily in render().
        self.graph: Optional[RenderGraph] = None
        self.pipeline: Optional[RenderPipeline] = None
        self.render_settings = GameRenderSettings(32, 32)
        self.debug_node = None
        self.frame_number = 0
        # Camera state in world coordinates.
        self.camera_pos = glm.vec2(-1, -5)
        self.camera_rotation = 0.
        self._target_speed_filter = FollowFilter(follow_up=.03, follow_down=.01)
    def update(self, time: float, dt: float):
        """Advance camera smoothing and pipeline state by dt seconds."""
        target = self.game.player
        #target_speed = self._target_speed_filter(target.average_speed)
        target_pos = glm.vec2(target.position) #+ target.direction_of_movement * target_speed * .5
        # Exponential-style chase toward the player, clamped to 1 per frame.
        self.camera_pos += min(1., dt * 3.) * (target_pos - self.camera_pos)
        # self.camera_rotation += min(1., dt*.3) * (self.game.player.rotation - self.camera_rotation)
        self.pipeline.update(self.render_settings, dt)
    def render(self):
        """Render one frame; builds graph and pipeline on first use."""
        self.render_settings.projection.location = self.camera_pos
        self.render_settings.projection.rotation_deg = self.camera_rotation
        if self.graph is None:
            self.graph = self.create_render_graph()
        if self.pipeline is None:
            self.pipeline = self.graph.create_pipeline()
            self.pipeline.dump()
            # self.pipeline.verbose = 5
        #if self.frame_number % 100 == 0:
        #    self.tile_render_node.upload_map(self.game.tile_map.get_map(0, 0, 32, 32))
        self.pipeline.render(self.render_settings)
        self.pipeline.render_to_screen(self.render_settings)
        self.frame_number += 1
    def create_render_graph(self) -> RenderGraph:
        """Build the render graph: tileset texture feeding the tile-map
        and object nodes, mixed together (plus a debug layer)."""
        graph = RenderGraph()
        tile_tex = Texture2DNode(
            ASSET_PATH /
            "tileset03.png"
        )
        graph.add_node(tile_tex)
        # Tile-map layer: 16x16 px tiles from a 10x6 tileset.
        self.tile_render_node = TileMapNode(
            "tilerender", self.game.world.tile_map,
            tile_size=(16, 16),
            tile_set_size=(10, 6),
        )
        graph.add_node(self.tile_render_node)
        graph.connect(tile_tex, 0, self.tile_render_node, mag_filter=gl.GL_NEAREST)
        # Object layer uses the same tileset texture.
        self.object_node = ObjectsNode(
            "objects", self.game.world.objects,
            tile_size=(16, 16),
            tile_set_size=(10, 6),
        )
        graph.add_node(self.object_node)
        graph.connect(tile_tex, 0, self.object_node, mag_filter=gl.GL_NEAREST)
        if 1:
            # Debug overlay is currently always enabled ('if 1').
            self.debug_node = ConstraintDebugNode(
                "debug", self.game.world.objects,
            )
            graph.add_node(self.debug_node)
        # Additively mix tile layer, objects and (optionally) debug.
        mix_node = graph.add_node(postproc.Add("mix", count=3 if self.debug_node else 2))
        graph.connect(self.tile_render_node, 0, mix_node, 0)
        graph.connect(self.object_node, 0, mix_node, 1)
        if self.debug_node:
            graph.connect(self.debug_node, 0, mix_node, 2)
        return graph
| [
"s.berke@netzkolchose.de"
] | s.berke@netzkolchose.de |
924ae8bf0152d014b3f5c86c50d8949e89d12888 | 7020eab2c3d3cab30eb558437fce7e9364af053c | /installation-mysql.py | 34169820ece7e389826f3651ee0b86b67341df0a | [
"MIT"
] | permissive | Svoy007/db-events-notify-tool | 2c92783c8b8d126cde52e58f9432693f82ad872f | 189109590588dbc73c19e9309219537c1b422651 | refs/heads/master | 2022-02-13T22:52:40.919653 | 2019-09-14T06:15:37 | 2019-09-14T06:15:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,746 | py | #!/usr/bin/python3
import sys
import os
import mysql.connector
def myloading():
    """Load connection settings from the two config files.

    Reads "config-mysql.txt" into a list of lines, then appends lines 2
    and 4 (indices 1 and 3) of "config.txt" to it.

    Returns:
        list[str]: the mysql config lines followed by the two extra
        values from the logic config.

    Raises:
        OSError: if either config file cannot be opened.
        IndexError: if "config.txt" has fewer than four lines.
    """
    # Context managers guarantee the files are closed even on error
    # (the original used manual open/close, which leaks on exceptions).
    with open("config-mysql.txt", 'r') as fconf:
        conf_list = fconf.read().split('\n')
    with open("config.txt", 'r') as fconf_logic:
        logic_lines = fconf_logic.read().split('\n')
    conf_list.append(logic_lines[1])
    conf_list.append(logic_lines[3])
    return conf_list
def config_db(loop_params):
    """Set up the MySQL schema for event notification.

    loop_params layout (produced by myloading): [0]=user, [1]=password,
    [2]=host, [4]=database, [5]=monitored table name, [8]=mode flag.
    When the flag equals "mod" the existing table gets an is_ack column;
    otherwise the events_notify table plus an AFTER INSERT trigger on
    the monitored table are created.
    NOTE(review): the table name is string-concatenated into SQL, so
    these config files must be trusted input.
    """
    mydb = mysql.connector.connect(
        host=loop_params[2],
        user=loop_params[0],
        passwd=loop_params[1],
        database=loop_params[4]
    )
    table_md = loop_params[5]
    # Autocommit so each DDL/DML statement is applied immediately.
    mydb.autocommit = True
    mycursor = mydb.cursor()
    print(loop_params[8])
    if loop_params[8] == "mod":
        # "mod" mode: retrofit an is_ack flag onto the existing table
        # and mark every current row as already acknowledged.
        sql = "ALTER TABLE "+table_md+" ADD is_ack INT DEFAULT 0;"
        print(sql)
        mycursor.execute(sql)
        sql = "UPDATE "+table_md+" SET is_ack = 1 WHERE is_ack = 0;"
        print(sql)
        mycursor.execute(sql)
    else:
        # Fresh install: dedicated notification table ...
        sql = """CREATE TABLE events_notify (
        id_event int(11) NOT NULL,
        is_ack tinyint(11) NOT NULL DEFAULT '0')
        ENGINE=InnoDB DEFAULT CHARSET=utf8;"""
        print(sql)
        mycursor.execute(sql)
        sql = """ALTER TABLE events_notify
        ADD PRIMARY KEY (id_event);"""
        print(sql)
        mycursor.execute(sql)
        sql = """ALTER TABLE events_notify
        MODIFY id_event int(11) NOT NULL AUTO_INCREMENT;"""
        print(sql)
        mycursor.execute(sql)
        # ... filled by a trigger on every insert into the watched table.
        sql = "CREATE TRIGGER notify_trigger AFTER INSERT ON "+table_md+" FOR EACH ROW INSERT INTO events_notify (id_event, is_ack) VALUES (NULL, '0')"
        print(sql)
        mycursor.execute(sql)
    print("setup completed")
if __name__ == "__main__":
    my_params = myloading()
    config_db(my_params)
else:
    # NOTE: this branch executes at *import* time -- the module prints
    # this message whenever it is imported rather than run directly.
    print("the program is being imported into another module")
| [
"returnstrike@yandex.ru"
] | returnstrike@yandex.ru |
9401b55d04da56398a1e37b2b0993ab738b406e1 | f51e853ccae92be668301549842ff4cba7c01fc9 | /core/commerce/migrations/0019_auto_20210123_2017.py | 3d2c5cd640b1b024e55c21723d23361cfba6745e | [] | no_license | felipebarraza6/consume_nuble | 93217a3d6a2605c9fe1e2736772ad939c9563568 | aba49e69081b7b7d0f7cc51b05c5e0bb04fc1bed | refs/heads/master | 2023-02-20T02:15:58.011524 | 2021-01-24T02:10:46 | 2021-01-24T02:10:46 | 289,100,164 | 1 | 0 | null | 2020-12-16T02:51:11 | 2020-08-20T20:07:43 | Python | UTF-8 | Python | false | false | 390 | py | # Generated by Django 3.1.5 on 2021-01-23 20:17
from django.db import migrations, models
# Auto-generated migration: changes DayNumber.title to a plain
# CharField(max_length=200).
class Migration(migrations.Migration):
    dependencies = [
        ('commerce', '0018_auto_20210123_0807'),
    ]
    operations = [
        migrations.AlterField(
            model_name='daynumber',
            name='title',
            field=models.CharField(max_length=200),
        ),
    ]
| [
"felipe.barraza.vega@gmail.com"
] | felipe.barraza.vega@gmail.com |
fb369c57b7560e302fb7a8a1affab5e6abc69c69 | 0493ef568113c088ddb71a35669c70ba7481a82d | /hello/urls.py | 24db3831bac6db5c0013ca7bf95be7ede9f94241 | [] | no_license | shivamgupta1319/Sample-website | 6e8b00065ba03a06bab04b94b0470be6b0daf8ed | c846050f9a6e9593258c8a86a346436d565eeca0 | refs/heads/main | 2023-06-26T14:40:21.901807 | 2021-07-30T13:40:29 | 2021-07-30T13:40:29 | 368,810,616 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 949 | py | """hello URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Branding strings shown in the Django admin interface.
admin.site.site_header = "COOL Admin"
admin.site.site_title = "COOL Admin Portal"
admin.site.index_title = "Welcome to My Portal"
# Route /admin/ to the admin site and everything else to the home app.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('home.urls'))
]
| [
"noreply@github.com"
] | shivamgupta1319.noreply@github.com |
f458ad4002467ad907cdc7a43c5ef58e1ddad7ba | 6d3cd1dcb5185deeb53986412ad21154adddc5a7 | /run.py | 061b97d278600e5dd35fe6ccdbf01a976950f42e | [] | no_license | lowellbander/cubscout-typeahead | eab96707e6b0568624698566c89a5a375ec44bd5 | 78c718c318b92017db0347e4ce86360eac2f420b | refs/heads/master | 2021-01-10T00:53:20.538668 | 2014-05-31T06:36:42 | 2014-05-31T06:36:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | import os
from flask import Flask
from flask import render_template
# Flask application; debug mode enables the reloader and tracebacks.
app = Flask(__name__)
app.debug = True
@app.route('/')
def index():
    """Render the site's index page."""
    return render_template('index.html')
# Start the Flask development server when run directly.
if __name__ == '__main__':
    app.run()
| [
"lowellbander@gmail.com"
] | lowellbander@gmail.com |
71d9cc4da8147cc2f67d21c406a7c186b03a3ea2 | e4a028dbd0f3cf2070590bf4a2b795c0e8b72296 | /code/data_synthesis.py | 5cac394677a12fa385f270cb746de303d7b5770a | [] | no_license | waymane1/fast-weights-rnn | 8344a480588b71f8d1959f61eec1aab3568039be | 493ccbf7a88df73d37a8852747dd213d8f228ed7 | refs/heads/master | 2020-04-18T16:11:44.578418 | 2017-04-29T03:06:17 | 2017-04-29T03:06:17 | 149,297,564 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,733 | py | import numpy as np
import string
def synthesize_key_value(n):
    """Generate n distinct keys and n values for training/testing.

    Keys are distinct indices 0-25 (the letters a-z); values are indices
    26-35, which stand for the digits 0-9 in the combined character set.

    Args:
        n: number of key/value pairs (must be <= 26 for distinct keys).

    Returns:
        tuple: (key_indices, value_indices), two length-n int arrays.
    """
    # Distinct letter indices in 0..25.
    alpha_idx = np.random.choice(26, size=n, replace=False)
    # np.random.random_integers was deprecated and has been removed from
    # NumPy; randint's upper bound is exclusive, so 36 keeps the original
    # inclusive range 26..35.  (Also drops two unused locals.)
    num_idx = np.random.randint(26, 36, size=n)
    return (alpha_idx, num_idx)
def synthesize_sequence(size=3):
    """Create an interleaved key/value sequence ending with a query.

    Layout: k1, v1, ..., k_size, v_size, 36, 36, k_q -- where 36 is the
    '?' marker and k_q is a randomly chosen key.  Returns the sequence
    and the value paired with the queried key.
    """
    keys, values = synthesize_key_value(size)
    # Interleave keys and values: key, value, key, value, ...
    interleaved = np.insert(values, np.arange(len(keys)), keys)
    # Pick which key to query; choice() returns an array, take element 0.
    query_idx = np.random.choice(size, size=1, replace=False)[0]
    answer = values[query_idx]
    query_suffix = np.array([36, 36, keys[query_idx]])
    return np.append(interleaved, query_suffix), answer
def one_hot(seq, size=3, char_set_size=37):
    """Convert a sequence of symbol indices into a one-hot matrix.

    Args:
        seq: integer indices, one per time step (length 2*size + 3).
        size: number of key/value pairs encoded in the sequence.
        char_set_size: number of symbols in the character set; becomes
            the number of columns.  The original hard-coded 37 here and
            silently ignored this parameter.

    Returns:
        np.ndarray: matrix of shape (2*size + 3, char_set_size) with a
        single 1 per row.
    """
    num_rows = (2 * size) + 3
    ohot_matrix = np.zeros((num_rows, char_set_size))
    # Fancy indexing: row i gets a 1 in column seq[i].
    ohot_matrix[np.arange(num_rows), seq] = 1
    return ohot_matrix
def debug_display_sequence(seq, characters=None):
    """Output the given numerical sequence as a string.

    Args:
        seq: iterable of symbol indices.
        characters: indexable character set.  When None (the original
            crashed with TypeError in that case), the script's default
            set of a-z, 0-9 and '?' is used.

    Returns:
        str: the decoded sequence.
    """
    if characters is None:
        # Matches the char_set built in the __main__ demo below.
        characters = list(string.ascii_lowercase) + [str(d) for d in range(10)] + ["?"]
    return "".join(characters[x] for x in seq)
# Demo / smoke test.  The original used Python 2 constructs
# (string.lowercase and bare print statements) that are syntax and
# attribute errors under Python 3, which the rest of this file targets.
if __name__ == "__main__":
    sequence, sol = synthesize_sequence(5)
    char_set = list(string.ascii_lowercase) + [str(x) for x in range(10)] + ["?"]
    char_sequence = debug_display_sequence(sequence, char_set)
    print(sequence, char_sequence, sol)
    print(one_hot(sequence, size=5, char_set_size=len(char_set)))
| [
"mathewa6@msu.edu"
] | mathewa6@msu.edu |
dce81e0ddd460c5f5dfc846725f2fc1cdfc2ab67 | 31b746109a92c01a10c8482630e3f5d155d64347 | /model_no_dep_test.py | 8325027e36cc14319a6a3cc96f4f19d19a513bab | [] | no_license | huangliu0909/Dependency-Parser | 98700359a5dc766763de5375134b97171ab95129 | 97c49d75a0935178d07803f4e823d4fb80000bae | refs/heads/master | 2020-09-02T16:08:29.184789 | 2019-11-03T06:00:49 | 2019-11-03T06:00:49 | 219,256,091 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,824 | py | import time
from my_model_no_dep import *
def test():
    """Endlessly evaluate the latest checkpoint on the validation set.

    Builds the TF1 evaluation graph once, then every 5 seconds restores
    the newest checkpoint from ./Model_no_dep/ (using EMA-shadowed
    weights) and prints its validation accuracy.  Runs forever.
    NOTE(review): get_input, inference and the *_node / n_classes
    constants come from the star-import of my_model_no_dep.
    """
    # w/p are the input features, t the one-hot labels (d is unused here).
    w, p, d, t = get_input("feature")
    print(len(w))
    w_tensor = tf.placeholder(tf.float32, shape=[None, w_input_node], name="w_tensor")
    p_tensor = tf.placeholder(tf.float32, shape=[None, p_input_node], name="p_tensor")
    y_ = tf.placeholder(tf.float32, shape=[None, n_classes], name="y_")
    # regularizer = tf.contrib.layers.l2_regularizer(regularization_rate)
    y = inference(w_tensor, p_tensor)
    # Fraction of examples whose argmax prediction matches the label.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # Restore the exponential-moving-average shadow variables.
    variable_averages = tf.train.ExponentialMovingAverage(0.99)
    variables_to_store = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_store)
    # F = open("./Model_no_dep/dev_result_0.txt", "w")
    while True:
        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state("./Model_no_dep/")
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                # The global step is encoded in the checkpoint filename.
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                accuracy_score = sess.run(accuracy, feed_dict={w_tensor: w,
                                                               p_tensor: p,
                                                               y_: t})
                print("after %s training steps, validation accuracy = %g" % (global_step, accuracy_score))
                # F.write(str([global_step, accuracy_score]) + "\n")
                # F.close()
                # F = open("./Model_no_dep/dev_result_0.txt", "a")
            else:
                print("model not found!")
            time.sleep(5)
# Entry point: run the (endless) evaluation loop when executed directly.
if __name__ == "__main__":
test() | [
"noreply@github.com"
] | huangliu0909.noreply@github.com |
73b5ff6d6c26cb18d4ac2f537a5e603cd6f88143 | 914fd5a975a1abf304c5138d83bcdc6270948dd9 | /Year 10 summatives/Unit A /random.py | 37ed1b9c4f2ce95d6dc560d13bfe7219fa1da6d0 | [] | no_license | ATarsky23/Year_10_Design | 61b574e7673e94d4d3fbd25058d9606eca4f64ad | 38187a23db9850103347c0dfafb28232f65d7572 | refs/heads/master | 2023-01-29T13:59:54.126501 | 2020-12-09T01:02:41 | 2020-12-09T01:02:41 | 294,112,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,703 | py | import tkinter as tk
import requests
from pprint import pprint
import random
uNames = ["ATarsky15","user2@test.com","User29"]
pWords = ["Pword123","EntryWord5","Password10"]
active = ["",""] #Stores active user
def checkCred(*arg):
print("Checking")
#Write the code to
#Step 1: Take entry for user name entunLF
u = entunLF.get()
#Step 2: Take entry for password
p = entpwLF.get()
#Step 3: Loop through usernames and check if valid with password
for i in range(0,len(uNames),1):
if (uNames[i] == u):
if (pWords[i] == p):
#SWAP SCREENS
loginFrame.pack_forget()
homeFrame.pack()
#SET ACTIVE USER
active[0] = u
active[1] = p
entunLF.delete(0,tk.END)
entpwLF.delete(0,tk.END)
return
#If the username nad password is correct swap frames.
entunLF.delete(0,tk.END)
entpwLF.delete(0,tk.END)
def logout(*args):
#SWAP
loginFrame.pack()
homeFrame.pack_forget()
#REMOVE ACTIVE USER
active[0] = ""
active[1] = ""
root = tk.Tk() #Creates your main window
#Build a login frame
loginFrame = tk.Frame(root)
labunLF = tk.Label(loginFrame,text = "User Name:", bg = "orange")
entunLF = tk.Entry(loginFrame, width = 20)
labpwLF = tk.Label(loginFrame,text = "Password", bg = "orange")
entpwLF = tk.Entry(loginFrame, width = 20)
submitLF = tk.Button(loginFrame, text = "Login", command = checkCred)
labunLF.pack()
entunLF.pack()
labpwLF.pack()
entpwLF.pack()
submitLF.pack()
#Build a home page frame
homeFrame = tk.Frame(root)
labHF = tk.Label(homeFrame, text = "Welcome to Soccer Magic")
logoutHF = tk.Button(homeFrame, text = "logout",command = logout)
labHF.pack()
logoutHF.pack()
loginFrame.pack()
root.mainloop()
print("END PROGRAM") | [
"adam.tarsky@GCCYJQ2BJ1WK.local"
] | adam.tarsky@GCCYJQ2BJ1WK.local |
def33672c3b90b99f079a7efece2450e249cb582 | 1916be92deec08255aafb9906f7f59bc8709543d | /authors/apps/comments/models.py | f576a26a8664e3f44c0a75a5c8d48a6b007d7383 | [
"BSD-3-Clause"
] | permissive | andela/Ah-backend-xmen | aac1a41b092c7b09ce52e2d057f0c2047a1f9eba | 60c830977fa39a7eea9ab978a9ba0c3beb0c4d88 | refs/heads/develop | 2020-04-11T07:05:35.379340 | 2019-03-06T17:47:56 | 2019-03-06T17:47:56 | 161,600,867 | 4 | 10 | BSD-3-Clause | 2019-03-06T17:47:58 | 2018-12-13T07:32:28 | Python | UTF-8 | Python | false | false | 2,318 | py | from django.dispatch import receiver
from django.db.models.signals import pre_save
from django.db import models
from authors.apps.articles.models import Article
from authors.apps.profiles.models import Profile
from simple_history.models import HistoricalRecords
class Comment(models.Model):
"""
Handles CRUD on a comment that has been made on article
"""
body=models.TextField(max_length=500)
createdAt=models.DateTimeField(auto_now_add=True)
updatedAt=models.DateTimeField(auto_now=True)
highlight_start = models.PositiveIntegerField(null=True, blank=True)
highlight_end = models.PositiveIntegerField(null=True, blank=True)
highlight_text = models.TextField(max_length=500, null=True)
author=models.ForeignKey(Profile,on_delete=models.CASCADE, related_name='authored_by')
article=models.ForeignKey(Article,on_delete=models.CASCADE, related_name='article')
comment_history = HistoricalRecords()
class Meta:
ordering=['-createdAt']
def __str__(self):
return self.body
class CommentReply(models.Model):
"""
Handles replying on a specific comment by made on an article
"""
comment=models.ForeignKey(Comment,on_delete=models.CASCADE,related_name='replies')
reply_body=models.TextField()
repliedOn=models.DateTimeField(auto_now_add=True)
updatedOn=models.DateTimeField(auto_now=True)
author=models.ForeignKey(Profile,on_delete=models.CASCADE)
reply_history = HistoricalRecords()
class Meta:
ordering=['repliedOn']
def __str__(self):
return self.reply_body
class CommentLike(models.Model):
"""
Handles liking of a specific user by an authenticated user
"""
comment=models.ForeignKey(Comment,on_delete=models.CASCADE)
like_status=models.BooleanField()
liked_by=models.ForeignKey(Profile,on_delete=models.CASCADE)
def __str__(self):
return "like by {}".format(self.liked_by)
class CommentReplyLike(models.Model):
"""
Holds data for liking reply made a comment
"""
liked=models.BooleanField()
reply_like_by=models.ForeignKey(Profile,on_delete=models.CASCADE)
comment_reply=models.ForeignKey(CommentReply,on_delete=models.CASCADE)
def __str__(self):
return "reply liked by {}".format(self.reply_like_by)
| [
"39955305+ja-odur@users.noreply.github.com"
] | 39955305+ja-odur@users.noreply.github.com |
ea89ba2fabd2bed874469971edc76605636bea7a | 08f8fe42acf057841cd0fdf2fd5000d967befa68 | /find_your_bmr_v3.py | f105ea796272ab4059fdb1915afa2f60605498af | [] | no_license | RyanJamesA/Basal-Metabolic-Rate-Calculator | ee5d36013aaf3412deedbb35672a2e9267bf39b9 | 08374f14829721632e1b7513fc9a765ccf1e815a | refs/heads/master | 2021-09-01T22:15:10.437040 | 2017-12-28T21:23:08 | 2017-12-28T21:23:08 | 111,618,234 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,276 | py | import sys
def main():
"""Calls the other functions and then calls my_calculations()"""
print("\n\tWelcome to the BMR calculator!")
print("\tPress 'q' at any time to stop!")
active = True
while active:
units = get_unit()
sex = get_sex()
age = get_age()
height = get_height()
weight = get_weight()
my_calculations(units, sex, age, height, weight)
def get_unit():
"""Lets the user set his unit preference"""
print("\nDo you prefer Imperial or Metric units?")
imp_or_met_input = input("imp/met: ")
while True:
if imp_or_met_input == 'imp' or imp_or_met_input == 'met':
return imp_or_met_input
if imp_or_met_input == 'q':
sys.exit()
else:
print("Sorry, only 'met' and 'imp' works.")
get_unit()
def get_sex():
"""Lets the user set his sex"""
print("\nWhat is your sex?")
get_sex_input = input("male/female: ")
while True:
if get_sex_input == 'male' or get_sex_input == 'female':
return get_sex
if get_sex_input == 'q':
sys.exit()
else:
print("Sorry, only 'male' and 'female' inputs work!")
get_sex()
def get_age():
"""Lets the user set his age"""
try:
print("\nHow old are you? ")
age_input = input("ex 18: ")
if age_input == 'q':
sys.exit()
else:
age_input = int(age_input)
return age_input
except ValueError:
print("Sorry, numbers only please!")
get_age()
def get_height():
"""Lets the user set his height"""
try:
print("\nHow tall are you?")
height_input = input("ex 73: ")
if height_input == 'q':
sys.exit()
else:
return float(height_input)
except ValueError:
print("Sorry, numbers only please!")
get_height()
def get_weight():
"""Lets the user set his weight"""
try:
print("\nHow much do you weigh?")
weight_input = input("ex 202/70: ")
if weight_input == 'q':
sys.exit()
else:
return float(weight_input)
except ValueError:
print("Sorry, numbers only please!")
get_height()
def my_calculations(units, sex, age, height, weight):
"""Combines the measurements gathered from main() and does calculations"""
if units == 'imp':
user_height = height * 2.54
user_weight = weight * 0.45359237
bmr_cals = (user_weight * 10) + (user_height * 6.25) - (5 * age)
+ (5 if sex == 'male' else -161)
print(f"You are expending {bmr_cals} calories just by existing!")
if units == 'met':
bmr_cals = (weight * 10) + (height * 6.25) - (5 * age)
+ (5 if sex == 'male' else -161)
print(f"You are expending {bmr_cals} calories just by existing!")
start_again()
def start_again():
while True:
print("\nWould you like to do another one?")
user_response = input("y/n ")
if user_response == 'y':
main()
if user_response == 'n' or user_response == 'q':
sys.exit()
else:
print("Sorry, the only valid responses are y/n/q")
main()
| [
"noreply@github.com"
] | RyanJamesA.noreply@github.com |
c367e3cd6d7683b19c8cc39895743def75f2a3ad | 62341bf81e95e5ecbba65214915fe950de17faa7 | /2020/tests/test_day10.py | 5448cbb0ecf0e504d9f3948077168bb110ec7f8b | [] | no_license | arnauddelaunay/AdventOfCode | d53b27a9d10fe26a0989d2768b0c37dd3e5868f0 | 176fe8329bfd638a60eacd4b7555ff3ea65f2a5a | refs/heads/master | 2023-04-10T17:10:59.459477 | 2020-12-18T14:53:27 | 2020-12-18T14:53:27 | 113,903,047 | 2 | 0 | null | 2021-04-20T21:25:54 | 2017-12-11T20:04:35 | Jupyter Notebook | UTF-8 | Python | false | false | 792 | py | import pytest
import os
from aoc2020.utils import write_puzzle
from aoc2020.day10 import run1, run2
PUZZLE = """16
10
15
5
1
11
7
19
6
12
4"""
PUZZLE_2 = """28
33
18
42
31
14
46
20
48
47
24
23
49
45
19
38
39
11
1
32
25
35
8
17
7
9
4
2
34
10
3"""
@pytest.mark.parametrize("puzzle,expected", [
(PUZZLE, 35),
(PUZZLE_2, 220)
])
def test_run1(puzzle, expected):
# Given
day_file_path = write_puzzle(puzzle, day=10)
# When
actual = run1(day_file_path)
# Then
assert actual == expected
@pytest.mark.parametrize("puzzle,expected", [
(PUZZLE, 8),
(PUZZLE_2, 19208)
])
def test_run2(puzzle, expected):
# Given
day_file_path = write_puzzle(puzzle, day=10)
# When
actual = run2(day_file_path)
# Then
assert actual == expected
| [
"adelaunay.prestataire@oui.sncf"
] | adelaunay.prestataire@oui.sncf |
eb81eb06396d3b4c6f49f68fbe3a14e0f299e3de | 8e831a52e0d3796fc046ed389e15652c2c202a0c | /heroku_files/Streamlit.py | 2b366479055818d6077fc40ded6acd231f2ef744 | [] | no_license | brianhtam/Myers_Briggs_Classifier | f39a1dc03c7925be6a0d0c9ce1bd7808d08779b3 | 4a63d0ec598e4e842a44ae1440e6ca7e8c6a1dc1 | refs/heads/main | 2023-01-18T15:55:06.687007 | 2020-11-22T04:50:34 | 2020-11-22T04:50:34 | 305,927,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,761 | py |
#import package
import streamlit as st
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import pickle
#preprocessing
import spacy
import re
import string
#import the data
image = Image.open("images/MBTI_people.png")
#intro
st.sidebar.write("This is an application for predicting your MBTI personality type with natural language processing!")
text = st.sidebar.text_area("Enter in someone's writting and I'll predict their Myer's Briggs")
st.sidebar.button("Predict")
Title_html = """
<style>
.title h1{
user-select: none;
font-size: 43px;
color: white;
background: repeating-linear-gradient(-45deg, red 0%, yellow 7.14%, rgb(0,255,0) 14.28%, rgb(0,255,255) 21.4%, cyan 28.56%, blue 35.7%, magenta 42.84%, red 50%);
background-size: 600vw 600vw;
-webkit-text-fill-color: transparent;
-webkit-background-clip: text;
animation: slide 10s linear infinite forwards;
}
@keyframes slide {
0%{
background-position-x: 0%;
}
100%{
background-position-x: 600vw;
}
}
.reportview-container .main .block-container{
padding-top: 3em;
}
body {
background-image:url('https://images2.alphacoders.com/692/692539.jpg');
background-position-y: -200px;
}
@media (max-width: 1800px) {
body {
background-position-x: -500px;
}
}
.Widget.stTextArea, .Widget.stTextArea textarea {
height: 586px;
width: 400px;
}
h1{
color: brown
}
h2{
color: cyan
}
.sidebar-content {
width:25rem !important;
}
.Widget.stTextArea, .Widget.stTextArea textarea{
}
.sidebar.--collapsed .sidebar-content {
margin-left: -25rem;
}
.streamlit-button.small-button {
padding: .5rem 9.8rem;
}
.streamlit-button.primary-button {
background-color: yellow;
}
</style>
<div>
<h1>Welcome to the Myers Briggs Prediction App!</h1>
</div>
"""
st.markdown(Title_html, unsafe_allow_html=True) #Title rendering
# Calculate our prediction
# import models
EI = pd.read_pickle('pickled_models/EI_Logistic Reg.pkl')
NS = pd.read_pickle('pickled_models/NS_Logistic Reg.pkl')
FT = pd.read_pickle('pickled_models/FT_Logistic Reg.pkl')
PJ = pd.read_pickle('pickled_models/PJ_Logistic Reg.pkl')
# import transformations
tfidf = pd.read_pickle('pickled_transformations/tfidf.pkl')
TopicModel = pd.read_pickle('pickled_transformations/NMF.pkl')
# parser = pd.read_pickle('pickled_transformations/parser.pkl')
stop_words = pd.read_pickle('pickled_transformations/stop_words.pkl')
# Load English tokenizer, tagger, parser, NER and word vectors
parser = spacy.load('en_core_web_sm')
# Create our list of punctuation marks
punctuations = string.punctuation
# Creating our tokenizer function
def spacy_tokenizer(sentence):
# Creating our token object, which is used to create documents with linguistic annotations.
mytokens = parser(sentence)
# Lemmatizing each token and converting each token into lowercase
mytokens = [ word.lemma_.lower().strip() if word.lemma_ != "-PRON-" else word.lower_ for word in mytokens ]
# Removing stop words
mytokens = [ word for word in mytokens if word not in stop_words and word not in punctuations ]
# return preprocessed list of tokens
return ' '.join(mytokens)
alphanumeric = lambda x: re.sub('\w*\d\w*', '', x)
punc_lower = lambda x: re.sub('[%s]' % re.escape(string.punctuation), '', x.lower())
new_text = pd.Series(text).apply(spacy_tokenizer).map(alphanumeric).map(punc_lower)
X_test_tfidf = tfidf.transform(pd.Series(new_text))
def display_topics(model, feature_names, no_top_words, topic_names=None):
"""
Takes in model and feature names and outputs
a list of string of the top words from each topic.
"""
topics = []
for ix, topic in enumerate(model.components_):
topics.append(str(", ".join([feature_names[i]
for i in topic.argsort()[:-no_top_words - 1:-1]])))
return topics
topics = display_topics(TopicModel, tfidf.get_feature_names(), 3)
topic_word = pd.DataFrame(TopicModel.components_.round(3),
index = topics,
columns = tfidf.get_feature_names())
X_test_topic_array = TopicModel.transform(pd.DataFrame(X_test_tfidf.toarray(), columns=tfidf.get_feature_names()))
X_test_topics = pd.DataFrame(X_test_topic_array.round(5),
columns = topics)
pred_list = []
if EI.predict(X_test_topics) == 1:
pred_list.append('E')
else:
pred_list.append('I')
if NS.predict(X_test_topics) == 1:
pred_list.append('N')
else:
pred_list.append('S')
if FT.predict(X_test_topics) == 1:
pred_list.append('F')
else:
pred_list.append('T')
if PJ.predict(X_test_topics) == 1:
pred_list.append('P')
else:
pred_list.append('J')
prediction = ''.join(pred_list)
if text == '':
st.image(image, use_column_width=True)
st.markdown("<div class='title'><h1>Start by writing text on the left sidebar</h1></div>", unsafe_allow_html=True)
if text != '':
st.header('We guess that you are:')
predict_html = f"<div class='title'><h1>{prediction}</h1></div>"
st.markdown(predict_html, unsafe_allow_html=True)
st.image(f'images/{prediction}.png',width=340)
st.header('Are we correct?')
st.write('Find more information here:')
st.write(f'https://www.16personalities.com/{prediction.lower()}-personality')
try:
# Generate WordCloud
from wordcloud import WordCloud
# Generate a word cloud image
wordcloud = WordCloud(width = 1000, height = 1000,
background_color ='white',
min_font_size = 20).generate(text)
st.header('Word cloud of your text:',)
# Display the generated image:
# the matplotlib way:
fig = plt.figure(figsize=(10,10))
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.show()
st.pyplot(fig)
# fig2 = plt.figure(figsize=(10,10))
st.header('Top 3 Topics in your writing')
fig2 = plt.figure()
plt.barh(X_test_topics.T.sort_values(0).tail(10).index, X_test_topics.T.sort_values(0).tail(10)[0])
st.pyplot(fig2)
# st.pyplot(fig2)
st.image('Tableau_topics.gif', use_column_width=True)
st.header('Here is your writing:')
st.write(text)
except ValueError:
pass
| [
"btgood2me@gmail.com"
] | btgood2me@gmail.com |
b2f791688e9cf9763ce7a102d6f828efd414cda7 | bcfab09a01d20ecfda43701ca664e4b15ed9453e | /main.py | a8cb1998bc4621098beeaae8fded80e62a189c90 | [] | no_license | Abhijit8Chingalwar/Chatbot | 65329db80057db0f5a39c0dfe62b7f533d4e5465 | 7bcd9fdfb1a183af3b29f4cff4ebbaedc7007dce | refs/heads/main | 2023-03-05T15:40:49.011572 | 2021-02-20T18:10:37 | 2021-02-20T18:10:37 | 340,641,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,816 | py | from chatterbot import ChatBot
from chatterbot.trainers import ListTrainer
from tkinter import *
import pyttsx3 as pp
import SpeechRecognition as s
import threading
engine = pp.init()
voices = engine.getProperty('voices')
print(voices)
engine.setProperty('voices',voices[0].id)
def speak(word):
engine.say(word)
engine.runAndWait()
bot = ChatBot("MyBot")
convo = [
"Hello",
"Hi there!",
"What is your name?",
"My name is chatbot prepared by Abhijit Chingalwar.",
"WHere are you studying?",
"Nit jalandhar",
"How are you doing?",
"I'm doing great,what about you?",
"M fine",
"That is good to hear",
"Thank you.",
"You're welcome."
]
#now training the bot with the help of trainer
trainer = ListTrainer(bot)
trainer.train(convo)
#answer = bot.get_response("How are you doing?")
#print(answer)
#print("Talk to bot")
#while True:
#query = input()
#if query == 'exit':
# break
#answer = bot.get_response(query)
#print("bot: ",answer)
main = Tk()
main.geometry("500x650") #500 is width and 650 is height
main.title("mychatbot")
img =PhotoImage(file="mat1.png") #img is object of class PhotoImage
photoL = Label(main,image=img)
photoL.pack(pady=5) #yaxis pading=5
#query takin:It takes audio ias a input froma a user and convert it into a astring
def takeQuery():
sr = s.Recognizer()
sr.pause_threshold=1
print("your bot is listening to you")
with s.Microphone() as m:
try:
audio = sr.listen(m)
query = sr.recognize_google(audio,language='eng-in')
print(query)
textF.delete(0,END)
textf.insert(0,query)
ask_from_bot()
except exception as e:
print(e)
print("Not recongnized")
def ask_from_bot():
query = textF.get()
answer_from_bot = bot.get_response(query)
msgs.insert(END,"you :" +query)
msgs.insert(END,"bot : "+str(answer_from_bot))
speak(answer_from_bot)
textF.delete(0,END)
msgs.yview(END)
frame =Frame(main) #list window for conversation
sc = Scrollbar(frame)
msgs =Listbox(frame,width=80,height=20,yscrollcommand=sc.set)
sc.pack(side =RIGHT ,fill = Y) #appear on right side
msgs.pack(side = LEFT ,fill =BOTH,pady=10)
frame.pack()
#creating text field
textF=Entry(main,font=("Verdana",20))
textF.pack(fill = X,pady=10)
btn =Button(main,text="Ask From Bot",font=("Verdana",20),command=ask_from_bot)
btn.pack()
#crating a function
def enter_function(event):
btn.invoke()
#going to bind main window with eneter key
main.bind('<Return>',enter_function)
def repeatL():
while True:
takeQuery()
t=threading.Thread(target=repeatL)
t.start()
main.mainloop() | [
"noreply@github.com"
] | Abhijit8Chingalwar.noreply@github.com |
c8db3a8e226cb70ad8c96b08f2330917343112c1 | 58141d7fc37854efad4ad64c74891a12908192ed | /config/coconut/node_272.py | 6b2d09a25a2e728fba4ec6858f853d862492788a | [] | no_license | stanleylio/fishie | b028a93b2093f59a8ceee4f78b55a91bb1f69506 | 0685045c07e4105934d713a0fd58c4bc28821ed6 | refs/heads/master | 2022-08-14T13:08:55.548830 | 2022-07-29T01:32:28 | 2022-07-29T01:32:28 | 30,433,819 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,993 | py | name = 'controller02'
location = 'Coconut Island'
note = "v0 code: {'neutral':0, 'heating':1, 'cooling':2, 'flush':3}"
latitude = 21.4347
longitude = -157.7990
deployment_status = 'deployed'
conf = [
{
'dbtag':'ts',
'description':'Device clock',
'interval':60,
},
{
'dbtag':'t0',
'unit':'\u00b0C',
'description':'Water Temperature',
'lb':22,
'ub':35,
'interval':60,
'plot_range':14*24,
},
{
'dbtag':'c0',
'unit':'\u00b0C',
'description':'Probe offset',
'lb':-1,
'ub':1,
'interval':60,
'plot_range':14*24,
},
{
'dbtag':'s0',
'unit':'\u00b0C',
'description':'Setpoint',
'lb':0,
'ub':50,
'interval':60,
'plot_range':14*24,
},
{
'dbtag':'v0',
'unit':'-',
'description':'Valve state',
'interval':60,
'plot_range':14*24,
},
{
'dbtag':'k',
'unit':'-',
'description':'Tank number',
'interval':60,
'plot_range':14*24,
},
{
'dbtag':'uptime_second',
'description':'Uptime in seconds',
'lb':24*60*60,
'interval':60,
'plot_range':2*24,
},
{
'dbtag':'freeMB',
'unit':'MB',
'description':'Remaining free disk space',
'lb':800,
'interval':60,
'plot_range':2*24,
},
{
'dbtag':'cpu_temp',
'unit':'\u00b0C',
'description':'CPU Temperature',
'lb':5,
'ub':68,
'interval':60,
'plot_range':2*24,
},
]
if '__main__' == __name__:
for c in conf:
print('- - -')
for k,v in c.items():
print(k,':',v)
import sys
sys.path.append('../..')
from os.path import basename
from storage.storage2 import create_table
create_table(conf, basename(__file__).split('.')[0].replace('_','-'))
| [
"stanleylio@gmail.com"
] | stanleylio@gmail.com |
8581ab52da69a33a52e9e032a072500a8b46dcd6 | 86fa7e2c47446a5aa22c4f398afb7a6e2ca6dbc6 | /bin/pip3.7 | 20bb3d69629f4fb3d7eb4b2af9e0277186623f72 | [] | no_license | Ktheara/practice-opencv | e17c3305be11387beb13fabfab562da98b2bf082 | 7f191d2ee48c3bcbbda554853052c9741b6007ea | refs/heads/master | 2022-12-28T19:53:40.858928 | 2020-10-16T09:24:39 | 2020-10-16T09:24:39 | 303,602,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | 7 | #!/home/msi/Coding/git_workspace/python-dev/practice-cv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"thera729@gmail.com"
] | thera729@gmail.com |
630c66bb78a625c9e531276f9885b701e08eb8fb | de12f1aa52c120b1928dd603a3501c80e75204d4 | /numpy/numpy_fancy_idx.py | 6a72bfcb07f66124e04c8b3d6162472c8833505c | [] | no_license | nicosalaz/pythonProject | 2d983ff6aa2712918e8204b3594ed6639d8a9035 | 997dbfcfa62a88187a82f3252597f0e4501ef9cf | refs/heads/master | 2023-01-12T12:23:13.469368 | 2020-11-18T03:54:28 | 2020-11-18T03:54:28 | 309,724,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | import numpy as np
# creamos el arreglo de 6x6 con step de 1 en las columnas y
# step de 10 en las filas
a = np.arange(6) + np.arange(0, 51, 10)[:, np.newaxis]
# imprimimos indicando con enteros los indices del arreglo
print (a[(range(0,5), range(1,6))])
print (a[3:, [0,2,5]])
# creamos una máscara de valores booleanos
mask = np.array([1,0,1,0,0,1], dtype=bool)
print(mask)
#imprimimos solo los elementos donde la mascara es True
print(a[mask,2])
# i = np.array([1,2,3])
# j = np.array([4,5])
#print(i+j) | [
"nsalazarv@correo.usbcali.edu.co"
] | nsalazarv@correo.usbcali.edu.co |
6944244c75b73c86e7d31bb45e23c65b20ccf0ef | 975b9e37ad6063acd5ca6b91e668f76e56dc936b | /.py | d3671387285bc377c3023a7bf601a694acafc72e | [] | no_license | NHRK555/S-CNN-classification | 665306f7fc96c2c1af0a4500a03eaa578416a61c | 0384e5febe1962bee31551554efb80cfce09340b | refs/heads/master | 2020-04-01T18:38:27.736476 | 2018-10-17T18:46:19 | 2018-10-17T18:46:19 | 153,505,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,200 | py | import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Activation, Input
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import SGD
from keras import backend as K
from keras.utils import np_utils
from keras.layers.convolutional import *
from keras.utils import conv_utils
from keras.layers.core import *
from keras.engine.topology import Layer
import numpy as np
import scipy.io as sio
import random
from random import shuffle
import matplotlib.pyplot as plt
import os.path
import errno
import scipy.ndimage
from skimage.transform import rotate
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.decomposition import NMF
from keras.layers import Input, Conv2D, Lambda, merge, Dense, Flatten,MaxPooling2D
from keras.models import Model, Sequential
from keras.regularizers import l2
from keras import backend as K
from keras.optimizers import SGD,Adam
from keras.losses import binary_crossentropy
import numpy.random as rng
import numpy as np
import os
import dill as pickle
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from keras.layers import concatenate
def loadIndianPinesData():
data = sio.loadmat(os.path.join( 'Indian_pines.mat'))['indian_pines']
labels = sio.loadmat(os.path.join('Indian_pines_gt.mat'))['indian_pines_gt']
return data, labels
def splitTrainTestSet(X, y, testRatio=0.10):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=testRatio, random_state=345,
stratify=y)
return X_train, X_test, y_train, y_test
def standartizeData(X):
newX = np.reshape(X, (-1, X.shape[2]))
scaler = preprocessing.StandardScaler().fit(newX)
newX = scaler.transform(newX)
newX = np.reshape(newX, (X.shape[0],X.shape[1],X.shape[2]))
return newX, scaler
def applyPCA(X, numComponents=75, drawPlot = False):
newX = np.reshape(X, (-1, X.shape[2]))
pca = PCA(n_components=numComponents, whiten=True)
newX = pca.fit_transform(newX)
newX = np.reshape(newX, (X.shape[0],X.shape[1], numComponents))
return newX, pca
def createPatches(X, y, windowSize=3):
margin = int((windowSize - 1) / 2)
zeroPaddedX = padWithZeros(X, margin=margin)
# split patches
patchesData = np.zeros((X.shape[0] * X.shape[1], windowSize, windowSize, X.shape[2]))
patchesLabels = np.zeros((X.shape[0] * X.shape[1]))
patchIndex = 0
for r in range(margin, zeroPaddedX.shape[0] - margin):
for c in range(margin, zeroPaddedX.shape[1] - margin):
patch = zeroPaddedX[r - margin:r + margin + 1, c - margin:c + margin + 1]
patchesData[patchIndex, :, :, :] = patch
patchesLabels[patchIndex] = y[r-margin, c-margin]
patchIndex = patchIndex + 1
patchesData = patchesData[patchesLabels>0,:,:,:]
patchesLabels = patchesLabels[patchesLabels>0]
patchesLabels -= 1
return patchesData, patchesLabels
def Patch(data,height_index,width_index):
height_slice = slice(height_index, height_index+PATCH_SIZE)
width_slice = slice(width_index, width_index+PATCH_SIZE)
patch = data[height_slice, width_slice, :]
return patch
def padWithZeros(X, margin=2):
newX = np.zeros((X.shape[0] + 2 * margin, X.shape[1] + 2* margin, X.shape[2]))
x_offset = margin
y_offset = margin
newX[x_offset:X.shape[0] + x_offset, y_offset:X.shape[1] + y_offset, :] = X
return newX
n_classes=16
def get_batch(n,X_data,y_data):
n_examples,w,h,ch = X_data.shape
"""Create batch of n pairs, half same class, half different class"""
categories = rng.choice(n_classes,size=(n,),replace=False)
pairs=[np.zeros((n, h, w,200)) for i in range(2)]
targets=np.zeros((n,))
targets[n//2:] = 1
for i in range(n):
category = categories[i]
idx_1 = rng.randint(0,n_examples)
#print(pairs[0][0].shape, X_train[0].shape,np.where(y_train == category)[0][i])
pairs[0][i] = X_data[np.where(y_data == category)[0][i]]
idx_2 = rng.randint(0,n_examples)
#pick images of same class for 1st half, different for 2nd
category_2 = category if i >= n//2 else (category + rng.randint(1,n_classes)) % n_classes
pairs[1][i] = X_data[np.where(y_data == category_2)[0][i]]
return pairs, targets
#(inputs,targets)=get_batch(15,X_train,y_train)
def W_init(shape,name=None):
"""Initialize weights as in paper"""
values = rng.normal(loc=0,scale=1e-2,size=shape)
return K.variable(values,name=name)
#//TODO: figure out how to initialize layer biases in keras.
def b_init(shape,name=None):
"""Initialize bias as in paper"""
values=rng.normal(loc=0.5,scale=1e-2,size=shape)
return K.variable(values,name=name)
def getModel(input_shape):
input_shape=(9,9,200)
left_input = Input(input_shape)
right_input = Input(input_shape)
model = Sequential()
model.add(Conv2D(100, (3, 3), padding='same', input_shape=input_shape,data_format='channels_last'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(5, 5), strides=None, padding='same'))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(1000,kernel_initializer=W_init,bias_initializer=b_init))
model.add(Activation('relu'))
model.add(Dense(500))
model.add(Activation('relu'))
model.add(Dense(300))
model.add(Activation('relu'))
encoded_l = model(left_input)
encoded_r = model(right_input)
#merge two encoded inputs with the l1 distance between them
L1_distance = lambda x: K.abs(x[0]-x[1])
both = merge([encoded_l,encoded_r], mode = L1_distance, output_shape=lambda x: x[0])
prediction = Dense(1,activation='sigmoid',bias_initializer=b_init)(both)
siamese_net = Model(input=[left_input,right_input],output=prediction)
return siamese_net,model
windowSize=9
X,y= loadIndianPinesData()
print(X.shape,y.shape)
numComponents=200
isPCA=True
# PCA
if isPCA == True:
X,pca = applyPCA(X,numComponents=numComponents)
print(X.shape,y.shape)
XPatches, yPatches = createPatches(X, y, windowSize=windowSize)
X_train, X_test, y_train, y_test = splitTrainTestSet(XPatches, yPatches, 0.25)
print(X_train.shape)
print(y_test.shape)
#X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[3], X_test.shape[1], X_test.shape[2]))
y_test_ct = np_utils.to_categorical(y_test)
#X_train = np.reshape(X_train, (X_train.shape[0],X_train.shape[3], X_train.shape[1], X_train.shape[2]))
y_train_ct = np_utils.to_categorical(y_train)
print(y_train.shape)
print(X_train.shape)
print(X_train[0].shape)
print(y_train[0])
model, dum = getModel(X_train[0].shape)
opt = keras.optimizers.Adam(lr=0.0001,decay=1e-6)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
for i in range(400):
(inputs,targets)=get_batch(15,X_train,y_train)
loss=model.train_on_batch(inputs,targets)
print(loss)
model.summary()
dum.summary()
| [
"noreply@github.com"
] | NHRK555.noreply@github.com |
ca2608d7b0365e3be9639fc614c42d74c868cfdc | aaccbefb16acdd99ba1da426be64c9db023fa3c0 | /Module2.py | ab48cdfc8dece0106f7c95e5bb318343e0700437 | [] | no_license | HowardTsang/Example2 | 87ef7f243ef848c1c0d51f8af9db68bd1c27c2fe | d77d07a715e54895d3d902fd9432496dc1368411 | refs/heads/master | 2023-03-08T23:13:02.278537 | 2021-03-20T14:11:59 | 2021-03-20T14:11:59 | 343,098,972 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,969 | py | #Experiment in the IPython Shell; type 5 / 8, for example.
#Add another line of code to the Python script on the top-right (not in the Shell): print(7 + 10).
#Hit Submit Answer to execute the Python script and receive feedback.
# Example, do not modify!
print(5 / 8)
# Print the sum of 7 and 10
print(7+10)
#Above the print(7 + 10), add the comment

# Division
quotient = 5 / 8
print(quotient)

# Addition
total = 7 + 10
print(total)
#Suppose you have $100, which you can invest with a 10% return each year. After one year
#it's 100*1.1 = 110 dollars, and after two years it's 100*1.1**2 = 121. Add code to
#calculate how much money you end up with after 7 years, and print the result.

# Addition and subtraction
for value in (5 + 5, 5 - 5):
    print(value)

# Multiplication, division, modulo, and exponentiation
for value in (3 * 5, 10 / 2, 18 % 7, 4 ** 2):
    print(value)

# Value of $100 after 7 years at 10% annual growth
# (printed twice: once for the instruction line, once for the answer line)
print(100 * 1.1 ** 7)
print(100 * 1.1 ** 7)
#Create a variable savings with the value 100.
#Check out this variable by typing print(savings) in the script.

# Amount of money saved so far, in dollars
savings = 100

# Show the value stored in savings
print(savings)
#Create a variable growth_multiplier, equal to 1.1.
#Create a variable, result, equal to the amount of money you saved after 7 years.
#Print out the value of result.

# Starting capital, in dollars
savings = 100

# Yearly growth factor (10% return)
growth_multiplier = 1.1

# Number of years the money is invested
n_years = 7

# Savings after n_years of compound growth
result = savings * growth_multiplier ** n_years

# Print out result
print(result)
#Create a new string, desc, with the value "compound interest".
#Create a new boolean, profitable, with the value True.

# String describing the investment mechanism
desc = "compound interest"

# Boolean flag: the investment is profitable
profitable = True
#Calculate the product of savings and growth_multiplier. Store the result in year1.
#What do you think the resulting type will be? Find out by printing out the type of year1.
#Calculate the sum of desc and desc and store the result in a new variable doubledesc.
#Print out doubledesc. Did you expect this?

savings = 100
growth_multiplier = 1.1
desc = "compound interest"

# Savings after one year of growth; int * float produces a float
year1 = savings * growth_multiplier

# Inspect the type the multiplication produced
print(type(year1))

# "Adding" two strings concatenates them
doubledesc = desc + desc

# Print out doubledesc
print(doubledesc)
#Hit Run Code to run the code. Try to understand the error message.
#Fix the code such that the printout runs without errors; use the function str() to convert the variables to strings.
#Convert the variable pi_string to a float and store this float as a new variable, pi_float.

# Definition of savings and result
savings = 100
result = 100 * 1.10 ** 7

# Fix the printout: str() converts the numbers so they can be
# concatenated with the surrounding strings
print("I started with $" + str(savings) + " and now have $" + str(result) + ". Awesome!")

# Definition of pi_string
pi_string = "3.1415926"

# Convert pi_string into float: pi_float
# (the original code called float() but discarded its return value;
# the result must be bound to pi_float as the instructions require)
pi_float = float(pi_string)
#Create a list, areas, that contains the area of the hallway (hall), kitchen (kit), living room (liv), bedroom (bed) and bathroom (bath), in this order. Use the predefined variables.
#Print areas with the print() function.
# area variables (in square meters)
hall = 11.25
kit = 18.0
liv = 20.0
bed = 10.75
bath = 9.50
# Create list areas
areas = [hall,kit,liv,bed,bath]
# Print areas
print(areas)
#Finish the code that creates the areas list. Build the list so that the list first contains the name of each room as a string and then its area. In other words, add the strings "hallway", "kitchen" and "bedroom" at the appropriate locations.
#Print areas again; is the printout more informative this time?
# area variables (in square meters)
hall = 11.25
kit = 18.0
liv = 20.0
bed = 10.75
bath = 9.50
# Adapt list areas
areas = ["hallway",hall, "kitchen",kit, "living room",liv,"bedroom",bed,"bathroom", bath]
# Print areas
print(areas)
#Finish the list of lists so that it also contains the bedroom and bathroom data. Make sure you enter these in order!
#Print out house; does this way of structuring your data make more sense?
#Print out the type of house. Are you still dealing with a list?
# area variables (in square meters)
hall = 11.25
kit = 18.0
liv = 20.0
bed = 10.75
bath = 9.50
# house information as list of lists
house = [["hallway", hall],
["kitchen", kit],
["living room", liv],
["bedroom", bed],
["bathroom", bath]]
# Print out house
print(house)
# Print out the type of house
print(type(house))
#Print out the second element from the areas list (it has the value 11.25).
#Subset and print out the last element of areas, being 9.50. Using a negative index makes sense here!
#Select the number representing the area of the living room (20.0) and print it out.
# Create the areas list
areas = ["hallway", 11.25, "kitchen", 18.0, "living room", 20.0, "bedroom", 10.75, "bathroom", 9.50]
# Print out second element from areas
print(areas[1])
# Print out last element from areas
print(areas[-1])
# Print out the area of the living room
print(areas[5])
#Using a combination of list subsetting and variable assignment, create a new variable, eat_sleep_area, that contains the sum of the area of the kitchen and the area of the bedroom.
#Print the new variable eat_sleep_area.
# Create the areas list
areas = ["hallway", 11.25, "kitchen", 18.0, "living room", 20.0, "bedroom", 10.75, "bathroom", 9.50]
# Sum of kitchen and bedroom area: eat_sleep_area
eat_sleep_area = areas[3] + areas[-3]
# Print the variable eat_sleep_area
print(eat_sleep_area)
#Use slicing to create a list, downstairs, that contains the first 6 elements of areas.
#Do a similar thing to create a new variable, upstairs, that contains the last 4 elements of areas.
#Print both downstairs and upstairs using print().
# Create the areas list
areas = ["hallway", 11.25, "kitchen", 18.0, "living room", 20.0, "bedroom", 10.75, "bathroom", 9.50]
# Use slicing to create downstairs
downstairs = areas[:6]
# Use slicing to create upstairs
upstairs = areas[-4:]
# Print out downstairs and upstairs
print(downstairs)
print(upstairs)
#Create downstairs again, as the first 6 elements of areas. This time, simplify the slicing by omitting the begin index.
#Create upstairs again, as the last 4 elements of areas. This time, simplify the slicing by omitting the end index.
# Create the areas list
areas = ["hallway", 11.25, "kitchen", 18.0, "living room", 20.0, "bedroom", 10.75, "bathroom", 9.50]
# Alternative slicing to create downstairs
downstairs = areas[:6]
# Alternative slicing to create upstairs
upstairs = areas[-4:]
#Update the area of the bathroom area to be 10.50 square meters instead of 9.50.
#Make the areas list more trendy! Change "living room" to "chill zone".
# Create the areas list
areas = ["hallway", 11.25, "kitchen", 18.0, "living room", 20.0, "bedroom", 10.75, "bathroom", 9.50]
# Correct the bathroom area
areas[-1] = 10.50
# Change "living room" to "chill zone"
areas[4] = "chill zone"
#Use the + operator to paste the list ["poolhouse", 24.5] to the end of the areas list. Store the resulting list as areas_1.
#Further extend areas_1 by adding data on your garage. Add the string "garage" and float 15.45. Name the resulting list areas_2.
# Create the areas list and make some changes
areas = ["hallway", 11.25, "kitchen", 18.0, "chill zone", 20.0,
"bedroom", 10.75, "bathroom", 10.50]
# Add poolhouse data to areas, new list is areas_1
areas_1 = areas + ["poolhouse", 24.5]
# Add garage data to areas_1, new list is areas_2
areas_2 = areas_1 + ["garage", 15.45]
#Change the second command, that creates the variable areas_copy, such that areas_copy is an explicit copy of areas. After your edit, changes made to areas_copy shouldn't affect areas. Submit the answer to check this.
# Create list areas
areas = [11.25, 18.0, 20.0, 10.75, 9.50]
# Create areas_copy
areas_copy = list(areas)
# Change areas_copy
areas_copy[0] = 5.0
# Print areas
print(areas)
#Use print() in combination with type() to print out the type of var1.
#Use len() to get the length of the list var1. Wrap it in a print() call to directly print it out.
#Use int() to convert var2 to an integer. Store the output as out2.
# Create variables var1 and var2
var1 = [1, 2, 3, 4]
var2 = True
# Print out type of var1
print(type(var1))
# Print out length of var1
print(len(var1))
# Convert var2 to an integer: out2
out2 = int(var2)
#Use + to merge the contents of first and second into a new list: full.
#Call sorted() on full and specify the reverse argument to be True. Save the sorted list as full_sorted.
#Finish off by printing out full_sorted.
# Create lists first and second
first = [11.25, 18.0, 20.0]
second = [10.75, 9.50]
# Paste together first and second: full
full = first + second
# Sort full in descending order: full_sorted
full_sorted = sorted(full, reverse = True)
# Print out full_sorted
print(full_sorted)
#Use the upper() method on place and store the result in place_up. Use the syntax for calling methods that you learned in the previous video.
#Print out place and place_up. Did both change?
#Print out the number of o's on the variable place by calling count() on place and passing the letter 'o' as an input to the method. We're talking about the variable place, not the word "place"!
# string to experiment with: place
place = "poolhouse"
# Use upper() on place: place_up
place_up = place.upper()
# Print out place and place_up
print(place)
print(place_up)
# Print out the number of o's in place
print(place.count('o'))
#Use the index() method to get the index of the element in areas that is equal to 20.0. Print out this index.
#Call count() on areas to find out how many times 9.50 appears in the list. Again, simply print out this number.
# Create list areas
areas = [11.25, 18.0, 20.0, 10.75, 9.50]
# Print out the index of the element 20.0
print(areas.index(20.0))
# Print out how often 9.50 appears in areas
print(areas.count(9.50))
#Use append() twice to add the size of the poolhouse and the garage again: 24.5 and 15.45, respectively. Make sure to add them in this order.
#Print out areas
#Use the reverse() method to reverse the order of the elements in areas.
#Print out areas once more.
# Create list areas
areas = [11.25, 18.0, 20.0, 10.75, 9.50]
# Use append twice to add poolhouse and garage size
areas.append(24.5)
areas.append(15.45)
# Print out areas
print(areas)
# Reverse the orders of the elements in areas
areas.reverse()
# Print out areas
print(areas)
#Import the math package. Now you can access the constant pi with math.pi.
#Calculate the circumference of the circle and store it in C.
#Calculate the area of the circle and store it in A.
# Definition of radius
r = 0.43
# Import the math package
import math
# Calculate C
C = 2 * r * math.pi
# Calculate A
A = math.pi * r ** 2
# Build printout
print("Circumference: " + str(C))
print("Area: " + str(A))
#Perform a selective import from the math package where you only import the radians function.
#Calculate the distance travelled by the Moon over 12 degrees of its orbit. Assign the result to dist. You can calculate this as r * phi, where r is the radius and phi is the angle in radians. To convert an angle in degrees to an angle in radians, use the radians() function, which you just imported.
#Print out dist.
# Definition of radius
r = 192500
# Import radians function of math package
from math import radians
# Travel distance of Moon over 12 degrees. Store in dist.
dist = r * radians(12)
# Print out dist
print(dist)
#Import the numpy package as np, so that you can refer to numpy with np.
#Use np.array() to create a numpy array from baseball. Name this array np_baseball.
#Print out the type of np_baseball to check that you got it right.
# Create list baseball
baseball = [180, 215, 210, 210, 188, 176, 209, 200]
# Import the numpy package as np
import numpy as np
# Create a Numpy array from baseball: np_baseball
np_baseball = np.array(baseball)
# Print out type of np_baseball
print(type(np_baseball))
#Create a numpy array from height_in. Name this new array np_height_in.
#Print np_height_in.
#Multiply np_height_in with 0.0254 to convert all height measurements from inches to meters. Store the new values in a new array, np_height_m.
#Print out np_height_m and check if the output makes sense.
# height is available as a regular list
# Import numpy
import numpy as np
# Create a numpy array from height_in: np_height_in
np_height_in = np.array(height_in)
# Print out np_height_in
print(np_height_in)
# Convert np_height_in to m: np_height_m
np_height_m = np_height_in * 0.0254
# Print np_height_m
print(np_height_m)
#Create a numpy array from the weight_lb list with the correct units. Multiply by 0.453592 to go from pounds to kilograms. Store the resulting numpy array as np_weight_kg.
#Use np_height_m and np_weight_kg to calculate the BMI of each player. Use the following equation:
# BMI= weight(kg)/ height(m)^2
#Save the resulting numpy array as bmi.
#Print out bmi.
# height and weight are available as regular lists
# Import numpy
import numpy as np
# Create array from height_in with metric units: np_height_m
np_height_m = np.array(height_in) * 0.0254
# Create array from weight_lb with metric units: np_weight_kg
np_weight_kg = np.array(weight_lb) * 0.453592
# Calculate the BMI: bmi
bmi = np_weight_kg / np_height_m ** 2
# Print out bmi
print(bmi)
#Create a boolean numpy array: the element of the array should be True if the corresponding baseball player's BMI is below 21. You can use the < operator for this. Name the array light.
#Print the array light.
#Print out a numpy array with the BMIs of all baseball players whose BMI is below 21. Use light inside square brackets to do a selection on the bmi array.
# height and weight are available as a regular lists
# Import numpy
import numpy as np
# Calculate the BMI: bmi
np_height_m = np.array(height_in) * 0.0254
np_weight_kg = np.array(weight_lb) * 0.453592
bmi = np_weight_kg / np_height_m ** 2
# Create the light array
light = bmi < 21
# Print out light
print(light)
# Print out BMIs of all baseball players whose BMI is below 21
print(bmi[light])
#Subset np_weight_lb by printing out the element at index 50.
#Print out a sub-array of np_height_in that contains the elements at index 100 up to and including index 110.
# height and weight are available as a regular lists
# Import numpy
import numpy as np
# Store weight and height lists as numpy arrays
np_weight_lb = np.array(weight_lb)
np_height_in = np.array(height_in)
# Print out the weight at index 50
print(np_weight_lb[50])
# Print out sub-array of np_height_in: index 100 up to and including index 110
print(np_height_in[100:111])
#Use np.array() to create a 2D numpy array from baseball. Name it np_baseball.
#Print out the type of np_baseball.
#Print out the shape attribute of np_baseball. Use np_baseball.shape.
# Create baseball, a list of lists
baseball = [[180, 78.4],
[215, 102.7],
[210, 98.5],
[188, 75.2]]
# Import numpy
import numpy as np
# Create a 2D numpy array from baseball: np_baseball
np_baseball = np.array(baseball)
# Print out the type of np_baseball
print(type(np_baseball))
# Print out the shape of np_baseball
print(np_baseball.shape)
#Use np.array() to create a 2D numpy array from baseball. Name it np_baseball.
#Print out the shape attribute of np_baseball.
# baseball is available as a regular list of lists
# Import numpy package
import numpy as np
# Create a 2D numpy array from baseball: np_baseball
np_baseball = np.array(baseball)
# Print out the shape of np_baseball
print(np_baseball.shape)
#Print out the 50th row of np_baseball.
#Make a new variable, np_weight_lb, containing the entire second column of np_baseball.
#Select the height (first column) of the 124th baseball player in np_baseball and print it out.
# baseball is available as a regular list of lists
# Import numpy package
import numpy as np
# Create np_baseball (2 cols)
np_baseball = np.array(baseball)
# Print out the 50th row of np_baseball
print(np_baseball[49,:])
# Select the entire second column of np_baseball: np_weight_lb
np_weight_lb = np_baseball[:,1]
# Print out height of 124th player
print(np_baseball[123, 0])
#You managed to get hold of the changes in height, weight and age of all baseball players. It is available as a 2D numpy array, updated. Add np_baseball and updated and print out the result.
#You want to convert the units of height and weight to metric (meters and kilograms respectively). As a first step, create a numpy array with three values: 0.0254, 0.453592 and 1. Name this array conversion.
#Multiply np_baseball with conversion and print out the result.
# baseball is available as a regular list of lists
# updated is available as 2D numpy array
# Import numpy package
import numpy as np
# Create np_baseball (3 cols)
np_baseball = np.array(baseball)
# Print out addition of np_baseball and updated
print(np_baseball + updated)
# Create numpy array: conversion
conversion = np.array([0.0254, 0.453592, 1])
# Print out product of np_baseball and conversion
print(np_baseball * conversion)
#Create numpy array np_height_in that is equal to first column of np_baseball.
#Print out the mean of np_height_in.
#Print out the median of np_height_in.
# np_baseball is available
# Import numpy
import numpy as np
# Create np_height_in from np_baseball
np_height_in = np_baseball[:,0]
# Print out the mean of np_height_in
print(np.mean(np_height_in))
# Print out the median of np_height_in
print(np.median(np_height_in))
#The code to print out the mean height is already included. Complete the code for the median height. Replace None with the correct code.
#Use np.std() on the first column of np_baseball to calculate stddev. Replace None with the correct code.
#Do big players tend to be heavier? Use np.corrcoef() to store the correlation between the first and second column of np_baseball in corr. Replace None with the correct code.
# np_baseball is available
# Import numpy
import numpy as np
# Print mean height (first column)
avg = np.mean(np_baseball[:,0])
print("Average: " + str(avg))
# Print median height. Replace 'None'
med = np.median(np_baseball[:,0])
print("Median: " + str(med))
# Print out the standard deviation on height. Replace 'None'
stddev = np.std(np_baseball[:,0])
print("Standard Deviation: " + str(stddev))
# Print out correlation between first and second column. Replace 'None'
corr = np.corrcoef(np_baseball[:,0], np_baseball[:,1])
print("Correlation: " + str(corr))
#Convert heights and positions, which are regular lists, to numpy arrays. Call them np_heights and np_positions.
#Extract all the heights of the goalkeepers. You can use a little trick here: use np_positions == 'GK' as an index for np_heights. Assign the result to gk_heights.
#Extract all the heights of all the other players. This time use np_positions != 'GK' as an index for np_heights. Assign the result to other_heights.
#Print out the median height of the goalkeepers using np.median(). Replace None with the correct code.
#Do the same for the other players. Print out their median height. Replace None with the correct code.
# heights and positions are available as lists
# Import numpy
import numpy as np
# Convert positions and heights to numpy arrays: np_positions, np_heights
np_positions = np.array(positions)
np_heights = np.array(heights)
# Heights of the goalkeepers: gk_heights
gk_heights = np_heights[np_positions == 'GK']
# Heights of the other players: other_heights
other_heights = np_heights[np_positions != 'GK']
# Print out the median height of goalkeepers. Replace 'None'
print("Median height of goalkeepers: " + str(np.median(gk_heights)))
# Print out the median height of other players. Replace 'None'
print("Median height of other players: " + str(np.median(other_heights)))
| [
"hotsang@deloitte.ie"
] | hotsang@deloitte.ie |
8621cb1d7de29506a7b2ef9c6c062765513539a1 | 92a3ad663d5cafebb30065f7e8b80bfe9157aa90 | /course/admin.py | 55a71f2d200d2e338cb9eec8dcb23bdcfd19e366 | [] | no_license | rohaanmd/nibodh-django-project | cd583a2170ebdd38fd4392f4465ee433517e5e27 | 54148cf3f310a655eafa95bc57d0afffe073a307 | refs/heads/master | 2023-07-14T23:03:34.545308 | 2021-08-30T08:47:45 | 2021-08-30T08:47:45 | 400,447,342 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | from django.contrib import admin
from .models import course , contact
# Register your models here.
# Expose both models in the Django admin site so course and contact
# records can be created/edited through the admin UI.
admin.site.register(course)
admin.site.register(contact)
| [
"panzerxhack@gmail.com"
] | panzerxhack@gmail.com |
532c26f52ffa9ce044639660b22851aff45beba8 | 2a02d288dd7ceb421b3f86f826bf74cd6d8bf224 | /qgis3_xlsx2postgres.py | c08249be01b2a8cd744703e1721c753b158491c7 | [
"MIT"
] | permissive | ANAGEO/QGis_scripts | db92052ea11030ac2bed9c0604a088517dd6e04d | b09635ed7c51385eeb6af82db55b71d2d7c4d78e | refs/heads/master | 2020-06-18T06:09:30.384867 | 2019-10-08T12:58:34 | 2019-10-08T12:58:34 | 196,190,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,775 | py | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
## This script takes a .xlsx file and loads it into a postgresql database, from witihn QGis. It doesn't use the pandas module
## because it might not be installed (by default, QGis doesn't); it uses xlrd instead of pandas, and psycopg2 for the connection to the server.
## It can be stored as a layer action or run from the python console. And maybe as a processing toolbox script.
## Since it is supposed to work from within QGis, it is asumed that there is already a live connection to postgresql with stored
## parameters, and we use here the QSettings to find them and retrieve the login and password without storing them in the script.
__author__ = 'didier'
__version__ = '0.6'
import os, sys, psycopg2, csv, xlrd
from PyQt5 import Qt
from qgis.gui import QgsMessageBar
from PyQt5.QtWidgets import QFileDialog
from qgis.core import *
from qgis.utils import iface
## Function to retrieve the login and password from a stored PostGIS connection
def get_postgres_conn_info(selected):
    """Read the credentials of a PostgreSQL connection stored by QGIS.

    Looks up the connection named *selected* in the QSettings saved by
    QGIS and returns a ``(username, password)`` tuple.  Credentials may
    live either in the QGIS Authentication Manager (``authcfg``) or as
    plain-text settings; both storage schemes are handled.

    Returns ``('', '')`` when the connection entry does not exist or no
    credentials are stored.
    """
    settings = QSettings()
    settings.beginGroup(u"/PostgreSQL/connections/" + selected)
    if not settings.contains("database"):
        # Non-existent entry.  Keep the return type consistent: the
        # original returned {} here, which made the call-site tuple
        # unpacking (myname, mypass = ...) raise ValueError.
        return '', ''
    username = ''
    password = ''
    authconf = settings.value('authcfg', '')
    if authconf:
        # Credentials are encrypted in the QGIS Authentication Manager.
        auth_manager = QgsApplication.authManager()
        conf = QgsAuthMethodConfig()
        auth_manager.loadAuthenticationConfig(authconf, conf, True)
        if conf.id():
            username = conf.config('username', '')
            password = conf.config('password', '')
    else:
        # Basic (plain-text) settings.
        username = settings.value('username', '', type=str)
        password = settings.value('password', '', type=str)
    return username, password
## dialog to select the excel file
file, _filter = QFileDialog.getOpenFileName(None, "Open Data File", '.', "(*.xlsx)")
if file == "":
    errmsg_xl = 'Selection canceled'
    iface.messageBar().pushMessage("Error", errmsg_xl, level=Qgis.Warning, duration=6)
    # A bare 'raise' outside an except block aborts with a confusing
    # "No active exception to re-raise"; raise an explicit error instead.
    raise RuntimeError(errmsg_xl)
## initiating a csv file at the same place than the excel file
csvfile = file.replace(".xlsx", ".csv")
## function to populate the csv
## takes 2 arguments: the xlsx file name and the csv name (+ path)
def csv_from_excel(arg_xlsx, arg_csv):
    """Convert the first sheet of an .xlsx workbook to a ';'-separated CSV.

    arg_xlsx -- path of the source Excel file
    arg_csv  -- path of the CSV file to create (overwritten if it exists)

    Returns the number of data lines (rows minus the header row).
    """
    wb = xlrd.open_workbook(arg_xlsx)
    sh = wb.sheet_by_index(0)
    nblines = sh.nrows - 1  # counting the data lines (header excluded)
    # 'with' guarantees the file is flushed and closed: in the original the
    # close() call was placed after the return statement and never executed.
    with open(arg_csv, 'w', newline='') as to_csv_file:
        wr = csv.writer(to_csv_file, delimiter=';', quoting=csv.QUOTE_NONE)
        # Excel stores every number as a float, so integers come back as
        # e.g. '12.0'.  Strip the trailing '.0' so columns that should be
        # integers can later be uploaded into integer fields in postgres.
        for rownum in range(0, sh.nrows):
            if rownum == 0:
                # header row, written verbatim
                wr.writerow(sh.row_values(rownum))
            else:
                newrow = []
                for col in sh.row_values(rownum):
                    if str(col).endswith('.0'):
                        newrow.append(str(col).replace(".0", ""))
                    else:
                        newrow.append(col)
                wr.writerow(newrow)
    return nblines
## connexion to the server
myname, mypass = get_postgres_conn_info("connection_name") # provide here a stored connection name
try:
    conn = psycopg2.connect(host="xxx.xxx.xxx.xxx", port="5432", dbname="xxxxxxxxxx", user = myname, password = mypass)
    cur = conn.cursor()
except (Exception, psycopg2.Error) as error :
    errmsg = 'The connection failed.' # \( ' + error + ' \)'
    iface.messageBar().pushMessage("Error", errmsg, level=Qgis.Critical, duration=10)
    raise
# The password is no longer needed once the connection is open.
# (The original read 'del mypass.' -- the trailing dot was a SyntaxError.)
del mypass
## creating the csv
nblin = csv_from_excel(file, csvfile)
## uploading the csv to an existing table (table_in) in the server.
## This steps uses the psycopg2 cur.copy_from() method because it is much faster than inserts,
## and this is the reason to convert the xlsx file into a csv
# Name of the destination table.  TODO: replace with your actual table name.
# (The original referenced an undefined variable 'table_in', which raised
# NameError at this point.)
table_in = "table_in"
try:
    with open(csvfile, 'r') as f:
        next(f) # Skip the header row.
        cur.copy_from(f, table_in, sep=';')
except (Exception, psycopg2.Error) as error :
    errmsg_im = 'Error while loading' #
    iface.messageBar().pushMessage("Error", errmsg_im, level=Qgis.Critical, duration=10)
    raise
## closing database connection.
if(conn):
    cur.close()
    conn.commit()
    conn.close()
## cleaning up
os.remove(csvfile)
| [
"noreply@github.com"
] | ANAGEO.noreply@github.com |
96086c5b38846ca222a885be2f5064dc0af59287 | 6660f9ec539a0841219af7b28c529bbfe5576456 | /blog/migrations/0001_initial.py | 19ec7a932ac2ec2b067f96564fd3f6c57d98dbc4 | [] | no_license | aditya-prayaga/ExampleDjango | db36dac31876e0deb61bc4ddf99e9f46f61cd718 | f31cd95b38f297bf153180ec3eea5d90625308bc | refs/heads/master | 2022-03-16T05:00:19.193967 | 2019-11-29T11:17:01 | 2019-11-29T11:17:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | # Generated by Django 2.2.7 on 2019-11-28 16:46
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations; creates the initial
    # blog_post table.  Avoid hand-editing anything but comments here.

    # First migration of the 'blog' app.
    initial = True

    dependencies = [
        # Post.author points at the (swappable) user model, so the auth
        # app's migrations must be applied first.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                # Creation time defaults to "now"; publication is optional.
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                # Deleting a user cascades to that user's posts.
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"adityaprayaga@gmail.com"
] | adityaprayaga@gmail.com |
45137db05e56a967ff7ac4516a6d166bf2626def | a88cfe07a8131ca5a37d3a1ae9ec76442afe4921 | /api/.history/sardata/residentes/models_20191022115502.py | fc5bc176943c6ee741d2dec1b64ec762992873b0 | [] | no_license | edwinvivas/APIsanlorenzo | a866471efad8214762d7db70fdc1069213a11f11 | 3e8e4090cc33642ee45e2d718d1ce6b053d38655 | refs/heads/master | 2020-08-27T00:46:46.153959 | 2019-10-24T19:21:16 | 2019-10-24T19:21:16 | 217,197,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,737 | py | from django.db import models
from sardata.utils.uuid_field import BinaryUUIDField
from parametricos.models import TipoDocumento;
import uuid
# Create your models here.
class TipoPropiedad(models.Model):
    """Catalogue model: the kind of a property (TipoPropiedad)."""

    # Display name of the property type.
    name = models.CharField(max_length=50)

    class Meta:
        """Meta definition for TipoPropiedad."""
        verbose_name = 'TipoPropiedad'
        verbose_name_plural = 'TipoPropiedads'

    def __str__(self):
        """Readable representation in the form '(id) name'."""
        return "(%s) %s" % (self.id, self.name)
class Propiedad(models.Model):
    """Model definition for Propiedad (a property unit and its owner)."""

    tipoPropiedad = models.ForeignKey(TipoPropiedad, on_delete=models.CASCADE)
    interior = models.CharField(max_length=3)
    numero = models.CharField(max_length=3)
    deposito = models.CharField(max_length=3,null=True)
    nombre_propietario = models.CharField(max_length=255)
    tipo_documento_propietario = models.ForeignKey(TipoDocumento, on_delete=models.CASCADE)

    class Meta:
        """Meta definition for Propiedad."""
        verbose_name = 'Propiedad'
        verbose_name_plural = 'Propiedads'

    def __str__(self):
        """Unicode representation of Propiedad.

        The original body was 'pass', which returned None and made
        str(obj) raise TypeError (e.g. in the Django admin list view).
        """
        return "%s-%s (%s)" % (self.interior, self.numero, self.nombre_propietario)
class Vehiculo(models.Model):
    """Model definition for Vehiculo (a resident's vehicle)."""

    placa = models.CharField(max_length=8)
    marca = models.CharField(max_length=20)
    modelo = models.IntegerField(default=0)
    color = models.CharField(max_length=20)

    class Meta:
        """Meta definition for Vehiculo."""
        verbose_name = 'Vehiculo'
        verbose_name_plural = 'Vehiculos'

    def __str__(self):
        """Unicode representation of Vehiculo.

        The original body was 'pass', which returned None and made
        str(obj) raise TypeError; the licence plate is the natural key.
        """
        return self.placa
| [
"edwin.vivas@xphera.co"
] | edwin.vivas@xphera.co |
ae263db6c9aab2ca89826dc38cb0f9707dbb1acd | efdb3fab00bf620180ba375db25fdbf9348ca6fa | /api/app/routing.py | 2a612ae5c4686d693fffb86ce3b10bfb60363ace | [] | no_license | rohansagar/eecs149-smart-dimming | ad6cf1bfd045de8425521f141d4d710381d5dff3 | 6b286eb0ac371f80adf91999990bd08ac28d4058 | refs/heads/master | 2020-09-20T14:20:42.218982 | 2018-12-15T01:08:10 | 2018-12-15T01:08:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | from django.conf.urls import url
from . import consumers
# WebSocket routes handled by Django Channels.
websocket_urlpatterns = [
    # Live socket for a single light, keyed by its numeric id.
    url(r'^lights/(?P<id>[0-9]+)/$', consumers.LightSocketConsumer),
    # Configuration socket used to manage light names.
    url(r'^lights/config/$', consumers.LightNameConsumer)
]
| [
"milodarling@berkeley.edu"
] | milodarling@berkeley.edu |
e6ca65fac78376ade8e5869a2476eb7f852c0a26 | 7b7be9b58f50415293def4aa99ef5795e6394954 | /sim/unitop/XYTable.py | e3c183d059ee5dec2082846749b29ef58b7aa0ae | [] | no_license | sabualkaz/sim42 | 80d1174e4bc6ae14122f70c65e259a9a2472ad47 | 27b5afe75723c4e5414904710fa6425d5f27e13c | refs/heads/master | 2022-07-30T06:23:20.119353 | 2020-05-23T16:30:01 | 2020-05-23T16:30:01 | 265,842,394 | 0 | 0 | null | 2020-05-21T12:26:00 | 2020-05-21T12:26:00 | null | UTF-8 | Python | false | false | 6,209 | py | """Module for a simple XY table
Classes:
DataSeries - an array of data
ATable - class containning multiple DataSeries
"""
from sim.solver.Variables import *
SERIES_OBJ = 'Series'
TABLETAG_VAR = 'TagValue'
class DataSeries(object):
    """A named one-dimensional series of data points with a unit type.

    Values are stored in a plain list (rather than as BasicVariable
    objects) so that loading points does not trigger a solver pass per
    value; the parent unit operation is only invalidated once per
    SetValues call.  Entries may be floats, strings or None.
    """

    def __init__(self, typeName=None):
        # Lazy default keeps the signature backward compatible while not
        # evaluating GENERIC_VAR at class-definition time.
        if typeName is None:
            typeName = GENERIC_VAR
        self._myData = []            # raw points (float, str or None)
        self._myType = PropTypes.get(typeName, PropTypes[GENERIC_VAR])
        self.unitOpParent = None     # owning unit op; set by Initialize()
        self.name = ''

    def CleanUp(self):
        """Drop the back reference to the parent unit op."""
        self.unitOpParent = None

    def __str__(self):
        # Bare class name (strip module path from repr), then type + values
        t = re.sub(' .*', '', repr(self))[1:]
        s = '%s = %s; %s' % (self.name, t, self._myType)
        for i in self._myData:
            s += ' ' + str(i)
        return s

    def GetContents(self):
        return [('UnitType', self._myType.unitType), ('Values', self._myData)]

    def Initialize(self, unitOpObj, name):
        """Attach the series to its owning unit operation."""
        self.unitOpParent = unitOpObj
        self.name = name

    def SetParent(self, parent):
        self.unitOpParent = parent

    def SetValues(self, vals, dummy=None):
        """Replace all points; the dummy just matches the BasicProperty call.

        Each value is parsed as float when possible; the literal string
        'None' becomes None; anything unparseable is kept verbatim
        (string data support).  The parent op forgets its calculations.
        """
        self._myData = []
        if vals != None:
            for v in vals:
                if v == 'None':
                    self._myData.append(None)
                else:
                    try:
                        self._myData.append(float(v))
                    except:
                        # string data support
                        self._myData.append(v)
        if self.unitOpParent:
            self.unitOpParent.ForgetAllCalculations()

    def GetValues(self):
        """Return a shallow copy of the data points."""
        return list(self._myData)

    def GetType(self):
        return self._myType

    def SetType(self, typeName):
        """Change the unit type; name the series after it if still unnamed."""
        self._myType = PropTypes.get(typeName, PropTypes[GENERIC_VAR])
        if self.name == '':
            self.name = typeName

    def GetLen(self):
        return len(self._myData)

    def GetDataValue(self, idx, allowExtrap=1):
        """Linearly interpolated value at (fractional) index idx.

        idx may be any real number; outside [0, len-1] the series is
        linearly extrapolated unless allowExtrap is false, in which case
        the end point value is returned.  Empty series returns None.
        """
        n = len(self._myData)
        if n == 0:
            return None
        elif n == 1:
            return self._myData[0]
        else:
            if idx <= 0:
                idx1 = 0
                if not allowExtrap: return self._myData[0]
            elif idx >= n - 1:
                idx1 = n - 2
                if not allowExtrap: return self._myData[n - 1]
            else:
                # int() instead of the Python-2-only long(); truncation is
                # identical for the non-negative idx reaching this branch.
                idx1 = int(idx)
            return self._myData[idx1] + (self._myData[idx1 + 1] - self._myData[idx1]) * (idx - idx1)

    def GetDataIndex(self, val):
        """Inverse of GetDataValue: fractional index where the series == val.

        Scans consecutive point pairs and interpolates inside the first
        bracketing segment.  Out-of-bound values return the nearer end
        point index (0 or len-1); an empty series returns None.
        """
        n = len(self._myData)
        if n == 0:
            return None
        elif n == 1:
            return 0
        for i in range(n - 1):
            factor = (val - self._myData[i]) * (self._myData[i + 1] - val)
            if factor >= 0 and self._myData[i + 1] != self._myData[i]:
                return i + (val - self._myData[i]) / (self._myData[i + 1] - self._myData[i])
        # value out of bound, return the closest end point
        if ((self._myData[n - 1] - self._myData[0]) * (self._myData[0] - val)) >= 0.0:
            return 0
        else:
            return n - 1

    def GetDataPoint(self, idx):
        """Point at integer index idx, or None when out of range."""
        if idx >= 0 and idx < len(self._myData):
            return self._myData[idx]

    def Clone(self):
        """Deep copy of the series (parent reference deliberately not copied)."""
        clone = self.__class__(self._myType.name)
        clone.name = self.name
        clone._myData = copy.deepcopy(self._myData)
        return clone
class ATable(object):
    """A table built from numbered DataSeries columns plus one tag property.

    Series are stored under keys 'Series0', 'Series1', ...; the tag (a
    BasicProperty) labels the whole table, e.g. the value at which a set
    of curves was measured.
    """

    def __init__(self, typeName=None):
        # Lazy default keeps the signature backward compatible while not
        # evaluating GENERIC_VAR at class-definition time.
        if typeName is None:
            typeName = GENERIC_VAR
        self._TagValue = 0.0
        self._Series = {}            # {'SeriesN': DataSeries}
        self._Tag = BasicProperty(typeName)

    def CleanUp(self):
        """Clean up every series and drop them all."""
        for s in self._Series.values():
            s.CleanUp()
        self._Series.clear()

    def GetObject(self, name):
        """Look up the tag or a series by key ('SeriesN') or display name."""
        if name == TABLETAG_VAR:
            return self._Tag
        elif name in self._Series:
            return self._Series[name]
        else:
            # Fall back to matching the user-visible series name.  The
            # original referenced nSeries without defining it here, so
            # this branch always raised NameError.
            nSeries = len(self._Series)
            for i in range(nSeries):
                if self._Series[SERIES_OBJ + str(i)].name == name:
                    return self._Series[SERIES_OBJ + str(i)]

    def GetLen(self):
        """Length of the longest series in the table."""
        maxLen = 0
        nSeries = len(self._Series)
        for i in range(nSeries):
            n = self._Series[SERIES_OBJ + str(i)].GetLen()
            if n > maxLen:
                maxLen = n
        return maxLen

    def GetContents(self):
        result = [(TABLETAG_VAR, self._Tag)]
        for key in self._Series:
            result.append((key, self._Series[key]))
        return result

    def SetSeriesCount(self, n):
        """Grow or shrink the table so it holds exactly n series."""
        nSeries = len(self._Series)
        for i in range(nSeries, n, -1):
            del self._Series[SERIES_OBJ + str(i - 1)]
        for i in range(nSeries, n):
            self._Series[SERIES_OBJ + str(i)] = DataSeries()

    def SetTagType(self, typeName):
        # Switching the data type discards the existing tag value
        if typeName != self._Tag.GetName():
            self._Tag = BasicProperty(typeName)

    def TagValue(self):
        return self._Tag.GetValue()

    def SetSeriesType(self, seriesIdx, typeName):
        """Set the unit type of series number seriesIdx (no-op if absent)."""
        seriesName = SERIES_OBJ + str(seriesIdx)
        if seriesName in self._Series:
            self._Series[seriesName].SetType(typeName)

    def GetSeries(self, idx):
        """Series number idx, or None when idx is out of range."""
        if idx < len(self._Series):
            return self._Series[SERIES_OBJ + str(idx)]
        else:
            return None

    def Clone(self):
        """Deep copy: clones the tag and every series."""
        clone = self.__class__(self._Tag._type.name)
        for key, value in self._Series.items():
            clone._Series[key] = value.Clone()
        clone._Tag = self._Tag.Clone()
        return clone
"jonathan.xavier@gmail.com"
] | jonathan.xavier@gmail.com |
e47902764e65c6ccb0accf6f20a76b7d7751571b | 6149adc0dc1c0ef50b643dbbc71e3672dcaf17c6 | /program_2.py | 9686b1cb93a2a474bf3722e97511a9c57d70d896 | [] | no_license | JackAmos/ProgramTwo | 4635987d1828413f3a42365419bb6449fd3aca41 | 89a21d3926921dd3861795740d954c34d4fe11a9 | refs/heads/master | 2020-07-30T11:20:50.991387 | 2019-10-04T21:23:35 | 2019-10-04T21:23:35 | 210,211,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,993 | py | #Jack Amos
#CS2300-002
#10/04/2019
#Project 2
#using Python 3.7
import random
def print_coordinates(filename):
    """Read seven whitespace-separated integers from `filename` and render
    an ASCII plot (41x41 grid, x and y in [-20, 20]) showing a box of side
    `d` anchored at (llx, lly) and the line through (p1, p2) with direction
    (v1, v2)."""
    with open(filename,'r') as file:
        contents = file.read()
    #get each value in string an convert it to integer
    # [llx,lly,d,p1,p2,v1,v2]
    coord_values = [int(x) for x in contents.split()]
    #sets scale of "screen"
    dimension = 41
    #2d array that will be printed to fucntion as screen, random numbers exist so python does not make all indexes of non-unique elements 0
    # NOTE(review): the cells are seeded with random numbers only so that
    # list.index() below finds distinct positions; randint(0,10000) over a
    # 41x41 grid can still collide, in which case index() returns the first
    # duplicate and a cell may be mis-addressed -- verify.
    screen = [[str(random.randint(0,10000)) for x in range(41)] for x in range(41)]
    implicit = get_implicit(coord_values)
    #figure out points
    # Map world x (llx) to a screen column; column 20 is world x == 0.
    if coord_values[0] > 0:
        x_point = 20 - coord_values[0]
    elif coord_values[0] <= 0:
        x_point = 20 + coord_values[0]
    y_point = 20 - coord_values[1]
    # Rows occupied by the box's vertical extent.
    r1 = range((y_point-coord_values[2]+1),y_point+1)
    index = x_point
    #make actual box
    for n in screen:
        if screen.index(n) in r1:
            n[x_point] = "*"
            try:
                # Right edge; IndexError is swallowed when it falls off-grid.
                n[x_point+coord_values[2]] = "*"
            except:
                pass
        if screen.index(n) == y_point or screen.index(n) == (y_point-coord_values[2]+1):
            while index < (x_point+coord_values[2]):
                try:
                    n[index] = "*"
                except:
                    pass
                index+=1
            index = x_point
    #draw line
    # Mark every cell satisfying a*y + b*x + c == 0 (see get_implicit);
    # world coords: y = 20 - row, x = col - 20.
    for n in screen:
        for i in n:
            if (implicit[0]*(20-screen.index(n))+implicit[1]*(n.index(i)-20)+implicit[2]) == 0:
                screen[screen.index(n)][n.index(i)] = "*"
    y_axis = 20
    #add axis
    # Pad the y-axis labels to a fixed width of three characters.
    for n in screen:
        if y_axis < 0 and y_axis >-10 or y_axis > 9:
            n.insert(0,str(y_axis)+" ")
        elif y_axis > -1:
            n.insert(0,str(y_axis)+"  ")
        else:
            n.insert(0,str(y_axis))
        y_axis-=1
    #removes random numbers but not axis
    for n in screen:
        for i in n:
            if i.isdigit() and n.index(i) != 0:
                n[n.index(i)] = " "
    row = 0
    screen_string = ""
    while row < dimension:
        screen_string += str(screen[row]) + "\n"
        #increment by column number so vars are at the start and end of next row
        row+=1
    #removes unneeded chars for beautification
    screen_string = screen_string.replace(",","").replace("[","").replace("]","").replace("'","")
    #print final graph
    print(screen_string)
print("-20 -19 -18 -17 -16 -15 -14 -13 -12 -11 -10 -9 -8 -7 -6 -5 -4 -3 -2 -1 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20")
print("\n")
print("Implicit Form: "+str(implicit[0])+"x+"+str(implicit[1])+"x+"+str(implicit[2])+"=0")
print("______________________________________________________________________________________________________________________________\n")
#gets values for implicit equation
def get_implicit(coord_values):
    """Return implicit-form coefficients [a, b, c] for the input line.

    coord_values layout is [llx, lly, d, p1, p2, v1, v2] (see
    print_coordinates).  The returned triple satisfies a*y + b*x + c == 0
    for points on the line through (p1, p2) with direction (v1, v2), which
    is exactly how print_coordinates evaluates it.
    """
    p1, p2 = coord_values[3], coord_values[4]
    v1, v2 = coord_values[5], coord_values[6]
    a = -1 * v1
    b = v2
    c = -1 * (v2 * p1 + (-1 * v1 * p2))
    return [a, b, c]
#call func
# Render each input file; every file is expected to contain seven
# whitespace-separated integers: llx lly d p1 p2 v1 v2 (see
# print_coordinates).  Missing files raise FileNotFoundError.
print_coordinates("line1-1.txt")
print_coordinates("line2.txt")
print_coordinates("line3.txt")
print_coordinates("line4.txt")
print_coordinates("line5.txt")
| [
"jack671guam@yahoo.com"
] | jack671guam@yahoo.com |
0511863743d10d0d427ae11b828555184b5f94b5 | 8e237170f03564dc27adc306020832915bee8511 | /ask_nano/methods/urls.py | a0b81d63826ba783cda0c38fb8f78cb8729eb7d7 | [] | no_license | nanomishka/WEB.homework | 6ac0178e498d6b18f1e3fe2c1b75256253cc8f4c | e4ea4df6a1e19bbde557a55b8d7559edde9ee97c | refs/heads/master | 2020-05-20T09:48:02.917511 | 2015-03-08T16:41:41 | 2015-03-08T16:41:41 | 26,712,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | from django.conf.urls import patterns, url
from methods import views
# Django URLconf (pre-1.8 `patterns()` style): route the app root ('')
# to the index view.
urlpatterns = patterns('',
    url(r'^$', views.index, name='index'),
) | [
"mihail777str@yandex.ru"
] | mihail777str@yandex.ru |
cbe654798d1d772583f313e95681fa8a748f9389 | 8c91ab9c35280a97fe9ffcf38f07f8fca2625d4b | /MULTIPLE_THREAD.py | 982143c558f651677ac00e0637d4e9f70dcc4b86 | [] | no_license | Roick-Leo/Multiple_thread | 4e8823c7c63a3990094ce99dc7c3b8bcd778659d | 50ce8681b607a6235bd12f423f8627746ca760f0 | refs/heads/master | 2022-11-25T08:53:00.581398 | 2020-07-29T11:29:07 | 2020-07-29T11:29:07 | 283,479,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,738 | py | import time
import subprocess
import argparse
###input parameters
# Runs the command lines listed in --command_file, at most --threads at a
# time, by launching them with `shell=True` and polling every
# --waiting_time seconds.  Optionally redirects each command's output to a
# per-command log file listed in --logs_file.
parser = argparse.ArgumentParser(description='Running multiple commands with multiple threads.')
parser.add_argument('-T', '--threads', default=8, type=int, help='number of threads')
parser.add_argument('-F', '--command_file', default='command_lines', help='file contains commands to run')
parser.add_argument('-W', '--waiting_time', default=30, type=int, help='time before checking if each thread have finished current job')
parser.add_argument('-L', '--logs_file', default='', help='save stdout and stderr to log file presented in this file, the line count must equal to commands, or it will be save to one log_all.txt under current directory')
args = parser.parse_args()
command_file_location=args.command_file
thread_number=args.threads
waiting_time_length=args.waiting_time
log_file_location=args.logs_file
# Load the command lines, normalising line endings.
cmd_list=list()
cmd_file=open(command_file_location,'r')
for lines in cmd_file:
    cmd_list.append(lines.split('\n')[0].split('\r')[0]+'\n')
# Load one log path per command; fall back to a shared log_all.txt when
# the counts do not match.
if log_file_location!='':
    log_list=list()
    log_file=open(log_file_location,'r')
    for lines in log_file:
        log_list.append(lines.split('\n')[0].split('\r')[0])
    if len(cmd_list)!=len(log_list):
        print('log file number not equal to cmd number, saving logs to log_all.txt')
        log_list=list()
        for cmd in cmd_list:
            log_list.append('log_all.txt')
#create N threads
# Each "thread" entry is a list: [cmd, Popen handle, cmd_index, status].
threads_list=list()
cmd_index=0
#Need_for_cycling=True#if there is not less commands than threads, then there is no need for cycling
logfiles=list()
for i in range(0,thread_number):
    if cmd_index>=len(cmd_list):
        #Need_for_cycling=False
        break
    new_thread=list()
    new_thread.append(cmd_list[cmd_index]) #cmd
    # NOTE(review): shell=True executes the raw command line through the
    # shell -- only feed this trusted command files.
    if log_file_location=='':
        new_thread.append(subprocess.Popen(cmd_list[cmd_index].split('\n')[0],shell=True))
    else:
        logfiles.append(open(log_list[cmd_index].split('\n')[0],'a'))
        new_thread.append(subprocess.Popen(cmd_list[cmd_index].split('\n')[0],shell=True,stdout=logfiles[cmd_index],stderr=logfiles[cmd_index]))
    new_thread.append(cmd_index)
    print(cmd_list[cmd_index])
    new_thread.append('going')#status
    threads_list.append(new_thread)
    cmd_index+=1
effective_thread_number=len(threads_list)
# NOTE(review): new_thread is built as [cmd, Popen, cmd_index, status] in
# BOTH branches above, so the status always lives at index 3.  Setting
# status_index=2 in the no-log case makes the 'going'/'stop' comparison
# below target cmd_index instead -- verify that the no-log mode ever
# terminates.
if log_file_location=='':
    status_index=2
else:
    status_index=3
strwrite_summary=''
stoped_num=0
# Polling loop: restart finished slots with the next pending command, or
# mark them stopped once the command list is exhausted.
while True:
    time.sleep(waiting_time_length)
    #wait for a moment
    for i in range(0,effective_thread_number):
        if threads_list[i][1].poll()!=None and threads_list[i][status_index]=='going':
            #finish current task
            if threads_list[i][1].poll()!=0:
                #error, should output message
                strwrite_summary+=str(threads_list[i][1].poll())+'\t'+threads_list[i][0]+'\n'
                #write error to summary
            threads_list[i][1].wait()
            #collected resource
            if log_file_location!='':
                logfiles[threads_list[i][2]].flush()
                logfiles[threads_list[i][2]].close()
                #flush log
            if cmd_index<len(cmd_list):
                #still commands left
                threads_list[i][0]=cmd_list[cmd_index]
                if log_file_location=='':
                    threads_list[i][1]=subprocess.Popen(cmd_list[cmd_index].split('\n')[0],shell=True)
                else:
                    logfiles.append(open(log_list[cmd_index].split('\n')[0],'a'))
                    threads_list[i][1]=subprocess.Popen(cmd_list[cmd_index].split('\n')[0],shell=True,stdout=logfiles[cmd_index],stderr=logfiles[cmd_index])
                threads_list[i][2]=cmd_index
                print(cmd_list[cmd_index])
                #new thread
                cmd_index+=1
            else:
                threads_list[i][status_index]='stop'
                print('thread '+str(i)+' stop')
                stoped_num+=1
                print(stoped_num)
                if stoped_num>=effective_thread_number:
                    break
    if stoped_num>=effective_thread_number:
        break
# Append non-zero exit codes and their commands to the summary file.
file_summary=open('run_summary.txt','a')
file_summary.write(strwrite_summary)
#done
| [
"1757605052@qq.com"
] | 1757605052@qq.com |
3a797ab7a7a8762069a67c8c3002d40d390de67b | f12bab32f1f71ec22b91e98294a5005489fb55c8 | /star_triangle.py | 3f3497038575e7f585102952a60571f2786fbb27 | [] | no_license | IoSapsai/SoftUniExercises | 021e34102b5588bad6dafed0f9979c972aa7356f | f571c078e2c0f68bbf2adbf8c90616828c8f1e73 | refs/heads/main | 2023-01-30T08:03:53.678880 | 2020-12-14T18:42:14 | 2020-12-14T18:42:14 | 321,440,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | n = "*"
# Print a 10-line left-aligned triangle of asterisks, one more per row
# (n starts as "*" and grows by one "*" each iteration).
for i in range(10):
    print(n)
n = n+"*" | [
"noreply@github.com"
] | IoSapsai.noreply@github.com |
874d2c0bd5020ce7c467c78d3599b12ee5eecd57 | c1fe63cf20d0e466ba6b3e0697c5cf6842e567aa | /pizzafag.py | bf53063b045ab0eeab64724022f7be11b0e89a07 | [] | no_license | gabssluc/untitled5 | 927467d13338ba69d47d9c00678a7cedc0ae1add | 6ebd75715b0abc0ecbcbbee900d3567cf7f79c9f | refs/heads/master | 2021-03-13T09:12:21.469151 | 2020-03-11T19:38:18 | 2020-03-11T19:38:18 | 246,663,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | class pizza(object):
    def __init__(self, cobertura):
        # Store the pizza's topping ("cobertura").
        self.cobertura = cobertura
    @property
    def pega_cobertura(self):
        # Read accessor for the topping.
        return self.cobertura
@pega_cobertura.setter
def pega_cobertura(self, valor):
self.cobertura = ingtvalor
# Smoke test of the property: read the topping, then overwrite it.
a = pizza("calabreza")
print(a.pega_cobertura)
a.pega_cobertura = True
print(a.pega_cobertura) | [
"33334180+gabssluc@users.noreply.github.com"
] | 33334180+gabssluc@users.noreply.github.com |
a4a6ddc3ff4189207d3424818543b80b9af8c4e8 | 0e149571c0a8a355a8d298677b762b05506b46eb | /problem6.py | a31264344a2f48968b4b186e11125d637e2e8936 | [] | no_license | Paris18/Python-Codes | 18ebb1b671e9e613cad792c577119920ecd3331d | 50b6b67b1845d43d23adfcddbd041e3ce5a869d5 | refs/heads/master | 2020-04-12T01:53:38.286944 | 2019-04-22T19:50:22 | 2019-04-22T19:50:22 | 162,231,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | '''n! means n × (n − 1) × ... × 3 × 2 × 1
For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800,
and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
Find the sum of the digits in the number 100!'''
import functools

# Problem parameter: the factorial whose digits are summed (10! -> 27).
n = 10

def dgsum(x):
    """Return the sum of the decimal digits of the non-negative integer x."""
    total = x % 10
    while x // 10:
        x //= 10
        total += x % 10
    return total
print (dgsum(functools.reduce(lambda x,y: x*y, range(1,n+1)))) | [
"noreply@github.com"
] | Paris18.noreply@github.com |
5e6e4fdf910746c167dea134355fe5ed90b7815c | a951bcce35dfa63db7a812bd27c1863f286e37cf | /tests/queries/0_stateless/02126_url_auth.python | 60009624c76f78c7ea551d97905ef90873870283 | [
"Apache-2.0"
] | permissive | nikitamikhaylov/ClickHouse | 294be1c43cbb0e6100145ce4cc5d3fb1191c0de2 | 88629657ca54f92c7fe1bf3f055e3389668ded3c | refs/heads/master | 2022-03-02T09:35:26.300566 | 2022-01-27T10:09:17 | 2022-01-27T10:09:17 | 197,409,528 | 1 | 3 | Apache-2.0 | 2021-11-10T16:03:27 | 2019-07-17T14:50:45 | C++ | UTF-8 | Python | false | false | 8,117 | python | #!/usr/bin/env python3
import socket
import csv
import sys
import tempfile
import threading
import os
import traceback
import urllib.request
import subprocess
from io import StringIO
from http.server import BaseHTTPRequestHandler, HTTPServer
def is_ipv6(host):
    """Return True when `host` is not a valid IPv4 dotted-quad address.

    Delegates to socket.inet_aton(): if it parses, the host is IPv4 and
    False is returned; otherwise the address is assumed to be IPv6
    (callers pass raw addresses obtained from `hostname -i`).
    """
    try:
        socket.inet_aton(host)
        return False
    except OSError:
        # Narrowed from a bare `except:` so genuine programming errors
        # (e.g. a non-string argument raising TypeError) are not silently
        # misreported as "IPv6".
        return True
def get_local_port(host, ipv6):
    """Bind an ephemeral port on `host` and return its number.

    The socket is closed on return, so the port is merely *likely* to be
    free when it is used shortly afterwards.
    """
    family = socket.AF_INET6 if ipv6 else socket.AF_INET
    with socket.socket(family) as sock:
        sock.bind((host, 0))
        return sock.getsockname()[1]
# ClickHouse endpoint under test (overridable via the environment).
CLICKHOUSE_HOST = os.environ.get('CLICKHOUSE_HOST', '127.0.0.1')
CLICKHOUSE_PORT_HTTP = os.environ.get('CLICKHOUSE_PORT_HTTP', '8123')
#####################################################################################
# This test starts an HTTP server and serves data to clickhouse url-engine based table.
# In order for it to work ip+port of http server (given below) should be
# accessible from clickhouse server.
#####################################################################################
# IP-address of this host accessible from the outside world. Get the first one
HTTP_SERVER_HOST = subprocess.check_output(['hostname', '-i']).decode('utf-8').strip().split()[0]
IS_IPV6 = is_ipv6(HTTP_SERVER_HOST)
HTTP_SERVER_PORT = get_local_port(HTTP_SERVER_HOST, IS_IPV6)
# IP address and port of the HTTP server started from this script.
HTTP_SERVER_ADDRESS = (HTTP_SERVER_HOST, HTTP_SERVER_PORT)
# IPv6 literals must be bracketed inside a URL authority.
if IS_IPV6:
    HTTP_SERVER_URL_STR = 'http://' + f'[{str(HTTP_SERVER_ADDRESS[0])}]:{str(HTTP_SERVER_ADDRESS[1])}' + "/"
else:
    HTTP_SERVER_URL_STR = 'http://' + f'{str(HTTP_SERVER_ADDRESS[0])}:{str(HTTP_SERVER_ADDRESS[1])}' + "/"
# Scratch CSV path shared between the HTTP handlers and the test helpers.
# NOTE(review): this uses private tempfile APIs (underscore-prefixed);
# consider tempfile.NamedTemporaryFile/mkstemp instead.
CSV_DATA = os.path.join(tempfile._get_default_tempdir(), next(tempfile._get_candidate_names()))
def get_ch_answer(query):
    """POST `query` to the ClickHouse HTTP interface and return the body.

    Honors the CLICKHOUSE_URL environment variable when set; otherwise a
    URL is built from CLICKHOUSE_HOST / CLICKHOUSE_PORT_HTTP.
    """
    host = CLICKHOUSE_HOST
    if IS_IPV6:
        host = f'[{host}]'
    # Bug fix: the bracketed `host` computed above was previously ignored
    # (the format call passed CLICKHOUSE_HOST directly), producing an
    # invalid URL authority for IPv6 literals.
    url = os.environ.get('CLICKHOUSE_URL', 'http://{host}:{port}'.format(host=host, port=CLICKHOUSE_PORT_HTTP))
    return urllib.request.urlopen(url, data=query.encode()).read().decode()
def check_answers(query, answer):
    """Run `query` against ClickHouse and fail loudly when the trimmed
    response differs from the expected `answer`."""
    got = get_ch_answer(query)
    if got.strip() == answer.strip():
        return
    print("FAIL on query:", query, file=sys.stderr)
    print("Expected answer:", answer, file=sys.stderr)
    print("Fetched answer :", got, file=sys.stderr)
    raise Exception("Fail on query")
class CSVHTTPServer(BaseHTTPRequestHandler):
    """Minimal HTTP handler: GET returns a fixed body, POST appends
    chunk-encoded CSV rows to the CSV_DATA scratch file."""
    def _set_headers(self):
        # Common 200/text-csv response header for both verbs.
        self.send_response(200)
        self.send_header('Content-type', 'text/csv')
        self.end_headers()

    def do_GET(self):
        self._set_headers()
        self.wfile.write(('hello, world').encode())
        # with open(CSV_DATA, 'r') as fl:
        #     reader = csv.reader(fl, delimiter=',')
        #     for row in reader:
        #         self.wfile.write((', '.join(row) + '\n').encode())
        return

    def read_chunk(self):
        """Read one HTTP chunked-transfer chunk: a hex length line followed
        by that many bytes and a trailing CRLF; '' signals the end."""
        msg = ''
        while True:
            sym = self.rfile.read(1)
            # NOTE(review): rfile.read() returns bytes, so comparing `sym`
            # to the str '' can never be true -- EOF before CRLF would loop
            # on empty reads.  Verify against the intended client.
            if sym == '':
                break
            msg += sym.decode('utf-8')
            if msg.endswith('\r\n'):
                break
        length = int(msg[:-2], 16)
        if length == 0:
            return ''
        content = self.rfile.read(length)
        self.rfile.read(2) # read sep \r\n
        return content.decode('utf-8')

    def do_POST(self):
        # Accumulate the chunked body, then re-parse it as CSV and append
        # the rows to the shared scratch file.
        data = ''
        while True:
            chunk = self.read_chunk()
            if not chunk:
                break
            data += chunk
        with StringIO(data) as fl:
            reader = csv.reader(fl, delimiter=',')
            with open(CSV_DATA, 'a') as d:
                for row in reader:
                    d.write(','.join(row) + '\n')
        self._set_headers()
        self.wfile.write(b"ok")

    def log_message(self, format, *args):
        # Silence the default per-request stderr logging.
        return
class HTTPServerV6(HTTPServer):
    # IPv6 variant of HTTPServer: only the socket address family differs.
    address_family = socket.AF_INET6
def start_server(requests_amount):
    """Build (but do not start) a thread that serves exactly
    `requests_amount` HTTP requests on HTTP_SERVER_ADDRESS and then
    returns."""
    server_cls = HTTPServerV6 if IS_IPV6 else HTTPServer
    httpd = server_cls(HTTP_SERVER_ADDRESS, CSVHTTPServer)

    def serve_fixed_number():
        for _ in range(requests_amount):
            httpd.handle_request()

    return threading.Thread(target=serve_fixed_number)
# test section
def test_select(table_name="", schema="str String,numuint UInt32,numint Int32,double Float64", requests=[], answers=[], test_data=""):
    """Seed CSV_DATA with `test_data`, optionally create a URL-engine table,
    then run each request (formatted with {tbl}) and compare against the
    matching expected answer."""
    with open(CSV_DATA, 'w') as f: # clear file
        f.write('')
    if test_data:
        with open(CSV_DATA, 'w') as f:
            f.write(test_data + "\n")
    # When a table name is given, (re)create a URL-engine table pointing at
    # the local HTTP server; otherwise the url() table function is used.
    if table_name:
        get_ch_answer("drop table if exists {}".format(table_name))
        get_ch_answer("create table {} ({}) engine=URL('{}', 'CSV')".format(table_name, schema, HTTP_SERVER_URL_STR))
    for i in range(len(requests)):
        tbl = table_name
        if not tbl:
            tbl = "url('{addr}', 'CSV', '{schema}')".format(addr=HTTP_SERVER_URL_STR, schema=schema)
        check_answers(requests[i].format(tbl=tbl), answers[i])
    # Clean up the temporary table.
    if table_name:
        get_ch_answer("drop table if exists {}".format(table_name))
def test_insert(table_name="", schema="str String,numuint UInt32,numint Int32,double Float64", requests_insert=[], requests_select=[], answers=[]):
    """Run the insert statements, then the select statements, comparing each
    select against the matching expected answer.  Like test_select, either
    a named URL-engine table or the url() table function is used."""
    with open(CSV_DATA, 'w') as f: # flush test file
        f.write('')
    if table_name:
        get_ch_answer("drop table if exists {}".format(table_name))
        get_ch_answer("create table {} ({}) engine=URL('{}', 'CSV')".format(table_name, schema, HTTP_SERVER_URL_STR))
    # Inserts: answers are ignored, only the queries are executed.
    for req in requests_insert:
        tbl = table_name
        if not tbl:
            tbl = "table function url('{addr}', 'CSV', '{schema}')".format(addr=HTTP_SERVER_URL_STR, schema=schema)
        get_ch_answer(req.format(tbl=tbl))
    # Selects: each response must match the expected answer.
    for i in range(len(requests_select)):
        tbl = table_name
        if not tbl:
            tbl = "url('{addr}', 'CSV', '{schema}')".format(addr=HTTP_SERVER_URL_STR, schema=schema)
        check_answers(requests_select[i].format(tbl=tbl), answers[i])
    if table_name:
        get_ch_answer("drop table if exists {}".format(table_name))
def test_select_url_engine(requests=[], answers=[], test_data=""):
    """Check each pre-formatted request against its expected answer.

    `test_data` is accepted for signature parity with the other test_*
    helpers but is not used here.
    """
    for idx, request in enumerate(requests):
        check_answers(request, answers[idx])
def main():
    """Serve one request from the local HTTP server and verify that a
    url() table function with user:password in the URL fetches it."""
    test_data = "Hello,2,-2,7.7\nWorld,2,-5,8.8"
    # The triple-quoted block below is dead code kept as a no-op string
    # literal (older request/answer fixtures).
    """
    select_only_requests = {
        "select str,numuint,numint,double from {tbl}" : test_data.replace(',', '\t'),
        "select numuint, count(*) from {tbl} group by numuint" : "2\t2",
        "select str,numuint,numint,double from {tbl} limit 1": test_data.split("\n")[0].replace(',', '\t'),
    }

    insert_requests = [
        "insert into {tbl} values('Hello',10,-2,7.7)('World',10,-5,7.7)",
        "insert into {tbl} select 'Buy', number, 9-number, 9.9 from system.numbers limit 10",
    ]

    select_requests = {
        "select distinct numuint from {tbl} order by numuint": '\n'.join([str(i) for i in range(11)]),
        "select count(*) from {tbl}": '12',
        'select double, count(*) from {tbl} group by double': "7.7\t2\n9.9\t10"
    }
    """
    # Embed guest:guest credentials in the URL; IPv6 hosts are bracketed.
    if IS_IPV6:
        query = "select * from url('http://guest:guest@" + f'[{str(HTTP_SERVER_ADDRESS[0])}]:{str(HTTP_SERVER_ADDRESS[1])}' + "/', 'RawBLOB', 'a String')"
    else:
        query = "select * from url('http://guest:guest@" + f'{str(HTTP_SERVER_ADDRESS[0])}:{str(HTTP_SERVER_ADDRESS[1])}' + "/', 'RawBLOB', 'a String')"

    select_requests_url_auth = {
        query : 'hello, world',
    }

    # Serve exactly as many requests as there are queries, run the checks,
    # then wait for the server thread to finish.
    t = start_server(len(list(select_requests_url_auth.keys())))
    t.start()
    test_select(requests=list(select_requests_url_auth.keys()), answers=list(select_requests_url_auth.values()), test_data=test_data)
    t.join()
    print("PASSED")
if __name__ == "__main__":
    try:
        main()
    except Exception as ex:
        # Print the traceback and error to stderr, then hard-exit with a
        # non-zero status (os._exit skips normal interpreter shutdown).
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_tb(exc_traceback, file=sys.stderr)
        print(ex, file=sys.stderr)
        sys.stderr.flush()
        os._exit(1)
| [
"binghan1589@163.com"
] | binghan1589@163.com |
3506d022cee02d199aafa4c55355cf35e981379e | 441f4edd589ad9b9b7eb6ebad1a2fc4276bd742e | /PycharmProjects/letscodeit/pages/home/login_page.py | d89047adb2515be7a18a15e524fbfc3073a04039 | [] | no_license | haris-rizwan/Projects | 705da3bebb37e8d2d3e9b00cd7b0b12ffedace38 | 7a5aae1bfc794e6853c32d72ed4531e847ad643a | refs/heads/master | 2021-06-20T05:38:44.991539 | 2017-08-01T02:36:53 | 2017-08-01T02:36:53 | 94,132,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,660 | py | from selenium.webdriver.common.by import By
import time
from base.SeleniumDriver import SeleniumDriver
import Utilities.custom_logger as cl
import logging
from base.basepage import BasePage
class LoginClass(BasePage):
    """Page object for the login page: element locators, single-element
    actions, the full login workflow and result-verification helpers."""

    log = cl.customLogger(logging.DEBUG)

    # Locator values; the locator *type* (link text / id / name / xpath)
    # is supplied at each call site below.
    _login_link = "Login"
    _email_field = "user_email"
    _password_field = "user_password"
    _login_button = "commit"

    def __init__(self, driver):
        super().__init__(driver)
        self.driver = driver

    # --- element-level actions -------------------------------------------

    def clickLoginLink(self):
        """Open the login form via the 'Login' link."""
        self.elementClick(self._login_link, locatorType="link")

    def enterEmail(self, email):
        """Type the email address into the email field."""
        self.sendKeys(email, self._email_field, locatorType="id")

    def enterPassword(self, password):
        """Type the password into the password field."""
        self.sendKeys(password, self._password_field, locatorType="id")

    def clickLoginButton(self):
        """Submit the login form."""
        self.elementClick(self._login_button, locatorType="name")

    # --- page-level workflow and checks ----------------------------------

    def Login(self, email="", password=""):
        """Run the complete login flow with the given credentials."""
        self.clickLoginLink()
        time.sleep(3)
        self.enterEmail(email)
        self.enterPassword(password)
        self.clickLoginButton()

    def verifyLoginSuccess(self):
        """True when the post-login 'User Settings' element is present."""
        return self.isElementPresent(".//div[@id='navbar']//span[text()='User Settings']", locatorType="xpath")

    def verifyLoginFail(self):
        """True when the invalid-credentials error message is shown."""
        return self.isElementPresent("//div[contains(text(),'Invalid email or password')]", locatorType="xpath")

    def verifyLoginTitle(self):
        """Compare the page title against the expected value."""
        return self.verifyPageTitle("Google")
| [
"haarisrizwan@gmail.com"
] | haarisrizwan@gmail.com |
39751900ad45ccf2f0e07b299a38c6134add16ff | 130946de28fd5367cc262c1ab7a5bd9038e2dd81 | /mongo/mongoQuery_Regex.py | c7b2f54f6d06bd543046ef8c668f59db58ab91e8 | [] | no_license | brijesh3601/Python | 0d463797ecf6a22ec691ddcfba141d5989a445b1 | 2b76f04a1aa64f60aa55ea06734d9658e7bbeef4 | refs/heads/master | 2021-08-02T19:11:51.853312 | 2021-07-30T02:30:14 | 2021-07-30T02:30:14 | 152,303,709 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | import pymongo
# Connect to a local MongoDB and query the test.customers collection
# three ways: exact match, lexicographic range, and case-insensitive
# prefix regex.  Each matching document is printed.
mongoCli = pymongo.MongoClient("mongodb://localhost:27017/")
mongoDb = mongoCli["test"]
mongoColln = mongoDb["customers"]
# Exact address match.
addrQuery = { "address": "Park Lane 38" }
# starts with 'S'
startWithSQuery = { "address": { "$regex": "^S", "$options" : "i" } }
# start with 'S' or higher like S, T, U etc
startWithS_or_HigherQuery = { "address": { "$gt": "S" } }
mongoDoc = mongoColln.find(addrQuery)
for x in mongoDoc:
    print(x)
print("## Print S or higher")
mongoDoc = mongoColln.find(startWithS_or_HigherQuery)
for x in mongoDoc:
    print(x)
print("## Print starts with S ")
mongoDoc = mongoColln.find(startWithSQuery)
for x in mongoDoc:
print(x) | [
"Brijesh.Kachalia@amwater.com"
] | Brijesh.Kachalia@amwater.com |
5499e3228acf0bb436928c8478218fd9b7c8deb0 | de0007b7a4a2ccc5eca3f39a8a4086ecadaccf7d | /code/util/memex.py | 0f753fd22d1485dbbf7498ebfe1a4d25c8905f67 | [
"Apache-2.0"
] | permissive | AffDk/dd-genomics | 00dd0605d9547aa1e5ae599908f81b2d58fd6075 | 3e3f272327d4c29daa53347b7ddb20ea1a9ae78a | refs/heads/master | 2022-02-23T08:55:05.088238 | 2016-02-26T16:21:33 | 2016-02-26T16:22:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,432 | py | ######################################################################################
# LATTICE - MEMEX plugins for latticelib
#
# latticelib is an extraction framework to allow quickly building extractors by
# specifying minimal target-specific code (candidate generation patterns and
# supervision rules). It has a set of pre-built featurization code that covers
# a lot of Memex flag extractors. The goal is to make it more general,
# powerful, fast to run and easy to use.
#
# This file contains Memex-specific components for latticelib.
#
# For sample usage, see:
# udf/util/test/latticelib/module.py
# udf/extract_underage.py
#
######################################################################################
# Default dictionaries tailored for Memex. Will function in addition to the
# one in latticelib.
#
# Bug fix: a comma was missing between the 'levels' list and the
# 'expression' key, which made this module a SyntaxError.  Trailing commas
# are now used consistently after every entry.
default_dicts = {
    'short_words': [
        'the',
        'and',
        'or',
        'at',
        'in',
        'see',
        'as',
        'an',
        'data',
        'for',
        'not',
        'our',
        'ie',
        'to',
        'eg',
        'one',
        'age',
        'on',
        'center',
        'right',
        'left',
        'from',
        'based',
        'total',
        'via',
        'but',
        'resp',
        'no',
    ],
    'intensifiers': [
        'very',
        'really',
        'extremely',
        'exceptionally',
        'incredibly',
        'unusually',
        'remarkably',
        'particularly',
        'absolutely',
        'completely',
        'quite',
        'definitely',
        'too',
    ],
    'pos_certainty': [
        'likely',
        'possibly',
        'definitely',
        'absolutely',
        'certainly',
    ],
    'modal': [
        'will',
        'would',
        'may',
        'might',
    ],
    'mutation': [
        'mutation',
        'variant',
        'allele',
        'deletion',
        'duplication',
        'truncation',
    ],
    'levels': [
        'serum',
        'level',
        'elevated',
        'plasma',
    ],
    'expression': [
        'express',
        'expression',
        'coexpression',
        'coexpress',
        'co-expression',
        'co-express',
        'overexpress',
        'overexpression',
        'over-expression',
        'over-express',
        'production',
        'product',
        'increased',
        'increase',
        'increas',
    ],
}
| [
"jbirgmei@stanford.edu"
] | jbirgmei@stanford.edu |
a64f6c3790d25f1976b3b98150499dc49cc51197 | 690eae459b5a5d76e3fef818296ec875490729e8 | /ipart_processed/detect_AR_merra.py | 32ac9c1c77c3922579c342ccba783d2b38794a34 | [] | no_license | mukeshraeee/Mukesh_ACP_Manuscript | 5575e32822c8b663dd91bfc6fce7340f2f151324 | 8afb38b75e19013b9a6d1dd54f8353c964f2c1bf | refs/heads/main | 2023-04-19T05:28:04.599752 | 2022-03-09T10:05:45 | 2022-03-09T10:05:45 | 467,843,631 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,254 | py | """This code enable to produce AR detection
inspired by Guangzhi Xu (xugzhi1987@gmail.com)"""
#====== Load Libraries ======================================================
import os, sys
import numpy as np
import pandas as pd
from ipart.utils import funcs
from ipart.AR_detector import findARs
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
from ipart.utils import plot
# Analysis window: three days in spring of YEAR.
YEAR=2021
TIME_START = '%d-03-01 00:00:00' %YEAR
TIME_END = '%d-03-03 00:00:00' %YEAR
#==== Import data = =================
uq_file = os.path.join('.', 'spring1.nc')
vq_file = os.path.join('.', 'spring1.nc')
#==== ivt reconstruction and anomalies ===================
ivt_file = os.path.join('.', 'spring_ivt_dust-THR-kernel-t1-s20.nc') #save nc file in given name
#------------------Output folder------------------
output_dir = os.path.join('.', str(YEAR))
PLOT=True # create maps of found ARs or not
SHIFT_LON=10 # degree, shift left bound to longitude. Should match
# that used in compute_thr_singlefile.py
# Detection thresholds handed to ipart's findARs (units in the comments).
PARAM_DICT={
    # kg/m/s, define AR candidates as regions >= than this anomalous ivt.
    'thres_low' : 0.5,
    # km^2, drop AR candidates smaller than this area.
    'min_area': 5*1e4,
    # km^2, drop AR candidates larger than this area.
    'max_area': 1800*1e4,
    # float, minimal length/width ratio.
    'min_LW': 2,
    # degree, exclude systems whose centroids are lower than this latitude.
    'min_lat': 00,
    # degree, exclude systems whose centroids are higher than this latitude.
    'max_lat': 80,
    # km, ARs shorter than this length is treated as relaxed.
    'min_length': 2000,
    # km, ARs shorter than this length is discarded.
    'min_length_hard': 1500,
    # degree lat/lon, error when simplifying axis using rdp algorithm.
    'rdp_thres': 2,
    # grids. Remove small holes in AR contour.
    'fill_radius': None,
    # do peak partition or not, used to separate systems that are merged
    # together with an outer contour.
    'single_dome': False,
    # max prominence/height ratio of a local peak. Only used when single_dome=True
    'max_ph_ratio': 0.6,
    # minimal proportion of flux component in a direction to total flux to
    # allow edge building in that direction
    'edge_eps': 0.4
    }
#==== Read in data ===============
uflux=funcs.readNC(uq_file, 'DUFLUXU')
vflux=funcs.readNC(vq_file, 'DUFLUXV')
#-------------------Read in ivt-------------------
print('\n# Read in file:\n',ivt_file)
ivt=funcs.readNC(ivt_file, 'idt')
ivtrec=funcs.readNC(ivt_file, 'ivt_rec')
ivtano=funcs.readNC(ivt_file, 'ivt_ano')
#-----------------Shift longitude-----------------
qu=uflux.shiftLon(SHIFT_LON)
qv=vflux.shiftLon(SHIFT_LON)
ivt=ivt.shiftLon(SHIFT_LON)
ivtrec=ivtrec.shiftLon(SHIFT_LON)
ivtano=ivtano.shiftLon(SHIFT_LON)
#--------------------Slice data--------------------
# NOTE(review): qu/qv are re-assigned here from the UNshifted uflux/vflux,
# so the shiftLon results two lines above are discarded for the flux
# fields (ivt/ivtrec/ivtano keep theirs).  Verify this is intended.
qu=uflux.sliceData(TIME_START,TIME_END,axis=0).squeeze()
qv=vflux.sliceData(TIME_START,TIME_END,axis=0).squeeze()
ivt=ivt.sliceData(TIME_START,TIME_END,axis=0).squeeze()
ivtrec=ivtrec.sliceData(TIME_START,TIME_END,axis=0).squeeze()
ivtano=ivtano.sliceData(TIME_START,TIME_END,axis=0).squeeze()
#-----------------Get coordinates-----------------
latax=qu.getLatitude()
lonax=qu.getLongitude()
timeax=ivt.getTime()
timeax=['%d-%02d-%02d %02d:00' %(timett.year, timett.month, timett.day, timett.hour) for timett in timeax]
# Run the AR detection over all time steps.
time_idx, labels, angles, crossfluxes, result_df = findARs(ivt.data, ivtrec.data,
        ivtano.data, qu.data, qv.data, latax, lonax, times=timeax, **PARAM_DICT)
#=== Plot ===================
# Plot the first time step that contains a detected AR: raw field,
# THR reconstruction and THR anomaly stacked in one figure.
plot_idx=time_idx[0]
plot_time=timeax[plot_idx]
slab=ivt.data[plot_idx]
slabrec=ivtrec.data[plot_idx]
slabano=ivtano.data[plot_idx]
ardf=result_df[result_df.time==plot_time]
plot_vars=[slab, slabrec, slabano]
titles=['IVT', 'THR_recon', 'THR_ano']
iso=plot.Isofill(plot_vars, 12, 1, 1,min_level=0,max_level=800)
figure=plt.figure(figsize=(12,10), dpi=100)
for jj in range(len(plot_vars)):
    ax=figure.add_subplot(3,1,jj+1,projection=ccrs.PlateCarree())
    pobj=plot.plot2(plot_vars[jj], iso, ax,
            xarray=lonax, yarray=latax,
            title='%s %s' %(plot_time, titles[jj]),
            fix_aspect=False)
    plot.plotAR(ardf, ax, lonax)
plt.show()
| [
"noreply@github.com"
] | mukeshraeee.noreply@github.com |
ba50bd85a9301d36b0a96a7102c9b07d83f8c265 | 4db7f072a2283f67ab98d6971c1d8d51943de100 | /LoanBroker/AwsStepFunctions/PubSub/GetAggregate.py | 4d47a9da1fa9eb1ad5a903aa60aed9768c48e25e | [
"MIT"
] | permissive | spac3lord/eip | 5ec381bb33735f1022e2d7d52cbe569d25f712f9 | 09d1e9371fb13fae4684506c778e6ca6b196c5d0 | refs/heads/master | 2023-03-18T10:24:43.374748 | 2023-03-11T17:01:02 | 2023-03-11T17:01:02 | 77,563,775 | 127 | 26 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | import boto3
import json
import logging
# Root logger, configured once at module import (module state is reused
# across invocations while the Lambda container stays warm).
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# NOTE(review): `json` appears unused in this module.
dynamo = boto3.resource('dynamodb')
def lambda_handler(event, context):
    """AWS Lambda entry point: return the quotes aggregated under event['Id'].

    Performs a strongly consistent read of the MortgageQuotes DynamoDB
    table; when no record exists, an empty quote list is returned.
    """
    logger.info(event)
    aggregate_id = event['Id']
    table = dynamo.Table('MortgageQuotes')
    record = table.get_item(Key={'Id': aggregate_id}, ConsistentRead=True)
    if 'Item' not in record:
        logger.info("No aggregate for key %s" % aggregate_id)
        return {'Quotes': []}
    return {'Quotes': record['Item']['Quotes']}
| [
"inquiry@architectelevator.com"
] | inquiry@architectelevator.com |
f84325ca75ca67e9ffd9ab4ef169d506c3108975 | 3047dab2594b1edf05a54e85205d32ed1ba445ea | /Python/Project Euler/multiples_of_3_and_5.py | 4652df24a9d074c6ef7ef41cc2f0ce566fb2c8ec | [
"MIT"
] | permissive | ZacJoffe/competitive-programming | 99b9e13689508e4e01ee55089e8e59e88a532caf | 8150c9e12198500d8f57c6281f268d8027e7c318 | refs/heads/master | 2020-06-11T00:39:17.689265 | 2019-06-28T00:07:06 | 2019-06-28T00:07:06 | 193,806,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | def isMultiple(num):
if num / 3 == num // 3 or num / 5 == num // 5:
return True
return False
# Project Euler #1: sum the natural numbers below 1000 divisible by 3 or 5.
# The accumulator is named `total` instead of the original `sum`, which
# shadowed the builtin sum().
total = 0
for i in range(3, 1000):
    if isMultiple(i):
        total += i
print(total)
| [
"zacharyjoffe@gmail.com"
] | zacharyjoffe@gmail.com |
c6e59b6836ac31b3775c83db628c1e1a2d0c6413 | ece0d321e48f182832252b23db1df0c21b78f20c | /engine/2.80/scripts/freestyle/styles/split_at_highest_2d_curvatures.py | 68a80d89ea7c7b23302858dd2ddfe88b93707121 | [
"GPL-3.0-only",
"Font-exception-2.0",
"GPL-3.0-or-later",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain-disclaimer",
"Bitstream-Vera",
"LicenseRef-scancode-blender-2010",
"LGPL-2.1-or-later",
"GPL-2.0-or-lat... | permissive | byteinc/Phasor | 47d4e48a52fa562dfa1a2dbe493f8ec9e94625b9 | f7d23a489c2b4bcc3c1961ac955926484ff8b8d9 | refs/heads/master | 2022-10-25T17:05:01.585032 | 2019-03-16T19:24:22 | 2019-03-16T19:24:22 | 175,723,233 | 3 | 1 | Unlicense | 2022-10-21T07:02:37 | 2019-03-15T00:58:08 | Python | UTF-8 | Python | false | false | 1,852 | py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Filename : split_at_highest_2d_curvature.py
# Author : Stephane Grabli
# Date : 04/08/2005
# Purpose : Draws the visible lines (chaining follows same nature lines)
# (most basic style module)
from freestyle.chainingiterators import ChainSilhouetteIterator
from freestyle.functions import pyInverseCurvature2DAngleF0D
from freestyle.predicates import (
NotUP1D,
QuantitativeInvisibilityUP1D,
TrueUP1D,
pyHigherLengthUP1D,
pyParameterUP0D,
)
from freestyle.shaders import (
ConstantThicknessShader,
IncreasingColorShader,
)
from freestyle.types import Operators
# Keep only fully visible edges (quantitative invisibility == 0).
Operators.select(QuantitativeInvisibilityUP1D(0))
# Chain them along silhouettes, stopping chains at invisible edges.
Operators.bidirectional_chain(ChainSilhouetteIterator(), NotUP1D(QuantitativeInvisibilityUP1D(0)))
# Recursively split chains at the point of highest 2D curvature (the
# split point is searched in the 0.4-0.6 parametric range), up to 2
# levels; chains failing the length predicate are not split further.
func = pyInverseCurvature2DAngleF0D()
Operators.recursive_split(func, pyParameterUP0D(0.4, 0.6), NotUP1D(pyHigherLengthUP1D(100)), 2)
# Constant thickness plus a color ramp over each stroke (the eight
# IncreasingColorShader arguments are presumably two RGBA endpoints --
# confirm against the freestyle.shaders docs).
shaders_list = [
    ConstantThicknessShader(10),
    IncreasingColorShader(1, 0, 0, 1, 0, 1, 0, 1),
]
Operators.create(TrueUP1D(), shaders_list)
| [
"admin@irradiate.net"
] | admin@irradiate.net |
d6187c2b2fb01271c77e48fb992c901e13d387b9 | 64d971b307ced750cbe1d2f063b38c3056d627fb | /misc/gen_ops.py | 9bca19562cdaf91d7b5016cd7b4962a6bbd7f9de | [
"ISC"
] | permissive | azhai/wlang | b7f77ddd2e85eb3030284b7fb2de5b7254a805f3 | 03eb8e72eaacfa451be06cf398762a70cd7b30c8 | refs/heads/master | 2023-03-02T20:00:43.000062 | 2021-02-05T02:10:06 | 2021-02-05T02:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,398 | py | #!/usr/bin/env python
# encoding: utf8
#
# This script reads from:
# - src/ir/arch_*.lisp
# - src/types.h
# - src/token.h
# and patches:
# - src/ir/op.h
# - src/ir/op.c
# - src/token.h
# - src/types.h
#
#
import re, sys, os, os.path
from functools import reduce
import pprint
SRCFILE_ARCH_BASE = "src/ir/arch_base.lisp"
SRCFILE_IR_OP_C = "src/ir/op.c"
SRCFILE_IR_OP_H = "src/ir/op.h"
SRCFILE_TOKEN_H = "src/parse/token.h"
SRCFILE_TYPES_H = "src/types.h"
pp = pprint.PrettyPrinter(indent=2)
def rep(any): return pp.pformat(any)
# change directory to project root, i.e. "(dirname $0)/.."
scriptfile = os.path.abspath(__file__)
os.chdir(os.path.dirname(os.path.dirname(scriptfile)))
scriptname = os.path.relpath(scriptfile) # e.g. "misc/gen_ops.py"
DRY_RUN = False # don't actually write files
DEBUG = False # log stuff to stdout
# S-expression types
Symbol = str # A Scheme Symbol is implemented as a Python str
Number = (int, float) # A Scheme Number is implemented as a Python int or float
Atom = (Symbol, Number) # A Scheme Atom is a Symbol or Number
List = list # A Scheme List is implemented as a Python list
Exp = (Atom, List) # A Scheme expression is an Atom or List
# TypeCode => [irtype ...]
# Note: loadTypeCodes verifies that this list exactly matches the list in types.h
typeCodeToIRType :{str:[str]} = {
# multiple irtypes should be listed from most speecific to most generic
"bool": ["bool"],
"int8": ["s8", "i8"],
"uint8": ["u8", "i8"],
"int16": ["s16", "i16"],
"uint16": ["u16", "i16"],
"int32": ["s32", "i32"],
"uint32": ["u32", "i32"],
"int64": ["s64", "i64"],
"uint64": ["u64", "i64"],
"float32": ["f32"],
"float64": ["f64"],
}
typeCodeAliases = {
"int": "int32",
"uint": "uint32",
}
auxTypes = [
"IRAuxBool",
"IRAuxI8",
"IRAuxI16",
"IRAuxI32",
"IRAuxI64",
"IRAuxF32",
"IRAuxF64",
]
irtypeToAuxType = {
"bool" : "IRAuxBool",
"i8" : "IRAuxI8",
"i16" : "IRAuxI16",
"i32" : "IRAuxI32",
"i64" : "IRAuxI64",
"f32" : "IRAuxF32",
"f64" : "IRAuxF64",
}
# irtype => [ TypeCode ... ]
irTypeToTypeCode = {}
for typeCodeName, irtypes in typeCodeToIRType.items():
for irtype in irtypes:
v = irTypeToTypeCode.get(irtype, [])
v.append(typeCodeName)
if len(v) == 1:
irTypeToTypeCode[irtype] = v
longestTypeCode = reduce(lambda a, v: max(a, len(v)), typeCodeToIRType.keys(), 0)
# Maps AST operators to IR operator prefix
# Note: loadASTOpTokens verifies that this list exactly matches the list in token.h
astOpToIROpPrefix = {
# AST token IROp prefix
# (1-input, 2-input)
"TStar": (None, "Mul"), # *
"TSlash": (None, "Div"), # /
"TShl": (None, "ShL"), # <<
"TShr": (None, "ShR"), # >>
"TAnd": (None, "And"), # &
"TPercent": (None, "Rem"), # %
"TPlus": ("Pos", "Add"), # +
"TMinus": ("Neg", "Sub"), # -
"TPipe": (None, "Or"), # |
"THat": ("Compl", "XOr"), # ^ complement
"TTilde": ("BNot", None), # ~
"TEq": (None, "Eq"), # ==
"TNEq": (None, "NEq"), # !=
"TLt": (None, "Less"), # <
"TLEq": (None, "LEq"), # <=
"TGt": (None, "Greater"), # >
"TGEq": (None, "GEq"), # >=
"TPlusPlus": ("Incr", None), # ++
"TMinusMinus": ("Decr", None), # --
"TExcalm": ("Not", None), # !
}
# Operator flags. Encoded as bitflags and thus have a count limit of 31.
OpFlags = [
("ZeroWidth" , 'dummy op; no actual I/O.'),
("Constant" , 'true if the value is a constant. Value in aux'),
("Commutative" , 'commutative on its first 2 arguments (e.g. addition; x+y==y+x)'),
("ResultInArg0" , 'output of v and v.args[0] must be allocated to the same register.'),
("ResultNotInArgs" , 'outputs must not be allocated to the same registers as inputs'),
("Rematerializeable" , 'register allocator can recompute value instead of spilling/restoring.'),
("ClobberFlags" , 'this op clobbers flags register'),
("Call" , 'is a function call'),
("NilCheck" , 'this op is a nil check on arg0'),
("FaultOnNilArg0" , 'this op will fault if arg0 is nil (and aux encodes a small offset)'),
("FaultOnNilArg1" , 'this op will fault if arg1 is nil (and aux encodes a small offset)'),
("UsesScratch" , 'this op requires scratch memory space'),
("HasSideEffects" , 'for "reasons", not to be eliminated. E.g., atomic store.'),
("Generic" , 'generic op'),
("Lossy" , 'operation may be lossy. E.g. converting i32 to i16.'),
]
if len(OpFlags) > 31:
raise Exception("too many OpFlags to work as bitflags")
OpFlagsKeySet = set([k for k, _ in OpFlags])
# operator attributes which has a value. E.g. "(aux i32)"
OpKeyAttributes = set([
"aux",
])
# the IROpDescr struct's fields. (type, name, comment)
IROpDescr = [
("IROpFlag", "flags", ""),
("TypeCode", "outputType", "invariant: < TypeCode_NUM_END"),
("IRAux", "aux", "type of data in IRValue.aux"),
]
class Op:
  """One IR operation parsed from an arch_*.lisp (ops ...) form.

  Holds the op name, its input/output irtype signature, parsed flag set
  (members of OpFlagsKeySet), keyed attributes (members of OpKeyAttributes,
  e.g. "aux"), and the lisp comments preceding/following the op.
  """
  def __init__(self, name :str, input :Exp, output :Exp, attributes :List, commentsPre :[str]):
    self.name = name
    self.input = input
    self.output = output
    self.commentsPre = commentsPre
    self.commentsPost = []
    self.attributes = {}
    self.flags = set()
    # inputSig is a string of the input, useful as a key of dicts
    if isinstance(self.input, Atom):
      self.inputSig = str(self.input)
      self.inputCount = 1
    else:
      self.inputSig = " ".join([str(v) for v in self.input])
      self.inputCount = len(self.input)
    # outputSig is a string of the output, useful as a key of dicts
    if isinstance(self.output, Atom):
      self.outputSig = str(self.output)
      self.outputCount = 1
    else:
      self.outputSig = " ".join([str(v) for v in self.output])
      self.outputCount = len(self.output)
    # Attributes: a bare symbol is a flag (e.g. Commutative); a list is a
    # keyed attribute (e.g. (aux i32)). Anything unknown is a hard error.
    for attr in attributes:
      if isinstance(attr, Atom):
        if attr not in OpFlagsKeySet:
          raise Exception(
            "unexpected attribute %r in op %s.\nExpected one of:\n  %s" %
            (attr, self.name, "\n  ".join(sorted(OpFlagsKeySet))))
        self.flags.add(attr)
      else:
        if len(attr) < 2:
          raise Exception("invalid attribute %r in op %s" % (attr, self.name))
        key = attr[0]
        if key not in OpKeyAttributes:
          raise Exception(
            "unexpected attribute %r in op %s.\nExpected one of:\n  %s" %
            (key, self.name, "\n  ".join(sorted(OpKeyAttributes))))
        self.attributes[key] = attr[1:]
class Arch:
  """One target architecture parsed from an arch_*.lisp file.

  Bug fix: the original declared isGeneric/name/addrSize/regSize/intSize/ops
  as *class* attributes, so the mutable `ops` list was shared by every Arch
  instance (appending to one arch's ops mutated all of them). All state is
  now per-instance. parse_arch_file() still works unchanged: hasattr() and
  setattr() see instance attributes exactly as they saw class attributes.
  """
  def __init__(self, sourcefile :str):
    self.sourcefile = sourcefile  # path of the .lisp file this arch came from
    self.isGeneric = False        # True only for the architecture-neutral base arch
    self.name = "_"
    self.addrSize = 0             # set from arch-file attributes (see parse_arch_file)
    self.regSize = 0
    self.intSize = 0
    self.ops = []                 # Op objects, in file order
# ------------------------------------------------------------------------------------------------
# main
def main():
  """Entry point: load type/token tables, parse the base arch, patch sources.

  Reads src/types.h and src/parse/token.h to validate this script's tables,
  parses src/ir/arch_base.lisp, then regenerates every generated section in
  src/ir/op.h, src/ir/op.c and src/types.h via the gen_* functions.
  """
  typeCodes = loadTypeCodes(SRCFILE_TYPES_H)
  astOps = loadASTOpTokens(SRCFILE_TOKEN_H)
  baseArch = parse_arch_file(SRCFILE_ARCH_BASE)
  baseArch.isGeneric = True  # the base arch holds the generic (arch-neutral) ops
  if DEBUG:
    print("baseArch:", {
      "addrSize": baseArch.addrSize,
      "regSize": baseArch.regSize,
      "intSize": baseArch.intSize,
      "ops": len(baseArch.ops)
    })
  archs = [ baseArch ]
  gen_IR_OPS(archs)
  gen_IROpNames(archs)
  gen_IROpConstMap(baseArch, typeCodes)
  gen_IROpSwitches(baseArch, typeCodes, astOps)
  gen_IROpConvTable(baseArch, typeCodes)
  gen_IROpFlag()
  gen_IRAux()
  gen_IROpDescr()
  gen_IROpInfo(archs, typeCodes)
# ------------------------------------------------------------------------------------------------
# generate output code
def gen_IROpInfo(archs :[Arch], typeCodes :[str]):
  """Generate the _IROpInfoMap table (one IROpDescr per op) in src/ir/op.c.

  For each op emits { flags, outputType, aux } in IROpDescr field order.
  Note: the `typeCodes` parameter is shadowed by the local assignment below;
  the parameter value itself is never used in this function.
  """
  startline = 'const IROpDescr _IROpInfoMap[Op_MAX] = {'
  endline = '};'
  lines = [ startline, '  // Do not edit. Generated by %s' % scriptname ]
  for a in archs:
    for op in a.ops:
      fields :[str] = []
      # Walk the IROpDescr struct definition so field order always matches.
      for _, name, _ in IROpDescr:
        value = "0"
        comment = ""
        if name == "outputType":
          if isinstance(op.output, Atom):
            typeCodes = irTypeToTypeCode[op.output]
            if len(typeCodes) > 1:
              # multiple output types means that the result depends on the input types
              # print("TODO: multiple possible TypeCodes %r for ir type %r" % (typeCodes, op.output))
              if isinstance(op.input, Atom):
                value = "TypeCode_param1"
              elif len(op.input) == 0:
                # special case of ops that "generate" values, like constants.
                value = "TypeCode_param1"
              else:
                # Find which input parameter carries the same irtype as the
                # output; only params 1-2 may determine the output type.
                value = ""
                i = 1
                for intype in op.input:
                  if intype == op.output:
                    value = "TypeCode_param%d" % i
                    break
                  i += 1
                if value == "" or i > 2:
                  raise Exception(
                    "variable output type %r of Op%s without matching input %r" % (
                    op.output, op.name, op.input))
                value = "TypeCode_param1"
              comment = op.output
            else:
              value = "TypeCode_" + typeCodes[0]
          elif len(op.output) == 0:
            value = "TypeCode_nil"
          else:
            raise Exception("TODO: implement handling of multi-output ops (%s -> %r) in %s" % (
              op.name, op.output, scriptname))
        elif name == "flags":
          if len(op.flags) == 0:
            value = "IROpFlagNone"
          else:
            value = "|".join([ "IROpFlag" + flag for flag in sorted(op.flags) ])
        elif name == "aux":
          aux = op.attributes.get("aux")
          value = "IRAuxNone"
          if aux:
            if len(aux) != 1:
              raise Exception(
                "invalid aux arguments in op %s (expected exactly 1 irtype; got %r)" %
                (op.name, aux))
            irtype = aux[0]
            if irtype not in irtypeToAuxType:
              raise Exception(
                "invalid aux type %s in op %s (expected one of: %s)" %
                (irtype, op.name, ", ".join(irtypeToAuxType.keys())))
            value = irtypeToAuxType[irtype]
        else:
          raise Exception("unexpected field %r in IROpDescr" % name)
        if len(comment) > 0:
          fields.append("%s/*%s*/" % (value, comment))
        else:
          fields.append(value)
        # lines.append("  /* %s = */ %s,%s" % (name, value, comment))
      lines.append("  { /* Op%s */ %s }," % (op.name, ", ".join(fields)))
    if a.isGeneric:
      # ZERO entry for Op_GENERIC_END
      lines.append("  {0,0,0}, // Op_GENERIC_END")
  lines.append(endline)
  if DEBUG:
    print("\n".join(lines))
  replaceInSourceFile(SRCFILE_IR_OP_C, startline, "\n"+endline, "\n".join(lines))
def gen_IROpDescr():
  """Regenerate the `typedef struct IROpDescr` body in src/ir/op.h.

  Field names/types/comments come from this script's IROpDescr table;
  columns are padded to the widest type and name for alignment.
  """
  startline = 'typedef struct IROpDescr {'
  endline = '} IROpDescr;'
  lines = [ startline, '  // Do not edit. Generated by %s' % scriptname ]
  typeWidth = reduce(lambda a, v: max(a, len(v[0])), IROpDescr, 0)
  nameWidth = reduce(lambda a, v: max(a, len(v[1])), IROpDescr, 0)
  for type, name, comment in IROpDescr:
    if len(comment) > 0:
      comment = " // " + comment
    lines.append(("  {0:<{typeWidth}} {1:<{nameWidth}}{2}".format(
      type,
      name + ";",
      comment,
      typeWidth=typeWidth,
      nameWidth=nameWidth+1)).strip())
  lines.append(endline)
  if DEBUG:
    print("\n".join(lines))
  replaceInSourceFile(SRCFILE_IR_OP_H, startline, "\n"+endline, "\n".join(lines))
def gen_IRAux():
  """Regenerate the `enum IRAux` in src/ir/op.h from the auxTypes list."""
  startline = 'typedef enum IRAux {'
  endline = '} IRAux;'
  lines = [
    startline,
    '  // Do not edit. Generated by %s' % scriptname,
    '  IRAuxNone = 0,',
  ]
  for s in auxTypes:
    lines.append("  {0},".format(s))
  lines.append(endline)
  if DEBUG:
    print("\n".join(lines))
  replaceInSourceFile(SRCFILE_IR_OP_H, startline, "\n"+endline, "\n".join(lines))
def gen_IROpFlag():
  """Regenerate the `enum IROpFlag` bitflag enum in src/ir/op.h.

  Each entry in OpFlags becomes `IROpFlag<Name> = 1 << n` with its comment;
  OpFlags is capped at 31 entries at module load so shifts stay in 32 bits.
  """
  startline = 'typedef enum IROpFlag {'
  endline = '} IROpFlag;'
  lines = [
    startline,
    '  // Do not edit. Generated by %s' % scriptname,
    '  IROpFlagNone = 0,',
  ]
  nameWidth = reduce(lambda a, v: max(a, len(v[0])), OpFlags, 0)
  value = 0
  for name, comment in OpFlags:
    lines.append("  IROpFlag{0:<{nameWidth}} = 1 << {1:<2},// {2}".format(
      name,
      value,
      comment,
      nameWidth=nameWidth))
    value += 1
  lines.append(endline)
  if DEBUG:
    print("\n".join(lines))
  replaceInSourceFile(SRCFILE_IR_OP_H, startline, "\n"+endline, "\n".join(lines))
def gen_IROpConvTable(baseArch :Arch, typeCodes :[str]):
  """Regenerate _IROpConvMap in src/ir/op.c: a TypeCode x TypeCode matrix
  mapping (fromType, toType) to the Conv* op performing that conversion
  (OpNil where no conversion op exists).

  Fails loudly when a Conv op covers the same pair twice, or when some
  TypeCode never appears as a conversion source or destination.
  """
  # generates matrix table of size TypeCode_NUM_END*2,
  # mapping fromType*toType to a Conv* operation.
  # const IROp _IROpConvMap[TypeCode_NUM_END][TypeCode_NUM_END];
  # typeCodeToIRType  TypeCode => [irtype ...]
  # irTypeToTypeCode  irtype => [ TypeCode ... ]
  # print("irTypeToTypeCode", rep(irTypeToTypeCode))
  # print("typeCodeToIRType", rep(typeCodeToIRType))
  table = {"bool":{}} # input type => { output type => op }
  outTypeCodes = set(["bool"]) # tracks output types, used for error checking
  for op in baseArch.ops:
    if not op.name.startswith("Conv"):
      continue
    if not isinstance(op.input, Atom):
      raise Exception("conversion op %s is expected to have exactly 1 input" % op.name)
    if not isinstance(op.output, Atom):
      raise Exception("conversion op %s is expected to have exactly 1 output" % op.name)
    # One Conv op can cover several TypeCodes (e.g. irtype i32 covers both
    # int32 and uint32); register it for every (in, out) TypeCode pair.
    for inTypeCode in irTypeToTypeCode[op.input]:
      for outTypeCode in irTypeToTypeCode[op.output]:
        # print("%s\t-> %s\t%s" % (inTypeCode, outTypeCode, op.name))
        outTypeCodes.add(outTypeCode)
        m = table.get(inTypeCode)
        if m is None:
          m = {}
          table[inTypeCode] = m
        if outTypeCode in m:
          raise Exception("duplicate conflicting conversion ops for %s->%s: %s and %s" % (
            inTypeCode, outTypeCode, m[outTypeCode].name, op.name))
        m[outTypeCode] = op
  # fail if not all known TypeCodes are covered
  if len(table) < len(typeCodes) - len(typeCodeAliases):
    diff = set(typeCodes).difference(table)
    raise Exception("not all source types covered; missing: %s -> *" % " -> *, ".join(diff))
  if len(outTypeCodes) < len(typeCodes) - len(typeCodeAliases):
    diff = set(typeCodes).difference(outTypeCodes)
    raise Exception("not all destination types covered; missing: * -> %s" % ", * -> ".join(diff))
  # generate 2-D table
  startline = 'const IROp _IROpConvMap[TypeCode_NUM_END][TypeCode_NUM_END] = {'
  endline = '};'
  lines = [
    startline,
    '  // Do not edit. Generated by %s' % scriptname,
  ]
  # TypeCodes must be listed in order of the TypeCode enum.
  # typeCodes is parsed from src/types.h
  missing = []
  for inTypeCode in typeCodes:
    lines.append("  { // %s -> ..." % inTypeCode)
    # resolve aliases (e.g. "int" -> "int32") before the table lookup
    inTypeCode = typeCodeAliases.get(inTypeCode,inTypeCode)
    m = table.get(inTypeCode)
    for outTypeCode in typeCodes:
      outTypeCodeC = typeCodeAliases.get(outTypeCode,outTypeCode)
      opname = "Nil"
      if m is not None:
        op = m.get(outTypeCodeC)
        if op is not None:
          opname = op.name
      if opname == "Nil" and inTypeCode != outTypeCodeC:
        if (outTypeCodeC[0] == "u" and outTypeCodeC[1:] == inTypeCode) or \
           (inTypeCode[0] == "u" and inTypeCode[1:] == outTypeCodeC):
          # ignore signed differences. e.g. uint32 -> int32
          pass
        else:
          missing.append("%s -> %s" % (inTypeCode, outTypeCode))
      lines.append("    /* -> %s */ Op%s," % (outTypeCode, opname))
    lines.append("  },")
  # TODO: fix this so that we don't print extensions with sign diffs, e.g. i16 -> u32.
  # # if any pairs are missing, print but don't fail
  # if len(missing) > 0:
  #   print("conversion: Not all types covered by conversion. Missing:\n  %s" % (
  #     "\n  ".join(missing)
  #   ), file=sys.stderr)
  # finalize table
  lines.append(endline)
  if DEBUG:
    print("\n".join(lines))
  replaceInSourceFile(SRCFILE_IR_OP_C, startline, "\n"+endline, "\n".join(lines))
def gen_IROpSwitches(baseArch :Arch, typeCodes :[str], astOps):
  """Regenerate the AST-token -> IROp nested C switch in src/ir/op.c.

  Emits `switch (type1) { ... switch (type2) { ... switch (tok) { ... } } }`
  between the //!BEGIN_AST_TO_IR_OP_SWITCHES / //!END markers. 1-input ops
  (prefix column 1 of astOpToIROpPrefix) are filed under type2 == nil;
  2-input ops are keyed by the "irtype1 irtype2" input signature.
  """
  # # preprocess astOps
  # astOpMap = {}
  # longestTok = 0
  # longestAstComment = 0
  # for tok, comment in astOps:
  #   astOpMap[tok] = comment
  #   longestTok = max(longestTok, len(tok))
  #   longestAstComment = max(longestAstComment, len(comment))
  # map ops
  iropsByInput = {} # inputSig => [ (astPrefix, Op) ... ]
  for astTok, opPrefixes in astOpToIROpPrefix.items():
    opPrefix1Input, opPrefix2Input = opPrefixes
    for op in baseArch.ops:
      if (opPrefix1Input and op.name.startswith(opPrefix1Input)) or \
         (opPrefix2Input and op.name.startswith(opPrefix2Input)):
        v = iropsByInput.get(op.inputSig, [])
        v.append((astTok, op))
        iropsByInput[op.inputSig] = v
  # reverse alias map, e.g. "int32" => ["int"], so alias cases fall through
  revTypeCodeAliases = {}
  for alias, target in typeCodeAliases.items():
    v = revTypeCodeAliases.get(target,[])
    v.append(alias)
    revTypeCodeAliases[target] = v
  def genAstOpToIrOpSwitch(lines, typeCode, ops):
    # Emit one `case TypeCode_X: switch (tok) {...}` block for type2==typeCode.
    for alias in revTypeCodeAliases.get(typeCode,[]):
      lines.append('    case TypeCode_%s:' % alias)
    lines.append('    case TypeCode_%s: switch (tok) {' % typeCode)
    longestAstOp = 0
    longestIrOp = 0
    for astOp, op in ops:
      longestAstOp = max(longestAstOp, len(astOp))
      longestIrOp = max(longestIrOp, len(op.name))
    for astOp, op in ops:
      lines.append('      case %-*s : return Op%-*s ;// %s -> %s' %
        (longestAstOp, astOp, longestIrOp, op.name, op.inputSig, op.outputSig))
    lines.append('      default: return OpNil;')
    lines.append('    }')
  startline = '  //!BEGIN_AST_TO_IR_OP_SWITCHES'
  endline = '  //!END_AST_TO_IR_OP_SWITCHES'
  lines = [
    startline,
    '// Do not edit. Generated by %s' % scriptname,
  ]
  lines.append('switch (type1) {')
  i = 0
  for type1 in typeCodes:
    if type1 in typeCodeAliases:
      continue
    for alias in revTypeCodeAliases.get(type1,[]):
      lines.append('  case TypeCode_%s:' % alias)
    lines.append('  case TypeCode_%s:' % type1)
    lines.append('  switch (type2) {')
    # 1-input
    ops1 = [] # [ (astPrefix, Op) ... ]
    for irtype in typeCodeToIRType[type1]:
      ops1 += iropsByInput.get(irtype, [])
    # print(rep(ops1))
    if len(ops1) > 0:
      genAstOpToIrOpSwitch(lines, 'nil', ops1)
    # 2-input
    for type2 in typeCodes:
      if type2 in typeCodeAliases:
        continue
      ops2 = [] # [ (astPrefix, Op) ... ]
      for irtype1 in typeCodeToIRType[type1]:
        for irtype2 in typeCodeToIRType[type2]:
          ops2 += iropsByInput.get(irtype1 + " " + irtype2, [])
      if len(ops2) > 0:
        genAstOpToIrOpSwitch(lines, type2, ops2)
    lines.append('    default: return OpNil;')
    lines.append('  } // switch (type2)')
    lines.append('  default: return OpNil;')
  lines.append('} // switch (type1)')
  lines.append(endline)
  if DEBUG:
    print("\n  ".join(lines))
  replaceInSourceFile(SRCFILE_IR_OP_C, startline, endline, "\n  ".join(lines))
def mustFind(s, substr, start=None):
  """Like str.find() but raises Exception instead of returning -1."""
  try:
    return s.index(substr, start)
  except ValueError:
    raise Exception("substring %r not found" % substr) from None
def gen_IROpConstMap(baseArch :Arch, typeCodes :[str]):
  """Regenerate _IROpConstMap in src/ir/op.c: TypeCode -> constant-op.

  For each TypeCode, picks the first matching constant op, trying irtypes
  from most specific to most generic (order of typeCodeToIRType values).
  """
  constOps = createConstantOpsMap([baseArch]) # { irtype: [Op] }
  # print("constOps", constOps)
  startline = 'const IROp _IROpConstMap[TypeCode_NUM_END] = {'
  endline = '};'
  lines = [
    startline,
    '  // Do not edit. Generated by %s' % scriptname,
  ]
  for typeCode in typeCodes:
    # find best matching constop
    op = None
    for irtype in typeCodeToIRType[typeCodeAliases.get(typeCode, typeCode)]:
      op = constOps.get(irtype)
      if op:
        break
    if not op:
      raise Exception("no constant op for TypeCode %s" % typeCode)
    lines.append("  /* TypeCode_%-*s = */ Op%s," % (longestTypeCode, typeCode, op.name))
  lines.append(endline)
  if DEBUG:
    print("\n".join(lines))
  replaceInSourceFile(SRCFILE_IR_OP_C, startline, "\n"+endline, "\n".join(lines))
def gen_IROpNames(archs :[Arch]):
  """Regenerate the IROpNames string table in src/ir/op.c and the matching
  IROpNamesMaxLen #define (longest op name) in src/ir/op.h."""
  startline = 'const char* const IROpNames[Op_MAX] = {'
  endline = '};'
  lines = [
    startline,
    '  // Do not edit. Generated by %s' % scriptname,
  ]
  longestName = 0
  for a in archs:
    for op in a.ops:
      lines.append('  "%s",' % op.name)
      longestName = max(longestName, len(op.name))
    if a.isGeneric:
      # placeholder entry keeps indexes aligned with the Op_GENERIC_END enum slot
      lines.append('  "?", // Op_GENERIC_END')
  lines.append(endline)
  if DEBUG:
    print("\n".join(lines))
  replaceInSourceFile(SRCFILE_IR_OP_C, startline, endline, "\n".join(lines))
  # IROpNamesMaxLen:
  lines = [
    '// IROpNamesMaxLen = longest name in IROpNames',
    '//!Generated by %s -- do not edit.' % scriptname,
    '#define IROpNamesMaxLen %d' % longestName,
    '//!EndGenerated',
  ]
  replaceInSourceFile(SRCFILE_IR_OP_H, lines[0], lines[len(lines)-1], "\n".join(lines))
def gen_IR_OPS(archs :[Arch]):
  """Regenerate the `enum IROp` in src/ir/op.h.

  Emits one `Op<Name>,` per op, carrying over the lisp comments that
  preceded (commentsPre) or trailed (commentsPost) the op definition,
  plus Op_GENERIC_END after the generic arch and a final Op_MAX.
  """
  startline = 'typedef enum IROp {'
  endline = '} IROp;'
  lines = [ startline ]
  for a in archs:
    lines.append("  // generated by %s from %s" % (scriptname, a.sourcefile))
    for op in a.ops:
      for comment in op.commentsPre:
        lines.append("  //%s" % comment)
      postcomment = " ".join(op.commentsPost)
      if len(postcomment) > 0:
        postcomment = "\t//%s" % postcomment
      lines.append("  Op%s,%s" % (op.name, postcomment))
    if a.isGeneric:
      lines.append('')
      lines.append('  Op_GENERIC_END, // ---------------------------------------------')
  lines.append('')
  lines.append('  Op_MAX')
  lines.append(endline)
  if DEBUG:
    print("\n".join(lines))
  replaceInSourceFile(SRCFILE_IR_OP_H, startline, endline, "\n".join(lines))
def replaceInSourceFile(filename, findstart, findend, body):
  """Replace the span from `findstart` to `findend` (inclusive) in `filename`
  with `body`. Returns True if the file changed, False if it was identical.

  When the content is unchanged the file's mtime is still touched so that
  make-style build systems consider the target up to date. Honors DRY_RUN.
  """
  source = ""
  with open(filename, "r") as f:
    source = f.read()
  start = source.find(findstart)
  end = source.find(findend, start)
  if start == -1:
    raise Exception("can't find %r in %s" % (findstart, filename))
  if end == -1:
    raise Exception("can't find %r in %s" % (findend, filename))
  # safeguard to make sure that the new content contains findstart and findend so that
  # subsequent calls don't fail
  bodystart = body.find(findstart)
  if bodystart == -1:
    raise Exception(
      ("Can't find %r in replacement body. Writing would break replacement." +
       " To rename the start/end line, first rename in source file %s then in %s") %
      (findstart, filename, scriptname))
  if body.find(findend, bodystart) == -1:
    raise Exception(
      ("Can't find %r in replacement body. Writing would break replacement." +
       " To rename the start/end line, first rename in source file %s then in %s") %
      (findend, filename, scriptname))
  source2 = source[:start] + body + source[end + len(findend):]
  # write changes only if we modified the source
  if source2 == source:
    if not DRY_RUN:
      # touch mtime to satisfy build systems like make
      with os.fdopen(os.open(filename, flags=os.O_APPEND)) as f:
        os.utime(f.fileno() if os.utime in os.supports_fd else filename)
    return False
  if DRY_RUN:
    print(scriptname + ": patch", filename, " (dry run)")
  else:
    print(scriptname + ": patch", filename)
    with open(filename, "w") as f:
      f.write(source2)
  return True
# ------------------------------------------------------------------------------------------------
# types
def createConstantOpsMap(archs :[Arch]) -> {str:[Op]}:
  """Map each IR result type (e.g. "i32") to the single constant op producing it.

  Raises when a constant op has a non-atomic (multi-value) output, or when
  two constant ops claim the same output type.
  """
  constants = (op for a in archs for op in a.ops if "Constant" in op.flags)
  result = {}
  for op in constants:
    if not isinstance(op.output, str):
      raise Exception("constant op %r produces multiple outputs; should produce one" % op.name)
    if op.output in result:
      raise Exception("duplicate constant op %r for type %r" % (op.name, op.output))
    result[op.output] = op
  return result
def loadASTOpTokens(filename :str) -> [(str,str)]:
  """Parse src/parse/token.h and return the AST operator tokens as
  (name, repr) pairs found between T_PRIM_OPS_START and T_PRIM_OPS_END.

  Also cross-checks that every token found is a key of astOpToIROpPrefix
  and vice versa, raising on any mismatch.

  `started` is a 3-state variable: 0 = before "#define TOKENS(",
  1 = inside the macro but before T_PRIM_OPS_START (note: the assignment
  `started = True` relies on True == 1), 2 = collecting tokens.
  """
  tokens = []
  started = 0
  ended = False
  startSubstring = b"#define TOKENS("
  startName = "T_PRIM_OPS_START"
  endName = "T_PRIM_OPS_END"
  pat = re.compile(r'\s*_\(\s*(\w+)\s*,\s*"([^"]*)"') # _( name, "repr" )
  verifiedNames = set()
  with open(filename, "rb") as fp:
    for line in fp:
      if started == 0:
        if line.find(startSubstring) != -1:
          started = True
      else:
        s = line.decode("utf8")
        m = pat.match(s)
        if m:
          name = m.group(1)
          if started == 1:
            if name == startName:
              started = 2
          else:
            if name == endName:
              ended = True
              break
            if name in astOpToIROpPrefix:
              verifiedNames.add(name)
              # un-escape embedded quotes in the token's printable form
              tokens.append((name, m.group(2).replace('\\"', '"')))
            else:
              raise Exception("AST operator token %r missing in astOpToIROpPrefix map" % name)
  if started == 0:
    raise Exception("unable to find start substring %r" % startSubstring)
  if started == 1:
    raise Exception("unable to find start value %r" % startName)
  if not ended:
    raise Exception("unable to find ending value %r" % endName)
  if len(astOpToIROpPrefix) != len(verifiedNames):
    diff = set(astOpToIROpPrefix.keys()).difference(verifiedNames)
    raise Exception(
      "%d name(s) in astOpToIROpPrefix missing in %s: %s" %
      (len(diff), filename, ", ".join(diff)))
  return tokens
def loadTypeCodes(filename :str) -> [str]:
  """Parse src/types.h and return the TypeCode names from the TYPE_CODES
  macro, in declaration order, stopping before NUM_END.

  Cross-checks that every TypeCode is known to this script (a key of
  typeCodeToIRType or typeCodeAliases) and that no script-side entry is
  missing from the header; raises on any mismatch.
  """
  typeCodes = []
  started = False
  ended = False
  startSubstring = b"#define TYPE_CODES"
  endName = "NUM_END"
  verifiedTypeCodes = set()
  pat = re.compile(r'\s*_\(\s*(\w+)') # _( name, ... )
  with open(filename, "rb") as fp:
    for line in fp:
      if not started:
        if line.find(startSubstring) != -1:
          started = True
      else:
        s = line.decode("utf8")
        m = pat.match(s)
        if m:
          name = m.group(1)
          if name == endName:
            ended = True
            break
          else:
            typeCodes.append(name)
            if name in typeCodeToIRType or name in typeCodeAliases:
              verifiedTypeCodes.add(name)
            else:
              raise Exception("TypeCode %r missing in typeCodeToIRType map" % name)
          # print(s, m.group(1))
  if not started:
    raise Exception("unable to find start substring %r" % startSubstring)
  if not ended:
    raise Exception("unable to find ending typecode %r" % endName)
  if len(typeCodeToIRType) + len(typeCodeAliases) != len(verifiedTypeCodes):
    diff = set(typeCodeToIRType.keys()).difference(verifiedTypeCodes)
    raise Exception(
      "%d TypeCode(s) in typeCodeToIRType missing in %s: %s" %
      (len(diff), filename, ", ".join(diff)))
  return typeCodes
# ------------------------------------------------------------------------------------------------
# parse input lisp
def parse_op(xs, commentsPre) -> Op:
  """Build an Op from one parsed lisp form.

  Expected shape (list indexes in the comment below):
  """
  # Name INPUT -> OUTPUT ATTRIBUTES
  # 0    1     2  3      4...
  #
  # Examples:
  #   (AddI16 (i16 i16) -> i16  Commutative ResultInArg0)
  #   (ConstI32 () -> u32  Constant (aux u32))
  #
  if len(xs) < 4:
    raise Exception("not enough elements in op: %r" % xs)
  name = xs[0]
  if not isinstance(name, Symbol):
    err("operation should start with a name. %r" % xs)
  if xs[2] != "->":
    err("missing '->' in %r" % xs)
  input = xs[1]
  output = xs[3]
  attributes = xs[4:]
  # print(name, input, output, attributes)
  return Op(name, input, output, attributes, commentsPre)
def parse_arch_file(filename: str) -> Exp:
  """Parse one arch_*.lisp file into an Arch.

  The file content is wrapped in an extra pair of parens so the whole file
  parses as one list of top-level forms. An (ops ...) form yields Op objects;
  any other form (name value) sets the matching Arch attribute. Comment
  sub-expressions (";" ...) attach to the following op (commentsPre) and
  (";;" ...) to the preceding op on the same line (commentsPost).
  """
  a = Arch(filename)
  doc = None
  with open(filename, "rb") as fp:
    doc = parse_lisp( "(\n" + fp.read().decode("utf8") + "\n)" )
  # print(doc)
  for e in doc:
    if isinstance(e, list) and (e[0] == ";" or e[0] == ";;"):
      continue # skip comment
    # accumulators
    commentsPre = []
    commentsPost = []
    # print(e)
    key = e[0] #.lower()
    if key == "ops":
      lastop = None
      for x in e[1:]:
        if not isinstance(x, list):
          err("unexpected %r in ops spec" % x)
        if len(x) > 0:
          if x[0] == ";":
            comment = decodeComment(x[1]) if len(x) > 1 else ""
            # if len(comment) > 0 or len(commentsPre) > 0:
            commentsPre.append(comment)
            continue
          if x[0] == ";;":
            comment = decodeComment(x[1]) if len(x) > 1 else ""
            # if len(comment) > 0 or len(commentsPost) > 0:
            commentsPost.append(comment)
            continue
          # a new op begins: flush trailing comments onto the previous op
          if lastop and len(commentsPost):
            lastop.commentsPost = commentsPost
            commentsPost = []
          lastop = parse_op(x, commentsPre)
          a.ops.append(lastop)
          commentsPre = []
      # trailing/last op
      if lastop and len(commentsPost):
        lastop.commentsPost = commentsPost
        commentsPost = []
    elif hasattr(a, key):
      if len(e) != 2:
        err("unexpected attribute value %r" % e[1:])
      setattr(a, key, e[1])
    else:
      err("unexpected arch attribute %r" % key)
  return a
# S-expression / LISP parser from https://norvig.com/lispy.html
def parse_lisp(program: str) -> Exp:
  """Read a Scheme/S-expression string into nested Python lists and atoms.

  Comments survive parsing as special (";" ...) / (";;" ...) sub-lists,
  courtesy of tokenize().
  """
  return read_from_tokens(tokenize(program))
# Matches "code ; comment\n": group 1 = text before the semicolons,
# group 2 = the comment body (up to end of line).
comment_re = re.compile(r'^([^;\n]*);+([^\n]*)\n', re.M)
# Splits parens into standalone tokens.
tokenize_re = re.compile(r'([\(\)])')
# Comments are smuggled through the tokenizer as single tokens by mapping
# space/parens to control characters; decodeComment reverses the mapping.
comment_enc_table = str.maketrans(" ()", "\x01\x02\x03")
comment_dec_table = str.maketrans("\x01\x02\x03", " ()")
def encodeComment(s):
  # Make a comment safe to pass through tokenize() as one token.
  return s.translate(comment_enc_table)
def decodeComment(s):
  # Restore the original comment text hidden by encodeComment().
  return s.translate(comment_dec_table)
def tokenize(text: str) -> list:
  """Convert a string of characters into a list of tokens.

  Line comments are rewritten into (";" <encoded>) forms — or (";;" ...)
  when code precedes the comment on the same line (a trailing comment) —
  so they survive parsing; encodeComment() keeps each comment a single token.
  """
  includeComments = True
  def replaceComment(m):
    commentType = ';'
    # print(repr(m.group(1).strip()))
    if len(m.group(1).strip()) > 0:
      commentType = ';;' # trailing
    return "%s(%s %s)\n" % (m.group(1), commentType, encodeComment(m.group(2)))
  def stripComment(m):
    return m.group(1) + "\n"
  if includeComments:
    text = comment_re.sub(replaceComment, text)
  else:
    text = comment_re.sub(stripComment, text)
  # text = text.replace(',', ' ')
  text = tokenize_re.sub(" \\1 ", text) # "a(b)c" => "a ( b ) c"
  # print(text)
  return text.split()
def read_from_tokens(tokens: list) -> Exp:
  """Read one expression from a token list, consuming the tokens it uses.

  Recursive-descent: "(" opens a nested list, ")" closes it, anything else
  is converted by atom(). Raises SyntaxError on EOF or a stray ")".
  """
  if len(tokens) == 0:
    raise SyntaxError('unexpected EOF')
  token = tokens.pop(0)
  if token == '(':
    L = []
    while tokens[0] != ')':
      L.append(read_from_tokens(tokens))
    tokens.pop(0) # pop off ')'
    # if len(L) == 1 and isinstance(L[0], list) and L[0][0] == ';':
    #   # unwrap comment
    #   return L[0]
    return L
  elif token == ')':
    raise SyntaxError('unexpected )')
  else:
    return atom(token)
def atom(token: str) -> Atom:
"Numbers become numbers; every other token is a symbol."
try: return int(token)
except ValueError:
try: return float(token)
except ValueError:
return Symbol(token)
def err(msg):
  """Print *msg* as an error and terminate with exit status 1.

  Fix: the message now goes to stderr (it previously went to stdout,
  which hides errors when the script's output is piped or redirected).
  """
  print(msg, file=sys.stderr)
  sys.exit(1)
main()
| [
"rasmus@notion.se"
] | rasmus@notion.se |
5d5f19e950b769abbcaddf745393f2ddc66ce44e | e3c8f786d09e311d6ea1cab50edde040bf1ea988 | /Incident-Response/Tools/cyphon/cyphon/responder/couriers/migrations/0001_initial.py | 63f51235b83605b3ed2479a1caecf6f191b6d741 | [
"LicenseRef-scancode-proprietary-license",
"GPL-3.0-only",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-other-copyleft",
"MIT"
] | permissive | foss2cyber/Incident-Playbook | d1add8aec6e28a19e515754c6ce2e524d67f368e | a379a134c0c5af14df4ed2afa066c1626506b754 | refs/heads/main | 2023-06-07T09:16:27.876561 | 2021-07-07T03:48:54 | 2021-07-07T03:48:54 | 384,988,036 | 1 | 0 | MIT | 2021-07-11T15:45:31 | 2021-07-11T15:45:31 | null | UTF-8 | Python | false | false | 2,005 | py | # -*- coding: utf-8 -*-
# Copyright 2017-2019 ControlScan, Inc.
#
# This file is part of Cyphon Engine.
#
# Cyphon Engine is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Cyphon Engine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cyphon Engine. If not, see <http://www.gnu.org/licenses/>.
#
# Generated by Django 1.10.1 on 2017-03-20 16:23
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the couriers app: creates the Courier model.

    A Courier has a unique name, a many-to-many link to Actions (exposed on
    Action as `emissaries`), a required-on-delete Passport FK (nullable),
    and an optional Visa FK; (passport, visa) pairs are unique together.
    """
    initial = True
    dependencies = [
        ('visas', '0001_initial'),
        ('passports', '0001_initial'),
        ('actions', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Courier',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=40, unique=True)),
                ('endpoints', models.ManyToManyField(related_name='emissaries', related_query_name='emissary', to='actions.Action', verbose_name='actions')),
                ('passport', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='passports.Passport')),
                ('visa', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='visas.Visa')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.AlterUniqueTogether(
            name='courier',
            unique_together=set([('passport', 'visa')]),
        ),
    ]
| [
"a.songer@protonmail.com"
] | a.songer@protonmail.com |
377925c7fb022acbe35a35694a0ca759689ceeb8 | 968b3b61cd47c5ec114a4747ccd481cb653d6ab4 | /robot.py | a629b60715c181fa8a74eb3034420c2bf543144f | [] | no_license | aakwah/python_projects | 1108cb4aab68eb7919ba90cdab0b9b1a18ae0eef | 2772f1a936fd24f60bc75d18cb375012fc527fbc | refs/heads/master | 2022-08-15T02:12:02.763541 | 2020-05-22T14:10:50 | 2020-05-22T14:10:50 | 266,073,169 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,440 | py | import os
import time
import playsound
import speech_recognition as sr
from gtts import gTTS
def speak(text):
    """Synthesize *text* with Google TTS (English) and play it aloud.

    NOTE(review): writes/overwrites "voice.mp3" in the current working
    directory on every call and never deletes it; requires network access
    for gTTS.
    """
    tts = gTTS(text=text, lang="en")
    filename = "voice.mp3"
    tts.save(filename)
    playsound.playsound(filename)
def get_audio():
    """Listen once on the default microphone and return recognized speech.

    Returns the transcript from Google's speech recognizer, or "" when
    recognition fails (the exception is printed, not raised, so the caller's
    loop keeps running).
    """
    r = sr.Recognizer()
    with sr.Microphone() as source:
        audio = r.listen(source)
        said = ""
        try:
            said = r.recognize_google(audio)
            print(said)
        except Exception as e:
            # Broad catch is deliberate here (keeps the assistant alive on
            # network/recognition errors), but consider narrowing it to
            # sr.UnknownValueError / sr.RequestError.
            print("Exception: " + str(e))
    return said
# Main assistant loop: greet once, then keep listening and answering.
# Each phrase check is an independent `if` on purpose: a single utterance
# containing several trigger phrases fires several responses, exactly as
# the original behaved.
speak("hello and welcome, how can I help you")
while True:
    text = get_audio()
    if "what is your name" in text:
        speak("My name is Robot")
    if "how are you" in text:
        speak("I am fine thank you, and what about you?")
    if "how old are you" in text:
        speak("sorry it is not your business")
    if "are you stupid" in text:
        speak("no, I am smart")
    if "are you donkey" in text:
        speak("no I am robot")
    if "camera" in text:
        speak("ok sir")
        os.system('/usr/bin/cheese')
    if "music" in text:
        speak("ok sir")
        os.system('/usr/bin/drumstick-vpiano')
    if "what is the time now" in text:
        # Bug fix: this previously spoke a hard-coded timestamp
        # ("Fr 22 Mai 2020 13:21:55 CEST"); report the actual current time
        # using the `time` module the file already imports.
        speak(time.strftime("%A %d %B %Y %H:%M:%S"))
    if "shark" in text:
        os.system('/usr/bin/mplayer /home/ahmed/baby_shark.mp3')
    if "Mario" in text:
        speak("ok mamdam")
        os.system('/snap/bin/mari0')
| [
"ahmed.elakwah@gmail.com"
] | ahmed.elakwah@gmail.com |
fe477ca8839d24ef5c701f59cf5ec4ec9470a23a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03547/s710097147.py | ae361a95c0c868de2d887122c718078902ffa546 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | X,Y = input().split()
S=[X,Y]
S.sort()
if X==Y:
print("=")
else:
if S[0]==X:
print("<")
elif S[0]==Y:
print(">") | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
0bac71a09f4284f19dd2c25666ccf560c688d990 | f57e150d756b929b73b83e95fdbbd71b29f73291 | /main.py | b7cd8c9c766e3c476b118971fca2c1fbfef23c08 | [] | no_license | mlobina/sqlalchemy | 084bd565ac20f4ef9abbb218d11055453c58059a | 8e19a76da3fcfa3756aa4463bb3b34d9891e7753 | refs/heads/master | 2023-03-02T14:07:38.715619 | 2021-02-09T20:23:19 | 2021-02-09T20:23:19 | 336,081,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,962 | py | import sqlalchemy as sq
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy import func, distinct
# Declarative base, Postgres engine, and session factory for the music DB.
Base = declarative_base()
engine = sq.create_engine('postgresql+psycopg2://postgres:postgres@10.0.65.2:5432/mus_db')
Session = sessionmaker(bind=engine)
class Genre(Base):
    # A musical genre; many-to-many with musicians via `genremusician`.
    __tablename__ = 'genre'
    id = sq.Column(sq.Integer, primary_key=True)
    name = sq.Column(sq.String)
    musician = relationship('Musician', secondary='genremusician')
# Association table backing the Genre <-> Musician many-to-many relation.
genremusician = sq.Table(
    'genremusician', Base.metadata,
    sq.Column('musician_id', sq.Integer, sq.ForeignKey('musician.id')),
    sq.Column('genre_id', sq.Integer, sq.ForeignKey('genre.id')))
class Musician(Base):
    # An artist; many-to-many with genres and albums.
    __tablename__ = 'musician'
    id = sq.Column(sq.Integer, primary_key=True)
    name = sq.Column(sq.String)
    nickname = sq.Column(sq.String)
    genre = relationship('Genre', secondary='genremusician')
    album = relationship('Album', secondary='musicianalbum')
# Association table backing the Musician <-> Album many-to-many relation.
musicianalbum = sq.Table(
    'musicianalbum', Base.metadata,
    sq.Column('musician_id', sq.Integer, sq.ForeignKey('musician.id')),
    sq.Column('album_id', sq.Integer, sq.ForeignKey('album.id')))
class Album(Base):
    # An album; owns its tracks (one-to-many) and links to its musicians.
    __tablename__ = 'album'
    id = sq.Column(sq.Integer, primary_key=True)
    title = sq.Column(sq.String)
    release_year = sq.Column(sq.Integer)
    musician = relationship('Musician', secondary='musicianalbum')
    track = relationship('Track', backref='album')
class Track(Base):
    # A single track; belongs to one album and may appear in many collections.
    __tablename__ = 'track'
    id = sq.Column(sq.Integer, primary_key=True)
    title = sq.Column(sq.String)
    duration = sq.Column(sq.Integer)
    album_id = sq.Column(sq.Integer, sq.ForeignKey('album.id'))
    collection = relationship('Collection', secondary='trackcollection')
# Association table backing the Track <-> Collection many-to-many relation.
trackcollection = sq.Table(
    'trackcollection', Base.metadata,
    sq.Column('track_id', sq.Integer, sq.ForeignKey('track.id')),
    sq.Column('collection_id', sq.Integer, sq.ForeignKey('collection.id')))
class Collection(Base):
    # A compilation of tracks drawn from many albums.
    __tablename__ = 'collection'
    id = sq.Column(sq.Integer, primary_key=True)
    title = sq.Column(sq.String)
    release_year = sq.Column(sq.Integer)
    track = relationship('Track', secondary='trackcollection')
# Create all tables defined above and open a working session.
Base.metadata.create_all(engine)
session = Session()
# --- scratch queries kept for reference (all commented out) ---
#q = session.query(Track).filter(Track.duration == '3').all()
#print(q)
#for tr in q:
#    print(tr.title)
#q = session.query(Album)
#res = q.all()
#print(res)
#for a in res:
#    print(a.title, a.release_year)
#q = session.query(Genre.name, func.count(Musician.id)).join(Musician.genre).group_by(Genre.id).all()
#print(q)
#for g, c in q:
#    print(g, c)
#q = session.query(func.count(Track.id)).join(Album.track).filter(Album.release_year.in_(['2019', '2020'])).all()
#print(q)
#q = session.query(Album.title, func.avg(Track.duration)).join(Album.track).group_by(Album.id).all()
#for t, d in q:
#    print(t, d)
#q = session.query(Musician.name).join(Album.musician).filter(Album.release_year != '2020').all()
#for n in q:
#    print(n)
#q = session.query(Collection.title).join(Collection.track).join(Track.album).join(Album.musician).filter(Musician.name == 'Roxette').all()
#print(q)
#subq = session.query(Musician.name).join(Genre.musician).group_by(Musician.id).\
#having(func.count(Genre.id) > 1).subquery()
#q = session.query(Album.title).join(Album.musician).join(subq).filter(Musician.name.in_subq)
#print(q)  # (author note, translated) couldn't get this subquery approach to work
# Albums released in 2018.
for item in session.query(Album).filter(Album.release_year == '2018'):
    print(item.title, item.release_year)
print('_________')
# Longest track: order by duration descending, take the first row.
for item in session.query(Track).order_by(Track.duration.desc()).slice(0, 1):
    print(item.title, item.duration)
print('_________')
# Tracks with duration of at least 3.5.
for item in session.query(Track).filter(Track.duration >= 3.5):
    print(item.title, item.duration)
print('_________')
# Collections released between 2018 and 2020 inclusive.
for item in session.query(Collection).filter(Collection.release_year >= 2018, Collection.release_year <= 2020):
    print(item.title, item.release_year)
print('_________')
# Musicians whose name does not match the '%% %%' pattern (presumably
# single-word names -- confirm the intended escaping).
for item in session.query(Musician).filter(Musician.name.notlike('%% %%')):
    print(item.name)
print('_________')
# Tracks whose title contains "my".
for item in session.query(Track).filter(Track.title.like('%%my%%')):
    print(item.title)
print('_________')
# Genres ordered by their number of musicians, descending.
for item in session.query(Genre).join(Genre.musician).order_by(func.count(Musician.id).desc()).group_by(Genre.id):
    print(item.name, len(item.musician))
print('_________')
# Count of tracks on 2019-2020 albums.
track_list = []
for item in session.query(Track).join(Album).filter(2019 <= Album.release_year, Album.release_year <= 2020):
    track_list.append(item)
print(len(track_list))
print('_________')
# The same tracks, printed together with their album's release year.
for item in session.query(Track, Album).join(Album).filter(2019 <= Album.release_year, Album.release_year <= 2020):
    print(f'{item[0].title}, {item[1].release_year}')
print('_________')
# Average track duration per album.
for item in session.query(Album, func.avg(Track.duration)).join(Track).group_by(Album.id):
    print(f'{item[0].title}, {item[1]}')
print('_________')
# Musicians with no album released in 2020 (using ~...in_()).
subquery = session.query(distinct(Musician.name)).join(Musician.album).filter(Album.release_year == 2020)
for item in session.query(distinct(Musician.name)).filter(~Musician.name.in_(subquery)).order_by(
        Musician.name.asc()):
    print(f'{item}')
print('_________')
# Same result using notin_() instead of ~...in_().
subquery = session.query(distinct(Musician.name)).join(Musician.album).filter(Album.release_year == 2020)
for item in session.query(distinct(Musician.name)).filter(Musician.name.notin_(subquery)).order_by(
        Musician.name.asc()):
    print(f'{item}')
print('_________')
# Collections containing tracks by Sia.
for item in session.query(Collection).join(Collection.track).join(Track.album).join(Album.musician).filter(Musician.name == 'Sia'):
    print(item.title)
print('_________')
# Albums by musicians who play more than one genre.
for item in session.query(Album).join(Album.musician).join(Musician.genre).having(func.count(Genre.id) > 1).group_by(Album.id):
    print(item.title)
print('_________')
# Tracks that appear in no collection.
for item in session.query(Track).outerjoin(Track.collection).filter(Collection.id == None):
    print(item.title)
print('_________')
# Id of a collection holding track 20 (first match only).
col = session.query(Collection.id).join(Collection.track).filter(Track.id == 20).first()
print(col)
print('_________')
# Musicians having a track with the global minimum duration.
sub = session.query(func.min(Track.duration)).subquery()
for item in session.query(Musician, Track.duration).join(Musician.album).join(Track).group_by(Musician.id,
                                                                                             Track.duration).having(Track.duration == sub):
    print(item[0].name, item[1])
print('_________')
# Albums whose track count equals the smallest track count of any album.
subquery = session.query(func.count(Track.id)).group_by(Track.album_id).order_by(func.count(Track.id)).limit(1)
subquery1 = session.query(Track.album_id).group_by(Track.album_id).having(func.count(Track.id) == subquery)
for item in session.query(Album).join(Track).filter(Track.album_id.in_(subquery1)).order_by(Album.title):
    print(item.title)
| [
"mvlobina@gmsil.com"
] | mvlobina@gmsil.com |
7e624ff97ac10aa5b394c11d2fb2511a362df566 | 78d58111e5f491a660ad66c62b6598976d54948c | /_DTP_List_Of_All_Files.py | 7f0cf59a2f41bc7914ffbc524fd845c0143a053c | [] | no_license | parthi-ap/Python-demo | 4d400f84798ad33b517f3a1181e6f6c74b60d131 | aa13fee8e689d55edf93e586932b10385cb2e5b4 | refs/heads/master | 2022-11-05T12:04:22.716141 | 2020-06-14T14:13:28 | 2020-06-14T14:13:28 | 272,201,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 781 | py |
import glob
import xlwt
import xlrd
from natsort import natsorted, ns
# Collect every file name in the current directory, natural-sort them
# (case-insensitive), and write the list into List_Of_Files.xls.
wbk = xlwt.Workbook()
## worksheet holding one file name per row
sheet = wbk.add_sheet('File_Names')
print("*"*80)
print("Reading all File Names")
print("*"*80)
## Gather all files in the current folder and natural-sort in one step
## (the intermediate copy loop and unused row_no_sheet1 were removed).
file_list = natsorted(glob.glob("*.*"), alg=ns.IGNORECASE)
## Rows start at 1; row 0 is left free, matching the original layout.
for row_no, each_file in enumerate(file_list, start=1):
    sheet.write(row_no, 0, each_file)  ## Write File name
wbk.save("List_Of_Files.xls")
print("*"*80)
print("List_Of_Files.xls saved")
print("*"*80)
###################################################
###################################################
| [
"noreply@github.com"
] | parthi-ap.noreply@github.com |
334f4cf219abe0b7c4ce1ef12f2366dd7a08004d | e5a36361c6ed2ef74f7cd76fdb5c8ee3a97a371d | /mysite/settings.py | bd1dad90bdbc18d3a88418b2714ba456a93b4bfb | [] | no_license | mintiadry/my-first-blog | 7345cd96a4ff683bcb39a0b77987bd24f17b4b17 | ae311d5bdc7de4cb6be9246ea8606fe27debdb52 | refs/heads/master | 2022-12-30T13:38:39.087775 | 2020-10-20T08:41:12 | 2020-10-20T08:41:12 | 304,548,349 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,189 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.16.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control -- rotate it and
# load it from an environment variable for any real deployment.
SECRET_KEY = '%4zih!o-8_dvc&x!tp34uq2g_6cmmg!78v8+6vti&*(=!s_#-s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# Serve only locally and on PythonAnywhere subdomains.
ALLOWED_HOSTS = ["127.0.0.1",".pythonanywhere.com"]
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    "blog",
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'ja'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# Destination directory for `collectstatic`.
STATIC_ROOT=os.path.join(BASE_DIR,"static")
| [
"takimoto.aiskk@gmail.com"
] | takimoto.aiskk@gmail.com |
f0b0e112aa2b5c6eee92980d89e572bde163f252 | 96e54dcebaa9f065c9f127fab8579ff129998837 | /src/cuda/deprecated/LeNetCUDA/PycaffeTrain-LeNet.py | 7c92b81725de6854c51db63f2fa7eccdb8b7cb01 | [] | no_license | prbodmann/radiation-benchmarks | 9e0e7b12f4b67d9d884b1a7b5e397736d017c2db | d842daa87e70a78dd7ea88dd6ac8cbac36291f0d | refs/heads/master | 2020-04-15T23:19:31.836567 | 2019-01-24T12:48:22 | 2019-01-24T12:48:22 | 165,102,669 | 0 | 0 | null | 2019-01-10T17:30:43 | 2019-01-10T17:30:43 | null | UTF-8 | Python | false | false | 7,875 | py | caffe_root = '/home/carol/radiation-benchmarks/src/cuda/LeNetCUDA/caffe/' # this file should be run from {caffe_root}/examples (otherwise change this line)
import sys
sys.path.insert(0, caffe_root + 'python')
import caffe
import lmdb
import numpy as np
from caffe import layers as L, params as P, proto, to_proto
# File-system layout: the MNIST LMDB data sets and the prototxt files this
# script generates (train/test/deploy net definitions plus solver config).
root = '/home/your-account/DL-Analysis/'
train_list = root + 'mnist/mnist_train_lmdb'
test_list = root + 'mnist/mnist_test_lmdb'
train_proto = root + 'mnist/LeNet/train.prototxt'
test_proto = root + 'mnist/LeNet/test.prototxt'
deploy_proto = root + 'mnist/LeNet/deploy.prototxt'
solver_proto = root + 'mnist/LeNet/solver.prototxt'
def LeNet(data_list, batch_size, IncludeAccuracy = False, deploy = False):
    """Build a LeNet network description.

    Returns the train net (loss only), the test net (loss + accuracy), or
    the deploy net (softmax probabilities) depending on the flags.
    """
    if deploy:
        # Deploy nets take a raw input blob instead of an LMDB data layer.
        data = L.Input(input_param = {'shape': {'dim': [64, 1, 28, 28]}})
    else:
        data, label = L.Data(source = data_list, backend = P.Data.LMDB,
                             batch_size = batch_size, ntop = 2,
                             transform_param = dict(scale = 0.00390625))
    # conv -> pool -> conv -> pool -> fc -> relu -> fc, all Xavier-initialized.
    conv1 = L.Convolution(data, kernel_size = 5, stride = 1, num_output = 20,
                          pad = 0, weight_filler = dict(type = 'xavier'))
    pool1 = L.Pooling(conv1, pool = P.Pooling.MAX, kernel_size = 2, stride = 2)
    conv2 = L.Convolution(pool1, kernel_size = 5, stride = 1, num_output = 50,
                          pad = 0, weight_filler = dict(type = 'xavier'))
    pool2 = L.Pooling(conv2, pool = P.Pooling.MAX, kernel_size = 2, stride = 2)
    ip1 = L.InnerProduct(pool2, num_output = 500,
                         weight_filler = dict(type = 'xavier'))
    relu1 = L.ReLU(ip1, in_place = True)
    ip2 = L.InnerProduct(relu1, num_output = 10,
                         weight_filler = dict(type = 'xavier'))
    if deploy:
        # deploy net: class probabilities only
        prob = L.Softmax(ip2)
        return to_proto(prob)
    if IncludeAccuracy:
        # test net: loss plus accuracy
        loss = L.SoftmaxWithLoss(ip2, label)
        Accuracy = L.Accuracy(ip2, label)
        return to_proto(loss, Accuracy)
    # train net: loss only
    loss = L.SoftmaxWithLoss(ip2, label)
    return to_proto(loss)
def WriteNet():
    """Serialize the train/test/deploy network definitions to prototxt files."""
    specs = [
        (train_proto, train_list, 64, False, False),   # train net
        (test_proto, test_list, 100, True, False),     # test net
        (deploy_proto, 'not need', 64, False, True),   # deploy net
    ]
    for path, data_list, batch, with_accuracy, is_deploy in specs:
        with open(path, 'w') as f:
            f.write(str(LeNet(data_list, batch,
                              IncludeAccuracy = with_accuracy,
                              deploy = is_deploy)))
def GenerateSolver(solver_file, train_net, test_net):
    """Build an SGD solver configuration and write it to *solver_file*."""
    solver = proto.caffe_pb2.SolverParameter()
    solver.train_net = train_net
    solver.test_net.append(test_net)
    # Test every 100 iterations, running 100 test batches each round.
    solver.test_interval = 100
    solver.test_iter.append(100)
    solver.max_iter = 10000
    # SGD hyper-parameters: step policy, learning rate x0.1 every 3000 iters.
    solver.base_lr = 0.01
    solver.momentum = 0.9
    solver.weight_decay = 5e-4
    solver.lr_policy = 'step'
    solver.stepsize = 3000
    solver.gamma = 0.1
    solver.display = 100
    solver.snapshot = 0
    solver.snapshot_prefix = './lenet'
    solver.type = 'SGD'
    solver.solver_mode = proto.caffe_pb2.SolverParameter.GPU
    with open(solver_file, 'w') as out_file:
        out_file.write(str(solver))
def set_device():
    # Run Caffe on GPU 0 in GPU mode.
    caffe.set_device(0)
    caffe.set_mode_gpu()
def Training(solver_file):
    """Run SGD training from *solver_file*, recording train loss and
    periodic test loss/accuracy in fixed-size history buffers.

    Fixes over the original: the loop variable no longer shadows the
    `iter` builtin; history indexing uses `//` so it stays an integer
    index under both Python 2 and 3; the never-used `_train_loss`
    accumulator and dead commented-out code were removed.
    """
    set_device()
    solver = caffe.get_solver(solver_file)
    number_iteration = 10000
    # reporting / testing cadence
    display = 100
    test_iteration = 100
    test_interval = 100
    # loss and accuracy history buffers
    train_loss = np.zeros( int(np.ceil(number_iteration * 1.0 / display)) )
    test_loss = np.zeros( int(np.ceil(number_iteration * 1.0 / test_interval)) )
    test_accuracy = np.zeros( int(np.ceil(number_iteration * 1.0 / test_interval)) )
    # running accumulators for the test phase
    _test_loss = 0
    _test_accuracy = 0
    # main loop
    for iteration in range(number_iteration):
        solver.step(1)
        if 0 == iteration % display:
            train_loss[iteration // display] = solver.net.blobs['loss'].data
        if 0 == iteration % test_interval:
            for _ in range(test_iteration):
                solver.test_nets[0].forward()
                _test_loss += solver.test_nets[0].blobs['loss'].data
                _test_accuracy += solver.test_nets[0].blobs['accuracy'].data
            # `//` keeps the index integral on Python 3 as well.
            test_loss[iteration // test_interval] = _test_loss / test_iteration
            test_accuracy[iteration // test_interval] = _test_accuracy / test_iteration
            _test_loss = 0
            _test_accuracy = 0
def test(model, weights, db_path):
    """Evaluate the trained net on the first few samples of the test LMDB.

    NOTE(review): Python 2 syntax (`print` statement below); evaluation
    stops after 3 samples because of the `count == 3` break.
    """
    net = caffe.Net(model, weights,caffe.TEST)
    set_device()
    #db_path = './examples/mnist/mnist_test_lmdb'
    lmdb_env = lmdb.open(db_path)
    lmdb_txn = lmdb_env.begin()
    lmdb_cursor = lmdb_txn.cursor()
    count = 0
    correct = 0
    for key, value in lmdb_cursor:
        print "Count:", count
        count += 1
        # Deserialize the stored Datum into a label + uint8 image array.
        datum = caffe.proto.caffe_pb2.Datum()
        datum.ParseFromString(value)
        label = int(datum.label)
        image = caffe.io.datum_to_array(datum)
        image = image.astype(np.uint8)
        net.blobs['data'].data[...] = np.asarray([image])
        #out = net.forward_all(data=np.asarray([image]))
        out = net.forward()
        # assumes the 'prob' blob yields a scalar at [0][0] -- TODO confirm shape
        predicted_label = out['prob'][0].argmax(axis=0)
        if label == predicted_label[0][0]:
            correct = correct + 1
        print("Label is class " + str(label) + ", predicted class is " + str(predicted_label[0][0]))
        if count == 3:
            break
    print(str(correct) + " out of " + str(count) + " were classified correctly")
# Before running, generate the MNIST LMDBs first:
# data/mnist/get_mnist.sh
# examples/mnist/create_mnist.sh
if __name__ == '__main__':
    #~ WriteNet()
    #~ GenerateSolver(solver_proto, train_proto, test_proto)
    # Paths to a pre-trained LeNet and the MNIST test LMDB.
    solver = "/home/carol/radiation-benchmarks/src/cuda/LeNetCUDA/caffe/examples/mnist/lenet_solver.prototxt"
    model = "/home/carol/radiation-benchmarks/src/cuda/LeNetCUDA/caffe/examples/mnist/lenet_train_test.prototxt"
    weights = "/home/carol/radiation-benchmarks/src/cuda/LeNetCUDA/caffe/examples/mnist/lenet_iter_10000.caffemodel"
    db_path = "/home/carol/radiation-benchmarks/src/cuda/LeNetCUDA/caffe/examples/mnist/mnist_test_lmdb/"
    #Training(model)
    test(model, weights, db_path)
| [
"lwellausen@gmail.com"
] | lwellausen@gmail.com |
1d0b9f718ecd45f65d4231ee660f12b1f0aaaef7 | 03fc178bd2d6eacd6f656bfd9733ccb1590c5539 | /day14/day14.py | deb31f49d3dc1f809f51f684fdbbf12cf84a7de2 | [] | no_license | antongoransson/advent-of-code-2019 | 6a364e6c9e90f7fed448e33fe71841d8e6d5db03 | aec58876552f738322e3b39ec10f34ccfacbffa8 | refs/heads/master | 2020-09-22T03:33:10.486724 | 2019-12-25T15:33:18 | 2019-12-25T15:33:18 | 225,033,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,614 | py | from collections import defaultdict
import math
def calc_ore(c, reactions, acc, waste, res):
    """Recursively expand requirement *c* = [amount, chemical] scaled by *acc*,
    appending the resulting raw ORE amounts to *res*.

    *waste* maps chemical -> surplus already produced; surplus is consumed
    before new reactions are scheduled, and leftovers are banked back.
    """
    amount_needed, chemical = c
    recipe = reactions[chemical]
    needed = amount_needed * acc - waste[chemical]
    # Number of times the recipe must run (round up to whole reactions).
    runs = math.ceil(needed / recipe['amount'])
    # Anything produced beyond the need becomes waste for later calls.
    waste[chemical] = runs * recipe['amount'] - needed
    ingredients = recipe['inputs']
    if ingredients[0][1] == 'ORE':
        res.append(runs * ingredients[0][0])
        return
    for ingredient in ingredients:
        calc_ore(ingredient, reactions, runs, waste, res)
def solve_part_1(reactions, fuel_amount):
    """Return the total ORE needed to produce *fuel_amount* units of FUEL.

    Walks every top-level ingredient of the FUEL recipe, recursively
    expanding requirements while sharing a surplus tracker across the walk.
    (The original's unused `tot` accumulator and the side-effect-only list
    comprehension were removed.)
    """
    waste = defaultdict(int)  # surplus produced per chemical
    ore_amounts = []
    for component in reactions['FUEL']['inputs']:
        calc_ore(component, reactions, fuel_amount, waste, ore_amounts)
    return sum(ore_amounts)
def solve_part_2(reactions):
    """Binary-search the largest fuel amount producible from one trillion ORE."""
    lo, hi = 0, 10000000
    seen = {}  # fuel amount -> ORE cost, memoized across probes
    while True:
        mid = (hi + lo) // 2
        cost = solve_part_1(reactions, fuel_amount=mid)
        seen[mid] = cost
        if cost > 1000000000000:
            hi = mid
        else:
            lo = mid
        # Done once mid+1 has been probed and is over budget.
        if mid + 1 in seen and seen[mid + 1] > 1000000000000:
            return mid
def main():
    """Parse input.txt into a reaction table and print both puzzle answers."""
    reactions = {}
    with open('input.txt') as puzzle_input:
        for line in puzzle_input:
            # Each line looks like: "7 A, 1 B => 1 FUEL"
            lhs, rhs = line.split("=>")
            out_amount, out_name = rhs.strip().split(" ")
            ingredients = []
            for chunk in lhs.split(","):
                qty, name = chunk.strip().split(" ")
                ingredients.append([int(qty), name])
            reactions[out_name] = {'amount': int(out_amount), 'inputs': ingredients}
    sol1 = solve_part_1(reactions, 1)
    print('Part 1: {}'.format(sol1))
    sol2 = solve_part_2(reactions)
    print('Part 2: {}'.format(sol2))
# Script entry point.
if __name__ == "__main__":
    main()
| [
"anton.goransson95@gmail.com"
] | anton.goransson95@gmail.com |
295d466b1cfaeed29cb650aeb973f2c95ae468d1 | acc58aec826f84e51940985d8653c32ba492b556 | /gui/gantt_chart/bar.py | 3bf4af34af2a8bd95de92e64b01e3880b9f99857 | [] | no_license | alexiusacademia/easyplan | 8f6f7a0d12257189403ac4aefe2877c52c5f8a5c | 9e4995ccb64343641bc2bc7a67d73027c1f22185 | refs/heads/master | 2020-06-01T00:51:52.051599 | 2019-07-11T02:21:43 | 2019-07-11T02:21:43 | 190,566,861 | 0 | 0 | null | 2019-07-11T02:21:44 | 2019-06-06T11:02:52 | Python | UTF-8 | Python | false | false | 6,271 | py | import wx
from wx.lib.docview import CommandProcessor
from constants import *
from ..dialogs.dlg_split_task import SplitTaskDialog
from core.task_segment import TaskSegment
from core.project import Project
from ..commands.move_task_segment_by_dragging import MoveTaskSegmentCommand
# Bar colours: black while a segment has keyboard focus, blue otherwise.
BG_RECEIVED_FOCUS = wx.Colour(0, 0, 0)
BG_DEFAULT = wx.Colour(0, 0, 255)
class BarSegment(wx.Panel):
    """A draggable Gantt-chart bar representing one segment of a task."""

    task_segment = None
    task = None
    parent = None
    project = None
    command_processor = None
    # Horizontal drag limits in pixels, recomputed on every mouse-down.
    left_limit = 0
    right_limit = 1000000

    def __init__(self, parent, x, y, l, h, task, task_segment):
        wx.Panel.__init__(self, parent, style=wx.BORDER_SUNKEN)
        self.task_segment: TaskSegment = task_segment
        self.task = task
        self.parent = parent
        self.command_processor: CommandProcessor = parent.parent.command_processor
        self.project: Project = self.parent.project
        # Center the bar vertically inside its WBS row.
        y_adjustment = (WBS_ROW_HEIGHT - BAR_HEIGHT) / 2
        self.SetPosition((x, y + y_adjustment))
        self.SetSize(l, h)
        self.SetBackgroundColour(BG_DEFAULT)
        self.SetCursor(wx.Cursor(wx.CURSOR_SIZING))
        # Refresh is needed to work on Windows
        self.Refresh()
        self.Bind(wx.EVT_ENTER_WINDOW, self.on_hover)
        self.Bind(wx.EVT_LEFT_UP, self.on_left_up)
        self.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_left_down)
        self.Bind(wx.EVT_SET_FOCUS, self.on_received_focus)
        self.Bind(wx.EVT_KILL_FOCUS, self.on_lost_focus)
        self.Bind(wx.EVT_MOTION, self.on_mouse_move)

    def on_mouse_left_down(self, event):
        """Record the drag origin and compute the allowed drag range."""
        self.SetFocus()
        self.mouse_start_position = event.GetPosition().x
        # Left limit: cannot move before the latest-finishing predecessor.
        if len(self.task.predecessors) > 0:
            for pred in self.task.predecessors:
                pred_end = pred.get_finish() * BAR_SCALE
                if pred_end > self.left_limit:
                    self.left_limit = pred_end
        # Left limit for split tasks: cannot overlap the segment to the left.
        ts_index = self.task.task_segments.index(self.task_segment)
        if ts_index != 0:
            left_ts_index = ts_index - 1
            left_ts: TaskSegment = self.task.task_segments[left_ts_index]
            left_limit = left_ts.get_finish() * BAR_SCALE
            if left_limit > self.left_limit:
                self.left_limit = left_limit
        # Right limit: cannot move past the earliest-starting successor.
        nearest_successor_start = 1000000
        for task in self.project.tasks:
            if len(task.predecessors) > 0:
                for task_pred in task.predecessors:
                    if task_pred == self.task:
                        successor_start = task.start_day
                        if successor_start < nearest_successor_start:
                            nearest_successor_start = successor_start
        nearest_successor_location = nearest_successor_start * BAR_SCALE
        if nearest_successor_location < self.right_limit:
            self.right_limit = nearest_successor_location
        # Right limit for split tasks: cannot overlap the segment to the right.
        if ts_index < (len(self.task.task_segments) - 1):
            right_ts: TaskSegment = self.task.task_segments[ts_index + 1]
            right_limit = right_ts.start * BAR_SCALE
            if right_limit < self.right_limit:
                self.right_limit = right_limit

    def on_mouse_move(self, event):
        """Drag handler: move the segment once dragged at least BAR_SCALE px."""
        # BUGFIX: Dragging is a method -- the original tested the bound-method
        # object (always truthy), so the "not dragging" case never returned.
        if not event.Dragging() or not event.LeftIsDown():
            return
        # Current bar position and the distance dragged since mouse-down.
        starting_point = self.GetPosition()[0]
        start_x = self.mouse_start_position
        dx = (event.GetPosition()[0] - start_x)
        new_x = starting_point + dx
        if new_x < self.left_limit:
            return
        if new_x >= 0 and abs(dx) >= BAR_SCALE:
            # Predicted pixel position of the segment's right end after moving.
            new_task_end_x = (int(new_x / BAR_SCALE) + self.task_segment.duration - 1) * BAR_SCALE
            if (new_x + BAR_SCALE) > self.left_limit and \
                    (new_task_end_x + BAR_SCALE) < self.right_limit:
                self.move_task_segment(new_x)

    def move_task_segment(self, new_x: int):
        """Submit an undoable command moving this segment to *new_x* pixels."""
        command = MoveTaskSegmentCommand(True, 'Move Task Segment',
                                         int(new_x / BAR_SCALE),
                                         self.task,
                                         self.task_segment,
                                         self.project,
                                         self)
        self.command_processor.Submit(command)

    def on_hover(self, event):
        # No hover behaviour yet; bound so the event is consumed.
        pass

    def on_left_up(self, event):
        """Snap the bar back to the segment's (possibly updated) start day."""
        start_x = self.task_segment.start - 1
        self.SetPosition((start_x * BAR_SCALE, self.GetPosition()[1]))

    def on_received_focus(self, event):
        """
        Triggered when the bar is clicked: highlight it and mark this
        task/segment as the project's current selection.
        """
        self.SetBackgroundColour(BG_RECEIVED_FOCUS)
        self.project.selected_task_segment = self.task_segment
        self.project.selected_task = self.task
        self.project.selected_task_index = self.project.tasks.index(self.task)
        self.Refresh()

    def on_lost_focus(self, event):
        """
        Triggered when the mouse is clicked somewhere else or the frame
        lost its focus: restore the default colour (selection is kept).
        """
        self.SetBackgroundColour(BG_DEFAULT)
        self.Refresh()

    def on_double_clicked(self, event):
        """Open the split-task dialog for this segment."""
        task = self.task
        task_segment = self.task_segment
        self.project.selected_task_segment = task_segment
        self.project.selected_task = task
        dlg = SplitTaskDialog(self.parent.parent)
        res = dlg.ShowModal()
        if res == ID_OK:
            dlg.Destroy()
| [
"alexius.academia@gmail.com"
] | alexius.academia@gmail.com |
1194dfd86a3d043f09ac701fb4b1c43643524106 | 4da72085e8b3adc68a6ec967025caf9576a75363 | /tapiriik/services/api.py | 0d61cea0af5e343af7a8d1f00d2a42d8eb991179 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | blakearnold/tapiriik | ec9063b3bc234dccc5dc63fcbe2f31bbcabc6e96 | bf2e803cc8825a6c21bf7eae115044683dc98837 | refs/heads/master | 2021-01-20T01:10:52.259265 | 2014-07-21T00:58:58 | 2014-07-21T00:58:58 | 18,734,981 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,895 | py | class ServiceExceptionScope:
Account = "account"
Service = "service"
class ServiceException(Exception):
    """Base exception for errors raised by a sync service connector."""

    def __init__(self, message, scope=ServiceExceptionScope.Service, block=False, user_exception=None):
        super().__init__(message)
        self.Message = message
        self.UserException = user_exception
        self.Block = block
        self.Scope = scope

    def __str__(self):
        return self.Message + " (user " + str(self.UserException) + " )"
class ServiceWarning(ServiceException):
    # Warning counterpart of ServiceException (same payload, softer severity).
    pass
class APIException(ServiceException):
    # Raised for failures while calling a remote service's API.
    pass
class APIWarning(ServiceWarning):
    # Warning counterpart of APIException.
    pass
# Theoretically, APIExcludeActivity should actually be a ServiceException with block=True, scope=Activity
# It's on the to-do list.
class APIExcludeActivity(Exception):
    """Raised to mark a single activity as excluded from synchronization."""

    def __init__(self, message, activity=None, activityId=None, permanent=True, userException=None):
        super().__init__(message)
        self.Message = message
        self.Activity = activity
        self.ExternalActivityID = activityId
        # Permanent exclusions are never retried; temporary ones may be.
        self.Permanent = permanent
        self.UserException = userException

    def __str__(self):
        return self.Message + " (activity " + str(self.ExternalActivityID) + ")"
class UserExceptionType:
    """String constants identifying the kinds of user-visible sync errors."""
    # Account-level exceptions (not a hardcoded thing, just to keep these seperate)
    Authorization = "auth"
    AccountFull = "full"
    AccountExpired = "expired"
    AccountUnpaid = "unpaid" # vs. expired, which implies it was at some point function, via payment or trial or otherwise.
    # Activity-level exceptions
    FlowException = "flow"
    Private = "private"
    NotTriggered = "notrigger"
    RateLimited = "ratelimited"
    MissingCredentials = "credentials_missing" # They forgot to check the "Remember these details" box
    NotConfigured = "config_missing" # Don't think this error is even possible any more.
    StationaryUnsupported = "stationary"
    NonGPSUnsupported = "nongps"
    TypeUnsupported = "type_unsupported"
    DownloadError = "download"
    ListingError = "list" # Cases when a service fails listing, so nothing can be uploaded to it.
    UploadError = "upload"
    SanityError = "sanity"
    Corrupt = "corrupt" # Kind of a scary term for what's generally "some data is missing"
    Untagged = "untagged"
    LiveTracking = "live"
    UnknownTZ = "tz_unknown"
    System = "system"
    Other = "other"
class UserException:
    """Describes a failure in a user-meaningful way for display in the UI."""

    def __init__(self, type, extra=None, intervention_required=False, clear_group=None):
        self.Type = type
        # Extra detail shown as part of the error message (unimplemented upstream).
        self.Extra = extra
        # Whether the user must explicitly dismiss this error.
        self.InterventionRequired = intervention_required
        # Errors sharing a ClearGroup are displayed together and cleared together;
        # falls back to the exception type when no (truthy) group is given.
        self.ClearGroup = clear_group or type
| [
"cpf@cpfx.ca"
] | cpf@cpfx.ca |
38efa084fb74c15ed10bf51e3ce920d95524b244 | 35e4fefee09e5a7601caeb0ae2a529bb9cd08dbf | /virtual/bin/easy_install | c9954c7853fbba81c1076da184d773f6cf131345 | [
"MIT"
] | permissive | Nicoleha/awards | 14f2f162bf01b4ae1663f09782ec3455c3bbf49f | 90d0461c672a2ea9ea8343538722bb44c8118507 | refs/heads/master | 2020-05-03T05:43:01.117289 | 2019-04-03T17:01:16 | 2019-04-03T17:01:16 | 178,455,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | #!/home/wecode/Documents/post/virtual/bin/python
# -*- coding: utf-8 -*-
# Console-script shim generated by setuptools: strips the script suffix
# from argv[0] and delegates to the easy_install entry point.
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"nicoleha127@gmail.com"
] | nicoleha127@gmail.com | |
878793373abef15eb0f86e00c10ef25dc6c595e9 | 67b9feeed6bb154631aa0bf980cec9e34b0417a0 | /setup.py | 763e9bfd9cd05ae82237a3d4f4d699b47e0f03cb | [
"MIT"
] | permissive | marquitobb/parse_subjson | 6c33c511d8773809bfe6da7a303404ed3ea9295e | 541ee9aeca2b20de9f1fa536a9b1369dd8f175c0 | refs/heads/master | 2023-07-16T21:10:11.643288 | 2021-08-30T23:25:32 | 2021-08-30T23:25:32 | 363,579,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | from distutils.core import setup
# Minimal distutils package metadata for parse_subjson.
setup(
    name = "parse_subjson",
    packages = ["parse_subjson"],
    version= "0.1",
    description = "package to loop json with subjson list",
    author = "Marco Cobian",
    license='MIT',
    author_email = "maca9608@gmail.com",
    url = "https://github.com/marquitobb/parse_subjson.git",
    keywords= ["parse_subjson", "subjson"]
)
"maca9608@gmail.com"
] | maca9608@gmail.com |
24fa541ba8035e7771c837154211bd159e7bd92e | 2d2c10ffa7aa5ee35393371e7f8c13b4fab94446 | /projects/ai/reader/read-records.py | ad82b741cf38c8653e3c5b8df2f1402d4a8f7ed8 | [] | no_license | faker2081/pikachu2 | bec83750a5ff3c7b5a26662000517df0f608c1c1 | 4f06d47c7bf79eb4e5a22648e088b3296dad3b2d | refs/heads/main | 2023-09-02T00:28:41.723277 | 2021-11-17T11:15:44 | 2021-11-17T11:15:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,377 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
# \file inference.py
# \author chenghuige
# \date 2018-02-05 20:05:25.123740
# \Description
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# Command-line flags (tf.app.flags). NOTE(review): main() references
# FLAGS.fold, but the 'fold' definition below is commented out -- it is
# presumably defined by an imported module; confirm before uncommenting.
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('input', './mount/temp/ai2018/sentiment/tfrecord/valid/*record,', '')
flags.DEFINE_integer('batch_size_', 512, '')
flags.DEFINE_string('type', 'debug', '')
flags.DEFINE_string('base', './mount/temp/ai2018/sentiment/tfrecord/', '')
#flags.DEFINE_integer('fold', None, '')
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
import sys, os
from sklearn import metrics
import pandas as pd
import numpy as np
import gezi
import pickle
from wenzheng.utils import ids2text
import melt
logging = melt.logging
from dataset import Dataset
from tqdm import tqdm
# TODO by default save all ? so do not need to change the code ?
# _asdict() https://stackoverflow.com/questions/26180528/python-named-tuple-to-dictionary
# err... valid and test data share same id...
def deal(dataset, infos):
    """Flatten a batched tf dataset into per-example dicts keyed by example id.

    Mutates ``infos`` in place: for every example id, stores a dict mapping
    each feature name to that example's value. Tensors are converted to numpy
    first; byte features are decoded to strings via gezi.decode.
    """
    for x, _ in tqdm(dataset, ascii=True):
        for key in x:
            # Convert the whole batch tensor to numpy before inspecting values.
            x[key] = x[key].numpy()
            # Decode byte-string features (first element is representative of the batch).
            if type(x[key][0]) == bytes:
                x[key] = gezi.decode(x[key])
        ids = x['id']
        # Re-key batch-major features into one dict per example id.
        # NOTE(review): a repeated id overwrites the earlier entry — confirm ids are unique.
        for j in range(len(ids)):
            infos[ids[j]] = {}
            for key in x:
                infos[ids[j]][key] = x[key][j]
def main(_):
    """Inspect serialized tfrecord data according to FLAGS.type.

    Modes:
      * 'debug'     — stream batches and print the content of example id '573'
      * 'test'      — like the configured input but pointed at the test split
      * 'dump'      — flatten train/test records into dicts and pickle them
      * 'show_info' — load the pickled info and print corpus statistics
    Any other value raises ValueError.
    """
    base = FLAGS.base
    logging.set_logging_path('./mount/tmp/')
    vocab_path = f'{base}/vocab.txt'
    ids2text.init(vocab_path)
    FLAGS.vocab = f'{base}/vocab.txt'
    # FLAGS.length_index = 2
    # FLAGS.buckets = '100,400'
    # FLAGS.batch_sizes = '64,64,32'
    input_ = FLAGS.input
    if FLAGS.type == 'test':
        input_ = input_.replace('valid', 'test')
    inputs = gezi.list_files(input_)
    inputs.sort()
    # NOTE(review): the 'fold' flag definition is commented out at module
    # level, so FLAGS.fold may be undefined here — confirm it is declared
    # elsewhere before relying on this branch.
    if FLAGS.fold is not None:
        # Hold out the shard whose filename ends with the fold number.
        inputs = [x for x in inputs if not x.endswith('%d.record' % FLAGS.fold)]
    if FLAGS.type == 'debug':
        print('type', FLAGS.type, 'inputs', inputs, file=sys.stderr)
        dataset = Dataset('valid')
        dataset = dataset.make_batch(FLAGS.batch_size_, inputs)
        print('dataset', dataset)
        timer = gezi.Timer('read record')
        for i, (x, y) in enumerate(dataset):
            # if i % 10 == 1:
            #   print(x['id'])
            #   print(x['content'][0])
            #   print(ids2text.ids2text(x['content'][0], sep='|'))
            #   print(x['content'])
            #   print(type(x['id'].numpy()[0]) == bytes)
            #   break
            x['id'] = gezi.decode(x['id'].numpy())
            x['content_str'] = gezi.decode(x['content_str'].numpy())
            # Spot-check a single known example id.
            for j, id in enumerate(x['id']):
                if id == '573':
                    print(id, x['content_str'][j])
    elif FLAGS.type == 'dump':
        valid_infos = {}
        test_infos = {}
        inputs = gezi.list_files(f'{base}/train/*record')
        dataset = Dataset('valid')
        # Batch size 1 so deal() sees one example per step.
        dataset = dataset.make_batch(1, inputs)
        deal(dataset, valid_infos)
        print('after valid', len(valid_infos))
        inputs = gezi.list_files(f'{base}/test/*record')
        dataset = Dataset('test')
        dataset = dataset.make_batch(1, inputs)
        deal(dataset, test_infos)
        print('after test', len(test_infos))
        # Print one sample entry as a sanity check.
        for key in valid_infos:
            print(valid_infos[key])
            print(ids2text.ids2text(valid_infos[key]['content']))
            break
        ofile = f'{base}/info.pkl'
        with open(ofile, 'wb') as out:
            pickle.dump(valid_infos, out)
        ofile = ofile.replace('.pkl', '.test.pkl')
        with open(ofile, 'wb') as out:
            pickle.dump(test_infos, out)
    elif FLAGS.type == 'show_info':
        valid_infos = pickle.load(open(f'{base}/info.pkl', 'rb'))
        # Token id 1 is counted as the unknown token here — TODO confirm vocab convention.
        lens = [len(valid_infos[key]['content']) for key in valid_infos]
        unks = [list(valid_infos[key]['content']).count(1) for key in valid_infos]
        print('num unks per doc:', sum(unks) / len(unks))
        print('num doc with unk ratio:', len([x for x in unks if x != 0]) / len(unks))
        print('un unk tokens ratio:', sum(unks) / sum(lens))
        print('len max:', np.max(lens))
        print('len min:', np.min(lens))
        print('len mean:', np.mean(lens))
    else:
        raise ValueError(FLAGS.type)
if __name__ == '__main__':
tf.compat.v1.app.run()
| [
"chenghuige@gmail.com"
] | chenghuige@gmail.com |
d4e76be4c9bb5257782b5a0aa70af1ccc14a4b6f | 9a8d1ab7424279e4b748a2bcce19fc2d8f39b846 | /changed/apps.py | 24910de356e07849156b9c1155ee49d6d1830c45 | [] | no_license | echung05/covid-reviewapp | d7bc03c6c57aa48a3e7f816b4c7d595b53edebef | 15ee0c67009d0fb4f8e2e5534a3234b05636ed40 | refs/heads/master | 2023-05-24T07:31:41.419571 | 2021-05-06T14:52:45 | 2021-05-06T14:52:45 | 374,002,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | from django.apps import AppConfig
class ChangedConfig(AppConfig):
    """Django application configuration for the ``changed`` app."""
    name = 'changed'
| [
"trandavidq@gmail.com"
] | trandavidq@gmail.com |
ab23e48873e7ca764d6bfc1216f93ed33e7f1c28 | 69c185d0dfed894234506a1aa6c6bf863849c589 | /web服务器最初引进/wsgi/ctime.py | ac9c1f737f07361c21f766c78184b8254b451ce7 | [] | no_license | haha479/Socket_CS_project | 19599edc47dda61a60afc55dae16a6b59c78fdd5 | 5b54ef8db0b10d63bf9e6f980a32a45c4055238a | refs/heads/master | 2020-04-08T11:23:16.514181 | 2018-11-30T04:26:08 | 2018-11-30T04:26:08 | 159,304,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | import time
#将返回给浏览器的数据解耦到次脚本文件中
def get_time(env, start_response):
    """WSGI-style handler that answers with the current local time string."""
    start_response("200 OK", [("Content-Type", "text/html")])
    return time.ctime()
def get_love(env, start_response):
    """WSGI-style handler that always answers with the text "Love"."""
    start_response("200 OK", [("Content-Type", "text/html")])
    return "Love"
| [
"283275935@qq.com"
] | 283275935@qq.com |
04837cefd1f208d2c2e064d9550fd2879f05690c | 8c60030f6b90e2367658ed478f444660d67d1e11 | /backend/search/internal_api.py | 6b76d1ed7a75bb219e1fabe239be22d8d9dda734 | [] | no_license | taoky/Team_USTC_Software | 64e961271eab67a12b47d4064f29d2e9b7dab95e | 95363f102877352ef28c7f34f9ded927d08efc5d | refs/heads/master | 2020-09-01T22:55:41.420229 | 2019-10-22T03:35:35 | 2019-10-22T03:35:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,027 | py | from haystack.query import SearchQuerySet, SQ
from biobricks.models import Biobrick
import functools
import random
def search_biobricks(*keywords, num=5):
    """Return up to ``num`` Biobrick objects fuzzily matching ``keywords``.

    Falls back to random padding (fill_in) when there are no keywords or
    fewer than ``num`` search hits; results are ordered by search score.
    """
    # Tiny corpus: just return everything.
    # NOTE(review): sr2obj expects haystack SearchResult wrappers with an
    # ``.object`` attribute, but this passes raw model instances — confirm.
    if Biobrick.objects.count() < num:
        return sr2obj(Biobrick.objects.all())
    if not keywords:
        return fill_in([], num)
    # OR-combine one fuzzy clause per keyword.
    sq = functools.reduce(lambda a, b: a | b, (SQ(content__fuzzy=keyword) for keyword in keywords))
    sqs = SearchQuerySet().models(Biobrick).filter(sq).order_by('-_score')
    count = sqs.count()
    if count < num:
        # Not enough hits: pad with random bricks.
        return fill_in(sr2obj(sqs), num)
    else:
        return sr2obj(sqs[0:num])
def fill_in(initial, num):
    """
    Return a list seeded with ``initial`` and padded with distinct random
    Biobrick objects until it holds ``num`` entries.
    """
    pool = Biobrick.objects.all()
    padded = list(initial)
    while len(padded) < num:
        candidate = random.choice(pool)
        if candidate not in padded:
            padded.append(candidate)
    return padded
def sr2obj(srs):
    """Unwrap haystack search results into their underlying model objects."""
    return list(map(lambda result: result.object, srs))
| [
"taoky1999@gmail.com"
] | taoky1999@gmail.com |
91c211a6e01d7c3e851c89671af6973faa5c1296 | 8bc025f27f451f245bd371b66f3d58253e4587d3 | /src/Foundation/Standard/practice12.py | 9c2c3dd3ebc73f06f3f63c509ae4d052354cdf83 | [
"MIT"
] | permissive | mryyomutga/PracticePython | 8f2c5cdef074091eb8fcf76bd78800b959024e02 | e191d73064248d0983344b137fbe6b69e5eb1d12 | refs/heads/master | 2021-05-15T14:31:16.365211 | 2017-10-29T04:46:51 | 2017-10-29T04:46:51 | 107,212,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,680 | py | # -*- coding:utf-8 -*-
# ジェネレータ
# 処理の途中で値を返し、必要に応じて処理を再開できる
def sample_generator():
    """Generator that prints a progress message before yielding each step.

    Execution pauses at every ``yield`` and resumes on the next ``next()``.
    """
    for step, label in enumerate(("1st step", "2nd step", "3rd step"), start=1):
        print("call {0}".format(step))
        yield label
# ジェネレータオブジェクトを作成
gen_func = sample_generator()
text = gen_func.__next__() # yieldまで実行
print(text) # 1st step
text = gen_func.__next__()
print(text) # 2nd step
text = gen_func.__next__()
print(text) # 3rd step
print()
# ループ処理でジェネレータ関数を実行
def sample_generator():
    """Yield three step labels, printing a message before each.

    Each ``next()`` call resumes after the previous ``yield`` and runs
    until the next one, so the prints interleave with the yielded values.
    """
    print("call 1")
    yield "1st step"
    print("call 2")
    yield "2nd step"
    print("call 3")
    yield "3rd step"
gen_func = sample_generator()
for text in gen_func:
print(text)
# フィボナッチ数列を返すジェネレータ
def fibonacci_generator():
    """Infinite generator yielding the Fibonacci sequence starting at 0."""
    current, following = 0, 1
    while True:
        yield current
        current, following = following, current + following
gen_func = fibonacci_generator()
for i in range(0, 10):
# 10個取得する
num = next(gen_func)
print(num)
print()
# send()メソッド
# 待機中のジェネレータに値を設定する
def sample_generator():
    """Demonstrate two-way communication via ``generator.send()``.

    The first ``yield`` produces "Good Morning"; the value later passed in
    with ``send()`` is bound to ``text`` and yielded back twice.
    """
    text = yield "Good Morning"
    yield text
    yield text
gen_func = sample_generator()
text = next(gen_func)
print(text)
text = gen_func.send("Hello")
print(text)
text = next(gen_func)
print(text)
# thorw()メソッド
# 待機中のジェネレータに例外を送信
# close()メソッド
# 待機中のジェネレータを正常終了させる
| [
"mryyomutga@gmail.com"
] | mryyomutga@gmail.com |
9331f4d30f97229a552d60e04fe7d0999582a44e | 4cb0b3c4acf4e30dda0f814fab8232bf13617422 | /Python/Python/MultiplesSumAverage/server.py | 8b315d568c04b2bf9bfae92084806477d517b270 | [] | no_license | h0oper/DojoAssignments | d60336b3e67021be0e6a43c1f3693193f83b22d9 | 28472e7907a18725d702fc9617f27619fcc4fcfc | refs/heads/master | 2020-05-09T23:38:27.916674 | 2018-10-06T20:46:02 | 2018-10-06T20:46:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | for count in range(0, 10):
if count % 2 == 1:
print count
for counter in range(5, 101, 5):
print counter
a = [1, 2, 5, 10, 255, 3]
z = 0
for element in a:
z = z + element
print z
a = [1, 2, 5, 10, 255, 3]
z = 0
for element in a:
z = z + element
x = z/len(a)
print x | [
"ccasil@ucsc.edu"
] | ccasil@ucsc.edu |
7c69ed213923a672ef47819e263bc2c7a18b0dae | 4f0385a90230c0fe808e8672bb5b8abcceb43783 | /LNH/day4/Common module/06 sys module-FLS-MPB.py | 8b07eaf64b395655b35a8a69cb85bf7d9ab01420 | [] | no_license | lincappu/pycharmlearningproject | 4084dab7adde01db9fa82a12769a67e8b26b3382 | b501523e417b61373688ba12f11b384166baf489 | refs/heads/master | 2023-07-10T05:21:15.163393 | 2023-06-29T14:02:35 | 2023-06-29T14:02:35 | 113,925,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | # !/usr/bin/env python3
# _*_coding:utf-8_*_
# __author__:FLS
from pprint import pprint
import sys
print(sys.argv[0])
# 重定向标准输出:
saveout=sys.stdout
flog=open('t2.log.sh','w',encoding='utf-8')
sys.stdout=flog
print('12345323')
flog.close()
sys.stdout=saveout
print('zhengcheng')
print(sys.builtin_module_names)
pprint(sys.path)
pprint(sys.platform)
| [
"lincappu@163.com"
] | lincappu@163.com |
901f5b1128b25ca5c9895ab04816531693edc2b8 | 87e904087ef28fa3dd29d7e91774d74f87eeb0a7 | /API.py | 37f35676817ded554b117a9fb7eab26317b2ffde | [] | no_license | gitter-badger/W.I.L.L | b624418810838f4bfaa7b3b40e809772857a9feb | 0c374f4ceed2ea4199fea5d9992c2cf6208a7890 | refs/heads/master | 2021-01-17T12:44:17.491654 | 2017-03-01T18:55:52 | 2017-03-01T18:55:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,227 | py | from flask import Blueprint, request, session, redirect, render_template
import core
import tools
import logging
import bcrypt
import json
import traceback
import sys
from whenareyou import whenareyou
try:
import queue as Queue
except ImportError:
import Queue
db = None
configuration_data = None
log = logging.getLogger()
api = Blueprint('api', __name__, template_folder='templates')
@api.route('/new_user', methods=["GET","POST"])
def new_user():
    '''
    Create a new user row in the database.

    Expects in the request body (JSON or form):
    :param: username
    :param: password
    :param: first_name
    :param: last_name
    :param: email
    :param: city
    :param: country
    :param: state
    :return: JSON response describing success or the failure reason
    '''
    log.info(":API:/api/new_user")
    response = {"type": None, "data": {}, "text": None}
    try:
        # Accept either a JSON body or a classic form post.
        if request.is_json:
            request_data = request.get_json()
        else:
            request_data = request.form
        username = str(request_data["username"])
        log.debug("Username is {0}".format(username))
        password = str(request_data["password"])
        first_name = str(request_data["first_name"])
        last_name = str(request_data["last_name"])
        email = str(request_data["email"])
        city = str(request_data["city"])
        country = str(request_data["country"])
        state = str(request_data["state"])
        # Reject any field containing characters outside tools.valid_chars.
        check_list = [username, password, first_name, last_name, email, city, country, state]
        passed = tools.check_string(check_list)
        if passed:
            log.debug("Attempting to create new user with username {0} and email {1}".format(username, email))
            # Check to see if the username exists
            users = db["users"]
            if users.find_one(username=username):
                # If that username is already taken
                taken_message = "Username {0} is already taken".format(username)
                log.debug(taken_message)
                response["type"] = "error"
                response["text"] = taken_message
            else:
                # Add the new user to the database
                log.info(":{0}:Adding a new user to the database".format(username))
                db.begin()
                # Hash the password
                log.debug("Hashing password")
                hashed = bcrypt.hashpw(password.encode('utf8'), bcrypt.gensalt())
                # NOTE(review): logging the bcrypt hash, even at debug level,
                # leaks sensitive material into log files.
                log.debug("Hashed password is {0}".format(hashed))
                # Users listed in the config's "admins" list get admin rights.
                is_admin = username in configuration_data["admins"]
                try:
                    db['users'].insert({
                        "username": username,
                        "first_name": first_name,
                        "last_name": last_name,
                        "email": email,
                        "password": hashed,
                        "admin": is_admin,
                        "default_plugin": "search",
                        "notifications": json.dumps(["email"]),
                        # Honor the proxy header when present, else the raw peer address.
                        "ip": request.environ.get('HTTP_X_REAL_IP', request.remote_addr),
                        "news_site": "http://reuters.com",
                        "city": city,
                        "country": country,
                        "state": state,
                        "temp_unit": "fahrenheit",
                        # Resolve the user's timezone from the city name.
                        "timezone": whenareyou(city)
                    })
                    db.commit()
                    response["type"] = "success"
                    response["text"] = "Thank you {0}, you are now registered for W.I.L.L".format(first_name)
                # NOTE(review): bare except hides the real failure; narrowing it
                # to the db driver's exception type would aid debugging.
                except:
                    db.rollback()
                    response["type"] = "error"
                    response["text"] = "There was an error in signing you up for W.I.L.L. Please check the information you entered"
        else:
            log.warning(":{0}:Failed SQL evaluation".format(username))
            response["type"] = "error"
            response["text"] = "Invalid input, valid chars are {0}".format(tools.valid_chars)
    except KeyError:
        log.error("Needed data not found in new user request")
        response["type"] = "error"
        response["text"] = "Couldn't find required data in request. " \
                           "To create a new user, a username, password, first name, last name," \
                           "and email is required"
    return tools.return_json(response)
@api.route("/settings", methods=["POST"])
def settings():
    """
    Change a user's settings.

    :param username:
    :param password:
    :param Optional - setting to be changed:
    :return: JSON success/error response
    """
    log.info(":API:/api/settings")
    response = {"type": None, "text": None, "data": {}}
    if request.is_json:
        request_data = request.get_json()
    else:
        request_data = request.form
    if "username" in request_data.keys() and "password" in request_data.keys():
        username = request_data["username"]
        password = request_data["password"]
        if tools.check_string(request_data.values()):
            user_table = db["users"].find_one(username=username)
            if user_table:
                db_hash = user_table["password"]
                if bcrypt.checkpw(password.encode('utf8'), db_hash.encode('utf8')):
                    #TODO: write a framework that allows changing of notifications
                    immutable_settings = ["username", "admin", "id", "user_token", "notifications", "password"]
                    db.begin()
                    log.info(":{0}:Changing settings for user".format(username))
                    try:
                        for setting in request_data.keys():
                            if setting not in immutable_settings:
                                # Bug fix: read from request_data (not request.form)
                                # so JSON requests work the same as form posts.
                                db["users"].upsert({"username": username, setting: request_data[setting]}, ['username'])
                        db.commit()
                        response["type"] = "success"
                        response["text"] = "Updated settings"
                    except Exception as db_error:
                        # Bug fix: Exception has no `.message` attribute in Python 3;
                        # formatting the exception itself avoids an AttributeError
                        # that previously masked the real database error.
                        log.debug("Exception {0}, {1} occurred while trying to commit changes to the database".format(
                            db_error, db_error.args
                        ))
                        response["type"] = "error"
                        response["text"] = "Error encountered while trying to update db, changes not committed"
                        db.rollback()
                else:
                    # Bug fix: a wrong password previously fell through and
                    # returned a response full of Nones.
                    response["type"] = "error"
                    response["text"] = "Invalid username/password combination"
            else:
                response["type"] = "error"
                response["text"] = "User {0} doesn't exist".format(username)
        else:
            response["type"] = "error"
            response["text"] = "Invalid input"
    else:
        response["type"] = "error"
        response["text"] = "Couldn't find username or password in request data"
    return tools.return_json(response)
@api.route('/get_sessions', methods=["GET", "POST"])
def get_sessions():
    """
    Return the list of active session ids belonging to a user.

    :param: username
    :param: password
    :return: JSON response whose ``data.sessions`` lists the session ids
    """
    log.info(":API:/api/get_sessions")
    response = {"type": None, "data": {}, "text": None}
    sessions = core.sessions
    if request.is_json:
        request_data = request.get_json()
    else:
        request_data = request.form
    try:
        username = request_data["username"]
        password = request_data["password"]
        if tools.check_string(request_data.values()):
            # Bug fix: guard against an unknown username — find_one() returns
            # None and the original code raised TypeError subscripting it.
            user_row = db['users'].find_one(username=username)
            if user_row and bcrypt.checkpw(password.encode('utf8'), user_row["password"].encode('utf8')):
                response["data"].update({"sessions": []})
                for user_session in sessions:
                    if sessions[user_session]["username"] == username:
                        # Bug fix: the original appended the flask `session`
                        # proxy object instead of the matching session id.
                        response["data"]["sessions"].append(user_session)
                response["type"] = "success"
                response["text"] = "Fetched active sessions"
            else:
                response["type"] = "error"
                response["text"] = "Invalid username/password combination"
        else:
            response["type"] = "error"
            response["text"] = "One of the submitted parameters contained an invalid character. " \
                               "Valid characters are {0}".format(tools.valid_chars)
    except KeyError:
        response["type"] = "error"
        response["text"] = "Couldn't find username and password in request"
    return tools.return_json(response)
@api.route('/start_session', methods=["GET","POST"])
def start_session():
    '''
    Authenticate a user and start a new session.

    :param: username
    :param: password
    Generate a session id and start a new session.
    :return: JSON with the session id (POST), or a redirect/template (GET)
    '''
    log.info(":API:/api/start_session")
    # Check the information that the user has submitted
    response = {"type": None, "data": {}, "text": None}
    if request.is_json:
        request_data = request.get_json()
    else:
        request_data = request.form
    try:
        # POST carries credentials in the body; GET in the query string.
        if request.method == "POST":
            username = request_data["username"]
            password = request_data["password"]
            client = "API-POST"
        elif request.method == "GET":
            username = request.args.get("username", "")
            password = request.args.get("password", "")
            client = "API-GET"
        # Empty credentials are treated the same as missing ones.
        if not (username and password):
            raise KeyError()
        if tools.check_string([username, password]):
            log.info(":{0}:Checking password".format(username))
            users = db["users"]
            user_data = users.find_one(username=username)
            if user_data:
                # NOTE(review): this second lookup re-fetches the same row — redundant.
                user_data = db["users"].find_one(username=username)
                # Check the password
                db_hash = user_data["password"]
                user_auth = bcrypt.checkpw(password.encode('utf8'), db_hash.encode('utf8'))
                if user_auth:
                    log.info(":{0}:Authentication successful".format(username))
                    # Return the session id to the user
                    session_id = tools.gen_session(username, client, db)
                    if session_id:
                        response["type"] = "success"
                        response["text"] = "Authentication successful"
                        response["data"].update({"session_id": session_id})
                else:
                    response["type"] = "error"
                    response["text"] = "Invalid username/password"
            else:
                response["type"] = "error"
                response["text"] = "Couldn't find user with username {0}".format(username)
        else:
            response["type"] = "error"
            response["text"] = "Invalid input"
    except KeyError:
        response["type"] = "error"
        response["text"] = "Couldn't find username and password in request data"
    # Render the response as json
    if request.method == "GET":
        # Browser flow: stash the result in the flask session and render.
        session.update({"session_data": response})
        if response["type"] == "success":
            return redirect("/")
        log.debug("Rendering command template")
        return render_template("command.html")
    else:
        return tools.return_json(response)
@api.route('/end_session', methods=["GET", "POST"])
def end_session():
    """
    Terminate an active session identified by its session id.

    :param session_id: id of the session to remove from core.sessions
    :return: JSON success/error response
    """
    log.info(":API:/api/end_session")
    request_data = request.get_json() if request.is_json else request.form
    response = {"type": None, "data": {}, "text": None}
    try:
        session_id = request_data["session_id"]
    except KeyError:
        response["type"] = "error"
        response["text"] = "Couldn't find session id in request data"
        return tools.return_json(response)
    if session_id in core.sessions.keys():
        log.info(":{0}:Ending session".format(session_id))
        del core.sessions[session_id]
        response["type"] = "success"
        response["text"] = "Ended session"
    else:
        response["type"] = "error"
        response["text"] = "Session id {0} wasn't found in core.sessions".format(session_id)
    return tools.return_json(response)
@api.route('/check_session', methods=["GET", "POST"])
def check_session():
    """
    Report whether a session id refers to an active session.

    :param: session_id
    :return: JSON response with ``data.valid`` set accordingly
    """
    log.info(":API:/api/check_session")
    request_data = request.get_json() if request.is_json else request.form
    response = {"type": None, "text": None, "data": {}}
    try:
        session_id = request_data["session_id"]
    except KeyError:
        response["type"] = "error"
        response["text"] = "Couldn't find session_id in request data"
        response["data"].update({"valid": False})
        return tools.return_json(response)
    session_valid = session_id in core.sessions.keys()
    response["data"].update({"valid": session_valid})
    response["type"] = "success"
    if not tools.check_string(session_id):
        response["type"] = "error"
        response["text"] = "Invalid input"
    elif session_valid:
        response["text"] = "Session id {0} is valid".format(session_id)
    else:
        response["text"] = "Session id {0} is invalid".format(session_id)
    return tools.return_json(response)
@api.route('/respond', methods=["GET", "POST"])
def command_response():
    """
    Api path for responding to a command question (JSON requests only).

    :param session_id: id of an active session in core.sessions
    :param command_id: id of the queued command awaiting a response
    :param value: the user's answer, forwarded to the command's callback
    :return: JSON response produced by the callback, or an error response
    """
    log.info(":API:/api/respond")
    response = {"type": None, "text": None, "data": {}}
    if request.is_json:
        request_data = request.get_json()
        try:
            log.debug(request_data.keys())
            command_id = request_data["command_id"]
            session_id = request_data["session_id"]
            response_value = request_data["value"]
            #Validate the JSON response object
            if tools.check_string([command_id, session_id]):
                if session_id in core.sessions.keys():
                    session_data = core.sessions[session_id]
                    session_commands = session_data["commands"]
                    # Linear scan for the command object matching command_id.
                    response_command = None
                    for command_obj in session_commands:
                        if command_obj["id"] == command_id:
                            response_command = command_obj
                    if response_command:
                        # NOTE(review): the code below reads `command_obj` (the
                        # last loop variable) rather than `response_command`;
                        # they coincide only when the match is the final
                        # element of the list — confirm intent.
                        if "function" in command_obj.keys() and "event" in command_obj.keys():
                            response_function = command_obj["function"]
                            log.info(":{0}: Executing response function {1} with response {2}".format(
                                command_id, response_function, response_value
                            ))
                            #Execute the response
                            try:
                                response_result = response_function(response_value, command_obj)
                                log.info(":{0}:Successfully executed response, returning {1}".format(
                                    session_id, tools.fold(response_result)
                                ))
                                response = response_result
                            except Exception:
                                # Capture the full traceback for the log and,
                                # for admin users, for the response body.
                                exc_type, exc_value, exc_traceback = sys.exc_info()
                                error_string = repr(traceback.format_exception(exc_type, exc_value,
                                                                               exc_traceback))
                                log.error(error_string)
                                username = session_data["username"]
                                user_table = tools.find_one(username=username)
                                if user_table:
                                    response["type"] = "error"
                                    # Admins see the raw traceback; everyone else a generic message.
                                    if user_table["admin"]:
                                        response["text"] = error_string
                                    else:
                                        response["text"] = "An error has occurred while trying to fetch a response." \
                                                           "Please contact will@willbeddow.com to report the error and " \
                                                           "get more information"
                                else:
                                    log.error("USER {0} NOT FOUND IN DATABASE. WARNING.".format(username))
                                    response["type"] = "error"
                                    response["text"] = "A database error has occurred. Please contact will@willbeddow.com" \
                                                       "to report the error, along with the circumstances under which it" \
                                                       "occurred."
                        else:
                            response["type"] = "error"
                            response["text"] = "Command {0} didn't register for a response or didn't" \
                                               " register the required data for a response.".format(command_id)
                    else:
                        response["type"] = "error"
                        response["text"] = "Couldn't find a command object in session {0} with command id {1}".format(
                            session_id, command_id
                        )
                else:
                    response["type"] = "error"
                    response["text"] = "Invalid session id {0}".format(session_id)
            else:
                response["type"] = "error"
                response["text"] = "Submitted response data {0} failed string validation. Valid characters are {0}".format(
                    tools.valid_chars
                )
        except KeyError:
            response["type"] = "error"
            response["text"] = "command_id, session_id, and JSON response object required"
    else:
        response["type"] = "error"
        response["text"] = "/api/respond requires a JSON request"
    return tools.return_json(response)
@api.route('/command', methods=["GET", "POST"])
def process_command():
    """
    Api path for processing a command within an active session.

    :param command: the natural-language command text
    :param session_id: id of an active session in core.sessions
    :return response object: JSON response from the sessions monitor
    """
    log.info(":API:/api/command")
    response = {"type": None, "data": {}, "text": None}
    if request.is_json:
        request_data = request.get_json()
    else:
        request_data = request.form
    try:
        command = request_data["command"]
        session_id = request_data["session_id"]
        log.debug(":{1}:Processing command {0}".format(command, session_id))
        if session_id in core.sessions.keys():
            # Add the command to the core.sessions command queue
            session_data = core.sessions[session_id]
            log.info(":{1}:Adding command {0} to the command queue".format(command, session_id))
            command_data = tools.create_command_obj(session_id, command)
            # Run the command synchronously via the sessions monitor.
            command_response = core.sessions_monitor.command(
                command_data, core.sessions[session_id], db, add_to_updates_queue=False
            )
            # Track the command both globally (core.commands) and per session.
            if session_id in core.commands.keys():
                core.commands[session_id].append(command_data)
            else:
                core.commands.update({session_id: [command_data]})
            session_data["commands"].append(command_data)
            # NOTE(review): this merges the command object's own keys into
            # core.commands (a mapping of session ids) — looks unintended,
            # confirm against the other core.commands usages above.
            core.commands.update(command_data)
            log.info(":{0}:Returning command response {1}".format(session_id, tools.fold(str(command_response))))
            response = command_response
        else:
            log.info(":{0}:Couldn't find session id in sessions".format(session_id))
            response["type"] = "error"
            response["text"] = "Invalid session id"
    except KeyError:
        log.debug("Couldn't find session id and command in request data")
        response["type"] = "error"
        response["text"] = "Couldn't find session id and command in request data"
    return tools.return_json(response)
"will.beddow@gmail.com"
] | will.beddow@gmail.com |
6d0e72d1116e4ee5ca04da2bbe4cd009826fc6f4 | 95ed61745cdbfd0382cf2227262f6d8db7907e58 | /parkingmanagement/parkingmanagement/wsgi.py | 8b19b12561416545ac94260744762b4cbd188c62 | [
"Apache-2.0"
] | permissive | vivek-at-work/ridecell-code | 4a856028fbb8df4d7cd14e1f860575e17618c008 | 93965a1437197102eca6cf6313ba3dbb4c3f5c3c | refs/heads/main | 2022-12-31T22:16:16.902614 | 2020-10-21T05:42:02 | 2020-10-21T05:42:02 | 305,413,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | """
WSGI config for parkingmanagement project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the WSGI callable.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'parkingmanagement.settings')
# `application` is the module-level WSGI entry point servers look for.
application = get_wsgi_application()
| [
"developer.viveksrivastava@gmail.com"
] | developer.viveksrivastava@gmail.com |
937b41f61be640a9eb4b28095e4da3318567bcbe | 06c5568d230a5890831240c58a8c000a420083fe | /class 19.py | 99a579f379dc85cbc7eb042c2b558d5b9cc53266 | [] | no_license | Rasel31/pythonist-dp01 | 3b97fa4a8bbd8f7a1e8d94a1984f6562d2c82fce | 38a4f69aa4ab5143d272eb840c83ce9eace65d19 | refs/heads/master | 2021-01-23T01:56:03.978317 | 2017-05-18T09:53:49 | 2017-05-18T09:53:49 | 85,946,857 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | a = int(input("Enter First Number (a): "))
b = int(input("Enter Second Number (b): "))
c = input('WHAT DO YOU WANT TO DO? ')
ev = eval(c)
print('''a = {}
b = {}
{} = {}'''.format(a, b, c, ev))
| [
"noreply@github.com"
] | Rasel31.noreply@github.com |
b0cc6d8487a45dd93c94121fc670385c13004fbd | 804a434e72f1a8f022797d849f2e4acac40ab8da | /quizmo/urls.py | a24b4ec88d13ba824072b23697efdfc04c067f46 | [] | no_license | yash112-lang/Quizmo | e8b834140ad35676f3439bba4971abc051d99cad | f2ef54c80308cf85ffebaa4e32743c49f824eac8 | refs/heads/master | 2022-12-14T14:18:14.188857 | 2020-09-01T12:57:37 | 2020-09-01T12:57:37 | 291,997,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 966 | py | """quizmo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from . import main
# URL routes: admin site, registration page, and quiz-data submission.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('register/', main.index),
    path('data', main.submit),
]
# Serve static files via Django's staticfiles helper (development use).
urlpatterns+=staticfiles_urlpatterns()
| [
"noreply@github.com"
] | yash112-lang.noreply@github.com |
74def67fd1057a11170352f86723a83b5b99812e | 76902fbfb5bdb3357e09a3f47ca397b6abc95eef | /setup.py | 4b71231e84420134af978f0e03da4d8621961f3d | [
"MIT"
] | permissive | thebinary/jinjaStore | 164bf2bb1e26a9b93227b7a1b54090ca948f5c32 | fdb76fd50953491fe11541ad704c62fe15dc8cf0 | refs/heads/master | 2021-01-20T20:27:46.986334 | 2016-06-03T05:49:06 | 2016-06-03T05:49:06 | 60,267,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | from setuptools import setup
setup(name = 'jinjaStore',
version = "0.1",
description = "Jinja Store for Templating",
author = "The Binary",
author_email = "binary4bytes@gmail.com",
url = "https://github.com/thebinary/jinjaStore",
packages = ['jinjaStore'],
package_data = {
'jinjaStore': [ '*.jinja2' ]
},
install_requires = ['jinja2'],
long_description = "Maintain Store of Jinja Templates for rendering",
)
| [
"binary4bytes@gmail.com"
] | binary4bytes@gmail.com |
a4cd7cb5995c192f21fa2784925e7f68e210743e | 69eb6e69a5acaf13d8570e0fe099c209c55d4ee1 | /listings/migrations/0001_initial.py | 9d15de2f01ae5165cda90f077bb3842385f89615 | [] | no_license | mudittripathi/real_estate | 9c4c9dd07d2c69f5931d96c1da76dbd0940b13d9 | e08afce7fe8cf630bccee212614058e3385e9aa1 | refs/heads/master | 2022-08-24T14:08:12.879793 | 2020-05-27T12:51:21 | 2020-05-27T12:51:21 | 264,178,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,160 | py | # Generated by Django 3.0.6 on 2020-05-15 12:16
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations); schema changes should be
    # made through new migrations rather than by editing this one.

    initial = True

    dependencies = [
        ('realtors', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Listing',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=500)),
                ('address', models.CharField(max_length=1500)),
                ('city', models.CharField(max_length=50)),
                ('state', models.CharField(max_length=50)),
                ('zipcode', models.CharField(max_length=20)),
                ('description', models.TextField(blank=True)),
                ('price', models.IntegerField()),
                ('bedrooms', models.IntegerField()),
                ('bathrooms', models.DecimalField(decimal_places=1, max_digits=2)),
                ('garage', models.IntegerField(default=0)),
                ('sqft', models.IntegerField()),
                ('lot_size', models.DecimalField(decimal_places=1, max_digits=5)),
                ('photo_main', models.ImageField(upload_to='photos/%Y/%m/%d')),
                ('photo_1', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d')),
                ('photo_2', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d')),
                ('photo_3', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d')),
                ('photo_4', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d')),
                ('photo_5', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d')),
                ('photo_6', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d')),
                ('is_published', models.BooleanField(default=True)),
                ('list_date', models.DateTimeField(blank=True, default=datetime.datetime.now)),
                ('realtor', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='realtors.Realtor')),
            ],
        ),
    ]
| [
"mt2213@gmail.com"
] | mt2213@gmail.com |
99c69377a3240c2d0699ec19bbd7b8ab3d0cb518 | ef14a4ff45a8f1f5220f79c36ffc0db317e4d5aa | /Bootcamp.py | edb9e11973c244a8d88d914fcf400a36954d9270 | [] | no_license | vijays4404/pythontutorial | 9ef5874c1875f0980790b83fe33fecde6aec8041 | 19f9c0b91077328b07ca80459da2ffd7d9a32a33 | refs/heads/main | 2023-07-27T00:03:55.297277 | 2021-09-13T15:04:50 | 2021-09-13T15:04:50 | 406,016,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,132 | py | my_age=30
my_name="Vijay"
print("Hello",my_age)
print("Hello",my_name)
import sys
print(sys.maxsize)
print(sys.float_info.max)
print("Cast",type(int(5.5)))
print("Cast 2",type(str(5.43)))
print("Cast 3",type(chr(97)))
print("Cast 4",type(ord("a")))
print("Cast 5",type(float(2)))
num_1="1"
num_2="2"
print("1+2=",(int(num_1)+int(num_2)))
num_1=input("Enter first Numbers :")
num_2=input("Enter Second Number")
num_1=int(num_1)
num_2=int(num_2)
sum_1=num_1+num_2
difference=num_1-num_2
product=num_1*num_2
quotient=num_1/num_2
remainder=num_1 % num_2
print("{} + {}={}".format(num_1,num_2,sum_1))
print("{} - {}={}".format(num_1,num_2,difference))
print("{} * {}={}".format(num_1,num_2,product))
print("{} / {}={}".format(num_1,num_2,quotient))
print("{} % {}={}".format(num_1,num_2,remainder))
miles=int(input("Enter Miles"))
Kilometers=miles * 1.60934
print("{} miles equal {} kilometers".format(miles,Kilometers))
import math
print("ceil(4.4)=",math.ceil(4.4))
print("floor(4.4)=",math.floor(4.4))
print("fabs(-4.4)=",math.fabs(-4.4))
print("factorial(4)=",math.factorial(4))
print("fmod(5,4)=",math.fmod(5,4))
print("trunc(4.2)=",math.trunc(4.2))
print("pow(2,2)=",math.pow(2,2))
drink=input("Pick One(Coke or Pepsi:")if drink=="Coke":
print("Here is your Coke")
elif drink=="Pepsi":
print("Here is your pepsi")
else:
print("Here is your water")
num_1,operator,num_2=input("Enter Calculation:").split()
num_1=int(num_1)
num_2=int(num_2)
if operator=="+":
print("{} + {} = {}".format(num_1,num_2,num_1+num_2))
elif operator=="-":
print("{}-{}={}".format(num_1,num_2,num_1-num_2))
elif operator=="*":
print("{}*{}={}".format(num_1,num_2,num_1*num_2))
elif operator=="/":
print("{}/{}={}".format(num_1,num_2,num_1/num_2))
age=int(input("Enter Age: "))
if (age>=1) and (age<=18):
print("Important Birthday")
elif(age==21) or (age==50):
print("Important Birthday")
elif not age<65:
print("Important Birthday")
else:
print("Sorry Not IMportant Birthday")
age=int(input("Enter Age: "))
if age==5:
print("Go to Kindergarten")
elif age>5 and age<=17:
print("Go to Grade 6")
else:
print("Go to College")
age=int(input("What is your age?"))
can_vote=True if age>=18 else False
print("You can vote:",can_vote)
for i in [2,4,6,8,10]:
print("i=",i)
for i in range(2,7):
print("i=",i)
your_float=input("Enter a float: ")
your_float=float(your_float)
print("Rounded to 2 Decimals: {:.2f}".format(your_float))
investment=float(input("Enter your investment amount: "))
interest=10
for i in range(1,11):
investment=investment+(investment*interest*i)/100
print(investment)
money=input("How much to invest:")
interest=input("Interest Rate:")
money=float(money)
interest=float(interest)*.01
for i in range(10):
money=money+(money*interest*i)
print("Investment after 10 years: ${:.2f}".format(money))
import random
rand_num=random.randrange(1,51)
i=1
while i!=rand_num:
i+=1
print("The random value is :",rand_num)
i=1
while i<=20:
if (i%2)==0:
i+=1
continue
if i==15:
break
print("Odd:",i)
i+=1
r="#"
for i in range(1,6):
r+=str(i)
print(r)
for i in range(1,6):
r.append("#")
print(r)
while True:
try:
number=int(input("Please enter a number:"))
break
except ValueError:
print("You didn't enter a number")
except:
print("An unknown error occurred")
print("Thank you for entering a number")
secret_number=7
while True:
Guess=int(input("Enter a number:"))
if Guess==secret_number:
print("You guess it right")
break
samp_string="This is a very important string"
print("Length :", len(samp_string))
print(samp_string[0])
print(samp_string[-1])
print(samp_string[0:4])
print(samp_string[8:])
print("Every Other", samp_string[0:-1:2])
print("Green","Eggs")
print("Hello"*5)
samp_string="This is a very important string"
for char in samp_string:
print(char)
samp_string="This is a very important string"
for i in range(0,len(samp_string)-1,2):
print(samp_string[i]+samp_string[i+1])
samp_string="This is a very imporant string"
print("A=",ord("A"))
norm_string=input("Enter a string to hide in uppercase:")
secret_string=""
for char in norm_string:
secret_string+=str(ord(char))
print("Secret Message:",secret_string)
norm_string=""
for i in range(0,len(secret_string)-1,2):
char_code=secret_string[i]+secret_string[i+1]
norm_string+=chr(int(char_code))
print("Original Message:",norm_string)
rand_string=" this is an important string "
rand_string=rand_string.lstrip()
rand_string=rand_string.rstrip()
print(rand_string)
rand_string=" this is an important string "
rand_string=rand_string.strip()
print(rand_string)
rand_string=" this is an important string "
print(rand_string.strip().upper().lower().capitalize())
rand_string="This is an important string"
print("Where is:",rand_string.replace(" an"," a kind of"))
a_list=["Bunch","of","random","words"]
print(",".join(a_list))
orig_string=input("Convert fo Acronym:")
orig_string=orig_string.upper()
list_of_words=orig_string.split()
for word in list_of_words:
print(word[0],end="")
letter_z="z"
print("Is z a letter or number:",letter_z.islower())
print("Is z a letter or number:",letter_z.isdigit())
print("Is z a letter or number:",letter_z.isupper())
print("Is z a letter or number:",letter_z.isnumeric())
while True:
message=input("Enter you message:")
key=int(input("how many characters should we shift (1-26):"))
secret_message=""
for char in message:
if char.isalpha():
char_code=ord(char)
char_code+=key
if char.isupper():
if char_code>ord("Z"):
char_code-=26
elif char_code<ord("A"):
char_code+=26
else:
if char_code>ord("z"):
char_code-=26
elif char_code<ord("a"):
char_code+=26
secret_message+=chr(char_code)
else:
secret_message+=char
print("Encrpyted:",secret_message)
key=-key
original_message=""
for char in secret_message:
if char.isalpha():
char_code=ord(char)
char_code+=key
if char.isupper():
if char_code>ord("Z"):
char_code-=26
elif char_code<ord("A"):
char_code+=26
else:
if char_code>ord("z"):
char_code-=26
elif char_code<ord("a"):
char_code+=26
original_message+=chr(char_code)
else:
original_message+=char
print("Decrpyted:",original_message)
def add_numbers(num_1,num_2):
return num_1+num_2
print("5+4=",add_numbers(5,6))
def assign_name(name):
name="Mark"
name="Tom"
change_name(name)
print(name)
def is_float(str_val):
try:
float(str_val)
return True
except valueError:
return False
pi=3.14
print("Is Pi a float:",is_float(pi))
def solve_eq(equation):
x,add,num_1,equal,num_2=equation.split()
num_1,num_2=int(num_1),int(num_2)
return "x= "+str(num_2-num_1)
print(solve_eq("x + 7 = 9"))
def mult_divide(num_1,num_2):
return (num_1*num_2),(num_1/num_2)
mult,divide=mult_divide(5,6)
print("5*4=",mult)
print("5/4=",divide)
def is_prime(num):
for i in range(2,num):
if(num%i)==0:
return False
return True
print(is_prime(6))
def get_primes(max_number):
list_of_primes=[]
for num_1 in range(2,)
def sum_all(*args):
sum_1=0
for i in args:
sum_1+=i
return sum_1
print("Sum:",sum_all(1,2,3,4))
import math
def get_area(shape):
shape=shape.lower()
if shape=="rectangle":
rectangle_area()
elif shape=="circle":
circle_area()
else:
print("Please enter rectangle or circle")
def rectangle_area():
length=float(input("Enter the length"))
width=float(input("Enter the width"))
area=length*width
print("The area of the rectangle is",area)
def circle_area():
radius=float(input("Enter the radius:"))
area=math.pi*(math.pow(radius,2))
print("The area of the cirlce is{:.2f}".format(area))
def main():
shape_type=input("Get area for what shape:")
get_area(shape_type)
main()
def is_prime():
value=int(input("Enter the number:"))
for i in range(2,value):
if(value%i)==0:
return False
return True
print(is_prime())
rand_list=["string",1.234,28]
one_to_ten=list(range(11))
rand_list=rand_list+one_to_ten
print(rand_list[0])
print("List length",len(rand_list))
first_3=rand_list[0:3]
print(first_3)
for i in first_3:
print("{}:{}".format(first_3.index(i),i))
print(first_3[0]*3)
print("string" in first_3)
print("Index of string",first_3.index("string"))
import random
num_list=[]
for i in range(5):
num_list.append(random.randrange(1,9))
i=len(num_list)-1
while i>1:
j=0
while j<i:
print("\nIs {}>{}".format(num_list[j])
my_list=[5,2,9,1]
total=0
for i in my_list:
if i==2:
total+=1
print(total)
import math
even_list=[i*2 for i in range(10)]
for k in even_list:
print(k,end=",")
print()
import math
num_list=[1,2,3,4,5]
list_of_values=[[math.pow(m,2),math.pow(m,3),math.pow(m,4)] for m in num_list]
for k in list_of_values:
print(k)
print()
mult_d_list=[[0]*10 for i in range(10)]
print(mult_d_list)
for i in range(10):
for j in range(10):
mult_d_list[i][j]="{}:{}".format(i,j)
print(mult_d_list)
for i in range(10):
for j in range(10):
print(mult_d_list[i][j],end="||")
print()
mult_table=[[0]*10 for i in range(10)]
for i in range(1,10):
for j in range(1,10):
mult_table[i][j]=i*j
derek_dict={"f_name":"Derek","l_name":"Banas","address":"123 Main St"}
print(derek_dict)
print("My Name:",derek_dict["f_name"])
derek_dict["address"]="215 North St"
derek_dict["city"]="Pittsburgh"
print("Is there a city:","city" in derek_dict)
print(derek_dict.values())
print(derek_dict.keys())
derek_dict={"f_name":"Derek","l_name":"Banas","address":"123 Main St"}
for k,v in derek_dict.items():
print(k,v)
derek_dict={"f_name":"Derek","l_name":"Banas","address":"123 Main St"}
print(derek_dict.get("m_name","Not Here"))
del derek_dict["f_name"]
print(derek_dict)
employees=[]
f_name,l_name=input("Enter Employee Name: ").split()
employees.append({"f_name":f_name,"l_name":l_name})
print(employees)
customers=[]
while True:
create_entry=input("Enter Customer(Yes/No):")
create_entry=create_entry[0].lower()
if create_entry=="n":
break
else:
f_name,l_name=input("Enter customer name: :").split()
customers.append({"f_name":f_name,"l_name":l_name})
for cust in customers:
print(cust['f_name'],cust['l_name'])
def factorial(num):
if num<=1:
return 1
else:
result=num*factorial(num-1)
return result
print(factorial(4))
def fib(n):
if n==0:
return 0
elif n==1:
return 1
else:
result=fib(n-1)+fib(n-2)
return result
num_fib_vals=int(input("How many Fibonacci values should be found: "))
i=1
while i<num_fib_vals:
fib_value=fib(i)
print(fib_value)
i+=1
import OSError
with open("mydata.txt",mode="w",encoding="utf=8")
as my_file:
myFile.write("Some random text/nMore random text\nAnd some more")
with open("mydata.txt",encoding="utf-8")
as my_file:
print
class Dog:
def __init__(self,name="",height=0,weight=0):
self.name=name
self.height=height
self.weight=weight
def run(self):
print("{} the dog runs".format(self.name))
def eat(self):
print("{} the dog eats".format(self.name))
def bark(self):
print("{} the dog barks".format(self.name))
def main():
spot=Dog("Spot",66,26)
spot.bark()
main()
class Square:
def __init__(self,height="0",width="0"):
self.height=height
self.width=width
@property
def height(self):
print("Retrieving the height")
return (self.__height)
@height.setter
def height(self,value):
if value.isdigit():
self.__height=value
else:
print("Please only enter numbers for height")
@property
def width(self):
print("Retrieving the width")
return self.__width
@width.setter
def width(self,value):
if value.isdigit():
self.__width=value
else:
print("Please only enter numbers for height")
def get_area(self):
return int(self.__width)* int(self.__height)
def main():
square=Square()
height=input("Enter Height:")
width=input("Enter width:")
square.height=height
square.width=width
print("Height :",square.height)
print("Width:",square.width)
print("The area is:",square.get_area())
main()
import random
import math
class Warrior:
def __init__(self,name="Warrior",health=0,attk_max=0,block_max=0):
self.name=name
self.health=health
self.attk_max=attk_max
self.block_max=block_max
def attack(self):
attk_amt=self.attk_max*(random.random()+.5)
return attk_amt
def block(self):
block_amt=self.block_max*(random.random()+.5)
return block_amt
class Battle:
def start_fight(self,warrior1,warrior2):
while True:
if self.get_attack_result(warrior1,warrior2)=="Game Over":
break
if self.get_attack_result(warrior2,warrior1)=="Game Over":
break
def get_attack_result(self,warriorA,warriorB):
warrior_a_attk_amt=warriorA.attack()
warrior_b_block_amt=warriorB.block()
damage_2_warrior_b=math.ceil(warrior_a_attk_amt-warrior_b_block_amt)
warriorB.health=warriorB.health-damage_2_warrior_b
print("{} attacks{} and deals{} damage".format(warriorA.name,warriorB.name,damage_2_warrior_b))
print("{} is down to {} health".format(warriorB.name,warriorB.health))
if warriorB.health<=0:
print("{} has died and {} is Victorious".format(warriorB.name,warriorA.name))
return "Game Over"
else:
return "Fight Again"
def main():
thor=Warrior("Thor",50,20,10)
loki=Warrior("Loki",50,20,10)
battle=Battle()
battle.start_fight(thor,loki)
main()
class Animal:
def __init__(self,birth_type="Unknown",appearance="Unknown",blooded="Unknown"):
self._birth_type=birth_type
self._appearance=appearance
self._blooded=blooded
@property
def birth_type(self):
return self._birth_type
@birth_type.setter
def birth_type(self,birth_type):
self._birth_type=birth_type
@property
def appearance(self):
return self._appearance
@appearance.setter
def appearance(self,appearance):
self._appearance=appearance
@property
def blooded(self):
return self._blooded
@blooded.setter
def blooded(self,blooded):
self._blooded=blooded
def __str__(self):
return "A {} is {} it is {} it is {}".format(
type(self).__name__,self.birth_type,self.appearance,self.blooded)
class Mammal(Animal):
def __init__(self,birth_type="born alive",appearance="hair or fur",blooded="warm blood",nurse_young=True):
Animal.__init__(self,birth_type,appearance,blooded)
self._nurse_young=nurse_young
@property
def nurse_young(self):
return self._nurse_young
@nurse_young.setter
def nurse_young(self,nurse_young):
self._nurse_young=nurse_young
def __str__(self):
return super().__str__() + " and it is {} they nurse their young".format(self.nurse_young)
class Reptile(Animal):
def __init__(self,birth_type="born in an egg or born alive",appearance="dry scales",blooded="cold blooded"):
Animal.__init__(self,birth_type,appearance,blooded)
def main():
animal1=Animal("born alive")
print(animal1.birth_type)
print(animal1)
mammal1=Mammal()
print(mammal1)
print(mammal1.birth_type)
print(mammal1.appearance)
print(mammal1.blooded)
print(mammal1.nurse_young)
print()
main()
class Time:
def __init__(self,hour=0,minute=0,second=0):
self.hour=hour
self.minute=minute
self.second=second
def __str__(self):
return "{}:{:02d}:{:02d}".format(self.hour,self.minute,self.second)
def __add__(self,other_time):
new_time=Time()
if(self.second+other_time.second)>=60:
self.minute+=1
new_time.second=(self.second+other_time.second)-60
else:
new_time.second=self.second+other_time.second
if(self.minute+other_time.minute)>=60:
self.hour+=1
new_time.minute=(self.minute+other_time.minute)-60
else:
new_time.minute=self.minute+other_time.minute
if(self.hour+other_time.hour)>=24:
new_time.hour=(self.hour+other_time.hour)-24
else:
new_time.hour=self.hour+other_time.hour
return new_time
def main():
time1=Time(1,20,30)
print(time1)
time2=Time(24,41,30)
print(time1+time2)
main()
class Sum:
@staticmethod
def get_sum(*args):
sum_1=0
for i in args:
sum_1+=i
return sum_1
def main():
print ("Sum :",Sum.get_sum(1,2,3,4,5))
main()
class Dog:
num_of_dogs=0
def __init__(self,name="Unknown"):
self.name=name
Dog.num_of_dogs+=1
@staticmethod
def get_num_of_dogs():
print("There are currently {} dogs".format(Dog.num_of_dogs))
def main():
spot=Dog("Spot")
doug=Dog("Doug")
spot.get_num_of_dogs()
main()
class DogNameError(Exception):
def __init__(self,*args,**kwargs):
Exception.__init__(self,*args,**kwargs)
try:
dog_name=input("What is your dogs name: ")
if any(char.isdigit() for char in dog_name):
raise DogNameError
except DogNameError:
print("Your dogs name can't contain a number")
num1,num2=input("Enter two values to divide:").split()
try:
quotient=int(num1)/int(num2)
print("{}/{}={}".format(num1,num2,quotient))
try:
my_file=open("mydata3.txt",encoding="utf-8")
except FileNotFoundError as ex:
print("That file was not found")
print(ex.args)
else:
print("File :",my_file.read())
my_file.close()
finally:
print("Finished Working with File")
def mult_by_2(num):
return num*2
time_two=mult_by_2
print("4*2=",time_two(4)
)
def do_math(func,num):
return func(num)
print("8*2=",do_math(mult_by_2,2))
time_two=mult_by_2
print("4*2=",time_two(4)
)
def get_func_mult_by_num(num):
def mult_by(value):
return num*value
return mult_by
generated_func=get_func_mult_by_num(5)
print("5*9=",generated_func(9))
list_of_func=[time_two,generated_func]
print("5*9=",list_of_funcs[1][9])
def is_it_odd(num):
if num%2==0:
return False
else:
return True
def mult_by_2(num):
return num*2
time_two=mult_by_2
print("4*2=",time_two(4))
def do_math(func,num):
return func(num)
print("8*2=",do_math(mult_by_2,8))
def get_func_mult_by_num(num):
def mult_by(value):
return num*value
return mult_by
generated_func=get_func_mult_by_num(5)
print("5*9=",generated_func(9))
list_of_funcs=[time_two,generated_func]
print("5*9=",list_of_funcs[1](9))
def is_it_odd(num):
if num%2==0:
return False
else:
return True
def change_list(list,func):
odd_list=[]
for i in list:
if func(i):
odd_list.append(i)
return odd_list
a_list=range(1,20)
print(change_list(a_list,is_it_odd))
def random_func(name:str,age:int,weight:float)->str:
print("Name:",name)
print("Age:",age)
print("Weight:",weight)
return "{} is {} years old and weighs {}".format(name,age,weight)
print(random_func("Derek",41,165.1))
sum_1=lambda x,y:x+y
print("Sum:",sum_1(4,5)
can_vote=lambda age: True if age>=18 else False
print("Can Vote:",can_vote(16))
power_list=[lambda x: x**2,
lambda x:x**3,
lambda x:x**4
]
for func in power_list:
print(func(4))
attack={'quick':(lambda:print("Quick Attack")),
'power':(lambda:print("PowerAttack")),
"miss":(lambda:print("The Attack Missed"))
}
attack['quick']()
import random
attack_key=random.choice(list(attack.keys()))
attack[attack_key]()
import random
flip_list=[]
for i in range(1,101):
flip_list+=random.choice(['H','T'])
print("Head:",flip_list.count('H'))
print("Tails:",flip_list.count('T'))
one_to_10=range(1,11)
def dbl_num(num):
return num*2
print(list(map(dbl_num,one_to_10)))
print(list(map((lambda x:x*3),one_to_10)))
a_list=list(map((lambda x,y:x+y),[1,2,3],[1,2,3]))
print(a_list)
import random
rand_list=list(random.randint(1,1001) for i in range(100))
print(rand_list)
print(list(filter((lambda x:x%9==0),rand_list)))
class Alphabet:
def __init__(self):
self.letters="ABCDEFGHIJKLMNOPQRSTUVWXYZ"
self.index=-1
def __iter__(self):
return self
def __next__(self):
if self.index>=len(self.letters)-1:
raise StopIteration
self.index+=1
return self.letters[self.index]
alpha=Alphabet()
for letter in alpha:
print(letter)
derek={"f_name":"Derek","l_name":"Banas"}
for key in derek:
print(key,derek[key])
class Fib_Generator:
def __init__(self):
self.first=0
self.second=1
def __iter__(self):
return self
def __next__(self):
fib_num=self.first+self.second
self.first=self.second
self.second=fib_num
return fib_num
fib_seq=Fib_Generator()
for i in range(10):
print("Fib:",next(fib_seq))
print(list(map((lambda x:x*2),range(1,11))))
print([2*x for x in range(1,11)])
print(list(filter((lambda x: x%2!=0),range(1,11))))
print([x for x in range(1,11) if x%2!=0])
print([i**2 for i in range(50) if i%8==0])
print([x*y for x in range(1,3) for y in range(11,16)])
print([x for x in [i*2 for i in range(10)] if x%8==0])
import random
print([x for x in [random.randint(1,1001) for i in range(50)] if x%9==0])
multi_list=[[1,2,3],[4,5,6],[7,8,9]]
print([col[1] for col in multi_list])
print([multi_list[i][i] for i in range(len(multi_list))])
def is_prime(num):
for i in range(2,num):
if num%i==0:
return False
return True
def gen_prime(max_number):
for num1 in range(2,max_number):
if is_prime(num1):
yield num1
prime=gen_prime(50)
print("Prime:",next(prime))
print("Prime:",next(prime))
print("Prime:",next(prime))
print("Prime:",next(prime))
double=(x*2 for x in range(10))
print("Double:",next(double))
print("Double:",next(double))
for num in double:
print(num)
import threading
import time
import random
def execute_thread(i):
print("Thread {} sleeps at {}".format(i,time.strftime("%H:%M:%S",time.gmtime())))
rand_sleep_time=random.randint(1,4)
time.sleep(rand_sleep_time)
print("Thread {} stops sleeping at {}".format(i,time.strftime("%H:%M:%S",time.gntime())))
for i in range(10):
thread=threading.Thread(
target=execute_thread,args=(i,))
thread.start()
print("Active Threds:",threading.activeCount())
print("Thread Objects:",threading.enumerate())
import threading
import time
import random
import re
all_apes=re.findall("ape","The ape was at the apex")
for i in all_apes:
print(i)
import re
the_str="The ape was at the apex"
for i in re.finditer("ape.",the_str):
loc_tuple=i.span()
print(loc_tuple)
print(the_str[loc_tuple[0]:loc_tuple[1]])
import re
animal_str="Cat rat mat fat pat"
some_animals=re.findall("[crmfp]at",animal_str)
for i in some_animals:
print(i)
import re
animal_str="Cat rat mat fat pat"
some_animals=re.findall("[c-mC-M]at",
animal_str)
for i in some_animals:
print(i)
import re
owl_food="rat cat mat pat"
regex=re.compile("[cr]at")
owl_food=regex.sub("owl",owl_food)
print(owl_food)
import re
rand_str="Here is \\stuff"
print("Find \\stuff:",re.search("\\stuff",rand_str))
import re
rand_str="F.B.I. I.R.S. CIA"
print("Matches :",len(re.findall(".\..\..",rand_str)))
import re
rand_str="""This is a long string that goes on for many lines"""
print(rand_str)
regex=re.compile("\n")
rand_str=regex.sub(" ",rand_str)
print(rand_str)
import re
rand_str="12345"
if re.search("\d{5}",rand_str):
print("It is a zip code")
import re
rand_str="123 12345 123456 1234567"
print("MatchesZ:",len(re.findall("\d{5,7}",rand_str)))
import re
ph.num="412-555-1212"
if re.search("w{2,20}"\s\w{2,20}","Toshio Muramatsu"):
print("It is a valid name")
import re
print("Matches:",len(re.findall("a+","a as ape bug")))
email_list="db@aol.com m@"
import re
rand_str="cat cats"
regex=re.compile("[cat]+s?")
matches=re.findall(regex,rand_str)
for i in matches:
print(i)
import re
rand_str="doctor doctors doctor's"
regex=re.compile("[doctor]+['s]*")
matches=re.findall(regex,rand_str)
print("Matches:",len(matches))
import re
long_str="""Just some words and some more and more"""
print("Matches:",len(re.findall(r"[\w\s]+[\r]?\n",long_str)))
matches=re.findall("[\w\s]+\r]?\n",long_str)
for i in matches:
print(i)
import re
rand_str="<name>Life On Mars</name><name>Freaks and Geeks</name>"
regex=re.compile(r"<name>.*?</name>")
matches=re.findall(regex,rand_str)
print("Matches:",len(matches))
for i in matches:
print(i)
import re
rand_str="ape at the apex"
regex=re.compile(r"ape")
regex_2=re.compile(r"\bape\b")
matches=re.findall(regex,rand_str)
matches_2=re.findall(regex_2,rand_str)
print("Matches 1:",len(matches))
print("Matches 2:",len(matches_2))
import re
rand_str="cat cats"
regex=re.compile("[cat]+s?")
matches=re.findall(regex,rand_str)
for i in matches:
print(i)
import re
rand_str="doctor doctors doctor's"
regex=re.compile("doctor]+['s]*")
matches=re.findall(regex,rand_str)
print("Matches:",len(matches))
import re
long_str="""Just some words and some mor and more"""
print("Matches:",len(re.findall(r"[\w\s"]+[\r]?\n",long_str)))
matches=re.findall("[")
import re
rand_str="<name>Life on Mars</name><name>Freaks and Geeks</name>"
regex=re.compile(r"<name>.*?</name>")
matches=re.findall(regex,rand_str)
print("Matches:",len(matches))
for i in matches:
print(i)
import re
rand_str="ape at the apex"
regex=re.compile(r"ape")
regex_2=re.compile(r"\bape\b")
matches=re.findall(regex,rand_str)
matches_2=re.findall(regex_2,rand_str)
print("Matches 1:",len(matches))
print("Matches 2:",len(matches_2))
import re
rand_str="Matches everything up to @"
regex=re.compile(r"^.*[^@]")
rand_str="""Ape is big
Turtle is slow
Cheetah is fast"""
regex=re.compile(r"(?m)^.*?\s")
matches=re.findall(regex,rand_str)
print("Matches:",len(matches))
for i in matches:
print(i)
import re
rand_str="My number is 412-555-1212"
regex=re.compile(r"412-(.*)")
matches=re.findall(regex,rand_str)
print("Matches:",len(matches))
for i in matches:
print(i)
import re
rand_str="412-555-1212 412-555-1213 412-555-1214"
regex=re.compile(r"412-(.*)")
matches=re.findall(regex,rand_str)
print("Matches :",len(matches))
for i in matches:
print(i)
import re
rand_str="The cat cat fell out the window"
regex=re.compile(r"(\b\w+)\s+\1")
matches=re.findall(regex,rand_str)
print("Matches:",len(matches))
for i in matches:
print(i)
| [
"noreply@github.com"
] | vijays4404.noreply@github.com |
b2d2a98c8344bcd53ce98c7f0bea3a343a1d049a | 774efbb47121ba4e2828c6a695048bd47f5673fe | /mysite/env/bin/easy_install-3.7 | 00f0a629e877f5e4e14576077473f873681ceabc | [] | no_license | Kpres/WagTail-Tutorials | 10b19b16fc525f1e254d7c240e0e4835a2d0340e | 36fe9372ce6440d701fb8765f534d345702c363f | refs/heads/master | 2022-05-30T01:25:58.881841 | 2020-04-24T00:52:53 | 2020-04-24T00:52:53 | 258,367,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | 7 | #!/Users/kevinpresing/Projects/tutorial/mysite/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"kevinfpresing@gmail.com"
] | kevinfpresing@gmail.com |
e70ee55f56de6c91d224092d8631974fb8e1a81b | b72299fffadb6db223d8ae5f29fdf4a9ac425802 | /cnngenerate.py | 60ee9874948f61cf3a1b62d3cb8623e66f388b3b | [
"MIT"
] | permissive | arjunjamwal/CarplateRecognitionSystem | 6887d86e22e1b2dd4b26c24b4f0567b1b928b9f7 | 5512b80407a3da0551efa353d070cb500d71b4d5 | refs/heads/master | 2021-06-14T17:11:32.705440 | 2017-03-19T12:33:07 | 2017-03-19T12:33:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,768 | py | import caffe
from caffe import layers as L, params as P
def lenetData(lmdb_train, lmdb_test, batch_size_train, batch_size_test):
n = caffe.NetSpec()
temp_n = caffe.NetSpec()
temp_n.data, temp_n.label = L.Data(batch_size=batch_size_train, backend=P.Data.LMDB, source=lmdb_train,
transform_param=dict(scale=1./255), ntop=2, include=dict(phase=caffe.TRAIN))
n.data, n.label = L.Data(batch_size=batch_size_test, backend=P.Data.LMDB, source=lmdb_test,
transform_param=dict(scale=1./255), ntop=2, include=dict(phase=caffe.TEST))
return n, temp_n
def conv(input_blob, kernel_size, num_output):
return L.Convolution(input_blob, kernel_size=kernel_size, num_output=num_output, weight_filler=dict(type='xavier'),
bias_filler=dict(type='constant'), param=[dict(lr_mult=1), dict(lr_mult=2)])
def pooling(input_blob, kernel_size, stride, method=P.Pooling.MAX):
return L.Pooling(input_blob, kernel_size=kernel_size, stride=stride, pool=method)
def ip(input_blob, num_output):
return L.InnerProduct(input_blob, num_output=num_output, weight_filler=dict(type='xavier'),
bias_filler=dict(type='constant'), param=[dict(lr_mult=1), dict(lr_mult=2)])
def relu(input_blob):
return L.ReLU(input_blob, in_place=True)
def accuracy(input_blob, label_blob):
return L.Accuracy(input_blob, label_blob, include=dict(phase=caffe.TEST))
def loss(input_blob, label_blob):
return L.SoftmaxWithLoss(input_blob, label_blob)
def lenetV2(*args):
if len(args) != 4:
raise Exception('wrong param')
n, temp_n = lenetData(*args)
n.conv1 = conv(n.data, 3, 30)
n.conv2 = conv(n.conv1, 3, 60)
n.pool2 = pooling(n.conv2, 2, 2)
n.conv3 = conv(n.pool2, 3, 90)
n.pool3 = pooling(n.conv3, 2, 2)
n.conv4 = conv(n.pool3, 3, 120)
n.ip1 = ip(n.conv4,750)
n.relu1 = relu(n.ip1)
n.ip2 = ip(n.relu1,34)
n.accuracy = accuracy(n.ip2, n.label)
n.loss = loss(n.ip2, n.label)
return str(temp_n.to_proto())+str(n.to_proto())
def inputLenetV2():
n = caffe.NetSpec()
n.data = L.Input(input_param=dict(shape=[dict(dim=[1,1,32,32])]))
n.conv1 = conv(n.data, 3, 30)
n.conv2 = conv(n.conv1, 3, 60)
n.pool2 = pooling(n.conv2, 2, 2)
n.conv3 = conv(n.pool2, 3, 90)
n.pool3 = pooling(n.conv3, 2, 2)
n.conv4 = conv(n.pool3, 3, 120)
n.ip1 = ip(n.conv4, 750)
n.relu1 = relu(n.ip1)
n.ip2 = ip(n.relu1, 34)
n.prob = L.Softmax(n.ip2)
return str(n.to_proto())
def lenet(lmdb_train, lmdb_test, batch_size_train, batch_size_test):
n = caffe.NetSpec()
temp_n = caffe.NetSpec()
temp_n.data, temp_n.label = L.Data(batch_size=batch_size_train, backend=P.Data.LMDB, source=lmdb_train,
transform_param=dict(scale=1./255), ntop=2, include=dict(phase=caffe.TRAIN))
n.data, n.label = L.Data(batch_size=batch_size_test, backend=P.Data.LMDB, source=lmdb_test,
transform_param=dict(scale=1./255), ntop=2, include=dict(phase=caffe.TEST))
n.conv1 = L.Convolution(n.data, kernel_size=5, num_output=30, weight_filler=dict(type='xavier'),
bias_filler=dict(type='constant'), param=[dict(lr_mult=1), dict(lr_mult=2)])
n.pool1 = L.Pooling(n.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
n.conv2 = L.Convolution(n.pool1, kernel_size=5, num_output=70, weight_filler=dict(type='xavier'),
bias_filler=dict(type='constant'), param=[dict(lr_mult=1), dict(lr_mult=2)])
n.pool2 = L.Pooling(n.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
n.conv3 = L.Convolution(n.pool2, kernel_size=5, num_output=100, weight_filler=dict(type='xavier'),
bias_filler=dict(type='constant'), param=[dict(lr_mult=1), dict(lr_mult=2)])
n.ip1 = L.InnerProduct(n.conv3, num_output=750, weight_filler=dict(type='xavier'),
bias_filler=dict(type='constant'), param=[dict(lr_mult=1), dict(lr_mult=2)])
n.relu1 = L.ReLU(n.ip1, in_place=True)
n.ip2 = L.InnerProduct(n.relu1, num_output=34, weight_filler=dict(type='xavier'), bias_filler=dict(type='constant'),
param=[dict(lr_mult=1), dict(lr_mult=2)])
n.accuracy = L.Accuracy(n.ip2, n.label, include=dict(phase=caffe.TEST))
n.loss = L.SoftmaxWithLoss(n.ip2, n.label)
return str(temp_n.to_proto())+str(n.to_proto())
def inputLenet():
n = caffe.NetSpec()
n.data = L.Input(input_param=dict(shape=[dict(dim=[1, 1, 28, 28])]))
n.conv1 = L.Convolution(n.data, kernel_size=5, num_output=30, weight_filler=dict(type='xavier'),
bias_filler=dict(type='constant'), param=[dict(lr_mult=1), dict(lr_mult=2)])
n.pool1 = L.Pooling(n.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
n.conv2 = L.Convolution(n.pool1, kernel_size=5, num_output=70, weight_filler=dict(type='xavier'),
bias_filler=dict(type='constant'), param=[dict(lr_mult=1), dict(lr_mult=2)])
n.pool2 = L.Pooling(n.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
n.conv3 = L.Convolution(n.pool2, kernel_size=5, num_output=100, weight_filler=dict(type='xavier'),
bias_filler=dict(type='constant'), param=[dict(lr_mult=1), dict(lr_mult=2)])
n.ip1 = L.InnerProduct(n.conv3, num_output=750, weight_filler=dict(type='xavier'),
bias_filler=dict(type='constant'), param=[dict(lr_mult=1), dict(lr_mult=2)])
n.relu1 = L.ReLU(n.ip1, in_place=True)
n.ip2 = L.InnerProduct(n.relu1, num_output=34, weight_filler=dict(type='xavier'), bias_filler=dict(type='constant'),
param=[dict(lr_mult=1), dict(lr_mult=2)])
n.prob = L.Softmax(n.ip2)
return str(n.to_proto())
def netv3(*args):
if len(args) != 4:
raise Exception('wrong param')
n, temp_n = lenetData(*args)
n.conv1 = conv(n.data, 3, 60)
n.conv2 = conv(n.conv1, 3, 90)
n.pool2 = pooling(n.conv2, 2, 2)
n.conv3 = conv(n.pool2, 3, 120)
n.pool3 = pooling(n.conv3, 2, 2)
n.conv4 = conv(n.pool3, 3, 150)
n.ip1 = ip(n.conv4,1000)
n.relu1 = relu(n.ip1)
n.ip2 = ip(n.relu1, 500)
n.relu2 = relu(n.ip2)
n.ip3 = ip(n.relu2,34)
n.accuracy = accuracy(n.ip3, n.label)
n.loss = loss(n.ip3, n.label)
return str(temp_n.to_proto())+str(n.to_proto())
def netinputv3():
n = caffe.NetSpec()
n.data = L.Input(input_param=dict(shape=[dict(dim=[1,1,32,32])]))
n.conv1 = conv(n.data, 3, 60)
n.conv2 = conv(n.conv1, 3, 90)
n.pool2 = pooling(n.conv2, 2, 2)
n.conv3 = conv(n.pool2, 3, 120)
n.pool3 = pooling(n.conv3, 2, 2)
n.conv4 = conv(n.pool3, 3, 150)
n.ip1 = ip(n.conv4, 1000)
n.relu1 = relu(n.ip1)
n.ip2 = ip(n.relu1, 500)
n.relu2 = relu(n.ip2)
n.ip3 = ip(n.relu2, 34)
n.prob = L.Softmax(n.ip3)
return str(n.to_proto())
# print dict(type='xavier')
# print str(lenetV2('C:/Users/pc/Desktop/carplate_model/carplate_lmdb_train',
# 'C:\Users\pc\Desktop\carplate_model/carplate_lmdb_test', 64, 100))
#
# with open('C:/Users/pc/Desktop/carplate_model/carplate_train_test_0818.prototxt', 'w') as f:
# f.write(lenetV2('C:/Users/pc/Desktop/carplate_model/carplate_lmdb_train',
# 'C:/Users/pc/Desktop/carplate_model/carplate_lmdb_test', 64, 100))
#
# with open('C:/Users/pc/Desktop/carplate_model/carplate_input_0818.prototxt', 'w') as f:
# f.write(inputLenetV2())
#
#
# f.write(str(lenet('carplate_model/carplate_train_lmdb',64)))
#
# with open('carplate_model/lenet_carplate_test.prototxt','w') as f:
# f.write(str(lenet('carplate_model/carplate_test_lmdb',100)))
with open('model/carplate_train_test_060317.prototxt','w') as f:
f.write(netv3('C:/Users/pc/Desktop/carplate_model/carplate_lmdb_train_0818',
'C:/Users/pc/Desktop/carplate_model/carplate_lmdb_test_0818'
,64
,100))
with open('model/carplate_input_060317.prototxt', 'w') as f:
f.write(netinputv3())
| [
"kamwoh@gmail.com"
] | kamwoh@gmail.com |
b436c6bbb9bac52afa682fa7de21aae28f015a4d | e4031a6f6aaf7f087f6a43e0cc6bd15d41ef18f3 | /CloneBlog/comment/migrations/0002_auto_20200212_0435.py | 49e1228a9ee11678a76fdb65bb4b3020fc06a2cc | [] | no_license | youshen-chl/BlogEx | 9492910163d170fc89073b96c64a5686f968b38f | 73598c32d6c26e8a8931edcaf553da482268b10c | refs/heads/master | 2022-12-02T12:31:31.795395 | 2020-02-26T15:06:13 | 2020-02-26T15:06:13 | 219,108,822 | 0 | 0 | null | 2022-11-22T05:19:12 | 2019-11-02T05:35:04 | CSS | UTF-8 | Python | false | false | 2,125 | py | # Generated by Django 2.2.3 on 2020-02-12 04:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the foreign-key fields linking comments to articles, users,
    and to other comments (parent/reply relations).

    NOTE(review): auto-generated by Django's makemigrations; avoid
    hand-editing the field definitions.  ``initial = True`` on a 0002
    migration that depends on '0001_initial' is unusual for generated
    files -- confirm this was not edited by hand.
    """

    initial = True

    # Requires the Article model (storm app) and the comment models
    # created in this app's 0001_initial migration.
    dependencies = [
        ('storm', '0001_initial'),
        ('comment', '0001_initial'),
    ]

    operations = [
        # ArticleComment -> Article it belongs to.
        migrations.AddField(
            model_name='articlecomment',
            name='belong',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='article_comments', to='storm.Article', verbose_name='所属文章'),
        ),
        # Self-referential parent comment (nullable: top-level comments
        # have no parent).
        migrations.AddField(
            model_name='articlecomment',
            name='parent',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='articlecomment_child_comments', to='comment.ArticleComment', verbose_name='父评论'),
        ),
        # Comment this one replies to (nullable for non-replies).
        migrations.AddField(
            model_name='articlecomment',
            name='rep_to',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='articlecomment_rep_comments', to='comment.ArticleComment', verbose_name='回复'),
        ),
        # AboutselfComment -> the CommentUser who wrote it.
        migrations.AddField(
            model_name='aboutselfcomment',
            name='author',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='aboutselfcomment_related', to='comment.CommentUser', verbose_name='评论人'),
        ),
        # Same parent/reply structure for about-page comments.
        migrations.AddField(
            model_name='aboutselfcomment',
            name='parent',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='aboutselfcomment_child_comments', to='comment.AboutselfComment', verbose_name='父评论'),
        ),
        migrations.AddField(
            model_name='aboutselfcomment',
            name='rep_to',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='aboutselfcomment_rep_comments', to='comment.AboutselfComment', verbose_name='回复'),
        ),
    ]
| [
"514292146@qq.com"
] | 514292146@qq.com |
661ecdd01b1742556a9e7a99a743c13e13548b0f | 06d3156837abec83be6e038e21ee4bfd0f6c0a23 | /mysite/settings.py | be5097c4338860646302a2ba8e43adb57d314010 | [] | no_license | Igorskie/my-first-blog | 2f4c94380ab61024c009f24f6f7cf3d0ac0df0b3 | 431a35144803cb9768e597d945116c94ced6ea13 | refs/heads/master | 2020-07-06T01:14:07.806438 | 2019-08-17T08:03:57 | 2019-08-17T08:03:57 | 202,833,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,181 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.13.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and
# load it from an environment variable before any production deployment.
SECRET_KEY = 'b&pf-&z59!43(r882u2*k36s4fbtpw##$z1=570m!cjb13+$-a'

# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG=True exposes stack traces and settings to visitors.
DEBUG = True

# Local development plus any *.pythonanywhere.com subdomain.
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']

# Application definition
# Django contrib apps plus the project's single 'blog' app.
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'blog',
]

# Default Django middleware stack (order matters to Django).
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'mysite.urls'

# Template engine: app-directory templates only (DIRS is empty).
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'mysite.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# SQLite file in the project root (development database).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
# Collected static files target for `collectstatic` (used in deployment).
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
"you@example.com"
] | you@example.com |
e72f0caffcab32a6b1d54c4d895be2149304c7d8 | 6b265b404d74b09e1b1e3710e8ea872cd50f4263 | /Python/PyParsing/macro_expander.py | 0c15b33b30ac328bf08b6d947d174a4f430e5943 | [
"CC-BY-4.0"
] | permissive | gjbex/training-material | cdc189469ae2c7d43784ecdcb4bcca10ecbc21ae | e748466a2af9f3388a8b0ed091aa061dbfc752d6 | refs/heads/master | 2023-08-17T11:02:27.322865 | 2023-04-27T14:42:55 | 2023-04-27T14:42:55 | 18,587,808 | 130 | 60 | CC-BY-4.0 | 2023-08-03T07:07:25 | 2014-04-09T06:35:58 | Jupyter Notebook | UTF-8 | Python | false | false | 3,443 | py | #!/usr/bin/env python
import imp  # deprecated (removed in Python 3.12); retained, no longer used
import importlib
import sys
import types
from argparse import ArgumentParser, FileType

from pyparsing import Regex, Literal, ZeroOrMore, Group
class UndefinedMacroError(Exception):
    '''Exception raised when an undefined macro is encountered while
    parsing a text.'''

    def __init__(self, function_name):
        '''Constructor, takes the unknown macro name (with or without
        its leading backslashes) as an argument'''
        self._msg = "unknown macro '{0}'".format(function_name.strip('\\'))
        # Fix: pass the message to the base class so that it shows up
        # in Exception.args (used by logging, pickling and plain repr);
        # previously the base constructor was called with no arguments.
        super(UndefinedMacroError, self).__init__(self._msg)

    def __str__(self):
        '''method to stringify the exception'''
        # repr() keeps the original quoted error output, for backward
        # compatibility with existing callers that print str(error)
        return repr(self._msg)
class MacroExpander(object):
    '''Expands macros written as \\macro_name{param_1}...{param_n}
    inside a text, using implementation functions registered by the
    caller.'''

    def __init__(self):
        '''Create an expander with an empty macro table and build the
        pyparsing grammar used by expand().'''
        self._macros = {}
        # plain text: any run of characters up to the next backslash
        plain_text = Regex(r'[^\\]+').leaveWhitespace()
        # braces delimit parameters but are not kept in the results
        open_brace = Literal('{').suppress()
        close_brace = Literal('}').suppress()
        value = Regex(r'[^}\\]+')
        argument = open_brace + ZeroOrMore(value) + close_brace
        arguments = Group(ZeroOrMore(argument)).setResultsName('params')
        name = Regex(r'\\\w+').setResultsName('macro')
        call = name + arguments

        def expand_call(toks):
            '''parse action: replace a macro call by its expansion, or
            raise for a macro that was never registered'''
            macro = toks['macro']
            if not self._has_macro(macro):
                raise UndefinedMacroError(macro)
            return self._macros[macro](*toks['params'])

        call.addParseAction(expand_call)
        self._grammar = ZeroOrMore(plain_text | call)

    def add_macro(self, macro_name, macro_impl):
        '''Register a macro: macro_name is given without the leading
        backslash, macro_impl is called with the parameter strings.'''
        self._macros['\\' + macro_name] = macro_impl

    def _has_macro(self, macro_name):
        '''Return True if a macro with the given (backslash-prefixed)
        name has been registered.'''
        return macro_name in self._macros

    def expand(self, text):
        '''Return the given text with all macro calls expanded.'''
        return ''.join(self._grammar.parseString(text))
def main():
    '''Command-line entry point: parse the arguments, load the macro
    definitions module, register its functions as macros, and print the
    expanded input file.

    Exit codes: 2 for an undefined macro, 1 for any other failure.'''
    arg_parser = ArgumentParser(description='macro expansion utility')
    arg_parser.add_argument('--file', type=FileType('r'),
                            action='store', dest='file',
                            required=True, help='file to expand')
    arg_parser.add_argument('--def', type=str, action='store',
                            default='macro_defs', dest='defs',
                            help='macro definitions module name')
    try:
        options = arg_parser.parse_args()
        text = ''.join(options.file)
        # importlib replaces the deprecated imp.find_module/load_module
        # pair (the imp module was removed in Python 3.12); semantics
        # are the same: the module is looked up on sys.path by name.
        macro_module = importlib.import_module(options.defs)
        expander = MacroExpander()
        # register every plain function defined in the module as a macro
        for macro_def in macro_module.__dict__.values():
            if isinstance(macro_def, types.FunctionType):
                expander.add_macro(macro_def.__name__, macro_def)
        print(expander.expand(text))
    except UndefinedMacroError as error:
        sys.stderr.write('### error: ' + str(error) + '\n')
        sys.exit(2)
    except Exception as error:
        # top-level boundary: report any other failure and exit non-zero
        sys.stderr.write('### error: ' + str(error) + '\n')
        sys.exit(1)
# script entry point: run the expander only when executed directly,
# not when imported as a module
if __name__ == '__main__':
    main()
| [
"geertjan.bex@uhasselt.be"
] | geertjan.bex@uhasselt.be |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.