blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0e15eed74eefbfb62ee5c59fdaeeb1a192affa26 | 1d499072e362a6e47f662ab76cfb9fcfebdabd85 | /people/urls.py | 153221e2debc717f74b3c8c08fd2c1e237d43def | [] | no_license | arsen-movsesyan/passman | a3a6732b54b21a65e0a8a31384e6c08d05ff0769 | 8d8e6e2160cc1d2715a5802767073cdec2422959 | refs/heads/master | 2020-12-29T01:31:03.050903 | 2015-07-28T18:38:59 | 2015-07-28T18:38:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | from django.conf.urls import patterns, include, url
from people import views
# URL routes for the people app (old-style patterns() URLconf; the
# patterns() helper only exists in legacy Django versions).
urlpatterns = patterns('',
    # /person/  -> people.views.person
    url(r'^person/$', views.person),
    # /address/ -> people.views.address_history
    url(r'^address/$', views.address_history),
)
| [
"arsen.movsesyan@gmail.com"
] | arsen.movsesyan@gmail.com |
a7784cf4b12ea9bed917ce26508e4c63ce253b6c | 12e42f4f34030b90c1841ece8d4efdd28925394f | /test/functional/wallet_scriptaddress2.py | 1f6b0e35dc51989b468955669c9f87acde059877 | [
"MIT"
] | permissive | GerardoTaboada/EducaCoin | 46d8aa08dd4b3859e59b739713ced08ec0b8c510 | c7f1be5dacd0a10464775c7eeb0eb799fc66cd43 | refs/heads/master | 2020-03-31T20:01:41.768383 | 2018-10-17T21:54:13 | 2018-10-17T21:54:13 | 152,522,009 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,921 | py | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test new Educacoin multisig prefix functionality.
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import decimal
class ScriptAddress2Test(BitcoinTestFramework):
    """Exercise the new Educacoin multisig (P2SH) address prefix 'Q'."""
    def set_test_params(self):
        # Three nodes on a pre-existing chain; node 0 is forced to legacy
        # addresses so multisig addresses carry the prefix under test.
        self.num_nodes = 3
        self.setup_clean_chain = False
        self.extra_args = [['-addresstype=legacy'], [], []]
    def setup_network(self, split=False):
        # Star topology: nodes 1 and 2 both connect to node 0, then sync.
        self.setup_nodes()
        connect_nodes(self.nodes[1], 0)
        connect_nodes(self.nodes[2], 0)
        self.sync_all()
    def run_test(self):
        cnt = self.nodes[0].getblockcount()
        # Mine some blocks
        self.nodes[1].generate(101)
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 101):
            raise AssertionError("Failed to mine 100 blocks")
        # Create a 2-of-2 multisig address on node 0; it must start with 'Q'.
        addr = self.nodes[0].getnewaddress()
        addr2 = self.nodes[0].getnewaddress()
        multisig_addr = self.nodes[0].addmultisigaddress(2, [addr, addr2], "multisigaccount")['address']
        assert_equal(multisig_addr[0], 'Q')
        # Send to a new multisig address
        txid = self.nodes[1].sendtoaddress(multisig_addr, 1)
        block = self.nodes[1].generate(3)
        self.sync_all()
        # The funding transaction, as seen from node 2, must pay the
        # multisig address in one of its two outputs (payment + change).
        tx = self.nodes[2].getrawtransaction(txid, 1)
        dest_addrs = [tx["vout"][0]['scriptPubKey']['addresses'][0],
                      tx["vout"][1]['scriptPubKey']['addresses'][0]]
        assert(multisig_addr in dest_addrs)
        # Spend from the new multisig address
        addr3 = self.nodes[1].getnewaddress()
        txid = self.nodes[0].sendfrom("multisigaccount", addr3, 0.8)
        block = self.nodes[0].generate(2)
        self.sync_all()
        assert(self.nodes[0].getbalance("multisigaccount", 1) < 0.2)
        assert(self.nodes[1].listtransactions()[-1]['address'] == addr3)
        # Send to an old multisig address. The api addmultisigaddress
        # can only generate a new address so we manually compute
        # multisig_addr_old beforehand using an old client.
        priv_keys = ["cU7eeLPKzXeKMeZvnEJhvZZ3tLqVF3XGeo1BbM8dnbmV7pP3Qg89",
                     "cTw7mRhSvTfzqCt6MFgBoTBqwBpYu2rWugisXcwjv4cAASh3iqPt"]
        addrs = ["mj6gNGRXPXrD69R5ApjcsDerZGrYKSfb6v",
                 "mqET4JA3L7P7FoUjUP3F6m6YsLpCkyzzou"]
        self.nodes[0].importprivkey(priv_keys[0])
        self.nodes[0].importprivkey(priv_keys[1])
        multisig_addr_new = self.nodes[0].addmultisigaddress(2, addrs, "multisigaccount2")['address']
        assert_equal(multisig_addr_new, 'QZ974ZrPrmqMmm1PSVp4m8YEgo3bCQZBbe')
        multisig_addr_old = "2N5nLwYz9qfnGdaFLpPn3gS6oYQbmLTWPjq"
        ## Let's send to the old address. We can then find it in the
        ## new address with the new client. So basically the old
        ## address and the new one are the same thing.
        txid = self.nodes[1].sendtoaddress(multisig_addr_old, 1)
        block = self.nodes[1].generate(1)
        self.sync_all()
        tx = self.nodes[2].getrawtransaction(txid, 1)
        dest_addrs = [tx["vout"][0]['scriptPubKey']['addresses'][0],
                      tx["vout"][1]['scriptPubKey']['addresses'][0]]
        assert(multisig_addr_new in dest_addrs)
        assert(multisig_addr_old not in dest_addrs)
        # Spend from the new multisig address
        addr4 = self.nodes[1].getnewaddress()
        txid = self.nodes[0].sendfrom("multisigaccount2", addr4, 0.8)
        block = self.nodes[0].generate(2)
        self.sync_all()
        assert(self.nodes[0].getbalance("multisigaccount2", 1) < 0.2)
        assert(self.nodes[1].listtransactions()[-1]['address'] == addr4)
if __name__ == '__main__':
ScriptAddress2Test().main() | [
"you@example.com"
] | you@example.com |
31d9eeab5c41e89598019b524f1037d1b0babfec | acb4150eab3aaadeb65758465d96133606b2464c | /test_for_etf/otg_test_06_risk3.py | 9caf651126b00cc8c3f0248e7a208965f9d17426 | [] | no_license | shinny-mayanqiong/test-otg-by-tqsdk | 898f6a7ebb9e217f991e0c6342224320eedaf284 | 43ca59e6689e9fc4df5a02fc28b98ad89710c16f | refs/heads/master | 2023-01-11T10:39:11.753594 | 2020-10-23T07:59:57 | 2020-10-23T07:59:57 | 288,365,646 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 997 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from tqsdk import TqApi, TqAccount
from otg_check_helper import check_orders, check_positions, check_account, check_risk_rule, check_risk_data, check_all
from test_for_etf.base_info import bid, user_id, pwd, td_url, test_logger
if __name__ == '__main__':
    # Connect to the trading gateway in stock/ETF mode via the test td_url.
    api = TqApi(TqAccount(bid, user_id, pwd), auth="ringo,Shinnytech123", _stock=True, _td_url=td_url)
    # Risk-management rule: cap trade units and the trade/position ratio.
    rule = api.set_risk_management_rule("SSE", True, trade_units_limit=6, trade_position_ratio_limit=150)
    test_logger.info(f"{'='*12} 期权 开仓 {'='*12}")
    symbol = "SSE.10002477"
    quote = api.get_quote(symbol)  # ETF option
    # Place a BUY/OPEN limit order at the current ask price.
    buy_order = api.insert_order(symbol=symbol, direction="BUY", offset="OPEN", limit_price=quote.ask_price1, volume=10)
    # Block until the order leaves the ALIVE state (filled or cancelled).
    while buy_order.status == "ALIVE":
        api.wait_update()
    # Run the post-trade consistency checks plus risk rule/data checks.
    check_all(api, bid, user_id)
    check_risk_rule(api, None)
    check_risk_data(api, symbol)
    api.close()
| [
"mayanqiong@shinnytech.com"
] | mayanqiong@shinnytech.com |
c4005f0e5842c7d14c9e583f81263d0b6cbf2b57 | 10711b2745eb0895c715b24be8359335644bf425 | /manage.py | 156d6e9927fdc92f2a542676d34975cb43bc907a | [] | no_license | Mikalisa/Divaexplorer | 7eed29251182f5f313e9a05595a68e1c607517bf | 7cb92fdc3d9e8cb9fb7f3f1e89de98af9ff1f958 | refs/heads/master | 2021-04-11T04:45:56.106135 | 2020-05-17T14:25:42 | 2020-05-17T14:25:42 | 248,993,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from wsgi import app
from app.extensions import db
# Wire Flask-Migrate into the app/database and expose its commands under
# the "db" subcommand (e.g. `python manage.py db migrate`).
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
# Dispatch Flask-Script commands when invoked directly.
if __name__ == '__main__':
    manager.run()
| [
"mekalissa0@gmail.com"
] | mekalissa0@gmail.com |
0aabe54500d0f9cfbda228c78ae78343c531d611 | 9e91706de538097e9d72f8b5a851e7e3c2e48674 | /utils.py | cd45aacee23db77394bce42b9b7ad8e817730883 | [] | no_license | qwp8510/weather_broadcaster_project | 80ddb552590ecf7c05f3ce96c60310d0c00a427b | d317d45f48fddae4a55a8e8dbad6449323cb1d75 | refs/heads/main | 2023-02-12T03:06:44.110454 | 2021-01-17T06:43:32 | 2021-01-17T06:43:32 | 318,798,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | import json
def get_json_content(path):
    """Read the file at *path* and return its parsed JSON content."""
    # The with-statement closes the file automatically; the original
    # explicit close() inside the block was redundant.
    with open(path, 'r') as js:
        return json.load(js)
def trans_temp_kelvin_to_Celsius(temp):
    """Convert a Kelvin temperature to Celsius, truncated to an int."""
    celsius = temp - 273.15
    return int(celsius)
| [
"44150378+qwp8510@users.noreply.github.com"
] | 44150378+qwp8510@users.noreply.github.com |
d6ec614e25fb084667d466be9b59026d2b31eab5 | 6b9eebe50762d088ae487c25aa982a331bbd4843 | /mydocker/compareserverc.py | 9c65e272143539189d730b7b8055b8db2a77621b | [] | no_license | zjzwjcbj/MyDocker | 67b40e517b9e5cf8aa1b153f4d1c7f5c9f4b2258 | 9229d7ed4b6d1dd2ceeffd3ef4de549bc2dce70b | refs/heads/master | 2021-01-19T21:15:28.371928 | 2016-08-24T02:32:39 | 2016-08-24T02:32:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,150 | py | #-*- coding:UTF8 -*-
import os,os.path
import re
# Paths of every C source file discovered under the scanned tree.
pathlist = []
# Shared buffer filled by printLcs() with the current pair's LCS characters.
lcslist = []
# Walk callback: collect C source files from one directory into pathlist.
def search_c(root,dirs,files):
    # Appends matching absolute paths to the module-level pathlist.
    global pathlist
    for file in files:
        path = os.path.join(root,file)
        path = os.path.normcase(path)
        # NOTE(review): this pattern requires an "s/" somewhere before the
        # ".c" suffix, so only files under directories ending in "s" match.
        # Confirm this is intended rather than a plain r".*\.c$" filter.
        if re.search(r"s/.*\.c",path):
            pathlist.append(path)
# Longest common subsequence (dynamic programming).
def lcs(a,b):
    """Build the LCS dynamic-programming tables for strings a and b.

    Returns (score, trace): score[i][j] is the LCS length of a[:i] and
    b[:j]; trace[i][j] is 'ok' (characters match), 'left' or 'up', used
    by printLcs() to backtrack. Unvisited cells stay 0.
    """
    rows = len(a)
    cols = len(b)
    score = [[0] * (cols + 1) for _ in range(rows + 1)]
    trace = [[0] * (cols + 1) for _ in range(rows + 1)]
    for i in range(rows):
        for j in range(cols):
            if a[i] == b[j]:
                score[i + 1][j + 1] = score[i][j] + 1
                trace[i + 1][j + 1] = 'ok'
            elif score[i + 1][j] > score[i][j + 1]:
                score[i + 1][j + 1] = score[i + 1][j]
                trace[i + 1][j + 1] = 'left'
            else:
                score[i + 1][j + 1] = score[i][j + 1]
                trace[i + 1][j + 1] = 'up'
    return score, trace
def printLcs(flag,a,i,j):
    # Backtrack through the LCS flag table, appending the common
    # subsequence characters of `a` (in order) to the module-level
    # lcslist. NOTE(review): recursion depth grows with the LCS path
    # length, so very large files may hit Python's recursion limit.
    global lcslist
    if i==0 or j==0:
        return
    if flag[i][j]=='ok':
        # a[i-1] is part of the LCS: recurse first, then record in order.
        printLcs(flag,a,i-1,j-1)
        lcslist.append(a[i-1])
    elif flag[i][j]=='left':
        printLcs(flag,a,i,j-1)
    else:
        printLcs(flag,a,i-1,j)
# Compare the contents of two files and compute their duplication rate.
def compare(path1,path2):
    """Print the repetition rate between the two C files at path1/path2.

    The rate is len(LCS) * 2 / (len(a) + len(b)) computed over the
    files' whitespace-stripped, blank-line-free contents.
    """
    global lcslist
    # printLcs() appends into the module-level lcslist; reset it here so
    # results from an earlier compare() call cannot inflate this one
    # (the original accumulated matches across every pair compared).
    del lcslist[:]
    # Context managers close the files even if readlines() fails.
    with open(path1) as file1:
        fa = file1.readlines()
    with open(path2) as file2:
        fb = file2.readlines()
    # Strip surrounding whitespace from every line.
    fa = [line.strip() for line in fa]
    fb = [line.strip() for line in fb]
    # Drop empty lines.
    content1 = [line for line in fa if line != '']
    content2 = [line for line in fb if line != '']
    # Collapse each file into a single string.
    con1 = ''.join(content1)
    con2 = ''.join(content2)
    total = len(con1) + len(con2)
    if total == 0:
        # Both files are empty after stripping; avoid dividing by zero.
        percent = 0.0
    else:
        c, flag = lcs(con1, con2)
        printLcs(flag, con1, len(con1), len(con2))
        percent = (len(lcslist) * 2.0) / total
        percent = percent * 100
    # %0.2d truncates the rate to a whole percentage, as before.
    print("The repetition rate of %s and %s is:%0.2d%%" % (path1, path2, percent))
if __name__ == '__main__':
    # Collect every matching C file under /workspace.
    for root,dirs,files in os.walk('/workspace'):
        search_c(root,dirs,files)
    if len(pathlist) == 0:
        print("C code file not found!")
    # NOTE(review): when pathlist is empty the else-branch below still
    # runs (its loops just iterate zero times); only len==1 short-circuits
    # into a message. Confirm the empty case should not also be skipped.
    if len(pathlist) == 1:
        print("Only one C file, can not be compared!")
    else:
        # Compare every unordered pair of discovered files.
        for i in range(len(pathlist)-1):
            for j in range(i+1,len(pathlist),1):
                compare(pathlist[i],pathlist[j])
| [
"koen19940214@gmail.com"
] | koen19940214@gmail.com |
a20690d0585dccfcadc444288fb2a3e9ffe50766 | 7e7a13dd1c22c0e327a429416ee9ca5962626773 | /judge_server/containers/java/main.py | e9b546556806da0a4a9109444c4cca3903f2234a | [
"MIT"
] | permissive | ucpr/onlinejudge | 45cf1a822741f6f391d0c96bb37a24ca813c03d4 | 472b4671dc8fde8bd2f2b139ce61bc52e8137fcc | refs/heads/master | 2020-05-02T15:54:28.445552 | 2019-05-13T09:11:12 | 2019-05-13T09:11:12 | 178,055,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,393 | py | import filecmp
import glob
import json
import os
import sys
import subprocess
from typing import List
from subprocess import TimeoutExpired
from judge_result import JudgeResult
def compile_process():
    """Compile /problem/Main.java with javac.

    Returns a dict with an isError flag, a status ("CE" when compilation
    failed, "" otherwise) and the compiler's stdout/stderr as bytes.
    """
    try:
        p = subprocess.run(
            ["javac", "/problem/Main.java"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
    except OSError as exc:
        # javac could not be executed at all (missing binary, permission
        # error). The original bare except dereferenced the never-assigned
        # CompletedProcess here and crashed with AttributeError; report a
        # CE instead, keeping bytes so callers can .decode() uniformly.
        return {
            "isError": True,
            "status": "CE",
            "stdout": b"",
            "stderr": str(exc).encode()
        }
    if p.returncode != 0:
        # Compiler ran but rejected the source.
        return {
            "isError": True,
            "status": "CE",
            "stdout": p.stdout,
            "stderr": p.stderr
        }
    return {
        "isError": False,
        "status": "",
        "stdout": p.stdout,
        "stderr": p.stderr
    }
def run_process(input_):
    """Run the compiled solution (`java Main`) in /problem, feeding
    *input_* on stdin.

    Returns a dict with isError, status ("TLE" on timeout, "" otherwise)
    and the captured stdout/stderr (bytes on success, empty strings on
    timeout).
    """
    p = None
    try:
        p = subprocess.run(
            ["java", "Main"],
            cwd="/problem",
            input=input_.encode(),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            timeout=2
        )
    except TimeoutExpired:
        # Hard 2-second limit exceeded: report a TLE verdict.
        return {
            "isError": True,
            "status": "TLE",
            "stdout": "",
            "stderr": ""
        }
    # NOTE(review): a non-zero exit code is not treated as an error here,
    # so runtime failures fall through to output comparison (there is no
    # "RE" status in check_status) -- confirm this is intended.
    return {
        "isError": False,
        "status": "",
        "stdout": p.stdout,
        "stderr": p.stderr
    }
def get_test_filenames():
    """Return the testcase paths, sorted so input and output files pair
    up by index.

    glob.glob() yields paths in arbitrary filesystem order, so the
    original unsorted lists could pair in/1.txt with out/2.txt when
    diff() zips the two lists positionally.
    """
    return {
        "input_files": sorted(glob.glob('/problem/testcases/in/*')),
        "output_files": sorted(glob.glob('/problem/testcases/out/*'))
    }
def diff():
    """Run every testcase and judge its output.

    Pairs the i-th input file with the i-th output file, runs the
    program on the input, and compares its stdout (via a scratch file)
    byte-for-byte with the expected output. Returns a list of
    JudgeResult dicts.
    """
    filenames = get_test_filenames()
    input_file_paths = filenames["input_files"]
    output_file_paths = filenames["output_files"]
    judge_results: List[JudgeResult] = []
    for i, input_file_path in enumerate(input_file_paths):
        input_: str = ""
        output_: str = ""
        with open(input_file_path) as input_file:
            input_ = input_file.read()
        run_result = run_process(input_)
        if run_result["isError"]:
            # The run failed (e.g. TLE): record its status verbatim.
            judge_results.append(
                JudgeResult(
                    status=run_result["status"],
                    error=run_result["stdout"]
                ).export_dict()
            )
        else:
            # Dump the program's stdout to a scratch file so filecmp can
            # compare it against the expected output file.
            with open('./tmp.txt', 'w') as tmp_file:
                tmp_file.write(run_result["stdout"].decode())
            import filecmp  # NOTE(review): redundant, already imported at module level
            res = filecmp.cmp('tmp.txt', output_file_paths[i])
            judge_results.append(
                JudgeResult(
                    status="AC" if res else "WA",
                ).export_dict()
            )
    return judge_results
def check_status(statuses):
    """Collapse per-testcase statuses into one verdict.

    "WA" wins over "TLE"; anything else (including an empty list)
    yields "AC".
    """
    for verdict in ("WA", "TLE"):
        if verdict in statuses:
            return verdict
    return "AC"
def main():
    """Compile, judge all testcases, and print a JSON verdict to stdout."""
    compile_result = compile_process()
    if compile_result["isError"]:
        # Compile error: report CE with the compiler's stderr as output.
        print(json.dumps({
            "status": compile_result["status"],
            "ac_count": 0,
            "output": compile_result["stderr"].decode()
        }))
        return
    judge_results = diff()
    statuses = [judge_result["status"] for judge_result in judge_results]
    # Aggregate: overall status plus the number of accepted testcases.
    res = {
        "status": check_status(statuses),
        "ac_count": statuses.count("AC"),
        "output": ""
    }
    print(json.dumps(res))
if __name__ == '__main__':
    main()
| [
"ryubi.102395@gmail.com"
] | ryubi.102395@gmail.com |
74096f3871ce295e10d08f00012c88bc032e9da1 | f972e22df004b419d23b4b03d3c7e42e604a2e2b | /compute/wps/tasks/ophidia.py | 830321413d5463dd764a9eed4384191c13d65a43 | [] | no_license | OphidiaBigData/esgf-compute-wps | 9ec663b1701f2336f08117a6fb0725d71adfe078 | 8dd26dde385fbe861c78e432e0954725d7bf9b18 | refs/heads/master | 2020-04-28T10:20:49.718253 | 2019-02-04T09:46:43 | 2019-02-04T09:46:43 | 175,198,536 | 0 | 0 | null | 2019-03-12T11:39:20 | 2019-03-12T11:39:19 | null | UTF-8 | Python | false | false | 5,490 | py | import json
import os
import uuid
import cwt
from celery.utils.log import get_task_logger
from django.conf import settings
from PyOphidia import client
from wps import WPSError
from wps.tasks import base
# NOTE(review): should probably be __all__ (lowercase) to actually control
# star-imports; __ALL__ has no special meaning to Python.
__ALL__ = [
    'PROCESSES',
    'oph_submit',
]
logger = get_task_logger('wps.tasks.ophidia')
# Maps WPS operation identifiers to Ophidia reduction operator names.
PROCESSES = {
    'Oph.max': 'max',
    'Oph.min': 'min',
    'Oph.avg': 'avg',
    'Oph.sum': 'sum',
    'Oph.std': 'std',
    'Oph.var': 'var',
}
class OphidiaTask(object):
    """One task of an Ophidia workflow document.

    Collects the operator name, its "key=value" arguments and the tasks
    it depends on; to_dict() renders it for JSON serialization.
    """
    def __init__(self, name, operator, on_error=None):
        self.name = name
        self.operator = operator
        self.on_error = on_error
        self.arguments = []
        self.dependencies = []
    def add_arguments(self, **kwargs):
        """Append each keyword argument formatted as a "key=value" string."""
        # dict.items() works on both Python 2 and 3; the original
        # iteritems() was removed in Python 3 and raised AttributeError.
        self.arguments.extend(['{}={}'.format(key, value) for key, value in kwargs.items()])
    def add_dependencies(self, *args):
        """Record a dependency entry for each given OphidiaTask."""
        self.dependencies.extend(dict(task=x.name) for x in args)
    def to_dict(self):
        """Render the task as a plain dict, omitting empty optional fields."""
        data = {
            'name': self.name,
            'operator': self.operator,
            'arguments': self.arguments,
        }
        if self.on_error:
            data['on_error'] = self.on_error
        if self.dependencies:
            data['dependencies'] = self.dependencies
        return data
class OphidiaWorkflow(object):
    """Builder for an Ophidia workflow document.

    Holds the JSON skeleton, accumulates tasks, surfaces backend errors
    as WPSError, and submits the serialized workflow via the client.
    """
    def __init__(self, oph_client):
        self.oph_client = oph_client
        # Skeleton document; tasks are appended through add_tasks().
        self.workflow = {
            'name': 'ESGF WPS Workflow',
            'author': 'ESGF WPS',
            'abstract': 'Auto-generated abstract',
            'exec_mode': 'sync',
            'cwd': '/',
            'ncores': '2',
            'tasks': []
        }
    def add_tasks(self, *tasks):
        """Append one or more tasks to the workflow's task list."""
        for task in tasks:
            self.workflow['tasks'].append(task)
    def check_error(self):
        """Raise WPSError when the Ophidia client reports an error."""
        last_error = self.oph_client.last_error
        if last_error is None or last_error == '':
            return
        error = '{}\n'.format(last_error)
        res = self.oph_client.deserialize_response()
        try:
            # Append every reported row as "\tkey: value" detail lines.
            for entry in res['response'][2]['objcontent']:
                for row in entry['rowvalues']:
                    error += '\t{}: {}\n'.format(row[-3], row[-1])
        except IndexError:
            raise WPSError('Failed to parse last error from Ophidia')
        raise WPSError(error)
    def submit(self):
        """Validate the client state, then submit the serialized workflow."""
        self.check_error()
        self.oph_client.wsubmit(self.to_json())
    def to_json(self):
        """Serialize the workflow, converting OphidiaTask objects to dicts."""
        def encode(obj):
            if isinstance(obj, OphidiaTask):
                return obj.to_dict()
        return json.dumps(self.workflow, default=encode, indent=4)
@base.cwt_shared_task()
def oph_submit(self, parent_variables, variables, domains, operation, user_id, job_id):
    """Celery task: run a reduction on the Ophidia backend.

    Builds a four-step Ophidia workflow (create container -> import the
    first input -> reduce over the requested axes -> export to NetCDF),
    submits it synchronously, and returns {operation name: output
    Variable parameterization}.
    """
    self.PUBLISH = base.ALL
    # NOTE(review): `process` is not imported in this module's visible
    # import block -- confirm where process.Process comes from.
    proc = process.Process(self.request.id)
    proc.initialize(user_id, job_id)
    # Deserialize the parameterized inputs into cwt objects.
    v, d, o = self.load(parent_variables, variables, domains, operation)
    oph_client = client.Client(settings.WPS_OPHIDIA_USER, settings.WPS_OPHIDIA_PASSWORD, settings.WPS_OPHIDIA_HOST, settings.WPS_OPHIDIA_PORT)
    workflow = OphidiaWorkflow(oph_client)
    workflow.check_error()
    # Number of cores: user-supplied parameter, else the configured default.
    cores = o.get_parameter('cores')
    if cores is None:
        cores = settings.WPS_OPHIDIA_DEFAULT_CORES
    else:
        cores = cores.values[0]
    # Axes to reduce over; 'time' selects the implicit dimension path below.
    axes = o.get_parameter('axes')
    if axes is not None:
        axes = axes.values[0]
    else:
        axes = 'time'
    proc.log('Connected to Ophidia backend, building workflow')
    container_task = OphidiaTask('create container', 'oph_createcontainer', on_error='skip')
    container_task.add_arguments(container='work')
    proc.log('Add container task')
    # only take the first input
    inp = o.inputs[0]
    import_task = OphidiaTask('import data', 'oph_importnc')
    import_task.add_arguments(container='work', measure=inp.var_name, src_path=inp.uri, ncores=cores, imp_dim=axes)
    import_task.add_dependencies(container_task)
    proc.log('Added import task')
    # Map the WPS identifier (e.g. "Oph.max") to an Ophidia operator name.
    try:
        operator = PROCESSES[o.identifier]
    except KeyError:
        raise WPSError('Process "{name}" does not exist for Ophidia backend', name=o.identifier)
    if axes == 'time':
        # oph_reduce reduces over the implicit (time) dimension.
        reduce_task = OphidiaTask('reduce data', 'oph_reduce')
        reduce_task.add_arguments(operation=operator, ncores=cores)
        reduce_task.add_dependencies(import_task)
        proc.log('Added reduction task over implicit axis')
    else:
        # oph_reduce2 reduces over an explicitly named dimension.
        reduce_task = OphidiaTask('reduce data', 'oph_reduce2')
        reduce_task.add_arguments(operation=operator, dim=axes, ncores=cores)
        reduce_task.add_dependencies(import_task)
        proc.log('Added reduction task over axes "{}"', axes)
    # Export the reduced cube to a uniquely named NetCDF file.
    output_name = '{}'.format(uuid.uuid4())
    export_task = OphidiaTask('export data', 'oph_exportnc2')
    export_task.add_arguments(output_path=settings.WPS_OPHIDIA_OUTPUT_PATH, output_name=output_name, ncores=cores, force='yes')
    export_task.add_dependencies(reduce_task)
    proc.log('Added export task')
    workflow.add_tasks(container_task, import_task, reduce_task, export_task)
    proc.log('Added tasks to workflow')
    workflow.submit()
    proc.log('Submitted workflow to Ophidia backend')
    workflow.check_error()
    proc.log('No errors reported by Ophidia')
    output_url = settings.WPS_OPHIDIA_OUTPUT_URL.format(output_path=settings.WPS_OPHIDIA_OUTPUT_PATH, output_name=output_name)
    output_var = cwt.Variable(output_url, inp.var_name, name=o.name)
    return {o.name: output_var.parameterize()}
| [
"boutte.jason@gmail.com"
] | boutte.jason@gmail.com |
52781a4315e58f453b73b84942444f1862527b89 | 62e6f4e946c2ebbd28a690338da3facbea0f6fd4 | /clients/cloud/python/producer_ccsr.py | c54844999ae51467fcdcbb2f951bfe0efadee3d6 | [
"Apache-2.0",
"MIT",
"CPL-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | vengky/kafka-docker | d9907ed67cc7cf98b13cbc47c85edcff94ffb4d0 | 6722867402e582da94a8b75a903e5eeb71370db0 | refs/heads/master | 2022-12-24T02:24:27.559234 | 2020-03-03T23:15:06 | 2020-03-03T23:15:06 | 244,807,772 | 0 | 0 | Apache-2.0 | 2022-12-08T04:37:17 | 2020-03-04T04:33:29 | TSQL | UTF-8 | Python | false | false | 3,189 | py | #!/usr/bin/env python
#
# Copyright 2019 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
#
# Produce messages to Confluent Cloud
# Using Confluent Python Client for Apache Kafka
# Writes Avro data, integration with Confluent Cloud Schema Registry
#
# =============================================================================
from confluent_kafka import Producer, KafkaError
from confluent_kafka.avro import AvroProducer
import json
import ccloud_lib
if __name__ == '__main__':
    # Initialization
    # Parse CLI arguments and load the Confluent Cloud client configuration.
    args = ccloud_lib.parse_args()
    config_file = args.config_file
    topic = args.topic
    conf = ccloud_lib.read_ccloud_config(config_file)
    # Create AvroProducer instance
    # The credentials cover both the Kafka cluster (SASL) and the
    # Schema Registry (basic auth).
    p = AvroProducer({
        'bootstrap.servers': conf['bootstrap.servers'],
        'sasl.mechanisms': conf['sasl.mechanisms'],
        'security.protocol': conf['security.protocol'],
        'sasl.username': conf['sasl.username'],
        'sasl.password': conf['sasl.password'],
        'schema.registry.url': conf['schema.registry.url'],
        'schema.registry.basic.auth.credentials.source': conf['basic.auth.credentials.source'],
        'schema.registry.basic.auth.user.info': conf['schema.registry.basic.auth.user.info']
    }, default_key_schema=ccloud_lib.schema_key, default_value_schema=ccloud_lib.schema_value)
    # Create topic if needed
    ccloud_lib.create_topic(conf, topic)
    # Optional per-message on_delivery handler (triggered by poll() or flush())
    # when a message has been successfully delivered or
    # permanently failed delivery (after retries).
    def acked(err, msg):
        """Delivery report handler called on
        successful or failed delivery of message
        """
        if err is not None:
            print("Failed to deliver message: {}".format(err))
        else:
            print("Produced record to topic {} partition [{}] @ offset {}"
                  .format(msg.topic(), msg.partition(), msg.offset()))
    # Produce ten Avro records keyed by name with an incrementing count.
    for n in range(10):
        name_object = ccloud_lib.Name()
        name_object.name = "alice"
        record_key = name_object.to_dict()
        count_object = ccloud_lib.Count()
        count_object.count = n
        record_value = count_object.to_dict()
        print("Producing Avro record: {}\t{}".format(name_object.name, count_object.count))
        p.produce(topic=topic, key=record_key, value=record_value, on_delivery=acked)
        # p.poll() serves delivery reports (on_delivery)
        # from previous produce() calls.
        p.poll(0)
    p.flush(10)  # wait up to 10s for outstanding deliveries
    print("10 messages were produced to topic {}!".format(topic))
| [
"noreply@github.com"
] | vengky.noreply@github.com |
1d299fc35a1f1aa5feca93086cb650a6d0e1c2f3 | 8842d6c864f12dc8853d22b8a986b01acdf0e446 | /27_12_15_Nico3/LDA.pyx | c15e5a4eef6680c7665544e3191ce137506966f6 | [] | no_license | yukkyo/ResearchSource | 0d701aa09d3cfc5aae80a022445ecf14c42f0a07 | db497d19aae41ea57d7d6dd245714a477a7a1d4c | refs/heads/master | 2021-01-18T20:01:20.427148 | 2019-06-20T05:17:54 | 2019-06-20T05:17:54 | 24,621,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,397 | pyx | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# cython: profile=True, boundscheck=False, wraparound=False
from __future__ import division
cimport cython
from libc.stdlib cimport rand, RAND_MAX
from libcpp.vector cimport vector
from libc.math cimport log, exp
from cpython.mem cimport PyMem_Malloc, PyMem_Realloc, PyMem_Free
# Latent Dirichlet Allocation + collapsed Gibbs sampling
# Apply LDA (collapsed Gibbs sampling) to all documents (about 500k).
# Returns the vocabulary-topic distribution (the topic-vocabulary matrix with
# beta subtracted from each entry, transposed) and the perplexities.
class LDA:
    """Latent Dirichlet Allocation trained with collapsed Gibbs sampling.

    All work happens in __init__: topic assignments are initialized at
    random, `r_iteration` Gibbs sweeps are run, and the perplexity after
    each sweep is appended to self.perps.  The final count matrices are
    kept in self.n_z_t (vocabulary x topic, entries still offset by beta)
    and self.z_m_n (per-document, per-token topic assignments).
    """
    @cython.cdivision(True)
    def __init__(self, r_n_topics, r_alpha, r_beta, raw_docs, r_V, r_iteration):
        print "init lda instance"
        self.n_topics = r_n_topics
        self.alpha = r_alpha # parameter of topics prior
        self.beta = r_beta # parameter of words prior
        self.V = r_V # size of vocabulary
        self.perps = []
        self.iteration = r_iteration
        print "initalize topics"
        # Copy the corpus into a C++ vector of word-id sequences.
        cdef vector[vector[int]] docs = raw_docs
        # self.docs = docs
        cdef int n_corpus, len_doc, m, n, new_z, v
        n_corpus = 0
        cdef int n_topics_int = self.n_topics
        cdef int V_int = self.V
        cdef double n_topics = self.n_topics
        cdef double alpha = self.alpha
        cdef double beta = self.beta
        cdef double V = self.V
        cdef double Vbeta = V * beta
        n_topics_s = self.n_topics
        v2 = self.V
        # number of times topic z and word w co-occur
        cdef int max_docs = 1
        max_docs = docs.size()
        # word count of each document and topic
        cdef vector[vector[double]] n_m_z
        n_m_z = vector[vector[double]](max_docs, vector[double](n_topics_int, alpha))
        # word count of each topic and vocabulary
        cdef vector[vector[double]] n_z_t
        # n_z_t = vector[vector[double]](n_topics_int, vector[double](<int>V, beta))
        n_z_t = vector[vector[double]](V_int, vector[double](n_topics_int, beta))
        # word count of each topic
        cdef vector[double] n_z
        n_z = vector[double](n_topics_int, Vbeta)
        cdef vector[vector[int]] z_m_n
        cdef vector[int] z_n
        # Randomly assign an initial topic to every token and accumulate
        # the document-topic, word-topic and topic count matrices.
        for m in xrange(max_docs):
            len_doc = docs[m].size()
            n_corpus += len_doc
            z_n.clear()
            for n in xrange(len_doc):
                v = docs[m][n]
                new_z = int((rand()/(RAND_MAX +1.)) * n_topics)
                z_n.push_back(new_z)
                n_m_z[m][new_z] += 1.
                n_z_t[v][new_z] += 1.
                n_z[new_z] += 1
            z_m_n.push_back(z_n)
        print "end initialize topics"
        """learning once iteration"""
        print "inference start"
        cdef int j, ite, iteration
        iteration = self.iteration
        cdef vector[vector[double]] n_z_t_tmp
        cdef vector[double] n_m_z_m
        n_m_z_m.resize(n_topics_int)
        cdef vector[int] z_m_n_m
        cdef vector[double] p_z2
        p_z2.resize(n_topics_int)
        cdef double p_z2j, u, perp
        # cdef long V = self.V
        cdef vector[int] docs_m
        cdef double n_z_j
        cdef vector[double] theta
        cdef double Kalpha = <double>n_topics * alpha
        cdef double log_per, tmp_logper, len_doc_kalpha
        # Perplexity of the initial (random) topic assignment.
        print "calc first perp"
        n_z_t_tmp = n_z_t
        log_per = 0.0
        for v in xrange(V_int):
            for j in xrange(n_topics_int):
                n_z_t_tmp[v][j] /= n_z[j]
        for m in xrange(max_docs):
            len_doc = docs[m].size()
            len_doc_kalpha = <double>len_doc + Kalpha
            theta = n_m_z[m]
            docs_m = docs[m]
            for j in xrange(n_topics_int):
                theta[j] = theta[j] / len_doc_kalpha
            for n in xrange(len_doc):
                v = docs_m[n]
                tmp_logper = 0.0
                for j in xrange(n_topics_int):
                    tmp_logper += (theta[j] * n_z_t_tmp[v][j])
                log_per -= log(tmp_logper)
            theta.clear()
        n_z_t_tmp.clear()
        log_per /= <double>n_corpus
        perp = exp(log_per)
        print "perp: " + str(perp)
        self.perps.append(perp)
        # Main collapsed Gibbs sampling loop: one full sweep per iteration,
        # followed by a perplexity evaluation.
        for ite in xrange(iteration):
            print "ite: " + str(ite)
            # sampling each word in corpus
            for m in xrange(max_docs):
                len_doc = docs[m].size()
                n_m_z_m = n_m_z[m]
                z_m_n_m = z_m_n[m]
                for n in xrange(len_doc):
                    v = docs[m][n]
                    # discount for n-th word n with topic z
                    z = z_m_n_m[n]
                    n_m_z_m[z] -= 1
                    n_z_t[v][z] -= 1
                    n_z[z] -= 1
                    # sampling new_z
                    # Build the cumulative (unnormalized) topic distribution.
                    for j in xrange(n_topics_int):
                        p_z2j = n_z_t[v][j] * n_m_z_m[j]
                        p_z2j /= n_z[j]
                        if j != 0:
                            p_z2j += p_z2[j-1]
                        p_z2[j] = p_z2j
                    u = (rand()/(RAND_MAX +1.))
                    u *= p_z2[n_topics_int - 1]
                    new_z = n_topics_int - 1
                    for j in xrange(n_topics_int):
                        if u < p_z2[j]:
                            new_z = j
                            break
                    # set z the new topic and increment counters
                    z_m_n_m[n] = new_z
                    n_m_z_m[new_z] += 1
                    n_z_t[v][new_z] += 1
                    n_z[new_z] += 1
                z_m_n[m] = z_m_n_m
                n_m_z[m] = n_m_z_m
                if (m + 1) % 100000 == 0:
                    print "end docs: " + str(m + 1)
            print "calc perp"
            log_per = 0.0
            n_z_t_tmp = n_z_t
            for v in xrange(V_int):
                for j in xrange(n_topics_int):
                    n_z_t_tmp[v][j] /= n_z[j]
            for m in xrange(max_docs):
                len_doc = docs[m].size()
                len_doc_kalpha = <double>len_doc + Kalpha
                theta = n_m_z[m]
                docs_m = docs[m]
                for j in xrange(n_topics_int):
                    theta[j] = theta[j] / len_doc_kalpha
                for n in xrange(len_doc):
                    v = docs_m[n]
                    tmp_logper = 0.0
                    for j in xrange(n_topics_int):
                        tmp_logper += (theta[j] * n_z_t_tmp[v][j])
                    log_per -= log(tmp_logper)
                theta.clear()
            n_z_t_tmp.clear()
            log_per /= <double>n_corpus
            perp = exp(log_per)
            print "perp: " + str(perp)
            self.perps.append(perp)
        print "calc new alpha and beta"
        # Expose the final count matrices for downstream processing.
        self.n_z_t = n_z_t
        self.z_m_n = z_m_n
return | [
"yukkyo12221222@gmail.com"
] | yukkyo12221222@gmail.com |
9fc5b6e12ba33400052ec9e08c251ff1626f1477 | eb3683f9127befb9ef96d8eb801206cf7b84d6a7 | /testing/test_programs/numpy/basic_numpy/arrays/stypy_test_files/numpy_array_broadcasting_4__type_data.py | 99ec5d09a07932264cc57dd68828680219e497e5 | [] | no_license | ComputationalReflection/stypy | 61ec27333a12f76ac055d13f8969d3e0de172f88 | be66ae846c82ac40ba7b48f9880d6e3990681a5b | refs/heads/master | 2021-05-13T18:24:29.005894 | 2018-06-14T15:42:50 | 2018-06-14T15:42:50 | 116,855,812 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 844 | py | from testing.code_generation_testing.codegen_testing_common import instance_of_class_name
# Expected type of every name in __main__ after running the broadcasting
# test program; keys are variable names, values are type assertions.
test_types = {
    '__main__': {
        'r4': instance_of_class_name("ndarray"),
        'r5': instance_of_class_name("ndarray"),
        '__name__': instance_of_class_name("str"),
        'r2': instance_of_class_name("ndarray"),
        'r3': instance_of_class_name("ndarray"),
        '__builtins__': instance_of_class_name("module"),
        '__file__': instance_of_class_name("str"),
        '__package__': instance_of_class_name("NoneType"),
        'r': instance_of_class_name("ndarray"),
        'w': instance_of_class_name("ndarray"),
        'v': instance_of_class_name("ndarray"),
        'np': instance_of_class_name("module"),
        'x': instance_of_class_name("ndarray"),
        '__doc__': instance_of_class_name("NoneType"),
    },
}
| [
"redondojose@uniovi.es"
] | redondojose@uniovi.es |
a1091ac2ec151af94c7b3cfb6e2420db81816dc8 | 7311b8d31c5a152e697b1f0ff218db0b9291c604 | /dna.py | 63191116b386c6bcab172eb3e22cfbbd9fe4c683 | [] | no_license | mgxmajewski/dna | 71a43bf18020f4cc2e3b30b1604668f136b4dbab | 5fd4fb024cbb9a5997539f65b5a00510328ddf9e | refs/heads/master | 2023-02-24T11:08:30.135657 | 2021-01-30T22:59:44 | 2021-01-30T22:59:44 | 332,850,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,109 | py | import sys
import csv
# Read database of people as a dictionary
if len(sys.argv) < 3:
    print("Provide 2 arguments")
    exit(1)
# argv[1]: CSV with a header of STR names and one row per person.
people_database = sys.argv[1]
with open(people_database, "r") as f:
    people_reader = csv.DictReader(f)
    people_list = list(people_reader)
dna_str = []
# Read dna STR which are input to look for
with open(people_database, "r") as f:
    sequence_reader = csv.reader(f)
    # Header row minus the leading "name" column gives the STR names.
    dna_str = next(sequence_reader)[1:]
print(dna_str)
# Read DNA sequence as a string
dna_material = sys.argv[2]
with open(dna_material, "r") as f:
    dna = f.read()
# Add dictionary to keep SRT count
extracted_seq = {}
seq_counter = 0  # NOTE(review): only used by the commented-out block below
for seq in range(len(dna_str)):
    # NOTE(review): defaults are ints but matches below store strings;
    # DictReader rows hold strings only, so a person whose true count is
    # 2 can never match this int default -- confirm intended.
    extracted_seq[dna_str[seq]] = 2
str_found = []
# Check how many dna_srt are there in dna
for seq in range(len(dna_str)):
    counter = 3
    for char in range(len(dna)):
        char_last = len(dna_str[seq])
        if dna_str[seq] == dna[char: char + char_last]:
            str_found.append(dna_str[seq])
            # Only runs of three or more consecutive repeats update the count.
            if dna_str[seq] == dna[char + char_last: char + 2 * char_last]:
                if dna_str[seq] == dna[char + 2* char_last: char + 3 * char_last]:
                    extracted_seq[dna_str[seq]] = str(counter)
                    counter += 1
                    str_found.append(dna_str[seq])
print(extracted_seq)
# # Check how many dna_srt are there in dna
# for seq in range(len(dna_str)):
#     seq_counter += 1
#     counter = 1
#     for char in range(len(dna)):
#         char_last = len(dna_str[seq])
#         if dna_str[seq] == dna[char: char + char_last]:
#             extracted_seq[dna_str[seq]] = str(counter)
#             if dna_str[seq] == dna[char + char_last: char + 2 * char_last]:
#                 extracted_seq[dna_str[seq]] = str(counter)
#                 counter += 1
# Print result
# Compare each person's STR profile (name removed) with the extracted one.
flag = False
for row in people_list:
    temp = row['name']
    del row['name']
    print(row)
    if row == extracted_seq:
        print(temp)
        flag = True
if flag != True:
print("No match") | [
"mgxmajewski@gmail.com"
] | mgxmajewski@gmail.com |
e3a350e9daa6bb356997dec03b833179fc1eb34e | f2579545965c70b7e6cb28248b561c107960d5ea | /bin/track_task.py | f0d4ee43fd50fb89648c673ab49a8c33f038594b | [] | no_license | lao19881213/GLEAM-X-pipeline | f5b79b0636145c7bba115c299252f2c9abc879ec | ea98c4ab0d76a2cd77f2238104ee6f78ac093650 | refs/heads/master | 2020-04-19T08:46:57.565628 | 2018-11-19T07:42:30 | 2018-11-19T07:42:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,729 | py | #!/usr/bin/env python
__author__ = "PaulHancock & Natasha Hurley-Walker"
import sqlite3
import sys
db='/group/mwasci/nhurleywalker/GLEAM-X-pipeline/db/GLEAM-X.sqlite'
def queue_job(job_id, task_id, submission_time, obs_id, user, batch_file, stderr, stdout, task):
conn = sqlite3.connect(db)
cur = conn.cursor()
cur.execute("""INSERT INTO processing
( job_id, task_id, submission_time, obs_id, user, batch_file, stderr, stdout, task, status)
VALUES ( ?,?,?,?,?,?,?,?,?, 'queued')
""", (job_id, task_id, submission_time, obs_id, user, batch_file, stderr, stdout, task))
conn.commit()
conn.close()
def start_job(job_id, task_id, start_time):
conn = sqlite3.connect(db)
cur = conn.cursor()
cur.execute("""UPDATE processing SET status='started', start_time=? WHERE job_id =? AND task_id=?""", (start_time, job_id, task_id))
conn.commit()
conn.close()
def finish_job(job_id, task_id, end_time):
conn = sqlite3.connect(db)
cur = conn.cursor()
cur.execute("""UPDATE processing SET status='finished', end_time=? WHERE job_id =? AND task_id=?""", (end_time, job_id, task_id))
conn.commit()
conn.close()
def fail_job(job_id, task_id, time):
conn = sqlite3.connect(db)
cur = conn.cursor()
cur.execute("""UPDATE processing SET status='failed', end_time=? WHERE job_id =? AND task_id=?""", (time, job_id, task_id))
conn.commit()
conn.close()
def require(args, reqlist):
"""
Determine if the the given requirements are met
ie that the attributes in the reqlist are not None.
"""
for r in reqlist:
if not getattr(args, r):
print "Directive {0} requires argument {1}".format(args.directive, r)
sys.exit()
return True
if __name__ == "__main__":
import argparse
ps = argparse.ArgumentParser(description='track tasks')
ps.add_argument('directive', type=str, help='Directive', default=None)
ps.add_argument('--jobid', type=int, help='Job id from slurm', default=None)
ps.add_argument('--taskid', type=int, help='Task id from slurm', default=None)
ps.add_argument('--task', type=str, help='task being run', default=None)
ps.add_argument('--submission_time', type=int, help="submission time", default=None)
ps.add_argument('--start_time', type=int, help='job start time', default=None)
ps.add_argument('--finish_time', type=int, help='job finish time', default=None)
ps.add_argument('--batch_file', type=str, help='batch file name', default=None)
ps.add_argument('--obs_id', type=int, help='observation id', default=None)
ps.add_argument('--stderr', type=str, help='standard error log', default=None)
ps.add_argument('--stdout', type=str, help='standard out log', default=None)
args = ps.parse_args()
user = os.environ['USER']
if args.directive.lower() == 'queue':
require(args, ['jobid', 'taskid', 'submission_time', 'obs_id', 'user', 'batch_file', 'stderr', 'stdout', 'task'])
queue_job(args.jobid, args.taskid, args.submission_time, args.obs_id, user, args.batch_file, args.stderr, args.stdout, args.task)
elif args.directive.lower() == 'start':
require(args, ['jobid', 'taskid', 'start_time'])
start_job(args.jobid, args.taskid, args.start_time)
elif args.directive.lower() == 'finish':
require(args, ['jobid', 'taskid', 'finish_time'])
finish_job(args.jobid, args.taskid, args.finish_time)
elif args.directive.lower() == 'fail':
require(args, ['jobid', 'taskid', 'finish_time'])
fail_job(args.jobid, args.taskid, args.finish_time)
else:
print "I don't know what you are asking; please include a queue/start/finish/fail directive"
| [
"nhw@icrar.org"
] | nhw@icrar.org |
13e6dab25939004b31a87b72b1bff7a81f948bef | b576fb0efac1cdb48fa484796033aa904b86104e | /webserver.py | 34a7432f6b6c1f0575be28b3637c6c034624fb89 | [] | no_license | i-xingxu/web_servver | b5aa2462248f57510b6d141a239b87bc49239223 | 18f03dff3a8083e2f0a7bd6bec695c5edfe06a26 | refs/heads/master | 2020-03-18T16:02:00.291854 | 2018-05-26T08:54:38 | 2018-05-26T08:54:38 | 134,943,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | #coding=utf-8
from http.server import HTTPServer, BaseHTTPRequestHandler
import json
data = {'result': 'this is a test'}
host = ('localhost', 8888)
class Request(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(json.dumps(data).encode())
if __name__ == '__main__':
server = HTTPServer(host, Request)
print("Starting server, listen at: %s:%s" % host)
server.serve_forever() | [
"3.14159265358979"
] | 3.14159265358979 |
d52f369279b61c549cd45ca50f35e600bd083990 | 4032f53b1b780a9ae30d2c7defa29ccc48059d3e | /mysite/wasteweb/migrations/0004_inputanop.py | 3b965da510a972953a51e2e175a566c439d7e747 | [] | no_license | kimmy28/django-spdsk-repo | 33d308de176777adf0db56d64608ac25b3083fe2 | e6a479d9ad2c515e65532eea52c5181a8d2462b5 | refs/heads/master | 2020-06-23T16:32:46.689254 | 2019-07-25T15:08:05 | 2019-07-25T15:08:05 | 198,681,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,067 | py | # Generated by Django 2.2 on 2019-05-17 18:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wasteweb', '0003_delete_angkutansampah'),
]
operations = [
migrations.CreateModel(
name='InputanOP',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tanggal', models.DateField(max_length=100)),
('jam', models.TimeField(max_length=100)),
('jumlah', models.IntegerField(help_text='in kg')),
('nama_angkutan_truk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='wasteweb.AngkutanTrukSampah')),
('nama_lokasi', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='wasteweb.LokasiKecamatan')),
('nama_sampah', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='wasteweb.Sampah')),
],
),
]
| [
"47594667+kimmy28@users.noreply.github.com"
] | 47594667+kimmy28@users.noreply.github.com |
656ea44a2013c49708dc47316d8f89de585a7142 | 48ada3e6aebcc5b6b364208303716133f35a502d | /MyProject/myapp/templatetags/my_filter.py | 26e530ce5441ee95bf897397841bc0f2118f1748 | [] | no_license | sabujgolder/HostingPython | 6610586be12abb7059831611c505138f0b7ada55 | 2159d135445f57327b689b98543905a6e3c78870 | refs/heads/master | 2023-08-21T22:26:37.152965 | 2021-10-16T13:33:20 | 2021-10-16T13:33:20 | 417,837,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | from django import template
register = template.Library()
def my_filter(value):
return value + " | Custom Filter "
register.filter('custom_filter',my_filter)
| [
"sabujgolder9334@gmail.com"
] | sabujgolder9334@gmail.com |
9b6ea46c7719d9ae63ed88c912bf32fb6f626a7b | 21978faf4bf1cc8700e152f5e0784abfc5a3a518 | /PythonProject/Pomodoro/sample/pomodoro.py | a221e3af68580fdd2ad53ed5ed5eabf02ec4449e | [] | no_license | AbelDeLuna/Coding-Examples | b093c5c46dd85cdb853b0db0afc4bdac4cfd35b7 | aa105589ca0aa0db32451e084aa07bfd8b5e2814 | refs/heads/master | 2020-03-07T16:41:40.052061 | 2019-04-17T00:12:03 | 2019-04-17T00:12:03 | 127,590,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76 | py | from pomodoroUI import display
if __name__ == "__main__":
x = display() | [
"lunaabel9@Gmail.com"
] | lunaabel9@Gmail.com |
2e128223a11a22dfa56ab09f5a1f881794efade3 | 9ffdc5aace2d4c904c39c5f9af21a2165bb258a2 | /codes/forward_result.py | 5da67a5ca72e703cfe6a6f4b0a8a6598058b9e73 | [] | no_license | Qirui-Y/UDC-Residual-and-Dense-UNet | 971d287432c9682d4a4e4f07cb137f7cecc8436e | 220162dfed14e78feb97c56a7a9762013d21d7a2 | refs/heads/master | 2022-12-09T01:12:23.262885 | 2020-09-09T03:41:20 | 2020-09-09T03:41:20 | 293,988,585 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,299 | py | import torch
from models import create_model
import options.options as option
import numpy as np
import utils.util as util
visualization = False
if visualization:
import matplotlib.pyplot as plt
opt = dict()
opt['model'] = 'sr'
opt['is_train'] = False
opt['dist'] = False
opt['train'] = {}
opt['gpu_ids'] = 1 # change here
opt['network_G'] = {'which_model_G':'UDC_res_5'} # change here
opt['path'] = {'pretrain_model_G':'model/Poled_model.pth','strict_load':True} # change here
model = create_model(opt)
def get_result(img_LQ):
# input: [H, W, 3], RGB
img_input = img_LQ.copy()
img_LQ = img_LQ/255
img_LQ = torch.from_numpy(np.ascontiguousarray(np.transpose(img_LQ, (2, 0, 1)))).float() # [3, H, W]
data = {}
data['LQ'] = img_LQ.unsqueeze(0) # [1, 3, H, W]
model.feed_data(data)
model.test()
visuals = model.get_current_visuals()
#sr_img = util.tensor2img(visuals['SR']) # uint8
img_np = visuals['SR'].numpy()
img_np = np.transpose(img_np[:, :, :], (1, 2, 0)) # HWC, RGB
img_np = (img_np * 255.0).round()
sr_img = img_np.astype(np.uint8)
if visualization:
plt.subplot(1,2,1)
plt.imshow(img_input)
plt.subplot(1,2,2)
plt.imshow(sr_img)
plt.show()
return sr_img
| [
"2068325534@qq.com"
] | 2068325534@qq.com |
db420fb80be68c7b5e1dd776d883c1a88d15e964 | dbb320f62c06433b2ca92ee3dd51a6bde8527143 | /xml_test.py | 6d74d88310bc73f6cbadb61d24b4f3ea2c985b5e | [
"MIT"
] | permissive | pranavdave893/Leetcode | 3c051f4510d8907f04534b27c1fa76602f205852 | 1f30ea37af7b60585d168b15d9397143f53c92a1 | refs/heads/master | 2021-06-10T12:12:34.782733 | 2021-03-22T00:37:44 | 2021-03-22T00:37:44 | 140,044,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 992 | py | from xml.etree import ElementTree as xml
from xml.dom import minidom
def prettify(elem):
"""Return a pretty-printed XML string for the Element.
"""
rough_string = xml.tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=" ")
root = xml.Element("Users")
userelement = xml.Element("user")
root.append(userelement)
uid = xml.SubElement(userelement, "uid")
uid.text = "1"
FirstName = xml.SubElement(userelement, "FirstName")
FirstName.text = "testuser"
LastName = xml.SubElement(userelement, "LastName")
LastName.text = "testuser"
Email = xml.SubElement(userelement, "Email")
Email.text = "testuser@test.com"
state = xml.SubElement(userelement, "state")
state.text = "xyz"
location = xml.SubElement(userelement, "location")
location.text = "abc"
# xml_str = xml.tostring(root).decode()
tree = xml.tostring(root, 'utf-8')
print (tree)
# xml_str = (prettify(root))
# with open("text.xml","w") as fh:
# fh.write(xml_str) | [
"pranavdave@Pranavs-MacBook-Pro.local"
] | pranavdave@Pranavs-MacBook-Pro.local |
7c58362f81d2eebf86e77c4f52201dabd123be2d | e7b7cc34f77c71e61aa0fa05bcc62f54fc2fc0e1 | /AlgorithmCodeTemplates/algorithm/sliding_window_examples.py | 634c355c9efa3418e81eeafb9f04d218da1225cd | [] | no_license | sevenhe716/LeetCode | 41d2ef18f5cb317858c9b69d00bcccb743cbdf48 | 4a1747b6497305f3821612d9c358a6795b1690da | refs/heads/master | 2020-03-16T16:12:27.461172 | 2019-04-22T13:27:54 | 2019-04-22T13:27:54 | 130,221,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,002 | py | from collections import Counter
from collections import defaultdict
# [3] https://leetcode.com/problems/longest-substring-without-repeating-characters/
# variation with no pattern
def lengthOfLongestSubstring(s):
# create a default dict to maintain state
counter = defaultdict(int)
count, start, end, res = 0, 0, 0, 0
while end < len(s):
counter[s[end]] += 1
if counter[s[end]] > 1:
count += 1
end += 1
while count > 0:
counter[s[start]] -= 1
if counter[s[start]] > 0:
count -= 1
start += 1
res = max(res, end - start)
return res
# [76] https://leetcode.com/problems/minimum-window-substring/
# variation with finding minimum
def minWindow(s: str, t: str) -> str:
counter = Counter(t)
count, start, end, res = len(t), 0, 0, [float('inf'), 0]
while end < len(s):
counter[s[end]] -= 1
# consider duplicate char in t
if counter[s[end]] >= 0:
count -= 1
end += 1
# valid in while
while count == 0:
# update minimum here, inner while loop
if end - start < res[0]:
res = (end - start, start)
counter[s[start]] += 1
if counter[s[start]] > 0:
count += 1
start += 1
return s[res[1]:res[0] + res[1]] if res[0] != float('inf') else ''
# [904] https://leetcode.com/problems/fruit-into-baskets/
# variation with list
def totalFruit(tree: 'List[int]') -> int:
cnt = defaultdict(int)
count, start, end, res = 0, 0, 0, 0
while end < len(tree):
cnt[tree[end]] += 1
if cnt[tree[end]] == 1:
count += 1
end += 1
while count > 2:
cnt[tree[start]] -= 1
if cnt[tree[start]] == 0:
count -= 1
start += 1
res = max(res, end - start)
return res
# [438] https://leetcode.com/problems/find-all-anagrams-in-a-string/
# variation with restrict between start and end
def findAnagrams(s: str, p: str) -> 'List[int]':
len_p, len_s = len(p), len(s)
if len_p > len_s:
return []
counter = Counter(p)
count, start, end, res = len_p, 0, 0, []
while end < len_s:
# only update counter when match char in p
counter[s[end]] -= 1
if counter[s[end]] >= 0:
count -= 1
end += 1
if count == 0:
res.append(start)
# not use a while, because restrict the length
if end - start == len_p:
counter[s[start]] += 1
# exclude char not in p, because always negative
if counter[s[start]] > 0:
count += 1
start += 1
return res
# [30] https://leetcode.com/problems/substring-with-concatenation-of-all-words/
# variation with complex match policy
def findSubstring(s: str, words: 'List[str]') -> 'List[int]':
if not words:
return []
word_len, res = len(words[0]), []
# start offset from 0 to word_len, and step is word_len
for i in range(word_len):
# reset state every epoch
counter = Counter(words)
start, end, count = i, i, len(words)
while end < len(s):
cur_word = s[end:end + word_len]
# check is not necessary here, just for performance
if cur_word in counter:
counter[cur_word] -= 1
if counter[cur_word] >= 0:
count -= 1
end += word_len
if count == 0:
res.append(start)
# ensure consecutive words
if end - start == word_len * len(words):
cur_word = s[start:start + word_len]
if cur_word in counter:
counter[cur_word] += 1
if counter[cur_word] > 0:
count += 1
start += word_len
# the order is not necessary here
return res | [
"429134862@qq.com"
] | 429134862@qq.com |
5a6867fe5bae4b919908de7cd9fc977d193bb0db | 32c4dde56cc039ba0e02012deb51419293306c5d | /addons/tasquescbm/models/todo_task_model.py | 26085da726d5b5c83912fb2d803bbae509f4178f | [] | no_license | damarillom/odoo | c5172003682d0c4725c20b5aeefcb11756fe33e4 | c138f176357f86ddef26b4f35d82becc45634f32 | refs/heads/master | 2020-05-03T04:57:25.958730 | 2019-04-02T15:00:34 | 2019-04-02T15:00:34 | 178,436,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 709 | py | # -*- coding: utf-8 -*-
from odoo import api, fields, models
class TodoTask(models.Model):
_name = 'todo.task'
name = fields.Char(help="What needs to be done?")
is_done = fields.Boolean('Fet?')
active = fields.Boolean('Activa?', default=True)
date_deadline = fields.Date('Deadline')
urgencia = fields.Char(string='Urgència', help="Urgència de la tasca?")
user_id = fields.Many2one(
'res.users',
string='Responsable',
default=lambda s: s.env.user)
team_ids = fields.Many2many('res.partner', string='Equip')
@api.multi
def do_clear_done(self):
for task in self:
task.is_done = not task.is_done
return True
| [
"danielamarillo1@hotmail.com"
] | danielamarillo1@hotmail.com |
97f6be1a83a2f41e8e428878ad403e804c34a963 | 158a2e97d2c043938b27f8f9ae144c1583ea99f4 | /Numerical_Sine_Tests.py | 1bd3b8d5bcce14da3d0303d410788cc8c34dee07 | [] | no_license | Lancaster-Physics-Phys389-2020/phys389-2020-project-searbhan | fe3686bc612e4459e8f5dd4a42f7d31c12b183ea | 79dfda496c62e003895b434c0eec218804166d32 | refs/heads/master | 2021-06-12T21:27:12.614669 | 2020-04-09T16:32:29 | 2020-04-09T16:32:29 | 254,405,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | import numpy as np
class Sine:
def __init__(self, l, b, k, T, p, M):
self.l = l
self.b = b
self.k = k
self.T = T
self.p = p
self.M = M
#self.wv = wv
def value(self, wv):
l = self.l
b = self.b
k = self.k
T = self.T
p = self.p
M = self.M
v = np.sqrt(T/p)
f1 = (M*wv*wv*v*v) - k
f2 = np.sin(wv*b)*np.sin(wv*(l-b))
f3 = wv*T*np.sin(wv*l)
return (f1*f2 - f3) | [
"noreply@github.com"
] | Lancaster-Physics-Phys389-2020.noreply@github.com |
733afa4caeb88d41ba189770c059f29d7f3f07c7 | f22d5d94297c9cf167f45a1f938c8753ddbdee75 | /dsa/ArrayUnion.py | cdd1814037a5dbfbe28f4390664830d91865cb7a | [] | no_license | harsham4026/DSA | 084a045ef662666ebaf6ccf0e2c5680b6aa5aa60 | cb216dd87ac30db498ca9aede65e02a405575deb | refs/heads/master | 2021-06-27T15:57:43.061730 | 2019-08-01T02:44:25 | 2019-08-01T02:44:25 | 151,821,424 | 0 | 0 | null | 2020-10-13T10:58:13 | 2018-10-06T08:50:00 | Python | UTF-8 | Python | false | false | 1,800 | py | def union_of_arrays(arr1, arr2, m, n):
result_array = []
i = j = 0
while i < m and j < n:
if arr1[i] < arr2[j]:
i += 1
elif arr2[j] < arr1[i]:
j += 1
else:
result_array.append(arr2[j])
i += 1
j += 1
while i < m:
result_array.append(arr1[i])
i += 1
while j < n:
result_array.append(arr2[j])
j += 1
return result_array
def binary_search(arr, element_to_find, starting_index, last_index):
if last_index >= starting_index:
mid_index = int((starting_index + last_index) / 2)
if arr[mid_index] == element_to_find:
return mid_index
elif element_to_find > arr[mid_index]:
return binary_search(arr, element_to_find, mid_index + 1, last_index)
else:
return binary_search(arr, element_to_find, starting_index, mid_index - 1)
else:
return -1
def union_using_binary_search(arr1, arr2):
union_array = []
if (len(arr1) < len(arr2)):
arr1.sort()
else:
arr2.sort()
if len(arr1) < len(arr2):
for i in arr1:
if not i in union_array:
union_array.append(i)
for i in arr2:
if (binary_search(arr1, i, 0, len(arr1) - 1)) == -1:
union_array.append(i)
else:
for i in arr2:
if not i in union_array:
union_array.append(i)
for i in arr1:
if (binary_search(arr2, i, 0, len(arr2) - 1)) == -1:
union_array.append(i)
print(union_array)
if __name__ == '__main__':
arr1 = [1, 2, 1, 1, 3]
arr2 = [1, 1, 1, 2]
print(union_of_arrays(arr1, arr2, len(arr1), len(arr2)))
union_using_binary_search(arr1, arr2)
| [
"noreply@github.com"
] | harsham4026.noreply@github.com |
2b2524d483e90d56b4695f6a957abea80f42236d | d1064259419c6d22edee3131bd24b5cde78139c3 | /node_grabber.py | 88b89ecb516432629cf6d528658da11f4828dcd0 | [
"MIT"
] | permissive | jonathan-bravo/BioScripts | 945f1edba933f76352d105f1245b27a4c1ed0c37 | 77959a2feb4304fd3595a59fbbbd768a3e4c4831 | refs/heads/master | 2022-10-05T12:43:09.680005 | 2019-03-27T05:05:03 | 2019-03-27T05:05:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,464 | py | #!/usr/bin/env python3
import re, csv, sys
# Usually a .fasta or .fastq file
# - Scaffolds.fasta
inFile = sys.argv[1]
# Nodes that you want to grab from your file
# - 1,2,3
putative_nodes = sys.argv[2]
# The name you want for your output file
# - putative.fasta
outFile = sys.argv[3]
nodes = []
contigs = []
sequence = []
temp = ""
# This loop is creating the nodes and contigs array
# After each node if found a 'None' value is entered in the contigs to separate
# - the sequences
with open(inFile) as f:
for l, line in enumerate(f):
if re.search(r"\>", line):
nodes.append(line.strip())
contigs.append(None)
else:
contigs.append(line.strip())
# This adds a 'None' value at the end so that the sequences can be parsed
# - correctly
contigs.append(None)
# This loop merges the correct contigs into sequences
for i in range(len(contigs)):
if contigs[i] != None:
temp = temp + contigs[i]
elif contigs[i] == None:
sequence.append(temp)
temp = ""
sequence.pop(0)
# Cleaning up temporary lists
del contigs
del temp
## Compair second input to list of available node
keep = []
for i in range(len(nodes)):
temp = str(re.findall(r"\>NODE_(\d+)_", str(nodes[i])))
temp = temp[2:len(temp)-2]
if temp in putative_nodes:
keep.append(i)
del temp
file = open(outFile, "w")
for i in range(len(keep)):
file.write(nodes[keep[i]])
file.write("\n")
file.write(sequence[keep[i]])
file.write("\n")
file.close() | [
"hakmonkey@gmail.com"
] | hakmonkey@gmail.com |
f52fb6152bba23a4c0a005ca2f666c5e95d07473 | 6d80ce7a1f44ddf5741fd190ddfe0d9be8e5f162 | /data/lmdbMaker.py | 0081b73ccebdd68d83571914b0342cb8bcb9817a | [
"MIT"
] | permissive | dun933/FudanOCR | dd8830ca4b8ebb08acd31326fcf5aa3c961886a0 | fd79b679044ea23fd9eb30691453ed0805d2e98b | refs/heads/master | 2021-04-03T19:50:47.646099 | 2020-03-16T08:43:59 | 2020-03-16T08:43:59 | 248,391,401 | 1 | 0 | MIT | 2020-03-19T02:23:11 | 2020-03-19T02:23:10 | null | UTF-8 | Python | false | false | 6,516 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Please execute the code with python2
'''
import os
import lmdb
import cv2
import numpy as np
def checkImageIsValid(imageBin):
if imageBin is None:
return False
try:
imageBuf = np.fromstring(imageBin, dtype=np.uint8)
img = cv2.imdecode(imageBuf, cv2.IMREAD_GRAYSCALE)
imgH, imgW = img.shape[0], img.shape[1]
if imgH * imgW == 0:
return False
except:
print("Image is invalid!")
return True
def writeCache(env, cache):
with env.begin(write=True) as txn:
for k, v in cache.items():
txn.put(k, v)
def createDataset(outputPath, imagePathList, labelList, lexiconList=None, checkValid=True):
"""
Create LMDB dataset for CRNN training.
ARGS:
outputPath : LMDB output path
imagePathList : list of image path
labelList : list of corresponding groundtruth texts
lexiconList : (optional) list of lexicon lists
checkValid : if true, check the validity of every image
"""
assert (len(imagePathList) == len(labelList))
nSamples = len(imagePathList)
env = lmdb.open(outputPath, map_size=1099511627776)
cache = {}
cnt = 1
for i in range(nSamples):
imagePath = imagePathList[i]
label = labelList[i]
if not os.path.exists(imagePath):
print('%s does not exist' % imagePath)
continue
import codecs
with open(imagePath, 'r') as f:
imageBin = f.read()
if checkValid:
if not checkImageIsValid(imageBin):
print('%s is not a valid image' % imagePath)
continue
imageKey = 'image-%09d' % cnt
labelKey = 'label-%09d' % cnt
cache[imageKey] = imageBin
cache[labelKey] = label
if lexiconList:
lexiconKey = 'lexicon-%09d' % cnt
cache[lexiconKey] = ' '.join(lexiconList[i])
if cnt % 1000 == 0:
writeCache(env, cache)
cache = {}
print('Written %d / %d' % (cnt, nSamples))
cnt += 1
nSamples = cnt - 1
cache['num-samples'] = str(nSamples)
writeCache(env, cache)
print('Created dataset with %d samples' % nSamples)
def read_image_label(image_directory, label_address):
import os
image_lis = os.listdir(image_directory)
f = open(label_address)
dict = {}
i = 1
# 图片:目标记录
for line in f.readlines():
# TODO
dict[line[10:].split(" ")[0]] = line.split(' ')[1].replace('\n', '').replace('\r',
'') # arttrain-11.art/lsvttest10.lsvt12
'''
print(dict)
i+=1
if i==14:
break
print(dict)
'''
# print(dict)
result1 = []
result2 = []
# TODO
for image_path1 in image_lis:
for image_path2 in os.listdir(image_directory + '/' + image_path1):
try:
# image_path = image_path.replace('.jpg','')
# result1.append(image_directory+'/'+image_path1+'/'+image_path2)
result2.append(dict[image_path1 + '/' + image_path2])
result1.append(image_directory + '/' + image_path1 + '/' + image_path2)
except:
# pass
print("jianzhi")
return result1, result2
def extract_result_from_xml():
import re
f = open('../xml_test/word.xml', 'r')
string = ""
for line in f.readlines():
print(line)
string += line
print(string)
# 记录文件路径
result1 = re.findall(r'file=\"(.*?)\"', string)
for i in range(len(result1)):
result1[i] = '/home/chenjingye/datasets/ICDAR2003/WordR/TrialTest/' + result1[i]
print(result1)
result2 = re.findall(r'tag=\"(.*?)\"', string)
print(result2)
return result1, result2
def ic15():
f = open('/home/chenjingye/datasets/ICDAR2015/Word_recognition/Challenge4_Test_Task3_GT.txt', 'r')
result1 = []
result2 = []
for line in f.readlines():
# print(line)
# print(line.split())
a, b = line.split(', ')
print(a, b)
result1.append(
'/home/chenjingye/datasets/ICDAR2015/Word_recognition/ch4_test_word_images_gt/' + a.replace(',', ''))
result2.append(b.replace("\"", "").replace('\r\n', ''))
print(result1)
print(result2)
return result1, result2
def find_jpg():
import os
root = "/mnt/sdb1/zifuzu/chenjingye/datasets/mnt/ramdisk/max/90kDICT32px"
flag = True
def findjpg(path, ret):
"""Finding the *.txt file in specify path"""
filelist = os.listdir(path)
for filename in filelist:
# if len(ret) > 500000 :
# return
de_path = os.path.join(path, filename)
if os.path.isfile(de_path):
if de_path.endswith(".jpg"): # Specify to find the txt file.
print(de_path)
ret.append(de_path)
# if len(ret) > 500000:
# return
else:
findtxt(de_path, ret)
ret = []
findtxt(root, ret)
for path in ret:
print(path)
try:
os.remove('./temp.txt')
except:
pass
f = open('./temp.txt', 'a')
for element in ret:
f.write(element + '\n')
f.close()
def syn90():
import re
f = open('./temp.txt', 'r')
result1 = []
result2 = []
for line in f.readlines():
result1.append(line.replace('\n', ''))
target = re.findall(r'_(.*?)_', line)[0]
result2.append(target)
return result1, result2
if __name__ == '__main__':
'''
将两个list传进createDataset函数
list1: 图片路径列表
list2: 图片标签列表
其中两个列表在相同位置
'''
imgList, labelList = ic15()
print(imgList)
print(labelList)
print("The length of the list is ", len(imgList))
'''Input the address you want to generate the lmdb file.'''
createDataset('/mnt/sdb1/zifuzu/chenjingye/datasets/syn90_train_500000data_lmdb', imgList, labelList)
| [
"576194329@qq.com"
] | 576194329@qq.com |
7219c2288997ad16c5bd77381b8e4bad11e2966c | ff447903cad14d1a2c4e6e47388533aed1e238d9 | /boson_sdk_cpp/FSLP_Files/UART_HalfDuplex.py | 2d670edb19257bcd99847492f5df0441254f0e94 | [
"MIT"
] | permissive | tonyngophd/vuir-zoom | cafbc1a1eb1a1450efb602632808c03d0329ba90 | ba74d0ad1f3c5cc510c91c0cd9327c09f51a9415 | refs/heads/master | 2023-03-24T01:33:38.629294 | 2021-03-05T03:14:27 | 2021-03-05T03:14:27 | 306,504,465 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 19,847 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 31 14:28:13 2015
@author: jimamura
"""
import ctypes, os, sys
import _ctypes
try:
from .Factory_BOSON_GECKO import *
from . import Factory_INIUtils#, BASE_BosonGecko
FOUND_INI = True
except:
FOUND_INI = False
class UART():
def __init__(self,dllPath = None,**kwargs):
self.isClosed = False
self.portOpen = False
self.port_num = -1
if (not "posix" in os.name):
dll_name = "FSLP_64.dll"
else:
dll_name = "FSLP_64.so"
if dllPath:
loadpath = os.path.join(dllPath,dll_name)
else:
loadpath = os.path.join(os.path.dirname(__file__),dll_name)
try:
self.__library = ctypes.cdll.LoadLibrary(loadpath)
self.__dllHandle = self.__library._handle
self.camsend = self.__library.__getattr__("FSLP_send_to_camera")
self.camread = self.__library.__getattr__("FSLP_read_frame")
self.camunframed = self.__library.__getattr__("FSLP_read_unframed")
# try:
# self.poll_frame = self.__library.__getattr__("poll_general_frame")
# except:
# self.poll_frame = None
self.port_open = self.__library.__getattr__("FSLP_open_port")
self.port_close = self.__library.__getattr__("FSLP_close_port")
self.lookup_port_name = self.__library.__getattr__("FSLP_lookup_port_id")
except OSError as e:
print("dllPath = {!s}".format(dllPath))
print("filePath = {!s}".format(os.path.dirname(__file__)))
print("dllName = {!s}".format(dll_name))
raise e
def SendToCamera(self,ClientToCam,clientBytes,expectedReceiveBytes):
''' Send ClientToCam with len()=clientBytes to camera,
receive CamToClient with len()=camBytes
'''
if (not self.isClosed) and self.portOpen:
sendBuffer = (ctypes.c_uint8*clientBytes)()
# print(" Sent:\t{!s}".format(ClientToCam))
for i,dat in enumerate(ClientToCam):
sendBuffer[i] = dat
sendBytes = ctypes.c_uint16(clientBytes)
receiveBuffer = (ctypes.c_uint8*2048)(*[0xFF]*2048)
receiveBytes = ctypes.c_uint16(expectedReceiveBytes)
channel_ID = ctypes.c_uint8(0x00)
start_byte_ms = ctypes.c_uint16(1000)
self.camsend(ctypes.c_int32(self.port_num),channel_ID,sendBytes,sendBuffer)
self.camread(ctypes.c_int32(self.port_num),channel_ID,start_byte_ms,ctypes.byref(receiveBytes),receiveBuffer)
returnBuffer = []
for i in range(receiveBytes.value):
returnBuffer.append(receiveBuffer[i])
returnBytes = bytearray(returnBuffer)
# print(" Recd:\t{!s}".format(returnBytes))
return returnBytes
else:
raise Exception("Attempting to access closed DLL or closed COM port!")
def SendFrame(self,ChannelID,ClientToCam,clientBytes):
''' Send ClientToCam with len()=clientBytes to camera,
receive CamToClient with len()=camBytes
'''
if (not self.isClosed) and self.portOpen:
sendBuffer = (ctypes.c_uint8*clientBytes)()
for i,dat in enumerate(ClientToCam):
sendBuffer[i] = dat
sendBytes = ctypes.c_uint16(clientBytes)
channel_ID = ctypes.c_uint8(ChannelID)
self.camsend(ctypes.c_int32(self.port_num),channel_ID,sendBytes,sendBuffer)
else:
raise Exception("Attempting to access closed DLL or closed COM port!")
def ReadFrame(self,ChannelID,expectedReceiveBytes):
''' Send ClientToCam with len()=clientBytes to camera,
receive CamToClient with len()=camBytes
'''
if (not self.isClosed) and self.portOpen:
receiveBuffer = (ctypes.c_uint8*2048)(*[0xFF]*2048)
receiveBytes = ctypes.c_uint16(expectedReceiveBytes)
channel_ID = ctypes.c_uint8(ChannelID)
start_byte_ms = ctypes.c_uint16(1000)
self.camread(ctypes.c_int32(self.port_num),channel_ID,start_byte_ms,ctypes.byref(receiveBytes),receiveBuffer)
returnBuffer = []
for i in range(receiveBytes.value):
returnBuffer.append(receiveBuffer[i])
returnBytes = bytearray(returnBuffer)
return returnBytes
else:
raise Exception("Attempting to access closed DLL or closed COM port!")
def PollDebug(self, channel_ID):
''' Send ClientToCam with len()=clientBytes to camera,
receive CamToClient with len()=camBytes
'''
if (not self.isClosed) and self.portOpen:
receiveBuffer = (ctypes.c_uint8*2048)(*[0xFF]*2048)
receiveBytes = ctypes.c_uint16(0)
channelID = ctypes.c_uint8(channel_ID)
start_byte_ms = ctypes.c_uint16(25)
#(int32_t port_num,uint8_t *channel_ID,uint32_t *receiveBytes, uint8_t *receiveBuffer);
self.camread(ctypes.c_int32(self.port_num),channelID,start_byte_ms,ctypes.byref(receiveBytes),receiveBuffer)
if receiveBytes.value == 0:
return bytearray()
returnBuffer = []
for i in range(receiveBytes.value):
returnBuffer.append(receiveBuffer[i])
returnBytes = bytearray(returnBuffer)
# print(" Recd:\t{!s}".format(returnBytes))
return returnBytes
else:
raise Exception("Attempting to access closed DLL or closed COM port!")
def DumpUnframed(self):
''' Send ClientToCam with len()=clientBytes to camera,
receive CamToClient with len()=camBytes
'''
if (not self.isClosed) and self.portOpen:
receiveBuffer = (ctypes.c_uint8*2048)(*[0xFF]*2048)
receiveBytes = ctypes.c_uint16(0)
start_byte_ms = ctypes.c_uint16(25)
self.camunframed(ctypes.c_int32(self.port_num),start_byte_ms,ctypes.byref(receiveBytes),receiveBuffer)
if receiveBytes.value == 0:
return bytearray()
returnBuffer = []
for i in range(receiveBytes.value):
returnBuffer.append(receiveBuffer[i])
returnBytes = bytearray(returnBuffer)
# print(" Recd:\t{!s}".format(returnBytes))
return returnBytes
else:
raise Exception("Attempting to access closed DLL or closed COM port!")
def close(self):
self.isClosed = True
del(self.__library)
if (not "posix" in os.name):
_ctypes.FreeLibrary(self.__dllHandle)
def OpenPort(self,ini_name = "CameraSerialConfig.ini",manual_port=None,manual_baud=None):
''' Send ClientToCam with len()=clientBytes to camera,
receive CamToClient with len()=camBytes
'''
if FOUND_INI:
if hasattr(sys, 'frozen'):
infolder = os.path.dirname(os.path.abspath(sys.executable))
else:
infolder = os.path.dirname(os.path.abspath(__file__))
iniPath = os.path.join(infolder, ini_name)
configDict = Factory_INIUtils.readTestCameraINI(iniPath)
# timeout = int(configDict[INI_TIMEOUT])
portname = str(configDict[INI_COM_PORT])
else:
if not manual_port:
raise ValueError("Must provide manual_port=\"COM<n>\", manual_port=\"/dev/ttyACM<n>\" or manual_port=<z> argument")
if manual_port is not None:
portname = str(manual_port)
try:
portnum = int(portname)
except ValueError:
portbuffer = (ctypes.c_uint8*16)()
for i,dat in enumerate(portname.encode('ascii')):
portbuffer[i] = dat
portnum = self.lookup_port_name(portbuffer,len(portname))
self.port_num = portnum
if FOUND_INI:
baudrate = int(configDict[INI_BAUDRATE])
else:
baudrate = 921600
if manual_baud:
baudrate = int(manual_baud)
print("PortNum: {:d} // {!s}\nBaudRate: {:d}".format(portnum,portname,baudrate))
# print(configDict)
ret = self.port_open(ctypes.c_int(portnum),ctypes.c_int(baudrate))
if ret == 0:
self.portOpen = True
print("Port open")
else:
raise IOError("Failed to open COM port {:d}!".format(ret))
def ClosePort(self):
self.port_close(ctypes.c_int32(self.port_num))
import serial,time
#import pdb
def debugprint(*args, **kwargs):
    """Silent stand-in for print(); re-enable the call below to trace serial traffic."""
    # print(*args, **kwargs)
    return None
class PyUART():
    """Pure-Python (pySerial) implementation of the camera's framed serial
    protocol.

    Frames are delimited by START (0x8E) and END (0xAE) bytes, with 0x9E as
    an escape prefix for occurrences of any of the three special bytes inside
    the payload.  Each frame carries a channel id, the payload, and a CCITT
    CRC-16 (polynomial 0x1021, init 0x1D0F) computed over id + payload.
    """
    def __init__(self):
        self.isClosed = False
        self.portOpen = False
        self.port_num = -1
        self.port = None          # pySerial Serial object, created in OpenPort
        self.readTimeout = 5      # seconds before ReadFrame gives up
        self.FRAME_BUF_SIZ = 4000
        # Framing bytes and their escaped substitutes.
        self.START_FRAME_BYTE = bytes([0x8E])
        self.ESCAPE_BYTE = bytes([0x9E])
        self.END_FRAME_BYTE = bytes([0xAE])
        self.ESCAPED_START_FRAME_BYTE = bytes([0x81])
        self.ESCAPED_ESCAPE_BYTE = bytes([0x91])
        self.ESCAPED_END_FRAME_BYTE = bytes([0xA1])
        self.g_frame_buf = bytearray()
        # Standard CCITT CRC-16 lookup table (polynomial 0x1021).
        self.ccitt_16Table = [
            0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5, 0x60C6, 0x70E7,
            0x8108, 0x9129, 0xA14A, 0xB16B, 0xC18C, 0xD1AD, 0xE1CE, 0xF1EF,
            0x1231, 0x0210, 0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6,
            0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C, 0xF3FF, 0xE3DE,
            0x2462, 0x3443, 0x0420, 0x1401, 0x64E6, 0x74C7, 0x44A4, 0x5485,
            0xA56A, 0xB54B, 0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D,
            0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6, 0x5695, 0x46B4,
            0xB75B, 0xA77A, 0x9719, 0x8738, 0xF7DF, 0xE7FE, 0xD79D, 0xC7BC,
            0x48C4, 0x58E5, 0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823,
            0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948, 0x9969, 0xA90A, 0xB92B,
            0x5AF5, 0x4AD4, 0x7AB7, 0x6A96, 0x1A71, 0x0A50, 0x3A33, 0x2A12,
            0xDBFD, 0xCBDC, 0xFBBF, 0xEB9E, 0x9B79, 0x8B58, 0xBB3B, 0xAB1A,
            0x6CA6, 0x7C87, 0x4CE4, 0x5CC5, 0x2C22, 0x3C03, 0x0C60, 0x1C41,
            0xEDAE, 0xFD8F, 0xCDEC, 0xDDCD, 0xAD2A, 0xBD0B, 0x8D68, 0x9D49,
            0x7E97, 0x6EB6, 0x5ED5, 0x4EF4, 0x3E13, 0x2E32, 0x1E51, 0x0E70,
            0xFF9F, 0xEFBE, 0xDFDD, 0xCFFC, 0xBF1B, 0xAF3A, 0x9F59, 0x8F78,
            0x9188, 0x81A9, 0xB1CA, 0xA1EB, 0xD10C, 0xC12D, 0xF14E, 0xE16F,
            0x1080, 0x00A1, 0x30C2, 0x20E3, 0x5004, 0x4025, 0x7046, 0x6067,
            0x83B9, 0x9398, 0xA3FB, 0xB3DA, 0xC33D, 0xD31C, 0xE37F, 0xF35E,
            0x02B1, 0x1290, 0x22F3, 0x32D2, 0x4235, 0x5214, 0x6277, 0x7256,
            0xB5EA, 0xA5CB, 0x95A8, 0x8589, 0xF56E, 0xE54F, 0xD52C, 0xC50D,
            0x34E2, 0x24C3, 0x14A0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
            0xA7DB, 0xB7FA, 0x8799, 0x97B8, 0xE75F, 0xF77E, 0xC71D, 0xD73C,
            0x26D3, 0x36F2, 0x0691, 0x16B0, 0x6657, 0x7676, 0x4615, 0x5634,
            0xD94C, 0xC96D, 0xF90E, 0xE92F, 0x99C8, 0x89E9, 0xB98A, 0xA9AB,
            0x5844, 0x4865, 0x7806, 0x6827, 0x18C0, 0x08E1, 0x3882, 0x28A3,
            0xCB7D, 0xDB5C, 0xEB3F, 0xFB1E, 0x8BF9, 0x9BD8, 0xABBB, 0xBB9A,
            0x4A75, 0x5A54, 0x6A37, 0x7A16, 0x0AF1, 0x1AD0, 0x2AB3, 0x3A92,
            0xFD2E, 0xED0F, 0xDD6C, 0xCD4D, 0xBDAA, 0xAD8B, 0x9DE8, 0x8DC9,
            0x7C26, 0x6C07, 0x5C64, 0x4C45, 0x3CA2, 0x2C83, 0x1CE0, 0x0CC1,
            0xEF1F, 0xFF3E, 0xCF5D, 0xDF7C, 0xAF9B, 0xBFBA, 0x8FD9, 0x9FF8,
            0x6E17, 0x7E36, 0x4E55, 0x5E74, 0x2E93, 0x3EB2, 0x0ED1, 0x1EF0
        ]
    def ByteCRC16(self, value, crcin):
        """Fold one byte into the running CRC and return the new CRC value."""
        bottom_byte = (crcin << 8) & 0xFFFF
        top_byte = (crcin >> 8) & 0xFFFF
        value = value & 0xFF
        tbl_index = top_byte ^ value & 0xFF
        crcout = bottom_byte ^ self.ccitt_16Table[tbl_index]
        return crcout
    def CalcCRC16Bytes(self, count, buffer):
        """CRC-16 (init 0x1D0F) over the first *count* bytes of *buffer*."""
        crc = 0x1d0f
        if (not isinstance(buffer, bytes) and not isinstance(buffer, bytearray)):
            raise Exception("Type error in CalcCRC16Bytes")
        for cur_byte in buffer[:count]:
            crc = self.ByteCRC16(cur_byte, crc)
        return crc
    def SendToCamera(self, ClientToCam, clientBytes, expectedReceiveBytes):
        # Only available through the DLL-backed implementation.
        raise Exception("SendToCamera not currently implemented for PyUART")
    def SendFrame(self, ChannelID, payload, clientBytes):
        """Append the CRC, escape special bytes, wrap in START/END markers
        and write the resulting frame to the open serial port.
        """
        if ((not self.isClosed) and (self.portOpen)):
            # CRC is computed over channel id + raw (un-escaped) payload.
            temppayload = bytearray([ChannelID])
            temppayload.extend(payload)
            payload_crc = self.CalcCRC16Bytes(len(temppayload), temppayload)
            # print("CRC = 0x{:04x}".format(payload_crc))
            temppayload.extend([(payload_crc >> 8) & 0xff])
            temppayload.extend([payload_crc & 0xff])
            packet = bytearray()
            packet.extend(self.START_FRAME_BYTE)
            for i in range(0, len(temppayload)):
                if (temppayload[i] == self.START_FRAME_BYTE[0]):
                    packet.extend(self.ESCAPE_BYTE)
                    packet.extend(self.ESCAPED_START_FRAME_BYTE)
                elif (temppayload[i] == self.END_FRAME_BYTE[0]):
                    packet.extend(self.ESCAPE_BYTE)
                    packet.extend(self.ESCAPED_END_FRAME_BYTE)
                elif (temppayload[i] == self.ESCAPE_BYTE[0]):
                    packet.extend(self.ESCAPE_BYTE)
                    packet.extend(self.ESCAPED_ESCAPE_BYTE)
                else:
                    packet.extend([temppayload[i]])
            packet.extend(self.END_FRAME_BYTE)
            debugprint("sending " + str(len(packet)) + " bytes:" + " ".join(map(lambda b: format(b, "02x"), packet)))
            self.port.write(packet)
            # self.port.flush()
        else:
            raise Exception("Attempting to access closed DLL or closed COM port!")
    def ReadFrame(self, ChannelID, expectedReceiveBytes):
        """Read one frame from the port, un-escape it, verify its CRC and
        return the payload (channel id and CRC stripped).

        Bytes received outside a frame are collected but discarded.  Raises
        on read timeout, improper escaping, or CRC mismatch.
        """
        unframedBytes = bytearray()
        packet = bytearray()
        inFrame = False
        if (not self.isClosed) and self.port.isOpen():
            startTime = time.time()
            while True :
                byte = self.port.read(1)
                debugprint("Read a byte " + str(byte))
                if ((time.time() - startTime) > self.readTimeout):
                    raise Exception("Timed out in PyUART ReadFrame")
                if (byte == self.START_FRAME_BYTE):
                    debugprint("inframe")
                    inFrame = True
                    continue
                if (inFrame):
                    if (byte == self.ESCAPE_BYTE):
                        # The next byte tells us which special byte was escaped.
                        byte = self.port.read(1)
                        if (byte == self.ESCAPED_START_FRAME_BYTE):
                            packet.extend(self.START_FRAME_BYTE)
                        elif (byte == self.ESCAPED_END_FRAME_BYTE):
                            packet.extend(self.END_FRAME_BYTE)
                        elif (byte == self.ESCAPED_ESCAPE_BYTE):
                            packet.extend(self.ESCAPE_BYTE)
                        else:
                            raise Exception("Packet corrupt. Improperly escaped bytes encountered.")
                    elif (byte == self.END_FRAME_BYTE):
                        debugprint("endframe")
                        break
                    else:
                        packet.extend(byte)
                else:
                    unframedBytes.extend(byte)
            debugprint("received " + str(len(packet)) + " bytes:" + " ".join(map(lambda b: format(b, "02x"), packet)))
            # Last two bytes of the packet are the transmitted CRC.
            packetCRC = self.CalcCRC16Bytes( (len(packet) - 2), packet)
            packetCRC = bytes([packetCRC>>8, packetCRC&0x00FF])
            if (packetCRC == packet[-2:]):
                #if (payload[0] != ChannelID):
                #    raise Exception("Response for wrong channel received.")
                #if ((len(payload) - 3) != expectedReceiveBytes):
                #    raise Exception("Did not receive expected number of bytes.")
                returnBytes = packet[1:-2]  # strip channel id and CRC (start/end bytes were never stored)
                return returnBytes
            else:
                raise Exception("Packet corrupt. CRC doesn't match expected.")
        else:
            raise Exception("Attempting write to unopened PyUART")
    def PollDebug(self, channel_ID):
        # Only available through the DLL-backed implementation.
        raise Exception("PollDebug not currently implemented for PyUART")
    def DumpUnframed(self):
        # Only available through the DLL-backed implementation.
        raise Exception("DumpUnframed not currently implemented for PyUART")
    def close(self):
        """Mark the object closed and close the underlying serial port."""
        self.isClosed = True
        self.ClosePort()
    def OpenPort(self, ini_name="CameraSerialConfig.ini", manual_port=None, manual_baud=None):
        """Open the serial port named in the INI file (when FOUND_INI) or
        given via *manual_port* ("COM<n>", 1-based, or a 0-based integer).
        A manual_port / manual_baud always overrides the INI values.
        """
        if FOUND_INI:
            # Resolve the INI next to the frozen executable or this source file.
            if hasattr(sys, 'frozen'):
                infolder = os.path.dirname(os.path.abspath(sys.executable))
            else:
                infolder = os.path.dirname(os.path.abspath(__file__))
            iniPath = os.path.join(infolder, ini_name)
            configDict = Factory_INIUtils.readTestCameraINI(iniPath)
            # timeout = int(configDict[INI_TIMEOUT])
            portname = str(configDict[INI_COM_PORT])
        else:
            if not manual_port:
                raise ValueError("Must provide manual_port=\"COM<n>\" or manual_port=<n-1> argument")
        if manual_port is not None:
            portname = str(manual_port)
        # Accept either a 1-based "COM<n>" name or a raw 0-based port number.
        if "COM" in portname:
            portnum = int(portname.replace("COM",""))-1
            self.portname = portname
        else:
            portnum = int(portname)
            self.portname = "COM" + str(portnum + 1 )
        self.port_num = portnum
        if FOUND_INI:
            baudrate = int(configDict[INI_BAUDRATE])
        else:
            baudrate = 921600  # default baud when no INI is present
        if manual_baud:
            baudrate = int(manual_baud)
        print("PortNum: {:d} // {!s}\nBaudRate: {:d}".format(portnum,portname,baudrate))
        # print(configDict)
        self.port = serial.Serial()
        self.port.port = str(self.portname)
        self.port.baudrate = baudrate
        self.port.parity = 'N'
        self.port.stopbits = 1
        self.port.bytesize = 8
        self.port.timeout = 10
        self.port.open()
        self.portOpen = self.port.isOpen()
        if (self.portOpen):
            print("Port open")
        else:
            # BUGFIX: this branch previously formatted the undefined name
            # `ret` (copy-paste from the DLL implementation), which masked
            # the real failure with a NameError.
            raise IOError("Failed to open COM port {!s}!".format(self.portname))
    def ClosePort(self):
        self.port.close()
| [
"contact@suas.com"
] | contact@suas.com |
5065ee22d47e1ca22302a179abfa8374deba99b3 | ab3de6d9684e4aabffbe251440a23f46eb2dde69 | /tests/tune/iterative/test_asha.py | 30dad89496c1e7e99c02ee301d9684b9ee15e30e | [
"Apache-2.0"
] | permissive | Kamran151199/tune | 00063261b04bf18ab0e591445445eb09097c3757 | bf2288ddcb29c8345d996a9b22c0910da9002da1 | refs/heads/master | 2023-09-05T15:53:54.549870 | 2021-10-07T05:22:30 | 2021-10-07T05:22:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,714 | py | import math
from typing import Any, Dict, Iterable
from fugue import FugueWorkflow
from tune import optimize_by_continuous_asha
from tune.constants import TUNE_REPORT_METRIC
from tune.concepts.dataset import TuneDatasetBuilder
from tune.iterative.asha import ASHAJudge, RungHeap
from tune.iterative.objective import IterativeObjectiveFunc
from tune.concepts.space import Grid, Space
from tune.concepts.flow import Monitor, Trial, TrialReport
def test_rung_heap():
    """RungHeap keeps only the `capacity` best (lowest sort-metric) trials.

    Exercises the empty state, eviction once full, in-place update when the
    same trial id is pushed again, and the running `bests` history.
    """
    h = RungHeap(2)
    # Empty heap: best is NaN, no history recorded yet.
    assert 2 == h.capacity
    assert not h.full
    assert math.isnan(h.best)
    assert 0 == len(h)
    assert [] == h.bests
    assert h.push(rp("a", 1))
    assert h.push(rp("b", 0.1))
    assert h.push(rp("a", 2))
    assert h.push(rp("c", 0.5))
    assert not h.push(rp("d", 0.6))  # worse than both kept trials: rejected
    assert 0.1 == h.best
    assert 2 == len(h)
    assert h.full
    assert "a" not in h
    assert "d" not in h
    assert "b" in h
    assert "c" in h
    assert [1.0, 0.1, 0.1, 0.1, 0.1] == h.bests
    assert h.push(rp("e", 0.01))
    assert [1.0, 0.1, 0.1, 0.1, 0.1, 0.01] == h.bests
    assert "b" in h and "e" in h and 2 == len(h)
    # Re-pushing an existing id is accepted even with a worse metric.
    assert h.push(rp("e", 5))
    assert [1.0, 0.1, 0.1, 0.1, 0.1, 0.01, 0.01] == h.bests
    values = {x.trial_id: x for x in h.values()}
    assert "b" in values and "e" in values and 2 == len(values)
    assert 5.0 == values["e"].sort_metric
# TODO: test multiple partitions
def test_asha_judge_simple_happy_path():
    """Promoted trials receive the next rung's budget; once the final rung
    is filled the judge stops granting budget to every trial."""
    j = ASHAJudge(schedule=[(1.0, 2), (2.0, 1)])
    d = j.judge(rp("a", 0.5, 0))
    assert 2.0 == d.budget
    assert not d.should_checkpoint
    d = j.judge(rp("b", 0.6, 0))
    assert 2.0 == d.budget
    assert not d.should_checkpoint
    d = j.judge(rp("a", 0.4, 1))
    assert d.should_stop
    assert d.should_checkpoint
    # stop criteria met, so other jobs won't get more budget
    d = j.judge(rp("c", 0.2, 0))
    assert d.should_stop
    assert d.should_checkpoint
def test_asha_stop():
    """Study-level early stop: once every populated rung is full and the
    improvement between the last two rung bests drops below 0.2, the judge
    refuses budget for trns it has not seen before (existing ids continue)."""

    def should_stop(keys, rungs):
        # Collect the best metric of each consecutive full rung; request a
        # study stop when the last two bests differ by less than 0.2.
        metrics = []
        for r in rungs:
            if len(r) == 0:
                break
            if not r.full:
                return False
            metrics.append(r.best)
        if len(metrics) < 2:
            return False
        return metrics[-2] - metrics[-1] < 0.2

    j = ASHAJudge(
        schedule=[(1.0, 2), (2.0, 2), (3.0, 1)],
        always_checkpoint=True,
        study_early_stop=should_stop,
    )
    d = j.judge(rp("a", 0.6, 0))
    assert 2.0 == d.budget
    assert d.should_checkpoint
    d = j.judge(rp("b", 0.5, 0))
    assert 2.0 == d.budget
    assert d.should_checkpoint
    d = j.judge(rp("c", 0.4, 0))
    assert 2.0 == d.budget
    assert d.should_checkpoint
    d = j.judge(rp("b", 0.45, 1))
    assert 3.0 == d.budget
    assert d.should_checkpoint
    d = j.judge(rp("c", 0.39, 1))
    assert 3.0 == d.budget
    assert d.should_checkpoint
    d = j.judge(rp("x", 0.45, 0))
    # rungs[1] and rungs[0] best-metric diff is too small:
    # no longer accept new trials
    assert 0.0 == d.budget
    assert d.should_checkpoint
    d = j.judge(rp("b", 0.45, 1))
    assert 3.0 == d.budget  # existed ids can still be accepted
    assert d.should_checkpoint
    d = j.judge(rp("a", 0.44, 2))
    assert 0.0 == d.budget  # already stopped
    assert d.should_checkpoint
def test_trial_stop():
    """Trial-level early stop: a trial is stopped once it has been evicted
    from any rung below its current one."""

    def should_stop(report, history, rungs):
        # Stop when the trial id is missing from any of the lower rungs.
        return not all(report.trial_id in x for x in rungs[: report.rung])

    j = ASHAJudge(
        schedule=[(1.0, 2), (2.0, 2), (3.0, 1)],
        always_checkpoint=True,
        trial_early_stop=should_stop,
    )
    d = j.judge(rp("a", 0.6, 0))
    assert 2.0 == d.budget
    assert d.should_checkpoint
    d = j.judge(rp("b", 0.5, 0))
    assert 2.0 == d.budget
    assert d.should_checkpoint
    d = j.judge(rp("c", 0.4, 0))
    assert 2.0 == d.budget
    assert d.should_checkpoint
    d = j.judge(rp("a", 0.1, 1))
    assert d.should_stop  # kicked out by c
    assert d.should_checkpoint
def test_run_asha(tmpdir):
    """End-to-end continuous-ASHA run over a 4-point grid with the
    deterministic objective F; the best metric must always converge to 1.0
    (config a=0, see F._all), both with and without a monitor attached."""

    class M(Monitor):
        # Minimal monitor that just echoes every report.
        def on_report(self, report: TrialReport) -> None:
            print(report)

    def assert_metric(df: Iterable[Dict[str, Any]], metric: float, ct: int) -> None:
        # Verify the result partition holds exactly `ct` rows at `metric`.
        n = 0
        for row in df:
            assert row[TUNE_REPORT_METRIC] == metric
            n += 1
        assert n == ct

    space = Space(a=Grid(0, 1, 2, 3))
    dag = FugueWorkflow()
    dataset = TuneDatasetBuilder(space, str(tmpdir)).build(dag, shuffle=False)
    obj = F()
    res = optimize_by_continuous_asha(
        obj,
        dataset,
        plan=[[1.0, 3], [1.0, 2], [1.0, 1], [1.0, 1]],
        checkpoint_path=str(tmpdir),
    )
    res.result(1).output(assert_metric, dict(metric=1.0, ct=1))
    res = optimize_by_continuous_asha(
        obj,
        dataset,
        plan=[[2.0, 2], [1.0, 1], [1.0, 1]],
        checkpoint_path=str(tmpdir),
        monitor=M(),
    )
    res.result(1).output(assert_metric, dict(metric=1.0, ct=1))
    dag.run()
def rp(tid, metric, rung=0, keys=None):
    """Build a TrialReport for trial *tid* with the given metric and rung.

    *keys* defaults to an empty list; ``None`` is used as the sentinel to
    avoid the shared mutable-default-argument pitfall of ``keys=[]``.
    """
    t = Trial(tid, {}, keys=[] if keys is None else keys)
    return TrialReport(t, metric=metric, rung=rung)
class F(IterativeObjectiveFunc):
    """Deterministic objective for the ASHA tests.

    ``_all[a]`` is the metric trajectory of grid config ``a``; each call to
    ``run_single_iteration`` advances one step along that trajectory.  The
    step counter ``_it`` is persisted through save/load_checkpoint so that
    continuation after a budget extension picks up where it left off.
    """

    def __init__(self):
        super().__init__()
        self._it = 0  # next trajectory index to report
        self._all = [
            [9, 3, 1, 1],
            [8, 6, 5, 5],
            [8, 5, 4, 3],
            [7, 4, 3, 4],
        ]

    def save_checkpoint(self, fs):
        # Only the iteration counter needs to survive a restart.
        fs.writetext("x", str(self._it))

    def load_checkpoint(self, fs):
        self._it = int(fs.readtext("x"))

    def run_single_iteration(self):
        trial = self.current_trial
        # Look up the metric for this trial's grid value at the current step.
        metric = self._all[trial.params.simple_value["a"]][self._it]
        self._it += 1
        return TrialReport(trial, metric=metric)

    def copy(self):
        return F()
| [
"noreply@github.com"
] | Kamran151199.noreply@github.com |
d64431f4d4e960083c785d8ffcac9f5e91ad10c2 | c38f9e69e56afe9c2602ce3d0a432c386a187224 | /automate_testing.py | c28e4dfb29d272ed076aa39d11929e8f879b7f6b | [] | no_license | yuji3w/holmes-project | 8a7632f71f595b79b9c66c06d928679fbedae054 | 6c14522e0d6d383749bd89d7acc419b451130185 | refs/heads/master | 2020-08-08T04:14:41.733695 | 2019-11-08T21:05:50 | 2019-11-08T21:05:50 | 213,709,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 601 | py | import subprocess
def make_pal_arg(minpallen, maxpallen, gaplimit, nummismatches):
    """Compose the EMBOSS `palindrome` command line for the given parameters."""
    template = ("palindrome -sequence sequence.fasta -minpallen {0} "
                "-maxpallen {1} -gaplimit {2} -nummismatches {3} "
                "-outfile nc_000913.pal -nooverlap")
    return template.format(minpallen, maxpallen, gaplimit, nummismatches)
# Sweep gaplimit x nummismatches, running EMBOSS `palindrome` and then the
# downstream analysis script for each parameter combination.
# NOTE(review): shell=True with a single joined string is required here
# because the whole command line is passed as one string; the arguments are
# program constants, not user input, so injection is not a concern.
run_other_py_file = "python automate_parameters.py"
minpallen = 10
maxpallen = 100
for gaplimit in range(0, 50, 10):
    for nummismatches in range(0, 3):
        pal_arg = make_pal_arg(minpallen, maxpallen, gaplimit, nummismatches)
        subprocess.run([pal_arg], shell = True)
        subprocess.run([run_other_py_file], shell = True)
"yujiewang@berkeley.edu"
] | yujiewang@berkeley.edu |
25ea3ccf9694bbff46ace1ccdf8a44257540ba69 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/myfavouritekk_vdetlib/vdetlib-master/utils/log.py | 331282eee61fb52e30ff5a83431ec74d430c69e0 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 175 | py | #!/usr/bin/env python
import logging
# Configure the root logger once at import time: every record is prefixed
# with timestamp, PID and source location, and DEBUG level lets all
# messages through.
logging.basicConfig(
    format='[%(asctime)s %(process)d %(filename)s:%(lineno)s %(levelname)s] %(message)s',
    level=logging.DEBUG)
| [
"659338505@qq.com"
] | 659338505@qq.com |
96e6aab2bb0e71cb5457b158b81c4e21000ec23b | b6387b3ac9750cfae3892ea31ebd3d4f7cab456a | /JIO_Data/jitter_calc_HL.py | a1f67993bcd3c135a9e2e0fe14471d6dbaae38ac | [] | no_license | msoumyakant14/Network-Data-Analysis | ee7d8cd001b9039f33748e8c5902d62829ede7ee | 1d499bd2a3e5040e51d0f3c17115a717e2a44c7a | refs/heads/main | 2023-01-18T16:41:41.526670 | 2020-11-18T18:01:56 | 2020-11-18T18:01:56 | 312,952,945 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | def time_to_num(t):
lnt = len(t) - 3
n = 0
e = 1
while t[lnt]!='=':
n = e*(ord(t[lnt])-ord('0')) + n
lnt-=1
e*=10
return n
# Compute average jitter (mean absolute difference between consecutive ping
# round-trip times) from a saved `ping` output file.  Timed-out probes reset
# the running pair so a timeout never contributes a bogus delta.
file_name = input()
f = open(file_name)
cnt = sm = 0          # cnt = number of deltas, sm = sum of |t_curr - t_prev|
l = f.readlines()
ln = len(l)
t_prev = -1           # -1 marks "no previous sample" (start / after timeout)
i=2                   # skip the header lines of the ping output
while i < ln-6:       # skip the trailing summary lines as well
    if l[i]=="Request timed out.\n":
        t_prev = -1
    else:
        if t_prev==-1:
            # First reply after a gap: just record it, no delta yet.
            j=l[i].split()
            t_prev = time_to_num(j[-1])
        else:
            j=l[i].split()
            t_curr = time_to_num(j[-1])
            cnt+=1
            sm = sm + abs(t_curr - t_prev)
            t_prev = t_curr
    i+=1
# NOTE(review): divides by cnt without a guard -- raises ZeroDivisionError
# if the capture contains fewer than two successful replies in a row.
print("jitter =",sm/cnt,"ms")
"msoumyakant14@gmail.com"
] | msoumyakant14@gmail.com |
c1d75bd1bfbc749864be9da119d324e476608263 | 38e1a897ce68bfd71aacd932dc428d7c4b56af0e | /code_supl/split_pos_neg.py | ccca651a202bd453672e6758cf27c7ace3f28bd2 | [] | no_license | rnaimehaom/drug-combinations | 456629e5236ec5f18afa3e3e20e7e02f0f397c58 | 14a32f814c01309ecf61a1713b7cae88bfdc06a4 | refs/heads/master | 2023-03-27T05:36:48.290544 | 2020-08-19T16:23:33 | 2020-08-19T16:23:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,657 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 5 18:12:49 2013
@author: alexey
"""
def split_pos_neg(filename):
    """Split a tab-separated interaction file into '-pos' / '-neg' files.

    Each input line is expected to look like ``Pos...<TAB>A<TAB>B`` or
    ``Neg...<TAB>A<TAB>B``; the pair ``A<TAB>B`` is appended to
    ``<base>-pos<ext>`` or ``<base>-neg<ext>`` respectively (ext = last 4
    characters of *filename*).  Lines with neither prefix are reported and
    skipped; lines with too few fields are reported via the IndexError
    handler.
    """
    extension = filename[-4:]
    base = filename[:-4]
    # Context managers guarantee all three handles are closed.  (BUGFIX:
    # the original referenced `ih.close` without calling it, leaking the
    # input handle.)
    with open(filename, 'r') as ih, \
            open(base + '-pos' + extension, 'w') as oh_pos, \
            open(base + '-neg' + extension, 'w') as oh_neg:
        for line_number, line in enumerate(ih):
            try:
                row = line.strip().split('\t')
                if row[0][:3] == 'Pos':
                    oh_pos.writelines('\t'.join([row[1], row[2]]) + '\n')
                elif row[0][:3] == 'Neg':
                    oh_neg.writelines('\t'.join([row[1], row[2]]) + '\n')
                else:
                    print("Did not write this line: " + line)
            except IndexError:
                # Malformed line: report the file and the offending line.
                print(filename)
                print("{0} {1}".format(line_number, line))
if __name__ == '__main__':
    # Full list of target-feature files to split.
    filenames = [
        '/home/alexey/working/chemical-interactions/data/targets/Coexpression/Coexpression.txt',
        '/home/alexey/working/chemical-interactions/data/targets/Essentiality/Essentiality.txt',
        '/home/alexey/working/chemical-interactions/data/targets/FxnAssociation/FxnAssociation.txt',
        '/home/alexey/working/chemical-interactions/data/targets/GeneticInteraction/GeneticInteraction.txt',
        '/home/alexey/working/chemical-interactions/data/targets/GOAll_SharedFraction/GOAll_SharedFraction.txt',
        '/home/alexey/working/chemical-interactions/data/targets/GOBP_Pattern/GO_BP_Pattern.txt',
        '/home/alexey/working/chemical-interactions/data/targets/GOBP_SharedFraction/GOBP_SharedFraction.txt',
        '/home/alexey/working/chemical-interactions/data/targets/GOCC_Pattern/GO_CC_Pattern.txt',
        '/home/alexey/working/chemical-interactions/data/targets/GOCC_SharedFraction/GOCC_SharedFraction.txt',
        '/home/alexey/working/chemical-interactions/data/targets/GOMF_Pattern/GO_MF_Pattern.txt',
        '/home/alexey/working/chemical-interactions/data/targets/GOMF_SharedFraction/GOMF_SharedFraction.txt',
        '/home/alexey/working/chemical-interactions/data/targets/Pathway_Pattern/Pathway_Pattern.txt',
        '/home/alexey/working/chemical-interactions/data/targets/Pathway_SharedFraction/Pathway_SharedFraction.txt',
        '/home/alexey/working/chemical-interactions/data/targets/RegNetwork/RegNetwork.txt',
        '/home/alexey/working/chemical-interactions/data/targets/ShortestPath/ShortestPath.txt',
    ]
    # NOTE(review): this reassignment deliberately(?) overrides the full list
    # above so that only the shortest-path-length file is processed --
    # looks like a one-off debug override; confirm before re-running the
    # full sweep.
    filenames = [
        '/home/alexey/working/chemical-interactions/data/targets/ShortestPath/ShortestPathLength/CRG_ShortestPathLength.txt']
    for filename in filenames:
        split_pos_neg(filename)
"ostrokach@gmail.com"
] | ostrokach@gmail.com |
d1b1b3d66f06ffaa78d56207e13518d805c554db | 20076d53b42f209c9fe1bd99887af5d4119751ea | /my_solver/oliver/reader/__init__.py | d34ab9131eeca7d8210d1bf124c31cc7380576e0 | [] | no_license | OliverGeisel/sudokusat-example | 4ccd27196ebda17970b6ad7f74d0d6f0e098785d | 923967fabbe02c4c0551ffa2503783be247f8184 | refs/heads/master | 2020-06-11T07:44:06.447780 | 2019-07-30T23:20:21 | 2019-07-30T23:20:21 | 193,894,796 | 0 | 0 | null | 2019-06-26T11:53:01 | 2019-06-26T11:53:00 | null | UTF-8 | Python | false | false | 20 | py | from . import Input
| [
"geisel.oliver@googlemail.com"
] | geisel.oliver@googlemail.com |
59c0ad6c5e3cfdc1314c6767c4df3e9a8083fb5c | 7f6b8d34cb20cb1292517c9c0d875545f5b20a81 | /api/demo/admin.py | 12dbb5d709b59c5346ec04c4a8e75033ce7fbde5 | [] | no_license | tanvir002700/Drf-React-Boilerplate | 6ad4e02b6aad749b3b045cd1e80a1a131ba5b7b2 | 51b5b837c53dfc49adef060327445fa80bb31684 | refs/heads/main | 2023-01-10T20:02:48.828842 | 2020-11-08T18:21:08 | 2020-11-08T18:21:08 | 306,245,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | from django.contrib import admin
from rest_framework import views, permissions
from rest_framework.response import Response
# Register your models here.
class DemoApiListView(views.APIView):
    """Demo DRF endpoint: an unauthenticated GET returns a static greeting.

    NOTE(review): this view is defined in admin.py, which normally holds
    Django admin registrations -- consider moving it to views.py.
    """
    # Open to everyone; no authentication required.
    permission_classes = (permissions.AllowAny, )

    def get(self, request):
        """Return the fixed payload for the API root."""
        return Response("World api root")
| [
"tanvir.hasan@codemarshal.com"
] | tanvir.hasan@codemarshal.com |
9fd2562128b2f1703bc4534e945dcf1cd35ef57f | 6258003a64ebad1749986adda185e938ddad1780 | /CNN_Model.py | 87fd6ac904c59b9bb7cf441f1e385bf05f63472d | [] | no_license | sndychvn/LSTM_CNN-Sentiment-Analysis | eae089ff1e14a0db43e38426f2abdacfecf908f6 | 26e8c660f3577a572c7b26802b64d374169f0009 | refs/heads/master | 2020-04-15T00:33:05.998947 | 2019-01-15T21:51:23 | 2019-01-15T21:51:23 | 164,244,257 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,523 | py | import tensorflow as tf
import numpy as np
import re
from IPython import embed
import random
class CNN(object):
    """Text-classification CNN (embedding -> conv/ReLU/max-pool branches ->
    concat -> dropout -> softmax), built as a TensorFlow 1.x graph.

    One convolution branch is created per entry in *filter_sizes*; each
    branch max-pools over the whole remaining sequence dimension, and the
    pooled features are concatenated before the final linear layer.
    L2 regularization on the output layer is scaled by *l2_reg_lambda*.
    """

    def __init__(self, sequence_length, num_classes, vocab_size,
                 embedding_size, filter_sizes, num_filters, l2_reg_lambda=0.0):
        # BUGFIX: the parameter list previously read `self.sequence_length`,
        # which is a SyntaxError -- `self` and `sequence_length` must be
        # separate parameters.
        # Placeholders for input token ids, one-hot labels and dropout.
        self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")
        self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
        l2_loss = tf.constant(0.0)  # accumulates L2 regularization loss (optional)

        # 1. EMBEDDING LAYER (pinned to CPU; embedding lookups gain little on GPU)
        with tf.device('/cpu:0'), tf.name_scope("embedding"):
            self.W = tf.Variable(tf.random_uniform(shape=[vocab_size, embedding_size], minval=-1.0, maxval=1.0), name="W")
            self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)
            # conv2d expects 4-D input, so append a singleton channel axis.
            self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)
        pooled_outputs = []

        # 2. CONVOLUTION + MAX-POOL, one branch per filter size
        for i, filter_size in enumerate(filter_sizes):
            with tf.name_scope("conv-maxpool-%s" % filter_size):
                filter_shape = [filter_size, embedding_size, 1, num_filters]
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
                b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")
                conv = tf.nn.conv2d(self.embedded_chars_expanded, W, strides=[1, 1, 1, 1], padding="VALID", name="conv")
                # ReLU non-linearity before pooling.
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                # Max-pool over the entire remaining sequence dimension.
                pooled = tf.nn.max_pool(h, ksize=[1, sequence_length - filter_size + 1, 1, 1], strides=[1, 1, 1, 1], padding='VALID', name="pool")
                pooled_outputs.append(pooled)

        # Combine all pooled features.
        # BUGFIX: this variable was defined as `num_filters_tool` but later
        # referenced as `num_filters_total`, which raised NameError at
        # graph-construction time; the name is now consistent.
        num_filters_total = num_filters * len(filter_sizes)
        self.h_pool = tf.concat(pooled_outputs, 3)
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])

        with tf.name_scope("dropout"):
            self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)

        with tf.name_scope("output"):  # final (unnormalized) scores and predictions
            W = tf.get_variable(
                "W",
                shape=[num_filters_total, num_classes],
                initializer=tf.contrib.layers.xavier_initializer())
            b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)
            self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores")
            self.predictions = tf.argmax(self.scores, 1, name="predictions")

        with tf.name_scope("loss"):  # mean cross-entropy plus L2 penalty
            losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss

        with tf.name_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
        print("CNN is Loaded!")
"sndy.chvn.fr@gmail.com"
] | sndy.chvn.fr@gmail.com |
54cadc4665ed355d3939850244a60b5432bbb967 | 2d7a5303f065f7dd9783ac49b8b5d0db31e4318c | /train_mnist_with_classifier.py | 740b4e28b343784419063f828df69ebb5869cfda | [] | no_license | DEBADRIBASAK/SPAIR-Improve | 9a28a195db7aaae23ce6e7006ce03eec9755d9f6 | e86ced42e1a704acc8ad1a44dfce787d6153f391 | refs/heads/master | 2023-07-18T04:36:35.047449 | 2021-09-03T06:40:55 | 2021-09-03T06:40:55 | 217,029,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,841 | py | import argparse
import sys
import os
import time
import torch
import math
import numpy as np
from torch.utils.data import DataLoader
import torch.optim
from torch.nn.utils import clip_grad_norm_
import torchvision.transforms as transforms
from torchvision.utils import make_grid
from torch import nn
from torchvision import datasets
from create_atari.data_atari import ATARI
from create_atari.data_mnist import MNIST
from torch.optim.lr_scheduler import LambdaLR
#from data import MultiDSprites, CLEVR
from utils_modified import save_ckpt, load_ckpt, linear_annealing, visualize, \
calc_count_acc, calc_count_more_num, print_spair_clevr, spatial_transform
from common import *
# from eval import evaluation
# from torch.utils.tensorboard import SummaryWriter
from tensorboardX import SummaryWriter
from spair_with_z_pres_guidance import Spair
def main():
# Training settings
parser = argparse.ArgumentParser(description='SPAIR')
parser.add_argument('--data-dir', default='./', metavar='DIR',
help='train.pt file')
parser.add_argument('--nocuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('-j', '--workers', default=12, type=int, metavar='N',
help='number of data loading workers (default: 12)')
parser.add_argument('--epochs', default=100, type=int, metavar='N',
help='number of total epochs to run (default: 400)')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=32, type=int,
metavar='N', help='mini-batch size (default: 64)')
parser.add_argument('--lr', '--learning-rate', default=2e-4, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--cp', '--clip-gradient', default=1.0, type=float,
metavar='CP', help='rate of gradient clipping')
parser.add_argument('--weight_decay', '--wd', default=2e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--print_freq', '-p', default=50, type=int,
metavar='N', help='print batch frequency (default: 100)')
parser.add_argument('--save_epoch_freq', '-s', default=1, type=int,
metavar='N', help='save epoch frequency (default: 20)')
parser.add_argument('--last-ckpt', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--lr-decay-rate', default=0.8, type=float,
help='decay rate of learning rate (default: 0.8)')
parser.add_argument('--lr-epoch-per-decay', default=1000, type=int,
help='epoch of per decay of learning rate (default: 1000)')
parser.add_argument('--ckpt_dir', default='./model', metavar='DIR',
help='path to save checkpoints')
parser.add_argument('--summary_dir', default='./summary', metavar='DIR',
help='path to save summary')
parser.add_argument('--tau-end', default=0.5, type=float, metavar='T',
help='initial temperature for gumbel')
parser.add_argument('--tau-ep', default=50, type=float, metavar='E',
help='exponential decay factor for tau')
parser.add_argument('--seed', default=1, type=int,
help='Fixed random seed.')
parser.add_argument('--sigma', default=0.08, type=float, metavar='S',
help='Sigma for log likelihood.')
parser.add_argument('--dataset_path',default='aspect_reserved.pt',type=str,help='path to the stored dataset')
parser.add_argument('--epoch_num',default=499,type=int,help="epoch number for the saved model")
args = parser.parse_args()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
if not os.path.exists(args.ckpt_dir):
os.mkdir(args.ckpt_dir)
if not os.path.exists(args.summary_dir):
os.mkdir(args.summary_dir)
PATH = os.path.join("./Classifier_model/","model_{}th_epoch.pt".format(args.epoch_num))
device = torch.device(
"cuda" if not args.nocuda and torch.cuda.is_available() else "cpu")
# torch.manual_seed(args.seed)
train_data = torch.load(os.path.join(args.data_dir,args.dataset_path)) #MNIST(root=args.data_dir, phase_train=True) # CLEVR
train_loader = DataLoader(
train_data, batch_size=args.batch_size, shuffle=True)
num_train = len(train_data)
model = Spair(trained_model_path=PATH,sigma=args.sigma)
model.to(device)
for param in model.model.parameters():
param.requires_grad = False
if device.type == 'cuda' and torch.cuda.device_count() >= 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
model = nn.DataParallel(model)
model.train()
#optimizer = torch.optim.RMSprop(model.parameters(), lr=args.lr)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
if args.last_ckpt:
global_step, args.start_epoch = \
load_ckpt(model, optimizer, args.last_ckpt, device)
writer = SummaryWriter(args.summary_dir)
global_step = 0
log_tau_gamma = math.log(args.tau_end) / args.tau_ep
for epoch in range(int(args.start_epoch), args.epochs):
local_count = 0
last_count = 0
end_time = time.time()
tau = max(math.exp(epoch * log_tau_gamma), args.tau_end)
if (not args.last_ckpt and epoch != 0) or (args.last_ckpt and epoch != args.start_epoch):
if epoch % args.save_epoch_freq == 0:
print("Saving....")
save_ckpt(args.ckpt_dir, model, optimizer, global_step, epoch,
local_count, args.batch_size, num_train)
for batch_idx, sample in enumerate(train_loader):
imgs = sample[0].squeeze().view(-1, 1, 128, 128).to(device)
gt = sample[1].squeeze().view(-1,N_CHANNELS,128,128).to(device)
target_count = sample[2].squeeze()
recon_x, log_like, kl_z_what, kl_z_where, kl_z_pres, kl_z_depth, kl_bg_what, classifier_loss, log = \
model(imgs, global_step, tau)
log_like, kl_z_what, kl_z_where, kl_z_pres, kl_z_depth, kl_bg_what,classifier_loss = \
log_like.mean(), kl_z_what.mean(), kl_z_where.mean(), \
kl_z_pres.mean(), kl_z_depth.mean(), kl_bg_what.mean(), classifier_loss.mean()
total_loss = - (log_like - kl_z_what - kl_z_where - kl_z_pres - kl_z_depth - kl_bg_what - classifier_loss)
optimizer.zero_grad()
total_loss.backward()
if DEBUG:
for name, param in model.named_parameters():
if torch.any(torch.isnan(param.grad)) or torch.any(torch.isinf(param.grad)):
breakpoint()
clip_grad_norm_(model.parameters(), args.cp)
optimizer.step()
local_count += imgs.data.shape[0]
global_step += 1
if global_step % args.print_freq == 0 or global_step == 1:
bs = imgs.size(0)
print("global_step = {}".format(global_step),flush=True)
log = {
'z_where': log['z_where'],
'bg_what': log['bg_what'].view(-1, bg_what_dim),
'bg_what_std': log['bg_what_std'].view(-1, bg_what_dim),
'bg_what_mean': log['bg_what_mean'].view(-1, bg_what_dim),
'bg': log['bg'].view(-1, N_CHANNELS, img_h, img_w),
'z_what': log['z_what'].view(-1, 4 * 4, z_what_dim),
'z_where_scale':
log['z_where'].view(-1, 4 * 4, z_where_scale_dim + z_where_shift_dim)[:, :, :z_where_scale_dim],
'z_where_shift':
log['z_where'].view(-1, 4 * 4, z_where_scale_dim + z_where_shift_dim)[:, :, z_where_scale_dim:],
'z_pres': log['z_pres'].permute(0, 2, 3, 1),
'z_pres_probs': torch.sigmoid(log['z_pres_logits']).permute(0, 2, 3, 1),
'z_what_std': log['z_what_std'].view(-1, 4 * 4, z_what_dim),
'z_what_mean': log['z_what_mean'].view(-1, 4 * 4, z_what_dim),
'z_where_scale_std':
log['z_where_std'].permute(0, 2, 3, 1)[:, :, :z_where_scale_dim],
'z_where_scale_mean':
log['z_where_mean'].permute(0, 2, 3, 1)[:, :, :z_where_scale_dim],
'z_where_shift_std':
log['z_where_std'].permute(0, 2, 3, 1)[:, :, z_where_scale_dim:],
'z_where_shift_mean':
log['z_where_mean'].permute(0, 2, 3, 1)[:, :, z_where_scale_dim:],
'glimpse': log['x_att'].view(-1, 4 * 4, N_CHANNELS, glimpse_size, glimpse_size),
'glimpse_recon': log['y_att'].view(-1, 4 * 4, N_CHANNELS, glimpse_size, glimpse_size),
'prior_z_pres_prob': log['prior_z_pres_prob'].unsqueeze(0),
'o_each_cell': spatial_transform(log['o_att'], log['z_where'], (4 * 4 * bs, N_CHANNELS, img_h, img_w),
inverse=True).view(-1, 4 * 4, N_CHANNELS, img_h, img_w),
'alpha_hat_each_cell': spatial_transform(log['alpha_att_hat'], log['z_where'],
(4 * 4 * bs, 1, img_h, img_w),
inverse=True).view(-1, 4 * 4, 1, img_h, img_w),
'alpha_each_cell': spatial_transform(log['alpha_att'], log['z_where'],
(4 * 4 * bs, 1, img_h, img_w),
inverse=True).view(-1, 4 * 4, 1, img_h, img_w),
'y_each_cell': (log['y_each_cell'] * log['z_pres'].
view(-1, 1, 1, 1)).view(-1, 4 * 4, N_CHANNELS, img_h, img_w),
'z_depth': log['z_depth'].view(-1, 4 * 4, z_depth_dim),
'z_depth_std': log['z_depth_std'].view(-1, 4 * 4, z_depth_dim),
'z_depth_mean': log['z_depth_mean'].view(-1, 4 * 4, z_depth_dim),
'importance_map_full_res_norm':
log['importance_map_full_res_norm'].view(-1, 4 * 4, 1, img_h, img_w),
'z_pres_logits': log['z_pres_logits'].permute(0, 2, 3, 1),
'z_pres_y': log['z_pres_y'].permute(0, 2, 3, 1),
'classifier_loss': log['classifier_loss']
}
time_inter = time.time() - end_time
count_inter = local_count - last_count
print_spair_clevr(global_step, epoch, local_count, count_inter,
num_train, total_loss, log_like, kl_z_what,
kl_z_where, kl_z_pres, kl_z_depth, kl_bg_what,classifier_loss)
end_time = time.time()
for name, param in model.named_parameters():
writer.add_histogram(
name, param.cpu().detach().numpy(), global_step)
if param.grad is not None:
writer.add_histogram(
'grad/' + name, param.grad.cpu().detach(), global_step)
# writer.add_scalar(
# 'grad_std/' + name + '.grad', param.grad.cpu().detach().std().item(), global_step)
# writer.add_scalar(
# 'grad_mean/' + name + '.grad', param.grad.cpu().detach().mean().item(), global_step)
for key, value in log.items():
if value is None:
continue
if key == 'importance_map_full_res_norm' or key == 'alpha_hat_each_cell' or key == 'alpha_each_cell':
writer.add_histogram('inside_value/' + key, value[value > 0].cpu().detach().numpy(),
global_step)
else:
writer.add_histogram('inside_value/' + key, value.cpu().detach().numpy(),
global_step)
grid_image = make_grid(imgs.cpu().detach()[:10].view(-1, N_CHANNELS, img_h, img_w),
5, normalize=False, pad_value=1)
writer.add_image('train/1-image', grid_image, global_step)
grid_image = make_grid(recon_x.cpu().detach()[:10].view(-1, N_CHANNELS, img_h, img_w).clamp(0., 1.),
5, normalize=False, pad_value=1)
writer.add_image('train/2-reconstruction_overall', grid_image, global_step)
grid_image = make_grid(log['bg'].cpu().detach()[:10].view(-1, N_CHANNELS, img_h, img_w),
5, normalize=False, pad_value=1)
writer.add_image('train/3-background', grid_image, global_step)
bbox = visualize(imgs[:num_img_summary].cpu(), log['z_pres'][:num_img_summary].cpu().detach(),
log['z_where_scale'][:num_img_summary].cpu().detach(),
log['z_where_shift'][:num_img_summary].cpu().detach())
boxes = torch.ones((1,N_CHANNELS,32,32));
boxes = ((torch.stack((boxes,)*4*4*bs,dim=1).view(-1,N_CHANNELS,32,32)*log['z_pres'].cpu().detach().view(-1,1,1,1))>.5).float()
predicted_boxes = spatial_transform(boxes,log['z_where'].cpu().detach(),(4*4*bs,N_CHANNELS,img_h,img_w),inverse=True)
predicted_boxes = (predicted_boxes.view(-1,4*4,img_h,img_w).sum(dim=1).unsqueeze(1)>0).cuda().float()
intersection = (predicted_boxes*gt)>0
union = (predicted_boxes+gt)>0
IoU = intersection.view(bs,-1).sum(-1).float()/union.view(bs,-1).sum(-1).float()
IoU = IoU.mean()
y_each_cell = log['y_each_cell'].view(-1, N_CHANNELS, img_h, img_w)[:num_img_summary * 16].cpu().detach()
o_each_cell = log['o_each_cell'].view(-1, N_CHANNELS, img_h, img_w)[:num_img_summary * 16].cpu().detach()
alpha_each_cell = log['alpha_hat_each_cell'].view(-1, 1, img_h, img_w)[
:num_img_summary * 16].cpu().detach()
importance_each_cell = \
log['importance_map_full_res_norm'].view(-1, 1, img_h, img_w)[:num_img_summary * 16].cpu().detach()
for i in range(num_img_summary):
grid_image = make_grid(bbox[i * 16:(i + 1) * 16], 4, normalize=True, pad_value=1)
writer.add_image('train/4-bbox_{}'.format(i), grid_image, global_step)
grid_image = make_grid(y_each_cell[i * 16:(i + 1) * 16], 4, normalize=True, pad_value=1)
writer.add_image('train/5-y_each_cell_{}'.format(i), grid_image, global_step)
grid_image = make_grid(o_each_cell[i * 16:(i + 1) * 16], 4, normalize=True, pad_value=1)
writer.add_image('train/6-o_each_cell_{}'.format(i), grid_image, global_step)
grid_image = make_grid(alpha_each_cell[i * 16:(i + 1) * 16], 4, normalize=True, pad_value=1)
writer.add_image('train/7-alpha_hat_each_cell_{}'.format(i), grid_image, global_step)
grid_image = make_grid(importance_each_cell[i * 16:(i + 1) * 16], 4, normalize=True, pad_value=1)
writer.add_image('train/8-importance_each_cell_{}'.format(i), grid_image, global_step)
writer.add_scalar('train/total_loss', total_loss.item(), global_step=global_step)
writer.add_scalar('train/log_like', log_like.item(), global_step=global_step)
writer.add_scalar('train/What_KL', kl_z_what.item(), global_step=global_step)
writer.add_scalar('train/Where_KL', kl_z_where.item(), global_step=global_step)
writer.add_scalar('train/Pres_KL', kl_z_pres.item(), global_step=global_step)
writer.add_scalar('train/Depth_KL', kl_z_depth.item(), global_step=global_step)
writer.add_scalar('train/tau', tau, global_step=global_step)
writer.add_scalar('train/classifier_loss',classifier_loss.item(),global_step=global_step)
acc = calc_count_acc(log['z_pres'].cpu().detach(), target_count)
#writer.add_scalar('train/count', out ,global_step=global_step)
#writer.add_scalar('train/target_count',target_count,global_step=global_step)
writer.add_scalar('train/count_acc', acc,
global_step=global_step)
writer.add_scalar('train/count_more', calc_count_more_num(log['z_pres'].cpu().detach(), target_count),
global_step=global_step)
writer.add_scalar('train/Bg_KL', kl_bg_what.item(), global_step=global_step)
# writer.add_scalar('train/Bg_Beta', kg_kl_beta.item(), global_step=global_step)
writer.add_scalar('train/IoU',IoU.item(),global_step=global_step)
writer.flush()
last_count = local_count
# Guard fixed: the original `if '__main__':` tested a non-empty string literal,
# which is always truthy, so main() would also run when this module was merely
# imported.  Compare __name__ against the literal instead.
if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | DEBADRIBASAK.noreply@github.com |
2fab57f9d7a90e69947876842d351300f5d5583a | 8f6e1a91f4adafa1e27e89315a51a195d7945548 | /Library/ArcETL/arcetl/proximity.py | 26b9149a96c442cae01529dff4743c84c2b04a04 | [] | no_license | denkide/ColumbiaCarto | d85a37b19f40209ee1bff66bc41eb2bb3fb66774 | f57a3444fee7c31d43594eb21868ddc7da005e68 | refs/heads/master | 2020-05-29T19:48:53.575856 | 2019-07-26T03:37:38 | 2019-07-26T03:37:38 | 187,425,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,393 | py | """Analysis result operations."""
import logging
import arcpy
from arcetl import arcobj
from arcetl import attributes
from arcetl import dataset
from arcetl.helpers import unique_path
LOG = logging.getLogger(__name__)
"""logging.Logger: Module-level logger."""
def id_near_info_map(
    dataset_path,
    dataset_id_field_name,
    near_dataset_path,
    near_id_field_name,
    max_near_distance=None,
    **kwargs
):
    """Return mapping dictionary of feature IDs/near-feature info.
    Args:
        dataset_path (str): Path of the dataset.
        dataset_id_field_name (str): Name of ID field.
        near_dataset_path (str): Path of the near-dataset.
        near_id_field_name (str): Name of the near ID field.
        max_near_distance (float): Maximum distance to search for near-features, in
            units of the dataset's spatial reference.
        **kwargs: Arbitrary keyword arguments. See below.
    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        near_where_sql (str): SQL where-clause for near-dataset subselection.
        near_rank (int): Nearness rank of the feature to map info for. Default is 1.
    Returns:
        dict: Mapping of the dataset ID to a near-feature info dictionary.
        Info dictionary keys: 'id', 'near_id', 'rank', 'distance',
        'angle', 'near_x', 'near_y'.
        'distance' value (float) will match linear unit of the dataset's
        spatial reference.
        'angle' value (float) is in decimal degrees.
    """
    # Fill defaults so the kwargs lookups below never raise KeyError.
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('near_where_sql')
    kwargs.setdefault('near_rank', 1)
    # Views apply the optional where-clauses as row subselections.
    view = {
        'dataset': arcobj.DatasetView(dataset_path, kwargs['dataset_where_sql']),
        'near': arcobj.DatasetView(near_dataset_path, kwargs['near_where_sql']),
    }
    with view['dataset'], view['near']:
        temp_near_path = unique_path('near')
        # NOTE(review): closest=False with closest_count appears to emit up to
        # near_rank rows per input feature -- confirm against the arcpy docs.
        arcpy.analysis.GenerateNearTable(
            in_features=view['dataset'].name,
            near_features=view['near'].name,
            out_table=temp_near_path,
            search_radius=max_near_distance,
            location=True,
            angle=True,
            closest=False,
            closest_count=kwargs['near_rank'],
        )
        # Map object IDs ('oid@') to the caller-facing ID fields on both sides.
        oid_id_map = attributes.id_map(
            view['dataset'].name, 'oid@', dataset_id_field_name
        )
        near_oid_id_map = attributes.id_map(
            view['near'].name, 'oid@', near_id_field_name
        )
    # The temp near table is read after the views close; only these fields are needed.
    field_names = [
        'in_fid', 'near_fid', 'near_dist', 'near_angle', 'near_x', 'near_y', 'near_rank'
    ]
    near_info_map = {}
    for near_info in attributes.as_dicts(temp_near_path, field_names):
        # Keep only rows at the requested nearness rank.
        if near_info['near_rank'] == kwargs['near_rank']:
            _id = oid_id_map[near_info['in_fid']]
            near_info_map[_id] = {
                'id': _id,
                'near_id': near_oid_id_map[near_info['near_fid']],
                'rank': near_info['near_rank'],
                'distance': near_info['near_dist'],
                'angle': near_info['near_angle'],
                'near_x': near_info['near_x'],
                'near_y': near_info['near_y'],
            }
    # Remove the temporary near table now that it has been consumed.
    dataset.delete(temp_near_path, log_level=None)
    return near_info_map
| [
"denkide@gmail.com"
] | denkide@gmail.com |
047a2b3f43f4f244a819b6f31e88745c8ab398e0 | 5ae88a0a197a52086edffe9c031bc424634b55d4 | /AizuOnlineJudge/ITP1/6/B.py | 7b3cc8dc426a3470c63010fe4519c4bd638fe9e7 | [] | no_license | muck0120/contest | 1b1226f62e0fd4cf3bd483e40ad6559a6f5401cb | de3877822def9e0fe91d21fef0fecbf025bb1583 | refs/heads/master | 2022-07-17T17:31:50.934829 | 2020-05-18T09:55:30 | 2020-05-18T09:55:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | N = int(input())
# One ordered rank list per suit; cards that appear in the input are removed.
cards = {suit: list(range(1, 14)) for suit in 'SHCD'}
for _ in range(N):
    suit, rank = input().split()
    if suit in cards:
        cards[suit].remove(int(rank))
# Report the missing cards: suits in S, H, C, D order, ranks ascending.
for suit in 'SHCD':
    for rank in cards[suit]:
        print(suit, rank)
"mutsuki.s.0120@gmail.com"
] | mutsuki.s.0120@gmail.com |
3e850dd8c41c4527bbd702500f8bd7d0a8b489fa | f77c1c438029613ef9c48ef7cc6f4c567e9db177 | /208_implement_trie_add_search_prefix.py | d8a7066f619fb5334cb070f5dfabd2baa21368a2 | [] | no_license | rsumukha/leetcode | af002e58fc760496e52909c3bd6d7296c1f49137 | ab8617fc8cc50cdfef736c189ccb09adc687398a | refs/heads/master | 2022-04-01T09:47:02.029967 | 2020-01-28T16:10:07 | 2020-01-28T16:10:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,758 | py | class TrieNode(object):
    def __init__(self, char):
        """Create a trie node holding the single character *char*."""
        self.val = char
        # defaultdict(int): a missing child reads as 0, which the Trie code
        # treats as "no node here"; present children are TrieNode instances.
        self.children = collections.defaultdict(int)
        self.count = 0  # not used by the visible Trie methods; kept for compatibility
        self.end = False  # True when a complete inserted word terminates at this node
class Trie(object):
    """Prefix tree over single characters (LeetCode 208 interface).

    Each node stores its children in a ``defaultdict(int)``: a present child is
    a TrieNode, an absent one reads as 0.  The original code tested membership
    with ``children[character] == 0`` / ``!= 0``; indexing a defaultdict
    materialises the default for every miss, so merely *searching* for an
    absent word kept inserting spurious 0 entries.  The lookups below use
    ``dict.get`` instead, which never mutates the mapping.
    """

    def __init__(self):
        """Initialize your data structure here."""
        self.root = TrieNode("root")

    def insert(self, word):
        """Inserts a word into the trie.
        :type word: str
        :rtype: None
        """
        node = self.root
        for character in word:
            child = node.children.get(character, 0)
            if child == 0:
                # No edge for this character yet: create it.
                child = TrieNode(character)
                node.children[character] = child
            node = child
        node.end = True

    def _walk(self, chars):
        """Return the node reached by following *chars*, or None if the path breaks."""
        node = self.root
        for character in chars:
            node = node.children.get(character, 0)
            if node == 0:
                return None
        return node

    def search(self, word):
        """Returns if the word is in the trie.
        :type word: str
        :rtype: bool
        """
        node = self._walk(word)
        # The path must exist AND terminate a complete word.
        return node is not None and node.end

    def startsWith(self, prefix):
        """Returns if there is any word in the trie that starts with the given prefix.
        :type prefix: str
        :rtype: bool
        """
        return self._walk(prefix) is not None
# Your Trie object will be instantiated and called as such:
# obj = Trie()
# obj.insert(word)
# param_2 = obj.search(word)
# param_3 = obj.startsWith(prefix)
| [
"sumukharadhakrishna@gmail.com"
] | sumukharadhakrishna@gmail.com |
e13fab0514aa87a22f4efac43760c2d877c23adb | 64a99161204051f6f2abb9e8d88a5508952c0115 | /examples/saveLoadV1/create_save.py | 61f2ff9f7627dd79c32b0c968455c4711de7a2ad | [
"MIT"
] | permissive | suny-downstate-medical-center/netpyne | d1ba5a258ba63c8ad8b0fa91a6d8bbd99f2e8d28 | 9d08867205b776bbb467554c49df9d8aba57dcf2 | refs/heads/development | 2023-08-23T22:48:26.020812 | 2023-08-16T14:20:23 | 2023-08-16T14:20:23 | 48,733,333 | 18 | 18 | MIT | 2023-09-11T16:01:19 | 2015-12-29T07:12:08 | Jupyter Notebook | UTF-8 | Python | false | false | 164 | py | from netpyne import sim
import params  # example-local module defining netParams and simConfig

# Create network and save
sim.create(netParams=params.netParams, simConfig=params.simConfig)
sim.gatherData()  # NOTE(review): presumably gathers sim data across nodes -- confirm
sim.saveData()  # persist the network/simulation output per simConfig's save options
| [
"salvadordura@gmail.com"
] | salvadordura@gmail.com |
60307f994b3735554e67452f6e4b146fdea0ef7a | 3354e0a6b998b017befae119118ae90d3c5e17aa | /linkedlist.py | c1c113e40fdd329f82896b5eb1c40f6bce004a36 | [] | no_license | ElvinKim/seocho_2019_07 | 14ac829f170668c1f67950399eb5260ab160ae1e | 77f4eb58ac8edd3932d26a2cec1f44b2de6daffb | refs/heads/master | 2020-06-19T10:21:29.803586 | 2019-08-02T13:11:08 | 2019-08-02T13:11:08 | 196,676,128 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,450 | py | import time
class Node:
    """Singly-linked-list node with optional head/tail sentinel flags."""
    # NOTE(review): these class-level attributes are immediately shadowed by the
    # instance attributes set in __init__; nodePrev is declared but never
    # maintained anywhere in this file (the list is singly linked).
    nodeNext = None
    nodePrev = ''
    objValue = ''
    blnHead = False
    blnTail = False
    def __init__(self, objValue = '', nodeNext = None, blnHead = False, blnTail = False):
        # Store the payload, the forward link, and the sentinel-role flags.
        self.nodeNext = nodeNext
        self.objValue = objValue
        self.blnHead = blnHead
        self.blnTail = blnTail
    def getValue(self):
        # Payload accessor.
        return self.objValue
    def setValue(self, objValue):
        # Payload mutator.
        self.objValue = objValue
    def getNext(self):
        # Forward-link accessor.
        return self.nodeNext
    def setNext(self, nodeNext):
        # Forward-link mutator.
        self.nodeNext = nodeNext
    def isHead(self):
        # True only for the head sentinel.
        return self.blnHead
    def isTail(self):
        # True only for the tail sentinel.
        return self.blnTail
class SinglyLinkedList:
    nodeHead = ''
    nodeTail = ''
    size = 0

    def __init__(self):
        # Sentinel pair: head -> tail, with the real elements linked in between.
        self.nodeTail = Node(blnTail=True)
        self.nodeHead = Node(blnHead=True, nodeNext=self.nodeTail)

    def insertAt(self, objInsert, idxInsert):
        """Splice objInsert in so that it becomes element number idxInsert."""
        predecessor = self.get(idxInsert - 1)
        fresh = Node(objValue=objInsert, nodeNext=predecessor.getNext())
        predecessor.setNext(fresh)
        self.size = self.size + 1

    def removeAt(self, idxRemove):
        """Unlink element idxRemove and return its stored value."""
        predecessor = self.get(idxRemove - 1)
        victim = predecessor.getNext()
        predecessor.setNext(victim.getNext())
        self.size = self.size - 1
        return victim.getValue()

    def get(self, idxRetrieve):
        """Walk idxRetrieve + 1 links from the head sentinel (get(-1) is the head)."""
        cursor = self.nodeHead
        hops = idxRetrieve + 1
        while hops > 0:
            cursor = cursor.getNext()
            hops = hops - 1
        return cursor

    def printStatus(self):
        """Print every stored value on one space-separated line."""
        cursor = self.nodeHead.getNext()
        while not cursor.isTail():
            print(cursor.getValue(), end=" ")
            cursor = cursor.getNext()
        print("")

    def getSize(self):
        """Return the number of stored elements."""
        return self.size
# Benchmark: time one positional insert at several depths of a large list.
total_len = 1000000
val_insert = 1000

list1 = SinglyLinkedList()
for i in range(total_len):
    list1.insertAt(i, 0)  # front insert needs no traversal, so the build is fast

# The original repeated the time/insert/print stanza three times verbatim;
# a loop over the target depths (1/2, 1/3, 1/4 of the list) removes the
# duplication.  // equals int(/) for these positive operands.
for divisor in (2, 3, 4):
    start_time = time.time()
    list1.insertAt(val_insert, total_len // divisor)
    print(time.time() - start_time)
| [
"noreply@github.com"
] | ElvinKim.noreply@github.com |
e0d7aee5595ccee3e0b07d6015e6a424c555d2ae | 51a4ac8fa703adbdacac13aceb9503b915473d14 | /1주차 토요일/소개글.py | 06e75ca34e5063946c4876259eeec39de6fc38cd | [] | no_license | Ha-rin-kim/Python-basic | 0f6218bafbd277ddbb84c955a1b8caf065307de4 | 3eb7edd4ac53d82dfe0aa6d98699a389fbec208f | refs/heads/master | 2022-03-01T03:48:21.082802 | 2019-11-10T11:59:34 | 2019-11-10T11:59:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | #사용자의 이름,나이,성별을 화면에 출력하는 프로그램
# Read the user's name, age and sex from stdin and store them in variables.
이름 = input('이름 입력:')
나이 = int(input('나이 입력:'))
성별 = input('성별 입력:')
# Interpolate the values into the fixed format string and print the blurb.
소개글 = '''
안녕하세요 저는 %s 입니다.
나이는 %d이고 %s입니다.
''' % (이름,나이,성별)
print(소개글)
| [
"noreply@github.com"
] | Ha-rin-kim.noreply@github.com |
e1faa83daf6594b6eb818f019733137ed653914a | 4fed03c9ad291e480c56c1da10d9a68e149649e1 | /articles/apps.py | 8af61786cc1ee08ea718a8a12ede5d7bd96b56f1 | [] | no_license | nikakoss1/upw | 8b304e8785f03ee2d6123c5fc427557dd10fd3b3 | 035a349d54cb76e4cf4b29f12eadd4f4744a14ef | refs/heads/master | 2020-04-28T20:46:59.651359 | 2019-03-14T05:49:00 | 2019-03-14T05:49:00 | 175,556,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | # -*- coding: utf-8 -*-
from django.apps import AppConfig
class ArticlesConfig(AppConfig):
    """Django app configuration for the ``articles`` application."""
    # Dotted path Django uses to locate this app.
    name = 'articles'
| [
"i@6009.ru"
] | i@6009.ru |
fca14b53660a6678c7d2568d55ff5cb89c3ff21a | 3db32047e1ac68b0e7d7b858b28050f1100e1efb | /0002/py0001.py | 00e80ee862792f6f082508fb87e6762f5f1af313 | [] | no_license | vinlinch/python_exercise | d2bf69b594da98871539d19977f77df51223b91a | f124228a394f7181bee903f63b744e3425a69821 | refs/heads/master | 2021-09-01T09:26:14.520462 | 2017-12-26T07:26:49 | 2017-12-26T07:26:49 | 115,397,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,565 | py | #!python3.6.1
# -*- coding: utf-8 -*-
# author: https://github.com/vinlinch
# 0001 做为 Apple Store App 独立开发者,你要搞限时促销,
# 为你的应用生成激活码(或者优惠券),
# 使用 Python 如何生成 200 个激活码(或者优惠券)?
import string
import random
def gene_active_code(number_of_code, str_len):
    """Generate *number_of_code* distinct activation codes.

    Each code is *str_len* characters drawn uniformly (with replacement) from
    the 26 uppercase ASCII letters and 10 digits.  A set enforces uniqueness,
    so duplicates produced by chance are simply re-drawn.

    Note: number_of_code must stay well below 36**str_len, otherwise the
    uniqueness loop cannot terminate.
    """
    # random.choices samples uniformly regardless of population order, so the
    # original random.shuffle() pass added nothing and has been dropped, along
    # with the commented-out debug prints.
    alphabet = string.ascii_uppercase + string.digits
    codes = set()
    while len(codes) < number_of_code:
        codes.add(''.join(random.choices(alphabet, k=str_len)))
    return list(codes)
if __name__ == "__main__":
    # Demo run: generate 200 activation codes, 16 characters each.
    number_of_code = 200
    str_len = 16
    codes = gene_active_code(number_of_code,str_len)
print(codes) | [
"jielove1@hotmail.com"
] | jielove1@hotmail.com |
412c22f8ce5aa90d526e82e1a80ae3e8f5a2db1b | 4cd14b2d74a01bb81549fa31a2a8b499ed2a599e | /backend/tests/server/test_app.py | a886d82dfd69480cc1225a2c7f1cba03924798d0 | [] | no_license | JackEngelmann/chessengine | 2ffb32412d1885f157ffb5ca6b332996b3472202 | 2b00c2d4403a1e677f5af8cfaa95dd166336e4e8 | refs/heads/main | 2023-02-11T21:04:04.716811 | 2021-01-09T19:47:58 | 2021-01-09T19:47:58 | 325,601,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,390 | py | import pytest
from chessbackend.server import app, data
from chessbackend import engine
from flask import json
def test_create_game(client):
    """POST /game responds with a payload carrying a non-null game id."""
    payload = json.loads(client.post("/game").data)
    assert payload["id"] is not None
def test_get_game(client):
    """GET /game/<id> reports whose turn it is plus boolean status flags."""
    game_id = json.loads(client.post("/game").data)["id"]

    state = json.loads(client.get(f"/game/{game_id}").data)
    assert state["inTurn"] == "white"
    for flag in ("check", "checkmate", "stalemate"):
        assert isinstance(state[flag], bool)
def test_get_game_figures(client):
    """GET /game/<id>/figures lists every figure with id, position and colour."""
    game_id = json.loads(client.post("/game").data)["id"]

    figures = json.loads(client.get(f"/game/{game_id}/figures").data)
    assert figures  # a fresh game starts with figures on the board
    for fig in figures:
        assert isinstance(fig["id"], str)
        assert isinstance(fig["positionX"], int)
        assert isinstance(fig["positionY"], int)
        assert fig["colour"] in ("white", "black")
def test_get_figure_details(client):
    """GET /game/<id>/figures/<figure_id> returns position, colour and valid moves."""
    create_data = json.loads(client.post("/game").data)
    game_id = create_data["id"]

    figures = json.loads(client.get(f"/game/{game_id}/figures").data)

    # Pick a pawn, because all pawns should have some valid moves.
    figure_id = None
    for figure in figures:
        if figure["name"] == "Pawn":
            figure_id = figure["id"]
    # Guard added: without it a pawn-less payload would silently request
    # "/figures/None" instead of failing here with a clear message.
    assert figure_id is not None, "expected at least one pawn in a fresh game"

    details = json.loads(client.get(f"/game/{game_id}/figures/{figure_id}").data)
    assert isinstance(details["id"], str)
    assert isinstance(details["positionX"], int)
    assert isinstance(details["positionY"], int)
    assert details["colour"] in ("white", "black")
    assert len(details["validMoves"]) > 0
    assert len(details["validMoves"][0]) == 2
def test_update_figure_location(client):
    """PATCH /game/<id> moves a figure and passes the turn to the other colour."""
    game_id = json.loads(client.post("/game").data)["id"]

    # Move white pawn one step forward.
    patch_response = client.patch(
        f"/game/{game_id}", json={"from": {"x": 0, "y": 1}, "to": {"x": 0, "y": 2}}
    )
    assert patch_response.status_code == 204

    figures_data = json.loads(client.get(f"/game/{game_id}/figures").data)
    assert len(figures_data) == 32  # a plain pawn move captures nothing

    # next(..., None) instead of bare next(): if the figure is missing, the
    # assertion below should fail cleanly rather than raise StopIteration.
    moved_figure = next(
        (
            fig
            for fig in figures_data
            if fig["positionX"] == 0 and fig["positionY"] == 2
        ),
        None,
    )
    assert moved_figure is not None

    game_data = json.loads(client.get(f"/game/{game_id}").data)
    assert game_data["inTurn"] == "black"
@pytest.fixture
def client():
    """Yield a Flask test client for the chess app; reset server state afterwards."""
    app.app.config["TESTING"] = True
    # TODO: app.app.app_context is pretty ugly.
    with app.app.test_client() as client:
        yield client
    app.reset_data()
| [
"jack@tau2logic.com"
] | jack@tau2logic.com |
3c5ef95c496f2a494a9753f4a17c7b50b148cb24 | ba19b5449d72611c48e9395caa808b6ec02f25fc | /HW 2.py | 0db6e2a5b578bbdf665956727921918ff3e794e5 | [] | no_license | osbetel/CS-110-Coursework-2014 | 0a0de475271ce70b8bd109d6e8e5bd68267bef63 | 5059c0d163efbbaae92a6f812f9380e03a5d6caf | refs/heads/master | 2021-01-13T05:25:36.512553 | 2017-02-09T10:09:58 | 2017-02-09T10:09:58 | 81,435,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,089 | py | #Written by Andrew Nguyen; July 10, 2015
#I did attempt the extra credit portion of this lab.
#This program is written as a marketing tool to help determine how much the total cost of software will be
#, granting discounts based on bulk buying.
#2-10 packages = 10%
#11-20 = 20%
#21-30 = 30%
#31 or more = 40%
#FIRST PACKAGE IS ALWAYS $99!
# Interactive preamble: greet the user, capture their name, and show the
# bulk-discount schedule before asking for an order size.
print("Welcome to Over8ted Software's Interactive Pricing Program!")
print("--------------------")
name = input("What is your name?")
print("The pricing schedule is as follows:\n2-10 packages = 10% \n11-20 = 20% \n21-30 = 30% \n31 or more = 40%")
print("--------------------")
input("Press Enter to continue...")  # pause so the user can read the schedule
print("The first software package will always be $99; all subsequent packages will be discounted at whatever discount"
      "\nbracket they fall into. Eg: If you buy 22 packages, the first will be $99, 2-10 will be discounted by 10%,"
      "\n11-20 will be discounted 20%, and the last two will be discounted 30%. Prices do not include tax.")
print("--------------------")
# NOTE(review): int() raises ValueError on non-numeric input -- no validation here.
packages = int(input("How many software packages would you like to purchase today? Please enter only a number."))
#Begin definition of pricing and student discount functions.
def pricing():
    """Print the tiered subtotal/total for the global `packages` count and
    publish them through the module globals `subtotal` and `total_cost`.

    Tier maths: the first package is always $99; packages 2-10 cost 99*0.9
    each, 11-20 cost 99*0.8, 21-30 cost 99*0.7, 31+ cost 99*0.6.  Each branch
    adds the fixed cost of all earlier (fuller-price) tiers.  The 1.096
    factor is the 9.6% sales tax.

    NOTE(review): the `packages != 1` clauses are redundant -- each range
    check already excludes 1.
    """
    global subtotal
    global total_cost
    print("--------------------")
    if packages == 1:
        print("Your subtotal is $99.")
        print("You will be taxed at a rate of 9.6% sales tax.")
        print("After tax, your total is $108.50")
        subtotal = float(format(99, '.2f'))
        total_cost = float(format(108.50, '.2f'))
    if 2 <= packages <= 10 and packages != 1:
        print("Your subtotal is $"+str(format(((packages - 1) * 99 * .9)+99.00, '.2f')))
        print("You will be taxed at a rate of 9.6% sales tax.")
        print("After tax, your total cost is $"+str(format((((packages - 1) * 99 * .9)+99.00) * 1.096, '.2f')))
        subtotal = float(format(((packages - 1) * 99 * .9)+99.00, '.2f'))
        total_cost = float(format((((packages - 1) * 99 * .9)+99.00) * 1.096, '.2f'))
    #First 10 packages have a subtotal cost of $900.90, factoring in the 10% discount on 2-10
    if 11 <= packages <= 20 and packages != 1:
        print("Your subtotal is $"+str(format(((packages - 10) * 99 * .8)+900.90, '.2f')))
        print("You will be taxed at a rate of 9.6% sales tax.")
        print("After tax, your total cost is $"+str(format((((packages - 10) * 99 * .8)+900.90) * 1.096, '.2f')))
        subtotal = float(format(((packages - 10) * 99 * .8)+900.90, '.2f'))
        total_cost = float(format((((packages - 10) * 99 * .8)+900.90) * 1.096, '.2f'))
    #subtotal cost of first 20 packages is $1692.90
    if 21 <= packages <= 30 and packages != 1:
        print("Your subtotal is $"+str(format(((packages - 20) * 99 * .7)+1692.90, '.2f')))
        print("You will be taxed at a rate of 9.6% sales tax.")
        print("After tax, your total cost is $"+str(format((((packages - 20) * 99 * .7)+1692.90) * 1.096, '.2f')))
        subtotal = float(format(((packages - 20) * 99 * .7)+1692.90, '.2f'))
        total_cost = float(format((((packages - 20) * 99 * .7)+1692.90) * 1.096, '.2f'))
    #subtotal cost of first 30 packages is $2385.90
    if packages >= 31 and packages != 1:
        print("Your subtotal is $"+str(format(((packages - 30) * 99 * .6)+2385.90, '.2f')))
        print("You will be taxed at a rate of 9.6% sales tax.")
        print("After tax, your total cost is $"+str(format((((packages - 30) * 99 * .6)+2385.90) * 1.096, '.2f')))
        subtotal = float((format(((packages - 30) * 99 * .6)+2385.90, '.2f')))
        total_cost = float((format((((packages - 30) * 99 * .6)+2385.90) * 1.096, '.2f')))
    #Includes cost of all prior packages + all subsequent packages ordered.
#Includes cost of all prior packages + all subsequent packages ordered.
def student_discount():
    """Ask about NSC affiliation and print the final bill.

    Reads the module globals `name`, `subtotal` and `total_cost` set earlier.
    NOTE(review): entering the name "NSC" or "North Seattle College" forces
    the discount regardless of the y/n answer -- the author's test notes at
    the bottom of the file treat this as expected behaviour.
    """
    discount = input("""Are you a student of North Seattle College or an affiliated educational institution of NSC?
    \nPlease enter "y" for yes, or "n" for no.""")
    if name == "NSC":
        discount = "y"
    if name == "North Seattle College":
        discount = "y"
    if discount == "y":
        # 5% off the pre-tax subtotal, then the 9.6% tax is applied.
        print("As a student or related institution of NSC, you will receive a discount of 5% off your total purchase! (Tax not included)")
        print("Your final bill, after your eligible discounts, is $"+str(format(subtotal * .95 * 1.096,'.2f')))
    else:
        print("Your final bill will be $"+str(total_cost))
#Begin callback of functions.
# Drive the interaction: compute the tiered pricing, apply any student
# discount, then thank the customer by name.
pricing()
print("--------------------")
student_discount()
print("--------------------")
print("Thank you,", name, ",for your purchase of", packages, "software packages!")
print("--------------------")
#Test 1: I tested the purchase of 23 packages under the name Andrew, with the student discount included.
#from manual calculations, I figured the final sum to be $1979.11 which is exactly what the program returned.
#Calculations were done using the pricing table for bulk buying, and then factoring in the 5% discount to the SUBTOTAL
#finally, tax was added in to achieve $1979.11.
#Test 2: I tested the purchase of 14 packages under the name NSC, while responding "n" to the question of whether the user
#was a student at NSC or affiliated institution. The calculation was the same process as before, but
#this time, even with the "n" response, the discount was still taken, and the final total was $1267.87, which is what I got as well.
#Test 3: This final test was simply a plain person buying 45 packages with no educational discount. Just a standard subtotal * tax
#operation. The final total came back as $3591.48, which is also what I got from manual calculation
#This concludes my testing of this program.
#Final notes: The input for # of packages to purchase only accepts whole numbers. And even then, only positive numbers.
#if I were to extend this program, it may include an option for returning products back (in which case I would use the
#negative numbers). It also does not accept floating point numbers (which makes no sense anyway as you can't purchase
#a fraction of a product). | [
"osbetel@gmail.com"
] | osbetel@gmail.com |
04e23dd0f9dc7fc5bfaba404a9c4618ef737ab5d | ce44b65c5269ee44c2c8a3f44dc689030ff58f44 | /config.py | 5798aa39ac0cd0a5f9a5b87e1589d3f62c978f5b | [] | no_license | judywawira/TotoCare | 360d07152d9f45436c921bffcadb656757fbe79e | 236b9f70901175130ecc2d5370eb8285ac7cbc9b | refs/heads/master | 2021-06-27T03:47:24.508682 | 2017-09-16T00:01:22 | 2017-09-16T00:01:22 | 103,587,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | from flask_pymongo import PyMongo
from pymongo import MongoClient
WTF_CSRF_ENABLED = True
SECRET_KEY = 'Put your secret key here'
DB_NAME = 'TotoCare'
DATABASE = MongoClient()[DB_NAME]
POSTS_COLLECTION = DATABASE.posts
USERS_COLLECTION = DATABASE.users
SETTINGS_COLLECTION = DATABASE.settings
DEBUG = False
| [
"judywawira@gmail.com"
] | judywawira@gmail.com |
cd3c833ec59d6f5b7be30c93793164e6ee290006 | ce2beb0878797a8ae88e07e56fcf8c2d0402ac5f | /e075/e75.py | 34035636e3a791493beb0a1ecdcce3f4c6382c83 | [] | no_license | cslarsen/project-euler | 2de49ad9be9b0bfe7c5b22c0e2272992e8f07151 | fd1c7664891a5503aedaccb936b66eeb87d6085b | refs/heads/master | 2020-07-08T00:17:29.605171 | 2015-09-14T17:45:23 | 2015-09-14T17:45:23 | 6,430,645 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 866 | py | import sys
import math
from fractions import gcd
def triple():
for m in range(1,1300):
for n in range(1,m):
if gcd(m,n)!=1 and ((m-n) & 1)!=1:
continue
yield m*m-n*n, 2*m*n, m*m+n*n
yield 0,0,0
MAX = 1500000
found = {}
seen = set()
t = triple()
while True:
x,y,z = t.next()
if x+y+z > MAX:
continue
if x==0 and y==0 and z==0:
break
k=1
while True:
a = x*k
b = y*k
c = z*k
k += 1
L=a+b+c
if L > MAX:
break
T = tuple(sorted((a,b,c)))
if not T in seen:
seen.add(T)
if not L in found: found[L] = 0
found[L] += 1
count = sum(1 for k in found if found[k]==1)
print "12 ->", found[12]
print "24 ->", found[24]
print "30 ->", found[30]
print "36 ->", found[36]
print "40 ->", found[40]
print "48 ->", found[48]
print "120 ->", found[120]
print "Answer:", count
| [
"csl@sublevel3.org"
] | csl@sublevel3.org |
30b8c1c76638d1d10d64aa615e685919637c8132 | b75f80da7065e631e4850bc4d8dddef86437265d | /python/functions.py | d87969fbe7123db7ccf3ee1ff30864728da2ffce | [] | no_license | eab03/wcc | 099644d14993847bcad96a08dcc20547d7910efe | 5df95d33c5fcca62d74d0d95c2498bc6cec3e49d | refs/heads/master | 2020-07-05T09:17:30.421733 | 2017-01-02T21:11:29 | 2017-01-02T21:11:29 | 67,425,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,192 | py | #def multiply(a, b):
# result = a * b
# return result
# Test the function:
#solution = multiply(4, 5) # Invoke multiply giving it the arguments 4 and 5
#print(solution) # Expected: 20
# Test the function
#print(multiply(4,5)) # Expected: 20
# Test the function
#print(multiply(4,5)) # 20
#print(multiply(9,11)) # 99
#print(multiply(0,10)) # 0
#print(multiply(.5,9)) # 4.5
#print(multiply(-1, -55)) # 55
#print(multiply(3, 'Hello')) # 'HelloHelloHello'
#def isPositive(a):
# if a > 0:
# return True
# else:
# return False
# Test the function
#print(isPositive(-4)) # Expected: False
#print(isPositive(4)) # Expected: True
#print(isPositive(-9.9)) # Expected: False
#print(isPositive(9.9)) # Expected: True
# Import statements should always be at the top of your file, not in the body of functions
#import random
#def draw_random_card():
# cards = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 11]
# random.shuffle(cards)
# return cards.pop()
# Test the function
#print(draw_random_card()) # Expected: Random number b/n 1 & 11
#print(draw_random_card()) # Expected: Random number b/n 1 & 11
#print(draw_random_card()) # Expected: Random number b/n 1 & 11
#def display_winner(winner, msg):
# if winner == 'Player':
# outcome = 'You win! '
# else:
# outcome = 'Computer wins! '
# print(outcome + '(' + msg + ')')
# Test the function
#display_winner('Player', 'You were closest to 21') # Expected: You win! (You were closest to 21)
#display_winner('Computer', 'It was closest to 21') # Expected: Computer wins! (It was closest to 21)
#display_winner('Computer', 'You busted') # Expected: Computer wins! (You busted)
def mystery(x, y, z):
result = x + (y * z)
return result
print mystery('Hello', 3, '!') # Expected: 'Hello!!!'
print mystery('Goodbye', 2, '@') # Expected: 'Goodbye@@'
#print('Hello' + 3 * '!')
#___________________
def calculate_tip(meal_price, service_rating):
# Calculate the tip % using an if statement
# tip_percentage = ???
if service_rating == 'A':
tip_percentage = .2;
elif service_rating == 'B':
tip_percentage = .18;
elif service_rating == 'C':
tip_percentage = .15;
# Calculate the tip amount by multiplying the meal price by the tip percentage
# tip_amount = ???
tip_amount = meal_price * tip_percentage;
return tip_amount
print(calculate_tip(30.50, 'C')) # Expected: 4.575
print(calculate_tip(15.00, 'B')) # Expected: 2.7
print(calculate_tip(20.00, 'A')) # Expected: 4
def isPositive(a):
if a > 0:
return True
else:
return False
print(isPositive(4))
def isPositive(a):
return a > 0
print(isPositive(4))
#______________
def calculate_lucky_number(birth_month, birth_day):
lucky_number = birth_month;
if birth_month in [2, 4, 6]:
lucky_number = birth_month + birth_day
return lucky_number
elif birth_month in [8, 10, 12]:
lucky_number = (birth_month * 10) - birth_day
return lucky_number
return lucky_number * 2
#Given the following invocation, what do you predict the results will be?
print(calculate_lucky_number(11, 10)) # Expected: ???
| [
"elizabethbright@Elizabeths-MacBook-Air-2.local"
] | elizabethbright@Elizabeths-MacBook-Air-2.local |
0b3aab841a1ebc987afbdaf6c188eeb5e3c68645 | 4512eaea837a5d95f84e87fd62f07f3bf3ee8ad6 | /scripts/read_alignment/snakemake_ChIPseq_MNaseseq/Snakefile | a896fdcd9ec13933e7586e0bb9fc590472e24ed9 | [
"MIT"
] | permissive | ajtock/Wheat_DMC1_ASY1_paper | 53a631189b075fb053e20a6b8d38d9841e38e496 | f4188d6fda80f8147ac9ddcdbccaab1d439e1b73 | refs/heads/master | 2023-06-04T11:06:30.644413 | 2021-06-17T10:45:00 | 2021-06-17T10:45:00 | 299,987,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,621 | # Snakemake workflow for aligning paired-end ChIP-seq or MNase-seq reads to a reference genome
# Chromosome sizes file below ("data/index/wheat_v1.0.fa.sizes") must exist
# before running snakemake
# e.g., in "data/index/" run:
# samtools faidx wheat_v1.0.fa; cut -f1,2 wheat_v1.0.fa.fai > wheat_v1.0.fa.sizes
# Usage ("--cores" should match the "THREADS" parameter in config.yaml, and reflect available threads):
# conda env create --file environment.yaml --name ChIPseq_mapping # On first use only
# conda activate ChIPseq_mapping
# snakemake -p --cores 48
# conda deactivate
import pandas as pd
import os
# To make the samtools rule work with a shell script ("scripts/keepPaired.py") invoked using the "shell" directive,
# we need to determine the base path of Snakefile since we expect the "scripts/" subdirectory to be located here
SRCDIR = srcdir("")
# Specify config file parameters
configfile: "config.yaml"
# Define wildcards and variables
sample = config["SAMPLES"]
reference = config["MAPPING"]["reference"]
refbase = os.path.basename(reference)
genomeBinName = config["COVERAGE"]["genomeBinName"]
# Determine bam index format (bai or csi) based on chromosome sizes
# Genomes with chromosomes longer than ~500 Mb (e.g., in wheat) require a csi index
# E.g., in axolotl: https://sourceforge.net/p/samtools/mailman/message/36249039/
chrSizes = pd.read_table("data/index/" + refbase + ".fa.sizes",
header = None)
smallChrs = 0
for x in chrSizes[1]:
if x < 5e+08:
smallChrs = smallChrs + 1
if smallChrs < len(chrSizes[1]):
bamidx = "csi"
else:
bamidx = "bai"
# Specify the desired end target file(s)
rule all:
input:
expand("logs/fastqc/raw/{sample}_R1_fastqc.html",
sample = sample),
expand("logs/fastqc/raw/{sample}_R2_fastqc.html",
sample = sample),
expand("data/dedup/{sample}_R1_dedup.fastq.gz",
sample = sample),
expand("data/dedup/{sample}_R1_dedup_repair.fastq.gz",
sample = sample),
expand("data/dedup/{sample}_R2_dedup_repair.fastq.gz",
sample = sample),
expand("data/dedup/trimmed/{sample}_R1_dedup_repair_trimmed.fastq.gz",
sample = sample),
expand("data/dedup/trimmed/{sample}_R2_dedup_repair_trimmed.fastq.gz",
sample = sample),
expand("logs/fastqc/trimmed/{sample}_R1_dedup_repair_trimmed_fastqc.html",
sample = sample),
expand("logs/fastqc/trimmed/{sample}_R2_dedup_repair_trimmed_fastqc.html",
sample = sample),
expand("mapped/{sample}_MappedOn_{refbase}.bam",
sample = sample,
refbase = refbase),
expand("mapped/both/{sample}_MappedOn_{refbase}_lowXM_both_sort.bam",
sample = sample,
refbase = refbase),
expand("mapped/unique/{sample}_MappedOn_{refbase}_lowXM_unique_sort.bam",
sample = sample,
refbase = refbase),
expand("mapped/unique/{sample}_MappedOn_{refbase}_lowXM_unique_sort.bam.{bamidx}",
sample = sample,
refbase = refbase,
bamidx = bamidx),
expand("mapped/unique/bw/{sample}_MappedOn_{refbase}_lowXM_unique_sort_norm.bw",
sample = sample,
refbase = refbase),
expand("mapped/unique/bg/{sample}_MappedOn_{refbase}_lowXM_unique_sort_norm.bedgraph",
sample = sample,
refbase = refbase),
expand("mapped/unique/bg/{sample}_MappedOn_{refbase}_lowXM_unique_sort_norm_binSize{genomeBinName}.bedgraph",
sample = sample,
refbase = refbase,
genomeBinName = genomeBinName),
expand("mapped/both/{sample}_MappedOn_{refbase}_lowXM_both_sort.bam.{bamidx}",
sample = sample,
refbase = refbase,
bamidx = bamidx),
expand("mapped/both/bw/{sample}_MappedOn_{refbase}_lowXM_both_sort_norm.bw",
sample = sample,
refbase = refbase),
expand("mapped/both/bg/{sample}_MappedOn_{refbase}_lowXM_both_sort_norm.bedgraph",
sample = sample,
refbase = refbase),
expand("mapped/both/bg/{sample}_MappedOn_{refbase}_lowXM_both_sort_norm_binSize{genomeBinName}.bedgraph",
sample = sample,
refbase = refbase,
genomeBinName = genomeBinName)
# Run fastqc on R1 raw data
rule fastqc_R1_raw:
"""Create fastqc report"""
input:
"data/{sample}_R1.fastq.gz"
output:
html = "logs/fastqc/raw/{sample}_R1_fastqc.html",
zip = "logs/fastqc/raw/{sample}_R1_fastqc.zip"
params: "--extract"
log:
"logs/fastqc/raw/{sample}_R1.log"
wrapper:
"0.27.1/bio/fastqc"
# Run fastqc on R2 raw data
rule fastqc_R2_raw:
"""Create fastqc report"""
input:
"data/{sample}_R2.fastq.gz"
output:
html = "logs/fastqc/raw/{sample}_R2_fastqc.html",
zip = "logs/fastqc/raw/{sample}_R2_fastqc.zip"
params: "--extract"
log:
"logs/fastqc/raw/{sample}_R2.log"
wrapper:
"0.27.1/bio/fastqc"
# Deduplicate R1 reads
rule dedupe_R1:
"""Remove duplicate R1 reads"""
input:
"data/{sample}_R1.fastq.gz"
output:
"data/dedup/{sample}_R1_dedup.fastq.gz"
threads: config["THREADS"]
params:
memory = config["MEMORY"]
log:
"logs/dedup/{sample}_R1_dedup.log"
shell:
"(dedupe.sh -Xmx{params.memory} in={input} out={output}"
" threads={threads} ac=f) 2> {log}"
# Re-pair separately deduplicated reads
rule repair:
"""Re-pair separately deduplicated reads"""
input:
fastq1 = "data/dedup/{sample}_R1_dedup.fastq.gz",
fastq2 = "data/{sample}_R2.fastq.gz"
output:
fastq1 = "data/dedup/{sample}_R1_dedup_repair.fastq.gz",
fastq2 = "data/dedup/{sample}_R2_dedup_repair.fastq.gz",
fastq3 = "data/dedup/{sample}_dedup_singletons.fastq.gz"
params:
memory = config["MEMORY"]
log:
"logs/repair/{sample}_R1_R2_dedup_repair.log"
shell:
"(repair.sh -Xmx{params.memory} repair=t"
" in1={input.fastq1} in2={input.fastq2}"
" out1={output.fastq1} out2={output.fastq2}"
" outs={output.fastq3}) 2> {log}"
# Trim off adapters
rule cutadapt:
"""Remove adapters"""
input:
"data/dedup/{sample}_R1_dedup_repair.fastq.gz",
"data/dedup/{sample}_R2_dedup_repair.fastq.gz"
output:
fastq1 = "data/dedup/trimmed/{sample}_R1_dedup_repair_trimmed.fastq.gz",
fastq2 = "data/dedup/trimmed/{sample}_R2_dedup_repair_trimmed.fastq.gz",
qc = "data/dedup/trimmed/{sample}_dedup_repair_trimmed.qc.txt"
params:
" -u " + str(config["FILTER"]["cutadapt"]["R1_5prime_cut"]) +
" -u " + str(config["FILTER"]["cutadapt"]["R1_3prime_cut"]) +
" -U " + str(config["FILTER"]["cutadapt"]["R2_5prime_cut"]) +
" -U " + str(config["FILTER"]["cutadapt"]["R2_3prime_cut"]) +
" -a " + config["FILTER"]["cutadapt"]["adapter_R1"] +
" -A " + config["FILTER"]["cutadapt"]["adapter_R2"] +
" -O " + str(config["FILTER"]["cutadapt"]["minimum-overlap"]) +
" -q " + str(config["FILTER"]["cutadapt"]["quality-filter"]) +
" -m " + str(config["FILTER"]["cutadapt"]["minimum-length"]) +
" -M " + str(config["FILTER"]["cutadapt"]["maximum-length"]) +
" --cores=0"
log:
"logs/cutadapt/{sample}_dedup_repair_trimmed.log"
wrapper:
"0.27.1/bio/cutadapt/pe"
# Run fastqc on R1 trimmed data
rule fastqc_R1_trimmed:
"""Create fastqc report"""
input:
"data/dedup/trimmed/{sample}_R1_dedup_repair_trimmed.fastq.gz"
output:
html = "logs/fastqc/trimmed/{sample}_R1_dedup_repair_trimmed_fastqc.html",
zip = "logs/fastqc/trimmed/{sample}_R1_dedup_repair_trimmed_fastqc.zip"
params: "--extract"
log:
"logs/fastqc/trimmed/{sample}_R1_dedup_repair_trimmed.log"
wrapper:
"0.27.1/bio/fastqc"
# Run fastqc on R2 trimmed data
rule fastqc_R2_trimmed:
"""Create fastqc report"""
input:
"data/dedup/trimmed/{sample}_R2_dedup_repair_trimmed.fastq.gz"
output:
html = "logs/fastqc/trimmed/{sample}_R2_dedup_repair_trimmed_fastqc.html",
zip = "logs/fastqc/trimmed/{sample}_R2_dedup_repair_trimmed_fastqc.zip"
params: "--extract"
log:
"logs/fastqc/trimmed/{sample}_R2_dedup_repair_trimmed.log"
wrapper:
"0.27.1/bio/fastqc"
# Align to reference genome
rule bowtie2:
"""Map reads using bowtie2 and sort them using samtools"""
input:
fastq1 = "data/dedup/trimmed/{sample}_R1_dedup_repair_trimmed.fastq.gz",
fastq2 = "data/dedup/trimmed/{sample}_R2_dedup_repair_trimmed.fastq.gz"
output:
protected("mapped/{sample}_MappedOn_{refbase}.bam")
params:
alignments = config["MAPPING"]["alignments"],
MAPQmaxi = config["MAPPING"]["MAPQmaxi"]
threads: config["THREADS"]
log:
"logs/bowtie2/{sample}_MappedOn_{refbase}_sort.log"
shell:
# -f 3 includes only concordantly aligned read pairs, and not unpaired reads
# -F 2316 excludes unmapped reads (their mates too),
# as well as secondary and supplementary alignments
# Exclude alignments with MAPQ < config["MAPPING"]["MAPQmaxi"]
"(bowtie2 --very-sensitive --no-mixed --no-discordant"
" --threads {threads} -k {params.alignments}"
" -x {reference} -1 {input.fastq1} -2 {input.fastq2} "
"| samtools view -bh -@ {threads} -f 3 -F 2316 -q {params.MAPQmaxi} -o {output} - ) 2> {log}"
# Filter alignments for mismatches and extract alignments consisting
# of at least 1 uniquely aligned read in a pair
rule samtools:
input:
"mapped/{sample}_MappedOn_{refbase}.bam"
output:
both = protected("mapped/both/{sample}_MappedOn_{refbase}_lowXM_both_sort.bam"),
unique = protected("mapped/unique/{sample}_MappedOn_{refbase}_lowXM_unique_sort.bam")
params:
sortMemory = config["MAPPING"]["sortMemory"],
MAPQunique = config["MAPPING"]["MAPQunique"]
threads: config["THREADS"]
log:
both = "logs/samtools/{sample}_MappedOn_{refbase}_lowXM_both_sort.log",
unique = "logs/samtools/{sample}_MappedOn_{refbase}_lowXM_unique_sort.log"
shell:
# Allow a maximum of 6 mismatches
# ([^0-9] matches characters not in the range of 0 to 9)
# http://seqanswers.com/forums/showthread.php?t=19729
"(samtools view -h {input} "
"| grep -e '^@' -e 'XM:i:[0-6][^0-9]' "
# Retain alignments for which the names of both reads in a pair are the same
"| scripts/keepPaired.py "
"| samtools view -u - "
"| samtools sort -@ {threads} -m {params.sortMemory} -o {output.both}) 2> {log.both}; "
# Extract unique alignments, excluding alignments with MAPQ scores < config["MAPPING"]["MAPQunique"]
# http://biofinysics.blogspot.com/2014/05/how-does-bowtie2-assign-mapq-scores.html
# https://sequencing.qcfail.com/articles/mapq-values-are-really-useful-but-their-implementation-is-a-mess/
"(samtools view -h -q {params.MAPQunique} {input} "
"| grep -e '^@' -e 'XM:i:[0-6][^0-9]' "
# Retain alignments for which the names of both reads in a pair are the same
"| scripts/keepPaired.py "
"| samtools view -u - "
"| samtools sort -@ {threads} -m {params.sortMemory} -o {output.unique}) 2> {log.unique}"
# Postmapping steps:
# Index BAM files (index format [bai or csi] depends on chromosome sizes)
# Generate samtools flagstat and idxstats
# Calculate library-size-normalized coverage
if bamidx == "bai":
rule postmapping:
"""bam.bai samtools flagstat idxstats"""
input:
uniqueBAM = "mapped/unique/{sample}_MappedOn_{refbase}_lowXM_unique_sort.bam",
bothBAM = "mapped/both/{sample}_MappedOn_{refbase}_lowXM_both_sort.bam"
output:
uniqueBAM = "mapped/unique/{sample}_MappedOn_{refbase}_lowXM_unique_sort.bam.{bamidx}",
bothBAM = "mapped/both/{sample}_MappedOn_{refbase}_lowXM_both_sort.bam.{bamidx}"
log:
uniqueflagstat = "logs/samtools/stats/{sample}_MappedOn_{refbase}_lowXM_unique_sort_flagstat.log",
bothflagstat = "logs/samtools/stats/{sample}_MappedOn_{refbase}_lowXM_both_sort_flagstat.log",
uniqueidxstats = "logs/samtools/stats/{sample}_MappedOn_{refbase}_lowXM_unique_sort_idxstats.log",
bothidxstats = "logs/samtools/stats/{sample}_MappedOn_{refbase}_lowXM_both_sort_idxstats.log"
shell:
"""
samtools index {input.uniqueBAM}
samtools flagstat {input.uniqueBAM} > {log.uniqueflagstat}
samtools idxstats {input.uniqueBAM} > {log.uniqueidxstats}
samtools index {input.bothBAM}
samtools flagstat {input.bothBAM} > {log.bothflagstat}
samtools idxstats {input.bothBAM} > {log.bothidxstats}
"""
rule calc_coverage:
"""Calculate library-size-normalized coverage"""
input:
uniqueBAM = "mapped/unique/{sample}_MappedOn_{refbase}_lowXM_unique_sort.bam",
bothBAM = "mapped/both/{sample}_MappedOn_{refbase}_lowXM_both_sort.bam",
uniqueBAMidx = "mapped/unique/{sample}_MappedOn_{refbase}_lowXM_unique_sort.bam.bai",
bothBAMidx = "mapped/both/{sample}_MappedOn_{refbase}_lowXM_both_sort.bam.bai"
output:
uniqueBW = "mapped/unique/bw/{sample}_MappedOn_{refbase}_lowXM_unique_sort_norm.bw",
bothBW = "mapped/both/bw/{sample}_MappedOn_{refbase}_lowXM_both_sort_norm.bw",
uniqueBG = "mapped/unique/bg/{sample}_MappedOn_{refbase}_lowXM_unique_sort_norm.bedgraph",
bothBG = "mapped/both/bg/{sample}_MappedOn_{refbase}_lowXM_both_sort_norm.bedgraph"
params:
normalizeUsing = config["COVERAGE"]["normalizeUsing"],
ignoreForNormalization = config["COVERAGE"]["ignoreForNormalization"],
binSize = config["COVERAGE"]["binSize"]
log:
unique = "logs/bamCoverage/{sample}_MappedOn_{refbase}_lowXM_unique_sort_norm.log",
both = "logs/bamCoverage/{sample}_MappedOn_{refbase}_lowXM_both_sort_norm.log"
threads: config["THREADS"]
shell:
"(bamCoverage -b {input.uniqueBAM} -o {output.uniqueBW}"
" --normalizeUsing {params.normalizeUsing}"
" --ignoreForNormalization {params.ignoreForNormalization}"
" --extendReads"
" --binSize {params.binSize} -p {threads}; "
"bamCoverage -b {input.uniqueBAM} -o {output.uniqueBG} -of bedgraph"
" --normalizeUsing {params.normalizeUsing}"
" --ignoreForNormalization {params.ignoreForNormalization}"
" --extendReads"
" --binSize {params.binSize} -p {threads}) 2> {log.unique}; "
"(bamCoverage -b {input.bothBAM} -o {output.bothBW}"
" --normalizeUsing {params.normalizeUsing}"
" --ignoreForNormalization {params.ignoreForNormalization}"
" --extendReads"
" --binSize {params.binSize} -p {threads}; "
"bamCoverage -b {input.bothBAM} -o {output.bothBG} -of bedgraph"
" --normalizeUsing {params.normalizeUsing}"
" --ignoreForNormalization {params.ignoreForNormalization}"
" --extendReads"
" --binSize {params.binSize} -p {threads}) 2> {log.both}"
rule calc_coverage_genome:
"""Calculate library-size-normalized coverage in adjacent windows"""
input:
uniqueBAM = "mapped/unique/{sample}_MappedOn_{refbase}_lowXM_unique_sort.bam",
bothBAM = "mapped/both/{sample}_MappedOn_{refbase}_lowXM_both_sort.bam",
uniqueBAMidx = "mapped/unique/{sample}_MappedOn_{refbase}_lowXM_unique_sort.bam.bai",
bothBAMidx = "mapped/both/{sample}_MappedOn_{refbase}_lowXM_both_sort.bam.bai"
output:
uniqueBGgenome = "mapped/unique/bg/{sample}_MappedOn_{refbase}_lowXM_unique_sort_norm_binSize{genomeBinName}.bedgraph",
bothBGgenome = "mapped/both/bg/{sample}_MappedOn_{refbase}_lowXM_both_sort_norm_binSize{genomeBinName}.bedgraph"
params:
normalizeUsing = config["COVERAGE"]["normalizeUsing"],
ignoreForNormalization = config["COVERAGE"]["ignoreForNormalization"],
genomeBinSize = config["COVERAGE"]["genomeBinSize"]
log:
unique = "logs/bamCoverage/{sample}_MappedOn_{refbase}_lowXM_unique_sort_norm_binSize{genomeBinName}.log",
both = "logs/bamCoverage/{sample}_MappedOn_{refbase}_lowXM_both_sort_norm_binSize{genomeBinName}.log"
threads: config["THREADS"]
shell:
"(bamCoverage -b {input.uniqueBAM} -o {output.uniqueBGgenome} -of bedgraph"
" --normalizeUsing {params.normalizeUsing}"
" --ignoreForNormalization {params.ignoreForNormalization}"
" --extendReads"
" --binSize {params.genomeBinSize} -p {threads}) 2> {log.unique}; "
"(bamCoverage -b {input.bothBAM} -o {output.bothBGgenome} -of bedgraph"
" --normalizeUsing {params.normalizeUsing}"
" --ignoreForNormalization {params.ignoreForNormalization}"
" --extendReads"
" --binSize {params.genomeBinSize} -p {threads}) 2> {log.both}"
else:
rule postmapping:
"""bam.csi samtools flagstat idxstats"""
input:
uniqueBAM = "mapped/unique/{sample}_MappedOn_{refbase}_lowXM_unique_sort.bam",
bothBAM = "mapped/both/{sample}_MappedOn_{refbase}_lowXM_both_sort.bam"
output:
uniqueBAM = "mapped/unique/{sample}_MappedOn_{refbase}_lowXM_unique_sort.bam.{bamidx}",
bothBAM = "mapped/both/{sample}_MappedOn_{refbase}_lowXM_both_sort.bam.{bamidx}"
log:
uniqueflagstat = "logs/samtools/stats/{sample}_MappedOn_{refbase}_lowXM_unique_sort_flagstat.log",
bothflagstat = "logs/samtools/stats/{sample}_MappedOn_{refbase}_lowXM_both_sort_flagstat.log",
uniqueidxstats = "logs/samtools/stats/{sample}_MappedOn_{refbase}_lowXM_unique_sort_idxstats.log",
bothidxstats = "logs/samtools/stats/{sample}_MappedOn_{refbase}_lowXM_both_sort_idxstats.log"
shell:
"""
samtools index -c -m 14 {input.uniqueBAM}
samtools flagstat {input.uniqueBAM} > {log.uniqueflagstat}
samtools idxstats {input.uniqueBAM} > {log.uniqueidxstats}
samtools index -c -m 14 {input.bothBAM}
samtools flagstat {input.bothBAM} > {log.bothflagstat}
samtools idxstats {input.bothBAM} > {log.bothidxstats}
"""
rule calc_coverage:
"""Calculate library-size-normalized coverage"""
input:
uniqueBAM = "mapped/unique/{sample}_MappedOn_{refbase}_lowXM_unique_sort.bam",
bothBAM = "mapped/both/{sample}_MappedOn_{refbase}_lowXM_both_sort.bam",
uniqueBAMidx = "mapped/unique/{sample}_MappedOn_{refbase}_lowXM_unique_sort.bam.csi",
bothBAMidx = "mapped/both/{sample}_MappedOn_{refbase}_lowXM_both_sort.bam.csi"
output:
uniqueBW = "mapped/unique/bw/{sample}_MappedOn_{refbase}_lowXM_unique_sort_norm.bw",
bothBW = "mapped/both/bw/{sample}_MappedOn_{refbase}_lowXM_both_sort_norm.bw",
uniqueBG = "mapped/unique/bg/{sample}_MappedOn_{refbase}_lowXM_unique_sort_norm.bedgraph",
bothBG = "mapped/both/bg/{sample}_MappedOn_{refbase}_lowXM_both_sort_norm.bedgraph"
params:
normalizeUsing = config["COVERAGE"]["normalizeUsing"],
ignoreForNormalization = config["COVERAGE"]["ignoreForNormalization"],
binSize = config["COVERAGE"]["binSize"]
log:
unique = "logs/bamCoverage/{sample}_MappedOn_{refbase}_lowXM_unique_sort_norm.log",
both = "logs/bamCoverage/{sample}_MappedOn_{refbase}_lowXM_both_sort_norm.log"
threads: config["THREADS"]
shell:
"(bamCoverage -b {input.uniqueBAM} -o {output.uniqueBW}"
" --normalizeUsing {params.normalizeUsing}"
" --ignoreForNormalization {params.ignoreForNormalization}"
" --extendReads"
" --binSize {params.binSize} -p {threads}; "
"bamCoverage -b {input.uniqueBAM} -o {output.uniqueBG} -of bedgraph"
" --normalizeUsing {params.normalizeUsing}"
" --ignoreForNormalization {params.ignoreForNormalization}"
" --extendReads"
" --binSize {params.binSize} -p {threads}) 2> {log.unique}; "
"(bamCoverage -b {input.bothBAM} -o {output.bothBW}"
" --normalizeUsing {params.normalizeUsing}"
" --ignoreForNormalization {params.ignoreForNormalization}"
" --extendReads"
" --binSize {params.binSize} -p {threads}; "
"bamCoverage -b {input.bothBAM} -o {output.bothBG} -of bedgraph"
" --normalizeUsing {params.normalizeUsing}"
" --ignoreForNormalization {params.ignoreForNormalization}"
" --extendReads"
" --binSize {params.binSize} -p {threads}) 2> {log.both}"
rule calc_coverage_genome:
"""Calculate library-size-normalized coverage in adjacent windows"""
input:
uniqueBAM = "mapped/unique/{sample}_MappedOn_{refbase}_lowXM_unique_sort.bam",
bothBAM = "mapped/both/{sample}_MappedOn_{refbase}_lowXM_both_sort.bam",
uniqueBAMidx = "mapped/unique/{sample}_MappedOn_{refbase}_lowXM_unique_sort.bam.csi",
bothBAMidx = "mapped/both/{sample}_MappedOn_{refbase}_lowXM_both_sort.bam.csi"
output:
uniqueBGgenome = "mapped/unique/bg/{sample}_MappedOn_{refbase}_lowXM_unique_sort_norm_binSize{genomeBinName}.bedgraph",
bothBGgenome = "mapped/both/bg/{sample}_MappedOn_{refbase}_lowXM_both_sort_norm_binSize{genomeBinName}.bedgraph"
params:
normalizeUsing = config["COVERAGE"]["normalizeUsing"],
ignoreForNormalization = config["COVERAGE"]["ignoreForNormalization"],
genomeBinSize = config["COVERAGE"]["genomeBinSize"]
log:
unique = "logs/bamCoverage/{sample}_MappedOn_{refbase}_lowXM_unique_sort_norm_binSize{genomeBinName}.log",
both = "logs/bamCoverage/{sample}_MappedOn_{refbase}_lowXM_both_sort_norm_binSize{genomeBinName}.log"
threads: config["THREADS"]
shell:
"(bamCoverage -b {input.uniqueBAM} -o {output.uniqueBGgenome} -of bedgraph"
" --normalizeUsing {params.normalizeUsing}"
" --ignoreForNormalization {params.ignoreForNormalization}"
" --extendReads"
" --binSize {params.genomeBinSize} -p {threads}) 2> {log.unique}; "
"(bamCoverage -b {input.bothBAM} -o {output.bothBGgenome} -of bedgraph"
" --normalizeUsing {params.normalizeUsing}"
" --ignoreForNormalization {params.ignoreForNormalization}"
" --extendReads"
" --binSize {params.genomeBinSize} -p {threads}) 2> {log.both}"
| [
"andytock@gmail.com"
] | andytock@gmail.com | |
46c205a3f435959086389638a9fd7fefd957308c | 99fa82f29a5b50a5595985acc460a0afaa6099a8 | /app/shopdj/sale/migrations/0004_invoice_total.py | a4f08cde3a2ed9f2afa42d4898d917d64e08dcca | [] | no_license | nnocturnnn/university_rep | a47cce9e29f96e9cc33293c76321e298e7628a4d | 4a8cd42f53dd112640a37ad5ff815ecf09ce1c25 | refs/heads/master | 2023-04-20T09:44:24.144760 | 2021-05-11T16:16:07 | 2021-05-11T16:16:07 | 304,661,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | # Generated by Django 3.0.5 on 2020-07-02 05:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sale', '0003_auto_20200701_0535'),
]
operations = [
migrations.AddField(
model_name='invoice',
name='total',
field=models.IntegerField(default=0),
),
]
| [
"vikchehovich@gmail.com"
] | vikchehovich@gmail.com |
9bda9c11c95d6d32ce7a1eebfd736f5f67170ed6 | d76bc60b5d538b5f4dd435e5b8f78d83b7211992 | /Without_Doubt_Project/Django_Server/crawling_App/urls.py | 4677ff7f2b7d8e0d9577570d9e5b3155c17198d6 | [] | no_license | icona2/crawling | 792f5478db639c0df862a14bbd1d042bb423f871 | c88a0d442ca8afeb8072b48542663b52816fe71c | refs/heads/master | 2020-04-22T03:42:20.245586 | 2019-02-27T05:05:36 | 2019-02-27T05:05:36 | 170,097,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | from django.conf.urls import url
#from django.urls import path, include
from . import views
urlpatterns = [
#url(r'^$', views.index, name='index'),
url(r'^$', views.input),
url(r'^realtime.html$', views.index),
url(r'^realtime_google.html$', views.index2),
url(r'^realtime_Top20.html$', views.top),
url(r'^realtime_Top20_google.html$', views.top2),
#url(r'^(?P<id>.*)/$', views.input, name='input'),
#path('<word>/',views.input),
#path('',views.index),
] | [
"lyou1@hanmail.net"
] | lyou1@hanmail.net |
0d3dcd3a21ccefd0f1a1dfbce2f6cea60b4365f9 | 3ef70fe63acaa665e2b163f30f1abd0a592231c1 | /stackoverflow/venv/lib/python3.6/site-packages/twisted/conch/test/test_checkers.py | 4111f2169895e93fbeeaf8e2819916a1a8a017e1 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | wistbean/learn_python3_spider | 14914b63691ac032955ba1adc29ad64976d80e15 | 40861791ec4ed3bbd14b07875af25cc740f76920 | refs/heads/master | 2023-08-16T05:42:27.208302 | 2023-03-30T17:03:58 | 2023-03-30T17:03:58 | 179,152,420 | 14,403 | 3,556 | MIT | 2022-05-20T14:08:34 | 2019-04-02T20:19:54 | Python | UTF-8 | Python | false | false | 31,498 | py | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.conch.checkers}.
"""
from __future__ import absolute_import, division
try:
import crypt
except ImportError:
cryptSkip = 'cannot run without crypt module'
else:
cryptSkip = None
import os
from collections import namedtuple
from io import BytesIO
from zope.interface.verify import verifyObject
from twisted.python import util
from twisted.python.compat import _b64encodebytes
from twisted.python.failure import Failure
from twisted.python.reflect import requireModule
from twisted.trial.unittest import TestCase
from twisted.python.filepath import FilePath
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
from twisted.cred.credentials import UsernamePassword, IUsernamePassword, \
SSHPrivateKey, ISSHPrivateKey
from twisted.cred.error import UnhandledCredentials, UnauthorizedLogin
from twisted.python.fakepwd import UserDatabase, ShadowDatabase
from twisted.test.test_process import MockOS
if requireModule('cryptography') and requireModule('pyasn1'):
dependencySkip = None
from twisted.conch.ssh import keys
from twisted.conch import checkers
from twisted.conch.error import NotEnoughAuthentication, ValidPublicKey
from twisted.conch.test import keydata
else:
dependencySkip = "can't run without cryptography and PyASN1"
if getattr(os, 'geteuid', None) is None:
euidSkip = "Cannot run without effective UIDs (questionable)"
else:
euidSkip = None
class HelperTests(TestCase):
"""
Tests for helper functions L{verifyCryptedPassword}, L{_pwdGetByName} and
L{_shadowGetByName}.
"""
skip = cryptSkip or dependencySkip
def setUp(self):
self.mockos = MockOS()
def test_verifyCryptedPassword(self):
"""
L{verifyCryptedPassword} returns C{True} if the plaintext password
passed to it matches the encrypted password passed to it.
"""
password = 'secret string'
salt = 'salty'
crypted = crypt.crypt(password, salt)
self.assertTrue(
checkers.verifyCryptedPassword(crypted, password),
'%r supposed to be valid encrypted password for %r' % (
crypted, password))
def test_verifyCryptedPasswordMD5(self):
"""
L{verifyCryptedPassword} returns True if the provided cleartext password
matches the provided MD5 password hash.
"""
password = 'password'
salt = '$1$salt'
crypted = crypt.crypt(password, salt)
self.assertTrue(
checkers.verifyCryptedPassword(crypted, password),
'%r supposed to be valid encrypted password for %s' % (
crypted, password))
def test_refuteCryptedPassword(self):
"""
L{verifyCryptedPassword} returns C{False} if the plaintext password
passed to it does not match the encrypted password passed to it.
"""
password = 'string secret'
wrong = 'secret string'
crypted = crypt.crypt(password, password)
self.assertFalse(
checkers.verifyCryptedPassword(crypted, wrong),
'%r not supposed to be valid encrypted password for %s' % (
crypted, wrong))
def test_pwdGetByName(self):
"""
L{_pwdGetByName} returns a tuple of items from the UNIX /etc/passwd
database if the L{pwd} module is present.
"""
userdb = UserDatabase()
userdb.addUser(
'alice', 'secrit', 1, 2, 'first last', '/foo', '/bin/sh')
self.patch(checkers, 'pwd', userdb)
self.assertEqual(
checkers._pwdGetByName('alice'), userdb.getpwnam('alice'))
def test_pwdGetByNameWithoutPwd(self):
"""
If the C{pwd} module isn't present, L{_pwdGetByName} returns L{None}.
"""
self.patch(checkers, 'pwd', None)
self.assertIsNone(checkers._pwdGetByName('alice'))
def test_shadowGetByName(self):
"""
L{_shadowGetByName} returns a tuple of items from the UNIX /etc/shadow
database if the L{spwd} is present.
"""
userdb = ShadowDatabase()
userdb.addUser('bob', 'passphrase', 1, 2, 3, 4, 5, 6, 7)
self.patch(checkers, 'spwd', userdb)
self.mockos.euid = 2345
self.mockos.egid = 1234
self.patch(util, 'os', self.mockos)
self.assertEqual(
checkers._shadowGetByName('bob'), userdb.getspnam('bob'))
self.assertEqual(self.mockos.seteuidCalls, [0, 2345])
self.assertEqual(self.mockos.setegidCalls, [0, 1234])
def test_shadowGetByNameWithoutSpwd(self):
"""
L{_shadowGetByName} returns L{None} if C{spwd} is not present.
"""
self.patch(checkers, 'spwd', None)
self.assertIsNone(checkers._shadowGetByName('bob'))
self.assertEqual(self.mockos.seteuidCalls, [])
self.assertEqual(self.mockos.setegidCalls, [])
class SSHPublicKeyDatabaseTests(TestCase):
"""
Tests for L{SSHPublicKeyDatabase}.
"""
skip = euidSkip or dependencySkip
def setUp(self):
self.checker = checkers.SSHPublicKeyDatabase()
self.key1 = _b64encodebytes(b"foobar")
self.key2 = _b64encodebytes(b"eggspam")
self.content = (b"t1 " + self.key1 + b" foo\nt2 " + self.key2 +
b" egg\n")
self.mockos = MockOS()
self.mockos.path = FilePath(self.mktemp())
self.mockos.path.makedirs()
self.patch(util, 'os', self.mockos)
self.sshDir = self.mockos.path.child('.ssh')
self.sshDir.makedirs()
userdb = UserDatabase()
userdb.addUser(
b'user', b'password', 1, 2, b'first last',
self.mockos.path.path, b'/bin/shell')
self.checker._userdb = userdb
def test_deprecated(self):
"""
L{SSHPublicKeyDatabase} is deprecated as of version 15.0
"""
warningsShown = self.flushWarnings(
offendingFunctions=[self.setUp])
self.assertEqual(warningsShown[0]['category'], DeprecationWarning)
self.assertEqual(
warningsShown[0]['message'],
"twisted.conch.checkers.SSHPublicKeyDatabase "
"was deprecated in Twisted 15.0.0: Please use "
"twisted.conch.checkers.SSHPublicKeyChecker, "
"initialized with an instance of "
"twisted.conch.checkers.UNIXAuthorizedKeysFiles instead.")
self.assertEqual(len(warningsShown), 1)
def _testCheckKey(self, filename):
self.sshDir.child(filename).setContent(self.content)
user = UsernamePassword(b"user", b"password")
user.blob = b"foobar"
self.assertTrue(self.checker.checkKey(user))
user.blob = b"eggspam"
self.assertTrue(self.checker.checkKey(user))
user.blob = b"notallowed"
self.assertFalse(self.checker.checkKey(user))
def test_checkKey(self):
"""
L{SSHPublicKeyDatabase.checkKey} should retrieve the content of the
authorized_keys file and check the keys against that file.
"""
self._testCheckKey("authorized_keys")
self.assertEqual(self.mockos.seteuidCalls, [])
self.assertEqual(self.mockos.setegidCalls, [])
def test_checkKey2(self):
"""
L{SSHPublicKeyDatabase.checkKey} should retrieve the content of the
authorized_keys2 file and check the keys against that file.
"""
self._testCheckKey("authorized_keys2")
self.assertEqual(self.mockos.seteuidCalls, [])
self.assertEqual(self.mockos.setegidCalls, [])
def test_checkKeyAsRoot(self):
"""
If the key file is readable, L{SSHPublicKeyDatabase.checkKey} should
switch its uid/gid to the ones of the authenticated user.
"""
keyFile = self.sshDir.child("authorized_keys")
keyFile.setContent(self.content)
# Fake permission error by changing the mode
keyFile.chmod(0o000)
self.addCleanup(keyFile.chmod, 0o777)
# And restore the right mode when seteuid is called
savedSeteuid = self.mockos.seteuid
def seteuid(euid):
keyFile.chmod(0o777)
return savedSeteuid(euid)
self.mockos.euid = 2345
self.mockos.egid = 1234
self.patch(self.mockos, "seteuid", seteuid)
self.patch(util, 'os', self.mockos)
user = UsernamePassword(b"user", b"password")
user.blob = b"foobar"
self.assertTrue(self.checker.checkKey(user))
self.assertEqual(self.mockos.seteuidCalls, [0, 1, 0, 2345])
self.assertEqual(self.mockos.setegidCalls, [2, 1234])
def test_requestAvatarId(self):
"""
L{SSHPublicKeyDatabase.requestAvatarId} should return the avatar id
passed in if its C{_checkKey} method returns True.
"""
def _checkKey(ignored):
return True
self.patch(self.checker, 'checkKey', _checkKey)
credentials = SSHPrivateKey(
b'test', b'ssh-rsa', keydata.publicRSA_openssh, b'foo',
keys.Key.fromString(keydata.privateRSA_openssh).sign(b'foo'))
d = self.checker.requestAvatarId(credentials)
def _verify(avatarId):
self.assertEqual(avatarId, b'test')
return d.addCallback(_verify)
def test_requestAvatarIdWithoutSignature(self):
"""
L{SSHPublicKeyDatabase.requestAvatarId} should raise L{ValidPublicKey}
if the credentials represent a valid key without a signature. This
tells the user that the key is valid for login, but does not actually
allow that user to do so without a signature.
"""
def _checkKey(ignored):
return True
self.patch(self.checker, 'checkKey', _checkKey)
credentials = SSHPrivateKey(
b'test', b'ssh-rsa', keydata.publicRSA_openssh, None, None)
d = self.checker.requestAvatarId(credentials)
return self.assertFailure(d, ValidPublicKey)
def test_requestAvatarIdInvalidKey(self):
"""
If L{SSHPublicKeyDatabase.checkKey} returns False,
C{_cbRequestAvatarId} should raise L{UnauthorizedLogin}.
"""
def _checkKey(ignored):
return False
self.patch(self.checker, 'checkKey', _checkKey)
d = self.checker.requestAvatarId(None);
return self.assertFailure(d, UnauthorizedLogin)
def test_requestAvatarIdInvalidSignature(self):
"""
Valid keys with invalid signatures should cause
L{SSHPublicKeyDatabase.requestAvatarId} to return a {UnauthorizedLogin}
failure
"""
def _checkKey(ignored):
return True
self.patch(self.checker, 'checkKey', _checkKey)
credentials = SSHPrivateKey(
b'test', b'ssh-rsa', keydata.publicRSA_openssh, b'foo',
keys.Key.fromString(keydata.privateDSA_openssh).sign(b'foo'))
d = self.checker.requestAvatarId(credentials)
return self.assertFailure(d, UnauthorizedLogin)
def test_requestAvatarIdNormalizeException(self):
"""
Exceptions raised while verifying the key should be normalized into an
C{UnauthorizedLogin} failure.
"""
def _checkKey(ignored):
return True
self.patch(self.checker, 'checkKey', _checkKey)
credentials = SSHPrivateKey(b'test', None, b'blob', b'sigData', b'sig')
d = self.checker.requestAvatarId(credentials)
def _verifyLoggedException(failure):
errors = self.flushLoggedErrors(keys.BadKeyError)
self.assertEqual(len(errors), 1)
return failure
d.addErrback(_verifyLoggedException)
return self.assertFailure(d, UnauthorizedLogin)
class SSHProtocolCheckerTests(TestCase):
"""
Tests for L{SSHProtocolChecker}.
"""
skip = dependencySkip
def test_registerChecker(self):
"""
L{SSHProcotolChecker.registerChecker} should add the given checker to
the list of registered checkers.
"""
checker = checkers.SSHProtocolChecker()
self.assertEqual(checker.credentialInterfaces, [])
checker.registerChecker(checkers.SSHPublicKeyDatabase(), )
self.assertEqual(checker.credentialInterfaces, [ISSHPrivateKey])
self.assertIsInstance(checker.checkers[ISSHPrivateKey],
checkers.SSHPublicKeyDatabase)
def test_registerCheckerWithInterface(self):
"""
If a specific interface is passed into
L{SSHProtocolChecker.registerChecker}, that interface should be
registered instead of what the checker specifies in
credentialIntefaces.
"""
checker = checkers.SSHProtocolChecker()
self.assertEqual(checker.credentialInterfaces, [])
checker.registerChecker(checkers.SSHPublicKeyDatabase(),
IUsernamePassword)
self.assertEqual(checker.credentialInterfaces, [IUsernamePassword])
self.assertIsInstance(checker.checkers[IUsernamePassword],
checkers.SSHPublicKeyDatabase)
def test_requestAvatarId(self):
"""
L{SSHProtocolChecker.requestAvatarId} should defer to one if its
registered checkers to authenticate a user.
"""
checker = checkers.SSHProtocolChecker()
passwordDatabase = InMemoryUsernamePasswordDatabaseDontUse()
passwordDatabase.addUser(b'test', b'test')
checker.registerChecker(passwordDatabase)
d = checker.requestAvatarId(UsernamePassword(b'test', b'test'))
def _callback(avatarId):
self.assertEqual(avatarId, b'test')
return d.addCallback(_callback)
def test_requestAvatarIdWithNotEnoughAuthentication(self):
"""
If the client indicates that it is never satisfied, by always returning
False from _areDone, then L{SSHProtocolChecker} should raise
L{NotEnoughAuthentication}.
"""
checker = checkers.SSHProtocolChecker()
def _areDone(avatarId):
return False
self.patch(checker, 'areDone', _areDone)
passwordDatabase = InMemoryUsernamePasswordDatabaseDontUse()
passwordDatabase.addUser(b'test', b'test')
checker.registerChecker(passwordDatabase)
d = checker.requestAvatarId(UsernamePassword(b'test', b'test'))
return self.assertFailure(d, NotEnoughAuthentication)
def test_requestAvatarIdInvalidCredential(self):
"""
If the passed credentials aren't handled by any registered checker,
L{SSHProtocolChecker} should raise L{UnhandledCredentials}.
"""
checker = checkers.SSHProtocolChecker()
d = checker.requestAvatarId(UsernamePassword(b'test', b'test'))
return self.assertFailure(d, UnhandledCredentials)
def test_areDone(self):
"""
The default L{SSHProcotolChecker.areDone} should simply return True.
"""
self.assertTrue(checkers.SSHProtocolChecker().areDone(None))
class UNIXPasswordDatabaseTests(TestCase):
"""
Tests for L{UNIXPasswordDatabase}.
"""
skip = cryptSkip or dependencySkip
def assertLoggedIn(self, d, username):
"""
Assert that the L{Deferred} passed in is called back with the value
'username'. This represents a valid login for this TestCase.
NOTE: To work, this method's return value must be returned from the
test method, or otherwise hooked up to the test machinery.
@param d: a L{Deferred} from an L{IChecker.requestAvatarId} method.
@type d: L{Deferred}
@rtype: L{Deferred}
"""
result = []
d.addBoth(result.append)
self.assertEqual(len(result), 1, "login incomplete")
if isinstance(result[0], Failure):
result[0].raiseException()
self.assertEqual(result[0], username)
def test_defaultCheckers(self):
"""
L{UNIXPasswordDatabase} with no arguments has checks the C{pwd} database
and then the C{spwd} database.
"""
checker = checkers.UNIXPasswordDatabase()
def crypted(username, password):
salt = crypt.crypt(password, username)
crypted = crypt.crypt(password, '$1$' + salt)
return crypted
pwd = UserDatabase()
pwd.addUser('alice', crypted('alice', 'password'),
1, 2, 'foo', '/foo', '/bin/sh')
# x and * are convention for "look elsewhere for the password"
pwd.addUser('bob', 'x', 1, 2, 'bar', '/bar', '/bin/sh')
spwd = ShadowDatabase()
spwd.addUser('alice', 'wrong', 1, 2, 3, 4, 5, 6, 7)
spwd.addUser('bob', crypted('bob', 'password'),
8, 9, 10, 11, 12, 13, 14)
self.patch(checkers, 'pwd', pwd)
self.patch(checkers, 'spwd', spwd)
mockos = MockOS()
self.patch(util, 'os', mockos)
mockos.euid = 2345
mockos.egid = 1234
cred = UsernamePassword(b"alice", b"password")
self.assertLoggedIn(checker.requestAvatarId(cred), b'alice')
self.assertEqual(mockos.seteuidCalls, [])
self.assertEqual(mockos.setegidCalls, [])
cred.username = b"bob"
self.assertLoggedIn(checker.requestAvatarId(cred), b'bob')
self.assertEqual(mockos.seteuidCalls, [0, 2345])
self.assertEqual(mockos.setegidCalls, [0, 1234])
def assertUnauthorizedLogin(self, d):
"""
Asserts that the L{Deferred} passed in is erred back with an
L{UnauthorizedLogin} L{Failure}. This reprsents an invalid login for
this TestCase.
NOTE: To work, this method's return value must be returned from the
test method, or otherwise hooked up to the test machinery.
@param d: a L{Deferred} from an L{IChecker.requestAvatarId} method.
@type d: L{Deferred}
@rtype: L{None}
"""
self.assertRaises(
checkers.UnauthorizedLogin, self.assertLoggedIn, d, 'bogus value')
def test_passInCheckers(self):
"""
L{UNIXPasswordDatabase} takes a list of functions to check for UNIX
user information.
"""
password = crypt.crypt('secret', 'secret')
userdb = UserDatabase()
userdb.addUser('anybody', password, 1, 2, 'foo', '/bar', '/bin/sh')
checker = checkers.UNIXPasswordDatabase([userdb.getpwnam])
self.assertLoggedIn(
checker.requestAvatarId(UsernamePassword(b'anybody', b'secret')),
b'anybody')
def test_verifyPassword(self):
"""
If the encrypted password provided by the getpwnam function is valid
(verified by the L{verifyCryptedPassword} function), we callback the
C{requestAvatarId} L{Deferred} with the username.
"""
def verifyCryptedPassword(crypted, pw):
return crypted == pw
def getpwnam(username):
return [username, username]
self.patch(checkers, 'verifyCryptedPassword', verifyCryptedPassword)
checker = checkers.UNIXPasswordDatabase([getpwnam])
credential = UsernamePassword(b'username', b'username')
self.assertLoggedIn(checker.requestAvatarId(credential), b'username')
def test_failOnKeyError(self):
"""
If the getpwnam function raises a KeyError, the login fails with an
L{UnauthorizedLogin} exception.
"""
def getpwnam(username):
raise KeyError(username)
checker = checkers.UNIXPasswordDatabase([getpwnam])
credential = UsernamePassword(b'username', b'username')
self.assertUnauthorizedLogin(checker.requestAvatarId(credential))
def test_failOnBadPassword(self):
"""
If the verifyCryptedPassword function doesn't verify the password, the
login fails with an L{UnauthorizedLogin} exception.
"""
def verifyCryptedPassword(crypted, pw):
return False
def getpwnam(username):
return [username, username]
self.patch(checkers, 'verifyCryptedPassword', verifyCryptedPassword)
checker = checkers.UNIXPasswordDatabase([getpwnam])
credential = UsernamePassword(b'username', b'username')
self.assertUnauthorizedLogin(checker.requestAvatarId(credential))
def test_loopThroughFunctions(self):
"""
UNIXPasswordDatabase.requestAvatarId loops through each getpwnam
function associated with it and returns a L{Deferred} which fires with
the result of the first one which returns a value other than None.
ones do not verify the password.
"""
def verifyCryptedPassword(crypted, pw):
return crypted == pw
def getpwnam1(username):
return [username, 'not the password']
def getpwnam2(username):
return [username, username]
self.patch(checkers, 'verifyCryptedPassword', verifyCryptedPassword)
checker = checkers.UNIXPasswordDatabase([getpwnam1, getpwnam2])
credential = UsernamePassword(b'username', b'username')
self.assertLoggedIn(checker.requestAvatarId(credential), b'username')
def test_failOnSpecial(self):
"""
If the password returned by any function is C{""}, C{"x"}, or C{"*"} it
is not compared against the supplied password. Instead it is skipped.
"""
pwd = UserDatabase()
pwd.addUser('alice', '', 1, 2, '', 'foo', 'bar')
pwd.addUser('bob', 'x', 1, 2, '', 'foo', 'bar')
pwd.addUser('carol', '*', 1, 2, '', 'foo', 'bar')
self.patch(checkers, 'pwd', pwd)
checker = checkers.UNIXPasswordDatabase([checkers._pwdGetByName])
cred = UsernamePassword(b'alice', b'')
self.assertUnauthorizedLogin(checker.requestAvatarId(cred))
cred = UsernamePassword(b'bob', b'x')
self.assertUnauthorizedLogin(checker.requestAvatarId(cred))
cred = UsernamePassword(b'carol', b'*')
self.assertUnauthorizedLogin(checker.requestAvatarId(cred))
class AuthorizedKeyFileReaderTests(TestCase):
"""
Tests for L{checkers.readAuthorizedKeyFile}
"""
skip = dependencySkip
def test_ignoresComments(self):
"""
L{checkers.readAuthorizedKeyFile} does not attempt to turn comments
into keys
"""
fileobj = BytesIO(b'# this comment is ignored\n'
b'this is not\n'
b'# this is again\n'
b'and this is not')
result = checkers.readAuthorizedKeyFile(fileobj, lambda x: x)
self.assertEqual([b'this is not', b'and this is not'], list(result))
def test_ignoresLeadingWhitespaceAndEmptyLines(self):
"""
L{checkers.readAuthorizedKeyFile} ignores leading whitespace in
lines, as well as empty lines
"""
fileobj = BytesIO(b"""
# ignore
not ignored
""")
result = checkers.readAuthorizedKeyFile(fileobj, parseKey=lambda x: x)
self.assertEqual([b'not ignored'], list(result))
def test_ignoresUnparsableKeys(self):
"""
L{checkers.readAuthorizedKeyFile} does not raise an exception
when a key fails to parse (raises a
L{twisted.conch.ssh.keys.BadKeyError}), but rather just keeps going
"""
def failOnSome(line):
if line.startswith(b'f'):
raise keys.BadKeyError('failed to parse')
return line
fileobj = BytesIO(b'failed key\ngood key')
result = checkers.readAuthorizedKeyFile(fileobj,
parseKey=failOnSome)
self.assertEqual([b'good key'], list(result))
class InMemorySSHKeyDBTests(TestCase):
"""
Tests for L{checkers.InMemorySSHKeyDB}
"""
skip = dependencySkip
def test_implementsInterface(self):
"""
L{checkers.InMemorySSHKeyDB} implements
L{checkers.IAuthorizedKeysDB}
"""
keydb = checkers.InMemorySSHKeyDB({b'alice': [b'key']})
verifyObject(checkers.IAuthorizedKeysDB, keydb)
def test_noKeysForUnauthorizedUser(self):
"""
If the user is not in the mapping provided to
L{checkers.InMemorySSHKeyDB}, an empty iterator is returned
by L{checkers.InMemorySSHKeyDB.getAuthorizedKeys}
"""
keydb = checkers.InMemorySSHKeyDB({b'alice': [b'keys']})
self.assertEqual([], list(keydb.getAuthorizedKeys(b'bob')))
def test_allKeysForAuthorizedUser(self):
"""
If the user is in the mapping provided to
L{checkers.InMemorySSHKeyDB}, an iterator with all the keys
is returned by L{checkers.InMemorySSHKeyDB.getAuthorizedKeys}
"""
keydb = checkers.InMemorySSHKeyDB({b'alice': [b'a', b'b']})
self.assertEqual([b'a', b'b'], list(keydb.getAuthorizedKeys(b'alice')))
class UNIXAuthorizedKeysFilesTests(TestCase):
"""
Tests for L{checkers.UNIXAuthorizedKeysFiles}.
"""
skip = dependencySkip
def setUp(self):
mockos = MockOS()
mockos.path = FilePath(self.mktemp())
mockos.path.makedirs()
self.userdb = UserDatabase()
self.userdb.addUser(b'alice', b'password', 1, 2, b'alice lastname',
mockos.path.path, b'/bin/shell')
self.sshDir = mockos.path.child('.ssh')
self.sshDir.makedirs()
authorizedKeys = self.sshDir.child('authorized_keys')
authorizedKeys.setContent(b'key 1\nkey 2')
self.expectedKeys = [b'key 1', b'key 2']
def test_implementsInterface(self):
"""
L{checkers.UNIXAuthorizedKeysFiles} implements
L{checkers.IAuthorizedKeysDB}.
"""
keydb = checkers.UNIXAuthorizedKeysFiles(self.userdb)
verifyObject(checkers.IAuthorizedKeysDB, keydb)
def test_noKeysForUnauthorizedUser(self):
"""
If the user is not in the user database provided to
L{checkers.UNIXAuthorizedKeysFiles}, an empty iterator is returned
by L{checkers.UNIXAuthorizedKeysFiles.getAuthorizedKeys}.
"""
keydb = checkers.UNIXAuthorizedKeysFiles(self.userdb,
parseKey=lambda x: x)
self.assertEqual([], list(keydb.getAuthorizedKeys('bob')))
def test_allKeysInAllAuthorizedFilesForAuthorizedUser(self):
"""
If the user is in the user database provided to
L{checkers.UNIXAuthorizedKeysFiles}, an iterator with all the keys in
C{~/.ssh/authorized_keys} and C{~/.ssh/authorized_keys2} is returned
by L{checkers.UNIXAuthorizedKeysFiles.getAuthorizedKeys}.
"""
self.sshDir.child('authorized_keys2').setContent(b'key 3')
keydb = checkers.UNIXAuthorizedKeysFiles(self.userdb,
parseKey=lambda x: x)
self.assertEqual(self.expectedKeys + [b'key 3'],
list(keydb.getAuthorizedKeys(b'alice')))
def test_ignoresNonexistantFile(self):
"""
L{checkers.UNIXAuthorizedKeysFiles.getAuthorizedKeys} returns only
the keys in C{~/.ssh/authorized_keys} and C{~/.ssh/authorized_keys2}
if they exist.
"""
keydb = checkers.UNIXAuthorizedKeysFiles(self.userdb,
parseKey=lambda x: x)
self.assertEqual(self.expectedKeys,
list(keydb.getAuthorizedKeys(b'alice')))
def test_ignoresUnreadableFile(self):
"""
L{checkers.UNIXAuthorizedKeysFiles.getAuthorizedKeys} returns only
the keys in C{~/.ssh/authorized_keys} and C{~/.ssh/authorized_keys2}
if they are readable.
"""
self.sshDir.child('authorized_keys2').makedirs()
keydb = checkers.UNIXAuthorizedKeysFiles(self.userdb,
parseKey=lambda x: x)
self.assertEqual(self.expectedKeys,
list(keydb.getAuthorizedKeys(b'alice')))
_KeyDB = namedtuple('KeyDB', ['getAuthorizedKeys'])
class _DummyException(Exception):
"""
Fake exception to be used for testing.
"""
pass
class SSHPublicKeyCheckerTests(TestCase):
"""
Tests for L{checkers.SSHPublicKeyChecker}.
"""
skip = dependencySkip
def setUp(self):
self.credentials = SSHPrivateKey(
b'alice', b'ssh-rsa', keydata.publicRSA_openssh, b'foo',
keys.Key.fromString(keydata.privateRSA_openssh).sign(b'foo'))
self.keydb = _KeyDB(lambda _: [
keys.Key.fromString(keydata.publicRSA_openssh)])
self.checker = checkers.SSHPublicKeyChecker(self.keydb)
def test_credentialsWithoutSignature(self):
"""
Calling L{checkers.SSHPublicKeyChecker.requestAvatarId} with
credentials that do not have a signature fails with L{ValidPublicKey}.
"""
self.credentials.signature = None
self.failureResultOf(self.checker.requestAvatarId(self.credentials),
ValidPublicKey)
def test_credentialsWithBadKey(self):
"""
Calling L{checkers.SSHPublicKeyChecker.requestAvatarId} with
credentials that have a bad key fails with L{keys.BadKeyError}.
"""
self.credentials.blob = b''
self.failureResultOf(self.checker.requestAvatarId(self.credentials),
keys.BadKeyError)
def test_credentialsNoMatchingKey(self):
"""
If L{checkers.IAuthorizedKeysDB.getAuthorizedKeys} returns no keys
that match the credentials,
L{checkers.SSHPublicKeyChecker.requestAvatarId} fails with
L{UnauthorizedLogin}.
"""
self.credentials.blob = keydata.publicDSA_openssh
self.failureResultOf(self.checker.requestAvatarId(self.credentials),
UnauthorizedLogin)
def test_credentialsInvalidSignature(self):
"""
Calling L{checkers.SSHPublicKeyChecker.requestAvatarId} with
credentials that are incorrectly signed fails with
L{UnauthorizedLogin}.
"""
self.credentials.signature = (
keys.Key.fromString(keydata.privateDSA_openssh).sign(b'foo'))
self.failureResultOf(self.checker.requestAvatarId(self.credentials),
UnauthorizedLogin)
def test_failureVerifyingKey(self):
"""
If L{keys.Key.verify} raises an exception,
L{checkers.SSHPublicKeyChecker.requestAvatarId} fails with
L{UnauthorizedLogin}.
"""
def fail(*args, **kwargs):
raise _DummyException()
self.patch(keys.Key, 'verify', fail)
self.failureResultOf(self.checker.requestAvatarId(self.credentials),
UnauthorizedLogin)
self.flushLoggedErrors(_DummyException)
def test_usernameReturnedOnSuccess(self):
"""
L{checker.SSHPublicKeyChecker.requestAvatarId}, if successful,
callbacks with the username.
"""
d = self.checker.requestAvatarId(self.credentials)
self.assertEqual(b'alice', self.successResultOf(d))
| [
"354142480@qq.com"
] | 354142480@qq.com |
cb84423f5e4d70a65e70740238373d1f57053190 | c33cffb022fd41f54bf77d52a3fef10bdd96e141 | /bishe/analyze_trend/windowed_MSEvectorRegress.py | 51befd5131e4318b6ab4a6a2192940dfb3277a4d | [] | no_license | comeusr/bishe | 2452f4246ce3b4ed3478a41b25af9d891214d37a | 51f71108017cef940f0ccd5d122ceb6f23bfb587 | refs/heads/master | 2020-05-17T00:04:44.192278 | 2019-07-10T02:15:12 | 2019-07-10T02:15:12 | 183,387,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,559 | py | # -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import statsmodels.api as sm
import statsmodels.tsa.api as st
# Input data: per-window best-fit trend coefficients, and the raw Brent oil
# price series. NOTE(review): absolute local paths -- not portable.
path = r'C:\Users\Ziyi Wang\Desktop\bishe\realize wangyi\bishe\data\best_trend\window_best_trend-22-2019-04-23(3).csv'
oil_price_path = r'C:\Users\Ziyi Wang\Desktop\bishe\realize wangyi\Data\Brent-11-25.xlsm'
df = pd.read_csv(path,header=0)
price_df = pd.read_excel(oil_price_path,header=None,sheet_name='Sheet1')
# Translation of the note below: this program is based on a VAR model without
# exogenous variables (a simultaneous-equation econometric model). Rough idea:
# assuming the trend vector on interval (a, b) is 2-D (trenda, trendb),
# regress trenda and trendb each on their own lag-1 and lag-2 values to get
# coefficient arrays ka, kb, whose entries are the constant term, the lag-1
# coefficient and the lag-2 coefficient, e.g.
#     trenda = k[0] + k[1]*trenda(t-1) + k[2]*trenda(t-2)
'''
本程序主要基于不含外生变量的VAR模型,也叫计量联合方程模型
大概思想是:
假设区间(a,b)趋势向量是2维的(trenda,trendb) 利用trenda和trendb和他们自己的滞后一阶和两阶值分别进行回归,得到回归方程的系数数组ka,kb,数组中的元素依次是常数项,滞后一阶的系数
滞后二阶的系数,如:
tranda = k[0]+k[1]tranda(t-1)+k[2]tranda(t-2)
'''
# Per-window quartic trend coefficients (a*x^4 + b*x^3 + c*x^2 + d*x + e) and
# the relative index of each window, pulled out of the fitted-trend table.
trenda = np.array(df.loc[:, 'trenda'].astype('float64'))
trendb = np.array(df.loc[:, 'trendb'].astype('float64'))
trendc = np.array(df.loc[:, 'trendc'].astype('float64'))
trendd = np.array(df.loc[:, 'trendd'].astype('float64'))
trende = np.array(df.loc[:, 'trende'].astype('float64'))
relative_index = np.array(df.loc[:, 'relative_index']).astype('float64')
# Lagged copies used as regressors: <name><k>[i] holds <name>[i - k], with the
# first k slots padded by empty strings so every list keeps the full length.
_n = len(trenda)
def _lagged(series, k):
    # k leading placeholders, then the first _n - k observations of `series`
    return [""] * k + list(series[:max(_n - k, 0)])
trenda1, trenda2, trenda3, trenda4, trenda5, trenda6 = (_lagged(trenda, k) for k in range(1, 7))
trendb1, trendb2, trendb3, trendb4, trendb5, trendb6 = (_lagged(trendb, k) for k in range(1, 7))
trendc1, trendc2, trendc3, trendc4, trendc5, trendc6 = (_lagged(trendc, k) for k in range(1, 7))
trendd1, trendd2, trendd3, trendd4, trendd5, trendd6 = (_lagged(trendd, k) for k in range(1, 7))
trende1, trende2, trende3, trende4, trende5, trende6 = (_lagged(trende, k) for k in range(1, 7))
relative_index1, relative_index2, relative_index3, relative_index4, relative_index5 = (
    _lagged(relative_index, k) for k in range(1, 6))
def _fit_lagged_ols(y, lags, a, b):
    """Fit an OLS regression of y on its lagged copies over rows a..b (inclusive).

    y    -- the full series (indexable by row)
    lags -- lagged copies of y, one list per lag order, aligned with y
    a, b -- inclusive row range used as the estimation sample

    Returns the fitted coefficient array [const, coef(lag 1), coef(lag 2), ...].
    """
    rows = range(a, b + 1)
    X = sm.add_constant(np.array([[lag[i] for lag in lags] for i in rows]))
    return sm.OLS([y[i] for i in rows], X).fit().params


def getRegressResult(a, b, alpha):
    """Estimate one autoregression per trend coefficient on the window (a, b).

    Each of the five polynomial-trend series is regressed on its own lags 1-6,
    and the relative index on its lags 1-3, e.g.
        trenda_t = k[0] + k[1]*trenda_{t-1} + ... + k[6]*trenda_{t-6}

    alpha (significance level) is kept for interface compatibility but is
    currently unused: filtering coefficients by p-value was tried and dropped.

    Returns (ka, kb, kc, kd, ke, k_index): one coefficient array per series,
    each laid out as [const, lag-1 coef, lag-2 coef, ...].
    """
    ka = _fit_lagged_ols(trenda, [trenda1, trenda2, trenda3, trenda4, trenda5, trenda6], a, b)
    kb = _fit_lagged_ols(trendb, [trendb1, trendb2, trendb3, trendb4, trendb5, trendb6], a, b)
    kc = _fit_lagged_ols(trendc, [trendc1, trendc2, trendc3, trendc4, trendc5, trendc6], a, b)
    kd = _fit_lagged_ols(trendd, [trendd1, trendd2, trendd3, trendd4, trendd5, trendd6], a, b)
    ke = _fit_lagged_ols(trende, [trende1, trende2, trende3, trende4, trende5, trende6], a, b)
    k_index = _fit_lagged_ols(relative_index,
                              [relative_index1, relative_index2, relative_index3], a, b)
    return ka, kb, kc, kd, ke, k_index
# Significance level (only relevant to the disabled p-value filtering)
alpha = 0.1
# Window length
step = 22
def _forecast_one_step(k, lags, b):
    """Evaluate k[0] + sum_j k[j+1] * lags[j][b], accumulating left to right."""
    result = k[0]
    for j, lag in enumerate(lags):
        result = result + k[j + 1] * lag[b]
    return result


def getResultForPredict(a, b):
    """One-step-ahead forecast of the trend vector for window b + 1.

    Fits the autoregressions on the interval (a, b) via getRegressResult and
    evaluates each fitted equation at row b -- the known end of the interval
    predicts the start of the next one.

    Returns (ya_ols, yb_ols, yc_ols, yd_ols, ye_ols, y_index_ols): the
    predicted trend coefficients and the predicted relative index.
    """
    # Coefficient arrays for the five trend series and the relative index
    ka, kb, kc, kd, ke, k_index = getRegressResult(a, b, alpha)
    ya_ols = _forecast_one_step(ka, [trenda1, trenda2, trenda3, trenda4, trenda5, trenda6], b)
    yb_ols = _forecast_one_step(kb, [trendb1, trendb2, trendb3, trendb4, trendb5, trendb6], b)
    yc_ols = _forecast_one_step(kc, [trendc1, trendc2, trendc3, trendc4, trendc5, trendc6], b)
    yd_ols = _forecast_one_step(kd, [trendd1, trendd2, trendd3, trendd4, trendd5, trendd6], b)
    ye_ols = _forecast_one_step(ke, [trende1, trende2, trende3, trende4, trende5, trende6], b)
    y_index_ols = _forecast_one_step(k_index,
                                     [relative_index1, relative_index2, relative_index3], b)
    return ya_ols, yb_ols, yc_ols, yd_ols, ye_ols, y_index_ols
# TODO: np.power on scalars works but plain ** would be more idiomatic.
def trend2oil(t, x, step=6):
    """Evaluate the fitted quartic trend polynomials at point x for the last
    `step` windows ending at window t (all t + 1 windows when t <= step).

    Parameters
    ----------
    t : int
        Index of the most recent window to evaluate.
    x : float
        Point at which each window's polynomial is evaluated.
    step : int
        Maximum number of windows to look back over (default 6).

    Returns
    -------
    list of evaluated prices, most recent window first.
    """
    # Fix: the short-history branch previously indexed trende[-time] instead
    # of trende[t - time], mixing the constant terms of unrelated windows.
    count = t + 1 if t <= step else step
    oil_price = []
    for time in range(count):
        idx = t - time
        price = (trenda[idx] * np.power(x, 4) + trendb[idx] * np.power(x, 3)
                 + trendc[idx] * np.power(x, 2) + trendd[idx] * x + trende[idx])
        oil_price.append(price)
    return oil_price
def get_best_price(i):
    """
    Return the fitted "best" oil price at time i: window i's quartic trend
    evaluated at column 0 of the price table.
    (NOTE(review): assumes price_df column 0 holds the evaluation point --
    confirm against the data file.)

    :param i: row index into the price/trend tables
    :return: the evaluated polynomial value
    """
    # Fix: price_df[i, 0] was a tuple *column* lookup (KeyError on a
    # DataFrame); use .loc row/column indexing like the rest of the file.
    x = price_df.loc[i, 0]
    best_price = df['trenda'][i] * np.power(x, 4) + df['trendb'][i] * np.power(x, 3) \
        + df['trendc'][i] * np.power(x, 2) + df['trendd'][i] * x + df['trende'][i]
    # Fix: the computed price was previously discarded (no return statement)
    return best_price
def main():
    """Rolling one-step-ahead evaluation of the trend-vector forecasts.

    For each of the last five windows: fit the autoregressions on all prior
    windows, predict the next window's trend vector and relative index,
    rebuild the implied oil price, and print per-point errors plus the
    averaged squared loss / MAPE / SSE over the five points.
    """
    # Start of the regression (estimation) interval
    a = 6;
    # b=df.shape[0]-1-1
    avError = 0
    mape = 0
    sse = 0
    # Rolling one-step-ahead forecast: advance one step each time and average
    # the results. E.g. first use (a, b) to predict the trend on
    # (b+1, step+b+1), then use (a, b+1) to predict (b+2, step+b+2), etc.
    oil_price_list = []
    for i in range(df.shape[0] - 6, df.shape[0] - 1):
        ya_ols, yb_ols,yc_ols,yd_ols,ye_ols,index_ols = getResultForPredict(a, i)
        # TODO: x could also be drawn at random from the empirical distribution
        x = index_ols
        # print('index_ols', index_ols)
        fit_x = df.loc[i,'relative_index']
        oil_price = ya_ols*(np.power(x,4))+yb_ols*(np.power(x,3))+yc_ols*(np.power(x,2))+yd_ols*x+ye_ols
        fitting_price = df.loc[i,'trenda']*np.power(fit_x,4)+df.loc[i,'trendb']*np.power(fit_x,3)+df.loc[i, 'trendc']*np.power(fit_x,2)\
                        +df.loc[i, 'trendd']*fit_x+df.loc[i, 'trende']
        oil_price_list.append(oil_price)
        oil_error = abs(oil_price-price_df.loc[i,1])
        # NOTE(review): rebinding the loop variable shifts the error baseline
        # for the first iteration only -- looks deliberate, but confirm.
        if i == df.shape[0] - 6:
            i = df.shape[0] - 7
        error = (0.5) * (np.square(ya_ols - trenda[i + 1]) + np.square(yb_ols - trendb[i + 1]) +np.square(yc_ols-trendc[i+1])
                         +np.square(yd_ols-trendd[i+1])+np.square(ye_ols-trende[i+1]))
        dmape = np.average(
            [np.abs((ya_ols - trenda[i + 1]) / trenda[i + 1]), np.abs((yb_ols - trendb[i + 1]) / trendb[i + 1])
             ,np.abs((yc_ols - trendc[i + 1]) / trendc[i + 1]),np.abs((yd_ols - trendd[i + 1]) / trendd[i + 1]),np.abs((ye_ols-trende[i+1]) / trende[i+1])])
        mape += dmape
        avError += error
        sse += error * 2
        print('预测第',i+1,'个点的油价',',预测值为',oil_price,'真实值为',price_df.loc[i,1],',误差为',oil_error, '拟合值为', fitting_price)
        print('预测第', i + 1, '个点', ',平方损失为:', error, ',mape=', dmape, ',sse=', error * 2)
        # print('预测第',i+1,'个点,第一个参数:',ya_ols, ',真值:',trenda[i+1], '第二个参数:',yb_ols,',真值:',trendb[i+1],'\n')
        # print('预测第',i+1,'个点,第一个参数误差',trenda[i+1]-ya_ols,',第二个参数误差:',trendb[i+1]-yb_ols,',平方损失:',np.square(trendb[i+1]-yb_ols)+np.square(trenda[i+1]-ya_ols),'\n')
    print('loss=', avError / 5, 'mape=', mape / 5, ',sse=', sse / 5)
    # Convert the existing trend vectors back into oil prices (unfinished)
    # for
    # oil_df = pd.DataFrame({
    #     'WTI': price_df.loc[0:5500,1],
    #     'Oil': oil
    # })
    # predict_result_path = r'.\results\oil\predict.csv'
    # oil_df.to_csv(predict_result_path)
if __name__ == '__main__':
    # Alternative inputs kept for reference (MSE-fitted parameter file):
    # path = r'C:\Users\Ziyi Wang\Desktop\bishe\realize wangyi\bishe\results\Poly-6-Params-2019-02-28.csv'
    # oil_price_path = r'C:\Users\Ziyi Wang\Desktop\bishe\realize wangyi\Data\Brent-11-25.xlsm'
    # df = pd.read_csv(path, header=0)
    # price_df = pd.read_excel(oil_price_path, header=None, sheet_name='Sheet1')
    # trenda = np.array(df.loc[:, 'MSEtrenda'].astype('float64'))
    # trendb = np.array(df.loc[:, 'MSEtrendb'].astype('float64'))
    # trendc = np.array(df.loc[:, 'MSEtrendc'].astype('float64'))
    # trendd = np.array(df.loc[:, 'MSEtrendd'].astype('float64'))
    # trende = np.array(df.loc[:, 'MSEtrende'].astype('float64'))
    main()  # fix: trailing non-Python residue ("| [") removed from this line
"wonziyi@126.com"
] | wonziyi@126.com |
a718663d2498261adff0e5a36c87fa7d6e782feb | c99fd2b8402d80dc11422c48ecce568ab896dc19 | /hw6_twitter.py | ff6768b43ae5a8995335ad1a8b9e9489264f7e6d | [] | no_license | ettienne010005/W21_HW6_Twitter | 9401514cc5d3a9aa7e1b5bfc2b76b3b65a8d017c | f0f6347df79fdb5b9edf37a7945c1cb1c4188beb | refs/heads/main | 2023-04-02T04:44:03.931381 | 2021-03-29T23:11:03 | 2021-03-29T23:11:03 | 349,137,726 | 0 | 0 | null | 2021-03-18T16:09:24 | 2021-03-18T16:09:23 | null | UTF-8 | Python | false | false | 7,480 | py | #########################################
##### Name: Tzu-Ching Lin #####
##### Uniqname: tzlin #####
#########################################
from requests_oauthlib import OAuth1
import json
import requests
import operator
import secrets # file that contains your OAuth credentials
CACHE_FILENAME = "twitter_cache.json"
CACHE_DICT = {}
client_key = secrets.TWITTER_API_KEY
client_secret = secrets.TWITTER_API_SECRET
access_token = secrets.TWITTER_ACCESS_TOKEN
access_token_secret = secrets.TWITTER_ACCESS_TOKEN_SECRET
oauth = OAuth1(client_key,
client_secret=client_secret,
resource_owner_key=access_token,
resource_owner_secret=access_token_secret)
def test_oauth():
''' Helper function that returns an HTTP 200 OK response code and a
representation of the requesting user if authentication was
successful; returns a 401 status code and an error message if
not. Only use this method to test if supplied user credentials are
valid. Not used to achieve the goal of this assignment.'''
url = "https://api.twitter.com/1.1/account/verify_credentials.json"
auth = OAuth1(client_key, client_secret, access_token, access_token_secret)
authentication_state = requests.get(url, auth=auth).json()
return authentication_state
def open_cache():
''' Opens the cache file if it exists and loads the JSON into
the CACHE_DICT dictionary.
if the cache file doesn't exist, creates a new cache dictionary
Parameters
----------
None
Returns
-------
The opened cache: dict
'''
try:
cache_file = open(CACHE_FILENAME, 'r')
cache_contents = cache_file.read()
cache_dict = json.loads(cache_contents)
cache_file.close()
except:
cache_dict = {}
return cache_dict
def save_cache(cache_dict):
''' Saves the current state of the cache to disk
Parameters
----------
cache_dict: dict
The dictionary to save
Returns
-------
None
'''
dumped_json_cache = json.dumps(cache_dict)
fw = open(CACHE_FILENAME,"w")
fw.write(dumped_json_cache)
fw.close()
def construct_unique_key(baseurl, params):
''' constructs a key that is guaranteed to uniquely and
repeatably identify an API request by its baseurl and params
AUTOGRADER NOTES: To correctly test this using the autograder, use an underscore ("_")
to join your baseurl with the params and all the key-value pairs from params
E.g., baseurl_key1_value1
Parameters
----------
baseurl: string
The URL for the API endpoint
params: dict
A dictionary of param:value pairs
Returns
-------
string
the unique key as a string
'''
#TODO Implement function
param_list = []
for i in params.keys():
param_list.append(f'{i}_{params[i]}')
param_list.sort()
unique_key = baseurl + '_' + '_'.join(param_list)
return unique_key
def make_request(baseurl, params):
'''Make a request to the Web API using the baseurl and params
Parameters
----------
baseurl: string
The URL for the API endpoint
params: dictionary
A dictionary of param:value pairs
Returns
-------
dict
the data returned from making the request in the form of
a dictionary
'''
#TODO Implement function
response = requests.get(baseurl, params=params, auth=oauth)
return response.json()
def make_request_with_cache(baseurl, hashtag, count):
'''Check the cache for a saved result for this baseurl+params:values
combo. If the result is found, return it. Otherwise send a new
request, save it, then return it.
AUTOGRADER NOTES: To test your use of caching in the autograder, please do the following:
If the result is in your cache, print "fetching cached data"
If you request a new result using make_request(), print "making new request"
Do no include the print statements in your return statement. Just print them as appropriate.
This, of course, does not ensure that you correctly retrieved that data from your cache,
but it will help us to see if you are appropriately attempting to use the cache.
Parameters
----------
baseurl: string
The URL for the API endpoint
hashtag: string
The hashtag to search for
count: integer
The number of results you request from Twitter
Returns
-------
dict
the results of the query as a dictionary loaded from cache
JSON
'''
#TODO Implement function
CACHE_DICT = open_cache()
param_dict = {'q':hashtag, 'count': str(count)}
key = construct_unique_key(baseurl,param_dict)
if key in CACHE_DICT:
print("fetching cached data")
return CACHE_DICT[key]
else:
print("making new request")
CACHE_DICT[key] = make_request(baseurl,param_dict)
save_cache(CACHE_DICT)
return CACHE_DICT[key]
def find_most_common_cooccurring_hashtag(tweet_data, hashtag_to_ignore):
''' Finds the hashtag that most commonly co-occurs with the hashtag
queried in make_request_with_cache().
Parameters
----------
tweet_data: dict
Twitter data as a dictionary for a specific query
hashtag_to_ignore: string
the same hashtag that is queried in make_request_with_cache()
(e.g. "#MarchMadness2021")
Returns
-------
string
the hashtag that most commonly co-occurs with the hashtag
queried in make_request_with_cache()
'''
# TODO: Implement function
hashtag_string = hashtag_to_ignore.replace('#','')
tweets = tweet_data['statuses']
hash_dict = {}
for t in tweets:
if len(t['entities']['hashtags']) is not 0:
for h in t['entities']['hashtags']:
if (h['text'] != hashtag_string) and (h['text'].lower() != hashtag_string.lower()):
if h['text'] not in hash_dict:
hash_dict[h['text']] = 1
else:
hash_dict[h['text']] = hash_dict[h['text']] + 1
# print(hash_dict)
return max(hash_dict.items(), key=operator.itemgetter(1))[0].lower()
''' Hint: In case you're confused about the hashtag_to_ignore
parameter, we want to ignore the hashtag we queried because it would
definitely be the most occurring hashtag, and we're trying to find
the most commonly co-occurring hashtag with the one we queried (so
we're essentially looking for the second most commonly occurring
hashtags).'''
if __name__ == "__main__":
if not client_key or not client_secret:
print("You need to fill in CLIENT_KEY and CLIENT_SECRET in secret_data.py.")
exit()
if not access_token or not access_token_secret:
print("You need to fill in ACCESS_TOKEN and ACCESS_TOKEN_SECRET in secret_data.py.")
exit()
CACHE_DICT = open_cache()
baseurl = "https://api.twitter.com/1.1/search/tweets.json"
hashtag = "#MarchMadness2021"
# hashtag = "#2020election"
count = 100
tweet_data = make_request_with_cache(baseurl, hashtag, count)
most_common_cooccurring_hashtag = find_most_common_cooccurring_hashtag(tweet_data, hashtag)
print("The most commonly cooccurring hashtag with {} is #{}.".format(hashtag, most_common_cooccurring_hashtag))
| [
"ettienne010005@gmail.com"
] | ettienne010005@gmail.com |
27a27b8ff6cfbb7f0b7e72510b910997df65cc2c | 139f055b1c391dfd9e7fa0ee316d9a100768ffe9 | /2.3-sorting-vdj.py | 6a08b44395bc9c48a3b3c04f09744fc332849658 | [] | no_license | yanmc/macaca-antibodyomics | c6a5d174c35c252f4839378a8375c0e193834171 | 401992008c42fefc0fe1645bc0d6d000aa23122a | refs/heads/master | 2020-04-22T12:01:47.629490 | 2016-03-01T03:16:29 | 2016-03-01T03:16:29 | 66,706,526 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,392 | py | #!/usr/bin/env python
# encoding: utf-8
"""
2.3-sorting-vdj.py -i infile -org orgnism
Created by Mingchen on 2014-09-01.
Copyright (c) 2014 __MyCompanyName__. All rights reserved.
infile: the input file, orgism_get_vdj_add.txt
orgism: human,mouse or rabbit...
"""
import os, sys, glob, re, csv
from Bio import SeqIO
from mytools_ymc import *
def sorting_vdj(model,infile):
result = []
for index, line in enumerate(open(infile, "rU")):
match = re.findall(model,line)
if match != []:
result.append(line)
return result
def processer(infile):
for model in ['HV|VH','KV|VK','LV|VL','HD|DH','HJ|JH','KJ|JK','LJ|JL']:
print "Processing %s ,grep %s..." %(infile,model)
sort_result = sorting_vdj(model,infile)
fname, suffix = os.path.splitext(infile)
write_file = "%s_%s.txt" %(fname,model)
writer = csv.writer(open(write_file, "wt"), delimiter = "\t")
for line in sort_result:
line = line.split()
writer.writerow(line)
print "Got %d reads align to %s" %(len(sort_result),model)
def main():
infiles = glob.glob(infile_model)
for infile in infiles:
processer(infile)
if __name__ == '__main__':
# check parameters
if len(sys.argv) < 5 :
print __doc__
sys.exit(0)
# get parameters from input
dict_args = processParas(sys.argv, i="infile_model", org="orgnism")
infile_model, orgnism = getParas(dict_args, "infile_model", "orgnism")
main()
| [
"mcyan90@gmail.com"
] | mcyan90@gmail.com |
a5114ef7e1f2739128c2bf274bc7af9d47e46c91 | 4e5de397a7bac9e777074daaadb890f4315752a2 | /athenaCL/libATH/audioTools.py | 9a6e52cf8456020ece47d4066a9827636dd751d9 | [] | no_license | ericahub/athenacl | 60c5757e11f65f65eeb26c40125dac0e1636d86b | 96ac2a6859dedf08e8a9aebbed4ef348a66ac707 | refs/heads/master | 2021-01-10T12:38:04.869889 | 2011-12-17T20:49:19 | 2011-12-17T20:49:19 | 44,901,312 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 41,703 | py | #-----------------------------------------------------------------||||||||||||--
# Name: audioTools.py
# Purpose: General audio processing tools
#
# Authors: Christopher Ariza
#
# Copyright: (c) 2004-2010 Christopher Ariza
# License: GPL
#-----------------------------------------------------------------||||||||||||--
# note: this modules may not be compatible w/ all distributions
# there is a wave module that may provide support for wave output formats
# with the same interface
import random, os, array, copy
import unittest, doctest
try:
import aifc, audioop
AIF = 1
except ImportError:
AIF = 0
from athenaCL.libATH import drawer
from athenaCL.libATH import unit
from athenaCL.libATH import fileTools
from athenaCL.libATH import osTools
_MOD = 'audioTools.py'
#-----------------------------------------------------------------||||||||||||--
audioFormatNames = ['aif', 'wav', 'sd2']
def audioFormatParser(usrStr):
    """Map a user-supplied audio format string to a canonical name.

    Accepts legacy aliases ('aiff', 'wave', 'ircam', single letters)
    and returns 'aif', 'wav', or 'sd2'; returns None when nothing
    matches.
    """
    aliasTable = {
        'aif': ['aif', 'aiff', 'a'],
        'wav': ['wav', 'wave', 'w'],
        # 'ircam' is treated as an alias of sd2
        'sd2': ['sd2', 'sd', 'ircam', 'i'],
    }
    # drawer.selectionParse returns the matched key, or None on failure
    return drawer.selectionParse(usrStr, aliasTable)
#-----------------------------------------------------------------||||||||||||--
# def setMacAudioRsrc(fmt, audioPath, prefDict=None):
# """for macos 9, necessary to set creator and type
# of audio files. given a fmt and file path, this function
# does all necessary processing to audio file"""
# if fmt == None: # assume it is an aif
# fmt = 'aif'
# # get standard type codes
# if fmt == 'aif': typeCode = 'AIFF'
# elif fmt == 'wav': typeCode = 'WAVE'
# elif fmt in ['sd2', 'ircam']: typeCode = 'Sd2f'
# # get creator code, use qt on default
# if prefDict != None and prefDict.has_key('audioPlayerCreatorCode'):
# # may be a complete file path, or a name
# creatorCode = prefDict['audioPlayerCreatorCode']
# else: #'TVOD' is QuickTime, 'auFM' is Peak
# creatorCode = 'TVOD' # quick time
# osTools.rsrcSetCreator(audioPath, creatorCode, typeCode)
#-----------------------------------------------------------------||||||||||||--
# common equations
def frameToSec(frames, sr=44100):
    """Convert a frame count to a duration in seconds at sample rate `sr`."""
    return float(frames) / sr
def secToFrame(sec, sr=44100):
    """Convert a duration in seconds to an integer frame count (truncated)."""
    return int(float(sr) * sec)
def byteToInt(bytes):
    """Return the maximum positive sample value for a given sample width.

    `bytes` is the sample width in bytes (2 == 16 bit). The result is
    the largest positive value of a signed integer of that width, e.g.
    32767 for 2 bytes.
    """
    # halve and subtract 1 because one bit holds the sign; floor
    # division keeps the result an int under Python 3 as well
    return (pow(2, (bytes * 8)) // 2) - 1
def arrayLenToFrameLen(arrayLen, bytes, ch):
    """Convert a raw array length to a frame count.

    Each frame occupies `bytes` array units per channel, so the frame
    count is the array length divided by (bytes * ch). Floor division
    keeps the result an int under Python 3 as well.
    """
    return arrayLen // (bytes * ch)
#useful values:
#+3dB = 2x the power = 1.44x the SPL
#+6dB = 4x the power = 2x the SPL
#The doubling of SPL represents that the power increases four times.
# To describe an absolute value, the reference point must be known. There are different reference points defined.
#
# dBV represents the level compared to 1 Volt RMS. 0dBV = 1V. There is no reference to impedance (V = Volt).
#
# dBu represents the level compared to 0,775 Volt RMS with an unloaded, open circuit, source (u = 'unloaded' or 'unterminated' -- a voltage that is not related to power by an impedance).
#
# dBm represents the power level compared to 1 mWatt. This is a level compared to 0,775 Volt RMS across a 600 Ohm load impedance (m = milli).
#
# Dealing with voltage, convert from dBV to dBu: 1dBV equals +2.2dBu.
#
# +4dBu equals 1.23 Volt RMS.
#
# The reference level of -10dBV is the equivalent to a level of -7.8dBu.
#
# !!! +4dBu and -10dBV systems have a level difference of 11.8 dB and not 14 dB. This is almost a voltage ratio of 4:1 !!!
#
#-----------------------------------------------------------------||||||||||||--
# this functions rely on lame and oggenc
def encodeMp3(src, dstDir=None, br=128, title='',
    artist='', album='', year='', dstName=None, quality=2):
    """Encode `src` to an mp3 via the external `lame` tool.

    lame flags used: -m s (true stereo), -q quality (0 best, 2 good),
    -b constant bitrate in kbps, --cbr (force constant bitrate),
    --disptime (progress interval). The id3v2 tags are filled from
    title/artist/album/year. Returns the destination path.
    """
    srcDir, srcFile = os.path.split(src)
    baseName, ext = osTools.extSplit(srcFile)
    if dstName == None: # derive the output name from the source name
        dstName = baseName + '.mp3'
    if dstDir != None and os.path.isdir(dstDir):
        dst = os.path.join(dstDir, dstName)
    else: # fall back to the source directory
        dst = os.path.join(srcDir, dstName)
    tagStr = '--tt "%s" --ta "%s" --tl "%s" --ty "%s" ' % (title, artist,
        album, year)
    cmd = 'lame -m s -q %s --add-id3v2 --cbr --disptime 1 -b %s -c %s %s %s' % (
        quality, br, tagStr, src, dst)
    os.system(cmd)
    return dst
def decodeMp3(src, dstDir=None, dstName=None, quality=2):
    """Decode an mp3 to a wav file via the external `lame` tool.

    `dstName` overrides the derived output name; `dstDir` overrides the
    output directory, falling back to the source directory when it is
    missing or invalid. Returns the destination path.
    """
    srcDir, srcFile = os.path.split(src)
    baseName, ext = osTools.extSplit(srcFile)
    if dstName == None: # derive the output name from the source name
        dstName = baseName + '.wav'
    if dstDir != None and os.path.isdir(dstDir):
        dst = os.path.join(dstDir, dstName)
    else: # fall back to the source directory
        dst = os.path.join(srcDir, dstName)
    cmd = 'lame -m s -q %s --decode --disptime 1 -c %s %s' % (quality, src, dst)
    os.system(cmd)
    return dst
def encodeFlac(src, dstDir=None, dstName=None):
    """Encode `src` to FLAC via the external `flac` tool.

    Returns the destination path (<name>.flac), placed in `dstDir` when
    valid, otherwise next to the source file.
    """
    srcDir, srcFile = os.path.split(src)
    baseName, ext = osTools.extSplit(srcFile)
    if dstName == None: # derive the output name from the source name
        dstName = baseName + '.flac'
    if dstDir != None and os.path.isdir(dstDir):
        dst = os.path.join(dstDir, dstName)
    else: # fall back to the source directory
        dst = os.path.join(srcDir, dstName)
    cmd = 'flac -o %s %s ' % (dst, src)
    os.system(cmd)
    return dst
def decodeFlac(src, dstDir=None, dstName=None):
    """Decode a FLAC file to aif via the external `flac -d` tool.

    Returns the destination path (<name>.aif), placed in `dstDir` when
    valid, otherwise next to the source file.
    """
    srcDir, srcFile = os.path.split(src)
    baseName, ext = osTools.extSplit(srcFile)
    if dstName == None: # derive the output name from the source name
        dstName = baseName + '.aif'
    if dstDir != None and os.path.isdir(dstDir):
        dst = os.path.join(dstDir, dstName)
    else: # fall back to the source directory
        dst = os.path.join(srcDir, dstName)
    cmd = 'flac -d -o %s %s ' % (dst, src)
    os.system(cmd)
    return dst
def encodeOgg(src, dstDir=None, br=128, title='',
    artist='', album='', year=''):
    """Encode `src` to Ogg Vorbis via the external `oggenc` tool.

    oggenc flags used: -b bitrate, -o output path, and -t/-a/-l/-d for
    the title/artist/album/date tags. Returns the destination path.
    """
    srcDir, srcFile = os.path.split(src)
    baseName, ext = osTools.extSplit(srcFile)
    dstName = baseName + '.ogg'
    if dstDir != None and os.path.isdir(dstDir):
        dst = os.path.join(dstDir, dstName)
    else: # fall back to the source directory
        dst = os.path.join(srcDir, dstName)
    tagStr = '-t "%s" -a "%s" -l "%s" -d %s ' % (title, artist, album, year)
    cmd = 'oggenc -w -b %s %s -o "%s" %s' % (br, tagStr, dst, src)
    os.system(cmd)
    return dst
def encodeAac(src, dstDir=None, br=128, title='',
    artist='', album='', year='', dstName=None):
    """Encode `src` to AAC (.m4a) via the external `faac` tool.

    Note: faac does not accept aiff input. Tag flags used: --title,
    --artist, --album, --year. Returns the destination path.
    """
    srcDir, srcFile = os.path.split(src)
    baseName, ext = osTools.extSplit(srcFile)
    if dstName == None: # derive the output name from the source name
        dstName = baseName + '.m4a'
    if dstDir != None and os.path.isdir(dstDir):
        dst = os.path.join(dstDir, dstName)
    else: # fall back to the source directory
        dst = os.path.join(srcDir, dstName)
    # faac quality values seem to range roughly from 50 to 150
    tagStr = '--title "%s" --artist "%s" --album "%s" --year "%s" ' % (title,
        artist, album, year)
    cmd = 'faac -q 100 -b %s %s -o "%s" %s' % (br, tagStr, dst, src)
    os.system(cmd)
    return dst
#-----------------------------------------------------------------||||||||||||--
# sox wrappers
# http://sox.sourceforge.net/
# note: should use higher quality resampling here
def soxConvert(src, srcExt, dstExt='.aif', srcSr=44100, dstSr=44100):
    """Convert an audio file with sox, optionally resampling.

    The destination path is `src` with `srcExt` replaced by `dstExt`.
    Returns the destination path.
    """
    # note: a higher-quality resampling mode could be selected here
    dst = src.replace(srcExt, dstExt)
    soxCall = 'sox -r %s %s -r %s %s' % (srcSr, src, dstSr, dst)
    os.system(soxCall)
    return dst
def soxSplit(src):
    """Split a stereo file into two mono .aif files via sox.

    Writes <stub>.L.aif (left channel) and <stub>.R.aif (right channel)
    and returns the pair (leftPath, rightPath).
    """
    srcDir, srcFile = os.path.split(src)
    nameStub, srcExt = osTools.extSplit(src)
    ext = '.aif' # output is always written as aif
    dstLeft = os.path.join(srcDir, nameStub + '.L' + ext)
    dstRight = os.path.join(srcDir, nameStub + '.R' + ext)
    # average down to one channel, keeping the left signal
    os.system('sox %s -c 1 %s avg -l' % (src, dstLeft))
    # average down to one channel, keeping the right signal
    os.system('sox %s -c 1 %s avg -r' % (src, dstRight))
    return dstLeft, dstRight
def soxFade(src, dst=None, timeIn=.01, timeOut=.01):
    """Apply a linear fade-in and fade-out to `src` using sox.

    timeIn/timeOut are fade durations in seconds. If `dst` is None the
    operation is destructive: the result is written to a temporary file
    and then moved over `src`. Returns `src`.
    """
    # sox needs the total duration to place the fade-out
    timeTotal = fileDur(src)
    replaceSrc = (dst == None) # remember whether we work in-place
    if replaceSrc:
        nameStub, ext = osTools.extSplit(src)
        dst = '%s-temp%s' % (nameStub, ext)
    # 't' selects a linear fade slope
    cmd = 'sox %s %s fade t %s %s %s' % (src, dst, timeIn, timeTotal, timeOut)
    os.system(cmd)
    if replaceSrc:
        # fix: the old comparison referenced nameStub/ext, which were
        # unbound (NameError) whenever the caller supplied dst
        osTools.mv(dst, src)
    return src
def soxAmp(src, dst=None, amp=.9):
    """Scale the amplitude of `src` by the linear factor `amp` using sox.

    If `dst` is None the change is destructive: the result is written
    to a temporary file and moved over `src`. Returns `src`.
    """
    replaceSrc = (dst == None) # remember whether we work in-place
    if replaceSrc:
        nameStub, ext = osTools.extSplit(src)
        dst = '%s-temp%s' % (nameStub, ext)
    cmd = 'sox %s %s vol %s amplitude' % (src, dst, amp)
    os.system(cmd)
    if replaceSrc:
        # fix: the old comparison referenced nameStub/ext, which were
        # unbound (NameError) whenever the caller supplied dst
        osTools.mv(dst, src)
    return src
def soxSpeed(src, dst=None, speed=.5):
    """Change the playback speed of `src` by factor `speed` using sox.

    If `dst` is None the change is destructive: the result is written
    to a temporary file and moved over `src`. Returns `src`.
    """
    replaceSrc = (dst == None) # remember whether we work in-place
    if replaceSrc:
        nameStub, ext = osTools.extSplit(src)
        dst = '%s-temp%s' % (nameStub, ext)
    cmd = 'sox %s %s speed %s' % (src, dst, speed)
    os.system(cmd)
    if replaceSrc:
        # fix: the old comparison referenced nameStub/ext, which were
        # unbound (NameError) whenever the caller supplied dst
        osTools.mv(dst, src)
    return src
#-----------------------------------------------------------------||||||||||||--
# http://www.python.org/doc/current/lib/module-array.html
# for working with arrays see examples here and elsewhere
#
# noise = []
# for i in range(8000):
# noise.append(whrandom.randint(-32767, 32767))
# data = array.array("h", noise).tostring()
#
class EnvelopeGenerator:
    """Generate amplitude envelopes.

    Envelopes are lists of floating point values between 0 and 1, one
    value per frame, always returned as lists.
    """
    def __init__(self):
        pass
    def _ramp(self, rampLen, direction='in', format='linear'):
        """Return a linear ramp of `rampLen` values in [0, 1).

        direction 'in' rises from 0; 'out' falls toward 0. `format` is
        kept for interface compatibility; only a linear shape is
        produced.
        """
        # fix: removed a dead `i = i + 1` inside the for loop (the loop
        # variable is rebound on every iteration anyway)
        envelope = [(1.0 / rampLen) * i for i in range(rampLen)]
        if direction == 'out':
            envelope.reverse()
        return envelope
    def symmetric(self, frames, fadeLen):
        """Return an envelope `frames` long with equal linear fades.

        fadeLen is the fade length in frames at each end; the middle is
        held at 1. Raises ValueError when the two fades do not fit.
        """
        totalLen = frames
        if totalLen < (fadeLen * 2):
            # call-style raise keeps this valid under Python 3 as well
            raise ValueError('fades are too long')
        flatLen = totalLen - (fadeLen * 2)
        rampIn = self._ramp(fadeLen, 'in')
        rampOut = self._ramp(fadeLen, 'out')
        # create envelope for entire data
        return rampIn + ([1] * flatLen) + rampOut
return envelope
class SampleGenerator:
    """Utility to generate and process raw sample data.

    All methods return data as packed strings of signed 16-bit samples
    (array typecode 'h'), ready for writing with aifc. Because each
    sample occupies two bytes, string lengths are twice the sample
    count.

    NOTE(review): array.tostring()/fromstring() are legacy APIs
    (removed in Python 3.9); this class as written requires Python 2.
    """
    def __init__(self, ch=1, sr=44100, bytes=2):
        # NOTE(review): the constructor arguments are ignored and the
        # defaults are hard-coded; real values are installed later via
        # update() -- confirm whether this is intentional
        self.ch = 1
        self.bytes = 2 # 16 bits
        self.sr = 44100
    def update(self, ch, sr, bytes):
        # update values in case of changes; also refresh the cached
        # maximum sample magnitude used for clamping
        self.ch = ch
        self.sr = sr
        self.bytes = bytes
        self.absMax = byteToInt(self.bytes) # value in integers
    #-----------------------------------------------------------------------||--
    # synthesize for given frames
    def silence(self, frames):
        """Return `frames` frames of silence as a packed sample string."""
        if frames == 0: return ''
        # h is a signed short
        return array.array("h", [0]*frames*self.ch).tostring()
    def noise(self, frames, amp=1):
        """Return `frames` frames of white noise.

        amp is a linear scalar between 0 and 1 applied to each sample.
        """
        if frames == 0: return ''
        noise = []
        for i in range(frames*self.ch):
            max = byteToInt(self.bytes)
            noise.append(int(round(random.randint(-max, max) * amp)))
        # 'h' is a signed int here
        return array.array("h", noise).tostring()
    #-----------------------------------------------------------------------||--
    # functions that process string and list data
    def multiply(self, xStr, yList):
        """Multiply a packed sample string by a per-frame scalar list.

        xStr is packed sample data; yList holds one scalar per frame
        (not per sample), so every channel of a frame is scaled by the
        same value. Note: this may not work for stereo; float scalars
        are rounded via round().
        """
        xArray = array.array("h")
        xArray.fromstring(xStr)
        xList = xArray.tolist()
        #print _MOD, 'comparing mult lists', len(xList), len(yList), self.ch
        zList = []
        i = 0 # sample index into xList
        q = 0 # frame index into yList
        while 1:
            if i == len(xList): break
            scalar = yList[q] # only one per frame, not data value
            zList.append(int(round(xList[i] * scalar)))
            i = i + 1
            if i % self.ch == 0:
                q = q + 1 # increment scale index
        return array.array("h", zList).tostring()
    def _frameLimit(self, val):
        # clamp a summed sample to the signed range cached by update()
        if val > self.absMax: return self.absMax
        elif val < -self.absMax: return -self.absMax
        else: return val
    def mix(self, xStr, yStr):
        """Mix (sum) two equal-length packed sample strings.

        Summed values are clamped to the legal signed sample range.
        """
        xArray = array.array("h")
        xArray.fromstring(xStr)
        xList = xArray.tolist()
        yArray = array.array("h")
        yArray.fromstring(yStr)
        yList = yArray.tolist()
        #print _MOD, 'comparing mix lists', len(xList), len(yList), self.ch
        zList = []
        i = 0
        q = 0
        for i in range(len(xList)):
            # this should be limited
            zList.append(self._frameLimit(xList[i] + yList[i]))
        return array.array("h", zList).tostring()
    def split(self, xStr):
        """Split interleaved multi-channel data into per-channel strings.

        For 1000 stereo frames the flat sample list has 2000 entries;
        each returned channel string holds 1000 samples.
        """
        splitList = []
        for ch in range(self.ch):
            splitList.append([]) # create a list for each channel
        xArray = array.array("h")
        xArray.fromstring(xStr)
        xList = xArray.tolist() # possibly not necessary
        i = 0
        while i < len(xList):
            for ch in range(self.ch):
                splitList[ch].append(xList[i])
                i = i + 1
        # convert back to data string format
        #print _MOD, 'split source, side', len(xList), len(splitList[0]), self.ch
        for ch in range(self.ch):
            splitList[ch] = array.array("h", splitList[ch]).tostring()
        return splitList # list of channel data
    def interleave(self, xStr, yStr):
        """Interleave two packed sample strings sample-by-sample.

        Channel count is not considered.
        """
        xArray = array.array("h")
        xArray.fromstring(xStr)
        xList = xArray.tolist() # possibly not necessary
        yArray = array.array("h")
        # NOTE(review): this reads xStr, not yStr -- yStr is effectively
        # ignored, which looks like a bug; confirm against callers
        yArray.fromstring(xStr)
        yList = yArray.tolist() # possibly not necessary
        zList = []
        for i in range(len(xList)):
            zList.append(xList[i])
            zList.append(yList[i])
        # convert back to data string format
        return array.array("h", zList).tostring()
    #-----------------------------------------------------------------------||--
    # tools to load unit interval array as samples
    # convert with various methods
    def unitSynthesizer(self, xList, method=None):
        """Convert unit-interval values into packed sample data.

        xList holds values assumed normalized between 0 and 1; one
        sample per channel is emitted for each value. Methods:
        'direct' (or None) scales 0..1 across the full signed range;
        'reflect' scales 0..1 to 0..max and flips sign at each zero
        crossing; 'fold' takes the absolute value of the full-range
        mapping and flips sign at each zero crossing.
        """
#         for i in range(len(xList)):
#             xList[i] = unit.limit(xList[i])
        zList = []
        max = byteToInt(self.bytes)
        #print _MOD, max
        if method in [None, 'direct']:
            for x in xList:
                valSigned = int(round(unit.denorm(x, -max, max)))
                # add a value for each channel
                for ch in range(self.ch):
                    zList.append(valSigned)
        # these use zero crossings to affect the sign of the wave form
        elif method in ['reflect']:
            sign = 1 # 1 is positive
            for x in xList:
                val = unit.denorm(x, 0, max)
                if val == 0: # only change sign at zero crossing
                    if sign == 1: sign = -1
                    else: sign = 1
                valSigned = val*sign
                # add a value for each channel
                for ch in range(self.ch):
                    zList.append(valSigned)
        elif method == 'fold':
            sign = 1 # 1 is positive
            for x in xList:
                val = abs(unit.denorm(x, -max, max)) # abs of full range
                if val == 0: # only change sign at zero crossing
                    if sign == 1: sign = -1
                    else: sign = 1
                valSigned = val*sign
                # add a value for each channel
                for ch in range(self.ch):
                    zList.append(valSigned)
        #print zList
        return array.array("h", zList).tostring()
#-----------------------------------------------------------------||||||||||||--
class AudioFile:
"""object wrapper for an audio file
with intuitive, high level controls
"""
def __init__(self, absPath, ch=1, sr=44100, bytes=2):
"""specify bit depth with bytes, where 2 bytes == 16 bits
file exists, channel will be updated to appropriate value
"""
if not AIF: # check if modules loaded
raise ImportError, 'aif modules not available (%s)' % os.name
self.absPath = absPath
# store an envelope generator for convenience
self.envlGenObj = EnvelopeGenerator()
self.sampleGenObj = SampleGenerator()
# store initial settings
self.ch = ch
self.sr = sr
self.bytes = bytes
# setup the file
self._FILEOPEN = 0 # store if file is open or not to watch for errors
self._open(None) # None auto-determines what to do
self._close()
#-----------------------------------------------------------------------||--
# private methods for opening and closing file
def _open(self, mode='r', ch=None):
"""frame values must only be updated on opening a file
for reading; opeing for writing returns 0
"""
if self._FILEOPEN == 1: # file is already open:
self.aObj.close()
raise IOError, 'attempt to open open file.'
if mode == None: # determine by wether file exists or not
if os.path.exists(self.absPath): mode = 'r'
else: mode = 'w'
# mark file as open
self._FILEOPEN = 1
if mode == 'r':
assert os.path.exists(self.absPath)
self.aObj = aifc.open(self.absPath, mode)
# only update when opening for reading, as strange results if
# open for writing (0 frame length given)
self._update()
elif mode == 'w':
self.aObj = aifc.open(self.absPath, 'w')
if ch != None: self.ch = ch # coerce channel if provided
self.aObj.setnchannels(self.ch)
self.aObj.setsampwidth(self.bytes)
self.aObj.setframerate(self.sr)
def _close(self):
if self._FILEOPEN == 0: # file is already closed
raise IOError, 'attempt to close a closed file.'
self._update()
#print 'closing file: frames %s, channels %s' % (
# self.frames, self.ch)
self.aObj.close()
del self.aObj
self._FILEOPEN = 0
def _update(self):
"""always call update when opening/closing file"""
assert self._FILEOPEN == 1
self.ch = self.aObj.getnchannels()
self.sr = self.aObj.getframerate()
self.bytes = self.aObj.getsampwidth() # 1 is 8, 2 is 16,
self.frames = self.aObj.getnframes()
#print _MOD, 'update: found %s frames' % (self.frames)
# update values in sample gen obj
self.sampleGenObj.update(self.ch, self.sr, self.bytes)
#-----------------------------------------------------------------------||--
# methods that use open/close above to wrap public methods
def getSize(self):
# update should not be necessary
#self._open('r')
#self._close()
return self.frames
def getDur(self):
"""get duration in seconds"""
return frameToSec(self.frames, self.sr)
def getData(self, pos=0, frames=None):
"""note: when pos != 0, a frame size of None may not get all remaining
samples, but request samples that are not there"""
# read some data
if frames == None: # get all data
frames = self.frames
self._open('r') # open to read data
self.aObj.setpos(pos)
data = self.aObj.readframes(frames)
self._close()
return data
def reSize(self, size):
"""resize file to desired frames"""
if self.frames == size: pass
elif self.frames < size: # add difference
frameAdd = size - self.frames
self._open('r')
data = self.aObj.readframes(self.frames)
self._close()
# this data will be of len 2 times frames (2 bytes per frame)
newData = data + self.sampleGenObj.silence(frameAdd)
self._open('w')
self.aObj.writeframesraw(newData)
self._close()
else:
self._open('r')
data = self.aObj.readframes(size)
self._close()
self._open('w')
self.aObj.writeframesraw(data)
self._close()
def reChannel(self, ch, side=0):
"""re channelize a file
if side ==0, keep left, otherwise, right"""
if ch == self.ch: pass
elif self.ch == 2 and ch == 1: # make mono
self._open('r')
data = self.aObj.readframes(self.frames) # get all
split = self.sampleGenObj.split(data) # get each side
self._close()
self._open('w', ch) # coerce to 1 channel
self.aObj.writeframesraw(split[side])
self._close()
elif self.ch == 1 and ch == 2: # make stereo
self._open('r')
data = self.aObj.readframes(self.frames)
newData = self.sampleGenObj.interleave(data, data) # get each side
self._close()
self._open('w', ch) # coerce to 1 channel
self.aObj.writeframesraw(newData)
self._close()
else:
raise ValueError, 'incompatible channel conversioin'
def fillNoise(self):
"""pos is start position w/n file"""
data = self.sampleGenObj.noise(self.frames)
self._open('w')
self.aObj.writeframesraw(data)
self._close()
def fillSilence(self):
"""pos is start position w/n file"""
data = self.sampleGenObj.silence(self.frames)
self._open('w')
self.aObj.writeframesraw(data)
self._close()
def fillDataRaw(self, data):
"""file w/ raw data in the store format, signed in strings"""
self._open('w')
self.aObj.writeframesraw(data)
self._close()
def fillDataUnit(self, unitList, method=None):
"""fill file w/ a list of unit interval values
methods are direct, reflect, and fold
None defaults to direct
"""
#print _MOD, 'fillDataUnit got unitList;', unitList[0:10]
data = self.sampleGenObj.unitSynthesizer(unitList, method)
self._open('w')
self.aObj.writeframesraw(data)
self._close()
#print _MOD, 'fillDataUnit done'
def clear(self):
"""erase all sample data w/n the audio file
write one frame of silence"""
data = self.sampleGenObj.silence(1)
self._open('w')
self.aObj.writeframesraw(data)
self._close()
def padSilence(self, frames, position='front'):
"""pos is start position w/n file
can be front or rear"""
dataPad = self.sampleGenObj.silence(frames)
dataOld = self.getData() # default is all data
if position == 'front':
data = dataPad + dataOld
if position == 'rear':
data = dataOld + dataPad
self._open('w') # over-write existing data
self.aObj.writeframesraw(data)
self._close()
def insertNoise(self, pos, frames, amp=1):
"""insert some noise at a given amp into the file at position
specified in frames. dur specified in frames
not: dur applied to stereo channel does seem to last for the proper dur"""
if frames + pos > self.frames: raise ValueError, 'bad position size'
noiseData = self.sampleGenObj.noise(frames, amp)
srcLead = self.getData(0, pos-1) # get data before noise
srcPost = self.getData(pos+frames, self.getSize()-(pos+frames))
# insert noise data inbetween lead and post
data = srcLead + noiseData + srcPost
self._open('w') # over-write existing data
self.aObj.writeframesraw(data)
self._close()
    def insertMix(self, pos, insertData, method=None):
        """Mix `insertData` into the file starting at frame `pos`.

        insertData may be a unit-interval list (values between 0 and 1,
        synthesized via unitSynthesizer with `method`) or a packed
        sample string assumed to match the file's channel format. Both
        the existing data (x) and the inserted data (y) are zero-padded
        so they align and have equal length, then summed and written
        back. Negative positions (inserting before frame 0) are not
        supported.
        """
        # zero position measured from x (the existing file data)
        xData = self.getData()
        xSize = copy.copy(self.frames)
        if drawer.isStr(insertData):
            # packed string: frame count is length / (bytes * channels)
            ySize = arrayLenToFrameLen(len(insertData), self.bytes, self.ch)
            yData = insertData
        else: #must be a unit list
            ySize = len(insertData) # channel does not matter in unit list
            yData = self.sampleGenObj.unitSynthesizer(insertData, method)
        #print _MOD, 'x, y size', xSize, ySize
        if pos == 0: # just pad end of shorter
            if ySize == xSize:
                _caseStr = 'a' # this is just for debugging
                xFinal = xData
                yFinal = yData
            elif ySize > xSize : # x needs pad after
                _caseStr = 'b'
                dif = ySize - xSize
                xFinal = xData + self.sampleGenObj.silence(dif)
                yFinal = yData
            elif ySize < xSize : # y needs pad after
                _caseStr = 'c'
                dif = xSize - ySize
                xFinal = xData
                yFinal = yData + self.sampleGenObj.silence(dif)
        else:
            if ySize >= xSize:
                # insert extends past the end of x: pad x to cover it
                _caseStr = 'd'
                posRemain = (pos + ySize) - xSize
                xFinal = xData + self.sampleGenObj.silence(posRemain)
                yFinal = self.sampleGenObj.silence(pos) + yData
            elif ySize < xSize: # x needs pad after
                if pos + ySize == xSize:
                    # NOTE(review): padding x by `pos` makes xFinal
                    # longer than yFinal in this branch; xRemain = 0
                    # looks intended -- confirm before relying on it
                    _caseStr = 'e'
                    xRemain = pos # postpend to x
                    yRemain = 0
                elif pos + ySize < xSize: # need yRemain
                    _caseStr = 'f'
                    xRemain = 0
                    yRemain = xSize - (pos + ySize)
                elif pos + ySize > xSize: # need yRemain
                    _caseStr = 'g'
                    xRemain = (pos + ySize) - xSize
                    yRemain = 0
                xFinal = xData + self.sampleGenObj.silence(xRemain)
                yFinal = (self.sampleGenObj.silence(pos) + yData + 
                            self.sampleGenObj.silence(yRemain))
        #print _MOD, 'resize case:', _caseStr
        data = self.sampleGenObj.mix(xFinal, yFinal)
        self._open('w') # over-write existing data
        self.aObj.writeframesraw(data)
        self._close()
def envelopeSymmetric(self, fadeLen):
self._open('r')
envelope = self.envlGenObj.symmetric(self.frames, fadeLen)
data = self.aObj.readframes(self.frames) # get all frames
self._close()
self._open('w')
newData = self.sampleGenObj.multiply(data, envelope)
self.aObj.writeframesraw(newData)
self._close()
#-----------------------------------------------------------------------||--
# methods for producing new files form existing ones
def extract(self, newPath, start, length, fadeLen=None, ch=None):
""" extract a segment of this file to a new file
start, length, and fadeLen are all in frames
channel is ch in destination, forced from stereo if needed
returns the new object
"""
assert newPath != self.absPath # make sure not the same
self._open('r') # open to read data
if start + length > self.frames:
self._close()
raise ValueError, 'bad start and frame length'
self.aObj.setpos(start)
data = self.aObj.readframes(length)
self._close()
# get same format as current file
newObj = AudioFile(newPath, self.ch, self.sr, self.bytes)
newObj.fillDataRaw(data)
if ch != None:
newObj.reChannel(ch)
if fadeLen != None:
newObj.envelopeSymmetric(fadeLen)
return newObj
#-----------------------------------------------------------------------||--
# high level methods that do not open/close; only call above methods
def testAmp(self, pos, frames, rmsThresh=.08, maxThresh=.50):
"""look at peak and average values and determine if there is enough
audio to keep"""
data = self.getData(pos, frames)
if len(data) == 0:
print 'no data: %s: %s, %s' % (self.frames, pos, frames)
result = 0 # no data to get
absMax = byteToInt(self.bytes)
max = audioop.max(data, self.bytes)
rms = audioop.rms(data, self.bytes)
if rms >= (absMax * rmsThresh) or max >= (absMax * maxThresh):
result = 1
else:
rmsPct = round((rms / float(absMax)), 3)
print _MOD, 'lowamp: rms %s max %s (rmsPct %.3f)' % (str(rms).ljust(5),
str(max).ljust(5), rmsPct)
result = 0
return result
def findShard(self, length, maxAttempt=20):
"""find a random section of this file file, of length given in frames
that passes the amp test"""
# read data
frames = self.getSize()
rangeMax = frames - length
if rangeMax <= 0:
print _MOD, 'findShard: self.frames, length %s %s' % (
self.frames, length)
return None # skip
frameRange = range(0, rangeMax)
for i in range(0, maxAttempt):
trialStart = random.choice(frameRange)
if self.testAmp(trialStart, length): # if passes
return trialStart
return None # if nothing found
#-----------------------------------------------------------------------||--
def play(self):
osTools.openMedia(self.absPath)
#-----------------------------------------------------------------||||||||||||--
class ShardHarvest:
def __init__(self, srcDir, dstDir, audioLength, fadeLength=None):
"""audio and fade lengths are in secondss
will get all aiff files in the srcDir"""
self.srcDir = srcDir
self.dstDir = dstDir
if not os.path.exists(self.dstDir):
osTools.mkdir(self.dstDir)
self.audioFrame = secToFrame(audioLength)
if fadeLength != None:
self.fadeFrame = secToFrame(fadeLength)
else:
self.fadeFrame = None # turns off fades
obj = fileTools.AllFiles(self.srcDir, ['aif', 'aiff'])
self.srcFiles = obj.report()
# make a list indices to get src files from; will randomize
self.srcKeys = range(0, len(self.srcFiles))
self.dstFiles = []
def _fileNameStr(self, i):
if i < 10: # must use double f to get full aiff
return '00%i.aiff' % i
elif i < 100:
return '0%i.aiff' % i
else:
return '%i.aiff' % i
def gather(self, fragments=20, indexStart=0):
"""go through files and find a shard; write the new new file, append
to dstFiles
indexStart is used to determine file naming;"""
srcIndex = 0 # use to get from srcKeys
random.shuffle(self.srcKeys)
srcLen = len(self.srcKeys)
used = {} # dictionary of used file paths, value is number of uses
for i in range(indexStart, (fragments+indexStart)):
if i % 10 == 0: # report every ten
print _MOD, 'current index: %s' % i
srcPath = self.srcFiles[self.srcKeys[(srcIndex % srcLen)]]
srcIndex = srcIndex + 1
aObj = AudioFile(srcPath)
start = aObj.findShard(self.audioFrame)
if start == None:
print 'no audio found in %s' % srcPath
continue
dstPath = os.path.join(self.dstDir, self._fileNameStr(i))
ch = 1 # force mono output
b = aObj.extract(dstPath, start, self.audioFrame, self.fadeFrame, ch)
self.dstFiles.append(dstPath)
return self.dstFiles
#-----------------------------------------------------------------||||||||||||--
class EncodeChain:
def __init__(self, src, steps=999, bitRateArray=[64]):
"""encode and re-encode, appending to a file cyclically"""
self.steps = steps
self.bitRateArray = bitRateArray
self.src = src
self.dir, name = os.path.split(src)
self.nameStub, ext = osTools.extSplit(name)
#self.ref = os.path.join(dir, '%s%s' % (nameStub+'-ref', ext))
#self.storage = os.path.join(dir, '%s%s' % (nameStub+'-storage', ext))
def run(self):
srcObj = AudioFile(self.src)
frames = srcObj.getSize()
print _MOD, 'length', frames
#refObj = srcObj.extract(self.ref, 0, frames) # store a version
#storageObj = srcObj.extract(self.storage, 0, frames)
#print _MOD, 'duration', storageObj.getDur()
br = self.bitRateArray[0]
encodeSrcPath = copy.copy(self.src)
for i in range(self.steps):
tempEnc = os.path.join(self.dir, '%s%s' % (self.nameStub+'-enc'+str(i),
'.mp3'))
junk, tempEncName = os.path.split(tempEnc)
tempDec = os.path.join(self.dir, '%s%s' % (self.nameStub+'-dec'+str(i),
'.wav'))
junk, tempDecName = os.path.split(tempDec)
postMp3 = encodeMp3(encodeSrcPath, None, br, '', '', '', '',
tempEncName, 9) # quality set to 9
postWav = decodeMp3(postMp3, None, tempDecName, 9)
postAif = soxConvert(postWav, '.wav', '.aif')
osTools.rm(postWav)
encodeSrcPath = postAif # store re-decoded aif path
# add to existing file
#encStepObj = AudioFile(postAif)
# get stored data, and then get new data
#tempStart = storageObj.getData(0, storageObj.getSize())
#tempEnd = encStepObj.getData(0, encStepObj.getSize())
# can remove all objects and files no longer necessary
#del encStepObj
#osTools.rm(postMp3)
# replace all data in storage object
#storageObj.fillDataRaw(tempStart+tempEnd)
#print _MOD, 'duration', storageObj.getDur()
#-----------------------------------------------------------------||||||||||||--
def waterMark(src, spotWidth=70, spotDur=1.05, spotAmp=.20):
"""provide an auido file, add noise to watermark
spotWidth is the segment widht (in sec) in which a wm may occur
spotDur in seconds"""
if not os.path.exists(src): raise ValueError, 'no such file'
af = AudioFile(src)
dur = af.getDur() # dur is in seconds
# find total spots
totalSpots = int(round(float(dur) / spotWidth))
if totalSpots == 0: totalSpots = 1
frames = af.getSize()
fps = af.sr # frames per second
spotFrames = int(round(af.sr * spotDur))
# iterate over fraomes in minutes sized chunks
pos = 0
for q in range(0, totalSpots):
# pick a random starting point between 10 and 40 percent
# of spotWidth
min = int(round((.1*spotWidth)))
max = int(round((.3*spotWidth)))
secIn = random.choice(range(min, max))
secOut = spotWidth-secIn
shift = fps * secIn
pos = pos + shift
af.insertNoise(pos, spotFrames, spotAmp)
print _MOD, af.absPath, '(%s)' % frameToSec(pos)
# shift to end of second
pos = pos + (fps * secOut)
#-----------------------------------------------------------------||||||||||||--
# utility functions
def fileDur(srcPath):
    """Return the duration, in seconds, of the audio file at srcPath."""
    return AudioFile(srcPath).getDur()
#-----------------------------------------------------------------||||||||||||--
def TestOld():
    """Ad-hoc driver for harvesting a shard library.

    NOTE(review): source/destination paths are hard-coded to a local
    volume and the commented-out lines record previous harvest settings;
    this is scratch code, not part of the public API."""
    src = '/Volumes/ydisc/_sync/iport/shardSrc/'
    dst = '/Volumes/ydisc/_sync/iport/shardDst/shardBassB-136/'
    if not os.path.exists(dst):
        osTools.mkdir(dst)
    indexStart = 0
    # original tests, base 60bpm
    #a = ShardHarvest(src, dst, .120, .012)
    #a = ShardHarvest(src, dst, .060, .006)
    #a = ShardHarvest(src, dst, .030, .003)
    #a = ShardHarvest(src, dst, .015, .0015)
    # bass 220 bpm
    #a = ShardHarvest(src, dst, 0.0170454545455, .0017045) # 500
    #a = ShardHarvest(src, dst, 0.0340909090909, .0034091) # 500
    #a = ShardHarvest(src, dst, 0.0681818181818, .0068182) # 300
    a = ShardHarvest(src, dst, 0.1363636363636, .0136364) # 300
    # reccomended numbers between 300 and 500 (more for shorter)
    print a.gather(300, indexStart)
#-----------------------------------------------------------------||||||||||||--
class Test(unittest.TestCase):
    """Placeholder test case so this module participates in the suite."""
    def runTest(self):
        pass
    def testDummy(self):
        # trivially true; keeps the runner wiring exercised
        self.assertEqual(True, True)
#-----------------------------------------------------------------||||||||||||--
if __name__ == '__main__':
    # run via the project's shared test harness rather than unittest.main()
    from athenaCL.test import baseTest
    baseTest.main(Test)
| [
"christopher.ariza@13181586-94c9-11de-97c9-d5744c8abab9"
] | christopher.ariza@13181586-94c9-11de-97c9-d5744c8abab9 |
65b42d3a6a40c63cc71527b1616cd9e566132322 | a317e87c54e040f77d2b45358b24c60a8698e0df | /LearningDjango/LearningDjango/settings.py | 118df243d49802c613d2f0ef760e107ccdf2b788 | [] | no_license | beiou315/HelloDjango | 4fc9dfb6d13a9422fd63dbffb09d29e8a0db1a13 | c4c6db5a66316a310260504b309eee2fa59e04fd | refs/heads/master | 2021-01-24T19:23:59.564479 | 2018-09-11T01:25:40 | 2018-09-11T01:25:40 | 123,240,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,121 | py | """
Django settings for LearningDjango project.
Generated by 'django-admin startproject' using Django 1.11.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed in source control; for any real
# deployment move it into an environment variable.
SECRET_KEY = 'cril^a!k+n22d-jx=7&v3_4bgdxpepa1u)zk96uyi&68(=8@@='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty is acceptable only while DEBUG is True; production must list
# the hostnames this site serves.
ALLOWED_HOSTS = []
# Application definition
# Only Django's stock apps are enabled; no project app is registered here.
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
# Default Django 1.11 middleware stack, in request-processing order.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'LearningDjango.urls'
# APP_DIRS=True: templates are discovered inside each app's templates/ dir.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'LearningDjango.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# SQLite file database for local development.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"beiou315@163.com"
] | beiou315@163.com |
28be41e4af0eff7f6b8524c4520483337cf24def | 66061cb2ab51f7ba78fd414347b622f39a5df292 | /rbm.py | 47ad6c961aeef9e4a1953dcb69a699341e0f9ab0 | [] | no_license | jackal092927/theano_ML | 8e28b4652ab0dd90f4ec94f70864030040ed9f2b | 2acb0db4885de0cea677debbbebed22f32d17187 | refs/heads/master | 2021-03-13T00:01:12.266842 | 2014-10-14T02:50:11 | 2014-10-14T02:50:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,962 | py | """This tutorial introduces restricted boltzmann machines (RBM) using Theano.
Boltzmann Machines (BMs) are a particular form of energy-based model which
contain hidden variables. Restricted Boltzmann Machines further restrict BMs
to those without visible-visible and hidden-hidden connections.
"""
import cPickle
import gzip
import time
import PIL.Image
import numpy
import theano
import theano.tensor as T
import os
from theano.tensor.shared_randomstreams import RandomStreams
#from utils import tile_raster_images
from logistic_sgd import load_data
class RBM(object):
    """Restricted Boltzmann Machine (RBM) """
    def __init__(self, input=None, n_visible=784, n_hidden=500, \
        W=None, hbias=None, vbias=None, numpy_rng=None,
        theano_rng=None):
        """
        RBM constructor. Defines the parameters of the model along with
        basic operations for inferring hidden from visible (and vice-versa),
        as well as for performing CD updates.

        :param input: None for standalone RBMs or symbolic variable if RBM is
        part of a larger graph.

        :param n_visible: number of visible units

        :param n_hidden: number of hidden units

        :param W: None for standalone RBMs or symbolic variable pointing to a
        shared weight matrix in case RBM is part of a DBN network; in a DBN,
        the weights are shared between RBMs and layers of a MLP

        :param hbias: None for standalone RBMs or symbolic variable pointing
        to a shared hidden units bias vector in case RBM is part of a
        different network

        :param vbias: None for standalone RBMs or a symbolic variable
        pointing to a shared visible units bias
        """
        self.n_visible = n_visible
        self.n_hidden = n_hidden
        if numpy_rng is None:
            # create a number generator
            numpy_rng = numpy.random.RandomState(1234)
        if theano_rng is None:
            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
        if W is None:
            # W is initialized with `initial_W` which is uniformely
            # sampled from -4*sqrt(6./(n_visible+n_hidden)) and
            # 4*sqrt(6./(n_hidden+n_visible)) the output of uniform if
            # converted using asarray to dtype theano.config.floatX so
            # that the code is runable on GPU
            initial_W = numpy.asarray(numpy_rng.uniform(
                      low=-4 * numpy.sqrt(6. / (n_hidden + n_visible)),
                      high=4 * numpy.sqrt(6. / (n_hidden + n_visible)),
                      size=(n_visible, n_hidden)),
                      dtype=theano.config.floatX)
            # theano shared variables for weights and biases
            W = theano.shared(value=initial_W, name='W', borrow=True)
        if hbias is None:
            # create shared variable for hidden units bias
            hbias = theano.shared(value=numpy.zeros(n_hidden,
                                                    dtype=theano.config.floatX),
                                  name='hbias', borrow=True)
        if vbias is None:
            # create shared variable for visible units bias
            vbias = theano.shared(value=numpy.zeros(n_visible,
                                                    dtype=theano.config.floatX),
                                  name='vbias', borrow=True)
        # initialize input layer for standalone RBM or layer0 of DBN
        self.input = input
        if not input:
            self.input = T.matrix('input')
        self.W = W
        self.hbias = hbias
        self.vbias = vbias
        self.theano_rng = theano_rng
        # **** WARNING: It is not a good idea to put things in this list
        # other than shared variables created in this function.
        self.params = [self.W, self.hbias, self.vbias]
    def free_energy(self, v_sample):
        ''' Function to compute the free energy:
        F(v) = -vbias.v - sum_j softplus((v.W + hbias)_j) '''
        wx_b = T.dot(v_sample, self.W) + self.hbias
        vbias_term = T.dot(v_sample, self.vbias)
        hidden_term = T.sum(T.log(1 + T.exp(wx_b)), axis=1)
        return -hidden_term - vbias_term
    def propup(self, vis):
        '''This function propagates the visible units activation upwards to
        the hidden units

        Note that we return also the pre-sigmoid activation of the
        layer. As it will turn out later, due to how Theano deals with
        optimizations, this symbolic variable will be needed to write
        down a more stable computational graph (see details in the
        reconstruction cost function)

        '''
        pre_sigmoid_activation = T.dot(vis, self.W) + self.hbias
        return [pre_sigmoid_activation, T.nnet.sigmoid(pre_sigmoid_activation)]
    def sample_h_given_v(self, v0_sample):
        ''' This function infers state of hidden units given visible units '''
        # compute the activation of the hidden units given a sample of
        # the visibles
        pre_sigmoid_h1, h1_mean = self.propup(v0_sample)
        # get a sample of the hiddens given their activation
        # Note that theano_rng.binomial returns a symbolic sample of dtype
        # int64 by default. If we want to keep our computations in floatX
        # for the GPU we need to specify to return the dtype floatX
        h1_sample = self.theano_rng.binomial(size=h1_mean.shape,
                                             n=1, p=h1_mean,
                                             dtype=theano.config.floatX)
        return [pre_sigmoid_h1, h1_mean, h1_sample]
    def propdown(self, hid):
        '''This function propagates the hidden units activation downwards to
        the visible units

        Note that we return also the pre_sigmoid_activation of the
        layer. As it will turn out later, due to how Theano deals with
        optimizations, this symbolic variable will be needed to write
        down a more stable computational graph (see details in the
        reconstruction cost function)

        '''
        pre_sigmoid_activation = T.dot(hid, self.W.T) + self.vbias
        return [pre_sigmoid_activation, T.nnet.sigmoid(pre_sigmoid_activation)]
    def sample_v_given_h(self, h0_sample):
        ''' This function infers state of visible units given hidden units '''
        # compute the activation of the visible given the hidden sample
        pre_sigmoid_v1, v1_mean = self.propdown(h0_sample)
        # get a sample of the visible given their activation
        # Note that theano_rng.binomial returns a symbolic sample of dtype
        # int64 by default. If we want to keep our computations in floatX
        # for the GPU we need to specify to return the dtype floatX
        v1_sample = self.theano_rng.binomial(size=v1_mean.shape,
                                             n=1, p=v1_mean,
                                             dtype=theano.config.floatX)
        return [pre_sigmoid_v1, v1_mean, v1_sample]
    def gibbs_hvh(self, h0_sample):
        ''' This function implements one step of Gibbs sampling,
            starting from the hidden state'''
        pre_sigmoid_v1, v1_mean, v1_sample = self.sample_v_given_h(h0_sample)
        pre_sigmoid_h1, h1_mean, h1_sample = self.sample_h_given_v(v1_sample)
        return [pre_sigmoid_v1, v1_mean, v1_sample,
                pre_sigmoid_h1, h1_mean, h1_sample]
    def gibbs_vhv(self, v0_sample):
        ''' This function implements one step of Gibbs sampling,
            starting from the visible state'''
        pre_sigmoid_h1, h1_mean, h1_sample = self.sample_h_given_v(v0_sample)
        pre_sigmoid_v1, v1_mean, v1_sample = self.sample_v_given_h(h1_sample)
        return [pre_sigmoid_h1, h1_mean, h1_sample,
                pre_sigmoid_v1, v1_mean, v1_sample]
    def get_cost_updates(self, lr=0.1, persistent=None, k=1):
        """This functions implements one step of CD-k or PCD-k

        :param lr: learning rate used to train the RBM

        :param persistent: None for CD. For PCD, shared variable
            containing old state of Gibbs chain. This must be a shared
            variable of size (batch size, number of hidden units).

        :param k: number of Gibbs steps to do in CD-k/PCD-k

        Returns a proxy for the cost and the updates dictionary. The
        dictionary contains the update rules for weights and biases but
        also an update of the shared variable used to store the persistent
        chain, if one is used.

        """
        # compute positive phase
        pre_sigmoid_ph, ph_mean, ph_sample = self.sample_h_given_v(self.input)
        # decide how to initialize persistent chain:
        # for CD, we use the newly generate hidden sample
        # for PCD, we initialize from the old state of the chain
        if persistent is None:
            chain_start = ph_sample
        else:
            chain_start = persistent
        # perform actual negative phase
        # in order to implement CD-k/PCD-k we need to scan over the
        # function that implements one gibbs step k times.
        # Read Theano tutorial on scan for more information :
        # http://deeplearning.net/software/theano/library/scan.html
        # the scan will return the entire Gibbs chain
        [pre_sigmoid_nvs, nv_means, nv_samples,
         pre_sigmoid_nhs, nh_means, nh_samples], updates = \
            theano.scan(self.gibbs_hvh,
                    # the None are place holders, saying that
                    # chain_start is the initial state corresponding to the
                    # 6th output
                    outputs_info=[None, None, None, None, None, chain_start],
                    n_steps=k)
        # determine gradients on RBM parameters
        # not that we only need the sample at the end of the chain
        chain_end = nv_samples[-1]
        cost = T.mean(self.free_energy(self.input)) - T.mean(
            self.free_energy(chain_end))
        # We must not compute the gradient through the gibbs sampling
        gparams = T.grad(cost, self.params, consider_constant=[chain_end])
        # constructs the update dictionary
        for gparam, param in zip(gparams, self.params):
            # make sure that the learning rate is of the right dtype
            updates[param] = param - gparam * T.cast(lr,
                                                    dtype=theano.config.floatX)
        # NOTE(review): this truth-tests the shared variable itself (not
        # `is not None` as above); it relies on the object being truthy.
        if persistent:
            # Note that this works only if persistent is a shared variable
            updates[persistent] = nh_samples[-1]
            # pseudo-likelihood is a better proxy for PCD
            monitoring_cost = self.get_pseudo_likelihood_cost(updates)
        else:
            # reconstruction cross-entropy is a better proxy for CD
            monitoring_cost = self.get_reconstruction_cost(updates,
                                                           pre_sigmoid_nvs[-1])
        return monitoring_cost, updates
    def get_pseudo_likelihood_cost(self, updates):
        """Stochastic approximation to the pseudo-likelihood"""
        # index of bit i in expression p(x_i | x_{\i})
        bit_i_idx = theano.shared(value=0, name='bit_i_idx')
        # binarize the input image by rounding to nearest integer
        xi = T.round(self.input)
        # calculate free energy for the given bit configuration
        fe_xi = self.free_energy(xi)
        # flip bit x_i of matrix xi and preserve all other bits x_{\i}
        # Equivalent to xi[:,bit_i_idx] = 1-xi[:, bit_i_idx], but assigns
        # the result to xi_flip, instead of working in place on xi.
        xi_flip = T.set_subtensor(xi[:, bit_i_idx], 1 - xi[:, bit_i_idx])
        # calculate free energy with bit flipped
        fe_xi_flip = self.free_energy(xi_flip)
        # equivalent to e^(-FE(x_i)) / (e^(-FE(x_i)) + e^(-FE(x_{\i})))
        cost = T.mean(self.n_visible * T.log(T.nnet.sigmoid(fe_xi_flip -
                                                            fe_xi)))
        # increment bit_i_idx % number as part of updates
        updates[bit_i_idx] = (bit_i_idx + 1) % self.n_visible
        return cost
    def get_reconstruction_cost(self, updates, pre_sigmoid_nv):
        """Approximation to the reconstruction error

        Note that this function requires the pre-sigmoid activation as
        input. To understand why this is so you need to understand a
        bit about how Theano works. Whenever you compile a Theano
        function, the computational graph that you pass as input gets
        optimized for speed and stability. This is done by changing
        several parts of the subgraphs with others. One such
        optimization expresses terms of the form log(sigmoid(x)) in
        terms of softplus. We need this optimization for the
        cross-entropy since sigmoid of numbers larger than 30. (or
        even less then that) turn to 1. and numbers smaller than
        -30. turn to 0 which in terms will force theano to compute
        log(0) and therefore we will get either -inf or NaN as
        cost. If the value is expressed in terms of softplus we do not
        get this undesirable behaviour. This optimization usually
        works fine, but here we have a special case. The sigmoid is
        applied inside the scan op, while the log is
        outside. Therefore Theano will only see log(scan(..)) instead
        of log(sigmoid(..)) and will not apply the wanted
        optimization. We can not go and replace the sigmoid in scan
        with something else also, because this only needs to be done
        on the last step. Therefore the easiest and more efficient way
        is to get also the pre-sigmoid activation as an output of
        scan, and apply both the log and sigmoid outside scan such
        that Theano can catch and optimize the expression.

        """
        cross_entropy = T.mean(
                T.sum(self.input * T.log(T.nnet.sigmoid(pre_sigmoid_nv)) +
                (1 - self.input) * T.log(1 - T.nnet.sigmoid(pre_sigmoid_nv)),
                      axis=1))
        return cross_entropy
def test_rbm(learning_rate=0.1, training_epochs=15,
dataset='mnist.pkl.gz', batch_size=20,
n_chains=20, n_samples=10, output_folder='rbm_plots',
n_hidden=500):
"""
Demonstrate how to train and afterwards sample from it using Theano.
This is demonstrated on MNIST.
:param learning_rate: learning rate used for training the RBM
:param training_epochs: number of epochs used for training
:param dataset: path the the pickled dataset
:param batch_size: size of a batch used to train the RBM
:param n_chains: number of parallel Gibbs chains to be used for sampling
:param n_samples: number of samples to plot for each chain
"""
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
test_set_x, test_set_y = datasets[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
x = T.matrix('x') # the data is presented as rasterized images
rng = numpy.random.RandomState(123)
theano_rng = RandomStreams(rng.randint(2 ** 30))
# initialize storage for the persistent chain (state = hidden
# layer of chain)
persistent_chain = theano.shared(numpy.zeros((batch_size, n_hidden),
dtype=theano.config.floatX),
borrow=True)
# construct the RBM class
rbm = RBM(input=x, n_visible=28 * 28,
n_hidden=n_hidden, numpy_rng=rng, theano_rng=theano_rng)
# get the cost and the gradient corresponding to one step of CD-15
cost, updates = rbm.get_cost_updates(lr=learning_rate,
persistent=persistent_chain, k=15)
#################################
# Training the RBM #
#################################
if not os.path.isdir(output_folder):
os.makedirs(output_folder)
os.chdir(output_folder)
# it is ok for a theano function to have no output
# the purpose of train_rbm is solely to update the RBM parameters
train_rbm = theano.function([index], cost,
updates=updates,
givens={x: train_set_x[index * batch_size:
(index + 1) * batch_size]},
name='train_rbm')
plotting_time = 0.
start_time = time.clock()
# go through training epochs
for epoch in xrange(training_epochs):
# go through the training set
mean_cost = []
for batch_index in xrange(n_train_batches):
mean_cost += [train_rbm(batch_index)]
print 'Training epoch %d, cost is ' % epoch, numpy.mean(mean_cost)
# Plot filters after each training epoch
plotting_start = time.clock()
# Construct image from the weight matrix
# image = PIL.Image.fromarray(tile_raster_images(
# X=rbm.W.get_value(borrow=True).T,
# img_shape=(28, 28), tile_shape=(10, 10),
# tile_spacing=(1, 1)))
image.save('filters_at_epoch_%i.png' % epoch)
plotting_stop = time.clock()
plotting_time += (plotting_stop - plotting_start)
end_time = time.clock()
pretraining_time = (end_time - start_time) - plotting_time
print ('Training took %f minutes' % (pretraining_time / 60.))
#################################
# Sampling from the RBM #
#################################
# find out the number of test samples
number_of_test_samples = test_set_x.get_value(borrow=True).shape[0]
# pick random test examples, with which to initialize the persistent chain
test_idx = rng.randint(number_of_test_samples - n_chains)
persistent_vis_chain = theano.shared(numpy.asarray(
test_set_x.get_value(borrow=True)[test_idx:test_idx + n_chains],
dtype=theano.config.floatX))
plot_every = 1000
# define one step of Gibbs sampling (mf = mean-field) define a
# function that does `plot_every` steps before returning the
# sample for plotting
[presig_hids, hid_mfs, hid_samples, presig_vis,
vis_mfs, vis_samples], updates = \
theano.scan(rbm.gibbs_vhv,
outputs_info=[None, None, None, None,
None, persistent_vis_chain],
n_steps=plot_every)
# add to updates the shared variable that takes care of our persistent
# chain :.
updates.update({persistent_vis_chain: vis_samples[-1]})
# construct the function that implements our persistent chain.
# we generate the "mean field" activations for plotting and the actual
# samples for reinitializing the state of our persistent chain
sample_fn = theano.function([], [vis_mfs[-1], vis_samples[-1]],
updates=updates,
name='sample_fn')
# create a space to store the image for plotting ( we need to leave
# room for the tile_spacing as well)
image_data = numpy.zeros((29 * n_samples + 1, 29 * n_chains - 1),
dtype='uint8')
for idx in xrange(n_samples):
# generate `plot_every` intermediate samples that we discard,
# because successive samples in the chain are too correlated
vis_mf, vis_sample = sample_fn()
print ' ... plotting sample ', idx
image_data[29 * idx:29 * idx + 28, :] = tile_raster_images(
X=vis_mf,
img_shape=(28, 28),
tile_shape=(1, n_chains),
tile_spacing=(1, 1))
# construct image
image = PIL.Image.fromarray(image_data)
image.save('samples.png')
os.chdir('../')
if __name__ == '__main__':
test_rbm()
| [
"jackal092927@gmail.com"
] | jackal092927@gmail.com |
2b6845c73b61c375a47a8357235a0008e3427bdc | 9d9f11ce0dc77a5b3ee3a9358a27b2c7d454e1f8 | /apps/users/urls.py | 367db33489c0b9eb006412202c738d1b48e449a1 | [] | no_license | samuelki/django-login-and-registration | e1f0f9c0d0415a033b35a83d6ad16112afa3bbc1 | c50af3f05c9fbde9df343431ce61fa1cc229d1bd | refs/heads/master | 2020-04-02T05:17:46.238036 | 2018-10-22T00:03:05 | 2018-10-22T00:03:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | from django.conf.urls import url
from . import views
# Route table for the users app: landing page, registration/login
# endpoints, the post-login home view, and logout.
urlpatterns = [
    url(r'^$', views.index),
    url(r'^users/register$', views.register),
    url(r'^users/login$', views.login),
    url(r'^home$', views.home),
    url(r'^logout$', views.logout)
] | [
"samuelki62@gmail.com"
] | samuelki62@gmail.com |
036be4f0e66df507bc9729da8b7dfcb53d6adff5 | 31bd0383e29e31e89653b421840593cffc61464f | /app/migrations/0002_auto_20210718_1139.py | 031966e5efa039a935798ad18ad42b04ebca47f4 | [] | no_license | ndrohith09/School-django | 32945086e315df0e024e8a3386755b67254c7f0a | a7664a260447cfcaaac9fa4228d0b1d73a3e8a00 | refs/heads/master | 2023-06-26T22:56:07.204672 | 2021-07-19T12:53:23 | 2021-07-19T12:53:23 | 387,463,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | # Generated by Django 3.1.1 on 2021-07-18 06:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated by Django's makemigrations; avoid hand-editing --
    change the model and regenerate instead."""

    dependencies = [
        ('app', '0001_initial'),
    ]

    operations = [
        # Narrow Admission.gender to a fixed choice set, defaulting to
        # '3' ('Prefer not to say').
        migrations.AlterField(
            model_name='admission',
            name='gender',
            field=models.CharField(choices=[('1', 'Male'), ('2', 'Female'), ('3', 'Prefer not to say')], default='3', max_length=20),
        ),
    ]
| [
"ndrohith09@gmail.com"
] | ndrohith09@gmail.com |
504b693788900fa8fe43fab87c1075ce5593cf3b | 6f2675eee55b7ebc5adf9c2176ced8cb59fc64d4 | /dataInterSrvKm/bak/20200113版本半接口半直连/billBoli600.py | 729c1391bc030f8184f66d7eb48bc789ce7a4078 | [] | no_license | wildmanwang/proDataInter | 8c2b65fa96ad45b21165d997b1769a28e12fc42a | f5a1f1fb195c66bf586bd999465c7e3b16453369 | refs/heads/master | 2023-06-07T11:57:16.763251 | 2023-06-03T08:54:56 | 2023-06-03T08:54:56 | 157,559,747 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,164 | py | # -*- coding:utf-8 -*-
"""
"""
__author__ = "Cliff.wang"
from superBillDCB import SuperBillDCB
from interMssql import MSSQL
import time, json
class BillBoli600(SuperBillDCB):
def __init__(self, sett):
super().__init__(sett)
self.station = [1,2,3,4,5,6,7,8] # 可用基站
self.db = MSSQL(self.sett.serverHost, self.sett.serverUser, self.sett.serverPwd, self.sett.serverDb)
def _getStation(self):
"""
获取基站号
:return:
"""
rtnData = {
"result":False, # 逻辑控制 True/False
"dataString":"", # 字符串
"dataNumber":0, # 数字
"info":"", # 信息
"entities": {}
}
try:
conn = self.db.GetConnect()
cur = conn.cursor()
if not cur:
rtnData["info"] = "基础数据获取失败:{name}数据库[{db}]连接失败".format(name=self.sett.serverName, db=self.sett.serverDb)
else:
lsSql = "select sys_var_value from sys_t_system where sys_var_id = 'dcb_stationList'"
cur.execute(lsSql)
rsData = cur.fetchall()
if len(rsData) > 0:
staStr = rsData[0][0]
else:
staStr = "[]"
staList = json.loads(staStr)
if len(staList) == 0:
rtnData["info"] = "基站繁忙,请稍后再试"
else:
rtnData["dataNumber"] = staList.pop(0)
lsSql = "update sys_t_system set sys_var_value = '{value}' where sys_var_id = 'dcb_stationList'".format(value=json.dumps(staList))
cur.execute(lsSql)
conn.commit()
rtnData["result"] = True
except Exception as e:
rtnData["dataNumber"] = 0
rtnData["info"] = str(e)
finally:
conn.close()
return rtnData
def _putStation(self, station):
"""
释放基站号
:param station:
:return:
"""
rtnData = {
"result":False, # 逻辑控制 True/False
"dataString":"", # 字符串
"dataNumber":0, # 数字
"info":"", # 信息
"entities": {}
}
try:
conn = self.db.GetConnect()
cur = conn.cursor()
if not cur:
rtnData["info"] = "基础数据获取失败:{name}数据库[{db}]连接失败".format(name=self.sett.serverName, db=self.sett.serverDb)
else:
lsSql = "select sys_var_value from sys_t_system where sys_var_id = 'dcb_stationList'"
cur.execute(lsSql)
rsData = cur.fetchall()
if len(rsData) > 0:
staStr = rsData[0][0]
staList = json.loads(staStr)
staList.append(station)
staList = list(set(staList))
staList.sort()
lsSql = "update sys_t_system set sys_var_value = '{value}' where sys_var_id = 'dcb_stationList'".format(value=json.dumps(staList))
cur.execute(lsSql)
conn.commit()
rtnData["result"] = True
else:
rtnData["info"] = "获取基站参数失败"
except Exception as e:
rtnData["info"] = str(e)
finally:
conn.close()
return rtnData
def userLogin(self, data):
"""
登录
:param data:{
"terminal":"", # 开台终端号(3位)
"factory":"", # 出厂号(10位)
"user":"", # 工号(4位)
"password":"" # 密码(8位)
}
:return:
"""
rtnData = {
"result":True, # 逻辑控制 True/False
"dataString":"", # 字符串
"dataNumber":0, # 数字
"info":"", # 信息
"entities": {}
}
# 获取基站
rtnData = self._getStation()
if rtnData["result"]:
iStation = rtnData["dataNumber"]
try:
# 参数检查
if len(self.sett.softNumber) > 0:
data["terminal"] = self.sett.softNumber
elif "terminal" not in data:
raise Exception("请传入参数:点菜宝编号")
sTerminal = (data["terminal"] + chr(32) * 3)[:3]
if len(self.sett.serialNumber) > 0:
data["factory"] = self.sett.serialNumber
elif "factory" not in data:
raise Exception("请传入参数:点菜宝序列号")
sFactory = ("0" * 10 + data["factory"])[-10:]
if len(self.sett.loginUser) > 0:
data["user"] = self.sett.loginUser
data["password"] = self.sett.loginPassword
elif "user" not in data:
raise Exception("请传入参数:用户及密码")
sUser = (data["user"] + chr(32) * 5)[:4]
sPassword = (data["password"] + chr(32) * 8)[:8]
# 生成开台请求数据
sCon = []
sCon.append("DL " + chr(32) + sTerminal)
sCon.append(sFactory + chr(32) + sUser + chr(32) + sPassword)
# sCon.append(sUser + chr(32) + sPassword)
# 开台请求写入文件,并通知餐饮服务
if rtnData["result"]:
rtnData = self._writeBusiData(iStation, sCon)
# 获取执行结果
if rtnData["result"]:
rtnData = self._readRtnData(iStation, "登录", sCon, 0, "", 1)
except Exception as e:
rtnData["result"] = False
rtnData["info"] = str(e)
finally:
# 释放基站
if "iStation" in locals():
self._putStation(iStation)
# 返回执行结果
return rtnData
def billOpen(self, data):
"""
开台
:param data:{
"terminal":"", # 开台终端号(3位)
"table":"", # 桌台号(4位)
"waiter":"", # 服务员号(5位)
"guestNum":0, # 客人数量(2位)
"factory":"" # 出厂号(后7/10位)
}
:return:
"""
rtnData = {
"result":True, # 逻辑控制 True/False
"dataString":"", # 字符串
"dataNumber":0, # 数字
"info":"", # 信息
"entities": {}
}
# 获取基站
rtnData = self._getStation()
if rtnData["result"]:
iStation = rtnData["dataNumber"]
try:
# 参数检查
if len(self.sett.softNumber) > 0:
data["terminal"] = self.sett.softNumber
elif "terminal" not in data:
raise Exception("请传入参数:点菜宝编号")
sTerminal = (data["terminal"] + chr(32) * 3)[:3]
if "table" in data:
sTable = (data["table"] + chr(32) * 4)[:4]
else:
rtnData["result"] = False
rtnData["info"] = "请传入桌台号"
if "waiter" in data:
sWaiter = (data["waiter"] + chr(32) * 5)[:5]
else:
sWaiter = chr(32) * 5
if "guestNum" in data:
sGuestNum = ("0" + str(int(data["guestNum"])))[-2:]
else:
sGuestNum = "01"
if len(self.sett.serialNumber) > 0:
data["factory"] = self.sett.serialNumber
elif "factory" not in data:
raise Exception("请传入参数:点菜宝序列号")
sFactory = ("0" * 10 + data["factory"])
# 生成开台请求数据
if rtnData["result"]:
sCon = []
sCon.append("KT " + chr(32) + sTerminal)
sCon.append(sTable + chr(32) + sGuestNum + chr(32) + sWaiter + chr(32) + sFactory[-7:] + chr(
32) + time.strftime("%H:%M:%S"))
# 开台请求,并获取反馈
if rtnData["result"]:
rtnData = self._writeBusiData(iStation, sCon)
# 获取执行结果
if rtnData["result"]:
rtnData = self._readRtnData(iStation, "开台", sCon, 1, "开台成功", 1)
except Exception as e:
rtnData["result"] = False
rtnData["info"] = str(e)
finally:
# 释放基站
if "iStation" in locals():
self._putStation(iStation)
# 返回执行结果
return rtnData
def billPut(self, data):
"""
点菜
:param data:{
"terminal":"", # 开台终端号(3位)
"table":"", # 桌台号+账单流水号(4+3=7位)
"factory":"", # 出厂号(4+4+2=10位)
"remark":"", # 整单备注(12位)
"item":[{
"food":"", # 菜品号(5位)
"qty":1, # 数量(4位)
"made":"", # 做法(12位)
"suit":"", # 套餐号(2位)
"waitUp":0 # 等叫标志(1位)
}]
}
:return:
"""
rtnData = {
"result":False, # 逻辑控制 True/False
"dataString":"", # 字符串
"dataNumber":0, # 数字
"info":"", # 信息
"entities": {}
}
# 获取基站
rtnData = self._getStation()
if rtnData["result"]:
iStation = rtnData["dataNumber"]
try:
# 参数检查
if len(self.sett.softNumber) > 0:
data["terminal"] = self.sett.softNumber
elif "terminal" not in data:
raise Exception("请传入参数:点菜宝编号")
sTerminal = (data["terminal"] + chr(32) * 3)[:3]
if "table" in data:
sTable = (data["table"] + chr(32) * 7)[:7]
else:
rtnData["result"] = False
rtnData["info"] = "请传入桌台号"
if len(self.sett.serialNumber) > 0:
data["factory"] = self.sett.serialNumber
elif "factory" not in data:
raise Exception("请传入参数:点菜宝序列号")
sFactory = ("0" * 10 + data["factory"])
if "remark" in data:
sRemark = (data["remark"] + chr(32) * 12)[:12]
else:
sRemark = chr(32) * 12
sFlow = time.strftime("%H:%M:%S")
# 生成开台请求数据
if rtnData["result"]:
sCon = []
sCon.append("DC " + chr(32) + sTerminal)
sCon.append(sTable + chr(32) + sFactory[:4] + chr(32) + chr(32) * 6 + sRemark + chr(32) + chr(
32) * 4 + sFlow + chr(32) + sFactory[4:8] + chr(32) + sFactory[8:10])
for line in data["item"]:
sFood = (line["food"] + chr(32) * 5)[:5]
sQty = (chr(32) * 4 + str(line["qty"]))[-4:]
if "made" in line:
sMade = (line["made"] + chr(32) * 12)[:12]
else:
sMade = chr(32) * 12
if "suit" in line:
suit = (line["suit"] + chr(32) * 2)[:2]
else:
suit = chr(32) * 2
if "waitUp" in line:
waitUp = (str(line["waitUp"]) + "0")[-1:]
else:
waitUp = "0"
sCon.append(
sTable + chr(32) + sFood + chr(32) + sQty + chr(32) + sMade + chr(32) + suit + chr(32) + waitUp)
# 开台请求写入文件,并通知餐饮服务
if rtnData["result"]:
rtnData = self._writeBusiData(iStation, sCon)
# 获取执行结果
if rtnData["result"]:
rtnData = self._readRtnData(iStation, "点菜", sCon, 1, "点菜成功", 1)
except Exception as e:
rtnData["result"] = False
rtnData["info"] = str(e)
finally:
# 释放基站
if "iStation" in locals():
self._putStation(iStation)
# 返回执行结果
return rtnData
| [
"cliff.w@qq.com"
] | cliff.w@qq.com |
080121153af9a45d9084cd5f5233cdfb821defe7 | 23af1e2b1f29be62926ed6a8e39b4462f07f5f2b | /atcoder.jp/abc086/abc086_b/Main.py | 2caffef31e6e42003299d780de9ca6f6f538b840 | [] | no_license | IKDrocket/Atcoder | 8ef382577a377a8f35890b24a49f681f00f2f047 | fc19379de2ddf62a61b67eda33bf8aa29d503685 | refs/heads/main | 2023-02-09T11:58:00.353304 | 2021-01-02T12:06:20 | 2021-01-02T12:06:20 | 318,876,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | a,b = input().split()
num = int(a+b)
for i in range(1,10101):
ans = i*i
if num == ans:
print("Yes")
break
else:
print("No")
| [
"ikdrocket@IKDrocket.local"
] | ikdrocket@IKDrocket.local |
5f17ba6614701f36534f286259cb976251bdb34c | fa72697feef44fdba1bf4e5c8064d25b65a6cb83 | /topo/diamond.py | 91559a7110a0c52ff2a46456a501017f4ea03302 | [
"Apache-2.0"
] | permissive | ravel-net/Faure | 6de4c022c078db579c13187cb83b79486004bb74 | 2f98c732e371ab14d001cd20e49a3f8fbcbf3d99 | refs/heads/main | 2023-08-12T11:12:21.078931 | 2021-10-04T17:24:47 | 2021-10-04T17:24:47 | 402,385,298 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | from mininet.topo import Topo
class DiamondTopo(Topo):
def __init__( self ):
Topo.__init__( self )
h1 = self.addHost('h1')
h2 = self.addHost('h2')
s1 = self.addSwitch('s1')
s2 = self.addSwitch('s2')
s3 = self.addSwitch('s3')
s4 = self.addSwitch('s4')
self.addLink(s1,h1)
self.addLink(s4,h2)
self.addLink(s1,s2)
self.addLink(s1,s3)
self.addLink(s2,s4)
self.addLink(s3,s4)
topos = { 'diamond': ( lambda: DiamondTopo() ) }
| [
"fangpinglan0116@gmail.com"
] | fangpinglan0116@gmail.com |
c31db9e2643724ed66331b721d6a77560de6209a | 06167f625464c898ac95e752694a5931b9a55a55 | /src/admission/migrations/0001_initial.py | bacece5228ade3f6e66d8337c4fae54aa72fdb6d | [] | no_license | nazmul629/school_management_system | 16e2003b652b14174d6f59b4682ca366275f3207 | d0ff759645d9ba8f88d2aa63dbc867e7713455ed | refs/heads/master | 2021-06-19T18:06:56.539454 | 2019-04-20T12:35:24 | 2019-04-20T12:35:24 | 182,307,917 | 1 | 0 | null | 2021-03-20T08:15:23 | 2019-04-19T18:22:11 | CSS | UTF-8 | Python | false | false | 1,699 | py | # Generated by Django 2.0 on 2019-04-19 16:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='class_section',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('section', models.CharField(max_length=10, unique=True)),
],
),
migrations.CreateModel(
name='Schoolallclass',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Class', models.IntegerField(unique=True)),
],
),
migrations.CreateModel(
name='StudentInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Student_name', models.CharField(max_length=50)),
('age', models.IntegerField()),
('gender', models.CharField(choices=[('m', 'Male'), ('f', 'Female')], max_length=10)),
('roll', models.IntegerField(unique=True)),
('fathers_name', models.CharField(max_length=50)),
('mothers_name', models.CharField(max_length=50)),
('address', models.TextField()),
('mobile', models.CharField(max_length=16)),
('Class', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='admission.Schoolallclass')),
],
),
]
| [
"nazmulhossain.qnh@gmail.com"
] | nazmulhossain.qnh@gmail.com |
9a7d14e536288cb4f31c5d1e079d4321e8ee53be | a5df1b1f24d1d0a8dd1fad8408bdfd82b88fe021 | /3.leetcode/7.Python 随机数生成.py | 0e324aa3f4f2671108e3033946507c6f4d1a94f9 | [] | no_license | safe-pro/DevSevOps | 7c80e4f872e46b8bea224560b70209f2204621e8 | 1f5402e1bfddd008e844b81c2662580d1eb04c3f | refs/heads/master | 2022-03-25T23:54:38.432718 | 2020-01-04T01:35:20 | 2020-01-04T01:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | # -*- coding: UTF-8 -*-
# 生成 0 ~ 9 之间的随机数
# 导入 random(随机数) 模块
import random
print(random.randint(0, 90)) | [
"1229216571@qq.com"
] | 1229216571@qq.com |
83a9f73fd0d79f7c343ce343751d0259403323c4 | 4b1d5c4e17e16b87d6f3f613f12037bd051f3389 | /django_app/urls.py | d443d2c247bd486da86975776eaec72705e18527 | [] | no_license | Kun07/practice | 320b8e4608d2b03dd92a239117bd98744c5ef6a9 | 3d54f78082ecf053c800f55576ca57d148511ffd | refs/heads/master | 2023-03-26T15:53:26.114958 | 2021-03-24T12:04:13 | 2021-03-24T12:04:13 | 350,783,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 754 | py | from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import url, include
from django.conf import settings
from django.conf.urls.static import static
from users import views as user_views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^register/', user_views.register, name='register'),
url(r'^profile/', user_views.profile, name='profile'),
url(r'^login/', auth_views.LoginView.as_view(template_name='users/login.html'), name='login'),
url(r'^logout/', auth_views.LogoutView.as_view(template_name='users/logout.html'), name='logout'),
url(r'^', include('blog.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | [
"moon.kunal32@gmail.com"
] | moon.kunal32@gmail.com |
3f675d6a2dbe464b9ebac1862cbbfd5c698ff968 | 56593402ffb61df9456dca4042507dccf427ba66 | /Allstate/Supervised_Models.py | 2cce16bd82d772bc3b15734261ba48a504f004d8 | [] | no_license | jon-mqn/allstate_comp | b21e3c7488c6f586fa1b98a5792e6f78720e39b1 | 474b1450549218b20781703db7b96e15b4835a1f | refs/heads/master | 2021-06-07T09:15:16.739688 | 2016-10-28T21:29:26 | 2016-10-28T21:29:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,047 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 28 15:04:47 2016
@author: jarektb
"""
import pandas as pd
class Supervised_Models:
def __init__(self, pandas_object):
self.pandas_data = pandas_object
def getLinearRegressionModel():
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import seaborn;
from sklearn.linear_model import LinearRegression
import pylab as pl
seaborn.set()
import numpy as np
np.random.seed(0)
X = np.random.random(size=(20, 1))
y = 3 * X.squeeze() + 2 + np.random.randn(20)
plt.plot(X.squeeze(), y, 'o');
model = LinearRegression()
model.fit(X, y)
# Plot the data and the model prediction
X_fit = np.linspace(0, 1, 100)[:, np.newaxis]
y_fit = model.predict(X_fit)
plt.plot(X.squeeze(), y, 'o')
plt.plot(X_fit.squeeze(), y_fit);
| [
"noreply@github.com"
] | jon-mqn.noreply@github.com |
4fe6555c2378c5acf140a59ab1b3df05901e15b5 | a731a228ce92db118f300bf6f6ce9b093b5799e9 | /tr.py | afc442b96555f1707167add6d434d3c29e66d948 | [] | no_license | sujuma/pyfiles | 46b31967589ac3cf6cc252378196764a4c242e17 | 39d4e6d4bbb2299aef7e65caa517c2523cad093c | refs/heads/master | 2020-03-28T10:44:25.640210 | 2018-09-10T11:20:08 | 2018-09-10T11:20:08 | 148,140,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | fruits = {
"first": "Apple",
"second": "Grapes",
"third": "Banana"
}
del(fruits["first"])
print(fruits)
| [
"sujumapro@gmail.com"
] | sujumapro@gmail.com |
7e8a8bb81e0a0470aaf10abf3e04291116d40773 | 59f5ec33f1055059f53aa04ee9aa1282282d2c04 | /depends/depends_communications.py | 9e82f742eb4d01f772becbc8ff6fe2ccfb2b97e0 | [
"BSD-3-Clause"
] | permissive | mottosso/dependsworkflow | fcd96c774fc668ae6794b3f528d7b94f16f99049 | 5ed30d2e29d97a1baad1068837fbee54988c8175 | refs/heads/master | 2021-01-15T18:41:33.852577 | 2014-08-21T16:15:42 | 2014-08-21T16:15:42 | 23,190,989 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,410 | py | #
# Depends
# Copyright (C) 2014 by Andrew Gardner & Jonas Unger. All rights reserved.
# BSD license (LICENSE.txt for details).
#
from PySide import QtCore, QtNetwork
"""
This module is responsible for inter-process communication between any two
external programs. It uses The QtNetwork module's QTcpSocket to send and
receive messages in the form of a Python string.
NOTE:
This module is far from complete, but it serves its limited purpose.
"""
# TODO: What is the best way to report errors from within a module that
# inherits from QObject?
###############################################################################
###############################################################################
class BidirectionalCommunicationObject(QtCore.QObject):
"""
This object holds a QTcpSocket object capable of connecting and communicating
with another process using a QTcpSocket on given ports. The port to listen
for incoming communications is opened immediately, but the port to broadcast
information is opened just before a message is sent and closed immediately
thereafter.
A 'stringReceived' QT signal is emitted when the object receives a Python
string from an external source.
"""
# Signals
stringReceived = QtCore.Signal(str)
def __init__(self, listenPort, broadcastPort=None, parent=None):
"""
"""
QtCore.QObject.__init__(self, parent)
# Messages, internal buffers, and data to keep
self.received = ""
self.toTransmit = ""
self.broadcastPort = broadcastPort
self.listenPort = listenPort
# A server to listen
self.tcpServer = QtNetwork.QTcpServer(self)
if not self.tcpServer.listen(port=listenPort):
print "Unable to start the server: %s." % self.tcpServer.errorString()
return
self.tcpServer.newConnection.connect(self._handleNewConnection)
# A socket to tell
self.tcpSocket = QtNetwork.QTcpSocket(self)
self.tcpSocket.connected.connect(self._composeMessage)
self.tcpSocket.error.connect(self._displayError)
###########################################################################
## Interface
###########################################################################
def sendString(self, message):
"""
The given string is transmitted on the object's broadcast port.
"""
self.toTransmit = message
self._sendTcpMessage()
def setBroadcastPort(self, newBP):
"""
Set the tcp port this object broadcast on. This port is not opened
until just before a message is sent.
"""
self.broadcastPort = newBP
def close(self):
"""
Explicitly stops the tcp server from listening for incoming messages.
"""
self.tcpServer.close()
###########################################################################
## Server side internals
###########################################################################
def _gatherMessage(self):
"""
Pull the most recent message out into self.received, insuring its length.
"""
# Read out the data and insure there's something there
byteArray = self.sender().readLine()
if not len(byteArray):
print "ERROR!"
return
# Pull the data packet size out of the first 16-bits & confirm it's all there
stream = QtCore.QDataStream(byteArray, QtCore.QIODevice.ReadOnly)
arraySize = stream.readUInt16()
if arraySize != len(byteArray)-2:
print "ERROR!"
# Recover the data packet as a Python string object & let everyone know you got something
self.received = stream.readString()
self.stringReceived.emit(self.received)
# Disconnect
self.sender().disconnectFromHost()
def _handleNewConnection(self):
"""
Get a handle on the next connection, setup its read callback & register
it for eventual cleanup.
"""
clientConnection = self.tcpServer.nextPendingConnection()
clientConnection.readyRead.connect(self._gatherMessage)
clientConnection.disconnected.connect(clientConnection.deleteLater)
###########################################################################
## Client side internals
###########################################################################
def _composeMessage(self):
"""
Create a message and pass it to the network socket.
"""
datagram = QtCore.QByteArray()
out = QtCore.QDataStream(datagram, QtCore.QIODevice.WriteOnly)
out.setVersion(QtCore.QDataStream.Qt_4_0)
out.writeUInt16(0)
out.writeString(self.toTransmit)
out.device().seek(0)
out.writeUInt16(datagram.size() - 2)
written = self.tcpSocket.write(datagram)
if written != datagram.size():
print "BidirectionalCommunication error - message not sent"
self.tcpSocket.flush()
def _sendTcpMessage(self):
"""
Closes then opens a connection to the host - when the connection is made,
sendMessage kicks in and sends whatever is stored in self.toTransmit
"""
self.tcpSocket.abort()
self.tcpSocket.connectToHost('Localhost', self.broadcastPort)
def _displayError(self, socketError):
"""
Prints the given error message to stdout.
"""
if socketError == QtNetwork.QAbstractSocket.RemoteHostClosedError:
pass
elif socketError == QtNetwork.QAbstractSocket.HostNotFoundError:
print "The host was not found. Please check the host name and port settings."
elif socketError == QtNetwork.QAbstractSocket.ConnectionRefusedError:
print "The connection was refused by the peer."
else:
print "The following error occurred: %s." % self.tcpSocket.errorString()
| [
"gardner@dhcp243-143.itn.liu.se"
] | gardner@dhcp243-143.itn.liu.se |
904a5871b8f4528bb33480917751fdc269b31db5 | 05d186de4b6ddd3f14f19f3958da3869945a938a | /wbddata/admin.py | 41f5dd62857b5d64e5abe2bd612a76d70a4f9338 | [
"MIT"
] | permissive | JimmyBisese/wbd | 6f30bc443497027070c7110e3b2a030ddc8d810d | 9ee92325ee45fb2ab1116829f4be4338c4adf88c | refs/heads/master | 2020-03-31T22:07:20.837845 | 2019-02-26T15:47:17 | 2019-02-26T15:47:17 | 152,606,580 | 1 | 1 | null | 2018-10-11T14:45:47 | 2018-10-11T14:37:27 | CSS | UTF-8 | Python | false | false | 113 | py | from django.contrib import admin
from .models import HUC, WBD
admin.site.register(HUC)
admin.site.register(WBD) | [
"james.bisese@tetratech.com"
] | james.bisese@tetratech.com |
6e7a8849b45d4e7ef435085fefc41204dd11f94a | bb150497a05203a718fb3630941231be9e3b6a32 | /framework/e2e/jit/test_Maxout_0.py | 34ed3c5b0baf796738184d4faee74db735487de9 | [] | no_license | PaddlePaddle/PaddleTest | 4fb3dec677f0f13f7f1003fd30df748bf0b5940d | bd3790ce72a2a26611b5eda3901651b5a809348f | refs/heads/develop | 2023-09-06T04:23:39.181903 | 2023-09-04T11:17:50 | 2023-09-04T11:17:50 | 383,138,186 | 42 | 312 | null | 2023-09-13T11:13:35 | 2021-07-05T12:44:59 | Python | UTF-8 | Python | false | false | 608 | py | #!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
"""
test jit cases
"""
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(os.getcwd())))
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "utils"))
from utils.yaml_loader import YamlLoader
from jittrans import JitTrans
yaml_path = os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "yaml", "nn.yml")
yml = YamlLoader(yaml_path)
def test_Maxout_0():
"""test Maxout_0"""
jit_case = JitTrans(case=yml.get_case_info("Maxout_0"))
jit_case.jit_run()
| [
"825276847@qq.com"
] | 825276847@qq.com |
0276f84d950f1b2718985231db5b12da64ee0a89 | ca7b428b4f132b7e18fa55618fff0a7de8357f7a | /src/dronet_tello/scripts/tellopy/__main__.py | d6fe9d222c64c2d956f274c365020dae4ecca4c5 | [] | no_license | MissMeriel/ROS_Tello | 75de6c67431183687af9052939f60e1497804fc3 | b443de5be9d5e6309eb1208223eccdb13c8d3828 | refs/heads/master | 2022-12-10T09:13:12.953462 | 2019-04-10T16:55:29 | 2019-04-10T17:29:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | import time
import tellopy
def main():
tello = tellopy.Tello()
print('Taking off')
tello.take_off()
time.sleep(5)
tello.throttle = 0.01
# tello.pitch = -0.5
tello.yaw = 1.0
time.sleep(0.5)
print('Landing')
tello.land()
time.sleep(3)
print('Shutting down')
tello.shutdown()
if __name__ == "__main__":
main()
| [
"meriel.k.stein@gmail.com"
] | meriel.k.stein@gmail.com |
3b64a3c6ee3f5c6919080f693dfef0fa7820a4f6 | a2afd4f59fac3b80ccc0719b01432ff806d7fa06 | /agrochemistry/agrochemistry/settings.py | a08a4b02aa9b6d22203a5b81fa2220a1b1da8534 | [] | no_license | MalikDeveloper2077/agrochemistry-telegram-bot | d1d9514173b3760500436dc4ffd09f1dcf0a91ba | d282187f6ddd5cf8f9b4ceaf4819a898c32e7176 | refs/heads/main | 2023-04-09T12:22:17.212439 | 2021-04-24T12:23:52 | 2021-04-24T12:23:52 | 349,708,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,426 | py | import os
from pathlib import Path
import environ
env = environ.Env(
DEBUG=(bool, False)
)
environ.Env.read_env()
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env('DEBUG')
if env("DJANGO_ALLOWED_HOSTS", default=[]):
ALLOWED_HOSTS = env("DJANGO_ALLOWED_HOSTS").split(" ")
else:
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
# admin page package
'jet.dashboard',
'jet',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'calculator.apps.CalculatorConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'agrochemistry.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'agrochemistry.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': env('POSTGRES_DB_NAME'),
'USER': env('POSTGRES_USERNAME'),
'PASSWORD': env('POSTGRES_PASS'),
'HOST': env('POSTGRES_HOST'),
'PORT': env('POSTGRES_PORT'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'ru-RU'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
X_FRAME_OPTIONS = 'SAMEORIGIN'
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Telegram bot
TG_TOKEN = env('TG_TOKEN')
# Mail
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = 'nAsad1385@gmail.com'
EMAIL_HOST_PASSWORD = 'MalikAsad'
| [
"kalik@MacBook-Pro-Malik.local"
] | kalik@MacBook-Pro-Malik.local |
4ba2b4fb8af7065418d277f2d0e77dfde2042af3 | 4bb095cd25e48089df0aae93ae7b4526261d480f | /PythonProgramming/Statements/ForloopConcept.py | 50b31306a8cbc2d9b20dcafbdc9efca0ee4cd219 | [] | no_license | DevMindG/PythonProgramming | 064dee87fd8513f88306ae71605bbf30806f5b04 | 6c0a2fa01615fdbd298a07d89d6865e6a8c9c051 | refs/heads/master | 2022-03-27T18:33:39.855597 | 2019-12-21T01:00:57 | 2019-12-21T01:00:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,696 | py | # in operators
# can be checked a value whether there is a list, tuple or string using in
print(5 in {1, 2, 3, 4})
print("hel" in "hello world")
print(10 in [1, 2, 3, 4])
print(not 4 in (1, 2, 3))
# for loop in list
list1 = [1, 2, 3, 4, 5, 6, 7]
for element in list1:
print("Element", element)
# sum list using for
list2 = [1, 2, 3, 4, 5, 6, 7]
sumNumbers = 0
for element in list2:
sumNumbers += element
print("Sum", sumNumbers)
list2 = [1, 2, 3, 4, 5, 6, 7]
sumNumbers = 0
for element in list2:
sumNumbers += element
print("Sum: {} Element : {}".format(sumNumbers, element))
print("Sum: ", sumNumbers)
# Find even numbers
list3 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
for element in list3:
if element % 2 == 0:
print(element)
# String using for loop
st = "Python"
for character in st:
print(character)
st = "Python"
for character in st:
print(character * 3)
# tuple using for loop
tuple1 = (1, 2, 3, 4, 5, 6, 7)
for element in tuple1:
print(element)
list4 = [(1, 2), (3, 4), (5, 6), (7, 8)]
for element in list4:
print(element)
list5 = [(1, 2), (3, 4), (5, 6), (7, 8)]
for (i, j) in list5:
print(i, j)
for (i, j) in list5:
print("i: {} j: {}".format(i, j))
list6 = [(1, 2, 3), (4, 5, 6), (7, 8, 9), (10, 11, 12)]
for (i, j, k) in list6:
print(i * j * k)
# Dictionary using for loop
# keys
art = {"one": 1, "two": 2, "three": 3, "four": 4}
for element in art.keys():
print(element)
# values
art2 = {"one": 1, "two": 2, "three": 3, "four": 4}
for element in art2.values():
print(element)
# items
art3 = {"one": 1, "two": 2, "three": 3, "four": 4}
for (i, j) in art3.items():
print("Key:", i, "Value:", j)
| [
"boblerry4@gmail.com"
] | boblerry4@gmail.com |
4178f805bf8b8945847140f631ea24653a9a02e1 | afaba6f428d704154cabccb2d30908e0535a8ca5 | /locust/locustfile.py | aa4cea9d7ec34621bf3a83d296bd33f5f980e028 | [
"MIT"
] | permissive | sebadiaz/conferences | a67d7c3c2a1c6a3cf8f180e135ad9f8baf6e020b | 9c43b703993f30cd518ea099c1882c35c445bc20 | refs/heads/main | 2023-01-24T09:34:20.054624 | 2020-11-21T14:04:25 | 2020-11-21T14:04:25 | 314,824,334 | 1 | 0 | MIT | 2020-11-21T13:58:48 | 2020-11-21T13:58:47 | null | UTF-8 | Python | false | false | 484 | py | from locust import HttpUser, between, task
from locust.contrib.fasthttp import FastHttpUser
class WebsiteUser(FastHttpUser):
wait_time = between(5, 15)
def on_start(self):
self.client.post("/valider-email", {
"email": "julien.dauphant@@beta.gouv.fr"
})
@task
def index(self):
self.client.get("/")
self.client.get("/static/css/custom.css")
def about(self):
self.client.get("/mentions-legales")
| [
"julien.dauphant@beta.gouv.fr"
] | julien.dauphant@beta.gouv.fr |
0b923417f2c83d1b943f897a0e067b827cc724c3 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/6d0b0f8338f7ffbc761ddc05cbdc620a99901074-<format_item>-fix.py | a42ce4e3bcf3cd32eb44b6e67fee46a95e4f787a | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,254 | py | def format_item(self, item):
d = item.as_dict()
containers = d['containers']
ports = d['ip_address']['ports']
resource_group = d['id'].split('resourceGroups/')[1].split('/')[0]
for port_index in range(len(ports)):
ports[port_index] = ports[port_index]['port']
for container_index in range(len(containers)):
old_container = containers[container_index]
new_container = {
'name': old_container['name'],
'image': old_container['image'],
'memory': old_container['resources']['requests']['memory_in_gb'],
'cpu': old_container['resources']['requests']['cpu'],
'ports': [],
}
for port_index in range(len(old_container['ports'])):
new_container['ports'].append(old_container['ports'][port_index]['port'])
containers[container_index] = new_container
d = {
'id': d['id'],
'resource_group': resource_group,
'name': d['name'],
'os_type': d['os_type'],
'ip_address': ('public' if (d['ip_address']['type'] == 'Public') else 'none'),
'ports': ports,
'location': d['location'],
'containers': containers,
'tags': d.get('tags', None),
}
return d | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
8e7b22bb92df39bfeee73706b780e1272342e27b | e401c09c53fea6f31a1a52fbffc437b08f4ea0b0 | /hassio/dock/util.py | 87cd76020f394e59292f0d14af145a0b6f301727 | [
"BSD-3-Clause"
] | permissive | pvizeli/hassio | a73df2b2a21636cd52fe260741c92b11d752d128 | 90030d3a28ce88ba49afa1109f3b83d2e3e22fb0 | refs/heads/dev | 2020-12-03T05:15:04.394380 | 2017-06-29T05:57:04 | 2017-06-29T05:57:04 | 84,926,758 | 9 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,145 | py | """HassIO docker utilitys."""
import re
from ..const import ARCH_AARCH64, ARCH_ARMHF, ARCH_I386, ARCH_AMD64
# Map from Hass.IO architecture constant to the Home Assistant resin base
# image substituted for the %%BASE_IMAGE%% placeholder in Dockerfiles.
RESIN_BASE_IMAGE = {
    ARCH_ARMHF: "homeassistant/armhf-base:latest",
    ARCH_AARCH64: "homeassistant/aarch64-base:latest",
    ARCH_I386: "homeassistant/i386-base:latest",
    ARCH_AMD64: "homeassistant/amd64-base:latest",
}
# Placeholder token replaced by dockerfile_template().
TMPL_IMAGE = re.compile(r"%%BASE_IMAGE%%")
def dockerfile_template(dockerfile, arch, version, meta_type):
    """Prepare a Hass.IO dockerfile.

    Replaces every %%BASE_IMAGE%% placeholder with the resin base image for
    'arch', appends the Hass.IO metadata LABEL line, and rewrites the file
    in place.
    """
    base_image = RESIN_BASE_IMAGE[arch]
    # Read the template, substituting the base-image placeholder per line.
    with dockerfile.open('r') as template:
        lines = [TMPL_IMAGE.sub(base_image, raw) for raw in template]
    # Append the metadata LABEL layer.
    lines.append(create_metadata(version, arch, meta_type))
    # Overwrite the original file with the rendered content.
    with dockerfile.open('w') as rendered:
        rendered.writelines(lines)
dock_output.writelines(buff)
def create_metadata(version, arch, meta_type):
    """Generate docker label layer for hassio."""
    template = 'LABEL io.hass.version="{}" io.hass.arch="{}" io.hass.type="{}"'
    return template.format(version, arch, meta_type)
| [
"noreply@github.com"
] | pvizeli.noreply@github.com |
3b02e36fbb830f8964ea3d2b0b8cd302a2612b81 | 4d159e82c285dabb29dfe1e82e958af1cc289b0a | /千峰的每天/第六天12.18/代码/Day06/10.函数概述.py | 7b7eaec5a688a59ec865c1204a00bafe07cff4fd | [] | no_license | xvjingcheng/superman | 9954ce65b1c8bf21f7b49746ceb6cc5a4d1fc7af | 73ec3a7a45173cea7f2d9ca568f38250a17ad3c5 | refs/heads/master | 2020-04-22T03:51:57.077810 | 2019-02-11T11:26:05 | 2019-02-11T11:26:05 | 170,055,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | # a = 3
# b = 5
# print(a + b)
def add2num(a, b):
    """Print the sum of two numbers."""
    total = a + b
    print(total)
add2num(3, 5)
add2num(5, 8)
"""
函数的定义和格式
def 函数名(参数1,参数2,参数3... ...)
方法体(执行具体功能的代码)
return 返回值
"""
| [
"17853729677@163.com"
] | 17853729677@163.com |
ed9371292dfeeabe4c40569766673eb4d76627e7 | b3d68a4e7f93d8827afb2c7d3f6fd3c0da24cec4 | /FRS_v1/cnn.py | 41b3f4b92e1d618fcbe8b952bed74ff90dc6ba04 | [] | no_license | wasteee/Embedded-Image-Processing-final | 1ab4ced49ccb97c853a91d704064f6ad6310f26a | 3236f61e6bedffed4860a942aff981fb6b933baa | refs/heads/master | 2022-11-04T05:12:28.340991 | 2020-06-21T10:53:08 | 2020-06-21T10:53:08 | 267,253,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,609 | py | from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
import pickle
import os
import cv2
from tqdm import tqdm
import random
import numpy as np
import tensorflow as tf
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
np.set_printoptions(threshold=np.inf)
# Gradient-descent mini-batch size.
batch_size = 256
# Number of output classes.
num_classes = 4
# Number of training epochs.
epochs = 12
# Image width and height.
img_rows, img_cols = 64, 64
# Load the training data.
DATADIR = "G:/FRS/trainingdata"
CATEGORIES = ["data01", "data02", "data03","other_faces"]
training_data = []
IMG_SIZE = 64
filename = "G:\\FRS\\x_test.sav"
x_test = pickle.load(open(filename,'rb'))
filename = "G:\\FRS\\x_train.sav"
x_train = pickle.load(open(filename,'rb'))
filename = "G:\\FRS\\y_train.sav"
y_train = pickle.load(open(filename,'rb'))
filename = "G:\\FRS\\y_test.sav"
y_test = pickle.load(open(filename,'rb'))
# Keep the original labels for use by the cross-tab function.
y_test_org = y_test
print(x_test.shape)
# channels_last: the color channel (R/G/B) depth goes in the 4th dimension;
# width and height occupy the 2nd and 3rd dimensions.
# x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
# x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
print(x_test.shape)
# Rescale pixel values from 0~255 to 0~1.
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# Convert the y values to one-hot encoding.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# Build a simple sequential (linearly executed) model.
model = Sequential()
# Convolution layer: filter=32 (depth of the output space), 3x3 kernel,
# relu activation.
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
# Convolution layer: filter=64 (output size), 3x3 kernel, relu activation.
model.add(Conv2D(64, (3, 3), activation='relu'))
# Max-pooling layer with a 2x2 pool size.
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout randomly disconnects input neurons to prevent overfitting;
# drop rate: 0.25.
model.add(Dropout(0.25))
# Flatten collapses the multi-dimensional input to 1-D; commonly used in the
# transition from the convolution layers to the fully connected layers.
model.add(Flatten())
# Fully connected layer: 128 outputs.
model.add(Dense(128, activation='relu'))
# Dropout randomly disconnects input neurons to prevent overfitting;
# drop rate: 0.5.
model.add(Dropout(0.5))
# Use a softmax activation function to classify the result.
model.add(Dense(num_classes, activation='softmax'))
# Compile: choose the loss function, optimizer, and evaluation metric.
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])
# Train; the training progress is stored in the train_history variable.
train_history = model.fit(x_train, y_train,
                          batch_size=batch_size,
                          epochs=epochs,
                          verbose=1,
                          validation_data=(x_test, y_test))
# Report the loss and the training score.
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# save model
filename = "G:\\FRS\\cnn_v4.sav"
pickle.dump(model,open(filename,"wb"))
| [
"noreply@github.com"
] | wasteee.noreply@github.com |
286079141118038776c11e0b7f0eb90ba809b352 | 0d0f123de3e7bdbe846b892bef6f8f12d7f722b4 | /src/courses/migrations/0002_auto_20200211_1409.py | 7a6ec867d33f2e8c0bc4a60cd63f568f562e483e | [] | no_license | frifaie/vidsub | eb889d137f96ab65b0fc3a788e4b18d2f6309b7a | c17e12319c30712689a82fec73aebc53c935d8f8 | refs/heads/master | 2020-12-29T10:53:14.465244 | 2020-02-28T01:19:33 | 2020-02-28T01:19:33 | 238,582,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | # Generated by Django 2.2.10 on 2020-02-11 07:09
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the Course field allowed_membership to allowed_memberships."""
    # Must run after the initial courses migration that created the field.
    dependencies = [
        ('courses', '0001_initial'),
    ]
    operations = [
        # Pure column rename; no data transformation is performed.
        migrations.RenameField(
            model_name='course',
            old_name='allowed_membership',
            new_name='allowed_memberships',
        ),
    ]
| [
"frifaie15@gmail.com"
] | frifaie15@gmail.com |
5cdc5d9a41cd117f2e1e1faa0cf8c5b971c37a2b | 7b2f384e27f039d793d385ee18908d53e1cc4ea8 | /utils/log_tools.py | cfc3d006dbdbb0e9643dfa46fbe57f9dfdf9de94 | [] | no_license | moigomes/etl_geolocalizacao | 10fb687424d0306569945747937178748189e3d9 | 64d0dadc16ca479daf895d484632a2ffba7d10c2 | refs/heads/master | 2023-01-04T22:52:13.599122 | 2020-11-03T16:55:32 | 2020-11-03T16:55:32 | 309,749,749 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | import logging
def ajustar_log():
logging.basicConfig(datefmt='%d-%m-%Y %H:%M:%S',
level=logging.INFO,
format='[%(levelname)-5.5s] %(asctime)s [%(filename)-15.15s / %(funcName)-20.22s / %(lineno)-5.5s] %(message)s') | [
"moigomes@gmail.com"
] | moigomes@gmail.com |
96fdbd1d69014c062a573ce6737c753189550b8e | 2031771d8c226806a0b35c3579af990dd0747e64 | /pyobjc-framework-CoreServices/PyObjCTest/test_textutils.py | 2ff838ec467acf264133c95ae598c609539c4881 | [
"MIT"
] | permissive | GreatFruitOmsk/pyobjc-mirror | a146b5363a5e39181f09761087fd854127c07c86 | 4f4cf0e4416ea67240633077e5665f5ed9724140 | refs/heads/master | 2018-12-22T12:38:52.382389 | 2018-11-12T09:54:18 | 2018-11-12T09:54:18 | 109,211,701 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,648 | py | from PyObjCTools.TestSupport import *
import CoreServices
class TestTextUtils (TestCase):
    """Verify that legacy Carbon TextUtils symbols are not exposed."""

    # Symbols that must not appear in the CoreServices bindings, listed in
    # the order they were historically checked (duplicates intentional).
    _UNWRAPPED_NAMES = (
        'ScriptRunStatus', 'BreakTable', 'NBreakTable', 'Munger',
        'NewString', 'SetString', 'GetString', 'GetIndString',
        'FindWordBreaks', 'LowercaseText', 'UppercaseText',
        'StripDiacritics', 'UppercaseStripDiacritics', 'FindScriptRun',
        'UpperString', 'upperstring', 'UprString', 'c2pstrcpy',
        'p2cstrcpy', 'CopyPascalStringToC', 'CopyCStringToPascal',
        'c2pstr', 'C2PStr', 'p2cst', 'P2CStr', 'p2cstr', 'c2pstr',
        'C2PStr', 'P2CStr',
    )

    def assert_not_wrapped(self, name):
        self.assertTrue(not hasattr(CoreServices, name), "%r exposed in bindings"%(name,))

    def test_not_wrapped(self):
        for name in self._UNWRAPPED_NAMES:
            self.assert_not_wrapped(name)
if __name__ == "__main__":
main()
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
fb591f1fb60332f3175b3b2cc3434ae2c22e1d5b | cb1771f2d660c4c0abd6f6a38932fc8e2b0a0c46 | /MadLibs.py | 54ab2d65a4149f3633a489a6b4eeed9ca49d436c | [] | no_license | bagastri/Game | 6e15f6aa30dfa8c84325d3b6270d9c1a1de53fd5 | 8b8f02a70340ed9f82643885df1a656483c1206d | refs/heads/main | 2023-08-21T21:04:16.106516 | 2021-10-20T07:28:49 | 2021-10-20T07:28:49 | 419,225,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | kata_sifat = input("Sebutkan sebuah Kata Sifat: ")
# Collect the remaining mad-lib words from the user (prompts are in
# Indonesian: an activity, a person's name, and a place).
kegiatan = input("Sebutkan sebuah Kegiatan: ")
nama_orang = input("Sebutkan Nama Seseorang: ")
tempat = input("Sebutkan sebuah Nama Tempat: ")
# Print the resulting story built from the collected words.
print(f"Aku adalah Seorang yang {kata_sifat}")
print(f"Hobiku adalah {kegiatan}")
print(f"Aku Suka dengan {nama_orang}")
print(f"Aku Punya Kebiasaan Tidur di {tempat}")
| [
"noreply@github.com"
] | bagastri.noreply@github.com |
f55673e83ede11923618c5868e655ce22f08bd6c | f7c38c85203c572a17a55a647f796c4217325914 | /interop.py | 197d08635d9be1a30c76c025c54a386287a444cd | [] | no_license | jeffthnd/interopclient | 27d9b6ce76e729929f81a2a7d52bbea72899e756 | 616a82f6f5af336963041f1994496b841f0ef0f0 | refs/heads/master | 2020-06-23T16:08:59.291042 | 2019-07-24T17:21:52 | 2019-07-24T17:21:52 | 198,674,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,762 | py | import json
import csv
import os
import sys
from auvsi_suas.client import client
from auvsi_suas.proto import interop_api_pb2
from google.protobuf import json_format
"""
Created on Thu Jun 13 09:30:42 2019
@author: Nuttawat Punpigul
"""
"""
The interop imprementation script for SUAS2019 competition.
The script will get the mission from the interop servo then reformat the data to CSV and TXT files which ready to use with another script made by Pegasus team.
The MAVLink protocol is hosted under the governance of the Dronecode Project.
See Wiki article (https://mavlink.io)
Interoperability System for the AUVSI SUAS Competition.
See Github (https://github.com/auvsi-suas/interop#interop-integration)
"""
# The example of JSON data for script testing
# mission = '{"id": 1,"flyZones": [{"altitudeMax": 750.0,"altitudeMin": 0.0,"boundaryPoints": [{"latitude": 38.142544,"longitude": -76.434088},{"latitude": 38.141833,"longitude": -76.425263},{"latitude": 38.144678,"longitude": -76.427995}]}],"searchGridPoints": [{"latitude": 38.142544,"longitude": -76.434088}],"offAxisOdlcPos": {"latitude": 38.142544,"longitude": -76.434088},"waypoints": [{"latitude": 38.142666,"altitude": 50.0,"longitude": -76.436777},{"latitude": 38.142544,"altitude": 200.0,"longitude": -76.434088}],"airDropPos":{"latitude": 38.141833,"longitude": -76.425263},"emergentLastKnownPos": {"latitude": 38.145823,"longitude": -76.422396},"stationaryObstacles": [{"latitude": 38.14792,"radius": 150.0,"longitude": -76.427995,"height": 200.0},{"latitude": 38.145823,"radius": 50.0,"longitude": -76.422396,"height": 300.0}]}'
def write_bp(num, lat, lon):
    """Write one geofence boundary point to 1.geofence.fen.

    The first call (num == 0) rewrites the file, seeding it with the home
    coordinate before the first boundary point; later calls append.
    """
    point_line = '%s\t%s\n' % (str(lat), str(lon))
    if int(num) == 0:
        home_line = '%s\t%s\n' % (38.144749, -76.428020)
        with open('1.geofence.fen', 'w') as fen_file:
            fen_file.write(home_line)
            fen_file.write(point_line)
    else:
        with open('1.geofence.fen', 'a') as fen_file:
            fen_file.write(point_line)
def write_poly(num, lat, lon):
    """Write one search-grid polygon vertex to 3.polygon.poly.

    The first vertex (num == 0) truncates the file; subsequent vertices are
    appended, one tab-separated lat/lon pair per line.
    """
    mode = 'w' if int(num) == 0 else 'a'
    with open('3.polygon.poly', mode) as poly_file:
        poly_file.write('%s\t%s\n' % (str(lat), str(lon)))
def write_wp(wp_num, wp_lat, wp_lon, wp_alt):
    """Write one mission waypoint row to waypoints.csv.

    Args:
        wp_num: Zero-based waypoint index; index 0 truncates the file,
            later indices append.
        wp_lat: Waypoint latitude in degrees.
        wp_lon: Waypoint longitude in degrees.
        wp_alt: Waypoint altitude in feet; stored divided by 3.28
            (feet-to-meters conversion).
    """
    row = [[str(wp_num), str(wp_lat), str(wp_lon), str(wp_alt / 3.28)]]
    # The first waypoint starts a fresh file; the rest are appended.
    mode = 'w' if int(wp_num) == 0 else 'a'
    # The 'with' block closes the file; the original trailing close() on the
    # handle leaked out of the 'with' was a redundant double close.
    with open('waypoints.csv', mode, newline='') as csv_file:
        csv.writer(csv_file).writerows(row)
def write_obs(obs_num, obs_lat, obs_lon, obs_rad, obs_alt):
    """Write one stationary obstacle row to obstacles.csv.

    Args:
        obs_num: Zero-based obstacle index; index 0 rewrites the file and
            emits the header row, later indices append. The stored "No."
            column is one-based (obs_num + 1).
        obs_lat: Obstacle latitude in degrees.
        obs_lon: Obstacle longitude in degrees.
        obs_rad: Obstacle cylinder radius.
        obs_alt: Obstacle cylinder height.
    """
    row = [[str(obs_num + 1), str(obs_lat), str(obs_lon), str(obs_rad),
            str(obs_alt)]]
    # The 'with' block closes the file; the original trailing close() on the
    # handle leaked out of the 'with' was a redundant double close.
    if int(obs_num) == 0:
        with open('obstacles.csv', 'w', newline='') as csv_file:
            writer = csv.writer(csv_file)
            # The header row precedes the first data row only.
            writer.writerows([["No.", "Lat", "Lon", "Radius", "Altitude"]])
            writer.writerows(row)
    else:
        with open('obstacles.csv', 'a', newline='') as csv_file:
            csv.writer(csv_file).writerows(row)
# Read the waypoint.csv file for create the mavlink waypoint file
def reformat(gp_lat, gp_lon, ad_lat, ad_lon, elk_lat, elk_lon):
    """Convert waypoints.csv into a QGC WPL 110 MAVLink mission file.

    Reads waypoints.csv (written by write_wp) and writes 2.SUAS2019.txt:
    home row, takeoff, each waypoint, the airdrop point, the emergent
    last-known position, and a landing row back at home.

    NOTE(review): gp_lat/gp_lon are accepted but never used in this body.
    NOTE(review): airdroprow/joerow/landingrow leak out of the loop; if
    waypoints.csv is empty, the final 'with' block raises NameError.
    """
    wp = list(csv.reader(open('waypoints.csv', 'r'), delimiter=','))
    # Fixed mission altitudes (meters) and the home coordinate.
    takeoff = 35.0
    dropheight = 35.0
    searchjoeheight = 90.0
    home_lat = 38.144749
    home_lon = -76.428020
    # wp_num is the MAVLink sequence number (0 = home, 1 = takeoff);
    # num indexes rows of waypoints.csv.
    wp_num = 2
    num = 0
    while True:
        try:
            lat = wp[num][1]
            lon = wp[num][2]
            alt = wp[num][3]
            # MAVLink rows: seq, current, frame, command (16=WAYPOINT,
            # 22=TAKEOFF, 21=LAND), params 1-4, x, y, z, autocontinue.
            homerow = [0,1,0,16,0,0,0,0,home_lat,home_lon,1.3,1]
            takeoffrow = [1,0,3,22,0.0,0.0,0.0,0.0,0.0,0.0,takeoff,1]
            waypointrow = [wp_num,0,3,16,0.0,0.0,0.0,0.0,lat,lon,alt,1]
            airdroprow = [wp_num + 1,0,3,16,0.0,0.0,0.0,0.0,ad_lat,ad_lon,dropheight,1]
            joerow = [wp_num + 2,0,3,16,0.0,0.0,0.0,0.0,elk_lat,elk_lon,searchjoeheight,1]
            landingrow = [wp_num + 3,0,3,21,0.0,0.0,0.0,0.0,home_lat,home_lon,0,1]
            if int(wp_num) == 2:
                with open('2.SUAS2019.txt', 'w') as writer:
                    writer.write('QGC WPL 110\n')
                    writer.write('\t'.join(map(str,homerow))) # Write home coordinate
                    writer.write('\n')
                    writer.write("\t".join(map(str,takeoffrow))) # Write takeoff coordinate
                    writer.write('\n')
                    writer.write("\t".join(map(str,waypointrow))) # Write first waypoint coordinates
                    writer.write('\n')
            else:
                with open('2.SUAS2019.txt', 'a') as writer:
                    writer.write("\t".join(map(str,waypointrow))) # Write next waypoint coordinates
                    writer.write('\n')
            wp_num += 1
            num += 1
        # NOTE(review): bare except — intended to end on IndexError when the
        # CSV rows run out, but it also hides any other error.
        except:
            break
    with open('2.SUAS2019.txt', 'a') as writer:
        writer.write("\t".join(map(str,airdroprow))) # Write Airdrop coordinate
        writer.write('\n')
        writer.write("\t".join(map(str,joerow))) # Write emergentLastKnownPos coordinate
        writer.write('\n')
        writer.write("\t".join(map(str,landingrow))) # Write Landing coordinate
        writer.write('\n')
    return
# Create the interop client object.
# NOTE(review): this rebinds the imported module name 'client' to the
# instance, so client.Client is unreachable afterwards.
client = client.Client(url='http://10.10.130.10:80',
                       username='thai',
                       password='3522175567')
# Fetch the selected mission from the interop server.
mission_id = int(input('Select mission ID:'))
mission = client.get_mission(mission_id)
json_mission = json_format.MessageToJson(mission)
# Use the json library to read the mission data.
json_parsed = json.loads(json_mission)
# json_parsed = json.loads(mission)
# Print the fly-zone altitude limits.
altmax = json_parsed['flyZones'][0]['altitudeMax']
altmin = json_parsed['flyZones'][0]['altitudeMin']
print("Flyzone alt:", "Max", altmax, "Min", altmin)
# Write the boundary-point coordinates to the geofence file.
# NOTE(review): the bare excepts below end each loop on IndexError/KeyError
# when the list runs out, but also hide any other error.
bp_num = 0
while True:
    try:
        bp_lat = json_parsed['flyZones'][0]['boundaryPoints'][bp_num]['latitude']
        bp_lon = json_parsed['flyZones'][0]['boundaryPoints'][bp_num]['longitude']
        print("Boundarypoints:", bp_num + 1, bp_lat, bp_lon)
        write_bp(bp_num, bp_lat, bp_lon)
        bp_num += 1
    except:
        break
# Write the searchGridPoints coordinates to the polygon file.
gp_num = 0
while True:
    try:
        gp_lat = json_parsed['searchGridPoints'][gp_num]['latitude']
        gp_lon = json_parsed['searchGridPoints'][gp_num]['longitude']
        print("searchGridPoints:", gp_num, gp_lat, gp_lon)
        write_poly(gp_num, gp_lat, gp_lon)
        gp_num += 1
    except:
        break
# Write the waypoint coordinates to waypoints.csv.
wp_num = 0
while True:
    try:
        wp_lat = json_parsed['waypoints'][wp_num]['latitude']
        wp_lon = json_parsed['waypoints'][wp_num]['longitude']
        wp_alt = json_parsed['waypoints'][wp_num]['altitude']
        print("Waypoints:", wp_num + 1, wp_lat, wp_lon, wp_alt)
        write_wp(wp_num, wp_lat, wp_lon, wp_alt)
        wp_num += 1
    except:
        break
# Read the airDropPos coordinates.
ad_lat = json_parsed['airDropPos']['latitude']
ad_lon = json_parsed['airDropPos']['longitude']
print("airDropPos:", ad_lat, ad_lon)
# write_ad(ad_lat, ad_lon)
# Read the emergentLastKnownPos coordinates.
elk_lat = json_parsed['emergentLastKnownPos']['latitude']
elk_lon = json_parsed['emergentLastKnownPos']['longitude']
print("emergentLastKnownPos:", elk_lat, elk_lon)
# write_elk(elk_lat, elk_lon)
# Read the offAxisOdlcPos coordinates.
oax_lat = json_parsed['offAxisOdlcPos']['latitude']
oax_lon = json_parsed['offAxisOdlcPos']['longitude']
print("offAxisOdlcPos:", oax_lat, oax_lon)
# write_elk(elk_lat, elk_lon)
# Write the stationaryObstacles coordinates to obstacles.csv.
obs_num = 0
while True:
    try:
        obs_lat = json_parsed['stationaryObstacles'][obs_num]['latitude']
        obs_lon = json_parsed['stationaryObstacles'][obs_num]['longitude']
        obs_rad = json_parsed['stationaryObstacles'][obs_num]['radius']
        obs_alt = json_parsed['stationaryObstacles'][obs_num]['height']
        print("stationaryObstacles:", obs_num + 1, obs_lat, obs_lon, obs_rad)
        write_obs(obs_num, obs_lat, obs_lon, obs_rad, obs_alt)
        obs_num += 1
    except:
        break
# Reformat the mission file into a ready-to-use waypoint file.
reformat(gp_lat, gp_lon, ad_lat, ad_lon, elk_lat, elk_lon)
# Restart the script unless the user answers 'y'.
done = str(input("Are you finished? (y/n)"))
if done == 'n':
    os.execl(sys.executable,sys.executable,*sys.argv)
| [
"noreply@github.com"
] | jeffthnd.noreply@github.com |
7226b9cda9c338dffe84746f1f19fd9278d6e255 | 15e818aada2b18047fa895690bc1c2afda6d7273 | /lib/python/h5log_loader.py | 5cce0d76a64f14935c96f0fea66de17adea3cff0 | [
"Apache-2.0"
] | permissive | ghomsy/makani | 4ee34c4248fb0ac355f65aaed35718b1f5eabecf | 818ae8b7119b200a28af6b3669a3045f30e0dc64 | refs/heads/master | 2023-01-11T18:46:21.939471 | 2020-11-10T00:23:31 | 2020-11-10T00:23:31 | 301,863,147 | 0 | 0 | Apache-2.0 | 2020-11-10T00:23:32 | 2020-10-06T21:51:21 | null | UTF-8 | Python | false | false | 27,053 | py | # Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides a more user friendly interface to Makani HDF5 logs.
Typical use case:
h5 = H5LogLoader()
h5.Open(['file1.h5', 'file2.h5'])
# Dictionary interface.
plt.figure()
t = h5.capture_time['FcA/FlightComputerSensor']
d = h5['FcA/FlightComputerSensor/aux/mag']
plt.plot(t, d)
# Function call interface.
plt.figure()
for f in h5.GetFields(r'^Servo[^/]+/ServoStatus$'):
t = h5.GetCaptureTime(f)
d = h5.GetData(f + '/angle_measured')
plt.plot(t, d, label=h5.GetNodeName(f))
plt.legend()
"""
# NOTICE: To ease analysis, please do not depend on the Makani repository!
import collections
import re
import sys
import h5py
import numpy as np
def PrintProgress(count, total, suffix=''):
  """Print a progress bar to stdout."""
  width = 60
  n_filled = int(round(width * count / float(total)))
  pct = 100.0 * count / float(total)
  # Filled portion is '=', remainder is '-'.
  meter = '=' * n_filled + '-' * (width - n_filled)
  sys.stdout.write('[%s] % 4.1f%% ...%s\r' % (meter, pct, suffix))
  if count == total:
    sys.stdout.write('\nDone!\n')
  sys.stdout.flush()
def _H5BuildDtypeTree(dtype, prefix=''):
  """Recursively build an array of paths, starting from an HDF5 dtype.

  Args:
    dtype: A (possibly compound) numpy/HDF5 dtype.
    prefix: Path accumulated so far; leading '/' is stripped in the output.

  Returns:
    A list of slash-separated field paths, including the (possibly empty)
    path of the dtype itself.
  """
  values = [str(prefix).lstrip('/')]
  if dtype.fields:
    # items() rather than the Python 2-only iteritems() so this helper also
    # runs under Python 3; it remains valid Python 2.
    for key, item in dtype.fields.items():
      # item[0] is the field dtype; .base strips any sub-array shape.
      values += _H5BuildDtypeTree(item[0].base, prefix + '/' + key)
  return values
def _H5BuildGroupTree(group, prefix=''):
  """Recursively build an array of paths to each HDF5 dataset.

  Args:
    group: An h5py Group (or File) to walk.
    prefix: Path accumulated so far; leading '/' is stripped in the output.

  Returns:
    A list of slash-separated paths, one per h5py Dataset found.
  """
  values = []
  # items() rather than the Python 2-only iteritems() so this helper also
  # runs under Python 3; it remains valid Python 2.
  for key, item in group.items():
    if isinstance(item, h5py.Dataset):
      values.append(str(prefix + '/' + key).lstrip('/'))
    elif isinstance(item, h5py.Group):
      values += _H5BuildGroupTree(item, prefix + '/' + key)
  return values
def _H5BuildGroupAndDtypeTree(group, prefix=''):
  """Recursively build an array of paths, starting from an HDF5 group.

  Unlike _H5BuildGroupTree, descends into each dataset's compound dtype so
  the result includes one path per leaf field.

  Args:
    group: An h5py Group (or File) to walk.
    prefix: Path accumulated so far; leading '/' is stripped in the output.

  Returns:
    A list of slash-separated paths covering datasets and their fields.
  """
  values = []
  # items() rather than the Python 2-only iteritems() so this helper also
  # runs under Python 3; it remains valid Python 2.
  for key, item in group.items():
    if isinstance(item, h5py.Dataset):
      values += _H5BuildDtypeTree(item.dtype.base, prefix + '/' + key)
    elif isinstance(item, h5py.Group):
      values += _H5BuildGroupAndDtypeTree(item, prefix + '/' + key)
  return values
def _NormalizeFileNames(filenames):
  """Coerce a None, scalar, or list argument into a list of file names."""
  if filenames is None:
    return []
  if isinstance(filenames, list):
    return filenames
  return [filenames]
class _H5DataCache(object):
  """Provides a simple cache interface to H5*Cache classes."""

  def __init__(self):
    # Maps a path string to the data previously returned by GetData().
    self._data_cache = {}

  def ClearCached(self):
    """Free cached memory."""
    self._data_cache = {}

  def GetCached(self, path):
    """Get data and cache the result."""
    if self._data_cache.get(path) is None:
      self._data_cache[path] = self.GetData(path)
    # Hand back a copy so callers cannot mutate the cached values.
    return np.copy(self._data_cache[path])

  def GetData(self, path):  # pylint: disable=unused-argument
    """Get data without caching the result."""
    raise NotImplementedError
class _H5DataLog(object):
  """Load HDF5 files.

  Abstracts one or more HDF5 log files behind a single path namespace with
  the verbose kAioNode/kMessageType prefixes stripped.
  """

  def __init__(self):
    # Abstract HDF5 interface to improve access performance.
    self._data_logs = []  # A list of HDF5 file objects.
    self._data_paths = {}  # A mapping from path to HDF5 data path.

  def __del__(self):
    self.Close()

  def Open(self, filenames):
    """Open HDF5 log files.

    Args:
      filenames: A list of log HDF5 files, sorted in time.
    """
    # Python's garbage collection does not always work well with HDF5 data
    # structures. Close all files before reopening them again.
    self.Close()
    # Load HDF5 files as read-only.
    self._data_logs = [h5py.File(f, 'r') for f in filenames]
    # Index HDF5 data structure to improve path lookup performance.
    paths = set()
    for d in self._data_logs:
      paths |= set(_H5BuildGroupAndDtypeTree(d))
    self._data_paths = {self._GetShortPath(p): p for p in paths}

  def _GetShortPath(self, path):
    """Remove overly verbose prefixes."""
    return path.replace('kAioNode', '').replace('kMessageType', '')

  def Save(self, filename, verbose=False):
    """Save data from all input files to a single, merged HDF5."""
    with h5py.File(filename, 'w') as fp:
      # Build paths for each HDF5 dataset.
      paths = set()
      for d in self._data_logs:
        paths |= set(_H5BuildGroupTree(d))
      dataset_paths = {self._GetShortPath(p): p for p in paths}
      dataset_count = len(dataset_paths)
      i = 0
      # items() rather than the Python 2-only iteritems() so Save() also
      # runs under Python 3; it remains valid Python 2.
      for short_path, path in dataset_paths.items():
        fp.create_dataset(path, data=self.GetData(short_path))
        i += 1
        if verbose:
          PrintProgress(i, dataset_count, 'Concatenating HDF5 datasets')

  def Close(self):
    """Close all HDF5 log files and free associated memory."""
    while self._data_logs:
      d = self._data_logs.pop()
      d.close()
    self.__init__()

  def GetData(self, path):
    """Load data from HDF5 structure as data[field 0][field 1] ... [field N].

    Concatenates the matching dataset across all open files; files missing
    the path are skipped.
    """
    arrays = []
    split_path = self._data_paths[path].split('/')
    for d in self._data_logs:
      try:
        for p in split_path:
          d = d[p]
        arrays.append(d)
      except KeyError:
        pass
    if len(arrays) == 1:
      return arrays[0]
    return np.concatenate(arrays, axis=0)

  def GetPathsRegex(self, re_match, re_sub=r'\g<0>'):
    """Get a list of paths matching the given regex pattern.

    Args:
      re_match: Regex applied to each short path.
      re_sub: Template expanded for each match (default: the whole match).

    Returns:
      A sorted list of unique expanded matches.
    """
    expr = re.compile(re_match)
    paths = set()
    for path in self._data_paths:
      match = expr.match(path)
      if match:
        paths.add(match.expand(re_sub))
    return sorted(list(paths))
class _H5AioTimeCache(_H5DataCache):
  """Load and cache local node time."""

  def __init__(self, loader):
    super(_H5AioTimeCache, self).__init__()
    self._loader = loader
    # Per-AioHeader-path time offset in seconds; defaults to 0.0.
    self._aio_time_offset = collections.defaultdict(float)

  def GetData(self, path):
    """Get the local node time associated with the given path."""
    path = self._loader.GetAioHeaderPath(path)
    # np.int64 instead of the Python 2-only builtin 'long' (a NameError on
    # Python 3); int64 leaves ample headroom for the 2**32 rollover
    # corrections below.
    data = self._loader.GetData(path + '/timestamp').astype(np.int64)
    # Unwrap rollovers of the 32-bit microsecond timestamp counter.
    for i in np.where(np.diff(data) < 0)[0]:
      data[i + 1:] += 2**32
    # Convert microseconds to seconds and apply the per-path offset.
    return data.astype(float) * 1e-6 - self._aio_time_offset[path]

  def GetOffset(self, path):
    """Get the local node time offset associated with the given path."""
    path = self._loader.GetAioHeaderPath(path)
    return self._aio_time_offset[path]

  def SetOffset(self, path, offset):
    """Set the local node time offset associated with the given path."""
    path = self._loader.GetAioHeaderPath(path)
    # Shift already-cached times in place so they stay consistent.
    if path in self._data_cache:
      self._data_cache[path] += self._aio_time_offset[path] - offset
    self._aio_time_offset[path] = offset

  def ShiftOffset(self, path, delta):
    """Shift the local node time offset associated with the given path."""
    offset = self.GetOffset(path) + delta
    self.SetOffset(path, offset)
class _H5CaptureTimeCache(_H5DataCache):
  """Load and cache capture time."""

  def __init__(self, loader):
    super(_H5CaptureTimeCache, self).__init__()
    self._loader = loader
    self._capture_time_offset = 0  # Offset common to all datasets.

  def GetData(self, path):
    """Get the capture time associated with the given path."""
    path = self._loader.GetCaptureHeaderPath(path)
    # Combine the tv_sec/tv_usec pair into seconds.
    tv_sec = self._loader.GetData(path + '/tv_sec').astype(float)
    tv_usec = self._loader.GetData(path + '/tv_usec').astype(float)
    return tv_sec + tv_usec * 1e-6 - self._capture_time_offset

  def GetOffset(self):
    """Get the global capture time offset."""
    return self._capture_time_offset

  def SetOffset(self, offset):
    """Set the global capture time offset."""
    # values() rather than the Python 2-only itervalues() so this also runs
    # under Python 3; the in-place += shifts each cached array directly.
    for t in self._data_cache.values():
      t += self._capture_time_offset - offset
    self._capture_time_offset = offset

  def ShiftOffset(self, delta):
    """Shift the global capture time offset."""
    offset = self.GetOffset() + delta
    self.SetOffset(offset)
class _H5GpsTimeCache(_H5DataCache):
  """Load and cache GPS time.

  GPS time is derived from capture time via a linear calibration fitted
  against NovAtelObservations messages.
  """

  def __init__(self, loader):
    super(_H5GpsTimeCache, self).__init__()
    self._loader = loader
    self._gps_time_offset = 0  # Offset common to all datasets.
    self._gps_time_cal = None  # Calibration from capture time to GPS time.

  def GetData(self, path):
    """Get the GPS time associated with the given path."""
    if self._gps_time_cal is None:
      # Lazily compute the capture-time -> GPS-time linear calibration from
      # all NovAtel observation streams in the log.
      t_cap = []
      t_gps = []
      loader = self._loader
      fields = loader.GetPathsRegex(
          r'^messages/[^/]+/NovAtelObservations/message$')
      for f in fields:
        time_status = loader.GetData(f + '/range/timestamp/time_status')
        time_of_week_ms = loader.GetData(f + '/range/timestamp/tow').astype(int)
        pps_latency_us = loader.GetData(f + '/pps_latency_usec').astype(int)
        # See NovAtel OEM6 docs for "GPS Reference Time Status" on page 35.
        i = np.where(time_status >= 100)[0]
        # Validate time-of-week range.
        gps_week_ms = 7 * 24 * 3600 * 1000
        i = i[np.where(time_of_week_ms[i] < gps_week_ms)[0]]
        # Validate PPS latency range.
        i = i[np.where(0 < pps_latency_us[i])[0]]
        i = i[np.where(pps_latency_us[i] < 1000 * 1000)[0]]
        # Remove invalid indices.
        cap_time = loader.GetCaptureTime(f)[i]
        time_of_week_ms = time_of_week_ms[i]
        pps_latency_us = pps_latency_us[i]
        # Handle GPS week rollovers.
        for i in np.where(np.diff(time_of_week_ms) < 0)[0]:
          time_of_week_ms[i + 1:] += gps_week_ms
        # To communicate GPS time precisely, the GPS receiver provides a
        # pulse per second interrupt signal that occurs on the GPS
        # time-of-week [ms] second transition (i.e., when
        # time-of-week % 1000 == 0). The GPS receiver also transmits the
        # time-of-week value in each message. We can then relate the
        # reception time of any message to the time of validity by measuring
        # the time between the PPS interrupt and message reception.
        transport_delay_us = pps_latency_us - (time_of_week_ms % 1000) * 1000
        # Compute times.
        t_cap.append(cap_time)
        t_gps.append(time_of_week_ms * 1e-3 + transport_delay_us * 1e-6)
      t_cap = np.concatenate(t_cap)
      t_gps = np.concatenate(t_gps)
      # Reject outliers. Loop multiple times to improve estimate.
      for _ in range(3):
        # Compute linear fit coefficients: (gps_time) = m * (capture_time) + b.
        p = np.polyfit(t_cap, t_gps, 1)
        # Compute error in linear fit: (delta) = (measurement) - (estimate).
        delta = t_gps - np.polyval(p, t_cap)
        # Find data with error less than 3 sigma.
        i = np.where(np.abs(delta) < 3.0 * np.std(delta))
        t_cap = t_cap[i]
        t_gps = t_gps[i]
      self._gps_time_cal = np.polyfit(t_cap, t_gps, 1)
    # Evaluate linear fit: (gps_time) = m * (capture_time) + b.
    return np.polyval(
        self._gps_time_cal,
        self._loader.GetCaptureTime(path)) - self._gps_time_offset

  def GetOffset(self):
    """Get the global GPS time offset."""
    return self._gps_time_offset

  def SetOffset(self, offset):
    """Set the global GPS time offset."""
    # values() rather than the Python 2-only itervalues() so this also runs
    # under Python 3; the in-place += shifts each cached array directly.
    for t in self._data_cache.values():
      t += self._gps_time_offset - offset
    self._gps_time_offset = offset

  def ShiftOffset(self, delta):
    """Shift the global GPS time offset."""
    offset = self.GetOffset() + delta
    self.SetOffset(offset)
class H5DataLoader(object):
"""Load and cache log data."""
  def __init__(self, filenames=None):
    """Create a loader.

    Args:
      filenames: A single file name, a list of file names, or None.
    """
    self._filenames = _NormalizeFileNames(filenames)
    # Underlying multi-file HDF5 abstraction.
    self._data_log = _H5DataLog()
    # Per-timebase caches; each takes this loader to fetch raw data.
    self._aio_time_cache = _H5AioTimeCache(self)
    self._capture_time_cache = _H5CaptureTimeCache(self)
    self._gps_time_cache = _H5GpsTimeCache(self)
    # Relative time reuses the capture-time cache with a global offset.
    self._relative_time_cache = _H5CaptureTimeCache(self)
  def __enter__(self):
    """Context-manager entry: open the files given to __init__."""
    self.Open(self._filenames)
    return self
  def __exit__(self, *unused_args):
    """Context-manager exit: close files and free cached data."""
    self.Close()
  def Open(self, filenames=None):
    """Open HDF5 log files.

    Args:
      filenames: A list of log HDF5 files, sorted in time.
    """
    # Close any previously opened files before (re)opening.
    self.Close()
    if filenames is not None:
      self._filenames = _NormalizeFileNames(filenames)
    if self._filenames is not None:
      self._data_log.Open(self._filenames)
      # Logs that record the minimum capture time under info/ use it as the
      # zero point of the relative timebase.
      if self._data_log.GetPathsRegex('^info/min_tv_[u]?sec'):
        min_sec = self._data_log.GetData('info/min_tv_sec').astype(float)
        min_usec = self._data_log.GetData('info/min_tv_usec').astype(float)
        offset = min_sec + min_usec * 1e-6
        self._relative_time_cache.SetOffset(offset)
  def Save(self, filename, verbose=False):
    """Save data from all input files to a single, merged HDF5."""
    self._data_log.Save(filename, verbose)
  def Close(self):
    """Close all HDF5 log files and free associated memory."""
    self._data_log.Close()
    self._filenames = []
    self.ClearCached()
  def ClearCached(self):
    """Free cached memory."""
    # Clear every _H5DataCache-derived attribute on this instance.
    for t in self.__dict__.values():
      if isinstance(t, _H5DataCache):
        t.ClearCached()
def GetData(self, path):
"""Get data associated with the given path."""
return self._data_log.GetData(path)
def GetAioTime(self, path):
"""Get the local node time associated with the given path."""
return self._aio_time_cache.GetCached(self.GetAioHeaderPath(path))
def GetAioTimeOffset(self, path):
"""Get the local node time offset associated with the given path."""
return self._aio_time_cache.GetOffset(self.GetAioHeaderPath(path))
def SetAioTimeOffset(self, path, offset):
"""Set the local node time offset associated with the given path."""
self._aio_time_cache.SetOffset(self.GetAioHeaderPath(path), offset)
def ShiftAioTimeOffset(self, path, delta):
"""Shift the local node time offset associated with the given path."""
self._aio_time_cache.ShiftOffset(self.GetAioHeaderPath(path), delta)
def GetCaptureTime(self, path):
"""Get the capture time associated with the given path."""
return self._capture_time_cache.GetCached(self.GetCaptureHeaderPath(path))
def GetCaptureTimeOffset(self):
"""Get the global capture time offset."""
return self._capture_time_cache.GetOffset()
def SetCaptureTimeOffset(self, offset):
"""Set the global capture time offset."""
self._capture_time_cache.SetOffset(offset)
def ShiftCaptureTimeOffset(self, delta):
"""Shift the global capture time offset."""
self._capture_time_cache.ShiftOffset(delta)
def GetGpsTime(self, path):
  """Get the GPS time associated with the given path."""
  # GPS times are cached per message type via the capture_header path.
  return self._gps_time_cache.GetCached(self.GetCaptureHeaderPath(path))
def GetGpsTimeOffset(self):
  """Get the global GPS time offset."""
  # Global: one offset shared by all GPS-time paths.
  return self._gps_time_cache.GetOffset()
def SetGpsTimeOffset(self, offset):
  """Set the global GPS time offset.

  Args:
    offset: New offset applied to all GPS-time lookups.
  """
  self._gps_time_cache.SetOffset(offset)
def ShiftGpsTimeOffset(self, delta):
  """Shift the global GPS time offset.

  Args:
    delta: Amount added to the current global offset.
  """
  self._gps_time_cache.ShiftOffset(delta)
def GetRelativeTime(self, path):
  """Get the relative time associated with the given path."""
  # Relative times are cached per message type via the capture_header path.
  return self._relative_time_cache.GetCached(self.GetCaptureHeaderPath(path))
def GetRelativeTimeOffset(self):
  """Get the global relative time offset."""
  # Global: one offset shared by all relative-time paths.
  return self._relative_time_cache.GetOffset()
def SetRelativeTimeOffset(self, offset):
  """Set the global relative time offset.

  Args:
    offset: New offset applied to all relative-time lookups.
  """
  self._relative_time_cache.SetOffset(offset)
def ShiftRelativeTimeOffset(self, delta):
  """Shift the global relative time offset.

  Args:
    delta: Amount added to the current global offset.
  """
  self._relative_time_cache.ShiftOffset(delta)
def GetAioHeaderPath(self, path):
  """Return the 'aio_header' base path derived from a message path.

  Raises:
    ValueError: If `path` is not a valid message path.
  """
  if not self.IsMessagePath(path):
    raise ValueError('Invalid path specified:', path)
  # Keep 'messages/<node>/<message_type>' and append the header leaf.
  prefix = path.split('/')[:3]
  return '/'.join(prefix + ['aio_header'])
def GetCaptureHeaderPath(self, path):
  """Return the 'capture_header' base path derived from a message path.

  Raises:
    ValueError: If `path` is not a valid message path.
  """
  if not self.IsMessagePath(path):
    raise ValueError('Invalid path specified:', path)
  # Keep 'messages/<node>/<message_type>' and append the header leaf.
  prefix = path.split('/')[:3]
  return '/'.join(prefix + ['capture_header'])
def IsNodePath(self, path):
  """Return a truthy match object when `path` names a node, else None."""
  node_pattern = r'^messages/[^/]+(/.+)?$'
  return re.match(node_pattern, path)
def IsMessagePath(self, path):
  """Return a truthy match object when `path` names a message, else None."""
  message_pattern = r'^messages/[^/]+/[^/]+(/.+)?$'
  return re.match(message_pattern, path)
def IsDataPath(self, path):
  """Return a truthy match object when `path` names a data field, else None."""
  data_pattern = r'^messages/[^/]+/[^/]+/message/.+$'
  return re.match(data_pattern, path)
def GetPathsRegex(self, re_match, re_sub=r'\g<0>'):
  """Get a list of paths matching the given regex pattern.

  Args:
    re_match: Regex pattern selecting paths.
    re_sub: Substitution applied to each match; defaults to the whole match.
  """
  return self._data_log.GetPathsRegex(re_match, re_sub)
def GetNodeName(self, path):
  """Return the node name component of a node path.

  Raises:
    ValueError: If `path` is not a valid node path.
  """
  if not self.IsNodePath(path):
    raise ValueError('Invalid path specified:', path)
  segments = path.split('/')
  return segments[1]
def GetMessageName(self, path):
  """Return the message-type component of a message path.

  Raises:
    ValueError: If `path` is not a valid message path.
  """
  if not self.IsMessagePath(path):
    raise ValueError('Invalid path specified:', path)
  segments = path.split('/')
  return segments[2]
def GetDataName(self, path):
  """Return the data-field remainder of a data path as a (short) list.

  Raises:
    ValueError: If `path` is not a valid data path.
  """
  if not self.IsDataPath(path):
    raise ValueError('Invalid path specified:', path)
  # Split at most 4 times so everything after 'message/' stays in one piece.
  segments = path.split('/', 4)
  return segments[4:]
@property
def filenames(self):
  """Get HDF5 file names."""
  # List supplied to Open(); reset to [] by Close().
  return self._filenames
class H5DataDict(object):
  """Creates a path abstraction to the H5DataLoader object.

  Short, user-friendly paths (produced via regex substitution on the
  loader's full HDF5 paths) are mapped back to the full paths, and the
  object exposes dictionary-style access to the underlying data.
  """

  def __init__(self, loader, get_data_function, re_match_sub_dict):
    """Initialize the H5DataDict object.

    Args:
      loader: A H5DataLoader object.
      get_data_function: A H5DataLoader function to map self.GetData().
      re_match_sub_dict: A dict mapping path regex pattern to substitution.
    """
    self._loader = loader
    self._get_data_function = get_data_function
    self._re_match_sub_dict = re_match_sub_dict
    self._dict = {}

  def BuildDict(self):
    """Build the dictionary of data paths."""
    self._dict = {}
    # items() replaces the Python 2-only iteritems() so this class works
    # under both Python 2 and Python 3 (items() is valid in both).
    for re_match, re_sub in self._re_match_sub_dict.items():
      expr = re.compile(re_match)
      for path in self._loader.GetPathsRegex(expr):
        self._dict[expr.sub(re_sub, path)] = path

  def GetPathsRegex(self, pattern):
    """Get a list of paths matching the given regex pattern."""
    expr = re.compile(pattern)
    return sorted([f for f in self._dict if expr.match(f)])

  def GetPaths(self, prefix):
    """Get a list of paths with the given prefix."""
    return self.GetPathsRegex(r'^(' + prefix + r')(/.+)?$')

  def GetSubpaths(self, prefix, recursive=False):
    """Get a list of subpaths of the given prefix."""
    if recursive:
      return self.GetPathsRegex(r'^(' + prefix + r')/.+$')
    else:
      return self.GetPathsRegex(r'^(' + prefix + r')/[^/]+$')

  def GetData(self, path):
    """Get data associated with the given path."""
    return self._get_data_function(self._dict[path])

  def GetAioTime(self, path):
    """Get the local node time associated with the given path."""
    return self._loader.GetAioTime(self._dict[path])

  def GetAioTimeOffset(self, path):
    """Get the local node time offset associated with the given path."""
    return self._loader.GetAioTimeOffset(self._dict[path])

  def SetAioTimeOffset(self, path, offset):
    """Set the local node time offset associated with the given path."""
    self._loader.SetAioTimeOffset(self._dict[path], offset)

  def ShiftAioTimeOffset(self, path, delta):
    """Shift the local node time offset associated with the given path."""
    self._loader.ShiftAioTimeOffset(self._dict[path], delta)

  def GetCaptureTime(self, path):
    """Get the capture time associated with the given path."""
    return self._loader.GetCaptureTime(self._dict[path])

  def GetCaptureTimeOffset(self):
    """Get the global capture time offset."""
    return self._loader.GetCaptureTimeOffset()

  def SetCaptureTimeOffset(self, offset):
    """Set the global capture time offset."""
    self._loader.SetCaptureTimeOffset(offset)

  def ShiftCaptureTimeOffset(self, delta):
    """Shift the global capture time offset."""
    self._loader.ShiftCaptureTimeOffset(delta)

  def GetGpsTime(self, path):
    """Get the GPS time associated with the given path."""
    return self._loader.GetGpsTime(self._dict[path])

  def GetGpsTimeOffset(self):
    """Get the global GPS time offset."""
    return self._loader.GetGpsTimeOffset()

  def SetGpsTimeOffset(self, offset):
    """Set the global GPS time offset."""
    self._loader.SetGpsTimeOffset(offset)

  def ShiftGpsTimeOffset(self, delta):
    """Shift the global GPS time offset."""
    self._loader.ShiftGpsTimeOffset(delta)

  def GetRelativeTime(self, path):
    """Get the relative time associated with the given path."""
    return self._loader.GetRelativeTime(self._dict[path])

  def GetRelativeTimeOffset(self):
    """Get the global relative time offset."""
    return self._loader.GetRelativeTimeOffset()

  def SetRelativeTimeOffset(self, offset):
    """Set the global relative time offset."""
    self._loader.SetRelativeTimeOffset(offset)

  def ShiftRelativeTimeOffset(self, delta):
    """Shift the global relative time offset."""
    self._loader.ShiftRelativeTimeOffset(delta)

  def GetNodeName(self, path):
    """Get the node name associated with the given path."""
    return self._loader.GetNodeName(self._dict[path])

  def GetMessageName(self, path):
    """Get the message name associated with the given path."""
    return self._loader.GetMessageName(self._dict[path])

  def GetDataName(self, path):
    """Get the data field name associated with the given path."""
    return self._loader.GetDataName(self._dict[path])

  def keys(self):  # pylint: disable=invalid-name
    """Get all possible paths, used for dictionary self[] auto-completion."""
    return sorted(self._dict.keys())

  def __contains__(self, path):
    """Provide 'in' interface."""
    return path in self._dict

  def __getitem__(self, path):
    """Provide self[] dictionary access to data."""
    return self.GetData(path)
class H5LogLoader(H5DataDict):
  """Abstract a HDF5 log files to simplify interface.

  Wraps a H5DataLoader and exposes several H5DataDict views over the same
  loader: message data (this object itself), aio/capture headers, the
  various time bases, bad packets, info, and parameters. Each view maps a
  shortened path (regex substitution) to the full HDF5 path.
  """

  def __init__(self, filenames=None):
    self._data_loader = H5DataLoader(filenames)
    # This object itself is the 'data' view: 'node/message[/field...]'.
    super(H5LogLoader, self).__init__(
        self._data_loader, self._data_loader.GetData,
        {
            r'^messages/([^/]+/[^/]+)$': r'\1',
            r'^messages/([^/]+/[^/]+)/message/(.+)$': r'\1/\2',
        })
    # Remaining views delegate to the same loader with their own path maps.
    self._aio_header_dict = H5DataDict(
        self._data_loader, self._data_loader.GetData,
        {
            r'^messages/([^/]+/[^/]+)$': r'\1',
            r'^messages/([^/]+/[^/]+)/aio_header/(.+)$': r'\1/\2',
        })
    self._aio_time_dict = H5DataDict(
        self._data_loader, self._data_loader.GetAioTime,
        {r'^messages/([^/]+/[^/]+)$': r'\1'})
    self._bad_packets_dict = H5DataDict(
        self._data_loader, self._data_loader.GetData,
        {r'^bad_packets/(.+)$': r'\1'})
    self._capture_header_dict = H5DataDict(
        self._data_loader, self._data_loader.GetData,
        {
            r'^messages/([^/]+/[^/]+)$': r'\1',
            r'^messages/([^/]+/[^/]+)/capture_header/(.+)$': r'\1/\2',
        })
    self._capture_time_dict = H5DataDict(
        self._data_loader, self._data_loader.GetCaptureTime,
        {r'^messages/([^/]+/[^/]+)$': r'\1'})
    self._gps_time_dict = H5DataDict(
        self._data_loader, self._data_loader.GetGpsTime,
        {r'^messages/([^/]+/[^/]+)$': r'\1'})
    self._info_dict = H5DataDict(
        self._data_loader, self._data_loader.GetData,
        {r'^info/(.+)$': r'\1'})
    self._relative_time_dict = H5DataDict(
        self._data_loader, self._data_loader.GetRelativeTime,
        {r'^messages/([^/]+/[^/]+)$': r'\1'})
    self._param_dict = H5DataDict(
        self._data_loader, self._data_loader.GetData,
        {r'^parameters/(.+)$': r'\1'})

  def __enter__(self):
    # Context-manager support: 'with H5LogLoader(...) as log:'.
    self.Open()
    return self

  def __exit__(self, *unused_args):
    self.Close()

  def Open(self, filenames=None):
    """Open HDF5 log files.

    Args:
      filenames: A list of log HDF5 files, sorted in time.
    """
    self._data_loader.Open(filenames)
    self.BuildDict()

  def Save(self, filename, verbose=False):
    """Save data from all input files to a single, merged HDF5."""
    self._data_loader.Save(filename, verbose)

  def Close(self):
    """Close all HDF5 log files and free associated memory."""
    self._data_loader.Close()
    self.ClearCached()
    # Rebuild (now empty) path dictionaries for the closed state.
    self.BuildDict()

  def ClearCached(self):
    """Free cached memory."""
    self._data_loader.ClearCached()

  def BuildDict(self):
    """Build the dictionaries of data paths."""
    super(H5LogLoader, self).BuildDict()
    # Rebuild every H5DataDict attribute created in __init__.
    for t in self.__dict__.values():
      if isinstance(t, H5DataDict):
        t.BuildDict()

  def GetNodes(self):
    """Get a list of nodes found in the log file."""
    pattern = r'^messages/([^/]+)/[^/]+/.+'
    return self._data_loader.GetPathsRegex(pattern, r'\1')

  def GetMessageTypes(self, node=r'[^/]+'):
    """Get a list of message types found in the log file."""
    pattern = r'^messages/' + node + r'/([^/]+)/.+$'
    return self._data_loader.GetPathsRegex(pattern, r'\1')

  @property
  def filenames(self):
    """Get HDF5 file names."""
    return self._data_loader.filenames

  @property
  def aio_header(self):
    # View over 'aio_header' fields of each message.
    return self._aio_header_dict

  @property
  def aio_time(self):
    # Per-message local node times.
    return self._aio_time_dict

  @property
  def bad_packets(self):
    # View over the 'bad_packets' group.
    return self._bad_packets_dict

  @property
  def capture_header(self):
    # View over 'capture_header' fields of each message.
    return self._capture_header_dict

  @property
  def capture_time(self):
    # Per-message capture times.
    return self._capture_time_dict

  @property
  def data(self):
    # This object doubles as the message-data view.
    return self

  @property
  def gps_time(self):
    # Per-message GPS times.
    return self._gps_time_dict

  @property
  def info(self):
    # View over the 'info' group.
    return self._info_dict

  @property
  def param(self):
    # View over the 'parameters' group.
    return self._param_dict

  @property
  def relative_time(self):
    # Per-message relative times.
    return self._relative_time_dict
| [
"luislarco@google.com"
] | luislarco@google.com |
9a2d9e86480855cb0ada2ea4e9d047a63cc37272 | f89d8e0ea9709d17da7b25e63cab36e77267cefe | /proj2/SleepProj/SleepProj/urls.py | 3912df624b726e5cfe86898af06010d890c6efb1 | [] | no_license | barbarajael/SleepModeOn | 8d36385c325933fa1bb0eb407a84476dc4fc061a | 58e2bb3ea74ae28a73230ae13e2c84876e528ef0 | refs/heads/master | 2020-03-10T06:59:35.596351 | 2018-06-20T23:59:42 | 2018-06-20T23:59:42 | 129,252,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | """SleepProj URL Configuration"""
from django.contrib import admin
from django.urls import path, re_path
import app
from app import views
urlpatterns = [
path('admin/', admin.site.urls),
re_path(r'^$', app.views.dataSleep),
]
| [
"bmothe14@gmail.com"
] | bmothe14@gmail.com |
0148974e49046ac95e1892221c4dcba30449812a | 89a8daa6936c62d2092a19e7c4450e481b91a343 | /Py_Api/venv/bin/pip3.8 | 4efba541fb4f97b956f8da5ba1ef6669e8ef4fdd | [] | no_license | gautamsood15/PyBasics | e3391bb3d7c10d921a9fdbc61be571ab9d0f35d4 | 32992417601b7233d96cb74a6104c3a5333cdad5 | refs/heads/master | 2023-05-07T11:48:44.980225 | 2021-06-05T07:39:34 | 2021-06-05T07:39:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | 8 | #!/home/gautam/Desktop/Py_Basics/Py_Api/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"gauty22@gmail.com"
] | gauty22@gmail.com |
890f060e423c3a4d0a3dafcaa953ddaf17bc4218 | 7925b46f4556be817fc94fd55662bc41f0e7a3fe | /Inherit - 1.py | 679d04b251b63b82aa42d0142fe4b769f107655a | [] | no_license | Kalantri007/OOP-1 | 607093d875f782305e7914fd29ccf26c3c5463b6 | cc3b9e998c1a13d9846172f97dd99af898c42b00 | refs/heads/main | 2023-04-09T08:59:35.057470 | 2021-04-11T11:26:16 | 2021-04-11T11:26:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,425 | py | class Emp:
def __init__(self, first, last, pay):
self.first = first
self.last = last
self.email = first + '.' + last + '@email.com'
self.pay = pay
def fullname(self):
return '{} {}'.format(self.first, self.last)
def apply_raise(self):
self.pay = int(self.pay * self.raise_amt)
class Saurabh(Emp):
    """Developer employee with a programming language and a higher raise."""

    # Overrides the base-class raise multiplier.
    raise_amt = 1.10

    def __init__(self, first, last, pay, prog_lang):
        # Delegate the common fields to Emp, then record the language.
        super().__init__(first, last, pay)
        self.prog_lang = prog_lang
class Manager(Emp):
    """An employee who supervises a list of other employees."""

    def __init__(self, first, last, pay, employees=None):
        super().__init__(first, last, pay)
        # Substitute a fresh list when none is given (avoids sharing a
        # mutable default); a provided list is kept as-is.
        self.employees = [] if employees is None else employees

    def add_emp(self, emp):
        """Add *emp* to the supervised list unless already present."""
        if emp in self.employees:
            return
        self.employees.append(emp)

    def remove_emp(self, emp):
        """Remove *emp* from the supervised list if present."""
        if emp not in self.employees:
            return
        self.employees.remove(emp)

    def print_emps(self):
        """Print the full name of every supervised employee."""
        for member in self.employees:
            print('-->', member.fullname())
# --- Demo: exercise the employee class hierarchy ---
dev_1 = Saurabh('Pathu', 'Kalantri', 3000, 'Java')
dev_2 = Saurabh('Venku', 'Kalantri', 4000, 'Python')
# The manager starts with dev_1 already assigned.
mgr_1 = Manager('Shriniwas', 'Kalantri', 5000, [dev_1])
print(mgr_1.email)
mgr_1.add_emp(dev_2)
mgr_1.print_emps()
mgr_1.remove_emp(dev_2)
print('After remove function')
mgr_1.print_emps()
"noreply@github.com"
] | Kalantri007.noreply@github.com |
f3e35b2f4f25821c9af7d753accb90bc7b6152c4 | 091b50259ea6fcbec8943be73b4cd5f044b714ae | /metallics_api/serializers.py | b21d6365854ded938f4827bf6fe17c47ee7d50de | [] | no_license | tushar2488/MetallicsOptimizationServices | 736ca3c0edc53ff184a78a1e86235db08d04aab4 | 8899e9ec382750cab95311d07d29cbc4f7bcb351 | refs/heads/master | 2023-01-27T15:18:03.564516 | 2020-11-30T15:03:03 | 2020-11-30T15:03:03 | 316,971,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,208 | py | from rest_framework import serializers
from django.forms.models import model_to_dict
from .models import Chemical, Commodity, Composition
class ChemicalCompositionSerializer(serializers.Serializer):
def to_representation(self, value):
serializer = CompositionSerializer(value)
data = serializer.data
result = {}
for k, v in data.items():
if k == "element":
result["element"] = model_to_dict(Chemical.objects.get(id=v))
elif k == "percentage":
result["percentage"] = v
else:
pass
return result
class ChemicalSerializer(serializers.ModelSerializer):
class Meta:
model = Chemical
fields = ("id", "name")
class CompositionSerializer(serializers.ModelSerializer):
class Meta:
model = Composition
fields = ("id", "commodity", "element", "percentage")
class CommoditySerializer(serializers.ModelSerializer):
chemical_composition = ChemicalCompositionSerializer(source="composition_set", many=True, read_only=True)
class Meta:
model = Commodity
fields = ("id", "name", "price", "inventory", "chemical_composition")
| [
"tushar.tajne@nihilent.com"
] | tushar.tajne@nihilent.com |
48e1d9ec305c6fd676cc6fcfeecf686674f331eb | 3b8de8ff9cc3e61283cc873ad371c39354e00c29 | /modules/init.py | f0ad2a4f208c8c577eba19bdc0ccda7ae1f8db75 | [] | no_license | hiranoo/SearchHouse | 22f71b5712f1060ceca3f6cae196dd37e8fb2bd6 | 4f615da7d5b4c82651dd2f6ece8447daec3b5698 | refs/heads/master | 2023-02-13T11:52:49.479448 | 2021-01-11T14:47:35 | 2021-01-11T14:47:35 | 325,778,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | # coding: utf-8
import os
from base_path import *
def init():
    """Create the data directory tree under BASE_PATH if it is missing."""
    new_path_list = ['/data', '/data/fetched', '/data/selected',
                     '/data/latest', '/data/sending']
    for new_path in new_path_list:
        new_path = BASE_PATH + new_path
        # makedirs(exist_ok=True) replaces the exists()/mkdir() pair: it is
        # idempotent, avoids the TOCTOU race between the check and the
        # creation, and also creates any missing parent directories.
        os.makedirs(new_path, exist_ok=True)
| [
"trivalworks@gmail.com"
] | trivalworks@gmail.com |
ede366809d5baadd6f6d50f3790c48b9dd1a638d | 4a923c14f7642dba3638c9ceee178ccad26a4ad1 | /bot.py | 65e75afbb33a95cb172e18f24719e85da81e5e8f | [] | no_license | willnaoosmith/GoogleAccountCreator | 636a074d23bf297ac7816e2ea45b506fa623df7a | 18c783e525a2ba87e09cda72471e08f52fdf385b | refs/heads/master | 2022-12-18T22:44:05.305426 | 2020-08-17T16:29:45 | 2020-08-17T16:29:45 | 244,614,511 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,167 | py | # coding: utf-8
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support.ui import Select
import time
import random
male = ["André","Antônio","Arthur","Bernardo","Breno","Bruno","Caio","Carlos","Cauã","Daniel","Danilo","Davi","Diego","Douglas","Eduardo","Enzo","Erick","Felipe","Filipe","Franciso","Gabriel","Guilherme","Gustavo","Heitor","Henrique","Igor","Iuri","Joari","José","João","Joaquim","Juarez","Júlio","Juraci","Juvenal","Kaio","Kauan","Kauã","Kauê","Leonardo","Luan","Lucas","Luiz","Luís","Marcos","Mateus","Matheus","Miguel","Murilo","Nicolas","Otávio","Paulo","Pedro","Rafael","Renan","Ryan","Samuel","Thiago","Tiago","Victor","Vinícius","Vicente","Victor","Vitór"]
female = ["Adriana","Alice","Aline","Amanda","Ana","Anna","Antonia","Beatriz","Bianca","Brenda","Bruna","Camila","Carolina","Caroline","Clara","Daniela","Eduarda","Emilly","Emily","Evelyn","Fernanda","Francisca","Gabriela","Gabrielle","Gabrielly","Giovana","Giovanna","Helena","Isabela","Isabella","Isabelle","Janaina","Joseane","Júlia","Juliana","Juraci","Lara","Larissa","Laura","Lavinia","Letícia","Livia","Luana","Luisa","Luiza","Manuela","Marcia","Maria","Mariana","Marina","Melissa","Nicole","Patricia","Rafaela","Raissa","Rebeca","Sarah","Sofia","Sophia","Thaís","Vitória","Yasmin","Ágatha"]
surnames = ["Almeida","Alvaréz","Alves","Araújo","Azevedo","Barbosa","Barboza","Cardoso","Carvalho","Cavalcante","Cavalcanti","Correa","Correia","Costa","Cruz","Dias","Díaz","Fernandes","Fernandez","Ferreira","German","Gomes","Gomez","Gonzáles","Gonçalves","Gónzalez","Lima","Martins","Mello","Melo","Montes","Moraes","Morais","Oliveira","Pereira","Pinto","Ribeiro","Rocha","Rodrigues","Santiago","Santos","Schmidt","Schmitz","Silva","Sousa","Souza","Teixeira"]
coin = random.randint(0, 1)
if (coin == 0):
namePosition = random.randint(0, len(male))
Name = unicode(male[namePosition], errors='replace')
else:
namePosition = random.randint(0, len(female))
Name = unicode(female[namePosition], errors='replace')
surnamePosition = random.randint(0, len(surnames))
MiddleName = surnames[surnamePosition]
User = Name + MiddleName + str(random.randint(9999, 999999))
Pass = Name + str(random.randint(0, 9999)) + MiddleName
phone = '4185817014499'
BirthDay = '6'
BirthYear = '1969'
# --- Browser automation: drive the Google signup flow step by step. ---
# The statements below are strictly order-dependent (each page must load
# before its elements are queried), so the sequence must not be reordered.
options = Options()
options.headless = False
browser = webdriver.Firefox(options=options, executable_path=r'/home/vmax-william/desktop/Bot5/geckodriver')
browser.set_page_load_timeout(10)
browser.get('https://accounts.google.com/signup/v2/webcreateaccount?flowName=GlifWebSignIn&flowEntry=SignUp')
browser.maximize_window()
time.sleep(2)
# Page 1: account details form.
FirstName = browser.find_element_by_id('firstName')
LastName = browser.find_element_by_id('lastName')
UserName = browser.find_element_by_id('username')
Password = browser.find_element_by_name('Passwd')
PasswordConfirm = browser.find_element_by_name('ConfirmPasswd')
FirstName.send_keys(Name)
LastName.send_keys(MiddleName)
UserName.send_keys(User)
Password.send_keys(Pass)
PasswordConfirm.send_keys(Pass)
time.sleep(1)
# NOTE(review): .click() returns None, so variables like LoginButton only
# record that the click happened — they are not element handles.
LoginButton = browser.find_element_by_id('accountDetailsNext').click()
time.sleep(2)
# Page 2: phone number for verification.
PhoneNumber = browser.find_element_by_id('phoneNumberId')
PhoneNumber.send_keys(phone)
time.sleep(1)
NumberButton = browser.find_element_by_id('gradsIdvPhoneNext').click()
time.sleep(2)
# Page 3: SMS verification code, typed in by the operator.
# NOTE(review): under Python 2 (this script uses unicode() above),
# input() evaluates the typed text — raw_input() would be safer; confirm
# which interpreter this is meant to run on.
CodeBox = browser.find_element_by_id('code')
ConfirmCode = input("Digite aqui o seu codigo: ")
CodeBox.send_keys(ConfirmCode)
time.sleep(1)
CodeButton = browser.find_element_by_id('gradsIdvVerifyNext').click()
time.sleep(2)
# Page 4: birthday and gender.
DayInput = browser.find_element_by_id('day')
MonthSelect = Select(browser.find_element_by_id('month'))
YearInput = browser.find_element_by_id('year')
GenderSelect = Select(browser.find_element_by_id('gender'))
DayInput.send_keys(BirthDay)
GenderSelect.select_by_value('1')
MonthSelect.select_by_value('1')
YearInput.send_keys(BirthYear)
time.sleep(1)
LoginButton = browser.find_element_by_id('accountDetailsNext').click()
PersonalDetailsButton = browser.find_element_by_id('personalDetailsNext').click()
# NOTE(review): PersonalDetailsButton holds None (see note above), so this
# second .click() call raises AttributeError if reached — confirm intent.
PersonalDetailsButton.click()
#browser.close()
"brochensquewill@protonmail.com"
] | brochensquewill@protonmail.com |
58190670c273ac8e0db61251aac2f4d800f43a15 | c95804e7c46349e63f283207ccfa7e9f0f6caaa5 | /openfda/drugsfda/pipeline.py | 613d032a6764cc4a485e7ccd9af34ead921cfd16 | [
"CC0-1.0"
] | permissive | tralfamadoriangray/openfda | 34f2096d8e8cc834de0671ae95b5369a8240dcd6 | b3bd3a06812c3de7586897ca02ecb6a55fe9a578 | refs/heads/master | 2023-02-08T02:18:13.415437 | 2020-12-30T14:13:08 | 2020-12-30T14:13:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,794 | py | #!/usr/local/bin/python
'''
Pipeline for converting Drugs@FDA files to JSON and importing into Elasticsearch.
'''
import os
import re
from os.path import join
import arrow
import luigi
from openfda import common, config, parallel, index_util
from openfda.annotation_table.pipeline import CombineHarmonization
from openfda.common import first_file_timestamp
from openfda.drugsfda.annotate import AnnotateMapper
# Source URL of the Drugs@FDA data archive published by FDA.
DOWNLOAD_FILE = 'https://www.fda.gov/media/89850/download'
# Working directories and per-stage database output locations.
BASE_DIR = join(config.data_dir(), 'drugsfda')
EXTRACTED_DIR = join(BASE_DIR, 'extracted')
RAW_DATA_FILE = join(BASE_DIR, 'raw/drugsfda.zip')
# One intermediate DB per Drugs@FDA source table.
PRODUCTS_DB = join(BASE_DIR, 'json/products.db')
APPLICATIONS_DB = join(BASE_DIR, 'json/applications.db')
APPLICATIONS_DOCS_DB = join(BASE_DIR, 'json/applicationsdocs.db')
SUBMISSIONS_DB = join(BASE_DIR, 'json/submissions.db')
SUBMISSION_PROPERTY_TYPE_DB = join(BASE_DIR, 'json/submissionpropertytype.db')
MARKETING_STATUS_DB = join(BASE_DIR, 'json/marketingstatus.db')
# Outputs of the annotation / merge stages.
ANNOTATED_DB = join(BASE_DIR, 'json/annotated.db')
TE_DB = join(BASE_DIR, 'json/te.db')
MERGED_DB = join(BASE_DIR, 'json/merged.db')
class DownloadDrugsFDAFiles(luigi.Task):
  """Luigi task: download the Drugs@FDA zip archive from fda.gov."""

  def requires(self):
    # First step of the pipeline: no upstream dependencies.
    return []

  def output(self):
    return luigi.LocalTarget(RAW_DATA_FILE)

  def run(self):
    common.download(DOWNLOAD_FILE, RAW_DATA_FILE)
class ExtractDrugsFDAFiles(luigi.Task):
  """Luigi task: unzip the downloaded archive into EXTRACTED_DIR."""

  def requires(self):
    return DownloadDrugsFDAFiles()

  def output(self):
    return luigi.LocalTarget(EXTRACTED_DIR)

  def run(self):
    zip_filename = RAW_DATA_FILE
    output_dir = self.output().path
    # NOTE(review): os.system's exit status is ignored, so a failed unzip
    # goes unnoticed until a downstream task fails — consider checking it.
    os.system('unzip -o %(zip_filename)s -d %(output_dir)s' % locals())
class Applications2JSONMapper(parallel.Mapper):
  """Maps rows of Applications.txt to cleaned JSON records.

  Renames the TSV columns per `rename_map`, drops empty values, and merges
  the application type and number into a single `application_number`
  field (e.g. 'NDA' + '021436' -> 'NDA021436').
  """

  rename_map = {
    'ApplNo': 'application_no',
    'ApplType': 'application_type',
    'ApplPublicNotes': 'application_public_notes',
    'SponsorName': 'sponsor_name'
  }

  def map(self, key, value, output):
    def _cleaner(k, v):
      ''' Helper function to rename keys and purge any keys that are not in
      the map.
      '''
      v = v.strip() if isinstance(v, str) else v
      if k in self.rename_map and v is not None and v != '':
        return (self.rename_map[k], v)

    json = common.transform_dict(value, _cleaner)

    # Public notes are not exposed downstream. PEP 8 `is not None` replaces
    # the original `!= None` comparison (identical behavior).
    if json.get('application_public_notes') is not None:
      del json['application_public_notes']

    # Combine type + number into the canonical application_number; pop()
    # removes the intermediate fields in the same step.
    if json.get('application_no') and json.get('application_type'):
      json['application_number'] = (
          json.pop('application_type') + json.pop('application_no'))

    output.add(key, json)
class Product2JSONMapper(parallel.Mapper):
  """Maps rows of Products.txt to cleaned JSON records.

  Renames TSV columns, translates coded values (reference_drug,
  reference_standard), splits the combined ingredient/strength strings
  into a list of objects, and splits 'Form' into dosage_form and route.
  """

  rename_map = {
    'ApplNo': 'application_number',
    'ProductNo': 'product_number',
    'Form': 'df_and_route',
    'Strength': 'strength',
    'ReferenceDrug': 'reference_drug',
    'DrugName': 'brand_name',
    'ActiveIngredient': 'active_ingredients',
    'ReferenceStandard': 'reference_standard'
  }

  # Human-readable values for the numeric codes in the source data.
  VALUE_MAPPINGS = {
    "reference_drug": {
      "0": "No",
      "1": "Yes",
      "2": "TBD"
    },
    "reference_standard": {
      "0": "No",
      "1": "Yes"
    }
  }

  def map(self, key, value, output):
    def _cleaner(k, v):
      ''' Helper function to rename keys and purge any keys that are not in
      the map.
      '''
      v = v.strip() if isinstance(v, str) else v
      if k in self.rename_map and v is not None and v != '':
        new_key = self.rename_map[k]
        # Translate coded values (e.g. '1' -> 'Yes') where a mapping exists.
        if new_key in self.VALUE_MAPPINGS and v in self.VALUE_MAPPINGS[new_key]:
          v = self.VALUE_MAPPINGS[new_key][v]
        return (new_key, v)

    json = common.transform_dict(value, _cleaner)

    # Turn active ingredients into an array of objects as per the mapping.
    # Ingredients and strengths are parallel ';'-separated lists; pair them
    # up by position (a missing strength entry simply omits the field).
    if json.get('active_ingredients'):
      ingredientList = re.sub(';\s+', ';', json['active_ingredients']).split(';')
      json['active_ingredients'] = []
      strengthList = re.sub(';\s+', ';', json['strength']).split(';') if json.get('strength') else []
      for idx, name in enumerate(ingredientList):
        ingredient = {'name': name}
        if len(strengthList) > idx:
          ingredient['strength'] = strengthList[idx]
        json['active_ingredients'].append(ingredient)
    else:
      # Delete to avoid complaints from Elasticsearch.
      if json.get('active_ingredients') is not None:
        del json['active_ingredients']
      if json.get('strength') is not None:
        del json['strength']

    # Split dosage and form into two distinct fields.
    if json.get('df_and_route') and len(json['df_and_route'].split(';')) == 2:
      json['dosage_form'] = json['df_and_route'].split(';')[0].strip()
      json['route'] = json['df_and_route'].split(';')[1].strip()
    # Sometimes the entire entry is Unknown. Indicate this for both df & route.
    elif json.get('df_and_route') and "UNKNOWN" in json['df_and_route']:
      json['dosage_form'] = json['df_and_route']
      json['route'] = json['df_and_route']
    # Sometimes the entry only contains dosage form.
    else:
      # NOTE(review): if the 'Form' column was empty (key dropped by
      # _cleaner), json['df_and_route'] raises KeyError here — confirm the
      # source data always populates Form.
      json['dosage_form'] = json['df_and_route']
      json['route'] = None
    # Delete the field either way
    del json['df_and_route']

    # Assign application number as the key, since all three drugs@FDA files can be joined by this key.
    key = build_products_key(json['application_number'], json)
    del json['application_number']

    output.add(key, json)
def build_products_key(app_number, json):
  """Return the products join key '<application_number>-<product_number>'."""
  product_no = json['product_number']
  return '{}-{}'.format(app_number, product_no)
class MarketingStatus2JSONMapper(parallel.Mapper):
  """Maps rows of MarketingStatus.txt to JSON keyed by application/product.

  Resolves the numeric MarketingStatusID to its description using the
  lookup dict supplied at construction time.
  """

  def __init__(self, doc_lookup):
    parallel.Mapper.__init__(self)
    # Maps MarketingStatusID -> marketing status description.
    self.doc_lookup = doc_lookup

  rename_map = {
    'MarketingStatusID': 'marketing_status_id',
    'ApplNo': 'application_number',
    'ProductNo': 'product_number'
  }

  def map(self, key, value, output):
    def _cleaner(k, v):
      ''' Helper function to rename keys and purge any keys that are not in
      the map.
      '''
      v = v.strip() if isinstance(v, str) else v
      if k in self.rename_map and v is not None and v != '':
        return (self.rename_map[k], v)

    json = common.transform_dict(value, _cleaner)

    # Replace the numeric id with its human-readable description.
    if json.get('marketing_status_id'):
      json['marketing_status'] = self.doc_lookup[json['marketing_status_id']]
      del json['marketing_status_id']

    # Assign application number as the key, since all three drugs@FDA files can be joined by this key.
    key = build_products_key(json['application_number'], json)
    del json['application_number'], json['product_number']

    output.add(key, json)
class TE2JSONMapper(parallel.Mapper):
  """Maps rows of TE.txt (therapeutic equivalence codes) to JSON records.

  Resolves the numeric MarketingStatusID to its description using the
  lookup dict supplied at construction time.
  """

  def __init__(self, doc_lookup):
    parallel.Mapper.__init__(self)
    # Maps MarketingStatusID -> marketing status description.
    self.doc_lookup = doc_lookup

  rename_map = {
    'ApplNo': 'application_number',
    'ProductNo': 'product_number',
    'MarketingStatusID': 'marketing_status_id',
    'TECode': 'te_code'
  }

  def map(self, key, value, output):
    def _cleaner(k, v):
      ''' Helper function to rename keys and purge any keys that are not in
      the map.
      '''
      v = v.strip() if isinstance(v, str) else v
      if k in self.rename_map and v is not None and v != '':
        return (self.rename_map[k], v)

    json = common.transform_dict(value, _cleaner)

    # Replace the numeric id with its human-readable description.
    if json.get('marketing_status_id'):
      json['marketing_status'] = self.doc_lookup[json['marketing_status_id']]
      del json['marketing_status_id']

    # Assign application number as the key, since all three drugs@FDA files can be joined by this key.
    key = build_products_key(json['application_number'], json)
    del json['application_number'], json['product_number']

    output.add(key, json)
class Submissions2JSONMapper(parallel.Mapper):
  """Maps rows of Submissions.txt to cleaned JSON records.

  Resolves the submission class code id to its code/description via the
  lookup supplied at construction and normalizes the status date to the
  yyyymmdd format used throughout openFDA.
  """

  def __init__(self, doc_lookup):
    parallel.Mapper.__init__(self)
    # Maps SubmissionClassCodeID -> (class code, description).
    self.doc_lookup = doc_lookup

  rename_map = {
    'ApplNo': 'application_number',
    'SubmissionClassCodeID': 'submission_class_code_id',
    'SubmissionType': 'submission_type',
    'SubmissionNo': 'submission_number',
    'SubmissionStatus': 'submission_status',
    'SubmissionStatusDate': 'submission_status_date',
    'SubmissionsPublicNotes': 'submission_public_notes',
    'ReviewPriority': 'review_priority'
  }

  def map(self, key, value, output):
    def _cleaner(k, v):
      ''' Helper function to rename keys and purge any keys that are not in
      the map.
      '''
      v = common.convert_unicode(v.strip()) if isinstance(v, str) else v
      if k in self.rename_map and v is not None and v != '':
        return (self.rename_map[k], v)

    json = common.transform_dict(value, _cleaner)

    # Resolve the class code id. A single truthiness test replaces the
    # original redundant `x and x is not None`, and the lookup row is
    # fetched once instead of twice.
    class_code_id = json.get('submission_class_code_id')
    if class_code_id:
      lookup_row = self.doc_lookup[class_code_id]
      json['submission_class_code'] = lookup_row[0]
      descr = lookup_row[1].rstrip()
      if descr:
        json['submission_class_code_description'] = descr
      del json['submission_class_code_id']

    # Convert date to format used throughout openFDA (yyyymmdd)
    if json.get('submission_status_date'):
      json['submission_status_date'] = arrow.get(json['submission_status_date']).strftime("%Y%m%d")

    # Assign application number as the key, since all three drugs@FDA files can be joined by this key.
    key = build_submissions_key(json['application_number'], json)
    del json['application_number']

    output.add(key, json)
def build_submissions_key(app_number, json):
  """Return the submissions join key '<application>-<type>-<number>'."""
  parts = (app_number, json['submission_type'], json['submission_number'])
  return '%s-%s-%s' % parts
class SubmissionPropertyType2JSONMapper(parallel.Mapper):
  """Maps rows of SubmissionPropertyType.txt to JSON records.

  Output records carry just the property code/id, keyed so they join to
  their submission.
  """

  rename_map = {
    'ApplNo': 'application_number',
    'SubmissionType': 'submission_type',
    'SubmissionNo': 'submission_number',
    'SubmissionPropertyTypeCode': 'code',
    'SubmissionPropertyTypeID': 'id'
  }

  def map(self, key, value, output):
    def _cleaner(k, v):
      ''' Helper function to rename keys and purge any keys that are not in
      the map.
      '''
      # This file uses the literal string 'Null' for missing values; drop it
      # along with empty strings.
      v = v.strip() if isinstance(v, str) else v
      if k in self.rename_map and v is not None and v != '' and v != 'Null':
        return (self.rename_map[k], v)

    json = common.transform_dict(value, _cleaner)

    # Assign application number as the key, since all three drugs@FDA files can be joined by this key.
    key = build_submissions_key(json['application_number'], json)
    del json['application_number'], json['submission_number'], json['submission_type']

    output.add(key, json)
class ApplicationsDocs2JSONMapper(parallel.Mapper):
  """Maps rows of ApplicationDocs.txt (letters, labels, reviews) to JSON.

  Resolves the document type id via the lookup supplied at construction
  and normalizes the document date to yyyymmdd.
  """

  def __init__(self, doc_lookup):
    parallel.Mapper.__init__(self)
    # Maps ApplicationDocsTypeID -> document type description.
    self.doc_lookup = doc_lookup

  rename_map = {
    'ApplicationDocsID': 'id',
    'ApplicationDocsTypeID': 'type_id',
    'ApplNo': 'application_number',
    'SubmissionType': 'submission_type',
    'SubmissionNo': 'submission_number',
    'ApplicationDocsTitle': 'title',
    'ApplicationDocsURL': 'url',
    'ApplicationDocsDate': 'date'
  }

  def map(self, key, value, output):
    def _cleaner(k, v):
      ''' Helper function to rename keys and purge any keys that are not in
      the map.
      '''
      v = v.strip() if isinstance(v, str) else v
      if k in self.rename_map and v is not None and v != '':
        new_key = self.rename_map[k]
        # The source data uses '0' as a placeholder title; drop it.
        if not (new_key == 'title' and v == '0'):
          return (new_key, v)

    json = common.transform_dict(value, _cleaner)

    # Replace the numeric type id with its description.
    json['type'] = self.doc_lookup[json['type_id']]
    del json['type_id']

    # Convert date to format used throughout openFDA (yyyymmdd)
    json['date'] = arrow.get(json['date']).strftime("%Y%m%d")

    json['url'] = common.convert_unicode(json['url'])

    # Assign application number as the key, since all three drugs@FDA files can be joined by this key.
    key = build_submissions_key(json['application_number'], json)
    del json['application_number'], json['submission_number'], json['submission_type']

    output.add(key, json)
class Applications2JSON(luigi.Task):
  """Luigi task: map Applications.txt (TSV) into a leveldb of JSON records."""

  def requires(self):
    return ExtractDrugsFDAFiles()

  def output(self):
    return luigi.LocalTarget(APPLICATIONS_DB)

  def run(self):
    parallel.mapreduce(
      parallel.Collection.from_glob(
        join(self.input().path, 'Applications.txt'), parallel.CSVDictLineInput(delimiter='\t')),
      mapper=Applications2JSONMapper(),
      reducer=parallel.IdentityReducer(),
      output_prefix=self.output().path)
class Products2JSON(luigi.Task):
  """Luigi task: convert Products.txt (TSV) into the products DB."""
  def requires(self):
    return ExtractDrugsFDAFiles()
  def output(self):
    return luigi.LocalTarget(PRODUCTS_DB)
  def run(self):
    parallel.mapreduce(
      parallel.Collection.from_glob(
        join(self.input().path, 'Products.txt'), parallel.CSVDictLineInput(delimiter='\t')),
      mapper=Product2JSONMapper(),
      reducer=parallel.IdentityReducer(),
      output_prefix=self.output().path)
class MarketingStatus2JSON(luigi.Task):
  """Luigi task: convert MarketingStatus.txt into the marketing-status DB,
  resolving status ids via MarketingStatus_Lookup.txt."""
  def requires(self):
    return ExtractDrugsFDAFiles()
  def output(self):
    return luigi.LocalTarget(MARKETING_STATUS_DB)
  def run(self):
    # Build id -> status-name lookup from the TSV lookup file.
    with open(join(EXTRACTED_DIR, 'MarketingStatus_Lookup.txt')) as fin:
      rows = (line.split('\t') for line in fin)
      doc_lookup = {row[0]: row[1] for row in rows}
    parallel.mapreduce(
      parallel.Collection.from_glob(
        join(self.input().path, 'MarketingStatus.txt'), parallel.CSVDictLineInput(delimiter='\t')),
      mapper=MarketingStatus2JSONMapper(doc_lookup=doc_lookup),
      reducer=parallel.IdentityReducer(),
      output_prefix=self.output().path)
class TE2JSON(luigi.Task):
  """Luigi task: convert TE.txt (therapeutic equivalence) into the TE DB.

  NOTE(review): reuses MarketingStatus_Lookup.txt for the lookup, same as
  MarketingStatus2JSON — presumably intentional; verify against the mapper.
  """
  def requires(self):
    return ExtractDrugsFDAFiles()
  def output(self):
    return luigi.LocalTarget(TE_DB)
  def run(self):
    with open(join(EXTRACTED_DIR, 'MarketingStatus_Lookup.txt')) as fin:
      rows = (line.split('\t') for line in fin)
      doc_lookup = {row[0]: row[1] for row in rows}
    parallel.mapreduce(
      parallel.Collection.from_glob(
        join(self.input().path, 'TE.txt'), parallel.CSVDictLineInput(delimiter='\t')),
      mapper=TE2JSONMapper(doc_lookup=doc_lookup),
      reducer=parallel.IdentityReducer(),
      output_prefix=self.output().path)
class Submissions2JSON(luigi.Task):
  """Luigi task: convert Submissions.txt into the submissions DB,
  resolving class codes via SubmissionClass_Lookup.txt."""
  def requires(self):
    return ExtractDrugsFDAFiles()
  def output(self):
    return luigi.LocalTarget(SUBMISSIONS_DB)
  def run(self):
    # Lookup maps id -> [class code, class description] (two columns kept).
    with open(join(EXTRACTED_DIR, 'SubmissionClass_Lookup.txt')) as fin:
      rows = ( line.split('\t') for line in fin )
      doc_lookup = {row[0]: [row[1], row[2]] for row in rows}
    parallel.mapreduce(
      parallel.Collection.from_glob(
        join(self.input().path, 'Submissions.txt'), parallel.CSVDictLineInput(delimiter='\t')),
      mapper=Submissions2JSONMapper(doc_lookup=doc_lookup),
      reducer=parallel.IdentityReducer(),
      output_prefix=self.output().path)
class SubmissionPropertyType2JSON(luigi.Task):
  """Luigi task: convert SubmissionPropertyType.txt into its DB.

  Uses ListReducer because one submission may have several property types.
  """
  def requires(self):
    return ExtractDrugsFDAFiles()
  def output(self):
    return luigi.LocalTarget(SUBMISSION_PROPERTY_TYPE_DB)
  def run(self):
    parallel.mapreduce(
      parallel.Collection.from_glob(
        join(self.input().path, 'SubmissionPropertyType.txt'), parallel.CSVDictLineInput(delimiter='\t')),
      mapper=SubmissionPropertyType2JSONMapper(),
      reducer=parallel.ListReducer(),
      output_prefix=self.output().path)
class ApplicationsDocs2JSON(luigi.Task):
  """Luigi task: convert ApplicationDocs.txt into its DB, resolving doc
  types via ApplicationsDocsType_Lookup.txt. ListReducer: a submission can
  carry multiple documents."""
  def requires(self):
    return ExtractDrugsFDAFiles()
  def output(self):
    return luigi.LocalTarget(APPLICATIONS_DOCS_DB)
  def run(self):
    with open(join(EXTRACTED_DIR, 'ApplicationsDocsType_Lookup.txt')) as fin:
      rows = (line.split('\t') for line in fin)
      doc_lookup = {row[0]: row[1].rstrip() for row in rows}
    parallel.mapreduce(
      parallel.Collection.from_glob(
        join(self.input().path, 'ApplicationDocs.txt'), parallel.CSVDictLineInput(delimiter='\t')),
      mapper=ApplicationsDocs2JSONMapper(doc_lookup=doc_lookup),
      reducer=parallel.ListReducer(),
      output_prefix=self.output().path)
class MergeAllMapper(parallel.Mapper):
  """Joins the six per-file DBs into one record per application.

  All side DBs are loaded fully into in-memory dicts in map_shard, then each
  application record is enriched with its products (plus marketing status and
  TE codes) and submissions (plus property types and application docs).
  """
  def __init__(self, applications_db_path, products_db_path, applications_docs_db_path, submissions_db_path,
               submissions_property_type_db_path, marketing_status_path, te_db_path):
    self.applications_db_path = applications_db_path
    self.products_db_path = products_db_path
    self.applications_docs_db_path = applications_docs_db_path
    self.submissions_db_path = submissions_db_path
    self.submissions_property_type_db_path = submissions_property_type_db_path
    self.marketing_status_db_path = marketing_status_path
    self.te_db_path = te_db_path
  def map_shard(self, map_input, map_output):
    """Pre-load every side DB into dicts before delegating to the base class."""
    # Transform product DB into a dictionary keyed by application number
    self.products_dict = {}
    for key, product in parallel.ShardedDB.open(self.products_db_path).range_iter(None, None):
      # Keys look like "<app_number>-<product_number>"; group by app number.
      split = key.split('-')
      app_key = split[0]
      products_arr = [] if self.products_dict.get(app_key) is None else self.products_dict.get(app_key)
      products_arr.append(product)
      self.products_dict[app_key] = products_arr
    # Transform all sub-product DBs into a dictionary keyed by application number & product number
    self.marketing_status_dict = {}
    for key, value in parallel.ShardedDB.open(self.marketing_status_db_path).range_iter(None, None):
      self.marketing_status_dict[key] = value
    self.te_dict = {}
    for key, value in parallel.ShardedDB.open(self.te_db_path).range_iter(None, None):
      self.te_dict[key] = value
    # Transform submissions DB into a dictionary keyed by application number
    self.submissions_dict = {}
    for key, submission in parallel.ShardedDB.open(self.submissions_db_path).range_iter(None, None):
      split = key.split('-')
      app_key = split[0]
      submissions_arr = [] if self.submissions_dict.get(app_key) is None else self.submissions_dict.get(app_key)
      submissions_arr.append(submission)
      self.submissions_dict[app_key] = submissions_arr
    # Transform all sub-submission DBs into a dictionary keyed by application number & submission number
    self.submissions_property_type_dict = {}
    for key, value in parallel.ShardedDB.open(self.submissions_property_type_db_path).range_iter(None, None):
      self.submissions_property_type_dict[key] = value
    self.applications_docs_dict = {}
    for key, value in parallel.ShardedDB.open(self.applications_docs_db_path).range_iter(None, None):
      self.applications_docs_dict[key] = value
    parallel.Mapper.map_shard(self, map_input, map_output)
  def map(self, key, application, out):
    """Enrich one application record in place and emit it under the same key."""
    self.add_products(application)
    self.add_submissions(application)
    out.add(key, application)
  def add_products(self, application):
    """Attach the product list (if any) for this application."""
    # Strip the "NDA"/"ANDA"/"BLA" prefix: side-DB keys use digits only.
    key = re.sub("[^0-9]", "", application['application_number'])
    products = self.products_dict.get(key)
    if products:
      products = self.add_marketing_status(products, key)
      products = self.add_te(products, key)
      application['products'] = products
  def add_marketing_status(self, products, app_key):
    """Copy marketing_status onto each product that has an entry."""
    for product in products:
      key = build_products_key(app_key, product)
      if key in self.marketing_status_dict:
        marketing_json = self.marketing_status_dict.get(key)
        product['marketing_status'] = marketing_json['marketing_status'].rstrip()
    return products
  def add_te(self, products, app_key):
    """Copy TE code onto each product; marketing_status from TE is only a
    fallback when the product has none yet."""
    for product in products:
      key = build_products_key(app_key, product)
      if key in self.te_dict:
        te_json = self.te_dict.get(key)
        if te_json.get('te_code'):
          product['te_code'] = te_json['te_code'].rstrip()
        if not 'marketing_status' in product and 'marketing_status' in te_json:
          product['marketing_status'] = te_json['marketing_status'].rstrip()
    return products
  def add_submissions(self, application):
    """Attach the submission list (if any) for this application."""
    key = re.sub("[^0-9]", "", application['application_number'])
    submissions = self.submissions_dict.get(key)
    if submissions:
      submissions = self.add_submissions_property_type(submissions, key)
      submissions = self.add_applications_docs(submissions, key)
      application['submissions'] = submissions
  def add_submissions_property_type(self, submissions, app_key):
    """Copy submission_property_type onto each submission that has one."""
    for submission in submissions:
      key = build_submissions_key(app_key, submission)
      if key in self.submissions_property_type_dict:
        prop_type = self.submissions_property_type_dict.get(key)
        submission['submission_property_type'] = prop_type
    return submissions
  def add_applications_docs(self, submissions, app_key):
    """Copy application_docs onto each submission that has any."""
    for submission in submissions:
      key = build_submissions_key(app_key, submission)
      if key in self.applications_docs_dict:
        submission['application_docs'] = self.applications_docs_dict.get(key)
    return submissions
class MergeAll(luigi.Task):
  """Luigi task: join all six intermediate DBs into one merged DB.

  Runs with a single worker/shard because MergeAllMapper loads every side DB
  into memory per shard (see TODO below).
  """
  def requires(self):
    # Order matters: indices into self.input() below follow this list.
    return [Applications2JSON(), Products2JSON(), ApplicationsDocs2JSON(), Submissions2JSON(),
            SubmissionPropertyType2JSON(), MarketingStatus2JSON(), TE2JSON()]
  def output(self):
    return luigi.LocalTarget(MERGED_DB)
  def run(self):
    applications_db = self.input()[0].path
    products_db = self.input()[1].path
    applications_docs_db = self.input()[2].path
    submissions_db = self.input()[3].path
    submissions_property_type_db = self.input()[4].path
    marketing_status = self.input()[5].path
    te_db = self.input()[6].path
    parallel.mapreduce(
      parallel.Collection.from_sharded(applications_db),
      mapper=MergeAllMapper(applications_db, products_db, applications_docs_db, submissions_db,
                            submissions_property_type_db, marketing_status, te_db),
      reducer=parallel.IdentityReducer(),
      output_prefix=self.output().path,
      map_workers=1,
      num_shards=1)  # TODO: improve the code to avoid having to limit number of shards to one
class AnnotateDrugsFDA(luigi.Task):
  """Luigi task: annotate merged records with the openFDA harmonization data."""
  def requires(self):
    return [MergeAll(), CombineHarmonization()]
  def output(self):
    return luigi.LocalTarget(ANNOTATED_DB)
  def run(self):
    input_db = self.input()[0].path
    harmonized_file = self.input()[1].path
    parallel.mapreduce(
      parallel.Collection.from_sharded(input_db),
      mapper=AnnotateMapper(harmonized_file),
      reducer=parallel.IdentityReducer(),
      output_prefix=self.output().path,
      num_shards=1)  # TODO: improve the code to avoid having to limit number of shards to one
class LoadJSON(index_util.LoadJSONBase):
  """Final task: load the annotated DB into the 'drugsfda' Elasticsearch index."""
  index_name = 'drugsfda'
  type_name = 'drugsfda'
  mapping_file = './schemas/drugsfda_mapping.json'
  data_source = AnnotateDrugsFDA()
  use_checksum = False
  optimize_index = True
  # Called by the base class; ignores its argument and reports the timestamp
  # of the first raw-data file as the dataset's last-update date.
  last_update_date = lambda _: first_file_timestamp(os.path.dirname(RAW_DATA_FILE))
# Entry point: hand control to luigi's command-line task runner.
if __name__ == '__main__':
  luigi.run()
| [
"noreply@github.com"
] | tralfamadoriangray.noreply@github.com |
c83df4ad926d22384cca579d1feb35abebc2bfec | 866b7615b666875e6cdc024f951fe9baaf728993 | /python/ytdl | 02e0be2579d4651971560b28fffcbbb066f4d6f6 | [] | no_license | sk1418/testit | fcfdb9c6fc51d26d3351bdf6b7783c88b9338d9f | 1628d8bf4de8f28d2441d2785428a74c7eee2932 | refs/heads/master | 2022-04-30T18:36:05.574197 | 2022-03-19T21:58:11 | 2022-03-19T22:05:13 | 8,029,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,842 | #!/usr/bin/python
###################################################
# python script to download youtube video, supports different quality/format options
#
# It supports user choosing different video
# quality/format to download.
#
# Version: 1.0
#
# Kent 2010.10.19
# Email: kent dot yuan at gmail dot com.
#
# Todo:
# - output directory as parameter
# - batch downloading
#
###################################################
#new line from SK Desktop
import urllib2, re ,os, sys
# lq,mq,hq are at this moment not used. In later version (if there was), batch downloading would be
# supported. Then they will make sense.
# Format-id groupings by quality tier (currently unused; kept for a planned
# batch-download feature).
lq = ('34','0','5','13','17')
mq = ('6','35','18')
hq = ('37','32')
#--------------------------------------
# YouTube "fmt" id -> (container, resolution, audio description).
quality ={
        '34' : ('flv', '320x240', 'Stereo 44KHz MP3'),
        '0'  : ('flv', '320x240', 'Mono 22KHz MP3'),
        '5'  : ('flv', '320x240', 'Mono 44KHz MP3'),
        '13' : ('3gp', '176x144', 'Stereo 8KHz'),
        '17' : ('3gp', '176x144', 'Mono 22KHz'),
        '6'  : ('flv', '480x360', 'Mono 44KHz MP3'),
        '35' : ('flv', '640x380', 'Stereo 44KHz MP3'),
        '18' : ('mp4', '480x360', 'Stereo 44KHz AAC H.264'),
        '22' : ('mp4', '1280x720', 'Stereo 44KHz AAC H.264'),
        '37' : ('mp4', '1920x1080', 'Stereo 44KHz AAC H.264')
}
def usage():
    # Print command-line usage (Python 2 print statements).
    print "YouTube Video Downloader"
    print "Usage:"
    print "\t" + sys.argv[0] + " <YouTube Link>"
def __output(flag, msg):
    # Tiny console logger. flag selects the style:
    #   'i' = info, 'e' = error, 'n' = indented note, 's' = separator line
    # (msg is ignored for 's'). Unknown flags print nothing.
    if flag == 'i':
        print "[INFO] %s" % msg
    elif flag == 'e':
        print "\n[ERROR] %s\n" % msg
    elif flag == 'n':
        print "    %s" % msg
    elif flag == 's':
        print "\n========================================\n"
def __inputValid(n, dlMap):
    # Validate the user's menu choice against the download map.
    # Returns False for empty input, exits the program on 'x', otherwise
    # checks that n maps to an existing 1-based entry of dlMap.
    if not n:
        return False
    if n=='x':
        __output('i','User aborted')
        sys.exit()
    else:
        try:
            # NOTE(review): n == '0' yields index -1 and silently accepts the
            # last entry; negative numbers behave similarly — confirm intent.
            return dlMap.has_key(dlMap.keys()[int(n)-1])
        except:
            # Non-numeric or out-of-range input.
            __output('e', 'Invalid user input')
            return False
# Decode percent-escapes in a URL string, e.g. "%2F" -> "/".
# Bug fix: valid hex digits are 0-9/a-f/A-F. The previous class [0-9a-hA-H]
# also matched 'g' and 'h', so input such as "%gh" made int(..., 16) raise
# ValueError inside the substitution callback. (re.MULTILINE was a no-op for
# this pattern and has been dropped.)
_ud = re.compile('%([0-9a-fA-F]{2})')
urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def getVideoLinks(ytLink):
    # Fetch the video-info page for a YouTube URL, let the user pick a
    # format/quality from the decoded fmt_url_map, then download via wget.
    #get video id
    try:
        vid = ytLink.split("v=")[1].split("&")[0]
        videoInfo = urllib2.urlopen("http://www.youtube.com/get_video_info?video_id="+vid).read()
    except:
        # Bare except: any parse or network failure ends the program.
        __output('e',"video link cannot be parsed. Please check the youtube link")
        sys.exit(1)
    regex="""fmt_url_map=(.*?)&"""
    p = re.compile(regex,re.DOTALL)
    m = p.search(videoInfo)
    if not m:
        __output('e','Parsing video info failed. Link:'+ ytLink)
        sys.exit(1)
    #building up the dlMap
    # Each comma-separated entry is "<fmt_id>|<download_url>".
    dlinks = urldecode(m.group(1)).split(",")
    dlMap = {}
    for item in dlinks:
        dlMap[item.split('|')[0]] = urldecode(item.split('|')[1])
    #waiting user's input of video quality
    n = None
    while ( not __inputValid(n,dlMap)):
        __output('i', 'Available video format/qualities:')
        __output('s','===')
        for k in dlMap.keys():
            if quality.has_key(k):
                __output('n', str(dlMap.keys().index(k)+1) + " - " + str(quality[k]))
        __output('n', 'x - Quit')
        __output('s','===')
        n = raw_input("Choose the format and quality of the video you wanna download:")
    qIdx = dlMap.keys()[int(n)-1]
    filename = vid + "." + quality[qIdx][0]
    url = dlMap[qIdx]
    # NOTE(review): url/filename are interpolated into a shell command without
    # escaping — a crafted URL could inject shell syntax; confirm acceptable
    # for a personal script or switch to subprocess with an argument list.
    cmd = 'wget -O ~/Desktop/'+filename+' "' + url + '"'
    __output('s','===')
    __output('i','downloading ' + '-'.join(quality[qIdx]) + ' as ' + filename)
    __output('s','===')
    #print cmd
    os.system(cmd)
# Script entry: require exactly one non-empty argument (the YouTube URL).
if sys.argv.__len__()<2 or not sys.argv[1]:
    usage()
    sys.exit(1)
else:
    getVideoLinks(sys.argv[1])
| [
"kent.yuan@Gmail.com"
] | kent.yuan@Gmail.com | |
1bc34216c87bb8b8ae5c10f623ab5485230d4190 | 27eefa6e7f102378c433798d08154ae9555377c3 | /hrank_pyt/classes_complex_numbers.py | a0711c7e80171bcb74fd1b8e1807d7c8427549eb | [] | no_license | akaydinb/hackerrank-python | d0604c90cf55b785f406df240f9bac1f6d422409 | cdc3a6179e7ba1202da1a7767f1e4a8e025abb36 | refs/heads/master | 2022-12-10T09:28:44.903586 | 2020-08-27T11:30:32 | 2020-08-27T11:30:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,676 | py | #!/usr/bin/python3
# Classes: Dealing with complex numbers
import math
class Complex(object):
    """A minimal complex-number value type with arithmetic and 2-decimal
    string formatting in the form ``A.BC+D.EFi``."""

    def __init__(self, real, imaginary):
        """Store the real and imaginary components."""
        self.real = real
        self.imaginary = imaginary

    def __add__(self, no):
        """Component-wise sum."""
        return Complex(self.real + no.real, self.imaginary + no.imaginary)

    def __sub__(self, no):
        """Component-wise difference."""
        return Complex(self.real - no.real, self.imaginary - no.imaginary)

    def __mul__(self, no):
        """Standard complex product: (a+bi)(c+di) = (ac-bd) + (ad+bc)i."""
        re_part = self.real * no.real - self.imaginary * no.imaginary
        im_part = self.real * no.imaginary + self.imaginary * no.real
        return Complex(re_part, im_part)

    def __truediv__(self, no):
        """Divide by multiplying with the conjugate and scaling by |no|^2."""
        denom = no.real ** 2 + no.imaginary ** 2
        re_part = (self.real * no.real + self.imaginary * no.imaginary) / denom
        im_part = (self.imaginary * no.real - self.real * no.imaginary) / denom
        return Complex(re_part, im_part)

    def mod(self):
        """Magnitude, returned as a Complex with a zero imaginary part."""
        return Complex(math.sqrt(self.real ** 2 + self.imaginary ** 2), 0)

    def __str__(self):
        """Two-decimal rendering with an explicit sign before the i-term."""
        if self.imaginary == 0:
            return f"{self.real:.2f}+0.00i"
        if self.real == 0:
            if self.imaginary >= 0:
                return f"0.00+{self.imaginary:.2f}i"
            return f"0.00-{abs(self.imaginary):.2f}i"
        if self.imaginary > 0:
            return f"{self.real:.2f}+{self.imaginary:.2f}i"
        return f"{self.real:.2f}-{abs(self.imaginary):.2f}i"
if __name__ == '__main__':
    # Read two complex numbers, one per line, as "real imaginary" floats.
    c = map(float, input().split())
    d = map(float, input().split())
    x = Complex(*c)
    y = Complex(*d)
    # Print sum, difference, product, quotient and both moduli, one per line.
    print(*map(str, [x+y, x-y, x*y, x/y, x.mod(), y.mod()]), sep='\n')
| [
"9348636+akaydinb@users.noreply.github.com"
] | 9348636+akaydinb@users.noreply.github.com |
fbeecaa4293179be24399fb4bb5c7eee64229141 | 50a8c057fd6d8cd0ec96ca9b79c9328432335650 | /ubisqsh.py | e945ad61a8de0c5092e765f01c13e8f9f6c84a5b | [
"MIT"
] | permissive | KurSh/qc_modem_tools | ee804b566f83e30dde13e4aaf2f55e1a95c74fda | fce2f00e226f0fce82f064d218bf6adb70ea8647 | refs/heads/master | 2023-07-07T19:16:43.556182 | 2020-12-25T20:25:52 | 2020-12-25T20:25:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,643 | py | #!/usr/bin/env python3
from struct import unpack
import os,sys
def parse_ubihdr(rf):
    """Parse a UBI erase-counter header ("UBI#") at the current file offset.

    Reads hdrsize, blksize and data as consecutive big-endian u32 fields at
    offset 0x10, and crc at offset 0x3C, all relative to the starting
    position.  On success returns [hdrsize, blksize, data, crc] and restores
    the file position; on a magic mismatch returns None with the position
    left just past the 4 magic bytes (same as the original behaviour).
    """
    start = rf.tell()
    if rf.read(4) != b"UBI#":
        return None
    rf.seek(start + 0x10)
    hdrsize, blksize, data = unpack(">III", rf.read(12))
    rf.seek(start + 0x3C)
    (crc,) = unpack(">I", rf.read(4))
    rf.seek(start)
    return [hdrsize, blksize, data, crc]
def parse_ubihdr2(rf):
    """Parse a UBI volume-id header ("UBI!") at the current file offset.

    flag is a little-endian u32 immediately after the magic; blk and crc are
    big-endian u32 fields at offsets 0xC and 0x3C relative to the starting
    position.  On success returns [flag, blk, crc] and restores the file
    position; on a magic mismatch returns None with the position left just
    past the 4 magic bytes (same as the original behaviour).
    """
    start = rf.tell()
    if rf.read(4) != b"UBI!":
        return None
    (flag,) = unpack("<I", rf.read(4))
    rf.seek(start + 0xC)
    (blk,) = unpack(">I", rf.read(4))
    rf.seek(start + 0x3C)
    (crc,) = unpack(">I", rf.read(4))
    rf.seek(start)
    return [flag, blk, crc]
def main():
    # Walk <filename> in fixed 0x40000-byte UBI erase blocks; for each block
    # whose volume-header flag passes the mask test, copy the payload after
    # the data offset into <filename>.out, otherwise skip the block.
    if len(sys.argv)<2:
        print("Usage: ubisqsh.py <filename>")
        sys.exit()
    filename=sys.argv[1]
    with open(filename,'rb') as rf:
        with open(filename+".out","wb") as wf:
            pos=0
            while pos<os.stat(filename).st_size:
                # NOTE(review): parse_ubihdr/parse_ubihdr2 return None when
                # the magic does not match, which would make these unpacks
                # raise TypeError — assumes a well-formed UBI image; confirm.
                hdrsize,blksize,data,crc=parse_ubihdr(rf)
                rf.seek(pos+hdrsize)
                flag,blk,crc=parse_ubihdr2(rf)
                # NOTE(review): the f-prefix below is redundant — the strings
                # use %-formatting, not {} placeholders (harmless as-is).
                if flag&0xF000000==0:
                    print(f"Blk %d Flag %x WR" %(blk,flag))
                    rf.seek(pos + blksize)
                    rdata=rf.read(0x40000-blksize)
                    wf.write(rdata)
                else:
                    print(f"Blk %d Flag %x SK" %(blk,flag))
                    rf.seek(pos+0x40000)
                pos+=0x40000
    print("Done.")
if __name__=="__main__":
main() | [
"info@revskills.de"
] | info@revskills.de |
e2d13ee6442f4b0bb406ce1c2a80561ba9bce769 | 71f24b3427a242ba87451d6f057daedfecd2b05d | /scaffold/spec/prod/wsgi.py | 9043e52b21871c797ec5582d2f5fa9d9982c87ff | [] | no_license | gdipkf1986/django-scaffold | 8b0d4b41f493047316a3f0573c7db8d6d0432466 | 9e5b1e02352eaf7893260480c9102cea72ffcf3d | refs/heads/master | 2020-02-26T15:26:49.403292 | 2015-10-05T04:32:32 | 2015-10-05T04:32:32 | 34,237,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | # -*- coding: utf-8 -*-
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "scaffold.spec.prod.settings")
from scaffold.wsgi import application | [
"huangf@garena.com"
] | huangf@garena.com |
6ddb0bb22d3420954fafaf311e40e650fa1b866f | 6dba9105e4e2c6f4b1d54deb1b96d7c930dc6375 | /flask_blog/__init__.py | 9490f9902e56ef79dbc902773d044b917bea40aa | [] | no_license | angelabauer/new-blog-test | 196bdd03b38af639b99b79a6b1ea970d6d21d54a | 826ecc12010b719521ca5535d8a5209b29d252d9 | refs/heads/master | 2023-01-02T03:18:20.769690 | 2020-10-23T15:34:18 | 2020-10-23T15:34:18 | 306,678,113 | 11 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,322 | py | from flask import Flask, render_template, redirect, url_for, flash, abort
from flask_bootstrap import Bootstrap
from flask_ckeditor import CKEditor
from datetime import date
from functools import wraps
from werkzeug.security import generate_password_hash, check_password_hash
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.orm import relationship
from flask_login import UserMixin, login_user, LoginManager, login_required, current_user, logout_user
from forms import LoginForm, RegisterForm, CreatePostForm, CommentForm
from flask_gravatar import Gravatar
import os
app = Flask(__name__)
app.config['SECRET_KEY'] = os.environ.get("SECRET_KEY")
ckeditor = CKEditor(app)
Bootstrap(app)
gravatar = Gravatar(app, size=100, rating='g', default='retro', force_default=False, force_lower=False, use_ssl=False, base_url=None)
##CONNECT TO DB
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get("DATABASE_URL")
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
login_manager = LoginManager()
login_manager.init_app(app)
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: reload the User for the id stored in the session."""
    return User.query.get(int(user_id))
##CONFIGURE TABLE
class User(UserMixin, db.Model):
    """Registered site user; may author blog posts and comments."""
    __tablename__ = "users"
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(100), unique=True)
    # Stores the salted pbkdf2:sha256 hash, never the plain password.
    password = db.Column(db.String(100))
    name = db.Column(db.String(100))
    # One-to-many back-references to this user's posts and comments.
    posts = relationship("BlogPost", back_populates="author")
    comments = relationship("Comment", back_populates="comment_author")
class BlogPost(db.Model):
    """A blog entry written by one User, with attached comments."""
    __tablename__ = "blog_posts"
    id = db.Column(db.Integer, primary_key=True)
    author_id = db.Column(db.Integer, db.ForeignKey("users.id"))
    author = relationship("User", back_populates="posts")
    title = db.Column(db.String(250), unique=True, nullable=False)
    subtitle = db.Column(db.String(250), nullable=False)
    # Human-readable creation date, e.g. "August 01, 2020" (see add_new_post).
    date = db.Column(db.String(250), nullable=False)
    body = db.Column(db.Text, nullable=False)
    img_url = db.Column(db.String(250), nullable=False)
    comments = relationship("Comment", back_populates="parent_post")
class Comment(db.Model):
    """A comment left by a User on one BlogPost."""
    __tablename__ = "comments"
    id = db.Column(db.Integer, primary_key=True)
    post_id = db.Column(db.Integer, db.ForeignKey("blog_posts.id"))
    author_id = db.Column(db.Integer, db.ForeignKey("users.id"))
    parent_post = relationship("BlogPost", back_populates="comments")
    comment_author = relationship("User", back_populates="comments")
    text = db.Column(db.Text, nullable=False)
db.create_all()
def admin_only(f):
    """Route decorator: allow only the admin account (user id 1), else 403.

    Bug fix: check authentication before touching ``current_user.id`` —
    Flask-Login's AnonymousUserMixin has no ``id`` attribute, so the original
    code raised AttributeError (HTTP 500) for logged-out visitors instead of
    returning 403.
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if not current_user.is_authenticated or current_user.id != 1:
            return abort(403)
        return f(*args, **kwargs)
    return decorated_function
@app.route('/')
def get_all_posts():
    """Home page: list every blog post."""
    posts = BlogPost.query.all()
    return render_template("index.html", all_posts=posts, current_user=current_user)
@app.route('/register', methods=["GET", "POST"])
def register():
    """Create a new account, or redirect to login if the email is taken.

    On success the new user is logged in immediately and sent to the home
    page.  (Cleanup: the duplicate-email check previously ran the same query
    twice and leaked a debug ``print``; both removed.)
    """
    form = RegisterForm()
    if form.validate_on_submit():
        existing_user = User.query.filter_by(email=form.email.data).first()
        if existing_user:
            # User already exists — send them to the login page instead.
            flash("You've already signed up with that email, log in instead!")
            return redirect(url_for('login'))
        # Store only a salted hash of the password.
        hash_and_salted_password = generate_password_hash(
            form.password.data,
            method='pbkdf2:sha256',
            salt_length=8
        )
        new_user = User(
            email=form.email.data,
            name=form.name.data,
            password=hash_and_salted_password,
        )
        db.session.add(new_user)
        db.session.commit()
        login_user(new_user)
        return redirect(url_for("get_all_posts"))
    return render_template("register.html", form=form, current_user=current_user)
@app.route('/login', methods=["GET", "POST"])
def login():
    """Authenticate by email/password; on success start a Flask-Login session."""
    form = LoginForm()
    if form.validate_on_submit():
        email = form.email.data
        password = form.password.data
        user = User.query.filter_by(email=email).first()
        # Email doesn't exist or password incorrect.
        if not user:
            flash("That email does not exist, please try again.")
            return redirect(url_for('login'))
        elif not check_password_hash(user.password, password):
            flash('Password incorrect, please try again.')
            return redirect(url_for('login'))
        else:
            login_user(user)
            return redirect(url_for('get_all_posts'))
    return render_template("login.html", form=form, current_user=current_user)
@app.route('/logout')
def logout():
    """End the current session and return to the home page."""
    logout_user()
    return redirect(url_for('get_all_posts'))
@app.route("/post/<int:post_id>", methods=["GET", "POST"])
def show_post(post_id):
    """Display one post and accept new comments from logged-in users."""
    form = CommentForm()
    # NOTE(review): query.get returns None for an unknown id, which the
    # template would then fail on — consider get_or_404.
    requested_post = BlogPost.query.get(post_id)
    if form.validate_on_submit():
        if not current_user.is_authenticated:
            flash("You need to login or register to comment.")
            return redirect(url_for("login"))
        new_comment = Comment(
            text=form.comment_text.data,
            comment_author=current_user,
            parent_post=requested_post
        )
        db.session.add(new_comment)
        db.session.commit()
        # NOTE(review): falls through to render after POST (no redirect), so a
        # browser refresh can resubmit the comment — confirm acceptable.
    return render_template("post.html", post=requested_post, form=form, current_user=current_user)
@app.route("/about")
def about():
    """Static about page."""
    return render_template("about.html", current_user=current_user)
@app.route("/contact")
def contact():
    """Static contact page."""
    return render_template("contact.html", current_user=current_user)
@app.route("/new-post", methods=["GET", "POST"])
@admin_only
def add_new_post():
    """Admin-only: create a blog post dated today and return to the home page."""
    form = CreatePostForm()
    if form.validate_on_submit():
        new_post = BlogPost(
            title=form.title.data,
            subtitle=form.subtitle.data,
            body=form.body.data,
            img_url=form.img_url.data,
            author=current_user,
            date=date.today().strftime("%B %d, %Y")
        )
        db.session.add(new_post)
        db.session.commit()
        return redirect(url_for("get_all_posts"))
    return render_template("make-post.html", form=form, current_user=current_user)
@app.route("/edit-post/<int:post_id>", methods=["GET", "POST"])
@admin_only
def edit_post(post_id):
    """Admin-only: edit an existing post (author and date stay unchanged)."""
    post = BlogPost.query.get(post_id)
    # Pre-populate the form with the current post values.
    edit_form = CreatePostForm(
        title=post.title,
        subtitle=post.subtitle,
        img_url=post.img_url,
        author=current_user,
        body=post.body
    )
    if edit_form.validate_on_submit():
        post.title = edit_form.title.data
        post.subtitle = edit_form.subtitle.data
        post.img_url = edit_form.img_url.data
        post.body = edit_form.body.data
        db.session.commit()
        return redirect(url_for("show_post", post_id=post.id))
    return render_template("make-post.html", form=edit_form, is_edit=True, current_user=current_user)
@app.route("/delete/<int:post_id>")
@admin_only
def delete_post(post_id):
    """Admin-only: delete a post and return to the home page."""
    post_to_delete = BlogPost.query.get(post_id)
    db.session.delete(post_to_delete)
    db.session.commit()
    return redirect(url_for('get_all_posts'))
def get_app():
    """Return the module-level Flask application (e.g. for a WSGI server)."""
    return app
if __name__ == "__main__":
    # Development entry point: listen on all interfaces, port 5000.
    app.run(host='0.0.0.0', port=5000)
| [
"angela@londonappbrewery.com"
] | angela@londonappbrewery.com |
2203b01c4dc8cf240f7580252e4dbfe1e8defc5c | cb1d53abffe60987615119fb3ee82eb27b5c2655 | /Day 9.py | ba6a17f599238e837ea84b62ca8d3d8d0af90738 | [] | no_license | NathanielB123/2019-Advent-of-Code | 4a247e1cdb659fc311e066e900991b17578ab8d2 | 4d310a2d6444f6fb84c606a53d16ec49014a183b | refs/heads/master | 2021-12-08T07:41:30.980341 | 2021-10-02T09:14:07 | 2021-10-02T09:14:07 | 226,645,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,813 | py | Code=[1102,34463338,34463338,63,1007,63,34463338,63,1005,63,53,1101,0,3,1000,109,988,209,12,9,1000,209,6,209,3,203,0,1008,1000,1,63,1005,63,65,1008,1000,2,63,1005,63,904,1008,1000,0,63,1005,63,58,4,25,104,0,99,4,0,104,0,99,4,17,104,0,99,0,0,1102,23,1,1004,1102,1,26,1000,1102,897,1,1028,1101,27,0,1012,1102,33,1,1001,1102,32,1,1007,1101,39,0,1005,1101,0,29,1018,1101,0,0,1020,1101,1,0,1021,1101,0,21,1002,1102,1,35,1014,1101,0,36,1009,1102,1,38,1006,1102,1,251,1024,1102,28,1,1017,1102,37,1,1008,1102,1,329,1026,1102,25,1,1011,1102,31,1,1013,1102,892,1,1029,1102,242,1,1025,1102,1,881,1022,1102,22,1,1003,1102,874,1,1023,1101,20,0,1016,1101,24,0,1019,1101,0,326,1027,1101,0,34,1015,1102,1,30,1010,109,-2,2102,1,7,63,1008,63,36,63,1005,63,205,1001,64,1,64,1105,1,207,4,187,1002,64,2,64,109,9,21101,40,0,6,1008,1013,43,63,1005,63,227,1105,1,233,4,213,1001,64,1,64,1002,64,2,64,109,26,2105,1,-9,4,239,1001,64,1,64,1106,0,251,1002,64,2,64,109,-15,1205,2,263,1105,1,269,4,257,1001,64,1,64,1002,64,2,64,109,-9,2102,1,0,63,1008,63,36,63,1005,63,295,4,275,1001,64,1,64,1106,0,295,1002,64,2,64,109,-14,1207,10,38,63,1005,63,311,1105,1,317,4,301,1001,64,1,64,1002,64,2,64,109,28,2106,0,4,1106,0,335,4,323,1001,64,1,64,1002,64,2,64,109,-8,1206,6,351,1001,64,1,64,1106,0,353,4,341,1002,64,2,64,109,-1,2107,33,-7,63,1005,63,369,1106,0,375,4,359,1001,64,1,64,1002,64,2,64,109,-9,2108,26,-1,63,1005,63,395,1001,64,1,64,1106,0,397,4,381,1002,64,2,64,109,3,1201,-2,0,63,1008,63,38,63,1005,63,419,4,403,1105,1,423,1001,64,1,64,1002,64,2,64,109,-13
,2101,0,9,63,1008,63,23,63,1005,63,445,4,429,1105,1,449,1001,64,1,64,1002,64,2,64,109,11,1208,1,32,63,1005,63,471,4,455,1001,64,1,64,1106,0,471,1002,64,2,64,109,17,21108,41,38,-4,1005,1019,487,1105,1,493,4,477,1001,64,1,64,1002,64,2,64,109,6,1206,-9,511,4,499,1001,64,1,64,1106,0,511,1002,64,2,64,109,-23,21102,42,1,8,1008,1014,42,63,1005,63,533,4,517,1106,0,537,1001,64,1,64,1002,64,2,64,109,-3,2107,36,5,63,1005,63,555,4,543,1106,0,559,1001,64,1,64,1002,64,2,64,109,-6,1202,5,1,63,1008,63,21,63,1005,63,581,4,565,1106,0,585,1001,64,1,64,1002,64,2,64,109,1,1208,10,40,63,1005,63,605,1001,64,1,64,1106,0,607,4,591,1002,64,2,64,109,7,1201,0,0,63,1008,63,42,63,1005,63,631,1001,64,1,64,1106,0,633,4,613,1002,64,2,64,109,1,21107,43,42,7,1005,1013,649,1105,1,655,4,639,1001,64,1,64,1002,64,2,64,109,7,21108,44,44,3,1005,1016,677,4,661,1001,64,1,64,1106,0,677,1002,64,2,64,109,-7,21102,45,1,9,1008,1015,44,63,1005,63,701,1001,64,1,64,1106,0,703,4,683,1002,64,2,64,109,13,21101,46,0,-7,1008,1012,46,63,1005,63,729,4,709,1001,64,1,64,1105,1,729,1002,64,2,64,109,-13,2101,0,3,63,1008,63,33,63,1005,63,753,1001,64,1,64,1106,0,755,4,735,1002,64,2,64,109,14,1205,1,773,4,761,1001,64,1,64,1105,1,773,1002,64,2,64,109,-23,1202,10,1,63,1008,63,30,63,1005,63,797,1001,64,1,64,1105,1,799,4,779,1002,64,2,64,109,13,2108,22,-7,63,1005,63,817,4,805,1106,0,821,1001,64,1,64,1002,64,2,64,109,-11,1207,5,24,63,1005,63,843,4,827,1001,64,1,64,1105,1,843,1002,64,2,64,109,11,21107,47,48,7,1005,1017,861,4,849,1106,0,865,1001,64,1,64,1002,64,2,64,109,15,2105,1,-2,1001,64,1,64,1106,0,883,4,871,1002,64,2,64,109,10,2106,0,-7,4,889,1106,0,901,1001,64,1,64,4,64,99,21102,1,27,1,21102,1,915,0,1105,1,922,21201,1,28510,1,204,1,99,109,3,1207,-2,3,63,1005,63,964,21201,-2,-1,1,21102,1,942,0,1106,0,922,22102,1,1,-1,21201,-2,-3,1,21101,957,0,0,1106,0,922,22201,1,-1,-2,1105,1,968,21202,-2,1,-2,109,-3,2106,0,0]
Index=0
InputNum=0
RelativeBase=0
Memory=100000
for _ in range(Memory):
Code.append(0)
def _operand_address(code, index, offset, relative_base):
    """Resolve the operand `offset` slots after the opcode at `index` to the
    memory address holding its value, honouring the intcode parameter modes:

      0 = position  (the operand is itself an address),
      1 = immediate (the operand slot holds the value directly),
      2 = relative  (the operand is an offset from the relative base).

    Returning an address in every mode lets callers uniformly read
    code[addr], and write code[addr] for destination operands (which the
    intcode spec guarantees are never in immediate mode).
    """
    digits = str(code[index])
    # Mode digit for this operand sits `offset` places left of the two
    # opcode digits; absent (short) digits default to position mode.
    pos = len(digits) - 2 - offset
    mode = int(digits[pos]) if pos >= 0 else 0
    if mode == 0:
        return code[index + offset]
    if mode == 2:
        return code[index + offset] + relative_base
    return index + offset  # immediate: the value lives in the operand slot


# Fetch/decode/execute until the halt opcode (99) is reached.  The original
# inlined the mode-decoding above ~20 times per opcode; it is now shared.
while not Code[Index] == 99:
    opcode = int(str(Code[Index])[-2:])  # low two digits select the operation
    if opcode in (1, 2, 7, 8):
        # Three-operand instructions: add, multiply, less-than, equals.
        lhs = _operand_address(Code, Index, 1, RelativeBase)
        rhs = _operand_address(Code, Index, 2, RelativeBase)
        dest = _operand_address(Code, Index, 3, RelativeBase)
        if opcode == 1:
            Code[dest] = Code[lhs] + Code[rhs]
        elif opcode == 2:
            Code[dest] = Code[lhs] * Code[rhs]
        elif opcode == 7:
            Code[dest] = 1 if Code[lhs] < Code[rhs] else 0
        else:  # opcode 8
            Code[dest] = 1 if Code[lhs] == Code[rhs] else 0
        Index += 4
    elif opcode == 3:
        # Read one integer from stdin into the destination operand.
        Code[_operand_address(Code, Index, 1, RelativeBase)] = int(input())
        Index += 2
    elif opcode == 4:
        # Emit the value of the single operand.
        print(Code[_operand_address(Code, Index, 1, RelativeBase)])
        Index += 2
    elif opcode in (5, 6):
        # Conditional jumps: 5 = jump-if-true, 6 = jump-if-false.
        test = _operand_address(Code, Index, 1, RelativeBase)
        target = _operand_address(Code, Index, 2, RelativeBase)
        if (Code[test] != 0) == (opcode == 5):
            Index = Code[target]
        else:
            Index += 3
    elif opcode == 9:
        # Adjust the relative base by the single operand's value.
        RelativeBase += Code[_operand_address(Code, Index, 1, RelativeBase)]
        Index += 2
    else:
        # Unknown opcode: report the machine state and stop, as the
        # original script did.
        print("ERROR")
        print(Index)
        print(Code[Index])
        break
| [
"30476356+NathanielB@users.noreply.github.com"
] | 30476356+NathanielB@users.noreply.github.com |
f26b9ee73be0d7493db27af80ec42b7699b9f50b | 5d8be0b8cfe03baf50a5607e98a41018471c79a0 | /ledbrick.py | 0892428a28c6fe4201add69a112525e7fff45f58 | [] | no_license | LCampbell2s/pythonschooling | 26faff3488d81483a378d886c6721b77916a6453 | f45d34ab5d9d43a1e94e36fe4ac6885851d24b6a | refs/heads/main | 2023-05-13T20:13:06.035617 | 2021-06-03T15:45:02 | 2021-06-03T15:45:02 | 371,017,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | import explorerhat as eh
import time
# Blink demo for the Explorer HAT: light output channel one for two seconds.
eh.output.one.on()     # drive output channel one high (LED on)
print("Lights on!")
time.sleep(2)          # keep the LED lit for two seconds
eh.output.one.off()    # drive the channel low again (LED off)
print("Lights out!")
| [
"noreply@github.com"
] | LCampbell2s.noreply@github.com |
8b6ffe717347139cea85ca695721274b02808e86 | d89df0f93114b02d50c2326538a13e03bafe6e1c | /projet/contact/migrations/0001_initial.py | 3097cc62521a71816724c75b1e5f2725fdd886b5 | [] | no_license | MJoshua25/projet_sp_dj_resto_g5 | 82212fe82a930cffce50e64577ed8739675f5b2c | 9b7afbc7c92df9dce52a9e50dddd305d97ed73b3 | refs/heads/master | 2022-12-04T06:50:55.414563 | 2019-10-18T12:10:56 | 2019-10-18T12:10:56 | 213,174,126 | 1 | 0 | null | 2022-11-22T04:34:11 | 2019-10-06T13:31:39 | JavaScript | UTF-8 | Python | false | false | 1,623 | py | # Generated by Django 2.2.6 on 2019-10-08 20:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the contact app: Message and Newsletter tables.

    Auto-generated by Django; do not edit the operations by hand — create a
    new migration instead.
    """

    # First migration for this app (no prior migration state exists).
    initial = True

    # No migrations from other apps are required before this one.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Moderation/read flag shared by both models in this app.
                ('statut', models.BooleanField(default=False)),
                # Creation and last-modification timestamps, managed by Django.
                ('date_add', models.DateTimeField(auto_now_add=True)),
                ('date_update', models.DateTimeField(auto_now=True)),
                ('nom', models.CharField(max_length=250)),
                ('sujet', models.CharField(max_length=250)),
                ('email', models.EmailField(max_length=254)),
                ('message', models.TextField()),
            ],
            options={
                'verbose_name': 'Message',
                'verbose_name_plural': 'Messages',
            },
        ),
        migrations.CreateModel(
            name='Newsletter',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('statut', models.BooleanField(default=False)),
                ('date_add', models.DateTimeField(auto_now_add=True)),
                ('date_update', models.DateTimeField(auto_now=True)),
                # Subscriber e-mail address for the newsletter list.
                ('email', models.EmailField(max_length=254)),
            ],
            options={
                'verbose_name': 'Newsletter',
                'verbose_name_plural': 'Newsletters',
            },
        ),
    ]
| [
"cedric.gbele@uvci.edu.ci"
] | cedric.gbele@uvci.edu.ci |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.