index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
4,100 | 3653c6fce33467600a3eea72578ed995606bfc03 | import string
#takes file as input, outputs a dictionary of keys from the file
#file should be in format (apiName, key/id)
#dictionary key = apiName, value = key/id
def getKeys(f):
    """Read a comma-separated (apiName, key/id) file into a dict.

    Each line is 'apiName,key'; the header row (key 'apiName') is
    discarded if present.  Fixes: the original never closed the file
    handle; 'with' now guarantees it.
    """
    keys = {}
    with open(f, 'r') as fh:
        for line in fh:
            apiInfo = line.split(',')
            # .strip() removes the trailing newline / surrounding spaces
            # (equivalent to the original strip(string.whitespace)).
            keys[apiInfo[0]] = apiInfo[1].strip()
    keys.pop('apiName', None)  # drop header row if the file has one
    return keys
#print(getKeys('keys.txt'))
|
4,101 | d5f1601d11eb54e6c3dafab0137ec8f2358bb568 | import json
import boto3
import os
from helper import getEC2Regions, sendDataToSNS, OPTOUT_TAG, SNS_NOTIFICATION_IIAS_EC2
def getEC2FilteredRegionalInstanceInfo(region):
    """Collect id + tags for every EC2 instance in *region*, then drop
    the instances that carry the opt-out tag."""
    client = boto3.client('ec2', region_name=region)
    collected = []
    for page in client.get_paginator('describe_instances').paginate():
        for reservation in page['Reservations']:
            for inst in reservation['Instances']:
                collected.append({'InstanceId': inst['InstanceId'],
                                  'Tags': inst.get('Tags', [])})
    return excludeOptedOutEC2Instances(collected)
def isOutputedOutEC2Instance(instanceInfo):
    """Return True iff the instance has the opt-out tag set to 'True'.

    Fix: the original returned True or (implicitly) None; this always
    returns a bool.  Name kept (despite the typo) for the callers.
    """
    return any(tag['Key'] == '{}'.format(OPTOUT_TAG) and tag['Value'] == 'True'
               for tag in instanceInfo['Tags'])
def excludeOptedOutEC2Instances(ec2Instances):
    """Return the ids of the instances that have NOT opted out.

    Opted-out instances are logged and skipped.
    Fix: log-message typo 'Exlcuding' -> 'Excluding'.
    """
    filteredEC2InstanceIdList = []
    for instanceInfo in ec2Instances:
        if isOutputedOutEC2Instance(instanceInfo):
            print('Excluding instance {}'.format(instanceInfo))
        else:
            filteredEC2InstanceIdList.append(instanceInfo['InstanceId'])
    return filteredEC2InstanceIdList
def gatherEC2Info():
    """Map region name -> in-scope instance ids, omitting empty regions."""
    ec2RegionDict = {}
    for region in getEC2Regions():
        instances = getEC2FilteredRegionalInstanceInfo(region)
        if instances:
            ec2RegionDict[region] = instances
    return ec2RegionDict
def handler(event, context):
    """Lambda entry point: publish gathered EC2 info to SNS, if any."""
    ec2RegionalInfo = gatherEC2Info()
    if not ec2RegionalInfo:
        print('No new EC2 instances in IIAS scope')
        return
    print('Sending following ec2 info for CW : {}'.format(ec2RegionalInfo))
    # Message attribute tells subscribers what this notification is for.
    messageAttributes = {
        'notificationFor': {
            'DataType': 'String',
            'StringValue': SNS_NOTIFICATION_IIAS_EC2
        }
    }
    sendDataToSNS(ec2RegionalInfo, messageAttributes)
4,102 | 420c3944de0a5436a9824604fd6caf27706eb99c | def print_duplicates(arr):
uniques = set()
for elem in arr:
if elem in uniques:
print(elem, end=' ')
else:
uniques.add(elem)
|
4,103 | bc843abecfc076c9413498f9ebba0da0857ad3cc | from eth_account.account import Account
from nucypher.characters.lawful import Alice, Bob, Ursula
from nucypher.network.middleware import RestMiddleware
from nucypher.data_sources import DataSource
from umbral.keys import UmbralPublicKey
import sys
import os
import binascii
import shutil
import maya
import datetime
# Demo configuration: port of the seed ("teacher") Ursula node and the
# m-of-n threshold for the re-encryption policy.
teacher_rest_port = 3501
m = 2
n = 3
# Load the seed node's serialized metadata written by a previous run.
with open("examples-runtime-cruft/node-metadata-{}".format(teacher_rest_port), "r") as f:
    f.seek(0)
    teacher_bytes = binascii.unhexlify(f.read())
URSULA = Ursula.from_bytes(teacher_bytes, federated_only=True)
print("Will learn from {}".format(URSULA))
# Scratch directories for this run; wiped and recreated every time.
SHARED_CRUFTSPACE = "{}/examples-runtime-cruft".format(os.path.dirname(os.path.abspath(__file__)))
CRUFTSPACE = "{}/drm".format(SHARED_CRUFTSPACE)
CERTIFICATE_DIR = "{}/certs".format(CRUFTSPACE)
shutil.rmtree(CRUFTSPACE, ignore_errors=True)
os.mkdir(CRUFTSPACE)
os.mkdir(CERTIFICATE_DIR)
URSULA.save_certificate_to_disk(CERTIFICATE_DIR)
class ETHAccount(object):
    """Mixin for anything that can transfer ETH to a contract-like object."""
    def send_eth_to(self, to, amount):
        # Delegate to the receiver's fallback function, Solidity-style.
        return to.fallback(self, amount)
class Author(object):
    """The author of the book."""
    balance = 0  # ETH balance accumulated from sales
    def __init__(self, eth_pk_bytes, character):
        self.character = character
        self.account = Account.create(eth_pk_bytes)
class Book(object):
    """A piece of content sold through the book store."""
    def __init__(self, author):
        self.author = author
        self.label = b"book"
        self.content = b"PlainText of the book"
class BookStoreEthContract(object):
    """
    The contract receiving the rewards and selling the books
    """
    def __init__(self, book, author, price, purchase_event_hook):
        self.book = book
        self.rewardee = author
        self.price = price
        self.purchase_event_hook = purchase_event_hook
    def fallback(self, sender, amount):
        """Accept a payment; on the exact price, move the funds and fire
        the delivery hook.  Any other amount is ignored (returns None)."""
        print("Received %s ETH from %s" % (amount, sender.account.address))
        if amount != self.price:
            return None
        sender.balance -= amount
        self.rewardee.balance += amount
        return self.purchase_event_hook(sender)
class BookStoreDelivery(object):
    """Delivers a purchased book via a NuCypher policy and message kit."""
    def __init__(self, book):
        self.book = book
        self.author = book.author
    def deliver_purchase(self, to):
        """Grant *to* access to the book and return everything the buyer
        needs to retrieve it: (author pubkey, policy pubkey, data-source
        pubkey, label, message kit).

        Bug fix: the original ignored `to` and `self.author`, reading the
        module globals `author` and `first_buyer` instead, so it only
        worked for that one hard-coded pair.
        """
        policy_end_datetime = maya.now() + datetime.timedelta(days=5)
        # m and n are the module-level policy threshold parameters.
        policy = self.author.character.grant(to.character, self.book.label,
                                             m=m, n=n,
                                             expiration=policy_end_datetime)
        author_pubkey = bytes(self.author.character.stamp)
        data_source = DataSource(policy_pubkey_enc=policy.public_key)
        message_kit, _signature = data_source.encapsulate_single_message(self.book.content)
        data_source_public_key = bytes(data_source.stamp)
        return (author_pubkey, policy.public_key, data_source_public_key,
                self.book.label, message_kit)
class Buyer(ETHAccount):
    """
    The person who pays for the book and receives content
    """
    balance = 100  # starting ETH balance
    def __init__(self, eth_pk_bytes, character):
        self.character = character
        self.account = Account.create(eth_pk_bytes)
# Demo flow: an author (Alice) sells a book to a buyer (Bob) through a
# simulated contract; payment triggers policy grant + encrypted delivery.
author = Author(b"Author's ETH account", Alice(network_middleware=RestMiddleware(),
                                               known_nodes=(URSULA,),
                                               federated_only=True,
                                               known_certificates_dir=CERTIFICATE_DIR,))
author.character.start_learning_loop(now=True)
book = Book(author)
first_buyer = Buyer(b"First Buyer's ETH account", Bob(known_nodes=(URSULA,),
                                                      federated_only=True,
                                                      known_certificates_dir=CERTIFICATE_DIR))
book_store_delivery = BookStoreDelivery(book)
book_store_contract = BookStoreEthContract(book, author, 10, book_store_delivery.deliver_purchase)
# Paying the exact price triggers deliver_purchase via the fallback hook.
author_public_key, policy_public_key, data_source_public_key, label, kit = first_buyer.send_eth_to(book_store_contract, 10)
first_buyer.character.join_policy(label,  # The label - he needs to know what data he's after.
                                  bytes(author.character.stamp),  # To verify the signature, he'll need Alice's public key.
                                  # He can also bootstrap himself onto the network more quickly
                                  # by providing a list of known nodes at this time.
                                  node_list=[("localhost", 3601)]
                                  )
datasource_as_understood_by_bob = DataSource.from_public_keys(
    policy_public_key=policy_public_key,
    datasource_public_key=data_source_public_key,
    label=label
)
alice_pubkey_restored_from_ancient_scroll = UmbralPublicKey.from_bytes(author_public_key)
# Bob retrieves and decrypts the book content through the network.
delivered_cleartexts = first_buyer.character.retrieve(message_kit=kit,
                                                      data_source=datasource_as_understood_by_bob,
                                                      alice_verifying_key=alice_pubkey_restored_from_ancient_scroll)
print(delivered_cleartexts)
|
4,104 | 2fb8bce3a64787dbaf5a3bb3da53f70005048467 |
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# In[2]:
import os
GFE_PATH = "C:\Haely\MS2017\sem2\EE 259\Project\grammatical_facial_expression"
def load_a_affirm_data(gfe_path=GFE_PATH):
csv_patha = os.path.join(gfe_path, "a_affirmative_datapoints.csv")
print(gfe_path)
return pd.read_csv(csv_patha)
def load_a_affirm_target(gfe_path=GFE_PATH):
csv_targeta = os.path.join(gfe_path, "a_affirmative_targets.csv")
print(gfe_path)
return pd.read_csv(csv_targeta)
def load_a_cond_data(gfe_path=GFE_PATH):
csv_pathc = os.path.join(gfe_path, "a_conditional_datapoints.csv")
print(gfe_path)
return pd.read_csv(csv_pathc)
def load_a_cond_target(gfe_path=GFE_PATH):
csv_targetc = os.path.join(gfe_path, "a_conditional_targets.csv")
print(gfe_path)
return pd.read_csv(csv_targetc)
def load_a_doubtq_data(gfe_path=GFE_PATH):
csv_pathd = os.path.join(gfe_path, "a_doubt_question_datapoints.csv")
print(gfe_path)
return pd.read_csv(csv_pathd)
def load_a_doubtq_target(gfe_path=GFE_PATH):
csv_targetd = os.path.join(gfe_path, "a_doubts_question_targets.csv")
print(gfe_path)
return pd.read_csv(csv_targetd)
def load_a_emphasis_data(gfe_path=GFE_PATH):
csv_pathe = os.path.join(gfe_path, "a_emphasis_datapoints.csv")
print(gfe_path)
return pd.read_csv(csv_pathe)
def load_a_emphasis_target(gfe_path=GFE_PATH):
csv_targete = os.path.join(gfe_path, "a_emphasis_targets.csv")
print(gfe_path)
return pd.read_csv(csv_targete)
def load_a_neg_data(gfe_path=GFE_PATH):
csv_pathn = os.path.join(gfe_path, "a_negative_datapoints.csv")
print(gfe_path)
return pd.read_csv(csv_pathn)
def load_a_neg_target(gfe_path=GFE_PATH):
csv_targetn = os.path.join(gfe_path, "a_negative_targets.csv")
print(gfe_path)
return pd.read_csv(csv_targetn)
def load_a_rel_data(gfe_path=GFE_PATH):
csv_pathr = os.path.join(gfe_path, "a_relative_datapoints.csv")
print(gfe_path)
return pd.read_csv(csv_pathr)
def load_a_rel_target(gfe_path=GFE_PATH):
csv_targetr = os.path.join(gfe_path, "a_relative_targets.csv")
print(gfe_path)
return pd.read_csv(csv_targetr)
def load_a_topics_data(gfe_path=GFE_PATH):
csv_patht = os.path.join(gfe_path, "a_topics_datapoints.csv")
print(gfe_path)
return pd.read_csv(csv_patht)
def load_a_topics_target(gfe_path=GFE_PATH):
csv_targett = os.path.join(gfe_path, "a_topics_targets.csv")
print(gfe_path)
return pd.read_csv(csv_targett)
def load_a_wh_data(gfe_path=GFE_PATH):
csv_pathw = os.path.join(gfe_path, "a_wh_question_datapoints.csv")
print(gfe_path)
return pd.read_csv(csv_pathw)
def load_a_wh_target(gfe_path=GFE_PATH):
csv_targetw = os.path.join(gfe_path, "a_wh_question_targets.csv")
print(gfe_path)
return pd.read_csv(csv_targetw)
def load_a_yn_data(gfe_path=GFE_PATH):
csv_pathy = os.path.join(gfe_path, "a_yn_question_datapoints.csv")
print(gfe_path)
return pd.read_csv(csv_pathy)
def load_a_yn_target(gfe_path=GFE_PATH):
csv_targety = os.path.join(gfe_path, "a_yn_question_targets.csv")
print(gfe_path)
return pd.read_csv(csv_targety)
# In[3]:
def _make_gfe_loader_b(filename):
    """Factory for the speaker-"b" CSV loaders (see the "a" block above).

    Each returned loader keeps the original signature (gfe_path=GFE_PATH)
    and the original side effect of printing the directory it reads from.
    """
    def _loader(gfe_path=GFE_PATH):
        csv_path = os.path.join(gfe_path, filename)
        print(gfe_path)
        return pd.read_csv(csv_path)
    return _loader

# Speaker "b" datapoint/target loaders (public names and files unchanged).
load_b_affirm_data = _make_gfe_loader_b("b_affirmative_datapoints.csv")
load_b_affirm_target = _make_gfe_loader_b("b_affirmative_targets.csv")
load_b_cond_data = _make_gfe_loader_b("b_conditional_datapoints.csv")
load_b_cond_target = _make_gfe_loader_b("b_conditional_targets.csv")
load_b_doubtq_data = _make_gfe_loader_b("b_doubt_question_datapoints.csv")
load_b_doubtq_target = _make_gfe_loader_b("b_doubt_question_targets.csv")
load_b_emphasis_data = _make_gfe_loader_b("b_emphasis_datapoints.csv")
load_b_emphasis_target = _make_gfe_loader_b("b_emphasis_targets.csv")
load_b_neg_data = _make_gfe_loader_b("b_negative_datapoints.csv")
load_b_neg_target = _make_gfe_loader_b("b_negative_targets.csv")
load_b_rel_data = _make_gfe_loader_b("b_relative_datapoints.csv")
load_b_rel_target = _make_gfe_loader_b("b_relative_targets.csv")
load_b_topics_data = _make_gfe_loader_b("b_topics_datapoints.csv")
load_b_topics_target = _make_gfe_loader_b("b_topics_targets.csv")
load_b_wh_data = _make_gfe_loader_b("b_wh_question_datapoints.csv")
load_b_wh_target = _make_gfe_loader_b("b_wh_question_targets.csv")
load_b_yn_data = _make_gfe_loader_b("b_yn_question_datapoints.csv")
load_b_yn_target = _make_gfe_loader_b("b_yn_question_targets.csv")
# In[4]:
# Load speaker-A data/target frames (requires the CSVs to exist locally).
affirmda = load_a_affirm_data()
affirmta = load_a_affirm_target()
condda = load_a_cond_data()
condta = load_a_cond_target()
doubtqda = load_a_doubtq_data()
doubtqta = load_a_doubtq_target()
emphda = load_a_emphasis_data()
emphta = load_a_emphasis_target()
negda = load_a_neg_data()
negta = load_a_neg_target()
relda = load_a_rel_data()
relta = load_a_rel_target()
topicsda = load_a_topics_data()
topicsta = load_a_topics_target()
whda = load_a_wh_data()
whta = load_a_wh_target()
ynda = load_a_yn_data()
ynta = load_a_yn_target()
# In[5]:
# Load speaker-B data/target frames.
affirmdb = load_b_affirm_data()
affirmtb = load_b_affirm_target()
conddb = load_b_cond_data()
condtb = load_b_cond_target()
doubtqdb = load_b_doubtq_data()
doubtqtb = load_b_doubtq_target()
emphdb = load_b_emphasis_data()
emphtb = load_b_emphasis_target()
negdb = load_b_neg_data()
negtb = load_b_neg_target()
reldb = load_b_rel_data()
reltb = load_b_rel_target()
topicsdb = load_b_topics_data()
topicstb = load_b_topics_target()
whdb = load_b_wh_data()
whtb = load_b_wh_target()
yndb = load_b_yn_data()
yntb = load_b_yn_target()
# In[8]:
# Stack speaker A and B frames per expression type.
users_combine_affirmd = pd.concat([affirmda, affirmdb],ignore_index=True)
affirm_y = pd.concat([affirmta,affirmtb],ignore_index=True)
users_combine_condd = pd.concat([condda, conddb],ignore_index=True)
cond_y = pd.concat([condta, condtb],ignore_index=True)
users_combine_doubtqd = pd.concat([doubtqda, doubtqdb],ignore_index=True)
doubtq_y = pd.concat([doubtqta, doubtqtb],ignore_index=True)
users_combine_emphd = pd.concat([emphda, emphdb],ignore_index=True)
emph_y = pd.concat([emphta, emphtb],ignore_index=True)
users_combine_negd = pd.concat([negda, negdb],ignore_index=True)
neg_y = pd.concat([negta, negtb],ignore_index=True)
users_combine_reld = pd.concat([relda, reldb],ignore_index=True)
rel_y = pd.concat([relta, reltb],ignore_index=True)
users_combine_topicsd = pd.concat([topicsda, topicsdb],ignore_index=True)
topics_y = pd.concat([topicsta, topicstb],ignore_index=True)
users_combine_whd = pd.concat([whda, whdb],ignore_index=True)
wh_y = pd.concat([whta, whtb],ignore_index=True)
users_combine_ynd = pd.concat([ynda, yndb],ignore_index=True)
yn_y = pd.concat([ynta, yntb],ignore_index=True)
# In[11]:
# NOTE(review): every ".drop([10])" below is a no-op — the result is never
# assigned back.  Confirm whether row 10 was actually meant to be removed.
# NOTE(review): the target series is appended as a column of the combined
# feature frame, and the later ".iloc[:,1:]" only drops the FIRST column,
# so the target column appears to remain among the features passed to
# train_test_split — possible target leakage; verify.
users_combine_affirmd['affirm_y']=affirm_y
affirm_y.drop([10])
# In[12]:
users_combine_condd['cond_y']=cond_y
cond_y.drop([10])
# In[13]:
users_combine_doubtqd['doubtq_y']=doubtq_y
doubtq_y.drop([10])
# In[14]:
users_combine_emphd['emph_y']=emph_y
emph_y.drop([10])
# In[15]:
users_combine_negd['neg_y']=neg_y
neg_y.drop([10])
# In[16]:
users_combine_reld['rel_y']=rel_y
rel_y.drop([10])
# In[17]:
users_combine_topicsd['topics_y']=topics_y
topics_y.drop([10])
# In[18]:
users_combine_whd['wh_y']=wh_y
wh_y.drop([10])
# In[19]:
users_combine_ynd['yn_y']=yn_y
yn_y.drop([10])
# In[22]:
from sklearn.model_selection import train_test_split
# Stratified train/test splits per expression type.
ya=users_combine_affirmd['affirm_y']
Xa_train,Xa_test,ya_train,ya_test = train_test_split(users_combine_affirmd.iloc[:,1:],ya,stratify=ya)
yc=users_combine_condd['cond_y']
Xc_train,Xc_test,yc_train,yc_test = train_test_split(users_combine_condd.iloc[:,1:],yc,stratify=yc)
yd=users_combine_doubtqd['doubtq_y']
Xd_train,Xd_test,yd_train,yd_test = train_test_split(users_combine_doubtqd.iloc[:,1:],yd,stratify=yd)
ye=users_combine_emphd['emph_y']
Xe_train,Xe_test,ye_train,ye_test = train_test_split(users_combine_emphd.iloc[:,1:],ye,stratify=ye)
yn=users_combine_negd['neg_y']
Xn_train,Xn_test,yn_train,yn_test = train_test_split(users_combine_negd.iloc[:,1:],yn,stratify=yn)
yr=users_combine_reld['rel_y']
Xr_train,Xr_test,yr_train,yr_test = train_test_split(users_combine_reld.iloc[:,1:],yr,stratify=yr)
yt=users_combine_topicsd['topics_y']
Xt_train,Xt_test,yt_train,yt_test = train_test_split(users_combine_topicsd.iloc[:,1:],yt,stratify=yt)
yw=users_combine_whd['wh_y']
Xw_train,Xw_test,yw_train,yw_test = train_test_split(users_combine_whd.iloc[:,1:],yw,stratify=yw)
yy=users_combine_ynd['yn_y']
Xy_train,Xy_test,yy_train,yy_test = train_test_split(users_combine_ynd.iloc[:,1:],yy,stratify=yy)
# In[25]:
from sklearn.preprocessing import scale
from scipy import stats
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
# Fit one LDA per expression type and print the test error rate.
# NOTE: the single lda_clf instance is refit for each task, so only the
# last fit survives — acceptable here since only error rates are printed.
lda_clf = LDA(solver='lsqr',store_covariance=True)
lda_clf.fit(Xa_train,ya_train)
ya_predicted = lda_clf.predict(Xa_test)
print('\n The error rate of the LDA model for affirm is {0:.2f}% '.format(100*np.mean(ya_predicted!=ya_test)))
lda_clf.fit(Xc_train,yc_train)
yc_predicted = lda_clf.predict(Xc_test)
print('\n The error rate of the LDA model for conditional is {0:.2f}% '.format(100*np.mean(yc_predicted!=yc_test)))
lda_clf.fit(Xd_train,yd_train)
yd_predicted = lda_clf.predict(Xd_test)
print('\n The error rate of the LDA model for doubt questions is {0:.2f}% '.format(100*np.mean(yd_predicted!=yd_test)))
lda_clf.fit(Xe_train,ye_train)
ye_predicted = lda_clf.predict(Xe_test)
print('\n The error rate of the LDA model for emphasis is {0:.2f}% '.format(100*np.mean(ye_predicted!=ye_test)))
lda_clf.fit(Xn_train,yn_train)
yn_predicted = lda_clf.predict(Xn_test)
print('\n The error rate of the LDA model for negative is {0:.2f}% '.format(100*np.mean(yn_predicted!=yn_test)))
lda_clf.fit(Xr_train,yr_train)
yr_predicted = lda_clf.predict(Xr_test)
print('\n The error rate of the LDA model for relativr is {0:.2f}% '.format(100*np.mean(yr_predicted!=yr_test)))
lda_clf.fit(Xt_train,yt_train)
yt_predicted = lda_clf.predict(Xt_test)
print('\n The error rate of the LDA model for topics is {0:.2f}% '.format(100*np.mean(yt_predicted!=yt_test)))
lda_clf.fit(Xw_train,yw_train)
yw_predicted = lda_clf.predict(Xw_test)
print('\n The error rate of the LDA model for wh questions is {0:.2f}% '.format(100*np.mean(yw_predicted!=yw_test)))
lda_clf.fit(Xy_train,yy_train)
yy_predicted = lda_clf.predict(Xy_test)
print('\n The error rate of the LDA model for yes or no is {0:.2f}% '.format(100*np.mean(yy_predicted!=yy_test)))
|
4,105 | c0bd060990d00ab50c9f2d3060b7f975ff16e1ab | import sys, time
from machine import Pin
print('LOAD: blinker.py')
def blink_connected_to_wifi(pin=23):
    # "Connected" signal: 3 slow blinks (0.5s) then 4 fast blinks (0.2s).
    _blink_pattern(pin, [[3, 0.5, 0.5], [4, 0.2, 0.2]])
def blink_not_connected_to_wifi(pin=23):
    # "Not connected" signal: alternating fast-pair / slow-single groups.
    _blink_pattern(pin, [[2, 0.2, 0.2], [1, 0.5, 0.5], [2, 0.2, 0.2], [1, 0.5, 0.5]])
# pin - the pin, connected to LED
# pattern - the array of items: [blink_count, on-period, off-period]
def _blink_pattern(pin, pattern):
    """Blink the LED on *pin* following *pattern*, then release the pin.

    On error the LED is switched off; the pin is always returned to
    input mode afterwards.  Fix: the bare `except` (which also swallowed
    KeyboardInterrupt) is narrowed to Exception.
    """
    p = Pin(pin, Pin.OUT)
    try:
        for count, on_s, off_s in pattern:
            for _ in range(count):
                p.value(1)
                time.sleep(on_s)
                p.value(0)
                time.sleep(off_s)
    except Exception:
        p.value(0)
    Pin(pin, Pin.IN)
|
4,106 | f3a1a926feabcabc870f0a41ae239939c331d09d | from pathlib import Path
file = Path(__file__).parent / 'input.txt'
# Puzzle parameters: the row scanned in part 1 and the inclusive search
# box for part 2.
Y = 2000000
MAX_X = 4000000
MIN_X = 0
MAX_Y = 4000000
MIN_Y = 0
# Smaller values used with the sample input:
# file = Path(__file__).parent / 'test_input.txt'
# Y = 10
# MAX_X = 20
# MIN_X = 0
# MAX_Y = 20
# MIN_Y = 0
text = file.read_text().splitlines()
class Beacon():
    """A beacon position, remembering the sensor that reported it."""
    def __init__(self, pos, sensor) -> None:
        self.pos = pos
        self.sensor = sensor
    def __str__(self) -> str:
        return f"B{self.pos}"
    def __repr__(self) -> str:
        return f"{self}"
    def __hash__(self) -> int:
        return hash(self.pos)
    @property
    def x(self):
        return self.pos[0]
    @property
    def y(self):
        return self.pos[1]
    def distance_to(self, pos):
        """Manhattan distance from this beacon to *pos*.

        Two fixes: the original declared this a @property (a property
        getter cannot take an argument, so any call raised TypeError),
        and the y term was missing abs().
        """
        x, y = pos
        return abs(self.x - x) + abs(self.y - y)
class Sensor():
    """A sensor whose Manhattan coverage radius reaches its nearest beacon."""
    def __init__(self, pos, beacon) -> None:
        self.pos = pos
        self.beacon = Beacon(beacon, self)
        self.range = self.distance_to(self.beacon.pos)
        # Bounding box of the coverage diamond.
        self.min_x, self.max_x = self.x - self.range, self.x + self.range
        self.min_y, self.max_y = self.y - self.range, self.y + self.range
    def __str__(self) -> str:
        return f"S{self.pos}"
    def __repr__(self) -> str:
        return str(self)
    def __hash__(self) -> int:
        return hash(self.pos)
    @staticmethod
    def from_text(text):
        """Parse one 'Sensor at x=.., y=..: closest beacon is at x=.., y=..' line."""
        parts = text.split()
        sensor_xy = (int(parts[2][2:-1]), int(parts[3][2:-1]))
        beacon_xy = (int(parts[-2][2:-1]), int(parts[-1][2:]))
        return Sensor(sensor_xy, beacon_xy)
    @property
    def x(self):
        return self.pos[0]
    @property
    def y(self):
        return self.pos[1]
    def distance_to(self, pos):
        """Manhattan distance from the sensor to *pos*."""
        px, py = pos
        return abs(self.x - px) + abs(self.y - py)
    def covers(self, pos):
        """True if *pos* lies inside this sensor's coverage diamond."""
        return self.distance_to(pos) <= self.range
class Grid():
    """Sparse 2-D field of sensors and beacons, keyed by (x, y) position."""
    def __init__(self, sensors, beacons) -> None:
        self.sensors = sensors  # dict: (x, y) -> Sensor
        self.beacons = beacons  # dict: (x, y) -> Beacon
    @property
    def min_x(self):
        """Leftmost x touched by any beacon or any sensor's coverage."""
        bx = min([b.x for b in self.beacons.values()])
        sx = min([s.min_x for s in self.sensors.values()])
        return min(bx, sx)
    @property
    def max_x(self):
        bx = max([b.x for b in self.beacons.values()])
        sx = max([s.max_x for s in self.sensors.values()])
        return max(bx, sx)
    @property
    def min_y(self):
        by = min([b.y for b in self.beacons.values()])
        sy = min([s.min_y for s in self.sensors.values()])
        return min(by, sy)
    @property
    def max_y(self):
        by = max([b.y for b in self.beacons.values()])
        sy = max([s.max_y for s in self.sensors.values()])
        return max(by, sy)
    def __getitem__(self, idx):
        """Row *idx* as a list of (x, Beacon|Sensor|None) pairs
        for x in [min_x, max_x)."""
        row = []
        for x in range(self.min_x, self.max_x):
            pos = (x, idx)
            if pos in self.beacons:
                row.append((x, self.beacons[pos]))
            elif pos in self.sensors:
                row.append((x, self.sensors[pos]))
            else:
                row.append((x, None))
        return row
    def __iter__(self):
        # Bug fix: the original read self.ys, an attribute that was never
        # defined anywhere, so iterating a Grid raised AttributeError.
        self.__row = self.min_y
        return self
    def __next__(self):
        if self.__row > self.max_y:
            raise StopIteration
        row = self[self.__row]
        self.__row += 1
        return row
    def is_covered(self, pos):
        """True if any sensor's coverage diamond contains *pos*."""
        for s in self.sensors.values():
            if s.covers(pos):
                return True
        return False
# Parse every input line into a sensor (and its nearest beacon),
# indexed by position.
beacons = {}
sensors = {}
for line in text:
    s = Sensor.from_text(line)
    beacons[s.beacon.pos] = s.beacon
    sensors[s.pos] = s
grid = Grid(sensors, beacons)
def print_row(grid, row_idx):
    """Render one grid row: B=beacon, S=sensor, #=covered, .=empty."""
    cells = []
    for x, occupant in grid[row_idx]:
        if isinstance(occupant, Beacon):
            cells.append('B')
        elif isinstance(occupant, Sensor):
            cells.append('S')
        elif grid.is_covered((x, row_idx)):
            cells.append('#')
        else:
            cells.append('.')
    return ''.join(cells)
def count_covered(prow):
    """Count the '#' (covered) cells in a rendered row string."""
    return sum(1 for ch in prow if ch == '#')
# Part 1: count positions on row Y covered by at least one sensor.
print("Part 1:", count_covered(print_row(grid, Y)))
def walk_perimeters(grid):
    """Find an uncovered position just outside some sensor's diamond,
    inside the [0, MAX_X] x [0, MAX_Y] box, and return its tuning
    frequency (x * 4000000 + y)."""
    for sensor in grid.sensors.values():
        # walk the perimeter and check if each adjacent position is
        # covered. If not, we have a winner
        for dx in range(sensor.range + 2):
            dy = (sensor.range + 1) - dx
            # The four quadrants of the diamond's outer ring.
            for signx, signy in [(-1,-1),(-1,1),(1,-1),(1,1)]:
                x = sensor.x + (dx * signx)
                y = sensor.y + (dy * signy)
                # Skip points outside the allowed search box.
                if not(0 <= x <= MAX_X and 0 <= y <= MAX_Y):
                    continue
                if not grid.is_covered((x,y)):
                    return x * 4000000 + y
print("Part 2:", walk_perimeters(grid)) |
4,107 | 17b8fec5583f2544bd02a2409528082fa1dc2a1e | import numpy as np
# Read n, then two n x n integer matrices from stdin row by row.
n = int(input())
a = [list(map(int, input().split())) for _ in range(n)]
b = [list(map(int, input().split())) for _ in range(n)]
# Convert to arrays so np.dot performs matrix multiplication.
a = np.array(a)
b = np.array(b)
print(np.dot(a, b)) |
4,108 | 67e6d39ef291e4bb30c0b6bab7b71d97c86b0ef1 | from nltk.stem.porter import PorterStemmer
from nltk.stem.snowball import SnowballStemmer
# Instantiate two NLTK stemmers; only the Snowball one is exercised below.
p_stemmer = PorterStemmer()  # NOTE(review): created but never used
s_stemmer = SnowballStemmer(language="english")
print(s_stemmer.stem("writing"))
|
4,109 | 7261c5f9ac87c8337383daec312372b345ab7652 | #!/usr/bin/env python
"""
This example shows how to create an unstructured grid.
"""
import vtk
import numpy as np
import pickle as pkl
# Pre-permuted color palette and per-structure metadata for the atlas.
colors_list = pkl.load(open('permuted_colors.pkl','rb'))
meta = pkl.load(open('v_atlas/meta_information.pkl','rb'))
def main():
    """Render the tessellated atlas structures as colored unstructured grids.

    NOTE(review): the loop body ends with an unconditional `break`, so only
    the first structure is actually rendered — confirm intended.
    """
    colors = vtk.vtkNamedColors()
    Data=np.load('tessaltions_compressed.npz')
    indices=meta['sorted_keys']
    struct_D={} # a mapping of structure names to colors.
    for i,s in enumerate(set([x[0] for x in indices])):
        struct_D[s]=colors_list[i]
    renderer = vtk.vtkRenderer()
    renWin = vtk.vtkRenderWindow()
    renWin.AddRenderer(renderer)
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(renWin)
    for index in range(len(indices)):
        # Per-structure point cloud and triangle index array.
        x=Data['points_'+str(index)]
        triangles = Data['triangles_'+str(index)]
        print(index,x.shape, triangles.shape,'\r',end='')
        points = vtk.vtkPoints()
        for i in range(0, x.shape[0]):
            points.InsertPoint(i, x[i,:])
        ugrid = vtk.vtkUnstructuredGrid()
        ugrid.Allocate(triangles.shape[0])
        for i in range(triangles.shape[0]):
            ugrid.InsertNextCell(vtk.VTK_TRIANGLE, 3, triangles[i,:])
        ugrid.SetPoints(points)
        # Compute surface normals for smoother shading of the mesh.
        uGridNormals = vtk.vtkPolyDataNormals()
        uGridNormals.SetInputData(ugrid)
        uGridNormals.SetFeatureAngle(30.0)
        #uGridNormals.ComputePointNormalsOn()
        uGridNormals.SplittingOn()
        print(uGridNormals)  # debug dump of the filter configuration
        uGridNormals.Update() # causes an error
        normalsPolyData = vtk.vtkPolyData()
        normalsPolyData.DeepCopy(uGridNormals.GetOutput())
        ugridMapper = vtk.vtkPolyDataMapper()
        ugridMapper.SetInputData(normalsPolyData)
        ugridMapper.ScalarVisibilityOff()
        # ugridMapper = vtk.vtkDataSetMapper()
        # ugridMapper.SetInputData(ugrid)
        ugridActor = vtk.vtkActor()
        ugridActor.SetMapper(ugridMapper)
        # print(index,indices[index],struct_D[indices[index][0]])
        # Color each actor by its structure name.
        color = struct_D[indices[index][0]]
        ugridActor.GetProperty().SetDiffuseColor(colors.GetColor3d(color))
        ugridActor.GetProperty().SetDiffuse(.7)
        ugridActor.GetProperty().SetSpecularPower(20)
        ugridActor.GetProperty().SetSpecular(.5)
        ugridActor.GetProperty().EdgeVisibilityOff()
        ugridActor.GetProperty().SetOpacity(0.5)
        ugridActor.GetProperty().SetInterpolationToGouraud()
        renderer.AddActor(ugridActor)
        break  # NOTE(review): stops after the first structure
    renderer.SetBackground(colors.GetColor3d('Beige'))
    renderer.ResetCamera()
    renderer.GetActiveCamera().Elevation(60.0)
    renderer.GetActiveCamera().Azimuth(30.0)
    renderer.GetActiveCamera().Dolly(1.2)
    renWin.SetSize(640, 480)
    # Interact with the data.
    renWin.Render()
    iren.Start()
if __name__ == "__main__":
    main()
|
4,110 | 510d411d79d5df8658703241f161b3e2a9ec5932 | #!/usr/bin/env python2.7
'''
lib script to encapsulate the camera info
'''
from xml.dom import minidom, Node
# what % of the file system remains before deleting files
# amount that we will cleanup relative to the filesystem total
CAMERA_XML_FILE = "/tmp/cameras.xml"
def cameras_get_info():
    '''
    cameras_get_info - reads the camera info from the XML file and
    puts it into a python data structure and returns it.

    Returns (status, cameras_info): status is always 0, cameras_info is a
    list with one dict per <camera> element (its 'id' attribute plus the
    text of each child tag listed in `fields`).
    '''
    status = 0
    xmldoc = minidom.parse(CAMERA_XML_FILE)
    itemlist = xmldoc.getElementsByTagName('camera')
    # The original repeated the same getElementsByTagName/update pair for
    # every tag; this table-drives it instead (also drops py2-only xrange).
    fields = ('user', 'model', 'passwd', 'port', 'ip_address',
              'disk_location', 'mfgr', 'ftp_loc', 'status', 'location')
    cameras_info = []
    for item in itemlist:
        info = {'id': item.attributes['id'].value}
        for field in fields:
            node = item.getElementsByTagName(field)
            info[field] = node[0].firstChild.data
        cameras_info.append(info)
    return status, cameras_info
|
4,111 | 70b8efa844395592131382d1d1e2c39150804f99 | from setuptools import Command
class decl_cmd1(Command):
    """No-op setuptools command (declaration only; all hooks are empty)."""
    user_options = []
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        pass
class decl_cmd2(Command):
    """Second no-op setuptools command (declaration only)."""
    user_options = []
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        pass
|
4,112 | a470aad80e47b244811e4d9aed4a630ba36a8daf | """helloworld URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
https://docs.djangoproject.com/zh-hans/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
#example:
# python3.0
from django.contrib import admin
# Why use path() here?  (Django 2.x routing helper; url() is the legacy regex form.)
from django.urls import path, include
from django.conf.urls import url
from . import view
# How do we link URL paths defined under other app modules?  (via include())
#
urlpatterns = [
    # path('hello/', view.hello),
    # path('hello/<int:year>/', view.hello), # hello() must accept the matching parameter
    # path('ifor/', view.ifor),
    path('admin/', admin.site.urls),
    # path('blog/', blog.views.goodbye),
    # path('', include('blog.urls.py', namespace='blog')), # wrong: include() takes a module path, not a file name
    path('', include('blog.urls', namespace='blog')),
    # url(r'^hello/$', view.hello),
    url(r'^hello/([0-9]{4})/$', view.hello),
    url(r'^ifor/', view.ifor),
    # url(r'^blog/', 'blog.views.goodbye')
    #
]
"""
# python 2.7
from django.conf.urls import url
from . import view
urlpatterns = [
url(r'^$', view.hello),
]
""" |
4,113 | c120db53e1ea5a5b865b891cf602a13113fb1e41 | import urllib.request
import io
import cv2
import numpy as np
# Snapshot endpoint on the LAN; each GET returns one encoded frame.
img_url = 'http://192.168.0.2:7079/hi'
# Poll the endpoint and display frames until 'q' is pressed.
while True:
    data = urllib.request.urlopen(img_url)
    raw_data = data.read()
    # Wrap the raw HTTP body in an array so OpenCV can decode it.
    nparr = np.frombuffer(raw_data, np.byte)
    image_raw = cv2.imdecode(nparr, cv2.IMREAD_ANYCOLOR)
    cv2.imshow("test", image_raw)
    if cv2.waitKey(1) == ord('q'):
        break
cv2.destroyAllWindows() |
4,114 | 86c4193ec0fee8a0c06858913ec8153fcf0df6d9 | #!/usr/bin/python
import pyglet
from pyglet.gl import *
win = pyglet.window.Window()
@win.event
def on_draw():
    """Window redraw handler: draw one wireframe triangle."""
    # Clear buffers
    glClear(GL_COLOR_BUFFER_BIT)
    # Draw outlines only
    glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
    # Draw some stuff
    glBegin(GL_TRIANGLES)
    glVertex3i(0, 0, 0)
    glVertex3i(300, 0, 0)
    glVertex3i(0, 300, 0)
    glEnd()
pyglet.app.run()
|
4,115 | c2260278c8dfb353f55ee9ea3495049b08169447 | from django.contrib.auth import get_user_model
from django.db import models
from django.db.models.signals import post_save
from apps.common.constants import NOTIFICATION_TYPE_CHOICES, INFO
from apps.core.models import BaseModel
from apps.core.utils.helpers import get_upload_path
from apps.core.utils.push_notification import send_push_message
User = get_user_model()
class City(BaseModel):
    """A city, referenced by name."""
    name = models.CharField(max_length=255, db_index=True)  # indexed for lookups
    def __str__(self):
        return self.name
class Article(BaseModel):
    """Editorial content; creating one fans out notifications to all users
    (see the post_save hook wired at the bottom of this module)."""
    created_by = models.ForeignKey(User, related_name='articles', on_delete=models.SET_NULL, null=True)
    title = models.CharField(max_length=200)
    description = models.TextField()
    # Below fields are optional
    image = models.ImageField(
        upload_to=get_upload_path,
        blank=True
    )
    is_archived = models.BooleanField(default=False)
    def __str__(self):
        return self.title
class UserNotification(BaseModel):
    """An in-app notification delivered from one user (or the system) to another."""
    title = models.CharField(max_length=150)
    sent_by = models.ForeignKey(
        User, on_delete=models.SET_NULL, null=True, related_name='sent_notifications')
    sent_to = models.ForeignKey(
        User, on_delete=models.SET_NULL, null=True, related_name='notifications')
    content = models.TextField(blank=True)
    is_read = models.BooleanField(default=False) # To mark notification as read
    notification_type = models.CharField(
        max_length=15,
        choices=NOTIFICATION_TYPE_CHOICES,
        default=INFO
    )
    def __str__(self):
        if self.sent_by:
            return f'Sent by {str(self.sent_by)} to {str(self.sent_to)} content {self.content}'
        return f'{str(self.sent_to)} content {self.content}'
    class Meta:
        # Unread first, then newest first.
        ordering = ('is_read', '-created_at')
def send_push_notification(sender, instance, created, **kwargs):
    """post_save handler: push a newly created UserNotification to the
    recipient's first active device, if any."""
    if not created:
        return
    device = instance.sent_to.devices.filter(is_active=True).first()
    if device is None:
        return
    send_push_message(
        device.registration_id,
        title=instance.title,
        body=instance.content
    )
def send_article_notifications(sender, instance, created, **kwargs):
    """post_save handler: on Article creation, create an info notification
    for every user.

    BUG FIX: the original used ``bulk_create()``, which does not emit
    ``post_save`` signals, so ``send_push_notification`` never fired and
    no push messages were delivered for new articles. Creating the rows
    one at a time restores the signal-driven push delivery.
    """
    if created:
        for user in User.objects.all():
            UserNotification.objects.create(
                title=instance.title,
                sent_to=user,
                notification_type=INFO,
                content=instance.description
            )
post_save.connect(send_push_notification, sender=UserNotification)
post_save.connect(send_article_notifications, sender=Article)
|
4,116 | c07454dfb9dabb89c86f63063231ae9cf915aa38 | from django.db.models import Model, CharField, IntegerField, ManyToManyField, ForeignKey, PROTECT
from django.core.validators import MaxValueValidator, MinValueValidator
from polymorphic.models import PolymorphicModel
from model_utils import Choices
class Tendencia(Model):
    """Alignment ("tendência") lookup value with a short slug."""
    valor = CharField(max_length=16, unique=True)
    slug = CharField(max_length=3, unique=True)
    def __str__(self):
        return self.valor
class Bba(Model):
    """Base attack bonus ("BBA") value for a given level and quality."""
    # Character level 1..20.
    nivel = IntegerField(
        validators=[
            MaxValueValidator(20),
            MinValueValidator(1)
        ]
    )
    # Bonus value 0..20.
    valor = IntegerField(
        validators=[
            MaxValueValidator(20),
            MinValueValidator(0)
        ]
    )
    QUALIDADE = Choices(('boa', ('BBA Boa')), ('ruim', ('BBA Ruim')))
    qualidade = CharField(choices=QUALIDADE, max_length=4)
    class Meta:
        unique_together = ('qualidade', 'nivel', 'valor')
    def __str__(self):
        return 'BBA {} nível {}'.format(self.qualidade, self.nivel)
class Atributo(Model):
    """Ability score (Força, Destreza, ...) with its three-letter slug."""
    # NOTE(review): ('Força') etc. are plain strings, not tuples, so each
    # Choices entry uses the same text for value and label — confirm intended.
    NOME = Choices(('Força'), ('Destreza'), ('Constituição'),
                   ('Inteligência'), ('Sabedoria'), ('Carisma'))
    nome = CharField(choices=NOME, max_length=12, unique=True)
    SLUG = Choices(('for'), ('des'), ('con'),
                   ('int'), ('sab'), ('car'))
    slug = CharField(choices=SLUG, max_length=3, unique=True)
    def __str__(self):
        return self.nome
class Resistencia(Model):
    """Saving throw ("resistência") value per level, quality and ability."""
    # Character level 1..20.
    nivel = IntegerField(
        validators=[
            MaxValueValidator(20),
            MinValueValidator(1)
        ]
    )
    # Save bonus 0..20.
    valor = IntegerField(
        validators=[
            MaxValueValidator(20),
            MinValueValidator(0)
        ]
    )
    NOME = Choices(('Fortitude'), ('Reflexo'), ('Vontade'))
    nome = CharField(choices=NOME, max_length=9)
    SLUG = Choices(('fort'), ('ref'), ('von'))
    slug = CharField(choices=SLUG, max_length=4)
    QUALIDADE = Choices(('boa', ('Resistencia Boa')), ('ruim', ('Resistencia Ruim')))
    qualidade = CharField(choices=QUALIDADE, max_length=4)
    # PROTECT: an Atributo referenced by any Resistencia cannot be deleted.
    atributo = ForeignKey(Atributo, related_name='+', on_delete=PROTECT)
    class Meta:
        unique_together = ('slug', 'qualidade', 'nivel', 'valor')
    def __str__(self):
        return '{} {} nivel {}'.format(self.nome, self.qualidade, self.nivel)
class Pericia(Model):
    """Skill ("perícia") keyed to the ability score that governs it."""
    nome = CharField(max_length=37)
    slug = CharField(max_length=37, unique=True)
    # PROTECT: an Atributo referenced by any Pericia cannot be deleted.
    atributo = ForeignKey(Atributo, related_name='+', on_delete=PROTECT)
    def __str__(self):
        return self.nome
class Classe(PolymorphicModel):
    """Character class: its perícias, BBA/resistência progressions and
    allowed tendências.

    BUG FIX: the add_* helpers previously called ``.append()`` on
    ManyToMany related managers, which raises AttributeError at runtime —
    related managers persist links via ``.add()``. All four helpers are
    fixed accordingly.
    """
    nome = CharField(max_length=20)
    slug = CharField(max_length=20, unique=True)
    pericias = ManyToManyField(Pericia, related_name='+')
    quantidade_pericias_por_nivel = IntegerField(
        validators=[
            MinValueValidator(1)
        ]
    )
    bbas = ManyToManyField(Bba, related_name='+')
    resistencias = ManyToManyField(Resistencia, related_name='+')
    tendencias = ManyToManyField(Tendencia, related_name='+')
    DV = Choices((4, ('d4')), (6, ('d6')), (8, ('d8')), (10, ('d10')), (12, ('d12')))
    dv = IntegerField(choices=DV)
    CONJURADOR = Choices(('div', ('Divino')), ('arc', ('Arcano')), ('nan', ('Não conjurador')))
    conjurador = CharField(choices=CONJURADOR, default=CONJURADOR.nan, max_length=3)
    # conjurador_completo = BooleanField(default=True)
    def add_tendencia(self, tendencia):
        """Link *tendencia* to this class (persists immediately)."""
        self.tendencias.add(tendencia)
    def add_pericia(self, pericia):
        """Link *pericia* to this class (persists immediately)."""
        self.pericias.add(pericia)
    def add_bba(self, bba):
        """Link *bba* to this class (persists immediately)."""
        self.bbas.add(bba)
    def add_resistencia(self, resistencia):
        """Link *resistencia* to this class (persists immediately)."""
        self.resistencias.add(resistencia)
    def get_bba_nivel(self, nivel):
        """Return the BBA value registered for *nivel*, or 0 when absent."""
        # Filter in the database instead of iterating every row in Python.
        bba = self.bbas.filter(nivel=nivel).first()
        return bba.valor if bba is not None else 0
    def __str__(self):
        return self.nome
class ClassePrestigio(Classe):
    """Prestige class; no extra fields — distinguished via polymorphism."""
    pass
class Tipo(Model):
    """Creature type lookup value."""
    nome = CharField(max_length=14)
    slug = CharField(max_length=14, unique=True)
    def __str__(self):
        return self.nome
class Raca(Model):
    """Race lookup value."""
    nome = CharField(max_length=14)
    slug = CharField(max_length=14, unique=True)
    def __str__(self):
        return self.nome
class Modelo(Model):
    """Template ("modelo") lookup value."""
    nome = CharField(max_length=20)
    slug = CharField(max_length=20, unique=True)
    def __str__(self):
        return self.nome
|
4,117 | e3b39c6655fc14efec3b3f95b08bc7b2c036cbdc | import matplotlib.pyplot as plt
import pandas as pd
from collections import Counter
import numpy as np
import imdb
import csv
import networkx as nx
from networkx import *
def split_data(data,
               train_path=r'C:\Users\Darkmaster\PycharmProjects\Recommender\Data\Cvorm\training.csv',
               test_path=r'C:\Users\Darkmaster\PycharmProjects\Recommender\Data\Cvorm\testing.csv'):
    """Split a ratings CSV into train/test sets per user, chronologically.

    Each user's ratings are ranked by timestamp; the most recent 20% go
    to the test set and the rest to the training set. Both splits are
    written as header-less CSVs.

    Parameters
    ----------
    data : str
        Path to the input CSV; must contain 'userId' and 'timestamp' columns.
    train_path, test_path : str
        Output paths (defaults preserve the original hard-coded locations).

    Returns
    -------
    (DataFrame, DataFrame)
        The (train, test) frames that were written.
    """
    df = pd.read_csv(data)
    # Chronological rank of each rating within its user's history.
    ranks = df.groupby('userId')['timestamp'].rank(method='first')
    # Per-user rating counts, vectorized (no Python-level apply/map).
    counts = df.groupby('userId')['timestamp'].transform('size')
    # The last 20% of each user's ratings (by time) are held out for testing.
    is_test = (ranks / counts) > 0.8
    print(df.head())
    train = df.loc[~is_test]
    test = df.loc[is_test]
    train.to_csv(train_path, header=False, index=False)
    test.to_csv(test_path, header=False, index=False)
    return train, test
# print(test.head())
# ----AND THEN SAVE THOSE AS CSV----
# for row in df.index
# print(test_train)
# print(ranks.head())
# print(counts.head())
# def make_train_or_test_txt(ratingdata):
# df = pd.read_csv(ratingdata)
# users = []
# [users.append(x) for x in df["userId"] if x not in users]
# print(users)
# with open('Data/KGAT/train.txt', 'w') as f:
# # writer = csv.writer(f, delimiter='\t')
# for x in users:
# items = []
# items = df.query('userId == {}'.format(x))["movieId"]
# items = items.values.tolist()
# stringerbell = ''.join((str(e) + "\t") for e in items)
# print(stringerbell)
# # writer.writerow("{}{}".format(x, items))
# # writer.writerow(str(x) + stringerbell)
# f.write(str(x) + "\t" + stringerbell + "\n")
# # print(items)
# # for j in range(len(df)):
# # try:
# # getitems = [x for x in df.loc[df["movieId"]]]
# # except:
# # continue
# print(df.head())
# make_train_or_test_txt('Data/ratings.csv')
split_data('C:\\Users\\Darkmaster\\PycharmProjects\\Recommender\\Data\\ratings.csv') |
4,118 | cf97c87400649dd15e5d006707f9adfbd0c91b2c | from django.shortcuts import render
from django.http import HttpResponse
from django.shortcuts import redirect
from .forms import TeacherForm, Teacher
def add_teacher(request):
    """Create a Teacher from a submitted TeacherForm.

    POST: validate and save, then redirect to the teacher list; invalid
    data gets a plain 400 response. GET: render an empty form.
    """
    if request.method=="POST":
        form=TeacherForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect("list_teachers")
        else:
            return HttpResponse("invalid data",status=400)
    else:
        form=TeacherForm()
    return render(request,"add_teacher.html",{"form":form})
def list_teachers(request):
    """Render the full roster of teachers."""
    context = {"teachers": Teacher.objects.all()}
    return render(request, "list_teachers.html", context)
def teacher_detail(request, pk):
    """Render the detail page for the teacher with primary key *pk*."""
    context = {"teacher": Teacher.objects.get(pk=pk)}
    return render(request, "teacher_detail.html", context)
def edit_teacher(request, pk):
    """Edit an existing Teacher.

    BUG FIX: the original tested ``if form.is_valid:`` — the bound method
    object, which is always truthy — so invalid POST data was passed to
    ``form.save()``. The method is now actually called, and an invalid
    form falls through to be re-rendered with its errors.
    """
    teacher = Teacher.objects.get(pk=pk)
    if request.method == "POST":
        form = TeacherForm(request.POST, instance=teacher)
        if form.is_valid():
            form.save()
            return redirect("list_teachers")
        # invalid: fall through and re-render the bound form with errors
    else:
        form = TeacherForm(instance=teacher)
    return render(request, "edit_teacher.html", {"form": form})
# form = TeacherForm()
# return render(request,"add_teacher.html",{"form":form})
# Create your views here.
|
4,119 | 94559d9fd296acd468c33d6b0541b974575b8852 | # -*- coding: utf-8 -*-
"""
Automatically create and parse commands
based on a YAML configuration file.
NOTE: we can't have a logger here,
before knowing the level of debug.
"""
import os
import sys
import argparse
from controller import __version__, PROJECTRC, PROJECTRC_ALTERNATIVE
from controller.conf_utilities import load_yaml_file
from controller import log
class ArgParser:
    """Build argparse parsers from a YAML configuration.

    Reads ``argparser.yaml`` (with optional ``.projectrc`` overrides),
    builds the main parser plus one sub-parser per configured command,
    then parses ``args``: recognized options land in ``self.current_args``
    (a dict), leftovers in ``self.remaining_args``, and a separate parser
    for custom commands is prepared in ``self.extra_parser``.
    """
    def __init__(self, args=None):
        """Parse *args* (defaults to ``sys.argv``); exits with usage when
        no arguments are given. Raises ValueError on underscore options."""
        if args is None:
            args = sys.argv
        self.current_args = {}
        self.host_configuration = {}
        # This method can raise ValueErrors
        self.check_args(args)
        # This method saves configuration objects in self
        self.read_configuration()
        # Arguments definition
        parser = argparse.ArgumentParser(
            prog=args[0], description=self.parse_conf.get('description')
        )
        # PARAMETERS
        sorted_options = sorted(self.parse_conf.get('options', {}).items())
        for option_name, options in sorted_options:
            self.add_parser_argument(parser, option_name, options)
        version_string = 'rapydo version {}'.format(__version__)
        parser.add_argument('--version', action='version', version=version_string)
        # Sub-parser of commands [check, init, etc]
        main_command = self.parse_conf.get('action')
        subparsers = parser.add_subparsers(
            title='Available commands',
            dest=main_command.get('name'),
            help=main_command.get('help'),
        )
        subparsers.required = True
        # ##########################
        # COMMANDS
        # BASE normal commands
        mycommands = self.parse_conf.get('subcommands', {})
        for command_name, options in sorted(mycommands.items()):
            # Creating a parser for each sub-command [check, init, etc]
            subparse = subparsers.add_parser(
                command_name, help=options.get('description')
            )
            # controlcommands = options.get('controlcommands', {})
            # # Some subcommands can have further subcommands
            # [control start, stop, etc]
            # if len(controlcommands) > 0:
            #     innerparser = subparse.add_subparsers(
            #         dest='controlcommand'
            #     )
            #     innerparser.required = options.get('controlrequired', False)
            #     for subcommand, suboptions in controlcommands.items():
            #         subcommand_help = suboptions.pop(0)
            #         # Creating a parser for each sub-sub-command
            #         # [control start/stop]
            #         innerparser.add_parser(subcommand, help=subcommand_help)
            suboptions = options.get('suboptions', {}).items()
            for option_name, suboptions in suboptions:
                self.add_parser_argument(subparse, option_name, suboptions)
        # ##########################
        # Print usage if no arguments provided
        if len(args) == 1:
            parser.print_help()
            sys.exit(1)
        # ##########################
        # Reading input parameters
        # Partial parsing
        # https://docs.python.org/3.4/library/argparse.html#partial-parsing
        # Example
        # https://gist.github.com/von/949337/
        # self.current_args = parser.parse_args()
        current_args_namespace, self.remaining_args = parser.parse_known_args(args[1:])
        self.current_args = vars(current_args_namespace)
        # custom commands as a separate parser
        self.extra_parser = argparse.ArgumentParser(
            description='Custom rapydo commands from your own configuration',
            add_help=False,
            usage='\n$ rapydo custom CUSTOM_COMMAND',
        )
        self.extra_command_parser = self.extra_parser.add_subparsers(
            title='Available custom commands',
            dest='custom',
            help='list of custom commands',
        )
        self.extra_command_parser.required = True
        # ##########################
        if self.current_args.get("log_level", "DEPRECATED") != "DEPRECATED":
            # Deprecated since version 0.7.0
            log.warning(
                "--log-level parameter is deprecated, set env variable LOGURU_LEVEL")
        log.verbose("Parsed arguments: {}", self.current_args)
    def add_parser_argument(self, parser, option_name, options):
        """Add one configured option to *parser*, honoring the optional
        'alias' (short flag) and 'positional' markers."""
        params = self.prepare_params(options)
        alias = params.pop('alias', None)
        positional = params.pop('positional', False)
        param_name = '--{}'.format(option_name)
        if positional:
            parser.add_argument(option_name, **params)
        elif alias is None:
            parser.add_argument(param_name, **params)
        else:
            parser.add_argument(param_name, '-{}'.format(alias), **params)
    @staticmethod
    def check_args(args):
        """Reject long options containing underscores (raises ValueError)."""
        # Check on format
        for element in args:
            if element.startswith('--') and '_' in element:
                raise ValueError(
                    "Wrong \"{}\" option provided.\n".format(element)
                    + "Arguments containing '_' are not allowed.\n"
                    + "Use '-' instead\n"
                )
        # NOTE: the standard is to use only '-' separators for arguments
        # beware: argparse converts them into '_' when you want to retrieve
    def read_configuration(self):
        """Load argparser.yaml into self.parse_conf and merge defaults
        from .projectrc (or its alternative) on top of it."""
        # READ MAIN FILE WITH COMMANDS AND OPTIONS
        self.parse_conf = load_yaml_file(
            'argparser.yaml', path=os.path.dirname(os.path.realpath(__file__))
        )
        try:
            # READ PROJECT INIT FILE: .projectrc
            pinit_conf = load_yaml_file(
                PROJECTRC, path=os.curdir, is_optional=True)
            # Allow alternative for PROJECT INIT FILE: .project.yml
            if len(pinit_conf) < 1:
                pinit_conf = load_yaml_file(
                    PROJECTRC_ALTERNATIVE, path=os.curdir, is_optional=True)
        except AttributeError as e:
            log.exit(e)
        self.host_configuration = pinit_conf.pop('project_configuration', {})
        # Mix with parse_conf
        for key, value in pinit_conf.items():
            # value = pinit_conf.get(key, None)
            if value is None:
                continue
            if not isinstance(value, dict):
                # This is a first level option
                if key in self.parse_conf['options']:
                    self.parse_conf['options'][key]['default'] = value
                else:
                    print("\nUnknown parameter {} found in {}\n".format(key, PROJECTRC))
            else:
                # This is a second level parameter
                if key not in self.parse_conf['subcommands']:
                    print("\nUnknown command {} found in {}\n".format(key, PROJECTRC))
                else:
                    conf = self.parse_conf['subcommands'][key]['suboptions']
                    for subkey, subvalue in value.items():
                        if subkey in conf:
                            conf[subkey]['default'] = subvalue
                        else:
                            print("Unknown parameter {}/{} found in {}\n".format(
                                key, subkey, PROJECTRC))
    @staticmethod
    def prepare_params(options):
        """Translate one YAML option spec into kwargs for add_argument
        (plus the internal 'alias'/'positional' markers)."""
        pconf = {}
        default = options.get('default')
        pconf['default'] = default
        myhelp = "{} [default: {}]".format(options.get('help'), default)
        pconf['help'] = myhelp
        if options.get('type') == 'bool':
            if default:
                pconf['action'] = 'store_false'
            else:
                pconf['action'] = 'store_true'
        else:
            # non-bool options are parsed as plain strings
            pconf['type'] = str
            pconf['metavar'] = options.get('metavalue')
        if 'alias' in options:
            pconf['alias'] = options['alias']
        if 'positional' in options:
            pconf['positional'] = options['positional']
        return pconf
|
4,120 | 127ca34d3fae3af4506258388a28c539ccc7c33b | /Users/linhly/anaconda/lib/python3.6/reprlib.py |
4,121 | f15f96658130ac9bba748a518371ad80d9772fbc | import pickle
from pathlib import Path
from rich.console import Console
from fourierdb import FourierDocument, FourierCollection, FourierDB
console = Console()
# Build a demo database with two collections of sample documents.
doc = FourierDocument({"bar": "eggs", "xyz": "spam"})
doc2 = FourierDocument({"a": "foo", "b": "bar"})
doc3 = FourierDocument({"abc": "xyz"})
doc4 = FourierDocument({1: 2, 3: 4, 5: 6})
doc5 = FourierDocument({"hello": [1, 2, 3, 4, 5, 6, 7, 8, 9]})
FOURIER_DIR = Path.home() / ".fourier"
FOURIER_LOGS = FOURIER_DIR / "logs"
FOURIER_DBS = FOURIER_DIR / "databases"
coll = FourierCollection("coll", doc, doc2)
coll2 = FourierCollection("coll2", doc3, doc4, doc5)
db = FourierDB("db")
db.add_collection(coll)
db.add_collection(coll2)
# BUG FIX: the original called pickle.dump(db, open("")) — an empty path,
# no mode, and a text-mode handle — which raises before anything is
# written. Persist the database as a binary pickle under FOURIER_DBS.
FOURIER_DBS.mkdir(parents=True, exist_ok=True)
with open(FOURIER_DBS / "db.pickle", "wb") as fh:
    pickle.dump(db, fh)
|
4,122 | 7d3f4e0a5031f9ce618c568b440c7425489060a1 | import sys
class Obj:
    """A node in the orbit tree: one body plus everything orbiting it."""

    def __init__(self, name):
        self.name = name
        self.down = []

    def add_child(self, obj):
        """Register *obj* as directly orbiting this body."""
        self.down.append(obj)

    def prnt(self, prev):
        """Print each leaf as the dash-joined chain of names leading to it."""
        if self.down:
            for child in self.down:
                child.prnt(prev + '-' + self.name)
        else:
            print(prev + '=' + self.name)

    def distance(self, start):
        """Return the sum of depths of every node in this subtree, where
        this node sits at depth *start* (i.e. the total orbit count)."""
        total = start
        if not self.down:
            print(self.name, start)
        for child in self.down:
            total += child.distance(start + 1)
        return total
COM = Obj('COM')
orbits = {}
orbits['COM'] = COM
effects = [x.strip().split(')') for x in list(sys.stdin)]
for c,o in effects:
obj = None
if o in orbits:
obj = orbits[o]
else:
obj = Obj(o)
orbits[o] = obj
if c in orbits:
orbits[c].add_child(obj)
else:
ctr = Obj(c)
ctr.add_child(obj)
orbits[c] = ctr
print(COM.distance(0))
|
4,123 | 6437cb90ebaed7cf59df780062ebccf77fcef084 | from ethereum.abi import (
decode_abi,
normalize_name as normalize_abi_method_name,
method_id as get_abi_method_id)
from ethereum.utils import encode_int, zpad, decode_hex
import json
import time
from web3 import Web3, HTTPProvider, TestRPCProvider
from solc import compile_source
from web3.contract import ConciseContract
import sys
import os
Cpath = os.path.dirname(os.path.realpath(__file__))
host = 'localhost'
TID = sys.argv[1]
# web3.py instance
w3 = Web3(HTTPProvider('http://'+host+':3000'))
f = open(Cpath+'/abi','r')
line = f.readline()
Jline = json.loads(line)
f.close()
abi = Jline
Transaction = w3.eth.getTransaction(TID)
#print(Transaction.input)
def decode_contract_call(contract_abi: list, call_data: str):
    """Decode a transaction's input data against *contract_abi*.

    Compares the 4-byte method-selector prefix of *call_data* with the
    method id of every function entry in the ABI; on a match returns
    ``(method_name, args)``. Returns None when nothing matches.
    """
    call_data_bin = decode_hex(call_data)
    # First 4 bytes of the call data identify the called method.
    method_signature = call_data_bin[:4]
    for description in contract_abi:
        if description.get('type') != 'function':
            continue
        method_name = normalize_abi_method_name(description['name'])
        arg_types = [item['type'] for item in description['inputs']]
        method_id = get_abi_method_id(method_name, arg_types)
        if zpad(encode_int(method_id), 4) == method_signature:
            try:
                args = decode_abi(arg_types, call_data_bin[4:])
            except AssertionError:
                # Invalid args
                continue
            return method_name, args
result = decode_contract_call(abi,Transaction.input)
#result = decode_contract_call(abi,"0xa9059cbb0000000000000000000000006cd5d27785e38b28a0d9656bcc795d90a4d670c500000000000000000000000000000000000000000000000000000000000001f4")
print(result)
print(Transaction['from'])
|
4,124 | b53294330a908f8a50d8fbb50b9c88e2bc6135a1 | print("gggg")
print("gggg")
print("gggg")
|
4,125 | f4c3b6ee6389b31c6a280bf7cfe920a2791c1299 | import logging
from tqdm import tqdm
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def create_tables(db_engine):
    """Drop and recreate the drugs schema.

    Executes the DDL statements in dependency order (lookup tables before
    the tables that reference them) on a connection from *db_engine*;
    progress is reported with tqdm. Destructive: existing tables are
    dropped with CASCADE.
    """
    with db_engine.connect() as conn:
        create_table_stmts = []
        create_drugs_table = """
        DROP TABLE IF EXISTS drugs CASCADE;
        CREATE TABLE drugs (
            drugbank_id char(7) PRIMARY KEY,
            name varchar NOT NULL, -- Something “Human Readable”,
            smiles varchar
        );
        """
        create_table_stmts.append(create_drugs_table)
        create_drug_id_types_table = """
        DROP TABLE IF EXISTS drug_identifier_types CASCADE;
        CREATE TABLE drug_identifier_types (
            identifier_type_id SERIAL UNIQUE,
            identifier_type_name varchar UNIQUE
        );
        """
        create_table_stmts.append(create_drug_id_types_table)
        create_drug_identifiers_table = """
        DROP TABLE IF EXISTS drug_identifiers CASCADE;
        CREATE TABLE drug_identifiers (
            drugbank_id char(7) references drugs(drugbank_id),
            alt_identifier_value varchar NOT NULL,
            alt_identifier_type_id int references drug_identifier_types(identifier_type_id) NOT NULL,
            alt_identifier_url varchar
        );
        """
        create_table_stmts.append(create_drug_identifiers_table)
        create_drug_action_types = """
        DROP TABLE IF EXISTS drug_action_types CASCADE;
        CREATE TABLE drug_action_types (
            action_type_id SERIAL UNIQUE,
            action_type varchar UNIQUE
        );
        """
        create_table_stmts.append(create_drug_action_types)
        create_drug_targets_table = """
        DROP TABLE IF EXISTS drug_targets CASCADE;
        CREATE TABLE drug_targets (
            drugbank_id char(7) references drugs(drugbank_id),
            gene_name varchar NOT NULL,
            action_type_id int references drug_action_types(action_type_id),
            UNIQUE(drugbank_id, gene_name, action_type_id)
        );
        """
        create_table_stmts.append(create_drug_targets_table)
        logger.info("Creating %d tables", len(create_table_stmts))
        for stmt in tqdm(create_table_stmts):
            conn.execute(stmt)
|
4,126 | d638194a37dc503b7dfb5410abf264be67c3a4f0 | import pyttsx
engine = pyttsx.init()
rate = engine.getProperty('rate')
engine.setProperty('rate', rate-55)
engine.say('Hello , whats your name ?');
engine.say('I am mr. robot. What news would you like to listen to today ?');
#engine.say('Sally sells seashells by the seashore.')
#engine.say('Sally sells seashells by the seashore.')
#voices = engine.getProperty('voices')
#for voice in voices:
# engine.setProperty('voice', voice.id)
# engine.say('The quick brown fox jumped over the lazy dog.')
#engine.say('The quick brown fox jumped over the lazy dog.')
engine.runAndWait()
|
4,127 | 471d4cc95d6cb8d02f1c96e940c2a2235affbc52 | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 19 17:24:25 2015
@author: Damien
"""
import numpy as np
from operator import itemgetter
import itertools
def writeOBJ(vertlist,trilist,filename):
    """Write vertices and triangles to a Wavefront OBJ file (Python 2).

    vertlist: sequence of (x, y, z) coordinate triples.
    trilist: sequence of 1-based vertex index triples.
    filename: output path; the file is overwritten.
    """
    print "number of triangles: " + str(len(trilist))
    print "number of vertices: " + str(len(vertlist))
    OBJ = open(filename, "w")
    OBJ.write('# Created with OBJ writer test version DM\n')
    # Header comment pins the Dutch RD (EPSG:28992) coordinate system.
    OBJ.write('# COORDINATE_SYSTEM: OGC_DEF PROJCS["Netherlands, Amersfoort RD 2008 datum, New System",GEOGCS["Amersfoort",DATUM["Amersfoort",SPHEROID["Bessel, 1841",6377397.155,299.1528153513275,AUTHORITY["EPSG","7004"]],AUTHORITY["EPSG","6289"]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433],AUTHORITY["EPSG","4289"]],PROJECTION["Stereographic"],PARAMETER["latitude_of_origin",52.1561605555556],PARAMETER["central_meridian",5.38763888888889],PARAMETER["scale_factor",0.9999079],PARAMETER["false_easting",155000],PARAMETER["false_northing",463000],UNIT["METER",1],AUTHORITY["EPSG","28992"]]\n')
    OBJ.write('# Number of Geometry Coordinates   : ' + str(len(vertlist)) + '\n')
    OBJ.write('# Number of Texture  Coordinates   : 0\n')
    OBJ.write('# Number of Normal   Coordinates   : 0\n')
    # loop through vertices and write to obj
    for vert in vertlist:
        OBJ.write("v " + str(vert[0]) + " " + str(vert[1]) + " " + str(vert[2]) + "\n")
    OBJ.write('# Number of Elements in set        : ' + str(len(trilist)) + '\n')
    # loop through triangles and write to obj
    for tri in trilist:
        OBJ.write("f " + str(tri[0]) + " " + str(tri[1]) + " " + str(tri[2]) + "\n")
    OBJ.write('# Total Number of Elements in file : ' + str(len(trilist)) + '\n')
    OBJ.write('# EOF')
    OBJ.close()
class Vertice:
    """A mesh vertex: float position, integer ID, and the normals of the
    faces that touch it."""

    def __init__(self, x, y, z, vertID):
        self.X, self.Y, self.Z = float(x), float(y), float(z)
        self.ID = int(vertID)
        # Dictionary key used to deduplicate vertices; format must stay stable.
        self.string = "(%s , %s , %s)" % (self.X, self.Y, self.Z)
        self.neighbourNormals = []

    def getVerticePosition(self):
        """Return the (x, y, z) position as a tuple of floats."""
        return (self.X, self.Y, self.Z)

    def addNeighbourNormal(self, normalvec):
        """Record the normal vector of an adjacent face."""
        self.neighbourNormals.append(normalvec)
##############################################################################
# triangle class #
##############################################################################
class Triangle:
    """A mesh face defined by three vertex positions."""

    def __init__(self, n1, n2, n3):
        self.n1 = n1
        self.n2 = n2
        self.n3 = n3
        self.position = [n1, n2, n3]
        # Normals of neighbouring faces, one sub-list per corner vertex.
        self.neighbourNormals = []

    def getNormalizedNormalVec(self):
        """Return the unit surface normal, rounded to 5 decimals, as a list.

        Rounding keeps numerically-equal normals comparable when they are
        used as lookup keys.
        """
        corners = self.position
        # Edge vectors n1->n2 and n2->n3.
        edges = np.subtract(corners[1:], corners[:-1])
        # Their cross product is the (unnormalized) face normal;
        # +0.0 forces float arithmetic and normalizes -0.0.
        face_normal = np.cross(edges[0], edges[1]) + 0.0
        unit = face_normal / np.linalg.norm(face_normal)
        return np.around(unit, decimals=5).tolist()
##############################################################################
# get angle between vectors #
##############################################################################
def unit_vector(vector):
    """Return *vector* scaled to unit length as a numpy array."""
    norm = np.linalg.norm(vector)
    return vector / norm
def angle_between(v1, v2):
    """Return the angle in radians between vectors 'v1' and 'v2'::

            >>> angle_between((1, 0, 0), (0, 1, 0))
            1.5707963267948966
            >>> angle_between((1, 0, 0), (1, 0, 0))
            0.0
            >>> angle_between((1, 0, 0), (-1, 0, 0))
            3.141592653589793
    """
    # Normalize both inputs (inlined unit-vector computation).
    u1 = v1 / np.linalg.norm(v1)
    u2 = v2 / np.linalg.norm(v2)
    angle = np.arccos(np.dot(u1, u2))
    # arccos yields nan when rounding pushes the dot product outside
    # [-1, 1]; disambiguate parallel vs anti-parallel explicitly.
    if np.isnan(angle):
        return 0.0 if (u1 == u2).all() else np.pi
    return angle
def thinNVList(nvlist):
    """Sort *nvlist* of 3-component vectors lexicographically and drop
    consecutive duplicates; returns the thinned (still sorted) list."""
    ordered = sorted(nvlist, key=itemgetter(0, 1, 2))
    # Walk backwards so deletions never disturb indices not yet visited.
    for idx in range(len(ordered) - 1, 0, -1):
        if np.array_equal(ordered[idx], ordered[idx - 1]):
            del ordered[idx]
    return ordered
def testlist2OBJ(testlist,filename):
    """Dump a list of Triangle objects to an OBJ file via writeOBJ.

    Vertices are emitted per-triangle without deduplication, so each
    triangle contributes three fresh 1-based vertex indices.
    """
    temp_vertlist = []
    temp_trilist = []
    vertID = 1
    for tri in testlist:
        index_tri = []
        #print tri.position
        for vert in tri.position:
            temp_vertlist.append(vert)
            index_tri.append(vertID)
            vertID+=1
        temp_trilist.append(index_tri)
    writeOBJ(temp_vertlist,temp_trilist,filename)
    #print temp_vertlist,temp_trilist
    return
def checkOrtho(NVset):
    """Return True when every pair of distinct normals in *NVset* (a list
    of lists of vectors) is mutually perpendicular, else False."""
    flatNVset = [NV for subset in NVset for NV in subset]
    thinned_flatNVset = thinNVList(flatNVset)
    numberNVs = len(thinned_flatNVset)
    #print numberNVs
    #numberNVs = 3
    count = [i for i in range(numberNVs)]
    #print count
    # Every unordered pair of distinct (deduplicated) normals.
    indexes = list(itertools.combinations(count,2))
    for indexpair in indexes:
        #print list(indexpair)
        pair = [thinned_flatNVset[ids] for ids in indexpair]
        #print pair
        angle = angle_between(pair[0],pair[1])
        if not np.allclose(angle, 1.57079632679):  # pi/2
            return False
        else:
            continue
    return True
    # extend later? remove non ortho?
def distance(point1, point2):
    """Euclidean distance between two points (any matching dimension)."""
    delta = np.array(point1) - np.array(point2)
    return np.linalg.norm(delta)
def testEquilateral(Tri):
dist1 = distance(Tri[0],Tri[1])
dist2 = distance(Tri[1],Tri[2])
dist3 = distance(Tri[2],Tri[0])
if np.allclose(dist1,dist2) and np.allclose(dist2,dist3) and np.allclose(dist3,dist1):
# get center
##print "*********** EQUILAT TRI***************"
##print "p1 = " + str(Tri[0])
#print Tri[0][0], Tri[0][1],Tri[0][2]
##print "p2 = " + str(Tri[1])
#print Tri[1][0], Tri[1][1],Tri[1][2]
##print "p3 = "+ str(Tri[2])
center = [np.sum([Tri[0][0],Tri[1][0],Tri[2][0]])/3.0,np.sum([Tri[0][1],Tri[1][1],Tri[2][1]])/3.0, np.sum([Tri[0][2],Tri[1][2],Tri[2][2]])/3.0 ]
##print "p4 = " + str(center)
return [True, center ]
else:
return [False ]
def testCornerConcaveConvex(Tri,NVset): # cornerTriTest only run on single corner triangles? or extend?
    """Classify a corner triangle as "CONVEX", "CONCAVE" or "CONVEX/CONCAVE".

    Displaces each vertex a small step along its (first) neighbour normal:
    if the displaced triangle's edges all grow, the corner is convex; if
    they all shrink, concave. A 2-of-3 result is treated as convex (see
    inline note); anything else is ambiguous.
    """
    dist1, dist2,dist3 = distance(Tri[0],Tri[1]),distance(Tri[1],Tri[2]),distance(Tri[2],Tri[0])
    stepsize = dist1/10.0 # must be much smaller than polygon
    # Keep only the first normal of each vertex's neighbour-normal list.
    NVset = [subset[0] for subset in NVset]
    movedTri = (np.array(Tri) + (np.array(NVset) * stepsize)).tolist()
    moved_dist1 = distance(movedTri[0],movedTri[1])
    moved_dist2 = distance(movedTri[1],movedTri[2])
    moved_dist3 = distance(movedTri[2],movedTri[0])
    # True where the displaced edge got longer than the original one.
    boolList = [moved_dist1 >dist1,moved_dist2 > dist2,moved_dist3 > dist3]
    if sum(boolList) == 3:
        return "CONVEX"
    elif sum(boolList) == 0:
        return "CONCAVE"
    elif sum(boolList) == 2: # for convex chamfer, change later
        return "CONVEX"
    else:
        #print "ONVERWACHTE CASE????????????????"  (Dutch: "unexpected case")
        #print sum(boolList)
        return "CONVEX/CONCAVE"
def testChamferConcaveConvex(Tri,NVset):
    """Unfinished: intended to classify chamfer triangles as concave/convex.

    Currently computes side lengths and a step size but does nothing with
    them; always returns None.
    """
    dist1, dist2,dist3 = distance(Tri[0],Tri[1]),distance(Tri[1],Tri[2]),distance(Tri[2],Tri[0])
    stepsize = dist1/10.0 # must be much smaller than polygon
    pass
##############################################################################
# filter triangles based on NV collection #
##############################################################################
def testAngles(triNV,NVset):
    """Inspect a triangle's neighbour normals relative to its own normal.

    Returns a 5-tuple: (number of unique neighbour normals, number of
    unique "corner" normals, number of unique "chamfer" normals, the
    per-vertex corner normals, the per-vertex chamfer normals). When
    there are at most two unique normals the triangle is flat-ish and
    the last four values are zeros/empty.
    """
    #print "********** START testAngles ************"
    # GET UNIQUE NUMBER
    flatNVset = [NV for subset in NVset for NV in subset]
    NVset_thinned = thinNVList(flatNVset)
    numberUnique = len(NVset_thinned)
    #print "numberUnique = "+ str(numberUnique)
    # GET CORNER AND CHAMFER VECTORS
    if numberUnique > 2:
        original_CornerNVs = [[],[],[]]
        original_ChamferNVs = [[],[],[]]
        for subset in range(3):
            for vec in NVset[subset]:
                # 0.955316618125 rad = arccos(1/sqrt(3)) ~ 54.74 degrees.
                if np.allclose([angle_between(triNV, vec)],[0.955316618125]): # assumption, correct for MC?
                    original_CornerNVs[subset].append(vec)
                # 0.785398163397 rad = pi/4 = 45 degrees.
                elif np.allclose([angle_between(triNV, vec)],[0.785398163397]):
                    original_ChamferNVs[subset].append(vec)
        flat_original_CornerNVs = [NV for subset in original_CornerNVs for NV in subset]
        unique_CornerNVs = thinNVList(flat_original_CornerNVs)
        numberCornerNVs = len(unique_CornerNVs)
        flat_original_ChamferNVs = [NV for subset in original_ChamferNVs for NV in subset]
        unique_ChamferNVs = thinNVList(flat_original_ChamferNVs)
        numberChamferNVs = len(unique_ChamferNVs)
        # for subset in original, thin the list?
        for i in range(3):
            #print i
            #print original_CornerNVs
            original_CornerNVs[i] = thinNVList(original_CornerNVs[i])
            #print original_CornerNVs
            original_ChamferNVs[i] = thinNVList(original_ChamferNVs[i])
        return numberUnique, numberCornerNVs, numberChamferNVs, original_CornerNVs, original_ChamferNVs
    else:
        return numberUnique, 0, 0, [], [] # not tidy?
def detectTriangles(vertlist,trilist,voxelsize):
print "starting triangle detection"
vertDict = {}
vertID = 1
triDict = {}
triID = 1
new_vertlist = []
#print len(vertlist)
# CREATE VER DICT
# for tri in tri list, get normal vec
# for vert in tri, check in vertdict, if there: add normal vec, if not there, add it, vertid +1 add normal vec
for index in range(len(trilist)):
tri = trilist[index]
node1,node2,node3 = tri[0], tri[1], tri[2]
updateTri = []
TRI = Triangle(vertlist[node1-1],vertlist[node2-1],vertlist[node3-1])
NNV = TRI.getNormalizedNormalVec()
for node in tri:
Node = vertlist[node-1]
VERT = Vertice(Node[0],Node[1],Node[2],vertID)
# if not in dict: attach NNV, add vert to dict
if VERT.string not in vertDict:
#print "**********CHECK******"
#print NNV
VERT.addNeighbourNormal(NNV)
vertDict[VERT.string] = VERT
#print vertDict[VERT.string].neighbourNormals
updateTri.append(vertID)
new_vertlist.append(VERT.getVerticePosition())
vertID +=1
# if in dict, attach NNV to existing vert
else:
#print "**********CHECK******"
#print vertDict[VERT.string].neighbourNormals
vertDict[VERT.string].addNeighbourNormal(NNV)
#print vertDict[VERT.string].neighbourNormals
updateTri.append(vertDict[VERT.string].ID)
trilist[index] = updateTri
# CREATE TRI DICT
# get all NNVs from single points in triangle list
#for index in range(0,1):
for index in range(len(trilist)):
TRI = Triangle(new_vertlist[trilist[index][0]-1],new_vertlist[trilist[index][1]-1],new_vertlist[trilist[index][2]-1]) #-1 needed?
for node in trilist[index]:
#print node
dict_string = "(%s , %s , %s)" % (new_vertlist[node-1][0],new_vertlist[node-1][1],new_vertlist[node-1][2]) #-1 needed!
#print dict_string
vertObject = vertDict[dict_string]
#print vertObject.neighbourNormals
TRI.neighbourNormals.append(vertObject.neighbourNormals)
triDict[index+1] = TRI
convexCornerList = []
concaveCornerList = []
convexChamferList = []
concaveChamferList = []
concaveConvexCase1List = []
concaveConvexCase2List = []
# SHARPENING
sharpenedTriList = []
# order not important right?
for tri in triDict.values():
triNV = tri.getNormalizedNormalVec()
angleResults = testAngles(tri.getNormalizedNormalVec(),tri.neighbourNormals)
# FLAT TRI
if angleResults[0] < 3:
sharpenedTriList.append(tri)
# CORNER TRI
elif angleResults[1] == 3:
equilateralTest = testEquilateral(tri.position)
if equilateralTest[0]:
directionVec = [-1 if val < 0 else 1 for val in triNV]
moveVec = np.array(directionVec) * voxelsize/3.0
convexConcavetest = testCornerConcaveConvex(tri.position,angleResults[3])
if convexConcavetest == "CONCAVE":
# DETECTION
#print "CONCAVE"
concaveCornerList.append(tri)
# SHARPENING
newpoint = np.around(np.array(equilateralTest[1]) - np.array(moveVec), decimals=5)
moveVecs = [(-np.multiply(item[0],voxelsize)/2.0) for item in angleResults[3]]
n15 = np.add(tri.n1,moveVecs[1])
n25 = np.add(tri.n2,moveVecs[2])
n35 = np.add(tri.n3,moveVecs[0])
TRI1 = Triangle(tri.n1,n15,newpoint)
sharpenedTriList.append(TRI1)
TRI2 = Triangle(n15,tri.n2,newpoint)
sharpenedTriList.append(TRI2)
TRI3 = Triangle(tri.n2,n25,newpoint)
sharpenedTriList.append(TRI3)
TRI4 = Triangle(n25,tri.n3,newpoint)
sharpenedTriList.append(TRI4)
TRI5 = Triangle(tri.n3,n35,newpoint)
sharpenedTriList.append(TRI5)
TRI6 = Triangle(n35,tri.n1,newpoint)
sharpenedTriList.append(TRI6)
elif convexConcavetest == "CONVEX":
# DETECTION
#print "CONVEX"
convexCornerList.append(tri)
# SHARPENING
newpoint = np.around(np.array(equilateralTest[1]) + np.array(moveVec), decimals=5)
moveVecs = [(np.multiply(item[0],voxelsize)/2.0) for item in angleResults[3]]
n15 = np.add(tri.n1,moveVecs[1])
n25 = np.add(tri.n2,moveVecs[2])
n35 = np.add(tri.n3,moveVecs[0])
TRI1 = Triangle(tri.n1,n15,newpoint)
sharpenedTriList.append(TRI1)
TRI2 = Triangle(n15,tri.n2,newpoint)
sharpenedTriList.append(TRI2)
TRI3 = Triangle(tri.n2,n25,newpoint)
sharpenedTriList.append(TRI3)
TRI4 = Triangle(n25,tri.n3,newpoint)
sharpenedTriList.append(TRI4)
TRI5 = Triangle(tri.n3,n35,newpoint)
sharpenedTriList.append(TRI5)
TRI6 = Triangle(n35,tri.n1,newpoint)
sharpenedTriList.append(TRI6)
"""else:
# change nothing
sharpenedTriList.append(tri)"""
else:
print '***********************CASE1******************* '
# DETECTION
concaveConvexCase1List.append(tri)
# SHARPENING
dist1,dist2,dist3 = distance(tri.n1,tri.n2),distance(tri.n2,tri.n3),distance(tri.n3,tri.n1)
#print dist1, dist2,dist3
if np.isclose(dist3,dist1):
middleIndex,middlePos = 0,tri.n1
elif np.isclose(dist1,dist2):
middleIndex,middlePos = 1,tri.n2
elif np.isclose(dist2,dist3):
middleIndex,middlePos = 2,tri.n3
sideIndexes = [0,1,2]#.remove(singleIndex)
sideIndexes.remove(middleIndex)
moveVecs = [(np.multiply(item[0],voxelsize)/2.0) for item in angleResults[3]]
#print moveVecs
triList = [tri.n1,tri.n2,tri.n3]
moveCheck = np.add(moveVecs,triList)
if np.allclose(moveCheck[sideIndexes[0]],moveCheck[middleIndex]):
print moveCheck[sideIndexes[0]]
print moveCheck[middleIndex]
sideOutterIndex,sideInnerIndex = sideIndexes[0],sideIndexes[1]
elif np.allclose(moveCheck[sideIndexes[1]],moveCheck[middleIndex]):
print moveCheck[sideIndexes[1]]
print moveCheck[middleIndex]
sideOutterIndex,sideInnerIndex = sideIndexes[1],sideIndexes[0]
else:
print "OOPS"
# sideOutter / sideInner check is fout?
singleFirst = np.add(triList[sideOutterIndex],np.subtract(triList[sideInnerIndex],middlePos))
singleSecond = np.subtract(triList[sideOutterIndex],moveVecs[middleIndex])
singleThird = np.add(singleFirst,moveVecs[sideOutterIndex])
TRI1 = Triangle(singleFirst,singleSecond,middlePos)
sharpenedTriList.append(TRI1)
#TRI2 = Triangle(singleFirst,singleSecond,triList[sideOutterIndex])
#sharpenedTriList.append(TRI2)
TRI3 = Triangle(middlePos,singleThird,singleFirst)
sharpenedTriList.append(TRI3)
elif angleResults[1] == 2:
# DETECTION
concaveConvexCase2List.append(tri)
# SHARPENING
#print "****************** CASE 2 ****************************"
vecList = [[1.0, 0.0, 0.0],[0.0, 1.0, 0.0],[0.0, 0.0, 1.0],[-1.0, 0.0, 0.0],[0.0, -1.0, 0.0],[0.0, 0.0, -1.0]]
for cornerVec in vecList:
# write above in better way, unnecessary searches?)
if angleResults[3].count([cornerVec]) == 2:
double = cornerVec # niet nodig
elif angleResults[3].count([cornerVec]) == 1:
single = cornerVec
singleIndex = angleResults[3].index([single])
#print singleIndex
triList = [tri.n1,tri.n2,tri.n3]
doubleIndexes = [0,1,2]#.remove(singleIndex)
#print doubleIndexes
doubleIndexes.remove(singleIndex)
#print doubleIndexes
# check distances from double to single
if distance(triList[doubleIndexes[0]],triList[singleIndex]) > distance(triList[doubleIndexes[1]],triList[singleIndex]):
doubleFIndex, doubleCIndex = doubleIndexes[0],doubleIndexes[1]
elif distance(triList[doubleIndexes[1]],triList[singleIndex]) > distance(triList[doubleIndexes[0]],triList[singleIndex]):
doubleFIndex,doubleCIndex = doubleIndexes[1],doubleIndexes[0]
# triangle vertices defined
singlePos = triList[singleIndex]
doubleFPos = triList[doubleFIndex]
doubleCPos = triList[doubleCIndex]
# define 3 new vertices
singleFirst = np.add(doubleCPos,doubleFPos)/2.0
#print triList
empty = [0.0,0.0,0.0]
vecList = [ ]
moveVec = np.subtract(singlePos,singleFirst)
for i in range(len(moveVec)):
if moveVec[i] != 0:
temp = np.copy(empty)
temp[i] = moveVec[i]
vecList.append(temp)
if np.allclose(distance(singleFirst,np.add(singleFirst,vecList[1])),distance(singlePos,np.add(singleFirst,vecList[1]))):
singleSecond,singleThird = np.add(singleFirst,vecList[0]),np.add(singleFirst,vecList[1])
elif np.allclose(distance(singleFirst,np.add(singleFirst,vecList[0])),distance(singlePos,np.add(singleFirst,vecList[0]))):
singleSecond,singleThird = np.add(singleFirst,vecList[1]),np.add(singleFirst,vecList[0])
# write sharpened triangles
TRI1 = Triangle(singleFirst,doubleCPos,singleSecond)
sharpenedTriList.append(TRI1)
TRI2 = Triangle(singleFirst,singleSecond,singlePos)
sharpenedTriList.append(TRI2)
TRI3 = Triangle(singleFirst,singlePos,singleThird)
sharpenedTriList.append(TRI3)
singleFourth = np.add(singleFirst,np.subtract(singleSecond,doubleCPos,))
TRI4 = Triangle(singleFirst,doubleFPos,singleFourth)
sharpenedTriList.append(TRI4)
"""moveVecs = [(np.multiply(item[0],voxelsize)/2.0) for item in angleResults[3]]
#print moveVecs
singleSecond = np.add(singleFirst,moveVecs[doubleFIndex])
singleThird = np.subtract(singlePos,moveVecs[doubleFIndex])
# write sharpened triangles
TRI1 = Triangle(doubleCPos,singleFirst,singleThird)
sharpenedTriList.append(TRI1)
TRI2 = Triangle(singleFirst,singleSecond,singleThird)
sharpenedTriList.append(TRI2)
TRI3 = Triangle(singleThird,singleSecond,singlePos)
sharpenedTriList.append(TRI3)"""
# CHAMFER TRI
elif angleResults[2] == 2:
if checkOrtho(angleResults[4]):
convexConcavetest = testCornerConcaveConvex(tri.position,angleResults[4])
if convexConcavetest == "CONCAVE":
# DETECTION
convexChamferList.append(tri)
# SHARPENING
vecList = [[1.0, 0.0, 0.0],[0.0, 1.0, 0.0],[0.0, 0.0, 1.0],[-1.0, 0.0, 0.0],[0.0, -1.0, 0.0],[0.0, 0.0, -1.0]]
for chamferVec in vecList:
# write above in better way, unnecessary searches?)
if angleResults[4].count([chamferVec]) == 2:
double = chamferVec # niet nodig
elif angleResults[4].count([chamferVec]) == 1:
single = chamferVec
singleIndex = angleResults[4].index([single]) # only finds first, allowed in case of single
triList = [tri.n1,tri.n2,tri.n3]
doubleIndexes = [0,1,2]#.remove(singleIndex)
doubleIndexes.remove(singleIndex)
# check distances from double to single
if distance(triList[doubleIndexes[0]],triList[singleIndex]) > distance(triList[doubleIndexes[1]],triList[singleIndex]):
doubleFIndex, doubleCIndex = doubleIndexes[0],doubleIndexes[1]
elif distance(triList[doubleIndexes[1]],triList[singleIndex]) > distance(triList[doubleIndexes[0]],triList[singleIndex]):
doubleFIndex,doubleCIndex = doubleIndexes[1],doubleIndexes[0]
# triangle vertices defined
singlePos = triList[singleIndex]
doubleFPos = triList[doubleFIndex]
doubleCPos = triList[doubleCIndex]
# construct next 2 vertices
moveVec = [(-np.multiply(item,voxelsize)/2.0) for item in double]
singleFirst = np.add(singlePos,moveVec)
singleSecond = np.add(singleFirst,np.subtract(doubleFPos,doubleCPos))
# write sharpened triangles
TRI1 = Triangle(singleFirst,doubleCPos,doubleFPos)
sharpenedTriList.append(TRI1)
TRI2 = Triangle(singleFirst,doubleFPos,singleSecond)
sharpenedTriList.append(TRI2)
elif convexConcavetest == "CONVEX":
concaveChamferList.append(tri)
#print "*************** CONVEX CHAMFER *****************"
#print tri
#print angleResults[4]
vecList = [[1.0, 0.0, 0.0],[0.0, 1.0, 0.0],[0.0, 0.0, 1.0],[-1.0, 0.0, 0.0],[0.0, -1.0, 0.0],[0.0, 0.0, -1.0]]
for chamferVec in vecList:
# write above in better way, unnecessary searches?)
if angleResults[4].count([chamferVec]) == 2:
double = chamferVec # niet nodig
elif angleResults[4].count([chamferVec]) == 1:
single = chamferVec
singleIndex = angleResults[4].index([single]) # only finds first, allowed in case of single
triList = [tri.n1,tri.n2,tri.n3]
doubleIndexes = [0,1,2]#.remove(singleIndex)
doubleIndexes.remove(singleIndex)
# check distances from double to single
if distance(triList[doubleIndexes[0]],triList[singleIndex]) > distance(triList[doubleIndexes[1]],triList[singleIndex]):
doubleFIndex, doubleCIndex = doubleIndexes[0],doubleIndexes[1]
elif distance(triList[doubleIndexes[1]],triList[singleIndex]) > distance(triList[doubleIndexes[0]],triList[singleIndex]):
doubleFIndex,doubleCIndex = doubleIndexes[1],doubleIndexes[0]
# triangle vertices defined
singlePos = triList[singleIndex]
doubleFPos = triList[doubleFIndex]
doubleCPos = triList[doubleCIndex]
# construct next 2 vertices
moveVec = [(np.multiply(item,voxelsize)/2.0) for item in double]
singleFirst = np.add(singlePos,moveVec)
singleSecond = np.add(singleFirst,np.subtract(doubleFPos,doubleCPos))
# write sharpened triangles
TRI1 = Triangle(singleFirst,doubleCPos,doubleFPos)
sharpenedTriList.append(TRI1)
TRI2 = Triangle(singleFirst,doubleFPos,singleSecond)
sharpenedTriList.append(TRI2)
else:
# change nothing
sharpenedTriList.append(tri)
else:
# change nothing
sharpenedTriList.append(tri)
else:
# change nothing
#print tri
sharpenedTriList.append(tri)
# WRITE SHARPENED TRIANGLES TO VERT AND TRI LIST
sharp_vertlist = []
sharp_trilist = []
vertID = 1
for tri in sharpenedTriList:
index_tri = []
#print tri.position
for vert in tri.position:
sharp_vertlist.append(vert)
index_tri.append(vertID)
vertID+=1
sharp_trilist.append(index_tri)
"""
testlist2OBJ(convexCornerList, "convexCornerList.obj")
testlist2OBJ(concaveCornerList, "concaveCornerList.obj")
testlist2OBJ(concaveConvexCase1List, "concaveConvexCase1List.obj")
testlist2OBJ(concaveConvexCase2List, "concaveConvexCase2List.obj")
testlist2OBJ(convexChamferList, "convexChamferList.obj")
testlist2OBJ(concaveChamferList, "concaveChamferList.obj")
testlist2OBJ(sharpenedTriList, "sharpenedTriList.obj")"""
return sharp_vertlist, sharp_trilist
|
4,128 | e748420dfdb77fa8661111a92fc48b79f64bff10 | #!/usr/bin/env python
# encoding: utf-8
"""
PreScaledTriggers.py
Created by Bryn Mathias on 2011-11-02.
Copyright (c) 2011 Imperial College. All rights reserved.
"""
import sys
import os
from plottingUtils import *
# HLT_HT600_v1Pre_1_HLT_HT300_v9Pre_210
def main():
    """Produce HLT_HT550 / HLT_HT250 trigger turn-on plots.

    Sums the numerator ("HT_Nom") and denominator ("HT_Denom") HT histograms
    across all trigger-version directories in 4fbHTTriggers.root, then writes
    differential and cumulative efficiency curves to a multi-page PDF and to
    a ROOT file via the plottingUtils ``Print`` helper.
    NOTE(review): relies on plottingUtils (Print, GetSumHist, TurnOn) and
    PyROOT (`r`) imported via ``from plottingUtils import *``.
    """
    c1 = Print("HLT_HT550_HLT_HT250.pdf")
    c1.open()
    # c1.Print()
    diffList = []
    cumuList = []
    # (numerator, denominator) histogram names inside each trigger directory
    histList = ("HT_Nom","HT_Denom")
    # One directory per trigger-version pair; all are summed together below.
    dirs = [
    "HLT_HT550_v11_HLT_HT250_v11",
    "HLT_HT550_v2_HLT_HT250_v2",
    "HLT_HT550_v3_HLT_HT250_v3",
    "HLT_HT550_v4_HLT_HT250_v4",
    "HLT_HT550_v5_HLT_HT250_v5",
    "HLT_HT550_v6_HLT_HT250_v6",
    "HLT_HT550_v7_HLT_HT250_v7",
    "HLT_HT550_v8_HLT_HT250_v8" ,
    ]
    # Older muon-trigger directory lists kept for reference:
    # dirs = [ "HT275_HLT_HT250_AlphaT0p53_v2_HLT_Mu15_HT200_v2", "HT275_HLT_HT250_AlphaT0p53_v3_HLT_Mu15_HT200_v3",
    # "HT275_HLT_HT250_AlphaT0p53_v4_HLT_Mu15_HT200_v4", "HT275_HLT_HT250_AlphaT0p53_v5_HLT_Mu30_HT200_v1",
    # "HT275_HLT_HT250_AlphaT0p53_v6_HLT_Mu40_HT200_v4", "HT275_HLT_HT250_AlphaT0p55_v1_HLT_Mu5_HT200_v4" ,
    # "HT275_HLT_HT250_AlphaT0p55_v2_HLT_Mu40_HT200_v4"]
    # weights = [138.018/2760.509,444.633/2760.509,4.291/2760.509,179.041/2760.509,1799.0/2760.509,233.808/2760.509,1799.0/2760.509]
    # Unit weights: every run period contributes equally (longer than `dirs`;
    # presumably GetSumHist only consumes as many as it needs — TODO confirm).
    weights = [1.0,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,]
    mg = None
    c1.cd()
    c1.Clear()
    # Summed numerator (passed HT550) and denominator (passed HT250) spectra.
    Nom = GetSumHist(File = ["4fbHTTriggers.root"], Directories = dirs, Hist = histList[0], Col = r.kBlack, Norm = weights, LegendText = "")
    Nom.HideOverFlow()
    Denom = GetSumHist(File = ["4fbHTTriggers.root"], Directories = dirs, Hist = histList[1], Col = r.kRed, Norm = weights, LegendText = "")
    Denom.HideOverFlow()
    Nom.Rebin(25,None)
    Denom.Rebin(25,None)
    Nom.hObj.GetXaxis().SetRangeUser(0.,1200.)
    Denom.hObj.GetXaxis().SetRangeUser(0.,1200.)
    Denom.hObj.SetTitle("HLT_HT550_HLT_HT250")
    Denom.Draw("h")
    Denom.hObj.GetXaxis().SetTitle("H_{T}")
    Denom.hObj.GetYaxis().SetTitle("Number of Trigger events / %f"%(Denom.hObj.GetBinWidth(1)))
    Denom.hObj.GetYaxis().SetTitleOffset(1.15)
    Nom.hObj.SetMarkerStyle(20)
    Nom.Draw("psame")
    c1.Print()
    c1.toFile(Nom.hObj,"Nom_Standard_All")
    c1.toFile(Denom.hObj,"Denom_Standard_All")
    # Differential efficiency (per-bin Nom/Denom ratio).
    turnon = TurnOn(Nom,Denom)
    # c1.Clear()
    turnon.setRange(0.,1200.)
    c1.cd()
    turnon.DifferentialTurnOn().GetXaxis().SetRangeUser(0.,1200.)
    turnon.DifferentialTurnOn().Draw("ap")
    diffList.append(turnon.DifferentialTurnOn())
    c1.toFile(turnon.DifferentialTurnOn(),"HLT_HT550_HLT_HT250")
    c1.Print()
    # leg = Legend()
    # print float(pair.split("_")[7])/float((pair.split("_")[3:4])[0])
    # if float(pair.split("_")[7])%float((pair.split("_")[3:4])[0]) == 0:
    # Cumulative efficiency: efficiency of an HT > cut selection.
    cumNom = Nom.CumulativeHist()
    cumDenom = Denom.CumulativeHist()
    cumDenom.GetYaxis().SetTitle("")
    cumDenom.Draw("h")
    cumNom.Draw("psame")
    c1.Print()
    # TGraphAsymmErrors.Divide gives binomial (Clopper-Pearson style) errors.
    cumuTurnOn = r.TGraphAsymmErrors()
    cumuTurnOn.Divide(cumNom,cumDenom)
    cumuTurnOn.GetXaxis().SetTitle("H_{T}^{cut} ")
    cumuTurnOn.GetXaxis().SetTitleSize(0.05)
    cumuTurnOn.GetYaxis().SetTitle("Cumulative efficiency")
    cumuTurnOn.GetYaxis().SetTitleOffset(1.5)
    cumuTurnOn.GetXaxis().SetRangeUser(0.,1200.)
    cumuTurnOn.SetMarkerStyle(20)
    cumuTurnOn.SetMarkerSize(0.5)
    cumuTurnOn.SetTitle("Cumulative HLT_HT550_HLT_HT250")
    cumuList.append(cumuTurnOn)
    c1.toFile(cumNom,"CumuNom_All")
    c1.toFile(cumDenom,"CumuDenom_All")
    cumuTurnOn.Draw("ap")
    cumuTurnOn.GetXaxis().SetRangeUser(0.,1200.)
    c1.canvas.Update()
    c1.Print()
    c1.toFile(cumuTurnOn,"Cumulative HLT_HT550_HLT_HT250")
    c1.Clear()
    c1.close()
    pass
# Script entry point.
if __name__ == '__main__':
    main()
|
def calc_fib(n):
    """Return the n-th Fibonacci number (F(0) = 0, F(1) = 1).

    Iterates with two rolling values instead of memoising every
    intermediate term in a dict, so memory is O(1) rather than O(n).

    Raises:
        ValueError: if n is negative (the old dict version raised an
            opaque KeyError for this case).
    """
    if n < 0:
        raise ValueError("n must be non-negative")
    prev, curr = 0, 1
    for _ in range(n):
        prev, curr = curr, prev + curr
    return prev
# Read n from stdin and print the n-th Fibonacci number.
n = int(input())
print(calc_fib(n))
|
4,130 | 40b9114e4348bab5d76d68a937b3abe95a90c230 | import os
# One-off script: geocode street-tree inventory rows (street name + number)
# via the Google geocoder and append a WKT POINT column to a new CSV.
# NOTE(review): performs live network calls per row — subject to geocoder
# rate limits (the commented time.sleep(1) hints at that).
# didnt endup using this
import time
# from django.contrib.gis.utils import LayerMapping
from django.contrib.gis.geos import fromstr
# from models import Harbord
import csv
from pygeocoder import Geocoder
# from django.contrib.gis.geos import (Point, fromstr, fromfile,
#  GEOSGeometry, MultiPoint, MultiPolygon, Polygon)
tree_csv = os.path.abspath('../harbordvillage/Inventory2009_test.csv')
#Setup
with open(tree_csv, "rU") as csvinput:
    with open("../harbordvillage/outfile.csv","w+") as csvoutput:
        writer = csv.writer(csvoutput,quoting=csv.QUOTE_NONNUMERIC)
        reader = csv.reader(csvinput)
        # `all` shadows the builtin; it accumulates header + enriched rows.
        all = []
        # Copy the header row and add the new column name.
        row = next(reader)
        row.append('Address')
        all.append(row)
        for row in reader:
            # Assumes row[0] is street name and row[1] is street number
            # — TODO confirm against the inventory CSV schema.
            add=("%s %s %s %s" % (row[1], row[0], 'Toronto', 'Canada'))
            # pygeocode stuff
            # time.sleep(1)
            results = Geocoder.geocode(add)
            # Normalise the street name to the geocoder's canonical route.
            row[0] = results.route
            # print(isinstance(results, basestring))
            ind = results[0].coordinates
            lat=ind[0]
            lon=ind[1]
            ind= str(lat) + ' ' + str(lon)
            print(ind)
            # Build a GEOS point from "lat lon" WKT.
            mypoint = fromstr('POINT('+ ind + ')')
            # print(type(mypoint))
            try:
                row.append(mypoint)
            except:
                pass
            all.append(row)
            print(row)
            # row.append(results.cooridnates)
            # print(row)
        writer.writerows(all)
4,131 | f92a1398a27541557ec5bbf752d44ce40d1df94a | # -*- coding: utf-8 -*-
#imports
from math import sqrt, pi, exp
from csv import reader
from random import seed,randrange
"""
Helper functions
"""
#calculate probability
def probability(x, avg, standev):
    """Gaussian probability density at ``x`` for mean ``avg`` and
    standard deviation ``standev``."""
    variance = standev ** 2
    density = exp(-((x - avg) ** 2) / (2 * variance))
    return density / (sqrt(2 * pi) * standev)
#mean
def avg(vals):
    """Arithmetic mean of a non-empty sequence of numbers."""
    total = 0.0
    for value in vals:
        total += value
    return total / len(vals)
#standard deviation
def standev(vals):
    """Sample standard deviation (Bessel-corrected, n-1 denominator)."""
    mean = sum(vals) / float(len(vals))
    squared_error = 0.0
    for value in vals:
        squared_error += (value - mean) ** 2
    return sqrt(squared_error / float(len(vals) - 1))
"""
Data Handling
"""
def read_csv(file_name):
    """Load a CSV file as a list of string rows, skipping blank lines."""
    with open(file_name, 'r') as handle:
        return [record for record in reader(handle) if record]
# Convert string column to float
def str_column_to_float(dataset, column):
    """In place: parse column ``column`` of every row as a float
    (surrounding whitespace is stripped first)."""
    for record in dataset:
        record[column] = float(record[column].strip())
def int_from_string_col(data, col):
    """Replace the string labels in column ``col`` with integer codes.

    Rewrites rows in place and returns the label -> code mapping.
    (Code assignment order follows set iteration order, as before.)
    """
    distinct = set(row[col] for row in data)
    lookup = {label: code for code, label in enumerate(distinct)}
    for row in data:
        row[col] = lookup[row[col]]
    return lookup
def move_class_to_last_col(data, col):
    """In place: move column ``col`` of every row to the end.

    Returns the same list object for chaining.
    """
    for row in data:
        row.append(row.pop(col))
    return data
"""
Implementation Functions
"""
"""
We need to calculate the probability of data according to their class so the
training data needs to be split up by classes. In order to do this we need to
establish the column that represents the class value for each dataset.
"""
# this works for datasets with last column representing class value
def split_class(data):
    """Group rows by class value (the last element of each row).

    Returns {class_value: [rows...]}, preserving row order per class.
    """
    grouped = dict()
    for row in data:
        grouped.setdefault(row[-1], []).append(row)
    return grouped
"""
We need to find the mean and standard deviation for each column of input.
"""
def data_stats(data):
    """Per-column (mean, stdev, count) tuples for a row-major dataset,
    excluding the final (class) column."""
    summaries = [(avg(column), standev(column), len(column))
                 for column in zip(*data)]
    return summaries[:-1]
def class_stats(data):
    """Column statistics per class: {class_value: data_stats(rows)}."""
    per_class = dict()
    for label, rows in split_class(data).items():
        per_class[label] = data_stats(rows)
    return per_class
"""
Calculate Class Probabilities
"""
def class_get_prob(stats, instance):
    """Naive-Bayes score per class for one instance.

    Score = P(class) * prod_i N(instance[i]; mean_i, sd_i), where the
    class prior is that class's row count over the total row count.
    """
    total_rows = sum(stats[label][0][2] for label in stats)
    scores = dict()
    for label, column_stats in stats.items():
        score = column_stats[0][2] / float(total_rows)
        for index, (mean, sd, _count) in enumerate(column_stats):
            score *= probability(instance[index], mean, sd)
        scores[label] = score
    return scores
def predict(stats, instance):
    """Return the class label with the highest naive-Bayes score."""
    scores = class_get_prob(stats, instance)
    best_label, best_score = None, -1
    for label, score in scores.items():
        if best_label is None or score > best_score:
            best_score, best_label = score, label
    return best_label
def cross_validation_split(data, n_folds):
    """Randomly partition ``data`` into ``n_folds`` equal folds.

    Rows left over after integer division are dropped. The input list
    is not modified; sampling is without replacement via randrange.
    """
    pool = list(data)
    fold_size = len(data) // n_folds
    folds = []
    for _ in range(n_folds):
        fold = []
        while len(fold) < fold_size:
            fold.append(pool.pop(randrange(len(pool))))
        folds.append(fold)
    return folds
def evaluate(actual, predicted):
    """Accuracy (percent) of ``predicted`` against ``actual``."""
    matches = sum(1 for i in range(len(actual)) if actual[i] == predicted[i])
    return matches / float(len(actual)) * 100.0
def cross_validation(data, algo, n_folds, *args):
    """k-fold cross-validation of ``algo``; returns one accuracy per fold.

    For each fold: train on the other folds, predict on copies of the
    fold's rows with the class value masked to None, score with evaluate().
    """
    folds = cross_validation_split(data, n_folds)
    accuracy_list = []
    for held_out in folds:
        remaining = list(folds)
        remaining.remove(held_out)
        training = sum(remaining, [])
        testing = []
        for row in held_out:
            masked = list(row)
            masked[-1] = None
            testing.append(masked)
        predicted = algo(training, testing, *args)
        truth = [row[-1] for row in held_out]
        accuracy_list.append(evaluate(truth, predicted))
    return accuracy_list
def naive_bayes(train, test):
    """Fit Gaussian naive Bayes on ``train``; return predicted labels
    for every row of ``test``."""
    model = class_stats(train)
    return [predict(model, row) for row in test]
def run(file_name, target):
    """End-to-end naive-Bayes driver.

    Loads ``file_name`` (CSV), moves the class column ``target`` to the
    end, coerces features to float and class labels to int codes, then
    prints 10-fold cross-validation accuracy scores and their mean.
    """
    # Fixed seed so the random fold split is reproducible.
    seed(1)
    data = read_csv(file_name)
    data = move_class_to_last_col(data,target)
    for i in range(len(data[0])-1):
        str_column_to_float(data,i)
    int_from_string_col(data,len(data[0])-1)
    n_folds = 10
    accuracies = cross_validation(data, naive_bayes, n_folds)
    print("10-fold Cross-Validation Accuracy Scores")
    for score in accuracies:
        print("%.4f%%" % score)
    print('Mean Accuracy: %.4f%%' % (sum(accuracies)/float(len(accuracies))))
|
4,132 | 42f656898481768ea0bf1ca0b6afbe06de9dd597 | import numpy as np
from math import *
from visual import *
from visual.graph import *
def energy2(n):
    # n-th 1-D particle-in-a-box energy level, converted from joules to eV
    # via the module-level `convert` factor (uses globals h, L, m).
    return ((n*h/L)**2)/(8*m)*convert
def factorial(n):
    """Return n! for a non-negative integer n.

    Delegates to math.factorial (C implementation) instead of the old
    hand-rolled loop. Note: the old loop silently returned 1 for
    negative n; math.factorial raises ValueError, which surfaces the
    invalid input instead of hiding it.
    """
    import math
    return math.factorial(n)
def bosonconfigs(numelvl, numpart):
    """Number of boson configurations of ``numpart`` particles over
    ``numelvl`` levels: C(numpart + numelvl - 1, numpart)
    (stars-and-bars multiset count)."""
    return choose(numpart + numelvl - 1, numpart)
def choose(choices, x):
    """Binomial coefficient C(choices, x); 0 when x > choices.

    Fix: the old ``int(factorial(a)/(...))`` used true division, which
    under Python 3 converts the huge factorial ratio to a float before
    truncating — losing precision for moderately large arguments.
    Integer floor division (which is exact here) avoids that entirely.
    """
    import math
    if choices >= x:
        return math.factorial(choices) // (math.factorial(x) * math.factorial(choices - x))
    return 0
def configs(x,elvl,particle="boson",out=None):
    """
    Generate configs for bosons or fermions.

    Recursively enumerates all multi-particle occupations of the given
    energy levels and writes each configuration's TOTAL energy into a
    slice of ``out`` (bosons may share a level; fermions may not, hence
    the ``elvl[m+1:]`` recursion).

    Parameters
    ----------
    x : positive integer
        Number of particles in the system
    elvl : 1-D array
        List of valid energy states of the system
    particle : "boson" or "fermion"
    out : 1-D array
        Array to put configs in (a view into the caller's array on
        recursive calls)

    Returns
    -------
    out : 1-D array
        array of total energys of all valid configurations
    """
    dtype = elvl.dtype
    n=elvl.size #number of energy levels
    if particle=="boson":
        if out is None:
            out=None
            out = np.zeros(bosonconfigs(n,x), dtype=dtype)
        if x==1:
            # Base case: one particle -> one config per level.
            for i in range(n):
                out[i]=elvl[i]
        if x>1:
            k=0 #index for end of last input added
            for m in range(n): #m is the energy level index
                # Configurations whose lowest occupied level is m fill
                # out[k:end]; the recursive call writes the remaining
                # x-1 particles' energies into that slice in place.
                end=k+bosonconfigs(n-m,x-1) #last energy level
                configs(x-1, elvl[m:],particle,out[k:end])
                for i in range(k,end):
                    out[i]+= elvl[m]
                k=end
        return out
    if particle=="fermion":
        if out is None:
            out=None
            out = np.zeros(choose(n,x), dtype=dtype)
        if x==1:
            for i in range(n):
                out[i]=elvl[i]
        if x>1:
            k=0 #index for end of last input added
            for m in range(n): #m is the energy level index
                # Pauli exclusion: remaining particles draw only from
                # levels strictly above m (elvl[m+1:]).
                end=k+choose(n-(m+1),x-1) #last energy level
                configs(x-1, elvl[m+1:],particle,out[k:end])
                for i in range(k,end):
                    out[i]+= elvl[m]
                k=end
        return out
h = 6.62606957 * 10 ** -34 #Plank's constant
#m = 1.67492735174 * 10 ** -27 #this is mass of neutron
m = 9.11 * 10**-31 #this is mass of electron
L = 0.39 * 10**-9 #size of box
convert = 6.24150934 * 10**18
maximum = 100
kb = 1.3806488 * 10**-23
energylevels = np.fromiter((((x*h/L)**2)/(8*m)*convert for x in range(maximum+1)),dtype=float)
#this creates the entire table of energy levels as a single list
def oneDEnergy(n):
    # First n 1-D particle-in-a-box energy levels (quantum numbers 1..n)
    # in JOULES — note: unlike the module-level `energylevels` table, no
    # eV conversion is applied here. Uses globals h, L, m.
    energylevels = np.fromiter((((x*h/L)**2)/(8*m) for x in range(1,n+1)),dtype=float)
    return energylevels
#this creates the entire table of energy levels as a single list
def ThreeDEnergy(n):
    """Sorted array of 3-D particle-in-a-box energies (converted via the
    module `convert` factor) for quantum numbers 1..n on each axis."""
    values = [((i * h / L) ** 2 + (j * h / L) ** 2 + (k * h / L) ** 2) / (8 * m) * convert
              for i in range(1, n + 1)
              for j in range(1, n + 1)
              for k in range(1, n + 1)]
    return np.sort(np.array(values))
def fermion(n):
    """Histogram the total energies of n-fermion configurations below the
    cutoff (top single level + (n-1) ground levels); returns
    (counts, bin_edges) from numpy.histogram. Also draws a live
    VPython ghistogram as a side effect."""
    energycount = []
    energy = oneDEnergy(maximum)
    fermionlist = configs(n,energy,'fermion')
    for config in np.nditer(fermionlist):
        # Keep only configurations inside the energy window.
        if config < energy[-1] + (n-1)*energy[0]:
            energycount.append(config)
    #return number of configurations in energy range
    fnc1 = ghistogram(bins = np.linspace(min(energycount), max(energycount), 100), color = color.red)
    fnc1.plot(data=energycount)
    hist, binedges = np.histogram(energycount,bins = 100,weights = None, density = False)
    return (hist,binedges)
def boson(n,nthElvl,acc):
    '''
    Histogram total energies of n-boson configurations up to an energy
    cutoff; returns (counts, bin_edges) and draws a VPython ghistogram.

    n is the number of particles
    nthElvl is the nth energy level to go up to
    acc is integer of histogram(s) (number of display bins)
    '''
    Elvl= oneDEnergy(nthElvl)
    #print Elvl
    econf= np.sort(configs(n, Elvl, particle="boson"))
    # Cutoff: one particle at the highest level, the rest in the ground state.
    max_energy=Elvl[-1]+(n-1)*Elvl[0]
    #np.sort(econf)
    fcn1 = ghistogram(bins = np.linspace(econf[0],max_energy,acc))
    # Linear scan for the first config above the cutoff (econf is sorted),
    # so econf[:m] is exactly the in-window slice.
    bound=True
    m=0
    while bound==True:
        if econf[m]>max_energy:
            bound=False
        else:
            m=m+1
            if m==econf.size:
                break
    fcn1.plot(data=econf[:m])
    hist, binedges = np.histogram(econf[:m],bins = 100,weights = None, density = False)
    return (hist,binedges)
def boltzfit(xvalues, yvalues, degree):
    """Polynomial fit of histogram counts against bin-center energy.

    ``xvalues`` are bin edges (length len(yvalues)+1); the midpoint of
    each consecutive edge pair is used as the x coordinate. Returns the
    numpy.polyfit coefficient array (highest power first).
    """
    centers = [(lo + hi) / 2 for lo, hi in zip(xvalues, xvalues[1:])]
    return np.polyfit(centers, yvalues, degree)
def gibbsfit(xvalues, yvalues, degree):
    """Polynomial fit of cumulative counts against the upper bin edges.

    NOTE: converts ``yvalues`` into its running cumulative sum IN PLACE
    (same as the original implementation), then fits against
    ``xvalues[1:]``. Returns the numpy.polyfit coefficient array.
    """
    for idx in range(1, len(yvalues)):
        yvalues[idx] += yvalues[idx - 1]
    return np.polyfit(xvalues[1:], yvalues, degree)
def conequation(n,degree,particle ='boson',method ='gibbs'):
    # Fit a degree-`degree` polynomial to the configuration histogram of an
    # n-particle system, using either raw ("boltzmann") or cumulative
    # ("gibbs") counts.
    # NOTE(review): if `particle` is neither 'boson' nor 'fermion', `data`
    # is unbound and the print below raises NameError; if `method` matches
    # neither branch the function silently returns None.
    if particle == 'boson':
        data = boson(n, maximum, 100)
    if particle == 'fermion':
        data = fermion(n)
    print (data)
    if method == 'boltzmann':
        return (boltzfit(data[1],data[0],degree))
    if method == 'gibbs':
        return (gibbsfit(data[1],data[0],degree))
|
4,133 | 3c3d45f0844496b8d623286b36a4935a154f410a | # coding: utf-8
import datetime
import json
import requests
import os
import re
import sys
from todoist.api import TodoistAPI
#SLACK_CHANNEL = os.environ['SLACK_CHANNEL']
#SLACK_POSTURL = os.environ['SLACK_POSTURL']
TDIAPI = TodoistAPI(os.environ['TODOISTAPITOKEN'], cache=False)
TDIAPI.sync()
name = os.environ['TODOIST_PJT']
def lambda_handler(event, context):
    """AWS Lambda entry point: dispatch on event["function"].

    NOTE(review): `msg` is computed but never used or returned — it was
    presumably meant to feed slack_notify(); confirm intent. Raises
    KeyError when "function" is missing, and `msg` stays unbound when the
    value matches neither branch (harmless today since it is unused).
    """
    if event["function"] == 'tasklist':
        msg = tasklist(name)
    if event["function"] == 'activity':
        msg = activity(name)
    return
def activity(name):
    """Return today's (JST) completed-task titles for Todoist project ``name``.

    Uses the module-level TDIAPI client. Returns [] when no project with
    that name exists.

    Fixes:
    - previously, a missing project left ``tasks_project_id`` unbound and
      the comparison below raised NameError as soon as a completed event
      from today was seen; now we log and return [] early.
    - ``today`` was recomputed on every loop iteration; it is loop-invariant
      (and hoisting it also avoids inconsistent results across a midnight
      boundary).
    """
    actlogs = TDIAPI.activity.get()
    pjts = TDIAPI.state['projects']
    tasks_project_id = None
    for project in pjts:
        if project['name'] == name:
            tasks_project_id = project['id']
            break
    if tasks_project_id is None:
        print('[INFO] Not match project name')
        return []
    # JST "today": Todoist event_date is UTC, so shift +9h before comparing.
    today = datetime.datetime.now().strftime("%Y-%m-%d")
    event_list = []
    for events in actlogs['events']:
        '''
        todoistのevent_dateはUTCで且つstringなので一度datetime型に変換して、+9時間する
        そこから年月日だけにして、stringに戻して日本時間の今日のデータかをチェック
        '''
        todoist_times = datetime.datetime.strptime(events['event_date'], '%Y-%m-%dT%H:%M:%SZ') + datetime.timedelta(hours=9)
        todoist_date = todoist_times.strftime("%Y-%m-%d")
        if (events['event_type'] == 'completed'
                and todoist_date == today
                and events['parent_project_id'] == tasks_project_id):
            event_list.append(events['extra_data']['content'])
    print(event_list)
    return event_list
def tasklist(name):
    """Print the tasks of Todoist project ``name`` that sit in its 'ToDo'
    section.

    Uses the module-level TDIAPI client. Returns None (the Slack message
    assembly was never finished and remains disabled).

    Fixes:
    - ``for projects_id in list:`` iterated the *builtin* ``list`` type
      (TypeError) instead of the projects; now iterates ``pjts``.
    - ``l_sec_name is not None`` was always true (a list comprehension is
      never None), so items without a matching section raised IndexError;
      now guarded with truthiness.
    - removed leftover debug scaffolding (``print(labels)`` /
      ``sys.exit()``) that made everything after it dead code.
    """
    pjts = TDIAPI.state['projects']
    items = TDIAPI.state['items']
    sects = TDIAPI.state['sections']
    tasks_project_id = None
    for project in pjts:
        if project['name'] == name:
            tasks_project_id = project['id']
            break
    if tasks_project_id is None:
        print("プロジェクト名が正しくありません。プロジェクト名を正しく入力してください。")
        return
    for item in items:
        # Resolve the item's section name (empty list when it has none).
        l_sec_name = [sect['name'] for sect in sects if item['section_id'] == sect['id']]
        if l_sec_name and l_sec_name[0] == 'ToDo':
            print(l_sec_name)
    return
def slack_notify():
    """Build (and eventually POST) a Slack message listing project tasks.

    NOTE(review): references globals ``msg`` and ``SLACK_CHANNEL`` that are
    not defined in this module (SLACK_CHANNEL/SLACK_POSTURL are commented
    out at the top), so calling this currently raises NameError — fix the
    wiring before enabling the POST below.
    """
    title = "*[定期通知] プロジェクト " + name + " のタスクリスト*\n"
    slack_message = {
        'channel': SLACK_CHANNEL,
        'icon_emoji': ":todoist:",
        'text': title,
        "attachments": [
            {
                "color": "#36a64f",
                "fields": [
                    {
                        "value": msg,
                    },
                ],
            }
        ]
    }
    # The actual POST is intentionally disabled:
    #requests.post(SLACK_POSTURL, data=json.dumps(slack_message))
|
4,134 | cd2062055e30fc37a5f00f4bce6ffd9ea5eda860 | /Applications/anaconda2/lib/python2.7/warnings.py |
4,135 | 675dc9467dd6db9c2a429941af56d78d6c0e1c08 | """
Copyright (C) 2005 - 2016 Splunk Inc. All Rights Reserved.
"""
import logging
import sys
if sys.platform == "win32":
import os, msvcrt
msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
import splunk.admin as admin
import splunk.entity as entity
import splunk.util as util
from notable_event_suppression import NotableEventSuppression
from splunk import ResourceNotFound
from splunk.clilib.bundle_paths import make_splunkhome_path
sys.path.append(make_splunkhome_path(["etc", "apps", "SA-Utils", "lib"]))
from SolnCommon.log import setup_logger, SHORT_FORMAT
logger = setup_logger('suppressions_rest_handler', format=SHORT_FORMAT)
logger.setLevel(logging.INFO)
class InvalidConfigException(Exception):
    """Base exception for invalid suppression configuration."""
    pass
class InvalidParameterValueException(InvalidConfigException):
    """
    Describes a config parameter that has an invalid value.
    """
    def __init__(self, field, value, value_must_be):
        message = "The value for the parameter '%s' is invalid: %s (was %s)" % (field, value_must_be, value)
        # Fix: super() must name *this* class; the old
        # super(InvalidConfigException, self) skipped the parent class in
        # the MRO and went straight to Exception.
        super(InvalidParameterValueException, self).__init__(message)
class UnsupportedParameterException(InvalidConfigException):
    """
    Describes a config parameter that is unsupported.
    """
    pass
class MissingTransitionException(InvalidConfigException):
    """
    Describes a capability that is missing.
    """
    def __init__(self, transitions):
        # Keep the offending transitions available to the handler.
        self.transitions = transitions
        # Fix: super() must name *this* class; the old
        # super(InvalidConfigException, self) skipped the parent class in
        # the MRO and went straight to Exception.
        super(MissingTransitionException, self).__init__("Missing transition detected")
def _getFieldValue(args, name, default_value=None, max_length=None):
'''Get the field value from the argument list.'''
# Get the value if defined or the default value if not defined
value = args[name][0] or default_value if name in args else default_value
# Check the length
if value and max_length and len(value) > max_length:
raise admin.ArgValidationException(
'App %s cannot be longer than %s character%s.' % (name, max_length, "s" if max_length > 1 else ""))
return value
def _addToDictIfNonNull(d, name, value):
'''Add the given name and value to the dictionary if the value is not none.
Arguments:
d -- the dictionary to add to
name -- the name of the object to add
value -- the value of the object to add (if not none)
'''
if value is not None:
d[name] = value
class Suppressions(admin.MConfigHandler):
    '''
    Splunk EAI REST handler managing notable-event suppressions, which are
    stored as eventtypes.conf stanzas.
    '''
    # admin.py constants: requested-action bitmask value -> readable name
    REQUESTED_ACTIONS = {'1': 'ACTION_CREATE', '2': 'ACTION_LIST', '4': 'ACTION_EDIT', '8': 'ACTION_REMOVE', '16': 'ACTION_MEMBERS', '32': 'ACTION_RELOAD'}
    # Permissions: capability required to modify suppressions
    WRITE_CAPABILITY = 'edit_suppressions'
    # Default Params accepted on create/edit
    PARAM_DISABLED = 'disabled'
    PARAM_SEARCH = 'search'
    PARAM_DESCRIPTION = 'description'
    VALID_PARAMS = [PARAM_DISABLED, PARAM_SEARCH, PARAM_DESCRIPTION]
    REQUIRED_PARAMS = [PARAM_DISABLED, PARAM_SEARCH]
    # Configuration key mapping (REST arg name -> entity attribute)
    # NOTE(review): not referenced in the methods visible here — confirm use.
    CONF_KEY_MAPPING = {'app': 'namespace', 'owner': 'owner'}
    # Default Vals
    DEFAULT_NAMESPACE = 'SA-ThreatIntelligence'
    DEFAULT_OWNER = 'nobody'
    DEFAULT_DISABLED = 0
def setup(self):
logger.info('Setting up suppressions_rest_handler')
# set write capability
self.setWriteCapability(Suppressions.WRITE_CAPABILITY)
if self.requestedAction == admin.ACTION_EDIT or self.requestedAction == admin.ACTION_CREATE:
# Fill required params
for arg in Suppressions.REQUIRED_PARAMS:
self.supportedArgs.addReqArg(arg)
# Fill valid params
for arg in Suppressions.VALID_PARAMS:
if arg not in Suppressions.REQUIRED_PARAMS:
self.supportedArgs.addOptArg(arg)
    def handleCreate(self, confInfo):
        '''Handles creation of a suppression.

        Validates the requested name and parameters, persists a new
        eventtypes.conf stanza via the entity endpoint, reloads the
        configuration, and emits a SuppressionAudit log line.
        '''
        # Get requested action (numeric code -> readable name for logging)
        actionStr = str(self.requestedAction)
        if actionStr in Suppressions.REQUESTED_ACTIONS:
            actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]
        logger.info('Entering %s', actionStr)
        # Refresh so the duplicate check below sees the latest config
        self.handleReload()
        name = self.callerArgs.id
        args = self.callerArgs.data
        # Make sure the name is not empty
        if not name or len(name) == 0:
            raise admin.ArgValidationException("The name of the suppression must not be empty")
        # Make sure the name follows the convention
        nameMatch = NotableEventSuppression.suppressionRE.match(name)
        if not nameMatch:
            raise admin.ArgValidationException("The name of the suppression must follow proper convention")
        # Make sure the item does not already exist
        if name in self.readConf('eventtypes'):
            raise admin.AlreadyExistsException("A suppression entry already exists for %s" % (name))
        # Get the field values
        disabled = _getFieldValue(args, Suppressions.PARAM_DISABLED)
        search = _getFieldValue(args, Suppressions.PARAM_SEARCH)
        description = _getFieldValue(args, Suppressions.PARAM_DESCRIPTION)
        # Add the field values to a configuration dictionary (that will be verified)
        conf = entity.getEntity('saved/eventtypes', '_new', sessionKey=self.getSessionKey())
        conf.namespace = self.appName # always save things to SOME app context.
        conf.owner = self.context == admin.CONTEXT_APP_AND_USER and self.userName or "-"
        conf['name'] = name
        _addToDictIfNonNull(conf, Suppressions.PARAM_DISABLED, disabled)
        _addToDictIfNonNull(conf, Suppressions.PARAM_SEARCH, search)
        _addToDictIfNonNull(conf, Suppressions.PARAM_DESCRIPTION, description)
        ## Notable Suppression Audit Log Data
        # NOTE(review): assumes the '_new' entity exposes conf['eai:acl']['owner']
        # — confirm against the entity endpoint's response shape.
        log_data = {
            'action': 'create',
            'suppression': conf['name'][len(NotableEventSuppression.SUPPRESSION_START):],
            'user': conf['eai:acl']['owner'],
            'status': 'success',
            'signature': 'Notable event suppression successfully created'
        }
        # Check the configuration
        try:
            Suppressions.checkConf(conf, name)
        except InvalidConfigException as e:
            e = "The configuration for the new suppression '%s' is invalid and could not be created: %s" % (name, str(e))
            logger.error(e)
            log_data['status'] = 'failure'
            log_data['signature'] = 'Unable to save the event suppression'
            logger.error('SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'.format(**log_data))
            raise admin.ArgValidationException(e)
        # Write out an update to the eventtypes config file
        entity.setEntity(conf, sessionKey=self.getSessionKey())
        logger.info('Successfully added suppression: %s', name)
        # Reload suppressions
        self.handleReload()
        logger.info('%s completed successfully', actionStr)
        logger.info('SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'.format(**log_data))
def handleCustom(self, confInfo):
logger.info('Handling custom action: %s', self.customAction)
if self.customAction == '_autodisable':
expired_count, enabled_count = NotableEventSuppression.disable_expired_suppressions(session_key=self.getSessionKey())
logger.info("%s expired suppressions detected; %s were enabled (now disabled)", expired_count, enabled_count)
else:
self.actionNotImplemented()
def handleList(self, confInfo):
"""
Handles listing of a suppression
"""
# Get requested action
actionStr = str(self.requestedAction)
if actionStr in Suppressions.REQUESTED_ACTIONS:
actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]
logger.info('Entering %s', actionStr)
self.handleReload()
# Get the configurations from suppression.conf
suppressionDict = self.readConfCtx('eventtypes')
# Get all suppressions and provide the relevant options
if suppressionDict != None:
# Check each conf
for stanza, settings in suppressionDict.items():
stanzaMatch = NotableEventSuppression.suppressionRE.match(stanza)
if stanzaMatch:
try:
# Check config
Suppressions.checkConf(settings, stanza, confInfo)
except InvalidConfigException as e:
logger.error("The configuration for suppression '%s' is invalid: %s", stanza, str(e))
logger.info('%s completed successfully', actionStr)
    def handleReload(self, confInfo=None, makeCSV=True):
        """
        Handles refresh/reload of the configuration options.

        Asks splunkd to refresh the eventtypes configuration through the
        'properties/eventtypes' endpoint. Failure to refresh is logged as a
        warning and deliberately swallowed (best-effort refresh).

        NOTE(review): the makeCSV parameter is not used anywhere in this
        method — confirm whether callers still rely on it.
        """
        # Get requested action
        actionStr = str(self.requestedAction)
        if actionStr in Suppressions.REQUESTED_ACTIONS:
            actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]
        logger.info('Entering %s', actionStr)
        logger.info('Refreshing suppression configurations via properties endpoint')
        try:
            refreshInfo = entity.refreshEntities('properties/eventtypes', sessionKey=self.getSessionKey())
        except Exception as e:
            # Best-effort: a failed refresh must not abort the caller's action.
            logger.warn('Could not refresh suppression configurations via properties endpoint: %s', str(e))
        logger.info('%s completed successfully', actionStr)
def handleEdit(self, confInfo):
"""
Handles edits to the configuration options
"""
# Get requested action
actionStr = str(self.requestedAction)
if actionStr in Suppressions.REQUESTED_ACTIONS:
actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]
logger.info('Entering %s', actionStr)
# Refresh
self.handleReload()
name = self.callerArgs.id
args = self.callerArgs
if name is not None:
# Make sure the name follows the convention
nameMatch = NotableEventSuppression.suppressionRE.match(name)
if not nameMatch:
raise admin.ArgValidationException("The name of the suppression must follow proper convention")
try:
conf = entity.getEntity('saved/eventtypes', name, sessionKey=self.getSessionKey())
except ResourceNotFound:
raise admin.NotFoundException("A suppression configuration with the given name '%s' could not be found" % (name))
else:
# Stop if no name was provided
raise admin.ArgValidationException("No name provided")
## Notable Suppression Audit Log Data
log_data = {
'status': 'success',
'action': 'edit',
'signature': 'Notable event suppression successfully saved',
'suppression': name[len(NotableEventSuppression.SUPPRESSION_START):],
'user': conf['eai:userName']
}
# Create the resulting configuration that would be persisted if the settings provided are applied
for key, val in conf.items():
if key in args.data:
# Set the value to a single space so that the field is set to a blank value
new_value = args[key][0]
if new_value in [None, '']:
new_value = ' '
## If a value other than the 'disabled' param is changed, it
# came from the editor, otherwise the lister.
if key == self.PARAM_DISABLED:
conf_key = util.normalizeBoolean(conf[key], enableStrictMode=True)
new_value = util.normalizeBoolean(new_value, enableStrictMode=True)
if conf_key != new_value:
log_data['action'] = 'disable' if new_value else 'enable'
log_data['signature'] = 'Suppression successfully disabled' if new_value else 'Suppression successfully enabled'
conf[key] = new_value
if key == admin.EAI_ENTRY_ACL:
for k, v in self.CONF_KEY_MAPPING.iteritems():
if k in val and val[k] is not None and len(val[k]) > 0:
setattr(conf, v, val[k])
if conf.namespace is None or len(conf.namespace) == 0:
conf.namespace = Suppressions.DEFAULT_NAMESPACE
if conf.owner is None or len(conf.owner) == 0:
conf.owner = Suppressions.DEFAULT_OWNER
try:
# Check config
Suppressions.checkConf(conf, name)
except InvalidConfigException as e:
e = "The edit attempt for the suppression '%s' produced an invalid configuration: %s" % (name, str(e))
logger.error(e)
log_data['status'] = 'failure'
if log_data['action'] == 'edit':
log_data['signature'] = 'Unable to save the event suppression'
elif log_data['action'] == 'enable':
log_data['signature'] = 'Error occurred while enabling the suppression: ' + str(e)
else:
log_data['signature'] = 'Error occurred while disabling the suppression: ' + str(e)
logger.error('SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'.format(**log_data))
raise admin.ArgValidationException(e)
# Write out an update to the eventtypes config file
entity.setEntity(conf, sessionKey=self.getSessionKey())
# Log that the suppression was updated
logger.info("Successfully updated the '%s' suppression", name)
# Reload suppressions
self.handleReload()
logger.info('%s completed successfully', actionStr)
logger.info('SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'.format(**log_data))
    def handleRemove(self, confInfo):
        """Delete the suppression eventtype named by the caller's id."""
        # In app+user context scope the delete to the requesting user,
        # otherwise use the wildcard owner "-".
        owner = ((self.context == admin.CONTEXT_APP_AND_USER) and self.userName) or "-"
        entity.deleteEntity('configs/conf-eventtypes', self.callerArgs.id, namespace=self.appName, owner=owner, sessionKey=self.getSessionKey())
    @staticmethod
    def checkConf(settings, stanza=None, confInfo=None, throwExceptionOnError=False):
        """
        Checks the settings and raises an exception if the configuration is invalid.

        When both stanza and confInfo are given, the recognized settings are
        also appended to confInfo (EAI ACL keys become metadata).

        Raises:
            InvalidParameterValueException: 'disabled' is not a boolean.
            UnsupportedParameterException: unknown key, only when
                throwExceptionOnError is True (otherwise a warning is logged).
            InvalidConfigException: a required field is missing.
        """
        # Below is a list of the required fields. The entries in this list will be removed as they
        # are observed. An empty list at the end of the config check indicates that all necessary
        # fields where provided.
        required_fields = Suppressions.REQUIRED_PARAMS[:]
        if stanza is not None and confInfo is not None:
            # Add each of the settings
            for key, val in settings.items():
                # Set val to empty if None
                if val is None:
                    val = ''
                if key in Suppressions.VALID_PARAMS:
                    confInfo[stanza].append(key, val)
                # Key is eai; Set meta
                elif key.startswith(admin.EAI_ENTRY_ACL):
                    confInfo[stanza].setMetadata(key, val)
                # Key is eai; userName/appName
                elif key.startswith(admin.EAI_META_PREFIX):
                    confInfo[stanza].append(key, val)
                # Key is not proper
                else:
                    pass
        # Check each of the settings individually
        logger.info("Checking general settings for the '%s' suppression", stanza)
        for key, val in settings.items():
            # Set val to empty if None
            if val is None:
                val = ''
            # Check the disabled/selected value
            if key == Suppressions.PARAM_DISABLED:
                try:
                    util.normalizeBoolean(val, enableStrictMode=True)
                    # Remove the field from the list of required fields.
                    # The inner try keeps remove()'s ValueError (already
                    # removed / not required) from being mistaken for a
                    # normalizeBoolean failure.
                    try:
                        required_fields.remove(key)
                    except ValueError:
                        pass # Field not available, probably because it is not required
                except ValueError:
                    raise InvalidParameterValueException(key, val, "must be a valid boolean")
            elif key in Suppressions.REQUIRED_PARAMS:
                # Remove the field from the list of required fields
                try:
                    required_fields.remove(key)
                except ValueError:
                    pass # Field not available, probably because it is not required
            elif key in Suppressions.VALID_PARAMS:
                pass
            # Key is eai
            elif key.startswith(admin.EAI_META_PREFIX):
                pass
            # Key is not proper
            else:
                if throwExceptionOnError:
                    raise UnsupportedParameterException()
                else:
                    logger.warn("The configuration for '%s' contains an unsupported parameter: %s", stanza, key)
        # Error if some of the required fields were not provided
        if len(required_fields) > 0:
            raise InvalidConfigException('The following fields must be defined in the configuration but were not: ' + ', '.join(required_fields).strip())
# initialize the handler
admin.init(Suppressions, admin.CONTEXT_APP_AND_USER) |
4,136 | 83c109bc5aab6739a3a32116fae4f0c011d6118e | import torch
from torch.nn import functional as F
from sklearn.metrics import f1_score, accuracy_score
@torch.no_grad()
def validate(data, model):
    """Return the model's loss on the validation split of `data`.

    Puts the model into its evaluation mode, runs a forward pass, and
    scores only the entries selected by `data.val_mask == 1`.
    """
    model.evaluate()
    predictions = model(data.x, data.train_index)
    val_selector = data.val_mask == 1
    return model.loss(predictions[val_selector], data.y[val_selector])
@torch.no_grad()
def validate_fb(data, model, lsym):
    """Return the validation loss for a model that also takes `lsym`.

    Same as validate(), but the forward pass receives the extra `lsym`
    argument (e.g. a symmetric Laplacian) untouched.
    """
    model.evaluate()
    predictions = model(data.x, data.train_index, lsym)
    val_selector = data.val_mask == 1
    return model.loss(predictions[val_selector], data.y[val_selector])
# @torch.no_grad()
# def validate_sage(data, model, subgraph_loader, device):
# model.evaluate()
#
# out = model.gnn_model.inference(data.x, subgraph_loader, device)
# return model.loss(out[data.val_mask == 1], data.y[data.val_mask == 1])
def evaluate_metrics(data, out, device):
    """Compute loss, micro-F1 and accuracy on the train/val/test splits.

    For each split the entries with `{split}_mask == 1` are scored; results
    are returned as a dict keyed '{split}_loss', '{split}_f1', '{split}_acc'.
    Tensors are moved to the CPU before sklearn scoring unless `device`
    is already 'cpu'.
    """
    metrics = {}
    for split in ('train', 'val', 'test'):
        mask = data['{}_mask'.format(split)]
        selector = mask == 1
        logits = out[selector]
        targets = data.y[selector]
        metrics['{}_loss'.format(split)] = F.cross_entropy(logits, targets).item()
        pred = logits.max(dim=1)[1]
        if device == 'cpu':
            labels, predictions = targets, pred.data.numpy()
        else:
            labels, predictions = targets.cpu(), pred.data.cpu().numpy()
        metrics['{}_f1'.format(split)] = f1_score(labels, predictions, average='micro')
        metrics['{}_acc'.format(split)] = accuracy_score(labels, predictions)
    return metrics
@torch.no_grad()
def evaluate(model, data, device):
    """Run a forward pass and return per-split metrics (see evaluate_metrics)."""
    model.evaluate()
    return evaluate_metrics(data, model(data.x, data.train_index), device)
@torch.no_grad()
def evaluate_fb(model, data, device, lsym):
    """Like evaluate(), but forwards the extra `lsym` argument to the model."""
    model.evaluate()
    return evaluate_metrics(data, model(data.x, data.train_index, lsym), device)
# @torch.no_grad()
# def evaluate_sage(model, data, subgraph_loader, device):
# model.evaluate()
# out = model.gnn_model.inference(data.x, subgraph_loader, device)
#
# return evaluate_metrics(data, out)
|
4,137 | a3216aa41cd28b91653b99017e21a03e43372e9b | # -*- encoding: utf-8 -*-
#----------------------------------------------------------------------------
#
# Copyright (C) 2014 .
# Coded by: Borni DHIFI (dhifi.borni@gmail.com)
#
#----------------------------------------------------------------------------
import models
import wizard
import parser
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: |
class Solution:
    """Hotel booking feasibility ("Hotel Bookings Possible").

    Given parallel lists of arrival and departure times and K rooms,
    decide whether every booking can be honoured.
    """

    # @param arrive : list of integers
    # @param depart : list of integers
    # @param K : integer
    # @return a boolean
    def hotel(self, arrive, depart, K):
        """Return True iff at most K bookings overlap at any moment.

        Sorts both time lists and sweeps them with two pointers, tracking
        the number of concurrently occupied rooms — O(n log n) overall.

        Fixes over the original: the O(n^2) selection sort is replaced by
        sorted(); the overlap count no longer indexes past the end of the
        list (the old `while arrive[x + 1] < depart[x]` raised IndexError);
        unreachable dead code after the return was removed; and feasibility
        is now the standard "peak concurrency <= K" test instead of the
        incorrect `count < K` comparison.
        """
        arrivals = sorted(arrive)
        departures = sorted(depart)
        occupied = 0
        i = j = 0
        n = len(arrivals)
        while i < n:
            # Process whichever event comes first; a departure at the same
            # instant frees its room before the new guest checks in.
            if arrivals[i] < departures[j]:
                occupied += 1
                if occupied > K:
                    return False
                i += 1
            else:
                occupied -= 1
                j += 1
        return True
# Demo driver.
# Fixes: removed the unreachable/broken fragment that followed the method's
# return (`mid = (beg + mid) / 2` referenced `mid` before assignment and
# `for i in range(len(arrive)):` used an undefined name at module level —
# both would crash on import), and switched to print() call syntax, which
# behaves the same with a single argument on Python 2 and 3.
obj = Solution()
l1 = [1, 2, 3, 4]
l2 = [10, 2, 6, 14]
k = 1
print(obj.hotel(l1, l2, k))
|
4,139 | 2d5e147b081283047cd044746d73d91ee2e59052 | from datetime import datetime
import xarray
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.dates import date2num
import numpy as np
from matplotlib.gridspec import GridSpec
def test_plot_area_avg(target_nc_folder="", source_nc_path=""):
    """Plot the area-averaged lake ice fraction of the source dataset against
    the 'lake_ice_fraction' written by the HLES (lake-effect) analysis, on a
    shared time axis, so the two can be compared visually.

    Args:
        target_nc_folder: folder containing the HLES "*daily.nc" outputs.
        source_nc_path: NetCDF file holding the original "LC" ice fraction.
    """
    # target_nc_folder = "/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_1980-2009"
    # target_nc_folder = "/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_icefix_Obs_1980-1981_test"
    #target_nc_folder = "/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_test2_1980-1981_test1"
    ice_fr = xarray.open_dataset(source_nc_path)["LC"]
    assert isinstance(ice_fr, xarray.DataArray)
    # Keep only physically meaningful fractions in [0, 1]; the rest become NaN.
    ice_fr = ice_fr.where((ice_fr >= 0) & (ice_fr <= 1))
    # t, x, y
    source_data = ice_fr.to_masked_array(copy=False)
    source_time = ice_fr.coords["time"]
    source_time = pd.to_datetime(source_time.values.tolist())
    # Spatial mean per time step; NaN when the whole field is masked.
    s_source = pd.Series(data=[
        (field[~field.mask].mean() if not np.all(field.mask) else np.nan) for field in source_data
    ], index=source_time)
    ice_fr_lkeff = xarray.open_mfdataset(target_nc_folder + "/*daily.nc")["lake_ice_fraction"]
    lkeff_data = ice_fr_lkeff.to_masked_array(copy=False)
    lkeff_time = pd.to_datetime(ice_fr_lkeff.coords["t"].values.tolist())
    s_lkeff = pd.Series([
        (field[~field.mask].mean() if not np.all(field.mask) else np.nan) for field in lkeff_data
    ], index=lkeff_time)
    # Restrict the source series to the period covered by the HLES output.
    s_source = s_source[(s_source.index <= lkeff_time[-1]) & (s_source.index >= lkeff_time[0])]
    assert isinstance(s_source, pd.Series)
    #
    print(f"Source: len={len(s_source)}")
    print(f"Lkeff: len={len(s_lkeff)}")
    # do the plotting
    fig = plt.figure()
    gs = GridSpec(2, 1)
    # plot initial lake fractions
    ax = fig.add_subplot(gs[0, 0])
    s_source.plot(ax=ax, marker=".", linestyle="None", label="original")
    ax.legend()
    # plot lake fractions outputed by hles algorithm
    ax = fig.add_subplot(gs[1, 0], sharex=ax)
    s_lkeff.plot(ax=ax, marker=".", linestyle="None", label="lkeff")
    ax.legend()
    # plt.show()
def __print_field_stats(tfield, field, label):
    """Print min/max/mean/std of the unmasked values of a masked 2D field.

    Args:
        tfield: timestamp (or any tag) printed next to the label.
        field: numpy masked array.
        label: short name identifying the dataset in the output.
    """
    keep = ~field.mask
    if not np.any(keep):
        print(f"{label}: no meaningful data")
        return
    vals = field[keep]
    print(f"{label} {tfield}:\n{vals.min()}...{vals.max()}\n"
          f"mean={vals.mean()}\n"
          f"std={vals.std()}\n")
    print("-" * 20)
def test_plot_maps(target_nc_folder, source_nc_path=""):
    """Show side-by-side ice-fraction maps (source vs HLES output) for every
    time step from 1981-01-01 onward, printing per-field statistics and
    flagging any time step whose spatial means differ.

    Args:
        target_nc_folder: folder containing the HLES "*daily.nc" outputs.
        source_nc_path: NetCDF file holding the original "LC" ice fraction.
    """
    # target_nc_folder = "/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_1980-2009"
    # target_nc_folder = "/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_test2_1980-1981_test1"
    ice_fr = xarray.open_dataset(source_nc_path)["LC"]
    assert isinstance(ice_fr, xarray.DataArray)
    # Keep only fractions in [0, 1]; the rest become NaN.
    ice_fr = ice_fr.where((ice_fr >= 0) & (ice_fr <= 1))
    start_date = datetime(1981, 1, 1)
    # t, x, y
    source_data = ice_fr.to_masked_array(copy=False)
    source_time = ice_fr.coords["time"]
    source_time = pd.to_datetime(source_time.values.tolist())
    ice_fr_lkeff = xarray.open_mfdataset(target_nc_folder + "/*daily.nc")["lake_ice_fraction"]
    lkeff_data = ice_fr_lkeff.to_masked_array(copy=False)
    lkeff_time = pd.to_datetime(ice_fr_lkeff.coords["t"].values.tolist())
    # select from lkeff data (drop everything before start_date)
    lkeff_time_sel = []
    lkeff_data_sel = []
    for t, afield in zip(lkeff_time, lkeff_data):
        if t < start_date:
            continue
        lkeff_time_sel.append(t)
        lkeff_data_sel.append(afield)
    lkeff_time = lkeff_time_sel
    lkeff_data = lkeff_data_sel
    # Select from the source time and data (keep the lkeff period only)
    source_data_sel = []
    source_time_sel = []
    for t, afield in zip(source_time, source_data):
        if lkeff_time[0] <= t <= lkeff_time[-1]:
            source_data_sel.append(afield)
            source_time_sel.append(t)
    gs = GridSpec(1, 2)
    # One figure per time step: source field on the left, lkeff on the right.
    for i in range(len(source_time_sel)):
        ts = source_time_sel[i]
        tl = lkeff_time[i]
        data_s = source_data_sel[i]
        data_l = lkeff_data[i]
        fig = plt.figure(figsize=(20, 10))
        ax = fig.add_subplot(gs[0, 0])
        ax.set_title(f"Source if: {ts}")
        cs = ax.contourf(data_s, np.arange(0, 1.1, 0.1))
        plt.colorbar(cs, ax=ax)
        ax = fig.add_subplot(gs[0, 1])
        ax.set_title(f"Lkeff if: {tl}")
        cs = ax.contourf(data_l, np.arange(0, 1.1, 0.1))
        plt.colorbar(cs, ax=ax)
        print("*" * 20)
        __print_field_stats(ts, data_s, "source")
        __print_field_stats(tl, data_l, "lkeff")
        print("*" * 20)
        # Spatial means should agree between the two datasets; report if not.
        ms = data_s[~data_s.mask].mean()
        ml = data_l[~data_l.mask].mean()
        if ms != ml:
            print(f"ms={ms}; ml={ml}")
        plt.show()
        plt.close(fig)
def main():
    """Entry point: compare the fixed-ice observation dataset against one
    HLES test run (area-averaged time series; the map comparison is
    currently disabled)."""
    target_nc_folder = "/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_test2_1proc_1980-1981"
    # source_nc_path = "/HOME/huziy/skynet3_rech1/obs_data_for_HLES/interploated_to_the_same_grid/GL_0.1_452x260/cis_nic_glerl_interpolated_lc.nc"
    source_nc_path = "/HOME/huziy/skynet3_rech1/obs_data_for_HLES/interploated_to_the_same_grid/GL_0.1_452x260_icefix/cis_nic_glerl_interpolated_lc_fix.nc"
    test_plot_area_avg(target_nc_folder=target_nc_folder, source_nc_path=source_nc_path)
    # test_plot_maps(target_nc_folder=target_nc_folder, source_nc_path=source_nc_path)
    plt.show()
if __name__ == '__main__':
main() |
4,140 | a868ecb6ea6a5c7a186ddd8fa4fb76d96efeb21d | import numpy as np
'''
1. Create 0-D array, 1-D array, 2-D array, 3-D array with following value
0-D: [2]
1-D: [3, 4, 5, 6, 7]
2-D: [[8, 1, 3], [2, 3, 4], [6, 2, 5]]
3-D: [[[1, 2, 4], [3, 3, 2], [1, 9, 1]], [[6, 8, 7], [9, 1, 0], [8, 2, 3]], [[5, 4, 1], [5, 7, 2], [3, 5, 9]]]
print them
'''
# Exercise 1: build and display arrays of rank 0 through 3.
D0 = np.array(2)
D1 = np.array([3, 4, 5, 6, 7])
D2 = np.array([[8, 1, 3], [2, 3, 4], [6, 2, 5]])
D3 = np.array([[[1, 2, 4], [3, 3, 2], [1, 9, 1]],
               [[6, 8, 7], [9, 1, 0], [8, 2, 3]],
               [[5, 4, 1], [5, 7, 2], [3, 5, 9]]])
for _tag, _arr in (('D0', D0), ('D1', D1), ('D2', D2), ('D3', D3)):
    print(_tag)
    print(_arr)
'''
2. Use index to change all value 8 to 100 in 4 arrays
array[index1, index2] = newValue
for example: 2-D array should be changed as : [[100, 1, 3], [2, 3, 4], [6, 2, 5]]
print them
'''
# Exercise 2: replace every 8 with 100 via direct indexing.
D2[0, 0] = 100
print('D2')
print(D2)
D3[1, 0, 1] = 100
D3[1, 2, 0] = 100
print('D3')
print(D3)
'''
3. Print the sum of all following values
a. the value of 0-D array
b. the middle of 1-D array
c. the center of 2-D array
d. the center of 3-D array ( the center of middle 2-D array )
* The value should be 11
'''
# Exercise 3: 2 + 5 + 3 + 1 = 11.
print('*** the final sum result is: ')
print(D0 + D1[2] + D2[1, 1] + D3[1, 1, 1])
4,141 | a7123fa221555b15162dbab0d93a86965190b805 | # BotSetup.py
from websockets.exceptions import InvalidStatusCode
from dokbot.DokBotCog import DokBotCog
from events.EventCog import EventCog
from dotenv import load_dotenv
from datetime import datetime
from .DokBot import DokBot
import utils.Logger as Log
import logging
import os
import sys
import traceback
import discord
def run() -> None:
    """Configure the environment, wire up the cogs, and start DokBot.

    Sets the process timezone to Europe/Brussels (tzset is unavailable on
    Windows), loads .env configuration, and blocks in bot.run() until the
    client stops. A failed websocket handshake is logged, not raised.
    """
    os.environ['TZ'] = 'Europe/Brussels'
    if sys.platform != 'win32':
        from time import tzset
        tzset()
    print(datetime.now())

    load_dotenv()
    Log.setup()

    token = os.getenv('DISCORD_BOT_TOKEN')
    assert token, "Could not find any dokbot bot token"

    # The members intent is required to resolve guild members.
    bot_intents = discord.Intents.default()
    bot_intents.members = True

    # Development builds listen on '>' so they can coexist with production '!'.
    command_prefix = '>' if os.getenv('APP_ENV') == 'development' else '!'
    bot = DokBot(command_prefix=command_prefix, intents=bot_intents)
    bot.add_cog(DokBotCog(bot))
    bot.add_cog(EventCog(bot))

    @bot.event
    async def on_ready():
        logging.getLogger().info(f'{bot.user.name} has connected.')

    try:
        bot.run(token)
    except InvalidStatusCode as e:
        Log.error(f"Could not start client {e}\n{traceback.format_exc()}")
|
4,142 | f3664f5f69207c3f2dcec96c90cd220003da0904 | import json
import paho.mqtt.client as mqtt
from datetime import datetime
import ssl
from collections import OrderedDict
import time
from tkinter import *
import numpy as np
# Broker connection settings (TLS MQTT port 8883 on host "emq").
MQTT_IP = 'emq'
MQTT_PORT = 8883
# Credentials; the same strings also build the topic names below.
username = "spread_ICAM"
password = "spread_ICAM"
deviceType = "spread_ICAM"
version = "v1"
def on_connect(client, userdata, flags, rc):
    """paho-mqtt connect callback: report the connection result code.

    rc values: 0 success; 1 bad protocol version; 2 invalid client id;
    3 server unavailable; 4 bad username or password; 5 not authorised;
    6-255 currently unused.

    Note: subscriptions made here would be renewed automatically after a
    reconnect (currently none are made).
    """
    print("Connected with result code " + str(rc))
def on_message(client, userdata, msg):
    """paho-mqtt message callback: log the arrival time and raw payload."""
    received = str(datetime.now()) + " Message Received: " + str(msg.payload)
    print(received)
# Topic layout: <deviceType>_<version>/<username>/{events|operations}.
publishTopic = "%s_%s/%s/events" % (deviceType, version, username)
subscribeTopic = "%s_%s/%s/operations" % (deviceType, version, username)
# The broker refuses the connection unless an explicit client_id is set.
client = mqtt.Client(client_id="TentativoRaffo")
client.tls_set(ca_certs="digitalfuture_ca_public.pem", certfile=None, keyfile=None, cert_reqs=ssl.CERT_REQUIRED,
               tls_version=ssl.PROTOCOL_SSLv23, ciphers=None)
client.tls_insecure_set(False)
client.username_pw_set(username, password=password)
client.on_connect = on_connect
client.on_message = on_message
client.connect(MQTT_IP, MQTT_PORT, 60, bind_address="")
# Network loop runs in a background thread so the GUI stays responsive.
client.loop_start()
#########################
#
# CREATE THE GUI
#
#########################
root = Tk()
Label(root, text="Spread simulator").grid(row=0, column=1, pady=5)
Label(root, text="Kg").grid(row=1, column=0, pady=5)
text_id = Text(root, height=1, width=10)
text_id.grid(row=1, column=1, padx=5, pady=5)
Label(root, text="Peso in kg del vassoio prelevato (Kg)").grid(row=1, column=2, pady=5)
Label(root, text="mm_kg").grid(row=2, column=0, pady=5)
# NOTE(review): the same variable `text_speed` is rebound for rows 2-4, so
# only the last Text widget remains reachable from code — confirm whether
# these inputs were ever meant to be read back.
text_speed = Text(root, height=1, width=10)
text_speed.grid(row=2, column=1, padx=5, pady=5)
Label(root, text="Di quanti mm affonda per ogni kg prelevato (mm)").grid(row=2, column=2, pady=5)
Label(root, text="s").grid(row=3, column=0, pady=5)
text_speed = Text(root, height=1, width=10)
text_speed.grid(row=3, column=1, padx=5, pady=5)
Label(root, text="Coefficiente di sovraelongazione delle catene").grid(row=3, column=2, pady=5)
Label(root, text="interval").grid(row=4, column=0, pady=5)
text_speed = Text(root, height=1, width=10)
text_speed.grid(row=4, column=1, padx=5, pady=5)
Label(root, text="Intervallo di invio dati (s)").grid(row=4, column=2, pady=5)
# NOTE(review): neither button has a command= bound, so they do nothing.
btn_start = Button(root)
btn_start["text"] = "Start"
btn_start.grid(row=5, column=1, padx=5, pady=5)
btn_start = Button(root)
btn_start["text"] = "Stop"
btn_start.grid(row=6, column=1, padx=5, pady=5)
# Tk timer period in milliseconds.
interval_time = 1000;
def task():
    # Draw one sample from the spread distribution.
    spread = np.random.normal(loc=0.708727, scale=0.192176)
    # NOTE(review): this prints the literal string "spread", not the sampled
    # value, which is otherwise unused — probably meant print(spread).
    print("spread")
    # Reschedule itself every interval_time ms (1000 ms as configured above).
    root.after(interval_time, task)  # reschedule event in 2 seconds
# Kick off the periodic task, then hand control to Tk until the window closes.
root.after(interval_time, task)
root.mainloop()
# NOTE(review): everything below only runs after the GUI window is closed,
# and root.destroy() on an already-closed root may raise — confirm intent.
root.destroy()
i=0
timestamp = 1234567890123
while(True):
    time.sleep(1)
    # NOTE(review): timestamp grows by the loop counter, not by elapsed
    # milliseconds — confirm whether a real clock value was intended.
    timestamp += i
    print(timestamp)
    # OrderedDict keeps the JSON field order stable for the consumer.
    ordered_obj_to_send = OrderedDict([
        ("spread", 3.0),
        ("timestamp_", timestamp),
        ("date", "eee")])
    client.publish(publishTopic, json.dumps(ordered_obj_to_send), qos=2)
    i+=1
    #time.sleep(2)
4,143 | 30e8e269cf6500ab804566a85c9b96b3ef9bda36 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 Cesar Sinchiguano <cesarsinchiguano@hotmail.es>
#
# Distributed under terms of the BSD license.
"""
"""
import numpy as np
from open3d import *
def main():
    """Open3D walkthrough: load point clouds, downsample, estimate normals,
    crop with a polygon volume, and recolor the cropped cloud.

    NOTE(review): the first cloud read from "11.ply" is immediately
    overwritten by the TestData fragment — confirm which input is intended.
    """
    print("Load a ply point cloud, print it, and render it")
    pcd = read_point_cloud("11.ply")
    ''' read_point_cloud reads a point cloud from a file.
    It tries to decode the file based on the extension name.
    The supported extension names are: pcd, ply, xyz, xyzrgb, xyzn, pts.'''
    pcd = read_point_cloud("TestData/fragment.ply")
    print(pcd)
    # print("Load a ply point cloud, print it, and render it")
    # pcd = read_point_cloud("bun0.pcd")
    # print(pcd)
    # Peek at the raw coordinates of the first five points.
    tmp=np.asarray(pcd.points)
    print(tmp[0:5,0:3])#rows and column
    #draw_geometries([pcd])
    print("Downsample the point cloud with a voxel of 0.005")
    downpcd = voxel_down_sample(pcd, voxel_size = 0.05)
    draw_geometries([downpcd])
    # print("Recompute the normal of the downsampled point cloud")
    estimate_normals(downpcd, search_param = KDTreeSearchParamHybrid(radius = 0.1, max_nn = 30))
    draw_geometries([downpcd])
    # print("Print a normal vector of the 0th point")
    # print(downpcd.normals[0])
    # print("Print the normal vectors of the first 10 points")
    # print(np.asarray(downpcd.normals)[:10,:])
    # print("")
    print("Load a polygon volume and use it to crop the original point cloud")
    vol = read_selection_polygon_volume("TestData/Crop/cropped.json")
    chair = vol.crop_point_cloud(pcd)
    #draw_geometries([chair])
    print("")
    print("Paint chair")
    chair.paint_uniform_color([1, 0.706, 0])
    #draw_geometries([chair])
    print("")
if __name__ == "__main__":
    main()
|
4,144 | 07fdf6605d970d2491116ad82a1119499b561d1f | M, N = map(int, input().split())
def is_prime(num):
    """Return True iff `num` is prime (trial division up to sqrt(num))."""
    if num <= 1:
        return False
    divisor = 2
    # Any composite has a factor no larger than its square root.
    while divisor * divisor <= num:
        if num % divisor == 0:
            return False
        divisor += 1
    return True
if __name__=="__main__":
    # Print every prime in the inclusive range [M, N] read from stdin above.
    for i in range(M, N+1):
        if is_prime(i):
            print(i)
4,145 | c70db0fc9d98657e318ecab7eb8af60cc2b19a2c | from fixate.reporting.csv import register_csv, unregister_csv |
4,146 | ae83a0e1ebf1190ab55459563bc7b86d240de89a | #! /usr/bin/python2
# Copyright 2007 John Kasunich and Jeff Epler
#
# modified by Rudy du Preez to fit with the kinematics component pumakins.c
# Note: DH parameters in pumakins halfile should bet set to
# A2=400, A3=50, D3=100, D4=400, D6=95
#
# z |
# |
# |__________y top of the base.
# /
# / A2
# x /
# /_______
# D3 /
# / A3
# |
# |
# | D4
# |___
# |
# tooltip | D6
#
# or they should be changed below to fit. Otherwise you won't get straight lines
# moving x or y or z in world mode. If all is correct the tool should rotate
# about its tip with no x,y,z movement for changes in A,B,C at any point in the
# workspace.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from vismach import *
import hal
# Expose one HAL float input pin per joint; LinuxCNC writes the joint
# angles here and vismach renders the resulting pose.
c = hal.component("pumagui")
c.newpin("joint1", hal.HAL_FLOAT, hal.HAL_IN)
c.newpin("joint2", hal.HAL_FLOAT, hal.HAL_IN)
c.newpin("joint3", hal.HAL_FLOAT, hal.HAL_IN)
c.newpin("joint4", hal.HAL_FLOAT, hal.HAL_IN)
c.newpin("joint5", hal.HAL_FLOAT, hal.HAL_IN)
c.newpin("joint6", hal.HAL_FLOAT, hal.HAL_IN)
c.ready()
###################
# The kinematic chain is assembled tip-first: each linkN collection is
# nested inside link(N-1), so each HalRotate affects everything distal to it.
# tool or finger
finger1 = CylinderZ(0, 5, 50, 5)
# "tooltip" for backplot will be the tip of the finger
tooltip = Capture()
# "hand" - the part the finger is attached to
link6 = Collection([
    finger1,
    Box(-25, -25, -10, 25, 25, 0)])
link6 = Translate([link6],0,0,-50)
link6 = Collection([tooltip,link6])
# assembly fingers, and make it rotate
link6 = HalRotate([link6],c,"joint6",1,0,0,1)
# moving part of wrist joint
link5 = Collection([
    CylinderZ( 27, 30, 35, 30),
    CylinderX(-13, 25, 13, 25),
    Box(-11, -25, 0, 11, 25, 27)])
# move gripper to end of wrist and attach D6=95
link5 = Collection([
    link5,
    Translate([link6],0,0,95)])
# make wrist bend
link5 = HalRotate([link5],c,"joint5",1,1,0,0)
# fixed part of wrist joint (rotates on end of arm)
link4 = Collection([
    CylinderX(-13, 22, -27, 22),
    CylinderX( 13, 22, 27, 22),
    Box(-15, -22, -30, -25, 22, 0),
    Box( 15, -22, -30, 25, 22, 0),
    Box(-25, -25, -45, 25, 25, -30)])
# attach wrist, move whole assembly forward so joint 4 is at origin
link4 = Translate([link4,link5], 0, 0, 0)
# make joint 4 rotate
link4 = HalRotate([link4],c,"joint4",1,0,0,1)
# next chunk link length is D4=400
link3 = Collection([
    CylinderY(-50,35,25,35),
    CylinderZ(0.0, 35, 400-45, 25)])
link3 = Translate([link3],0,50,0)
link3 = Collection([
    link3,
    CylinderX(-50,40,40,40)])
# move link4 forward and sideways (A3=50) and attach
link3 = Collection([
    link3,
    Translate([link4],0.0, 50, 400)])
# move whole assembly over so joint 3 is at origin (D3=100)
link3 = Translate([link3],100, 0, 0.0)
# rotate to J3 zero position
link3 = Rotate([link3],90,1,0,0)
# make joint 3 rotate
link3 = HalRotate([link3],c,"joint3",1,1,0,0)
# elbow stuff
link2 = CylinderX(-50,50,50,50)
# move elbow to end of upper arm
link2 = Translate([link2],0.0,0.0,400)
# rest of upper arm (A2 = 400)
link2 = Collection([
    link2,
    CylinderZ(400, 40, 0, 50),
    CylinderX(-70,85,70,85)])
# move link 3 into place and attach
link2 = Collection([
    link2,
    Translate([link3], 0,0.0,400)])
# rotate into zero J2 position
link2 = Rotate([link2],90,1,0,0)
# make joint 2 rotate
link2 = HalRotate([link2],c,"joint2",1,1,0,0)
# shoulder stuff
link1 = Collection([
    CylinderX(-70,70,70,70),
    Box(-70,-70,0,70,70,-100)])
# move link2 to end and attach
link1 = Collection([
    link1,
    link2])
# move whole assembly up so joint 1 is at origin
link1 = Translate([link1],0.0, 0.0, 100)
# make joint 1 rotate
link1 = HalRotate([link1],c,"joint1",1,0,0,1)
# stationary base
link0 = Collection([
    CylinderZ(750, 75, 800, 75),
    CylinderZ(25, 90, 750, 50),
    CylinderZ(0, 200, 35, 200)])
# move link1 to top and attach
link0 = Collection([
    link0,
    Translate([link1],0.0,0.0,800)])
# add a floor
floor = Box(-500,-500,-10,500,500,0.0)
work = Capture()
model = Collection([link0, floor, work])
# Start the vismach viewer: model, backplot capture points, window size.
main(model, tooltip, work, 1500)
|
def fib(limit):
    """Yield the Fibonacci sequence 0, 1, 1, 2, 3, ...

    Generation stops after the first term that reaches `limit`; that final
    term (which may be >= limit) is still yielded.
    """
    prev, curr = 0, 1
    yield prev
    yield curr
    while curr < limit:
        prev, curr = curr, prev + curr
        yield curr
# Project Euler #2: sum of the even Fibonacci terms below four million.
# (Python 2 print statement.)
print sum(x for x in fib(4000000) if not x % 2) # 4613732
4,148 | 45e8bdacad4ed293f7267d96abc9cbe8c8e192ae | from django.contrib import admin
from django.urls import include, path
# Route the site root to the Django admin and /upload/ to the links app.
urlpatterns = [
    path('', admin.site.urls),
    path('upload/', include('links.urls')),
]
|
4,149 | d0dbf5a13b8e718ed426a254546ba13da12b2c3e |
#Program written and maintained by Matthew Meyerink
#File responsible for defining the game based on user input
from cpu_game import CPU_Game
from warning_color import Warning
class User_Game(CPU_Game):
    """Game variant whose secret phrase is typed in by a human player."""

    def get_user_phrase(self):
        """Prompt until the user supplies a valid phrase.

        A valid phrase is non-empty and contains only letters and
        whitespace; it is stored upper-cased in self.phrase. A colored
        warning is printed for each failed attempt.
        """
        while True:
            self.phrase = input("Please input a phrase: ").upper()
            valid = True
            # Reject any character that is neither a letter nor whitespace.
            if not all(ch.isalpha() or ch.isspace() for ch in self.phrase):
                valid = False
                print(Warning.YELLOW +
                      "\nPhrase needs to be all letters!!!\n" +
                      Warning.END)
            # Reject an empty phrase (the character check above passes
            # vacuously on an empty string).
            if self.phrase == "":
                valid = False
                print(Warning.YELLOW +
                      "\nDid you mean to input nothing?",
                      " Do you want to play or not?!?!\n" +
                      Warning.END)
            if valid:
                return
4,150 | fd877f5952c1fc0b2115d0950a066501ee7545f8 | # coding: utf-8
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp.addons.ud.ud import _TIPOS_BOLSA
TIPOS_BOLSA = dict(_TIPOS_BOLSA)
def get_banco(cls, cr, browse_record, usuario_id, context=None):
    """Find or create the ``ud.dados.bancarios`` record matching the bank
    fields filled in on the wizard.

    Returns the record id.  Raises if an identical record already belongs
    to a different user.
    """
    modelo = cls.pool.get("ud.dados.bancarios")
    # Build the search domain from the fields the selected bank actually
    # uses (the ``*_v`` related flags mark which fields are relevant).
    dominio = [("banco_id", "=", browse_record.banco_id.id)]
    for flag, campo in (("agencia_v", "agencia"),
                        ("dv_agencia_v", "dv_agencia"),
                        ("conta_v", "conta"),
                        ("dv_conta_v", "dv_conta"),
                        ("operacao_v", "operacao")):
        if getattr(browse_record, flag):
            dominio.append((campo, "=", getattr(browse_record, campo)))
    encontrados = modelo.search(cr, SUPERUSER_ID, dominio, context=context)
    if encontrados:
        registro = modelo.browse(cr, SUPERUSER_ID, encontrados[0])
        # Reuse the record when it is unowned or already owned by this user.
        if not registro.ud_conta_id:
            return registro.id
        if registro.ud_conta_id.id == usuario_id:
            return registro.id
        raise osv.except_osv(u"Dados Bancários duplicados", u"Outra pessoa já possui esses dados bancários!")
    valores = {"banco_id": browse_record.banco_id.id, "agencia": browse_record.agencia,
               "dv_agencia": browse_record.dv_agencia, "conta": browse_record.conta,
               "dv_conta": browse_record.dv_conta, "operacao": browse_record.operacao,
               "ud_conta_id": usuario_id}
    return modelo.create(cr, SUPERUSER_ID, valores, context=context)
class AdicionarBolsaWizard(osv.TransientModel):
    """Wizard: grant a monitoring scholarship ("bolsa") to a selected
    non-scholarship student, linking/creating bank details and logging the
    change as an event on the semester record.
    """
    _name = "ud.monitoria.bolsa.adicionar.wizard"
    _description = u"Inclusão de bolsa de monitoria para discente (UD)"
    # Selectable states for the student document the wizard may pick from.
    _STATES = [
        ("n_bolsista", u"Não Bolsista"),
        ("reserva", u"Cadastro de Reserva"),
    ]
    def _bolsas(self, cr, uid, ids, campos, args, context=None):
        """Functional-field getter: scholarships available on the wizard's
        selected discipline."""
        oferta_model = self.pool.get("ud.monitoria.oferta.disciplina")  # NOTE(review): unused
        res = {}
        for add in self.browse(cr, uid, ids, context):
            res[add.id] = add.disciplina_id.bolsas
        return res
    _columns = {
        "semestre_id": fields.many2one("ud.monitoria.registro", u"Semestre", required=True, readonly=True),
        "curso_id": fields.many2one("ud.curso", u"Curso", required=True, domain="[('is_active', '=', True)]"),
        "disciplina_id": fields.many2one("ud.monitoria.disciplina", u"Disciplinas", required=True,
                                         domain="[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id), "
                                                "('is_active', '=', True)]"),
        "bolsas": fields.function(_bolsas, type="integer", string=u"Bolsas disponíveis",
                                  help=u"Número de bolsas disponíveis para a disciplina"),
        "valor_bolsa": fields.float(u"Bolsa (R$)"),
        "tutor": fields.boolean(u"Tutor?"),
        "status": fields.selection(_STATES, u"Status", required=True),
        "doc_discente_id": fields.many2one("ud.monitoria.documentos.discente", u"Discente", required=True,
                                           domain="[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), "
                                                  "('is_active', '=', True), ('state', '=', status)]"),
        # BANK DETAILS
        "dados_bancarios_id": fields.many2one("ud.dados.bancarios", u"Dados Bancários", domain=[('id', '=', False)]),
        "banco_id": fields.many2one("ud.banco", u"Banco", ondelete="restrict"),
        "agencia": fields.char(u"Agência", size=4, help=u"Número da Agência"),
        "dv_agencia": fields.char(u"DV Agência", size=2, help=u"Dígito verificador da Agência"),
        "conta": fields.char(u"Conta", size=10, help=u"Número da Conta"),
        "dv_conta": fields.char(u"DV Conta", size=1, help=u"Dígito verificador da Conta"),
        "operacao": fields.char(u"Operação", size=3, help=u"Tipo de conta"),
        # Related flags: which bank fields are meaningful for the chosen bank.
        "agencia_v": fields.related("banco_id", "agencia", type="boolean", invisible=True, readonly=True),
        "dv_agencia_v": fields.related("banco_id", "dv_agencia", type="boolean", invisible=True, readonly=True),
        "conta_v": fields.related("banco_id", "conta", type="boolean", invisible=True, readonly=True),
        "dv_conta_v": fields.related("banco_id", "dv_conta", type="boolean", invisible=True, readonly=True),
        "operacao_v": fields.related("banco_id", "operacao", type="boolean", invisible=True, readonly=True),
    }
    def default_get(self, cr, uid, fields_list, context=None):
        """Pre-fill the wizard from the active record: the semester
        (``ud.monitoria.registro``) or a student document
        (``ud.monitoria.documentos.discente``).

        Raises when the chosen document already holds a scholarship or is
        inactive.
        """
        res = super(AdicionarBolsaWizard, self).default_get(cr, uid, fields_list, context)
        res["status"] = "n_bolsista"
        res["valor_bolsa"] = 400.  # default monthly scholarship value (R$)
        context = context or {}
        if context.get("active_id", False):
            if context.get("active_model", False) == "ud.monitoria.registro":
                res["semestre_id"] = context.get("active_id")
            elif context.get("active_model", False) == "ud.monitoria.documentos.discente":
                doc = self.pool.get("ud.monitoria.documentos.discente").browse(cr, uid, context.get("active_id"), context)
                if doc.state == "bolsista":
                    raise osv.except_osv(u"Discente bolsista", u"O discente já é bolsista")
                elif not doc.is_active:
                    raise osv.except_osv(u"Documento do discente inativo", u"Não é possível alterar o status de discentes inativos")
                res["semestre_id"] = doc.disciplina_id.semestre_id.id
                res["curso_id"] = doc.disciplina_id.curso_id.id
                res["disciplina_id"] = doc.disciplina_id.id
                res["tutor"] = doc.tutor
                res["status"] = doc.state
                res["doc_discente_id"] = doc.id
        return res
    def onchange_curso(self, cr, uid, ids, semestre_id, curso_id, disciplina_id, context=None):
        """Restrict the discipline domain to the chosen semester/course."""
        if not (semestre_id and curso_id):
            return {"value": {"disciplina_id": False}}
        reg = self.pool.get("ud.monitoria.registro").read(cr, uid, semestre_id, ["processos_seletivos_ids"], context=context, load="_classic_write")
        args = [("curso_id", "=", curso_id), ("processo_seletivo_id", "=", reg["processos_seletivos_ids"]), ("is_active", "=", True)]
        disc = self.pool.get("ud.monitoria.disciplina").search(cr, uid, args, context=context)
        res = {"domain": {"disciplina_id": [("id", "in", disc)]}}
        if not disc:
            res["value"]= {"disciplina_id": False}
        return res
    def onchange_disciplina(self, cr, uid, ids, disciplina_id, doc_discente_id, context=None):
        """Keep the selected student consistent with the discipline and
        refresh the available-scholarship counter."""
        if disciplina_id:
            if doc_discente_id:
                doc_discente = self.pool.get("ud.monitoria.documentos.discente").browse(cr, uid, doc_discente_id, context)
                # Clear the student if it belongs to another discipline.
                doc_discente_id = doc_discente_id if doc_discente.disciplina_id.id == disciplina_id else False
            disciplina_id = self.pool.get("ud.monitoria.disciplina").browse(cr, uid, disciplina_id, context)
            return {
                "value": {"doc_discente_id": doc_discente_id,
                          "bolsas": disciplina_id.bolsas}
            }
        return {"value": {"doc_discente_id": False, "bolsas": 0}}
    def onchange_doc_discente(self, cr, uid, ids, doc_discente_id, dados_bancarios_id, context=None):
        """Default the bank-details record to the student's own and restrict
        its domain to records owned by that student."""
        if doc_discente_id:
            doc = self.pool.get("ud.monitoria.documentos.discente").browse(cr, uid, doc_discente_id, context)
            if not dados_bancarios_id:
                dados_bancarios_id = getattr(doc.dados_bancarios_id, "id", False)
            return {"value": {"dados_bancarios_id": dados_bancarios_id},
                    "domain": {"dados_bancarios_id": [("ud_conta_id", "=", doc.discente_id.id)]}}
        return {"value": {"dados_bancarios_id": False},
                "domain": {"dados_bancarios_id": [("id", "=", False)]}}
    def onchange_banco(self, cr, uid, ids, banco_id, context=None):
        """Reset the manual bank fields and expose which of them the chosen
        bank requires (via the ``*_v`` flags)."""
        if banco_id:
            banco = self.pool.get("ud.banco").read(cr, uid, banco_id, [
                "agencia", "dv_agencia", "conta", "dv_conta", "operacao"
            ], context=context, load="_classic_write")
            vals = {"agencia": False, "dv_agencia": False, "conta": False, "dv_conta": False, "operacao": False}
            vals.update({"%s_v" % dado: banco.get(dado) for dado in banco.keys()})
            return {"value": vals}
        return {"value": {"agencia_v": False, "dv_agencia_v": False, "conta_v": False, "dv_conta_v": False,"operacao_v": False,
                          "agencia": False, "dv_agencia": False, "conta": False, "dv_conta": False, "operacao": False}}
    def botao_adicionar(self, cr, uid, ids, context=None):
        """Confirm button: validate availability and ownership, flag the
        student's profile as scholarship holder, attach bank data and log
        the event on the semester record."""
        perfil_model = self.pool.get("ud.perfil")
        for add in self.browse(cr, uid, ids, context):
            if add.bolsas == 0:
                raise osv.except_osv(u"Bolsas Insuficientes", u"Não há bolsas disponíveis para essa disciplina")
            elif not add.doc_discente_id.is_active:
                raise osv.except_osv(u"Documento do discente inativo",
                                     u"O discente não pode ser classificado como bolsista")
            if add.doc_discente_id.inscricao_id.perfil_id.is_bolsista:
                raise osv.except_osv(
                    u"Discente bolsista",
                    u"O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"".format(
                        add.doc_discente_id.discente_id.name, add.doc_discente_id.inscricao_id.perfil_id.matricula,
                        TIPOS_BOLSA[add.doc_discente_id.inscricao_id.perfil_id.tipo_bolsa]
                    )
                )
            # The event must be attributed to exactly one employee record
            # linked to the current login.
            responsavel = self.pool.get("ud.employee").search(cr, SUPERUSER_ID, [("user_id", "=", uid)], limit=2)
            if not responsavel:
                raise osv.except_osv(
                    u"Registro Inexistente",
                    u"Não é possível realizar essa alteração enquanto seu login não estiver vinculado a uma pessoa no núcleo"
                )
            if len(responsavel) > 1:
                raise osv.except_osv(
                    u"Multiplos vínculos",
                    u"Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo"
                )
            perfil_model.write(cr, SUPERUSER_ID, add.doc_discente_id.inscricao_id.perfil_id.id, {
                "is_bolsista": True, "tipo_bolsa": "m", "valor_bolsa": ("%.2f" % add.valor_bolsa).replace(".", ",")
            })
            if not add.dados_bancarios_id:
                dados_bancarios = get_banco(self, cr, add, add.doc_discente_id.discente_id.id, context)
            else:
                dados_bancarios = add.dados_bancarios_id.id
            add.doc_discente_id.write({"state": "bolsista", "dados_bancarios_id": dados_bancarios})
            evento = {
                "responsavel_id": responsavel[0],
                "name": u"Adição de bolsa: \"%s\"" % add.doc_discente_id.discente_id.name,
                "envolvidos_ids": [(4, add.doc_discente_id.discente_id.id)],
                "descricao": u"Uma bolsa de R$ %s foi vinculada para o(a) discente \"%s\" sob matrícula \"%s\"." % (
                    ("%.2f" % add.valor_bolsa).replace(".", ","),
                    add.doc_discente_id.discente_id.name.upper(), add.doc_discente_id.inscricao_id.perfil_id.matricula
                )
            }
            add.semestre_id.write({"eventos_ids": [(0, 0, evento)]})
        return True
class TransferirBolsaWizard(osv.TransientModel):
    """Wizard: transfer a scholarship from one student (the ``_de`` side)
    to another (the ``_para`` side), swapping profile flags and bank data
    and logging the transfer on the semester record.
    """
    _name = "ud.monitoria.bolsa.transferir.wizard"
    _description = u"Transferência de bolsa de monitoria (UD)"
    # Selectable states for the receiving student document.
    _STATES = [
        ("n_bolsista", u"Não Bolsista"),
        ("reserva", u"Cadastro de Reserva"),
    ]
    _columns = {
        "semestre_id": fields.many2one("ud.monitoria.registro", u"Semestre", required=True, readonly=True),
        "curso_id_de": fields.many2one("ud.curso", u"Curso", required=True, domain="[('is_active', '=', True)]"),
        "disciplina_id_de": fields.many2one("ud.monitoria.disciplina", u"Disciplinas", required=True,
                                            domain="[('id', '=', False)]"),
        "tutor_de": fields.boolean(u"Tutor?"),
        "doc_discente_id_de": fields.many2one("ud.monitoria.documentos.discente", u"Discente", required=True,
                                              domain="[('is_active', '=', True), ('state', '=', 'bolsista'), "
                                                     "('disciplina_id', '=', disciplina_id_de), ('tutor', '=', tutor_de)]"),
        "curso_id_para": fields.many2one("ud.curso", u"Curso", required=True, domain="[('is_active', '=', True)]"),
        "disciplina_id_para": fields.many2one("ud.monitoria.disciplina", u"Disciplinas", required=True,
                                              domain="[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id_para), "
                                                     "('is_active', '=', True)]"),
        "tutor_para": fields.boolean(u"Tutor?"),
        "status_para": fields.selection(_STATES, u"Status", required=True),
        "doc_discente_id_para": fields.many2one("ud.monitoria.documentos.discente", u"Discente", required=True,
                                                domain="[('is_active', '=', True), ('state', '=', status_para), "
                                                       "('disciplina_id', '=', disciplina_id_para), "
                                                       "('tutor', '=', tutor_para)]"),
        # BANK DETAILS (for the receiving student)
        "banco_id": fields.many2one("ud.banco", u"Banco", ondelete="restrict"),
        "agencia": fields.char(u"Agência", size=4, help=u"Número da Agência"),
        "dv_agencia": fields.char(u"DV Agência", size=2, help=u"Dígito verificador da Agência"),
        "conta": fields.char(u"Conta", size=10, help=u"Número da Conta"),
        "dv_conta": fields.char(u"DV Conta", size=1, help=u"Dígito verificador da Conta"),
        "operacao": fields.char(u"Operação", size=3, help=u"Tipo de conta"),
        # Related flags: which bank fields are meaningful for the chosen bank.
        "agencia_v": fields.related("banco_id", "agencia", type="boolean", invisible=True, readonly=True),
        "dv_agencia_v": fields.related("banco_id", "dv_agencia", type="boolean", invisible=True, readonly=True),
        "conta_v": fields.related("banco_id", "conta", type="boolean", invisible=True, readonly=True),
        "dv_conta_v": fields.related("banco_id", "dv_conta", type="boolean", invisible=True, readonly=True),
        "operacao_v": fields.related("banco_id", "operacao", type="boolean", invisible=True, readonly=True),
    }
    def default_get(self, cr, uid, fields_list, context=None):
        """Pre-fill the ``_de`` side from the active student document, or
        only the semester when launched from the registry record."""
        res = super(TransferirBolsaWizard, self).default_get(cr, uid, fields_list, context)
        context = context or {}
        if context.get("active_id", False):
            if context.get("active_model", False) == "ud.monitoria.registro":
                res["semestre_id"] = context.get("active_id")
            elif context.get("active_model", False) == "ud.monitoria.documentos.discente":
                doc = self.pool.get("ud.monitoria.documentos.discente").browse(cr, uid, context.get("active_id"),
                                                                               context)
                if doc.state != "bolsista":
                    # Fix: the source of a transfer must already hold a
                    # scholarship; the original raised the opposite message
                    # ("the student is already a scholarship holder").
                    raise osv.except_osv(u"Discente não bolsista", u"O discente não é bolsista")
                elif not doc.is_active:
                    raise osv.except_osv(u"Documento do discente inativo",
                                         u"O discente não pode ser classificado como bolsista")
                res["semestre_id"] = doc.disciplina_id.semestre_id.id
                res["curso_id_de"] = doc.disciplina_id.curso_id.id
                res["disciplina_id_de"] = doc.disciplina_id.id
                res["tutor_de"] = doc.tutor
                res["status_de"] = doc.state  # NOTE(review): no "status_de" column is declared -- confirm
                res["doc_discente_id_de"] = doc.id
        return res
    def onchange_curso(self, cr, uid, ids, comp, semestre_id, curso_id, disciplina_id, context=None):
        """Restrict the ``disciplina_id_<comp>`` domain to the chosen
        semester/course (``comp`` is 'de' or 'para')."""
        if not (semestre_id and curso_id):
            return {"value": {"disciplina_id_" + comp: False}}
        reg = self.pool.get("ud.monitoria.registro").read(cr, uid, semestre_id, ["processos_seletivos_ids"], context=context, load="_classic_write")
        args = [("curso_id", "=", curso_id), ("processo_seletivo_id", "=", reg["processos_seletivos_ids"]), ("is_active", "=", True)]
        disc = self.pool.get("ud.monitoria.disciplina").search(cr, uid, args, context=context)
        res = {"domain": {"disciplina_id_" + comp: [("id", "in", disc)]}}
        if not disc:
            res["value"] = {"disciplina_id_" + comp: False}
        return res
    def onchange_disciplina(self, cr, uid, ids, comp, disciplina_id, doc_discente_id, context=None):
        """Clear the selected student when it no longer matches the chosen
        discipline (``comp`` is 'de' or 'para')."""
        if disciplina_id and doc_discente_id:
            doc_discente = self.pool.get("ud.monitoria.documentos.discente").browse(cr, uid, doc_discente_id, context)
            doc_discente_id = doc_discente_id if doc_discente.disciplina_id.id == disciplina_id else False
            return {
                "value": {"doc_discente_id_" + comp: doc_discente_id}
            }
        return {"value": {"doc_discente_id_" + comp: False}}
    def onchange_banco(self, cr, uid, ids, banco_id, context=None):
        """Reset the manual bank fields and expose which of them the chosen
        bank requires (via the ``*_v`` flags)."""
        if banco_id:
            banco = self.pool.get("ud.banco").read(cr, uid, banco_id, [
                "agencia", "dv_agencia", "conta", "dv_conta", "operacao"
            ], context=context, load="_classic_write")
            vals = {"agencia": False, "dv_agencia": False, "conta": False, "dv_conta": False, "operacao": False}
            vals.update({"%s_v" % dado: banco.get(dado) for dado in banco.keys()})
            return {"value": vals}
        return {"value": {"agencia_v": False, "dv_agencia_v": False, "conta_v": False, "dv_conta_v": False,"operacao_v": False,
                          "agencia": False, "dv_agencia": False, "conta": False, "dv_conta": False, "operacao": False}}
    def botao_transferir(self, cr, uid, ids, context=None):
        """Confirm button: validate both students' profiles, move the
        scholarship flags from the ``_de`` profile to the ``_para`` profile,
        update the documents and bank data and log the event."""
        perfil_model = self.pool.get("ud.perfil")
        for transf in self.browse(cr, uid, ids, context):
            # Locate the receiving student's academic profile and refuse the
            # transfer if it already holds a scholarship.
            matricula = transf.doc_discente_id_para.discente_id.matricula
            for perfil in transf.doc_discente_id_para.discente_id.pessoa_id.papel_ids:
                if perfil.matricula == matricula and perfil.tipo == "a":
                    if perfil.is_bolsista:
                        raise osv.except_osv(
                            u"Discente bolsista",
                            u"O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"".format(
                                transf.doc_discente_id_para.discente_id.pessoa_id.name, matricula,
                                TIPOS_BOLSA[perfil.tipo_bolsa]
                            )
                        )
                    break
            if not perfil:
                # NOTE(review): ``perfil`` is unbound (NameError) when
                # ``papel_ids`` is empty, and this only inspects the last
                # profile when none matched -- confirm intended guard.
                raise osv.except_osv(
                    u"Perfil excluído",
                    u"O perfil do discente para a matrícula \"%s\" não existe ou foi excluído" % matricula or ""
                )
            # Locate the source student's academic profile.
            matricula = transf.doc_discente_id_de.discente_id.matricula
            for perfil_de in transf.doc_discente_id_de.discente_id.pessoa_id.papel_ids:
                # Fix: the original compared ``perfil.matricula``/``perfil.tipo``
                # (the *receiving* student's profile) here, so the matching
                # source profile was never found and the last profile in the
                # list was silently used instead.
                if perfil_de.matricula == matricula and perfil_de.tipo == "a":
                    break
            responsavel = self.pool.get("ud.employee").search(cr, SUPERUSER_ID, [("user_id", "=", uid)], limit=2)
            if not responsavel:
                raise osv.except_osv(
                    u"Registro Inexistente",
                    u"Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo"
                )
            if len(responsavel) > 1:
                raise osv.except_osv(
                    u"Multiplos vínculos",
                    u"Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo"
                )
            # Move the scholarship from the source profile to the target one.
            valor = perfil_de.valor_bolsa
            perfil_model.write(cr, SUPERUSER_ID, perfil.id, {
                "is_bolsista": True, "tipo_bolsa": "m", "valor_bolsa": valor
            })
            perfil_model.write(cr, SUPERUSER_ID, perfil_de.id, {
                "is_bolsista": False, "tipo_bolsa": False, "valor_bolsa": False
            })
            transf.doc_discente_id_de.write({"state": "n_bolsista"})
            transf.doc_discente_id_para.write({"state": "bolsista", "is_active": True})
            get_banco(self, cr, transf, transf.doc_discente_id_para.discente_id.pessoa_id.id, context)
            evento = {
                "responsavel_id": responsavel[0],
                "name": u"Transferência de bolsa",
                "envolvidos_ids": [(4, transf.doc_discente_id_de.discente_id.pessoa_id.id),
                                   (4, transf.doc_discente_id_para.discente_id.pessoa_id.id)],
                "descricao": u"Transferência de bolsa no valor de R$ %(valor)s do discente %(discente_de)s sob matrícula "
                             u"%(matricula_de)s para o(a) discente \"%(discente_para)s\" sob matrícula"
                             u"\"%(matricula_para)s\"." % {
                                 "valor": valor, "discente_de": transf.doc_discente_id_de.discente_id.pessoa_id.name.upper(),
                                 "matricula_de": perfil_de.matricula,
                                 # Fix: the original filled the "para" placeholders
                                 # with the "de" student's name and matricula.
                                 "discente_para": transf.doc_discente_id_para.discente_id.pessoa_id.name.upper(),
                                 "matricula_para": perfil.matricula
                             }
            }
            transf.semestre_id.write({"eventos_ids": [(0, 0, evento)]})
        return True
class RemoverBolsaWizard(osv.TransientModel):
    """Wizard: revoke a student's monitoring scholarship, clearing the
    profile flags and logging the removal on the semester record.
    """
    _name = "ud.monitoria.bolsa.remover.wizard"
    _description = u"Remoção de bolsa de discente"
    _columns = {
        "semestre_id": fields.many2one("ud.monitoria.registro", u"Semestre", required=True, readonly=True),
        "curso_id": fields.many2one("ud.curso", u"Curso", required=True, domain="[('is_active', '=', True)]"),
        "disciplina_id": fields.many2one("ud.monitoria.disciplina", u"Disciplinas", required=True,
                                         domain="[('id', '=', False)]"),
        "tutor": fields.boolean(u"Tutor?"),
        "doc_discente_id": fields.many2one("ud.monitoria.documentos.discente", u"Discente", required=True,
                                           domain="[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), "
                                                  "('is_active', '=', True), ('state', '=', 'bolsista')]"),
    }
    def default_get(self, cr, uid, fields_list, context=None):
        """Pre-fill the wizard from the active record: the semester or a
        (necessarily active, scholarship-holding) student document."""
        res = super(RemoverBolsaWizard, self).default_get(cr, uid, fields_list, context)
        context = context or {}
        if context.get("active_id", False):
            if context.get("active_model", False) == "ud.monitoria.registro":
                res["semestre_id"] = context.get("active_id")
            elif context.get("active_model", False) == "ud.monitoria.documentos.discente":
                doc = self.pool.get("ud.monitoria.documentos.discente").browse(cr, uid, context.get("active_id"), context)
                if doc.state != "bolsista":
                    raise osv.except_osv(u"Discente não bolsista", u"O discente não é bolsista")
                elif not doc.is_active:
                    raise osv.except_osv(u"Documento do discente inativo",
                                         u"Não é possível alterar o status de discentes inativos")
                res["semestre_id"] = doc.disciplina_id.semestre_id.id
                res["curso_id"] = doc.disciplina_id.curso_id.id
                res["disciplina_id"] = doc.disciplina_id.id
                res["tutor"] = doc.tutor
                res["doc_discente_id"] = doc.id
        return res
    def onchange_curso(self, cr, uid, ids, semestre_id, curso_id, disciplina_id, context=None):
        """Restrict the discipline domain to the chosen semester/course."""
        if not (semestre_id and curso_id):
            return {"value": {"disciplina_id": False}}
        reg = self.pool.get("ud.monitoria.registro").read(cr, uid, semestre_id, ["processos_seletivos_ids"], context=context, load="_classic_write")
        args = [("curso_id", "=", curso_id), ("processo_seletivo_id", "=", reg["processos_seletivos_ids"]), ("is_active", "=", True)]
        disc = self.pool.get("ud.monitoria.disciplina").search(cr, uid, args, context=context)
        res = {"domain": {"disciplina_id": [("id", "in", disc)]}}
        if not disc:
            res["value"] = {"disciplina_id": False}
        return res
    def onchange_disciplina(self, cr, uid, ids, disciplina_id, doc_discente_id, context=None):
        """Clear the selected student when it no longer matches the chosen
        discipline."""
        if disciplina_id and doc_discente_id:
            doc_discente = self.pool.get("ud.monitoria.documentos.discente").browse(cr, uid, doc_discente_id, context)
            doc_discente_id = doc_discente_id if doc_discente.disciplina_id.id == disciplina_id else False
            return {
                "value": {"doc_discente_id": doc_discente_id}
            }
        return {"value": {"doc_discente_id": False}}
    def botao_remover(self, cr, uid, ids, context=None):
        """Confirm button: clear the scholarship flags on the student's
        profile, flip the document back to "n_bolsista" and log the event."""
        perfil_model = self.pool.get("ud.perfil")
        pessoa_model = self.pool.get("ud.employee")
        for rem in self.browse(cr, uid, ids, context):
            # The event must be attributed to exactly one employee record
            # linked to the current login.
            responsavel = pessoa_model.search(cr, SUPERUSER_ID, [("user_id", "=", uid)], limit=2)
            if not responsavel:
                raise osv.except_osv(
                    u"Registro Inexistente",
                    u"Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo"
                )
            if len(responsavel) > 1:
                raise osv.except_osv(
                    u"Multiplos vínculos",
                    u"Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo"
                )
            perfil = rem.doc_discente_id.inscricao_id.perfil_id
            perfil_model.write(cr, SUPERUSER_ID, perfil.id, {
                "is_bolsista": False, "tipo_bolsa": False, "valor_bolsa": False
            })
            rem.doc_discente_id.write({"state": "n_bolsista"})
            evento = {
                "responsavel_id": responsavel[0],
                "name": u"Remoção de bolsa: \"%s\"" % rem.doc_discente_id.discente_id.name,
                "envolvidos_ids": [(4, rem.doc_discente_id.discente_id.id)],
                "descricao": u"A bolsa do discente \"%s\" sob matrícula \"%s\" foi removida." % (
                    rem.doc_discente_id.discente_id.name.upper(), perfil.matricula
                )
            }
            rem.semestre_id.write({"eventos_ids": [(0, 0, evento)]})
        return True
|
4,151 | 9d2fdf47b5c4b56cc0177a9c0a86b1ed57c88d49 | from flask import Flask,Blueprint
from .views import login
from flask_session import Session
import redis
# Flask application with server-side sessions stored in redis.
# NOTE(review): static_url_path='static' has no leading slash -- recent Flask
# versions require URL paths to start with '/'; confirm against the Flask
# version in use.
app = Flask(__name__, template_folder='templates', static_url_path='static')
app.debug = True
print('app.root_path===', app.root_path)
print('app.static_url_path===', app.static_url_path)
# Fix: ``secret_key`` is an attribute, not a callable -- the original
# ``app.secret_key('uaremyhero')`` raised TypeError ('str' object is not
# callable) at import time.
app.secret_key = 'uaremyhero'
app.config['SESSION_TYPE'] = 'redis'  # store sessions in redis
app.config['SESSION_REDIS'] = redis.Redis(host='127.0.0.1', port='6379', password='123123')  # redis connection settings
app.config['SESSION_KEY_PREFIX'] = 'session:'  # prefix for the keys saved in redis
app.config['SESSION_PERMANENT'] = False  # if True, the session expires when the browser is closed
app.config['SESSION_USE_SIGNER'] = False  # whether to sign the session cookie value sent to the browser
Session(app)
app.register_blueprint(login.login)
# Fix: the original ended with a bare ``app.register_blueprint()`` call with
# no blueprint argument, which raised TypeError; removed the dangling call.
4,152 | e480136aca96e45cc8a7ca34c1a9d09b96a5a4da | import cv2 as cv
import numpy as np
import pytesseract as tes
def get_text_from_image(imageName):
    """Return the OCR'd text of the receipt found in the named image file."""
    img = preprocess(imageName)
    result = tes.image_to_string(img)
    return result


def preprocess(image_name):
    """Locate the receipt in the image, straighten it and binarize it
    for OCR."""
    image = cv.imread(image_name)
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    receiptBox = find_receipt_box(gray)
    M, w, h = perspective_transform(receiptBox)
    receiptImg = apply_perspective_correction(gray, M, w, h)
    # Adaptive threshold copes with uneven receipt lighting.
    receiptImg = cv.adaptiveThreshold(receiptImg, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 71, 10)
    return receiptImg


def find_receipt_box(image):
    """
    Finds a contour around the receipt in the given grayscale image.
    Returns the minimum-area bounding box as 4 corner points (int).
    """
    # Fix: medianBlur takes (src, ksize); the original passed a stray
    # third argument.
    gray = cv.medianBlur(image, 15)
    # Fix: threshold() takes (src, thresh, maxval); the original passed
    # (255, 125), capping the binary image at 125.  With THRESH_OTSU the
    # thresh argument is ignored, so use the conventional (0, 255).
    _, thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)
    # Morphological open to drop small bright speckles.
    k = np.ones((25, 25))
    thresh = cv.erode(thresh, k, iterations=1)
    thresh = cv.dilate(thresh, k, iterations=1)
    contours = cv.findContours(thresh, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
    # Largest contour is assumed to be the receipt.
    contours = sorted(contours[0], key=cv.contourArea, reverse=True)
    contour = contours[0]
    rect = cv.minAreaRect(contour)
    box = cv.boxPoints(rect)
    box = np.int0(box)
    return box


def perspective_transform(contour):
    """Produces the transformation matrix and the new size for perspective
    correction."""
    ord_rect = np.float32(order_rect(contour))
    (tl, tr, br, bl) = ord_rect
    dist_top = np.linalg.norm(tl - tr)
    dist_btm = np.linalg.norm(bl - br)
    width = max(dist_btm, dist_top)
    # Fix: the left edge is top-left -> bottom-left; the original measured
    # tl - tr (the top edge) again, so the output height was wrong.
    dist_left = np.linalg.norm(tl - bl)
    dist_right = np.linalg.norm(tr - br)
    height = max(dist_left, dist_right)
    dest_corners = np.array([
        [0, 0],
        [width - 1, 0],
        [width - 1, height - 1],
        [0, height - 1]
    ], dtype=ord_rect.dtype)
    M = cv.getPerspectiveTransform(ord_rect, dest_corners)
    return M, width, height


def order_rect(pts):
    """
    Orders a rectangle's 4 points in the order top-left, top-right,
    bottom-right, bottom-left.
    """
    new = np.zeros((4, 2), dtype="int64")
    # Smallest x+y is the top-left corner, largest the bottom-right.
    s = pts.sum(axis=1)
    new[0] = pts[np.argmin(s)]
    new[2] = pts[np.argmax(s)]
    # Smallest y-x is the top-right corner, largest the bottom-left.
    diff = np.diff(pts, axis=1)
    new[1] = pts[np.argmin(diff)]
    new[3] = pts[np.argmax(diff)]
    return new


def apply_perspective_correction(image, M, width, height):
    """Crops the contour and applies perspective correction."""
    # Fix: warpPerspective requires an integer dsize; the norms above are
    # floats.
    warped = cv.warpPerspective(image, M, (int(width), int(height)))
    return warped


if __name__ == "__main__":
    # Fix: the original invoked get_text_from_image at the top of the file,
    # before any of the functions were defined, raising NameError; run it
    # after all definitions and only when executed as a script.
    text = get_text_from_image("resizedReceipt.jpg")
    print(text)
|
4,153 | 83d0a32ef2d365d17caa9d311c367ed5828559ac | n, m = map(int, input().split())
# Pick the best 3-card total not exceeding m out of the n card values
# read above (Baekjoon 2798, "Blackjack").
cards = list(map(int, input().split()))
# Fix: the original accumulated the answer in a variable named ``max``,
# shadowing the builtin; renamed to ``best``.
best = 0
for i in range(0, n):
    for j in range(i + 1, n):
        for k in range(j + 1, n):
            total = cards[i] + cards[j] + cards[k]
            if total <= m and best < total:
                best = total
print(best)
4,154 | 07f8fd305e2311c0e37a785da0a826b8ea4e78ba | from project import db
from project.models import User, Recipe, Association, Ingre, Recipe_ingre
# Look up a known user and recipe and associate them (many-to-many)
# unless the link already exists.
user=User.query.filter_by(username="xiaofan").first()
recipe=Recipe.query.filter_by(recipename="Jerry").first()
# Recipes already associated with this user via the ``users`` relation.
# NOTE(review): assumes both query results are non-None -- confirm the
# fixture data exists before running.
recipes = Recipe.query.filter(Recipe.users.any(username="xiaofan")).all()
if recipe not in recipes:
    user.add_recipes([recipe])
    # commit the changes
    db.session.commit()
|
4,155 | 079610f2aaebec8c6e46ccf21a9d5728df1be8de | #!/usr/bin/env python
def findSubset(s0, s, t):
    """Recursively search for a subset of ``s`` whose sum, together with the
    already-chosen elements ``s0``, reaches target ``t``.

    Returns the chosen set on success, ``True`` when the target is hit with
    no elements chosen, or ``False`` when no subset works.  Inputs are
    copied, so the caller's sets are never mutated.
    """
    chosen = s0.copy()
    remaining = s.copy()
    # Target reached: report the accumulated picks (True when none).
    if t == 0:
        return chosen if chosen else True
    # Nothing left to pick from.
    if not remaining:
        return False
    candidate = remaining.pop()
    with_candidate = chosen.copy()
    with_candidate.add(candidate)
    if t - candidate < 0:
        # Overshoot: this candidate cannot be part of any solution.
        return findSubset(chosen, remaining, t)
    # Branch: take the candidate, or skip it.
    return (findSubset(with_candidate, remaining, t - candidate)
            or findSubset(chosen, remaining, t))
if __name__ == "__main__":
    # Demo: look for a subset of {1..6} summing to 11 and print it.
    candidate = set()
    big = set([1,2,3,4,5,6])
    total = 11
    print(findSubset(candidate, big, total))
|
4,156 | 500d6f473f07b35bf2d075d3061ac2e54eab702a | import numpy as np
import cv2
# Capture frames from the second webcam (device index 1), show a live
# preview window and record to ``webcam_record.avi`` until 'q' is pressed
# or the camera stops delivering frames.
FRAME_WIDTH = 320   # NOTE(review): never applied to the capture -- confirm whether needed
FRAME_HEIGHT = 240  # NOTE(review): never applied to the capture
cv2.namedWindow('Measure Angle with centerline')
# WebCam Initialize
vidCapture = cv2.VideoCapture(1)
# XVID-encoded AVI at 20 FPS.  NOTE(review): the writer's frame size
# (640x480) must match what the camera actually delivers, otherwise the
# output file is silently unplayable -- confirm the camera resolution.
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('webcam_record.avi', fourcc, 20.0, (640, 480))
while True:
    # key = cv2.waitKey(1) & 0xFF
    # if key == 27:
    #     break
    ret, frame = vidCapture.read()
    if ret==True:
        # frame = cv2.flip(frame,0)
        # write the flipped frame
        out.write(frame)
        cv2.imshow('frame',frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        # Camera disconnected or no frame available: stop recording.
        break
# img = np.zeros((512, 512, 3), np.uint8)
# cv2.line(frame, (160, 0), (160, 240), (255, 0, 0), 2)
# cv2.line(frame, (0, 120), (320, 120), (255, 0, 0), 2)
# cv2.imshow('frame', frame)
# Release the camera and finalize the AVI before the window teardown below.
vidCapture.release()
out.release()
cv2.destroyAllWindows() |
4,157 | 3a8164299fa51b7d781f2b80d77cfba05b5f6915 | import os
from pathlib import Path
# Root data directory: overridable via the PLOTTER_ROOT environment
# variable, defaulting to ~/.plotter/mainnet, with ~ expanded and the
# path fully resolved to an absolute path.
DEFAULT_ROOT_PATH = Path(os.path.expanduser(os.getenv("PLOTTER_ROOT", "~/.plotter/mainnet"))).resolve()
|
4,158 | ec44e12624fbee3148cfa4f886e86ba437e920ec | """
This file contains the general data storage classes used throughout Logician.
"""
import csv
import json
import os
from collections import OrderedDict
# Channel counts the hardware/format currently supports.
VALID_CHANNEL_COUNTS = [4]


class Acquisition:
    """
    The acquisition object contains data from all of the acquired channels.

    Parameters
    ----------
    data : list or bytes or str
        Array of form [[1, 0, 0, ...], [0, 0, 1, ...], ...],
        raw packed bytes, or the name of a CSV file to load.
        If data is bytes, channel_count must be provided.
    sample_rate : int
        The acquisition rate in Samples / sec.
    channel_count : int, optional
        Number of channels packed into ``data`` when it is bytes.
    """
    def __init__(self, data, sample_rate=1, channel_count=None):
        if isinstance(data, list):
            if len(data) not in VALID_CHANNEL_COUNTS:
                raise ValueError('data must have length %s'
                                 % str(VALID_CHANNEL_COUNTS))
            l = len(data[0])
            for channel in data:
                if len(channel) != l:
                    raise ValueError('All channels must be have same length.')
            self.data = data
        elif isinstance(data, bytes):
            if channel_count not in VALID_CHANNEL_COUNTS:
                raise ValueError('Invalid number of channels.')
            # Convert byte string to list of 1's and 0's. If there are 4
            # channels each byte should have 2 4 channel samples in it. The
            # MSB nibble is the 4th channel of the least recent sample.
            # Fix: iterating a Python 3 ``bytes`` already yields ints, so
            # the original ``ord(x)`` calls raised TypeError.
            sep_channel_data = [f(c) for c in data
                                for f in (lambda x: x >> 4,
                                          lambda x: x & 0x0F)]
            unpacked_data = [[int(i) for i in list(bin(d)[2:].zfill(4))]
                             for d in sep_channel_data]
            self.data = list(zip(*unpacked_data))
            self.data.reverse()
        elif isinstance(data, str):
            # A string is treated as a CSV filename to load.
            self.load_csv_file(data)
            return
        else:
            raise TypeError('Invalid data type')
        self.sample_rate = sample_rate

    @property
    def dt(self):
        """Sample period in seconds."""
        return 1.0 / self.sample_rate

    @property
    def acquisition_length(self):
        """Number of samples per channel."""
        return len(self.data[0])

    @property
    def channel_count(self):
        """Number of channels held."""
        return len(self.data)

    def csv_string(self):
        """Serialize as CSV: a ``#sample_rate=N`` header line followed by
        one comma-separated row per sample."""
        # Fix: the header previously lacked a trailing newline, so the
        # first sample row was fused onto it and load_csv_file could not
        # round-trip the output.
        out_string = '#sample_rate=%d\n' % self.sample_rate
        for row in zip(*self.data):
            out_string += str(row)[1:-1].replace(' ', '')
            out_string += '\n'
        return out_string

    def load_csv_file(self, fname):
        """Populate ``data`` and ``sample_rate`` from a file in the format
        written by :meth:`csv_string`."""
        with open(fname) as f:
            reader = csv.reader(f)
            header = next(reader)
            sample_rate = int(header[0].split('=')[-1])
            data = [[int(d) for d in row] for row in reader
                    if len(row) != 1]
        self.data = list(zip(*data))
        self.sample_rate = sample_rate

    def __len__(self):
        # Length is the channel count (iteration is per-channel).
        return len(self.data)

    def __getitem__(self, key):
        return self.data[key]

    def __iter__(self):
        return iter(self.data)
class AnalyzerCommand:
    """
    Simple class to hold analyzer commands and create appropriate command
    bytes to be sent to the firmware.
    """
    # UI preset choices: label -> sample count.
    sample_counts = OrderedDict((('200K', 200000),
                                 ('100K', 100000),
                                 ('50K', 50000),
                                 ('10K', 10000),
                                 ('2K', 2000)))
    # UI preset choices: label -> samples per second.
    # Fix: '100 KS/s' previously mapped to 10000 (10 KS/s); corrected to
    # match its label.
    sample_rates = OrderedDict((('1 MS/s', 1000000),
                                ('500 KS/s', 500000),
                                ('200 KS/s', 200000),
                                ('100 KS/s', 100000)))

    def __init__(self, sample_rate=1e6, sample_count=64000,
                 trigger_type=0, trigger_channel=0):
        """Build the 64-character command packet.

        Parameters
        ----------
        sample_rate : float
            Acquisition rate in Samples / sec.
        sample_count : int
            Total samples to acquire (sent in units of 1000).
        trigger_type, trigger_channel : int
            Raw trigger configuration bytes.
        """
        # Sample period in microseconds.
        sp = int(1.0 / sample_rate / 1e-6)
        self.sample_count = sample_count
        self.sample_rate = sample_rate
        # The firmware takes the sample count in units of 1000 samples.
        # Fix: ``/=`` is true division on Python 3 and yielded a float,
        # making the bitwise masks below raise TypeError; use floor
        # division instead.
        sample_count //= 1000
        self.command_bytes = \
            [0x01,  # Command
             (sp & 0x00FF), (sp >> 8),  # Sample Period (us), little-endian
             (sample_count & 0x00FF), (sample_count >> 8),
             trigger_type, trigger_channel]
        # Pad with spaces up to the fixed 64-byte packet size.
        # NOTE(review): this builds a ``str``; presumably the transport
        # layer encodes it before sending -- confirm.
        self.command_bytes = (''.join([chr(x) for x in self.command_bytes]) +
                              ' ' * (64 - len(self.command_bytes)))
class ThemeManager:
    """
    A class to manage and load themes for the signal display.

    Themes are JSON files living in ``theme_dir``; unreadable or malformed
    files are skipped.
    """
    def __init__(self, theme_dir):
        self.theme_dir = theme_dir
        self.refresh()

    def refresh(self):
        """Re-scan ``theme_dir`` and (re)load every parseable .json file."""
        self.themes = []
        for fname in os.listdir(self.theme_dir):
            if not fname.endswith('.json'):
                continue
            path = os.path.join(self.theme_dir, fname)
            # Fix: the original leaked the file handle (bare open().read())
            # and swallowed *all* exceptions with a bare ``except:``; use a
            # context manager and catch only I/O and JSON errors
            # (json.JSONDecodeError subclasses ValueError).
            try:
                with open(path) as f:
                    self.themes.append(json.load(f))
            except (OSError, ValueError):
                continue

    def theme_names(self):
        """
        Returns the names for each theme.
        """
        return [theme.get('name', 'Error') for theme in self.themes]

    def theme_named(self, name):
        """
        Returns the theme named name.

        Paramters
        ---------
        name : str
            The name of the theme to return.

        Returns
        -------
        Returns the theme as a dict, or an empty dict if theme could not be
        found.
        """
        for theme in self.themes:
            if theme.get('name', 'Error') == name:
                return theme
        # Fix: the documented contract promises an empty dict on a miss,
        # but the original fell through and returned None.
        return {}
|
4,159 | 1d4a51cfbd5df9ac9074c816a140309e04fff021 | ###############################################################################
# Copyright (c), Forschungszentrum Jülich GmbH, IAS-1/PGI-1, Germany. #
# All rights reserved. #
# This file is part of the AiiDA-FLEUR package. #
# #
# The code is hosted on GitHub at https://github.com/JuDFTteam/aiida-fleur #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.flapw.de or #
# http://aiida-fleur.readthedocs.io/en/develop/ #
###############################################################################
"""
This module contains the FleurBaseWorkChain.
FleurBaseWorkChain is a workchain that wraps the submission of
the FLEUR calculation. Inheritance from the BaseRestartWorkChain
allows to add scenarios to restart a calculation in an
automatic way if an expected failure occurred.
"""
from aiida import orm
from aiida.common import AttributeDict
from aiida.engine import while_
from aiida.engine.processes.workchains import BaseRestartWorkChain
from aiida.engine.processes.workchains.utils import process_handler, ProcessHandlerReport
from aiida_fleur.tools.common_fleur_wf import optimize_calc_options
from aiida_fleur.calculation.fleur import FleurCalculation
from aiida_fleur.data.fleurinp import get_fleurinp_from_remote_data
class FleurBaseWorkChain(BaseRestartWorkChain):
    """Workchain to run a FLEUR calculation with automated error handling and restarts"""
    # Version of this workchain implementation (independent of the FLEUR code version).
    _workflowversion = '0.2.1'
    # BaseRestartWorkChain hook: the process class that gets (re)submitted.
    _process_class = FleurCalculation
    @classmethod
    def define(cls, spec):
        """Define inputs, outline, outputs and exit codes of the workchain."""
        super().define(spec)
        spec.expose_inputs(FleurCalculation, exclude=('metadata.options',))
        spec.input('options', valid_type=orm.Dict, help='Optional parameters to set up computational details.')
        spec.input('description', valid_type=str, required=False, non_db=True, help='Calculation description.')
        spec.input('label', valid_type=str, required=False, non_db=True, help='Calculation label.')
        spec.input(
            'add_comp_para',
            valid_type=orm.Dict,
            default=lambda: orm.Dict(dict={
                'only_even_MPI': False,
                'forbid_single_mpi': False,
                'max_queue_nodes': 20,
                'max_queue_wallclock_sec': 86400
            }),
            help='Gives additional control over computational parameters'
            'only_even_MPI: set to true if you want to suppress odd number of MPI processes in parallelisation.'
            'This might speedup a calculation for machines having even number of sockets per node.'
            'max_queue_nodes: maximal number of nodes allowed on the remote machine. Used only to automatically solve some FLEUR failures.'
            'max_queue_wallclock_sec: maximal wallclock time allowed on the remote machine. Used only to automatically solve some FLEUR failures.'
        )
        # Standard BaseRestartWorkChain outline: run and inspect the process
        # repeatedly until it finishes or the restart budget is exhausted.
        spec.outline(
            cls.setup,
            cls.validate_inputs,
            while_(cls.should_run_process)(
                cls.run_process,
                cls.inspect_process,
            ),
            cls.results,
        )
        spec.expose_outputs(FleurCalculation)
        spec.exit_code(311,
                       'ERROR_VACUUM_SPILL_RELAX',
                       message='FLEUR calculation failed because an atom spilled to the'
                       'vacuum during relaxation')
        spec.exit_code(313, 'ERROR_MT_RADII_RELAX', message='Overlapping MT-spheres during relaxation.')
        spec.exit_code(388, 'ERROR_TIME_LIMIT_NO_SOLUTION', message='Computational resources are not optimal.')
        spec.exit_code(389, 'ERROR_MEMORY_ISSUE_NO_SOLUTION', message='Computational resources are not optimal.')
        spec.exit_code(390, 'ERROR_NOT_OPTIMAL_RESOURCES', message='Computational resources are not optimal.')
        spec.exit_code(399,
                       'ERROR_SOMETHING_WENT_WRONG',
                       message='FleurCalculation failed and FleurBaseWorkChain has no strategy '
                       'to resolve this')
    def validate_inputs(self):
        """
        Validate inputs that might depend on each other and cannot be validated by the spec.
        Also define dictionary `inputs` in the context, that will contain the inputs for the
        calculation that will be launched in the `run_calculation` step.
        """
        self.ctx.inputs = AttributeDict(self.exposed_inputs(FleurCalculation))
        self.ctx.max_queue_nodes = self.inputs.add_comp_para['max_queue_nodes']
        self.ctx.max_queue_wallclock_sec = self.inputs.add_comp_para['max_queue_wallclock_sec']
        input_options = self.inputs.options.get_dict()
        # 'optimize_resources' is consumed here and deliberately not forwarded
        # to the calculation's metadata options.
        self.ctx.optimize_resources = input_options.pop('optimize_resources', True)
        self.ctx.inputs.metadata.options = input_options
        if 'description' in self.inputs:
            self.ctx.inputs.metadata.description = self.inputs.description
        else:
            self.ctx.inputs.metadata.description = ''
        if 'label' in self.inputs:
            self.ctx.inputs.metadata.label = self.inputs.label
        else:
            self.ctx.inputs.metadata.label = ''
        if not self.ctx.optimize_resources:
            self.ctx.can_be_optimised = False  # set this for handlers to not change resources
            return
        resources_input = self.ctx.inputs.metadata.options['resources']
        try:
            self.ctx.num_machines = int(resources_input['num_machines'])
            self.ctx.num_mpiprocs_per_machine = int(resources_input['num_mpiprocs_per_machine'])
        except KeyError:
            # Without machine counts there is nothing to optimise; warn and proceed.
            self.ctx.can_be_optimised = False
            self.report('WARNING: Computation resources were not optimised.')
        else:
            try:
                self.ctx.num_cores_per_mpiproc = int(resources_input['num_cores_per_mpiproc'])
                self.ctx.use_omp = True
                self.ctx.suggest_mpi_omp_ratio = self.ctx.num_mpiprocs_per_machine / self.ctx.num_cores_per_mpiproc
            except KeyError:
                # No OMP information given: fall back to pure-MPI parallelisation.
                self.ctx.num_cores_per_mpiproc = 1
                self.ctx.use_omp = False
                self.ctx.suggest_mpi_omp_ratio = 1
            status = self.check_kpts()
            if status is None:
                self.ctx.can_be_optimised = True
            else:
                self.report('ERROR: Not optimal computational resources.')
                return status
    def check_kpts(self):
        """
        This routine checks if the total number of requested cpus
        is a factor of kpts and makes an optimisation.

        If the suggested number of num_mpiprocs_per_machine is 60% smaller than
        requested, it returns an exit code and the calculation stops without
        submission.
        """
        if 'fleurinp' in self.ctx.inputs:
            fleurinp = self.ctx.inputs.fleurinp
        else:
            fleurinp = get_fleurinp_from_remote_data(self.ctx.inputs.parent_folder)
        only_even_MPI = self.inputs.add_comp_para['only_even_MPI']
        forbid_single_mpi = self.inputs.add_comp_para['forbid_single_mpi']
        try:
            machines, mpi_tasks, omp_threads, message = optimize_calc_options(self.ctx.num_machines,
                                                                              self.ctx.num_mpiprocs_per_machine,
                                                                              self.ctx.num_cores_per_mpiproc,
                                                                              self.ctx.use_omp,
                                                                              self.ctx.suggest_mpi_omp_ratio,
                                                                              fleurinp,
                                                                              only_even_MPI=only_even_MPI,
                                                                              forbid_single_mpi=forbid_single_mpi)
        except ValueError as exc:
            self.report(exc)
            return self.exit_codes.ERROR_NOT_OPTIMAL_RESOURCES
        self.report(message)
        # Apply the optimised resource layout to the calculation options.
        self.ctx.inputs.metadata.options['resources']['num_machines'] = machines
        self.ctx.inputs.metadata.options['resources']['num_mpiprocs_per_machine'] = mpi_tasks
        if self.ctx.use_omp:
            self.ctx.inputs.metadata.options['resources']['num_cores_per_mpiproc'] = omp_threads
            if 'environment_variables' not in self.ctx.inputs.metadata.options:
                self.ctx.inputs.metadata.options['environment_variables'] = {}
            self.ctx.inputs.metadata.options['environment_variables']['OMP_NUM_THREADS'] = str(omp_threads)
    @process_handler(priority=1,
                     exit_codes=[
                         FleurCalculation.exit_codes.ERROR_FLEUR_CALC_FAILED,
                         FleurCalculation.exit_codes.ERROR_MT_RADII,
                         FleurCalculation.exit_codes.ERROR_NO_RETRIEVED_FOLDER,
                         FleurCalculation.exit_codes.ERROR_OPENING_OUTPUTS,
                         FleurCalculation.exit_codes.ERROR_NO_OUTXML,
                         FleurCalculation.exit_codes.ERROR_XMLOUT_PARSING_FAILED,
                         FleurCalculation.exit_codes.ERROR_RELAX_PARSING_FAILED,
                         FleurCalculation.exit_codes.ERROR_MISSING_DEPENDENCY,
                     ])
    def _handle_general_error(self, calculation):
        """
        Calculation failed for a reason this workchain cannot resolve; stop restarting.
        """
        self.ctx.restart_calc = calculation
        self.ctx.is_finished = True
        self.report('Calculation failed for a reason that can not be resolved automatically')
        self.results()
        return ProcessHandlerReport(True, self.exit_codes.ERROR_SOMETHING_WENT_WRONG)
    @process_handler(priority=48, exit_codes=FleurCalculation.exit_codes.ERROR_DROP_CDN)
    def _handle_dirac_equation(self, calculation):
        """
        Sometimes relaxation calculation fails with Diraq problem which is usually caused by
        problems with reusing charge density. In this case we resubmit the calculation, dropping the input cdn.
        """
        # try to drop remote folder and see if it helps
        is_fleurinp_from_relax = False
        if 'fleurinp' in self.ctx.inputs:
            if 'relax.xml' in self.ctx.inputs.fleurinp.files:
                is_fleurinp_from_relax = True
        if 'parent_folder' in self.ctx.inputs and is_fleurinp_from_relax:
            # A relax-derived fleurinp carries the inp.xml, so the remote can be dropped.
            del self.ctx.inputs.parent_folder
            self.ctx.restart_calc = None
            self.ctx.is_finished = False
            self.report('Calculation seems to fail due to corrupted charge density (can happen'
                        'during relaxation). I drop cdn from previous step')
            return ProcessHandlerReport(True)
        self.ctx.restart_calc = calculation
        self.ctx.is_finished = True
        self.report('Can not drop charge density. If I drop the remote folder, there will be no inp.xml')
        self.results()
        return ProcessHandlerReport(True, self.exit_codes.ERROR_SOMETHING_WENT_WRONG)
    @process_handler(priority=52, exit_codes=FleurCalculation.exit_codes.ERROR_VACUUM_SPILL_RELAX)
    def _handle_vacuum_spill_error(self, calculation):
        """
        An atom spilled into the vacuum during relaxation; abort so a RelaxBaseWorkChain can react.
        """
        self.ctx.restart_calc = calculation
        self.ctx.is_finished = True
        self.report('FLEUR calculation failed because an atom spilled to the vacuum during'
                    'relaxation. Can be fixed via RelaxBaseWorkChain.')
        self.results()
        return ProcessHandlerReport(True, self.exit_codes.ERROR_VACUUM_SPILL_RELAX)
    @process_handler(priority=51, exit_codes=FleurCalculation.exit_codes.ERROR_MT_RADII_RELAX)
    def _handle_mt_relax_error(self, calculation):
        """
        Muffin-tin spheres overlapped during relaxation; abort so a RelaxBaseWorkChain can react.
        """
        self.ctx.restart_calc = calculation
        self.ctx.is_finished = True
        self.report('FLEUR calculation failed due to MT overlap. Can be fixed via RelaxBaseWorkChain')
        self.results()
        return ProcessHandlerReport(True, self.exit_codes.ERROR_MT_RADII_RELAX)
    @process_handler(priority=50, exit_codes=FleurCalculation.exit_codes.ERROR_NOT_ENOUGH_MEMORY)
    def _handle_not_enough_memory(self, calculation):
        """
        Calculation failed due to lack of memory.

        Probably works for JURECA only, has to be tested for other systems.
        """
        if not self.ctx.can_be_optimised:
            self.ctx.restart_calc = calculation
            self.ctx.is_finished = True
            self.report('I am not allowed to optimize your settings. Consider providing at least'
                        'num_machines and num_mpiprocs_per_machine')
            self.results()
            return ProcessHandlerReport(True, self.exit_codes.ERROR_MEMORY_ISSUE_NO_SOLUTION)
        self.ctx.restart_calc = None
        self.ctx.is_finished = False
        self.report('Calculation failed due to lack of memory, I resubmit it with twice larger'
                    ' amount of computational nodes and smaller MPI/OMP ratio')
        # increase number of nodes
        propose_nodes = self.ctx.num_machines * 2
        if propose_nodes > self.ctx.max_queue_nodes:
            propose_nodes = self.ctx.max_queue_nodes
        self.ctx.num_machines = propose_nodes
        self.ctx.suggest_mpi_omp_ratio = self.ctx.suggest_mpi_omp_ratio / 2
        status = self.check_kpts()
        if status is not None:
            self.ctx.is_finished = True
            self.results()
            return ProcessHandlerReport(True, self.exit_codes.ERROR_NOT_OPTIMAL_RESOURCES)
        if 'settings' not in self.ctx.inputs:
            settings = {}
        else:
            settings = self.ctx.inputs.settings.get_dict()
        settings.setdefault('remove_from_remotecopy_list', [])
        if 'mixing_history*' not in settings['remove_from_remotecopy_list']:
            settings['remove_from_remotecopy_list'].append('mixing_history*')
        self.ctx.inputs.settings = orm.Dict(dict=settings)
        #check if the cdn.hdf can be reused
        #Out of memory can also occur after a couple of iterations if the mixing_history gets too large
        remote = calculation.base.links.get_outgoing().get_node_by_label('remote_folder')
        if _is_remote_reusable(self.ctx.inputs, calculation):
            if 'fleurinp' in self.ctx.inputs:
                del self.ctx.inputs.fleurinp
            self.ctx.inputs.parent_folder = remote
        return ProcessHandlerReport(True)
    @process_handler(priority=47, exit_codes=FleurCalculation.exit_codes.ERROR_TIME_LIMIT)
    def _handle_time_limits(self, calculation):
        """
        If calculation fails due to time limits, we simply resubmit it.
        """
        from aiida.common.exceptions import NotExistent
        # if previous calculation failed for the same reason, do not restart
        try:
            prev_calculation_remote = calculation.base.links.get_incoming().get_node_by_label('parent_folder')
            prev_calculation_status = prev_calculation_remote.creator.exit_status
            if prev_calculation_status in FleurCalculation.get_exit_statuses(['ERROR_TIME_LIMIT']):
                self.ctx.is_finished = True
                self.results()
                return ProcessHandlerReport(True)
        except NotExistent:
            pass
        self.report('FleurCalculation failed due to time limits, I restart it from where it ended')
        # increase wallclock time
        propose_wallclock = self.ctx.inputs.metadata.options['max_wallclock_seconds'] * 2
        if propose_wallclock > self.ctx.max_queue_wallclock_sec:
            propose_wallclock = self.ctx.max_queue_wallclock_sec
        self.ctx.inputs.metadata.options['max_wallclock_seconds'] = propose_wallclock
        # increase number of nodes
        propose_nodes = self.ctx.num_machines * 2
        if propose_nodes > self.ctx.max_queue_nodes:
            propose_nodes = self.ctx.max_queue_nodes
        self.ctx.num_machines = propose_nodes
        remote = calculation.base.links.get_outgoing().get_node_by_label('remote_folder')
        # resubmit providing inp.xml and cdn from the remote folder
        self.ctx.is_finished = False
        if _is_remote_reusable(self.ctx.inputs, calculation):
            if 'fleurinp' in self.ctx.inputs:
                del self.ctx.inputs.fleurinp
            self.ctx.inputs.parent_folder = remote
        return ProcessHandlerReport(True)
def _is_remote_reusable(inputs, calculation):
    """
    Check whether the remote folder of the given calculation
    can be resubmitted
    """
    # Restarting without a charge-density file would fail with an unhelpful
    # error, so only reuse the remote when one of the cdn files was retrieved.
    retrieved = calculation.base.links.get_outgoing().get_node_by_label('retrieved')
    filenames = retrieved.list_object_names()
    can_use_remote = 'cdn_last.hdf' in filenames or 'cdn1' in filenames
    if 'fleurinp' in inputs:
        modes = inputs.fleurinp.get_fleur_modes()
        # In these modes copying the cdn.hdf makes no sense.
        if modes['force_theorem'] or modes['dos'] or modes['band']:
            can_use_remote = False
    # Without a fleurinp the modes cannot be inspected here;
    # optimistically try to reuse the cdn.hdf and hope it works.
    return can_use_remote
|
4,160 | d5dae7ab6eb34c82ae795730ecae666c4f81f10a | from db_connector import insert_item_details, insert_user_details
from Item_details import ItemDetails
def mechant_service(user_id):
    """Interactive menu loop for a merchant user.

    Keeps prompting until the merchant chooses logout (option 5);
    invalid input is reported and the menu is shown again.
    """
    print('================================')
    print('Merchant Page')
    print('================================')
    menu = '=============================================\nenter your choice:\n1. Create item \n2. View item \n3. View order list \n4. Accept \n5. logout \n============================================= \n'
    while True:
        answer = input(menu)
        try:
            selection = int(answer)
            if selection == 1:
                create_item(user_id)
            elif selection == 2:
                print('view item')
            elif selection == 3:
                print('View order list')
            elif selection == 4:
                print('Accept')
            elif selection == 5:
                print('logout successfully')
                return
        except Exception as error:
            print(error)
            print('\n\ninvalid input')
def create_item(user_id):
    """Prompt the merchant for product details, validate them, and persist the item."""
    # Re-prompt each field until its validator accepts the value.
    while True:
        product_name = input('Enter the name of the product : ')
        if validate_product_name(product_name):
            break
    while True:
        price = input('Enter the price : ')
        if validate_product_price(price):
            break
    while True:
        qty = input('Enter the qty : ')
        if validate_product_quantity(qty):
            break
    item_detail = ItemDetails(product_name, price, qty, user_id)
    insert_item_details(item_detail)
    print('successfully created')
def validate_product_name(name):
    """Validate a product name: letters only, length 4..10 inclusive.

    Prints a reason and returns False on failure; returns True when valid.
    """
    if not name.isalpha():
        # Fixed message: isalpha() accepts letters only, but the old message
        # claimed "alphanumeric" was allowed.
        print("name should contain only alphabetic characters")
        return False
    if 3 < len(name) <= 10:
        return True
    # Fixed message: the check allows up to 10 letters inclusive, but the old
    # message said "less than 10".
    print('name should be more than 3 and at most 10 letters')
    return False
def validate_product_price(price):
    """Return True when ``price`` is a purely numeric string; otherwise print a hint and return False."""
    if not price.isnumeric():
        print("price should contain only numeric")
        return False
    return True
def validate_product_quantity(qty):
    """Return True when ``qty`` is a purely numeric string; otherwise print a hint and return False."""
    if not qty.isnumeric():
        print("qty should contain only numeric")
        return False
    return True
|
4,161 | 9e98c6b59433369bca3d4f7ae261f7e7ab3aae6b | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
# Results file: one "N,duration" row is appended per run (created on first use).
archivo = open("salida2.csv", "a+")
# Wall-clock start time used to measure the sort duration below.
startTime = datetime.now()
def mergeSort(alist):
    """Sort ``alist`` in place using recursive merge sort.

    Prints each sublist as it is visited, matching the original trace output.
    """
    print("Splitting ", alist)
    if len(alist) <= 1:
        return
    mid = len(alist) // 2
    left = alist[:mid]
    right = alist[mid:]
    mergeSort(left)
    mergeSort(right)
    # Merge the two sorted halves back into alist.
    i = j = k = 0
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            alist[k] = left[i]
            i += 1
        else:
            alist[k] = right[j]
            j += 1
        k += 1
    # Drain whichever half still has elements remaining.
    while i < len(left):
        alist[k] = left[i]
        i += 1
        k += 1
    while j < len(right):
        alist[k] = right[j]
        j += 1
        k += 1
alist = []
# First input line: declared element count N (used only for the results log).
N = int(input(""))
# Second input line: the whitespace-separated numbers to sort.
nums = input("").split()
for a in nums:
    alist.append(int(a))
mergeSort(alist)
print(' '.join(str(a) for a in alist)+' \n')
# Log "N,elapsed" so sort timings can be compared across runs.
tiempo = datetime.now() - startTime
archivo.write(str(N)+",")
archivo.write(str(tiempo)+"\n")
archivo.close()
4,162 | 21aee78e8cbb1ca150bca880e79dc0d84326e2d4 | print("RUNNING ON CPU")
from library import config, utils, broker_funcs, portfolio
import numpy as np
import pandas as pd
# import matplotlib.pyplot as plt
from fbm.fbmlib import fbm
import time
import pickle
# This run requires dynamic prices; fail fast if the config disables them.
assert config.changePrice == True
print(config.config)
# Record wall-clock start/end times to report the total CPU run time below.
t0 = time.localtime()
t0str = time.strftime("%H:%M:%S",t0)
# Generate the trader portfolios that participate in the simulated market.
traderIDs = portfolio.portfGen()
transactions = pd.DataFrame()
totalOrders = pd.DataFrame()
broker = pd.DataFrame()
# Main simulation loop over time steps 993..4591: collect orders, match them,
# then update prices for the next step.
for t in range(993,4592):
    broker, totalOrders = broker_funcs.thresholdBrokerage(traderIDs, t, broker, totalOrders)
    broker, transactions = broker_funcs.instantMatch(traderIDs, broker, transactions)
    portfolio.priceChange(time=t)
    print("New threshold 500", t)
# with open('./results/traderIDs_cpu_nothreshold' + '.pkl', 'wb') as f:
#     pickle.dump(traderIDs, f, pickle.HIGHEST_PROTOCOL)
# Ttransactions = pd.DataFrame()
# TtotalOrders = pd.DataFrame()
# Tbroker = pd.DataFrame()
# for key,portf in traderIDs.items():
#     portf.reset(ptile=70)
# for t in range (993,4592):
#     Tbroker, TtotalOrders = broker_funcs.thresholdBrokerage(traderIDs, t, Tbroker, TtotalOrders)
#     Tbroker, Ttransactions = broker_funcs.instantMatch(traderIDs, Tbroker, Ttransactions)
#     portfolio.priceChange(time=t)
#     print(t)
t1 = time.localtime()
t1str = time.strftime("%H:%M:%S",t1)
# Persist final portfolios and all result tables under ./results/.
with open('./results/traderIDs_500_newthreshold' + '.pkl', 'wb') as f:
    pickle.dump(traderIDs, f, pickle.HIGHEST_PROTOCOL)
print("CPU RUN TIME | nportfs: ", config.nportfs)
print(t0str)
print(t1str)
TstockPool, ThurstPool = portfolio.stockChars()
transactions.to_csv('./results/transactions_500_newthreshold.csv')
totalOrders.to_csv('./results/totalOrders_500_newthreshold.csv')
np.save('./results/stockPool_500_newthreshold.npy',TstockPool)
np.save('./results/hurstPool_500_newthreshold.npy',ThurstPool)
# Snapshot the configuration used for this run next to its results.
conf = open('./results/config_500_newthresholded' + '.txt',"w")
conf.write(str(config.config))
conf.close()
4,163 | 0719448e7eb8d48e636be1332c904beebf27e02d | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PredictionQueryToken(Model):
    """PredictionQueryToken.

    :param session:
    :type session: str
    :param continuation:
    :type continuation: str
    :param max_count:
    :type max_count: int
    :param order_by: Possible values include: 'Newest', 'Oldest', 'Suggested'
    :type order_by: str or
     ~azure.cognitiveservices.vision.customvision.training.models.enum
    :param tags:
    :type tags:
     list[~azure.cognitiveservices.vision.customvision.training.models.PredictionQueryTag]
    :param iteration_id:
    :type iteration_id: str
    :param start_time:
    :type start_time: datetime
    :param end_time:
    :type end_time: datetime
    :param application:
    :type application: str
    """
    _attribute_map = {
        'session': {'key': 'Session', 'type': 'str'},
        'continuation': {'key': 'Continuation', 'type': 'str'},
        'max_count': {'key': 'MaxCount', 'type': 'int'},
        'order_by': {'key': 'OrderBy', 'type': 'str'},
        'tags': {'key': 'Tags', 'type': '[PredictionQueryTag]'},
        'iteration_id': {'key': 'IterationId', 'type': 'str'},
        'start_time': {'key': 'StartTime', 'type': 'iso-8601'},
        'end_time': {'key': 'EndTime', 'type': 'iso-8601'},
        'application': {'key': 'Application', 'type': 'str'},
    }
    def __init__(self, session=None, continuation=None, max_count=None, order_by=None, tags=None, iteration_id=None, start_time=None, end_time=None, application=None):
        super(PredictionQueryToken, self).__init__()
        # Assign every query-token field in one sweep; equivalent to the
        # generated one-assignment-per-attribute form.
        field_values = {
            'session': session,
            'continuation': continuation,
            'max_count': max_count,
            'order_by': order_by,
            'tags': tags,
            'iteration_id': iteration_id,
            'start_time': start_time,
            'end_time': end_time,
            'application': application,
        }
        for attr_name, attr_value in field_values.items():
            setattr(self, attr_name, attr_value)
|
4,164 | 9d27b8844ab4070bb53afd89620177b89013956e | from pickle import dump, load
def save(parameters):
    """Persist game progress to 'saves/save.zs' (pickle format).

    Args:
        parameters: any picklable object describing the game state.
    """
    # The `with` statement closes the file automatically; the previous
    # explicit close() inside the with-block was redundant.
    with open('saves/save.zs', 'wb') as game_save:
        dump(parameters, game_save)
def load_settings():
    """Load saved progress when the player chooses the 'continue game' option.

    Returns the unpickled state, or False when no save file exists.
    """
    try:
        save_file = open('saves/save.zs', 'rb')
    except FileNotFoundError:
        return False
    with save_file:
        return load(save_file)
4,165 | c153c7a3a11a09ed645540632daec42e8905432a | #!/bin/env python3
"""
A tool for painting and saving Game Boy tiles.
Usage: `python3 gb-tile-painter.py`
Please see: README.md.
"""
from sys import argv, exit
# If we got an argument and it is --help or -h
if len(argv) == 2 and (argv[1] == "--help" or argv[1] == "-h"):
    print(__doc__) # Print the docstring
    exit(0) # And exit
# Imported after the help check so `--help` works without a GUI environment.
from MainWindow import MainWindow
if __name__ == "__main__":
    # Create the Tk-style main window and hand control to its event loop.
    window = MainWindow()
    window.mainloop()
4,166 | f653e906d3026de4bb1e705162f4321bb75e8705 | import tensorflow as tf
import blood_model
import os
import numpy as np
FLAGS = tf.app.flags.FLAGS
# Run name: checkpoints and summaries are grouped under this directory prefix.
RUN = 'new_test_hm'
tf.app.flags.DEFINE_string('checkpoint_dir', RUN+'/checkpoints',
                           """Directory where to write event logs and checkpoint.""")
tf.app.flags.DEFINE_string('summaries_dir', RUN+'/summaries',
                           """Summaries directory""")
# NOTE(review): 'max_steps' and 'continue_run' are declared with DEFINE_string
# although their defaults are an int and a bool — presumably DEFINE_integer /
# DEFINE_boolean were intended; confirm FLAGS value types before changing.
tf.app.flags.DEFINE_string('max_steps', 20000,
                           """Maximum steps to train the model""")
tf.app.flags.DEFINE_string('continue_run', True,
                           """Continue from when training stopped?""")
def train():
    """Train blood_model for a number of steps. Periodically evaluate training and validation accuracies """
    # Global step is restored from checkpoints so training can resume mid-run.
    global_step = tf.Variable(0, name='global_step', trainable=False)
    # Get images and labels for blood_model.
    blood_datasets = blood_model.inputs(eval_data=False)
    # randomize the inputs look
    x, y_, data, keep_prob = blood_model.prepare_input()
    # build the convolution network
    conv_output, _, _, _, _ = blood_model.inference(data, keep_prob)
    # Calculate loss.
    loss = blood_model.loss(conv_output, y_)
    accuracy = blood_model.accuracy(conv_output, y_)
    train_op = blood_model.train(loss, global_step)
    sess = tf.InteractiveSession()
    sess.run(tf.initialize_all_variables())
    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()
    saver = tf.train.Saver()
    check_filesystem()
    # One event writer per split so TensorBoard can overlay the curves.
    train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/train', sess.graph)
    validation_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/validation', sess.graph)
    test_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/test', sess.graph)
    _ = reload_checkpoint_if_exists(sess, saver, train_writer, validation_writer, test_writer)
    # Resume from the restored global step (+1) up to the configured maximum.
    for step in range(tf.train.global_step(sess, global_step)+1, FLAGS.max_steps):
        batch = blood_datasets.train.next_batch()
        # keep_prob 0.5 enables dropout during training; 1.0 disables it for eval.
        _, loss_output = sess.run([train_op, loss], feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
        # Abort immediately if the loss diverges to NaN.
        assert not np.isnan(loss_output)
        if step % 100 == 0:
            summary, train_accuracy = sess.run([summary_op, accuracy], feed_dict={
                x: batch[0], y_: batch[1], keep_prob: 1.0})
            train_writer.add_summary(summary, step)
            print("step %d, training accuracy %g, loss %g" % (step, train_accuracy, loss_output))
        if (step % 1000 == 0 or (step + 1) == FLAGS.max_steps) and not step == 0:
            # Every 1000 steps (and at the end): validate and checkpoint.
            batch = blood_datasets.validation.next_batch()
            summary_validation, accuracy_validation = sess.run([summary_op, accuracy], feed_dict={
                x: batch[0], y_: batch[1], keep_prob: 1.0})
            validation_writer.add_summary(summary_validation, step)
            print("validation accuracy %g" % accuracy_validation)
            # save checkpoint
            checkpoint_path = os.path.join(FLAGS.checkpoint_dir, 'model.ckpt')
            saver.save(sess, checkpoint_path, global_step=step)
            print("saving checkpoint")
def check_filesystem():
    """
    Prepare the summaries and checkpoint directories.

    When continuing a run, missing directories are created and existing data
    is kept; when starting fresh, previously existing summaries/checkpoints
    are deleted and recreated.
    """
    def _make_summary_tree():
        # One event directory per split, all under the summaries root.
        tf.gfile.MakeDirs(FLAGS.summaries_dir)
        for subdir in ('train', 'validation', 'test'):
            tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, subdir))
    if FLAGS.continue_run:
        # Keep whatever is already there; only create what is missing.
        if not tf.gfile.Exists(FLAGS.summaries_dir):
            _make_summary_tree()
        if not tf.gfile.Exists(FLAGS.checkpoint_dir):
            tf.gfile.MakeDirs(FLAGS.checkpoint_dir)
    else:
        # Training restarted: wipe and recreate any existing summaries,
        # and always start with an empty checkpoint directory.
        if tf.gfile.Exists(FLAGS.summaries_dir):
            tf.gfile.DeleteRecursively(FLAGS.summaries_dir)
            _make_summary_tree()
        if tf.gfile.Exists(FLAGS.checkpoint_dir):
            tf.gfile.DeleteRecursively(FLAGS.checkpoint_dir)
        tf.gfile.MakeDirs(FLAGS.checkpoint_dir)
def reload_checkpoint_if_exists(sess, saver, train_writer, validation_writer, test_writer):
    """
    Restore an existing model from checkpoint data.

    Returns the restored global step, or -1 when no checkpoint was loaded.
    """
    global_step = -1
    if FLAGS.continue_run:
        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            # Restores from checkpoint
            saver.restore(sess, ckpt.model_checkpoint_path)
            # extract global_step from it.
            global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            # NOTE(review): print() is given logging-style arguments, so this
            # outputs the literal "%d" followed by the number — likely meant
            # `print("checkpoint found at step %d" % global_step)`.
            print("checkpoint found at step %d", global_step)
            # ensure that the writers ignore saved summaries that occurred after the last checkpoint but before a crash
            train_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.START), global_step)
            validation_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.START), global_step)
            test_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.START), global_step)
        else:
            print('No checkpoint file found')
    return global_step
def main(argv=None):
    """Entry point invoked by tf.app.run(); ignores argv and starts training."""
    train()
if __name__ == '__main__':
    tf.app.run()
|
4,167 | f153da7e4537f807f6c9d9d268a00443933d8315 | #!/usr/bin/env python
# coding: utf-8
# # Cabecera
# In[1]:
# -*- coding: utf-8 -*-
# ------------- Cantidad de segundos que has vivido -------------
# # Definición de variables
# In[2]:
# Variable definitions
anios = 30
dias_por_anio = 365
horas_por_dia = 24
# BUG FIX: the script claims to print the number of *seconds* lived, but the
# old value of `segundos_por_hora` was 60 (which is minutes per hour); an hour
# has 60 * 60 = 3600 seconds.
minutos_por_hora = 60
segundos_por_minuto = 60
segundos_por_hora = minutos_por_hora * segundos_por_minuto  # 3600

# Operation: total seconds lived in `anios` years (365-day years, leap days ignored)
print (anios * dias_por_anio * horas_por_dia * segundos_por_hora)
4,168 | cc019c732003ed72db80a7893096a0bef0f12e47 | """
Iterations over :term:`hosts<host>`, :term:`roles<role>`,
:term:`components<component>` and config files.
"""
from contextlib import contextmanager
from fabric.api import env, settings, abort
from os.path import join
from pkg_resources import iter_entry_points
from warnings import warn
from fabric.network import ssh_config
from confab.options import options
from confab.validate import assert_exists
from confab.loaders import FileSystemEnvironmentLoader
from confab.data import DataLoader
from confab.conffiles import ConfFiles
@contextmanager
def this_hostname(hostname):
    """
    Temporarily point Fabric at a specific hostname using the current SSH config.

    Overrides both the host string and the port for the duration of the block.
    """
    config = ssh_config(hostname)
    with settings(host_string=hostname,
                  port=config.get("port", env.default_port)):
        yield
def _get_environmentdef():
    """
    Retrieve the EnvironmentDefinition from the fabric env, aborting when absent.
    """
    if 'environmentdef' not in env:
        abort("Environment needs to be configured")
    environmentdef = env.environmentdef
    # If we're running via `fab`, we should restrict the environment
    # to the current host.
    return environmentdef.with_hosts(env.host_string) if env.host_string else environmentdef
def iter_hosts():
    """
    Iterate over all hosts in the configured environment.
    """
    for current_host in _get_environmentdef().hosts():
        # fabric needs the host if we're calling from main()
        with this_hostname(current_host.host):
            yield current_host
def iter_hosts_and_roles():
    """
    Iterate over every (host, role) pair in the configured environment.
    """
    for pair in _get_environmentdef().all():
        # fabric needs the host if we're calling from main()
        with this_hostname(pair.host):
            yield pair
def iter_conffiles(directory=None):
    """
    Generate :class:`~confab.conffiles.ConfFiles` objects for each
    ``host_and_role`` in an :term:`environment`.

    Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and
    :class:`~confab.data.DataLoader`.

    :param directory: Path to templates and data directories.
    """
    return (make_conffiles(host_and_role, directory) for host_and_role in iter_hosts_and_roles())
def make_conffiles(host_and_role, directory=None):
    """
    Create a :class:`~confab.conffiles.ConfFiles` object for a
    ``host_and_role`` in an :term:`environment`.

    Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and
    :class:`~confab.data.DataLoader`.

    :param directory: Path to templates and data directories.
    """
    directories = [directory or options.get_base_dir()]
    directories.extend(iter_extension_paths())
    # Build concrete lists instead of lazy map() objects: under Python 3 the
    # map iterator would be exhausted by the assert_exists(*...) unpacking,
    # leaving the loaders below with no directories at all.
    templates_dirs = [join(base, options.get_templates_dir()) for base in directories]
    assert_exists(*templates_dirs)
    data_dirs = [join(base, options.get_data_dir()) for base in directories]
    assert_exists(*data_dirs)
    return ConfFiles(host_and_role,
                     FileSystemEnvironmentLoader(*templates_dirs),
                     DataLoader(data_dirs))
def iter_extension_paths():
    """
    Yield template/data base paths contributed by confab extensions.

    Each "confab.extensions" entry point should resolve to a callable that
    returns the base path to the data and templates directories; extensions
    that fail to import are reported as warnings and skipped.
    """
    for entry_point in iter_entry_points(group="confab.extensions"):
        try:
            yield entry_point.load()()
        except ImportError as error:
            warn(str(error))
|
4,169 | efeb069a7e2aab7262a557236c693752d2973523 | import sys
import time
from abc import ABC, abstractmethod
from PySide6.QtGui import QPixmap
from PySide6.QtWidgets import QApplication
import inupdater.resource
from inupdater.splash import SplashScreen
class UserInterface(ABC):
    """Interface for a GUI element of the updater."""
    def __init__(self) -> None:
        # Progress-state counter shared by all implementations; starts at 0.
        self.state = 0
    @abstractmethod
    def show_message(self, msg: str):
        """Show a message"""
    @abstractmethod
    def set_state(self, state: int):
        """Set the program progress by a state value"""
    @abstractmethod
    def close(self):
        """Close the updater UI"""
class CmdUI(UserInterface):
    """Command-line UI: prints messages instead of showing a window."""
    def __init__(self) -> None:
        super().__init__()
    def show_message(self, msg: str):
        # Prefix each message with the current state number for context.
        print(self.state, msg)
    def set_state(self, state: int):
        """Set the program progress by a state value"""
        self.state = state
    def close(self):
        # Nothing to release for a console UI.
        pass
class QtUI(UserInterface):
    """Qt splash-screen UI with a 10-step progress bar."""
    def __init__(self) -> None:
        super().__init__()
        # NOTE(review): the QApplication is only held in a local — presumably
        # kept alive by Qt internals here, but verify it is not collected.
        app = QApplication(sys.argv)
        qpix = QPixmap(":/src/inupdater/data/splash.png")
        self.splash = SplashScreen(qpix)
        self.splash.set_progress_max(10)
        self.splash.show()
    def show_message(self, msg: str):
        self.splash.set_message(msg)
    def set_state(self, state: int):
        """Set the program progress by a state value"""
        # NOTE(review): the bar is set to the *previous* state before
        # `self.state` is updated — looks like it should display `state`;
        # confirm the intended behaviour before changing.
        self.splash.set_progress_value(self.state)
        self.state = state
        # Brief pause so each progress step stays visible to the user.
        time.sleep(1)
    def close(self):
        self.splash.close()
|
4,170 | 81535b43437f9bcb18973ceaa5c3340ad9bd4f0f | from django.forms import ModelForm
from django import forms
from models import *
from django.forms.widgets import *
class CommentForm(ModelForm):
    """ModelForm bound to the Comment model; all model fields are exposed.

    NOTE(review): modern Django requires an explicit `fields` or `exclude`
    on ModelForm Meta — confirm the target Django version before deploying.
    """
    # tags = TagField(widget=TagAutocomplete())
    class Meta:
        model=Comment
        # fields = ('title', 'description', 'tags', 'enable_comments', 'owner')#, 'first_card' )
        # widgets = {
        #     'slug': HiddenInput,
        #     'number_of_cards': HiddenInput,
        # }
|
4,171 | 377143635939cf113e4188b5c4f55cec068a17b1 | #!/usr/bin/env python
# coding:utf-8
import time
from SocketServer import (TCPServer as TCP,
StreamRequestHandler as SRH)
HOST = '127.0.0.1'
PORT = 8888
BUFSIZE = 1024
ADDR = (HOST, PORT)
class MyRequestHandler(SRH):
    """Echo-style handler: returns each received line prefixed with a timestamp."""
    def handle(self):
        # Called once per client connection by the TCPServer machinery.
        print '...connected from :', self.client_address
        self.wfile.write('[%s] %s' % (time.ctime(),
                                      self.rfile.readline()))
tcpServ = TCP(ADDR, MyRequestHandler)
print 'waiting for connection...'
# serve_forever() blocks, handling one request per connection until interrupted.
tcpServ.serve_forever( )
4,172 | 7ed84706ace2cbf523021887df1e13d113f9ce4c | import os.path
import numpy as np
import matplotlib.pyplot as plt
import util
import collections
def learn_distributions(file_lists_by_category):
    """
    Estimate the parameters p_d, and q_d from the training set

    Input
    -----
    file_lists_by_category: A two-element list. The first element is a list of
    spam files, and the second element is a list of ham files.

    Output
    ------
    probabilities_by_category: A two-element tuple. The first element is a dict
    whose keys are words, and whose values are the smoothed estimates of p_d;
    the second element is a dict whose keys are words, and whose values are the
    smoothed estimates of q_d
    """
    spam_files, ham_files = file_lists_by_category

    # Per-class word-frequency tables.
    spam_counts = util.get_word_freq(spam_files)
    ham_counts = util.get_word_freq(ham_files)

    # Total number of word tokens observed in each class.
    n_spam_tokens = sum(spam_counts.values())
    n_ham_tokens = sum(ham_counts.values())

    # Combined vocabulary over both classes.
    vocabulary = util.Counter()
    for word in spam_counts:
        vocabulary[word] += spam_counts[word]
    for word in ham_counts:
        vocabulary[word] += ham_counts[word]
    vocab_size = len(vocabulary)

    # Laplace-smoothed per-word probabilities for each class.
    spam_distribution = {word: (spam_counts[word] + 1) / (vocab_size + n_spam_tokens)
                         for word in vocabulary}
    ham_distribution = {word: (ham_counts[word] + 1) / (vocab_size + n_ham_tokens)
                        for word in vocabulary}

    return (spam_distribution, ham_distribution)
def classify_new_email(filename,probabilities_by_category,prior_by_category):
    """
    Use Naive Bayes classification to classify the email in the given file.

    Inputs
    ------
    filename: name of the file to be classified
    probabilities_by_category: output of function learn_distributions
    prior_by_category: A two-element list as [\pi, 1-\pi], where \pi is the
    parameter in the prior class distribution

    Output
    ------
    classify_result: A two-element tuple. The first element is a string whose value
    is either 'spam' or 'ham' depending on the classification result, and the
    second element is a two-element list as [log p(y=1|x), log p(y=0|x)],
    representing the log posterior probabilities
    """
    p_d, q_d = probabilities_by_category
    log_post_spam = 0
    log_post_ham = 0

    # Accumulate log-likelihoods over the words present in the email;
    # unseen words are skipped for the class that never observed them.
    counts = util.get_word_freq([filename])
    for word in counts:
        if word in p_d:
            log_post_spam += counts[word] * np.log(p_d[word])
        if word in q_d:
            log_post_ham += counts[word] * np.log(q_d[word])

    # Add the class priors.
    log_post_spam += np.log(prior_by_category[0])
    log_post_ham += np.log(prior_by_category[1])

    label = "spam" if log_post_spam > log_post_ham else "ham"
    return (label, [log_post_spam, log_post_ham])
if __name__ == '__main__':

    # folder for training and testing
    spam_folder = "data/spam"
    ham_folder = "data/ham"
    test_folder = "data/testing"

    # generate the file lists for training
    file_lists = []
    for folder in (spam_folder, ham_folder):
        file_lists.append(util.get_files_in_folder(folder))

    # Learn the distributions
    probabilities_by_category = learn_distributions(file_lists)

    # prior class distribution
    priors_by_category = [0.5, 0.5]

    # Store the classification results
    performance_measures = np.zeros([2,2])
    # explanation of performance_measures:
    # columns and rows are indexed by 0 = 'spam' and 1 = 'ham'
    # rows correspond to true label, columns correspond to guessed label
    # to be more clear, performance_measures = [[p1 p2]
    #                                           [p3 p4]]
    # p1 = Number of emails whose true label is 'spam' and classified as 'spam'
    # p2 = Number of emails whose true label is 'spam' and classified as 'ham'
    # p3 = Number of emails whose true label is 'ham' and classified as 'spam'
    # p4 = Number of emails whose true label is 'ham' and classified as 'ham'

    # Classify emails from testing set and measure the performance
    for filename in (util.get_files_in_folder(test_folder)):
        # Classify
        label,log_posterior = classify_new_email(filename,
                                                 probabilities_by_category,
                                                 priors_by_category)

        # Measure performance (the filename indicates the true label)
        base = os.path.basename(filename)
        true_index = ('ham' in base)
        guessed_index = (label == 'ham')
        performance_measures[int(true_index), int(guessed_index)] += 1

    template="You correctly classified %d out of %d spam emails, and %d out of %d ham emails."
    # Correct counts are on the diagonal
    correct = np.diag(performance_measures)
    # totals are obtained by summing across guessed labels
    totals = np.sum(performance_measures, 1)
    print(template % (correct[0],totals[0],correct[1],totals[1]))

    ### TODO: Write your code here to modify the decision rule such that
    ### Type 1 and Type 2 errors can be traded off, plot the trade-off curve
    print("----type 1 and 2 here-----")
    # Candidate offsets added to the spam log-posterior; each one shifts
    # the decision boundary and yields a different Type1/Type2 trade-off.
    offset = [-1E2, -1E1, -1E0, 1E0, 1E1]
    type1 = []
    type2 = []
    for offset_value in offset:
        performance_measures = np.zeros([2, 2])
        for filename in (util.get_files_in_folder(test_folder)):
            # Classify
            label, log_posterior = classify_new_email(filename,
                                                      probabilities_by_category,
                                                      priors_by_category)
            # add offset to the spam log-posterior before deciding
            if(log_posterior[0] + offset_value > log_posterior[1]):
                label = "spam"
            else:
                label = "ham"
            # Measure performance (the filename indicates the true label)
            base = os.path.basename(filename)
            true_index = ('ham' in base)
            guessed_index = (label == 'ham')
            performance_measures[int(true_index), int(guessed_index)] += 1
        # Type 1 = spam misclassified as ham; Type 2 = ham misclassified as spam
        type1.append(performance_measures[0][1])
        type2.append(performance_measures[1][0])
        template = "You correctly classified %d out of %d spam emails, and %d out of %d ham emails."
        # Correct counts are on the diagonal
        correct = np.diag(performance_measures)
        # totals are obtained by summing across guessed labels
        totals = np.sum(performance_measures, 1)
        print(template % (correct[0], totals[0], correct[1], totals[1]))

    # Scatter-plot the trade-off curve, one point per offset.
    plt.title("Type1 vs Type2 Error")
    for i in range(0, len(type1)):
        plt.scatter(type1[i], type2[i])
    plt.xlabel("type1")
    plt.ylabel("type2")
    plt.legend(offset, loc='best')
    plt.show()
|
4,173 | d67a2eca4e2fde443b99f5133c2657cdf4ac00de | #!/usr/bin/python3
"Places module"
from flask import jsonify, request, Response, abort
from api.v1.views import app_views
from models import storage
from models.place import Place
@app_views.route('/cities/<city_id>/places', strict_slashes=False,
                 methods=['GET'])
def get_all_places(city_id):
    """Retrieve every Place attached to the City with ``city_id``.

    Returns a JSON list of the city's places with status 200, or 404
    when the city does not exist.
    """
    # CONSISTENCY FIX: the rule now starts with '/' like every other
    # route in this module (old Flask versions concatenated the blueprint
    # url_prefix and the rule directly, breaking slash-less rules).
    city = storage.get("City", city_id)
    if not city:
        abort(404)
    return jsonify([place.to_dict() for place in city.places]), 200
@app_views.route('/places/<place_id>', strict_slashes=False, methods=['GET'])
def get_place(place_id):
    """Fetch a single Place by id; 404 when it does not exist."""
    found = storage.get("Place", place_id)
    if not found:
        abort(404)
    return jsonify(found.to_dict()), 200
@app_views.route('/places/<place_id>', strict_slashes=False,
                 methods=['DELETE'])
def delete_place(place_id):
    """Remove the Place with ``place_id``; 404 when it does not exist."""
    target = storage.get("Place", place_id)
    if not target:
        abort(404)
    storage.delete(target)
    storage.save()
    return jsonify({}), 200
@app_views.route('/cities/<city_id>/places', strict_slashes=False,
                 methods=['POST'])
def post_place(city_id):
    """Create a new Place inside the City with ``city_id``.

    Expects a JSON body containing at least ``name`` and ``user_id``.
    Returns the created place as JSON with status 201; 400 on a
    missing/invalid body or missing keys, 404 when the city or user
    does not exist.
    """
    kwargs = request.get_json()
    # Guard clauses for the request payload.
    if not kwargs:
        abort(400, 'Not a JSON')
    if 'name' not in kwargs:
        abort(400, 'Missing name')
    if 'user_id' not in kwargs:
        abort(400, 'Missing user_id')
    city = storage.get("City", city_id)
    if not city:
        abort(404)
    user = storage.get("User", kwargs['user_id'])
    if not user:
        abort(404)
    # overwrites or adds w/ valid state_id in case they provide in post
    kwargs['city_id'] = city_id
    kwargs['user_id'] = user.id
    new_place = Place(**kwargs)
    # BUG FIX: the new object was never registered with storage before
    # saving, so a bare storage.save() silently dropped it; save() on
    # the instance persists it.
    new_place.save()
    return jsonify(new_place.to_dict()), 201
@app_views.route('/places/<place_id>', strict_slashes=False, methods=['PUT'])
def update_place(place_id):
    """Update mutable attributes of the Place with ``place_id``.

    Expects a JSON body; immutable keys (id, user_id, city_id,
    created_at, updated_at) are ignored. Returns the updated place as
    JSON with status 200; 400 on a missing/invalid body, 404 when the
    place does not exist.
    """
    params = request.get_json()
    if not params:
        abort(400, 'Not a JSON')
    place = storage.get('Place', place_id)
    if not place:
        abort(404)
    # BUG FIX: the ignore list misspelled 'created_at' as 'create_at',
    # which let clients overwrite the creation timestamp.
    for k, v in params.items():
        if k not in ['id', 'user_id', 'city_id', 'created_at', 'updated_at']:
            setattr(place, k, v)
    storage.save()
    return jsonify(place.to_dict()), 200
|
4,174 | f4ab6df8efc334fa338ade7deecd36d8cd859e96 | # Title: K번째 수
# Link: https://www.acmicpc.net/problem/11004
import sys
sys.setrecursionlimit(10 ** 6)
def read_list_int():
    """Read one stdin line and parse it as space-separated ints."""
    line = sys.stdin.readline().strip()
    return [int(token) for token in line.split(' ')]
def read_single_int():
    """Read one stdin line and parse it as a single int."""
    line = sys.stdin.readline()
    return int(line.strip())
def selection_sort(nums, k):
    """Partial selection sort: return the k-th smallest value of nums.

    One element is moved into place per pass; on the k-th pass the
    minimum of the remaining suffix is the answer. Mutates nums.
    """
    boundary = 0  # everything left of this index is already in place
    while True:
        smallest = 9999999999  # sentinel larger than any expected input
        smallest_at = 0
        for idx in range(boundary, len(nums)):
            if nums[idx] < smallest:
                smallest = nums[idx]
                smallest_at = idx
        k -= 1
        if k == 0:
            return smallest
        nums[boundary], nums[smallest_at] = nums[smallest_at], nums[boundary]
        boundary += 1
def partition(nums, left, right, pivot_index):
    """Lomuto partition of nums[left:right+1] around nums[pivot_index].

    Moves the pivot to its final sorted position and returns that index;
    strictly smaller elements end up on its left. Mutates nums.
    """
    pivot = nums[pivot_index]
    # Park the pivot at the right edge while scanning.
    nums[pivot_index], nums[right] = nums[right], nums[pivot_index]
    boundary = left
    for idx in range(left, right):
        if nums[idx] < pivot:
            nums[boundary], nums[idx] = nums[idx], nums[boundary]
            boundary += 1
    # Drop the pivot into its final slot.
    nums[right], nums[boundary] = nums[boundary], nums[right]
    return boundary
def quick_select(nums, left, right, k):
    """Iterative quickselect for the k-th smallest (1-based) element."""
    while left != right:
        # Use the rightmost element as the pivot.
        split = partition(nums, left, right, right)
        if split == k - 1:
            return nums[split]
        if split > k - 1:
            right = split - 1
        else:
            left = split + 1
    return nums[left]
def get_kth_number(nums, k):
    """Return the k-th smallest element of nums (1-based).

    A partial selection sort was tried first but exceeded the time
    limit; quickselect runs in expected linear time.
    """
    return quick_select(nums, 0, len(nums) - 1, k)
if __name__ == '__main__':
    # N: list length, K: 1-based rank to select (per problem statement).
    N, K = read_list_int()
    A = read_list_int()
    print(get_kth_number(A, K))
|
4,175 | 8743be809953f59bd14431e509042c4c51d9fab4 | import torch
from torchvision import datasets, transforms
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from PIL import Image
import requests
from io import BytesIO
from net import Net
class predict_guitar():
    """Loads a trained guitar-classifier network once, then predicts the
    guitar model shown in an image fetched from a URL."""

    def __init__(self):
        """Load the network weights onto GPU when available, else CPU."""
        self.model = Net()
        use_cuda = torch.cuda.is_available()
        map_location = torch.device('cuda' if use_cuda else 'cpu')
        # load parameters
        self.model.load_state_dict(torch.load('model.pt',
                                              map_location=map_location))
        if use_cuda:
            self.model.cuda()
        else:
            self.model.cpu()
        self.model.eval()

    def softmax(self, vector):
        """Normalize a score vector into probabilities."""
        exps = np.exp(vector)
        return exps / exps.sum()

    def predict(self, url):
        """Download the image at *url* and return the predicted class
        name as a single-element list."""
        response = requests.get(url)
        img = Image.open(BytesIO(response.content))
        preprocess = transforms.Compose([transforms.Grayscale(),
                                         transforms.Resize((128, 128)),
                                         transforms.ToTensor()])
        img = preprocess(img).unsqueeze(0)
        if torch.cuda.is_available():
            img = img.cuda()
        out = self.model(img)
        classes = ['Jazzmaster', 'Les Paul', 'Mustang', 'PRS SE', 'SG',
                   'Stratocaster', 'Telecaster']
        if torch.cuda.is_available():
            logs = out.cpu().data.numpy()
        else:
            logs = out.data.numpy()
        return [classes[logs.argmax()]]
|
4,176 | 6f8ce77dd45f555ca092482715b6ccaa33414fd8 | # --------------------------------------------------------------------------------------------------
# Property of UAH
# IDS module for ladder logic monitoring
# This codes is Written by Rishabh Das
# Date:- 18th June 2018
# --------------------------------------------------------------------------------------------------
import hashlib
import os
# ---------------------------------------------------------------------------------------------------
# This section declares the Global variables of the project
# ---------------------------------------------------------------------------------------------------
# Shared state used by the monitoring routines below.
Monitoredlist=[]  # names of the *.openplc files under watch
list_create=[]  # freshly computed [name, hash, name, hash, ...] table
list_compare=[]  # table read back from Record.txt for comparison
# ---------------------------------------------------------------------------------------------------
# This section notes the number of files in the directory and creates the list of the files that needs
# to be monitored
# ---------------------------------------------------------------------------------------------------
def Create_list():
    """Scan the working directory for files ending in 'openplc' and
    register them in the global Monitoredlist."""
    matched = 0
    for entry in os.listdir(os.getcwd()):
        if entry.endswith("openplc"):
            Monitoredlist.append(entry)
            matched += 1
    if matched == 0:
        print("No Files are being monitored!")
    else:
        print("The files being monitored are as follows")
        print(Monitoredlist)
# ---------------------------------------------------------------------------------------------------
# This is the Hasher module that creates the hash for the files and maintains a table of the file
# hashes
# ---------------------------------------------------------------------------------------------------
def Hasher():
    """Recompute the SHA-1 digest of every monitored file.

    Rebuilds the global ``list_create`` as an interleaved table of
    [name, hexdigest, name, hexdigest, ...] for the files listed in
    ``Monitoredlist``, reading each file in 64 KiB chunks.
    """
    BLOCKSIZE = 65536
    del list_create[:]
    for name in Monitoredlist:
        # BUG FIX: a single hasher object was previously shared across
        # all files, so each recorded digest was a running hash of every
        # file processed so far. Each file now gets its own hasher so the
        # digest identifies exactly which file changed.
        hasher = hashlib.sha1()
        list_create.append(name)
        with open(name, 'rb') as afile:
            buf = afile.read(BLOCKSIZE)
            while len(buf) > 0:
                hasher.update(buf)
                buf = afile.read(BLOCKSIZE)
        list_create.append(hasher.hexdigest())
    #print(list_create)
#print(list_create)
# --------------------------------------------------------------------------------------------------
# This Function records the hash of the files being monitored to a text file. This should only be
# called when the program is being executed for the first time
# --------------------------------------------------------------------------------------------------
def Create_record():
    """Persist the current name/hash table to Details/Record.txt,
    creating the Details directory on first use."""
    start_dir = os.getcwd()
    record_dir = start_dir + '/Details'
    if not os.path.exists(record_dir):
        os.makedirs(record_dir)
    os.chdir(record_dir)
    out = open('Record.txt',"w")
    for entry in list_create:
        out.write("%s\n" % entry)
    out.close()
    os.chdir(start_dir)
# --------------------------------------------------------------------------------------------------
# This module parses the stored hashes and stores them into a fresh python list
# --------------------------------------------------------------------------------------------------
def Read_hash():
    """Compare the stored hash table against the freshly computed one.

    Reads Details/Record.txt (written by Create_record), strips the
    trailing newline from each line, and calls Response(1) when the
    stored table differs from the in-memory ``list_create``,
    Response(0) otherwise.
    """
    progpath = os.getcwd()
    dirpath = progpath + '/Details'
    os.chdir(dirpath)
    # BUG FIX: the file handle was previously never closed; the local
    # ``list_compare`` rebinding also shadowed the module global to no
    # effect and has been dropped.
    with open('Record.txt', 'r') as record:
        stored = [x[:-1] for x in record.readlines()]
    os.chdir(progpath)
    if stored == list_create:
        Response(0)
    else:
        Response(1)
# --------------------------------------------------------------------------------------------------
# Once the change is detected this module is used to respond to the threat
# flag ->>>> 1 Change is detected
# flag ->>>> 0 No change
# --------------------------------------------------------------------------------------------------
def Response(flag):
    """Report the integrity-check outcome.

    flag == 1 means a monitored file changed; anything else is clean.
    """
    tampered = (flag == 1)
    if tampered:
        print("Ladder Logic Tampered")
        # Launch recovery routine (not yet implemented).
    else:
        print("Ladder Logic is Secure")
# --------------------------------------------------------------------------------------------------
# The main Function
# --------------------------------------------------------------------------------------------------
def main():
    """Build the monitored-file table, record the baseline hashes,
    then poll forever for tampering."""
    Create_list()
    Hasher()
    print(list_create)
    Create_record()
    Read_hash()  # First call with 0 argument
    while(1):
        # Recompute and compare on every loop iteration (busy loop).
        Hasher()
        Read_hash()  # Next calls are all performed by argument
# 1. Create the folder for storing the new file->Done
# 2. Module to compare the files with a new file->Done
# 3. Module to backup the ladder logics
# 4. Module to restore the ladder logic
# 5. Reporting unit->Done
# 6. Push code to GitHub->Done
if __name__ == "__main__": main()
|
4,177 | 1df1081308ead28c023774a8671df8a0671a1bba | from Song import Song
class FroggyWoogie(Song):
    # Playlist entry for "Sleepy Koala - Froggy Woogie".
    def __init__(self):
        super(FroggyWoogie, self).__init__()
        # Path to the audio file relative to the application root.
        self.file = 'Music/5-Sleepy_Koala_-_Froggy_Woogie.mp3'
        # Section plan: list of [start_seconds, count, code, duration_seconds]
        # entries. NOTE(review): field meanings inferred from the data shape;
        # confirm against the Song base class before relying on them.
        self.plan = [[0.0, 32, 'W', 16.271],[16.271, 16, 'S', 8.135],[24.406, 44, 'S', 22.373], [46.779, 16, 'S', 8.136], [54.915, 18, 'S', 1.017], [55.932, 36, 'S', 18.305], [74.237, 14, 'S', 7.118], [81.355, 32, 'W', 16.293],[97.648, 32, 'S', 16.25], [113.898, 32, 'S', 16.271], [130.169, 32, 'S', 16.271], [146.44, 64, 'S', 32.532], [178.972, 32, 'S', 16.282], [195.254, 32, 'S', 16.271],[211.525, 32, 'W', 16.271], [227.796, 32, 'W', 16.271], [244.067, 32, 'W', 16.271], [260.338, 32, 'W', 16.272], [276.61, 32, 'W', 16.271], [292.881, 32, 'S', 16.271], [309.152, 32, 'S', 16.271], [325.423, 36, 'S', 18.305], [343.728, 32, 'W', 34.577]]
|
4,178 | 7eeba06e78bd1e7139b1706574c4d040465d4566 | import matplotlib.pyplot as plt
# Demo: two lines on one set of axes with a legend.
xs = [1, 2, 3, 4, 5]
identity = [1, 2, 3, 4, 5]
squares = [1, 4, 9, 16, 25]
plt.plot(xs, identity, 'go-', label='line 1', linewidth=2)
plt.plot(xs, squares, 'rs--', label='line 2', linewidth=4)
# Fix the view box so both lines fit with a margin.
plt.axis([0, 6, 0, 26])
plt.legend(loc="upper right")
plt.show()
|
4,179 | 32e904a39d03d3166369420b49db0b9b118110a3 | import hashlib
import json
import logging
import os
import urllib.parse
import uuid
from datetime import datetime
import pytz
from celery import states as celery_states
from django.conf import settings
from django.contrib.auth.base_user import AbstractBaseUser
from django.contrib.auth.base_user import BaseUserManager
from django.contrib.auth.models import PermissionsMixin
from django.contrib.sessions.models import Session
from django.core.cache import cache
from django.core.exceptions import MultipleObjectsReturned
from django.core.exceptions import ObjectDoesNotExist
from django.core.exceptions import PermissionDenied
from django.core.exceptions import ValidationError
from django.core.files.storage import default_storage
from django.core.files.storage import FileSystemStorage
from django.core.mail import send_mail
from django.core.validators import MaxValueValidator
from django.core.validators import MinValueValidator
from django.db import IntegrityError
from django.db import models
from django.db.models import Count
from django.db.models import Exists
from django.db.models import F
from django.db.models import Index
from django.db.models import JSONField
from django.db.models import Max
from django.db.models import OuterRef
from django.db.models import Q
from django.db.models import Subquery
from django.db.models import Sum
from django.db.models import UUIDField as DjangoUUIDField
from django.db.models import Value
from django.db.models.expressions import ExpressionList
from django.db.models.expressions import RawSQL
from django.db.models.functions import Lower
from django.db.models.indexes import IndexExpression
from django.db.models.query_utils import DeferredAttribute
from django.db.models.sql import Query
from django.dispatch import receiver
from django.utils import timezone
from django.utils.translation import gettext as _
from django_celery_results.models import TaskResult
from django_cte import With
from le_utils import proquint
from le_utils.constants import content_kinds
from le_utils.constants import exercises
from le_utils.constants import file_formats
from le_utils.constants import format_presets
from le_utils.constants import languages
from le_utils.constants import roles
from model_utils import FieldTracker
from mptt.models import MPTTModel
from mptt.models import raise_if_unsaved
from mptt.models import TreeForeignKey
from postmark.core import PMMailInactiveRecipientException
from postmark.core import PMMailUnauthorizedException
from rest_framework.authtoken.models import Token
from rest_framework.fields import get_attribute
from rest_framework.utils.encoders import JSONEncoder
from contentcuration.constants import channel_history
from contentcuration.constants import completion_criteria
from contentcuration.constants import user_history
from contentcuration.constants.contentnode import kind_activity_map
from contentcuration.db.models.expressions import Array
from contentcuration.db.models.functions import ArrayRemove
from contentcuration.db.models.functions import Unnest
from contentcuration.db.models.manager import CustomContentNodeTreeManager
from contentcuration.db.models.manager import CustomManager
from contentcuration.statistics import record_channel_stats
from contentcuration.utils.cache import delete_public_channel_cache_keys
from contentcuration.utils.parser import load_json_string
from contentcuration.viewsets.sync.constants import ALL_CHANGES
from contentcuration.viewsets.sync.constants import ALL_TABLES
# Channel access levels.
EDIT_ACCESS = "edit"
VIEW_ACCESS = "view"

# Baseline per-content defaults applied when a user has not saved their
# own preferences (serialized form stored on User.preferences below).
DEFAULT_CONTENT_DEFAULTS = {
    'license': None,
    'language': None,
    'author': None,
    'aggregator': None,
    'provider': None,
    'copyright_holder': None,
    'license_description': None,
    'mastery_model': exercises.NUM_CORRECT_IN_A_ROW_5,
    'm_value': 5,
    'n_value': 5,
    'auto_derive_video_thumbnail': True,
    'auto_derive_audio_thumbnail': True,
    'auto_derive_document_thumbnail': True,
    'auto_derive_html5_thumbnail': True,
    'auto_derive_exercise_thumbnail': True,
    'auto_randomize_questions': True,
}
# JSON-serialized defaults, used as the User.preferences field default.
DEFAULT_USER_PREFERENCES = json.dumps(DEFAULT_CONTENT_DEFAULTS, ensure_ascii=False)
def to_pk(model_or_pk):
    """Return the primary key of a model instance, or echo back a value
    that is already a primary key."""
    return model_or_pk.pk if isinstance(model_or_pk, models.Model) else model_or_pk
class UserManager(BaseUserManager):
    """Manager that creates users keyed by e-mail address."""

    def create_user(self, email, first_name, last_name, password=None):
        """Create and persist a regular user; email is mandatory."""
        if not email:
            raise ValueError('Email address not specified')
        user = self.model(email=self.normalize_email(email))
        user.set_password(password)
        user.first_name = first_name
        user.last_name = last_name
        user.save(using=self._db)
        return user

    def create_superuser(self, email, first_name, last_name, password=None):
        """Create a regular user, then promote it to admin."""
        user = self.create_user(email, first_name, last_name, password=password)
        user.is_admin = True
        user.save(using=self._db)
        return user
class UniqueActiveUserIndex(Index):
    def create_sql(self, model, schema_editor, using='', **kwargs):
        """
        This is a vendored and modified version of the Django create_sql method
        We do this so that we can monkey patch in the unique index statement onto the schema_editor
        while we create the statement for this index, and then revert it to normal.
        We should remove this as soon as Django natively supports UniqueConstraints with Expressions.
        This should hopefully be the case in Django 3.3.
        """
        include = [model._meta.get_field(field_name).column for field_name in self.include]
        condition = self._get_condition_sql(model, schema_editor)
        # Expression-based index (e.g. Lower('email')) vs plain field index.
        if self.expressions:
            index_expressions = []
            for expression in self.expressions:
                index_expression = IndexExpression(expression)
                index_expression.set_wrapper_classes(schema_editor.connection)
                index_expressions.append(index_expression)
            expressions = ExpressionList(*index_expressions).resolve_expression(
                Query(model, alias_cols=False),
            )
            fields = None
            col_suffixes = None
        else:
            fields = [
                model._meta.get_field(field_name)
                for field_name, _ in self.fields_orders
            ]
            col_suffixes = [order[1] for order in self.fields_orders]
            expressions = None
        sql = "CREATE UNIQUE INDEX %(name)s ON %(table)s (%(columns)s)%(include)s%(condition)s"
        # Store the normal SQL statement for indexes
        old_create_index_sql = schema_editor.sql_create_index
        # Replace it with our own unique index so that this index actually adds a constraint
        schema_editor.sql_create_index = sql
        # Generate the SQL statement that we want to return
        return_statement = schema_editor._create_index_sql(
            model, fields=fields, name=self.name, using=using,
            db_tablespace=self.db_tablespace, col_suffixes=col_suffixes,
            opclasses=self.opclasses, condition=condition, include=include,
            expressions=expressions, **kwargs,
        )
        # Reinstate the previous index SQL statement so that we have done no harm
        schema_editor.sql_create_index = old_create_index_sql
        # Return our SQL statement
        return return_statement
class User(AbstractBaseUser, PermissionsMixin):
    """Studio user account, authenticated by e-mail address.

    Tracks per-user storage quota and usage, supports soft deletion with
    recovery, and owns a clipboard ContentNode tree for copy/paste.
    """
    email = models.EmailField(max_length=100, unique=True)
    first_name = models.CharField(max_length=100)
    last_name = models.CharField(max_length=100)
    is_admin = models.BooleanField(default=False)
    is_active = models.BooleanField('active', default=False,
                                    help_text='Designates whether this user should be treated as active.')
    is_staff = models.BooleanField('staff status', default=False,
                                   help_text='Designates whether the user can log into this admin site.')
    date_joined = models.DateTimeField('date joined', default=timezone.now)
    clipboard_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='user_clipboard', on_delete=models.SET_NULL)
    preferences = models.TextField(default=DEFAULT_USER_PREFERENCES)
    disk_space = models.FloatField(default=524288000, help_text='How many bytes a user can upload')
    disk_space_used = models.FloatField(default=0, help_text='How many bytes a user has uploaded')
    information = JSONField(null=True)
    content_defaults = JSONField(default=dict)
    policies = JSONField(default=dict, null=True)
    feature_flags = JSONField(default=dict, null=True)
    # Soft-deletion flag; see delete()/recover() below.
    deleted = models.BooleanField(default=False, db_index=True)
    _field_updates = FieldTracker(fields=[
        # Field to watch for changes
        "disk_space",
    ])
    objects = UserManager()
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['first_name', 'last_name']

    def __unicode__(self):
        """Display users by their e-mail address."""
        return self.email

    def delete(self):
        """
        Soft deletes the user account.
        """
        self.deleted = True
        # Deactivate the user to disallow authentication and also
        # to let the user verify the email again after recovery.
        self.is_active = False
        self.save()
        self.history.create(user_id=self.pk, action=user_history.DELETION)

    def recover(self):
        """
        Use this method when we want to recover a user.
        """
        self.deleted = False
        self.save()
        self.history.create(user_id=self.pk, action=user_history.RECOVERY)

    def hard_delete_user_related_data(self):
        """
        Hard delete all user related data. But keeps the user record itself intact.
        User related data that gets hard deleted are:
            - sole editor non-public channels.
            - sole editor non-public channelsets.
            - sole editor non-public channels' content nodes and its underlying files that are not
            used by any other channel.
            - all user invitations.
        """
        from contentcuration.viewsets.common import SQCount
        # Hard delete invitations associated to this account.
        self.sent_to.all().delete()
        self.sent_by.all().delete()
        editable_channels_user_query = (
            User.objects.filter(editable_channels__id=OuterRef('id'))
            .values_list('id', flat=True)
            .distinct()
        )
        non_public_channels_sole_editor = self.editable_channels.annotate(num_editors=SQCount(
            editable_channels_user_query, field="id")).filter(num_editors=1, public=False)
        # Point sole editor non-public channels' contentnodes to orphan tree to let
        # our garbage collection delete the nodes and underlying files.
        ContentNode._annotate_channel_id(ContentNode.objects).filter(channel_id__in=list(
            non_public_channels_sole_editor.values_list("id", flat=True))).update(parent_id=settings.ORPHANAGE_ROOT_ID)
        # Hard delete non-public channels associated with this user (if user is the only editor).
        non_public_channels_sole_editor.delete()
        # Hard delete non-public channel collections associated with this user (if user is the only editor).
        user_query = (
            User.objects.filter(channel_sets__id=OuterRef('id'))
            .values_list('id', flat=True)
            .distinct()
        )
        self.channel_sets.annotate(num_editors=SQCount(user_query, field="id")).filter(num_editors=1, public=False).delete()
        # Create history!
        self.history.create(user_id=self.pk, action=user_history.RELATED_DATA_HARD_DELETION)

    def can_edit(self, channel_id):
        """Return True when this user has edit rights on the channel."""
        return Channel.filter_edit_queryset(Channel.objects.all(), self).filter(pk=channel_id).exists()

    def check_space(self, size, checksum):
        """Raise PermissionDenied when uploading `size` bytes would exceed
        the user's quota; a file already owned (same checksum) is free."""
        if self.is_admin:
            return True
        active_files = self.get_user_active_files()
        if active_files.filter(checksum=checksum).exists():
            return True
        space = self.get_available_space(active_files=active_files)
        if space < size:
            raise PermissionDenied(_("Not enough space. Check your storage under Settings page."))

    def check_channel_space(self, channel):
        """Raise PermissionDenied when deploying the channel's staging tree
        would exceed the user's quota (only checksums not already owned count)."""
        active_files = self.get_user_active_files()
        staging_tree_id = channel.staging_tree.tree_id
        channel_files = self.files\
            .filter(contentnode__tree_id=staging_tree_id)\
            .values('checksum')\
            .distinct()\
            .exclude(checksum__in=active_files.values_list('checksum', flat=True))
        staged_size = float(channel_files.aggregate(used=Sum('file_size'))['used'] or 0)
        if self.get_available_space(active_files=active_files) < (staged_size):
            raise PermissionDenied(_('Out of storage! Request more space under Settings > Storage.'))

    def check_staged_space(self, size, checksum):
        """Like check_space but against the user's staged files."""
        if self.staged_files.filter(checksum=checksum).exists():
            return True
        space = self.get_available_staged_space()
        if space < size:
            raise PermissionDenied(_('Out of storage! Request more space under Settings > Storage.'))

    def get_available_staged_space(self):
        """Remaining quota (bytes) counting staged files, never negative."""
        space_used = self.staged_files.values('checksum').distinct().aggregate(size=Sum("file_size"))['size'] or 0
        return float(max(self.disk_space - space_used, 0))

    def get_available_space(self, active_files=None):
        """Remaining quota (bytes) counting active files, never negative."""
        return float(max(self.disk_space - self.get_space_used(active_files=active_files), 0))

    def get_user_active_trees(self):
        """Tree ids of the main trees of the user's non-deleted channels."""
        return self.editable_channels.exclude(deleted=True)\
            .values(tree_id=F("main_tree__tree_id"))

    def get_user_active_files(self):
        """Distinct checksums of this user's files that live in active trees."""
        cte = With(self.get_user_active_trees().distinct())
        return cte.join(self.files.get_queryset(), contentnode__tree_id=cte.col.tree_id)\
            .with_cte(cte)\
            .values('checksum')\
            .distinct()

    def get_space_used(self, active_files=None):
        """Total bytes used by the user's active files."""
        active_files = active_files or self.get_user_active_files()
        files = active_files.aggregate(total_used=Sum('file_size'))
        return float(files['total_used'] or 0)

    def set_space_used(self):
        """Recompute and persist disk_space_used; returns the new value."""
        self.disk_space_used = self.get_space_used()
        self.save()
        return self.disk_space_used

    def get_space_used_by_kind(self):
        """Map of preset kind id -> bytes used by the user's active files."""
        active_files = self.get_user_active_files()
        files = active_files.values('preset__kind_id')\
            .annotate(space=Sum('file_size'))\
            .order_by()
        kind_dict = {}
        for item in files:
            kind_dict[item['preset__kind_id']] = item['space']
        return kind_dict

    def email_user(self, subject, message, from_email=None, **kwargs):
        """Send an e-mail to this user, swallowing (but logging) Postmark
        inactive-recipient/unauthorized errors."""
        try:
            # msg = EmailMultiAlternatives(subject, message, from_email, [self.email])
            # msg.attach_alternative(kwargs["html_message"],"text/html")
            # msg.send()
            send_mail(subject, message, from_email, [self.email], **kwargs)
        except (PMMailInactiveRecipientException, PMMailUnauthorizedException) as e:
            logging.error(str(e))

    def clean(self):
        """Normalize the e-mail address on validation."""
        super(User, self).clean()
        self.email = self.__class__.objects.normalize_email(self.email)

    def get_full_name(self):
        """
        Returns the first_name plus the last_name, with a space in between.
        """
        full_name = '%s %s' % (self.first_name, self.last_name)
        return full_name.strip()

    def get_short_name(self):
        """
        Returns the short name for the user.
        """
        return self.first_name

    def get_token(self):
        """Return (creating if needed) the user's DRF auth token key."""
        token, _ = Token.objects.get_or_create(user=self)
        return token.key

    def save(self, *args, **kwargs):
        """Persist the user, recalculating storage when disk_space changed
        and lazily creating defaults (content_defaults, clipboard_tree).

        When defaults were filled in, save() is called a second time to
        persist them.
        """
        from contentcuration.utils.user import calculate_user_storage
        super(User, self).save(*args, **kwargs)
        if 'disk_space' in self._field_updates.changed():
            calculate_user_storage(self.pk)
        changed = False
        if not self.content_defaults:
            self.content_defaults = DEFAULT_CONTENT_DEFAULTS
            changed = True
        if not self.clipboard_tree:
            self.clipboard_tree = ContentNode.objects.create(title=self.email + " clipboard", kind_id=content_kinds.TOPIC)
            self.clipboard_tree.save()
            changed = True
        if changed:
            self.save()

    class Meta:
        verbose_name = "User"
        verbose_name_plural = "Users"
        indexes = [
            UniqueActiveUserIndex(Lower('email'), condition=Q(is_active=True), name="contentcura_email_d4d492_idx")
        ]

    @classmethod
    def filter_view_queryset(cls, queryset, user):
        """Restrict `queryset` to users visible to `user`: themselves plus
        anyone sharing an editable or view-only channel; admins see all."""
        if user.is_anonymous:
            return queryset.none()
        if user.is_admin:
            return queryset
        # all shared editors
        all_editable = User.editable_channels.through.objects.all()
        editable = all_editable.filter(
            channel_id__in=all_editable.filter(user_id=user.pk).values_list("channel_id", flat=True)
        )
        # all shared viewers
        all_view_only = User.view_only_channels.through.objects.all()
        view_only = all_view_only.filter(
            channel_id__in=all_view_only.filter(user_id=user.pk).values_list("channel_id", flat=True)
        )
        return queryset.filter(
            Q(pk=user.pk)
            | Q(pk__in=editable.values_list("user_id", flat=True))
            | Q(pk__in=view_only.values_list("user_id", flat=True))
        )

    @classmethod
    def filter_edit_queryset(cls, queryset, user):
        """Restrict `queryset` to users editable by `user` (only themselves,
        unless admin)."""
        if user.is_anonymous:
            return queryset.none()
        if user.is_admin:
            return queryset
        return queryset.filter(pk=user.pk)

    @classmethod
    def get_for_email(cls, email, deleted=False, **filters):
        """
        Returns the appropriate User record given an email, ordered by:
        - those with is_active=True first, which there should only ever be one
        - otherwise by ID DESC so most recent inactive shoud be returned

        Filters out deleted User records by default. To include both deleted and
        undeleted user records pass None to the deleted argument.

        :param email: A string of the user's email
        :param filters: Additional filters to filter the User queryset
        :return: User or None
        """
        user_qs = User.objects.filter(email__iexact=email.strip())
        if deleted is not None:
            user_qs = user_qs.filter(deleted=deleted)
        return user_qs.filter(**filters).order_by("-is_active", "-id").first()
class UUIDField(models.CharField):
    """
    CharField that stores UUID values as 32-character hex strings.

    Accepts either uuid.UUID instances or plain values; UUIDs are always
    normalized to their hex form.
    """
    def __init__(self, *args, **kwargs):
        # The hex form of a UUID is always exactly 32 characters.
        kwargs['max_length'] = 32
        super(UUIDField, self).__init__(*args, **kwargs)
    def prepare_value(self, value):
        """Normalize a UUID instance to its hex string; pass other values through."""
        return value.hex if isinstance(value, uuid.UUID) else value
    def get_default(self):
        """Return the field default, normalized to a hex string when it is a UUID."""
        default = super(UUIDField, self).get_default()
        return default.hex if isinstance(default, uuid.UUID) else default
    def to_python(self, value):
        """Convert incoming values to the canonical hex string form."""
        return value.hex if isinstance(value, uuid.UUID) else value
class MPTTTreeIDManager(models.Model):
    """
    Because MPTT uses plain integers for tree IDs and does not use an auto-incrementing field for them,
    the same ID can sometimes be assigned to two trees if two channel create ops happen concurrently.
    As we are using this table only for the ID generation, it does not need any fields.
    We resolve this by creating a dummy table and using its ID as the tree index to take advantage of the db's
    concurrency-friendly way of generating sequential integer IDs. There is a custom migration that ensures
    that the number of records (and thus id) matches the max tree ID number when this table gets added.
    """
def file_on_disk_name(instance, filename):
    """
    Build a namespaced on-disk file path from the File object's checksum.
    This path will be used to store the content copy.
    :param instance: File (content File model)
    :param filename: str
    :return: str
    """
    return generate_file_on_disk_name(instance.checksum, filename)
def generate_file_on_disk_name(checksum, filename):
    """
    Build the on-disk storage path for a file identified by its checksum.

    Separated from file_on_disk_name to allow a simple way to check whether a
    file already exists. Files are sharded into directories keyed by the first
    two characters of the checksum.

    :param checksum: str hex digest identifying the file contents
    :param filename: str original filename (only its extension is used)
    :return: str path under settings.STORAGE_ROOT
    """
    h = checksum
    _, ext = os.path.splitext(filename)
    directory = os.path.join(settings.STORAGE_ROOT, h[0], h[1])
    # exist_ok avoids the TOCTOU race the previous exists()/makedirs() pair had
    # when two processes stored files with the same checksum prefix concurrently.
    os.makedirs(directory, exist_ok=True)
    return os.path.join(directory, h + ext.lower())
def object_storage_name(instance, filename):
    """
    Build a namespaced object-storage path from the File object's checksum.
    This path will be used to store the content copy. When the filename has no
    extension, the instance's file_format_id supplies one.
    :param instance: File (content File model)
    :param filename: str
    :return: str
    """
    if instance.file_format_id:
        fallback_ext = '.{}'.format(instance.file_format_id)
    else:
        fallback_ext = ''
    return generate_object_storage_name(instance.checksum, filename, fallback_ext)
def generate_object_storage_name(checksum, filename, default_ext=''):
    """
    Build the object-storage path for a file identified by its checksum.

    Separated from file_on_disk_name to allow a simple way to check whether a
    file already exists.

    :param checksum: str hex digest identifying the file contents
    :param filename: str original filename (only its extension is used)
    :param default_ext: str extension (with leading dot) used when filename has none
    :return: str storage path
    """
    h = checksum
    _, actual_ext = os.path.splitext(filename)
    ext = actual_ext or default_ext
    # Use / instead of os.path.join as Windows makes this \\
    directory = "/".join([settings.STORAGE_ROOT, h[0], h[1]])
    return os.path.join(directory, h + ext.lower())
def generate_storage_url(filename, request=None, *args):
    """
    Generate a storage URL for the given content filename.

    :param filename: str content filename; the name (sans extension) is used as the checksum
    :param request: unused here; kept for call-site compatibility
    :return: str URL from which the stored file can be fetched

    NOTE(review): `url` is only assigned when RUN_MODE is "k8s", "docker-compose"
    or unset; any other RUN_MODE value raises UnboundLocalError — confirm whether
    a production fallback branch is missing here.
    """
    path = generate_object_storage_name(os.path.splitext(filename)[0], filename)
    # There are three scenarios where Studio might be run as:
    #
    # 1. In normal kubernetes, nginx will proxy for us. We'll know we're in kubernetes when the
    # environment variable RUN_MODE=k8s
    #
    # 2. In Docker Compose and bare metal runserver, we'll be running in runserver, and minio
    # will be exposed in port 9000 in the host's localhost network.
    # Note (aron): returning the true storage URL (e.g. https://storage.googleapis.com/storage/a.mp4)
    # isn't too important, because we have CDN in front of our servers, so it should be cached.
    # But change the logic here in case there is a potential for bandwidth and latency improvement.
    # Detect our current state first
    run_mode = os.getenv("RUN_MODE")
    # if we're running inside k8s, then just serve the normal /content/{storage,databases} URL,
    # and let nginx handle proper proxying.
    if run_mode == "k8s":
        url = "/content/{path}".format(
            path=path,
        )
    # if we're in docker-compose or in baremetal, just return the object storage URL as localhost:9000
    elif run_mode == "docker-compose" or run_mode is None:
        # generate the minio storage URL, so we can get the GET parameters that give everyone
        # access even if they don't need to log in
        params = urllib.parse.urlparse(default_storage.url(path)).query
        host = "localhost"
        port = 9000  # hardcoded to the default minio IP address
        url = "http://{host}:{port}/{bucket}/{path}?{params}".format(
            host=host,
            port=port,
            bucket=settings.AWS_S3_BUCKET_NAME,
            path=path,
            params=params,
        )
    return url
class FileOnDiskStorage(FileSystemStorage):
    """
    Override FileSystemStorage's default save method to ignore duplicated files.
    A name collision is treated as the same content already being stored, so
    the write is skipped.
    """
    def get_available_name(self, name):
        # Never rename on collision; the caller's name is used as-is.
        return name
    def _save(self, name, content):
        """Save content under name, skipping the write when it already exists."""
        if self.exists(name):
            # if the file exists, do not call the superclass's _save method
            # logging.warn is deprecated; warning() is the supported spelling.
            logging.warning('Content copy "%s" already exists!' % name)
            return name
        return super(FileOnDiskStorage, self)._save(name, content)
class SecretToken(models.Model):
    """Secret tokens used to refer to channels (e.g. to download their content databases)."""
    token = models.CharField(max_length=100, unique=True)
    is_primary = models.BooleanField(default=False)
    @classmethod
    def exists(cls, token):
        """
        Return true when the token string given by string already exists.
        Returns false otherwise.
        """
        return cls.objects.filter(token=token).exists()
    @classmethod
    def generate_new_token(cls):
        """
        Generate and return a new unique proquint token string.

        (Note: this only generates the string — the SecretToken rows themselves
        are created by callers such as Channel.make_token.)

        :raises ValueError: if a unique token cannot be found within 100 attempts
        :return: str unique proquint token
        """
        # Try 100 times to generate a unique token. The redundant pre-loop
        # generate() call was removed; the first loop iteration covers it.
        TRIALS = 100
        for __ in range(TRIALS):
            token = proquint.generate()
            if SecretToken.exists(token):
                continue
            break
        # after TRIALS attempts and we didn't get a unique token,
        # just raise an error.
        # See https://stackoverflow.com/a/9980160 on what for-else loop does.
        else:
            raise ValueError("Cannot generate new token")
        # We found a unique token! Return it.
        return token
    def __str__(self):
        return "{}-{}".format(self.token[:5], self.token[5:])
def get_channel_thumbnail(channel):
    """
    Return the thumbnail for a channel (model instance or dict of its fields):
    inline base64 data when available, else a storage URL for a non-static
    thumbnail file, else the static placeholder image.
    """
    if not isinstance(channel, dict):
        channel = channel.__dict__
    encoding = channel.get("thumbnail_encoding")
    if encoding and encoding.get("base64"):
        return encoding["base64"]
    thumbnail = channel.get("thumbnail")
    if thumbnail and 'static' not in thumbnail:
        return generate_storage_url(thumbnail)
    return '/static/img/kolibri_placeholder.png'
# Explicit name of the index on Channel.name (see Channel.Meta below).
CHANNEL_NAME_INDEX_NAME = "channel_name_idx"
# A list of all the FKs from Channel object
# to ContentNode trees
# used for permissions filtering
CHANNEL_TREES = (
    "main_tree",
    "chef_tree",
    "trash_tree",
    "staging_tree",
    "previous_tree",
)
def boolean_val(val):
    # Wrap a Python bool as a constant boolean SQL expression for annotate().
    return Value(val, output_field=models.BooleanField())
class PermissionCTE(With):
    """
    Common table expression of (user_id, channel_id, tree_id) rows describing a
    user's channel permissions — one row per non-null channel tree.
    """
    # One lookup path per channel tree FK, e.g. "channel__main_tree__tree_id".
    tree_id_fields = [
        "channel__{}__tree_id".format(tree_name)
        for tree_name in CHANNEL_TREES
    ]
    def __init__(self, model, user_id, **kwargs):
        # Expand each permission row into one row per tree: Array collects the
        # five tree ids, ArrayRemove drops NULLs (unset trees), and Unnest turns
        # the remaining array entries back into rows.
        queryset = model.objects.filter(user_id=user_id)\
            .annotate(
                tree_id=Unnest(ArrayRemove(Array(*self.tree_id_fields), None), output_field=models.IntegerField())
            )
        super(PermissionCTE, self).__init__(queryset=queryset.values("user_id", "channel_id", "tree_id"), **kwargs)
    @classmethod
    def editable_channels(cls, user_id):
        """CTE over the channels the user can edit."""
        return PermissionCTE(User.editable_channels.through, user_id, name="editable_channels_cte")
    @classmethod
    def view_only_channels(cls, user_id):
        """CTE over the channels the user can only view."""
        return PermissionCTE(User.view_only_channels.through, user_id, name="view_only_channels_cte")
    def exists(self, *filters):
        """Return an Exists() subquery testing membership in this CTE under *filters."""
        return Exists(self.queryset().filter(*filters).values("user_id"))
class Channel(models.Model):
    """
    A Studio channel: its content trees plus the users who may edit/view it.
    (Permissions come from association with organizations.)
    """
    id = UUIDField(primary_key=True, default=uuid.uuid4)
    name = models.CharField(max_length=200, blank=True)
    description = models.CharField(max_length=400, blank=True)
    tagline = models.CharField(max_length=150, blank=True, null=True)
    version = models.IntegerField(default=0)
    thumbnail = models.TextField(blank=True, null=True)
    thumbnail_encoding = JSONField(default=dict)
    editors = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='editable_channels',
        verbose_name="editors",
        help_text="Users with edit rights",
        blank=True,
    )
    viewers = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='view_only_channels',
        verbose_name="viewers",
        help_text="Users with view only rights",
        blank=True,
    )
    language = models.ForeignKey('Language', null=True, blank=True, related_name='channel_language', on_delete=models.SET_NULL)
    # The ContentNode trees owned by this channel (see CHANNEL_TREES above).
    trash_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_trash', on_delete=models.SET_NULL)
    clipboard_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_clipboard', on_delete=models.SET_NULL)
    main_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_main', on_delete=models.SET_NULL)
    staging_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_staging', on_delete=models.SET_NULL)
    chef_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_chef', on_delete=models.SET_NULL)
    previous_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_previous', on_delete=models.SET_NULL)
    bookmarked_by = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='bookmarked_channels',
        verbose_name="bookmarked by",
    )
    deleted = models.BooleanField(default=False, db_index=True)
    public = models.BooleanField(default=False, db_index=True)
    preferences = models.TextField(default=DEFAULT_USER_PREFERENCES)
    content_defaults = JSONField(default=dict)
    priority = models.IntegerField(default=0, help_text="Order to display public channels")
    last_published = models.DateTimeField(blank=True, null=True)
    secret_tokens = models.ManyToManyField(
        SecretToken,
        related_name='channels',
        verbose_name="secret tokens",
        blank=True,
    )
    source_url = models.CharField(max_length=200, blank=True, null=True)
    demo_server_url = models.CharField(max_length=200, blank=True, null=True)
    # Fields specific to content generated by Ricecooker
    source_id = models.CharField(max_length=200, blank=True, null=True)
    source_domain = models.CharField(max_length=300, blank=True, null=True)
    ricecooker_version = models.CharField(max_length=100, blank=True, null=True)
    # Fields to calculate when channel is published
    published_data = JSONField(default=dict)
    icon_encoding = models.TextField(blank=True, null=True)
    total_resource_count = models.IntegerField(default=0)
    published_kind_count = models.TextField(blank=True, null=True)
    published_size = models.FloatField(default=0)
    included_languages = models.ManyToManyField(
        "Language",
        related_name='channels',
        verbose_name="languages",
        blank=True,
    )
    _field_updates = FieldTracker(fields=[
        # Field to watch for changes
        "description",
        "language_id",
        "thumbnail",
        "name",
        "thumbnail_encoding",
        # watch these fields for changes
        # but exclude them from setting changed
        # on the main tree
        "deleted",
        "public",
        "main_tree_id",
        "version",
    ])
    @classmethod
    def get_editable(cls, user, channel_id):
        """Return the channel if `user` may edit it; raises DoesNotExist otherwise."""
        return cls.filter_edit_queryset(cls.objects.all(), user).get(id=channel_id)
    @classmethod
    def filter_edit_queryset(cls, queryset, user):
        """Restrict queryset to channels `user` can edit, annotating `edit`."""
        user_id = not user.is_anonymous and user.id
        # anonymous users can edit nothing
        if not user_id:
            return queryset.none()
        edit = Exists(User.editable_channels.through.objects.filter(user_id=user_id, channel_id=OuterRef("id")))
        queryset = queryset.annotate(edit=edit)
        if user.is_admin:
            return queryset
        return queryset.filter(edit=True)
    @classmethod
    def filter_view_queryset(cls, queryset, user):
        """
        Restrict queryset to channels `user` may see: editable/view-only
        channels, channels with a pending invitation, and public channels.
        Every row is annotated with `edit` and `view` booleans.
        """
        user_id = not user.is_anonymous and user.id
        user_email = not user.is_anonymous and user.email
        if user_id:
            filters = dict(user_id=user_id, channel_id=OuterRef("id"))
            edit = Exists(User.editable_channels.through.objects.filter(**filters).values("user_id"))
            view = Exists(User.view_only_channels.through.objects.filter(**filters).values("user_id"))
        else:
            edit = boolean_val(False)
            view = boolean_val(False)
        queryset = queryset.annotate(
            edit=edit,
            view=view,
        )
        if user_id and user.is_admin:
            return queryset
        permission_filter = Q()
        if user_id:
            # Channels the user has been invited to but has not yet responded.
            pending_channels = Invitation.objects.filter(email=user_email, revoked=False, declined=False, accepted=False).values_list(
                "channel_id", flat=True
            )
            permission_filter = (
                Q(view=True) | Q(edit=True) | Q(deleted=False, id__in=pending_channels)
            )
        return queryset.filter(permission_filter | Q(deleted=False, public=True))
    @classmethod
    def get_all_channels(cls):
        """Return all channels with main_tree, editors and viewers prefetched."""
        return cls.objects.select_related('main_tree').prefetch_related('editors', 'viewers').distinct()
    def resource_size_key(self):
        """Cache key under which this channel's total resource size is stored."""
        return "{}_resource_size".format(self.pk)
    # Might be good to display resource size, but need to improve query time first
    def get_resource_size(self):
        """Return (and cache indefinitely) the total distinct file size of the main tree."""
        cached_data = cache.get(self.resource_size_key())
        if cached_data:
            return cached_data
        tree_id = self.main_tree.tree_id
        files = File.objects.select_related('contentnode', 'assessment_item')\
            .filter(contentnode__tree_id=tree_id)\
            .values('checksum', 'file_size')\
            .distinct()\
            .aggregate(resource_size=Sum('file_size'))
        cache.set(self.resource_size_key(), files['resource_size'] or 0, None)
        return files['resource_size'] or 0
    def on_create(self):
        """Initialize trees and defaults; runs before the first save of a channel."""
        record_channel_stats(self, None)
        if not self.content_defaults:
            self.content_defaults = DEFAULT_CONTENT_DEFAULTS
        if not self.main_tree:
            # The channel id doubles as the root node's content/node/channel ids.
            self.main_tree = ContentNode.objects.create(
                title=self.name,
                kind_id=content_kinds.TOPIC,
                content_id=self.id,
                node_id=self.id,
                original_channel_id=self.id,
                source_channel_id=self.id,
                changed=True,
                complete=True,
            )
            # Ensure that locust or unit tests raise if there are any concurrency issues with tree ids.
            if settings.DEBUG:
                if ContentNode.objects.filter(parent=None, tree_id=self.main_tree.tree_id).count() != 1:
                    raise AssertionError
        if not self.trash_tree:
            self.trash_tree = ContentNode.objects.create(
                title=self.name,
                kind_id=content_kinds.TOPIC,
                content_id=self.id,
                node_id=self.id,
            )
        # if this change affects the published channel list, clear the channel cache
        if self.public and (self.main_tree and self.main_tree.published):
            delete_public_channel_cache_keys()
    def on_update(self):
        """Propagate metadata changes; runs before saving an existing channel."""
        from contentcuration.utils.user import calculate_user_storage
        # changed() returns a mapping of changed field -> its PREVIOUS value.
        original_values = self._field_updates.changed()
        record_channel_stats(self, original_values)
        # Changes to these fields alone should NOT mark main_tree as changed.
        blacklist = set([
            "public",
            "main_tree_id",
            "version",
        ])
        if self.main_tree and original_values and any((True for field in original_values if field not in blacklist)):
            # Changing channel metadata should also mark main_tree as changed
            self.main_tree.changed = True
        # Check if original thumbnail is no longer referenced
        if "thumbnail" in original_values and original_values["thumbnail"] and 'static' not in original_values["thumbnail"]:
            filename, ext = os.path.splitext(original_values["thumbnail"])
            delete_empty_file_reference(filename, ext[1:])
        # Refresh storage for all editors on the channel
        if "deleted" in original_values:
            for editor in self.editors.all():
                calculate_user_storage(editor.pk)
        # Delete db if channel has been deleted and mark as unpublished
        # (previous value False means the channel is being soft-deleted now)
        if "deleted" in original_values and not original_values["deleted"]:
            self.pending_editors.all().delete()
            export_db_storage_path = os.path.join(settings.DB_ROOT, "{channel_id}.sqlite3".format(channel_id=self.id))
            if default_storage.exists(export_db_storage_path):
                default_storage.delete(export_db_storage_path)
            if self.main_tree:
                self.main_tree.published = False
        if self.main_tree and self.main_tree._field_updates.changed():
            self.main_tree.save()
        # if this change affects the published channel list, clear the channel cache
        if "public" in original_values and (self.main_tree and self.main_tree.published):
            delete_public_channel_cache_keys()
    def save(self, *args, **kwargs):
        """Run the create/update hook, then delegate to Model.save."""
        if self._state.adding:
            self.on_create()
        else:
            self.on_update()
        super(Channel, self).save(*args, **kwargs)
    def get_thumbnail(self):
        """Return thumbnail data/URL for this channel (see get_channel_thumbnail)."""
        return get_channel_thumbnail(self)
    def has_changes(self):
        """True when any node in the main tree (including its root) is marked changed."""
        return self.main_tree.get_descendants(include_self=True).filter(changed=True).exists()
    def get_date_modified(self):
        """Return the most recent `modified` timestamp within the main tree."""
        return self.main_tree.get_descendants(include_self=True).aggregate(last_modified=Max('modified'))['last_modified']
    def get_resource_count(self):
        """Count distinct (by content_id) non-topic resources in the main tree."""
        return self.main_tree.get_descendants().exclude(kind_id=content_kinds.TOPIC).order_by('content_id').distinct('content_id').count()
    def get_human_token(self):
        """Return the primary (human-readable) secret token."""
        return self.secret_tokens.get(is_primary=True)
    def get_channel_id_token(self):
        """Return the secondary secret token whose value is the channel id."""
        return self.secret_tokens.get(token=self.id)
    def make_token(self):
        """Create the primary token plus the channel-id token; return the primary."""
        token = self.secret_tokens.create(token=SecretToken.generate_new_token(), is_primary=True)
        self.secret_tokens.get_or_create(token=self.id)
        return token
    def make_public(self, bypass_signals=False):
        """
        Sets the current channel object to be public and viewable by anyone.
        If bypass_signals is True, update the model in such a way that we
        prevent any model signals from running due to the update.
        Returns the same channel object.
        """
        if bypass_signals:
            self.public = True  # set this attribute still, so the object will be updated
            Channel.objects.filter(id=self.id).update(public=True)
            # clear the channel cache
            delete_public_channel_cache_keys()
        else:
            self.public = True
            self.save()
        return self
    def mark_created(self, user):
        """Record a CREATION event in this channel's history."""
        self.history.create(actor_id=to_pk(user), action=channel_history.CREATION)
    def mark_publishing(self, user):
        """Record a PUBLICATION event and flag the main tree as publishing."""
        self.history.create(actor_id=to_pk(user), action=channel_history.PUBLICATION)
        self.main_tree.publishing = True
        self.main_tree.save()
    def mark_deleted(self, user):
        """Record a DELETION event and soft-delete this channel."""
        self.history.create(actor_id=to_pk(user), action=channel_history.DELETION)
        self.deleted = True
        self.save()
    def mark_recovered(self, user):
        """Record a RECOVERY event and restore this soft-deleted channel."""
        self.history.create(actor_id=to_pk(user), action=channel_history.RECOVERY)
        self.deleted = False
        self.save()
    @property
    def deletion_history(self):
        # All DELETION events recorded for this channel.
        return self.history.filter(action=channel_history.DELETION)
    @property
    def publishing_history(self):
        # All PUBLICATION events recorded for this channel.
        return self.history.filter(action=channel_history.PUBLICATION)
    @classmethod
    def get_public_channels(cls, defer_nonmain_trees=False):
        """
        Get all public channels.
        If defer_nonmain_trees is True, defer the loading of all
        trees except for the main_tree."""
        if defer_nonmain_trees:
            c = (Channel.objects
                 .filter(public=True)
                 .exclude(deleted=True)
                 .select_related('main_tree')
                 .prefetch_related('editors')
                 .defer('trash_tree', 'clipboard_tree', 'staging_tree', 'chef_tree', 'previous_tree', 'viewers'))
        else:
            c = Channel.objects.filter(public=True).exclude(deleted=True)
        return c
    class Meta:
        verbose_name = "Channel"
        verbose_name_plural = "Channels"
        indexes = [
            models.Index(fields=["name"], name=CHANNEL_NAME_INDEX_NAME),
        ]
        # NOTE(review): index_together is deprecated in newer Django versions;
        # consider migrating to Meta.indexes when upgrading.
        index_together = [
            ["deleted", "public"]
        ]
CHANNEL_HISTORY_CHANNEL_INDEX_NAME = "idx_channel_history_channel_id"
class ChannelHistory(models.Model):
    """
    Model for tracking certain actions performed on a channel
    """
    channel = models.ForeignKey('Channel', null=False, blank=False, related_name='history', on_delete=models.CASCADE)
    actor = models.ForeignKey('User', null=False, blank=False, related_name='channel_history', on_delete=models.CASCADE)
    performed = models.DateTimeField(default=timezone.now)
    action = models.CharField(max_length=50, choices=channel_history.choices)
    @classmethod
    def prune(cls):
        """
        Prunes history records by keeping the most recent actions for each channel and type,
        and deleting all other older actions
        """
        # DISTINCT ON (channel_id, action) with "-performed" ordering keeps the
        # newest row per (channel, action) pair (PostgreSQL-specific).
        keep_ids = cls.objects.distinct("channel_id", "action").order_by("channel_id", "action", "-performed").values_list("id", flat=True)
        cls.objects.exclude(id__in=keep_ids).delete()
    class Meta:
        verbose_name = "Channel history"
        verbose_name_plural = "Channel histories"
        indexes = [
            models.Index(fields=["channel_id"], name=CHANNEL_HISTORY_CHANNEL_INDEX_NAME),
        ]
class UserHistory(models.Model):
    """
    Model that stores the user's action history.
    """
    # The user who performed the action.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, null=False, blank=False, related_name="history", on_delete=models.CASCADE)
    # Which action was performed (one of user_history.choices).
    action = models.CharField(max_length=32, choices=user_history.choices)
    performed_at = models.DateTimeField(default=timezone.now)
class ChannelSet(models.Model):
    """A named collection of channels shared via a single secret token."""
    # NOTE: this is referred to as "channel collections" on the front-end, but we need to call it
    # something else as there is already a ChannelCollection model on the front-end
    id = UUIDField(primary_key=True, default=uuid.uuid4)
    name = models.CharField(max_length=200, blank=True)
    description = models.CharField(max_length=400, blank=True)
    public = models.BooleanField(default=False, db_index=True)
    editors = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='channel_sets',
        verbose_name="editors",
        help_text="Users with edit rights",
        blank=True,
    )
    secret_token = models.ForeignKey('SecretToken', null=True, blank=True, related_name='channel_sets', on_delete=models.SET_NULL)
    @classmethod
    def filter_edit_queryset(cls, queryset, user):
        """Restrict queryset to channel sets `user` can edit (all sets for admins)."""
        if user.is_anonymous:
            return queryset.none()
        user_id = not user.is_anonymous and user.id
        edit = Exists(User.channel_sets.through.objects.filter(user_id=user_id, channelset_id=OuterRef("id")))
        queryset = queryset.annotate(edit=edit)
        if user.is_admin:
            return queryset
        return queryset.filter(edit=True)
    @classmethod
    def filter_view_queryset(cls, queryset, user):
        """View rights on channel sets are identical to edit rights."""
        return cls.filter_edit_queryset(queryset, user)
    def get_channels(self):
        """Return non-deleted channels in this set, or None if no token exists yet."""
        if self.secret_token:
            return self.secret_token.channels.filter(deleted=False)
    def save(self, *args, **kwargs):
        """Ensure a secret token exists on first save, then delegate to Model.save."""
        if self._state.adding:
            self.on_create()
        # Bug fix: forward *args/**kwargs (e.g. using=, update_fields=) to
        # Model.save — previously they were silently dropped.
        super(ChannelSet, self).save(*args, **kwargs)
    def on_create(self):
        """Create a dedicated secret token for a brand-new channel set."""
        if not self.secret_token:
            self.secret_token = SecretToken.objects.create(token=SecretToken.generate_new_token())
    def delete(self, *args, **kwargs):
        super(ChannelSet, self).delete(*args, **kwargs)
        # Remove the now-orphaned token after the row itself is gone.
        if self.secret_token:
            self.secret_token.delete()
class ContentTag(models.Model):
    """A free-form content tag, unique per (tag_name, channel) pair."""
    id = UUIDField(primary_key=True, default=uuid.uuid4)
    tag_name = models.CharField(max_length=50)
    channel = models.ForeignKey('Channel', related_name='tags', blank=True, null=True, db_index=True, on_delete=models.SET_NULL)
    objects = CustomManager()
    def __str__(self):
        return self.tag_name
    class Meta:
        unique_together = ['tag_name', 'channel']
class License(models.Model):
    """
    Normalize the license of ContentNode model
    """
    license_name = models.CharField(max_length=50)
    license_url = models.URLField(blank=True)
    license_description = models.TextField(blank=True)
    copyright_holder_required = models.BooleanField(default=True)
    is_custom = models.BooleanField(default=False)
    exists = models.BooleanField(
        default=False,
        verbose_name="license exists",
        help_text="Tells whether or not a content item is licensed to share",
    )
    @classmethod
    def validate_name(cls, name):
        """Raise ValidationError when no License with the given name exists."""
        # exists() lets the DB stop at the first match instead of counting all rows.
        if not cls.objects.filter(license_name=name).exists():
            raise ValidationError('License `{}` does not exist'.format(name))
    def __str__(self):
        return self.license_name
# Explicit index names used by ContentNode.
NODE_ID_INDEX_NAME = "node_id_idx"
NODE_MODIFIED_INDEX_NAME = "node_modified_idx"
NODE_MODIFIED_DESC_INDEX_NAME = "node_modified_desc_idx"
# Cache-key template mapping a ContentNode pk to its tree_id (see ContentNode.filter_by_pk).
CONTENTNODE_TREE_ID_CACHE_KEY = "contentnode_{pk}__tree_id"
class ContentNode(MPTTModel, models.Model):
    """
    By default, all nodes have a title and can be used as a topic.
    """
    # Random id used internally on Studio (See `node_id` for id used in Kolibri)
    id = UUIDField(primary_key=True, default=uuid.uuid4)
    # the content_id is used for tracking a user's interaction with a piece of
    # content, in the face of possibly many copies of that content. When a user
    # interacts with a piece of content, all substantially similar pieces of
    # content should be marked as such as well. We track these "substantially
    # similar" types of content by having them have the same content_id.
    content_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False, db_index=True)
    # Note this field is indexed, but we are using the Index API to give it an explicit name, see the model Meta
    node_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False)
    # TODO: disallow nulls once existing models have been set
    original_channel_id = UUIDField(primary_key=False, editable=False, null=True,
                                    db_index=True)  # Original channel copied from
    source_channel_id = UUIDField(primary_key=False, editable=False, null=True)  # Immediate channel copied from
    # Original node_id of node copied from (TODO: original_node_id clashes with original_node field - temporary)
    original_source_node_id = UUIDField(primary_key=False, editable=False, null=True,
                                        db_index=True)
    source_node_id = UUIDField(primary_key=False, editable=False, null=True)  # Immediate node_id of node copied from
    # Fields specific to content generated by Ricecooker
    source_id = models.CharField(max_length=200, blank=True, null=True)
    source_domain = models.CharField(max_length=300, blank=True, null=True)
    title = models.CharField(max_length=200, blank=True)
    description = models.TextField(blank=True)
    kind = models.ForeignKey('ContentKind', related_name='contentnodes', db_index=True, null=True, blank=True, on_delete=models.SET_NULL)
    license = models.ForeignKey('License', null=True, blank=True, on_delete=models.SET_NULL)
    license_description = models.CharField(max_length=400, null=True, blank=True)
    prerequisite = models.ManyToManyField('self', related_name='is_prerequisite_of',
                                          through='PrerequisiteContentRelationship', symmetrical=False, blank=True)
    is_related = models.ManyToManyField('self', related_name='relate_to', through='RelatedContentRelationship',
                                        symmetrical=False, blank=True)
    language = models.ForeignKey('Language', null=True, blank=True, related_name='content_language', on_delete=models.SET_NULL)
    parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True, on_delete=models.CASCADE)
    tags = models.ManyToManyField(ContentTag, symmetrical=False, related_name='tagged_content', blank=True)
    # No longer used
    sort_order = models.FloatField(max_length=50, default=1, verbose_name="sort order",
                                   help_text="Ascending, lowest number shown first")
    copyright_holder = models.CharField(max_length=200, null=True, blank=True, default="",
                                        help_text="Organization of person who holds the essential rights")
    # legacy field...
    original_node = TreeForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True, related_name='duplicates')
    cloned_source = TreeForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True, related_name='clones')
    thumbnail_encoding = models.TextField(blank=True, null=True)
    created = models.DateTimeField(default=timezone.now, verbose_name="created")
    modified = models.DateTimeField(auto_now=True, verbose_name="modified")
    published = models.BooleanField(default=False)
    publishing = models.BooleanField(default=False)
    complete = models.BooleanField(null=True)
    changed = models.BooleanField(default=True)
    """
    Extra fields for exercises:
    - type: mastery model to use to determine completion
    - m: m value for M out of N mastery criteria
    - n: n value for M out of N mastery criteria
    """
    extra_fields = JSONField(default=dict, blank=True, null=True)
    author = models.CharField(max_length=200, blank=True, default="", help_text="Who created this content?",
                              null=True)
    aggregator = models.CharField(max_length=200, blank=True, default="", help_text="Who gathered this content together?",
                                  null=True)
    provider = models.CharField(max_length=200, blank=True, default="", help_text="Who distributed this content?",
                                null=True)
    role_visibility = models.CharField(max_length=50, choices=roles.choices, default=roles.LEARNER)
    freeze_authoring_data = models.BooleanField(default=False)
    # Fields for metadata labels
    # These fields use a map to store applied labels
    # {
    #     "<label_id1>": true,
    #     "<label_id2>": true,
    # }
    grade_levels = models.JSONField(blank=True, null=True)
    resource_types = models.JSONField(blank=True, null=True)
    learning_activities = models.JSONField(blank=True, null=True)
    accessibility_labels = models.JSONField(blank=True, null=True)
    categories = models.JSONField(blank=True, null=True)
    learner_needs = models.JSONField(blank=True, null=True)
    # A field for storing a suggested duration for the content node
    # this duration should be in seconds.
    suggested_duration = models.IntegerField(blank=True, null=True, help_text="Suggested duration for the content node (in seconds)")
    objects = CustomContentNodeTreeManager()
    # Track all updates and ignore a blacklist of attributes
    # when we check for changes
    _field_updates = FieldTracker()
    # Base predicate shared by the permission-CTE joins below: match on tree_id.
    _permission_filter = Q(tree_id=OuterRef("tree_id"))
    @classmethod
    def _annotate_channel_id(cls, queryset):
        """Annotate each node with the id of the channel whose main tree contains it."""
        # Annotate channel id
        return queryset.annotate(
            channel_id=Subquery(
                Channel.objects.filter(
                    main_tree__tree_id=OuterRef("tree_id")
                ).values_list("id", flat=True)[:1]
            )
        )
    @classmethod
    def filter_by_pk(cls, pk):
        """
        When `settings.IS_CONTENTNODE_TABLE_PARTITIONED` is `False`, this always
        returns a queryset filtered by pk.
        When `settings.IS_CONTENTNODE_TABLE_PARTITIONED` is `True` and a ContentNode
        for `pk` exists, this returns a queryset filtered by `pk` AND `tree_id`. If
        a ContentNode does not exist for `pk` then an empty queryset is returned.
        """
        query = ContentNode.objects.filter(pk=pk)
        if settings.IS_CONTENTNODE_TABLE_PARTITIONED is True:
            # tree_id is cached with no timeout, so subsequent lookups can add
            # the partition-pruning filter without an extra query.
            tree_id = cache.get(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk))
            if tree_id:
                query = query.filter(tree_id=tree_id)
            else:
                tree_id = ContentNode.objects.filter(pk=pk).values_list("tree_id", flat=True).first()
                if tree_id:
                    cache.set(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk), tree_id, None)
                    query = query.filter(tree_id=tree_id)
                else:
                    # No such node: return an empty queryset rather than scanning partitions.
                    query = query.none()
        return query
    @classmethod
    def filter_edit_queryset(cls, queryset, user):
        """
        Restrict nodes to trees in channels `user` can edit, annotating `edit`.
        Admins keep the whole (annotated) queryset; anonymous users get nothing.
        """
        user_id = not user.is_anonymous and user.id
        if not user_id:
            return queryset.none()
        edit_cte = PermissionCTE.editable_channels(user_id)
        queryset = queryset.with_cte(edit_cte).annotate(
            edit=edit_cte.exists(cls._permission_filter),
        )
        if user.is_admin:
            return queryset
        return queryset.filter(edit=True)
    @classmethod
    def filter_view_queryset(cls, queryset, user):
        """
        Restrict nodes to those `user` may view: nodes in public channels or in
        channels they can edit/view. Annotates `public`, `edit` and `view`.
        """
        user_id = not user.is_anonymous and user.id
        # public == the node's tree is the main tree of some public channel
        queryset = queryset.annotate(
            public=Exists(
                Channel.objects.filter(
                    public=True, main_tree__tree_id=OuterRef("tree_id")
                ).values("pk")
            ),
        )
        if not user_id:
            return queryset.annotate(edit=boolean_val(False), view=boolean_val(False)).filter(public=True)
        edit_cte = PermissionCTE.editable_channels(user_id)
        view_cte = PermissionCTE.view_only_channels(user_id)
        queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(
            edit=edit_cte.exists(cls._permission_filter),
            view=view_cte.exists(cls._permission_filter),
        )
        if user.is_admin:
            return queryset
        return queryset.filter(
            Q(view=True)
            | Q(edit=True)
            | Q(public=True)
        )
    @raise_if_unsaved
    def get_root(self):
        """Return this node's tree root; a non-topic root node is its own root."""
        # Only topics can be root nodes
        if self.is_root_node() and self.kind_id != content_kinds.TOPIC:
            return self
        return super(ContentNode, self).get_root()
    @raise_if_unsaved
    def get_root_id(self):
        """
        Return the primary key of this node's tree root.

        NOTE(review): when this node is itself a non-topic root, this returns
        the node OBJECT (mirroring get_root) rather than its pk — this looks
        like a copy-paste inconsistency; confirm callers before changing.
        """
        # Only topics can be root nodes
        if self.is_root_node() and self.kind_id != content_kinds.TOPIC:
            return self
        return ContentNode.objects.values_list('pk', flat=True).get(
            tree_id=self._mpttfield('tree_id'),
            parent=None,
        )
def get_tree_data(self, levels=float('inf')):
"""
Returns `levels`-deep tree information starting at current node.
Args:
levels (int): depth of tree hierarchy to return
Returns:
tree (dict): starting with self, with children list containing either
the just the children's `node_id`s or full recusive tree.
"""
if self.kind_id == content_kinds.TOPIC:
node_data = {
"title": self.title,
"kind": self.kind_id,
"node_id": self.node_id,
"studio_id": self.id,
}
children = self.children.all()
if levels > 0:
node_data["children"] = [c.get_tree_data(levels=levels - 1) for c in children]
return node_data
if self.kind_id == content_kinds.EXERCISE:
return {
"title": self.title,
"kind": self.kind_id,
"count": self.assessment_items.count(),
"node_id": self.node_id,
"studio_id": self.id,
}
return {
"title": self.title,
"kind": self.kind_id,
"file_size": self.files.values('file_size').aggregate(size=Sum('file_size'))['size'],
"node_id": self.node_id,
"studio_id": self.id,
}
def get_original_node(self):
original_node = self.original_node or self
if self.original_channel_id and self.original_source_node_id:
original_tree_id = Channel.objects.select_related("main_tree").get(pk=self.original_channel_id).main_tree.tree_id
original_node = ContentNode.objects.filter(tree_id=original_tree_id, node_id=self.original_source_node_id).first() or \
ContentNode.objects.filter(tree_id=original_tree_id, content_id=self.content_id).first() or self
return original_node
def get_associated_presets(self):
key = "associated_presets_{}".format(self.kind_id)
cached_data = cache.get(key)
if cached_data:
return cached_data
presets = list(FormatPreset.objects.filter(kind=self.kind).values())
cache.set(key, presets, None)
return presets
def get_prerequisites(self):
prerequisite_mapping = {}
prerequisites = self.prerequisite.all()
prereqlist = list(prerequisites)
for prereq in prerequisites:
prlist, prereqmapping = prereq.get_prerequisites()
prerequisite_mapping.update({prereq.pk: prereqmapping})
prereqlist.extend(prlist)
return prereqlist, prerequisite_mapping
def get_postrequisites(self):
postrequisite_mapping = {}
postrequisites = self.is_prerequisite_of.all()
postreqlist = list(postrequisites)
for postreq in postrequisites:
prlist, postreqmapping = postreq.get_postrequisites()
postrequisite_mapping.update({postreq.pk: postreqmapping})
postreqlist.extend(prlist)
return postreqlist, postrequisite_mapping
def get_channel_id(self):
if hasattr(self, "channel_id"):
return self.channel_id
channel = self.get_channel()
if channel:
return channel.id
return None
def get_channel(self):
try:
root = self.get_root()
if not root:
return None
return Channel.objects.filter(Q(main_tree=root) | Q(chef_tree=root) | Q(trash_tree=root) | Q(staging_tree=root) | Q(previous_tree=root)).first()
except (ObjectDoesNotExist, MultipleObjectsReturned, AttributeError):
return None
def get_thumbnail(self):
# Problems with json.loads, so use ast.literal_eval to get dict
if self.thumbnail_encoding:
thumbnail_data = load_json_string(self.thumbnail_encoding)
if type(thumbnail_data) is dict and thumbnail_data.get("base64"):
return thumbnail_data["base64"]
thumbnail = self.files.filter(preset__thumbnail=True).first()
if thumbnail:
return generate_storage_url(str(thumbnail))
return ""
@classmethod
def get_nodes_with_title(cls, title, limit_to_children_of=None):
"""
Returns all ContentNodes with a given title. If limit_to_children_of
is passed in with an id, only look at all the children of the node with that id.
"""
if limit_to_children_of:
root = cls.objects.get(id=limit_to_children_of)
return root.get_descendants().filter(title=title)
return cls.objects.filter(title=title)
    def get_details(self, channel_id=None):
        """
        Returns information about the node and its children, including total size, languages, files, etc.

        Aggregates everything in one annotated query over the subtree:
        resource counts/sizes, creator lists, languages, licenses, tags,
        kind counts, metadata levels/categories, plus a sample pathway and
        sample nodes from the deepest branch and the provenance channels the
        resources were imported from. The result is cached (keyed by
        node_id) with no timeout.

        :param channel_id: optional channel pk to attribute the stats to;
            when omitted the node's own channel is looked up.
        :return: A dictionary with detailed statistics and information about the node.
        """
        from contentcuration.viewsets.common import SQArrayAgg
        from contentcuration.viewsets.common import SQCount
        from contentcuration.viewsets.common import SQRelatedArrayAgg
        from contentcuration.viewsets.common import SQSum
        from contentcuration.viewsets.common import SQJSONBKeyArrayAgg
        node = ContentNode.objects.filter(pk=self.id, tree_id=self.tree_id).order_by()
        descendants = (
            self.get_descendants()
            .values("id")
        )
        if channel_id:
            # NOTE(review): indexing [0] raises IndexError if the channel does
            # not exist — presumably callers pass a validated id; confirm.
            channel = Channel.objects.filter(id=channel_id)[0]
        else:
            channel = self.get_channel()
        # Empty subtree: short-circuit with zeroed statistics.
        if not descendants.exists():
            data = {
                "last_update": pytz.utc.localize(datetime.now()).strftime(
                    settings.DATE_TIME_FORMAT
                ),
                "created": self.created.strftime(settings.DATE_TIME_FORMAT),
                "resource_count": 0,
                "resource_size": 0,
                "includes": {"coach_content": 0, "exercises": 0},
                "kind_count": [],
                "languages": [],
                "accessible_languages": [],
                "licenses": [],
                "tags": [],
                "copyright_holders": [],
                "authors": [],
                "aggregators": [],
                "providers": [],
                "sample_pathway": [],
                "original_channels": [],
                "sample_nodes": [],
                "levels": [],
                "categories": [],
            }
            # Set cache with latest data
            cache.set("details_{}".format(self.node_id), json.dumps(data), None)
            return data
        # Get resources (non-topic descendants)
        resources = descendants.exclude(kind=content_kinds.TOPIC).order_by()
        # CTE over the subtree's files so distinct (checksum, file_size) pairs
        # are counted once for deduplicated size totals.
        nodes = With(
            File.objects.filter(contentnode_id__in=Subquery(resources.values("id")))
            .values("checksum", "file_size")
            .order_by(),
            name="nodes",
        )
        file_query = (
            nodes.queryset().with_cte(nodes).values("checksum", "file_size").distinct()
        )
        l_nodes = With(
            File.objects.filter(contentnode_id__in=Subquery(resources.values("id")))
            .values("language_id", "preset_id")
            .order_by(),
            name="l_nodes",
        )
        # Languages for which subtitle files exist in the subtree.
        accessible_languages_query = (
            l_nodes.queryset()
            .filter(preset_id=format_presets.VIDEO_SUBTITLE)
            .with_cte(l_nodes)
            .values("language__native_name")
            .distinct()
        )
        # HACK: the compiled SQL leaves the bare `topic` parameter unquoted, so
        # patch it textually before embedding in RawSQL — fragile if other
        # values ever contain the substring "topic".
        tags_query = str(
            ContentTag.objects.filter(
                tagged_content__pk__in=descendants.values_list("pk", flat=True)
            )
            .values("tag_name")
            .annotate(count=Count("tag_name"))
            .query
        ).replace("topic", "'topic'")
        kind_count_query = str(
            resources.values("kind_id").annotate(count=Count("kind_id")).query
        ).replace("topic", "'topic'")
        # Single annotated query computing all aggregate statistics at once.
        node = node.annotate(
            resource_count=SQCount(resources, field="id"),
            resource_size=SQSum(file_query, field="file_size"),
            copyright_holders=SQArrayAgg(
                resources.distinct("copyright_holder").order_by("copyright_holder"),
                field="copyright_holder",
            ),
            authors=SQArrayAgg(
                resources.distinct("author").order_by("author"), field="author"
            ),
            aggregators=SQArrayAgg(
                resources.distinct("aggregator").order_by("aggregator"),
                field="aggregator",
            ),
            providers=SQArrayAgg(
                resources.distinct("provider").order_by("provider"), field="provider"
            ),
            languages=SQRelatedArrayAgg(
                descendants.exclude(language=None)
                .distinct("language__native_name")
                .order_by(),
                field="language__native_name",
                fieldname="native_name",
            ),
            accessible_languages=SQRelatedArrayAgg(
                accessible_languages_query,
                field="language__native_name",
                fieldname="native_name",
            ),
            licenses=SQRelatedArrayAgg(
                resources.exclude(license=None)
                .distinct("license__license_name")
                .order_by("license__license_name"),
                field="license__license_name",
                fieldname="license_name",
            ),
            kind_count=RawSQL(
                "SELECT json_agg(row_to_json (x)) FROM ({}) as x".format(
                    kind_count_query
                ),
                (),
            ),
            tags_list=RawSQL(
                "SELECT json_agg(row_to_json (x)) FROM ({}) as x".format(tags_query), ()
            ),
            coach_content=SQCount(
                resources.filter(role_visibility=roles.COACH), field="id"
            ),
            exercises=SQCount(
                resources.filter(kind_id=content_kinds.EXERCISE), field="id"
            ),
            levels=SQJSONBKeyArrayAgg(
                descendants.exclude(grade_levels__isnull=True),
                field="grade_levels",
            ),
            all_categories=SQJSONBKeyArrayAgg(
                descendants.exclude(categories__isnull=True),
                field="categories",
            ),
        )
        # Get sample pathway by getting longest path
        # Using resources.aggregate adds a lot of time, use values that have already been fetched
        max_level = max(
            resources.values_list("level", flat=True).order_by().distinct() or [0]
        )
        m_nodes = With(
            resources.values("id", "level", "tree_id", "lft").order_by(),
            name="m_nodes",
        )
        # First (leftmost) node at the maximum depth, if any.
        deepest_node_record = (
            m_nodes.queryset()
            .with_cte(m_nodes)
            .filter(level=max_level)
            .values("id")
            .order_by("tree_id", "lft")
            .first()
        )
        if deepest_node_record:
            deepest_node = ContentNode.objects.get(pk=deepest_node_record["id"])
        # Ancestor chain of the deepest node (excluding the tree root) as the sample pathway.
        pathway = (
            list(
                deepest_node.get_ancestors()
                .order_by()
                .exclude(parent=None)
                .values("title", "node_id", "kind_id")
                .order_by()
            )
            if deepest_node_record
            else []
        )
        # Up to four siblings of the deepest node serve as sample nodes.
        sample_nodes = (
            [
                {
                    "node_id": n.node_id,
                    "title": n.title,
                    "description": n.description,
                    "thumbnail": n.get_thumbnail(),
                    "kind": n.kind_id,
                }
                for n in deepest_node.get_siblings(include_self=True)[0:4]
            ]
            if deepest_node_record
            else []
        )
        # Get list of channels nodes were originally imported from (omitting the current channel)
        channel_id = channel and channel.id
        originals = (
            resources.values("original_channel_id")
            .annotate(count=Count("original_channel_id"))
            .order_by("original_channel_id")
        )
        originals = {c["original_channel_id"]: c["count"] for c in originals}
        original_channels = (
            Channel.objects.exclude(pk=channel_id)
            .filter(pk__in=originals.keys(), deleted=False)
            .order_by()
        )
        original_channels = [
            {
                "id": c.id,
                "name": "{}{}".format(
                    c.name, _(" (Original)") if channel_id == c.id else ""
                ),
                "thumbnail": c.get_thumbnail(),
                "count": originals[c.id],
            }
            for c in original_channels
        ]
        # Execute the annotated query and pull out the aggregate values.
        node = (
            node.order_by()
            .values(
                "id",
                "resource_count",
                "resource_size",
                "copyright_holders",
                "authors",
                "aggregators",
                "providers",
                "languages",
                "accessible_languages",
                "coach_content",
                "licenses",
                "tags_list",
                "kind_count",
                "exercises",
                "levels",
                "all_categories",
            )
            .first()
        )
        for_educators = {
            "coach_content": node["coach_content"],
            "exercises": node["exercises"],
        }
        # Serialize data
        data = {
            "last_update": pytz.utc.localize(datetime.now()).strftime(
                settings.DATE_TIME_FORMAT
            ),
            "created": self.created.strftime(settings.DATE_TIME_FORMAT),
            "resource_count": node.get("resource_count", 0),
            "resource_size": node.get("resource_size", 0),
            "includes": for_educators,
            "kind_count": node.get("kind_count") or [],
            "languages": node.get("languages") or [],
            "accessible_languages": node.get("accessible_languages") or [],
            "licenses": node.get("licenses") or [],
            "tags": node.get("tags_list") or [],
            "original_channels": original_channels,
            "sample_pathway": pathway,
            "sample_nodes": sample_nodes,
            # source model fields for the below default to an empty string, but can also be null
            "authors": list(filter(bool, node["authors"])),
            "aggregators": list(filter(bool, node["aggregators"])),
            "providers": list(filter(bool, node["providers"])),
            "copyright_holders": list(filter(bool, node["copyright_holders"])),
            "levels": node.get("levels") or [],
            "categories": node.get("all_categories") or [],
        }
        # Set cache with latest data
        cache.set("details_{}".format(self.node_id), json.dumps(data), None)
        return data
def has_changes(self):
mptt_opts = self._mptt_meta
# Ignore fields that are used for dirty tracking, and also mptt fields, as changes to these are tracked in mptt manager methods.
blacklist = set([
'changed',
'modified',
'publishing',
mptt_opts.tree_id_attr,
mptt_opts.left_attr,
mptt_opts.right_attr,
mptt_opts.level_attr,
])
original_values = self._field_updates.changed()
return any((True for field in original_values if field not in blacklist))
def recalculate_editors_storage(self):
from contentcuration.utils.user import calculate_user_storage
for editor in self.files.values_list('uploaded_by_id', flat=True).distinct():
calculate_user_storage(editor)
    def mark_complete(self): # noqa C901
        """Validate this node's completeness and set ``self.complete``.

        Does not save; returns the list of human-readable validation errors
        (empty when the node is complete).
        """
        errors = []
        # Title is required except on root nodes (parent_id is None).
        if not (bool(self.title) or self.parent_id is None):
            errors.append("Empty title")
        if self.kind_id != content_kinds.TOPIC:
            # All resources need a license; custom licenses need a description,
            # and some licenses require a named copyright holder.
            if not self.license:
                errors.append("Missing license")
            if self.license and self.license.is_custom and not self.license_description:
                errors.append("Missing license description for custom license")
            if self.license and self.license.copyright_holder_required and not self.copyright_holder:
                errors.append("Missing required copyright holder")
            # Non-exercise resources need at least one primary (non-supplementary) file.
            if self.kind_id != content_kinds.EXERCISE and not self.files.filter(preset__supplementary=False).exists():
                errors.append("Missing default file")
            if self.kind_id == content_kinds.EXERCISE:
                # Check to see if the exercise has at least one assessment item that has:
                if not self.assessment_items.filter(
                    # Item with non-blank raw data
                    ~Q(raw_data="") | (
                        # A non-blank question
                        ~Q(question='')
                        # Non-blank answers
                        & ~Q(answers='[]')
                        # With either an input question or one answer marked as correct
                        & (Q(type=exercises.INPUT_QUESTION) | Q(answers__iregex=r'"correct":\s*true'))
                    )
                ).exists():
                    errors.append("No questions with question text and complete answers")
                # Check that it has a mastery model set
                # Either check for the previous location for the mastery model, or rely on our completion criteria validation
                # that if it has been set, then it has been set correctly.
                criterion = self.extra_fields.get("options", {}).get("completion_criteria")
                if not (self.extra_fields.get("mastery_model") or criterion):
                    errors.append("Missing mastery criterion")
                if criterion:
                    try:
                        completion_criteria.validate(criterion, kind=content_kinds.EXERCISE)
                    except completion_criteria.ValidationError:
                        errors.append("Mastery criterion is defined but is invalid")
        self.complete = not errors
        return errors
def make_content_id_unique(self):
"""
If self is NOT an original contentnode (in other words, a copied contentnode)
and a contentnode with same content_id exists then we update self's content_id.
"""
is_node_original = self.original_source_node_id is None or self.original_source_node_id == self.node_id
node_same_content_id = ContentNode.objects.exclude(pk=self.pk).filter(content_id=self.content_id)
if (not is_node_original) and node_same_content_id.exists():
ContentNode.objects.filter(pk=self.pk).update(content_id=uuid.uuid4().hex)
    def on_create(self):
        """Side effects applied when a node is first saved: mark it changed,
        refresh uploader storage quotas, and default the learning activity
        from the content kind."""
        self.changed = True
        self.recalculate_editors_storage()
        self.set_default_learning_activity()
def on_update(self):
self.changed = self.changed or self.has_changes()
def move_to(self, target, *args, **kwargs):
parent_was_trashtree = self.parent.channel_trash.exists()
super(ContentNode, self).move_to(target, *args, **kwargs)
self.save()
# Update tree_id cache when node is moved to another tree
cache.set(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=self.id), self.tree_id, None)
# Recalculate storage if node was moved to or from the trash tree
if target.channel_trash.exists() or parent_was_trashtree:
self.recalculate_editors_storage()
def set_default_learning_activity(self):
if self.learning_activities is None:
if self.kind in kind_activity_map:
self.learning_activities = {
kind_activity_map[self.kind]: True
}
    def save(self, skip_lock=False, *args, **kwargs):
        """Save the node, running create/update hooks and — when the parent
        changed — locking the mptt fields of the affected trees.

        :param skip_lock: when True, skip acquiring the mptt lock even if the
            parent changed (caller is expected to already hold it).
        """
        if self._state.adding:
            self.on_create()
        else:
            self.on_update()
        # Logic borrowed from mptt - do a simple check to see if we have changed
        # the parent of the node. We use the mptt specific cached fields here
        # because these get updated by the mptt move methods, and so will be up to
        # date, meaning we can avoid locking the DB twice when the fields have already
        # been updated in the database.
        # If most moves are being done independently of just changing the parent
        # and then calling a save, locking within the save method itself should rarely
        # be triggered - meaning updates to contentnode metadata should only rarely
        # trigger a write lock on mptt fields.
        old_parent_id = self._field_updates.changed().get("parent_id")
        if self._state.adding and (self.parent_id or self.parent):
            same_order = False
        # NOTE(review): this compares against the DeferredAttribute *class* —
        # presumably the tracker returns it as a sentinel when the field was
        # never loaded; confirm this is intentional.
        elif old_parent_id is DeferredAttribute:
            same_order = True
        else:
            same_order = old_parent_id == self.parent_id
        if not same_order:
            # Both the old and new parent (when present) must be flagged changed.
            changed_ids = list(filter(lambda x: x is not None, set([old_parent_id, self.parent_id])))
        else:
            changed_ids = []
        if not same_order and not skip_lock:
            # Lock the mptt fields for the trees of the old and new parent
            with ContentNode.objects.lock_mptt(*ContentNode.objects
                                               .filter(id__in=[pid for pid in [old_parent_id, self.parent_id] if pid])
                                               .values_list('tree_id', flat=True).distinct()):
                super(ContentNode, self).save(*args, **kwargs)
                # Always write to the database for the parent change updates, as we have
                # no persistent object references for the original and new parent to modify
                if changed_ids:
                    ContentNode.objects.filter(id__in=changed_ids).update(changed=True)
        else:
            super(ContentNode, self).save(*args, **kwargs)
            # Always write to the database for the parent change updates, as we have
            # no persistent object references for the original and new parent to modify
            if changed_ids:
                ContentNode.objects.filter(id__in=changed_ids).update(changed=True)
    # Copied from MPTT
    save.alters_data = True
    def delete(self, *args, **kwargs):
        """Delete the node, flagging its (possibly just-changed) parent as
        modified, refreshing uploader storage, and holding the mptt lock for
        this node's tree while the row is removed."""
        # Fall back to the tracker's previous parent in case parent was just cleared.
        parent = self.parent or self._field_updates.changed().get('parent')
        if parent:
            parent.changed = True
            parent.save()
        self.recalculate_editors_storage()
        # Lock the mptt fields for the tree of this node
        with ContentNode.objects.lock_mptt(self.tree_id):
            return super(ContentNode, self).delete(*args, **kwargs)
    # Copied from MPTT
    delete.alters_data = True
def copy_to(
self,
target=None,
position="last-child",
pk=None,
mods=None,
excluded_descendants=None,
can_edit_source_channel=None,
batch_size=None,
progress_tracker=None
):
return self._tree_manager.copy_node(self, target, position, pk, mods, excluded_descendants, can_edit_source_channel, batch_size, progress_tracker)[0]
    def copy(self):
        """Copy this node with all default options (appended as last child of its parent's target)."""
        return self.copy_to()
def is_publishable(self):
return self.complete and self.get_descendants(include_self=True).exclude(kind_id=content_kinds.TOPIC).exists()
    class Meta:
        # Displayed as "Topic" in the admin, since topics are the primary node kind.
        verbose_name = "Topic"
        verbose_name_plural = "Topics"
        # Do not allow two nodes with the same name on the same level
        # unique_together = ('parent', 'title')
        # Named indexes supporting node_id lookups and recent-first listings.
        indexes = [
            models.Index(fields=["node_id"], name=NODE_ID_INDEX_NAME),
            models.Index(fields=["-modified"], name=NODE_MODIFIED_DESC_INDEX_NAME),
        ]
class ContentKind(models.Model):
    """Lookup table of content kinds (topic, video, exercise, ...), keyed by kind name."""
    kind = models.CharField(primary_key=True, max_length=200, choices=content_kinds.choices)

    def __str__(self):
        return self.kind
class FileFormat(models.Model):
    """Lookup table of supported file formats, keyed by file extension."""
    extension = models.CharField(primary_key=True, max_length=40, choices=file_formats.choices)
    mimetype = models.CharField(max_length=200, blank=True)

    def __str__(self):
        return self.extension
class FormatPreset(models.Model):
    """A role a file can play for a content item (e.g. high-res video,
    subtitle, thumbnail), keyed by preset id and tied to a content kind."""
    id = models.CharField(primary_key=True, max_length=150, choices=format_presets.choices)
    readable_name = models.CharField(max_length=400)
    multi_language = models.BooleanField(default=False)
    supplementary = models.BooleanField(default=False)
    thumbnail = models.BooleanField(default=False)
    subtitle = models.BooleanField(default=False)
    display = models.BooleanField(default=True)  # Render on client side
    order = models.IntegerField(default=0)
    kind = models.ForeignKey(ContentKind, related_name='format_presets', null=True, on_delete=models.SET_NULL)
    allowed_formats = models.ManyToManyField(FileFormat, blank=True)

    def __str__(self):
        return self.id

    @classmethod
    def guess_format_preset(cls, filename):
        """
        Guess the format preset of a filename based on its extension.
        Return None if format is unknown.
        """
        extension = os.path.splitext(filename)[1].lstrip(".")
        candidates = cls.objects.filter(
            allowed_formats__extension=extension,
            display=True
        )
        return candidates.first()

    @classmethod
    def get_preset(cls, preset_name):
        """
        Get the FormatPreset object with that exact name.
        Returns None if that format preset is not found.
        """
        try:
            return cls.objects.get(id=preset_name)
        except cls.DoesNotExist:
            return None
class Language(models.Model):
    """A language, keyed by an IETF-style identifier, with separate base
    code and optional subcode plus display names and text direction."""
    id = models.CharField(max_length=14, primary_key=True)
    lang_code = models.CharField(max_length=3, db_index=True)
    lang_subcode = models.CharField(max_length=10, db_index=True, blank=True, null=True)
    readable_name = models.CharField(max_length=100, blank=True)
    native_name = models.CharField(max_length=100, blank=True)
    lang_direction = models.CharField(max_length=3, choices=languages.LANGUAGE_DIRECTIONS, default=languages.LANGUAGE_DIRECTIONS[0][0])

    def ietf_name(self):
        """Return the IETF tag: ``code-subcode`` when a subcode exists, else just the code."""
        if not self.lang_subcode:
            return self.lang_code
        return "{code}-{subcode}".format(code=self.lang_code, subcode=self.lang_subcode)

    def __str__(self):
        return self.ietf_name()
# Explicit name for the AssessmentItem.assessment_id index (see AssessmentItem.Meta).
ASSESSMENT_ID_INDEX_NAME = "assessment_id_idx"
class AssessmentItem(models.Model):
    """A single question belonging to an exercise ContentNode.

    Question/answers/hints are stored as serialized text; permission
    filtering piggybacks on the owning contentnode's tree.
    """
    type = models.CharField(max_length=50, default="multiplechoice")
    question = models.TextField(blank=True)
    # Serialized JSON lists stored as text.
    hints = models.TextField(default="[]")
    answers = models.TextField(default="[]")
    order = models.IntegerField(default=1)
    contentnode = models.ForeignKey('ContentNode', related_name="assessment_items", blank=True, null=True,
                                    db_index=True, on_delete=models.CASCADE)
    # Note this field is indexed, but we are using the Index API to give it an explicit name, see the model Meta
    assessment_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False)
    raw_data = models.TextField(blank=True)
    source_url = models.CharField(max_length=400, blank=True, null=True)
    randomize = models.BooleanField(default=False)
    deleted = models.BooleanField(default=False)
    objects = CustomManager()
    # Track all updates
    _field_updates = FieldTracker()

    def has_changes(self):
        # True when any tracked field differs from its last-saved value.
        return bool(self._field_updates.changed())

    class Meta:
        indexes = [
            models.Index(fields=["assessment_id"], name=ASSESSMENT_ID_INDEX_NAME),
        ]
        unique_together = ['contentnode', 'assessment_id']

    # Joins the permission CTE to this item's owning contentnode tree.
    _permission_filter = Q(tree_id=OuterRef("contentnode__tree_id"))

    @classmethod
    def filter_edit_queryset(cls, queryset, user):
        """Restrict `queryset` to items the user may edit (admins see all)."""
        user_id = not user.is_anonymous and user.id
        if not user_id:
            return queryset.none()
        edit_cte = PermissionCTE.editable_channels(user_id)
        queryset = queryset.with_cte(edit_cte).annotate(
            edit=edit_cte.exists(cls._permission_filter),
        )
        if user.is_admin:
            return queryset
        return queryset.filter(edit=True)

    @classmethod
    def filter_view_queryset(cls, queryset, user):
        """Restrict `queryset` to items the user may view: public channels,
        plus edit/view channels for authenticated users (admins see all)."""
        user_id = not user.is_anonymous and user.id
        queryset = queryset.annotate(
            public=Exists(
                Channel.objects.filter(
                    public=True, main_tree__tree_id=OuterRef("contentnode__tree_id")
                ).values("pk")
            ),
        )
        if not user_id:
            return queryset.annotate(edit=boolean_val(False), view=boolean_val(False)).filter(public=True)
        edit_cte = PermissionCTE.editable_channels(user_id)
        view_cte = PermissionCTE.view_only_channels(user_id)
        queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(
            edit=edit_cte.exists(cls._permission_filter),
            view=view_cte.exists(cls._permission_filter),
        )
        if user.is_admin:
            return queryset
        return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True))

    def on_create(self):
        """
        When an exercise is added to a contentnode, update its content_id
        if it's a copied contentnode.
        """
        self.contentnode.make_content_id_unique()

    def on_update(self):
        """
        When an exercise is updated of a contentnode, update its content_id
        if it's a copied contentnode.
        """
        self.contentnode.make_content_id_unique()

    def delete(self, *args, **kwargs):
        """
        When an exercise is deleted from a contentnode, update its content_id
        if it's a copied contentnode.
        """
        self.contentnode.make_content_id_unique()
        return super(AssessmentItem, self).delete(*args, **kwargs)
class SlideshowSlide(models.Model):
    """A single slide of a slideshow ContentNode, ordered by sort_order."""
    contentnode = models.ForeignKey('ContentNode', related_name="slideshow_slides", blank=True, null=True,
                                    db_index=True, on_delete=models.CASCADE)
    sort_order = models.FloatField(default=1.0)
    # Arbitrary slide payload (caption, image reference, etc.) — schema not enforced here.
    metadata = JSONField(default=dict)
class StagedFile(models.Model):
    """
    Keeps track of files uploaded through Ricecooker to avoid user going over disk quota limit
    """
    checksum = models.CharField(max_length=400, blank=True, db_index=True)
    file_size = models.IntegerField(blank=True, null=True)
    uploaded_by = models.ForeignKey(User, related_name='staged_files', blank=True, null=True, on_delete=models.CASCADE)
# Explicit index/constraint names used by File.Meta below.
FILE_DISTINCT_INDEX_NAME = "file_checksum_file_size_idx"
FILE_MODIFIED_DESC_INDEX_NAME = "file_modified_desc_idx"
FILE_DURATION_CONSTRAINT = "file_media_duration_int"
# Presets that represent playable media and may therefore carry a duration
# (enforced by the FILE_DURATION_CONSTRAINT check constraint).
MEDIA_PRESETS = [
    format_presets.AUDIO,
    format_presets.AUDIO_DEPENDENCY,
    format_presets.VIDEO_HIGH_RES,
    format_presets.VIDEO_LOW_RES,
    format_presets.VIDEO_DEPENDENCY,
]
class File(models.Model):
    """
    The bottom layer of the contentDB schema, defines the basic building brick for content.
    Things it can represent are, for example, mp4, avi, mov, html, css, jpeg, pdf, mp3...
    """
    id = UUIDField(primary_key=True, default=uuid.uuid4)
    # MD5 of the stored content; used for deduplication, not security.
    checksum = models.CharField(max_length=400, blank=True, db_index=True)
    file_size = models.IntegerField(blank=True, null=True)
    file_on_disk = models.FileField(upload_to=object_storage_name, storage=default_storage, max_length=500,
                                    blank=True)
    # A file belongs to at most one of: contentnode, assessment_item, slideshow_slide.
    contentnode = models.ForeignKey(ContentNode, related_name='files', blank=True, null=True, db_index=True, on_delete=models.CASCADE)
    assessment_item = models.ForeignKey(AssessmentItem, related_name='files', blank=True, null=True, db_index=True, on_delete=models.CASCADE)
    slideshow_slide = models.ForeignKey(SlideshowSlide, related_name='files', blank=True, null=True, db_index=True, on_delete=models.CASCADE)
    file_format = models.ForeignKey(FileFormat, related_name='files', blank=True, null=True, db_index=True, on_delete=models.SET_NULL)
    preset = models.ForeignKey(FormatPreset, related_name='files', blank=True, null=True, db_index=True, on_delete=models.SET_NULL)
    language = models.ForeignKey(Language, related_name='files', blank=True, null=True, on_delete=models.SET_NULL)
    original_filename = models.CharField(max_length=255, blank=True)
    source_url = models.CharField(max_length=400, blank=True, null=True)
    uploaded_by = models.ForeignKey(User, related_name='files', blank=True, null=True, on_delete=models.SET_NULL)
    modified = models.DateTimeField(auto_now=True, verbose_name="modified", null=True)
    duration = models.IntegerField(blank=True, null=True)
    objects = CustomManager()

    # Permission joins cover both direct contentnode files and assessment-item files.
    _permission_filter = Q(tree_id=OuterRef("contentnode__tree_id")) | Q(tree_id=OuterRef("assessment_item__contentnode__tree_id"))

    @classmethod
    def filter_edit_queryset(cls, queryset, user):
        """Restrict `queryset` to files the user may edit: files in editable
        channels, plus the user's own orphan uploads (admins see all)."""
        user_id = not user.is_anonymous and user.id
        if not user_id:
            return queryset.none()
        cte = PermissionCTE.editable_channels(user_id)
        queryset = queryset.with_cte(cte).annotate(edit=cte.exists(cls._permission_filter))
        if user.is_admin:
            return queryset
        return queryset.filter(
            Q(edit=True) | Q(uploaded_by=user, contentnode__isnull=True, assessment_item__isnull=True)
        )

    @classmethod
    def filter_view_queryset(cls, queryset, user):
        """Restrict `queryset` to files the user may view: public channels,
        edit/view channels, and the user's own orphan uploads (admins see all)."""
        user_id = not user.is_anonymous and user.id
        queryset = queryset.annotate(
            public=Exists(
                Channel.objects.filter(public=True).filter(
                    Q(main_tree__tree_id=OuterRef("contentnode__tree_id"))
                    | Q(main_tree__tree_id=OuterRef("assessment_item__contentnode__tree_id"))
                ).values("pk")
            ),
        )
        if not user_id:
            return queryset.annotate(edit=boolean_val(False), view=boolean_val(False)).filter(public=True)
        edit_cte = PermissionCTE.editable_channels(user_id)
        view_cte = PermissionCTE.view_only_channels(user_id)
        queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(
            edit=edit_cte.exists(cls._permission_filter),
            view=view_cte.exists(cls._permission_filter),
        )
        if user.is_admin:
            return queryset
        return queryset.filter(
            Q(view=True)
            | Q(edit=True)
            | Q(public=True)
            | Q(uploaded_by=user, contentnode__isnull=True, assessment_item__isnull=True)
        )

    class Admin:
        pass

    def __str__(self):
        return '{checksum}{extension}'.format(checksum=self.checksum, extension='.' + self.file_format.extension)

    def filename(self):
        """
        Returns just the filename of the File in storage, without the path
        e.g. abcd.mp4
        """
        # TODO(aron): write tests for this
        return os.path.basename(self.file_on_disk.name)

    def update_contentnode_content_id(self):
        """
        If the file is attached to a contentnode and is not a thumbnail
        then update that contentnode's content_id if it's a copied contentnode.
        """
        if self.contentnode and self.preset.thumbnail is False:
            self.contentnode.make_content_id_unique()

    def on_update(self):
        # since modified was added later as a nullable field to File, we don't use a default but
        # instead we'll just make sure it's always updated through our serializers
        self.modified = timezone.now()
        self.update_contentnode_content_id()

    def save(self, set_by_file_on_disk=True, *args, **kwargs):
        """
        Overrider the default save method.
        If the file_on_disk FileField gets passed a content copy:
            1. generate the MD5 from the content copy
            2. fill the other fields accordingly

        Also refreshes the uploader's storage usage after saving.
        """
        from contentcuration.utils.user import calculate_user_storage
        # check if the file format exists in file_formats.choices
        if self.file_format_id:
            if self.file_format_id not in dict(file_formats.choices):
                raise ValidationError("Invalid file_format")
        if set_by_file_on_disk and self.file_on_disk:  # if file_on_disk is supplied, hash out the file
            if self.checksum is None or self.checksum == "":
                # MD5 as a content fingerprint for dedup (not security-sensitive).
                md5 = hashlib.md5()
                for chunk in self.file_on_disk.chunks():
                    md5.update(chunk)
                self.checksum = md5.hexdigest()
            if not self.file_size:
                self.file_size = self.file_on_disk.size
            if not self.file_format_id:
                # Infer the format from the on-disk extension; reject unknown types.
                ext = os.path.splitext(self.file_on_disk.name)[1].lstrip('.')
                if ext in list(dict(file_formats.choices).keys()):
                    self.file_format_id = ext
                else:
                    raise ValueError("Files of type `{}` are not supported.".format(ext))
        super(File, self).save(*args, **kwargs)
        if self.uploaded_by_id:
            calculate_user_storage(self.uploaded_by_id)

    class Meta:
        indexes = [
            models.Index(fields=['checksum', 'file_size'], name=FILE_DISTINCT_INDEX_NAME),
            models.Index(fields=["-modified"], name=FILE_MODIFIED_DESC_INDEX_NAME),
        ]
        constraints = [
            # enforces that duration is null when not a media preset, but the duration may be null for media presets
            # but if not-null, should be greater than 0
            models.CheckConstraint(
                check=(Q(preset__in=MEDIA_PRESETS, duration__gt=0) | Q(duration__isnull=True)),
                name=FILE_DURATION_CONSTRAINT
            )
        ]
@receiver(models.signals.post_delete, sender=File)
def auto_delete_file_on_delete(sender, instance, **kwargs):
    """
    Recalculate the uploader's storage usage when a `File` row is deleted.

    NOTE(review): despite the name, this handler does NOT delete anything
    from the file storage — blobs may be shared between File rows via their
    checksum (see delete_empty_file_reference for actual blob cleanup).
    Signal handlers are also not fired by queryset bulk deletes.
    """
    # Recalculate storage
    from contentcuration.utils.user import calculate_user_storage
    if instance.uploaded_by_id:
        calculate_user_storage(instance.uploaded_by_id)
def delete_empty_file_reference(checksum, extension):
    """Delete the stored blob for ``checksum.extension`` from object storage
    when no File row or Channel thumbnail references it anymore."""
    filename = "{}.{}".format(checksum, extension)
    still_referenced = (
        File.objects.filter(checksum=checksum).exists()
        or Channel.objects.filter(thumbnail=filename).exists()
    )
    if still_referenced:
        return
    storage_path = generate_object_storage_name(checksum, filename)
    if default_storage.exists(storage_path):
        default_storage.delete(storage_path)
class PrerequisiteContentRelationship(models.Model):
    """
    Predefine the prerequisite relationship between two ContentNode objects.

    The relation is directional: ``prerequisite`` must be completed before
    ``target_node``. Self references and immediate (A<->B) cycles are
    rejected at save time via ``full_clean``.
    """
    target_node = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_target_node', on_delete=models.CASCADE)
    prerequisite = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_prerequisite', on_delete=models.CASCADE)

    class Meta:
        unique_together = ['target_node', 'prerequisite']

    def clean(self, *args, **kwargs):
        """Validate the relationship; raises IntegrityError on a self
        reference or an immediate two-node cycle."""
        # self reference exception
        if self.target_node == self.prerequisite:
            raise IntegrityError('Cannot self reference as prerequisite.')
        # immediate cyclic exception — use exists() rather than queryset
        # truthiness so we don't fetch rows just for a boolean check
        if PrerequisiteContentRelationship.objects.using(self._state.db) \
                .filter(target_node=self.prerequisite, prerequisite=self.target_node).exists():
            raise IntegrityError(
                'Note: Prerequisite relationship is directional! %s and %s cannot be prerequisite of each other!'
                % (self.target_node, self.prerequisite))
        # distant cyclic exception
        # elif <this is a nice to have exception, may implement in the future when the priority raises.>
        # raise Exception('Note: Prerequisite relationship is acyclic! %s and %s forms a closed loop!' % (
        #     self.target_node, self.prerequisite
        # ))
        super(PrerequisiteContentRelationship, self).clean(*args, **kwargs)

    def save(self, *args, **kwargs):
        # Run clean() (and field validation) before persisting.
        self.full_clean()
        super(PrerequisiteContentRelationship, self).save(*args, **kwargs)

    def __str__(self):
        # Was __unicode__ (a Python 2 relic never called on Python 3);
        # renamed so reprs match the other models in this module.
        return u'%s' % (self.pk)
class RelatedContentRelationship(models.Model):
    """
    Predefine the related relationship between two ContentNode objects.

    The relation is effectively symmetric: saving B->A when A->B already
    exists is silently ignored, and self references are rejected.
    """
    contentnode_1 = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_1', on_delete=models.CASCADE)
    contentnode_2 = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_2', on_delete=models.CASCADE)

    class Meta:
        unique_together = ['contentnode_1', 'contentnode_2']

    def save(self, *args, **kwargs):
        # self reference exception
        if self.contentnode_1 == self.contentnode_2:
            raise IntegrityError('Cannot self reference as related.')
        # handle immediate cyclic — exists() avoids materializing rows just
        # to test queryset truthiness
        if RelatedContentRelationship.objects.using(self._state.db) \
                .filter(contentnode_1=self.contentnode_2, contentnode_2=self.contentnode_1).exists():
            return  # silently cancel the save
        super(RelatedContentRelationship, self).save(*args, **kwargs)
class Invitation(models.Model):
    """ Invitation to edit channel """
    id = UUIDField(primary_key=True, default=uuid.uuid4)
    accepted = models.BooleanField(default=False)
    declined = models.BooleanField(default=False)
    revoked = models.BooleanField(default=False)
    invited = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True, related_name='sent_to')
    # EDIT_ACCESS or VIEW_ACCESS; controls which role accept() grants.
    share_mode = models.CharField(max_length=50, default=EDIT_ACCESS)
    email = models.EmailField(max_length=100, null=True)
    sender = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='sent_by', null=True, on_delete=models.CASCADE)
    channel = models.ForeignKey('Channel', null=True, related_name='pending_editors', on_delete=models.CASCADE)
    first_name = models.CharField(max_length=100, blank=True)
    last_name = models.CharField(max_length=100, blank=True, null=True)

    class Meta:
        verbose_name = "Invitation"
        verbose_name_plural = "Invitations"

    def accept(self):
        """Grant the invited user the requested role on the channel,
        removing them from the opposite role so they hold exactly one."""
        # NOTE(review): first() may return None if no account matches the
        # invitation email — m2m add/remove with None would fail; presumably
        # callers only accept after the account exists. Confirm.
        user = User.objects.filter(email__iexact=self.email).first()
        if self.channel:
            # channel is a nullable field, so check that it exists.
            if self.share_mode == VIEW_ACCESS:
                self.channel.editors.remove(user)
                self.channel.viewers.add(user)
            else:
                self.channel.viewers.remove(user)
                self.channel.editors.add(user)

    @classmethod
    def filter_edit_queryset(cls, queryset, user):
        """Invitations the user may modify: their own (by email), those they
        sent, or those for channels they edit (admins see all)."""
        if user.is_anonymous:
            return queryset.none()
        if user.is_admin:
            return queryset
        return queryset.filter(
            Q(email__iexact=user.email)
            | Q(sender=user)
            | Q(channel__editors=user)
        ).distinct()

    @classmethod
    def filter_view_queryset(cls, queryset, user):
        """Same as filter_edit_queryset but also includes invitations for
        channels the user can only view."""
        if user.is_anonymous:
            return queryset.none()
        if user.is_admin:
            return queryset
        return queryset.filter(
            Q(email__iexact=user.email)
            | Q(sender=user)
            | Q(channel__editors=user)
            | Q(channel__viewers=user)
        ).distinct()
class Change(models.Model):
    """Append-only log of client/server mutations, used to replicate state
    between the frontend and the server."""
    server_rev = models.BigAutoField(primary_key=True)
    # We need to store the user who is applying this change
    # so that we can validate they have permissions to do so
    # allow to be null so that we don't lose changes if a user
    # account is hard deleted.
    created_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True, on_delete=models.SET_NULL, related_name="changes_by_user")
    # Almost all changes are related to channels, but some are specific only to users
    # so we allow this to be nullable for these edge cases.
    # Indexed by default because it's a ForeignKey field.
    channel = models.ForeignKey(Channel, null=True, blank=True, on_delete=models.CASCADE)
    # For those changes related to users, store a user value instead of channel
    # this may be different to created_by, as changes to invitations affect individual users.
    # Indexed by default because it's a ForeignKey field.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True, on_delete=models.CASCADE, related_name="changes_about_user")
    # Use client_rev to keep track of changes coming from the client side
    # but let it be blank or null for changes we generate on the server side
    client_rev = models.IntegerField(null=True, blank=True)
    # client_rev numbers are by session, we add the session key here for bookkeeping
    # to allow a check within the same session to return whether a change has been applied
    # or not, and hence remove it from the frontend
    session = models.ForeignKey(Session, null=True, blank=True, on_delete=models.SET_NULL)
    table = models.CharField(max_length=32)
    change_type = models.IntegerField()
    # Use the DRF JSONEncoder class as the encoder here
    # so that we can handle anything that has been deserialized by DRF
    # or that will be later be serialized by DRF
    kwargs = JSONField(encoder=JSONEncoder)
    applied = models.BooleanField(default=False)
    errored = models.BooleanField(default=False)
    @classmethod
    def _create_from_change(cls, created_by_id=None, channel_id=None, user_id=None, session_key=None, applied=False, table=None, rev=None, **data):
        """Build (but do not save) a Change instance from a raw change dict.

        Remaining keys of ``data`` (after ``type`` is popped) become the
        ``kwargs`` payload. Raises TypeError when ``table`` or the change
        ``type`` is missing or invalid.
        """
        # Bug fix: supply a default so a change dict without a "type" key
        # reaches the explicit TypeError below instead of raising a bare
        # KeyError here.
        change_type = data.pop("type", None)
        if table is None or table not in ALL_TABLES:
            raise TypeError("table is a required argument for creating changes and must be a valid table name")
        if change_type is None or change_type not in ALL_CHANGES:
            raise TypeError("change_type is a required argument for creating changes and must be a valid change type integer")
        return cls(
            session_id=session_key,
            created_by_id=created_by_id,
            channel_id=channel_id,
            user_id=user_id,
            client_rev=rev,
            table=table,
            change_type=change_type,
            kwargs=data,
            applied=applied
        )
    @classmethod
    def create_changes(cls, changes, created_by_id=None, session_key=None, applied=False):
        """Validate and bulk-insert many change dicts in a single query."""
        change_models = []
        for change in changes:
            change_models.append(cls._create_from_change(created_by_id=created_by_id, session_key=session_key, applied=applied, **change))
        cls.objects.bulk_create(change_models)
        return change_models
    @classmethod
    def create_change(cls, change, created_by_id=None, session_key=None, applied=False):
        """Validate, insert, and return a single change."""
        obj = cls._create_from_change(created_by_id=created_by_id, session_key=session_key, applied=applied, **change)
        obj.save()
        return obj
    @classmethod
    def serialize(cls, change):
        """Flatten a change (model instance or dict-like) back into the wire
        format: the kwargs payload plus bookkeeping fields."""
        datum = get_attribute(change, ["kwargs"]).copy()
        datum.update({
            "server_rev": get_attribute(change, ["server_rev"]),
            "table": get_attribute(change, ["table"]),
            "type": get_attribute(change, ["change_type"]),
            "channel_id": get_attribute(change, ["channel_id"]),
            "user_id": get_attribute(change, ["user_id"]),
            "created_by_id": get_attribute(change, ["created_by_id"])
        })
        return datum
    def serialize_to_change_dict(self):
        """Serialize this instance into the client-facing change dict."""
        return self.serialize(self)
class TaskResultCustom(object):
    """
    Custom fields to add to django_celery_results's TaskResult model
    If adding fields to this class, run `makemigrations` then move the generated migration from the
    `django_celery_results` app to the `contentcuration` app and override the constructor to change
    the app_label. See `0141_add_task_signature` for an example

    Every public attribute of this class is copied onto TaskResult by
    contribute_to_class() below; this class is never instantiated itself.
    """
    # user shouldn't be null, but in order to append the field, this needs to be allowed
    user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="tasks", on_delete=models.CASCADE, null=True)
    channel_id = DjangoUUIDField(db_index=True, null=True, blank=True)
    progress = models.IntegerField(null=True, blank=True, validators=[MinValueValidator(0), MaxValueValidator(100)])
    # a hash of the task name and kwargs for identifying repeat tasks
    signature = models.CharField(null=True, blank=False, max_length=32)
    # Keep a reference to the original as_dict so our override can extend it.
    super_as_dict = TaskResult.as_dict
    def as_dict(self):
        """
        :return: A dictionary representation of the task result, extended
            with the custom user/channel/progress fields.
        """
        super_dict = self.super_as_dict()
        super_dict.update(
            user_id=self.user_id,
            channel_id=self.channel_id,
            progress=self.progress,
        )
        return super_dict
    @classmethod
    def contribute_to_class(cls, model_class=TaskResult):
        """
        Adds fields to model, by default TaskResult
        :param model_class: TaskResult model
        """
        # Copy every public attribute except this hook and Meta onto the
        # target model (Django's add_to_class handles model fields).
        for field in dir(cls):
            if not field.startswith("_") and field not in ('contribute_to_class', 'Meta'):
                model_class.add_to_class(field, getattr(cls, field))
        # manually add Meta afterwards
        setattr(model_class._meta, 'indexes', getattr(model_class._meta, 'indexes', []) + cls.Meta.indexes)
    class Meta:
        indexes = [
            # add index that matches query usage for signature
            models.Index(
                fields=['signature'],
                name='task_result_signature_idx',
                condition=Q(status__in=celery_states.UNREADY_STATES),
            ),
        ]
# Trigger the class contributions immediately at import time so the
# extended TaskResult model is in place before any queries run.
TaskResultCustom.contribute_to_class()
|
4,180 | 1b4c9841fd10d065983974e93fe5dcbe048c1281 | #! /usr/bin/env python3
#
# This file is part of Toboggan, https://github.com/TheoryInPractice/Toboggan/,
# and is Copyright (C) North Carolina State University, 2017. It is licensed
# under the three-clause BSD license; see LICENSE.
#
# -*- coding: utf-8 -*-
# python libs
import sys
import itertools
# local imports
from toboggan.dp import solve as solve_dp
# Print iterations progress
def print_progress(iteration, total, prefix='', suffix='', decimals=1,
                   bar_length=100):
    """
    Render one frame of a terminal progress bar (call once per iteration).

    @params:
        iteration   - Required  : current iteration (Int)
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
        decimals    - Optional  : positive number of decimals in percent
                                  complete (Int)
        bar_length  - Optional  : character length of bar (Int)
    """
    out = sys.stdout
    fraction = iteration / float(total)
    # Percentage formatted with the requested number of decimals.
    percents = ('{0:.' + str(decimals) + 'f}').format(100 * fraction)
    filled = int(round(bar_length * fraction))
    bar = '█' * filled + '-' * (bar_length - filled)
    # \r rewinds to the start of the line so each frame overwrites the last.
    out.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix))
    if iteration == total:
        out.write('\n')
    out.flush()
def is_feasible(weights, flow, max_weight):
    """Test whether a partially-guessed, sorted weight vector is feasible.

    ``weights`` may contain ``None`` placeholders for unguessed entries.
    Each placeholder is bounded below by the nearest guessed weight to its
    left (or 1) and above by the nearest guessed weight to its right (or
    ``max_weight``). The guess is feasible iff the flow value fits between
    the resulting minimal and maximal possible weight sums.
    """
    def _fill_forward(seq, start):
        # Replace each falsy entry (the None placeholders) with the most
        # recent real value seen, seeding with ``start``.
        filled = []
        prev = start
        for w in seq:
            prev = w if w else prev
            filled.append(prev)
        return filled

    # Lower bounds propagate left-to-right starting from 1; upper bounds
    # propagate right-to-left starting from max_weight.
    lower = _fill_forward(weights, 1)
    upper = list(reversed(_fill_forward(reversed(weights), max_weight)))
    # Feasible iff the flow fits between the two sum estimates.
    return sum(lower) <= flow <= sum(upper)
def solve(instance, silent=True, max_weight_lower=1,
          max_weight_upper=float('inf'), scoring="sink distance"):
    """Solve the provided instance of path-flow decomposition.

    Enumerates guessed subsets of path weights (smallest-first removal) and
    delegates each guess to the DP solver; returns the first non-empty
    solution set, or an empty set when the instance is infeasible.

    NOTE(review): max_weight_lower, max_weight_upper, and scoring are not
    referenced in this body -- confirm whether they are vestigial.
    """
    flow = instance.flow
    k = instance.k
    # quit right away if the instance has weight bounds that can't be satisfied
    if instance.has_bad_bounds():
        return set()
    # if k equals the size of the largest edge cut, the weights are
    # predetermined
    if instance.k == max(len(C) for C in instance.edge_cuts):
        largest_cut = max(instance.edge_cuts, key=len)
        # Important: path weights must be sorted, otherwise our
        # subsequent optimizations will remove this constraint.
        weights = list(sorted(w for _, w in largest_cut))
        return solve_dp(instance, silent=True, guessed_weights=weights)
    max_weight = instance.max_weight_bounds[1]
    # Only weights within the upper bound can appear in a solution.
    feasible_weights = list(filter(lambda w: w <= max_weight,
                            instance.weights))
    if not silent:
        print(instance.weights, feasible_weights)
    # figure out whether we get the first or last positions for free
    largest_free = False
    smallest_free = False
    # check largest weight first: tight bounds pin the largest weight.
    if instance.max_weight_bounds[0] == instance.max_weight_bounds[1]:
        largest_free = True
        largest = instance.max_weight_bounds[0]
    # A unit weight in the instance pins the smallest path weight to 1.
    if min(instance.weights) == 1:
        smallest_free = True
        smallest = 1
    # Positions of the weight vector that still need guessing.
    positions = list(range(int(smallest_free), k-int(largest_free)))
    # iterate over the number of unguessed weights
    for diff in range(k+1):
        if not silent:
            print("Diff =", diff)
        # iterate over positions of guessed weights. We want them to be
        # ordered, but choose the smallest first to be removed
        for rev_indices in itertools.combinations(reversed(positions), k-diff):
            indices = list(reversed(rev_indices))
            p = len(indices)
            # when k-1 values are determined, it also determines the kth value
            if p == k-1:
                continue
            # iterate over choices for those guessed weights
            # (note: the loop below reuses the name ``p`` as an index; the
            # count above is not needed past this point)
            for chosen_weights in itertools.combinations(feasible_weights, p):
                weights = [None] * k
                # assign the chosen weights to the guessed positions
                for p, w in zip(indices, chosen_weights):
                    weights[p] = w
                # add in free values
                if smallest_free:
                    weights[0] = smallest
                if largest_free:
                    weights[k-1] = largest
                # quit if this didn't work
                if not is_feasible(weights, flow, max_weight):
                    continue
                if not silent:
                    print("Trying weights", weights)
                sol = solve_dp(instance, silent=True, guessed_weights=weights)
                if len(sol) > 0:
                    if not silent:
                        try:
                            for s in sol:
                                print(s, sum(s.path_weights), flow)
                        except AttributeError:
                            print("Unterdetermined solution")
                    return sol
|
4,181 | 419aee3045a0d532afa0fc314df9cdef7aab5219 | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 11 13:13:53 2018
@author: zhang
"""
'''
Wrapped commands used during diffusion-weighted image preprocessing
===================================================================
dwidenoise & mrdegibbs from MRTrix3.0; eddy-openmp from FSL
-------------------------------------------------------------------------
For unknown reasons they are not included after loading the relevant interfaces.
'''
from nipype.interfaces.base import (CommandLine,
CommandLineInputSpec,
File,
TraitedSpec,
traits,
isdefined,
InputMultiPath)
import os
# warp the dwidenoise function from MRtrix
class DWIdenoiseInputSpec(CommandLineInputSpec):
    """Trait specification for the dwidenoise command line (inputs)."""
    # Positional input image(s); rendered first on the command line.
    in_file = InputMultiPath(
        File(exists=True),
        mandatory=True,
        position=0,
        argstr="%s",
        desc="input DWI image")
    # Optional output map of the estimated noise level.
    noise = File(
        argstr='-noise %s',
        desc='noise map')
    force = traits.Bool(
        desc='force overwrite of output files',
        position=-1,
        argstr='-force')
    # Output name is derived from in_file with a "_denoised" suffix.
    out_file = File(name_template='%s_denoised',
        name_source='in_file',
        keep_extension=True,
        argstr="%s",
        position=1,
        desc="the output denoised DWI image")
class DWIdenoiseOutputSpec(TraitedSpec):
    """Trait specification for the dwidenoise command line (outputs)."""
    out_file = File(desc = "the output denoised DWI image", exists = True)
class DWIdenoise(CommandLine):
    """Use MRTrix3 dwidenoise command to denoise DWI data and estimate the
    noise level based on the optimal threshold for PCA.
    For more information, see
    <https://mrtrix.readthedocs.io/en/latest/reference/commands/dwidenoise.html>
    """
    # NOTE: absolute path pins a specific site install of MRtrix 3.0-rc2.
    _cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/dwidenoise'
    input_spec = DWIdenoiseInputSpec
    output_spec = DWIdenoiseOutputSpec
# warp the unring function from MRtrix
class MRdegibbsInputSpec(CommandLineInputSpec):
    """Trait specification for the mrdegibbs command line (inputs)."""
    in_file = File(
        desc="input DWI image",
        exists=True,
        mandatory=True,
        position=0,
        argstr="%s")
    force = traits.Bool(
        desc='force overwrite of output files',
        position=-1,
        argstr='-force')
    # Output name is derived from in_file with an "_unringed" suffix.
    out_file = File(name_template='%s_unringed',
        name_source='in_file',
        keep_extension=True,
        argstr="%s",
        position=1,
        desc="the output unringed DWI image")
class MRdegibbsOutputSpec(TraitedSpec):
    """Trait specification for the mrdegibbs command line (outputs)."""
    out_file = File(desc = "the output unringed DWI image", exists = True)
class MRdegibbs(CommandLine):
    """Use MRTrix3 mrdegibbs command for removing the Gibbs ringing artefact.
    For more information, see
    <https://mrtrix.readthedocs.io/en/latest/reference/commands/mrdegibbs.html>
    """
    # NOTE: absolute path pins a specific site install of MRtrix 3.0-rc2.
    _cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/mrdegibbs'
    input_spec = MRdegibbsInputSpec
    output_spec = MRdegibbsOutputSpec
# Wrap FSL eddy (copy from nipype interface)
class EddyInputSpec(CommandLineInputSpec):
    """Trait specification for FSL eddy (inputs); copied from the nipype
    interface so the wrapped local binary can be used (see Eddy below)."""
    # --- required inputs ---
    in_file = File(
        exists=True,
        mandatory=True,
        argstr='--imain=%s',
        desc=('File containing all the images to estimate '
              'distortions for'))
    in_mask = File(
        exists=True,
        mandatory=True,
        argstr='--mask=%s',
        desc='Mask to indicate brain')
    in_index = File(
        exists=True,
        mandatory=True,
        argstr='--index=%s',
        desc=('File containing indices for all volumes in --imain '
              'into --acqp and --topup'))
    in_acqp = File(
        exists=True,
        mandatory=True,
        argstr='--acqp=%s',
        desc='File containing acquisition parameters')
    in_bvec = File(
        exists=True,
        mandatory=True,
        argstr='--bvecs=%s',
        desc=('File containing the b-vectors for all volumes in '
              '--imain'))
    in_bval = File(
        exists=True,
        mandatory=True,
        argstr='--bvals=%s',
        desc=('File containing the b-values for all volumes in '
              '--imain'))
    out_base = traits.Str(
        'eddy_corrected',
        argstr='--out=%s',
        usedefault=True,
        desc=('basename for output (warped) image'))
    session = File(
        exists=True,
        argstr='--session=%s',
        desc=('File containing session indices for all volumes in '
              '--imain'))
    # --- optional topup coupling (both files must be given together) ---
    in_topup_fieldcoef = File(
        exists=True,
        argstr="--topup=%s",
        requires=['in_topup_movpar'],
        desc=('topup file containing the field '
              'coefficients'))
    in_topup_movpar = File(
        exists=True,
        requires=['in_topup_fieldcoef'],
        desc='topup movpar.txt file')
    # --- model / algorithm options ---
    flm = traits.Enum(
        'linear',
        'quadratic',
        'cubic',
        argstr='--flm=%s',
        desc='First level EC model')
    slm = traits.Enum(
        'none',
        'linear',
        'quadratic',
        argstr='--slm=%s',
        desc='Second level EC model')
    fep = traits.Bool(
        False, argstr='--fep', desc='Fill empty planes in x- or y-directions')
    interp = traits.Enum(
        'spline',
        'trilinear',
        argstr='--interp=%s',
        desc='Interpolation model for estimation step')
    nvoxhp = traits.Int(
        1000, usedefault=True,
        argstr='--nvoxhp=%s',
        desc=('# of voxels used to estimate the '
              'hyperparameters'))
    fudge_factor = traits.Float(
        10.0, usedefault=True,
        argstr='--ff=%s',
        desc=('Fudge factor for hyperparameter '
              'error variance'))
    dont_sep_offs_move = traits.Bool(
        False,
        argstr='--dont_sep_offs_move',
        desc=('Do NOT attempt to separate '
              'field offset from subject '
              'movement'))
    dont_peas = traits.Bool(
        False,
        argstr='--dont_peas',
        desc="Do NOT perform a post-eddy alignment of "
             "shells")
    fwhm = traits.Float(
        desc=('FWHM for conditioning filter when estimating '
              'the parameters'),
        argstr='--fwhm=%s')
    niter = traits.Int(5, usedefault=True,
                       argstr='--niter=%s', desc='Number of iterations')
    method = traits.Enum(
        'jac',
        'lsr',
        argstr='--resamp=%s',
        desc=('Final resampling method (jacobian/least '
              'squares)'))
    repol = traits.Bool(
        False, argstr='--repol', desc='Detect and replace outlier slices')
    # nohash: thread count must not affect the workflow cache key.
    num_threads = traits.Int(
        1,
        usedefault=True,
        nohash=True,
        desc="Number of openmp threads to use")
    is_shelled = traits.Bool(
        False,
        argstr='--data_is_shelled',
        desc="Override internal check to ensure that "
             "date are acquired on a set of b-value "
             "shells")
    field = traits.Str(
        argstr='--field=%s',
        desc="NonTOPUP fieldmap scaled in Hz - filename has "
             "to be provided without an extension. TOPUP is "
             "strongly recommended")
    field_mat = File(
        exists=True,
        argstr='--field_mat=%s',
        desc="Matrix that specifies the relative locations of "
             "the field specified by --field and first volume "
             "in file --imain")
    # Switches the executable between eddy_cuda and eddy_openmp (see Eddy).
    use_cuda = traits.Bool(False, desc="Run eddy using cuda gpu")
class EddyOutputSpec(TraitedSpec):
    """Trait specification for FSL eddy (outputs). Only files that eddy
    actually produced are reported (see Eddy._list_outputs)."""
    out_corrected = File(
        exists=True, desc='4D image file containing all the corrected volumes')
    out_parameter = File(
        exists=True,
        desc=('text file with parameters definining the field and'
              'movement for each scan'))
    out_rotated_bvecs = File(
        exists=True, desc='File containing rotated b-values for all volumes')
    out_movement_rms = File(
        exists=True, desc='Summary of the "total movement" in each volume')
    out_restricted_movement_rms = File(
        exists=True,
        desc=('Summary of the "total movement" in each volume '
              'disregarding translation in the PE direction'))
    out_shell_alignment_parameters = File(
        exists=True,
        desc=('File containing rigid body movement parameters '
              'between the different shells as estimated by a '
              'post-hoc mutual information based registration'))
    out_outlier_report = File(
        exists=True,
        desc=('Text-file with a plain language report on what '
              'outlier slices eddy has found'))
class Eddy(CommandLine):
    """
    Interface for FSL eddy, a tool for estimating and correcting eddy
    currents induced distortions. `User guide
    <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Eddy/UsersGuide>`_ and
    `more info regarding acqp file
    <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/eddy/Faq#How_do_I_know_what_to_put_into_my_--acqp_file>`_.
    Examples
    --------
    >>> from nipype.interfaces.fsl import Eddy
    >>> eddy = Eddy()
    >>> eddy.inputs.in_file = 'epi.nii'
    >>> eddy.inputs.in_mask  = 'epi_mask.nii'
    >>> eddy.inputs.in_index = 'epi_index.txt'
    >>> eddy.inputs.in_acqp  = 'epi_acqp.txt'
    >>> eddy.inputs.in_bvec  = 'bvecs.scheme'
    >>> eddy.inputs.in_bval  = 'bvals.scheme'
    >>> eddy.inputs.use_cuda = True
    >>> eddy.cmdline # doctest: +ELLIPSIS
    'eddy_cuda --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme \
--bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt \
--mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'
    >>> eddy.inputs.use_cuda = False
    >>> eddy.cmdline # doctest: +ELLIPSIS
    'eddy_openmp --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme \
--bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt \
--mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'
    >>> res = eddy.run() # doctest: +SKIP
    """
    # Hard-coded local eddy_openmp build (5.0.11).
    # NOTE(review): _use_cuda() below overwrites this absolute path with a
    # bare executable name once use_cuda is set -- confirm intended.
    _cmd = '/data/pt_life_dti/scripts/life2018/eddy_openmp-5.0.11'
    input_spec = EddyInputSpec
    output_spec = EddyOutputSpec
    _num_threads = 1
    def __init__(self, **inputs):
        # Keep OMP_NUM_THREADS and the executable choice in sync with the
        # corresponding input traits via change listeners.
        super(Eddy, self).__init__(**inputs)
        self.inputs.on_trait_change(self._num_threads_update, 'num_threads')
        if not isdefined(self.inputs.num_threads):
            self.inputs.num_threads = self._num_threads
        else:
            self._num_threads_update()
        self.inputs.on_trait_change(self._use_cuda, 'use_cuda')
        if isdefined(self.inputs.use_cuda):
            self._use_cuda()
    def _num_threads_update(self):
        # Mirror num_threads into the OMP_NUM_THREADS environment variable
        # that the openmp build of eddy reads.
        self._num_threads = self.inputs.num_threads
        if not isdefined(self.inputs.num_threads):
            if 'OMP_NUM_THREADS' in self.inputs.environ:
                del self.inputs.environ['OMP_NUM_THREADS']
        else:
            self.inputs.environ['OMP_NUM_THREADS'] = str(
                self.inputs.num_threads)
    def _use_cuda(self):
        # Switch the executable based on the use_cuda flag.
        self._cmd = 'eddy_cuda' if self.inputs.use_cuda else 'eddy_openmp'
    def _run_interface(self, runtime):
        # If 'eddy_openmp' is missing, use 'eddy'
        FSLDIR = os.getenv('FSLDIR', '')
        cmd = self._cmd
        if all((FSLDIR != '', cmd == 'eddy_openmp',
                not os.path.exists(os.path.join(FSLDIR, 'bin', cmd)))):
            self._cmd = 'eddy'
        runtime = super(Eddy, self)._run_interface(runtime)
        # Restore command to avoid side-effects
        self._cmd = cmd
        return runtime
    def _format_arg(self, name, spec, value):
        # --topup expects the basename without the "_fieldcoef" suffix.
        if name == 'in_topup_fieldcoef':
            return spec.argstr % value.split('_fieldcoef')[0]
        # --out is always passed as an absolute path.
        if name == 'out_base':
            return spec.argstr % os.path.abspath(value)
        return super(Eddy, self)._format_arg(name, spec, value)
    def _list_outputs(self):
        """Report the two guaranteed outputs plus whichever optional files
        this eddy version actually produced."""
        outputs = self.output_spec().get()
        outputs['out_corrected'] = os.path.abspath(
            '%s.nii.gz' % self.inputs.out_base)
        outputs['out_parameter'] = os.path.abspath(
            '%s.eddy_parameters' % self.inputs.out_base)
        # File generation might depend on the version of EDDY
        out_rotated_bvecs = os.path.abspath(
            '%s.eddy_rotated_bvecs' % self.inputs.out_base)
        out_movement_rms = os.path.abspath(
            '%s.eddy_movement_rms' % self.inputs.out_base)
        out_restricted_movement_rms = os.path.abspath(
            '%s.eddy_restricted_movement_rms' % self.inputs.out_base)
        out_shell_alignment_parameters = os.path.abspath(
            '%s.eddy_post_eddy_shell_alignment_parameters' %
            self.inputs.out_base)
        out_outlier_report = os.path.abspath(
            '%s.eddy_outlier_report' % self.inputs.out_base)
        if os.path.exists(out_rotated_bvecs):
            outputs['out_rotated_bvecs'] = out_rotated_bvecs
        if os.path.exists(out_movement_rms):
            outputs['out_movement_rms'] = out_movement_rms
        if os.path.exists(out_restricted_movement_rms):
            outputs['out_restricted_movement_rms'] = \
                out_restricted_movement_rms
        if os.path.exists(out_shell_alignment_parameters):
            outputs['out_shell_alignment_parameters'] = \
                out_shell_alignment_parameters
        if os.path.exists(out_outlier_report):
            outputs['out_outlier_report'] = out_outlier_report
        return outputs
|
4,182 | 8855747f58b48bedc362930662e147b1fc4ebd63 | """
MAIN IDEA --> Keep 2 pointers. i points to current 0 element and j searches for first non zero element which comes after i.
As soon as we get a j, we swap i and j. So index i now becomes non zero. Now move i to next index i.e i+1 and now check if i
is zero or non zero. If i is still zero, then again search for 1st non zero element represented by j and swap again.
If i is now non-zero then no need to swap we already have a non zero in the beginning of array, so we just move the pointer i forward
"""
class Solution(object):
    """Move every zero in a list to its end while preserving the relative
    order of the non-zero elements (LeetCode 283)."""

    def moveZeroes(self, nums):
        """
        :type nums: List[int]
        :rtype: None Do not return anything, modify nums in-place instead.

        Bug fix: the previous two-pointer loop never advanced the scan
        pointer ``j`` past leading non-zero elements (the increment was
        commented out), so inputs such as [1, 2, 0, 3] were returned
        unmodified. This version uses the standard write-pointer technique:
        each non-zero element is swapped forward into the next write slot,
        pushing the zeros to the tail in a single O(n) pass.
        """
        write = 0  # next index that should hold a non-zero value
        for read in range(len(nums)):
            if nums[read] != 0:
                nums[write], nums[read] = nums[read], nums[write]
                write += 1
        # The list is modified in place; it is also returned for convenience
        # (preserving the original interface used by the driver code below).
        return nums
# Ad-hoc smoke test: run the solver on a large mixed list and print the result.
nums = [-959151711,623836953,209446690,-1950418142,1339915067,-733626417,481171539,-2125997010,-1225423476,1462109565,147434687,-1800073781,-1431212205,-450443973,50097298,753533734,-747189404,-2070885638,0,-1484353894,-340296594,-2133744570,619639811,-1626162038,669689561,0,112220218,502447212,-787793179,0,-726846372,-1611013491,204107194,1605165582,-566891128,2082852116,0,532995238,-1502590712,0,2136989777,-2031153343,371398938,-1907397429,342796391,609166045,-2007448660,-1096076344,-323570318,0,-2082980371,2129956379,-243553361,-1549960929,1502383415,0,-1394618779,694799815,78595689,-1439173023,-1416578800,685225786,-333502212,-1181308536,-380569313,772035354,0,-915266376,663709718,1443496021,-777017729,-883300731,-387828385,1907473488,-725483724,-972961871,-1255712537,383120918,1383877998,1722751914,0,-1156050682,1952527902,-560244497,1304305692,1173974542,-1313227247,-201476579,-298899493,-1828496581,-1724396350,1933643204,1531804925,1728655262,-955565449,0,-69843702,-461760848,268336768,1446130876]
s = Solution()
print(s.moveZeroes(nums))
|
4,183 | f0168a737b9215520ce600470f9b27837dafb593 | from django.apps import AppConfig
class TermserviceConfig(AppConfig):
    """Django application configuration for the ``termservice`` app."""
    name = 'termservice'
|
4,184 | b3f0aae91c885d0e15ff3e456b5cab43fca65b67 | from . import *
from ..utils.constants import NUM_SEARCH_RESULT
def get_course_by_id(course_id):
    """Return the Course with the given primary key, or None."""
    return Course.query.filter_by(id=course_id).first()
def get_course_by_subject_and_course_num(subject_code, course_num):
    """Return the Course matching (subject_code, course_num), or None."""
    return Course.query.filter_by(subject_code=subject_code, course_num=course_num).first()
def create_course(subject_code, course_num, title):
    """Get-or-create: return the existing course for (subject_code,
    course_num), or create, persist, and return a new one.

    Note: ``title`` is ignored when the course already exists.
    """
    optional_course = get_course_by_subject_and_course_num(subject_code, course_num)
    if optional_course:
        return optional_course
    course = Course(subject_code=subject_code, course_num=course_num, title=title)
    db.session.add(course)
    db.session.commit()
    return course
def search_courses(query):
    """Case-insensitive substring search over Course.search_string, capped
    at NUM_SEARCH_RESULT rows and ordered so that courses matching earlier
    in their search string rank first."""
    results = Course.query.filter(Course.search_string.ilike("%{}%".format(query))).limit(NUM_SEARCH_RESULT)
    results = sorted(results, key = lambda r : find_query_index(r, query))
    return results
def find_query_index(course, key):
    """Return the position of *key* (case-insensitive) within the course's
    search string, or -1 when it does not occur.

    Used as a sort key so courses matching earlier in their search string
    rank first.
    """
    haystack = course.search_string.lower()
    # str.find already yields -1 on a miss, matching the previous
    # try/index/except behavior exactly.
    return haystack.find(key.lower())
def clear_table():
    """Delete every Course row.

    NOTE(review): does not commit the session -- presumably the caller
    commits; confirm.
    """
    Course.query.delete()
|
4,185 | 379ab72f5cc74cf6ed4319fff76437ce84aaca23 | import os
import sys
import random
import string
# Build a word-level Markov chain from ./Data.txt and print randomly
# generated text of length sys.argv[1].
# NOTE(review): this is Python 2 code (xrange, print statement); the
# `import os` and `import string` above appear unused.
trainingData = open('./Data.txt').readlines()
# Used for storing the Markov states
table = {}
# Stores all the words
words = []
# The size of the tuple that represents the Markov State
ChainLength = 2
# Length of the output chain
Size = int(sys.argv[1])
if(len(sys.argv) >= 3): ChainLength = int(sys.argv[2])
# Read in data and split into words
for line in trainingData:
    for word in line.split():
        #word = word.translate(string.maketrans("",""), string.punctuation)
        words.append(word)
# For each set of words
for idx in xrange(0,len(words)-ChainLength):
    # Now we have ChainLength+1 amount of words
    ws = words[idx:idx+ChainLength+1]
    # Construct our table.
    # For example, with a chain length of 2 a valid key/val pair would be
    # table[('see', 'spot')] = ['run','play']
    # indicating that from the state ('see', 'spot') the next word has a
    # 50% chance of being 'run' and a 50% chance of being 'play'.
    key = tuple(ws[:ChainLength])
    val = ws[ChainLength]
    if key in table:
        table[key].append(val)
    else:
        table[key] = [val]
# NOTE(review): the upper bound looks off by one -- randint is inclusive,
# so a seed at the very end can yield a start state that is shorter than
# ChainLength or that never occurs as a key in `table`, raising KeyError;
# confirm and consider len(words) - ChainLength - 1.
seed = random.randint(0, len(words)-ChainLength+1)
ws = words[seed:seed+ChainLength]
gen = []
for i in xrange(0,int(sys.argv[1])):
    gen.append(ws[0])
    # Find the next word randomly given the current state, i.e. the tuple of words
    val = random.choice(table[tuple(ws)])
    ws.append(val)
    ws.pop(0)
print ' '.join(gen)
|
4,186 | 00a1b5f20f15994a659eda56201ba7c45d49a4db | import os
import json
from .utils import *
def _unique_predict(solve_list):
valid_solve_list = filter(lambda x: x[0] is not None, solve_list)
valid_solve_list = sorted(valid_solve_list, key=lambda x: x[0])
unique_solve_list = list()
current_no = -1
for e in valid_solve_list:
if current_no != e[0]:
current_no = e[0]
unique_solve_list.append(e)
return unique_solve_list
@safe_one_retval_wrapper
def _analysis_data(answer_root, kind, result):
    """Score one prediction result against the answer files.

    Returns the fraction of correctly answered questions across all
    predicted suites (wrapped by safe_one_retval_wrapper into a
    (message, value) pair). A non-passing result sets score to -1 and
    raises, so the wrapper reports the failure message.
    """
    if result["pass"] != 1:
        result["score"] = -1
        raise Exception(result['message'])
    predict_suites = result["predict_suites"]
    total = 0
    correct = 0
    # unique predict suites
    for suite in predict_suites:
        with open(os.path.join(answer_root, suite + ".answer.json"), "r", encoding="utf-8") as fh:
            answer_dict = json.load(fh)
        # get unique solve list by id (the first element)
        solve_list = _unique_predict(predict_suites[suite])
        total = total + len(answer_dict)
        for q in solve_list:
            if q[1] == answer_dict[str(q[0])]['answer']:
                correct = correct + 1
    # Guard against division by zero when no answers exist at all.
    total = total if total else 1
    return correct / total
def analysis_data(answer_root, kind, result):
    """Score one prediction result and normalize the outcome into a
    {pass, score, message} dict (-1 pass = skipped, 1 = scored, 0 = error)."""
    if result.get('pass') == -1:
        return {"pass": -1, "score": -1, "message": None}
    message, score = _analysis_data(answer_root, kind, result)
    succeeded = message is None
    return {
        "pass": 1 if succeeded else 0,
        "score": score if succeeded else -1,
        "message": message,
    }
@safe_one_retval_wrapper
def _run_analysis(data_root, work_root, answer_root):
    """Score every prediction kind in work_root/output.answer.json and
    write the per-kind results to work_root/result.json.

    Wrapped by safe_one_retval_wrapper into a (message, value) pair.
    """
    with open(os.path.join(data_root, "config.json"), "r", encoding="utf-8") as fh:
        # NOTE(review): config is loaded but never used below -- presumably
        # kept so a missing/invalid config.json fails the run; confirm.
        config = json.load(fh)
    predict_file = os.path.join(work_root, "output.answer.json")
    with open(predict_file, "r", encoding="utf-8") as fh:
        predict = json.load(fh)
    analysis_result = {}
    for kind, result in predict.items():
        analysis_result[kind] = analysis_data(answer_root, kind, result)
    path = os.path.join(work_root, "result.json")
    with open(path, "w", encoding="utf-8") as fh:
        json.dump(analysis_result, fh, ensure_ascii=False)
    return True
def run_analysis(data_root, work_root, answer_root):
    """Entry point: run the analysis and report the outcome.

    On success, _run_analysis already wrote result.json; on failure the
    error message itself is written there instead. Returns the wrapper's
    (message, code) pair.
    """
    msg, code = _run_analysis(data_root, work_root, answer_root)
    result_file = os.path.join(work_root, "result.json")
    if msg is None:
        print("Succ:output to %s" % result_file)
    else:
        with open(result_file, "w", encoding="utf-8") as fh:
            fh.write(msg)
        print("Fail:output to %s" % result_file)
    return msg, code
|
4,187 | 82c10076ba73723b696e3e33280296c2a24f20b9 | from django.apps import AppConfig
class PrimaryuserConfig(AppConfig):
    """Django application configuration for the ``PrimaryUser`` app."""
    name = 'PrimaryUser'
|
4,188 | 133bd0b2affc3d29390edeab51299d294dafb709 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 2 18:52:27 2021
@author: burak
"""
import veriler
import gorsel
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
# Evaluate K-NN accuracy for k = 1..12 to pick a good neighbour count.
neighbors = np.arange(1,13)
train_accuracy = np.empty(len(neighbors))
test_accuracy = np.empty(len(neighbors))
for n, k in enumerate(neighbors):
    knn = KNeighborsClassifier(n_neighbors=k, metric='minkowski')
    knn.fit(veriler.X_train, veriler.y_train.ravel())
    # Record accuracy on both splits to visualise over/underfitting.
    train_accuracy[n] = knn.score(veriler.X_train, veriler.y_train)
    test_accuracy[n] = knn.score(veriler.X_test, veriler.y_test)
# Plot labels are Turkish (e.g. "K-NN accuracy vs. neighbour count").
gorsel.plot_show('K-NN Degisen Komsu Sayisi', neighbors, test_accuracy, train_accuracy, 'Test Dogrulugu', 'Egitim Dogrulugu', 'Komsu Sayisi', 'Dogruluk')
# Based on the curves above, refit with the chosen value n_neighbors=9.
knn = KNeighborsClassifier(n_neighbors=9, metric='minkowski')
knn.fit(veriler.X_train, veriler.y_train.ravel())
accuracy = knn.score(veriler.X_test, veriler.y_test)
gorsel.plot_confusion_matrix_show(knn, veriler.X_test, veriler.y_test, str(accuracy))
4,189 | 6285d1665bacbff746f44f42ce65981f937fff64 | # Generated by Django 3.2.3 on 2021-05-29 16:37
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for the study_sessions app:
    creates the Location, StudySession, and Participant tables.

    NOTE: do not hand-edit generated operations; add a new migration
    instead.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('login', '0014_auto_20210529_1637'),
    ]
    operations = [
        migrations.CreateModel(
            name='Location',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('street', models.CharField(blank=True, max_length=255, null=True)),
                ('postal_code', models.IntegerField(blank=True, null=True)),
                ('city', models.CharField(blank=True, max_length=255, null=True)),
                ('country', models.CharField(blank=True, max_length=255, null=True)),
                ('facility', models.CharField(blank=True, max_length=255, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='StudySession',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('is_active', models.BooleanField(default=True)),
                ('start_time', models.TimeField()),
                ('end_time', models.TimeField()),
                ('date', models.DateField()),
                ('available_spots', models.IntegerField(default=1)),
                ('taken_spots', models.IntegerField(default=0)),
                ('description', models.CharField(blank=True, max_length=500, null=True)),
                ('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='study_sessions.location')),
                ('subject', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='login.subject')),
            ],
        ),
        migrations.CreateModel(
            name='Participant',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('study_session', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='study_sessions.studysession')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
4,190 | 6bf1a0fbf65895eac9baa71bc5e04e861f0a3ed5 | from gpiozero import Motor
class Car:
    """Four-motor car controller built on gpiozero ``Motor``s.

    Pin wiring is fixed at construction.  Every drive method clamps the
    requested speed into [0, 1]; passing no speed (or a falsy one, e.g. 0)
    falls back to the default speed given to the constructor, matching the
    original behaviour.
    """

    def __init__(self, speed: float = 1):
        self.speed = speed
        self.forward_right_motor = Motor(forward=12, backward=16)
        self.backward_right_motor = Motor(forward=21, backward=20)
        self.forward_left_motor = Motor(forward=23, backward=18)
        self.backward_left_motor = Motor(forward=24, backward=25)

    def _motors(self):
        """All four motors, front-left / front-right / back-left / back-right."""
        return (
            self.forward_left_motor,
            self.forward_right_motor,
            self.backward_left_motor,
            self.backward_right_motor,
        )

    def _clamped(self, speed):
        """Requested speed (or the constructor default) limited to [0, 1]."""
        return max(min(speed if speed else self.speed, 1), 0)

    def stop(self):
        """Halt every motor."""
        for motor in self._motors():
            motor.stop()

    def forward(self, speed: float = None):
        """Drive straight ahead."""
        s = self._clamped(speed)
        for motor in self._motors():
            motor.forward(speed=s)

    def backward(self, speed: float = None):
        """Drive straight back."""
        s = self._clamped(speed)
        for motor in self._motors():
            motor.backward(speed=s)

    def left(self, speed: float = None):
        """Turn left: left side reverses while the right side drives forward."""
        s = self._clamped(speed)
        self.forward_left_motor.backward(speed=s)
        self.backward_left_motor.backward(speed=s)
        self.forward_right_motor.forward(speed=s)
        self.backward_right_motor.forward(speed=s)

    def right(self, speed: float = None):
        """Turn right: right side reverses while the left side drives forward."""
        s = self._clamped(speed)
        self.forward_left_motor.forward(speed=s)
        self.backward_left_motor.forward(speed=s)
        self.forward_right_motor.backward(speed=s)
        self.backward_right_motor.backward(speed=s)
|
4,191 | 31801f62942337b0cdf0e022dc75a9e125be54e3 | from django.db import models
from django.conf import settings
from django.utils.text import slugify
from six import python_2_unicode_compatible
from ckeditor_uploader.fields import RichTextUploadingField
from ckeditor.fields import RichTextField
# Create your models here.
class topic(models.Model):
    # Article category; ``name`` doubles as the primary key and
    # ``showname`` is the optional human-facing display label.
    name = models.CharField(max_length=255, primary_key=True)
    showname = models.CharField(max_length=255, null= True)
    def __str__(self):
        return self.name
class article(models.Model):
    # A blog post filed under a ``topic``; ``body`` supports rich text
    # with uploads and ``view`` counts page hits.
    title = models.CharField(max_length=255)
    slug = models.SlugField(max_length=255, unique= True, blank=True, editable=True, null = True)
    topic = models.ForeignKey(topic, on_delete=models.CASCADE)
    author = models.CharField(max_length=255)
    opening = models.TextField()
    body = RichTextUploadingField()
    date = models.DateTimeField(auto_now_add=True)
    image = models.ImageField(null = True)
    view = models.IntegerField(default=0, null=True)
    def __str__(self):
        return self.title
    def save(self, *args, **kwargs):
        # Regenerate the slug from the title on every save.
        # NOTE(review): slug is unique=True, so two articles sharing a
        # title will raise an IntegrityError here — confirm intended.
        self.slug = slugify(self.title)
        super(article, self).save(*args, **kwargs)
class Comment(models.Model):
    # Reader comment on an ``article``; deleted together with its post
    # or its author (CASCADE on both foreign keys).
    post = models.ForeignKey(article, on_delete=models.CASCADE, related_name='comments')
    author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    body = models.TextField()
    date = models.DateTimeField(auto_now_add=True)
|
4,192 | c14d76493cd3dacc55c993f588dec555b7a4a13c | # %% import libs
import os
import argparse
import logging as logger
import mxnet as mx
import tqdm
from mxnet import autograd
from mxnet import gluon
from gluoncv.utils import makedirs
import datasets as gan_datasets
from utils import vis, get_cpus, TrainingHistory
import models
# Fixed seed for reproducibility; training losses go to a log file.
mx.random.seed(5)
logger.basicConfig(level=logger.INFO, filename='logs/train_loss-dcgan.log')
# Command-line options for the DCGAN training run.
# NOTE(review): argparse's ``type=bool`` does not parse strings — any
# non-empty value (even "False") is truthy; ``action='store_true'`` would
# be the conventional fix, but changing it would alter the CLI contract,
# so it is left unchanged here.
arg = argparse.ArgumentParser(description="training parameters")
arg.add_argument('--lr', type=float, default=0.001, help='learning rate')
arg.add_argument('--batch', type=int, default=32, help='batch size')
arg.add_argument('--epoch', type=int, default=30000, help='training epochs')
arg.add_argument('--continue', type=bool, default=True, help='should continue with last checkpoint')
arg.add_argument('--save_checkpoint', type=bool, default=True, help='whether save checkpoint')
arg.add_argument('--save_per_epoch', type=int, default=250, help='save checkpoint every specific epochs')
arg.add_argument('--save_dir', type=str, default='saved/params-dcgan', help='check point save path')
arg.add_argument('--cuda', type=bool, default=False, help='whether use gpu, default is True')
arg.add_argument('--pred_per_gen', type=int, default=15, help='make a pred every specific epoch')
arg.add_argument('--validation', type=bool, default=False, help='whether use validation set, default: False')
arg.add_argument('--dataset', type=str, default='rem_face', help='rem, miku, face,rem_face')
opt = arg.parse_args()
# %% define parameters
# Pull the training hyper-parameters out of the parsed CLI options.
epoch = opt.epoch
epoch_start = 0
batch_size = opt.batch
lr = opt.lr
should_save_checkpoint = opt.save_checkpoint
save_per_epoch = opt.save_per_epoch
save_dir = opt.save_dir
# Bug fix: the CLI flag is --pred_per_gen, so the parsed attribute is
# ``pred_per_gen`` — ``opt.pred_per_epoch`` raised AttributeError at startup.
pred_per_epoch = opt.pred_per_gen
should_use_val = opt.validation
dataset = opt.dataset
# Resolve the dataset loader function (e.g. load_rem_face) by name.
dataset_loader = getattr(gan_datasets, 'load_{}'.format(dataset))
CTX = mx.gpu() if opt.cuda else mx.cpu()
logger.info('Will use {}'.format(CTX))
# %% define dataloader
logger.info("Prepare data")
# noinspection PyTypeChecker
tfs_train = gluon.data.vision.transforms.Compose([
gluon.data.vision.transforms.Resize(size=(256, 256), interpolation=2),
gluon.data.vision.transforms.RandomFlipLeftRight(),
gluon.data.vision.transforms.RandomSaturation(0.005),
gluon.data.vision.transforms.ToTensor(),
gluon.data.vision.transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])
# noinspection PyTypeChecker
tfs_val = gluon.data.vision.transforms.Compose([
gluon.data.vision.transforms.Resize(size=(256, 256), interpolation=2),
gluon.data.vision.transforms.ToTensor(),
gluon.data.vision.transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])
train_set, val_set = dataset_loader()
train_loader = gluon.data.DataLoader(train_set.transform_first(tfs_train),
batch_size=batch_size, shuffle=True,
last_batch='rollover', num_workers=get_cpus(), pin_memory=True)
if val_set:
val_loader = gluon.data.DataLoader(val_set.transform_first(tfs_val),
batch_size=batch_size, shuffle=False,
last_batch='rollover', num_workers=get_cpus(), pin_memory=True)
# %% define models
generator = models.make_gen('v4')
discriminator = models.make_dis()
generator.initialize(init=mx.init.Normal(0.02), ctx=CTX)
discriminator.initialize(init=mx.init.Normal(0.02), ctx=CTX)
if getattr(opt, 'continue'):
import utils
makedirs(save_dir)
epoch_start = utils.load_model_from_params(generator, discriminator, save_dir)
logger.info('Continue training at {}, and rest epochs {}'.format(epoch_start, epoch - epoch_start))
generator.hybridize()
discriminator.hybridize()
# %% prepare training
logger.info("Prepare training")
if should_use_val:
history_labels = ['gloss', 'gval_loss', 'dloss', 'dval_loss']
else:
history_labels = ['gloss', 'dloss']
history = TrainingHistory(labels=history_labels)
loss = gluon.loss.SigmoidBinaryCrossEntropyLoss(from_sigmoid=False)
trainer_gen = gluon.Trainer(generator.collect_params(), optimizer='adam', optimizer_params={
'learning_rate': lr,
'beta1': 0.5
})
trainer_dis = gluon.Trainer(discriminator.collect_params(), optimizer='adam', optimizer_params={
'learning_rate': lr,
'beta1': 0.5
})
true_label = mx.nd.ones((batch_size,), ctx=CTX)
fake_label = mx.nd.zeros((batch_size,), ctx=CTX)
def make_noises(bs):
    """Return ``bs`` latent vectors drawn from N(0, 1) on ``CTX``,
    reshaped to the (bs, 512, 1, 1) layout the generator expects."""
    flat = mx.nd.random_normal(0, 1, shape=(bs, 512), ctx=CTX, dtype='float32')
    return flat.reshape((bs, 512, 1, 1))
pred_noise = make_noises(1)
mx.nd.save('pred_noise', pred_noise)
def validation(g, d, val_loader):
    """Return mean (generator, discriminator) losses over ``val_loader``.

    Runs entirely in predict mode (no gradients recorded).  Relies on the
    module-level ``loss``, ``true_label``/``fake_label`` targets,
    ``make_noises`` and ``CTX``.
    """
    g_val_loss = 0.0
    d_val_loss = 0.0
    iter_times = 0
    for data, _ in tqdm.tqdm(
            val_loader,
            desc="Validating",
            leave=False,
            unit='batch',
            unit_scale=True,
            mininterval=1,
            maxinterval=5,
            dynamic_ncols=True):
        iter_times += 1
        bs = len(data)
        # (sic: "nosise") a noise batch matching the data batch size
        nosise = make_noises(bs)
        data = data.as_in_context(CTX)
        with autograd.predict_mode():
            # loss for d
            # Discriminator loss: real batch vs. a freshly generated fake batch.
            out = d(data)
            err2real = loss(out, true_label)
            fake_img = g(nosise)
            out = d(fake_img)
            err2fake = loss(out, fake_label)
            err4dis = err2real + err2fake
            d_val_loss += err4dis.mean().asscalar()
            # loss for g
            # Generator loss: how well fresh fakes fool the discriminator.
            fake_img = g(nosise)
            out = d(fake_img)
            err4gen = loss(out, true_label)
            g_val_loss += err4gen.mean().asscalar()
    return g_val_loss / iter_times, d_val_loss / iter_times
# %% begin training
# Per-epoch accumulators: *_iter_times count updates within the current
# epoch, *_update_times count updates over the whole run (used for the
# prediction schedule), *_train_loss accumulate per-update losses.
d_iter_times = 0
g_iter_times = 0
d_update_times = 0
g_update_times = 0
g_train_loss = 0.0
d_train_loss = 0.0
logger.info("Begin training")
for ep in tqdm.tqdm(range(epoch_start, epoch + 1),
                    total=epoch,
                    desc="Total Progress",
                    leave=False,
                    initial=epoch_start,
                    unit='epoch',
                    unit_scale=True,
                    mininterval=10,
                    maxinterval=100,
                    dynamic_ncols=True):
    for data, _ in tqdm.tqdm(
            train_loader,
            desc="Epoch {}".format(ep),
            leave=False,
            unit='batch',
            unit_scale=True,
            mininterval=1,
            maxinterval=5,
            dynamic_ncols=True):
        bs = len(data)
        nosise = make_noises(bs)
        data = data.as_in_context(CTX)
        # begin training discriminator
        with autograd.record():
            d_iter_times += 1
            d_update_times += 1
            # train with real image
            out = discriminator(data)
            err2real = loss(out, true_label)
            # train with fake image
            # detach the input, or its gradients will be computed
            with autograd.predict_mode():
                fake_img = generator(nosise)
            out = discriminator(fake_img.detach())
            err2fake = loss(out, fake_label)
            err4dis = err2real + err2fake
        err4dis.backward()
        trainer_dis.step(bs)
        d_train_loss += err4dis.mean().asscalar()
        # Update the generator once for every 5 discriminator updates.
        if d_iter_times % 5 == 0:
            g_iter_times += 1
            g_update_times += 1
            # begin training generator
            with autograd.record():
                fake_img = generator(nosise)
                with autograd.predict_mode():
                    out = discriminator(fake_img)
                err4gen = loss(out, true_label)
            err4gen.backward()
            trainer_gen.step(bs)
            g_train_loss += err4gen.mean().asscalar()
    # Bug fix: each accumulated loss must be averaged over its *own*
    # update count (the original divided g's loss by d's counter and
    # vice versa).  A short epoch can also leave a counter at zero, so
    # guard against division by zero.
    g_train_loss /= max(g_iter_times, 1)
    d_train_loss /= max(d_iter_times, 1)
    # use validation set or not
    if should_use_val:
        g_val_loss, d_val_loss = validation(generator, discriminator, val_loader)
        history.update([g_train_loss, g_val_loss, d_train_loss, d_val_loss])
        logger.info("Generator[train: {}, val: {}]".format(g_train_loss, g_val_loss))
        logger.info("Discriminator[train: {}, val: {}]".format(d_train_loss, d_val_loss))
    else:
        history.update([g_train_loss, d_train_loss])
        logger.info("Generator[{}], Discriminator[{}]".format(g_train_loss, d_train_loss))
    g_train_loss = 0.0
    d_train_loss = 0.0
    d_iter_times = 0
    g_iter_times = 0
    # make a prediction
    # Periodically render one random sample plus the fixed ``pred_noise``
    # sample so progress on the same latent can be compared over time.
    if g_update_times % pred_per_epoch == 0:
        fake = generator(make_noises(1))[0]
        unique_fake = generator(pred_noise)[0]
        pred_path = 'logs/pred-dcgan'
        pred_unique_path = os.path.join(pred_path, 'unique')
        makedirs(pred_path)
        makedirs(pred_unique_path)
        vis.show_img(fake.transpose((1, 2, 0)), save_path=pred_path)
        vis.show_img(unique_fake.transpose((1, 2, 0)), save_path=pred_unique_path)
    # save history plot every epoch
    history.plot(save_path='logs/histories-dcgan')
    # save checkpoint
    if should_save_checkpoint:
        if ep % save_per_epoch == 0:
            generator.save_parameters(os.path.join(save_dir, 'generator_{:04d}.params'.format(ep)))
            discriminator.save_parameters(os.path.join(save_dir, 'discriminator_{:04d}.params'.format(ep)))
# Final artefacts after the last epoch.
history.plot(save_path='logs/histories-dcgan')
generator.save_parameters(os.path.join(save_dir, 'generator_{:04d}.params'.format(ep)))
|
4,193 | b2b47b394eadebda5c51e89abd27832f9dbd4c8c | from gymnasium.spaces import Box, Discrete
import numpy as np
from typing import Optional, TYPE_CHECKING, Union
from ray.rllib.env.base_env import BaseEnv
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.tf.tf_action_dist import Categorical, Deterministic
from ray.rllib.models.torch.torch_action_dist import (
TorchCategorical,
TorchDeterministic,
)
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.annotations import override, PublicAPI
from ray.rllib.utils.exploration.exploration import Exploration
from ray.rllib.utils.framework import get_variable, try_import_tf, try_import_torch
from ray.rllib.utils.from_config import from_config
from ray.rllib.utils.numpy import softmax, SMALL_NUMBER
from ray.rllib.utils.typing import TensorType
if TYPE_CHECKING:
from ray.rllib.policy.policy import Policy
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
@PublicAPI
class ParameterNoise(Exploration):
"""An exploration that changes a Model's parameters.
Implemented based on:
[1] https://openai.com/research/better-exploration-with-parameter-noise
[2] https://arxiv.org/pdf/1706.01905.pdf
At the beginning of an episode, Gaussian noise is added to all weights
of the model. At the end of the episode, the noise is undone and an action
diff (pi-delta) is calculated, from which we determine the changes in the
noise's stddev for the next episode.
"""
def __init__(
self,
action_space,
*,
framework: str,
policy_config: dict,
model: ModelV2,
initial_stddev: float = 1.0,
random_timesteps: int = 10000,
sub_exploration: Optional[dict] = None,
**kwargs
):
"""Initializes a ParameterNoise Exploration object.
Args:
initial_stddev: The initial stddev to use for the noise.
random_timesteps: The number of timesteps to act completely
randomly (see [1]).
sub_exploration: Optional sub-exploration config.
None for auto-detection/setup.
"""
assert framework is not None
super().__init__(
action_space,
policy_config=policy_config,
model=model,
framework=framework,
**kwargs
)
self.stddev = get_variable(
initial_stddev, framework=self.framework, tf_name="stddev"
)
self.stddev_val = initial_stddev # Out-of-graph tf value holder.
# The weight variables of the Model where noise should be applied to.
# This excludes any variable, whose name contains "LayerNorm" (those
# are BatchNormalization layers, which should not be perturbed).
self.model_variables = [
v
for k, v in self.model.trainable_variables(as_dict=True).items()
if "LayerNorm" not in k
]
# Our noise to be added to the weights. Each item in `self.noise`
# corresponds to one Model variable and holding the Gaussian noise to
# be added to that variable (weight).
self.noise = []
for var in self.model_variables:
name_ = var.name.split(":")[0] + "_noisy" if var.name else ""
self.noise.append(
get_variable(
np.zeros(var.shape, dtype=np.float32),
framework=self.framework,
tf_name=name_,
torch_tensor=True,
device=self.device,
)
)
# tf-specific ops to sample, assign and remove noise.
if self.framework == "tf" and not tf.executing_eagerly():
self.tf_sample_new_noise_op = self._tf_sample_new_noise_op()
self.tf_add_stored_noise_op = self._tf_add_stored_noise_op()
self.tf_remove_noise_op = self._tf_remove_noise_op()
# Create convenience sample+add op for tf.
with tf1.control_dependencies([self.tf_sample_new_noise_op]):
add_op = self._tf_add_stored_noise_op()
with tf1.control_dependencies([add_op]):
self.tf_sample_new_noise_and_add_op = tf.no_op()
# Whether the Model's weights currently have noise added or not.
self.weights_are_currently_noisy = False
# Auto-detection of underlying exploration functionality.
if sub_exploration is None:
# For discrete action spaces, use an underlying EpsilonGreedy with
# a special schedule.
if isinstance(self.action_space, Discrete):
sub_exploration = {
"type": "EpsilonGreedy",
"epsilon_schedule": {
"type": "PiecewiseSchedule",
# Step function (see [2]).
"endpoints": [
(0, 1.0),
(random_timesteps + 1, 1.0),
(random_timesteps + 2, 0.01),
],
"outside_value": 0.01,
},
}
elif isinstance(self.action_space, Box):
sub_exploration = {
"type": "OrnsteinUhlenbeckNoise",
"random_timesteps": random_timesteps,
}
# TODO(sven): Implement for any action space.
else:
raise NotImplementedError
self.sub_exploration = from_config(
Exploration,
sub_exploration,
framework=self.framework,
action_space=self.action_space,
policy_config=self.policy_config,
model=self.model,
**kwargs
)
# Whether we need to call `self._delayed_on_episode_start` before
# the forward pass.
self.episode_started = False
@override(Exploration)
def before_compute_actions(
self,
*,
timestep: Optional[int] = None,
explore: Optional[bool] = None,
tf_sess: Optional["tf.Session"] = None
):
explore = explore if explore is not None else self.policy_config["explore"]
# Is this the first forward pass in the new episode? If yes, do the
# noise re-sampling and add to weights.
if self.episode_started:
self._delayed_on_episode_start(explore, tf_sess)
# Add noise if necessary.
if explore and not self.weights_are_currently_noisy:
self._add_stored_noise(tf_sess=tf_sess)
# Remove noise if necessary.
elif not explore and self.weights_are_currently_noisy:
self._remove_noise(tf_sess=tf_sess)
@override(Exploration)
def get_exploration_action(
self,
*,
action_distribution: ActionDistribution,
timestep: Union[TensorType, int],
explore: Union[TensorType, bool]
):
# Use our sub-exploration object to handle the final exploration
# action (depends on the algo-type/action-space/etc..).
return self.sub_exploration.get_exploration_action(
action_distribution=action_distribution, timestep=timestep, explore=explore
)
@override(Exploration)
def on_episode_start(
self,
policy: "Policy",
*,
environment: BaseEnv = None,
episode: int = None,
tf_sess: Optional["tf.Session"] = None
):
# We have to delay the noise-adding step by one forward call.
# This is due to the fact that the optimizer does it's step right
# after the episode was reset (and hence the noise was already added!).
# We don't want to update into a noisy net.
self.episode_started = True
def _delayed_on_episode_start(self, explore, tf_sess):
# Sample fresh noise and add to weights.
if explore:
self._sample_new_noise_and_add(tf_sess=tf_sess, override=True)
# Only sample, don't apply anything to the weights.
else:
self._sample_new_noise(tf_sess=tf_sess)
self.episode_started = False
@override(Exploration)
def on_episode_end(self, policy, *, environment=None, episode=None, tf_sess=None):
# Remove stored noise from weights (only if currently noisy).
if self.weights_are_currently_noisy:
self._remove_noise(tf_sess=tf_sess)
@override(Exploration)
def postprocess_trajectory(
self,
policy: "Policy",
sample_batch: SampleBatch,
tf_sess: Optional["tf.Session"] = None,
):
noisy_action_dist = noise_free_action_dist = None
# Adjust the stddev depending on the action (pi)-distance.
# Also see [1] for details.
# TODO(sven): Find out whether this can be scrapped by simply using
# the `sample_batch` to get the noisy/noise-free action dist.
_, _, fetches = policy.compute_actions_from_input_dict(
input_dict=sample_batch, explore=self.weights_are_currently_noisy
)
# Categorical case (e.g. DQN).
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
# Deterministic (Gaussian actions, e.g. DDPG).
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
else:
raise NotImplementedError # TODO(sven): Other action-dist cases.
if self.weights_are_currently_noisy:
noisy_action_dist = action_dist
else:
noise_free_action_dist = action_dist
_, _, fetches = policy.compute_actions_from_input_dict(
input_dict=sample_batch, explore=not self.weights_are_currently_noisy
)
# Categorical case (e.g. DQN).
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
# Deterministic (Gaussian actions, e.g. DDPG).
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
if noisy_action_dist is None:
noisy_action_dist = action_dist
else:
noise_free_action_dist = action_dist
delta = distance = None
# Categorical case (e.g. DQN).
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
# Calculate KL-divergence (DKL(clean||noisy)) according to [2].
# TODO(sven): Allow KL-divergence to be calculated by our
# Distribution classes (don't support off-graph/numpy yet).
distance = np.nanmean(
np.sum(
noise_free_action_dist
* np.log(
noise_free_action_dist / (noisy_action_dist + SMALL_NUMBER)
),
1,
)
)
current_epsilon = self.sub_exploration.get_state(sess=tf_sess)[
"cur_epsilon"
]
delta = -np.log(1 - current_epsilon + current_epsilon / self.action_space.n)
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
# Calculate MSE between noisy and non-noisy output (see [2]).
distance = np.sqrt(
np.mean(np.square(noise_free_action_dist - noisy_action_dist))
)
current_scale = self.sub_exploration.get_state(sess=tf_sess)["cur_scale"]
delta = getattr(self.sub_exploration, "ou_sigma", 0.2) * current_scale
# Adjust stddev according to the calculated action-distance.
if distance <= delta:
self.stddev_val *= 1.01
else:
self.stddev_val /= 1.01
# Update our state (self.stddev and self.stddev_val).
self.set_state(self.get_state(), sess=tf_sess)
return sample_batch
def _sample_new_noise(self, *, tf_sess=None):
"""Samples new noise and stores it in `self.noise`."""
if self.framework == "tf":
tf_sess.run(self.tf_sample_new_noise_op)
elif self.framework == "tf2":
self._tf_sample_new_noise_op()
else:
for i in range(len(self.noise)):
self.noise[i] = torch.normal(
mean=torch.zeros(self.noise[i].size()), std=self.stddev
).to(self.device)
def _tf_sample_new_noise_op(self):
added_noises = []
for noise in self.noise:
added_noises.append(
tf1.assign(
noise,
tf.random.normal(
shape=noise.shape, stddev=self.stddev, dtype=tf.float32
),
)
)
return tf.group(*added_noises)
def _sample_new_noise_and_add(self, *, tf_sess=None, override=False):
if self.framework == "tf":
if override and self.weights_are_currently_noisy:
tf_sess.run(self.tf_remove_noise_op)
tf_sess.run(self.tf_sample_new_noise_and_add_op)
else:
if override and self.weights_are_currently_noisy:
self._remove_noise()
self._sample_new_noise()
self._add_stored_noise()
self.weights_are_currently_noisy = True
def _add_stored_noise(self, *, tf_sess=None):
"""Adds the stored `self.noise` to the model's parameters.
Note: No new sampling of noise here.
Args:
tf_sess (Optional[tf.Session]): The tf-session to use to add the
stored noise to the (currently noise-free) weights.
override: If True, undo any currently applied noise first,
then add the currently stored noise.
"""
# Make sure we only add noise to currently noise-free weights.
assert self.weights_are_currently_noisy is False
# Add stored noise to the model's parameters.
if self.framework == "tf":
tf_sess.run(self.tf_add_stored_noise_op)
elif self.framework == "tf2":
self._tf_add_stored_noise_op()
else:
for var, noise in zip(self.model_variables, self.noise):
# Add noise to weights in-place.
var.requires_grad = False
var.add_(noise)
var.requires_grad = True
self.weights_are_currently_noisy = True
def _tf_add_stored_noise_op(self):
"""Generates tf-op that assigns the stored noise to weights.
Also used by tf-eager.
Returns:
tf.op: The tf op to apply the already stored noise to the NN.
"""
add_noise_ops = list()
for var, noise in zip(self.model_variables, self.noise):
add_noise_ops.append(tf1.assign_add(var, noise))
ret = tf.group(*tuple(add_noise_ops))
with tf1.control_dependencies([ret]):
return tf.no_op()
def _remove_noise(self, *, tf_sess=None):
"""
Removes the current action noise from the model parameters.
Args:
tf_sess (Optional[tf.Session]): The tf-session to use to remove
the noise from the (currently noisy) weights.
"""
# Make sure we only remove noise iff currently noisy.
assert self.weights_are_currently_noisy is True
# Removes the stored noise from the model's parameters.
if self.framework == "tf":
tf_sess.run(self.tf_remove_noise_op)
elif self.framework == "tf2":
self._tf_remove_noise_op()
else:
for var, noise in zip(self.model_variables, self.noise):
# Remove noise from weights in-place.
var.requires_grad = False
var.add_(-noise)
var.requires_grad = True
self.weights_are_currently_noisy = False
def _tf_remove_noise_op(self):
"""Generates a tf-op for removing noise from the model's weights.
Also used by tf-eager.
Returns:
tf.op: The tf op to remve the currently stored noise from the NN.
"""
remove_noise_ops = list()
for var, noise in zip(self.model_variables, self.noise):
remove_noise_ops.append(tf1.assign_add(var, -noise))
ret = tf.group(*tuple(remove_noise_ops))
with tf1.control_dependencies([ret]):
return tf.no_op()
    @override(Exploration)
    def get_state(self, sess=None):
        # Only the current stddev value needs to survive checkpointing;
        # the noise tensors themselves are resampled each episode.
        return {"cur_stddev": self.stddev_val}
    @override(Exploration)
    def set_state(self, state: dict, sess: Optional["tf.Session"] = None) -> None:
        self.stddev_val = state["cur_stddev"]
        # Set self.stddev to calculated value.
        # Mirror the plain-python value into the framework variable:
        # tf1 graph-mode variables use .load() through the session, a bare
        # float is simply rebound, and anything else (eager tf / torch
        # variables from get_variable) takes .assign().
        if self.framework == "tf":
            self.stddev.load(self.stddev_val, session=sess)
        elif isinstance(self.stddev, float):
            self.stddev = self.stddev_val
        else:
            self.stddev.assign(self.stddev_val)
|
4,194 | ae82ecadb61fd87afbc83926b9dc9d5f7e8c35a0 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-07-31 18:38
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated (Django 1.10.5): adds Category.is_root and loosens
    # Category.parent into a nullable self-referencing foreign key.
    dependencies = [
        ('product', '0007_auto_20170731_1812'),
    ]
    operations = [
        migrations.AddField(
            model_name='category',
            name='is_root',
            field=models.BooleanField(default=False, verbose_name='是否是一级分类'),
        ),
        # NOTE(review): default=0 combined with null=True on a FK is
        # unusual — 0 is unlikely to reference a real Category; confirm.
        migrations.AlterField(
            model_name='category',
            name='parent',
            field=models.ForeignKey(blank=True, default=0, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='product.Category', verbose_name='上级分类'),
        ),
    ]
|
4,195 | a7be2f43c6ec8d1576ed194a75762a36089cb052 | num_str = "1"
num_str1 = "\u00b2"
num_str2 = "一千零一"
# 判断字符串是否只包含数字
# 1.三种方法都不能判断小数
# 2.isdigit 和 isnumeric 比 isdecimal 强大一些,后者只能判断正常数字,前两者可以判断带有数字的符号,如平方
# isnumeric 还可以判断中文数字
print(num_str)
print(num_str1)
print(num_str.isdecimal())
print(num_str1.isdecimal())
print(num_str.isdigit())
print(num_str1.isdigit())
print(num_str.isnumeric())
print(num_str1.isnumeric())
print(num_str2.isnumeric()) |
4,196 | 34aa08b9a5a89d3fca129271a9e812e2382ca88e | 207. Course Schedule
Some courses may have prerequisites, for example to take course 0 you have to first take course 1,
which is expressed as a pair: [0,1]
Given the total number of courses and a list of prerequisite pairs, is it possible for you to finish all courses?
For example:
2, [[1,0]]
There are a total of 2 courses to take. To take course 1 you should have finished course 0. So it is possible.
2, [[1,0],[0,1]]
There are a total of 2 courses to take. To take course 1 you should have finished course 0,
and to take course 0 you should also have finished course 1. So it is impossible.
Note:
The input prerequisites is a graph represented by a list of edges, not adjacency matrices.
Read more about how a graph is represented.
You may assume that there are no duplicate edges in the input prerequisites.
1. intuition: as long as there is no cycle, return true
how do we look for cycle?
hashmap to track which pairs we've seen
hashmap to track pairs [a,b]
class Solution(object):
    """Course Schedule: all courses are completable iff the prerequisite
    graph contains no directed cycle (checked with a coloured DFS)."""

    def canFinish(self, numCourses, prerequisites):
        """
        :type numCourses: int
        :type prerequisites: List[List[int]]
        :rtype: bool
        """
        # adjacency[c] lists the direct prerequisites of course c.
        adjacency = [[] for _ in range(numCourses)]
        for course, prereq in prerequisites:
            adjacency[course].append(prereq)
        # 0 = unvisited, -1 = on the current DFS path, 1 = fully explored.
        state = [0] * numCourses

        def acyclic_from(node):
            if state[node] == -1:
                return False  # back-edge: found a cycle
            if state[node] == 1:
                return True   # already cleared
            state[node] = -1
            if not all(acyclic_from(nxt) for nxt in adjacency[node]):
                return False
            state[node] = 1
            return True

        return all(acyclic_from(c) for c in range(numCourses))
|
4,197 | 109a0ba0952bd5923ecbefa41556de7aa9f9eea8 | # Copyright (c) 2012 - Samuel Loretan <tynril at gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import urllib2
try:
import json
except ImportError:
import simplejson as json
class Gw2Spidy:
    """This utility class allows easy access to the GW2Spidy data.

    All methods are static and hit the public JSON API; the session cookie
    handed out on the first request is cached in ``headers`` and replayed
    on later calls.

    NOTE(review): built on ``urllib2`` (imported at module top), so this
    module is Python 2 only; port to ``urllib.request`` for Python 3.
    """
    headers = {'User-Agent': 'gw2spidy.py'}
    @staticmethod
    def getTypesList():
        """Get a list of item types and subtypes."""
        return Gw2Spidy._request('types')['results']
    @staticmethod
    def getDisciplinesList():
        """Get a list of crafting disciplines."""
        return Gw2Spidy._request('disciplines')['results']
    @staticmethod
    def getRaritiesList():
        """Get a list of item rarities."""
        return Gw2Spidy._request('rarities')['results']
    @staticmethod
    def getAllItemsList():
        """Get a list of all items."""
        return Gw2Spidy._request('all-items', 'all')['results']
    @staticmethod
    def getItemsOfType(typeId):
        """Get a list of all items of a certain type."""
        return Gw2Spidy._request('all-items', str(typeId))['results']
    @staticmethod
    def getItemData(itemId):
        """Get the data of a particular item. High frequency of update."""
        return Gw2Spidy._request('item', str(itemId))['result']
    @staticmethod
    def getItemBuyListings(itemId, allPages = False):
        """Get a list of all buy offers for a certain item."""
        return Gw2Spidy._paginatedRequest(allPages, 'listings', str(itemId), 'buy')
    @staticmethod
    def getItemSellListings(itemId, allPages = False):
        """Get a list of all sell offers for a certain item."""
        return Gw2Spidy._paginatedRequest(allPages, 'listings', str(itemId), 'sell')
    @staticmethod
    def searchItems(name, allPages = False):
        """Search items by name. Might be slow, not recommended."""
        return Gw2Spidy._paginatedRequest(allPages, 'item-search', name)
    @staticmethod
    def getAllRecipesList(allPages = False):
        """Get a list of all crafting recipes."""
        return Gw2Spidy._paginatedRequest(allPages, 'recipes', 'all')
    @staticmethod
    def getRecipesOfDiscipline(disciplineId, allPages = False):
        """Get a list of all crafting recipes for a certain discipline."""
        return Gw2Spidy._paginatedRequest(allPages, 'recipes', str(disciplineId))
    @staticmethod
    def getRecipeData(recipeId):
        """Get the data of a particular recipe."""
        # NOTE(review): unlike getItemData this returns the raw response
        # without unwrapping ['result'] — confirm whether that is intended.
        return Gw2Spidy._request('recipe', str(recipeId))
    @staticmethod
    def getGemPrice():
        """Get the current gem/gold conversion rate."""
        return Gw2Spidy._request('gem-price')
    @staticmethod
    def _paginatedRequest(allPages, *args):
        """Handle paginated requests, downloading all pages if requested.

        When ``allPages`` is false only page 0's results are returned;
        otherwise pages are fetched until ``page == last_page`` and the
        concatenated ``results`` lists are returned.
        """
        data = []
        currentPage = 0
        while True:
            newData = Gw2Spidy._request(*(args + (str(currentPage),)))
            if not allPages:
                return newData['results']
            data.extend(newData['results'])
            currentPage = currentPage + 1
            if newData['page'] == newData['last_page']:
                break
        return data
    @staticmethod
    def _request(*args):
        """Makes a request on the GW2Spidy API.

        On the first call (no cached cookie) the response's set-cookie
        value is stashed into ``headers`` so later requests reuse the
        same session.
        """
        url = 'http://www.gw2spidy.com/api/v0.9/json/' + '/'.join(args)
        r = urllib2.Request(url, headers=Gw2Spidy.headers)
        if 'Cookie' not in Gw2Spidy.headers:
            resp = urllib2.urlopen(r)
            if 'set-cookie' in resp.headers:
                Gw2Spidy.headers['Cookie'] = resp.headers['set-cookie'].split(';', 1)[0]
            return json.loads(resp.read())
        return json.loads(urllib2.urlopen(r).read())
|
4,198 | 7a5106456d0fdd905829c5aa1f4a69b027f3a04c | from pymongo import MongoClient
import Config
DB = Config.DB
COLLECTION = Config.COLLECTION
def connectMongo():
    """Open a connection to the local MongoDB and return the configured database."""
    client = MongoClient("mongodb://localhost")
    return client[DB]
def connectMongoCollection(collection=COLLECTION):
    """Return a handle to ``collection`` inside the configured database.

    Reuses ``connectMongo`` instead of duplicating the connection logic;
    defaults to the collection named in ``Config``.
    """
    return connectMongo()[collection]
|
4,199 | 601089c2555e6fc75803087ee1d8af7f8180f651 | from collections import defaultdict
def solution(clothes):
    """Count the distinct non-empty outfits.

    ``clothes`` is a list of ``[item_name, category]`` pairs.  For each
    category you may wear exactly one of its items or skip the category,
    giving ``prod(count + 1)`` combinations; subtracting one removes the
    all-skip (naked) case.
    """
    per_category = defaultdict(int)
    for _name, category in clothes:
        per_category[category] += 1
    total = 1
    for count in per_category.values():
        total *= count + 1
    return total - 1
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.