index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
993,900 | afd01cac2bed20d34cf345338ef18826b08abfee | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 10 18:35:13 2019
@author: abhinav
"""
#Linear Regression Model
# Fits MaxTemp ~ MinTemp with ordinary least squares on weather.csv and
# reports MAE / MSE on a held-out 20% split.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as seabornInstance
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
# Load the data set; assumes MinTemp/MaxTemp columns exist -- TODO confirm.
dataset = pd.read_csv('weather.csv')
print (dataset.shape)
print (dataset.describe())
# Scatter plot of the raw relationship being modelled.
dataset.plot(x='MinTemp', y='MaxTemp',style='o')
plt.title('MinTemp vs MaxTemp')
plt.xlabel('MinTemp')
plt.ylabel('MaxTemp')
plt.show()
#Average of MaxTemp
plt.figure(figsize=(10,15))
plt.tight_layout()
# NOTE(review): distplot is deprecated in recent seaborn (use histplot/displot).
seabornInstance.distplot(dataset['MaxTemp'])
plt.show()
#Data Splicing
# Reshape to column vectors: scikit-learn expects 2-D feature arrays.
X = dataset['MinTemp'].values.reshape(-1,1)
y = dataset['MaxTemp'].values.reshape(-1,1)
# 80/20 split with a fixed seed so the run is reproducible.
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2,random_state=0)
#Training the Model
regressor = LinearRegression()
regressor.fit(X_train,y_train)
#Intercept and Coefficient
print ('Intercept',regressor.intercept_)
print ('Coefficient',regressor.coef_)
#Predicting for X_test
y_pred = regressor.predict(X_test)
#Comparison Between Actual and Predicted
df = pd.DataFrame({'Actual':y_test.flatten(),'Predicted':y_pred.flatten()})
#df.to_excel('LinearRegressionResult.xlsx',sheet_name='Actual and Predicted')
print (df)
# Bar chart of the first 25 actual-vs-predicted pairs.
df1=df.head(25)
df1.plot(kind='bar',figsize=(16,10))
plt.grid(which='major',linestyle='-',linewidth='0.5', color='green')
plt.grid(which='minor',linestyle=':',linewidth='0.5', color='black')
plt.show()
# Regression line over the test points.
plt.scatter(X_test,y_test,color='grey')
plt.plot(X_test,y_pred,linewidth=2)
plt.show()
#Performance
print("Mean Absolute Error:",metrics.mean_absolute_error(y_test,y_pred))
print("Mean Squared Error:",metrics.mean_squared_error(y_test,y_pred))
print("Root Mean Squared Error:",np.sqrt(metrics.mean_squared_error(y_test,y_pred))) |
993,901 | 60afe4851e38e2debb6296f6992280d295d5250b | # -*- coding:utf-8 -*-
class Solution:
    def FirstNotRepeatingChar(self, s):
        """Return the index of the first character of *s* that occurs
        exactly once, or -1 if there is none (including empty input).

        Two passes: count every character, then scan once with
        enumerate().  The original second pass called ``s.find`` per
        character, re-scanning the string (O(n^2) worst case).
        """
        if not s:
            return -1
        counts = {}
        for ch in s:
            counts[ch] = counts.get(ch, 0) + 1
        for idx, ch in enumerate(s):
            if counts[ch] == 1:
                return idx
        return -1
|
993,902 | 3c78e5e47f8bc4ddbb36fe5f12ab21a6425b4a35 | #! /usr/bin/env python
import rospy
import pymap3d as pm
from sensor_msgs.msg import NavSatFix
from nav_msgs.msg import Odometry
from geodetic_to_enu_conversion_pkg.msg import Gps
def gps_callback(msg):
global datum_lat
global datum_lon
global i
if i==0:
datum_lat = msg.latitude
datum_lon = msg.longitude
i = i + 1
current_lat = msg.latitude
current_lon = msg.longitude
x, y, _ = pm.geodetic2enu(current_lat, current_lon, 0, datum_lat, datum_lon, 0)
position = Gps()
position.x = x
position.y = y
pub.publish(position)
if __name__ == '__main__':
global i
i = 0
rospy.init_node("geodetic_to_enu_conversion_node")
sub = rospy.Subscriber("/gps", NavSatFix, gps_callback)
pub = rospy.Publisher("/odometry/gps", Gps, queue_size=1)
rospy.spin() |
993,903 | 888e138ab9f77621222bdc0fc424c4bcc4df920f | bind = '127.0.0.1:8000'
workers = 3  # number of gunicorn worker processes
user = 'amplua'  # OS user the worker processes run as
timeout = 120 |
993,904 | 81b7db4420021de023cd6c4033041d3e79f6498f | #
# Copyright (C) 2020 Arm Mbed. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Apply copyright and licensing to all source files present in a project.
This is to comply with OpenChain certification;
https://github.com/OpenChain-Project/Curriculum/blob/master/guides/reusing_software.md#2-include-a-copyright-notice-and-license-in-each-file
"""
import argparse
import logging
import subprocess
import sys
import tempfile
from datetime import datetime
from mbed_tools_ci_scripts.utils.configuration import configuration, ConfigurationVariable
from mbed_tools_ci_scripts.utils.logging import set_log_level, log_exception
from mbed_tools_ci_scripts.utils.python_helpers import flatten_dictionary
from pathlib import Path
logger = logging.getLogger(__name__)
LICENCE_HEADER_TEMPLATE = """Copyright (C) {date} {author}. All rights reserved.
SPDX-License-Identifier: {licence_identifier}
"""
FILES_TO_IGNORE = ["*.yml", "*.yaml"]
def add_licence_header(verbose_count: int) -> None:
    """Puts a copyright notice at the top of every source file.
    Wrapper over the [licenseheaders tool](https://github.com/johann-petrak/licenseheaders).

    :param verbose_count: number of ``-v`` flags to forward to licenseheaders.
    """
    # copyright (https://github.com/knipknap/copyright) was first considered but
    # comprises quite a few bugs and does not seem active anymore.
    template_string = _generate_header_template()
    # delete=False: the file must outlive this handle so the external
    # licenseheaders process can read it after we close it.
    with tempfile.NamedTemporaryFile(suffix=".tmpl", delete=False) as template_file:
        template_file_path = Path(template_file.name)
        logger.debug(f"Creates template file in {str(template_file_path)}")
        template_file.write(template_string.encode("utf8"))
        template_file.close()
        copyright_config = get_tool_config(template_file_path)
        _call_licensehearders(copyright_config, verbose_count)
def _generate_header_template() -> str:
    """Generates the header template which is put at the top of source files.

    :return: the template text with the licence identifier filled in.
    """
    # ${owner} and ${years} are licenseheaders placeholders, expanded by the
    # tool itself (not by this script), hence the literal "${...}" strings.
    return LICENCE_HEADER_TEMPLATE.format(
        licence_identifier=configuration.get_value(ConfigurationVariable.FILE_LICENCE_IDENTIFIER),
        author="${owner}",
        date="${years}",
    )
def _call_licensehearders(config: dict, verbose_count: int) -> None:
    """Runs licenseheaders tool.

    Each config entry becomes a ``--key value`` pair; *verbose_count*
    ``v``s are appended as a single ``-vv...`` flag.
    """
    command = ["licenseheaders"]
    option_map = {f"--{key}": value for key, value in config.items()}
    command.extend(flatten_dictionary(option_map))
    if verbose_count > 0:
        command.append("-" + "v" * verbose_count)
    subprocess.check_call([str(part) for part in command])
def _determines_copyright_dates() -> str:
    """Determines the years the copyright is in use for.

    :return: e.g. ``"2020"`` or ``"2018-2020"`` (configured start year
        through the current year).
    """
    this_year = datetime.now().year
    copyright_start_date = configuration.get_value(ConfigurationVariable.COPYRIGHT_START_DATE)
    return _to_copyright_date_string(copyright_start_date, this_year)
def _to_copyright_date_string(start: int, current: int) -> str:
return f"{current}" if current == start else f"{start}-{current}"
def get_tool_config(template_file: Path) -> dict:
    """Gets the configuration for licenseheaders.

    :param template_file: path of the rendered header template file.
    :return: mapping of licenseheaders CLI option name to its value.
    """
    copyright_dates = _determines_copyright_dates()
    return {
        "owner": configuration.get_value(ConfigurationVariable.ORGANISATION),
        "dir": configuration.get_value(ConfigurationVariable.PROJECT_ROOT),
        "projname": configuration.get_value(ConfigurationVariable.PROJECT_NAME),
        "tmpl": str(template_file),
        "years": copyright_dates,
        # Also treat .toml files with the python comment style.
        "additional-extensions": "python=.toml",
        "exclude": FILES_TO_IGNORE,
    }
def main() -> int:
    """Creates a CLI.

    :return: process exit code -- 0 on success, 1 if applying headers failed.
    """
    parser = argparse.ArgumentParser(description="Adds licence header to every source file of a project.")
    parser.add_argument("-v", "--verbose", action="count", default=0, help="Verbosity, by default errors are reported.")
    args = parser.parse_args()
    set_log_level(args.verbose)
    try:
        add_licence_header(args.verbose)
    # Broad catch is deliberate: this is the process boundary -- the error
    # is logged and mapped to a non-zero exit code.
    except Exception as e:
        log_exception(logger, e)
        return 1
    return 0
if __name__ == "__main__":
sys.exit(main())
|
993,905 | 58fc2a36f449f88a3b1d8d1fd58199bd303abded | import argparse, codecs
def manipulate_data(golds, hyps):
    """Score predicted morphological feature sets against gold ones.

    Each element of *golds*/*hyps* is a feature dict; values equal to
    "NULL" are ignored.  Scoring compares the remaining value sets.

    :param golds: gold feature dicts, one per token.
    :param hyps: predicted feature dicts, parallel to *golds*.
    :return: (exact-match accuracy, micro F1, precision, recall), each
        as a percentage.
    """
    n_pairs = 0
    n_exact = 0
    overlap_total = 0
    pred_total = 0
    gold_total = 0
    for gold_row, pred_row in zip(golds, hyps):
        gold_feats = {v for v in gold_row.values() if v != "NULL"}
        pred_feats = {v for v in pred_row.values() if v != "NULL"}
        n_pairs += 1
        if gold_feats == pred_feats:
            n_exact += 1
        overlap_total += len(gold_feats & pred_feats)
        pred_total += len(pred_feats)
        gold_total += len(gold_feats)
    # `or 1` guards the divisions when no features were seen at all.
    precision = overlap_total / (pred_total or 1)
    recall = overlap_total / (gold_total or 1)
    f1 = 2 * (precision * recall) / (precision + recall + 1E-20)
    return (100 * n_exact / n_pairs, 100 * f1, 100 * precision, 100 * recall)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input", type=str)
args = parser.parse_args()
golds, hyps = [],[]
with codecs.open(args.input, "r", encoding='utf-8') as fin:
for line in fin:
if line == "" or line == "\n":
continue
else:
info = line.strip().split("\t")
gold_data = info[6]
pred_data = info[5]
p,g = {},{}
for feat in pred_data.split("|"):
key, value = feat.split("=")[0], feat.split("=")[1]
if value == "_":
value = "NULL"
p[key] = value
for feat in gold_data.split("|"):
key, value = feat.split("=")[0], feat.split("=")[1]
if value == "_":
value = "NULL"
g[key] = value
golds.append(g)
hyps.append(p)
(acc, f1, p, r) = manipulate_data(golds, hyps)
print(acc, f1)
|
993,906 | 45f79179d3c9e2ec08624cdc4ded0e298cb6e4a4 | """
There are 100 chairs arranged in a circle.
These chairs are numbered sequentially from One to One Hundred.
At some point in time, the person in chair #1 will be
told to leave the room. The person in chair #2 will
be skipped, and the person in chair #3 will be told to
leave. This pattern of skipping one person and telling
the next to leave will keep going around the circle until
there is only one person remaining.. the survivor.
Answer: 72
"""
import numpy as np
def NumChairs(n):
    """Simulate the circular elimination game and return the survivor.

    Chairs 1..n sit in a circle; chair 1 leaves first, then every second
    remaining chair leaves, until a single chair is left.

    :param n: number of chairs; must be positive.
    :return: the surviving chair number (int), or None when n <= 0.
        (Returning the value generalizes the original print-only API;
        existing callers that ignored the None return are unaffected.)
    """
    if n <= 0:
        print("Number of chairs must be greater than zero")
        return None
    chairs = np.arange(1, n + 1)
    idx = 0
    while len(chairs) > 1:
        chairs = np.delete(chairs, idx)   # the chair at idx leaves
        idx = (idx + 1) % len(chairs)     # skip exactly one chair
    survivor = int(chairs[0])
    print("The survivor is chair number " + str(survivor))
    return survivor
def main():
    # Solve the puzzle from the module docstring (100 chairs -> chair 72).
    NumChairs(100)
if __name__=='__main__':
    main()
|
993,907 | c4397fb934504ba4177ab6448264d491b12b9e29 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.ShopScoreResultInfo import ShopScoreResultInfo
class ShopDataDetail(object):
    """Alipay shop-detail record: location fields, shop identity and an
    optional nested ShopScoreResultInfo score block.

    Serialization keeps the generated-code contract: falsy fields are
    omitted from the dict, and nested objects exposing to_alipay_dict()
    are serialized recursively.
    """

    # Single source of truth for the field names used by __init__,
    # to_alipay_dict() and from_alipay_dict().
    _FIELDS = ('city_name', 'county_name', 'poi_id', 'province_name',
               'shop_address', 'shop_name', 'shop_score_result')

    def __init__(self):
        for field in self._FIELDS:
            setattr(self, '_' + field, None)

    @property
    def city_name(self):
        return self._city_name

    @city_name.setter
    def city_name(self, value):
        self._city_name = value

    @property
    def county_name(self):
        return self._county_name

    @county_name.setter
    def county_name(self, value):
        self._county_name = value

    @property
    def poi_id(self):
        return self._poi_id

    @poi_id.setter
    def poi_id(self, value):
        self._poi_id = value

    @property
    def province_name(self):
        return self._province_name

    @province_name.setter
    def province_name(self, value):
        self._province_name = value

    @property
    def shop_address(self):
        return self._shop_address

    @shop_address.setter
    def shop_address(self, value):
        self._shop_address = value

    @property
    def shop_name(self):
        return self._shop_name

    @shop_name.setter
    def shop_name(self, value):
        self._shop_name = value

    @property
    def shop_score_result(self):
        return self._shop_score_result

    @shop_score_result.setter
    def shop_score_result(self, value):
        # Accept either a ready ShopScoreResultInfo or its dict form.
        if not isinstance(value, ShopScoreResultInfo):
            value = ShopScoreResultInfo.from_alipay_dict(value)
        self._shop_score_result = value

    def to_alipay_dict(self):
        """Serialize to a plain dict; falsy fields are skipped, nested
        objects are serialized via their own to_alipay_dict()."""
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a ShopDataDetail from *d*; returns None for empty input.
        Assignment goes through the property setters, so nested score
        dicts are converted to ShopScoreResultInfo automatically."""
        if not d:
            return None
        o = ShopDataDetail()
        for field in ShopDataDetail._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
|
def abc050_b():
    """AtCoder ABC050 B: read N task times, then for each query (p, x)
    print the total time with task p's time replaced by x."""
    _ = int(input())
    times = list(map(int, input().split()))
    query_count = int(input())
    queries = [tuple(map(int, input().split())) for _ in range(query_count)]
    total = sum(times)
    for p, x in queries:
        print(total - times[p - 1] + x)
abc050_b() |
993,909 | 49781a6050eb6b5767b23c56f938ecd30cac0d80 | import asyncio
import json
import logging
from aiohttp import web
from prometheus_client import Summary
from prometheus_client import Histogram
from prometheus_async.aio import time
REQ_TIME = Summary("cancel_req_time", "time spent with cancel endpoint")
REQ_HISTOGRAM_TIME = Histogram("cancel_req_histogram", "Histogram for cancel endpoint")
logger = logging.getLogger(__name__)
@time(REQ_TIME)
@time(REQ_HISTOGRAM_TIME)
async def handle_cancel(request):
    """Cancel every asyncio task whose ``task_id`` attribute equals the
    ``task_id`` path parameter.

    :param request: aiohttp request with a ``task_id`` match-info entry.
    :return: 200 JSON response when at least one task was cancelled,
        400 when the id is missing or no task matched.
    """
    task_id_to_cancel = request.match_info['task_id']
    logger.info("Cancel request obtained: " + str(task_id_to_cancel))
    if not task_id_to_cancel:
        return await bad_response("Task id is not provided")
    # asyncio.Task.all_tasks() was removed in Python 3.9; the module-level
    # asyncio.all_tasks() is the supported replacement (we are inside a
    # coroutine, so a running loop is guaranteed).
    cancelled_tasks = False
    for task in asyncio.all_tasks():
        # task_id is attached to tasks elsewhere in the service --
        # tasks without it are skipped via the None default.
        if getattr(task, "task_id", None) == task_id_to_cancel:
            task.cancel()
            cancelled_tasks = True
    if cancelled_tasks:
        return await success_response("Tasks with task_id: " + str(task_id_to_cancel) + " cancelled")
    # if we are here, the task id wasn't found
    return await bad_response("task id " + str(task_id_to_cancel) + " not found!")
async def bad_response(error_message):
    """Build a 400 JSON response carrying *error_message*.

    The message is also logged at warning level.
    """
    # logger.warn() is a deprecated alias; logging docs say use warning().
    logger.warning(error_message)
    response = web.Response(
        status=400,
        content_type="application/json",
        text=json.dumps(
            obj=[{
                "error_message": error_message,
            }],
            ensure_ascii=False,
        ),
    )
    return response
async def success_response(message):
    """Build a 200 JSON response carrying *message* (also logged at info)."""
    logger.info(message)
    body = json.dumps(
        obj=[{
            "message": message,
        }],
        ensure_ascii=False,
    )
    return web.Response(status=200, content_type="application/json", text=body)
|
993,910 | 7aaed8b8811b21660642769f982afae5dd813180 | #!/usr/bin/env python
# coding: utf-8
# In[6]:
import nltk
#nltk.download("stopwords")
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import re
import sys
word_array=[]
def cleantxt(word_array, index):
    """Print the tri-grams surrounding word_array[index], one per line as
    "<trigram>\t1", with the focus word masked as "$".  Edge positions
    (first/last and second/second-to-last) emit fewer lines."""
    words = word_array
    n = len(words)
    if index == 0 and n >= 3:
        print(f"$_{words[1]}_{words[2]}\t1")
    elif index == n - 1 and n >= 3:
        print(f"{words[n - 3]}_{words[n - 2]}_$\t1")
    elif (index == 1 or index == n - 2) and n >= 3:
        print(f"{words[index - 1]}_$_{words[index + 1]}\t1")
    else:
        print(f"{words[index - 2]}_{words[index - 1]}_$\t1")
        print(f"{words[index - 1]}_$_{words[index + 1]}\t1")
        print(f"$_{words[index + 1]}_{words[index + 2]}\t1")
def text_preprocess(word):
    """Lower-case *word* and drop every character that is not a word
    character, digit, apostrophe or hyphen.

    :return: the cleaned word (possibly empty).
    """
    allowed = re.compile(r"[\w\d'-]")
    return ''.join(ch for ch in word.lower() if allowed.match(ch))
def wordFunc(word_array):
    """Emit tri-grams (via cleantxt) around every occurrence of the
    target keywords "science", "sea" and "fire" in *word_array*.

    The original body carried unused locals (new_words, count); they
    have been removed and the index loop replaced with enumerate.
    """
    for i, word in enumerate(word_array):
        if word in ("science", "sea", "fire"):
            cleantxt(word_array, i)
# Mapper driver: read sentences from stdin, normalise every token, collect
# the clean tokens, then emit tri-grams around the target keywords.
for words in sys.stdin:
    words = words.strip().split() #strip the trailing whitespace, then split the line into tokens
    for word in words:
        clean_word=text_preprocess(word)#normalise case and strip punctuation
        #print(clean_word)
        if clean_word:
            word_array.append(clean_word)#keep only tokens that survived cleaning
wordFunc(word_array)
|
993,911 | 7b556f18a55b454a8748866f317ef0ee6c08750b | class person:
    def __init__(self, name, age):
        # Identity fields persisted by cridentiald()/cridentiald2().
        self.name = name
        self.age = age
def cridentiald(self):
with open("cridentials.txt",'a') as f:
f.writelines(self.name + "\n")
f.writelines(str(self.age))
f.close()
def cridentiald2(self):
with open("cridentials.txt",'a') as f:
f.writelines(self.name +"\n")
f.writelines(str(self.age))
f.close()
|
993,912 | 4a694c9227049b68c0af44d9ff435d58e1dfd0a1 | from qiskit import QuantumCircuit, execute, Aer
from qiskit.visualization import plot_histogram  # fixed: module was misspelled "qiskitt"

# --- H gate: put a single qubit into equal superposition and measure ---
circuit = QuantumCircuit(1, 1)
circuit.h(0)
circuit.measure([0], [0])
circuit.draw()

from qiskit.visualization import plot_bloch_multivector
# fixed: "Aer.get_backend9'statevector_simulator')" was a syntax error
backend = Aer.get_backend('statevector_simulator')
result = execute(circuit, backend).result()
states = result.get_statevector()
print(states)
plot_bloch_multivector(states)

#U-Gate: general single-qubit rotation parameterised by (theta, phi, lambda)
from numpy import pi
circuit = QuantumCircuit(1, 1)
thet, phi, lamb = pi/4, pi/4, 0
circuit.u(thet, phi, lamb, 0)
circuit.measure([0], [0])
circuit.draw()

#Controlled-NOT Gate: Bell-state preparation and measurement counts
from qiskit import QuantumCircuit, execute, Aer
from qiskit.visualization import plot_histogram
circuit = QuantumCircuit(2, 2)
circuit.h(0)
circuit.cx(0, 1)
circuit.measure([0, 1], [0, 1])
circuit.draw()
backend = Aer.get_backend('qasm_simulator')
result = execute(circuit, backend).result()
counts = result.get_counts()
print(counts)
plot_histogram(counts)
|
993,913 | 7180d45d75d60ddffa6f03347078049410558633 | from flask import Flask, request, Response, json
from db import select_stats
from functions import is_mutant
app = Flask(__name__)
@app.route('/')
def home():
    """Landing/health-check endpoint; returns a plain-text marker."""
    return 'Home'
@app.route('/mutant/', methods=['POST'])
def mutant():
    """Classify a DNA sample POSTed as JSON ``{"dna": [...]}``.

    Builds a character matrix from the DNA strings and delegates the
    decision to is_mutant(); responds 200 for a mutant, 403 otherwise.
    """
    json_data = request.json
    dna = json_data["dna"]
    board = []
    for i in dna:
        board.append(list(i))
    is_m = is_mutant(dna, board, 0)
    if is_m:
        return Response(status=200)
    else:
        return Response(status=403)
@app.route('/stats/', methods=['POST'])
def stats():
    """Return verification statistics (mutant/human counts and ratio)
    from the database as a JSON response."""
    (_mutant, human, ratio) = select_stats()
    return Response(response=json.dumps({
        "count_mutant_dna": _mutant,
        "count_human_dna": human,
        "ratio": ratio,
    }), status=200, mimetype='application/json')
if __name__ == '__main__':
app.run(port=8080)
|
993,914 | f599df5da86cf45e4426a2eeb6c392b10249fd85 | import RPi.GPIO as GPIO
import argparse
import importlib.util
import io
import logging
import os
import sys
import time
from threading import Thread

import boto3
import cv2
import numpy as np
from PIL import Image
from botocore.exceptions import ClientError
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(18,GPIO.OUT)
s3 = boto3.client('s3',aws_access_key_id='AWS_ACCESS_KEY_ID', #create s3 client
aws_secret_access_key='AWS_SECRET_ACCESS_KEY_ID',
region_name='us-east-1')
rekog = boto3.client('rekognition', #create rekognition client
aws_access_key_id='AWS_ACCESS_KEY_ID',
aws_secret_access_key='AWS_SECRET_ACCESS_KEY_ID',
region_name='us-east-1'
)
ses = boto3.client('ses',aws_access_key_id='AWS_ACCESS_KEY_ID',# create ses client
aws_secret_access_key='AWS_SECRET_ACCESS_KEY_ID',
region_name='us-east-1')
i =0
def capture():
    # Snapshot the current camera frame, upload it to S3, then email the
    # customer a link to it.  The module-global counter `i` makes each
    # file name unique; `frame` is the latest frame read by the main
    # loop -- assumes the loop has run at least once (TODO confirm).
    global i
    cv2.imwrite("test%s.jpg"%i,frame)#take a snapshot of the frame
    upload('test%s.jpg'%i)
    time.sleep(1)
    email(i)
    i+=1
def upload(file_name):#Upload image to S3
    """Upload *file_name* to the public S3 bucket under the same key.

    :param file_name: local path of the image to upload.
    :return: True on success, False if the S3 call failed.
    """
    # S3 file name is the same as local file
    object_name = file_name
    #Public S3 bucket in use
    bucket='notification-rpi'
    try:
        s3.upload_file(file_name, bucket, object_name)
    except ClientError as e:
        # `logging` is imported at the top of the file; the original body
        # used it without any import, so this path raised NameError.
        logging.error(e)
        return False
    return True
def email(number):# Send email to customer by SES
SENDER = "khuongkhoadk1999@gmail.com" #sender's email (company emails in most cases)
RECIPIENT = "bukhonnhat001@gmail.com" #customer's email
# The subject line for the email.
SUBJECT = "Raspberry Pi Security Notification"
BODY_HTML = """<html>
<head></head>
<body>
<h1>Raspberry Pi Security Notification</h1>
<p>This email was sent with
<a href='https://aws.amazon.com/ses/'>Amazon SES</a> using the
<a href='https://aws.amazon.com/sdk-for-python/'>
AWS SDK for Python (Boto)</a>.</p>
<h2>Someone just entered your house</h2>
<img src="https://notification-rpi.s3.amazonaws.com/test{number}.jpg" >
</body>
</html>
""".format(number=number)
# The character encoding for the email.
CHARSET = "UTF-8"
BODY_TEXT = ("Amazon SES Test (Python)\r\n"
"This email was sent with Amazon SES using the "
"AWS SDK for Python (Boto)."
)
# Try to send the email.
try:
#Provide the contents of the email.
response = ses.send_email(
Destination={
'ToAddresses': [
RECIPIENT,
],
},
Message={
'Body': {
'Html': {
'Charset': CHARSET,
'Data': BODY_HTML,
},
'Text': {
'Charset': CHARSET,
'Data': BODY_TEXT,
}
},
'Subject': {
'Charset': CHARSET,
'Data': SUBJECT,
},
},
Source=SENDER,
)
# Display an error if something goes wrong.
except ClientError as e:
print(e.response['Error']['Message'])
else:
print("Email sent! Message ID:"),
print(response['MessageId'])
cur_frame = 0
success = True
# Initialize frame rate calculation
frame_rate_calc = 1
freq = cv2.getTickFrequency()
frame_skip = 10 # analyze every 100 frames to cut down on Rekognition API calls
# Initialize video stream
cap = cv2.VideoCapture(0)
if (cap.isOpened()== False):
print("Error opening video stream or file")
pause_counter=0
pause=0
time.sleep(1)
counter =0
while (cap.isOpened()):
# Start timer (for calculating frame rate)
t1 = cv2.getTickCount()
ret,frame = cap.read() # get next frame from video
if cur_frame % frame_skip == 0: # only analyze every n frames
print('frame: {}'.format(cur_frame))
pil_img = Image.fromarray(frame) # convert opencv frame (with type()==numpy) into PIL Image
stream = io.BytesIO()
pil_img.save(stream, format='JPEG') # convert PIL Image to Bytes
bin_img = stream.getvalue()
try:
response = rekog.search_faces_by_image(CollectionId='myCollection', Image={'Bytes': bin_img}, MaxFaces=1, FaceMatchThreshold=85) # call Rekognition
if response['FaceMatches']: #
for face in response['FaceMatches']:
print('Hello, ',face['Face']['ExternalImageId'])
print('Similarity: ',face['Similarity'])
counter +=1
else:
print('No faces matched')
except:print('No face detected')
if counter >= 10:
print('send email')
capture()
pause=1
counter=0
if pause==1:
print('DOOR OPEN')
GPIO.output(18,GPIO.HIGH)
pause_counter+=1
if pause_counter >20:
print('DOOR CLOSE')
GPIO.output(18,GPIO.LOW)
pause_counter=0
pause =0
# Draw framerate in corner of frame
cv2.putText(frame,'FPS: {0:.2f}'.format(frame_rate_calc),(30,50),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,0),2,cv2.LINE_AA)
cv2.imshow('Object detector', frame)
# Calculate framerate
t2 = cv2.getTickCount()
time1 = (t2-t1)/freq
frame_rate_calc= 1/time1
#print('counter',counter)
#print('pause',pause)
#print('pause_counter',pause_counter)
if cv2.waitKey(1) == ord('q'):
break
cur_frame += 1
# Clean up: close the preview window and release the camera device.
cv2.destroyAllWindows()
cap.release()  # fixed: was videostream.stop() -- `videostream` is never defined; the capture object is `cap`
993,915 | 574effb6839583a07e4ad584fd01d1da142d1cf8 |
def difference(nums):
    """Return the spread of *nums*: max(nums) - min(nums).

    Unlike the original (which called nums.sort()), the caller's list is
    no longer mutated, and no O(n log n) sort is needed.

    :param nums: non-empty sequence of numbers.
    :raises ValueError: if *nums* is empty.
    """
    return max(nums) - min(nums)
|
993,916 | 0a990daafcb7789510a5197241d853b77de00422 | import unittest
import requests
class testPUT(unittest.TestCase):
    # Integration tests against a live tournament API instance; each test
    # POSTs a fixture payload and expects HTTP 200.  Requires network
    # access to 77.55.192.26:2137 -- these are not unit tests.
    def testPostTournament(self):
        """Create a tournament from a minimal fixture payload."""
        load = {'start_date':'01-01-2020', 'name': 'tournament1', 'city': 'krakow', 'location': 'ulica Pokątna'}
        r = requests.post('http://77.55.192.26:2137/api/tournament', json = load)
        self.assertEqual(r.status_code,200)
    def testPostAPhaseToATournament(self):
        """Attach a phase to tournament 3 (assumes it exists -- TODO confirm)."""
        load = {'name': 'nazwa', 'structure': {}}
        r = requests.post('http://77.55.192.26:2137/api/tournament/3/phase', json = load)
        self.assertEqual(r.status_code,200)
    def testPostDebateToPhase(self):
        """Attach a debate to phase 8 of tournament 3."""
        load = {'d_time': '23:00:00.000', 'd_date': '2022-03-03', 'location': 'pod cerkwia'}
        r = requests.post('http://77.55.192.26:2137/api/tournament/3/phase/8/debate',json = load)
        self.assertEqual(r.status_code,200)
if __name__ == '__main__':
unittest.main() |
993,917 | 26278c110b1a2e5674e930cf88e651adf9e1e299 | #!flask/bin/python
from trs import app
# Development entry point: listen on all interfaces, port 8081, with the
# interactive debugger enabled (debug=True must not be used in production).
app.run(debug=True,host='0.0.0.0',port=8081)
|
993,918 | f76ad912bf3b77f21f563103dbbc36800a587041 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class VirtualMachineExtension(Resource):
"""
Describes a Virtual Machine Extension.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Resource location
:type location: str
:param tags: Resource tags
:type tags: dict
:param force_update_tag: Gets or sets how the extension handler should be
forced to update even if the extension configuration has not changed.
:type force_update_tag: str
:param publisher: Gets or sets the name of the extension handler
publisher.
:type publisher: str
:param virtual_machine_extension_type: Gets or sets the type of the
extension handler.
:type virtual_machine_extension_type: str
:param type_handler_version: Gets or sets the type version of the
extension handler.
:type type_handler_version: str
:param auto_upgrade_minor_version: Gets or sets whether the extension
handler should be automatically upgraded across minor versions.
:type auto_upgrade_minor_version: bool
:param settings: Gets or sets Json formatted public settings for the
extension.
:type settings: object
:param protected_settings: Gets or sets Json formatted protected settings
for the extension.
:type protected_settings: object
:param provisioning_state: Gets or sets the provisioning state, which
only appears in the response.
:type provisioning_state: str
:param instance_view: Gets or sets the virtual machine extension instance
view.
:type instance_view: :class:`VirtualMachineExtensionInstanceView
<azure.mgmt.compute.models.VirtualMachineExtensionInstanceView>`
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'force_update_tag': {'key': 'properties.forceUpdateTag', 'type': 'str'},
'publisher': {'key': 'properties.publisher', 'type': 'str'},
'virtual_machine_extension_type': {'key': 'properties.type', 'type': 'str'},
'type_handler_version': {'key': 'properties.typeHandlerVersion', 'type': 'str'},
'auto_upgrade_minor_version': {'key': 'properties.autoUpgradeMinorVersion', 'type': 'bool'},
'settings': {'key': 'properties.settings', 'type': 'object'},
'protected_settings': {'key': 'properties.protectedSettings', 'type': 'object'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineExtensionInstanceView'},
}
def __init__(self, location, tags=None, force_update_tag=None, publisher=None, virtual_machine_extension_type=None, type_handler_version=None, auto_upgrade_minor_version=None, settings=None, protected_settings=None, provisioning_state=None, instance_view=None):
super(VirtualMachineExtension, self).__init__(location=location, tags=tags)
self.force_update_tag = force_update_tag
self.publisher = publisher
self.virtual_machine_extension_type = virtual_machine_extension_type
self.type_handler_version = type_handler_version
self.auto_upgrade_minor_version = auto_upgrade_minor_version
self.settings = settings
self.protected_settings = protected_settings
self.provisioning_state = provisioning_state
self.instance_view = instance_view
|
993,919 | 88f947ccb201da27597a9591c9ebe2794c34c40e | """Setup file to generate a distribution of gitcheck
usage: python setup.py sdist
       python setup.py install
"""
from setuptools import setup

# Packaging metadata for the gitcheck tool; the console_scripts entry
# point exposes a `gitcheck` command that calls gitcheck.main().
setup(name = 'gitcheck',
      version = '0.5',
      description = 'Check multiple git repository in one pass',
      # NOTE(review): this passes the literal string "README.md", not the
      # file's contents -- probably meant open('README.md').read().
      long_description = "README.md",
      author = 'Bruno Adele',
      author_email = 'bruno.adele@jesuislibre.org',
      url = 'https://github.com/badele/gitcheck',
      py_modules = ['gitcheck'],
      entry_points={'console_scripts':['gitcheck = gitcheck:main'] }
      )
|
993,920 | 92d062a8556a211d090e242438ece870c7509ee2 | # 394. Decode String
# Given an encoded string, return its decoded string.
# The encoding rule is: k[encoded_string], where the encoded_string inside the square brackets is being repeated exactly k times. Note that k is guaranteed to be a positive integer.
# You may assume that the input string is always valid; No extra white spaces, square brackets are well-formed, etc.
# Furthermore, you may assume that the original data does not contain any digits and that digits are only for those repeat numbers, k. For example, there won't be input like 3a or 2[4].
# Examples:
# s = "3[a]2[bc]", return "aaabcbc".
# s = "3[a2[c]]", return "accaccacc".
# s = "2[abc]3[cd]ef", return "abcabccdcdcdef".
def decodeString(s):
    """Expand an encoded string where ``k[sub]`` means *sub* repeated k
    times; brackets may nest (e.g. "3[a2[c]]" -> "accaccacc").

    :param s: well-formed encoded string (digits only appear as repeat
        counts, brackets are balanced).
    :return: the decoded string.
    """
    frames = []        # saved (outer-string, repeat-count) per open '['
    repeat = 0         # repeat count currently being parsed
    decoded = ''       # string built at the current nesting level
    for ch in s:
        if ch.isdigit():
            repeat = repeat * 10 + int(ch)
        elif ch == '[':
            frames.append((decoded, repeat))
            decoded, repeat = '', 0
        elif ch == ']':
            outer, k = frames.pop()
            decoded = outer + decoded * k
        else:
            decoded += ch
    return decoded
print(decodeString("3[a]2[bc]")) # return "aaabcbc".
print(decodeString("3[a2[c]]")) # "accaccacc".
print(decodeString("2[abc]3[cd]ef")) # "abcabccdcdcdef".
|
993,921 | 5f93a77eab6da8f486f09e10b536721d9695f5a5 | #!/usr/bin/python
# -*- coding:utf-8 -*-
import json
import pymysql
import requests
import time
from selenium import webdriver
from fake_useragent import UserAgent
__author__ = "xin nix"
# 导入:
from sqlalchemy import Column, String, create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from pyquery import PyQuery as pq
html = """"""
qingqiucanshu = {
'__VIEWSTATE': '''/ wEPDwUKLTMyODk2Nzk2Nw9kFgICAQ9kFggCAQ9kFgQCBQ8PFgIeBFRleHQFBueZu + W9lWRkAgcPFgIfAAXcBTx1bD48bGk + PGEgaHJlZj0iLi4vRGVmYXVsdC5hc3B4P01lbnVJZD0zNCIgdGFyZ2V0PSJfc2VsZiIgdGl0bGU9IummlumhtSIgY2xhc3M9IlNlbGVjdGVkIj7pppbpobU8L2E + PC9saT48bGk + PGEgaHJlZj0iLi4vRGF0YU1ldGEvU2VhcmNoTGlzdC5hc3B4P01lbnVJZD0zNiIgdGFyZ2V0PSJfc2VsZiIgdGl0bGU9IuWFg + aVsOaNriI + 5YWD5pWw5o2uPC9hPjwvbGk + PGxpPjxhIGhyZWY9Ii4uL0RhdGFUYWJsZS9TZWFyY2hMaXN0LmFzcHg / TWVudUlkPTM3IiB0YXJnZXQ9Il9zZWxmIiB0aXRsZT0i5pWw5o2uIj7mlbDmja48L2E + PC9saT48bGk + PGEgaHJlZj0i…gYeG1BhZ2VTcGxpdEJvdHRvbV90b3RhbFJlY29yZAK / Ax4YUGFnZVNwbGl0Qm90dG9tX2N1cnJQYWdlAgEeGFBhZ2VTcGxpdEJvdHRvbV9QYWdlU2l6ZQIKZBYCAgYPEGRkFgBkAg0PEGRkFgFmZBgBBR5fX0NvbnRyb2xzUmVxdWlyZVBvc3RCYWNrS2V5X18WBgUgUGFnZVNwbGl0Qm90dG9tJEltYWdlQnV0dG9uRmlyc3QFHlBhZ2VTcGxpdEJvdHRvbSRJbWFnZUJ1dHRvblByZQUfUGFnZVNwbGl0Qm90dG9tJEltYWdlQnV0dG9uTmV4dAUfUGFnZVNwbGl0Qm90dG9tJEltYWdlQnV0dG9uTGFzdAUfUGFnZVNwbGl0Qm90dG9tJEltYWdlQnV0dG9uR290bwUkUGFnZVNwbGl0Qm90dG9tJEltYWdlQnV0dG9uR290b0NvdW50Zy6rm2KfWCoGzV2KIeAxhM / WnjLBQrvah0b3lDdSz88 =''',
'__VIEWSTATEGENERATOR': '7B37A01C',
'__EVENTVALIDATION': '/ wEdABCICeCvPq5dojeUznC6NAYjdd / LZ + E8u4j0WjnM8OIt9ylPoWxsDo8AHyruJ6 / EO3jNZs5DOkylD / xsi5Wzri / +dOaS6 + L64K2I / RjypYe1LAF7r1QOqZVl5ra7Evso46Dp72ZTX0uduvKZf3Rh6HzVPPXB + 9 V6mtWKAHDIgICY + Uw4svsZlq2PFdW7HAQNkExQADPJb9qk3dm65SRf1v / BU5 / YCCqOy6ltlKT6dPJ4Gjh4fKA2Ltrb2EAXDs / YPSDkZdCzDguI5q0eJt7oKyBil0a6EDy9Bq9cy6Il5gVgLDuUpDNy2SiL4sSws50u4KexM / Y5NC6d07Sbkq1EubGFeHHmV5pKMhb1heedN5rQKHJ0tWKsjfPIcZX2dDpFUQw =',
'PageSplitBottom$textBoxPageSize': "{}",
'DropDownListRowCount': '50',
}
headers = {
'Host': 'stb.agridata.cn',
'Proxy-Connection': 'keep-alive',
'Content-Length': '82',
'Cache-Control': 'max-age=0',
'User-Agent': getattr(UserAgent(), 'random'),
'Upgrade-Insecure-Requests': "{}",
'Origin': 'http://stb.agridata.cn',
'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Referer': 'http://stb.agridata.cn/Site/DataTable/List.aspx?DataCategoryGId=3d8af79e-d8ab-40ff-8f55-a1f11afad890',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cookie': 'ASP.NET_SessionId=stcujiyxlxprvdpz0pc2fh3j',
}
# 创建对象的基类:
Base = declarative_base()
# 定义User对象:
class User(Base):
    """SQLAlchemy model for one crop-variety record scraped from the site.

    NOTE(review): the table and (commented-out) column names are Chinese;
    the active columns drop the `name=` mapping, so they map to their
    attribute names instead -- confirm that matches the MySQL schema used
    by the raw INSERT further down this file.
    """
    # Table name ("crop famous/excellent/special new varieties database"):
    __tablename__ = '农作物名、优、特新品种数据库'
    # Table structure (original column-name mapping kept for reference):
    # category = Column(String(128), name="作物种类")
    # brands = Column(String(128), name="作物品种")
    # name = Column(String(128), name="品种名称", primary_key=True)
    # desc = Column(String(4), name="基本情况")
    # feature = Column(String(4), name="特征特性")
    # mader = Column(String(255), name="培育者")
    # location = Column(String(4), name="地区及技术")
    # brands_cate = Column(String(128), name="品种类别")
    # test = Column(String(255), name="审定情况")
    # caiji = Column(String(10), name="资源采集日")
    category = Column(String(128))
    brands = Column(String(128))
    name = Column(String(128), primary_key=True)
    desc = Column(String(4))
    feature = Column(String(4))
    mader = Column(String(255))
    location = Column(String(4))
    brands_cate = Column(String(128))
    test = Column(String(255))
    caiji = Column(String(10))
# Direct pymysql connection used by the scrape loop below.
# SECURITY(review): credentials are hard-coded in source; move them to
# environment variables or a config file before sharing this script.
conn = pymysql.connect(host='127.0.0.1', user='root', password='123456', db='myprojects', charset='utf8', cursorclass=pymysql.cursors.DictCursor,)
curser = conn.cursor()
# Initialize the database connection (SQLAlchemy alternative, disabled):
# engine = create_engine('mysql+mysqldb://root:123456@localhost:3306/myprojects?charset=utf8')
# Base.metadata.create_all(engine)
# Create the DBSession type:
# DBSession = sessionmaker(bind=engine)
# Create a session object:
# session = DBSession()
# Create a new User object:
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
a = 0
url = "http://stb.agridata.cn/Site/DataTable/List.aspx?DataCategoryGId=0180539b-98ab-43af-a76c-83362df4c25d"
driver = webdriver.Firefox()
wait = WebDriverWait(driver, 10)
driver.get(url)
# Anchor elements of the current result page (one per table row).
htmls = pq(driver.page_source).find(".ListContent1.EllipsisTable a").items()
errors = []
# Switch the page-size dropdown to its third option (larger page).
driver.find_element_by_css_selector("#DropDownListRowCount > option:nth-child(3)").click()
# Walk 61 result pages; presumably the site has 61 pages at this page
# size -- TODO confirm, a short run would silently stop early.
for j in range(1, 62):
    if j >= 1:
        num = 1
        for i in htmls:
            # Rows are 1-based and row 1 is the header, so advance first.
            num += 1
            # Open the detail page (spawns a new browser window/tab).
            driver.find_element_by_css_selector(".ListContent1 > tbody:nth-child(1) > tr:nth-child({}) > td:nth-child(2) > a".format(num)).click()
            key = driver.find_element_by_css_selector(".ListContent1 > tbody:nth-child(1) > tr:nth-child({}) > td:nth-child(3)".format(num)).text
            now_handle = driver.current_window_handle
            all_handles = driver.window_handles
            driver.switch_to.window(all_handles[-1])
            data = driver.page_source
            data = pq(data).find("#form1 > div.Box10.ClearFloat > div.Box77.FloatLeft.MarginLeft10 > div > table > tbody > tr")
            # print(data)
            content = data.items()
            insert_data = []
            for k in content:
                # Strip non-breaking spaces and private-use glyphs that the
                # site embeds in cell text before inserting into MySQL.
                values = k.find('td').text().replace("\xa0", "").replace("\xa01", "").replace("\xa04", "").replace("\xa02", "").replace("\ua004", "").replace("\xa03", "").replace("\xa025", "")
                values = values.replace("\ue0043", "").replace("\ue0042", "").replace("\ue0044", "").replace("\ue0045", "").replace("\ue0046", "")
                insert_data.append(values)
            try:
                # print(insert_data)
                # NOTE(review): values are interpolated with str.format, not
                # parameterized -- a quote in the scraped text breaks the SQL.
                sql = """INSERT INTO `myprojects`.`农作物名、优、特新品种数据库`(`作物种类`, `作物品种`, `品种名称`, `基本情况`, `特征特性`, `培育者`, `地区及技术`, `品种类别`, `审定情况`, `资源采集日`) VALUES ("{0}", "{1}", "{2}", "{3}", "{4}", "{5}", "{6}", "{7}", "{8}", "{9}") on duplicate key update `作物种类`= VALUES (`作物种类`)"""
                result = curser.execute(sql.format(*insert_data))
                conn.commit()
                print(result, insert_data)
            except Exception as e:
                # Best-effort: log the failing row key and keep crawling.
                print(e)
                errors.append(key)
            # Close the detail window and return to the list window.
            driver.close()
            driver.switch_to.window(now_handle)
    if j <= 60:
        # Advance to the next result page and re-collect its row anchors.
        driver.find_element_by_css_selector("#PageSplitBottom_ImageButtonNext").click()
        # driver.find_element_by_css_selector("#DropDownListRowCount > option:nth-child(3)").click()
        htmls = pq(driver.page_source).find(".ListContent1.EllipsisTable a").items()
# Persist the keys of rows that failed to insert for later inspection.
with open("errors.json", 'w', encoding='utf-8') as f:
    f.write(json.dumps(errors))
993,922 | a7f33e2ad905fad11e3cc4c43d693c598b6d86a1 | from django.shortcuts import render, redirect
from django.conf import settings
import requests
from requests.auth import HTTPBasicAuth
import json
from products import utils
from orders.models import Order
from django.urls import reverse
from cart.views import view_cart
# Create your views here.
klarna_un = settings.KLARNA_UN
klarna_pw = settings.KLARNA_PW
def checkout(request):
    """Create or refresh a Klarna checkout order for the session cart.

    If the session already holds a Klarna ``order_id`` and the stored
    order lines still match the cart, the existing order is re-rendered.
    If the cart changed, the existing order is updated in place.  Without
    an ``order_id`` a new Klarna order is created and its id stored on the
    session.  Renders ``checkout/checkout.html`` with the Klarna order.
    """
    auth = HTTPBasicAuth(klarna_un, klarna_pw)
    headers = {'content-type': 'application/json'}
    cart = request.session.get('cart')
    # Only a missing key is expected here; the original bare `except:`
    # would have hidden every other error as well.
    order_id = request.session.get('order_id', 0)

    orderlines, integer_total = _build_order_lines(cart, 'reference')
    if order_id:
        response = requests.get(
            settings.KLARNA_BASE_URL + '/checkout/v3/orders/' +
            order_id,
            auth=auth,
            headers=headers,
        )
        klarna_order = response.json()
        if klarna_order['order_lines'] == orderlines:
            # Cart unchanged: reuse the existing Klarna order as-is.
            return render(request, 'checkout/checkout.html',
                          {'klarna_order': klarna_order})
        # Cart changed: push the updated lines to the existing order.
        data = json.dumps(_build_order_body(orderlines, integer_total))
        response = requests.post(
            settings.KLARNA_BASE_URL + '/checkout/v3/orders/' +
            order_id,
            auth=auth,
            headers=headers,
            data=data)
        klarna_order = response.json()
        return render(request, 'checkout/checkout.html',
                      {'klarna_order': klarna_order})

    # No existing order: create one.
    # NOTE(review): the original code used the key 'product_id' (instead of
    # 'reference') for order lines on creation only.  That asymmetry is
    # preserved here, but it looks inconsistent with the update path and
    # with Klarna's order-lines schema -- confirm before changing.
    orderlines, integer_total = _build_order_lines(cart, 'product_id')
    data = json.dumps(_build_order_body(orderlines, integer_total))
    response = requests.post(
        settings.KLARNA_BASE_URL + '/checkout/v3/orders',
        auth=auth,
        headers=headers,
        data=data)
    klarna_order = response.json()
    request.session['order_id'] = klarna_order['order_id']
    return render(request, 'checkout/checkout.html',
                  {'klarna_order': klarna_order})


def _build_order_lines(cart, id_key):
    """Build Klarna order lines from the session cart.

    :param cart: session cart mapping product id -> quantity
    :param id_key: dict key used for the product identifier
        ('reference' or 'product_id')
    :return: ``(orderlines, total)`` where prices/totals are integer
        minor units (cents)
    """
    orderlines = []
    total = 0
    for item in cart:
        product = utils.get_product(item)
        orderlines.append({
            'name': product[1].name,
            id_key: product[1].id,
            'unit_price': int(product[1].price * 100),
            'quantity': int(cart[item]),
            'tax_rate': 0,
            'total_amount': int(product[1].price * cart[item] * 100),
            'total_tax_amount': 0,
        })
        total += product[1].price * cart[item] * 100
    return orderlines, int(total)


def _build_order_body(orderlines, integer_total):
    """Return the JSON-serializable payload shared by the Klarna
    create-order and update-order calls."""
    return {
        "purchase_country": "se",
        "purchase_currency": "eur",
        "locale": "en-GB",
        "order_amount": integer_total,
        "order_tax_amount": 0,
        "order_lines": orderlines,
        "merchant_urls": {
            "terms": "https://beerbrewing-supply.herokuapp.com" + "/checkout/terms",
            "checkout": "https://beerbrewing-supply.herokuapp.com" + "/checkout/completed",
            "confirmation": "https://beerbrewing-supply.herokuapp.com" + "/checkout/completed",
            "push": "https://beerbrewing-supply.herokuapp.com" + "/orders/register_order?sid={checkout.order.id}"
        },
        "shipping_options": [
            {
                "id": "free_shipping",
                "name": "Free Shipping",
                "description": "Delivers in 5-7 days",
                "price": 0,
                "tax_amount": 0,
                "tax_rate": 0,
                "preselected": True,
                "shipping_method": "Home"
            },
            {
                "id": "pick_up_store",
                "name": "Pick up at closest store",
                "price": 399,
                "tax_amount": 0,
                "tax_rate": 0,
                "preselected": False,
                "shipping_method": "PickUpStore"
            }
        ]
    }
def terms(request):
    """Render the static terms-and-conditions page (Klarna 'terms' URL)."""
    print('terms')  # lightweight request trace left in place
    return render(request, 'checkout/terms.html')
def completed(request):
    """View that Klarna redirects to once checkout is completed.

    Retrieves the finished order from Klarna using the ``order_id`` stored
    on the session, persists it as a local ``Order``, then clears the cart
    and order id from the session.  Without an order id the user is sent
    back to the cart view.
    """
    # Only a missing session key is expected; the original bare `except:`
    # also swallowed real errors.  Guard clause replaces the big if/else.
    order_id = request.session.get('order_id', '')
    if order_id == '':
        return redirect(reverse(view_cart))

    auth = HTTPBasicAuth(klarna_un, klarna_pw)
    headers = {'content-type': 'application/json'}
    response = requests.get(
        settings.KLARNA_BASE_URL + '/checkout/v3/orders/' +
        order_id,
        auth=auth,
        headers=headers,
    )
    klarna_order = response.json()
    # Mirror the Klarna order into the local database.
    order = Order(
        order_id=klarna_order['order_id'],
        status=klarna_order['status'],
        given_name=klarna_order['billing_address']['given_name'],
        family_name=klarna_order['billing_address']['family_name'],
        email=klarna_order['billing_address']['email'],
        phone_number=klarna_order['billing_address']['phone'],
        country=klarna_order['billing_address']['country'],
        postcode=klarna_order['billing_address']['postal_code'],
        town_or_city=klarna_order['billing_address']['city'],
        street_address1=klarna_order['billing_address']['street_address'],
        order_total=klarna_order['order_amount'],
        klarna_line_items=klarna_order['order_lines']
    )
    order.save()
    # The purchase is done: reset the session state.
    request.session['cart'] = {}
    request.session['order_id'] = ''
    return render(request, 'checkout/completed.html',
                  {'klarna_order': klarna_order})
|
993,923 | aa4a7c1799595d4aa8375f96913f9c54c1b8c8a3 | # -*- coding: utf-8 -*-
"""
Algoritmo de Dijkstra (com lista de adjascencias)
Autores:
Edsger Dijkstra
Colaborador:
Péricles Lopes Machado (pericles.raskolnikoff@gmail.com)
Tipo:
graphs
Descrição:
O Algoritmo de Dijsktra é um algoritmo em grafos clássico que determina a
menor distância de um determinado vértice para todos os outros. Nessa implementação
utiliza-se uma heap
Complexidade:
O(|E| log |V|)
Dificuldade:
medio
Referências:
[1] http://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
[2] Cormem, Thomas H. Introduction to Algorithms, 3rd Edition.
ISBN 978-0-262-53305-8. Páginas 658-659.
"""
from heapq import *;
"""
Função para imprimir rota
"""
def imprime_rota(pi, u):
    # Recursively walk the parent chain `pi` back to the source node, then
    # print the nodes on the unwind so the route appears source -> u.
    # (Python 2 print statements; this file is Python 2 code.)
    if pi[u] != None:
        imprime_rota(pi, pi[u]);
    print " ", u;
"""
Lista de adjacência; Para cada nó 'u' é fornecida uma lista de pares (v, d), onde 'v' é um
nó que está conectado a 'u' e 'd' é a distancia entre 'u' e 'v'
"""
# Adjacency list: G[u] is a list of (v, d) pairs, i.e. node u connects to
# node v with edge weight d.
G = [
    [(1, 2), (3, 4), (5, 3), (8, 9)],
    [(2, 7), (4, 6), (7, 8)],
    [(4, 9), (7, 9)],
    [(1, 13), (4, 4), (6, 3), (2, 3)],
    [(1, 23), (7, 4), (5, 3), (8, 1), (4, 9)],
    [(3, 11), (4, 7), (8, 9)],
    [(5, 2), (3, 5), (4, 3), (5, 9)],
    [(1, 2), (7, 4), (5, 9), (6, 8)],
    [(7, 2), (2, 3), (1, 1), (3, 1)],
];
"""
Origem s e destino t
"""
# Source s and destination t.
s = 1;
t = 6;
N = len(G);
"""
Estimativa de distancia inicial
None representa o infinito e código pai usado para recuperar a rota
"""
# D: distance estimates (None = infinity); pi: parent pointers for the route.
D = [];
pi = [];
for i in range(0, N):
    D += [None];
    pi += [None];
"""
Priority queue utilizada para o acesso rápido a melhor estimativa
"""
# Min-heap of (distance, node) pairs, seeded with the source.
Q = [];
D[s] = 0;
heappush(Q, (0, s));
"""
Enquanto a fila de prioridade não estiver vazia tente verificar se o topo
da fila é melhor opção de rota para se chegar nos adjascentes. Como o topo
já é o mínimo, então garante-se que D[u] já está minimizado no momento.
"""
# Pop the closest node and relax its outgoing edges.  The `or D[v] == None`
# clause handles unvisited nodes: in Python 2, None compares less than any
# number, so `D[v] > ...` alone would never fire for them.
while Q:
    u = heappop(Q)[1];
    for adj in G[u]:
        v = adj[0];
        duv = adj[1];
        if D[v] > D[u] + duv or D[v] == None:
            D[v] = D[u] + duv;
            pi[v] = u;
            heappush(Q, (D[v], v));
if D[t] != None:
    print "Distância(", s, ",", t, ") = ", D[t];
    print "Rota:";
    imprime_rota(pi, t);
else:
    print "Não há rota entre os nós ", s, " e ", t;
|
993,924 | 2c0e7c5379c9d94d0d2a846dc90ea3c7af3fefbe | from comet_ml import Experiment
import os
from torch.utils.data import DataLoader
import json
import glob
import torch
from tqdm import tqdm
import pandas as pd
import numpy as np
import torch.nn as nn
import argparse
from sklearn import metrics
from preprocess import get_raw_data, get_labels, get_tokenized_inputs, get_token2int
from models.AttnModel import AttnModel
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
token2int = get_token2int()
in_features = ['sequence', 'structure', "predicted_loop_type"]
hyperparams = {
"batch_size": 400,
"num_epochs": 10,
"learning_rate": 0.0005,
"model_dim": 128,
"embedding_size": 128,
"num_heads": 1,
"num_encoder_layers": 3,
"num_decoder_layers": 2,
"seq_len": 107,
"dropout": 0.1,
"num_in_features": len(in_features),
"only_encoder": True,
"vocab_size": len(token2int.items())
}
possible_params = {
"learning_rate": [0.00005],
"model_dim": [64, 512],
"embedding_size": [64, 512],
"num_encoder_layers": [3, 6],
"dropout": [0.1],
}
def build_hyperparam_grid(base_hyperparams, param_grid=None):
    """Build the full cartesian product of hyperparameter settings.

    Replaces five hand-written nested loops with itertools.product; the
    combination order (learning_rate outermost, dropout innermost) is
    identical to the original loops.

    :param base_hyperparams: dict of defaults copied into every entry
    :param param_grid: optional mapping of the five searched keys to lists
        of candidate values; defaults to the module-level possible_params
        (backward compatible with the old single-argument call).
    :return: list of hyperparameter dicts, one per combination
    """
    from itertools import product

    if param_grid is None:
        param_grid = possible_params
    keys = ("learning_rate", "model_dim", "embedding_size",
            "num_encoder_layers", "dropout")
    grid = []
    for combo in product(*(param_grid[k] for k in keys)):
        modified_hyperparams = base_hyperparams.copy()
        modified_hyperparams.update(zip(keys, combo))
        grid.append(modified_hyperparams)
    return grid
def prepare_model():
    """Load the dataset, split it 90/10 into train/test loaders, and
    construct the attention model on the active device.

    :return: (model, train_loader, test_loader)
    """
    print("Preparing model...")
    raw_train, _raw_test = get_raw_data()
    tokenized = get_tokenized_inputs(raw_train, cols=in_features)
    labels = get_labels(raw_train)

    # First 90% of rows train, remaining 10% test.
    input_cut = int(0.9 * tokenized.shape[0])
    label_cut = int(0.9 * labels.shape[0])
    train_pairs = list(zip(tokenized[:input_cut], labels[:label_cut]))
    test_pairs = list(zip(tokenized[input_cut:], labels[label_cut:]))

    batch = hyperparams["batch_size"]
    train_loader = DataLoader(train_pairs, batch_size=batch)
    test_loader = DataLoader(test_pairs, batch_size=batch)

    model = AttnModel(hyperparams).to(device)
    return model, train_loader, test_loader
def train(model, loader, hyperparams, experiment):
    """Run the full training loop, logging under the experiment's train
    context and showing one tqdm tick per epoch.

    MSE loss is computed on the first 68 positions only -- presumably the
    scored portion of each RNA sequence (TODO confirm against the dataset
    spec).
    """
    optimizer = torch.optim.Adam(
        model.parameters(), lr=hyperparams["learning_rate"])
    loss = nn.MSELoss()
    with experiment.train():
        with tqdm(total=hyperparams["num_epochs"]) as epochbar:
            for epoch in range(0, hyperparams["num_epochs"]):
                total_loss = 0
                i = 0
                for (inputs, labels) in loader:
                    # Labels are shorter than the input sequence; pad them
                    # into a zero target the same length as the inputs.
                    target = torch.zeros(inputs[:, 0, :].shape)
                    target[::, :labels.size(1)] = labels
                    inputs = inputs.to(device)
                    target = target.to(device)
                    predictions = model(inputs)
                    l = loss(predictions[::, :68].reshape(-1).float(),
                             target[::, :68].reshape(-1).float())
                    total_loss += l.detach().cpu().numpy()
                    i += 1
                    # Standard step order: clear grads, backprop, update.
                    optimizer.zero_grad()
                    l.backward()
                    optimizer.step()
                desc = f'Epoch {epoch}, loss {total_loss/i}, batch {i}/{len(loader)}'
                epochbar.set_description(desc)
                epochbar.update(1)
def test_metrics(seq1, seq2, cutoff=0.7):
    """Score predictions `seq1` against `seq2` binarized at `cutoff`.

    :return: (PR-AUC, ROC-AUC)
    """
    binary_truth = np.where(seq2 > cutoff, 1, 0)
    roc_auc = metrics.roc_auc_score(binary_truth, seq1)
    precision, recall, _thresholds = metrics.precision_recall_curve(
        binary_truth, seq1)
    # metrics.auc requires its x-values sorted, so order by precision.
    order = precision.argsort()
    pr_auc = metrics.auc(precision[order], recall[order])
    return pr_auc, roc_auc
def test(model, loader, hyperparams, experiment):
    """Evaluate the model over the test loader, printing and logging the
    averaged MSE loss, PR-AUC and ROC-AUC.

    Mirrors train(): targets are zero-padded to the input length and only
    the first 68 positions are scored.
    """
    loss = nn.MSELoss()
    i = 0
    total_loss = 0
    total_dist = 0
    total_rocauc = 0
    total_prauc = 0
    with experiment.test():
        # Single evaluation pass (range kept for easy multi-pass tweaking).
        for j in range(0, 1):
            for (inputs, labels) in loader:
                target = torch.zeros(inputs[:, 0, :].shape)
                target[::, :labels.size(1)] = labels
                inputs = inputs.to(device)
                target = target.to(device)
                predictions = model(inputs)
                l = loss(predictions[::, :68].reshape(-1).float(),
                         target[::, :68].reshape(-1).float())
                total_loss += l.detach().cpu().numpy()
                i += 1
                prauc, rocauc = test_metrics(predictions[::, :68].reshape(-1).detach(
                ).cpu().numpy(), target[::, :68].reshape(-1).detach().cpu().numpy())
                total_prauc += prauc
                total_rocauc += rocauc
        print("PRAUC: ", total_prauc/i)
        print("ROCAUC: ", total_rocauc/i)
        print("Loss: ", total_loss/i)
        experiment.log_metric("ROCAUC", total_rocauc/i)
        experiment.log_metric("test_loss", total_loss/i)
if __name__ == "__main__":
    # CLI entry point: combine load/train/test/save flags freely; -H runs
    # every hyperparameter combination from possible_params.
    parser = argparse.ArgumentParser()
    parser.add_argument("-l", "--load", action="store_true",
                        help="load model.pt")
    parser.add_argument("-s", "--save", action="store_true",
                        help="save model.pt")
    parser.add_argument("-T", "--train", action="store_true",
                        help="run training loop")
    parser.add_argument("-t", "--test", action="store_true",
                        help="run testing loop")
    parser.add_argument("-H", "--hyperparamsearch", action="store_true",
                        help="run hyperparameter search")
    args = parser.parse_args()
    if args.hyperparamsearch:
        print("Running hyperparameter search")
        grid = build_hyperparam_grid(hyperparams)
    else:
        # Single run with the module-level defaults.
        grid = [hyperparams]
    for hyperparam_dict in grid:
        # NOTE(review): prepare_model() builds the model from the global
        # `hyperparams`, not `hyperparam_dict` -- during a search the model
        # never picks up the searched values; verify before relying on -H.
        model, train_loader, test_loader = prepare_model()
        print("Running an experiment with hyperparams: ", hyperparam_dict)
        experiment = Experiment(project_name="danaomics")
        experiment.log_parameters(hyperparam_dict)
        if args.load:
            print("Loading saved model...")
            model.load_state_dict(torch.load("./model.pt"))
        if args.train:
            print("Running training loop...")
            train(model, train_loader, hyperparam_dict, experiment)
        if args.test:
            print("Running testing loop...")
            test(model, test_loader, hyperparam_dict, experiment)
        if args.save:
            print("Saving model...")
            torch.save(model.state_dict(), "./model.pt")
|
# Average of the multiples of `number` below `limit`.
# Bug fixes: input() returns str in Python 3, so both values must be cast
# to int before arithmetic/comparison; the builtin name `sum` was shadowed;
# and a limit <= number caused ZeroDivisionError on the final average.
number = int(input())
limit = int(input())
total = 0
count = 0
# NOTE: x = 0 counts as a multiple, matching the original loop's behaviour.
for x in range(limit):
    if x % number == 0:  # raises ZeroDivisionError if number == 0 -- TODO confirm desired handling
        total += x
        count += 1
if count:
    print(total / count)
else:
    # No multiples found (e.g. non-positive limit): report 0 instead of crashing.
    print(0)
|
import sys
# Feed the sample input file to stdin so input() reads the test data.
sys.stdin = open("4861.txt", "r")

T = int(input())
for tc in range(1, T + 1):
    print("#%d" % (tc), end=" ")
    a, b = map(int, input().split())
    li = []
    for _ in range(a):
        li.append(str(input()))
    result = []
    count = 0
    for y in range(len(li)):
        for x in range(len(li[0])):
            # Bug fix: the original line read `li[y][x+b-]`, a syntax error;
            # the intended lookahead is the cell b-1 columns to the right.
            # Bounds guards added so neither lookahead can index past the
            # grid (the original would raise IndexError near the edges).
            if x + b - 1 < len(li[0]) and li[y][x] == li[y][x + b - 1]:
                result.append(li[y][x])
                count += 1
            elif y + b - 1 < len(li) and li[y][x] == li[y + b - 1][x]:
                result.append(li[y][x])
                count += 1
        if count == int(b / 2):
            print(result)
            break
        else:
            count = 0
|
993,927 | a46b201fc831e8efe46cddefce94a452aeecd736 | n = int(raw_input())
# Python 2 script: read a list of integers and count how many distinct
# non-zero values occur exactly twice; answer is -1 if any non-zero value
# occurs more than twice.
a = map(int, raw_input().split())
# z: value -> occurrence count
z = dict()
for q in a:
    if q not in z:
        z[q] = 0
    z[q] += 1
ans = 0
for (v, q) in z.items():
    # Zeros are ignored entirely -- presumably they may repeat freely
    # (TODO confirm against the problem statement).
    if v != 0:
        if q > 2:
            ans = -1
            break
        if q == 2:
            ans += 1
print ans
993,928 | 025cb0b757f95d651e7e9abef86f5a55e5cb47fe | from django.conf.urls import url, include
from .views import (CubesCategoryPriceAPIView,
CubesCategoryFiltersAPIView,
CubesCategoryAPIView,
CubesCategoryNodeInputsAPIView,
CubesCategoryNodeOutputsAPIView,
CubesCategoryNodeListAPIView)
# URL patterns for the cube-category API, namespaced as 'categories'.
# Numeric-pk routes serve a single category and its node inputs/outputs;
# the prices/filters routes accept wider slug-style identifiers.
urls_categories = ([
    url(r'^$',
        CubesCategoryNodeListAPIView.as_view(),
        name='list'),
    url(r'^(?P<pk>(([\d]+)))/$',
        CubesCategoryAPIView.as_view(),
        name='category'),
    url(r'^(?P<pk>(([\d]+)))/inputs/$',
        CubesCategoryNodeInputsAPIView.as_view(),
        name='inputs'),
    url(r'^(?P<pk>(([\d]+)))/outputs/$',
        CubesCategoryNodeOutputsAPIView.as_view(),
        name='outputs'),
    url(r'^(?P<pk>([-_\.\d\w]+))/prices/$',
        CubesCategoryPriceAPIView.as_view(),
        name='prices'),
    url(r'^(?P<pk>([-_\.\d\w]+))/filters/$',
        CubesCategoryFiltersAPIView.as_view(),
        name='filters'),
], 'categories')
|
993,929 | 8bf59a72782d25bb7382292af4220afbe6949f18 | from django import forms
from django.forms import Textarea
from questions.models import Case
from common.models import Comment
from questions.models import UploadFile
from knowledge_base.models import KB_Item
class KnowledgebaseForm(forms.ModelForm):
    """ModelForm for KB_Item: applies Bootstrap styling and placeholders
    to every widget and relaxes `required` on the optional legal fields.
    """
    def __init__(self, *args, **kwargs):
        super(KnowledgebaseForm, self).__init__(*args, **kwargs)
        # Bootstrap class on every widget (note: replaces any existing attrs).
        for field in self.fields.values():
            field.widget.attrs = {"class": "form-control"}
        self.fields['title'].widget.attrs.update({
            'placeholder': 'Title'})
        self.fields['kb_area'].widget.attrs.update({
            'label': 'Area'})
        self.fields['citation'].widget.attrs.update({
            'placeholder': 'Citation'})
        self.fields['judge'].widget.attrs.update({
            'placeholder': 'Judge'})
        self.fields['statute_chapter'].widget.attrs.update({
            'placeholder': 'Statute Chapter'})
        self.fields['statute_number'].widget.attrs.update({
            'placeholder': 'Statute Number'})
        self.fields['statute_heading'].widget.attrs.update({
            'placeholder': 'Statute Heading'})
        self.fields['body'].widget.attrs.update({
            'placeholder': 'Body', 'widget':'Textarea'})
        # These fields are optional in the form regardless of the model.
        self.fields['judge'].required = False
        self.fields['citation'].required = False
        self.fields['statute_chapter'].required = False
        self.fields['statute_number'].required = False
        self.fields['statute_heading'].required = False
        # self.fields['trigger'].required = False
        # self.fields['aff_resp'].required = False
        # self.fields['neg_resp'].required = False
        self.fields['body'].required = False
        # self.fields['plain_body'].required = False
        # self.fields['related_document'].required = False
    # Extra free-text field, not on the KB_Item model.
    issue_detail = forms.CharField(widget=forms.Textarea)
    class Meta:
        model = KB_Item
        widgets = {
            'body': Textarea(attrs={'size': 80, 'rows': 20, 'title':'body'})
        }
        # NOTE(review): 'kb_area' appears twice in this tuple -- probably a
        # leftover; harmless but worth cleaning up.
        fields = (
            'kb_area',
            # 'kb_type',
            'title',
            'citation',
            'judge',
            'statute_chapter',
            'statute_number',
            'statute_heading',
            'state',
            'body',
            # 'plain_body',
            'kb_area',
            # 'trigger',
            # 'aff_resp',
            # 'neg_resp',
            'related_document'
        )
    def clean_name(self):
        # NOTE(review): looks unfinished -- it builds a duplicate-title
        # queryset but never raises ValidationError nor returns a value, so
        # Django will treat the cleaned value as None.  Also, clean_name
        # only runs for a field named 'name'; this form's field is 'title'
        # (clean_title would be the hook).  TODO confirm intent.
        name = self.cleaned_data['title']
        kb_item = KB_Item.objects.filter(name__iexact=name).exclude(id=self.instance.id)
|
993,930 | 161466bb53c4d2c4458785bf32fe03ce398818b2 | """
program: basic_list_exception
Author: Ondrea Li
Last date modfied: 06/20/20
The purpose of this program is to write the function to input
and print user input as a list.
"""
# this function will get one input and return it
def get_input():
    """
    Prompt the user for one number and return it.

    :return: the entered value as an int (the original docstring claimed a
        string; int() is applied before returning)
    :raises ValueError: if the input cannot be parsed as an integer
    """
    enter_number = int(input("Enter a number:"))
    return enter_number
    # returns an int parsed from user input
# this function will return a list of input
def make_list(reader=None):
    """
    Collect three numbers between 1 and 50 and return them as a list.

    :param reader: optional zero-argument callable producing the next
        number; defaults to get_input (interactive prompt).  Exposed so
        the function can be exercised without a console.
    :return: list of the three validated numbers
    :raises ValueError: if an entry is not numeric or is outside 1..50
    """
    if reader is None:
        reader = get_input
    numbers = []
    try:
        for _ in range(0, 3):
            value = reader()
            # Bug fix: the original test was `1 > x and x > 50`, which can
            # never be true; a value is invalid when below 1 OR above 50.
            # Validate before appending so rejected values are not kept.
            if value < 1 or value > 50:
                raise ValueError("%r is not between 1 and 50" % value)
            numbers.append(value)
    except ValueError as err:
        print("Value Error!", err)
        raise
    # Bug fix: the original had no return, so callers always received None.
    return numbers
if __name__ == '__main__':
    # Script entry point: gather three numbers and echo the result.
    print(make_list())
|
993,931 | 5db2c821fbbc3e8717079f3e064ccc497ac2b2fa | # http://www.cnblogs.com/huxi/archive/2010/07/04/1771073.html |
993,932 | cf0265a10bc1f8f0a98bcf14bad46b9a31df1cc5 | from django.urls import include, path
app_name = 'users'
urlpatterns = [
# path('')
] |
993,933 | 40389dd43b0670412d504568090a06181970ea73 | '''
Copyright (C) 2019 Naoki Akai.
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this file,
You can obtain one at https://mozilla.org/MPL/2.0/.
'''
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import math
from numpy.random import normal, random
import matplotlib.pyplot as plt
class RobotSim:
    """2D differential-drive robot simulator (Python 2 code).

    Maintains a ground-truth pose (gt_*) and a noisy "odometry" pose
    (sim_*), simulates range/bearing measurements to known landmarks
    (with Gaussian noise and occasional random ranges), and plots the
    world with matplotlib.
    """
    def __init__(self, x=0.0, y=0.0, yaw=0.0):
        self.gt_x = x
        self.gt_y = y
        self.gt_yaw = yaw
        self.sim_x = x
        self.sim_y = y
        self.sim_yaw = yaw
        self.sim_v = 0.0
        self.sim_w = 0.0
        # Odometry noise model coefficients (translation/rotation mix).
        self.odom_noise1 = 1.0
        self.odom_noise2 = 0.5
        self.odom_noise3 = 0.5
        self.odom_noise4 = 2.0
        self.sim_time_step = 0.05
        self.max_measurement_range = 5.0
        self.measurement_range_variance = 0.1 * 0.1
        self.measurement_angle_variance = 0.01 * 0.01
        # Fraction of in-range measurements replaced by a random range.
        self.random_measurement_rate = 0.05
        self.landmarks = []
        self.plot_size_x = 5.0
        self.plot_size_y = 5.0
        self.PI = 3.14159265359
        self.PI2 = 6.28318530718
    def set_odom_noises(self, odom_noise1, odom_noise2, odom_noise3, odom_noise4):
        self.odom_noise1 = odom_noise1
        self.odom_noise2 = odom_noise2
        self.odom_noise3 = odom_noise3
        self.odom_noise4 = odom_noise4
    def set_plot_sizes(self, plot_size_x, plot_size_y):
        self.plot_size_x = plot_size_x
        self.plot_size_y = plot_size_y
    def set_sim_time_step(self, sim_time_step):
        self.sim_time_step = sim_time_step
    def set_max_measurement_range(self, max_measurement_range):
        self.max_measurement_range = max_measurement_range
    def set_measurement_variances(self, measurement_range_variance, measurement_angle_variance):
        self.measurement_range_variance = measurement_range_variance
        self.measurement_angle_variance = measurement_angle_variance
    def set_random_measurement_rate(self, random_measurement_rate):
        self.random_measurement_rate = random_measurement_rate
    def add_landmark(self, x, y):
        self.landmarks.append([x, y])
    def print_landmarks(self):
        for i in range(len(self.landmarks)):
            print i, 'th landmark: x =', self.landmarks[i][0], '[m], y =', self.landmarks[i][1], ' [m]'
        print ''
    def mod_yaw(self, yaw):
        # Normalize an angle into [-pi, pi].
        while yaw < -self.PI:
            yaw += self.PI2
        while yaw > self.PI:
            yaw -= self.PI2
        return yaw
    def print_gt_pose(self):
        print 'gt: x =', self.gt_x, '[m], y =', self.gt_y, '[m], yaw =', self.gt_yaw * 180.0 / self.PI, '[deg]'
    def get_gt_pose(self):
        return self.gt_x, self.gt_y, self.gt_yaw
    def print_sim_pose(self):
        print 'sim: x =', self.sim_x, '[m], y =', self.sim_y, '[m], yaw =', self.sim_yaw * 180.0 / self.PI, '[deg]'
    def get_sim_pose(self):
        return self.sim_x, self.sim_y, self.sim_yaw
    def update_pose(self, v, w):
        # Advance both poses one time step for commanded velocities (v, w).
        # update the ground truth pose
        delta_dist = v * self.sim_time_step
        delta_yaw = w * self.sim_time_step
        x = self.gt_x + delta_dist * math.cos(self.gt_yaw)
        y = self.gt_y + delta_dist * math.sin(self.gt_yaw)
        yaw = self.gt_yaw + delta_yaw
        self.gt_x = x
        self.gt_y = y
        self.gt_yaw = self.mod_yaw(yaw)
        # update the simulation pose and calculate the simulation velocities
        # (0.9 scale models systematic under-reporting; Gaussian term grows
        # with commanded motion)
        delta_dist2 = delta_dist * delta_dist
        delta_yaw2 = delta_yaw * delta_yaw
        delta_dist_sim = delta_dist * 0.9 + normal(0.0, self.odom_noise1 * delta_dist2 + self.odom_noise2 * delta_yaw2)
        delta_yaw_sim = delta_yaw * 0.9 + normal(0.0, self.odom_noise3 * delta_dist2 + self.odom_noise4 * delta_yaw2)
        x = self.sim_x + delta_dist_sim * math.cos(self.sim_yaw)
        y = self.sim_y + delta_dist_sim * math.sin(self.sim_yaw)
        yaw = self.sim_yaw + delta_yaw_sim
        self.sim_x = x
        self.sim_y = y
        self.sim_yaw = self.mod_yaw(yaw)
        self.sim_v = delta_dist_sim / self.sim_time_step
        self.sim_w = delta_yaw_sim / self.sim_time_step
    def print_simulated_velocities(self):
        print 'v =', self.sim_v, '[m/sec], w =', self.sim_w, '[rad/sec]'
    def get_simulated_velocities(self):
        return self.sim_v, self.sim_w
    def get_sensor_measurements(self):
        # Noisy (range, bearing) pairs to every landmark within range,
        # computed from the ground-truth pose.
        measurements = []
        for i in range(len(self.landmarks)):
            dx = self.landmarks[i][0] - self.gt_x
            dy = self.landmarks[i][1] - self.gt_y
            dl = normal(math.sqrt(dx * dx + dy * dy), self.measurement_range_variance)
            if dl <= self.max_measurement_range:
                dyaw = normal(math.atan2(dy, dx) - self.gt_yaw, self.measurement_angle_variance)
                dyaw = self.mod_yaw(dyaw)
                # simulate random range measurement
                if random() < self.random_measurement_rate:
                    dl = random() * self.max_measurement_range
                measurements.append([dl, dyaw])
        return measurements
    def plot_sim_world(self, x, y, yaw, measurements):
        # Draw landmarks (black), measurements (red), ground truth (black)
        # and the estimated pose (green), centered on (x, y).
        plt.clf()
        plt.xlim(x - self.plot_size_x, x + self.plot_size_x)
        plt.ylim(y - self.plot_size_y, y + self.plot_size_y)
        plt.grid(which='major', color='black', linestyle='-')
        # plt.grid(which='minor', color='black', linestyle='-')
        for i in range(len(self.landmarks)):
            plt.plot(self.landmarks[i][0], self.landmarks[i][1], marker='o', color='black', markersize=30)
        for i in range(len(measurements)):
            if measurements[i][0] > 0.0:
                mx = measurements[i][0] * math.cos(yaw + measurements[i][1]) + x
                my = measurements[i][0] * math.sin(yaw + measurements[i][1]) + y
                plt.plot(mx, my, marker='o', color='red', markersize=20)
        plt.plot(self.gt_x, self.gt_y, marker='o', color='black', markersize=30)
        plt.plot(x, y, marker='o', color='green', markersize=20)
        plt.pause(self.sim_time_step)
|
993,934 | 838d9ba8338eaf797ec2081f2b47ca16d60265f9 | from django.db import models
from Babies.models import Baby
# Create your models here.
class Event(models.Model):
    """A timestamped event recorded for a baby, stored in table 'events'."""
    # Integer code for the kind of event; the code mapping is defined by
    # the client -- TODO confirm the mapping.
    event_type= models.IntegerField(default=1)
    # PROTECT: a baby with recorded events cannot be deleted.
    baby_id= models.ForeignKey(Baby, on_delete=models.PROTECT,related_name="events")
    note= models.CharField(max_length=500)
    # Set once when the row is created.
    timestamp= models.DateTimeField(auto_now_add=True, blank=True)
    class Meta:
        db_table='events'
993,935 | 922e6079c01853d1f242acdeeaf1605d981a07ab | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
@Time : 2021/5/14 20:21
@Auth : 可优
@File : serializers.py
@IDE : PyCharm
@Motto: ABC(Always Be Coding)
@Email: keyou100@qq.com
@Company: 湖南省零檬信息技术有限公司
@Copyright: 柠檬班
-------------------------------------------------
"""
from rest_framework import serializers
from interfaces.models import Interfaces
from .models import Projects
from debugtalks.models import DebugTalks
from utils.validators import ManualValidateIsExist
from utils.base_serializers import RunSerializer
class ProjectModelSerializer(serializers.ModelSerializer):
    """Full project serializer; creating a project also creates its
    companion DebugTalks record."""
    class Meta:
        model = Projects
        exclude = ('update_time', )
        extra_kwargs = {
            "create_time": {
                "read_only": True,
                "format": "%Y年%m月%d日 %H:%M:%S"
            }
        }
    def create(self, validated_data):
        # Every new project gets an associated (empty) debugtalk file row.
        instance = super().create(validated_data)
        DebugTalks.objects.create(project=instance)
        return instance
class ProjectsNamesModelSerailizer(serializers.ModelSerializer):
    """Slim serializer exposing only a project's id and name.

    NOTE(review): the class name misspells 'Serializer' ('Serailizer');
    kept as-is because other modules may import it under this name.
    """
    class Meta:
        model = Projects
        fields = ('id', 'name')
class InterfacesNamesModelSerializer(serializers.ModelSerializer):
    """Slim serializer exposing only an interface's id and name."""
    class Meta:
        model = Interfaces
        fields = ('id', 'name')
class InterfacesProjectsModelSerializer(serializers.ModelSerializer):
    """Serializer exposing only a project's related interfaces as a
    read-only nested list of (id, name) pairs."""
    interfaces = InterfacesNamesModelSerializer(label='项目所属接口信息', help_text='项目所属接口信息',
                                                many=True, read_only=True)
    class Meta:
        model = Projects
        fields = ('interfaces', )
# class ProjectRunSerializer(RunSerializer):
#
# class Meta(RunSerializer.Meta):
# model = Projects
class ProjectRunSerializer(RunSerializer):
    """Serializer for triggering a run of a whole project.

    Adds ``env_id`` on top of the shared RunSerializer; the value must
    reference an existing environment (checked by ManualValidateIsExist).
    """
    env_id = serializers.IntegerField(label="所属环境id", help_text="所属环境id",
                                      validators=[ManualValidateIsExist('env')])
    class Meta:
        model = Projects
        fields = ('id', 'env_id')
|
993,936 | b5da3828b72d2586eefb2a89bb437dcb1f95bcc6 | from __future__ import division
import numpy as np
from numpy import fft
import time
import pycuda.autoinit
import pycuda.driver as drv
from pycuda.compiler import SourceModule
import pycuda.gpuarray as gpuarray
import skcuda.fft as cu_fft
from skcuda import misc
from pycuda import cumath
REAL_DTYPE = np.float32
COMPLEX_DTYPE = np.complex64
# TODO: Damping is wrong. It is 1.0 in two of the corners.
# Simulation parameters
dt = 0.1
T_MAX = 20
N_TIMESTEPS = int((1.0 * T_MAX) / dt)
N_RUNS = 2
times = np.zeros(N_RUNS)
# Number of points. It's best to keep this as a multiple of 2 - see fftw
# documentation
N = 512
assert N % 2 == 0, "N is not even. This is going to cause all sorts of\
problems!"
# Spatial extent. The spatial grid will run roughly from - max_XY to max_XY in
# both dimesnions
max_XY = 55
grid_element_size = (2.0 * max_XY) / (N - 1)
# Array of coordinates along the x-axis
x_axis = np.linspace(-max_XY, max_XY, num=N)
x, y = np.meshgrid(x_axis, x_axis)
assert x.shape == (N, N)
# Construct k-space grid
k_axis = ((2 * np.pi) / (2 * max_XY)) * fft.fftshift(np.linspace(- N/2, N/2 - 1,
num=N))
k_x, k_y = np.meshgrid(k_axis, k_axis)
K = k_x ** 2 + k_y ** 2
# plt.contourf(np.absolute(psi)) Physical Parameters
g = 0.187
g_R = 2.0 * g
# Also denoted gamma_C
gamma = 1.0
gamma_R = 1.5 * gamma
R = 0.1
Pth = (gamma * gamma_R) / R
# Initial wavefunction and exponential factors
# psi0 = 0.2 * (x ** 2 + y ** 2) ** 0 * np.exp(-0.04 * (x ** 2 + y ** 2))
psi0 = np.random.normal(size=[N, N]) + 0j
currentDensity = np.absolute(psi0) ** 2
# Pumping
sigma = 15.0
P = 1.5 * (200 * Pth / (sigma) ** 2) * np.exp(-1 / (2 * sigma ** 2)
* (x ** 2 + y ** 2))
n = np.array(0.0 * P)
kineticFactorHalf = np.exp(-1.0j * K * dt / 2.0)
# expFactorPolariton = np.exp(-1.0j * (n * (1j * R + g_R) + g * currentDensity
# - 1j * gamma))
# Quadratic Potential
# :potential = 40 * np.exp(- 0.01 * (x ** 2 + 2 * y ** 2))
# Toroidal trap
# potential = -0.001 * np.exp(-0.01 * (x **2 + y ** 2)) * (x **2 + y **2) ** 2
potential = 0.0
# Damping boundaries?
damping = (0.5 * np.tanh(5 * (x + max_XY - 5)) * np.tanh(5 + (y + max_XY - 5))
+ 0.5 * np.tanh(5 * (- x + max_XY - 5)) *
np.tanh(5 * (- y + max_XY - 5)))
# damping = 1.0
# Set up arrays to store the density
# First two dimensions are spatial, third is time
# density = np.zeros(x.shape + tuple([N_TIMESTEPS]))
# Run simulation
psi = psi0
# make GPU versions of all of our arrays.
psi_gpu = gpuarray.to_gpu(psi.astype(COMPLEX_DTYPE))
n_gpu = gpuarray.to_gpu(n.astype(REAL_DTYPE))
kineticFactorHalf_gpu = gpuarray.to_gpu(kineticFactorHalf.astype(COMPLEX_DTYPE))
damping_gpu = gpuarray.to_gpu(damping.astype(REAL_DTYPE))
currentDensity_gpu = gpuarray.to_gpu(currentDensity.astype(REAL_DTYPE))
Pdt_gpu = gpuarray.to_gpu((P*dt).astype(REAL_DTYPE))
gammaRdt_gpu = gpuarray.to_gpu(np.array(gamma_R * dt).astype(REAL_DTYPE))
Rdt_gpu = gpuarray.to_gpu(np.array(R * dt).astype(REAL_DTYPE))
spectrum = gpuarray.to_gpu(np.zeros((N_TIMESTEPS, N)).astype(COMPLEX_DTYPE))
# expFactorExciton_gpu = cumath.exp(-gammaRdt_gpu +
# (Rdt_gpu * currentDensity_gpu))
expFactorPolFirst = (0.5 * R - 1j * g_R) * dt
expFactorPolSecond = -1j * g * dt
expFactorPolThird = -0.5 * gamma * dt
expFactorPolFirst_gpu = gpuarray.to_gpu(np.array((0.5 * R - 1j * g_R) * dt)
.astype(COMPLEX_DTYPE))
expFactorPolSecond_gpu = gpuarray.to_gpu(np.array(-1j * g * dt)
.astype(COMPLEX_DTYPE))
expFactorPolThird_gpu = gpuarray.to_gpu(np.array(-0.5 * gamma * dt)
.astype(COMPLEX_DTYPE))
# make FFT plans
# TODO: Are these 2D????
plan_forward = cu_fft.Plan((N, N), COMPLEX_DTYPE, COMPLEX_DTYPE)
plan_inverse = cu_fft.Plan((N, N), COMPLEX_DTYPE, COMPLEX_DTYPE)
mod = SourceModule("""
#include <pycuda-complex.hpp>
__global__ void modSquared(pycuda::complex<float> *a,
float *dest, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int ind = idx + N * idy;
dest[ind] = a[ind]._M_re * a[ind]._M_re
+ a[ind]._M_im * a[ind]._M_im;
}
""")
mod2 = SourceModule("""
#include <pycuda-complex.hpp>
__device__ pycuda::complex<float> comExp(pycuda::complex<float> z)
{
pycuda::complex<float> res;
float s, c;
float e = expf(z.real());
sincosf(z.imag(), &s, &c);
res._M_re = c * e;
res._M_im = s * e;
return res;
}
__global__ void test(pycuda::complex<float> a1,
pycuda::complex<float> a2, pycuda::complex<float> a3,
pycuda::complex<float> *dest, float *x1, float *x2, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int ind = idx + N * idy;
dest[ind] *= comExp(a1 * x1[ind] + a2 * x2[ind] + a3);
}
""")
modSquared = mod.get_function("modSquared")
psiNonlinear = mod2.get_function("test")
modSquared.prepare(["P", "P", "I"])
psiNonlinear.prepare("FFFPPPI")
block = (16, 16, 1)
grid = (64, 64)
# Timing loop: run the whole simulation N_RUNS times and average.
# NOTE(review): the loop variable "n" shadows the reservoir array "n"
# defined above (harmless here because the array was already copied to
# the GPU, but worth renaming).
for n in np.arange(N_RUNS):
    start = time.time()
    for step in xrange(N_TIMESTEPS):
        # print step
        # Implementing split-step method
        # Update wavefunction and resovoir, record density
        # First half kinetic step: FFT, multiply by exp(-iK dt/2), IFFT.
        cu_fft.fft(psi_gpu, psi_gpu, plan_forward)
        psi_gpu *= kineticFactorHalf_gpu
        cu_fft.ifft(psi_gpu, psi_gpu, plan_inverse, scale=True)
        # currentDensity_gpu = abs(psi_gpu) ** 2
        # currentDensity_gpu = psi_gpu.real **2 + psi_gpu.imag ** 2
        # |psi|^2 via psi * conj(psi); the .real drops the (zero) imag part.
        currentDensity_gpu = (psi_gpu * psi_gpu.conj()).real
        # modSquared.prepared_call(grid, block, psi_gpu.gpudata,
        #                          currentDensity_gpu.gpudata, 1024)
        # n_gpu *= cumath.exp(-gammaRdt_gpu + Rdt_gpu * currentDensity_gpu)
        # Reservoir decay (stimulated scattering + loss), then pump refill.
        n_gpu *= cumath.exp(misc.add(- gammaRdt_gpu,
                            - misc.multiply(Rdt_gpu, currentDensity_gpu)))
        n_gpu += Pdt_gpu
        # Nonlinear polariton step: psi *= exp(c1*n + c2*|psi|^2 + c3).
        psi_gpu *= cumath.exp(
            misc.add(
                misc.add(misc.multiply(expFactorPolFirst_gpu, n_gpu),
                misc.multiply(expFactorPolSecond_gpu, currentDensity_gpu)),
                expFactorPolThird_gpu))
        # psiNonlinear.prepared_call(grid, block, expFactorPolFirst,
        #                            expFactorPolSecond, expFactorPolThird,
        #                            psi_gpu.gpudata, n_gpu.gpudata,
        #                            currentDensity_gpu.gpudata, 1024)
        # Second half kinetic step; while in k-space, record one cut of
        # the spectrum for this timestep.
        cu_fft.fft(psi_gpu, psi_gpu, plan_forward)
        # record spectrum
        drv.memcpy_dtod(spectrum[step, :].gpudata, psi_gpu[N//2, :].gpudata,
                        psi_gpu[N//2, :].nbytes)
        psi_gpu *= kineticFactorHalf_gpu
        cu_fft.ifft(psi_gpu, psi_gpu, plan_inverse, scale=True)
        # Absorbing boundary mask (see TODO: wrong in two corners).
        psi_gpu *= damping_gpu
        n_gpu *= damping_gpu
        # # Record density
        # print("Time = %f" % (step * dt))
    # for step in np.arange(N_TIMESTEPS):
    #     cu_fft.fft(psi_gpu, psi_gpu, plan_forward)
    #     cu_fft.ifft(psi_gpu, psi_gpu, plan_inverse, scale=True)
    #     cu_fft.fft(psi_gpu, psi_gpu, plan_forward)
    #     cu_fft.ifft(psi_gpu, psi_gpu, plan_inverse, scale=True)
    # ding!
    end = time.time()
    times[n] = (end - start) / N_TIMESTEPS
print("%f +/- %f seconds per step" % (times.mean(), times.std()))
# print("%d steps in %f seconds. On average, %f seconds per step"
#       % (N_TIMESTEPS, end - start, (end - start) / N_TIMESTEPS))
# os.system('play --no-show-progress --null --channels 1 synth %s sine %f'
#           % (1, 400))
# Copy the final state back to the host for inspection.
psiFinal = psi_gpu.get()
nFinal = n_gpu.get()
993,937 | f0cb2327a7e3bd386d9a39124a29be3c21a60a20 | # This file is part of pyrerp
# Copyright (C) 2012 Nathaniel Smith <njs@pobox.com>
# See file COPYING for license information.
# How to compute incremental std dev:
# http://mathcentral.uregina.ca/QQ/database/QQ.09.02/carlos1.html
import numpy as np
from numpy.linalg import solve, inv
from scipy import stats, sparse
# These are copied with trivial syntactic modification from R: see
# stats:::Pillai, stats:::Wilks, etc. They each return a 4-tuple
# (raw test value, approximate F, df1, df2)
# NB: this means that they are GPLed and I don't have the power to change
# that except by rewriting them from scratch.
_mv_tests = {}
def _pillai(eig, q, df_res):
test = np.sum(eig * 1. / (1 + eig))
p = len(eig)
s = min(p, q)
n = 0.5 * (df_res - p - 1)
m = 0.5 * (abs(p - q) - 1)
tmp1 = 2 * m + s + 1
tmp2 = 2 * n + s + 1
return (test, (tmp2 * 1. / tmp1 * test)/(s - test), s * tmp1, s * tmp2)
_mv_tests["Pillai"] = _pillai
# Test insensitivity to eigenvalue order:
# (eigenvalues, q, df_res) triples that must all give the same answer.
_mv_test_vecs = [(np.array([10, 0.3]), 2, 8),
                 (np.array([0.3, 10]), 2, 8)]
def test__pillai():
    # Expected values computed with R's stats:::Pillai.
    for tv in _mv_test_vecs:
        assert np.allclose(_pillai(*tv),
                           [1.13986013986014, 5.30081300813008, 4, 16])
def _wilks(eig, q, df_res):
test = np.prod(1./(1 + eig))
p = len(eig)
tmp1 = df_res - 0.5 * (p - q + 1)
tmp2 = (p * q - 2) * 1. / 4
tmp3 = p ** 2 + q ** 2 - 5
if tmp3 > 0:
tmp3 = np.sqrt(((p * q) ** 2 - 4) * 1. /tmp3)
else:
tmp3 = 1
return (test, ((test ** (-1./tmp3) - 1) * (tmp1 * tmp3 - 2 * tmp2)) * 1./p/q,
p * q, tmp1 * tmp3 - 2 * tmp2)
_mv_tests["Wilks"] = _wilks
def test__wilks():
    # Expected values computed with R's stats:::Wilks.
    for tv in _mv_test_vecs:
        assert np.allclose(_wilks(*tv),
                           [0.0699300699300699, 9.7353692808323267, 4, 14])
def _hl(eig, q, df_res):
test = np.sum(eig)
p = len(eig)
m = 0.5 * (abs(p - q) - 1)
n = 0.5 * (df_res - p - 1)
s = min(p, q)
tmp1 = 2 * m + s + 1
tmp2 = 2 * (s * n + 1)
return (test, (tmp2 * test) * 1./s/s/tmp1, s * tmp1, tmp2)
_mv_tests["Hotelling-Lawley"] = _hl
def test__hl():
    # Expected values computed with R.
    for tv in _mv_test_vecs:
        assert np.allclose(_hl(*tv),
                           [10.30, 15.45, 4.00, 12.00])
def _roy(eig, q, df_res):
p = len(eig)
test = np.max(eig)
tmp1 = max(p, q)
tmp2 = df_res - tmp1 + q
return (test, (tmp2 * test) * 1. /tmp1, tmp1, tmp2)
_mv_tests["Roy"] = _roy
def test__roy():
    # Expected values computed with R.
    for tv in _mv_test_vecs:
        assert np.allclose(_roy(*tv),
                           [10, 40, 2, 8])
class LSResult(object):
    """Result of a (possibly multi-outcome) least-squares fit.

    Wraps the coefficient matrix, the unscaled variance-covariance
    matrix (X'X)^-1, the residual sum-of-squares-and-products matrix,
    and the residual degrees of freedom, and derives standard errors,
    t-tests, and uni-/multivariate linear-hypothesis tests from them.

    NOTE: this class uses Python 2 idioms (xrange, iteritems).
    """
    def __init__(self, coef, vcov, rssp, rdf):
        # coef: (x_cols, y_cols) coefficient matrix
        # vcov: (x_cols, x_cols) unscaled variance-covariance matrix
        # rssp: (y_cols, y_cols) residual sum-of-squares-and-products
        # rdf:  scalar residual degrees of freedom
        self._coef = coef
        self._vcov = vcov
        self._rssp = rssp
        self._rdf = rdf
    def coef(self):
        """Returns a matrix of coefficients. Each column is the coefficients
        for one column of the outcome vector Y."""
        return self._coef
    def vcov(self):
        """Returns the unscaled variance-covariance matrix, (X'X)^-1."""
        return self._vcov
    def rss(self):
        """Returns the residual sum-of-squares vector.
        Each entry is the residual sum of squares for the corresponding column
        of y.
        If this is the result of a weighted least squares fit, then it is
        the weighted sum of the residual squares."""
        return self._rssp.diagonal()
    def rssp(self):
        """Returns the residual sum-of-squares-and-products matrix.
        Each entry is the residual sum of products for the corresponding
        columns of y. The diagonal contains the residual sum of squares.
        If this is the result of a weighted least squares fit, then it is the
        weighted sum of the residual squares and products."""
        return self._rssp
    def rdf(self):
        "Returns the residual degrees of freedom."
        return self._rdf
    def scaled_vcov(self):
        """Returns the scaled variance-covariance matrix.
        This is a 3-dimensional array of shape (N, N, D), where D is the
        number of columns in y, and for each such column there is an NxN
        matrix of estimated variance-covariances of the coefficients fit to
        that column."""
        # Scale the shared (X'X)^-1 by each outcome's sigma^2 = rss/rdf.
        return (self.rss()[np.newaxis, np.newaxis, :]
                * 1. / self._rdf * self._vcov[:, :, np.newaxis])
    def se(self):
        """Returns the standard errors of the coefficient estimates.
        This is a matrix of the same shape as .coef()."""
        return np.sqrt(self.scaled_vcov().diagonal().T)
    def t_tests(self):
        """For each coefficient, performs a t-test where the null is that that
        coefficient has the true value of 0.
        Returns a tuple (t, p).
        Each is a matrix of the same shape as .coef(). 't' contains t values
        for each coefficient, and 'p' contains corresponding two-tailed
        p-values for the t-test with .rdf() degrees of freedom."""
        se = self.se()
        t = self._coef / se
        # Two-tailed: double the upper-tail survival probability.
        p = 2 * stats.distributions.t.sf(np.abs(t), self._rdf)
        return (t, p)
    def lht_by_dim(self, hypothesis_matrix, rhs=None):
        """Tests the linear hypothesis L*B = rhs separately per outcome.

        Returns (F, df1, df2, p) where F and p are vectors with one
        entry per column of y.
        """
        hypothesis_matrix = np.atleast_2d(hypothesis_matrix)
        y_dim = self._coef.shape[1]
        q = hypothesis_matrix.shape[0]
        if rhs is None:
            rhs = np.zeros((q, y_dim))
        # If a 1d vector is given, assume it was meant as a column:
        rhs = np.atleast_1d(rhs)
        if rhs.ndim == 1:
            rhs = rhs.reshape((-1, 1))
        trans_coef = np.dot(hypothesis_matrix, self._coef) - rhs
        vcov = self.scaled_vcov()
        F = np.empty(y_dim)
        for i in xrange(y_dim):
            # F_i = (LB - rhs)' (L V_i L')^-1 (LB - rhs) / q
            ssh = np.dot(trans_coef[:, [i]].T,
                         solve(np.dot(np.dot(hypothesis_matrix,
                                             vcov[:, :, i]),
                                      hypothesis_matrix.T),
                               trans_coef[:, [i]]))
            F[i] = ssh * 1. / q
        p = stats.distributions.f.sf(F, q, self.rdf())
        return F, q, self.rdf(), p
    def lht_multivariate(self, hypothesis_matrix, rhs=None, subset=None):
        """Multivariate linear-hypothesis tests (Pillai, Wilks, etc.)."""
        # Returns a dict with one entry per standard multivariate test, and
        # each entry is a tuple of length 5:
        #   (raw statistic, approximate F, df1, df2, p)
        # Optionally, this can be done on just a subset of the exogenous
        # variable dimensions. 'subset' can be any way of indexing some subset
        # of the dimensions; setting it to an integer produces a
        # unidimensional lht.
        hypothesis_matrix = np.atleast_2d(hypothesis_matrix)
        if subset is None:
            subset = slice(None)
        # Normalize 'subset' to an explicit index array.
        subset = np.atleast_1d(np.arange(self._rssp.shape[0])[subset])
        y_dim = len(subset)
        q = hypothesis_matrix.shape[0]
        if rhs is None:
            rhs = np.zeros((q, y_dim))
        # If a 1d vector is given, assume it was meant as a column:
        rhs = np.atleast_1d(rhs)
        if rhs.ndim == 1:
            rhs = rhs.reshape((-1, 1))
        # SSPH <- t(L %*% B - rhs) %*% solve(L %*% V %*% t(L)) %*% (L %*% B - rhs)
        # where L is the hypothesis matrix, B is the coefs, rhs is the null
        # values for L*B, and V is the unscaled variance-covariance matrix.
        trans_coef = np.dot(hypothesis_matrix, self._coef[:, subset]) - rhs
        ssph = np.dot(trans_coef.T,
                      solve(np.dot(np.dot(hypothesis_matrix, self._vcov),
                                   hypothesis_matrix.T),
                            trans_coef))
        sspe = self._rssp[subset.reshape((-1, 1)), subset.reshape((1, -1))]
        # Eigenvalues of SSPE^-1 SSPH drive all four classical tests.
        eigs = np.linalg.eigvals(np.linalg.lstsq(sspe, ssph)[0]).real
        results = {}
        for name, fn in _mv_tests.iteritems():
            (stat, F, df1, df2) = fn(eigs, q, self._rdf)
            p = stats.distributions.f.sf(F, df1, df2)
            results[name] = (stat, F, df1, df2, p)
        return results
class QRIncrementalLS(object):
    """Perform least-squares regression with very large model matrices.
    Supports arbitrary numbers of predictors, and for a given model matrix can
    simultaneously solve multiple regression problems with differing outcome
    vectors.
    This uses .incremental_qr.IncrementalQR to form the QR
    decomposition. It is slower than XtXIncrementalLS, and cannot accept
    sparse matrices, but it may be more numerically stable."""
    def __init__(self):
        # Lazily initialized on the first append, once shapes are known.
        self._qr = None
        self._x_cols = None
        self._y_cols = None
        self._y_ssp = None
        self._x_rows = 0
    def append(self, x_strip, y_strip):
        """Feed one horizontal strip of rows (X, Y) into the running QR."""
        # Imported here to avoid a hard dependency until actually used.
        from .incremental_qr import IncrementalQR
        assert x_strip.ndim == y_strip.ndim == 2
        # NOTE(review): no check that x_strip and y_strip have the same
        # number of rows (cf. _XtXAccumulator.append_top_half).
        if self._qr is None:
            self._x_cols = x_strip.shape[1]
            self._y_cols = y_strip.shape[1]
            self._qr = IncrementalQR(self._x_cols, self._y_cols)
            self._y_ssp = np.zeros((self._y_cols, self._y_cols))
        self._qr.append(x_strip, y_strip)
        # Track the total Y'Y so the residual SSP can be recovered in fit().
        self._y_ssp += np.dot(y_strip.T, y_strip)
        self._x_rows += x_strip.shape[0]
    def fit(self):
        """Solve the accumulated system and return an LSResult."""
        assert self._qr is not None, "Must append at least 1 row!"
        r = self._qr.r()
        qty = self._qr.qty()
        assert qty.shape == (self._x_cols, self._y_cols)
        coef = solve(r, qty)
        vcov = inv(np.dot(r.T, r))
        # Q'y is a projection of y onto the subspace of R^nrows that is
        # spanned by the X vectors. In other words, Q'y is (an orthonormal
        # projection of) that part of y which can be explained by
        # X. Therefore, the explained SS is (Q'y ** 2).sum() (if you want to
        # do ordered-entry anova type stuff, then these have the values you
        # need), and the residual sum of squares is the difference between the
        # total sum of squares and the explained sum of squares:
        explained_ssp = np.dot(qty.T, qty)
        rssp = self._y_ssp - explained_ssp
        return LSResult(coef, vcov, rssp, self._x_rows - self._x_cols)
class GroupWeightedLSResult(LSResult):
    """An LSResult that also carries per-group diagnostics from a
    group-weighted fit: the weights used, each group's (unweighted)
    residual SSP matrix, and each group's raw row count."""
    def __init__(self, coef, vcov, rss, rdf,
                 group_weights, group_rssp, group_df):
        LSResult.__init__(self, coef, vcov, rss, rdf)
        self._group_weights = group_weights
        self._group_rssp = group_rssp
        self._group_df = group_df
    def group_weights(self):
        """Return the per-group weights this fit was computed with."""
        return self._group_weights
    def group_rssp(self):
        """Return a dict: group name -> that group's *unweighted*
        residual sum-of-squares-and-products matrix."""
        return self._group_rssp
    def group_df(self):
        """Return a dict: group name -> number of rows appended for that
        group.

        These are raw row counts, not residual degrees of freedom."""
        return self._group_df
class _XtXAccumulator(object):
def __init__(self, x_cols, y_cols):
self.xtx = np.zeros((x_cols, x_cols))
self.xty = np.zeros((x_cols, y_cols))
self.y_ssp = np.zeros((y_cols, y_cols))
self.rows = 0
@classmethod
def append_top_half(cls, x_strip, y_strip):
assert x_strip.ndim == y_strip.ndim == 2
assert x_strip.shape[0] == y_strip.shape[0]
if sparse.issparse(x_strip):
xtx = x_strip.T * x_strip
else:
xtx = np.dot(x_strip.T, x_strip)
if sparse.issparse(x_strip) or sparse.issparse(y_strip):
xty = x_strip.T * y_strip
else:
xty = np.dot(x_strip.T, y_strip)
if sparse.issparse(y_strip):
y_ssp = y_strip.T * y_strip
else:
y_ssp = np.dot(y_strip.T, y_strip)
return (x_strip.shape[0], xtx, xty, y_ssp)
def append_bottom_half(self, top_half_rv):
(rows, xtx, xty, y_ssp) = top_half_rv
self.rows += rows
# If you add a dense array to a sparse matrix, what you get out is a
# dense np.matrix, and we just want to deal with np.ndarray's.
self.xtx += xtx
if isinstance(self.xtx, np.matrix):
self.xtx = np.asarray(self.xtx)
self.xty += xty
if isinstance(self.xty, np.matrix):
self.xty = np.asarray(self.xty)
self.y_ssp += y_ssp
if isinstance(self.y_ssp, np.matrix):
self.y_ssp = np.asarray(self.y_ssp)
class XtXGroupWeightedIncrementalLS(object):
    """Perform weighted least-squares regression with very large model
    matrices.
    Supports arbitrary numbers of predictors, and for a given model matrix can
    simultaneously solve multiple regression problems with differing outcome
    vectors.
    For each set of data points you pass in, you also specify which "group" it
    belongs too. When fitting your data, you must specify a weight for each
    group of data (e.g., to handle heteroskedasticity). You may call
    fit() repeatedly with different weights; this is much faster than
    recreating your model matrix from scratch.
    Memory usage is roughly (x_cols + y_cols)^2 doubles PER GROUP.
    This works by the direct method (forming X'X and solving it). It is quite
    fast, and can handle sparse matrices (in the sense of scipy.sparse). It
    may be less numerically stable than QR-based methods.

    NOTE: Python 2 only (print statements, iteritems, 'raise E, v')."""
    def __init__(self):
        self._x_cols = None
        self._y_cols = None
        # One _XtXAccumulator per group name.
        self._accumulators = {}
    @classmethod
    def append_top_half(cls, group, x_strip, y_strip):
        """The stateless part of append(), split out to ease parallel
        processing. You can run many append_top_half's in different processes
        in parallel, and then queue them into append_bottom_half."""
        return (group,
                x_strip.shape[1], y_strip.shape[1],
                _XtXAccumulator.append_top_half(x_strip, y_strip))
    def append_bottom_half(self, top_half_rv):
        """Fold a top-half result into the accumulator for its group."""
        (group, x_cols, y_cols, accumulator_top_half_rv) = top_half_rv
        self._x_cols = x_cols
        self._y_cols = y_cols
        if not group in self._accumulators:
            self._accumulators[group] = _XtXAccumulator(x_cols, y_cols)
        self._accumulators[group].append_bottom_half(accumulator_top_half_rv)
    def append(self, group, x_strip, y_strip):
        """Convenience: top half + bottom half in one call."""
        self.append_bottom_half(self.append_top_half(group, x_strip, y_strip))
    def groups(self):
        """Returns the group names seen so far."""
        return self._accumulators.keys()
    def fit_unweighted(self):
        """Fit with every group weighted equally (weight 1)."""
        ones = dict([(g, 1) for g in self._accumulators.keys()])
        return self.fit(ones)
    def fgls(self, maxiter=100):
        """Feasible GLS: iterate fits, re-weighting each group by the
        inverse of its per-row residual variance, until the weights
        converge. Raises if convergence is not reached in maxiter steps."""
        fit = self.fit_unweighted()
        group_df = fit.group_df()
        old_group_weights = [1] * len(self._accumulators)
        for i in xrange(maxiter):
            print "iter %s" % (i,)
            group_rssp = fit.group_rssp()
            group_weights = {}
            for group, rssp in group_rssp.iteritems():
                # Assume that -- if we are multivariate -- the
                # heteroskedasticity parameters are the same for each
                # dimension:
                rss = rssp.diagonal().mean()
                group_weights[group] = 1. / (rss * 1. / group_df[group])
            fit = self.fit(group_weights)
            # XX stupid convergence criterion:
            print group_weights
            # NOTE(review): relies on stable dict iteration order when
            # comparing against old_group_weights.
            if np.allclose(old_group_weights, group_weights.values()):
                break
            old_group_weights = group_weights.values()
        else:
            raise Exception, fit
        return fit
    def fit(self, group_weights):
        """Solve the weighted normal equations and return a
        GroupWeightedLSResult; group_weights maps group name -> weight."""
        assert self._x_cols is not None, "Need at least 1 row!"
        xtwx = np.zeros((self._x_cols, self._x_cols))
        xtwy = np.zeros((self._x_cols, self._y_cols))
        for group, accumulator in self._accumulators.iteritems():
            xtwx += group_weights[group] * accumulator.xtx
            xtwy += group_weights[group] * accumulator.xty
        coef = solve(xtwx, xtwy)
        vcov = inv(xtwx)
        df = 0
        group_df = {}
        group_rssp = {}
        rssp = np.zeros((self._y_cols, self._y_cols))
        for group, accumulator in self._accumulators.iteritems():
            df += accumulator.rows
            group_df[group] = accumulator.rows
            # Residual sum of squares and products matrix is
            #   (Y - XB)'(Y - XB)
            #   = Y'Y - B'X'Y - (B'X'Y')' + B'X'XB
            this_btxty = np.dot(coef.T, accumulator.xty)
            this_rssp = (accumulator.y_ssp
                         - this_btxty
                         - this_btxty.T
                         + np.dot(np.dot(coef.T, accumulator.xtx), coef))
            group_rssp[group] = this_rssp
            rssp += group_weights[group] * this_rssp
        rdf = df - self._x_cols
        return GroupWeightedLSResult(coef, vcov, rssp, rdf,
                                     dict(group_weights), group_rssp, group_df)
class XtXIncrementalLS(object):
    """Unweighted incremental least squares via the normal equations.

    A thin wrapper delegating everything to
    XtXGroupWeightedIncrementalLS with a single group ("foo") whose
    weight is 1. Faster than QRIncrementalLS and accepts scipy.sparse
    strips, at some possible cost in numerical stability.
    """
    _test_sparse = None
    def __init__(self):
        self._gwils = XtXGroupWeightedIncrementalLS()
    @classmethod
    def append_top_half(cls, x_strip, y_strip):
        """Stateless per-strip preprocessing (safe to run in workers)."""
        return XtXGroupWeightedIncrementalLS.append_top_half(
            "foo", x_strip, y_strip)
    def append_bottom_half(self, append_top_half_rv):
        """Fold a top-half result into the running accumulator."""
        return self._gwils.append_bottom_half(append_top_half_rv)
    def append(self, *args):
        """Convenience: top half + bottom half in one call."""
        top_half_rv = self.append_top_half(*args)
        return self.append_bottom_half(top_half_rv)
    def fit(self):
        """Solve and return the fit (an LSResult-compatible object)."""
        return self._gwils.fit({"foo": 1})
def _incremental_ls_tst(class_):
    """Shared regression-test battery for incremental LS implementations.

    Fits two noisy linear outcomes against the same design matrix and
    checks every public accessor of the result against reference values
    computed with R, in one-shot, incremental, sparse, and generator
    feeding modes. (Python 2: uses print statements and xrange.)
    """
    x = np.arange(10)
    # Fixed pseudo-random residuals so the expected values are exact.
    r1 = np.array([-1.34802662, 0.88780193, 0.97355492, 1.09878012,
                   -1.24346173, 0.03237138, 1.70651768, -0.70375099,
                   0.21029281, 0.80667505])
    r2 = np.array([0.48670125, -1.82877490, -0.32244478, -1.00960602,
                   0.54804895, -0.24075048, 0.43178080, -1.14938703,
                   -0.07269548, -1.75175427])
    y1 = 1 + 2 * x + r1
    y2 = 3 + 7 * x + r2
    X = np.hstack([np.ones((10, 1)), x.reshape((-1, 1))])
    Y = np.hstack([y1.reshape((-1, 1)), y2.reshape((-1, 1))])
    # True values calculated with R:
    def check(fit):
        assert np.allclose(fit.coef(),
                           np.array([[0.9867409, 2.739645],
                                     [2.0567410, 6.948770]]))
        assert fit.rdf() == 8
        assert np.allclose(fit.rss(),
                           [9.558538, 7.01811])
        assert np.allclose(fit.rssp(),
                           np.array([[9.558538163832505, -2.846727419732163],
                                     [-2.846727419732163, 7.018110344812262]]))
        y1_svcov = np.array([[0.41275506, -0.065171851],
                             [-0.06517185, 0.014482634]])
        y2_svcov = np.array([[0.30305476, -0.047850752],
                             [-0.047850752, 0.010633501]])
        svcov = np.concatenate([y1_svcov[..., np.newaxis],
                                y2_svcov[..., np.newaxis]],
                               axis=2)
        assert np.allclose(fit.scaled_vcov(), svcov)
        assert np.allclose(fit.se(),
                           np.array([[0.6424602, 0.5505041],
                                     [0.1203438, 0.1031189]]))
        (t, p) = fit.t_tests()
        assert np.allclose(t,
                           np.array([[1.535879, 4.976612],
                                     [17.090542, 67.386024]]))
        assert np.allclose(p,
                           np.array([[1.631190e-01, 1.084082e-03],
                                     [1.396098e-07, 2.617624e-12]]))
        # Per-dimension linear hypothesis tests:
        F, df1, df2, p = fit.lht_by_dim([1, 0])
        assert np.allclose(F, [2.358923445752836, 24.76666631211173])
        assert df1 == 1
        assert df2 == 8
        assert np.allclose(p, [0.1631190208872526, 0.001084082203556753])
        F, df1, df2, p = fit.lht_by_dim(np.eye(2))
        assert np.allclose(F, [585.023031528357, 8862.63814885004])
        assert df1 == 2
        assert df2 == 8
        assert np.allclose(p, [2.12672266281485e-09, 4.1419421127122e-14])
        F, df1, df2, p = fit.lht_by_dim([0, 1], rhs=2)
        assert np.allclose(F, [0.2223036807600276, 2303.129418461575])
        assert df1 == 1
        assert df2 == 8
        assert np.allclose(p, [0.6498810865566884, 3.931182195134797e-11])
        for rhs in ([3, 2],
                    [[3, 3], [2, 2]]):
            F, df1, df2, p = fit.lht_by_dim(np.eye(2), rhs=[3, 2])
            assert np.allclose(F, [13.04324941825649, 3912.42755464922])
            assert df1 == 2
            assert df2 == 8
            assert np.allclose(p, [0.003034103325876033, 1.088126971915703e-12])
        # Multivariate tests, all four classical statistics:
        mv = fit.lht_multivariate(np.eye(2))
        assert np.allclose(mv["Pillai"],
                           (1.0062957480131434, 4.0506850846259770,
                            4, 16, 0.01866246907197198))
        assert np.allclose(mv["Wilks"],
                           (0.0003166157715902, 193.1988279124278733,
                            4, 14, 4.4477e-12))
        assert np.allclose(mv["Hotelling-Lawley"],
                           (3136.5178539783428278, 4704.7767809675142416,
                            4, 12, 4.684822e-19))
        assert np.allclose(mv["Roy"],
                           (3136.5111954638095995, 12546.0447818552383978,
                            2, 8, 1.0320e-14))
        for rhs in ([3, 2],
                    [[3, 3], [2, 2]]):
            mv = fit.lht_multivariate([[3, 1], [0, 2]], rhs)
            assert np.allclose(mv["Pillai"],
                               (1.0000722571514054, 4.0005780989830289,
                                4, 16, 0.0195210910404859))
            assert np.allclose(mv["Wilks"],
                               (0.0004862616193171, 155.2205092909892130,
                                4, 14, 1.989176787533242e-11))
            assert np.allclose(mv["Hotelling-Lawley"],
                               (2054.3575308552108254, 3081.5362962828162381,
                                4, 12, 5.920032e-18))
            assert np.allclose(mv["Roy"],
                               (2054.3569717521481834, 8217.4278870085927338,
                                2, 8, 5.6033913856139e-14))
        # Check multivariate tests on a single variable (via 'subset') give
        # the same answer as univariate tests:
        for subset in (0, [0], np.asarray([True, False])):
            print subset
            mv = fit.lht_multivariate([1, 0], subset=subset)
            F, df1, df2, p = fit.lht_by_dim([1, 0])
            for (mv_raw, mv_F, mv_df1, mv_df2, mv_p) in mv.itervalues():
                assert np.allclose(mv_F, F[0])
                assert np.allclose(mv_df1, df1)
                assert np.allclose(mv_df2, df2)
                assert np.allclose(mv_p, p[0])
    def do_fit(iterable):
        # Feed (X, Y) strips to a fresh solver and validate the fit.
        ls = class_()
        for x, y in iterable:
            ls.append(x, y)
        fit = ls.fit()
        check(fit)
    for test_sparse in ([], ["X"], ["Y"], ["X", "Y"]):
        print "test_sparse", test_sparse
        # Solvers opt in to sparse testing via a _test_sparse attribute.
        if test_sparse and not hasattr(class_, "_test_sparse"):
            continue
        if "X" in test_sparse:
            tX = sparse.csr_matrix(X)
        else:
            tX = X
        if "Y" in test_sparse:
            tY = sparse.csr_matrix(Y)
        else:
            tY = Y
        # One-shot:
        do_fit([(tX, tY)])
        # Two-shot incremental:
        do_fit([(tX[:5, :], tY[:5, :]),
                (tX[5:, :], tY[5:, :])])
        # Generator:
        def gen():
            for i in xrange(10):
                xrow = tX[i, :]
                # Weird fiddling because this code needs to work for both
                # dense and sparse matrices:
                if xrow.ndim == 1:
                    xrow = xrow.reshape((1, -1))
                yrow = tY[i, :]
                if yrow.ndim == 1:
                    yrow = yrow.reshape((1, -1))
                yield xrow, yrow
        do_fit(gen())
def test_incremental_ls():
    """Run the shared LS battery against both solver implementations."""
    for solver_cls in (XtXIncrementalLS, QRIncrementalLS):
        _incremental_ls_tst(solver_cls)
def _group_weighted_incremental_ls_tst(class_):
    """Regression-test battery for group-weighted incremental LS.

    Splits the same R-verified dataset into two groups ("a" and "b"),
    then checks both an equal-weight fit and a 1:2 weighted fit, fed
    both in big per-group chunks and one interleaved row at a time.
    (Python 2: uses xrange.)
    """
    x = np.arange(10)
    # Fixed pseudo-random residuals so the expected values are exact.
    r1 = np.array([-1.34802662, 0.88780193, 0.97355492, 1.09878012,
                   -1.24346173, 0.03237138, 1.70651768, -0.70375099,
                   0.21029281, 0.80667505])
    r2 = np.array([0.48670125, -1.82877490, -0.32244478, -1.00960602,
                   0.54804895, -0.24075048, 0.43178080, -1.14938703,
                   -0.07269548, -1.75175427])
    y1 = 1 + 2 * x + r1
    y2 = 3 + 7 * x + r2
    X = np.hstack([np.ones((10, 1)), x.reshape((-1, 1))])
    Y = np.hstack([y1.reshape((-1, 1)), y2.reshape((-1, 1))])
    # First five rows are group "a", last five group "b".
    groups = np.array(["a"] * 5 + ["b"] * 5)
    Xa = X[groups == "a", :]
    Xb = X[groups == "b", :]
    Ya = Y[groups == "a", :]
    Yb = Y[groups == "b", :]
    # True values calculated with R:
    def check(ls):
        # Equal weights must reproduce the ordinary LS fit exactly.
        fit11 = ls.fit({"a": 1, "b": 1})
        resid = Y - np.dot(X, fit11.coef())
        assert np.allclose(fit11.coef(),
                           np.array([[0.9867409, 2.739645],
                                     [2.0567410, 6.948770]]))
        assert fit11.rdf() == 8
        assert np.allclose(fit11.rss(),
                           [9.558538, 7.01811])
        assert np.allclose(fit11.rssp(),
                           np.array([[9.558538163832505, -2.846727419732163],
                                     [-2.846727419732163, 7.018110344812262]]))
        y1_svcov = np.array([[0.41275506, -0.065171851],
                             [-0.06517185, 0.014482634]])
        y2_svcov = np.array([[0.30305476, -0.047850752],
                             [-0.047850752, 0.010633501]])
        svcov = np.concatenate([y1_svcov[..., np.newaxis],
                                y2_svcov[..., np.newaxis]],
                               axis=2)
        assert np.allclose(fit11.scaled_vcov(), svcov)
        assert np.allclose(fit11.se(),
                           np.array([[0.6424602, 0.5505041],
                                     [0.1203438, 0.1031189]]))
        (t, p) = fit11.t_tests()
        assert np.allclose(t,
                           np.array([[1.535879, 4.976612],
                                     [17.090542, 67.386024]]))
        assert np.allclose(p,
                           np.array([[1.631190e-01, 1.084082e-03],
                                     [1.396098e-07, 2.617624e-12]]))
        # Not from R:
        assert fit11.group_df() == {"a": 5, "b": 5}
        assert np.allclose(fit11.group_rssp()["a"],
                           np.array([[6.267541851659734, -4.28037434739278],
                                     [-4.280374347392778, 4.24310595227175]]))
        assert np.allclose(fit11.group_rssp()["b"],
                           np.array([[3.290996312172771, 1.433646927660614],
                                     [1.433646927660614, 2.775004392540514]]))
        # Weighted fit: group "b" counted twice as heavily as "a".
        fit12 = ls.fit({"a": 1, "b": 2})
        assert np.allclose(fit12.coef(),
                           np.array([[1.009419062117645, 2.877818731529399],
                                     [2.054144681852942, 6.926762882588236]]))
        assert fit12.rdf() == 8
        # Really we care about rssp and vcov being accurate, but R doesn't
        # provide them, it does provide standard error, and standard error
        # involves all of the things we *do* care about, so we can just check
        # it:
        assert np.allclose(fit12.se(),
                           np.array([[0.7142305799847996, 0.6216165223656146],
                                     [0.1190384299974666, 0.1036027537276024]]))
        assert fit12.group_df() == {"a": 5, "b": 5}
        assert np.allclose(fit12.group_rssp()["a"],
                           np.array([[6.27300555451370, -4.253108798401872],
                                     [-4.25310879840187, 4.415039078639084]]))
        assert np.allclose(fit12.group_rssp()["b"],
                           np.array([[3.287297566115147, 1.415398625005633],
                                     [1.415398625005633, 2.658347656920942]]))
    # Big chunks:
    ls_big = class_()
    ls_big.append("a", Xa, Ya)
    ls_big.append("b", Xb, Yb)
    check(ls_big)
    # Row at a time, interleaved:
    ls_row = class_()
    for i in xrange(5):
        ls_row.append("a", Xa[[i], :], Ya[[i], :])
        ls_row.append("b", Xb[[i], :], Yb[[i], :])
    check(ls_row)
def test_group_weighted_ls():
    """Run the group-weighted battery; only one implementation so far."""
    for solver_cls in (XtXGroupWeightedIncrementalLS,):
        _group_weighted_incremental_ls_tst(solver_cls)
# Running this module directly executes its test suite under nose.
if __name__ == "__main__":
    import nose
    nose.runmodule()
|
993,938 | 83c718d48e254e1b94aff96cc70cce88a9278d6a | import random
from firebase import firebase
### TAXI
# int pos_x, pos_y;
# int axis; --> str from axisList
# str direction;
# Axis index -> human-readable name (Taxi.axis is 0 or 1).
axisList = ['Horizontal', 'Vertical']
# taxiList[i] = Taxi ID Tag
# NOTE: Taxis are automatically added to taxiList upon initialization
taxiList = []
# Parallel list of [x, y] starting coordinates, one entry per taxi.
taxiCoords = []
class Taxi:
    """A taxi shuttling back and forth along one fixed axis of a
    100x100 grid.

    Each instance registers itself in the module-level ``taxiList`` and
    ``taxiCoords`` on construction.
    """
    def __init__(self):
        # Random starting position, 1..100 inclusive on each axis.
        self.pos_x = random.randint(1, 100)
        self.pos_y = random.randint(1, 100)
        # Coin flip picks the travel axis: 1 = vertical, 0 = horizontal.
        coin = random.random()
        if(coin > 0.5):
            # HI = Vertical
            self.axis = 1
            # Head toward the far edge; start southbound only if already
            # at the top (pos_y == 100).
            if(self.pos_y < 100):
                self.direction = 'N'
            else:
                self.direction = 'S'
        else:
            # LO = Horizontal
            self.axis = 0
            if(self.pos_x < 100):
                self.direction = 'E'
            else:
                self.direction = 'W'
        taxiList.append(self)
        taxiCoords.append([self.pos_x, self.pos_y])
    def info(self):
        # Print the taxi's current position (Python 2 print statement).
        print "(" + str(self.pos_x) + "," + str(self.pos_y) + ")"
        #print "Axis = " + axisList[self.axis] + "\nDirection = " + self.direction
def updatePos(taxis=None):
    """Advance every taxi one grid step, bouncing off the 0/100 edges.

    A taxi moves one unit along its fixed axis in its current direction;
    on reaching an edge it reverses direction and takes a step back.

    Parameters:
        taxis: iterable of taxi objects to update. Defaults to the
               module-level ``taxiList`` (backward compatible with the
               original zero-argument call).

    BUG FIX: the original iterated ``range(0, len(taxiList) - 1)``,
    which skipped the last taxi every tick; all taxis now move.
    (Also replaces bitwise ``&`` on comparison results with ``and``.)
    """
    if taxis is None:
        taxis = taxiList
    for taxi in taxis:
        if taxi.axis:
            # Vertical mover: N increases y, S decreases y.
            if taxi.direction == 'N' and taxi.pos_y < 100:
                taxi.pos_y += 1
            elif taxi.direction == 'N':
                # Hit the top edge: turn around and step south.
                taxi.direction = 'S'
                taxi.pos_y -= 1
            elif taxi.pos_y > 0:
                taxi.pos_y -= 1
            else:
                # Hit the bottom edge: turn around and step north.
                taxi.pos_y += 1
                taxi.direction = 'N'
        else:
            # Horizontal mover: E increases x, W decreases x.
            if taxi.direction == 'E' and taxi.pos_x < 100:
                taxi.pos_x += 1
            elif taxi.direction == 'E':
                # Hit the right edge: turn around and step west.
                taxi.direction = 'W'
                taxi.pos_x -= 1
            elif taxi.pos_x > 0:
                taxi.pos_x -= 1
            else:
                # Hit the left edge: turn around and step east.
                taxi.pos_x += 1
                taxi.direction = 'E'
def displayCoords(taxis=None):
    """Print the current coordinates of every taxi via ``Taxi.info()``.

    Parameters:
        taxis: iterable of taxi objects. Defaults to the module-level
               ``taxiList`` (backward compatible with the original
               zero-argument call).

    BUG FIX: the original iterated ``range(0, len(taxiList) - 1)``,
    silently omitting the last taxi from the listing.
    """
    if taxis is None:
        taxis = taxiList
    for taxi in taxis:
        taxi.info()
# Populate the simulation with 100 taxis; each Taxi() self-registers
# in the module-level taxiList/taxiCoords on construction.
for i in range(0, 100):
    Taxi()
# The string below is dead scratch code kept for reference; note it
# calls a.updatePos(), but updatePos is a module-level function, not a
# Taxi method.
'''
print "--------------"
print
a = Taxi()
a.info()
print "+++"
a.updatePos()
a.info()
print
print "---------------"
print "Starting Point (x, y):\t(" + str(pos_x) + "," + str(pos_y) + ")"
print "Axis = " + axisList[axis] + "\nDirection = " + direction
'''
|
993,939 | 7e7984241a1a0d4731c09eab7f358092bec020f2 | import numpy as np
import pandas as pd
from scipy.interpolate import *
import math
import matplotlib.pyplot as plt
#fig, ax = plt.subplots(subplot_kw={'projection': '3d'})
#layer_z = 0.4
def partition_steps(path, nominal_step=1, dev_factor=0):
    """Return sorted time stamps at which a tool path should be sub-stepped.

    Keeps every time where the extruder toggles on/off and additionally
    splits any extruding segment longer than *nominal_step* seconds into
    equally spaced sub-steps.

    Args:
        path: DataFrame with at least 'time' and 'extruding' columns.
        nominal_step: maximum allowed step duration before subdividing.
        dev_factor: unused; kept for interface compatibility.

    Returns:
        pandas Series of times, sorted ascending.
    """
    # Rows where extrusion toggles. The first and last rows always qualify
    # because diff() yields NaN there and NaN != 0 evaluates to True.
    toggles = (path['extruding'].diff(periods=-1) != 0) | (path['extruding'].diff(periods=1) != 0)
    t = path[['time', 'extruding']].loc[toggles].copy()
    t = t.drop_duplicates(subset='time')
    t['step_time'] = t['time'].diff()
    t['ratio'] = t['step_time'] / nominal_step
    # Extruding segments longer than the nominal step. The ratio > 1 test
    # also discards NaN rows (the original `ratio != np.nan` comparison was
    # a no-op: nothing compares equal to NaN).
    long_step = t.loc[(t['ratio'] > 1) & (t['extruding'] == 1)].copy()
    new_rows = []
    for _, row in long_step.iterrows():
        # Split the long segment into equally spaced sub-steps.
        partitions = int(math.ceil(row['ratio']))
        dist = row['step_time'] / partitions
        for j in range(1, partitions):
            new_rows.append({'time': row['time'] - j * dist,
                             'step_time': dist, 'ratio': 0, 'extruding': 1})
    if new_rows:
        # BUG FIX: DataFrame.append was deprecated and removed in pandas 2.0;
        # collect the new rows and concatenate them once instead.
        t = pd.concat([t, pd.DataFrame(new_rows)], ignore_index=True)
    return t['time'].sort_values()
"""
df_funct
creates a dictinary of lamda functions that linearly interpolates with respect to x_col
args:
df: dataframe to be parametrized
x_col: the coloumn containing x values
"""
def df_functs(df, x_col):
    """Build a dict of interpolators, one per column, parameterised by *x_col*.

    Every column gets a linear interpolator over df[x_col]; the 'extruding'
    column is then overridden with a step-like 'next' interpolator over
    df['time'] (note: 'time', not x_col) so the on/off state never blends.

    Args:
        df: DataFrame to parameterise; must contain 'time' and 'extruding'.
        x_col: name of the column providing the x values.

    Returns:
        dict mapping column name -> scipy interp1d callable.
    """
    functs = {column: interp1d(df[x_col], df[column]) for column in df}
    functs['extruding'] = interp1d(df['time'], df['extruding'], kind='next')
    return functs
#CODE = """M104 T200
#M106 F100
#M140 T60
#G1 X0 Y0 Z0 F80
#G1 X1 Y0 Z0
#G1 X6 Y0 Z0
#G3 X6 Y2 I0 J1
#G2 X6 Y4 I0 J1
#G0 X0 Y0 Z0.4
#G1 X0 Y0
#G1 X1 Y0
#G1 X6 Y0
#G3 X6 Y2 I0 J1
#G2 X6 Y4 I0 J1
#"""
#path = read_path(CODE)
#f = df_functs(path,'distance')
#
#print(path[['time','distance']])
#ax.plot3D(path['x'],path['y'],path['z'],color = 'r',linestyle = 'dashed')
#partitions = path_divide(path)
#partitions = partitions['distance']
#partitions = partitions.sort_values()
#print(partitions)
#ax.set_xlabel('x (mm)')
#ax.set_xlim(0,7)
#ax.set_ylim(0,7)
#ax.set_zlim(0,7)
#ax.set_ylabel('y (mm)')
#ax.set_zlabel('z (mm)')
#ax.scatter(f['x'](partitions),
# f['y'](partitions),
# f['z'](partitions))
#
#steps = create_steps(partitions,f)
#print(steps)
#plt.show()
|
993,940 | ff69c68c197fcc4ef9c664e82bbcdccbdebf0e8e | import duden
from psycopg2.extras import execute_values
import psycopg2
ktype = 'duden'
from time import sleep
from random import randint
import constants as const
from translations.database_handler import connect
import json
def get_word_url(term):
    """Search duden.de for *term* and return the list of matching URL names."""
    matches = duden.search(term, return_words=False)
    print(f"found {len(matches)} sense(s).")
    return matches
def get_entry(word_url):
    """Fetch a duden entry and return its exported dict, or None if missing."""
    word = duden.get(word_url)
    return word.export() if word is not None else None
def store(timestamp, insert=True):
    """Scrape duden entries for every (term, sense) pair in public.german.

    For each term, looks up duden URL names, fetches the first matching
    entry, and inserts it as JSON into the `german` table with the given
    *timestamp*. Sleeps a random interval between requests to rate-limit.

    Args:
        timestamp: value stored in the `update` column.
        insert: unused; kept for interface compatibility.
    """
    query = "select distinct term, sense from public.german;"
    conn = connect(const.postgres_config)
    cur = conn.cursor()
    cur2 = conn.cursor()
    cur.execute(query)
    res = cur.fetchall()
    for term, pos_of_speech in res:
        urls = get_word_url(term)
        # BUG FIX: the original did `url = url[0]` (a string) and then
        # iterated over it, which walked the URL character by character.
        # Keep it a list and take only the first match, as intended.
        if len(urls) > 1:
            urls = urls[:1]
        for word_url in urls:
            print(f" get entry {word_url}")
            entry = get_entry(word_url)
            if entry is not None:
                print(entry)
                rows = [(term, pos_of_speech, ktype,
                         json.dumps(entry, ensure_ascii=False), timestamp)]
                insert_entry(rows, cur2, conn)
                print(rows)
            # Rate-limit between requests (assumed per-request in the
            # original; its indentation was ambiguous — TODO confirm).
            pause = randint(const.min_secs, const.max_secs)
            print(f"time : {pause}")
            sleep(pause)
def insert_entry(lst, cur, conn):
    """Bulk-insert (term, sense, ktype, value, update) tuples into `german`.

    Uses psycopg2's execute_values for a single multi-row INSERT; duplicate
    rows are silently skipped via ON CONFLICT DO NOTHING. Database errors
    are caught and logged so one failing batch does not abort the caller's
    scrape loop.

    Args:
        lst: list of 5-tuples matching the column order above.
        cur: open psycopg2 cursor.
        conn: owning connection; committed on success.
    """
    sql = """INSERT INTO german(term, sense, ktype, value, update)
    VALUES %s
    on conflict do nothing;"""
    try:
        execute_values(cur, sql, lst)
        conn.commit()
    except (Exception, psycopg2.DatabaseError) as error:
        print(f"postgress error:: {error}")
def parse_duden_context(entry):
    """Turn a duden export dict into a formatted meanings/examples string.

    Args:
        entry: dict as produced by duden's word.export(), with at least
            'name' and 'meaning_overview' keys.

    Returns:
        Formatted string, or None when the entry has no meaning overview.
    """
    term = entry['name']
    overview = entry['meaning_overview']
    if overview is None:
        return None
    return format_context(parse_context(overview), term)
def format_context(merg, term):
    """Render parsed (meaning, examples) pairs into a compact display string.

    Limits output to the first three senses and two examples per sense,
    e.g.::

        ▢ mit Schwung, Heftigkeit geräuschvoll schließen
        ▪ den Kofferraum ~ ▪ jemandem die Tür vor der Nase ~

    Args:
        merg: list of (meaning, [examples]) tuples from parse_context().
        term: headword; replaced by '~' via remove_noise().
    """
    parts = []
    # Limit the senses shown when learning the term for the first time.
    for meaning_raw, examples in merg[0:3]:
        examples = examples[0:2]
        # BUG FIX: the original used `or`, which is true unless BOTH markers
        # appear, so the filter never skipped anything; `and` actually skips
        # colloquial (umgangssprachlich) and dated (veraltend) senses.
        if "umgangssprachlich" not in meaning_raw and "veraltend" not in meaning_raw:
            meaning = remove_noise(meaning_raw, term)
            parts.append("\n▢ " + meaning + "\n" + "▪ "
                         + remove_noise(" ▪ ".join(examples), term) + "\n")
    return "".join(parts)
def remove_noise(text, target):
    """Strip grammar boilerplate from *text* and replace *target* with '~'.

    (Parameter renamed from the builtin-shadowing `str` internally; the
    positional interface is unchanged.)
    """
    replacements = (
        (target, '~'),
        ("\u2004%", ''),
        ("o.\xa0Ä", ''),
        ("Perfektbildung mit „hat“", ''),
        ("umgangssprachlich", ''),
    )
    for old, new in replacements:
        text = text.replace(old, new)
    return text.strip()
def parse_context(c):
    """Split a raw duden 'meaning_overview' blob into (definition, examples).

    The blob alternates definition text and 'Beispiel...' example sections;
    after splitting on 'Beispiel', the first cleaned line of each section is
    an example block's start and the last cleaned line the next definition.

    Returns:
        list of (definition, [examples]) tuples, or None when there is
        nothing to split (defensive; str.split never returns an empty list).
    """
    segments = c.split('Beispiel')
    if len(segments) == 0:
        return None
    definitions = []
    examples = []
    for index, segment in enumerate(segments):
        cleaned = clean(segment.split('\n'))
        if index == 0:
            # The very first segment only carries the first definition.
            definitions.append(cleaned[0])
        elif len(cleaned) > 0:
            # Last cleaned line is the next definition...
            definitions.append(cleaned[-1])
            # ...everything before it belongs to the previous sense's examples.
            examples.append(cleaned[0:-1])
    return list(zip(definitions, examples))
def clean(lst):
    """Drop empty lines, stray 'e' fragments and grammar/usage boilerplate."""
    noise_prefixes = ('Bedeutungen', 'Info', 'Grammatik',
                      'Perfektbildung', 'Gebrauch', 'selten')
    kept = []
    for item in lst:
        if len(item) == 0 or item == 'e' or item.startswith(noise_prefixes):
            continue
        kept.append(item)
    return kept
timestamp = '2020-11-14 18:23:58'
if __name__ == '__main__':
context = {'name': 'zuschlagen', 'urlname': 'zuschlagen', 'title': 'zuschlagen', 'article': None, 'part_of_speech': 'starkes Verb', 'usage': None, 'frequency': 2, 'word_separation': ['zu', 'schla', 'gen'], 'meaning_overview': '\n\nBedeutungen (8)\n\nInfo\n\n\n\n\nmit Schwung, Heftigkeit geräuschvoll schließen\nGrammatik\nPerfektbildung mit „hat“\nBeispiele\n\nden Kofferraum zuschlagen\njemandem die Tür vor der Nase zuschlagen\nein Buch zuschlagen (zuklappen)\n\n\n\nmit einem Schlag (1b) zufallen\nGrammatik\nPerfektbildung mit „ist“\nBeispiel\n\npass auf, dass [dir] die Wohnungstür nicht zuschlägt\n\n\n\n\ndurch [Hammer]schläge [mit Nägeln o.\xa0Ä.] fest zumachen, verschließen\nGebrauch\nselten\nGrammatik\nPerfektbildung mit „hat“\nBeispiel\n\neine Kiste zuschlagen\n\n\n\ndurch Schlagen, Hämmern in eine bestimmte Form bringen\nGrammatik\nPerfektbildung mit „hat“\nBeispiel\n\nSteine für eine Mauer [passend] zuschlagen\n\n\n\nmit einem Schläger zuspielen\nGrammatik\nPerfektbildung mit „hat“\nBeispiel\n\ndem Partner den Ball zuschlagen\n\n\n\n\neinen Schlag (1a), mehrere Schläge gegen jemanden führen\nGrammatik\nPerfektbildung mit „hat“\nBeispiele\n\nkräftig, hart, mit der Faust zuschlagen\nder Täter holte aus und schlug zu\n〈in übertragener Bedeutung:〉 die Polizei schlug zu\n〈in übertragener Bedeutung:〉 das Schicksal, der Tod schlug zu\n\n\n\netwas Bestimmtes tun (besonders etwas, was jemand gewohnheitsmäßig tut, was typisch für ihn ist [und was allgemein gefürchtet ist, nicht gutgeheißen wird])\nGrammatik\nPerfektbildung mit „hat“\nBeispiel\n\nder Mörder hat wieder zugeschlagen\n\n\n\nsich beim Essen, Trinken keinerlei Zurückhaltung auferlegen\nGebrauch\numgangssprachlich\nGrammatik\nPerfektbildung mit „hat“\nBeispiele\n\nnach der Diät wieder [richtig, voll] zuschlagen können\nbeim Champagner haben sie ganz schön zugeschlagen\n〈in übertragener Bedeutung:〉 (umgangssprachlich) die Stadt will jetzt bei den Parkgebühren zuschlagen (will sie kräftig erhöhen)\n\n\n\nein 
Angebot, eine gute Gelegenheit o.\xa0Ä. wahrnehmen, einen Vorteil nutzen\nGebrauch\numgangssprachlich\nGrammatik\nPerfektbildung mit „hat“\nBeispiel\n\nbei diesem günstigen Angebot musste ich einfach zuschlagen\n\n\n\n\n\n(bei einer Versteigerung) durch Hammerschlag als Eigentum zuerkennen\nGrammatik\nPerfektbildung mit „hat“\nBeispiel\n\ndas Buch wurde [einer Schweizer Bieterin] mit fünftausend Euro zugeschlagen\n\n\n\nim Rahmen einer Ausschreibung (als Auftrag) erteilen\nGrammatik\nPerfektbildung mit „hat“\nBeispiel\n\nder Auftrag, der Neubau wurde einer belgischen Firma zugeschlagen\n\n\n\nals weiteren Bestandteil hinzufügen, angliedern o.\xa0Ä.\nGrammatik\nPerfektbildung mit „hat“\nBeispiel\n\ndas Haus wurde dem Erbe des Sohnes zugeschlagen\n\n\n\n\n(einen Betrag o.\xa0Ä.) auf etwas aufschlagen\nGrammatik\nPerfektbildung mit „hat“\nBeispiel\n\n[zu] dem/auf den Preis werden noch 10\u2004% zugeschlagen\n\n\n\neinen bestimmten Stoff bei der Herstellung von Mörtel und Beton oder bei der Verhüttung von Erzen zusetzen\nGebrauch\nBautechnik, Hüttenwesen\nGrammatik\nPerfektbildung mit „hat“\n\n\n', 'origin': None, 'compounds': {'adjektive': ['blitzschnell', 'eiskalt', 'erbarmungslos', 'erneut', 'gleich', 'gnadenlos', 'hart', 'richtig'], 'substantive': ['Autotür', 'Mal', 'Mörder', 'Nase', 'Schicksal', 'Transfermarkt', 'Tür', 'Wagentür']}, 'grammar_raw': None, 'synonyms': ['schließen, zuklappen, zuschmettern, zuwerfen'], 'words_before': ['zuschicken', 'zuschieben', 'zuschießen', 'zuschippen', 'Zuschlag'], 'words_after': ['zuschlagfrei', 'Zuschlagkalkulation', 'Zuschlagkarte', 'zuschlagpflichtig', 'Zuschlagsatz']}
formatted = parse_duden_context(context)
print(formatted)
|
993,941 | cd8609c63ade90751b627cbee6cf2cfed420085d | with open('input.txt', 'r') as f:
d = f.read().splitlines()
def part1():
    """Count readings strictly larger than their predecessor (AoC day 1)."""
    previous = int(d[0].strip())
    increases = 0
    for raw in d[1:]:
        current = int(raw.strip())
        if current > previous:
            increases += 1
        previous = current
    print(increases)
def part2():
    """Count increases between successive 3-reading sliding-window sums."""

    def window_sum(collection):
        # Guard: a window must hold exactly three readings.
        if len(collection) != 3:
            raise Exception(f"len of {str(collection)} is {len(collection)}")
        return sum([int(line) for line in collection])

    previous = window_sum(d[:3])
    increases = 0
    for start in range(len(d) - 2):
        current = window_sum(d[start:start + 3])
        if current > previous:
            increases += 1
        previous = current
    print(increases)
part1()
part2()
|
993,942 | 4fa58c5a29c563a0ae78df39cfd2961800e1a68d | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .resnet import ResNet
from .utils import Conv2dWithNorm, UpSample2d
class FPN(nn.Module):
    """Feature Pyramid Network over a bottom-up backbone.

    Builds one 1x1 lateral conv and one 3x3 output conv per tapped backbone
    stage, fuses features top-down via nearest-neighbour upsampling, and
    optionally adds a full-resolution 'logits' classification head.

    Args:
        bottom_up: backbone exposing stage_outputs() (stage -> {'stride',
            'channels'}) and returning a dict of stage-name -> feature map.
        input_size: (H, W) target size for the optional logits head.
        out_channels: channel width of every pyramid level.
        num_classes: channel count of the logits head; 0 disables it.
        in_features: backbone stage names to tap, ordered bottom-up.
        out_features: pyramid level names exposed (e.g. 'p2'..'p5', 'logits').
    """

    def __init__(
        self,
        bottom_up,
        input_size,
        out_channels,
        num_classes=0,
        in_features=[],
        out_features=[]
    ):
        super().__init__()
        self.bottom_up = bottom_up
        self.input_size = input_size
        self.num_classes = num_classes
        self.in_features = in_features
        self.out_features = out_features

        _stage_outputs = bottom_up.stage_outputs()
        in_strides = [_stage_outputs[s]['stride'] for s in in_features]
        in_channels = [_stage_outputs[s]['channels'] for s in in_features]

        lateral_convs = []
        output_convs = []
        # BUG FIX: the original reused `in_channels` itself as the loop
        # variable, clobbering the list so that `self.in_channels` (set
        # below) held the last stage's channel count (an int) instead of
        # the per-stage channel list.
        for n, stage_channels in enumerate(in_channels):
            # 1x1 lateral conv projects the backbone stage to out_channels.
            lateral_conv = Conv2dWithNorm(
                stage_channels,
                out_channels,
                kernel_size=1,
                bias=False,
                norm=nn.BatchNorm2d(out_channels)
            )
            # 3x3 conv smooths the fused (lateral + upsampled) feature map.
            output_conv = Conv2dWithNorm(
                out_channels,
                out_channels,
                kernel_size=3,
                padding=1,
                bias=False,
                norm=nn.BatchNorm2d(out_channels),
            )
            nn.init.kaiming_uniform_(lateral_conv.weight, a=1)
            nn.init.kaiming_uniform_(output_conv.weight, a=1)
            # Name modules after the pyramid stage (log2 of the stride).
            stage = int(math.log2(in_strides[n]))
            self.add_module("fpn_lateral{}".format(stage), lateral_conv)
            self.add_module("fpn_output{}".format(stage), output_conv)
            lateral_convs.append(lateral_conv)
            output_convs.append(output_conv)

        # Reverse so self.laterals/self.outputs run top-down in forward().
        self.laterals = lateral_convs[::-1]
        self.outputs = output_convs[::-1]

        if 'logits' in self.out_features and num_classes > 0:
            # Full-resolution head on the finest pyramid level.
            self.upsample = UpSample2d(size=input_size)
            self.logits = Conv2dWithNorm(
                out_channels,
                num_classes,
                kernel_size=1,
                padding=0
            )
            nn.init.kaiming_uniform_(self.logits.weight, a=1)

        self.in_channels = in_channels

        # dicts {stage_name: properties}
        self.out_feature_strides = {
            "p{}".format(int(math.log2(s))): s for s in in_strides}
        self._out_features = list(self.out_feature_strides.keys())
        if 'logits' in self.out_features:
            self._out_features.insert(0, 'logits')
        self.out_feature_channels = {
            k: out_channels for k in self._out_features}

    def forward(self, inputs):
        """Run the backbone, fuse top-down, and return {level name: tensor}."""
        # forward through the bottom_up cnn
        h = self.bottom_up(inputs)
        # list out_feature maps in a top_down fashion
        features = [h[name] for name in self.in_features[::-1]]
        # top layer goes through lateral and output only (nothing to fuse)
        curr_features = self.laterals[0](features[0])
        out_features = []
        out_features.append(self.outputs[0](curr_features))
        # remaining layers: curr = upsample(prev) + lateral(feature)
        for feature, lateral, output in zip(
                features[1:], self.laterals[1:], self.outputs[1:]):
            prev_feature = F.interpolate(
                curr_features, scale_factor=2, mode='nearest')
            curr_features = output(prev_feature + lateral(feature))
            out_features.append(curr_features)
        if 'logits' in self.out_features and self.num_classes > 0:
            feature = self.upsample(curr_features)
            feature_logits = self.logits(feature)
            out_features.append(feature_logits)
        # names are listed bottom-up while features were built top-down,
        # so reverse the feature list before zipping.
        return dict(zip(self._out_features, out_features[::-1]))

    @staticmethod
    def build_resnet_fpn(
        name,
        input_size,
        input_channels,
        output_channels,
        num_classes=0,
        in_features=[],
        out_features=[]
    ):
        """Convenience constructor: ResNet backbone wrapped in an FPN."""
        resnet = ResNet.build(
            name=name,
            input_channels=input_channels,
            num_classes=0,
            out_features=in_features
        )
        return FPN(
            resnet,
            input_size,
            output_channels,
            num_classes,
            in_features,
            out_features
        )
if __name__ == '__main__':
inputs = torch.rand((1, 3, 224, 224))
print(inputs.shape)
model = FPN.build_resnet_fpn(
name='resnet50',
input_size=(224, 224),
input_channels=3,
output_channels=256,
num_classes=1,
in_features=['stem', 'res2', 'res3', 'res4'],
out_features=['p5', 'p4', 'p3', 'p2', 'logits']
)
outputs = model(inputs)
print(outputs['p5'].shape)
print(outputs['p4'].shape)
print(outputs['p3'].shape)
print(outputs['p2'].shape)
# print(outputs['p1'].shape)
print(outputs['logits'].shape)
|
993,943 | d036d659de058034f34be0851bbf8547248fb4cb | import json
from django.test import TestCase
from django.urls import reverse
from rest_framework.authtoken.models import Token
from .models import CustomerModel
def create_user():
    """Create and return the fixture customer account shared by the tests."""
    fields = {
        'username': 'menooa2015@gmail.com',
        'first_name': 'menooa',
        'last_name': 'eskandarian',
        'phone': '09016718130',
        'password': '1332',
        'reset_password_code': 'some_codesome_codesome_codesome_codesome',
    }
    return CustomerModel.objects.create_user(**fields)
class TestRegisterLogin(TestCase):
    """Tests for the combined register (POST) / login (PUT) endpoint."""

    def setUp(self) -> None:
        # One known account exists before every test.
        create_user()

    def test_register_user(self):
        """Fresh credentials create a user; a duplicate POST is rejected."""
        request_data = {'username': 'menooa2013@gmail.com',
                        'first_name': 'menooa',
                        'last_name': 'eskandarian',
                        'phone': '09016718130',
                        'password1': '1332',
                        'password2': '1332'}
        response = self.client.post(reverse('register_login'), data=request_data)
        assert response.data.get('msg') and response.data.get('msg') == "User created successfully"
        # existing username request
        request_data_2 = {'username': 'menooa2013@gmail.com',
                          'first_name': 'menooa',
                          'last_name': 'eskandarian',
                          'phone': '09016718130',
                          'password1': '1332',
                          'password2': '1332'}
        response_2 = self.client.post(reverse('register_login'), data=request_data_2)
        # NOTE(review): "This emial is taken" (sic) mirrors the API's actual
        # response string — fix the typo only together with the view.
        assert response_2.data.get('msg') and response_2.data.get('msg') == "This emial is taken"

    def test_login(self):
        """Valid credentials yield a token; an unknown user yields none."""
        request_data = {"username": "menooa2015@gmail.com", "password": "1332"}
        response = self.client.put(reverse('register_login'), data=request_data, content_type='application/json')
        assert response.data.get('token')
        request_data2 = {'username': 'menooa2019@gmail.com',
                         'password': '1332'}
        response2 = self.client.put(reverse('register_login'), data=request_data2, content_type='application/json')
        assert not response2.data.get('token')
class TestResetPassword(TestCase):
    """Tests for the password-reset endpoint (request + confirm)."""

    def setUp(self) -> None:
        # The fixture user carries the known reset_password_code below.
        create_user()

    def test_reset_password(self):
        """PUT with the user's reset code and matching passwords succeeds."""
        reset_password_code = 'some_codesome_codesome_codesome_codesome'
        request_data = {'code': reset_password_code,
                        'password1': '1332',
                        'password2': '1332'}
        response = self.client.put(reverse('reset_password'), data=request_data, content_type='application/json')
        assert response.data.get('msg') == 'password updated successfully'

    def test_reset_password_request(self):
        """POST with a registered email triggers the reset flow (HTTP 200)."""
        request_data = {"email": 'menooa2015@gmail.com'}
        response = self.client.post(reverse('reset_password'), data=request_data)
        assert response.status_code == 200
class TestCheckUserAuth(TestCase):
    """Tests for the token-authenticated check_user_auth endpoint."""

    def setUp(self) -> None:
        # Issue a DRF token for the fixture user.
        customer = create_user()
        token = Token(user=customer)
        token.save()
        self.token = str(token)

    def test_user_auth(self):
        # BUG FIX: Django's test client only turns extra kwargs into HTTP
        # headers when they carry the HTTP_ prefix; a bare AUTHORIZATION
        # kwarg never reached request.META as an Authorization header, so
        # the request was effectively anonymous.
        response = self.client.get(reverse('check_user_auth'),
                                   HTTP_AUTHORIZATION='Token ' + self.token)
        assert response.data
|
993,944 | 48c13065825b2fd055f23a034661e6ba374cb00b | import numpy as np
import matplotlib.pyplot as plt
from numpy import pi , sin
from numpy.fft import fft
from scipy.signal import square
##1 -- Square wave vs. its truncated Fourier-series approximation
# Fundamental frequency [Hz] and sampling frequency [Hz].
fo = 120
fe = 8000
# One second of samples, fe+1 points.
t = np.linspace(0,1,fe+1)
# Sum of the first five odd harmonics: Fourier approximation of a square wave.
signal = sin(2*pi*fo*t) + (1/3)* sin(2*pi*fo*t*3) + (1/5)*sin(2*pi*fo*t*5) + (1/7)*sin(2*pi*fo*t*7) + (1/9)*sin(2*pi*fo*t*9)
# Ideal square wave at the same fundamental frequency.
signal2 = square ( 2*pi*fo*t)
plt.grid(True)
plt.title("Signal Carré")
plt.plot (t,signal2 ,label = "signal")
plt.xlabel ("Temps $t$")
plt.ylabel ("signal $x(t)$")
# -- FFT of the ideal square wave.
X =fft(signal2)
print (X) # These are complex spectrum values
993,945 | 41c64c94ee1e1cbd9f18c39b7603d481c01aa13b | from .manager import SQLiteManager, connect
|
993,946 | b53deeb9357cc276b773f4da9a7e425113f288dc | #1. Description
'''
Click though rate prediction using logistic regression.
Please download the data from https://www.kaggle.com/c/avazu-ctr-prediction/
manually (registration required) and place `CTR_train` file in `datasets` directory.
Only part of the data is used, because it takes too large memory for one VE card.
In online advertising, click-through rate (CTR) is a very important metric
for evaluating ad performance.
As a result, click prediction systems are essential and widely used for
sponsored search and real-time bidding.
This data is 11 days worth of Avazu data to build and test prediction models.
'''
#2. Data Preprocessing
from collections import OrderedDict
import os
import time
import seaborn as sns
import sklearn
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.preprocessing import OneHotEncoder
from sklearn.linear_model import LogisticRegression as skLogisticRegression
from sklearn.svm import LinearSVC as skSVC
import pandas as pd
import frovedis
from frovedis.mllib.linear_model import LogisticRegression as frovLogisticRegression
from frovedis.mllib.svm import LinearSVC as frovSVC
from frovedis.exrpc.server import FrovedisServer
def preprocess_data(fname):
    '''
    Load the Avazu CTR csv, drop non-predictive columns, split train/test
    and one-hot encode the categorical features.

    Args:
        fname: path to the CTR training csv.

    Returns:
        (x_train_enc, x_test_enc, y_train, y_test); the feature matrices
        are sparse one-hot encodings.
    '''
    # Only the first 800k rows are read: the full dataset is too large for
    # one VE card (the file itself has ~40M rows).
    n_rows = 800000
    df = pd.read_csv(fname, nrows=n_rows)
    class_names = {0:'Not click', 1:'Click'}
    print(df.click.value_counts().rename(index = class_names))
    # We dropped 'click', 'id', 'hour', 'device_id', 'device_ip' from the dataset,
    # which does not contribute the prediction.
    x = df.drop(['click', 'id', 'hour', 'device_id', 'device_ip'], axis=1).values
    y = df['click'].values
    n_rows = df.shape[0]  # NOTE(review): reassigned but never used afterwards
    x_train, x_test, y_train, y_test = train_test_split(x,y, test_size = 0.05)
    # Other features are one-hot encoded; so the feature matrix becomes sparse matrix
    enc = OneHotEncoder(handle_unknown='ignore')
    x_train_enc = enc.fit_transform(x_train)
    x_test_enc = enc.transform(x_test)
    return x_train_enc, x_test_enc, y_train, y_test
#---- Data Preparation ----
DATA_FILE = "datasets/ctr_train.csv"
x_train, x_test, y_train, y_test = preprocess_data(DATA_FILE)
print("shape of train data: {}".format(x_train.shape))
print("shape of test data: {}".format(x_test.shape))
sns.countplot(y_train)
#3. Algorithm Evaluation
train_time = []
test_time = []
accuracy = []
precision = []
recall = []
f1 = []
estimator_name = []
def evaluate(estimator, estimator_nm,
             x_train, y_train,
             x_test, y_test):
    '''
    Fit *estimator*, time training and prediction, and record the headline
    metrics. Appends one entry to each module-level bookkeeping list
    (estimator_name, train_time, test_time, accuracy, precision, recall,
    f1) and returns sklearn's text classification report for the test set.
    '''
    estimator_name.append(estimator_nm)

    started = time.time()
    estimator.fit(x_train, y_train)
    train_time.append(round(time.time() - started, 4))

    started = time.time()
    pred_y = estimator.predict(x_test)
    test_time.append(round(time.time() - started, 4))

    # Record the four headline scores in their respective global lists.
    for bucket, scorer in ((accuracy, metrics.accuracy_score),
                           (precision, metrics.precision_score),
                           (recall, metrics.recall_score),
                           (f1, metrics.f1_score)):
        bucket.append(scorer(y_test, pred_y))
    return metrics.classification_report(y_test, pred_y)
#3.1 Binary LogisticRegression with sag solver
TARGET = "binary_logistic_regression_sag"
FrovedisServer.initialize("mpirun -np 8 " + os.environ["FROVEDIS_SERVER"])
f_est = frovLogisticRegression(penalty='l2', solver='sag')
E_NM = TARGET + "_frovedis_" + frovedis.__version__
f_report = evaluate(f_est, E_NM, x_train, y_train, x_test, y_test)
f_est.release()
FrovedisServer.shut_down()
s_est = skLogisticRegression(penalty='l2', solver='sag')
E_NM = TARGET + "_sklearn_" + sklearn.__version__
s_report = evaluate(s_est, E_NM, x_train, y_train, x_test, y_test)
print("Frovedis LogisticRegression matrices: ")
print(f_report)
print("Sklearn LogisticRegression matrices: ")
print(s_report)
#3.2 Linear SVC
TARGET = "Linear_SVC"
FrovedisServer.initialize("mpirun -np 8 " + os.environ["FROVEDIS_SERVER"])
f_est = frovSVC(loss='hinge', max_iter=10000)
E_NM = TARGET + "_frovedis_" + frovedis.__version__
f_report = evaluate(f_est, E_NM, x_train, y_train, x_test, y_test)
f_est.release()
FrovedisServer.shut_down()
s_est = skSVC(loss='hinge', max_iter=10000)
E_NM = TARGET + "_sklearn_" + sklearn.__version__
s_report = evaluate(s_est, E_NM, x_train, y_train, x_test, y_test)
# SVC: Precision, Recall and F1 score for each class
print("Frovedis Linear SVC metrices: ")
print(f_report)
print("Sklearn Linear SVC metrices: ")
print(s_report)
#4. Performance summary
# ---- evaluation summary ----
summary = pd.DataFrame(OrderedDict({ "estimator": estimator_name,
"train time": train_time,
"test time": test_time,
"accuracy": accuracy,
"precision": precision,
"recall": recall,
"f1-score": f1
}))
print(summary)
|
993,947 | 7004ed0fbfb9641e85a7295d6b7b241bb88f58a5 | import cv2
import os
import numpy as np
eigenface = cv2.face.EigenFaceRecognizer_create()
fisherface = cv2.face.FisherFaceRecognizer_create()
lbph = cv2.face.LBPHFaceRecognizer_create()
def getImagemComId():
    """Load every face image from 'fotos' in grayscale with its numeric id.

    File names follow the pattern <prefix>.<id>.<ext>; the id is the second
    dot-separated token.

    Returns:
        (ids, faces): numpy array of ids and list of grayscale images.
    """
    faces = []
    ids = []
    for image_path in (os.path.join('fotos', f) for f in os.listdir('fotos')):
        gray_face = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2GRAY)
        # Split the file name into tokens and pick the id token.
        face_id = int(os.path.split(image_path)[-1].split('.')[1])
        ids.append(face_id)
        faces.append(gray_face)
    return np.array(ids), faces
ids, faces = getImagemComId()
print("Treinado...");
eigenface.train(faces, ids)
eigenface.write('classificadores/classificadorEigen.yml')
fisherface.train(faces, ids)
fisherface.write('classificadores/classificadorFisher.yml')
lbph.train(faces, ids)
lbph.write('classificadores/classificadorLBPH.yml')
print("Treinamento realizado") |
993,948 | 8277b287060246fde64dd575520129ebd1037336 |
"""
Pattern name - SingleTon (Mono state pattern)
Pattern type - Creational Design Pattern
"""
# Solution - 2
class Borg(object):
    """Monostate ('Borg') base class: every instance shares one attribute dict.

    _shared is the single class-level namespace; __init__ rebinds each new
    instance's __dict__ to it, so an attribute set on any instance becomes
    visible on every other (and every future) instance.
    """
    _shared = {}
    def __init__(self):
        # Alias this instance's attribute storage to the shared mapping.
        self.__dict__ = self._shared
class SingleTon(Borg):
    """Monostate subclass holding a single shared ``val`` attribute."""

    def __init__(self, arg):
        # Join the shared Borg state first, then (re)bind the shared value.
        super(SingleTon, self).__init__()
        self.val = arg
def main():
    """Demonstrate that two SingleTon instances share the same state."""
    rule = "=" * 20
    print(rule)
    first = SingleTon("Hardik")
    print("Object - 1 ==>", first)
    print("Object - 1 val ==>", first.val)
    # Creating a second instance overwrites the shared value...
    second = SingleTon("Aarav")
    print("Object - 2 ==>", second)
    print("Object - 2 val ==>", second.val)
    # ...which is visible through the first instance as well.
    print("Object - 1 val ==>", first.val)
    print(first.__dict__)
    print(second.__dict__)
    print(rule)
if __name__ == "__main__":
main()
|
993,949 | d88665969e9b1053e17daaa9c8b89e61189fd8f4 | animales = ['perro', 'gato', 'tortuga']
for animal in animales:
print("Un " + animal + " es una excelente mascota.")
print("\nCualquiera de estos animales sería una excelente mascota.")
|
993,950 | 17a303281c94023d5d33893267a4e7f5951cf595 | ############################
# Libraries imports #
############################
from datetime import datetime
import time
import calendar
import json
import math
import os, sys
import socket
import traceback
import urllib2 as urllib
##############################################################################
# Program settings #
##############################################################################
# Please enter the hour at which program is launched on the server :
hour = 16
minute = 25
##############################################################################
# Please specify if you are testing the program & if you want to delay run :
test = False
Delay = True
##############################################################################
# Discharge of the porous hose network Q[L/min] :
Q = 1.5 / 60
##############################################################################
# Calibration parameters :
m_calib = 96
p_calib = - 30
##############################################################################
## QUALITY CHECK
# Number of successive values to consider :
N = 5
# Admissible max std for environmental param :
LIM_Rn = 322.5866
LIM_Thr = 2.8370
LIM_u2 = 1.231
LIM_P = 0.0678
# Admissible max std for WC probes :
LIM_HUM4 = 0.1131
LIM_HUM5 = 0.1340
LIM_HUM6 = 0.1135
#TODO compute std space
LIM_HUM456 = 1
##############################################################################
## IRRIGATION DECISION
# Minimum water content admissible :
Water_Content_Limit = 30
# Crop coefficient
Kl = 0.7
# Waiting time between first irrigation and post-irrig check :
waiting_time = 3600 * 2
# Default irrigation time [seconds] :
default_irrig = 60 * 30
# Dimensions of the pot/module [m2] :
Area = 0.75 * 0.14
# Volume of the pot/module [L] :
Pot_volume = 12.6
##############################################################################
##########################################
# COMMUNICATION PROTOCOLS settings #
##########################################
user = "GW2"
if test:
host = "greenwall.gembloux.uliege.be"
print('Script running on ''local'' mode')
else:
print('Script running on ''network'' mode')
host = "localhost"
# Ensure to run in the user home directory
DIR_BASE = os.path.expanduser("~")
if not os.path.samefile(os.getcwd(), DIR_BASE):
os.chdir(DIR_BASE)
print(os.getcwd())
# Ensure to be the only instance to run
pid = str(os.getpid())
_lock_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
try:
_lock_socket.bind('\0' + user)
print('Socket ' + user + ' now locked for process #' + pid)
# Make the current pid available to be able to kill the process...
open("pid.txt", 'w').write(pid)
except socket.error:
current = open("pid.txt", 'r').read()
print(user + ' lock exists for process #' + current + " : may be you should ./clean.sh !")
sys.exit()
##################################
# TIME MANAGEMENT settings #
##################################
# EPOCH time is the number of seconds since 1/1/1970
def get_timestamp():
    """Return the current EPOCH time as a whole number of seconds."""
    now = time.time()
    return int(now)
# Transform an EPOCH time into a readable local-time date (for Grafana)
def formatDate(epoch):
    """Render *epoch* as an ISO-8601 local-time string."""
    return datetime.fromtimestamp(epoch).isoformat()
# Transform an EPOCH time into a readable GMT/UTC date (for Grafana)
def formatDateGMT(epoch):
    """Render *epoch* as an ISO-8601 UTC (GMT) string.

    BUG FIX: the original subtracted a hard-coded 2-hour offset ("we are in
    summer and in Belgium"), which is wrong outside CEST and on any server
    whose local time zone is not Belgium's. Converting via UTC directly is
    correct year-round and matches the old output whenever local time was
    CEST (UTC+2).
    """
    return datetime.utcfromtimestamp(epoch).isoformat()
# Characters stripped when parsing raw API responses.
delimiters = ' \t\n\r\"\''
# Optionally postpone the first run until 6AM the next morning.
# NOTE(review): Delay, hour, minute, Q and N are defined earlier in the file,
# outside this excerpt.
if Delay:
    # waiting_time is the number of seconds between now and the next 6AM
    start_delay = (24 - hour + 6) * 3600 - (60 * minute) # [seconds]
    timestamp = get_timestamp()
    print 'The script has been loaded successfully at', time.strftime('%I:%M%p', time.localtime(timestamp))
    print 'Irrigation algorithm will start tomorrow at', time.strftime('%I:%M%p', time.localtime(timestamp+start_delay)), ', within', start_delay / 3600, 'hours'
    # To get messages in nohup.out
    sys.stdout.flush()
    time.sleep(start_delay)
else:
    print 'No delay before starting the scrip has been set'
    print 'The script will start right away'
# Echo every tunable setting so the values in use are traceable in the log.
print ''
print 'Here is the list of all the input setting of the script :'
print '========================================================='
print ''
print '* The discharge of the drip pipe :', round(Q, 2), 'L/min'
print '* The number of successive measures used for data quality check :', int(N)
print '* The minimum admissible water content :', int(Water_Content_Limit), '%'
print '* The landscape coefficient Kl', round(Kl, 2), '-'
print '* The delay between first irrigation and post irrigation check : ', int(waiting_time/60), 'minutes'
print '* The surface of the module :', round(Area, 2), 'm2'
print '* The volume of the module :', round(Pot_volume, 2), 'L'
print '* The admissible standard deviation for the sensors over time :'
print ' * HUM4 :', round(LIM_HUM4,2)
print ' * HUM5 :', round(LIM_HUM5,2)
print ' * HUM6 :', round(LIM_HUM6,2)
print ' * Rn :', round(LIM_Rn, 2)
print ' * Thr :', round(LIM_Thr, 2)
print ' * u2 :', round(LIM_u2, 2)
print ' * P :', round(LIM_P, 2)
print '* The admissible standard deviation for the sensors over space :'
print ' * HUM456 :', round(LIM_HUM456,2)
################################################################################
# *** IRRIGATION DECISION ALGORITHM *** #
################################################################################
# Main daily loop: collect 24 h of sensor data, quality-check it, decide on
# irrigation, then sleep until the next day (or break once in test mode).
while (True):
    print ''
    print '==============================================================================='
    print '= A new day of irrigation management of the WattWall starts ='
    print '==============================================================================='
    timestamp = get_timestamp()
    print 'Today is', time.strftime('%A %d %B %Y', time.localtime(timestamp))
    print 'It is now', time.strftime('%I:%M%p', time.localtime(timestamp)),', watering of the living wall module will start'
    # Getting tomorrow starting time (for script shut down period)
    tomorrow = get_timestamp() + 24 * 60 * 60
    #########################
    # Data collection #
    #########################
    print('Data from the last 24h is being collected...')
    dataFile = None
    try: # urlopen not usable with "with"
        # Query the Grafana-compatible API for the last 24 h of every listed
        # channel (WC probes HUM4-6 and weather-station SDIx channels).
        url = "http://" + host + "/api/grafana/query"
        now = get_timestamp()
        gr = {'range': {'from': formatDateGMT(now - (24 * 60 * 60)), 'to': formatDateGMT(now)}, \
            'targets': [{'target': 'HUM4'}, {'target': 'HUM5'}, {'target': 'HUM6'}, {'target': 'SDI0'},
                        {'target': 'SDI1'}, {'target': 'SDI4'}, {'target': 'SDI7'}, {'target': 'SDI8'},
                        {'target': 'SDI9'}, {'target': 'SDI10'}]}
        data = json.dumps(gr)
        # print(data)
        dataFile = urllib.urlopen(url, data, 20)
        result = json.load(dataFile)
        if result:
            # print(result)
            for target in result:
                # print target
                index = target.get('target')
                # Each datapoint is a [value, epoch_milliseconds] pair.
                for datapoint in target.get('datapoints'):
                    value = datapoint[0]
                    stamp = datapoint[1] / 1000
                    # print(index + ": " + formatDate(stamp) + " = " + str(value))
    except:
        print(u"URL=" + (url if url else "") + \
            u", Message=" + traceback.format_exc())
    if dataFile:
        dataFile.close()
# TODO mean WC over 24h or 5 min ??
# indeed at 24h, to be changed
# mean HUM value calculation
somme = 0
length_result = len(result[0].get('datapoints'))
for i in range(length_result-N, length_result):
somme = somme + result[0].get('datapoints')[i][0]
averageHUM4 = somme / N
somme = 0
length_result = len(result[1].get('datapoints'))
for i in range(length_result-N, length_result):
somme = somme + result[1].get('datapoints')[i][0]
averageHUM5 = somme / N
somme = 0
length_result = len(result[2].get('datapoints'))
for i in range(length_result-5, length_result):
somme = somme + result[2].get('datapoints')[i][0]
averageHUM6 = somme / N
# Mean water content value - averageHUM456
averageHUM456 = (averageHUM4 + averageHUM5 + averageHUM6) / 3
# TODO here thze std is over the full day ...
# std deviation between sensors
# sqrt( (1/N-1) * sum(measure - mean)^2 )
std_HUM456 = math.sqrt((1/float(2))*(pow((averageHUM4 - averageHUM456), 2) + pow((averageHUM5 - averageHUM456), 2) + pow((averageHUM6 - averageHUM456), 2)))
VWC = m_calib * averageHUM456 + p_calib
###########################
# Data QUALIY CHECK #
###########################
print 'Reliability of the collected data will determined'
# 3/ computation of std for water content probes
# pre allocation
SCE_HUM4 = 0
SCE_HUM5 = 0
SCE_HUM6 = 0
# mean squared error and std calculation
length_result = len(result[0].get('datapoints'))
# for the last N measures of the day
for i in range(length_result - N, length_result):
# calculation of the sum of deviations
SCE_HUM4 = SCE_HUM4 + pow((result[0].get('datapoints')[i][0] - averageHUM4), 2)
# computation of std
# sqrt( (1/N-1) * sum(measure - mean)^2 )
std_HUM4 = math.sqrt((1/float(N-1)) * SCE_HUM4)
# mean squared error and std calculation
length_result = len(result[1].get('datapoints'))
# for the last N measures of the day
for i in range(length_result - N, length_result):
# calculation of the sum of deviations
SCE_HUM5 = SCE_HUM5 + pow((result[1].get('datapoints')[i][0] - averageHUM5), 2)
# computation of std
# sqrt( (1/N-1) * sum(measure - mean)^2 )
std_HUM5 = math.sqrt((1/float(N-1)) * SCE_HUM5)
# mean squared error and std calculation
length_result = len(result[0].get('datapoints'))
# for the last N measures of the day
for i in range(length_result - N, length_result):
# calculation of the sum of deviations
SCE_HUM6 = SCE_HUM6 + pow((result[2].get('datapoints')[i][0] - averageHUM6), 2)
# computation of std
# sqrt( (1/N-1) * sum(measure - mean)^2 )
std_HUM6 = math.sqrt((1/float(N-1)) * SCE_HUM6)
    # 4/ computation of std for weather station data
    # pre allocations
    sum_Rn = 0
    sum_Thr = 0
    sum_u2 = 0
    sum_P = 0
    # Compute sum & mean for net radiation (result[3])
    length_result = len(result[3].get('datapoints'))
    for i in range(length_result - N, length_result): # for the last N measures of the day
        sum_Rn = sum_Rn + result[3].get('datapoints')[i][0] # sum of data points
    mean_Rn = sum_Rn / N # mean of datapoints
    # Compute sum & mean for temperature (result[6])
    length_result = len(result[6].get('datapoints'))
    for i in range(length_result - N, length_result): # for the last N measures of the day
        sum_Thr = sum_Thr + result[6].get('datapoints')[i][0]
    # mean of datapoints
    mean_Thr = sum_Thr / N
    # Compute sum & mean for wind speed (result[5])
    length_result = len(result[5].get('datapoints'))
    for i in range(length_result - N, length_result): # for the last N measures of the day
        sum_u2 = sum_u2 + result[5].get('datapoints')[i][0]
    # mean of datapoints
    mean_u2 = sum_u2 / N
    # Compute sum & mean for atmospheric pressure (result[8])
    length_result = len(result[8].get('datapoints'))
    for i in range(length_result - N, length_result): # for the last N measures of the day
        sum_P = sum_P + result[8].get('datapoints')[i][0] # sum of data points
    # mean of datapoints
    mean_P = sum_P / N
    # pre allocation
    SCE_Rn = 0
    SCE_Thr = 0
    SCE_u2 = 0
    SCE_P = 0
    # TODO split for loop calculation, inconsistent length of vectors
    # Temporal std of each weather channel over its last N datapoints :
    # sqrt( (1/(N-1)) * sum(measure - mean)^2 )
    length_result = len(result[3].get('datapoints'))
    for i in range(length_result - N, length_result):
        # calculation of the sum of deviations
        SCE_Rn = SCE_Rn + pow((result[3].get('datapoints')[i][0] - mean_Rn), 2)
    # computation of std
    std_Rn = math.sqrt((1/float(N-1)) * SCE_Rn)
    length_result = len(result[6].get('datapoints'))
    for i in range(length_result - N, length_result):
        # calculation of the sum of deviations
        SCE_Thr = SCE_Thr + pow((result[6].get('datapoints')[i][0] - mean_Thr), 2)
    # computation of std
    std_Thr = math.sqrt((1/float(N-1)) * SCE_Thr)
    length_result = len(result[5].get('datapoints'))
    for i in range(length_result - N, length_result):
        # calculation of the sum of deviations
        SCE_u2 = SCE_u2 + pow((result[5].get('datapoints')[i][0] - mean_u2), 2)
    # computation of std
    std_u2 = math.sqrt((1/float(N-1)) * SCE_u2)
    length_result = len(result[8].get('datapoints'))
    for i in range(length_result - N, length_result):
        # calculation of the sum of deviations
        SCE_P = SCE_P + pow((result[8].get('datapoints')[i][0] - mean_P), 2)
    # computation of std
    std_P = math.sqrt((1/float(N-1)) * SCE_P)
    print ""
    print 'Sensors quality check :'
    print '======================='
    print 'Here is the standard deviation over', int(N), 'minutes for the sensors :'
    print ' * HUM4 : ', round(std_HUM4, 3)
    print ' * HUM5 : ', round(std_HUM5, 3)
    print ' * HUM6 : ', round(std_HUM6, 3)
    print ' * Rn : ', round(std_Rn, 3)
    print ' * Temp : ', round(std_Thr, 3)
    print ' * Wind : ', round(std_u2, 3)
    print ' * atmPres : ', round(std_P, 3)
    print 'Here is the standard deviation over space for HUM sensors :'
    print ' * HUM456 : ', round(std_HUM456, 3)
    print''
    print('Quality check result :')
    # 1/ Quality check for all 3 WC sensor.
    # The probe set is trusted only if every per-probe temporal std AND the
    # spatial std between probes stay below their admissible limits.
    if std_HUM4 < LIM_HUM4 and std_HUM5 < LIM_HUM5 and std_HUM6 < LIM_HUM6 and std_HUM456 < LIM_HUM456:
        HUM_QualCheck = True
        print('-> Water content probe is working')
    else:
        HUM_QualCheck = False
        print('-> Water content probe is NOT working')
    # 2/ Quality check for weather station data
    if std_Rn < LIM_Rn and std_Thr < LIM_Thr and std_u2 < LIM_u2 and std_P < LIM_P:
        WS_QualCheck = True
        print('-> Weather station is working')
    else:
        WS_QualCheck = False
        print('-> Weather station is NOT working')
    ########################################
    # CHECK IF FIRST IRRIG IS NEEDED #
    ########################################
    # Daily irrigation is triggered when the estimated VWC drops below the limit.
    if VWC < Water_Content_Limit:
        Irrig_Needed = True
    else:
        Irrig_Needed = False
    ################################
    # IRRIGATION BASED ON WS #
    ################################
    # 1/ ET0 calculation for the last 24 h :
    # if quality check ok
    if HUM_QualCheck == True and WS_QualCheck == True:
        # pre allocations (Python 2: range() returns a mutable list, so the
        # hourly loop below can assign by index).
        # NOTE(review): range(23) allocates 23 hourly slots and the loop runs
        # j = 0..22, so only 23 of 24 hours are processed -- confirm whether
        # dropping the last hour is intentional.
        SommeRn = range(23)
        Thr = range(23)
        eThr = range(23)
        ea = range(23)
        u2 = range(23)
        delta = range(23)
        P = range(23)
        gamma = range(23)
        ET0 = range(23)
        Pluie = range(23)
# for each hour of the day
for j in range(0, 23):
# A - Computation of irradiation for each hour - Rn[MJ/(m2 hour)]
somme = 0
a = 0
length_result = len(result[3].get('datapoints'))
for i in range(length_result - (60 * 24) + (j * 60) + 1, length_result - ((23 - j) * 60)):
somme = somme + result[3].get('datapoints')[i][0] * 60 / (10 ** 6)
SommeRn[j] = somme
# B - computation of mean temprature for each hour - Thr[degree C]
somme = 0
length_result = len(result[6].get('datapoints'))
for i in range(length_result - (60 * 24) + (j * 60) + 1, length_result - ((23 - j) * 60)):
somme = somme + result[6].get('datapoints')[i][0]
Thr[j] = somme / 60
# C - Computation of the vapor pressure for each hour of the day August-Roche-Magnus equation - eThr[kPa]
# For equation see : https://en.wikipedia.org/wiki/Vapour_pressure_of_water
eThr[j] = 0.61094 * math.exp(17.625 * Thr[j] / (Thr[j] + 243.04))
# D - Computation of real vapor pressure - ea[kPa]
somme = 0
length_result = len(result[7].get('datapoints'))
for i in range(length_result - (60 * 24) + (j * 60) + 1, length_result - ((23 - j) * 60)):
somme = somme + result[7].get('datapoints')[i][0]
ea[j] = somme / 60
# E - Computation of mean wind speed for each hour - u2[m/s]
somme = 0
length_result = len(result[5].get('datapoints'))
for i in range(length_result - (60 * 24) + (j * 60) + 1, length_result - ((23 - j) * 60)):
somme = somme + result[5].get('datapoints')[i][0]
u2[j] = somme / 60
# u2 = 0.01
# F - Computation of the slope of ths saturation vapor pressure curve - delta [kPa /degree C]
delta[j] = 1635631.478 * math.exp(3525 * Thr[j] / (200 * Thr[j] + 48608)) / (25 * Thr[j] + 6076) ** 2
# G - Computation of mean atmospheric pressure for each hour - P[kPa]
somme = 0
length_result = len(result[8].get('datapoints'))
for i in range(length_result - (60 * 24) + (j * 60) + 1, length_result - ((23 - j) * 60)):
somme = somme + result[8].get('datapoints')[i][0]
P[j] = somme / 60
# H - Computation of the psychrometric constant - gamma [kPa/ degree C]
# For additional information see https://en.wikipedia.org/wiki/Psychrometric_constant
Cp = 0.001005 # Specific Heat Capacity of Air at 300 K [MJ/Kg K]
# Source : https://www.ohio.edu/mechanical/thermo/property_tables/air/air_Cp_Cv.html
lambdav = 2.26 # Latent heat of water vaporization [MJ / kg]
MW_ratio = 0.622 # Ratio molecular weight of water vapor/dry air
gamma[j] = Cp * P[j] / (lambdav * MW_ratio)
# I - Formula for hourly ET0 [mm/hour]
ET0[j] = (0.408 * delta[j] * SommeRn[j] + gamma[j] * (37 / (Thr[j] + 273)) * u2[j] * (eThr[j] - ea[j])) / (
delta[j] + gamma[j] * (1 + 0.34 * u2[j]))
# K - Rain for each hour - Pluie [mm/h]
somme = 0
length_result = len(result[4].get('datapoints'))
for i in range(length_result - (60 * 24) + (j * 60) + 1, length_result - ((23 - j) * 60)):
# Dividing by 60 because we cumulate rain intensity
Pluvio = somme + result[4].get('datapoints')[i][0] / 60
Pluie[j] = Pluvio * Area
        print''
        print "Recorded weather data for the last 24h :"
        print "========================================"
        print " * Total radiation was : ", round(sum(SommeRn), 2), " MJ/(m2 day)"
        print " * Mean temperature was : ",round(sum(Thr)/24, 2), " degree C"
        print " * Mean vapor pressure was : ",round(sum(ea)/24, 2), " kPa"
        print " * Mean wind speed for was : ",round(sum(u2)/24, 2), " m/s"
        print " * Mean atmospheric pressure was : ",round(sum(P)/24, 2), " kPa"
        print " * Yesterday it rained : ",round(sum(Pluie)/Area,2), " mm"
        print " * The ET0 for yesterday was : ",round(sum(ET0), 2), " mm"
        print''
        print('Irrigation decision :')
        print('=====================')
        print '* Average WC probe signal =', round(averageHUM456, 2), ' V'
        print '* The water content in the wall today is approximated to', int(VWC), "%"
        print '* Irrigation will start if water content is lower than', int(Water_Content_Limit), '%'
        if Irrig_Needed:
            print('* Irrigation is needed')
            # Computation of total water dose to apply - Dose[L] :
            # crop evapotranspiration over the module area minus collected rain.
            Dose = sum(ET0) * Kl * Area - sum(Pluie)
            if Dose < 0:
                Dose = 0
                print "* No water has to be applied today, net water flux through the pot for the last 24h is negative "
            print "* The dose of water to apply today is ", round(Dose, 3), "L"
            # Valve opening time - t[sec] (Q is the drip-pipe discharge in L/min)
            t = (Dose / Q)*60
            print "* The valve will be opened for", round(t/60, 2), " minutes"
            print "* This calculation is made for a Kl of ", Kl
            print "* If canopy cover has evolved on the module this value might have to be updated"
            timestamp = get_timestamp()
            # erase the current file and open the valve in 30 seconds
            open("valve.txt", 'w').write(str(timestamp + 30) + ";1\n")
            # append to the file and close the valve t seconds later
            open("valve.txt", 'a').write(str(timestamp + int(t) + 30) + ";0\n")
            print("* Irrigation has been programmed")
        else:
            print '* Irrigation is NOT needed'
        print '* Within', int(waiting_time / 3600), 'hours, water content will be checked again'
        # sleep for 'irrigation time PLUS x hours'
        sys.stdout.flush()
        if not test :
            time.sleep(waiting_time)
    ###############################
    # POST IRRIGATION CHECK #
    ###############################
    # Re-measure the wall a few hours after watering to decide on a top-up dose.
    # Only possible when the WC probes passed the morning quality check.
    if HUM_QualCheck == True:
        print ""
        print 'Post-watering check :'
        print '====================='
        print
        #TODO
        timestamp = get_timestamp()
        print 'It is now', time.strftime('%I:%M%p', time.localtime(timestamp))
        print '* Watering has been done', int(waiting_time / 3600), "hours ago, it will now be checked if extra water is needed"
        print'* Data is being collected over', int(N), 'minutes...'
        # Get WC measures during 5 minutes
        ##################################
        dataFile = None
        # pre allocation (Python 2 range() returns mutable lists, assigned by
        # index below)
        Last_WC_HUM4 = range(0, N)
        Last_WC_HUM5 = range(0, N)
        Last_WC_HUM6 = range(0, N)
        # reading HUM values during 5 minutes for all 3 HUM sensors
        for i in range(0, N):
            # HUM4
            try: # urlopen not usable with "with"
                url = "http://" + host + "/api/get/%21s_HUM4"
                # get datapoint for HUM sensor
                dataFile = urllib.urlopen(url, None, 20)
                # store it
                data = dataFile.read(80000)
                Last_WC_HUM4[i] = data
            except:
                print(u"URL=" + (url if url else "") + \
                    u", Message=" + traceback.format_exc())
            # HUM5
            try: # urlopen not usable with "with"
                url = "http://" + host + "/api/get/%21s_HUM5"
                # get datapoint for HUM sensor
                dataFile = urllib.urlopen(url, None, 20)
                # store it
                Last_WC_HUM5[i] = dataFile.read(80000)
            except:
                print(u"URL=" + (url if url else "") + \
                    u", Message=" + traceback.format_exc())
            # HUM6
            try: # urlopen not usable with "with"
                url = "http://" + host + "/api/get/%21s_HUM6"
                # get datapoint for HUM sensor
                dataFile = urllib.urlopen(url, None, 20)
                # store it
                Last_WC_HUM6[i] = dataFile.read(80000)
            except:
                print(u"URL=" + (url if url else "") + \
                    u", Message=" + traceback.format_exc())
            # To get the messages into nohup.out
            sys.stdout.flush()
            # sleep for 1 minutes (until next measure is recored)
            if not test :
                time.sleep(60)
        # Mean WC over 5 minutes.
        # NOTE(review): the loops start at index 1, so the first reading is
        # deliberately dropped from the means (warm-up sample?) -- confirm.
        somme = 0
        for i in range(1, N):
            # little trick to remove extra quotes (otherwise under the form '"str"' not readable)...
            WC = Last_WC_HUM4[i]
            somme = somme + float(WC[1:len(WC) - 1])
        Last_WC_HUM4_mean = somme / (N-1)
        somme = 0
        for i in range(1, N):
            WC = Last_WC_HUM5[i]
            somme = somme + float(WC[1:len(WC) - 1])
        Last_WC_HUM5_mean = somme / (N-1)
        somme = 0
        for i in range(1, N):
            WC = Last_WC_HUM6[i]
            somme = somme + float(WC[1:len(WC) - 1])
        Last_WC_HUM6_mean = somme / (N-1)
        # Mean values of the 3 sensors
        Last_WC_mean = (Last_WC_HUM4_mean + Last_WC_HUM5_mean + Last_WC_HUM6_mean) / 3
        Last_VWC = m_calib * Last_WC_mean + p_calib
        # QUALITY CHECK post irrig check
        ################################
        # pre allocations
        SCE_WC4 = 0
        SCE_WC5 = 0
        SCE_WC6 = 0
        # NOTE(review): the means above were taken over indices 1..N-1 but the
        # deviations here run over 0..N-1 -- confirm index 0 should be included.
        for i in range(0, N):
            # Sum of deviations calculation
            WC = Last_WC_HUM4[i]
            SCE_WC4 = SCE_WC4 + pow(float(WC[1:len(WC) - 1]) - Last_WC_HUM4_mean, 2)
            WC = Last_WC_HUM5[i]
            SCE_WC5 = SCE_WC5 + pow(float(WC[1:len(WC) - 1]) - Last_WC_HUM5_mean, 2)
            WC = Last_WC_HUM6[i]
            SCE_WC6 = SCE_WC6 + pow(float(WC[1:len(WC) - 1]) - Last_WC_HUM6_mean, 2)
        # Std computation : sqrt( (1/(N-1)) * sum(measure - mean)^2 )
        std_WC4 = math.sqrt((1/(float(N)-1)) * SCE_WC4)
        std_WC5 = math.sqrt((1/(float(N)-1)) * SCE_WC5)
        std_WC6 = math.sqrt((1/(float(N)-1)) * SCE_WC6)
        # TODO check std over space...
        # Spatial std between the three probe means (n=3 -> divide by 2).
        std_WC456 = math.sqrt((1/float(2)) * (pow((Last_WC_HUM4_mean - Last_WC_mean), 2) + pow((Last_WC_HUM5_mean - Last_WC_mean), 2) + pow(
            (Last_WC_HUM6_mean - Last_WC_mean), 2)))
        print '* Reliability of the collected data will determined'
        print ' Here is the standard deviation over the last', int(N), 'minutes for the HUM sensors :'
        print ' * HUM4 : ', round(std_WC4, 3)
        print ' * HUM5 : ', round(std_WC5, 3)
        print ' * HUM6 : ', round(std_WC6, 3)
        # TODO over space of the last measure ?
        print ' Here is the standard deviation over space for HUM sensors :'
        print ' * HUM456 : ', round(std_WC456, 3)
        # TODO add condition for space reliability check
        # Trust the post-watering reading only if each probe's temporal std is
        # still within its admissible limit.
        if std_WC4 < LIM_HUM4 and std_WC5 < LIM_HUM5 and std_WC6 < LIM_HUM6:
            print "* The probes are still working"
            print "* Post watering check can be processed"
            print '* Average WC probe signal =', round(Last_WC_mean, 2), 'V'
            print '* Water content is now at', int(Last_VWC), '%'
            # Determine if additional watering is needed
            if Last_VWC < Water_Content_Limit:
                print '* This is too low, extra water is needed'
                # Extra dose [L] = pot volume fraction needed to raise VWC
                # back up to the admissible limit.
                Dose = (Water_Content_Limit * Pot_volume - Last_VWC * Pot_volume)/100
                # Calculation of irrigation time (seconds)
                t = (Dose / Q)*60
                print '* An additional', round(Dose,2), 'L is needed'
                print '* The valve will be opened for', round(t/60,2), 'more minutes today'
                # make irrigation happen
                timestamp = get_timestamp()
                # append to today's schedule and open the valve in 30 seconds
                # (mode 'a': the morning schedule is kept, not erased)
                open("valve.txt", 'a').write(str(timestamp + 30) + ";1\n")
                # append to the file and close the valve t seconds later
                open("valve.txt", 'a').write(str(timestamp + int(t) + 30) + ";0\n")
                print("* Extra watering has been programmed")
            else:
                print('* This is sufficient, no extra watering is needed')
        else:
            print "* The probes are not working anymore, post watering check is not possible today"
            print "* A technical check up of the sensors is required"
    ##################################################################
    # DEFAULT IRRIGATION if WC probes and weather station are down #
    ##################################################################
    # Failsafe: with no trustworthy sensor at all, apply a fixed default dose.
    if HUM_QualCheck == False and WS_QualCheck == False:
        print('* Both water content probes and weather station are out of use')
        timestamp = get_timestamp()
        open("valve.txt", 'w').write(str(timestamp + 30) + ";1\n")
        open("valve.txt", 'a').write(str(int(timestamp + default_irrig) + 30) + ";0\n")
        print '* A security watering has been processed'
        print '* The valve will be opened for', round(default_irrig/60, 2), 'minutes today'
        print '* Nevertheless, a check up of the monitoring system is needed'
    print('This is it for today, new watering will start tomorrow at 6AM')
    # To get the messages into nohup.out
    sys.stdout.flush()
    # Shut down script until the next day
    now = get_timestamp()
    time_to_sleep = tomorrow - now
    if test:
        break
    else :
        time.sleep(time_to_sleep)
|
993,951 | 85db6f5c90bb39d794755709be064046469d43dc | from .producer import producer
import os
class ftree_producer(producer):
    """Producer that builds friend trees by driving prepareEventVariablesFriendTree.py."""

    name = "ftree_producer"
    basecommand = "python3 prepareEventVariablesFriendTree.py"
    wz_modules = "CMGTools.TTHAnalysis.tools.nanoAOD.wzsm_modules"
    jobname = "happyTreeFriend"

    def add_more_options(self, parser):
        """Register the friend-tree specific command line options on *parser*."""
        self.parser = parser
        parser.add_option("--step", dest="step", type=int, default=1,
                          help='''Which friend-tree to run.''')
        parser.add_option("--treename", dest="treename", default="NanoAOD",
                          help=''' Name of the tree file ''')
        parser.add_option("--chunksize", dest="chunksize", default=50000,
                          help=''' Number of chunks to split jobs''')
        return

    def submit_InCluster(self):
        """Return the base command extended with cluster queue and log-dir flags."""
        cluster_flags = " --env oviedo -q %s --log-dir %s" % (self.queue, self.outname)
        return self.command + cluster_flags

    def run(self):
        """Resolve input/output paths for the selected step and assemble the
        argument list handed to the friend-tree command."""
        self.inpath = os.path.join(self.inpath, self.doData, self.year)
        step_config = self.modules[self.year][self.step]
        module_name = step_config[self.doData]
        outfriend_folder = step_config["outname"]
        self.outname = os.path.join(
            self.outname, self.doData.lower(), self.year, outfriend_folder)
        self.commandConfs = [
            "%s" % self.inpath,
            "%s" % self.outname,
            "--name %s" % self.jobname,
            "-t %s" % self.treename,
            "-n -I %s %s" % (self.wz_modules, module_name),
            " -N %s" % self.chunksize,
            "%s" % self.extra,
            self.add_friends(self.step),
        ]
        return
|
993,952 | c073cfb2e35293b730128a03cff7fd4dbe696d5c | import pymysql
# see https://github.com/PyMySQL/PyMySQL/issues/790
pymysql.version_info = (1, 4, 6, 'final', 0)
pymysql.install_as_MySQLdb()
|
993,953 | f1746e3b69cfcb3128c6eb8122115faa0ed5e948 | from rest_framework import viewsets
from resume.models import personal_info, Skill, Education, Achievement
from resume.serializers import personal_infoSerializer, SkillSerializer, EducationSerializer, AchievementSerializer
class personal_infoViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint exposing every personal_info record via DRF."""
    queryset = personal_info.objects.all()
    serializer_class = personal_infoSerializer
993,954 | a0b3ed3145193cac1140e49962456552fa76d507 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from google.appengine.ext import ndb
from common.waterfall import failure_type
from gae_libs.gitiles.cached_gitiles_repository import CachedGitilesRepository
from gae_libs.http.http_client_appengine import HttpClientAppengine
from libs import time_util
from model import analysis_approach_type
from model.wf_suspected_cl import WfSuspectedCL
from waterfall import build_util
def GetCLInfo(cl_info_str):
  """Splits a '<repo_name>/<revision>' string into its components."""
  parts = cl_info_str.split('/')
  return parts
def _GetsStatusFromSameFailure(builds, failures):
for build in builds.values():
if build['status'] is not None and build['failures'] == failures:
return build['status']
return None
@ndb.transactional
def UpdateSuspectedCL(
    repo_name, revision, commit_position,
    approach, master_name, builder_name, build_number, cl_failure_type,
    failures, top_score):
  """Creates or updates the WfSuspectedCL entity for the given revision.

  Records the analysis approach and failure type that implicated the CL
  (appending only if new) and stores per-build detail under a
  master/builder/build key. Runs inside an ndb transaction so concurrent
  analyses cannot clobber each other's updates.
  """
  suspected_cl = (
      WfSuspectedCL.Get(repo_name, revision) or
      WfSuspectedCL.Create(repo_name, revision, commit_position))
  if not suspected_cl.identified_time: # pragma: no cover.
    suspected_cl.identified_time = time_util.GetUTCNow()
  suspected_cl.updated_time = time_util.GetUTCNow()
  if approach not in suspected_cl.approaches:
    suspected_cl.approaches.append(approach)
  if cl_failure_type not in suspected_cl.failure_type:
    suspected_cl.failure_type.append(cl_failure_type)
  build_key = build_util.CreateBuildId(
      master_name, builder_name, build_number)
  if build_key not in suspected_cl.builds:
    # First time this build implicates the CL; inherit the status from any
    # earlier build with identical failures.
    suspected_cl.builds[build_key] = {
        'approaches': [approach],
        'failure_type': cl_failure_type,
        'failures': failures,
        'status': _GetsStatusFromSameFailure(suspected_cl.builds, failures),
        'top_score': top_score
    }
  else:
    # Build already recorded: only extend the list of approaches.
    build = suspected_cl.builds[build_key]
    if approach not in build['approaches']:
      build['approaches'].append(approach)
  suspected_cl.put()
def _RoundConfidentToInteger(confidence):
return int(round(confidence * 100))
def GetSuspectedCLConfidenceScore(confidences, cl_from_analyzed_build):
  """Maps a suspected CL's approach/failure-type combination to a confidence %.

  Args:
    confidences: object holding per-category confidence data
        (compile/test x heuristic/try-job attributes).
    cl_from_analyzed_build: dict with 'failure_type', 'approaches' and
        'top_score' describing how this CL was implicated.

  Returns:
    The confidence as an integer percentage, or None when no category
    matches (or inputs are missing).
  """
  if not confidences or not cl_from_analyzed_build:
    return None
  # Compile failures and test failures use separate confidence tables;
  # within each, the approach combination selects the entry.
  if cl_from_analyzed_build['failure_type'] == failure_type.COMPILE:
    # Both heuristic and try-job agreed.
    if sorted(cl_from_analyzed_build['approaches']) == sorted([
        analysis_approach_type.HEURISTIC, analysis_approach_type.TRY_JOB]):
      return _RoundConfidentToInteger(
          confidences.compile_heuristic_try_job.confidence)
    elif cl_from_analyzed_build['approaches'] == [
        analysis_approach_type.TRY_JOB]:
      return _RoundConfidentToInteger(
          confidences.compile_try_job.confidence)
    elif (cl_from_analyzed_build['approaches'] == [
        analysis_approach_type.HEURISTIC] and
          cl_from_analyzed_build['top_score']):
      # Heuristic-only: look up the confidence matching the heuristic score.
      for confidences_info in confidences.compile_heuristic:
        if confidences_info.score == cl_from_analyzed_build['top_score']:
          return _RoundConfidentToInteger(confidences_info.confidence)
    return None
  else:
    if sorted(cl_from_analyzed_build['approaches']) == sorted([
        analysis_approach_type.HEURISTIC, analysis_approach_type.TRY_JOB]):
      return _RoundConfidentToInteger(
          confidences.test_heuristic_try_job.confidence)
    elif cl_from_analyzed_build['approaches'] == [
        analysis_approach_type.TRY_JOB]:
      return _RoundConfidentToInteger(confidences.test_try_job.confidence)
    elif (cl_from_analyzed_build['approaches'] == [
        analysis_approach_type.HEURISTIC] and
          cl_from_analyzed_build['top_score']):
      for confidences_info in confidences.test_heuristic:
        if confidences_info.score == cl_from_analyzed_build['top_score']:
          return _RoundConfidentToInteger(confidences_info.confidence)
    return None
def _HasNewFailures(current_failures, new_failures):
  """Checks if there are any new failures in the current build.

  Args:
    current_failures: dict mapping step name to a list of failed tests.
    new_failures: dict of the same shape to compare against.

  Returns:
    True if current_failures contains a step or a test that is absent from
    new_failures; False otherwise.
  """
  if current_failures == new_failures:
    return False
  # NOTE: .iteritems() -- this module targets Python 2.
  for step, tests in current_failures.iteritems():
    if not new_failures.get(step): # New step.
      return True
    for test in tests:
      if not test in new_failures[step]: # New test.
        return True
  return False
def GetSuspectedCLConfidenceScoreAndApproach(
    confidences, cl_from_analyzed_build, cl_from_first_failed_build):
  """Returns (confidence %, approach) for a suspected CL.

  Falls back to the first failed build's record when the analyzed build adds
  no new failures, since try-job results are only recorded for first-time
  failures. Returns (None, None) when inputs are missing.
  """
  if not confidences or (
      not cl_from_analyzed_build and not cl_from_first_failed_build):
    return None, None
  if (cl_from_first_failed_build and (
      not cl_from_analyzed_build or
      not _HasNewFailures(cl_from_analyzed_build.get('failures'),
                          cl_from_first_failed_build.get('failures')))):
    # For non-first-time failures, the try job result is not recorded.
    # If there is no new failures in current build, use first failed build to
    # make sure the confidence score is correct.
    cl_from_analyzed_build = cl_from_first_failed_build
  confidence = GetSuspectedCLConfidenceScore(
      confidences, cl_from_analyzed_build)
  # Try-job evidence takes precedence over heuristic-only evidence.
  approach = (
      analysis_approach_type.TRY_JOB if analysis_approach_type.TRY_JOB in
      cl_from_analyzed_build['approaches'] else
      analysis_approach_type.HEURISTIC)
  return confidence, approach
def GetCulpritInfo(repo_name, revision):
  """Returns culprit info of the given revision.

  Fetches the change log from the chromium/src Gitiles mirror and returns a
  dict with commit position, code-review url, review host and change id.
  """
  # TODO(stgao): get repo url at runtime based on the given repo name.
  # unused arg - pylint: disable=W0612,W0613
  repo = CachedGitilesRepository(
      HttpClientAppengine(),
      'https://chromium.googlesource.com/chromium/src.git')
  change_log = repo.GetChangeLog(revision)
  return {
      'commit_position': change_log.commit_position,
      'code_review_url': change_log.code_review_url,
      'review_server_host': change_log.review_server_host,
      'review_change_id': change_log.review_change_id
  }
993,955 | 27a768a17fae78262ec96cc692447297855f23f7 | import os
import platform
import sys
import time
import mysql.connector
class User:
    def __init__(self):
        """Initialize an empty profile and immediately enter the start menu."""
        # Profile fields are populated later by register() / log_in().
        self.name = None
        self.login = None
        self.password = None
        self.age = None
        # Allowed age range for registration.
        self.min_age = 10
        self.max_age = 150
        self.entering_system()
    @staticmethod
    def init_message():
        """Print the start-menu banner."""
        print(""" Welcome
        Register [1]
        Log in [2]""")
    def entering_system(self):
        """Show the start menu and route the user to register() or log_in()."""
        self.clear()
        self.init_message()
        init_input = input("Enter your option: ").strip()
        option = ['1', '2']
        # Re-prompt until one of the two menu options is supplied.
        while init_input not in option or not init_input.isnumeric():
            self.clear()
            print("Invalid input. Please, try again and enter only options below: ")
            print(option)
            init_input = input("Enter your option: ").strip()
        self.register() if init_input == "1" else self.log_in()
def register(self):
self.clear()
print("Welcome registering part")
name = input("Enter your name: ").strip().capitalize()
while not name.isalpha():
self.clear()
print("Invalid input. Try again")
name = input("Enter your name: ").strip().capitalize()
login = input("Enter your login: ").strip()
while not login.isalnum() or self.is_login_exists(login):
self.clear()
print("Invalid input or exist login. Try again")
login = input("Enter your login: ").strip()
password = input("Enter your password(more than 5 characters): ")
confirm_pass = input("Confirm your password: ")
while len(password) == 0 or password != confirm_pass or len(password) < 5:
self.clear()
print("Invalid password or your password didn't match. Try again")
password = input("Enter your password(more than 5 characters): ")
confirm_pass = input("Confirm your password: ")
age = input("Enter your age: ").strip()
while not age.isnumeric():
print("Please, enter only numbers")
age = input("Enter your age: ").strip()
age = int(age)
while age < self.min_age or age > self.max_age:
self.clear()
print(f"Invalid age. To enter this website your age must be between {self.min_age}-{self.max_age} ages!")
age = input("Enter your age: ").strip()
age = int(age)
self.name = name
self.login = login
self.password = password
self.age = age
my_data = self.database()
my_cursor = my_data.cursor()
my_cursor.execute(f"insert into login_info values('{self.name}', '{self.login}', '{self.password}', '{self.age}');")
my_data.commit()
print("You have entered the system!")
def is_login_exists(self, login1):
my_data = self.database()
my_cursor = my_data.cursor()
my_cursor.execute(f"select login from login_info where login='{login1}';")
result = my_cursor.fetchall()
if len(result) == 0:
return False
elif result[0][0] == login1:
return True
    @staticmethod
    def database():
        """Open and return a new connection to the local 'login' MySQL database."""
        # NOTE(review): credentials are hard-coded in source; consider moving
        # them to environment variables or a config file.
        my_data = mysql.connector.connect(
            host='localhost',
            user="eugene09",
            password="12345678",
            database="login"
        )
        return my_data
def log_in(self):
    """Authenticate an existing user, then show the account-management menu.

    Loops until both the login and the password match a login_info row.
    Menu: 1 = update account, 2 = delete account, 3 = restart the whole
    register/log-in flow (by re-running __init__).
    """
    self.clear()
    input_login = input("Enter your login: ").strip()
    while not self.is_login_exists(input_login):
        self.clear()
        print("The login didn't match. Try again")
        input_login = input("Enter your login: ").strip()
    # Remember which account is authenticated for the rest of the session.
    self.login = input_login
    input_password = input("Enter your password: ")
    while not self.is_password_correct(self.login, input_password):
        self.clear()
        print("Wrong password. Please, try again")
        input_password = input("Enter your password: ")
    self.clear()
    print("You've entered the system")
    print(""" Update account [1]
Delete account [2]
Exit [3]""")
    choice = input("Your choice: ").strip()
    options = ['1', '2', '3']
    while choice not in options:
        self.clear()
        print("Invalid input. Try again")
        choice = input("Your choice: ").strip()
    if choice == "1":
        self.update_account()
    elif choice == "2":
        self.delete_account()
    else:
        # Restart the whole prompt flow from scratch.
        self.__init__()
def update_account(self):
    """Menu dispatcher: let the authenticated user pick which field to edit.

    Option 0 terminates the process via sys.exit(); options 1-4 delegate to
    the corresponding change_* method.
    """
    self.clear()
    print("""What do you want to change?
Login [1]
Password [2]
Username [3]
Age [4]
Exit [0]""")
    option = input("Enter your option: ").strip()
    options = ['0','1', '2', '3', '4']
    while option not in options:
        self.clear()
        print("Invalid input. Try again")
        print(f"Options: {options}")
        option = input("Enter your option: ").strip()
    if option == '1':
        self.change_login()
    elif option == '2':
        self.change_password()
    elif option == '3':
        self.change_username()
    elif option == '4':
        self.change_age()
    else:
        sys.exit()
def change_login(self):
    """Re-authenticate by login, ask for a new unique login, update the DB.

    Fixes: parameterized UPDATE (SQL injection) and keeping self.login in
    sync after the change (the original left it stale, so later operations
    in the same session targeted the old login).
    """
    self.clear()
    login1 = input("Enter your login: ").strip()
    while login1 != self.login:
        self.clear()
        print("Your login didn't match. Try again")
        login1 = input("Enter your login: ").strip()
    new_login = input("Enter your new login: ").strip()
    while self.is_login_exists(new_login) or not new_login.isalnum():
        self.clear()
        print("This login exists or invalid login")
        new_login = input("Enter your new login: ").strip()
    check = input(f"Are you sure to change your login to {new_login}? [y/n]: ").strip().lower()
    options = ['y', 'yes', 'n', 'no']
    while check not in options:
        self.clear()
        print("Invalid input. Please, try again")
        check = input(f"Are you sure to change your login to '{new_login}'? [y/n]: ").strip().lower()
    if check in options[:2]:
        my_data = self.database()
        my_cursor = my_data.cursor()
        # Parameterized query instead of f-string interpolation.
        my_cursor.execute("update login_info set login=%s where login=%s",
                          (new_login, self.login))
        my_data.commit()
        self.login = new_login  # keep the session's login current
        print("Your login is changed")
    else:
        self.update_account()
def change_password(self):
    """Re-authenticate with the old password, then store a new one.

    Fix: parameterized UPDATE (the f-string version was SQL-injectable).
    """
    self.clear()
    login1 = input("Enter your login: ").strip()
    while login1 != self.login:
        self.clear()
        print("Your login didn't match. Try again")
        login1 = input("Enter your login: ").strip()
    old_password = input("Enter your previous password: ")
    while not self.is_password_correct(login1, old_password):
        self.clear()
        print("Password didn't match. Try again")
        old_password = input("Enter your previous password: ")
    new_password = input("Enter your new password(more than 5 characters): ")
    while len(new_password) < 5:
        self.clear()
        print("Please, enter more than 5 characters")
        new_password = input("Enter your new password: ")
    check = self.check()
    options = ['y', 'yes', 'n', 'no']
    if check in options[:2]:
        my_data = self.database()
        my_cursor = my_data.cursor()
        # Parameterized query instead of f-string interpolation.
        my_cursor.execute("update login_info set password=%s where login=%s",
                          (new_password, login1))
        my_data.commit()
        print("Your password is changed")
    else:
        self.update_account()
def change_username(self):
    """Re-authenticate, then update the stored display name for this login.

    Fixes: parameterized UPDATE (SQL injection) and the success message,
    which previously said "password" after changing the username.
    """
    self.clear()
    login1 = input("Enter your login: ").strip()
    while login1 != self.login:
        self.clear()
        print("Your login didn't match. Try again")
        login1 = input("Enter your login: ").strip()
    password = input("Enter your password: ")
    while not self.is_password_correct(login1, password):
        self.clear()
        print("Password didn't match. Try again")
        password = input("Enter your password: ")
    username = input("Enter your new username: ").strip().capitalize()
    while not username.isalpha():
        print("Invalid username. Try again")
        username = input("Enter your new username: ").strip().capitalize()
    check = self.check()
    options = ['y', 'yes', 'n', 'no']
    if check in options[:2]:
        my_data = self.database()
        my_cursor = my_data.cursor()
        # Parameterized query instead of f-string interpolation.
        my_cursor.execute("update login_info set name=%s where login=%s",
                          (username, login1))
        my_data.commit()
        print("Your username is changed")
    else:
        self.update_account()
def change_age(self):
    """Re-authenticate, then update the stored age (validated against the
    min_age..max_age range).

    Fix: parameterized UPDATE (the f-string version was SQL-injectable).
    """
    self.clear()
    login1 = input("Enter your login: ").strip()
    while login1 != self.login:
        self.clear()
        print("Your login didn't match. Try again")
        login1 = input("Enter your login: ").strip()
    password = input("Enter your password: ")
    while not self.is_password_correct(login1, password):
        self.clear()
        print("Password didn't match. Try again")
        password = input("Enter your password: ")
    new_age = input("Enter your age: ").strip()
    while not new_age.isnumeric() or int(new_age) < self.min_age or int(new_age) > self.max_age:
        self.clear()
        print("Wrong age. Please, try again")
        new_age = input("Enter your age: ").strip()
    check = self.check()
    options = ['y', 'yes', 'n', 'no']
    if check in options[:2]:
        my_data = self.database()
        my_cursor = my_data.cursor()
        # Parameterized query instead of f-string interpolation.
        my_cursor.execute("update login_info set age=%s where login=%s",
                          (int(new_age), login1))
        my_data.commit()
        print("Your age is changed")
    else:
        self.update_account()
def delete_account(self):
    """Re-authenticate, confirm, delete this login's row, then restart.

    Fix: parameterized DELETE (the f-string version was SQL-injectable).
    """
    self.clear()
    login1 = input("Enter your login: ").strip()
    while login1 != self.login:
        self.clear()
        print("Your login didn't match. Try again")
        login1 = input("Enter your login: ").strip()
    password = input("Enter your password: ")
    while not self.is_password_correct(login1, password):
        self.clear()
        print("Password didn't match. Try again")
        password = input("Enter your password: ")
    options = ['y', 'yes', 'n', 'no']
    check = self.check()
    if check in options[:2]:
        my_data = self.database()
        my_cursor = my_data.cursor()
        # Parameterized query instead of f-string interpolation.
        my_cursor.execute("delete from login_info where login=%s", (login1,))
        my_data.commit()
        print("Your account has been deleted")
        time.sleep(2.5)
        # Restart the register/log-in flow from scratch.
        self.__init__()
    else:
        self.update_account()
def check(self, prompt="Are you sure to change your age? [y/n]: "):
    """Ask a yes/no confirmation and return one of 'y', 'yes', 'n', 'no'.

    `prompt` is a new, backward-compatible parameter: the default reproduces
    the old wording byte-for-byte (which was age-specific even though this
    helper is used by every change_* method), while callers editing other
    fields can now pass an accurate question. Also drops the pointless
    f-prefix on a string with no placeholders.
    """
    answer = input(prompt).strip().lower()
    options = ['y', 'yes', 'n', 'no']
    while answer not in options:
        self.clear()
        print("Invalid input. Please, try again")
        answer = input(prompt).strip().lower()
    return answer
def is_password_correct(self, login, password):
    """Return True when `login` exists and its stored password equals `password`.

    Fixes: parameterized SELECT (SQL injection) and an IndexError the
    original raised on `result[0][0]` when the login had no row at all;
    also replaces the `True if ... else False` anti-idiom.
    """
    my_data = self.database()
    my_cursor = my_data.cursor()
    my_cursor.execute("select password from login_info where login=%s;", (login,))
    result = my_cursor.fetchall()
    return bool(result) and result[0][0] == password
@staticmethod
def clear():
    """Clear the terminal: `clear` on Linux, `cls` on Windows, no-op elsewhere."""
    current_os = platform.system()
    if current_os == "Windows":
        os.system("cls")
    elif current_os == "Linux":
        os.system("clear")
# Script entry point: constructing User immediately launches the interactive
# register / log-in prompt flow.
user1 = User()
|
993,956 | 3295a8653b3c2f407a71e6a97033a0d030901de0 | import discord, asyncio, sys, traceback, checks, asyncpg, useful, credentialsFile
from discord.ext import commands
def getPrefix(bot, message):
    """Resolve the bot's command prefixes: a direct mention plus the fixed
    text prefixes below."""
    fixed_prefixes = ("traa!", "valtarithegreat!", "tt!", "tt?")
    resolver = commands.when_mentioned_or(*fixed_prefixes)
    return resolver(bot, message)
async def run():
    """Create the asyncpg pool, ensure the schema exists, then run the bot.

    NOTE(review): the `if __name__ == '__main__'` guard *inside* this
    coroutine means extensions are silently skipped when the module is
    imported rather than executed directly — confirm that is intended.
    NOTE(review): a KeyboardInterrupt raised inside the event loop rarely
    reaches this except clause; the db.close()/logout() cleanup is
    best-effort only.
    """
    description = "/r/Traa community help bot! tt!help for commands"
    credentials = credentialsFile.getCredentials()
    db = await asyncpg.create_pool(**credentials)
    # Idempotent schema setup: users, per-guild config, games catalogue,
    # per-role permission flags, per-guild user scores, linked game accounts.
    await db.execute('''CREATE TABLE IF NOT EXISTS Users(userID bigint PRIMARY KEY,
banned boolean DEFAULT false);
CREATE TABLE IF NOT EXISTS Guilds(guildID bigint PRIMARY KEY,
prefix text,
raidroleid bigint,
kicktext text,
bantext text,
games boolean DEFAULT true,
pubquiz boolean DEFAULT true,
pubquiztime smallint DEFAULT 10,
ongoingpubquiz boolean DEFAULT false,
pubquiztext text,
pubquizendtext text,
pubquizchannel bigint,
pubquizquestionuserid bigint,
pubquizquestionnumber integer DEFAULT 0,
pubquizquestionactive boolean DEFAULT false,
pubquizlastquestionsuper boolean DEFAULT false,
bluetext boolean DEFAULT true,
misc boolean DEFAULT true,
NSFW boolean DEFAULT false,
welcome boolean DEFAULT false,
welcomeChannel bigint,
welcomeText text,
leave boolean DEFAULT false,
leaveChannel bigint,
leaveText text,
administrator boolean DEFAULT true,
banned boolean DEFAULT false);
CREATE TABLE IF NOT EXISTS Games(gameID serial PRIMARY KEY,
gameName text,
gameReleaseDate text,
gamePublisher text,
gameDescription text);
CREATE TABLE IF NOT EXISTS Roles(roleID bigint PRIMARY KEY,
guildID bigint references Guilds(guildID) ON DELETE CASCADE ON UPDATE CASCADE,
administrator boolean DEFAULT false,
muted boolean DEFAULT false,
pqStart boolean DEFAULT false,
pqEnd boolean DEFAULT false,
pqQuestion boolean DEFAULT false,
pqSuperQuestion boolean DEFAULT false,
pqOverride boolean DEFAULT false,
pqSetTime boolean DEFAULT false,
pqJoin boolean DEFAULT true,
pqQMHelp boolean DEFAULT false,
pqsettext boolean DEFAULT false,
pqleaderboard boolean DEFAULT false,
pqcorrect boolean DEFAULT false,
pqanswer boolean DEFAULT false,
bluetext boolean DEFAULT true,
bluetextcode boolean DEFAULT true,
setWelcomeChannel boolean DEFAULT false,
setWelcomeText boolean DEFAULT false,
setLeaveChannel boolean DEFAULT false,
setLeaveText boolean DEFAULT false,
toggleRaid boolean DEFAULT false,
setRaidRole boolean DEFAULT false,
setRaidText boolean DEFAULT false,
mute boolean DEFAULT false,
cute boolean DEFAULT true,
conch boolean DEFAULT true,
eightball boolean DEFAULT true,
setMuteRole boolean DEFAULT false,
esix boolean DEFAULT false,
setbantext boolean DEFAULT false,
setkicktext boolean DEFAULT false,
selfAssignable boolean DEFAULT false);
CREATE TABLE IF NOT EXISTS GuildUsers(userID bigint references Users(userID) ON DELETE CASCADE ON UPDATE CASCADE,
guildID bigint references Guilds(guildID) ON DELETE CASCADE ON UPDATE CASCADE,
pubquizScoreTotal integer DEFAULT 0,
pubquizScoreWeekly integer DEFAULT 0,
PRIMARY KEY(userID, guildID));
CREATE TABLE IF NOT EXISTS UserGameAccounts(accountID serial PRIMARY KEY,
userID bigint references Users(userID) ON DELETE CASCADE ON UPDATE CASCADE,
gameID serial references Games(gameID) ON DELETE CASCADE ON UPDATE CASCADE,
accountRank text,
accountName text,
accountRegion text,
accountPublic boolean DEFAULT true,
accountInfo text,
accountPlatform text);''')
    bot = Bot(description=description, db=db)
    initial_extensions = ['admin', 'setup', 'misc', 'roles', 'pubquiz', 'nsfw']
    if __name__ == '__main__':
        for extension in initial_extensions:
            try:
                bot.load_extension(extension)
            except Exception as e:
                # A broken extension is logged but does not stop the bot.
                print('Failed to load extension ' + extension, file=sys.stderr)
                traceback.print_exc()
    try:
        await bot.start(credentialsFile.getToken())
    except KeyboardInterrupt:
        await db.close()
        await bot.logout()
class Bot(commands.Bot):
    """The Traa helper bot: a commands.Bot carrying a DB pool, a rotating
    embed-colour palette and the classic 8-ball outcome table."""

    def __init__(self, **kwargs):
        super().__init__(
            description=kwargs.pop("description"),
            command_prefix=getPrefix
        )
        self.pubquizAnswers = []
        self.db = kwargs.pop("db")
        # Index into the colour rotation; -1 so the first getcolour() call
        # lands on entry 0.
        self.currentColour = -1
        self.outcomes = ["It is certain", "It is decidedly so", "Without a doubt", "Yes - definitely",
                         "You may rely on it",
                         "As I see it, yes", "Most likely", "Outlook good", "Yes", "Signs point to yes",
                         "Reply hazy, try again", "Ask again later", "Better not tell you now",
                         "Cannot predict now", "Concentrate and ask again", "Don't count on it",
                         "My reply is no", "My sources say no", "Outlook not so good", "Very doubtful"]

    async def on_ready(self):
        """Log our identity and advertise the playing status once connected."""
        print("Username: {0}\nID: {0.id}".format(self.user))
        game = discord.Game("chess with Rainbow Restarter!")
        await self.change_presence(status=discord.Status.online, activity=game)

    def getcolour(self):
        """Return the next colour in the palette, wrapping back to the start."""
        palette = ["5C6BC0", "AB47BC", "EF5350", "FFA726", "FFEE58", "66BB6A", "5BCEFA", "F5A9B8", "FFFFFF", "F5A9B8", "5BCEFA"]
        self.currentColour = (self.currentColour + 1) % len(palette)
        return discord.Colour(int(palette[self.currentColour], 16))

    def conchcolour(self, number):
        """Map an outcome index to green (0-9), yellow (10-14) or red (rest)."""
        if -1 < number < 10:
            return discord.Colour(int("00FF00", 16))
        if 10 <= number < 15:
            return discord.Colour(int("FFFF00", 16))
        return discord.Colour(int("FF0000", 16))
# Entry point: build the pool + bot and block until the coroutine finishes.
# NOTE(review): asyncio.get_event_loop() outside a running loop is deprecated
# on modern Python (3.10+); asyncio.run(run()) is the replacement — confirm
# the target interpreter version before changing.
loop = asyncio.get_event_loop()
loop.run_until_complete(run())
993,957 | 001eb264038c4141bc1c7db41580fbda64b10372 | import PIL
import matplotlib.pyplot as plt # single use of plt is commented out
import os.path
import PIL.ImageDraw
def redlogo(original_image, red, template):
    """Composite `original_image` through the `red` mask at template size.

    All three arguments must be PIL.Image objects. `original_image` is
    resized to template's dimensions, then pasted onto a fully transparent
    RGBA canvas using `red` (stamped onto a blank RGBA mask) as the alpha
    mask. Returns the new RGBA PIL.Image.

    Fixes vs original: the docstring described a rounded-corner helper with
    a `percent_of_side` parameter that does not exist here; the unused
    ImageDraw handle is removed.
    """
    width, height = template.size
    resized_image = original_image.resize((width, height))
    # Build the mask: start fully transparent, then stamp the red layer onto it.
    rounded_mask = PIL.Image.new('RGBA', (width, height))
    rounded_mask.paste(red, (0, 0))
    # Compose onto a transparent canvas so pixels outside the mask stay clear.
    result = PIL.Image.new('RGBA', template.size, (0, 0, 0, 0))
    result.paste(resized_image, (0, 0), mask=rounded_mask)
    return result
def bluelogo(original_image, blue, template):
    """Composite `original_image` through the `blue` mask at template size.

    All three arguments must be PIL.Image objects. `original_image` is
    resized to template's dimensions, then pasted onto a fully transparent
    RGBA canvas using `blue` (stamped onto a blank RGBA mask) as the alpha
    mask. Returns the new RGBA PIL.Image.

    Fixes vs original: the docstring described a rounded-corner helper with
    a `percent_of_side` parameter that does not exist here; the unused
    ImageDraw handle is removed.
    """
    width, height = template.size
    resized_image = original_image.resize((width, height))
    # Build the mask: start fully transparent, then stamp the blue layer onto it.
    rounded_mask = PIL.Image.new('RGBA', (width, height))
    rounded_mask.paste(blue, (0, 0))
    # Compose onto a transparent canvas so pixels outside the mask stay clear.
    result = PIL.Image.new('RGBA', template.size, (0, 0, 0, 0))
    result.paste(resized_image, (0, 0), mask=rounded_mask)
    return result
def get_images(directory=None, max_images=2):
    """Return (images, filenames) for image files found in `directory`.

    Uses the current working directory when `directory` is None. Files that
    PIL cannot open are silently skipped. `max_images` generalizes the
    original hard-coded limit of 2 files (the default preserves the old
    behavior); pass None to load every image in the directory.

    Returns a 2-tuple: a list of PIL.Image objects and a parallel list of
    their filenames.
    """
    if directory is None:
        directory = os.getcwd()  # use working directory if unspecified
    image_list = []  # aggregators
    file_list = []
    for entry in os.listdir(directory):
        if max_images is not None and len(file_list) >= max_images:
            break  # collected enough images
        absolute_filename = os.path.join(directory, entry)
        try:
            image = PIL.Image.open(absolute_filename)
        except IOError:
            continue  # not an image (or unreadable) — skip it
        image_list.append(image)
        file_list.append(entry)
    return image_list, file_list
def pepsi(directory=None):
    """Build the two-tone composite logo and save it as modified/final.png.

    Expects red.png, blue.png, template.png, 1.jpeg and 2.jpg to exist in
    `directory` (current working directory if not given). Creates the
    'modified' subdirectory if needed. Output is PNG to keep transparency.
    """
    if directory == None:
        directory = os.getcwd() # Use working directory if unspecified
    # Create a new directory 'modified'
    new_directory = os.path.join(directory, 'modified')
    try:
        os.mkdir(new_directory)
    except OSError:
        pass # if the directory already exists, proceed
    # Load all the images (NOTE(review): the returned lists are unused —
    # every input below is opened explicitly by filename instead).
    image_list, file_list = get_images(directory)
    # Open the fixed inputs: the two colour masks, the layout template,
    # and the two photos to composite.
    red = PIL.Image.open(os.path.join(directory, 'red.png'))
    blue = PIL.Image.open(os.path.join(directory, 'blue.png'))
    template =PIL.Image.open(os.path.join(directory, 'template.png'))
    topp = PIL.Image.open(os.path.join(directory, '1.jpeg'))
    bottomm = PIL.Image.open(os.path.join(directory, '2.jpg'))
    # Apply the red mask to the top photo and the blue mask to the bottom one.
    top = redlogo(topp,red,template)
    bottom = bluelogo(bottomm,blue,template)
    # NOTE(review): new_image aliases template, so the paste calls below
    # mutate the loaded template image in memory as a side effect.
    new_image = template
    new_image.paste(bottom,(0,0), mask=bottom)
    new_image.paste(top,(0,0), mask=top)
    # Save the altered image, using PNG to retain transparency.
    new_image_filename = os.path.join(new_directory, 'final' + '.png')
    new_image.save(new_image_filename) #9b:
993,958 | f2944e50601c9b451b4e303dcc8ecd2b3de1983b | # # -*- coding: utf-8 -*-
# import caffe
# import caffe.proto.caffe_pb2 as caffe_pb2
# from caffe import layers as L, params as P
# import numpy as np
# from utils import conv2d, depthwise_conv2d, bottleneck,heartmap,subnet,deconv_relu
#
# from utils import get_npy, decode_npy_model, decode_pth_model
# import os
# os.environ['CUDA_VISIBLE_DEVICES'] = '1'
#
# def mobilenetv2_centernet_inference(netspec, input_node):
# layer_cfg = [['conv2d', 32, 3, 2, 'relu', False, True, 'conv1'],
# ['bottleneck_0_0', 32, 16, 1, 'LinearBottleneck0_0'],
# ['bottleneck_1_0', 96,24, 2, 'LinearBottleneck1_0'],
# ['bottleneck_1_1', 144,24, 1, 'LinearBottleneck1_1'],
# ['bottleneck_2_0', 144,32, 2, 'LinearBottleneck2_0'],
# ['bottleneck_2_1', 192,32, 1, 'LinearBottleneck2_1'],
# ['bottleneck_3_0', 192,64, 2, 'LinearBottleneck3_0'],
# ['bottleneck_3_1', 384,64, 1, 'LinearBottleneck3_1'],
# ['bottleneck_3_2', 384,64, 1, 'LinearBottleneck3_2'],
# ['bottleneck_4_0', 384,96, 1, 'LinearBottleneck4_0'],
# ['bottleneck_4_1', 576,96, 1, 'LinearBottleneck4_1'],
# ['bottleneck_4_2', 576,96, 1, 'LinearBottleneck4_2'],
# ['bottleneck_5_0', 576,160, 2, 'LinearBottleneck5_0'],
# ['bottleneck_5_1', 960,160, 1, 'LinearBottleneck5_1'],
# ['bottleneck_6_0', 960,320, 1, 'LinearBottleneck6_0']]
# heartmap_cfg=[['heartmap', 32, 'MapHeatmap_2'],
# ['heartmap', 96, 'MapHeatmap_4'],
# ['heartmap', 320, 'MapHeatmap_6']]
#
# n = netspec
# blobs_lst = []
# layer = layer_cfg[0]
# n.conv = conv2d(n, input_node, num_output=32, kernel_size=3,
# stride=2,
# activation_fn='relu',
# bias_term=False,
# use_bn=True,
# scope='conv1')
# resnet_block = False
# layer = layer_cfg[1]
# n.bottleneck_0_0 = bottleneck(n, n.conv, c_e=layer[1], c_o=layer[2], stride=layer[3], scope=layer[4],
# resnet_block=resnet_block)
# layer = layer_cfg[2]
# n.bottleneck_1_0 = bottleneck(n, n.bottleneck_0_0, c_e=layer[1], c_o=layer[2], stride=layer[3], scope=layer[4],
# resnet_block=resnet_block)
# layer = layer_cfg[3]
# n.bottleneck_1_1 = bottleneck(n, n.bottleneck_1_0, c_e=layer[1], c_o=layer[2], stride=layer[3], scope=layer[4],
# resnet_block=resnet_block)
# layer = layer_cfg[4]
# n.bottleneck_2_0 = bottleneck(n, n.bottleneck_1_1, c_e=layer[1], c_o=layer[2], stride=layer[3], scope=layer[4],
# resnet_block=resnet_block)
# layer = layer_cfg[5]
# n.bottleneck_2_1 = bottleneck(n, n.bottleneck_2_0, c_e=layer[1], c_o=layer[2], stride=layer[3], scope=layer[4],
# resnet_block=resnet_block)
# layer = layer_cfg[6]
# n.bottleneck_3_0 = bottleneck(n, n.bottleneck_2_1, c_e=layer[1], c_o=layer[2], stride=layer[3], scope=layer[4],
# resnet_block=resnet_block)
# layer = layer_cfg[7]
# n.bottleneck_3_1 = bottleneck(n, n.bottleneck_3_0, c_e=layer[1], c_o=layer[2], stride=layer[3], scope=layer[4],
# resnet_block=resnet_block)
# layer = layer_cfg[8]
# n.bottleneck_3_2 = bottleneck(n, n.bottleneck_3_1, c_e=layer[1], c_o=layer[2], stride=layer[3], scope=layer[4],
# resnet_block=resnet_block)
# layer = layer_cfg[9]
# n.bottleneck_4_0 = bottleneck(n, n.bottleneck_3_2, c_e=layer[1], c_o=layer[2], stride=layer[3], scope=layer[4],
# resnet_block=resnet_block)
# layer = layer_cfg[10]
# n.bottleneck_4_1 = bottleneck(n, n.bottleneck_4_0, c_e=layer[1], c_o=layer[2], stride=layer[3], scope=layer[4],
# resnet_block=resnet_block)
# layer = layer_cfg[11]
# n.bottleneck_4_2 = bottleneck(n, n.bottleneck_4_1, c_e=layer[1], c_o=layer[2], stride=layer[3], scope=layer[4],
# resnet_block=resnet_block)
# layer = layer_cfg[12]
# n.bottleneck_5_0 = bottleneck(n, n.bottleneck_4_2, c_e=layer[1], c_o=layer[2], stride=layer[3], scope=layer[4],
# resnet_block=resnet_block)
# layer = layer_cfg[13]
# n.bottleneck_5_1 = bottleneck(n, n.bottleneck_5_0, c_e=layer[1], c_o=layer[2], stride=layer[3], scope=layer[4],
# resnet_block=resnet_block)
# layer = layer_cfg[14]
# n.bottleneck_6_0 = bottleneck(n, n.bottleneck_5_1, c_e=layer[1], c_o=layer[2], stride=layer[3], scope=layer[4],
# resnet_block=resnet_block)
#
# n.deconv1 = L.de
# heartmap_layer = heartmap_cfg[0]
# n.hm1, n.wh1, n.reg1 = subnet(n, n.bottleneck_2_1, c_i=heartmap_layer[1], scope=heartmap_layer[2])
# heartmap_layer = heartmap_cfg[1]
# n.hm2, n.wh2, n.reg2 = subnet(n, n.bottleneck_4_2, c_i=heartmap_layer[1], scope=heartmap_layer[2])
# heartmap_layer = heartmap_cfg[2]
# n.hm3, n.wh3, n.reg3 = subnet(n, n.bottleneck_6_0, c_i=heartmap_layer[1], scope=heartmap_layer[2])
#
# return n
#
#
# def create_model(depth_coe=1.):
# n = caffe.NetSpec()
#
# n.data = L.Input(shape=[dict(dim=[1, 3, 512, 512])], ntop=1)
# n = mobilenetv2_centernet_inference(n, n.data)
#
# return n
#
#
#
# def parse_caffemodel(caffemodel):
# MODEL_FILE = '/home/amax/workspace/pytorch_caffe/deploy.prototxt'
# # 预先训练好的caffe模型
# PRETRAIN_FILE = caffemodel
#
# # 保存参数的文件
# params_txt = 'params.txt'
# pf = open(params_txt, 'w')
#
# # 让caffe以测试模式读取网络参数
# net = caffe.Net(MODEL_FILE, PRETRAIN_FILE, caffe.TEST)
#
# # 遍历每一层
# for param_name in net.params.keys():
# # 权重参数
# weight = net.params[param_name][0].data
# # 偏置参数
# bias = net.params[param_name][1].data
#
# # 该层在prototxt文件中对应“top”的名称
# pf.write(param_name)
# pf.write('\n')
#
# # 写权重参数
# pf.write('\n' + param_name + '_weight:\n\n')
# # 权重参数是多维数组,为了方便输出,转为单列数组
# weight.shape = (-1, 1)
#
# for w in weight:
# pf.write('%ff, ' % w)
#
# # 写偏置参数
# pf.write('\n\n' + param_name + '_bias:\n\n')
# # 偏置参数是多维数组,为了方便输出,转为单列数组
# bias.shape = (-1, 1)
# for b in bias:
# pf.write('%ff, ' % b)
#
# pf.write('\n\n')
#
# pf.close()
#
# print('--')
#
# def gen_prototxt(model_name='MobileNet_CenterNet'):
# net = create_model()
# with open('%s.prototxt' % model_name, 'w') as f:
# f.write(str(net.to_proto()))
#
# def save_conv2caffe(weights=None, biases=None, conv_param=None):
# if conv_param is not None:
# if biases is not None:
# conv_param[1].data[...] = biases
# if weights is not None:
# conv_param[0].data[...] = weights
#
#
# def save_fc2caffe(weights, biases, fc_param):
# print(biases.size(), weights.size())
# print(fc_param[1].data.shape)
# print(fc_param[0].data.shape)
# fc_param[1].data[...] = biases
# fc_param[0].data[...] = weights
#
#
# def save_bn2caffe(running_mean=None, running_var=None, bn_param=None):
# if bn_param is not None:
# if running_mean is not None:
# bn_param[0].data[...] = running_mean
# if running_var is not None:
# bn_param[1].data[...] = running_var
# bn_param[2].data[...] = np.array([1.0])
#
#
# def save_scale2caffe(weights=None, biases=None, scale_param=None):
# if scale_param is not None:
# if biases is not None:
# scale_param[1].data[...] = biases
# if weights is not None:
# scale_param[0].data[...] = weights
# def map_torch_bn_layer_to_caffe_bn(bn_layer_name):
# layer_name = bn_layer_name.replace('bn', 'conv')
# lst = layer_name.split('.')
# if 'run' in layer_name:
# new_lst = lst[2:-1]+['BatchNorm']
# else:
# new_lst = lst[2:-1] + ['scale']
# caffe_bn_layer_name = '/'.join(new_lst)
# return caffe_bn_layer_name
#
#
#
# def save_caffemodel(model_name,pth_path=None):
# # if meta_file is not None and ckpt_file is not None:
# # convert_meta_to_npy(meta_file, ckpt_file, npy_file)
# pth_path = '/data1/exp/ctdet/mobilenetv2/model_last.pth'
# data_dict = decode_pth_model(pth_path)
# # data_dict = decode_npy_model(npy_file)
# keys = list(data_dict.keys())
# # var_name_lst = [key for key in keys if 'pfld_inference' in key]
# var_name_lst = keys
#
# net = caffe.Net('./%s.prototxt' % model_name, caffe.TEST)
#
# # idx_w_notBN = {'weight': 0, 'depthwise_weight': 0, 'bias': 1}
# # idx_w_BN = {'running_mean': 0, 'running_var': 1}
#
# for var_name in var_name_lst:
# if 'bottleneck' in var_name: # bottleneck layer
# if 'conv' in var_name:
# layer_name = '/'.join(var_name.split('.')[2:-1])
# if 'weight' in var_name:
# weight = data_dict[var_name]
# save_conv2caffe(weights=weight,conv_param=net.params[layer_name])
# elif 'bias' in var_name:
# bias = data_dict[var_name]
# save_conv2caffe(biases=bias,conv_param=net.params[layer_name])
# elif 'bn' in var_name:
# layer_name = map_torch_bn_layer_to_caffe_bn(var_name)
# if 'mean' in var_name:
# mean = data_dict[var_name]
# save_bn2caffe(running_mean=mean, bn_param=net.params[layer_name])
# elif 'var' in var_name:
# var = data_dict[var_name]
# save_bn2caffe(running_var=var, bn_param=net.params[layer_name])
# elif 'weight' in var_name:
# weight = data_dict[var_name]
# save_scale2caffe(weights=weight,scale_param=net.params[layer_name])
# elif 'bias' in var_name:
# bias = data_dict[var_name]
# save_scale2caffe(biases=bias,scale_param=net.params[layer_name])
# else:
# continue
# else:
# continue
# elif 'mapheatmap' in var_name: # heatmap layer
# layer_num = var_name.split('.')[-2]
# if layer_num in ['0','4']:
# #conv with bias
# layer_name='/'.join(var_name.split('.')[1:-1])
# if 'weight' in var_name:
# weight = data_dict[var_name]
# save_conv2caffe(weights=weight,conv_param=net.params[layer_name])
# elif 'bias' in var_name:
# bias = data_dict[var_name]
# save_conv2caffe(biases=bias, conv_param=net.params[layer_name])
#
# elif layer_num=='1':
# # bn
# if 'run' in var_name:
# layer_name = '/'.join(var_name.split('.')[1:3]+['0/BatchNorm'])
# else:
# layer_name = '/'.join(var_name.split('.')[1:3]+['0/scale'])
# if 'mean' in var_name:
# mean = data_dict[var_name]
# save_bn2caffe(running_mean=mean, bn_param=net.params[layer_name])
# elif 'var' in var_name:
# var = data_dict[var_name]
# save_bn2caffe(running_var=var, bn_param=net.params[layer_name])
# elif 'weight' in var_name:
# weight = data_dict[var_name]
# save_scale2caffe(weights=weight, scale_param=net.params[layer_name])
# elif 'bias' in var_name:
# bias = data_dict[var_name]
# save_scale2caffe(biases=bias, scale_param=net.params[layer_name])
# else:
# continue
# elif layer_num=='2':
# # conv without bias
# layer_name='/'.join(var_name.split('.')[1:-1])
# if 'weight' in var_name:
# weight = data_dict[var_name]
# save_conv2caffe(weights=weight,conv_param=net.params[layer_name])
# else:
# continue
# elif 'conv1' in var_name:
# if 'weight' in var_name:
# weight = data_dict[var_name]
# layer_name = var_name.split('.')[0]
# save_conv2caffe(weights=weight,conv_param= net.params[layer_name])
# elif 'bn1' in var_name:
# if 'run' in var_name:
# layer_name = 'conv1/BatchNorm'
# else:
# layer_name = 'conv1/scale'
# if 'mean' in var_name:
# mean = data_dict[var_name]
# save_bn2caffe(running_mean=mean, bn_param=net.params[layer_name])
# elif 'var' in var_name:
# var = data_dict[var_name]
# save_bn2caffe(running_var=var, bn_param=net.params[layer_name])
# elif 'weight' in var_name:
# weight = data_dict[var_name]
# save_scale2caffe(weights=weight, scale_param=net.params[layer_name])
# elif 'bias' in var_name:
# bias = data_dict[var_name]
# save_scale2caffe(biases=bias, scale_param=net.params[layer_name])
# else:
# continue
#
#
# net.save('./%s.caffemodel' % model_name)
#
#
# def test_model(model_name):
# import cv2
# import torch
# from mobilenetv2 import MobileNetV2
# checkpoint_path = '/data1/exp/ctdet/mobilenetv2/model_last.pth'
# image = cv2.imread('540e3f90874dfa66.jpg')
# # image = np.random.randn(112, 112, 3)*255
# # image = image.astype(np.uint8)
# input = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2RGB)
# # input = image.copy()[...,::-1]
# input = cv2.resize(input, (512, 512))
# # debug
# # input = input[:8, :8, :]
# input = input.astype(np.float32) / 256.0
# input = np.expand_dims(input, 0)
# torch_input = input.copy().transpose((0, 3, 1, 2))
# tensor_input = torch.from_numpy(torch_input)
# input_ = input.copy()
# heads = {'hm': 6, 'wh': 2, 'reg': 2}
# num_layers = 34
# model3 = MobileNetV2(heads, head_conv=64)
# model3.load_state_dict(torch.load(checkpoint_path)['state_dict'])
#
# pytorch_result = model3(tensor_input)
#
#
# net = caffe.Net('./%s.prototxt' % model_name, './%s.caffemodel' % model_name, caffe.TEST)
# input_ = input.transpose((0, 3, 1, 2))
#
# net.blobs['data'].data[...] = input_
# output_ = net.forward()
# # 把数据经过xxx层后的结果输出来
# out = net.blobs['Convolution1'].data[0]
# # print(output_)
# keys = list(output_.keys())
# print(output_[keys[0]].shape)
# caffe_output = output_[keys[0]]
#
# def cal_MPA(caffe_output, cmp_output):
# try:
# error = np.abs(caffe_output - cmp_output)
# except:
# cmp_output = cmp_output.transpose((0, 3, 1, 2))
# error = np.abs(caffe_output - cmp_output)
# zeros = np.zeros_like(error)
# error = np.where(np.less(error, 1e-5), zeros, error)
# print('error: ', np.sum(error))
# MPA = np.max(error) / np.max(np.abs(cmp_output)) * 100.
# print('MPA: %f' % MPA)
#
# cmp_output = pytorch_result
# cal_MPA(caffe_output, cmp_output)
#
# bin_file = '/data2/SharedVMs/nfs_sync/model_speed_test/mobileResult.bin'
# hisi_result = np.fromfile(bin_file, dtype=np.float32)
# hisi_result = np.reshape(hisi_result, [1, 196])
# cal_MPA(caffe_output, hisi_result)
#
# caffe_output.astype(dtype=np.float32)
# caffe_output.tofile('./data/caffe_varify_output.bin')
#
#
#
# print('Done.')
#
#
# def main():
# model_name = 'MobileNet_CenterNet'
# # gen_pfld_prototxt(model_name=model_name)
# npy_file = './mobilenet_centernet.npy'
# # meta_file = './TF_model/model.meta'
# # ckpt_file = './TF_model/model.ckpt-312'
# # save_caffemodel(npy_file, model_name=model_name,
# # meta_file=meta_file, ckpt_file=ckpt_file)
# save_caffemodel(model_name)
# test_model(model_name)
#
#
# if __name__ == '__main__':
# gen_prototxt()
# main()
# -*- coding: utf-8 -*-
import caffe
import caffe.proto.caffe_pb2 as caffe_pb2
from caffe import layers as L, params as P
import numpy as np
from utils import conv2d, depthwise_conv2d, bottleneck, heartmap, subnet, deconv
from utils import get_npy, decode_npy_model, decode_pth_model
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
def mobilenetv2_centernet_inference(netspec, input_node):
    """Attach the MobileNetV2 backbone plus CenterNet head to `netspec`.

    Structure: stem conv -> 14 linear bottlenecks -> 3 deconv upsampling
    stages -> one subnet producing the hm / wh / reg CenterNet outputs.
    Returns the (mutated) netspec.

    Refactor vs original: the 15 copy-pasted bottleneck calls and 3 deconv
    calls are driven from their config tables in loops. Assigning through
    setattr(n, name, top) is exactly what the explicit `n.name = top`
    statements did, so the generated blob/layer names are unchanged. The
    unused `blobs_lst` local is removed.
    """
    # [top name, expansion channels, output channels, stride, caffe scope]
    bottleneck_cfg = [['bottleneck_0_0', 32, 16, 1, 'LinearBottleneck0_0'],
                      ['bottleneck_1_0', 96, 24, 2, 'LinearBottleneck1_0'],
                      ['bottleneck_1_1', 144, 24, 1, 'LinearBottleneck1_1'],
                      ['bottleneck_2_0', 144, 32, 2, 'LinearBottleneck2_0'],
                      ['bottleneck_2_1', 192, 32, 1, 'LinearBottleneck2_1'],
                      ['bottleneck_3_0', 192, 64, 2, 'LinearBottleneck3_0'],
                      ['bottleneck_3_1', 384, 64, 1, 'LinearBottleneck3_1'],
                      ['bottleneck_3_2', 384, 64, 1, 'LinearBottleneck3_2'],
                      ['bottleneck_4_0', 384, 96, 1, 'LinearBottleneck4_0'],
                      ['bottleneck_4_1', 576, 96, 1, 'LinearBottleneck4_1'],
                      ['bottleneck_4_2', 576, 96, 1, 'LinearBottleneck4_2'],
                      ['bottleneck_5_0', 576, 160, 2, 'LinearBottleneck5_0'],
                      ['bottleneck_5_1', 960, 160, 1, 'LinearBottleneck5_1'],
                      ['bottleneck_6_0', 960, 320, 1, 'LinearBottleneck6_0']]
    # Residual (identity-add) flags per bottleneck: True exactly for the
    # stride-1 blocks whose input and output channel counts match.
    use_residual = [False, False, True, False, True, False, True,
                    True, False, True, True, False, True, False]
    # [output channels, caffe scope] for the three upsampling stages.
    deconv_cfg = [['deconv1', 160, 'deconv1'],
                  ['deconv2', 160, 'deconv2'],
                  ['deconv3', 64, 'deconv3']]
    n = netspec
    n.conv = conv2d(n, input_node, num_output=32, kernel_size=3,
                    stride=2,
                    activation_fn='relu',
                    bias_term=False,
                    use_bn=True,
                    scope='conv1')
    prev = n.conv
    for (name, c_e, c_o, stride, scope), residual in zip(bottleneck_cfg, use_residual):
        prev = bottleneck(n, prev, c_e=c_e, c_o=c_o, stride=stride, scope=scope,
                          resnet_block=residual)
        setattr(n, name, prev)
    for name, c_o, scope in deconv_cfg:
        prev = deconv(n, prev, c_o, scope=scope)
        setattr(n, name, prev)
    n.hm, n.wh, n.reg = subnet(n, n.deconv3, c_i=64, scope='heatmap_layer')
    return n
def create_model(depth_coe=1.):
    """Build the MobileNetV2-CenterNet inference NetSpec.

    Creates a caffe NetSpec with a fixed 1x3x512x512 input blob and wires the
    full backbone + detection heads through mobilenetv2_centernet_inference.
    NOTE(review): depth_coe is currently unused — presumably intended as a
    channel-width multiplier; confirm before relying on it.
    """
    n = caffe.NetSpec()
    n.data = L.Input(shape=[dict(dim=[1, 3, 512, 512])], ntop=1)
    n = mobilenetv2_centernet_inference(n, n.data)
    return n
def parse_caffemodel(caffemodel):
    """Dump every layer's weights and biases of a caffemodel to params.txt.

    caffemodel: path to the pretrained .caffemodel whose parameters are read
        against the (hard-coded) deploy.prototxt network definition.

    Fix: the output file is now opened with a context manager so the handle
    is closed even if caffe raises mid-dump (it was previously left open on
    any exception).  Comments translated to English.
    """
    MODEL_FILE = '/home/amax/workspace/pytorch_caffe/deploy.prototxt'
    # Pretrained caffe model to read the parameters from.
    PRETRAIN_FILE = caffemodel
    # Text file the parameters are dumped into.
    params_txt = 'params.txt'
    # Load the network in test mode so caffe reads the trained parameters.
    net = caffe.Net(MODEL_FILE, PRETRAIN_FILE, caffe.TEST)
    with open(params_txt, 'w') as pf:
        # Walk every parameterised layer.
        for param_name in net.params.keys():
            # Weight and bias blobs for this layer.
            weight = net.params[param_name][0].data
            bias = net.params[param_name][1].data
            # The layer name as it appears as "top" in the prototxt.
            pf.write(param_name)
            pf.write('\n')
            # Weights: flatten the multi-dimensional array to a single
            # column so it is easy to print.
            pf.write('\n' + param_name + '_weight:\n\n')
            weight.shape = (-1, 1)
            for w in weight:
                pf.write('%ff, ' % w)
            # Biases: flattened the same way.
            pf.write('\n\n' + param_name + '_bias:\n\n')
            bias.shape = (-1, 1)
            for b in bias:
                pf.write('%ff, ' % b)
            pf.write('\n\n')
    print('--')
def gen_prototxt(model_name='new_MobileNet_CenterNet'):
    """Serialize the generated NetSpec to ./<model_name>.prototxt."""
    proto_text = str(create_model().to_proto())
    with open('%s.prototxt' % model_name, 'w') as proto_file:
        proto_file.write(proto_text)
def save_conv2caffe(weights=None, biases=None, conv_param=None):
    """Copy convolution weights/biases into a caffe parameter blob pair.

    conv_param[0] receives the weights and conv_param[1] the biases; any
    argument left as None is skipped, and a None conv_param is a no-op.
    """
    if conv_param is None:
        return
    if weights is not None:
        conv_param[0].data[...] = weights
    if biases is not None:
        conv_param[1].data[...] = biases
def save_deconv2caffe(weights=None, biases=None, deconv_param=None):
    """Copy deconvolution weights/biases into a caffe parameter blob pair.

    deconv_param[0] takes the weights and deconv_param[1] the biases; None
    arguments are skipped and a None deconv_param makes the call a no-op.
    """
    if deconv_param is None:
        return
    if weights is not None:
        deconv_param[0].data[...] = weights
    if biases is not None:
        deconv_param[1].data[...] = biases
def save_fc2caffe(weights, biases, fc_param):
    """Copy fully-connected weights and biases into a caffe blob pair.

    fc_param[0] receives the weights and fc_param[1] the biases.

    Fix: removed the leftover debug prints, which called ``.size()`` on the
    inputs (a torch-Tensor-only method) and therefore crashed whenever a
    plain numpy array was passed; the function now accepts any array-like
    that numpy can assign from.
    """
    fc_param[1].data[...] = biases
    fc_param[0].data[...] = weights
def save_bn2caffe(running_mean=None, running_var=None, bn_param=None):
    """Copy BatchNorm statistics into a caffe BatchNorm parameter triple.

    bn_param[0] takes the running mean and bn_param[1] the running variance
    (each skipped when None).  bn_param[2] — caffe's third BatchNorm blob,
    the moving-average scale factor — is always reset to 1.0 so the stored
    statistics are used unscaled.  A None bn_param is a no-op.
    """
    if bn_param is None:
        return
    if running_mean is not None:
        bn_param[0].data[...] = running_mean
    if running_var is not None:
        bn_param[1].data[...] = running_var
    bn_param[2].data[...] = np.array([1.0])
def save_scale2caffe(weights=None, biases=None, scale_param=None):
    """Copy Scale-layer weights/biases into a caffe parameter blob pair.

    scale_param[0] takes the weights (gamma) and scale_param[1] the biases
    (beta); None arguments are skipped and a None scale_param is a no-op.
    """
    if scale_param is None:
        return
    if weights is not None:
        scale_param[0].data[...] = weights
    if biases is not None:
        scale_param[1].data[...] = biases
def map_torch_bn_layer_to_caffe_bn(bn_layer_name):
    """Translate a PyTorch BN parameter name to the matching caffe layer path.

    'bn' is rewritten to 'conv' so the name lines up with the convolution the
    BN follows.  Running statistics (detected via the 'run' substring, i.e.
    running_mean / running_var) map to the 'BatchNorm' sub-layer; learnable
    weight/bias map to the 'scale' sub-layer.  The first two dotted
    components and the final parameter name are dropped from the path.
    """
    renamed = bn_layer_name.replace('bn', 'conv')
    parts = renamed.split('.')
    suffix = 'BatchNorm' if 'run' in renamed else 'scale'
    return '/'.join(parts[2:-1] + [suffix])
def save_caffemodel(model_name, pth_path=None):
    """Copy weights from a PyTorch checkpoint into ./<model_name>.caffemodel.

    model_name: basename of the prototxt (read) and caffemodel (written) in
        the current directory.
    pth_path: PyTorch .pth checkpoint to convert; defaults to the training
        run's model_best.pth when not given.  (Fix: the argument used to be
        unconditionally overwritten by the hard-coded path, so callers could
        never choose a different checkpoint.)
    """
    if pth_path is None:
        pth_path = '/data1/exp/ctdet/default/model_best.pth'
    data_dict = decode_pth_model(pth_path)
    var_name_lst = list(data_dict.keys())
    net = caffe.Net('./%s.prototxt' % model_name, caffe.TEST)
    for var_name in var_name_lst:
        if 'bottleneck' in var_name:  # backbone bottleneck layers
            if 'conv' in var_name:
                # Convolution: caffe layer name is the dotted torch name
                # minus the first two components and the leaf parameter name.
                layer_name = '/'.join(var_name.split('.')[2:-1])
                if 'weight' in var_name:
                    save_conv2caffe(weights=data_dict[var_name], conv_param=net.params[layer_name])
                elif 'bias' in var_name:
                    save_conv2caffe(biases=data_dict[var_name], conv_param=net.params[layer_name])
            elif 'bn' in var_name:
                # BatchNorm: running stats go to the BatchNorm blob,
                # weight/bias to the companion scale blob.
                layer_name = map_torch_bn_layer_to_caffe_bn(var_name)
                if 'mean' in var_name:
                    save_bn2caffe(running_mean=data_dict[var_name], bn_param=net.params[layer_name])
                elif 'var' in var_name:
                    save_bn2caffe(running_var=data_dict[var_name], bn_param=net.params[layer_name])
                elif 'weight' in var_name:
                    save_scale2caffe(weights=data_dict[var_name], scale_param=net.params[layer_name])
                elif 'bias' in var_name:
                    save_scale2caffe(biases=data_dict[var_name], scale_param=net.params[layer_name])
                else:
                    continue
            else:
                continue
        elif 'mapheatmap' in var_name:  # heatmap head sub-layers
            layer_num = var_name.split('.')[-2]
            if layer_num in ['0', '4']:
                # conv with bias
                layer_name = '/'.join(var_name.split('.')[1:-1])
                if 'weight' in var_name:
                    save_conv2caffe(weights=data_dict[var_name], conv_param=net.params[layer_name])
                elif 'bias' in var_name:
                    save_conv2caffe(biases=data_dict[var_name], conv_param=net.params[layer_name])
            elif layer_num == '1':
                # batch norm (+ companion scale)
                if 'run' in var_name:
                    layer_name = '/'.join(var_name.split('.')[1:3] + ['0/BatchNorm'])
                else:
                    layer_name = '/'.join(var_name.split('.')[1:3] + ['0/scale'])
                if 'mean' in var_name:
                    save_bn2caffe(running_mean=data_dict[var_name], bn_param=net.params[layer_name])
                elif 'var' in var_name:
                    save_bn2caffe(running_var=data_dict[var_name], bn_param=net.params[layer_name])
                elif 'weight' in var_name:
                    save_scale2caffe(weights=data_dict[var_name], scale_param=net.params[layer_name])
                elif 'bias' in var_name:
                    save_scale2caffe(biases=data_dict[var_name], scale_param=net.params[layer_name])
                else:
                    continue
            elif layer_num == '2':
                # conv without bias
                layer_name = '/'.join(var_name.split('.')[1:-1])
                if 'weight' in var_name:
                    save_conv2caffe(weights=data_dict[var_name], conv_param=net.params[layer_name])
            else:
                continue
        elif 'conv1' in var_name:  # stem convolution (weights only)
            if 'weight' in var_name:
                save_conv2caffe(weights=data_dict[var_name], conv_param=net.params[var_name.split('.')[0]])
        elif 'bn1' in var_name:  # stem batch norm (+ companion scale)
            if 'run' in var_name:
                layer_name = 'conv1/BatchNorm'
            else:
                layer_name = 'conv1/scale'
            if 'mean' in var_name:
                save_bn2caffe(running_mean=data_dict[var_name], bn_param=net.params[layer_name])
            elif 'var' in var_name:
                save_bn2caffe(running_var=data_dict[var_name], bn_param=net.params[layer_name])
            elif 'weight' in var_name:
                save_scale2caffe(weights=data_dict[var_name], scale_param=net.params[layer_name])
            elif 'bias' in var_name:
                save_scale2caffe(biases=data_dict[var_name], scale_param=net.params[layer_name])
            else:
                continue
        elif var_name.split('.')[0] in ['wh', 'hm', 'reg']:  # detection heads
            layer_name = '/'.join(['heatmap_layer'] + var_name.split('.')[:2])
            if 'weight' in var_name:
                save_conv2caffe(weights=data_dict[var_name], conv_param=net.params[layer_name])
            else:
                save_conv2caffe(biases=data_dict[var_name], conv_param=net.params[layer_name])
    # The three upsampling deconv layers are copied directly by name
    # (previously three hand-unrolled, identical copy blocks).
    for i in (1, 2, 3):
        name = 'deconv%d' % i
        net.params[name][0].data[...] = data_dict['%s.weight' % name]
        net.params[name][1].data[...] = data_dict['%s.bias' % name]
    net.save('./%s.caffemodel' % model_name)
def test_model(model_name):
    """Run one image through the PyTorch model and the converted caffe net
    and report, per output head, how far apart the results are.

    model_name: basename of the ./<model_name>.prototxt / .caffemodel pair.
    Side effects: reads a hard-coded checkpoint and a sample jpg from disk
    and prints shapes plus per-head error statistics.
    """
    import cv2
    import torch
    from mobilenetv2 import MobileNetV2
    checkpoint_path = '/data1/exp/ctdet/default/model_best.pth'
    image = cv2.imread('540e3f90874dfa66.jpg')
    # image = np.random.randn(112, 112, 3)*255
    # image = image.astype(np.uint8)
    # Preprocess: BGR->RGB, resize to the network's 512x512 input, scale to
    # roughly [0, 1) (note: divides by 256, not 255), add a batch axis.
    input = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2RGB)
    # input = image.copy()[...,::-1]
    input = cv2.resize(input, (512, 512))
    # debug
    # input = input[:8, :8, :]
    input = input.astype(np.float32) / 256.0
    input = np.expand_dims(input, 0)
    # NHWC -> NCHW for the torch model.
    torch_input = input.copy().transpose((0, 3, 1, 2))
    tensor_input = torch.from_numpy(torch_input)
    input_ = input.copy()
    # CenterNet heads: 6-class heatmap, box width/height, center offset.
    heads = {'hm': 6, 'wh': 2, 'reg': 2}
    num_layers = 34  # NOTE(review): unused here — leftover from another backbone?
    model3 = MobileNetV2(heads, head_conv=64)
    data_dict = torch.load(checkpoint_path)['state_dict']
    # data_dict.pop('bn1.weight')
    # data_dict.pop('bn1.bias')
    model3.load_state_dict(data_dict)
    model3.train(False)  # eval mode for a fair comparison
    pytorch_result = model3(tensor_input)
    # Run the converted caffe net on the same (NCHW) input.
    net = caffe.Net('./%s.prototxt' % model_name, './%s.caffemodel' % model_name, caffe.TEST)
    input_ = input.transpose((0, 3, 1, 2))
    net.blobs['data'].data[...] = input_
    output_ = net.forward()
    # output_ = net.forward(end='deconv1') # fetch the output of a specific layer
    # print(output_)
    keys = list(output_.keys())
    print(output_[keys[0]].shape)
    caffe_output = output_[keys[0]]
    def cal_MPA(caffe_output, cmp_output):
        """Print summed error and max-percentage error between two outputs.

        If the shapes don't broadcast, cmp_output is retried transposed from
        NHWC to NCHW.  Differences below 1e-5 are treated as zero.
        """
        try:
            error = np.abs(caffe_output - cmp_output)
        except:
            cmp_output = cmp_output.transpose((0, 3, 1, 2))
            error = np.abs(caffe_output - cmp_output)
        zeros = np.zeros_like(error)
        error = np.where(np.less(error, 1e-5), zeros, error)
        print('error: ', np.sum(error))
        MPA = np.max(error) / np.max(np.abs(cmp_output)) * 100.
        print('MPA: %f' % MPA)
    # cmp_output = pytorch_result.cpu().detach().numpy()
    # cal_MPA(caffe_output, cmp_output)
    # Compare every caffe output blob against the matching torch head.
    for k,val in output_.items():
        cmp_output = pytorch_result[0][k].cpu().detach().numpy()
        cal_MPA(val, cmp_output)
    # bin_file = '/data2/SharedVMs/nfs_sync/model_speed_test/mobileResult.bin'
    # hisi_result = np.fromfile(bin_file, dtype=np.float32)
    # hisi_result = np.reshape(hisi_result, [1, 196])
    # cal_MPA(caffe_output, hisi_result)
    #
    # caffe_output.astype(dtype=np.float32)
    # caffe_output.tofile('./data/caffe_varify_output.bin')
    print('Done.')
def main():
    """Convert the trained PyTorch checkpoint to caffe, then compare outputs."""
    model_name = 'new_MobileNet_CenterNet'
    save_caffemodel(model_name)
    test_model(model_name)
# Script entry point: regenerate the prototxt, then run the full conversion
# and the cross-framework verification.
if __name__ == "__main__":
    gen_prototxt()
    main()
|
993,959 | d0138e451a25e1b67810b4a7efae176aed743c07 | from django.http import HttpResponse
from django.shortcuts import render
# Create your views here.
def home_view(request, *args, **kwargs):
    """Render the home page template with a small demo context."""
    print(request.user)  # debug: log the requesting user to stdout
    my_context = {
        "name": "I am in Home page",
        "list": [8, 9, 10, "Abc"],
    }
    return render(request, "home.html", my_context)
def about_view(request, *args, **kwargs):
    """Render the about page template."""
    my_context = {
        "name": "I am in About page",
    }
    return render(request, "about.html", my_context)
|
993,960 | a16cccec4570cced3e6bcbd755a50bbd88f0c716 | from datetime import datetime
from signature import signature
class Bank(object):
    """A single cash bank (drawer) with its sign-out lifecycle state.

    Lifecycle: made -> signed out ("out in the park") -> signed back in ->
    marked returned -> made again.  Each completed cycle is archived in the
    sign-out log, keyed by the return timestamp.
    """

    def __init__(self, number, type):
        self.__number = str(number)  # bank identifier, stored as a string
        self.__type = type           # e.g. "bar", "fb", "tkg", "retail"
        self.__out = False           # currently out in the park?
        self.__signed_in = True      # physically signed back in?
        self.__returned = True       # marked returned (cycle closed)?
        self.__made = False          # prepared and ready to sign out?
        self.__amount = "350"        # cash amount, kept as a string
        self.__notes = ""            # free-form notes from the last sign-out
        self.__out_info = {}         # details of the current/last sign-out
        self.__out_log = {}          # archive of completed sign-outs

    def number(self):
        """Returns bank number"""
        return self.__number

    def banktype(self):
        """Returns bank type"""
        return self.__type

    def out(self):
        """Returns true if out in the park"""
        return self.__out

    def signed_in(self):
        """Returns true if it has been signed back in, but still considered out in the park"""
        return self.__signed_in

    def returned(self):
        """Returns true if it has been marked return"""
        return self.__returned

    def made(self):
        """Returns true if made and ready to be signed out"""
        return self.__made

    def amount(self):
        """Returns the amount of money in the bank"""
        return self.__amount

    def notes(self):
        """Returns the notes for the bank"""
        return self.__notes

    def signoutinfo(self):
        """Returns the current sign out info for the bank"""
        return self.__out_info

    def signoutlog(self):
        """Returns the log of sign out info"""
        return self.__out_log

    def signout(self, name, location, notes):
        """Signs out the bank
        name = name of person signing it out
        location = where they're taking it
        notes = special notes related to the bank"""
        now = datetime.now()
        ready = (not self.__out and self.__signed_in
                 and self.__returned and self.__made)
        if not ready:
            return
        signature(self.__type, self.__number, now)
        self.__out = True
        self.__signed_in = False
        self.__returned = False
        self.__made = False
        self.__notes = notes
        self.__out_info = {
            "Name_Out": name,
            "Location": location,
            "Amount": self.__amount,
            "Time_Out": now,
            "Name_In": None,
            "Time_In": None,
            "Returned": self.__returned,
        }
        if self.__notes != "":
            self.__out_info["Notes"] = self.__notes

    def signin(self, name):
        """Signs the bank back in
        name = person signing it in"""
        now = datetime.now()
        if self.__out and not (self.__signed_in or self.__returned):
            signature(self.__type, self.__number, now)
            self.__signed_in = True
            self.__out_info["Name_In"] = name
            self.__out_info["Time_In"] = now

    def returnbank(self):
        """Marks the bank returned and archives the sign-out record"""
        now = datetime.now()
        if self.__out and self.__signed_in and not self.__returned:
            self.__returned = True
            self.__out = False
            self.__out_info["Returned"] = self.__returned
            self.__out_info["Returned_Time"] = now
            log_key = "{}/{}/{} {}:{}".format(now.month, now.day, now.year, now.hour, now.minute)
            self.__out_log[log_key] = self.__out_info

    def unreturnbank(self):
        """Unmarks a bank returned incase of accident"""
        pass  # not implemented yet

    def makebank(self, amount):
        """Make the bank and prepare it to be signed out
        amount = amount of money in bank"""
        if self.__returned and not self.__made:
            self.__amount = amount
            self.__made = True

    def __str__(self):
        return "{} #{}".format(self.__type, self.__number)
#REMOVE PRINT STATEMENTS AND TURN THEM INTO RETURNS SO ERROR MESSAGES CAN BE CREATED
class Bar(object):
    """Registry of bar cash banks plus the locations they may be taken to.

    Mutating/lookup methods return ``(success, code)`` tuples; code 0 means
    "not found" / "duplicate", non-zero codes flag state conflicts
    (documented inline per method).

    Fix: ``get_notes`` previously had an ``if not found:`` suite containing
    only a commented-out print — a SyntaxError that prevented the whole
    module from importing.  It now returns None for unknown banks.  Dead
    ``found`` flags were removed (every matching branch already returns).
    """

    def __init__(self):
        self.__banks = []      # Bank instances managed by this department
        self.__locations = []  # allowed sign-out destinations

    def addbank(self, number):
        """Adds a new bank to the list"""
        for bank in self.__banks:
            if bank.number() == str(number):
                return False, 0  # bank already exists
        self.__banks.append(Bank(number, "bar"))
        return True, 0

    def removebank(self, number):
        """Removes a bank from the list"""
        for bank in self.__banks:
            if bank.number() == str(number):
                if bank.returned() and not bank.made():
                    self.__banks.remove(bank)
                    return True, 0
                elif not bank.returned():
                    return False, 1  # bank not returned
                elif bank.made():
                    return False, 2  # bank already made
        return False, 0  # bank not found

    def banks(self):
        """Returns the list of banks"""
        return self.__banks

    def addlocation(self, location):
        """Add a location to the list"""
        if location in self.__locations:
            return False, 0  # location already exists
        self.__locations.append(location)
        return True, 0

    def removelocation(self, location):
        """Removes a location from the list"""
        if location in self.__locations:
            self.__locations.remove(location)
            return True, 0
        return False, 0  # location not found

    def locations(self):
        """Returns a list of locations"""
        return self.__locations

    def signout(self, name, location, number, notes=""):
        """
        Signs out a bank
        name = person signing out
        location = the location the person is bringing the bank
        number = bank number being taken
        notes = any special information needed at the time of sign out
        """
        for bank in self.__banks:
            if bank.number() == str(number):
                if not bank.out() and bank.signed_in() and bank.returned():
                    bank.signout(name, location, notes)
                    return True, 0
                elif bank.out():
                    return False, 1  # bank is out
                elif not bank.signed_in():
                    return False, 2  # bank not signed in
                elif not bank.returned():
                    return False, 3  # bank not returned
        return False, 0  # bank not found

    def signedout(self):
        """Returns a list of all banks currently out"""
        return [bank for bank in self.__banks if bank.out()]

    def signin(self, name, number):
        """
        Signs a bank back in
        name = person signing it in
        number = bank being signed in
        """
        for bank in self.__banks:
            if bank.number() == str(number):
                if bank.out() and not bank.signed_in() and not bank.returned():
                    bank.signin(name)
                    return True, 0
                elif not bank.out():
                    return False, 1  # bank is not out
                elif bank.signed_in():
                    return False, 2  # bank already signed in
                elif bank.returned():
                    return False, 3  # bank already returned
        return False, 0  # bank not found

    def signedin(self):
        """Returns a list of banks that have been signed in"""
        return [bank for bank in self.__banks if bank.signed_in()]

    def returnbank(self, number):
        """
        Mark a bank returned
        number = bank being marked
        """
        for bank in self.__banks:
            if bank.number() == str(number):
                if bank.signed_in() and not bank.returned() and bank.out():
                    bank.returnbank()
                    return True, 0
                elif not bank.signed_in():
                    return False, 1  # bank not signed in
                elif bank.returned():
                    return False, 2  # bank already returned
                elif not bank.out():
                    return False, 3  # bank is not out
        return False, 0  # bank not found

    def returnedbanks(self):
        """List of banks that have been returned"""
        return [bank for bank in self.__banks if bank.returned()]

    def notreturnedbanks(self):
        """List of signed-in banks that have not been returned"""
        return [bank for bank in self.signedin() if not bank.returned()]

    def tobemade(self):
        """Returns a list of banks ready to be made"""
        return [bank for bank in self.__banks if bank.returned() and not bank.made()]

    def makebank(self, number, amount="350"):
        """
        Makes a bank, i.e. the bank is prepared and ready to be signed out
        number = bank being made
        amount = how much money put into the bank
        """
        for bank in self.__banks:
            if bank.number() == str(number):
                if bank.returned() and not bank.made():
                    bank.makebank(amount)
                    return True, 0
                elif not bank.returned():
                    return False, 1  # bank not returned
                elif bank.made():
                    return False, 2  # bank already made
        return False, 0  # bank not found

    def madebanks(self):
        """Returns a list of banks that are made and ready to be signed out"""
        return [bank for bank in self.__banks if bank.made()]

    def get_notes(self, number):
        """
        Returns the notes for a given bank, or None if the bank is unknown
        number = bank number of notes wanted
        """
        for bank in self.__banks:
            if bank.number() == str(number):
                return bank.notes()
        return None  # bank not found

    def signoutinfo(self, number):
        """Returns the current sign out info of the bank"""
        for bank in self.__banks:
            if bank.number() == str(number):
                return True, bank.signoutinfo()
        return False, 0  # bank not found

    def banklog(self, number):
        """Returns the bank log"""
        for bank in self.__banks:
            if bank.number() == str(number):
                return True, bank.signoutlog()
        return False, 0  # bank not found

    def __str__(self):
        return "Bar Bank object"
class FB(object):
    """Registry of food-and-beverage cash banks plus allowed locations.

    Mutating/lookup methods return ``(success, code)`` tuples; code 0 means
    "not found" / "duplicate", non-zero codes flag state conflicts
    (documented inline per method).

    Fix: ``get_notes`` previously had an ``if not found:`` suite containing
    only a commented-out print — a SyntaxError that prevented the whole
    module from importing.  It now returns None for unknown banks.  Dead
    ``found`` flags were removed (every matching branch already returns).
    """

    def __init__(self):
        self.__banks = []      # Bank instances managed by this department
        self.__locations = []  # allowed sign-out destinations

    def addbank(self, number):
        """Adds a new bank to the list"""
        for bank in self.__banks:
            if bank.number() == str(number):
                return False, 0  # bank already exists
        self.__banks.append(Bank(number, "fb"))
        return True, 0

    def removebank(self, number):
        """Removes a bank from the list"""
        for bank in self.__banks:
            if bank.number() == str(number):
                if bank.returned() and not bank.made():
                    self.__banks.remove(bank)
                    return True, 0
                elif not bank.returned():
                    return False, 1  # bank not returned
                elif bank.made():
                    return False, 2  # bank already made
        return False, 0  # bank not found

    def banks(self):
        """Returns the list of banks"""
        return self.__banks

    def addlocation(self, location):
        """Add a location to the list"""
        if location in self.__locations:
            return False, 0  # location already exists
        self.__locations.append(location)
        return True, 0

    def removelocation(self, location):
        """Removes a location from the list"""
        if location in self.__locations:
            self.__locations.remove(location)
            return True, 0
        return False, 0  # location not found

    def locations(self):
        """Returns a list of locations"""
        return self.__locations

    def signout(self, name, location, number, notes=""):
        """
        Signs out a bank
        name = person signing out
        location = the location the person is bringing the bank
        number = bank number being taken
        notes = any special information needed at the time of sign out
        """
        for bank in self.__banks:
            if bank.number() == str(number):
                if not bank.out() and bank.signed_in() and bank.returned():
                    bank.signout(name, location, notes)
                    return True, 0
                elif bank.out():
                    return False, 1  # bank is out
                elif not bank.signed_in():
                    return False, 2  # bank not signed in
                elif not bank.returned():
                    return False, 3  # bank not returned
        return False, 0  # bank not found

    def signedout(self):
        """Returns a list of all banks currently out"""
        return [bank for bank in self.__banks if bank.out()]

    def signin(self, name, number):
        """
        Signs a bank back in
        name = person signing it in
        number = bank being signed in
        """
        for bank in self.__banks:
            if bank.number() == str(number):
                if bank.out() and not bank.signed_in() and not bank.returned():
                    bank.signin(name)
                    return True, 0
                elif not bank.out():
                    return False, 1  # bank is not out
                elif bank.signed_in():
                    return False, 2  # bank already signed in
                elif bank.returned():
                    return False, 3  # bank already returned
        return False, 0  # bank not found

    def signedin(self):
        """Returns a list of banks that have been signed in"""
        return [bank for bank in self.__banks if bank.signed_in()]

    def returnbank(self, number):
        """
        Mark a bank returned
        number = bank being marked
        """
        for bank in self.__banks:
            if bank.number() == str(number):
                if bank.signed_in() and not bank.returned() and bank.out():
                    bank.returnbank()
                    return True, 0
                elif not bank.signed_in():
                    return False, 1  # bank not signed in
                elif bank.returned():
                    return False, 2  # bank already returned
                elif not bank.out():
                    return False, 3  # bank is not out
        return False, 0  # bank not found

    def returnedbanks(self):
        """List of banks that have been returned"""
        return [bank for bank in self.__banks if bank.returned()]

    def notreturnedbanks(self):
        """List of signed-in banks that have not been returned"""
        return [bank for bank in self.signedin() if not bank.returned()]

    def tobemade(self):
        """Returns a list of banks ready to be made"""
        return [bank for bank in self.__banks if bank.returned() and not bank.made()]

    def makebank(self, number, amount="350"):
        """
        Makes a bank, i.e. the bank is prepared and ready to be signed out
        number = bank being made
        amount = how much money put into the bank
        """
        for bank in self.__banks:
            if bank.number() == str(number):
                if bank.returned() and not bank.made():
                    bank.makebank(amount)
                    return True, 0
                elif not bank.returned():
                    return False, 1  # bank not returned
                elif bank.made():
                    return False, 2  # bank already made
        return False, 0  # bank not found

    def madebanks(self):
        """Returns a list of banks that are made and ready to be signed out"""
        return [bank for bank in self.__banks if bank.made()]

    def get_notes(self, number):
        """
        Returns the notes for a given bank, or None if the bank is unknown
        number = bank number of notes wanted
        """
        for bank in self.__banks:
            if bank.number() == str(number):
                return bank.notes()
        return None  # bank not found

    def signoutinfo(self, number):
        """Returns the current sign out info of the bank"""
        for bank in self.__banks:
            if bank.number() == str(number):
                return True, bank.signoutinfo()
        return False, 0  # bank not found

    def banklog(self, number):
        """Returns the bank log"""
        for bank in self.__banks:
            if bank.number() == str(number):
                return True, bank.signoutlog()
        return False, 0  # bank not found

    def __str__(self):
        return "F&B Bank object"
class TKG(object):
    """Registry of ticketing cash banks plus allowed locations.

    Mutating/lookup methods return ``(success, code)`` tuples; code 0 means
    "not found" / "duplicate", non-zero codes flag state conflicts
    (documented inline per method).

    Fix: ``get_notes`` previously had an ``if not found:`` suite containing
    only a commented-out print — a SyntaxError that prevented the whole
    module from importing.  It now returns None for unknown banks.  Dead
    ``found`` flags were removed (every matching branch already returns).
    """

    def __init__(self):
        self.__banks = []      # Bank instances managed by this department
        self.__locations = []  # allowed sign-out destinations

    def addbank(self, number):
        """Adds a new bank to the list"""
        for bank in self.__banks:
            if bank.number() == str(number):
                return False, 0  # bank already exists
        self.__banks.append(Bank(number, "tkg"))
        return True, 0

    def removebank(self, number):
        """Removes a bank from the list"""
        for bank in self.__banks:
            if bank.number() == str(number):
                if bank.returned() and not bank.made():
                    self.__banks.remove(bank)
                    return True, 0
                elif not bank.returned():
                    return False, 1  # bank not returned
                elif bank.made():
                    return False, 2  # bank already made
        return False, 0  # bank not found

    def banks(self):
        """Returns the list of banks"""
        return self.__banks

    def addlocation(self, location):
        """Add a location to the list"""
        if location in self.__locations:
            return False, 0  # location already exists
        self.__locations.append(location)
        return True, 0

    def removelocation(self, location):
        """Removes a location from the list"""
        if location in self.__locations:
            self.__locations.remove(location)
            return True, 0
        return False, 0  # location not found

    def locations(self):
        """Returns a list of locations"""
        return self.__locations

    def signout(self, name, location, number, notes=""):
        """
        Signs out a bank
        name = person signing out
        location = the location the person is bringing the bank
        number = bank number being taken
        notes = any special information needed at the time of sign out
        """
        for bank in self.__banks:
            if bank.number() == str(number):
                if not bank.out() and bank.signed_in() and bank.returned():
                    bank.signout(name, location, notes)
                    return True, 0
                elif bank.out():
                    return False, 1  # bank is out
                elif not bank.signed_in():
                    return False, 2  # bank not signed in
                elif not bank.returned():
                    return False, 3  # bank not returned
        return False, 0  # bank not found

    def signedout(self):
        """Returns a list of all banks currently out"""
        return [bank for bank in self.__banks if bank.out()]

    def signin(self, name, number):
        """
        Signs a bank back in
        name = person signing it in
        number = bank being signed in
        """
        for bank in self.__banks:
            if bank.number() == str(number):
                if bank.out() and not bank.signed_in() and not bank.returned():
                    bank.signin(name)
                    return True, 0
                elif not bank.out():
                    return False, 1  # bank is not out
                elif bank.signed_in():
                    return False, 2  # bank already signed in
                elif bank.returned():
                    return False, 3  # bank already returned
        return False, 0  # bank not found

    def signedin(self):
        """Returns a list of banks that have been signed in"""
        return [bank for bank in self.__banks if bank.signed_in()]

    def returnbank(self, number):
        """
        Mark a bank returned
        number = bank being marked
        """
        for bank in self.__banks:
            if bank.number() == str(number):
                if bank.signed_in() and not bank.returned() and bank.out():
                    bank.returnbank()
                    return True, 0
                elif not bank.signed_in():
                    return False, 1  # bank not signed in
                elif bank.returned():
                    return False, 2  # bank already returned
                elif not bank.out():
                    return False, 3  # bank is not out
        return False, 0  # bank not found

    def returnedbanks(self):
        """List of banks that have been returned"""
        return [bank for bank in self.__banks if bank.returned()]

    def notreturnedbanks(self):
        """List of signed-in banks that have not been returned"""
        return [bank for bank in self.signedin() if not bank.returned()]

    def tobemade(self):
        """Returns a list of banks ready to be made"""
        return [bank for bank in self.__banks if bank.returned() and not bank.made()]

    def makebank(self, number, amount="350"):
        """
        Makes a bank, i.e. the bank is prepared and ready to be signed out
        number = bank being made
        amount = how much money put into the bank
        """
        for bank in self.__banks:
            if bank.number() == str(number):
                if bank.returned() and not bank.made():
                    bank.makebank(amount)
                    return True, 0
                elif not bank.returned():
                    return False, 1  # bank not returned
                elif bank.made():
                    return False, 2  # bank already made
        return False, 0  # bank not found

    def madebanks(self):
        """Returns a list of banks that are made and ready to be signed out"""
        return [bank for bank in self.__banks if bank.made()]

    def get_notes(self, number):
        """
        Returns the notes for a given bank, or None if the bank is unknown
        number = bank number of notes wanted
        """
        for bank in self.__banks:
            if bank.number() == str(number):
                return bank.notes()
        return None  # bank not found

    def signoutinfo(self, number):
        """Returns the current sign out info of the bank"""
        for bank in self.__banks:
            if bank.number() == str(number):
                return True, bank.signoutinfo()
        return False, 0  # bank not found

    def banklog(self, number):
        """Returns the bank log"""
        for bank in self.__banks:
            if bank.number() == str(number):
                return True, bank.signoutlog()
        return False, 0  # bank not found

    def __str__(self):
        return "Ticketing Bank object"
class Retail(object):
def __init__(self):
self.__banks = []
self.__locations = []
def addbank(self, number):
"""Adds a new bank to the list"""
found = False
for bank in self.__banks:
if bank.number() == str(number):
found = True
# print("Bank already exists")
return False, 0
if not found:
self.__banks.append(Bank(number, "retail"))
return True, 0
def removebank(self, number):
"""Removes a bank from the list"""
found = False
for bank in self.__banks:
if bank.number() == str(number):
found = True
if bank.returned() and not bank.made():
self.__banks.remove(bank)
return True, 0
elif not bank.returned():
# print("bank not returned")
return False, 1
elif bank.made():
# print("bank already made")
return False, 2
if not found:
# print("Bank not found")
return False, 0
def banks(self):
"""Returns the list of banks"""
return self.__banks
def addlocation(self, location):
"""Add a location to the list"""
found = False
for loc in self.__locations:
if loc == location:
found = True
# print("Location already exists")
return False, 0
if not found:
self.__locations.append(location)
return True, 0
def removelocation(self, location):
"""Removes a bank from the list"""
found = False
for loc in self.__locations:
if loc == location:
found = True
self.__locations.remove(loc)
return True, 0
if not found:
# print("Location not found", location)
return False, 0
def locations(self):
"""Returns a list of locations"""
return self.__locations
def signout(self, name, location, number, notes = ""):
"""
Signs out a bank
name = person signing out
location = the location the person is bringing the bank
number = bank number being taken
notes = any special information needed at the time of sign out
"""
found = False
for bank in self.__banks:
if bank.number() == str(number):
found = True
if not bank.out() and bank.signed_in() and bank.returned():
bank.signout(name, location, notes)
return True, 0
elif bank.out():
return False, 1
# print("Bank is out")
elif not bank.signed_in():
return False, 2
# print("Bank not signed in")
elif not bank.returned():
return False, 3
# print("Bank not returned")
if not found:
return False, 0
def signedout(self):
"""Returns a list of all banks currently out"""
out = []
for bank in self.__banks:
if bank.out():
out.append(bank)
return out
def signin(self, name, number):
"""
Signs a bank back in
name = person signing it in
number = bank being signed in
"""
found = False
for bank in self.__banks:
if bank.number() == str(number):
found = True
if bank.out() and not bank.signed_in() and not bank.returned():
bank.signin(name)
return True, 0
elif not bank.out():
return False, 1
# print("Bank is not out")
elif bank.signed_in():
return False, 2
# print("Bank signed in")
elif bank.returned():
return False, 3
# print("Bank already returned")
if not found:
return False, 0
def signedin(self):
"""Returns a list of banks that have been signed in"""
signin = []
for bank in self.__banks:
if bank.signed_in():
signin.append(bank)
return signin
def returnbank(self, number):
"""
Mark a bank returned
number = bank being marked
"""
found = False
for bank in self.__banks:
if bank.number() == str(number):
found = True
if bank.signed_in() and not bank.returned() and bank.out():
bank.returnbank()
return True, 0
elif not bank.signed_in():
return False, 1
# print("Bank not signed in")
elif bank.returned():
return False, 2
# print("Bank already returned")
elif not bank.out():
return False, 3
# print("Bank is not out")
if not found:
return False, 0
def returnedbanks(self):
"""
List of banks that have been returned
"""
returned = []
for bank in self.__banks:
if bank.returned():
returned.append(bank)
return returned
def notreturnedbanks(self):
"""
List of banks that have not been returned
"""
notreturned = []
for bank in self.signedin():
if not bank.returned():
notreturned.append(bank)
return notreturned
def tobemade(self):
"""Returns a list of banks ready to be made"""
tobe = []
for bank in self.__banks:
if bank.returned() and not bank.made():
tobe.append(bank)
return tobe
def makebank(self, number, amount = "350"):
"""
Makes a bank, i.e. the bank is prepared and ready to be signed out
number = bank being made
amount = how much money put into the bank
"""
found = False
for bank in self.__banks:
if bank.number() == str(number):
found = True
if bank.returned() and not bank.made():
bank.makebank(amount)
return True, 0
elif not bank.returned():
# print("Bank not returned")
return False, 1
elif bank.made():
# print("Bank already made")
return False, 2
if not found:
return False, 0
# print("Bank not found")
def madebanks(self):
"""
Returns a list of banks that are made and ready to be signed out
"""
made = []
for bank in self.__banks:
if bank.made():
made.append(bank)
return made
def get_notes(self, number):
"""
Returns the notes for a given bank
number = bank number of notes wanted
"""
found = False
for bank in self.__banks:
if bank.number() == str(number):
found = True
return bank.notes()
if not found:
# print("Bank not found")
def signoutinfo(self, number):
"""Returns the current sign out info fo the bank"""
found = False
for bank in self.__banks:
if bank.number() == str(number):
found = True
return True, bank.signoutinfo()
if not found:
return False, 0
def banklog(self, number):
"""Returns the bank log"""
found = False
for bank in self.__banks:
if bank.number() == str(number):
found = True
return True, bank.signoutlog()
if not found:
return False, 0
    def __str__(self):
        # Human-readable tag for this register; "retail" matches the bank
        # type passed to Bank(...) in addbank above.
        return "Retail Bank object"
class Bike(object):
    """Register of "bike" banks: tracks each bank's lifecycle
    (made -> signed out -> signed in -> returned) and the known
    sign-out locations.

    Every mutator returns a ``(success, code)`` tuple; code 0 means
    "not found" (or "duplicate" for add operations), other codes are
    documented per method.

    Fixes vs. the original: ``get_notes`` ended with a bodiless
    ``if not found:`` (a SyntaxError); the repeated linear searches are
    factored into the private ``_find_bank`` helper.
    """

    def __init__(self):
        self.__banks = []      # Bank instances managed by this register
        self.__locations = []  # known destinations for sign-out

    def _find_bank(self, number):
        """Return the Bank whose number matches, or None if absent."""
        for bank in self.__banks:
            if bank.number() == str(number):
                return bank
        return None

    def addbank(self, number):
        """Add a new bank; (False, 0) if the number already exists."""
        if self._find_bank(number) is not None:
            return False, 0
        self.__banks.append(Bank(number, "bike"))
        return True, 0

    def removebank(self, number):
        """Remove a bank.  Codes: 0 unknown, 1 not returned, 2 already made."""
        bank = self._find_bank(number)
        if bank is None:
            return False, 0
        if not bank.returned():
            return False, 1
        if bank.made():
            return False, 2
        self.__banks.remove(bank)
        return True, 0

    def banks(self):
        """Return the (internal) list of banks."""
        return self.__banks

    def addlocation(self, location):
        """Add a location; (False, 0) if it already exists."""
        if location in self.__locations:
            return False, 0
        self.__locations.append(location)
        return True, 0

    def removelocation(self, location):
        """Remove a location; (False, 0) if it is unknown."""
        if location not in self.__locations:
            return False, 0
        self.__locations.remove(location)
        return True, 0

    def locations(self):
        """Return the (internal) list of locations."""
        return self.__locations

    def signout(self, name, location, number, notes=""):
        """Sign a bank out to *name* heading for *location*.

        Codes: 0 unknown, 1 already out, 2 not signed in, 3 not returned.
        """
        bank = self._find_bank(number)
        if bank is None:
            return False, 0
        if bank.out():
            return False, 1
        if not bank.signed_in():
            return False, 2
        if not bank.returned():
            return False, 3
        bank.signout(name, location, notes)
        return True, 0

    def signedout(self):
        """Return all banks currently out."""
        return [bank for bank in self.__banks if bank.out()]

    def signin(self, name, number):
        """Sign a bank back in under *name*.

        Codes: 0 unknown, 1 not out, 2 already signed in, 3 already returned.
        """
        bank = self._find_bank(number)
        if bank is None:
            return False, 0
        if not bank.out():
            return False, 1
        if bank.signed_in():
            return False, 2
        if bank.returned():
            return False, 3
        bank.signin(name)
        return True, 0

    def signedin(self):
        """Return all banks that have been signed in."""
        return [bank for bank in self.__banks if bank.signed_in()]

    def returnbank(self, number):
        """Mark a bank returned.

        Codes: 0 unknown, 1 not signed in, 2 already returned, 3 not out.
        """
        bank = self._find_bank(number)
        if bank is None:
            return False, 0
        if not bank.signed_in():
            return False, 1
        if bank.returned():
            return False, 2
        if not bank.out():
            return False, 3
        bank.returnbank()
        return True, 0

    def returnedbanks(self):
        """Return all banks that have been returned."""
        return [bank for bank in self.__banks if bank.returned()]

    def notreturnedbanks(self):
        """Return the signed-in banks not yet returned."""
        return [bank for bank in self.signedin() if not bank.returned()]

    def tobemade(self):
        """Return the banks ready to be made (returned, not made)."""
        return [bank for bank in self.__banks
                if bank.returned() and not bank.made()]

    def makebank(self, number, amount="350"):
        """Make a bank with *amount* of money in it.

        Codes: 0 unknown, 1 not returned, 2 already made.
        """
        bank = self._find_bank(number)
        if bank is None:
            return False, 0
        if not bank.returned():
            return False, 1
        if bank.made():
            return False, 2
        bank.makebank(amount)
        return True, 0

    def madebanks(self):
        """Return the banks that are made and ready to be signed out."""
        return [bank for bank in self.__banks if bank.made()]

    def get_notes(self, number):
        """Return the notes for the given bank number, or None if unknown.

        Fix: the original ended with a bodiless ``if not found:``
        (a SyntaxError).
        """
        bank = self._find_bank(number)
        if bank is None:
            return None
        return bank.notes()

    def signoutinfo(self, number):
        """Return (True, info) for the bank, or (False, 0) if unknown."""
        bank = self._find_bank(number)
        if bank is None:
            return False, 0
        return True, bank.signoutinfo()

    def banklog(self, number):
        """Return (True, log) for the bank, or (False, 0) if unknown."""
        bank = self._find_bank(number)
        if bank is None:
            return False, 0
        return True, bank.signoutlog()

    def __str__(self):
        return "Bike Bank object"
class Change(object):
    """Register of "change" banks: tracks each bank's lifecycle
    (made -> signed out -> signed in -> returned) and the known
    sign-out locations.

    Every mutator returns a ``(success, code)`` tuple; code 0 means
    "not found" (or "duplicate" for add operations), other codes are
    documented per method.

    Fixes vs. the original: ``get_notes`` ended with a bodiless
    ``if not found:`` (a SyntaxError); the repeated linear searches are
    factored into the private ``_find_bank`` helper.
    """

    def __init__(self):
        self.__banks = []      # Bank instances managed by this register
        self.__locations = []  # known destinations for sign-out

    def _find_bank(self, number):
        """Return the Bank whose number matches, or None if absent."""
        for bank in self.__banks:
            if bank.number() == str(number):
                return bank
        return None

    def addbank(self, number):
        """Add a new bank; (False, 0) if the number already exists."""
        if self._find_bank(number) is not None:
            return False, 0
        self.__banks.append(Bank(number, "change"))
        return True, 0

    def removebank(self, number):
        """Remove a bank.  Codes: 0 unknown, 1 not returned, 2 already made."""
        bank = self._find_bank(number)
        if bank is None:
            return False, 0
        if not bank.returned():
            return False, 1
        if bank.made():
            return False, 2
        self.__banks.remove(bank)
        return True, 0

    def banks(self):
        """Return the (internal) list of banks."""
        return self.__banks

    def addlocation(self, location):
        """Add a location; (False, 0) if it already exists."""
        if location in self.__locations:
            return False, 0
        self.__locations.append(location)
        return True, 0

    def removelocation(self, location):
        """Remove a location; (False, 0) if it is unknown."""
        if location not in self.__locations:
            return False, 0
        self.__locations.remove(location)
        return True, 0

    def locations(self):
        """Return the (internal) list of locations."""
        return self.__locations

    def signout(self, name, location, number, notes=""):
        """Sign a bank out to *name* heading for *location*.

        Codes: 0 unknown, 1 already out, 2 not signed in, 3 not returned.
        """
        bank = self._find_bank(number)
        if bank is None:
            return False, 0
        if bank.out():
            return False, 1
        if not bank.signed_in():
            return False, 2
        if not bank.returned():
            return False, 3
        bank.signout(name, location, notes)
        return True, 0

    def signedout(self):
        """Return all banks currently out."""
        return [bank for bank in self.__banks if bank.out()]

    def signin(self, name, number):
        """Sign a bank back in under *name*.

        Codes: 0 unknown, 1 not out, 2 already signed in, 3 already returned.
        """
        bank = self._find_bank(number)
        if bank is None:
            return False, 0
        if not bank.out():
            return False, 1
        if bank.signed_in():
            return False, 2
        if bank.returned():
            return False, 3
        bank.signin(name)
        return True, 0

    def signedin(self):
        """Return all banks that have been signed in."""
        return [bank for bank in self.__banks if bank.signed_in()]

    def returnbank(self, number):
        """Mark a bank returned.

        Codes: 0 unknown, 1 not signed in, 2 already returned, 3 not out.
        """
        bank = self._find_bank(number)
        if bank is None:
            return False, 0
        if not bank.signed_in():
            return False, 1
        if bank.returned():
            return False, 2
        if not bank.out():
            return False, 3
        bank.returnbank()
        return True, 0

    def returnedbanks(self):
        """Return all banks that have been returned."""
        return [bank for bank in self.__banks if bank.returned()]

    def notreturnedbanks(self):
        """Return the signed-in banks not yet returned."""
        return [bank for bank in self.signedin() if not bank.returned()]

    def tobemade(self):
        """Return the banks ready to be made (returned, not made)."""
        return [bank for bank in self.__banks
                if bank.returned() and not bank.made()]

    def makebank(self, number, amount="500"):
        """Make a bank with *amount* of money in it.

        Codes: 0 unknown, 1 not returned, 2 already made.
        """
        bank = self._find_bank(number)
        if bank is None:
            return False, 0
        if not bank.returned():
            return False, 1
        if bank.made():
            return False, 2
        bank.makebank(amount)
        return True, 0

    def madebanks(self):
        """Return the banks that are made and ready to be signed out."""
        return [bank for bank in self.__banks if bank.made()]

    def get_notes(self, number):
        """Return the notes for the given bank number, or None if unknown.

        Fix: the original ended with a bodiless ``if not found:``
        (a SyntaxError).
        """
        bank = self._find_bank(number)
        if bank is None:
            return None
        return bank.notes()

    def signoutinfo(self, number):
        """Return (True, info) for the bank, or (False, 0) if unknown."""
        bank = self._find_bank(number)
        if bank is None:
            return False, 0
        return True, bank.signoutinfo()

    def banklog(self, number):
        """Return (True, log) for the bank, or (False, 0) if unknown."""
        bank = self._find_bank(number)
        if bank is None:
            return False, 0
        return True, bank.signoutlog()

    def __str__(self):
        return "Change Bank object"
class Fanny(object):
    """Register of "fanny" banks: tracks each bank's lifecycle
    (made -> signed out -> signed in -> returned) and the known
    sign-out locations.

    Every mutator returns a ``(success, code)`` tuple; code 0 means
    "not found" (or "duplicate" for add operations), other codes are
    documented per method.

    Fixes vs. the original: ``get_notes`` ended with a bodiless
    ``if not found:`` (a SyntaxError); the repeated linear searches are
    factored into the private ``_find_bank`` helper.
    """

    def __init__(self):
        self.__banks = []      # Bank instances managed by this register
        self.__locations = []  # known destinations for sign-out

    def _find_bank(self, number):
        """Return the Bank whose number matches, or None if absent."""
        for bank in self.__banks:
            if bank.number() == str(number):
                return bank
        return None

    def addbank(self, number):
        """Add a new bank; (False, 0) if the number already exists."""
        if self._find_bank(number) is not None:
            return False, 0
        self.__banks.append(Bank(number, "fanny"))
        return True, 0

    def removebank(self, number):
        """Remove a bank.  Codes: 0 unknown, 1 not returned, 2 already made."""
        bank = self._find_bank(number)
        if bank is None:
            return False, 0
        if not bank.returned():
            return False, 1
        if bank.made():
            return False, 2
        self.__banks.remove(bank)
        return True, 0

    def banks(self):
        """Return the (internal) list of banks."""
        return self.__banks

    def addlocation(self, location):
        """Add a location; (False, 0) if it already exists."""
        if location in self.__locations:
            return False, 0
        self.__locations.append(location)
        return True, 0

    def removelocation(self, location):
        """Remove a location; (False, 0) if it is unknown."""
        if location not in self.__locations:
            return False, 0
        self.__locations.remove(location)
        return True, 0

    def locations(self):
        """Return the (internal) list of locations."""
        return self.__locations

    def signout(self, name, location, number, notes=""):
        """Sign a bank out to *name* heading for *location*.

        Codes: 0 unknown, 1 already out, 2 not signed in, 3 not returned.
        """
        bank = self._find_bank(number)
        if bank is None:
            return False, 0
        if bank.out():
            return False, 1
        if not bank.signed_in():
            return False, 2
        if not bank.returned():
            return False, 3
        bank.signout(name, location, notes)
        return True, 0

    def signedout(self):
        """Return all banks currently out."""
        return [bank for bank in self.__banks if bank.out()]

    def signin(self, name, number):
        """Sign a bank back in under *name*.

        Codes: 0 unknown, 1 not out, 2 already signed in, 3 already returned.
        """
        bank = self._find_bank(number)
        if bank is None:
            return False, 0
        if not bank.out():
            return False, 1
        if bank.signed_in():
            return False, 2
        if bank.returned():
            return False, 3
        bank.signin(name)
        return True, 0

    def signedin(self):
        """Return all banks that have been signed in."""
        return [bank for bank in self.__banks if bank.signed_in()]

    def returnbank(self, number):
        """Mark a bank returned.

        Codes: 0 unknown, 1 not signed in, 2 already returned, 3 not out.
        """
        bank = self._find_bank(number)
        if bank is None:
            return False, 0
        if not bank.signed_in():
            return False, 1
        if bank.returned():
            return False, 2
        if not bank.out():
            return False, 3
        bank.returnbank()
        return True, 0

    def returnedbanks(self):
        """Return all banks that have been returned."""
        return [bank for bank in self.__banks if bank.returned()]

    def notreturnedbanks(self):
        """Return the signed-in banks not yet returned."""
        return [bank for bank in self.signedin() if not bank.returned()]

    def tobemade(self):
        """Return the banks ready to be made (returned, not made)."""
        return [bank for bank in self.__banks
                if bank.returned() and not bank.made()]

    def makebank(self, number, amount="350"):
        """Make a bank with *amount* of money in it.

        Codes: 0 unknown, 1 not returned, 2 already made.
        """
        bank = self._find_bank(number)
        if bank is None:
            return False, 0
        if not bank.returned():
            return False, 1
        if bank.made():
            return False, 2
        bank.makebank(amount)
        return True, 0

    def madebanks(self):
        """Return the banks that are made and ready to be signed out."""
        return [bank for bank in self.__banks if bank.made()]

    def get_notes(self, number):
        """Return the notes for the given bank number, or None if unknown.

        Fix: the original ended with a bodiless ``if not found:``
        (a SyntaxError).
        """
        bank = self._find_bank(number)
        if bank is None:
            return None
        return bank.notes()

    def signoutinfo(self, number):
        """Return (True, info) for the bank, or (False, 0) if unknown."""
        bank = self._find_bank(number)
        if bank is None:
            return False, 0
        return True, bank.signoutinfo()

    def banklog(self, number):
        """Return (True, log) for the bank, or (False, 0) if unknown."""
        bank = self._find_bank(number)
        if bank is None:
            return False, 0
        return True, bank.signoutlog()

    def __str__(self):
        return "Fanny Bank object"
|
993,961 | 2e03ed0c2d390aafb378dcc2c1074d0589c2b096 | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 22 15:31:04 2021
@author: 70037165
"""
from pulp import *
import pandas as pd
#LpProblem, LpMinimize, LpVariable, lpSum

# Assign spreadsheet filename: file
file = r'C:\Users\70037165\Desktop\SKU Rationalization\SSC for Python.xlsx'

# 'Distance' sheet -> transfer cost lookup keyed by (sending, receiving) plant.
cost_df = pd.read_excel(file, sheet_name = 'Distance', header = 0)
cost_df = cost_df.set_index(['Sending plant', 'Receiving plant'])
costs = cost_df.to_dict()
# NOTE(review): DataFrame.to_dict() returns {column: {index: value}}, so the
# costs[(s, r)] lookups below likely need a column key first -- verify
# against the actual sheet layout.

df = pd.read_excel(file, sheet_name= 'Data', header = 0)
plant = list(set(df['Plant']))
code = list(set(df['Code']))
df = df.set_index(['Code', 'Plant'])
df.sort_index
# NOTE(review): the line above accesses sort_index without calling it, so it
# is a no-op (sort_index also returns a new frame); intended form is
# df = df.sort_index().

#sending_plants = []
#receiving_plants = []
# For every SKU (groupby level 0 = 'Code'): classify each plant as a sender
# (batch exceeds 1.5 months of sales) or a receiver, then solve a min-cost
# transportation LP that ships the surplus from senders to receivers.
for inx, data in df.groupby(level = 0):
    sending_plants = []
    receiving_plants = []
    sending_amount = []  # (plant, surplus) pairs for this SKU
    for inx1, data1 in data.iterrows():
        if data1['Batch'] > (data1['Monthly sales'] * 1.5):
            inx1_list = list(inx1)
            #print(inx1_list[0])
            sending_plants.append(inx1_list[1])
            sending_amount.append((inx1[1], data1['Batch'] - (data1['Monthly sales'] * 1.5)))
        else:
            inx1_list = list(inx1)
            receiving_plants.append(inx1_list[1])
    print('SKU: ' + str(inx1[0]))
    print('sending is: ' + str(sending_plants))
    print('receiving is: ' + str(receiving_plants))
    if not sending_plants:
        print('No sending plant\n')
    elif not receiving_plants:
        print('Have sending - but no receiving\n')
    else:
        #sending_amount.append(data1['Batch'] - (data1['Monthly sales'] * 1.5))
        print(sending_amount)
        sending_amount_dict = dict(sending_amount)
        #print(costs)
        # Initialize Model
        model = LpProblem("Minimize Transportation Costs", LpMinimize)
        # Define decision variables: one shipment quantity per (sender, receiver).
        key = [(s, r) for s in sending_plants for r in receiving_plants]
        var_dict = LpVariable.dicts('sending product amount',
                                    key,
                                    lowBound = 0, cat='Float')
        # Use the LpVariable dictionary variable to define objective
        model += lpSum([costs[(s, r)] * var_dict[(s, r)]
                        for s in sending_plants for r in receiving_plants])
        # Define Constraints
        # For each overproduction plant, sum plant shipment set equal to over quantity
        for s in sending_plants:
            model += lpSum([var_dict[(s, r)] for r in receiving_plants]) == sending_amount_dict[s]
        # Solve Model
        model.solve(COIN_CMD(msg=1))
        # NOTE(review): the format string below has no {} placeholder, so
        # var_dict is never printed; intended form is
        # "The moving amount is {}".format(var_dict).
        print("The moving amount is".format(var_dict))
'''
print('receiving is: ' + str(receiving_plant))
print('sending is: ' + str(sending_plant))
sending_plants s
receing_plants r
sending_amount = [s, sa]
costs [s, r]
from pulp import *
# Initialize Model
model = LpProblem("Minimize Transportation Costs", LpMinimize)
# Define decision variables
key = [(s, r) for s in sending_plants for r in receiving_plants]
var_dict = LpVariable.dicts('moving amount',
key,
lowBound=0, cat='Integer')
# Use the LpVariable dictionary variable to define objective
model += lpSum([costs[(s, r)] * var_dict[(s,r)]
for s in sending_plants for r in receiving_plants])
# Define Constraints
# For each overproduction plant, sum plant shipment set equal to over quantity
for s in sending_plants:
    model += lpSum([var_dict[(s, r] for r in receving_plants]) == moving_amount[s]
# Solve Model
model.solve()
print("The moving amount is".format(var_dict))'''
|
993,962 | 25993ab3cb1b0020932eec54820da0ef5a438c94 | # Generated by Django 3.0.8 on 2021-01-06 23:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration for the 'kit' app."""

    dependencies = [
        ('kit', '0008_auto_20210106_2300'),
    ]

    operations = [
        # kit_info.img: image field with a default placeholder file name.
        migrations.AlterField(
            model_name='kit_info',
            name='img',
            field=models.ImageField(default='defaultImage.jpg', upload_to=''),
        ),
        # kit_product.kit: FK to kit.Kit, cascading deletes, labelled 'Kit'.
        migrations.AlterField(
            model_name='kit_product',
            name='kit',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kit.Kit', verbose_name='Kit'),
        ),
    ]
|
993,963 | 9064e9e82a3ee9f90411135705be0a6b560466fd | #! /usr/bin/env python
#
def r8_li(x):
    """Evaluate the logarithmic integral li(x) for a real argument.

    Ported from Wayne Fullerton's FORTRAN77 routine; Python version by
    John Burkardt (25 April 2016, GNU LGPL).  Computed via the identity
    li(x) = Ei(log(x)).

    Input:  real x, the argument (must be >= 0 and != 1).
    Output: real value, the logarithmic integral evaluated at x.
    """
    import numpy as np
    from r8_ei import r8_ei
    from r8_log import r8_log
    from machine import r8_mach
    from sys import exit

    # Half-precision threshold, used to warn near the singularity at x = 1.
    sqeps = np.sqrt(r8_mach(3))

    if x < 0.0:
        print('')
        print('R8_LI - Fatal error!')
        print(' Function undefined for X <= 0.')
        exit('R8_LI - Fatal error!')

    if x == 0.0:
        return 0.0

    if x == 1.0:
        print('')
        print('R8_LI - Fatal error!')
        print(' Function undefined for X = 1.')
        exit('R8_LI - Fatal error!')

    if abs(1.0 - x) < sqeps:
        print('')
        print('R8_LI - Warning!')
        print(' Answer less than half precision.')
        print(' X is too close to 1.')

    return r8_ei(r8_log(x))
def r8_li_test():
    """Exercise R8_LI against tabulated logarithmic-integral values.

    Prints each test argument, the tabulated value, the computed value,
    and their absolute difference.  (GNU LGPL, John Burkardt, 25 April 2016.)
    """
    import platform
    from logarithmic_integral_values import logarithmic_integral_values

    print('')
    print('R8_LI_TEST:')
    print(' Python version: %s' % (platform.python_version()))
    print(' R8_LI evaluates the logarithmic integral.')
    print('')
    print(' X LI(X) R8_LI(X) Diff')
    print('')

    n_data = 0
    while True:
        # The table iterator returns n_data == 0 once exhausted.
        n_data, x, fx1 = logarithmic_integral_values(n_data)
        if n_data == 0:
            break
        fx2 = r8_li(x)
        print(' %14.4f %14.6g %14.6g %14.6g' % (x, fx1, fx2, abs(fx1 - fx2)))

    # Terminate.
    print('')
    print('R8_LI_TEST:')
    print(' Normal end of execution.')
    return
if ( __name__ == '__main__' ):
    # Run the self-test, bracketed by timestamps, when executed as a script.
    from timestamp import timestamp
    timestamp ( )
    r8_li_test ( )
    timestamp ( )
|
993,964 | 1f329fe2a08be5b59dfb868e067207281b6c369c | from moodle import (
__version__,
Auth,
Core,
Mod,
Tool,
Moodle,
)
def test_version():
    # Pin the package version string; must be bumped alongside releases.
    assert __version__ == '0.14.2'
def test_moodle(moodle: Moodle):
    # Smoke test: the client (presumably supplied by a pytest fixture --
    # confirm in conftest) and each of its sub-APIs have the expected types.
    assert isinstance(moodle, Moodle)
    assert isinstance(moodle.auth, Auth)
    assert isinstance(moodle.core, Core)
    assert isinstance(moodle.mod, Mod)
    assert isinstance(moodle.tool, Tool)
|
993,965 | 262c59dc7f53f7b2e92556fa70bd13dd31b20023 | import os
def getfiles(extension):
    """Return the names of files in the current directory whose name
    ends with *extension* (e.g. '.ipynb')."""
    matches = []
    for name in os.listdir():
        if name.endswith(extension):
            matches.append(name)
    return matches
def writefile(handle, string):
    """Write *string* to the file at path *handle*, truncating any
    existing content."""
    f = open(handle, 'w+')
    try:
        f.write(string)
    finally:
        f.close()
# Convert Jupyter notebooks to HTML
files = getfiles('.ipynb')
for f in files:
    # NOTE(review): the filename is interpolated into a shell command
    # unquoted -- breaks (or worse) for names containing spaces/shell chars.
    os.system(f"jupyter nbconvert --to html {f}")
# Move notebooks to docs/
os.system('mv *.html docs/.')
os.chdir('docs')
# Make index page: YAML front matter followed by a markdown link per notebook.
header = """
---
title: Data Testing Tutorial
---
This contains static HTML versions of the Jupyter notebooks that I have made for this tutorial.
Files:
"""
for f in files:
    html_name = f.replace(".ipynb", ".html")
    header += f'- [{html_name}]({html_name})\n'
writefile('index.md', header)
|
993,966 | 7792300008d73347f0d1f65bb51fa7fdb53b4680 | import curses
import pickle
import string # Serialization module for python
class Document:
    """A paged text buffer; every page is 20 rows of 80 characters."""

    def __init__(self, filename=None):
        self.filename = filename if filename else "binary.bindoc"
        self.pages = []    # list of pages; each page is 20 width-80 strings
        self.current = 0   # index of the page being displayed/edited
        self.addPage()

    def addPage(self):
        """Append a blank 80x20 page and move to it."""
        blank = ["".ljust(80) for _ in range(20)]
        self.pages.append(blank)
        self.nextPage()

    def delPage(self):
        """Drop the current page, always keeping at least one page."""
        self.pages.pop(self.current)
        if len(self.pages) <= 0:
            self.addPage()
        else:
            self.prevPage()

    def setChr(self, r, c, ltr):
        """Overwrite the character at row *r*, column *c* of the current page."""
        row = self.pages[self.current][r]
        self.pages[self.current][r] = row[:c] + ltr + row[c + 1:]

    def nextPage(self):
        """Advance to the following page, wrapping at the end."""
        self.current = (self.current + 1) % len(self.pages)

    def prevPage(self):
        """Step back one page, wrapping at the start."""
        self.current = (self.current - 1 + len(self.pages)) % len(self.pages)

    def __str__(self):
        return "\n".join(self.pages[self.current])
d = Document()  # global document instance edited by the curses loop in main()
class Tree:
    """Ternary tree node keyed by a document filename."""

    def __init__(self, filename=None, parent=None, left=None, middle=None, right=None):
        self.filename = filename
        self.parent = parent
        self.left = left
        self.middle = middle
        self.right = right

    def __str__(self):
        return str(self.filename)

    def addfull(self, input):
        """Wrap *input* in a Tree and attach it to the first free child
        slot, trying left, then middle, then right; silently does nothing
        if all three slots are taken."""
        if self.left is None:
            self.left = Tree(input)
        elif self.middle is None:
            self.middle = Tree(input)
        elif self.right is None:
            self.right = Tree(input)

    def findparent(self, baba, current):
        """Attach *current* under every direct child whose filename equals
        *baba*.  Assumes all three children exist (AttributeError otherwise)."""
        if self.left.filename == baba:
            self.left.addfull(current)
        if self.middle.filename == baba:
            self.middle.addfull(current)
        if self.right.filename == baba:
            self.right.addfull(current)
t= Tree()  # global story tree built via the ^V handler in main()
def drawscreen(scr, doc):
    """Redraw the editor chrome and the current page of *doc* on *scr*.

    Blanks the screen and bails out when the terminal is smaller than 80x24;
    otherwise draws a divider, the page indicator, the command help rows,
    and the page text, restoring the cursor position afterwards.
    """
    height, width = scr.getmaxyx()
    if height < 24 or width < 80:
        # Terminal too small: clear and wait for a resize.
        scr.move(0, 0)
        scr.erase()
        curses.doupdate()
        return
    pos_r, pos_c = curses.getsyx()  # remember cursor to restore at the end
    scr.hline(20, 0, '~', width)    # divider between page area and help rows
    pos_p = str(doc.current + 1) + '/' + str(len(doc.pages)) # Not displaying zero-based indexing
    scr.addstr(20, width - len(pos_p), pos_p)
    commands = [["^C: Quit", "^O: Story save", "^L: Story load"],
                ["^X: Back to write","^V: Set Node"]]
    for r in range(2):
        ct = 0
        for cmd in commands[r]:
            scr.addstr(21 + r, ct * 20 + 5, cmd, curses.A_REVERSE)
            ct += 1
    if width > 80:
        # Shade the area right of the 80-column page as reverse video.
        for row in range(height - 4):
            scr.addstr(row, 80, " " * (width - 80), curses.A_REVERSE)
    scr.move(0, 0)
    lines = str(doc).split('\n')
    for line in range(len(lines)):
        scr.addstr(line, 0, lines[line])
    scr.move(pos_r, pos_c)
def sizecheck(scr):
    """Return (height, width, usable) for *scr*, where usable means the
    terminal is at least 80 columns by 24 rows."""
    height, width = scr.getmaxyx()
    usable = height >= 24 and width >= 80
    return height, width, usable
def main(stdscr):
    """Curses event loop for the page editor.

    First prompts for a single-digit file number (becomes '<n>.bindoc'),
    then dispatches on keystrokes: arrows move the cursor, printable
    characters are written into the document, PgUp/PgDn/Ins/Del manage
    pages, ^O/^L pickle/unpickle the document, ^X restarts the file
    prompt, and ^V records parent/child story-tree links in the global t.
    Key handling is disabled while the terminal is smaller than 80x24.
    """
    global d
    global t
    stdscr.clear()
    drawscreen(stdscr, d)
    stdscr.move(0, 0)
    s_height, s_width, enabled = sizecheck(stdscr)
    flag = 0  # 0 -> show the file-number prompt on the next loop iteration
    stdscr.addstr("File build: ")
    while True:
        if (flag == 0):
            # One-time prompt: a single digit keystroke selects the target file.
            stdscr.addstr(" enter file number:")
            flag = 1
            e = stdscr.getch() - 48  # ASCII digit -> int
            e1 = ".bindoc"
            e2 = str(e) + e1
            stdscr.addstr(str(e))
            d.filename = e2
            stdscr.addstr(" file")
            stdscr.move(0+ 1, 0)
        c = stdscr.getch()
        if enabled:
            if c == curses.KEY_UP:
                pos_r, pos_c = curses.getsyx()
                pos_r = max(pos_r - 1, 0)
                stdscr.move(pos_r, pos_c)
            elif c == curses.KEY_DOWN:
                pos_r, pos_c = curses.getsyx()
                pos_r = min(pos_r + 1, 19)  # keep cursor within the 20-row page
                stdscr.move(pos_r, pos_c)
            elif c == curses.KEY_LEFT:
                pos_r, pos_c = curses.getsyx()
                pos_c = max(pos_c - 1, 0)
                stdscr.move(pos_r, pos_c)
            elif c == curses.KEY_RIGHT:
                pos_r, pos_c = curses.getsyx()
                pos_c = min(pos_c + 1, 79)  # keep cursor within the 80-col page
                stdscr.move(pos_r, pos_c)
            elif c >= 32 and c <= 126:
                # Printable ASCII: echo to screen and store in the document.
                pos_r, pos_c = curses.getsyx()
                stdscr.addstr(pos_r, pos_c, chr(c))
                d.setChr(pos_r, pos_c, chr(c))
                if pos_r >= 19 and pos_c >= 79:
                    # At the bottom-right corner, pin the cursor in place.
                    stdscr.move(pos_r, pos_c)
            elif c == curses.KEY_HOME:
                # NOTE(review): pos_r is not refreshed here -- it reuses the
                # value left by an earlier branch (NameError if HOME is the
                # first key pressed).
                pos_c = 0
                stdscr.move(pos_r, pos_c)
            elif c == curses.KEY_END:
                # NOTE(review): same stale pos_r issue as KEY_HOME.
                pos_c = 79
                stdscr.move(pos_r, pos_c)
            elif c == curses.KEY_PPAGE:
                d.prevPage()
                drawscreen(stdscr, d)
                pos_r, pos_c = 0, 0
                stdscr.move(pos_r, pos_c)
            elif c == curses.KEY_NPAGE:
                d.nextPage()
                drawscreen(stdscr, d)
                pos_r, pos_c = 0, 0
                stdscr.move(pos_r, pos_c)
            elif c == curses.KEY_IC:
                d.addPage()
                drawscreen(stdscr, d)
                pos_r, pos_c = 0, 0
                stdscr.move(pos_r, pos_c)
            elif c == curses.KEY_DC:
                d.delPage()
                drawscreen(stdscr, d)
                pos_r, pos_c = 0, 0
                stdscr.move(pos_r, pos_c)
            elif c == curses.KEY_BACKSPACE or curses.keyname(c) == '^H':
                # NOTE(review): on Python 3 curses.keyname returns bytes, so
                # the == '^H' comparison is always False there.
                pos_r, pos_c = curses.getsyx()
                pos_c -= 1
                if pos_c < 0:
                    # Wrap to the end of the previous line.
                    pos_c = s_width - 1
                    pos_r -= 1
                if pos_r < 0:
                    pos_r = 0
                    pos_c = 0
                stdscr.addch(pos_r, pos_c, 32)
                d.setChr(pos_r, pos_c, ' ')
                stdscr.move(pos_r, pos_c)
            elif curses.keyname(c) == '^X':
                # Back to the file prompt, dropping the current page.
                d.delPage()
                drawscreen(stdscr, d)
                stdscr.move(0,0)
                stdscr.addstr("File build: ")
                flag=0;
            elif curses.keyname(c) == '^V':
                # Record story-tree links: a parent digit then up to three
                # child digits (Enter = empty slot), committed with Enter.
                tempfilename=d.filename
                d.delPage()
                drawscreen(stdscr, d)
                stdscr.move(0, 0)
                stdscr.addstr("Enter parent: ")
                parent = stdscr.getch()-48
                stdscr.addstr(str(parent))
                stdscr.addstr(" is parent ")
                i=0
                children=[]
                while(i<3):
                    pos_r, pos_c = curses.getsyx()
                    stdscr.move(pos_r+1,0)
                    stdscr.addstr("Enter which child: ")
                    child = stdscr.getch()
                    if(child==10):
                        # Enter: leave this child slot empty.
                        tempchild=None
                        children.append(tempchild)
                    else:
                        child=child-48
                        stdscr.addstr(str(child))
                        stdscr.addstr(" is child ")
                        tempchild=str(child)
                        children.append(tempchild)
                    i=i+1
                c=stdscr.getch()
                if(c==10):
                    t1=Tree(children[0])
                    t2=Tree(children[1])
                    t3=Tree(children[2])
                    t.addfull(t1)
                    t.addfull(t2)
                    t.addfull(t3)
                    # NOTE(review): t2 is linked twice below and t3 never is;
                    # also t1.parent is None here, so findparent matches the
                    # child whose filename is None.
                    t.findparent(t1.parent, t1.filename)
                    t.findparent(t2.parent, t2.filename)
                    t.findparent(t2.parent, t2.filename)
                    print(t)
                    print(t.left.filename)
                    print(t.middle.filename)
                    print(t.right.filename)
                #http://www.openbookproject.net/thinkcs/python/english2e/ch21.html
                #https://stackoverflow.com/questions/2358045/how-can-i-implement-a-tree-in-python-are-there-any-built-in-data-structures-in
            elif c == 10: # linefeed
                pos_r, pos_c = curses.getsyx()
                pos_c = 0
                pos_r = min(pos_r + 1, 19)
                stdscr.move(pos_r, pos_c)
            elif curses.keyname(c) == '^L':
                # Load the pickled document; ignore a missing file.
                try:
                    # NOTE(review): pickle requires binary mode ('rb') on
                    # Python 3; text mode will raise here.
                    f = open(d.filename, 'r')
                    d = pickle.load(f)
                    f.close()
                except IOError:
                    pass
                drawscreen(stdscr, d)
            elif curses.keyname(c) == '^O':
                # Save the document via pickle.
                # NOTE(review): pickle requires binary mode ('wb') on Python 3.
                f = open(d.filename, 'w')
                pickle.dump(d, f)
                #if(t.left==None):
                #    t.filename=str(d.filename)
                #elif(t.middle==None):
                #    t.filename=str(d.filename)
                #elif(t.right==None):
                #    t.filename=str(d.filename)
                #else
                #    t.left=Tree()
                f.close()
            elif c == curses.KEY_RESIZE:
                s_height, s_width, enabled = sizecheck(stdscr)
                drawscreen(stdscr, d)
        else:
            # While too small, only react to resizes.
            if c == curses.KEY_RESIZE:
                s_height, s_width, enabled = sizecheck(stdscr)
                drawscreen(stdscr, d)
        curses.doupdate()
# Entry point: curses.wrapper initialises the screen, runs main(), and
# restores the terminal even if main() raises.  Ctrl-C exits silently.
try:
    curses.wrapper(main)
    pass
except KeyboardInterrupt:
    pass
|
993,967 | db6379b741a540836cc00b10aad2cbd691f13d84 | import random
import time
import datetime
import sys
import math
import os
from collections import OrderedDict
from torch import autograd
from torch.autograd import Variable
import torch
import torch.nn as nn
from visdom import Visdom
import numpy as np
import SimpleITK as sitk
def save_numpy(tensor, name):
    """Serialise *tensor* to disk as a NumPy ``.npy`` file called *name*.

    Singleton dimensions are squeezed out and the data is moved to the
    CPU as float32 before saving.
    """
    data = torch.squeeze(tensor).cpu().float().numpy()
    np.save(name, data)
def save_dicom(tensor, name):
    """Write a volume tensor to an image file via SimpleITK.

    The output format is chosen by *name*'s extension.  Assumes the tensor
    values lie in [0, 1] — TODO confirm against the generator's output range.
    """
    array = torch.squeeze(tensor).cpu().float().numpy()
    # Scale [0, 1] floats to 8-bit [0, 255]; out-of-range values are clipped.
    array = np.clip(np.rint(array * 255.0), 0.0, 255.0).astype(np.uint8)
    # Swap the first two axes and reverse the slice order so the volume is
    # oriented for the downstream viewer — presumably matching the original
    # acquisition layout; verify against the loading pipeline.
    array = np.moveaxis(array, 1, 0)
    array = array[::-1]
    dicom_scan = sitk.GetImageFromArray(array)
    sitk.WriteImage(dicom_scan, name)
def load_network(network, save_path=None):
    """Load weights from *save_path* into *network*, tolerating partial or
    renamed checkpoints; returns the (possibly updated) network.

    Strategy: try a strict load first.  On failure, remap every key by
    inserting 'model.' after its first component (undoing a wrapper
    module), then load the intersection of matching keys; if sizes still
    clash, copy only same-sized tensors and report the rest.
    """
    if not os.path.isfile(save_path):
        print('%s not exists yet!' % save_path)
    else:
        try:
            # Happy path: checkpoint keys match the model exactly.
            network.load_state_dict(torch.load(save_path))
        except:
            # Remap 'a.b.rest' -> 'a.model.b.rest' (checkpoint saved from a
            # wrapped module).
            saved_dict = torch.load(save_path)
            pretrained_dict = OrderedDict()
            for k, v in saved_dict.items():
                ks = k.split('.')
                ks[1] = 'model.'+ks[1]
                name = '.'.join(ks)
                pretrained_dict[name] = v
            model_dict = network.state_dict()
            try:
                # Drop checkpoint-only keys and load only what the model has.
                pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
                network.load_state_dict(pretrained_dict)
                print('Pretrained network G has excessive layers; Only loading layers that are used')
            except:
                # Last resort: copy tensors whose shapes agree and leave the
                # remaining layers at their current initialization.
                print('Pretrained network G has fewer layers; The following are not initialized:')
                for k, v in pretrained_dict.items():
                    if v.size() == model_dict[k].size():
                        model_dict[k] = v
                not_initialized = set()
                for k, v in model_dict.items():
                    if k not in pretrained_dict or v.size() != pretrained_dict[k].size():
                        # not_initialized.add(k.split('.')[0])
                        not_initialized.add(k)
                print(sorted(not_initialized))
                network.load_state_dict(model_dict)
    return network
def tensor2image(tensor):
    """Convert the first sample of a batch in [-1, 1] to a uint8 CHW array.

    Single-channel images are replicated to three channels so they can be
    displayed as RGB (e.g. by visdom).
    """
    first = tensor[0].cpu().float().numpy()
    image = (first + 1.0) * 127.5
    if image.shape[0] == 1:
        image = np.tile(image, (3, 1, 1))
    return image.astype(np.uint8)
class Logger():
    """Console + Visdom training logger.

    Accumulates per-batch losses, prints a single carriage-returned
    progress line with an ETA, redraws images each batch, and appends
    per-epoch averages to Visdom loss curves.
    """

    def __init__(self, n_epochs, batches_epoch):
        self.viz = Visdom()
        self.n_epochs = n_epochs
        self.batches_epoch = batches_epoch
        self.epoch = 1
        self.batch = 1
        self.prev_time = time.time()
        self.mean_period = 0     # total wall-clock seconds accumulated so far
        self.losses = {}         # running loss sums for the current epoch
        self.loss_windows = {}   # visdom window handle per loss name
        self.image_windows = {}  # visdom window handle per image name

    def log(self, losses=None, images=None):
        """Record one batch of *losses* (tensors) and *images* (tensors)."""
        self.mean_period += (time.time() - self.prev_time)
        self.prev_time = time.time()
        sys.stdout.write('\rEpoch %03d/%03d [%04d/%04d] -- ' % (self.epoch, self.n_epochs, self.batch, self.batches_epoch))
        for i, loss_name in enumerate(losses.keys()):
            if loss_name not in self.losses:
                self.losses[loss_name] = losses[loss_name].item()
            else:
                self.losses[loss_name] += losses[loss_name].item()
            # '--' terminates the loss list; '|' separates entries.
            if (i+1) == len(losses.keys()):
                sys.stdout.write('%s: %.4f -- ' % (loss_name, self.losses[loss_name]/self.batch))
            else:
                sys.stdout.write('%s: %.4f | ' % (loss_name, self.losses[loss_name]/self.batch))
        # ETA = remaining batches * mean seconds per batch so far.
        batches_done = self.batches_epoch*(self.epoch - 1) + self.batch
        batches_left = self.batches_epoch*(self.n_epochs - self.epoch) + self.batches_epoch - self.batch
        sys.stdout.write('ETA: %s' % (datetime.timedelta(seconds=batches_left*self.mean_period/batches_done)))
        # Draw images
        for image_name, tensor in images.items():
            if image_name not in self.image_windows:
                self.image_windows[image_name] = self.viz.image(tensor2image(tensor.data), opts={'title':image_name})
            else:
                self.viz.image(tensor2image(tensor.data), win=self.image_windows[image_name], opts={'title':image_name})
        # End of epoch
        if (self.batch % self.batches_epoch) == 0:
            # Plot losses: append this epoch's average to each curve.
            for loss_name, loss in self.losses.items():
                if loss_name not in self.loss_windows:
                    self.loss_windows[loss_name] = self.viz.line(X=np.array([self.epoch]), Y=np.array([loss/self.batch]),
                                                                 opts={'xlabel': 'epochs', 'ylabel': loss_name, 'title': loss_name})
                else:
                    self.viz.line(X=np.array([self.epoch]), Y=np.array([loss/self.batch]), win=self.loss_windows[loss_name], update='append')
                # Reset losses for next epoch
                self.losses[loss_name] = 0.0
            self.epoch += 1
            self.batch = 1
            sys.stdout.write('\n')
        else:
            self.batch += 1
class ReplayBuffer():
    """History buffer of previously generated samples.

    Once full, each pushed sample is, with probability 0.5, swapped for a
    randomly chosen stored one — the discriminator then sees a mix of new
    and historical fakes, which stabilises GAN training.
    """

    def __init__(self, max_size=50):
        assert (max_size > 0), 'Empty buffer or trying to create a black hole. Be careful.'
        self.max_size = max_size
        self.data = []

    def push_and_pop(self, data):
        """Insert each sample of the batch *data*; return an equally sized
        batch mixing fresh and buffered samples."""
        returned = []
        for sample in data.data:
            sample = torch.unsqueeze(sample, 0)
            if len(self.data) < self.max_size:
                # Buffer still filling: store and return the new sample.
                self.data.append(sample)
                returned.append(sample)
            elif random.uniform(0, 1) > 0.5:
                # Swap: hand back an old sample, keep the new one instead.
                idx = random.randint(0, self.max_size - 1)
                returned.append(self.data[idx].clone())
                self.data[idx] = sample
            else:
                returned.append(sample)
        return Variable(torch.cat(returned))
class LambdaLR():
    """Linear learning-rate decay schedule.

    The multiplier stays 1.0 until ``decay_start_epoch`` and then decays
    linearly, reaching 0.0 at ``n_epochs``.  ``offset`` shifts the epoch
    counter (useful when resuming training).
    """

    def __init__(self, n_epochs, offset, decay_start_epoch):
        assert ((n_epochs - decay_start_epoch) > 0), "Decay must start before the training session ends!"
        self.n_epochs = n_epochs
        self.offset = offset
        self.decay_start_epoch = decay_start_epoch

    def step(self, epoch):
        """Return the LR multiplier for *epoch* (shifted by ``offset``)."""
        past_decay = epoch + self.offset - self.decay_start_epoch
        decay_span = self.n_epochs - self.decay_start_epoch
        return 1.0 - max(0, past_decay) / decay_span
def weights_init_normal_(m):
    """Per-module initializer for ``net.apply(weights_init_normal_)``.

    Conv weights ~ N(0, 0.02); BatchNorm2d weights ~ N(1, 0.02) with zero
    bias — the DCGAN/CycleGAN scheme.

    Fix: ``torch.nn.init.normal`` / ``constant`` were deprecated aliases
    that have been removed from modern PyTorch (AttributeError); the
    in-place ``normal_`` / ``constant_`` variants are the supported API.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm2d') != -1:
        torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
        torch.nn.init.constant_(m.bias.data, 0.0)
def weights_init_normal(net):
    """He-style initialisation applied to every module of *net*.

    Conv/ConvTranspose (2d and 3d) weights ~ N(0, sqrt(2/n)) where n is
    the kernel fan-out; BatchNorm gets unit weight and zero bias; Linear
    gets N(0, 0.01) weights and zero bias.
    """
    for module in net.modules():
        if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)):
            fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
            module.weight.data.normal_(0, math.sqrt(2. / fan_out))
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, (nn.Conv3d, nn.ConvTranspose3d)):
            fan_out = (module.kernel_size[0] * module.kernel_size[1]
                       * module.kernel_size[2] * module.out_channels)
            module.weight.data.normal_(0, math.sqrt(2. / fan_out))
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, (nn.BatchNorm2d, nn.BatchNorm3d)):
            module.weight.data.fill_(1)
            module.bias.data.zero_()
        elif isinstance(module, nn.Linear):
            module.weight.data.normal_(0, 0.01)
            module.bias.data.zero_()
def gradient_penalty(critic, real, fake, type='2d', device="cpu"):
    """WGAN-GP gradient penalty (Gulrajani et al., 2017).

    A random point is sampled on the line between each *real*/*fake* pair
    (one interpolation factor per sample), and the critic is penalised
    where the gradient norm at that point deviates from 1.  ``type``
    selects the input layout: '2d' for NCHW, '3d' for NCDHW.
    """
    if type == '2d':
        batch, c, h, w = real.shape
        alpha = torch.rand((batch, 1, 1, 1)).repeat(1, c, h, w).to(device)
    elif type == '3d':
        batch, c, d, h, w = real.shape
        alpha = torch.rand((batch, 1, 1, 1, 1)).repeat(1, c, d, h, w).to(device)
    interpolated_images = real * alpha + fake * (1 - alpha)
    # Calculate critic scores
    mixed_scores = critic(interpolated_images)
    # Per-sample gradient of the scores w.r.t. the interpolated images;
    # create_graph=True keeps it differentiable so the penalty can train.
    gradient = torch.autograd.grad(
        inputs=interpolated_images,
        outputs=mixed_scores,
        grad_outputs=torch.ones_like(mixed_scores),
        create_graph=True,
        retain_graph=True,
    )[0]
    flat = gradient.view(gradient.shape[0], -1)
    # Two-sided penalty: mean over the batch of (||grad||_2 - 1)^2.
    return torch.mean((flat.norm(2, dim=1) - 1) ** 2)
993,968 | 4182a2e71f8d328d34e4d2269dcd556fff85555f | ###############################################################################
# Euler 219 - Skew-cost Coding
# Kelvin Blaser 2014.12.31 Happy New Years! Good riddance 2014.
#
# Think about a minimum cost coding of size n-1. To get a prefix-free coding of
# length n, pick one of the strings and add a 0 and a 1 to it. If you choose
# the string of smallest cost, then this will be the minimum cost coding of size
# n.
#
# The algorithm is to start with the min-cost coding of size 2, then increase
# the size by adding one string. Only need to keep track of the cost of each
# string, since the actual string doesn't matter, just the total cost. I will
# keep track of how many strings of cost x I have, keeping the memory
# requirement O(log n). For each step, just subtract one from the minimum cost
# pool of strings and add one to min_cost + cost_zero and add one to
# min_cost + cost_one.
###############################################################################
import math
from fractions import gcd

import scipy as sp
def minSkewCost(N, cost_zero=1, cost_one=4):
    """Total cost of the cheapest prefix-free code with *N* codewords,
    where a '0' digit costs *cost_zero* and a '1' digit costs *cost_one*
    (Project Euler 219).

    Greedy construction: the optimal size-(k+1) code is obtained from the
    optimal size-k code by splitting a cheapest codeword w into w+'0' and
    w+'1'.  Only a histogram of codeword costs is tracked, so memory stays
    O(log N * max_cost).

    Fixes: replaced the Python-2-only ``0L``/``xrange``, the dependency on
    ``fractions.gcd`` (removed in Python 3.9) and the scipy aliases
    ``sp.zeros``/``sp.log`` (removed from modern SciPy).  The result is now
    rescaled by the gcd of the digit costs, so non-coprime costs return the
    true total instead of a reduced value (coprime costs are unaffected).
    """
    # Work in units of g = gcd(cost_zero, cost_one); Euclid's algorithm is
    # inlined so no gcd import is required.
    g, b = cost_zero, cost_one
    while b:
        g, b = b, g % b
    cost_zero //= g
    cost_one //= g
    # strings[c] = number of codewords of (reduced) cost c.  A codeword is
    # never longer than ~log2(N) digits, which bounds the largest cost.
    size = (int(math.log(N) / math.log(2)) + 1) * max(cost_zero, cost_one)
    strings = [0] * size
    strings[cost_zero] = 1
    strings[cost_one] = 1
    min_cost = min(cost_zero, cost_one)
    n_strings = 2
    while n_strings < N:
        # Advance to the cheapest cost class that still has codewords.
        while strings[min_cost] == 0:
            min_cost += 1
        # Split as many minimal-cost codewords as still needed, at once.
        to_move = min(strings[min_cost], N - n_strings)
        strings[min_cost] -= to_move
        strings[min_cost + cost_one] += to_move
        strings[min_cost + cost_zero] += to_move
        n_strings += to_move
    # Undo the gcd scaling when summing the total cost.
    return g * sum(c * count for c, count in enumerate(strings))
if __name__=='__main__':
    # Parenthesised print calls are valid under both Python 2 and Python 3;
    # the bare print statements used previously are a syntax error on Py3.
    print(minSkewCost(6,1,4))
    print(minSkewCost(10,1,4))
    print(minSkewCost(10**9,1,4))
993,969 | a1134167f3b900eb92f8219cc2cfa990d782dbfe | # Given an array of integers nums sorted in ascending order, find the starting and ending position of a given target value.
# If target is not found in the array, return [-1, -1].
# Follow up: Could you write an algorithm with O(log n) runtime complexity?
# Example 1:
# Input: nums = [5,7,7,8,8,10], target = 8
# Output: [3,4]
# Leetcode 34: https://leetcode.com/problems/find-first-and-last-position-of-element-in-sorted-array/
# Difficulty: Medium
# Solution: Run two lower-bounded binary searches, one for the target and one for the successor of the target
def searchRange(self, nums: List[int], target: int) -> List[int]:
    """Return [first, last] indices of *target* in sorted *nums*, or [-1, -1].

    Two lower-bound binary searches: one for *target* (first occurrence)
    and one for *target* + 1, whose result minus one is the last
    occurrence.  Runs in O(log n) time and O(1) extra space.
    """
    if not nums:
        return [-1, -1]

    def lower_bound(value):
        # Smallest index whose element is >= value (len(nums) if none):
        # shrink towards the leftmost qualifying position.
        lo, hi = 0, len(nums) - 1
        while lo <= hi:
            mid = lo + (hi - lo) // 2
            if nums[mid] < value:
                lo = mid + 1
            else:
                hi = mid - 1
        return lo

    first = lower_bound(target)
    last = lower_bound(target + 1) - 1
    # lower_bound returns the next larger element's index when target is
    # absent, so verify before reporting a hit.
    if first < len(nums) and nums[first] == target:
        return [first, last]
    return [-1, -1]
# Time Complexity: O(log n)
# Space Complexity: O(1) extra space (only index variables; the input list is not copied)
|
993,970 | 53770160b3b8b7742aa39d57be85850f7d97610e | from django.conf import settings
from django.contrib.staticfiles.finders import find
from django.contrib.staticfiles.storage import staticfiles_storage, StaticFilesStorage, HashedFilesMixin
from django.utils.safestring import mark_safe
class InlineStaticService:
    """Resolve a static asset's text content for inlining into templates."""

    def get_inline_static(self, name: str) -> str:
        # In DEBUG the file is read straight from the app static dirs so
        # edits appear without running collectstatic; otherwise it comes
        # from the configured staticfiles storage.  The result is marked
        # safe for direct template interpolation.
        return mark_safe(self._from_app_directory(name)
                         if settings.DEBUG
                         else self._from_staticfiles_storage(name))

    def _from_app_directory(self, name):
        # find() locates the asset via the staticfiles finders.
        with open(find(name), 'r') as static_file:
            return static_file.read()

    def _from_staticfiles_storage(self, name):
        # Hashed (manifest) storages store files under a hashed name, so
        # translate the logical name first.
        path = staticfiles_storage.stored_name(name) if self._is_hashed(staticfiles_storage) else name
        # NOTE(review): returns None implicitly when the asset is missing,
        # so mark_safe would render the string 'None' — confirm intended.
        if staticfiles_storage.exists(path):
            with staticfiles_storage.open(path, 'r') as static_file:
                return static_file.read()

    def _is_hashed(self, storage: StaticFilesStorage) -> bool:
        # Manifest/cached storages mix in HashedFilesMixin.
        return isinstance(storage, HashedFilesMixin)
|
993,971 | d4141f52479300a8017e3499ffd3281f6a3235f3 | # Generated by Django 2.2.9 on 2020-05-21 07:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Make notificacion.grupo nullable with SET_NULL, so deleting an
    auth.Group orphans its notifications instead of deleting them."""

    dependencies = [
        ('app', '0004_auto_20200520_1753'),
    ]

    operations = [
        migrations.AlterField(
            model_name='notificacion',
            name='grupo',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='auth.Group'),
        ),
    ]
|
993,972 | 7444e75277332b5acca5a04da91ec718bf5dfcf6 | # spiral.py
# COMP9444, CSE, UNSW
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
class PolarNet(torch.nn.Module):
    """One-hidden-layer classifier that first maps Cartesian (x, y) inputs
    to polar coordinates (r, a), which makes the two-spirals task much
    easier to separate.

    The post-tanh hidden activations are cached in ``self.tan_h1`` so that
    ``graph_hidden`` can visualise individual hidden nodes.
    """

    def __init__(self, num_hid):
        super(PolarNet, self).__init__()
        self.in_to_hid = nn.Linear(in_features=2, out_features=num_hid, bias=True)
        self.hid_to_out = nn.Linear(in_features=num_hid, out_features=1, bias=True)
        self.tan_h1 = None  # populated by forward()

    def forward(self, input):
        # Cartesian -> polar: radius and angle of each input point.
        x, y = input[:, 0], input[:, 1]
        radius = torch.sqrt(x ** 2 + y ** 2).reshape(-1, 1)
        angle = torch.atan2(y, x).unsqueeze(1)
        polar = torch.cat((radius, angle), -1)
        hidden = torch.tanh(self.in_to_hid(polar))
        self.tan_h1 = hidden
        return torch.sigmoid(self.hid_to_out(hidden))
class RawNet(torch.nn.Module):
    """Two-hidden-layer tanh network operating on raw (x, y) coordinates.

    Post-tanh activations of both hidden layers are cached in
    ``self.tan_h1`` / ``self.tan_h2`` for visualisation by ``graph_hidden``.
    """

    def __init__(self, num_hid):
        super(RawNet, self).__init__()
        self.in_to_hid = nn.Linear(in_features=2, out_features=num_hid, bias=True)
        self.hid_to_hid = nn.Linear(in_features=num_hid, out_features=num_hid, bias=True)
        self.hid_to_out = nn.Linear(in_features=num_hid, out_features=1, bias=True)
        self.tan_h1 = None  # populated by forward()
        self.tan_h2 = None  # populated by forward()

    def forward(self, input):
        first = torch.tanh(self.in_to_hid(input))
        self.tan_h1 = first
        second = torch.tanh(self.hid_to_hid(first))
        self.tan_h2 = second
        return torch.sigmoid(self.hid_to_out(second))
def graph_hidden(net, layer, node):
    """Plot the activation sign of one hidden *node* in hidden *layer*
    (1 or 2) of *net* over a dense grid covering the input domain.

    Relies on the net's forward() caching post-tanh activations in
    ``tan_h1`` / ``tan_h2`` as a side effect.
    """
    # 1D coordinate axes spanning the spiral data's range.
    xrange = torch.arange(start=-7,end=7.1,step=0.01,dtype=torch.float32)
    yrange = torch.arange(start=-6.6,end=6.7,step=0.01,dtype=torch.float32)
    # All (x, y) grid combinations, flattened with y varying slowest.
    xcoord = xrange.repeat(yrange.size()[0])
    ycoord = torch.repeat_interleave(yrange, xrange.size()[0], dim=0)
    grid = torch.cat((xcoord.unsqueeze(1),ycoord.unsqueeze(1)),1)
    with torch.no_grad(): # suppress updating of gradients
        net.eval() # toggle batch norm, dropout
        # Forward pass populates net.tan_h1 / net.tan_h2 as a side effect.
        output = net(grid)
        #print(net)
        if layer == 1:
            #print('net',net)
            # Binarise the node's activation and reshape back to the grid
            # (rows = y values, columns = x values) for pcolormesh.
            pred = (net.tan_h1[:,node]>=0).float()
            plt.clf()
            plt.pcolormesh(xrange,yrange,pred.cpu().view(yrange.size()[0],xrange.size()[0]), cmap='Wistia')
        if layer == 2:
            #print('net1',net)
            pred = (net.tan_h2[:,node]>=0).float()
            plt.clf()
            plt.pcolormesh(xrange,yrange,pred.cpu().view(yrange.size()[0],xrange.size()[0]), cmap='Wistia')
|
993,973 | 18e490b935de3d312b1ccc8fc5c37ad4a4545aba | #!/usr/bin/python
# delete_versions.py
import argparse
from blackduck import Client
from blackduck.Client import HubSession
from blackduck.Authentication import BearerAuth, CookieAuth
import csv
import logging
import sys
logging.basicConfig(
level=logging.INFO,
format="[%(asctime)s] {%(module)s:%(lineno)d} %(levelname)s - %(message)s"
)
def check_for_columns(column_names, fieldnames):
    """Abort unless every required column is present in the CSV header.

    Prints an error naming the first missing column and exits with status
    -1; returns None when all columns are found.
    """
    for required in column_names:
        if required not in fieldnames:
            print("Error, input CSV file does not have required column", required)
            sys.exit(-1)
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    # CLI: deletes (or dry-run lists) Black Duck Hub project versions
    # enumerated in a CSV export.
    parser = argparse.ArgumentParser(description="Delete project versions from Hub")
    # must specify authentication details (i.e. token file or username/password) on command line
    parser.add_argument('--base-url', dest='base_url', default=None,
                        help="Hub server BASE_URL e.g. https://example.com")
    parser.add_argument('--token-file', dest='token_file', default=None, help="File containing access token")
    parser.add_argument('--username', dest='username', default=None, help="Hub server USERNAME")
    parser.add_argument('--password', dest='password', default=None, help="Hub server PASSWORD")
    parser.add_argument('--one', dest='one', action='store_true', default=None, help="Exit after processing one row")
    group1 = parser.add_argument_group('required arguments')
    group1.add_argument('--input', dest='csv_file_input', required=True, help="Input CSV file")
    group1.add_argument('--mode', dest='mode', required=True, help="One of list, delete")
    args = parser.parse_args()
    verify = False  # TLS certificate verification
    session = HubSession(args.base_url, timeout=15.0, retries=3, verify=verify)
    # De-tangle the possibilities of specifying credentials
    if args.token_file:
        tf = open(args.token_file, 'r')
        access_token = tf.readline().strip()
        auth = BearerAuth(session, access_token)
    elif args.username and args.password:
        auth = CookieAuth(session, args.username, args.password)
    else:
        raise SystemError("Authentication credentials not specified")
    bd = Client(base_url=args.base_url, session=session, auth=auth, verify=verify)
    if args.mode not in ['list', 'delete']:
        print("Error: must specify --mode to be one of: list or delete")
        sys.exit(-1)
    input_file = open(args.csv_file_input, 'r')
    reader = csv.DictReader(input_file)
    logging.info("csv fieldnames:")
    logging.info(reader.fieldnames)
    # Fail fast if the CSV was not produced by the expected export tool.
    check_for_columns([
        'projectId',
        'project',
        'versionId',
        'version',
        'phase',
        'createdAt',
        'sumScanSize',
        'sumScanSizeReadable',
        'latestSummary',
        'bom',
        'events',
        'latestScanEvent',
        'rescanned',
        'latestNotableActivity',
        'notableActivityEvents'], reader.fieldnames)
    # Deletion is irreversible, so require explicit interactive confirmation.
    if args.mode == 'list':
        print("Listing all rows in CSV to be processed for deletion")
    elif args.mode == 'delete':
        print("Deleting ALL rows in CSV")
        print("WARNING: deletion cannot be undone!")
        confirm = input("Do you want to proceed (type 'yes' to DELETE)? ")
        if confirm != "yes":
            print("Exiting")
            sys.exit(0)
    else:
        print("Error: unknown mode: " + args.mode)
        sys.exit(-1)
    num_deleted = 0
    row_num = 0 # csv module will skip header row automatically
    for row in reader:
        row_num += 1
        print("Row {} ".format(row_num), end='')
        # REST endpoint for this project version.
        pv_url = args.base_url + "/api/projects/" + row['projectId'] + "/versions/" + row['versionId']
        if args.mode == 'delete':
            print("- deleting project:{} version:{}".format(row['project'], row['version']))
            response = bd.session.delete(pv_url)
            if response.status_code == 204:
                # success
                num_deleted += 1
            elif response.status_code == 404:
                print(" Error: response status code returned: 404 Not Found")
                print(" Does the project version still exist?")
                print(" " + pv_url)
            else:
                print(" Error: response status code returned: " + str(response.status_code))
                print(" An unexpected error occurred. Check the project version?")
                print(" " + pv_url)
        else:
            print("[DRY-RUN] project:{} version:{}".format(row['project'], row['version']))
        if args.one:
            print("Exiting after processing one row")
            break
    if args.mode == 'delete' and num_deleted > 0:
        print("Deleted", num_deleted, "project versions.")
        print("Note storage usage is not updated until unmapped scans are removed.")
|
993,974 | 817ecc738291a1129d72ad5dffb59c8908ef5d60 | import os, sys, re
from datetime import date
from optparse import OptionParser
from xml.dom.minidom import parse, parseString
import xml.etree.ElementTree as ET
import markup
from markup import oneliner
FILENAME = "filterstats.html"
def write_new_file() :
    """Create FILENAME from scratch: an HTML page holding an empty
    efficiency table (header row only), styled with the LHCb stylesheet."""
    title = "Filtered Simulation efficiency statistics"
    header = ""
    footer = "TODO: automatic extraction of event-type and generation statistics from xml file?????"
    table = ( "Production", "Events in", "Events out", "Efficiency" )
    page = markup.page()
    page.init( title=title, header=header, footer=footer, css= "http://lhcb-release-area.web.cern.ch/LHCb-release-area/DOC/css/lhcb.css")
    page.h2( "Filtered Simulation efficiency statistics ( by production )")
    page.br()
    page.table( border="1" )
    #table headers
    page.tr()
    page.th( table )
    page.tr.close()
    page.table.close()
    page.br()
    #print page
    # Overwrite FILENAME with the rendered markup.
    f = open(FILENAME,'w')
    f.write(page.str())
    f.close()
    return
def append_to_file( efflist, lines ) :
    """Insert one table row per efficiency dict in *efflist* into the HTML
    *lines* (as read from FILENAME) and rewrite the file in place.

    Rows are spliced 3 lines before '</body>' — presumably just inside the
    closing table tags emitted by write_new_file; TODO confirm the offset
    against that layout.
    """
    v = lines.index('</body>\n')
    # oneliner.tr("") renders '<tr></tr>'; slice it into '<tr>' / '</tr>'.
    newrow = oneliner.tr("")
    newrows = [ newrow[:4]+"\n" ]
    for d in efflist :
        newtuple = ( d ["prod"], d["in"], d["out"], d["eff"] )
        newrows.append( oneliner.td( newtuple )+"\n" )
    newrows += [ newrow[4:]+'\n' ]
    newlines = lines[ : v-3 ] + newrows + lines [v-3 : ]
    htmlfile = open(FILENAME,'w')
    for line in newlines :
        htmlfile.write(line)
    htmlfile.close()
if __name__=="__main__" :
    # Python 2 script: parse production-summary XML files given as
    # positional args and append their efficiencies to the HTML table.
    opt = OptionParser()
    opt.add_option("-o", "--outfile",
                   action="store", type="string", dest="files",
                   default=None,
                   metavar='<OUTFILE>',
                   help="output file [default : NONE ]")
    opt.add_option("-r", "--regen",
                   action="store_true", dest="regen",
                   default=False,
                   metavar='<REGEN>',
                   help="regen file [default : NONE ]")
    (options, args) = opt.parse_args()
    if len(args) == 0 :
        print "no xml files ! "
    else :
        print "xml files to process", args
    files = args
    print files
    effs = []
    for f in files :
        #xml file to process
        xf = ET.parse(f)
        #base node
        root = xf.getroot()
        # Sum input events over all <input>/<file> entries; the production
        # id is parsed from a path component of the (last) file name.
        inevts = 0
        for ff in root.find("input").findall("file") :
            #print ff, ff.tag, ff.attrib, ff.text
            inevts += int(ff.text)
            prodid1 = int(ff.attrib["name"].split("/")[5])
        outevts = 0
        for ff in root.find("output").findall("file") :
            #print ff, ff.tag, ff.attrib, ff.text
            outevts += int(ff.text)
            prodid2 = int(ff.attrib["name"].split("_")[0][4:])
        eff = float(outevts)/float(inevts)
        from math import sqrt
        # Error propagation on the efficiency ratio from the two counts.
        err = eff * sqrt( 1.0/float(inevts) + 1.0/(float(outevts) ) )
        print f, " in %d out %d with efficiency %g +/- %g"%(inevts,outevts,eff,err)
        effdict = { "prod" : prodid2, "in" : inevts , "out": outevts, "eff" : eff , "err" : err }
        effs.append( effdict )
    try :
        # --regen forces a fresh file even when one already exists.
        if options.regen :
            raise ValueError
        htmlfile = open(FILENAME,'r')
        htmlfile.close()
    except :
        print "no file, generate new file"
        write_new_file()
    htmlfile = open(FILENAME,'r')
    lines = htmlfile.readlines()
    htmlfile.close()
    append_to_file ( effs, lines )
|
993,975 | 142e30f7eda097b0ca2fd8f738b767744dc58c1d | db_path ="sqlite:////home/mdiannna/StartupWeekendBucharest/BookMyEvent/database.db" |
993,976 | 41c451369c5ae04f4ebe16326ba41bc43b616fe4 | #!/usr/bin/env python
import math
import motor_interface
import numpy as np
import rospy
import time
from eraserbot.srv import ImageSrv, ImageSrvResponse, StateSrv, StateSrvResponse
from geometry_msgs.msg import TwistStamped, Vector3
class Controller():
    """Closed-loop base controller: commands wheel speeds through
    motor_interface.Robot and reads pose feedback from the ROS
    'current_state' service (state.x/y in meters, state.z in radians —
    TODO confirm units against the service definition)."""

    def __init__(self):
        #rospy.init_node("Controller")
        self.bot = motor_interface.Robot(1, 2)
        # Block until the pose service is up, then keep a proxy to it.
        rospy.wait_for_service('current_state')
        self.state_service = rospy.ServiceProxy('current_state', StateSrv)

    def closed_move_straight(self, dist, maxSpeed):
        # dist: Desired distance in meters
        # Moves [dist] meters forwards/backwards in a straight line
        # Closed loop function
        xi = self.state_service().state.x # get the original x position
        yi = self.state_service().state.y # get the original y position
        # initialize these things
        x = xi # x position to be updated
        y = yi # y position to be updated
        travel = 0 # distance traveled from original position
        mvel = maxSpeed # motor speed
        while (abs(travel - abs(dist)) > 0.01): # while distance is >1 cm from the desired distance
            if (abs(travel - abs(dist)) < 0.05): # slow down when <5 cm away
                mvel = maxSpeed / 2
            if (travel < abs(dist)):
                self.bot.set_speed(mvel, mvel) # assumes will drive in straight line
            else:
                self.bot.set_speed(-1*mvel,-1*mvel) # drive backwards for negative distances
            # get updated position and travel distance
            x = self.state_service().state.x
            y = self.state_service().state.y
            travel = math.sqrt((x - xi)**2 + (y - yi)**2)
            print(travel)
        self.bot.turnOffMotors() # when reached distance, stop moving
        print("done moving")

    def closed_tank_pivot(self, theta, maxSpeed):
        # theta: Desired angle position in radians
        # Rotates in place until the heading is within 0.04 rad of theta
        # Closed loop function
        # THIS TAKES DIFFERENT INPUTS THAN open_tank_pivot
        # initialize these things
        t = self.state_service().state.z # current heading (rad), updated below
        dist = (t - theta) % (2*math.pi) # angular offset, positive is too far counterclockwise
        mvel = maxSpeed # motor speed
        while (abs(dist) > 0.04): # while >0.04 rad from desired angular position
            if (abs(dist) < 0.5): # slow down when <0.5 rad away
                mvel = maxSpeed / 2
            if (dist > 0): # if too far counterclockwise
                self.bot.set_speed(mvel, -1*mvel) # turn clockwise
            else: # if too far clockwise
                self.bot.set_speed(-1*mvel, mvel) # turn counterclockwise
            t = self.state_service().state.z # get updated position
            dist = ((t - theta + math.pi) % (2*math.pi)) - math.pi # recalculate angular offset, wrapped to (-pi, pi]
            print(self.state_service().state.z)
        self.bot.turnOffMotors() # when reached distance, stop moving
|
993,977 | 0ba42df509f06a8454628656a5da29cb6b2d6d9c | # -*- coding: utf-8 -*-
"""
Created on Sun Jul 3 21:47:08 2016
@author: nam
"""
import mmRef;
class TrainService():
    """One timetabled train service on a line, tracking how many passengers
    of each profile it carries.

    Passenger tables are rows of ``[count, profileName, ...]``; lookups
    match on the profile name in column 1 and return the count in column 0.
    """

    def __init__(self, trainLine, servcName, newTimetable, initPaxOnTrain, pcPaxRem, paxRem):
        self.name = servcName
        self.line = trainLine
        self.timetable = newTimetable
        # Initial on-board load per profile.
        self.initPax = initPaxOnTrain
        # Per-stop percentage of passengers remaining on board after
        # (successful and unsuccessful) alighting.  It does not include
        # passengers newly boarded or failing to board.
        self.percentPaxRemOnBoard = pcPaxRem
        self.paxRemOnBoard = paxRem

    def getInitPax(self, paxProfile):
        """Initial passenger count for *paxProfile* (0 when unknown)."""
        return next((row[0] for row in self.initPax if row[1] == paxProfile), 0)

    def getPcPaxRemOnBoard(self, stopName, paxProfile):
        """Percentage of *paxProfile* passengers staying on board at
        *stopName*; 0 when the stop or the profile is unknown."""
        if stopName not in self.percentPaxRemOnBoard:
            return 0
        rows = self.percentPaxRemOnBoard[stopName]
        return next((row[0] for row in rows if row[1] == paxProfile), 0)

    def getPaxRemOnBoard(self, paxProfile):
        """Current remaining-on-board count for *paxProfile* (0 if unknown)."""
        return next((row[0] for row in self.paxRemOnBoard if row[1] == paxProfile), 0)

    def getCopyPaxRemOnBoard(self):
        """Row-by-row copy of ``paxRemOnBoard`` (each row is a new list)."""
        return [list(row) for row in self.paxRemOnBoard]

    def updatePaxRemOnBoard(self, slimRemOnBoard, defRemOnBoard, fatRemOnBoard):
        """Replace the remaining-on-board counts of the three known
        profiles and swap in the updated table."""
        updated = self.getCopyPaxRemOnBoard()
        for row in updated:
            profile = row[1]
            if profile == mmRef.Profiles.slimFastActiv.name:
                row[0] = slimRemOnBoard
            elif profile == mmRef.Profiles.DefaultProfile.name:
                row[0] = defRemOnBoard
            elif profile == mmRef.Profiles.fatSlowLazy.name:
                row[0] = fatRemOnBoard
        self.paxRemOnBoard = updated
993,978 | 7f0d733b8de1756e9c863afe04c6af053015c290 | # Generated by Django 2.1.5 on 2019-01-23 06:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the Literacy table: per-region, per-year literacy counts
    broken down by area (all/rural/urban) and by sex."""

    dependencies = [
        ('mapdata', '0002_auto_20190123_0010'),
    ]

    operations = [
        migrations.CreateModel(
            name='Literacy',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('area', models.CharField(choices=[('all', 'All'), ('rural', 'Rural'), ('urban', 'Urban')], max_length=10)),
                ('year', models.SmallIntegerField()),
                ('male', models.SmallIntegerField()),
                ('female', models.SmallIntegerField()),
                ('total', models.SmallIntegerField()),
                # Rows are dropped when their parent Region is deleted.
                ('region', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mapdata.Region')),
            ],
        ),
    ]
|
993,979 | 0771ebf65935628b754f1cf4b79ce1bacb21bb58 | """
Write a Python function that takes a number as a parameter and check the
number is prime or not.
Note : A prime number (or a prime) is a natural number greater than 1 and that
has no positive divisors other than 1 and itself.
"""
def func(nums):
    """Return a message saying whether *nums* is prime.

    Fixes: 0, 1 and negative inputs were previously reported as prime
    because the trial-division loop body never executed for them; and the
    n < 100 / n >= 100 split is replaced by a single trial division up to
    sqrt(nums), which is both correct and faster (any composite number has
    a divisor no larger than its square root).
    """
    # Primes are natural numbers greater than 1 by definition.
    if nums < 2:
        return f'{nums} is not a prime number !!!'
    divisor = 2
    while divisor * divisor <= nums:
        if nums % divisor == 0:
            return f'{nums} is not a prime number !!!'
        divisor += 1
    return f'{nums} is a prime number !!!'
# Smoke checks covering the edge cases described in the module docstring.
print(func(0))
print(func(2))
print(func(7))
print(func(10))
print(func(13))
|
993,980 | bce27e0073f93bd01c44788a890e57ee1a616c37 | from hashlib import md5
from django.utils.functional import cached_property
from web.models.post import Post
from django.core.paginator import Paginator
from django.shortcuts import render
from django.core.cache import cache
from web.models.comment import Comment
from web.helpers.composer import get_posts_by_cid
from django.views.decorators.cache import cache_page
# Let the paginator read the total row count from the cache instead of
# issuing a COUNT(*) over the whole table on every request.
@cached_property
def count(self):
    """Cached replacement for ``Paginator.count`` (monkey-patched below)."""
    # Several places paginate (the homepage post list, the per-video
    # comment list); keying the cache on the md5 of the rendered SQL keeps
    # the counts of different querysets from colliding.
    sql, params = self.object_list.query.sql_with_params()
    sql = sql % params
    cache_key = md5(sql.encode('utf-8')).hexdigest()
    # print(cache_key)
    row_count = cache.get(cache_key)
    if not row_count:
        row_count = self.object_list.count()
        cache.set(cache_key, row_count)
    return row_count
# Monkey-patch Django's Paginator so every paginator uses the cached count.
Paginator.count = count
# @cache_page(60 * 15)
def show_list(request):
    """Homepage: render the first page (40 items) of the most-played posts."""
    post_list = Post.objects.order_by('-play_counts')
    paginator = Paginator(post_list, 40)
    posts = paginator.page(1)
    for post in posts:
        # print(post.get_composers())
        # Attach composers up front so the template doesn't query per post.
        post.composers = post.get_composers()
    return render(request, 'post_list.html', {'posts': posts})
def post_detail(request, pid):
    """Video detail page: the post, its composers, and six more posts by
    the first composer."""
    post = Post.get(pid=pid)
    post.composers = post.get_composers()
    first_composer = post.composers[0]
    first_composer.posts = get_posts_by_cid(first_composer.cid, 6)
    # locals() exposes post/first_composer to the template by name —
    # renaming any local in this function would break the template.
    return render(request, 'post.html', locals())
def get_comments(request):
    """AJAX endpoint: one 10-item page of a post's comments, newest first.

    Query params: ``id`` (post id) and ``page`` (1-based page number).
    """
    pid = request.GET.get('id')
    # print(pid)
    page = request.GET.get('page')
    # print(page)
    comment_list = Comment.objects.filter(pid=pid).order_by('-created_at')
    paginator = Paginator(comment_list, 10)
    comments = paginator.page(page)
    # NOTE(review): leftover debug prints — consider removing.
    print(comments)
    print(comment_list)
    # locals() hands comments/comment_list to the template by name.
    return render(request, 'comments.html', locals())
|
993,981 | d7e8656bb6a9e22298e82020474010a20b1adf2d | # Generated by Django 2.2 on 2020-03-22 15:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('session', '0020_auto_20200314_1603'),
]
operations = [
migrations.AddField(
model_name='gig',
name='type',
field=models.CharField(choices=[('Project', 'Project'), ('Essay', 'Essay'), ('Home work', 'Home work')], default='Home work', max_length=255),
),
]
|
993,982 | b5bf4d45ec532819db9b1bc96aafec58907e74a1 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 14 14:33:44 2020
@author: wenninger
"""
from argparse import ArgumentParser
import matplotlib.pyplot as plt
import numpy as np
import scipy.constants as const
import os
import sys
sys.path.insert(0, '../Helper')
from Cooper_Pair_Tunnelling_Current import cooperPairTunnellingCurrent,magneticFluxQuantum
from Fundamental_BCS_Equations import cooperPair_Binding_Energy_over_T,cooperPair_Binding_Energy_0K
from plotxy import newfig,pltsettings,lbl,plot
# Command-line interface: only the destination folder for the generated plots.
parser = ArgumentParser()
parser.add_argument('-f', '--folder', action='store',default = 'Default_Folder', help='The folder in which the result is stored in.')
args = parser.parse_args()
directory = 'Cooper_Pair_Tunnelling_Current_Unit_Test/'+args.folder+'/'
if not os.path.exists(directory):
    os.makedirs(directory)

# --- Plot 1: tunnelling current versus magnetic flux -----------------------
title = newfig('Flux_Dependency')
fluxes = np.arange(0,15,.001) * magneticFluxQuantum  # sweep 0..15 flux quanta
te = 4 #K, The actual temperature
tC = 9.2 # K for Nb
delta = cooperPair_Binding_Energy_over_T(t_over_Tc =te/tC,delta0 = cooperPair_Binding_Energy_0K(tC), tDebye = 276)[0]
rN =10  # normal-state resistance — presumably Ohm; confirm against the helper's docs
current = cooperPairTunnellingCurrent(fluxes,delta,te,rN)
plt.plot(fluxes,current)
pltsettings(save=directory+title,fileformat='.pdf',disp = True,close=True, xlabel=lbl['Wb'],ylabel=lbl['A'],
            xlim=None,ylim=None,title=None,legendColumns=1,skip_legend=True)

# --- Plot 2: tunnelling current versus normal-state resistance -------------
title = newfig('Normal_Resistance_Dependency')
fluxes = 0  # no applied flux for this sweep
te = 4 #K, The actual temperature
delta = cooperPair_Binding_Energy_over_T(t_over_Tc =te/tC,delta0 = cooperPair_Binding_Energy_0K(tC), tDebye = 276)[0] #9.2 K for Nb
rN =np.arange(5,40,.01)
current = cooperPairTunnellingCurrent(fluxes,delta,te,rN)
plt.plot(rN,current)
pltsettings(save=directory+title,fileformat='.pdf',disp = True,close=False, xlabel=lbl['Rn'],ylabel=lbl['A'],
            xlim=None,ylim=None,title=None,legendColumns=1,skip_legend=True)
|
993,983 | 95f95cb7909dc39645281e60db52b9e9201e9b98 | # -*- coding: utf-8 -*-
# Python 2 script: counts, between two input strings, how many characters can
# be matched with identical case (cnt1) and how many additional characters
# match only after flipping case (cnt2).
s = raw_input()
t = raw_input()
import string
lower = string.lowercase
upper = lower.upper()

class mydict(dict):
    # dict that auto-initialises missing keys to 0 (poor-man's defaultdict(int))
    def __missing__(self, key):
        self[key] = 0
        return self[key]

ds = mydict()  # character counts of s
dt = mydict()  # character counts of t
for i in s:
    ds[i] += 1
for i in t:
    dt[i] += 1
cnt1 = 0  # exact-case matches
cnt2 = 0  # case-insensitive-only matches
for key in ds:
    # NOTE: 't' is reused as a temporary here, clobbering the input string
    # read above (already consumed into dt, so harmless — but confusing).
    t = min( ds[key], dt[key] )
    ds[key] -= t
    dt[key] -= t
    cnt1 += t
for key in lower:
    if ds[key] and dt[key.upper()]:
        cnt2 += min( ds[key] , dt[key.upper()] )
for key in upper:
    if ds[key] and dt[key.lower()]:
        cnt2 += min( ds[key] , dt[key.lower()] )
print cnt1, cnt2
|
993,984 | 53b00e3b96be6ce40f8a938dc91481a982b96b45 | import json
import logging
import os
import sys
from Utils import Utils
# Module-level state shared by the functions below.
payment_plans = [] #payment plans data
debts_with_Is_payment_field = [] # debts objects linked to payment plans
debts_with_payment_plans ={} # dic to hold debtId along with payment plans details

logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
log_dir_path = os.getcwd()  # log file is written into the current working directory
def addFieldIsPaymentPlanToDebt(payment_plans, debts):
    """Annotate every debt dict with an 'is_in_payment_plan' flag.

    A debt whose ``id`` matches some plan's ``debt_id`` gets
    ``is_in_payment_plan = True`` and the matching plan is recorded in the
    module-level ``debts_with_payment_plans`` mapping; all other debts get
    ``False``.  Every annotated debt is appended to
    ``debts_with_Is_payment_field``.

    Fixes over the original implementation:
    - the O(len(debts) * len(payment_plans)) nested scan is replaced by a
      single dict lookup per debt;
    - the leftover debug ``print(debt)`` now goes through the module logger.
    """
    # Last plan wins when several plans share a debt_id — same outcome as
    # the original inner loop, which overwrote on each match.
    plans_by_debt_id = {plan['debt_id']: plan for plan in payment_plans}
    for debt in debts:
        plan = plans_by_debt_id.get(debt['id'])
        if plan is not None:
            debt['is_in_payment_plan'] = True
            debts_with_payment_plans[debt['id']] = plan
        else:
            debt['is_in_payment_plan'] = False
        debts_with_Is_payment_field.append(debt)
        logger.debug(debt)
def getDebtswithPaymentPlan():
    """Return the module-level results: (debt_id -> plan mapping, annotated debts list)."""
    return debts_with_payment_plans, debts_with_Is_payment_field
def main(payment_plans, debts):
    """Configure file + console logging, then annotate *debts* against *payment_plans*."""
    # File log: Scripts.log in the current working directory, truncated each run ('w+').
    handler = logging.FileHandler(log_dir_path+'/Scripts.log', 'w+')
    handler.setLevel(logging.INFO)
    handler.setFormatter(formatter)
    # console log
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(formatter)
    # NOTE(review): handlers are added on every call — repeated calls would
    # duplicate log output; confirm main() is invoked only once per process.
    logger.addHandler(handler)
    logger.addHandler(console)
    addFieldIsPaymentPlanToDebt(payment_plans, debts)
if __name__ == "__main__":
    # Intentionally a no-op: this module is imported and driven from Main.py.
    "This controller is going to be used in Main.py"
|
993,985 | 3fb4a522861fca51b2f54d48c9cc9c28120280aa | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: exercise12
Description :
Author : burt
date: 2018/11/7
-------------------------------------------------
Change Activity:
2018/11/7:
-------------------------------------------------
"""
import math
def count_su(x, y):
    """Count the primes in the half-open range [x, y).

    Trial division: a candidate i is prime when no j in [2, sqrt(i)]
    divides it.  Values below 2 are never prime and are skipped — the
    original version wrongly reported 0 and 1 as primes because its inner
    loop was empty for them.

    :param x: lower bound (inclusive)
    :param y: upper bound (exclusive)
    :return: tuple (list of primes, number of primes)
    """
    prime_num = []
    for i in range(max(x, 2), y):  # primes start at 2
        is_prime = True
        for j in range(2, int(math.sqrt(i)) + 1):
            if i % j == 0:
                is_prime = False
                break
        if is_prime:
            prime_num.append(i)
    return prime_num, len(prime_num)
if __name__ == '__main__':
    # Demo: primes in [100, 200); result[0] is the list, result[1] its length.
    result = count_su(100, 200)
    print(result[0], '共有%d个' % (result[1],))
|
993,986 | cdc13ea21e7687b9d16d5f470f28b3f402076c7d | #!/usr/bin/env python
# coding: utf-8
# ### ques1 — while/else: the else branch runs when the loop exits without break.
# In[1]:
counter = 0
while counter < 3:
    print("Inside loop")
    counter = counter + 1
else:
    print("Inside else")

# ### ques5 — length of an input string.
# In[8]:
a=input("enter a string\n")
len(a)  # NOTE(review): result is discarded when run as a script (only echoes in a notebook)

# ### ques6 — count characters with a manual loop.
# In[10]:
a=input("enter a string\n")
count=0
for char in a:
    count+=1
print(count)

# ### ques7 — first two + last two characters, when the string is long enough.
# In[11]:
A=input("ENTER A STRING\n")
if len(A)>2:
    f=A[0:2]+A[-2:]
    print(f)
else:
    print(A)

# ### ques8 — replace later occurrences of the first character with '$'.
# In[12]:
s=input("enter a string\n")
char1=s[0:1]
s1=s.replace(char1,'$')
char1+s1[1:]  # NOTE(review): expression result discarded outside a notebook

# ### ques9 — swap the first characters of two strings.
# In[13]:
r=input("enter the 1st string\n")
s=input("enter the 2nd string\n")
char1=r[0:1]
# NOTE(review): replace() swaps *every* occurrence of the first character,
# not only position 0 — verify that is the intended behaviour.
r=r.replace(r[0:1],s[0:1])
s=s.replace(s[0:1],char1)
print(r,s)

# ### ques10 — append 'ing', or turn a trailing 'ing' into 'ly'.
# (Indentation reconstructed; nesting of the two ifs is ambiguous in the source.)
# In[ ]:
r=input("enter a string\n")
if len(r)>3:
    s=r+"ing"
    print(s)
if r[-3:]=="ing":
    r=r.replace(r[-3:],"ly")
else:
    print(r)
|
993,987 | 35b1096faafbc615f312a4f5ba1096fe7f234aea | #6. Escribe una funci�n de Python que tome una lista de palabras y devuelva la longitud de la m�s larga.
def funcion(lista):
    """Print and return the length of the longest word in *lista*.

    Ties keep the later word, matching the original '<=' comparison.
    Returns 0 for an empty list (the original crashed on ``lista[0]``),
    and now actually returns the length the exercise statement asks for
    (the original only printed it and returned None).
    """
    if not lista:
        return 0
    cadena = lista[0]
    for palabra in lista:
        # '>=' keeps the last of equally long words, like the original '<='
        if len(palabra) >= len(cadena):
            cadena = palabra
    print("La cadena mas larga es:", cadena, len(cadena))
    return len(cadena)
# Demo run for the exercise.
lista = ["hola","ESCOM","teoria","computacional","aaaaaaaaaaaaa"]
print(lista)
funcion(lista)
|
993,988 | 89b243884a9de98afcc9f1d0bec14237ea0dda44 | import common
data = common.gen_rand_list(6, 1, 100)
print data
def msort(l, u):
    """In-place merge sort of the global ``data`` over inclusive indices [l, u].

    The actual sorting happens through merge(); the ``return [data[l]]``
    value in the base case is never used by any caller.
    """
    if l >= u:
        return [data[l]]
    m = int((l + u) / 2)
    msort(l, m)
    msort(m + 1, u)
    merge(l, m, u)
def merge(l, m, u):
    """Merge the sorted runs data[l..m] and data[m+1..u] of the global list in place.

    ``i`` walks the left run and ``j`` the right run; when the right element
    is smaller it is inserted before position i and its (shifted) original
    slot deleted, advancing both cursors.  Stops when the cursors meet or
    the right run is exhausted.
    """
    i = l
    j = m + 1
    #print l, m, u, data[l:m+1], data[m+1: u+1]
    while(i != j and j != u + 1):
        if data[i] >= data[j]:
            # move data[j] in front of data[i]; the insert shifted the source
            # element one slot right, so the post-increment j indexes it.
            data.insert(i, data[j])
            i += 1
            j += 1
            del data[j]
        else:
            i += 1
    #print "i:%d, j:%d" %(i,j), data[l:u + 1]
# Sort the whole random list in place.
msort(0, len(data) - 1)
#print data
|
993,989 | fbe3783103b9593904515e67818404020568ade3 | from django.contrib.auth import login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.sites.shortcuts import get_current_site
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.template.loader import render_to_string
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode
from order.models import Order
from .forms import AccountEditForm, RegistrationForm
from .models import UserBase
from .tokens import account_activation_token
# Create your views here.
@login_required
def account_dashboard(request):
    """
    Dashboard view (list of completed orders)
    """
    # billing_status=True narrows to paid/completed orders of the current user
    user_id = request.user.id
    orders = Order.objects.filter(user_id=user_id, billing_status=True)
    return render(request, 'account/user/dashboard.html', {'orders': orders})
@login_required
def account_edit(request):
    """
    Update/Modify account view

    POST validates and saves the bound form; GET renders the form
    pre-filled from the current user.
    """
    if request.method == 'POST':
        edit_form = AccountEditForm(instance=request.user, data=request.POST)
        if edit_form.is_valid():
            edit_form.save()
        # NOTE(review): no redirect after a successful POST — a browser
        # refresh re-submits the form; confirm whether a redirect is wanted.
    else:
        edit_form = AccountEditForm(instance=request.user)
    return render(request, 'account/user/edit.html', {'edit_form': edit_form})
@login_required
def account_delete(request):
    """
    Delete account view

    Soft delete: the user row is kept but marked inactive, then the
    session is logged out.
    """
    user = UserBase.objects.get(pk=request.user.pk)
    user.is_active = False
    user.save()
    logout(request)
    return redirect('account:delete_confirm')
def account_register(request):
    """
    Account register view

    GET renders the registration form; a valid POST creates an inactive
    user and emails an activation link.  Authenticated users are bounced
    straight to the dashboard.
    """
    if request.user.is_authenticated:
        return redirect('account:dashboard')
    if request.method == 'POST':
        register_form = RegistrationForm(request.POST)
        if register_form.is_valid():
            # return a model object user, then add extra data and save it.
            user = register_form.save(commit=False)  # not save yet
            # user.email = register_form.cleaned_data['email']
            user.set_password(register_form.cleaned_data['password'])
            user.is_active = False  # stays inactive until the emailed link is used
            user.save()
            # Setup email
            current_site = get_current_site(request)
            subject = 'Activate your Account'
            protocol = 'https' if request.is_secure() else 'http'
            message = render_to_string('account/registration/account_activation_email.html', {
                'protocol': protocol,
                'user': user,
                'domain': current_site.domain,
                'uid': urlsafe_base64_encode(force_bytes(user.pk)),
                'token': account_activation_token.make_token(user),
            })
            # NOTE(review): the 'email' kwarg relies on the custom UserBase
            # email_user signature — confirm (Django's default takes
            # subject, message, from_email).
            user.email_user(subject=subject, message=message, email=user.email)
            return render(request, 'account/registration/account_activation_complete.html')
    else:
        register_form = RegistrationForm()
    return render(request, 'account/registration/register.html', {'form': register_form})
def account_activate(request, uidb64, token):
    """
    Account activation

    Decodes the uid from the activation link, verifies the token, and on
    success activates the account and logs the user in.

    Fix: the original except clause referenced ``user.DoesNotExist`` — but
    ``user`` is not bound yet when decoding fails, so the handler itself
    raised NameError instead of falling through to the invalid-activation
    page.  The exception lives on the model class: ``UserBase.DoesNotExist``.
    """
    try:
        uid = force_text(urlsafe_base64_decode(uidb64))
        user = UserBase.objects.get(pk=uid)
    except (TypeError, ValueError, OverflowError, UserBase.DoesNotExist):
        user = None
    if user and account_activation_token.check_token(user, token):
        user.is_active = True
        user.save()
        login(request, user)
        return redirect('account:dashboard')
    else:
        # NOTE(review): path says 'account/register/...' while the rest of
        # this module uses 'account/registration/...' — confirm the template
        # actually lives there.
        return render(request, 'account/register/account_activation_invalid.html')
|
993,990 | 20b9c1570457e181907684785c4d8388d816ec84 | #!/usr/bin/env python
"""Kegboard daemon.
The kegboard daemon is the primary interface between a kegboard devices and a
kegbot system. The process is responsible for several tasks, including:
- discovering kegboards available locally
- connecting to the kegbot core and registering the individual boards
- accumulating data if the kegbot core is offline
The kegboard daemon is compatible with any device that speaks the Kegboard
Serial Protocol. See http://kegbot.org/docs for the complete specification.
The daemon should run on any machine which is attached to kegboard hardware.
The daemon must connect to a Kegbot Core in order to publish data (such as flow
and temperature events). This is accomplished through Redis, which must be
running locally.
"""
from future import standard_library
standard_library.install_aliases()
import queue
import gflags
import serial
import os
import time
from kegbot.util import app
from kegbot.util import util
from kegbot.pycore import common_defs
from kegbot.pycore import kegnet
from kegbot.kegboard import kegboard
FLAGS = gflags.FLAGS
gflags.DEFINE_string('kegboard_device_path', None,
'Name of the single kegboard device to use. If unset, the program '
'will attempt to use all usb serial devices.')
STATUS_CONNECTING = 'connecting'
STATUS_CONNECTED = 'connected'
STATUS_NEED_UPDATE = 'need-update'
class KegboardKegnetClient(kegnet.KegnetClient):
    """Kegnet client that relays relay-output events to a kegboard reader."""

    def __init__(self, reader, addr=None):
        kegnet.KegnetClient.__init__(self, addr)
        self._reader = reader  # kegboard reader used to write commands

    def onSetRelayOutput(self, event):
        """Translate a kegnet relay event into a kegboard SetOutputCommand.

        The output id is parsed from the last character of the event's
        output name; events without a name or with a non-numeric suffix
        are silently ignored.
        """
        self._logger.debug('Responding to relay event: %s' % event)
        if not event.output_name:
            return
        try:
            output_id = int(event.output_name[-1])
        except ValueError:
            return
        if event.output_mode == event.Mode.ENABLED:
            output_mode = 1
        else:
            output_mode = 0
        # TODO(mikey): message.SetValue is lame, why doesn't attr access work as in
        # other places? Fix it.
        message = kegboard.SetOutputCommand()
        message.SetValue('output_id', output_id)
        message.SetValue('output_mode', output_mode)
        self._reader.WriteMessage(message)
class KegboardManagerApp(app.App):
    """Kegboard daemon application.

    Discovers attached kegboards, tracks per-device status and name, and
    translates kegboard messages into kegnet events for the kegbot core.

    Fix: ``add_device`` previously called the bare name ``remove_device``
    when the initial ping failed, which raised NameError at runtime; it now
    calls ``self.remove_device``.
    """

    def __init__(self, name='core'):
        app.App.__init__(self, name)
        self.devices_by_path = {}  # device path -> kegboard.Kegboard
        self.status_by_path = {}   # device path -> STATUS_* constant
        self.name_by_path = {}     # device path -> reported board name ('' until hello)
        self.client = kegnet.KegnetClient()

    def _Setup(self):
        app.App._Setup(self)

    def _MainLoop(self):
        """Service devices until shutdown, sleeping briefly when idle."""
        self._logger.info('Main loop starting.')
        while not self._do_quit:
            self.update_devices()
            if not self.service_devices():
                time.sleep(0.1)  # back off when no messages arrived

    def update_devices(self):
        """Reconcile the set of attached kegboards with the known set."""
        if FLAGS.kegboard_device_path:
            devices = kegboard.find_devices([FLAGS.kegboard_device_path])
        else:
            devices = kegboard.find_devices()
        new_devices = [d for d in devices if d not in list(self.status_by_path.keys())]
        for d in new_devices:
            self._logger.info('Device added: %s' % d)
            self.add_device(d)
        removed_devices = [d for d in list(self.status_by_path.keys()) if d not in devices]
        for d in removed_devices:
            self._logger.info('Device removed: %s' % d)
            self.remove_device(d)

    def add_device(self, path):
        """Open and register the kegboard at *path*, then ping it."""
        kb = kegboard.Kegboard(path)
        try:
            kb.open()
        except OSError as e:
            # TODO(mikey): Back off and eventually blacklist device.
            self._logger.warning('Error opening device at path {}: {}'.format(path, e))
            return
        self.devices_by_path[path] = kb
        self.status_by_path[path] = STATUS_CONNECTING
        self.name_by_path[path] = ''
        try:
            kb.ping()
        except IOError:
            self._logger.warning('Error pinging device')
            # Fixed: was a bare `remove_device(path)` (NameError).
            self.remove_device(path)

    def remove_device(self, path):
        """Forget the device at *path* and close it quietly."""
        device = self.devices_by_path.pop(path)
        device.close_quietly()
        del self.status_by_path[path]
        del self.name_by_path[path]

    def get_status(self, path):
        """Return the STATUS_* constant for *path*, or None if unknown."""
        return self.status_by_path.get(path, None)

    def get_name(self, path):
        """Return the reported name for *path*, or 'unknown'."""
        return self.name_by_path.get(path, 'unknown')

    def active_devices(self):
        """Yield devices that are currently connecting or connected."""
        for k, v in self.devices_by_path.items():
            if self.get_status(k) in (STATUS_CONNECTING, STATUS_CONNECTED):
                yield v

    def service_devices(self):
        """Drain and handle pending messages; return True if any arrived."""
        message_posted = False
        for kb in self.active_devices():
            for message in kb.drain_messages():
                self.handle_message(kb, message)
                message_posted = True
        return message_posted

    def post_message(self, kb, message):
        self._logger.info('Posting message from %s: %s' % (kb, message))

    def handle_message(self, kb, message):
        """Process one kegboard message, handling the hello/naming handshake."""
        path = kb.device_path
        name = self.get_name(path) or 'unknown device'
        self._logger.info('%s: %s' % (name, message))
        if isinstance(message, kegboard.HelloMessage):
            if self.get_status(path) == STATUS_CONNECTING:
                if message.serial_number:
                    name = 'kegboard-%s' % (message.serial_number[-8:],)
                else:
                    name = 'kegboard'
                name = name.lower()
                self._logger.info('Device %s is named: %s' % (kb, name))
                if name in list(self.name_by_path.values()):
                    # Duplicate names would make events ambiguous; park the device.
                    self._logger.warning('Device with this name already exists! Disabling it.')
                    self.status_by_path[path] = STATUS_NEED_UPDATE
                else:
                    self.status_by_path[path] = STATUS_CONNECTED
                    self.name_by_path[path] = name
                    self.client.SendControllerConnectedEvent(name)
        if self.status_by_path[path] != STATUS_CONNECTED:
            self._logger.debug('Ignoring message, device disconnected')
            return
        self.message_to_event(kb, message)

    def message_to_event(self, kb, message):
        """Converts a message to an event and posts it to the client."""
        path = kb.device_path
        name = self.name_by_path.get(path, '')
        if not name:
            self._logger.warning('Illegal state: unknown device name')
            return
        client = self.client
        if isinstance(message, kegboard.MeterStatusMessage):
            tap_name = '%s.%s' % (name, message.meter_name)
            client.SendMeterUpdate(tap_name, message.meter_reading)
        elif isinstance(message, kegboard.TemperatureReadingMessage):
            sensor_name = '%s.%s' % (name, message.sensor_name)
            client.SendThermoUpdate(sensor_name, message.sensor_reading)
        elif isinstance(message, kegboard.AuthTokenMessage):
            # For legacy reasons, a kegboard-reported device name of 'onewire' is
            # translated to 'core.onewire'. Any other device names are reported
            # verbatim.
            device = message.device
            if device == 'onewire':
                device = common_defs.AUTH_MODULE_CORE_ONEWIRE
            # Convert the token byte field to little endian string representation.
            bytes_be = message.token
            bytes_le = ''
            for b in bytes_be:
                bytes_le = '%02x%s' % (ord(b), bytes_le)
            if message.status == 1:
                client.SendAuthTokenAdd(common_defs.ALIAS_ALL_TAPS, device, bytes_le)
            else:
                client.SendAuthTokenRemove(common_defs.ALIAS_ALL_TAPS, device, bytes_le)
if __name__ == '__main__':
    # Standard kegbot app entry point: parses flags and runs the main loop.
    KegboardManagerApp.BuildAndRun()
|
993,991 | 9eb367bc220e09cdae4f2cd2583295a5b8d25f02 | from lib.recordset import DB
from classes.logger import log
class User:
    """Plain value object describing a Telegram user."""

    # Class-level defaults, kept so attribute access works before/without __init__.
    user_id = None
    first_name = None
    last_name = None
    username = None
    language_code = None

    def __init__(self, user_id, first_name, last_name, username, language_code):
        for attr_name, attr_value in (
            ('user_id', user_id),
            ('first_name', first_name),
            ('last_name', last_name),
            ('username', username),
            ('language_code', language_code),
        ):
            setattr(self, attr_name, attr_value)
class Message:
    """Plain value object describing a Telegram message."""

    # Class-level defaults, kept so attribute access works before/without __init__.
    message_id = None
    text = None
    chat_id = None
    date = None
    from_id = None

    def __init__(self, message_id, text, chat_id, date, from_id):
        for attr_name, attr_value in (
            ('message_id', message_id),
            ('text', text),
            ('chat_id', chat_id),
            ('date', date),
            ('from_id', from_id),
        ):
            setattr(self, attr_name, attr_value)
class DBStat(object):
    """Thin data-access layer for telegram users/messages in the 'podbot' DB."""

    db = None  # DB connection wrapper, created in __init__

    def __init__(self):
        self.db = DB(db_name="podbot")

    def check_user(self, user_id):
        """Return the (truthy) affected-row count when the user id already exists."""
        sql = "select 1 as res from tgrm_users where id= %s "
        return self.db.cursor.execute(sql, user_id)

    def add_new_user(self, user=User):
        """Insert a new user row; failures are logged, not raised.

        NOTE(review): the default ``user=User`` is the class itself, not an
        instance — confirm callers always pass a User instance.
        """
        try:
            sql = "insert into tgrm_users set id= %s , " \
                  "first_name = %s , " \
                  "last_name= %s, " \
                  "username= %s , " \
                  "language_code= %s "
            if self.db.cursor.execute(sql, (
                    user.user_id, user.first_name, user.last_name, user.username, user.language_code)):
                self.db.commit()
        except Exception as e:
            log.error(e)

    def add_new_message(self, message=Message):
        """Insert a message row and commit when the insert reports success."""
        sql = """
        insert into messages SET
        text = %s, chat_id = %s, date = %s, from_id = %s
        """
        if self.db.cursor.execute(sql, (message.text, message.chat_id, message.date, message.from_id)):
            self.db.commit()
|
993,992 | 7324ca1e8025b34815f0447ad89c0becd406a8df | import sys
from Views import PrimeiraWindow
from Controller.controllerView import *
class MeuApp(QtWidgets.QMainWindow, PrimeiraWindow.Ui_MainWindow):
    """Main window: wires the generated Qt UI to the view controller."""

    def __init__(self, parent=None):
        super(MeuApp, self).__init__(parent)
        # parameters
        self.controler = ControllerView()
        # methods executed on start-up
        self.setupUi(self)
        self.initEvent()

    def initEvent(self):
        """Connect button clicks to the controller's dialog launchers."""
        self.btn_cad_cliente.clicked.connect(self.controler.chamar_cadastro_cliente)
        self.btn_reg_consulta.clicked.connect(self.controler.chamar_cadastro_consulta)
def main():
    """Create the Qt application, show the main window, and run the event loop."""
    app = QtWidgets.QApplication(sys.argv)
    form = MeuApp()
    form.show()
    app.exec_()


if __name__ == '__main__':
    main()
|
993,993 | 4184f6815e8024cbfe231495d20bed9465758b2f | import os
from collections import namedtuple
import re
import csv
from pathlib import Path
def _is_file(path_dir, child):
return os.path.isfile(os.path.join(path_dir, child)) and (child != "__init__.py") and (child.split(".")[1] == "py")
def _is_directory(path_dir, directory):
return directory != "__pycache__" and os.path.isdir(os.path.join(path_dir, directory)) is True
dag_attributes = namedtuple("DagAttr", "file_path,catchup,start_date")
def get_file_attributes(file_path: str, record: namedtuple,
                        patterns: tuple = (r".*catchup.*",
                                           r"MAIN_START_DATE = datetime\((\d{4},\s\d+,\s\d+).*")):
    """Extract the catchup flag and MAIN_START_DATE from one DAG file.

    Returns a one-element list containing *record* populated with the file
    path, the (whitespace/comma-stripped) catchup line, and the 'Y, M, D'
    portion of the MAIN_START_DATE line; missing matches yield ''.

    Fixes over the original: regex literals are raw strings (the originals
    contained invalid escape sequences such as ``\\d`` in plain strings),
    and the mutable list default became an immutable tuple.
    """
    records = []
    catchup_pattern = re.compile(patterns[0])
    start_date_pattern = re.compile(patterns[1])
    with open(file_path, 'r') as f:
        content = f.readlines()
    catchup = "".join(re.sub(r"[\n\s,]", "", line)
                      for line in content if catchup_pattern.match(line))
    # NOTE: the substitution pattern below intentionally mirrors the default
    # start-date pattern; it is not derived from the *patterns* argument
    # (same behaviour as the original).
    start_date = "".join(
        re.sub(r"MAIN_START_DATE = datetime\((\d{4},\s\d+,\s\d+).*\n", r"\1", line)
        for line in content if start_date_pattern.match(line))
    records.append(record(file_path=file_path, catchup=catchup, start_date=start_date))
    return records
if __name__ == "__main__":
    # Scan every DAG directory under the composer dags folder and dump each
    # file's catchup flag and start date to ./data.csv.
    path = os.path.join(
        os.getenv("HOME"),
        "abraca-data/bi-etl-ejuice/bietlejuice/jobs/composer/dags")
    directories = (os.path.join(path, directory) for directory in os.listdir(path)
                   if _is_directory(path, directory) )
    dag_files = [{"file_path": directory + "/" + child, "record": dag_attributes}
                 for directory in directories for child in os.listdir(directory)
                 if _is_file(directory, child)
                 ]
    records = map(lambda dag_file: get_file_attributes(**dag_file), dag_files)
    with open("./data.csv", 'w') as f:
        writer = csv.writer(f)
        for record in records:
            record = record[0]  # get_file_attributes returns a one-element list
            row = [record.file_path, record.catchup, record.start_date]
            writer.writerow(row)
|
993,994 | dc4d8090736cab5e4931f5ebaf7ab2e5696f6cde | # -*- coding: utf-8 -*-
import itchat as IC
from itchat import auto_login as login
# Log in to WeChat via itchat and send a test message to oneself.
print ("itchat Start!")
login (hotReload=True)  # hotReload caches the session so re-runs skip the QR scan
IC.send('Test 中文(全角字符utf-8),发给自己', toUserName='wzk.py')
|
993,995 | 6c869ef538761daa80444aa2a5578142961a46f5 | # DAILY CODE MODE
# symbol = "*"
# n = 4
#
# print(symbol + " : " + " ".join([str(x) for x in range(n)]))
# answer = []
# for i in range(n + 1):
# answer.append([str(i) + " : "])
# for j in range(n + 1):
# if symbol == "+":
# answer[i].append(str(i + j))
# elif symbol == "-":
# answer[i].append(str(i - j))
# elif symbol == "*":
# answer[i].append(str(i * j))
# print(" ".join(answer[i]))
#
# Answer by Say_What1
# def c98(op, n):
# n = int(n)
# table = [[0]*(n+1) for i in range(n+1)]
# div = ''
# run = [i for i in range(n+1)]
# for i in range(n+1):
# div += '----'
# for j in range(n+1):
# table[i][j] = eval('%d%s%d'%(i, op, j))
# print(op,'|', str(run).strip('[]').replace(',',' '))
# print(div)
# for i in run:
# print(run[i], '|', str(table[i]).strip(',[]').replace(',', ' '))
# c98('+',4)
#
# BREAKDOWN
# table[i][j] = eval('%d%s%d'%(i, op, j))
#
# str(run).strip('[]').replace(',',' '))
# It seems that this is just a way of joining up a list with a space between each block which we would usually do with " ".join(given_list)
#
# SUMMARY
# Another easy one, this is easy enough to do and just requires nested looping. Not so sure about the given answer as it repeats a lot of steps in a cumbersome way.
#
|
993,996 | 9c545de16979be59db3eef171e06ed4d09b85343 | def find_power_sum(n):
sum_=0
num_list=list(str(n))
for x in num_list:
sum_+=int(x)**5
if sum_==n:
return True
summ=0
for x in range(2,1000000):
if find_power_sum(x):
summ+=x
print(f'Sum: {summ}') |
993,997 | d449e5fab869ef4fa970a24dde301475a27cc4dd | # -*- coding: utf-8 -*-
import os
import json
import requests
from flod_common.session.utils import unsign_auth_token
USERS_URL = os.environ.get('USERS_URL', 'http://localhost:4000')
USERS_VERSION = os.environ.get('USERS_VERSION', 'v1')
def is_super_admin(cookies=None):
    """Return True when the signed auth token in *cookies* belongs to the
    configured super-admin user (AUTH_ADMIN_USER_ID)."""
    if not cookies or 'auth_token' not in cookies:
        return False
    username = unsign_auth_token(cookies['auth_token'])
    return username == os.environ["AUTH_ADMIN_USER_ID"]
def get_user_id_for_user(cookies=None):
    """Return the username encoded in the auth-token cookie, or None.

    None is returned for missing cookies, a missing/empty token, or when
    the token belongs to the super-admin user.

    Fix: the original indexed ``cookies['auth_token']`` directly, raising
    KeyError when the cookie was absent (the sibling ``is_super_admin``
    guards with ``in``); ``.get`` makes a missing token simply yield None.
    """
    if cookies:
        token = cookies.get('auth_token')
        if token:
            username = unsign_auth_token(token)
            super_user_name = os.environ["AUTH_ADMIN_USER_ID"]
            if username != super_user_name:
                return username
    return None
def get_user_by_id(user_id, cookies):
    """Fetch a user record from the users service and return the parsed JSON body."""
    url = '%s/api/%s/users/%s' % (USERS_URL, USERS_VERSION, user_id)
    response = requests.get(url, cookies=cookies)
    # NOTE(review): no status-code check — a non-2xx JSON error body is
    # returned as-is; confirm callers handle that.
    return response.json()
def has_role(user, name):
    """Return True when *user* (a dict) carries a role whose name is *name*."""
    for role in user.get('roles', []):
        if role['name'] == name:
            return True
    return False
def is_administrator(user):
    """Return True when *user* holds any of the administrator roles."""
    admin_roles = (
        'flod_brukere',
        u'flod_aktørregister_admin',
        'tilskudd_saksbehandler',
        'tilskudd_godkjenner',
    )
    return any(has_role(user, role) for role in admin_roles)
def get_user(cookies):
    """Resolve the current user from the signed auth-token cookie.

    Returns None when no token cookie is present; otherwise fetches the
    user record from the users service and returns the parsed JSON body.
    """
    if 'auth_token' not in cookies:
        return None
    username = unsign_auth_token(cookies['auth_token'])
    url = '%s/api/%s/users/%s' % (USERS_URL, USERS_VERSION, username)
    r = requests.get(url, cookies=cookies)
    return r.json()
|
993,998 | 8301e7ce2e1b44989a75c14492a00ff05929be7d | # coding:utf-8
import sys
import time,datetime
import os
import commands
import re
#根据各自业务判断是否需要处理,跟输出目录比较是否处理过也可在此处进行
def is_need_process(file_path):
    """Decide whether *file_path* should be processed (HDF granules only).

    Hook point for business-specific filtering — e.g. comparing against the
    output directory to skip already-processed files.
    """
    return file_path.endswith(".hdf")
#获取需要处理的文件
def get_files(input_path):
    """Collect processable files directly under *input_path*.

    Mirrors the original reverse-index loop: results come back in reverse
    directory-listing order.
    """
    file_list = []
    for name in reversed(os.listdir(input_path)):
        candidate = os.path.join(input_path, name)
        if os.path.isfile(candidate) and is_need_process(candidate):
            file_list.append(candidate)
    return file_list
#绘图
def draw_PNG(path_ncl, pngPath, ncPath, cycle, dsName):
    """Run one NCL plotting script for a MODIS LST granule (Python 2).

    Extracts the acquisition date (YYYYDDD) from the MOD11C1 file name and
    shells out to ncl with the granule path, dataset name, cycle and date.
    Files whose names do not match the granule pattern are skipped.
    """
    print "processing..... ",ncPath+"\r"
    pattern = 'MOD11C1.A(\d{7}).006'
    nc_name_obj = re.search(pattern, ncPath)
    if nc_name_obj == None or len(nc_name_obj.regs) <2:
        # not a MOD11C1 granule name: nothing to draw
        return
    else:
        day_of_year = nc_name_obj.group(1)
        # convert day-of-year (e.g. 2020061) into YYYY-MM-DD
        tem_date = time.strptime(day_of_year, '%Y%j')
        date = time.strftime('%Y-%m-%d', tem_date)
        shell_str = "ncl -Q " + path_ncl +" ncPath='\"" + ncPath + "\"'" +" dsName='\"" + dsName + "\"'" +" cycle='\"" + cycle + "\"'" +" date='\"" + date + "\"'" +" pngPath='\""+pngPath+"\"'"
        status,output=commands.getstatusoutput(shell_str)
if __name__ == "__main__":
    # Python 2 entry point: render global/Asia/China day+night LST maps for
    # every HDF granule found in the input directory.
    reload(sys)
    sys.setdefaultencoding('utf-8')
    # input_path = sys.argv[1]
    # pngPath = sys.argv[2]
    startTime = datetime.datetime.now()
    print 'start modis lst ',startTime
    input_path = "/QHZX_DATA/input_product/MODIS_LST/"
    #input_path = "C:\\Users\\yu\\Desktop\\ncl\\modis"
    pngPath = "/QHZX_DATA/product/modis/lst/"
    path_ncl_global = "/QHZX_DATA/produce_codes/NCL/MODIS/LST_MODIS_Global_Main.ncl"
    path_ncl_asian = "/QHZX_DATA/produce_codes/NCL/MODIS/LST_MODIS_Asian_Main.ncl"
    path_ncl_china = "/QHZX_DATA/produce_codes/NCL/MODIS/LST_MODIS_China_Main.ncl"
    dsName_day = "LST_Day_CMG"
    dsName_night = "LST_Night_CMG"
    cycle = 'daily'
    file_list = get_files(input_path)
    for file in file_list:
        # global day/night maps
        draw_PNG(path_ncl_global, pngPath, file, cycle, dsName_day)
        draw_PNG(path_ncl_global, pngPath, file, cycle, dsName_night)
        # Asia day/night maps
        draw_PNG(path_ncl_asian, pngPath, file, cycle, dsName_day)
        draw_PNG(path_ncl_asian, pngPath, file, cycle, dsName_night)
        # China day/night maps
        draw_PNG(path_ncl_china, pngPath, file, cycle, dsName_day)
        draw_PNG(path_ncl_china, pngPath, file, cycle, dsName_night)
    # elapsed time
    spendTime = datetime.datetime.now() - startTime
    print 'running_time', spendTime
|
993,999 | 356735db66870ffa21bbabafa2996b0107a30d48 | # -*- coding: utf-8 -*-
"""
@author: Quoc-Tuan Truong <tuantq.vnu@gmail.com>
"""
from cornac.utils import tryimport
def test_tryimport():
    """tryimport on a missing module must return a dummy whose attribute
    access raises ImportError.

    Fix: the original wrote ``except ImportError: assert True``, which
    passed vacuously even when no exception was raised; the ``else``
    branch now fails the test when the expected ImportError never occurs.
    """
    dummy = tryimport('this_module_could_not_exist_bla_bla')
    try:
        dummy.some_attribute
    except ImportError:
        pass  # expected
    else:
        raise AssertionError('expected ImportError when accessing an attribute of a missing module')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.