text stringlengths 38 1.54M |
|---|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Inventory schemas for Placement API."""
import copy
from nova.api.openstack.placement.schemas import common
from nova.db import constants as db_const
# JSON schema shared by all inventory write operations. Numeric fields are
# capped at the DB column maximum so values that cannot be stored are
# rejected at the API layer instead of failing in the database.
BASE_INVENTORY_SCHEMA = {
    "type": "object",
    "properties": {
        # Provider generation, used for optimistic-concurrency checks.
        "resource_provider_generation": {
            "type": "integer"
        },
        # Total amount of the resource; at least 1 unit must exist.
        "total": {
            "type": "integer",
            "maximum": db_const.MAX_INT,
            "minimum": 1,
        },
        # Amount held back from allocation (0 means nothing reserved).
        "reserved": {
            "type": "integer",
            "maximum": db_const.MAX_INT,
            "minimum": 0,
        },
        # Smallest allocatable amount per request.
        "min_unit": {
            "type": "integer",
            "maximum": db_const.MAX_INT,
            "minimum": 1
        },
        # Largest allocatable amount per request.
        "max_unit": {
            "type": "integer",
            "maximum": db_const.MAX_INT,
            "minimum": 1
        },
        # Allocations must be a multiple of this value.
        "step_size": {
            "type": "integer",
            "maximum": db_const.MAX_INT,
            "minimum": 1
        },
        # Oversubscription factor; bounded by what the SQL float column holds.
        "allocation_ratio": {
            "type": "number",
            "maximum": db_const.SQL_SP_FLOAT_MAX
        },
    },
    "required": [
        "total",
        "resource_provider_generation"
    ],
    "additionalProperties": False
}
# POST creates a new inventory record: the resource class must be named in
# the body, and no generation is required since the record does not exist yet.
POST_INVENTORY_SCHEMA = copy.deepcopy(BASE_INVENTORY_SCHEMA)
POST_INVENTORY_SCHEMA['properties']['resource_class'] = {
    "type": "string",
    "pattern": common.RC_PATTERN,
}
POST_INVENTORY_SCHEMA['required'].append('resource_class')
POST_INVENTORY_SCHEMA['required'].remove('resource_provider_generation')
# Each record inside a PUT body omits the generation; the generation is
# supplied once at the top level of PUT_INVENTORY_SCHEMA instead.
PUT_INVENTORY_RECORD_SCHEMA = copy.deepcopy(BASE_INVENTORY_SCHEMA)
PUT_INVENTORY_RECORD_SCHEMA['required'].remove('resource_provider_generation')
# PUT replaces all inventories of a provider atomically: a single generation
# plus a mapping of resource-class name -> inventory record.
PUT_INVENTORY_SCHEMA = {
    "type": "object",
    "properties": {
        "resource_provider_generation": {
            "type": "integer"
        },
        "inventories": {
            "type": "object",
            # Keys must be valid resource-class names.
            "patternProperties": {
                common.RC_PATTERN: PUT_INVENTORY_RECORD_SCHEMA,
            }
        }
    },
    "required": [
        "resource_provider_generation",
        "inventories"
    ],
    "additionalProperties": False
}
|
import uuid
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.utils.datetime_safe import datetime
from model_utils.models import TimeStampedModel
from app.core.storage_backends import MediaStorage
from app.core.utils import storage_path
class User(AbstractUser):
    """Custom user model keyed by a UUID that authenticates with email."""

    def user_images_path(self, filename, *args, **kwargs):
        # Build a date-bucketed upload folder: user_profile/YYYY/M/D/images.
        # NOTE(review): django.utils.datetime_safe is deprecated in newer
        # Django versions — confirm the target version supports it.
        now = datetime.now()
        folder = '/'.join(['user_profile', str(now.year), str(now.month), str(now.day), 'images'])
        return storage_path(folder, filename)

    # Log in with email; username is still collected (required by
    # createsuperuser and generated automatically in save() if missing).
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username']
    # UUID primary key instead of the default auto-increment integer.
    user_id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    email = models.EmailField(unique=True)
    avatar = models.ImageField(storage=MediaStorage(), upload_to=user_images_path, blank=True, null=True)
    is_manager = models.BooleanField(default=False)

    def __str__(self):
        return self.name

    @property
    def name(self):
        """Full name, falling back to 'User #<pk>' when both parts are blank."""
        name = "%s %s" % (self.first_name, self.last_name)
        if not name.strip():
            name = "User #%s" % self.pk
        return name

    def save(self, *args, **kwargs):
        # First save without a username: derive a unique one from the
        # name/email via django-allauth.
        if not self.pk and not self.username:
            from allauth.utils import generate_unique_username
            self.username = generate_unique_username(
                [self.first_name, self.last_name, self.email, self.username, 'user']
            )
        # Collapse internal runs of whitespace in the name fields.
        self.first_name = ' '.join(self.first_name.split())
        self.last_name = ' '.join(self.last_name.split())
        return super().save(*args, **kwargs)
|
'''
Problem:
Given an array of characters, compress it in-place.
The length after compression must always be smaller than or equal to the original array.
Every element of the array should be a character (not int) of length 1.
After you are done modifying the input array in-place, return the new length of the array.
'''
class Solution(object):
    def compress(self, chars):
        """
        Run-length compress *chars* in place and return the compressed length.

        Runs of length 1 are stored without a count; longer runs are followed
        by the decimal digits of their length, one character per cell.

        :type chars: List[str]
        :rtype: int
        """
        read = 0
        write = 0
        n = len(chars)
        while read < n:
            current = chars[read]
            run_start = read
            # Advance past the whole run of identical characters.
            while read < n and chars[read] == current:
                read += 1
            run_length = read - run_start
            chars[write] = current
            write += 1
            # Only runs longer than one get an explicit count.
            if run_length > 1:
                for digit in str(run_length):
                    chars[write] = digit
                    write += 1
        return write
#!/usr/bin/python
from tkinter import *
import cv2
import PIL.Image, PIL.ImageTk
from skimage.transform import *
from skimage import io
from skimage.transform import resize
import pickle
import numpy as np
import keras
from keras.preprocessing.sequence import pad_sequences
from keras.models import model_from_json
from keras import optimizers
class VQA_GUI:
    """Tkinter front end for querying and incrementally re-training a VQA model.

    Model architecture/weights are loaded from files versioned by a counter
    stored in current_model_num.txt; training writes a new version and bumps
    the counter.
    """

    def __init__(self, master):
        self.master = master
        master.title("VQA GUI")
        Label(master,
              text = 'Test the Model',
              fg = "blue",
              font = "Times 10 bold").grid(row = 0, column = 1)
        self.q = self.makeentry(master, "Question: ", 1, 0)
        self.pic = self.makeentry(master, "Image Link: ", 2, 0)
        # Seed the canvas with a default image so the widget has a size.
        self.photo, height, width = self.get_image("https://static.meijer.com/Media/002/84001/0028400183826_a1c1_0600.png")
        self.canvas = Canvas(master, width = width + 10, height = height + 10)
        self.canvas.grid(row = 1, column = 2, columnspan=5, rowspan=5,
                         sticky=W+E+N+S, padx=5, pady=5)
        self.image_on_canvas = self.canvas.create_image(0, 0, image = self.photo,
                                                        anchor = NW)
        self.strQ = StringVar()
        self.labelQ = Label(master, textvariable = self.strQ)
        self.labelQ.grid(row = 3, column = 0)
        self.strClass = StringVar()
        self.labelClass = Label(master, textvariable = self.strClass)
        self.labelClass.grid(row = 4, column = 0)
        self.strPerc = StringVar()
        self.labelPerc = Label(master, textvariable = self.strPerc)
        self.labelPerc.grid(row = 4, column = 1)
        self.greet_button = Button(master, text="Submit", command=self.greet)
        self.greet_button.grid(row = 8, column = 2)
        Label(master,
              text = 'Training the Model',
              fg = "blue",
              font = "Times 10 bold").grid(row = 9, column = 1)
        self.ans = self.makeentry(master, "Correct Answer: ", 10, 0)
        self.ans_button = Button(master, text="Train", command=self.train_save)
        self.ans_button.grid(row = 10, column = 2)
        # Bug fix: close the counter file instead of leaking the handle.
        with open('current_model_num.txt') as num_file:
            self.counter = int(num_file.readline())

    ### Functions for GUI
    def makeentry(self, parent, caption, r_in, c_in, width=None, **options):
        """Create a labelled Entry at grid row r_in / column c_in and return it."""
        entry = Entry(parent, **options)
        if width:
            # Bug fix: width must be passed as a keyword option; the original
            # entry.config(width) passed the int where tkinter expects the
            # cnf dict.
            entry.config(width=width)
        Label(parent, text=caption).grid(row = r_in, column = c_in)
        entry.grid(row = r_in, column = c_in + 1)
        return entry

    def greet(self):
        """Submit handler: run prediction on a new question and/or image."""
        if self.q.get() != '':
            self.trans_q(self.q.get())
            self.output()
            self.q.delete(0, END)
        if self.pic.get() != '':
            self.photo, height, width = self.get_image(self.pic.get())
            self.canvas.itemconfig(self.image_on_canvas, image = self.photo)
            self.pic.delete(0, END)

    def output(self):
        """Show the top-5 predicted classes and their percentages."""
        class_perc_dict = self.predict()
        classStr = 'Class:\n'
        percStr = 'Percentage:\n'
        for k in class_perc_dict.keys():
            classStr += (str(k) + '\n')
            percStr += (str(class_perc_dict[k]) + '%\n')
        self.strQ.set('Current Question: ' + self.q.get())
        self.strClass.set(classStr)
        self.strPerc.set(percStr)

    ### Functions for VQA
    def get_model(self):
        """Load the current versioned Keras model (architecture + weights)."""
        with open("model_VQA_4_" + str(self.counter) + ".json", 'r') as json_file:
            loaded_model_json = json_file.read()
        loaded_model = model_from_json(loaded_model_json)
        loaded_model.load_weights("model_VQA_4_" + str(self.counter) + ".h5")
        return loaded_model

    def predict(self):
        """Return {class_label: percentage} for the 5 most likely answers."""
        loaded_model = self.get_model()
        # Array with the percentage possibility of each class.
        perc_arr = loaded_model.predict([self.input_img, self.input_q])[0]
        # Class indices ordered from highest to lowest probability.
        perc_ind = np.array(-perc_arr).argsort()
        my_dict2 = self.get_dict()
        class_perc_dict = dict()
        for i in range(5):
            class_val = my_dict2[perc_ind[i]]
            class_perc = 100 * perc_arr[perc_ind[i]]
            class_perc_dict.update({class_val: class_perc})
        return class_perc_dict

    def trans_q(self, q):
        """Tokenise the question into the padded index sequence the model expects."""
        # Bug fix: close the pickle handle instead of leaking it.
        with open("word_index_VQA_3.pickle", 'rb') as pkl_file:
            word_index = pickle.load(pkl_file)
        words = q.split(' ')
        new_seq = []
        for w in words:
            # NOTE(review): words missing from the index map to None —
            # confirm pad_sequences handles that as intended.
            new_seq.append(word_index.get(w))
        trans_q = np.expand_dims(new_seq, axis = 0)
        self.input_q = pad_sequences(trans_q, maxlen = 50)

    def get_image(self, file_name):
        """Fetch an image, cache the model input tensor, return a Tk photo.

        Returns (PhotoImage, height, width) of the 224x224 display image.
        """
        orig_img = io.imread(file_name)
        new_img_GUI = cv2.resize(orig_img, (224, 224))
        # Reshape (dropping any alpha channel) to the 224x224x3 model input.
        reshaped_img = resize(orig_img[:, :, :3], (224, 224, 3))
        self.input_img = np.expand_dims(reshaped_img, axis=0)
        height, width, no_channels = new_img_GUI.shape
        photoimg = PIL.Image.fromarray(new_img_GUI)
        photo = PIL.ImageTk.PhotoImage(image = photoimg)
        return photo, height, width

    def get_dict(self):
        """Return the index -> answer-class mapping (inverse of ans_class)."""
        with open("ans_index_VQA_4.pickle", "rb") as file:
            ans_class = pickle.load(file)
        self.ans_class = ans_class
        my_dict2 = {y: x for x, y in ans_class.items()}
        return my_dict2

    def trans_ans(self):
        """Convert the typed answer into a one-hot target vector."""
        # NOTE(review): self.ans_class is only set by get_dict(), which runs
        # during predict(); training before any prediction would raise —
        # confirm the intended workflow.
        ans_str = self.ans.get()
        ans_vector = np.zeros(len(self.ans_class))
        ans_vector[self.ans_class[ans_str]] = 1
        ans_vector = np.expand_dims(ans_vector, axis=0)
        self.input_ans = ans_vector

    def train_save(self):
        """Fine-tune the model on the current sample and save a new version."""
        loaded_model = self.get_model()
        self.trans_ans()
        loaded_model.compile(loss='categorical_crossentropy', optimizer=optimizers.RMSprop(lr=1e-4), metrics=['acc'])
        loaded_model.fit([self.input_img, self.input_q], self.input_ans, epochs=1)
        # Serialize the architecture to JSON and the weights to HDF5 under
        # the next version number.
        model_json = loaded_model.to_json()
        with open("model_VQA_4_" + str(self.counter + 1) + ".json", "w") as json_file:
            json_file.write(model_json)
        loaded_model.save_weights("model_VQA_4_" + str(self.counter + 1) + ".h5")
        self.counter += 1
        # Bug fix: write via a context manager so the counter file is flushed
        # and closed.
        with open('current_model_num.txt', 'w') as num_file:
            num_file.write(str(self.counter))
        print("Saved model to disk")
# Build the GUI and enter the Tk event loop.
root = Tk()
my_gui = VQA_GUI(root)
root.mainloop()
|
#%%
# Jupyter-style cells: load the DC bike-share data via the project's
# BikeSystem helper and inspect the columns.
import BikeSystem
import pandas as pd
#%%
bs = BikeSystem.DCBikeSystem()
df = bs.load_data()
# Expression statement: shows the column index when run in a notebook cell.
df.columns
# The commented block below exported station activity summaries.
# mp = bs.load_map()
# mp.to_csv("export/{}_Geocoding.csv".format(bs.city))
# cnt_1 = df.startstation.value_counts()
# cnt_2 = df.endstation.value_counts()
# start = df.groupby("startstation")["starttime"].min()
# end = df.groupby("endstation")["stoptime"].max()
# res = pd.DataFrame({
# "Checkout_Count": cnt_1,
# "ReturnCount": cnt_2,
# "EarliestRecord": start,
# "LatestRecord": end
# })
# res.to_csv("export/{}_Activity.csv".format(bs.city))
|
from scapy.all import *
import requests
import pprint
import codecs
import json
import sys
import shutil
from threading import Thread
import time
class bcolors:
    """ANSI escape sequences for coloured/styled terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'      # reset to default style
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
ip_mac_dic = {}
rangeofips = 256
def scan_network(type_ip, From, to):
    """ARP-scan addresses type_ip+i for i in [From, to) and record IP -> MAC.

    Discovered hosts are stored in the module-level ip_mac_dic. Host part 0
    (the network address) is skipped; hosts that do not answer are ignored.
    """
    # Bug fix: the original while-loop executed `continue` before the counter
    # increment, so it spun forever when From == 0; a for-loop cannot do that.
    for i in range(int(From), int(to)):
        if i == 0:
            continue
        ip = str(type_ip) + str(i)
        # Coarse progress markers for long scans.
        if i == 100:
            print("100 ip cheked")
        if i == 200:
            print("almost done ..")
        try:
            ans, unans = srp(Ether(dst = "ff:ff:ff:ff:ff:ff")/ARP(pdst=ip),timeout=1, iface = 'wlo1',inter=1,verbose=0)
            target_m = str(ans[0][1].hwsrc)
            ip_mac_dic[ip] = target_m
        except IndexError:
            # No ARP reply: ans is empty and indexing raises.
            pass
def nscan_network(type_ip, f_o):
    """ARP-probe the single address type_ip+f_o and record its MAC.

    On success the MAC is stored in the module-level ip_mac_dic; hosts that
    do not answer are silently skipped.
    """
    target_ip = str(type_ip) + str(f_o)
    try:
        answered, _unanswered = srp(Ether(dst = "ff:ff:ff:ff:ff:ff")/ARP(pdst=target_ip),timeout=1, iface = 'wlo1',inter=1,verbose=0)
        # No reply -> empty answer list -> IndexError below.
        ip_mac_dic[target_ip] = str(answered[0][1].hwsrc)
    except IndexError:
        pass
def get_info(mac):
    """Query macvendors.co for vendor details of a MAC address.

    Returns (start_hex, company, address, mac_prefix, country, type, end_hex).
    """
    MAC_URL = 'http://macvendors.co/api/%s'
    response = requests.get(MAC_URL % str(mac))
    payload = response.json()
    response.close()
    # The API wraps all vendor fields under the "result" key.
    result = payload['result']
    return (result['start_hex'], result['company'], result['address'],
            result['mac_prefix'], result['country'], result['type'],
            result['end_hex'])
columns = shutil.get_terminal_size().columns
def rapport(ip_addr, mac_addr):
    """Print a framed vendor report for one scanned host (IP + MAC)."""
    print("\n\n")
    ip_r = bcolors.OKGREEN + str(ip_addr) + bcolors.ENDC
    intro = "Gudom result repport for " + ip_r + " :"
    print(intro.center(columns))
    #start_hex, company, address, mac_prefix, country, type_m, end_hex = get_info(str(mac_addr))
    # ASCII frame template; numbered placeholders are filled pairwise with
    # field-name / field-value strings below.
    f = """
 +----------------------------------------------------------------------------------------------------------
 /
 | {0} : {1}
 |
 | {2} : {3}
 |
 | {4} : {5}
 |
 | {6} : {7}
 |
 | {8} : {9}
 |
 | {10} : {11}
 |
 | {12} : {13}
 |
 | {14} : {15}
 +
 \___________________________________________________________________________________________________________
 """
    s_hex, company, address, mac_prefix, country, type_m, end_hex = get_info(mac_addr)
    print(bcolors.BOLD + f.format("MAC ADDRESS",mac_addr,"Start hex",s_hex,"End hex",end_hex,"MAC prefix",mac_prefix,"Company",company,"Country",country,"Address",address,"type",type_m) + bcolors.ENDC)
if __name__ == '__main__':
    print("\n")
    logo = bcolors.OKBLUE + "Starting GUDOM (https://github.com/ScriptGenerator/ip-tools) at {0}".format(time.ctime()) + bcolors.ENDC
    print(logo.center(columns))
    print("\n")
    # One scanner thread per host address 1-255. A plain list of Thread
    # objects replaces the original exec()-generated t1..t255 variables,
    # which relied on runtime code generation for no benefit.
    threads = []
    for host in range(1, 256):
        threads.append(Thread(target=nscan_network, args=('192.168.0.', host)))
    for t in threads:
        t.start()
    # Wait for every probe before reporting.
    for t in threads:
        t.join()
    print(bcolors.FAIL + "We got the following informations by a cool API called macvendors." + bcolors.ENDC)
    for key, value in ip_mac_dic.items():
        print('')
        rapport(key, value)
        # Throttle the vendor-API lookups a little.
        time.sleep(2)
    print("\n\n")
|
from discord.ext import commands
class Reactions(commands.Cog):
    """Works with Reactions"""

    def __init__(self, bot):
        self.bot = bot

    # Event to give roles with a default reaction
    @commands.Cog.listener()
    async def on_reaction_add(self, reaction, user):
        # Grant the hard-coded role when someone reacts with a thumbs-up.
        # NOTE(review): assumes the reaction happens in a guild so that
        # `user` is a Member with .guild/.add_roles — confirm.
        if reaction.emoji == "👍":
            print(user)
            role = user.guild.get_role(889280422364721152)
            await user.add_roles(role)
def setup(bot):
    # Extension entry point registering the cog.
    # NOTE(review): discord.py 2.x requires `async def setup` with
    # `await bot.add_cog(...)`; this synchronous form is for 1.x — confirm
    # the target library version.
    bot.add_cog(Reactions(bot))
|
class Solution:
    def longestCommonPrefix(self, strs: List[str]) -> str:
        """Return the longest string that prefixes every entry of *strs*.

        Empty input yields the empty string.
        """
        if not strs:
            return ""
        candidate = strs[0]
        for s in strs[1:]:
            # Shrink the candidate from the right until it prefixes s.
            while candidate and not s.startswith(candidate):
                candidate = candidate[:-1]
            # Nothing left in common: stop early.
            if candidate == "":
                break
        return candidate
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
from mysql_credentials import MysqlCredentials as dbc
APP = Flask(__name__)
# APP.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.db'
# Credentials come from mysql_credentials.MysqlCredentials (imported as dbc).
APP.config['SQLALCHEMY_DATABASE_URI'] = f'mysql+pymysql://{dbc.user}:{dbc.password}@localhost/flask_migrate'
DB = SQLAlchemy(APP)
MIGRATE = Migrate(APP, DB)
MANAGER = Manager(APP)
# Exposes flask-migrate commands as `python <script> DB <command>`.
MANAGER.add_command('DB', MigrateCommand)
class Member(DB.Model):
    """A site member with a subscription flag."""
    id = DB.Column(DB.Integer, primary_key=True)
    name = DB.Column(DB.String(50))
    subscribed = DB.Column(DB.Boolean)
class Orders(DB.Model):
    """An order with its total amount."""
    id = DB.Column(DB.Integer, primary_key=True)
    total = DB.Column(DB.Integer)
if __name__ == '__main__':
    # Manager.run() parses the CLI (e.g. `python <script> runserver` or
    # `python <script> DB migrate`) and blocks until the chosen command
    # exits, so the original trailing APP.run(debug=True) was unreachable
    # dead code and has been removed; use `runserver` to start the dev server.
    MANAGER.run()
|
from argparse import ArgumentParser, Namespace
from collections import OrderedDict
from functools import partial
from logging import getLogger
from multiprocessing.pool import Pool
from typing import Dict, Optional, Tuple
from g2p_en import G2p
from ordered_set import OrderedSet
from pronunciation_dictionary import (PronunciationDict, Pronunciations, SerializationOptions, Word,
save_dict)
from tqdm import tqdm
from word_to_pronunciation import Options, get_pronunciations_from_word
from dict_from_g2pE.argparse_helper import (DEFAULT_PUNCTUATION, ConvertToOrderedSetAction,
add_chunksize_argument, add_encoding_argument,
add_maxtaskperchild_argument, add_n_jobs_argument,
add_serialization_group, parse_existing_file,
parse_non_empty_or_whitespace, parse_path,
parse_positive_float)
def get_app_try_add_vocabulary_from_pronunciations_parser(parser: ArgumentParser):
  """Configure the CLI sub-parser and return the function that executes it."""
  parser.description = "Transcribe vocabulary using g2p."
  # TODO support multiple files
  parser.add_argument("vocabulary", metavar='vocabulary', type=parse_existing_file,
                      help="file containing the vocabulary (words separated by line)")
  add_encoding_argument(parser, "--vocabulary-encoding", "encoding of vocabulary")
  parser.add_argument("dictionary", metavar='dictionary', type=parse_path,
                      help="path to output created dictionary")
  parser.add_argument("--weight", type=parse_positive_float,
                      help="weight to assign for each pronunciation", default=1.0)
  # Punctuation trimmed from word edges before lookup, stored as an ordered set.
  parser.add_argument("--trim", type=parse_non_empty_or_whitespace, metavar='SYMBOL', nargs='*',
                      help="trim these symbols from the start and end of a word before lookup", action=ConvertToOrderedSetAction, default=DEFAULT_PUNCTUATION)
  parser.add_argument("--split-on-hyphen", action="store_true",
                      help="split words on hyphen symbol before lookup")
  add_serialization_group(parser)
  mp_group = parser.add_argument_group("multiprocessing arguments")
  add_n_jobs_argument(mp_group)
  add_chunksize_argument(mp_group)
  add_maxtaskperchild_argument(mp_group)
  # The argparse framework calls this with the parsed Namespace.
  return get_pronunciations_files
def get_pronunciations_files(ns: Namespace) -> bool:
  """CLI entry point: transcribe the vocabulary file and save the dictionary.

  Returns True on success, False if the vocabulary could not be read or the
  dictionary could not be written.
  """
  assert ns.vocabulary.is_file()
  logger = getLogger(__name__)
  try:
    vocabulary_content = ns.vocabulary.read_text(ns.vocabulary_encoding)
  except Exception as ex:
    logger.error("Vocabulary couldn't be read.")
    # Consistency fix: log the underlying exception for debugging, matching
    # the write-failure branch below (the original silently dropped `ex`).
    logger.debug(ex)
    return False
  vocabulary_words = OrderedSet(vocabulary_content.splitlines())
  trim_symbols = ''.join(ns.trim)
  options = Options(trim_symbols, ns.split_on_hyphen, False, False, 1.0)
  dictionary_instance = get_pronunciations(
    vocabulary_words, ns.weight, options, ns.n_jobs, ns.maxtasksperchild, ns.chunksize)
  s_options = SerializationOptions(ns.parts_sep, ns.include_numbers, ns.include_weights)
  try:
    save_dict(dictionary_instance, ns.dictionary, ns.serialization_encoding, s_options)
  except Exception as ex:
    logger.error("Dictionary couldn't be written.")
    logger.debug(ex)
    return False
  logger.info(f"Written dictionary to: {ns.dictionary.absolute()}")
  return True
def get_pronunciations(vocabulary: OrderedSet[Word], weight: float, options: Options, n_jobs: int, maxtasksperchild: Optional[int], chunksize: int) -> PronunciationDict:
  """Transcribe every vocabulary word with g2p in a multiprocessing pool.

  Workers receive the vocabulary and model once via the pool initializer and
  are sent only word indices, keeping the per-task payload small.
  """
  lookup_method = partial(
    process_get_pronunciation,
    weight=weight,
    options=options,
  )
  # NOTE(review): the G2p model is pickled into each worker via initargs —
  # confirm it is picklable and that per-worker copies fit in memory.
  model = G2p()
  with Pool(
    processes=n_jobs,
    initializer=__init_pool_prepare_cache_mp,
    initargs=(vocabulary, model),
    maxtasksperchild=maxtasksperchild,
  ) as pool:
    entries = range(len(vocabulary))
    iterator = pool.imap(lookup_method, entries, chunksize)
    # Collect (index, pronunciations) pairs with a progress bar.
    pronunciations_to_i = dict(tqdm(iterator, total=len(entries), unit="words"))
  return get_dictionary(pronunciations_to_i, vocabulary)
def get_dictionary(pronunciations_to_i: Dict[int, Pronunciations], vocabulary: OrderedSet[Word]) -> PronunciationDict:
  """Re-key pronunciations from vocabulary index to the word, keeping order."""
  result = OrderedDict()
  for index, word in enumerate(vocabulary):
    word_pronunciations = pronunciations_to_i[index]
    # Each word must have exactly one pronunciation and appear only once.
    assert len(word_pronunciations) == 1
    assert word not in result
    result[word] = word_pronunciations
  return result
# Per-worker globals, populated once by the pool initializer below.
process_unique_words: OrderedSet[Word] = None
process_model: G2p = None
def __init_pool_prepare_cache_mp(words: OrderedSet[Word], model: G2p) -> None:
  """Pool initializer: cache the vocabulary and g2p model in the worker."""
  global process_unique_words
  global process_model
  process_unique_words = words
  process_model = model
def process_get_pronunciation(word_i: int, weight: float, options: Options) -> Tuple[int, Pronunciations]:
  """Worker task: look up pronunciations for the word at index *word_i*.

  Reads the word and model from the per-worker globals set by the pool
  initializer; returns the index so results can be matched up out of order.
  """
  global process_unique_words
  global process_model
  assert 0 <= word_i < len(process_unique_words)
  word = process_unique_words[word_i]
  # TODO support all entries; also create all combinations with hyphen then
  lookup_method = partial(
    lookup_in_model,
    model=process_model,
    weight=weight,
  )
  pronunciations = get_pronunciations_from_word(word, lookup_method, options)
  #logger = getLogger(__name__)
  # logger.debug(pronunciations)
  return word_i, pronunciations
def lookup_in_model(word: Word, model: G2p, weight: float) -> Pronunciations:
  """Predict the pronunciation of *word* with the g2p model.

  Returns an OrderedDict mapping the single predicted phoneme tuple to the
  given weight (the Pronunciations shape used throughout this module).
  """
  assert len(word) > 0
  # lower() because G2p seems to predict only lower-case words correctly.
  phonemes = tuple(model.predict(word.lower()))
  return OrderedDict(((phonemes, weight),))
|
"""Load covariance matrix, perform classif, perm test, saves results.
Outputs one file per freq x state
Author: Arthur Dehgan"""
from time import time
from scipy.io import savemat, loadmat
import pandas as pd
import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.model_selection import cross_val_score
from pyriemann.classification import TSclassifier
from utils import StratifiedLeave2GroupsOut, prepare_data, classification
from params import SAVE_PATH, STATE_LIST
prefix = "classif_subsamp_"
name = "cov"
state = "SWS"
SAVE_PATH = SAVE_PATH / name
# info_data.csv holds per-state trial counts; keep only the state in use.
info_data = pd.read_csv(SAVE_PATH.parent / "info_data.csv")[STATE_LIST]
info_data = info_data[state]
# Subsample every subject to the smallest trial count to stay balanced.
N_TRIALS = info_data.min().min()
# NOTE(review): N_SUBS drops one entry from info_data — confirm the last row
# is a non-subject entry (e.g. a totals row).
N_SUBS = len(info_data) - 1
groups = [i for i in range(N_SUBS) for _ in range(N_TRIALS)]
N_TOTAL = N_TRIALS * N_SUBS
# First half of the subject-ordered trials is class 0, second half class 1.
labels = [0 if i < N_TOTAL / 2 else 1 for i in range(N_TOTAL)]
file_name = prefix + name + "n153_{}.mat".format(state)
save_file_path = SAVE_PATH / "results" / file_name
# Bug fix: `/` binds tighter than `+`, so the original
# `SAVE_PATH / name + "_{}.mat".format(state)` attempted Path + str and
# raised TypeError; build the file name first, then join it onto the path.
data_file_path = SAVE_PATH / (name + "_{}.mat".format(state))
final_save = None
data = loadmat(data_file_path)
data = prepare_data(data, n_trials=N_TRIALS, random_state=0)
sl2go = StratifiedLeave2GroupsOut()
lda = LDA()
clf = TSclassifier(clf=lda)
score = cross_val_score(clf, data, labels, groups, cv=sl2go, n_jobs=-1)
print(score)
# save['acc_bootstrap'] = [save['acc_score']]
# save['auc_bootstrap'] = [save['auc_score']]
# if final_save is None:
# final_save = save
# else:
# for key, value in final_save.items():
# final_save[key] = final_save[key] + save[key]
# savemat(save_file_path, final_save)
|
import fileinput
import itertools
def get_inputs(filename=None):
    """Parse lines like 'A to B = 5' into a symmetric {(a, b): dist} dict."""
    distances = {}
    for line in fileinput.input(filename or "inputs.txt"):
        tokens = line.strip().replace(" to ", " ").replace(" = ", " ").split()
        dist = int(tokens[2])
        # Record both directions so routes can be walked either way.
        distances[(tokens[0], tokens[1])] = dist
        distances[(tokens[1], tokens[0])] = dist
    return distances
def get_paths(distances):
    """Return the total length of every route visiting each town once."""
    towns = {pair[0] for pair in distances}
    totals = []
    for route in itertools.permutations(towns):
        # Sum the distance of each consecutive leg along this ordering.
        total = sum(distances[leg] for leg in zip(route, route[1:]))
        totals.append(total)
    return totals
def part_1(filename=None):
    """Length of the shortest route visiting every town exactly once."""
    return min(get_paths(get_inputs(filename)))
def part_2(filename=None):
    """Length of the longest route visiting every town exactly once."""
    return max(get_paths(get_inputs(filename)))
if __name__ == "__main__":
    # Solve both parts against the default inputs.txt.
    print("Day 09")
    print(f"Part 1: {part_1()}")
    print(f"Part 2: {part_2()}")
|
#!/usr/bin/env python
#coding=utf-8
# vim: set filetype=python ts=4 sw=4 sts=4 expandtab autoindent :
'''
Get username and password from http://www.bugmenot.com/
File: bugmynot.py
Author: notsobad.me
Description:
Created: 2009-11-09 15:23:41
Last modified: 2010.11.25
'''
import optparse
import sys
import urllib2
import re
class BugMeNot:
    """Scrape shared credentials for a site from bugmenot.com (Python 2)."""

    def __init__(self):
        # Regex over bugmenot's account-detail table: captures username,
        # password, the "Other" field and the success-rate percentage.
        self.regex = u'<tr><th>Username </th><td>([^<]*)</td></tr>'
        self.regex += u'[^<]+?<tr><th>Password </th><td>([^<]*)</td></tr>'
        self.regex += u'[^<]+?<tr><th>Other</th><td>([^<]*)</td></tr>'
        self.regex += u'[^<]+?<tr><th>Stats</th><td class="stats"><em class="[^"]*">([0-9]*%)[^<]*</em>[^<]*</td></tr>'

    def _get_account(self, host):
        """Fetch the bugmenot page for *host* and extract all account rows."""
        headers = dict()
        # Spoof a browser UA and disable caches so we get a fresh page.
        headers['User-Agent'] = 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)'
        headers['Pragma'] = 'no-cache'
        headers['Cache-Control'] = 'no-cache'
        try:
            urlOpener = urllib2.build_opener()
            request = urllib2.Request('http://www.bugmenot.com/view/%s?utm_source=extension&utm_medium=firefox' % host, None, headers)
            page = urlOpener.open(request).read(50000) # Log in BugMeNot
        except (urllib2.HTTPError, urllib2.URLError):
            print >> sys.stderr, 'Http Error! Please check the url you input and the network connection'
            sys.exit(2)
        re_loginpwd = re.compile(self.regex, re.IGNORECASE | re.DOTALL)
        match = re_loginpwd.findall(page)
        #return [(i, j) for i, j in match if i and j and len(i) < 30]
        # Drop rows with empty fields or implausibly long usernames.
        return [{'username':i, 'password':j, 'other':o, 'stats':s} for i, j, o, s in match if i and j and len(i) < 30]

    def get_account(self, host):
        """Public wrapper around _get_account."""
        return self._get_account(host)
if __name__ == '__main__':
    # CLI: fetch accounts for --site and print them as a table or JSON.
    parser = optparse.OptionParser()
    parser.add_option("-e", "--extended_info", dest="extended_info",
                      action="store_true", default=False, help="Show extended info in the text mode")
    parser.add_option("-s", "--site", dest="site", help="The target site")
    parser.add_option("-t", "--ret_type", dest="ret", default="text", help="The return type(text/json)")
    (options, args) = parser.parse_args()
    if options.site:
        bug = BugMeNot()
        accounts = bug.get_account(options.site)
        if not accounts:
            print "No accounts/password for %s found in www.bugmenot.com" % options.site
            sys.exit(1)
        if options.ret == 'text':
            # Fixed-width table; trailing commas keep the header on one line.
            print "%-30s\t%-20s" % ("Username", "Password"),
            line_len = 30 + 20
            if options.extended_info:
                print "\t%-25s\t%-5s" % ("Other", "Stats"),
                line_len += 25 + 20
            print "\n", "-" * line_len
            for account in accounts:
                print "%(username)-30s\t%(password)-20s" % account,
                if options.extended_info:
                    print "\t%(other)-25s\t%(stats)-5s" % account,
                print
        elif options.ret == 'json':
            import json
            print json.dumps(accounts)
    else:
        parser.print_help()
        sys.exit(1)
    sys.exit(0)
|
from Crypto.Util.number import *
from random import getrandbits
from flag import flag
# Encode the flag as a bit string.
flag = bytes_to_long(flag.encode("utf-8"))
flag = bin(flag)[2:]
length = len(flag)
# Build a superincreasing sequence A: b always holds the sum of all previous
# terms, so each new a exceeds that sum (knapsack-style construction).
A = []
a, b = 0, 0
for _ in range(length):
    a += getrandbits(32) + b
    b += a
    A.append(a)
p = getStrongPrime(512)
q = getStrongPrime(512)
# q must exceed sum(A) so the subset sum never wraps modulo q.
assert q > sum(A)
# Public key hides A by multiplying each term with p modulo q.
pub_key = [a * p % q for a in A]
# Ciphertext is the subset sum of public-key terms selected by the flag bits.
cipher = sum([int(flag[i]) * pub_key[i] for i in range(length)])
f = open("output.txt", "w")
f.write("pub_key = " + str(pub_key) + "\n")
f.write("cipher = " + str(cipher) + "\n")
f.close()
|
import matplotlib.pyplot as plt
# Import Dependencies
import numpy as np
import pandas as pd
from sklearn.datasets import load_boston
import tensorflow as tf
# Load the 13 feature columns and the single label column from the CSVs.
features_df = pd.read_csv('ds_midterm2018.csv', usecols=[1,2,3,4,5,6,7,8,9,10,11,12,13])
features_df.head()
features_df.shape
features_df.describe()
labels_df = pd.read_csv('ds_midterm2018_label.csv', usecols=[1])
labels_df.head()
labels_df.shape
X_train = features_df
y_train = labels_df
# Standardise the features, then convert both frames to numpy arrays.
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train = pd.DataFrame(data=scaler.transform(X_train), columns=X_train.columns, index=X_train.index)
X_train = np.array(X_train)
y_train = np.array(y_train)
type(X_train), type(y_train)
lr = 0.01  # adjust (translated from Korean: "수정")
# Number of epochs for which the model will run
epochs = 500  # adjust (translated from Korean: "수정")
X = tf.placeholder(tf.float32,[None,X_train.shape[1]])
# Labels
y = tf.placeholder(tf.float32,[None,1])
# NOTE(review): for regression onto a single label the weight shape should
# presumably be [13, 1]; [13, 13] yields a 13-dimensional output — confirm.
W = tf.Variable(tf.random_normal([13,13]))  # fill in the parentheses (translated)
print(W)
# Bias
# NOTE(review): a bias tied to a batch size of 500 only works if the
# training set has exactly 500 rows — confirm.
b = tf.Variable(tf.random_normal([500, 13]))  # fill in the parentheses (translated)
init = tf.global_variables_initializer()
# Linear model prediction.
y_hat = tf.add(tf.matmul(X, W), b)
# Loss Function
# NOTE(review): reduce_max(y_hat) ignores the labels entirely; a regression
# loss such as reduce_mean(tf.square(y_hat - y)) looks intended — confirm.
loss= tf.reduce_max(y_hat)  # the loss function goes here (translated)
# Gradient Descent Optimizer to Minimize the Cost
optimizer = tf.train.GradientDescentOptimizer(learning_rate=lr).minimize(loss)
loss_history = np.empty(shape=[1],dtype=float)
with tf.Session() as sess:
    # Initialize all Variables
    sess.run(init)
    err = sess.run(loss, feed_dict={X: X_train, y: y_train})
    print('Epoch: 0, Error: {0}'.format(err))
    for epoch in range(1, epochs):
        # Run the optimizer and the cost functions
        result = sess.run(optimizer, feed_dict={X: X_train, y: y_train})
        err = sess.run(loss, feed_dict={X: X_train, y: y_train})
        # Add the calculated loss to the array
        loss_history = np.append(loss_history, err)
        # Print the Loss/Error after every 100 epochs
        if epoch % 100 == 0:
            print('Epoch: {0}, Error: {1}'.format(epoch, err))
    print('Epoch: {0}, Error: {1}'.format(epoch + 1, err))
    # Values of Weight & Bias after Training
    new_W = sess.run(W)
    new_b = sess.run(b)
    # Predicted Labels
    y_pred = sess.run(y_hat, feed_dict={X: X_train})
    # Error: MSE or MAE
    # error = tf.metrics.mean_absolute_error  # compute (translated)
    # NOTE(review): this assigns the metric *function* itself, so the print
    # below shows a function object rather than a computed error — confirm.
    error = tf.metrics.mean_squared_error
print('Trained Weights: \n', new_W)
print('Trained Bias: \n', new_b)
plt.plot(range(len(loss_history)), loss_history)
plt.axis([0, epochs, 0, np.max(loss_history)])
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.title('Loss vs Epochs', fontsize=25)
print('Predicted Values: \n', y_pred)
print('Error [TF Session]: ', error)
plt.show()
|
import os
from flask import Flask, render_template, jsonify, request, redirect, Response
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import create_engine
from sqlalchemy.ext.automap import automap_base
import pandas as pd
import pymysql
pymysql.install_as_MySQLdb()
app = Flask(__name__)
# ************** Database Setup ***************
# SECURITY NOTE(review): database credentials are hard-coded in the URI;
# move them to environment variables or a secrets store.
app.config['SQLALCHEMY_DATABASE_URI'] = "mysql://admin:NUDataScience2019@nu-chicago-crime-app.ccjnkjeza5yv.us-east-2.rds.amazonaws.com:3306/chicago_crime_app"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# Attach db to Flask app so Flask handels db session managment and other good things
db = SQLAlchemy(app)
# Cache every table as a DataFrame once at startup; routes serve from `data`.
data = {}
e = db.get_engine()
table_names = ['graphdata', 'mapdata', 'three_day_forecast', 'scikitlearn_results_day_1','scikitlearn_results_day_2','scikitlearn_results_day_3'] #e.table_names()
for name in table_names:
    tbl = e.execute('SELECT * FROM {}'.format(name)).fetchall()
    data[name] = pd.DataFrame(tbl)
# Futher down in code, to access data (no-op expression kept as an example):
data['graphdata']
# *********************************************
# ************** WEBPAGES *********************
# *********************************************
@app.route("/")
def renderHome():
    """Serve the single-page front end."""
    return render_template("index.html")
# *********************************************
# ************** API ENDPOINTS ****************
# *********************************************
@app.route("/api/graph/<crime>")
def lStationsJson(crime):
    """Graph rows for *crime* as JSON records (column 3 holds the crime type)."""
    tbl = data['graphdata']
    results = tbl[tbl[3] == crime]
    json_str = results.to_json(orient="records")
    return Response(response=json_str, status=200, mimetype='application/json')
@app.route("/api/map/<crime>")
def lStationsJson2(crime):
    """Map rows for *crime* as JSON records (column 3 holds the crime type)."""
    tbl = data['mapdata']
    results = tbl[tbl[3] == crime]
    json_str = results.to_json(orient="records")
    return Response(response=json_str, status=200, mimetype='application/json')
@app.route("/api/forecast")
def lStationsJson3():
    """The full three-day weather forecast table as JSON records."""
    tbl = data['three_day_forecast']
    json_str = tbl.to_json(orient="records")
    return Response(response=json_str, status=200, mimetype='application/json')
@app.route("/api/predictionDay1/<crime>")
def lStationsJson4(crime):
    """Day-1 model predictions for *crime* (column 7 holds the crime type)."""
    tbl = data['scikitlearn_results_day_1']
    results = tbl[tbl[7] == crime]
    json_str = results.to_json(orient="records")
    return Response(response=json_str, status=200, mimetype='application/json')
@app.route("/api/predictionDay2/<crime>")
def lStationsJson5(crime):
    """Day-2 model predictions for *crime* (column 7 holds the crime type)."""
    tbl = data['scikitlearn_results_day_2']
    results = tbl[tbl[7] == crime]
    json_str = results.to_json(orient="records")
    return Response(response=json_str, status=200, mimetype='application/json')
@app.route("/api/predictionDay3/<crime>")
def lStationsJson6(crime):
    """Day-3 model predictions for *crime* (column 7 holds the crime type).

    The original had an unreachable `return jsonify(data)` after the
    Response return; that dead line has been removed.
    """
    tbl = data['scikitlearn_results_day_3']
    results = tbl[tbl[7] == crime]
    json_str = results.to_json(orient="records")
    return Response(response=json_str, status=200, mimetype='application/json')
if __name__ == "__main__":
    # Development server only; use a WSGI server in production.
    app.run(debug=True)
from __future__ import print_function
import airflow
import pytz
import logging
from datetime import datetime, timedelta
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.hive_operator import HiveOperator
from airflow.models import Variable
start_date = datetime(2019, 02, 06, 0, 0, 0, tzinfo=pytz.utc)
os.environ['SPARK_HOME'] = '/home/ubuntu/spark-2.3.1-bin-hadoop2.7/'
sys.path.append(os.path.join(os.environ['SPARK_HOME'], 'bin'))
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': start_date,
'schedule_interval': None,
'email': [''],
'email_on_failure': False,
'email_on_retry': False,
'retries': 0,
'retry_delay': timedelta(minutes=45)
}
dag = DAG('lab5',
description = 'Airflow simple',
schedule_interval = None,
default_args = default_args)
spark_task = BashOperator(
task_id='spark_python',
bash_command='/home/ubuntu/spark-2.3.1-bin-hadoop2.7/bin/spark-submit --packages org.apache.hadoop:hadoop-aws:2.7.3 /home/ubuntu/pyspark_read_s3/main.py',
dag = dag
)
load_to_hive = HiveOperator(
hive_cli_conn_id = 'hive_cli_default',
task_id = 'load_to_hive',
hql = 'SELECT B_I from finaldata',
dag = dag
)
spark_task >> load_to_hive
|
from django.shortcuts import render
from django.http import HttpResponse
from django.shortcuts import render
# Create your views here.
def startMainPage(request):
    """Render the home page template."""
    template_name = 'home_html.html'
    return render(request, template_name)
|
import cv2
import numpy as np
import utlis
import os
# Module-level holder for the most recent grade; written by Process(), read by Score().
score = 0
###########################################################
def Process(path, pre, fin, ans, questions, choice):
    """Grade one scanned OMR (bubble-sheet) image.

    path (str): image file to grade.
    pre (int): 1 to display the debug image stack window.
    fin (int): 1 to display the final annotated result window.
    ans (list[int]): correct option index for each question.
    questions (int): number of question rows on the sheet.
    choice (int): number of options per question.

    Side effects: updates the module global `score` (percentage) and writes
    the annotated sheet to a hard-coded ImageFile/FinalOutput.jpg path.
    """
    print(path)
    widthImg = 700
    heightImg = 700
    # questions = 5
    # choice = 5
    # ans = [1, 2, 0, 1, 4]
    print(ans)
    ##################################
    # cap = cv2.VideoCapture(0)
    # cap.set(10, 150)
    ####################################
    img = cv2.imread(path)
    # PREPROSEESING
    img = cv2.resize(img, (widthImg, heightImg))
    imgContours = img.copy()
    imgFinal = img.copy()
    imgBiggestContours = img.copy()
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1)
    imgCanny = cv2.Canny(imgBlur, 10, 50)
    # FIND ALL CONTOURS
    countours, hierarchy = cv2.findContours(imgCanny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    cv2.drawContours(imgContours, countours, -1, (0, 255, 0), 10)
    try:
        # FIND RECTANGLES
        rectCon = utlis.rectCountour(countours)
        biggestContour = utlis.getCornerPoints(rectCon[0])
        # print(biggestContour.shape)
        gradePoints = utlis.getCornerPoints(rectCon[1])
        # print(biggestContour)
        if biggestContour.size != 0 and gradePoints.size != 0:
            cv2.drawContours(imgBiggestContours, biggestContour, -1, (0, 255, 0), 20)
            cv2.drawContours(imgBiggestContours, gradePoints, -1, (255, 0, 0), 20)
            biggestContour = utlis.reorder(biggestContour)
            gradePoints = utlis.reorder(gradePoints)
            # Warp the answer region to a square top-down view.
            pt1 = np.float32(biggestContour)
            pt2 = np.float32([[0, 0], [widthImg, 0], [0, heightImg], [widthImg, heightImg]])
            matrix = cv2.getPerspectiveTransform(pt1, pt2)
            imgWarpColored = cv2.warpPerspective(img, matrix, (widthImg, heightImg))
            # Warp the (smaller) grade box to its own top-down view.
            ptG1 = np.float32(gradePoints)
            ptG2 = np.float32([[0, 0], [325, 0], [0, 150], [325, 150]])
            matrixG = cv2.getPerspectiveTransform(ptG1, ptG2)
            imgGradeDisplay = cv2.warpPerspective(img, matrixG, (325, 150))
            # cv2.imshow("Grade",imgGradeDisplay)
            # APPLT THRESHOLD
            imgWarpGray = cv2.cvtColor(imgWarpColored, cv2.COLOR_BGR2GRAY)
            imgThresh = cv2.threshold(imgWarpGray, 170, 255, cv2.THRESH_BINARY_INV)[1]
            boxes = utlis.splitBoxes(imgThresh)
            # cv2.imshow("Test",boxes[2])
            # print(cv2.countNonZero(boxes[1]),cv2.countNonZero(boxes[2]))
            # GETTING NONE ZERO PIXEL VALUE EACH BOXES
            myPixelVal = np.zeros((questions, choice))  # row and column
            countC = 0
            countR = 0
            for image in boxes:
                totolPixels = cv2.countNonZero(image)
                myPixelVal[countR][countC] = totolPixels
                countC += 1
                if (countC == choice): countR += 1;countC = 0  # end of a question row: next row, reset column
            # print(myPixelVal)
            # FINDING INDEX VALUES OF THE MARKINGS
            myIndex = []
            for x in range(0, questions):  # all pixel is convert into row like Q1: A B C D E
                arr = myPixelVal[x]
                # print("arr",arr)
                myIndexVal = np.where(arr == np.amax(arr))  # the most-filled bubble in the row is taken as the answer
                # print(myIndexVal[0])
                myIndex.append(myIndexVal[0][0])  # list of Answer
            # print(myIndex)
            # GRADING
            grading = []
            for x in range(0, questions):
                if (ans[x] == myIndex[x]):
                    grading.append(1)  # perform array of grade[1,1,1,1,1,1]
                else:
                    grading.append(0)
            # print(grading)
            global score
            score = (sum(grading) / questions) * 100  # FINAL GRADE # count final grade
            print(score)
            # DISPLAYING ANSWERS
            imgResult = imgWarpColored.copy()
            imgResult = utlis.showAnswer(imgResult, myIndex, grading, ans, questions, choice)
            imgRawDrawing = np.zeros_like(imgWarpColored)
            imgRawDrawing = utlis.showAnswer(imgRawDrawing, myIndex, grading, ans, questions, choice)
            # Un-warp the marking overlay back onto the original perspective.
            invMatrix = cv2.getPerspectiveTransform(pt2, pt1)
            imgInvWarp = cv2.warpPerspective(imgRawDrawing, invMatrix, (widthImg, heightImg))
            imgRawGrade = np.zeros_like(imgGradeDisplay)
            cv2.putText(imgRawGrade, str(int(score)) + "%", (60, 100), cv2.FONT_HERSHEY_COMPLEX, 3, (0, 255, 255), 3)
            # cv2.imshow("Grade",imgRawGrade)
            InvMatrixG = cv2.getPerspectiveTransform(ptG2, ptG1)
            imgInvGradeDisplay = cv2.warpPerspective(imgRawGrade, InvMatrixG, (widthImg, heightImg))
            imgFinal = cv2.addWeighted(imgFinal, 1, imgInvWarp, 1, 0)
            imgFinal = cv2.addWeighted(imgFinal, 1, imgInvGradeDisplay, 1, 0)
        imgBlank = np.zeros_like(img)
        imageArray = ([img, imgGray, imgBlur, imgCanny],
                      [imgContours, imgBiggestContours, imgWarpColored, imgThresh],
                      [imgResult, imgRawDrawing, imgInvWarp, imgFinal])
    except:  # NOTE(review): bare except hides real failures — consider narrowing
        imgBlank = np.zeros_like(img)
        imageArray = ([img, imgGray, imgBlur, imgCanny],
                      [imgBlank, imgBlank, imgBlank, imgBlank],
                      [imgBlank, imgBlank, imgBlank, imgBlank])
    lables = [["Orignal", "Gray", "Blur", "Canny"],
              ["contours", "biggestContours", "Warp", "Thresold"],
              ["Result", "Raw Drawing", "Inv Warp", "Final Img"]]
    imagStacked = utlis.stackImages(imageArray, 0.35, lables)
    if (pre == 1):
        cv2.imshow("Stacked Image", imagStacked)
    if (fin == 1):
        cv2.imshow("Fianl Result", imgFinal)
    path = 'E:/Project/mini_project/venv/ImageFile'
    cv2.imwrite(os.path.join(path, 'FinalOutput.jpg'), imgFinal)
    cv2.waitKey(0)
def Score():
    """Echo and return the grade computed by the most recent Process() call."""
    latest = score
    print(latest)
    return latest
|
import numpy as np
#import tensorflow as tf
from keras.preprocessing import image
import matplotlib.pyplot as plt
import skimage as skimage
from skimage import data, io, filters, transform
input_size = 512
#random flip
def random_flip(img, mask, u=1):
    """Horizontally flip img and mask together with probability u."""
    do_flip = np.random.random() < u
    if do_flip:
        img = image.flip_axis(img, 1)
        mask = image.flip_axis(mask, 1)
    return img, mask
#rotate util
'''
this takes a theta in radians
ht is along x axis
wd is along y axis
'''
def rotate(x, theta, row_axis=0, col_axis=1, channel_axis=2, fill_mode='nearest', cval=0.):
    """Rotate array x by theta radians about its center (rows=x axis, cols=y axis)."""
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    rot = np.array([[cos_t, -sin_t, 0],
                    [sin_t, cos_t, 0],
                    [0, 0, 1]])
    height, width = x.shape[row_axis], x.shape[col_axis]
    centered = image.transform_matrix_offset_center(rot, height, width)
    return image.apply_transform(x, centered, channel_axis, fill_mode, cval)
#rotate that uses rotate Util
'''
optioanlly takes rotate limits
'''
def random_rotate(img, mask, rotate_limit=(-30, 30), u=0.8):
    """With probability u, rotate img and mask by the same random angle.

    Angle is drawn uniformly in degrees from rotate_limit, converted to radians.
    """
    if np.random.random() < u:
        angle_deg = np.random.uniform(rotate_limit[0], rotate_limit[1])
        theta = np.pi / 180 * angle_deg
        img = rotate(img, theta)
        mask = rotate(mask, theta)
    return img, mask
#shift util
def shift(x, wshift, hshift, row_axis=0, col_axis=1, channel_axis=2, fill_mode='nearest', cval=0.):
    """Translate x by wshift/hshift expressed as fractions of width/height."""
    height, width = x.shape[row_axis], x.shape[col_axis]
    tx, ty = hshift * height, wshift * width
    translation = np.array([[1, 0, tx],
                            [0, 1, ty],
                            [0, 0, 1]])
    # Pure translation needs no center offset.
    return image.apply_transform(x, translation, channel_axis, fill_mode, cval)
#shift method
'''
takes shift limits for ht and wd
'''
def random_shift(img, mask, w_limit=(-0.2, 0.2), h_limit=(-0.2, 0.2), u=0.7):
    """With probability u, translate img and mask by the same random fractions."""
    if np.random.random() < u:
        wshift = np.random.uniform(w_limit[0], w_limit[1])
        hshift = np.random.uniform(h_limit[0], h_limit[1])
        img = shift(img, wshift, hshift)
        mask = shift(mask, wshift, hshift)
    return img, mask
#zoom util
def zoom(x, zx, zy, row_axis=0, col_axis=1, channel_axis=2, fill_mode='nearest', cval=0.):
    """Scale x by factors zx (rows) and zy (cols) about its center."""
    scale = np.array([[zx, 0, 0],
                      [0, zy, 0],
                      [0, 0, 1]])
    height, width = x.shape[row_axis], x.shape[col_axis]
    centered = image.transform_matrix_offset_center(scale, height, width)
    return image.apply_transform(x, centered, channel_axis, fill_mode, cval)
#zoom method
'''
uses zoom util to zoom in ht and wd along the center
'''
def random_zoom(img, mask, zoom_range=(0.8, 1), u=0.5):
    """With probability u, zoom img and mask by the same random (zx, zy) factors."""
    if np.random.random() < u:
        zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
        img = zoom(img, zx, zy)
        mask = zoom(mask, zx, zy)
    return img, mask
#shear util
def shear(x, shear, row_axis=0, col_axis=1, channel_axis=2, fill_mode='nearest', cval=0.):
    """Apply a shear of the given intensity about the center of x."""
    sin_s, cos_s = np.sin(shear), np.cos(shear)
    shear_mat = np.array([[1, -sin_s, 0],
                          [0, cos_s, 0],
                          [0, 0, 1]])
    height, width = x.shape[row_axis], x.shape[col_axis]
    centered = image.transform_matrix_offset_center(shear_mat, height, width)
    return image.apply_transform(x, centered, channel_axis, fill_mode, cval)
#method for shear
'''
takes in shear intensity range and shears ht and wd along the center
'''
def random_shear(img, mask, intensity_range=(-0.3, 0.3), u=0.6):
    """With probability u, shear img and mask by the same random intensity.

    FIX: the intensity was drawn from uniform(-intensity_range[0],
    intensity_range[1]), which for the default (-0.3, 0.3) collapses to
    uniform(0.3, 0.3) — a constant. Draw from the full [low, high] range.
    """
    if np.random.random() < u:
        sh = np.random.uniform(intensity_range[0], intensity_range[1])
        img = shear(img, sh)
        mask = shear(mask, sh)
    return img, mask
#random blur
def random_blur(img, mask, blurSigma = 4):
    """Gaussian-blur img and mask with a shared random sigma drawn from [1, blurSigma]."""
    blur_sigma = np.random.uniform(1, blurSigma)
    if blur_sigma > 0:
        img = skimage.filters.gaussian(img, sigma=blur_sigma, multichannel=True)
        mask = skimage.filters.gaussian(mask, sigma=blur_sigma, multichannel=True)
    # FIX: the original returned img2/mask2, which are unbound (NameError) when
    # the branch is skipped; return the (possibly blurred) inputs instead.
    return img, mask
#generic method for plotting
'''
takes image,mask,transfromed image, transformed mask as input
'''
def plot_img_and_mask_transformed(img, mask, img_tr, mask_tr):
    """Show the original and transformed image/mask pairs in one 4-panel row."""
    fig, axs = plt.subplots(ncols=4, figsize=(16, 4), sharex=True, sharey=True)
    panels = (img, mask[:, :, 0], img_tr, mask_tr[:, :, 0])
    for axis, panel in zip(axs, panels):
        axis.imshow(panel)
    for axis in axs:
        axis.set_xlim(0, input_size)
        axis.axis('off')
    fig.tight_layout()
    plt.show()
# Index -> augmentation callable; the order defines the augmentation suite.
keyTransformation = {0: random_flip, 1: random_rotate, 2: random_shift,
                     3: random_zoom, 4: random_shear, 5: random_blur}
def TransformImageMask(img, mask):
    """Apply every registered augmentation to (img, mask).

    Returns two parallel lists: augmented images and augmented masks,
    one entry per transformation in keyTransformation.
    """
    images = []
    masks = []
    for idx in range(6):
        augment = keyTransformation[idx]
        out_img, out_mask = augment(img, mask)
        images.append(out_img)
        masks.append(out_mask)
    return images, masks
#img = image.load_img(r'C:\ML\image_augmentation\train_masks.csv\train\11fcda0a9e1c_07.jpg',target_size=(512, 512))
#img = image.img_to_array(img)
#mask = image.load_img(r'C:\ML\image_augmentation\train_masks.csv\train_masks\11fcda0a9e1c_07_mask.gif',grayscale=True, target_size=(512, 512))
#mask = image.img_to_array(mask)
#img, mask = img / 255., mask / 255.
#TransformImageMask(img,mask)
|
import Demux4Way
import Demux
class Demux8Way():
    """1-to-8 demultiplexer built from one 4-way demux and four 2-way demuxes.

    gate0 (Demux4Way) routes input `a` onto one of four intermediate lines
    (b, c, d, e) using select[0:2]; each 2-way gate then splits its line into
    two of the eight outputs (outa..outh) using select[2:3]. All signals are
    single-bit lists.
    """
    def __init__(self):
        # Input line and 3-bit select.
        self.a = [0,]
        self.select = [0,0,0,]
        # Intermediate lines out of the 4-way stage.
        self.b = [0,]
        self.c = [0,]
        self.d = [0,]
        self.e = [0,]
        # The eight demultiplexed outputs.
        self.outa = [0,]
        self.outb = [0,]
        self.outc = [0,]
        self.outd = [0,]
        self.oute = [0,]
        self.outf = [0,]
        self.outg = [0,]
        self.outh = [0,]
        self.gate0 = Demux4Way.Demux4Way()
        self.gate1 = Demux.Demux()
        self.gate2 = Demux.Demux()
        self.gate3 = Demux.Demux()
        self.gate4 = Demux.Demux()
    def _setup(self):
        # Wire the 4-way stage, then fan each intermediate line through a 2-way demux.
        self.gate0.a = self.a
        self.gate0.select = self.select[0:2]
        self.b = self.gate0.outaf()
        self.c = self.gate0.outbf()
        self.d = self.gate0.outcf()
        self.e = self.gate0.outdf()
        self.gate1.a = self.b
        self.gate1.select = self.select[2:3]
        self.outa = self.gate1.outaf()
        self.outb = self.gate1.outbf()
        self.gate2.a = self.c
        self.gate2.select = self.select[2:3]
        self.outc = self.gate2.outaf()
        self.outd = self.gate2.outbf()
        self.gate3.a = self.d
        self.gate3.select = self.select[2:3]
        self.oute = self.gate3.outaf()
        self.outf = self.gate3.outbf()
        self.gate4.a = self.e
        self.gate4.select = self.select[2:3]
        self.outg = self.gate4.outaf()
        self.outh = self.gate4.outbf()
    def outaf(self):
        # Re-evaluate the network, then read output a (same pattern below).
        self._setup()
        return self.outa
    def outbf(self):
        self._setup()
        return self.outb
    def outcf(self):
        self._setup()
        return self.outc
    def outdf(self):
        self._setup()
        return self.outd
    def outef(self):
        self._setup()
        return self.oute
    def outff(self):
        self._setup()
        return self.outf
    def outgf(self):
        self._setup()
        return self.outg
    def outhf(self):
        self._setup()
        return self.outh
|
import xarray as xr
import numpy as np
from itertools import product
from functools import reduce
from tools.LoopTimer import LoopTimer
import pandas as pd
import pickle
import matplotlib.pyplot as plt
import sys
def nan_correlate(x,y):
    """Pearson correlation of x and y over indices where neither is NaN."""
    valid = ~(np.isnan(x) | np.isnan(y))
    return np.corrcoef(x[valid], y[valid])[0][1]
def quick_regrid(in_arr, reg_arr):
    """Snap every value of in_arr onto the regular grid defined by reg_arr.

    reg_arr must be evenly spaced; raises ValueError otherwise.
    """
    steps = np.diff(reg_arr)
    steps_f = steps.astype(float)
    if not np.allclose(steps_f, steps_f[0]):
        print(np.unique(steps))
        raise ValueError('not equally spacing, cannot quick regrid')
    step = float(steps[0])
    snapped = np.round(in_arr.astype(float) / step) * step
    return snapped.astype(in_arr[0].dtype)
def process_data(daily_data, var, dims=None):
    """Compute lagged autocorrelations of daily_data along each dim; save as NetCDF.

    For each dimension in *dims* (default: every dim of daily_data) and each
    lag 1..9, correlates the series with its lag-shifted copy at every point
    of the remaining dimensions, then writes one file per dimension to a
    hard-coded output directory.
    """
    if not dims:
        dims = daily_data.dims
    for di, dim in enumerate(dims):
        savename = (f'/home/disk/eos4/jkcm/Data/MEASURES/correlations/lag_correlations.{var}.{dim}.nc')
        remaining_dims = [i for i in daily_data.dims if not i==dim]
        dim_axis = daily_data.dims.index(dim)
        other_axes = [i for i in np.arange(len(daily_data.dims)) if not i==dim_axis]
        ax_iters = [daily_data[i].values for i in remaining_dims]
        # NaN-filled template shaped like the data without `dim`, with a new lag axis.
        empties = np.full([daily_data.shape[i] for i in other_axes], np.nan)
        all_lags = np.arange(1,10)
        end_result = daily_data.loc[{dim: daily_data[dim].values[0]}].copy(data=empties).expand_dims(lag=all_lags).copy()
        lt = LoopTimer(reduce((lambda x, y: x * y), end_result.shape))
        print(f'working on loop {di}/{len(dims)}: {dim}')
        for lag in all_lags:
            # Series and its counterpart shifted by `lag` along `dim`.
            a = daily_data.isel({dim:slice(lag,None,lag)})
            a_shift = daily_data.isel({dim:slice(None,-lag,lag)})
            for i in product(*[l for l in ax_iters]):
                lt.update()
                x = {rd: n for rd, n in zip(remaining_dims,i)}
                a_sl = a.sel(x).values
                a_shift_sl = a_shift.sel(x).values
                corr = nan_correlate(a_sl, a_shift_sl)
                x['lag'] = lag
                end_result.loc[x] = corr
        end_result.to_netcdf(savename)
def get_dataset(var):
    """Load the monthly-anomaly DataArray for *var* from its source product.

    Subsetting strides (time/space) differ per source; file paths are
    hard-coded to local disk.
    """
    if var in ['EIS', 'SST', 'RH_700', 'sfc_div', 'div_700', 'WSPD_10M', 'LTS']:
        MERRA_data = xr.open_dataset(r'/home/disk/eos4/jkcm/Data/MERRA/measures/MERRA_unified_subset_SEP.mon_anom.nc')
        dataset = MERRA_data[var].isel(time=slice(None,None,8))
    elif var == 'vapor':
        amsr_data = xr.open_dataset(r'/home/disk/eos9/jkcm/Data/amsr/rss/all/amsr_unified.subset.mon_anom.nc')
        dataset = amsr_data[var].isel(orbit_segment=0, latitude=slice(None,None,4), longitude=slice(None,None,4))
    elif var in ['net_cre', 'cldarea_low_1h']:
        ceres_data = xr.open_dataset(r'/home/disk/eos9/jkcm/Data/ceres/proc/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4.subset.mon_anom.nc')
        dataset = ceres_data[var].isel(time=slice(19,None,24))
    elif var == 'ascat_div':
        raise NotImplementedError()
        # ascat_data =xr.open_dataset(r'/home/disk/eos9/jkcm/Data/ascat/rss/proc/ascat_unified.anomfromyseas.nc')
    else:
        raise ValueError('variable not recognized')
    return dataset
if __name__ == '__main__':
    # Usage: script.py VAR [DIM1,DIM2,...]
    if len(sys.argv) < 2:
        raise ValueError('hey bro gimme a variable')
    var = sys.argv[1]
    dims = sys.argv[2].split(',') if len(sys.argv) == 3 else None
    dataset = get_dataset(var)
    process_data(dataset, var, dims)  # daily starting at Noon
|
import numpy as np
from matplotlib import pyplot as pl
import pandas
from sklearn.linear_model import LinearRegression
from neural_network import FaceNet
from PIL import Image
import matplotlib.patches as patches
# Load annotations: one row per image file with its face bounding box.
data = pandas.read_csv('data.csv', delimiter=' ')
# FIX: DataFrame.as_matrix() was removed in pandas 1.0; .values is the
# long-standing equivalent and works on both old and new pandas.
pathes = data[['File']].values[:, 0]
rects = data[['x', 'y', 'w', 'h']].values
size = 48
imgX = []
rectY = []
for file, rect in zip(pathes, rects):
    img = Image.open(file)
    # Normalize the box to [0, 1] relative to the source image dimensions.
    x = rect[0] / img.size[0]
    y = rect[1] / img.size[1]
    w = rect[2] / img.size[0]
    h = rect[3] / img.size[1]
    # Resize, scale pixels to [-1, 1), channel-first via transpose.
    img = (np.array(img.resize((size, size)))/128.0 - 1).astype(np.float32).T
    r = np.array([x, y, w, h], np.float32)
    imgX.append(img)
    rectY.append(r)
imgX = np.array(imgX)
rectY = np.array(rectY)
# Map box coordinates from [0, 1] to [-1, 1] for training targets.
rectY = (2 * rectY - 1).astype(np.float32)
# Random 80/20 train/test split.
index = np.random.permutation(len(imgX))
train_index = index[:int(len(index)*0.8)]
test_index = index[int(len(index)*0.8):]
model = FaceNet()
print('start fit')
learning_curve = model.fit(imgX[train_index], rectY[train_index], 20, 10)
print('end fit')
print(model.score(imgX[train_index], rectY[train_index]))
print(model.score(imgX[test_index], rectY[test_index]))
# FIX: predictions were computed on the TRAIN set but drawn against TEST-set
# images and ground-truth boxes below; predict on the test set instead.
predicted_rect = model.predict(imgX[test_index])
pl.figure()
pl.plot(learning_curve)
pl.show()
# Back from [-1, 1] to [0, 1] for drawing.
rectY = 0.5 * (rectY + 1)
predicted_rect = 0.5 * (predicted_rect + 1)
for img, rect, rect_true in zip(imgX[test_index], predicted_rect, rectY[test_index]):
    image = (img.T + 1)/2
    fig, ax = pl.subplots(1)
    ax.imshow(image)
    # Predicted box in red, ground truth in green.
    rect_patch_predicted = patches.Rectangle((rect[0]*size, rect[1]*size), rect[2] * size, rect[3] * size,
                                             linewidth=1, edgecolor='r', facecolor='none')
    rect_patch_true = patches.Rectangle((rect_true[0] * size, rect_true[1] * size), rect_true[2] * size, rect_true[3] * size,
                                        linewidth=1, edgecolor='g', facecolor='none')
    ax.add_patch(rect_patch_predicted)
    ax.add_patch(rect_patch_true)
    pl.show()
|
#!/usr/bin/python3
# -*- coding -*-
# Arthor: NERD
# Data: 2018-6-24
# Version: 1.0
from L298NHBridge import HBridge
from ImageProcess import ProcessData
def Init(self):
    """Initialize the H-bridge motors and apply one speed/steer command.

    NOTE(review): `self` is accepted for compatibility with the original
    signature but is unused.
    """
    Motors = HBridge(19, 26, 23, 24, 13, 21, 22)
    speed_run = 0
    angle_steer = 0
    # NOTE(review): ProcessImage is not defined anywhere visible; the module
    # imports ProcessData — confirm which callable is intended.
    speed_run, angle_steer = ProcessImage()
    Motors.setMotorRun(speed_run)
    # NOTE(review): the steering value is also sent to setMotorRun — this looks
    # like it should be a steering call; confirm against the HBridge API.
    Motors.setMotorRun(angle_steer)
def main():
    # FIX: 'def main:' (missing parentheses) was a SyntaxError.
    # FIX: Init(self) referenced an undefined name; pass None explicitly.
    Init(None)
    while True:
        try:
            pass
        except RuntimeError:  # FIX: 'expect' -> 'except' (SyntaxError)
            pass
if __name__ == "__main__":  # FIX: '=' -> '==' and "main" -> "__main__"
    main()
|
from django.conf.urls import url
from . import views
# URL routes for this app: /contador/ -> views.contador.
urlpatterns = [
    url(r'^contador/', views.contador, name='contador'),
] |
import logging
from django.http import HttpResponse
import json
from myuw.views.rest_dispatch import RESTDispatch, data_not_found
from myuw.dao.finance import get_account_balances_for_current_user
from myuw.dao.notice import get_tuition_due_date
from myuw.logger.timer import Timer
from myuw.logger.logresp import log_data_not_found_response
from myuw.logger.logresp import log_success_response
class Finance(RESTDispatch):
    """
    Performs actions on resource at /api/v1/finance/.
    """
    def GET(self, request):
        """
        Return 200 with the current user's student account balances
        (plus the tuition due date); 404-style response when no balances exist.
        """
        timer = Timer()
        logger = logging.getLogger(__name__)
        balances = get_account_balances_for_current_user()
        if balances is None:
            log_data_not_found_response(logger, timer)
            return data_not_found()
        log_success_response(logger, timer)
        logger.debug(balances.json_data())
        payload = balances.json_data()
        payload['tuition_due'] = str(get_tuition_due_date())
        return HttpResponse(json.dumps(payload))
|
from threading import Thread
from pyfirmata import Arduino, util
import serial
import time
class ArduinoConnection(Thread):
    """Background thread that averages six Arduino analog inputs over ~5 s windows."""
    def __init__(self):
        Thread.__init__(self)
        # Seconds between samples, window length in seconds, and samples per window.
        self.SAMPLING_INTERVAL = 0.100
        self.MEAN_INTERVAL = 5
        self.MEAN_SAMPLES_NUMBER = round(self.MEAN_INTERVAL/self.SAMPLING_INTERVAL)
        PORT = '/dev/ttyACM0'
        self.board = Arduino(PORT)
        # Iterator thread keeps pin values refreshed from the serial stream.
        it = util.Iterator(self.board)
        it.start()
        # Analog input pins a0..a5.
        self.analog_pin_value_arr = [self.board.get_pin('a:0:i'), self.board.get_pin('a:1:i'), self.board.get_pin('a:2:i'), self.board.get_pin('a:3:i'), self.board.get_pin('a:4:i'), self.board.get_pin('a:5:i')]
        for i in range(len(self.analog_pin_value_arr)):
            self.analog_pin_value_arr[i].enable_reporting()
        # Working accumulator and the last completed window of per-pin means.
        self.mean_analog_valuea_arr = [0.0] * 6
        self.mean_analog_valuea_assigned_arr = [0.0] * 6
    def run(self):
        """Continuously accumulate samples and publish per-window means. Never returns."""
        #s= ''
        sample_number = 0
        while True:
            while (sample_number < self.MEAN_SAMPLES_NUMBER):
                # time.sleep(DELAY)
                self.board.pass_time(self.SAMPLING_INTERVAL)
                for i in range(len(self.mean_analog_valuea_arr)):
                    self.mean_analog_valuea_arr[i] = self.mean_analog_valuea_arr [i] + self.analog_pin_value_arr[i].read()
                sample_number = sample_number + 1
            for i in range(len(self.mean_analog_valuea_arr)):
                self.mean_analog_valuea_arr[i] = self.mean_analog_valuea_arr[i] / self.MEAN_SAMPLES_NUMBER
                #s = s + str(self.mean_analog_valuea_arr[i]) + ' '
            # Publish the finished window, then reset the accumulator.
            self.mean_analog_valuea_assigned_arr = self.mean_analog_valuea_arr
            #print s
            #s = ''
            sample_number = 0
            self.mean_analog_valuea_arr = [0.0] * 6
    def getMeanAnalogArduinoValueArray(self):
        """Return the most recently completed window of mean analog values."""
        return self.mean_analog_valuea_assigned_arr
|
# Build-configuration dictionary consumed by the SCons build (exported below):
# selects the MCU architecture/family, device driver, radio interface, defined
# symbols, and toolchain flags for the ATmega1281 / AT86RF212 target board.
hardware_config = {
    # Micro Controller Unit description (HEAD/arch/<arch>/<mcu_fam>/<vendor> folder)
    'mcu_arch' : 'avr',
    'mcu_family' : 'avr8',
    'mcu_vendor' : 'atmel',
    'mcu_cpu' : 'atmega1281',
    'mcu_toolchain' : 'GCC',
    # Device driver description (HEAD/target/mcu folder)
    'mcu' : 'atmega1281',
    # Transceiver source description (HEAD/target/if folder)
    'if' : 'at86rf212',
    # C code global defined symbols
    'defines' : [
        'LEDS_ON_BOARD=true',
        'IF_AT86RF212',
        'BOARD_ATANY_900',
    ],
    # GCC flags
    'cflags' : [
        '-mmcu=atmega1281',
    ],
    # LINKER flags
    'ldflags' : [
        '-mmcu=atmega1281',
    ],
}
Return('hardware_config') |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import podium_api
from podium_api.asyncreq import get_json_header_token, make_request_custom_success
from podium_api.types.event import get_event_from_json
from podium_api.types.paged_response import get_paged_response_from_json
from podium_api.types.redirect import get_redirect_from_json
def make_event_update(
    token,
    event_uri,
    title=None,
    start_time=None,
    end_time=None,
    venue_id=None,
    private=None,
    success_callback=None,
    failure_callback=None,
    progress_callback=None,
    redirect_callback=None,
):
    """PUT an update to the PodiumEvent at *event_uri*.

    Args:
        token (PodiumToken): Authentication token for this session.
        event_uri (str): URI of the event being updated.

    Kwargs:
        title (str): New event title.
        start_time (str): New start time, ISO 8601 format.
        end_time (str): New end time, ISO 8601 format.
        venue_id (str): Venue ID; when omitted the venue field is sent empty.
        private (bool): True for a private event.
        success_callback (function): on_success(result (dict), updated_uri (str)).
        failure_callback (function): on_failure(failure_type (str), result, data)
            where failure_type is 'error' or 'failure'.
        redirect_callback (function): on_redirect(PodiumRedirect).
        progress_callback (function): on_progress(current_size, total_size, data).

    Return:
        UrlRequest: The request being made.
    """
    body = {}
    if title is not None:
        body["event[title]"] = title
    if start_time is not None:
        body["event[start_time]"] = start_time
    if end_time is not None:
        body["event[end_time]"] = end_time
    # NOTE: unlike the other fields, venue_id is always sent; an absent
    # venue_id clears the venue server-side.
    body["event[venue_id]"] = venue_id if venue_id is not None else ""
    if private is not None:
        body["event[private]"] = str(private).lower()
    return make_request_custom_success(
        event_uri,
        event_update_success_handler,
        method="PUT",
        success_callback=success_callback,
        redirect_callback=redirect_callback,
        failure_callback=failure_callback,
        progress_callback=progress_callback,
        body=body,
        header=get_json_header_token(token),
        data={"updated_uri": event_uri},
    )
def make_event_create(
    token,
    title,
    start_time,
    end_time,
    venue_id=None,
    private=None,
    success_callback=None,
    failure_callback=None,
    progress_callback=None,
    redirect_callback=None,
):
    """POST a new PodiumEvent.

    The URI of the newly created event is delivered to *redirect_callback*
    as a PodiumRedirect.

    Args:
        token (PodiumToken): Authentication token for this session.
        title (str): Event title.
        start_time (str): Start time, ISO 8601 format.
        end_time (str): End time, ISO 8601 format.

    Kwargs:
        venue_id (str): ID of the event's venue.
        private (bool): True for a private event.
        success_callback (function): on_success(result (dict), data (dict)).
        failure_callback (function): on_failure(failure_type (str), result, data)
            where failure_type is 'error' or 'failure'.
        redirect_callback (function): on_redirect(PodiumRedirect).
        progress_callback (function): on_progress(current_size, total_size, data).

    Return:
        UrlRequest: The request being made.
    """
    endpoint = "{}/api/v1/events".format(podium_api.PODIUM_APP.podium_url)
    body = {
        "event[title]": title,
        "event[start_time]": start_time,
        "event[end_time]": end_time,
    }
    if venue_id is not None:
        body["event[venue_id]"] = venue_id
    if private is not None:
        body["event[private]"] = str(private).lower()
    return make_request_custom_success(
        endpoint,
        None,
        method="POST",
        success_callback=success_callback,
        redirect_callback=create_event_redirect_handler,
        failure_callback=failure_callback,
        progress_callback=progress_callback,
        body=body,
        header=get_json_header_token(token),
        data={"_redirect_callback": redirect_callback},
    )
def make_event_delete(
    token, event_uri, success_callback=None, redirect_callback=None, failure_callback=None, progress_callback=None
):
    """DELETE the event at *event_uri*.

    Args:
        token (PodiumToken): Authentication token for this session.
        event_uri (str): URI of the event to delete.

    Kwargs:
        success_callback (function): on_success(deleted_uri (str)).
        failure_callback (function): on_failure(failure_type (str), result, data)
            where failure_type is 'error' or 'failure'.
        redirect_callback (function): on_redirect(result (dict), data (dict)).
        progress_callback (function): on_progress(current_size, total_size, data).

    Return:
        UrlRequest: The request being made.
    """
    return make_request_custom_success(
        event_uri,
        event_delete_handler,
        method="DELETE",
        success_callback=success_callback,
        failure_callback=failure_callback,
        progress_callback=progress_callback,
        redirect_callback=redirect_callback,
        header=get_json_header_token(token),
        data={"deleted_uri": event_uri},
    )
def make_event_get(
    token,
    event_uri,
    expand=True,
    quiet=None,
    success_callback=None,
    redirect_callback=None,
    failure_callback=None,
    progress_callback=None,
):
    """GET the PodiumEvent at *event_uri*.

    Args:
        token (PodiumToken): Authentication token for this session.
        event_uri (str): URI of the event to fetch.

    Kwargs:
        expand (bool): Expand all objects in response output. Defaults to True.
        quiet (object): If not None the HTML layout will not render the
            endpoint description.
        success_callback (function): on_success(PodiumEvent).
        failure_callback (function): on_failure(failure_type (str), result, data)
            where failure_type is 'error' or 'failure'.
        redirect_callback (function): on_redirect(result (dict), data (dict)).
        progress_callback (function): on_progress(current_size, total_size, data).

    Return:
        UrlRequest: The request being made.
    """
    query = {}
    if expand is not None:
        query["expand"] = expand
    if quiet is not None:
        query["quiet"] = quiet
    return make_request_custom_success(
        event_uri,
        event_success_handler,
        method="GET",
        success_callback=success_callback,
        failure_callback=failure_callback,
        progress_callback=progress_callback,
        redirect_callback=redirect_callback,
        params=query,
        header=get_json_header_token(token),
    )
def make_events_get(
    token,
    start=None,
    per_page=None,
    endpoint=None,
    expand=True,
    quiet=None,
    success_callback=None,
    redirect_callback=None,
    failure_callback=None,
    progress_callback=None,
):
    """GET a paged list of events.

    Defaults to '<podium_url>/api/v1/events' unless *endpoint* is supplied.

    Args:
        token (PodiumToken): Authentication token for this session.

    Kwargs:
        start (int): Starting index for the events list, 0 indexed.
        per_page (int): Results per page; capped server-side at 100.
        endpoint (str): Alternate endpoint to query.
        expand (bool): Expand all objects in response output. Defaults to True.
        quiet (object): If not None the HTML layout will not render the
            endpoint description.
        success_callback (function): on_success(PodiumPagedResponse).
        failure_callback (function): on_failure(failure_type (str), result, data)
            where failure_type is 'error' or 'failure'.
        redirect_callback (function): on_redirect(result (dict), data (dict)).
        progress_callback (function): on_progress(current_size, total_size, data).

    Return:
        UrlRequest: The request being made.
    """
    if endpoint is None:
        endpoint = "{}/api/v1/events".format(podium_api.PODIUM_APP.podium_url)
    query = {}
    if expand is not None:
        query["expand"] = expand
    if quiet is not None:
        query["quiet"] = quiet
    if start is not None:
        query["start"] = start
    if per_page is not None:
        # Enforce the server's maximum page size locally.
        query["per_page"] = min(per_page, 100)
    return make_request_custom_success(
        endpoint,
        events_success_handler,
        method="GET",
        success_callback=success_callback,
        failure_callback=failure_callback,
        progress_callback=progress_callback,
        redirect_callback=redirect_callback,
        params=query,
        header=get_json_header_token(token),
    )
def event_delete_handler(req, results, data):
    """Forward the deleted resource's URI to the caller's success_callback.

    Called automatically by **make_event_delete**; does nothing when no
    success_callback was supplied.
    """
    callback = data["success_callback"]
    if callback is not None:
        callback(data["deleted_uri"])
def event_success_handler(req, results, data):
    """Build a PodiumEvent from the response and hand it to success_callback.

    Called automatically by **make_event_get**; does nothing when no
    success_callback was supplied.
    """
    callback = data["success_callback"]
    if callback is not None:
        callback(get_event_from_json(results["event"]))
def events_success_handler(req, results, data):
    """
    Builds a PodiumPagedResponse with PodiumEvent objects as the payload
    and hands it to the success_callback found in data, if there is one.
    Called automatically by **make_events_get**.
    Args:
        req (UrlRequest): Instance of the request that was made.
        results (dict): Dict returned by the request.
        data (dict): Wildcard dict for containing data that needs to be passed
            to the various callbacks of a request. Will contain at least a
            'success_callback' key.
    Return:
        None, this function instead calls a callback.
    """
    callback = data["success_callback"]
    if callback is not None:
        callback(get_paged_response_from_json(results, "events"))
def create_event_redirect_handler(req, results, data):
    """
    Handles the success redirect of a **make_event_create** call.
    Builds a PodiumRedirect holding the URI of the newly created event
    and hands it to the _redirect_callback found in data, if there is one.
    Called automatically by **make_event_create**.
    Args:
        req (UrlRequest): Instance of the request that was made.
        results (dict): Dict returned by the request.
        data (dict): Wildcard dict for containing data that needs to be passed
            to the various callbacks of a request. Will contain at least a
            '_redirect_callback' key.
    Return:
        None, this function instead calls a callback.
    """
    callback = data["_redirect_callback"]
    if callback is not None:
        callback(get_redirect_from_json(results, "event"))
def event_update_success_handler(req, results, data):
    """
    Success callback after updating an event. Passes the server's
    response message and the event URI to the success_callback.
    Called automatically by **make_event_update**.
    Args:
        req (UrlRequest): Instance of the request that was made.
        results (dict): Dict returned by the request.
        data (dict): Wildcard dict for containing data that needs to be passed
            to the various callbacks of a request. Will contain at least a
            'success_callback' key.
    Return:
        None, this function instead calls a callback.
    """
    callback = data["success_callback"]
    if callback is not None:
        callback(results, data["updated_uri"])
|
import re


def solve(stream):
    """Return the total group score for an AoC 2017 day-9 stream.

    ``!x`` cancellations are stripped first, then ``<...>`` garbage
    (non-greedy); every remaining ``{`` opens a group worth its
    nesting depth and every ``}`` adds that depth to the score.
    """
    # Order matters: cancellations must be removed before garbage,
    # otherwise an escaped '>' (i.e. '!>') would end garbage early.
    stream = re.sub('!.', '', stream)
    stream = re.sub('<.*?>', '', stream)
    depth = 0
    score = 0
    for ch in stream:
        if ch == '{':
            depth += 1
        elif ch == '}':
            score += depth
            depth -= 1
    return score


if __name__ == '__main__':
    # Guarding the file I/O keeps the module importable (and testable)
    # without requiring '9.input' to exist; also avoids shadowing the
    # builtin `input` as the original script did.
    with open('9.input', 'r') as file:
        text = file.read()
    print(solve(text))
|
# Generated by Django 2.0.13 on 2019-05-17 18:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the capapi MailingList table: auto id, unique email (with a
    friendly duplicate-subscription error message), creation timestamp,
    and a do_not_email opt-out flag.
    """
    dependencies = [
        ('capapi', '0013_auto_20181107_2037'),
    ]
    operations = [
        migrations.CreateModel(
            name='MailingList',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(error_messages={'unique': "You're already subscribed."}, max_length=254, unique=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('do_not_email', models.BooleanField(default=False)),
            ],
        ),
    ]
|
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy import signal, misc


def main():
    """Plot time-domain views of two linear chirp signals.

    Panel (A) sweeps 700 Hz -> 900 Hz over one 1/70 s window;
    panel (B) sweeps 700 Hz -> 9000 Hz over the same window.
    """
    # 512 samples across a 1/70 s window; endpoint excluded so the grid
    # would tile cleanly if the window were repeated.
    t = np.linspace(0, 1 / 70, 512, endpoint=False)
    x0 = signal.chirp(t, 700, 1 / 70, 900, method='linear')
    x1 = signal.chirp(t, 700, 1 / 70, 9000, method='linear')
    fig, ax = plt.subplots(2)
    ax[0].plot(t, x0)
    ax[1].plot(t, x1)
    ax[0].set_title('Time Domain Representation of Chirp Signals')
    ax[1].set_xlabel('Time (s)')
    ax[0].set_ylabel('(A)', rotation=0, fontsize=15)
    ax[1].set_ylabel('(B)', rotation=0, fontsize=15)
    plt.show()


if __name__ == '__main__':
    # Guarded so importing this module does not pop up a blocking plot
    # window; the unused `fs = 8000` constant was dropped.
    main()
|
"""
213. House Robber II
You are a professional robber planning to rob houses along a street. Each house has a certain amount of money stashed. All houses at this place are arranged in a circle. That means the first house is the neighbor of the last one. Meanwhile, adjacent houses have security system connected and it will automatically contact the police if two adjacent houses were broken into on the same night.
Given a list of non-negative integers representing the amount of money of each house, determine the maximum amount of money you can rob tonight without alerting the police.
Example 1:
Input: [2,3,2]
Output: 3
Explanation: You cannot rob house 1 (money = 2) and then rob house 3 (money = 2),
because they are adjacent houses.
Example 2:
Input: [1,2,3,1]
Output: 4
Explanation: Rob house 1 (money = 1) and then rob house 3 (money = 3).
Total amount you can rob = 1 + 3 = 4.
"""
class Solution(object):
    """LeetCode 213: House Robber II — houses arranged in a circle."""

    def rob(self, nums):
        """Maximum loot for a circular row of houses.

        :type nums: List[int]
        :rtype: int
        """
        n = len(nums)
        if n == 0:
            return 0
        if n == 1:
            return nums[0]
        # The circle only adds one constraint: house 0 and house n-1
        # cannot both be robbed. So the answer is the better of the two
        # linear sub-problems that each exclude one endpoint.
        return max(self.robHelper(nums[:-1]), self.robHelper(nums[1:]))

    def robHelper(self, array):
        """Maximum loot for a straight (non-circular) row of houses.

        Standard two-variable DP instead of the original O(n) two-state
        table: `prev` is the best total up to house i-2, `curr` up to
        house i-1; robbing house i yields prev + array[i].

        :type array: List[int]
        :rtype: int
        """
        prev = curr = 0
        for value in array:
            prev, curr = curr, max(curr, prev + value)
        return curr
|
import os
import hashlib
class FileComparator(object):
    """
    Class to read in two files and compare them.
    Can compare for matching lines or matching hashes.
    TODO: Have it take a function expression for file comparison
    """
    def __init__(self, filePath1, filePath2):
        """
        Constructor for FileComparator object.
        Raises:
            FileNotFoundError: if either path is not an existing file.
        """
        # Raise instead of the old print("Implement error handling") +
        # exit(): killing the whole process from inside a constructor is
        # hostile to callers, and the message was a placeholder.
        if not (os.path.isfile(filePath1) and os.path.isfile(filePath2)):
            raise FileNotFoundError(
                "Both paths must be existing files: %r, %r"
                % (filePath1, filePath2))
        self._filePath1 = filePath1
        self._filePath2 = filePath2
        self._hasReadInFiles = False
    def _readInFileToList(self, filePath):
        """
        Private method to return a list of lines contained in the input
        file, with all newline and carriage-return characters removed.
        """
        with open(filePath, "r") as f:
            return [line.replace("\n", "").replace("\r", "") for line in f]
    def readInFiles(self):
        """
        Method to read the files into lists. Use when files have been updated.
        """
        self._hasReadInFiles = True
        self._fileList1 = self._readInFileToList(self._filePath1)
        self._fileList2 = self._readInFileToList(self._filePath2)
    def inDepthComparison(self):
        """Alias for findMatchingLinesAnyOrder() without printing."""
        return self.findMatchingLinesAnyOrder()
    def findMatchingLinesAnyOrder(self, shouldPrint=False):
        """
        Return the lines of the first file that also appear anywhere in
        the second file, preserving the first file's order.
        Optional argument to print that list without the blank lines.
        """
        if not self._hasReadInFiles:
            self.readInFiles()
        # Set membership makes this O(len(file1) + len(file2)) instead
        # of a quadratic list scan.
        setOf2 = set(self._fileList2)
        ret = [line for line in self._fileList1 if line in setOf2]
        if shouldPrint:
            print("Matching lines between " + self._filePath1 +
                  " and " + self._filePath2 + ":")
            removingBlanks = [line for line in ret if line != ""]
            print(*removingBlanks, sep='\n')
        return ret
    def compareFileHashes(self, stringOfHash="sha1", shouldPrint=False):
        """
        Hash both files with the named hashlib algorithm and return True
        when the hex digests match.
        Defaults to SHA1; the algo can be any name hashlib.new accepts:
        - sha1, sha224, sha256, sha384, sha512
        - blake2b / blake2s (newer Pythons)
        - md5 (not guaranteed; implementation dependent)
        Has argument to specify whether to print the verdict as well.
        """
        hashes = []
        for filename in (self._filePath1, self._filePath2):
            hasher = hashlib.new(stringOfHash)
            # Files are hashed whole; fine for modest files, chunked
            # reads would be needed for very large ones.
            with open(filename, 'rb') as f:
                hasher.update(f.read())
            hashes.append(hasher.hexdigest())
        ret = hashes[0] == hashes[1]
        if shouldPrint:
            print("Are hashes the same: " + str(ret))
        return ret
|
from .AmountTypes import PaidAmount
from .util import Xmleable, default_document, createElementContent
class ID(Xmleable):
    """cbc:ID element for a prepaid payment (SUNAT "Anticipo" scheme)."""
    def __init__(self, id, tipo_documento="02"):
        self.id = id
        self.schemeID = tipo_documento
        self.schemeName = "Anticipo"
        self.schemeAgencyName = "PE:SUNAT"
    def generate_doc(self):
        """Build the cbc:ID node and attach its scheme attributes."""
        element = createElementContent("cbc:ID", self.id)
        for attribute, value in (
            ("schemeID", self.schemeID),
            ("schemeName", self.schemeName),
            ("schemeAgencyName", self.schemeAgencyName),
        ):
            element.setAttribute(attribute, value)
        self.doc = element
class PaidDate(Xmleable):
    """cbc:PaidDate element wrapping a date string."""
    def __init__(self, date):
        self.date = date
    def generate_doc(self):
        """Build the cbc:PaidDate node from the stored date."""
        element = createElementContent("cbc:PaidDate", self.date)
        self.doc = element
class PaidTime(Xmleable):
    """cbc:PaidTime element wrapping a time string."""
    def __init__(self, time):
        self.time = time
    def generate_doc(self):
        """Build the cbc:PaidTime node from the stored time."""
        element = createElementContent("cbc:PaidTime", self.time)
        self.doc = element
class PrepaidPayment(Xmleable):
    """cac:PrepaidPayment element: id, amount, date and optional time."""
    def __init__(self, id=None, paid_amount=None, paid_date=None, paid_time=None):
        self.id = id
        self.paid_amount = paid_amount
        self.paid_date = paid_date
        self.paid_time = paid_time
    def fix_values(self):
        """Promote plain values to their wrapper types.

        Accepts str for id/date/time and a number for the amount.
        isinstance() replaces the exact type() comparisons so str/float
        subclasses are handled; the amount now also accepts ints
        (backward compatible — ints previously failed validate()).
        """
        if isinstance(self.id, str):
            self.id = ID(self.id)
        if isinstance(self.paid_date, str):
            self.paid_date = PaidDate(self.paid_date)
        if isinstance(self.paid_time, str):
            self.paid_time = PaidTime(self.paid_time)
        # bool is an int subclass; exclude it from numeric amounts.
        if isinstance(self.paid_amount, (int, float)) and \
                not isinstance(self.paid_amount, bool):
            self.paid_amount = PaidAmount(self.paid_amount)
    def validate(self, errs, obs):
        """Raise if required members are not (or not coerced to) the
        expected wrapper types; paid_time is optional."""
        if not isinstance(self.id, ID):
            raise Exception("Bad type")
        if not isinstance(self.paid_amount, PaidAmount):
            raise Exception("Bad type")
        if not isinstance(self.paid_date, PaidDate):
            raise Exception("Bad type")
        if self.paid_time:
            if not isinstance(self.paid_time, PaidTime):
                raise Exception("Bad type")
    def generate_doc(self):
        """Assemble the cac:PrepaidPayment node from the child elements."""
        self.doc = default_document.createElement("cac:PrepaidPayment")
        self.doc.appendChild(self.id.get_document())
        self.doc.appendChild(self.paid_amount.get_document())
        self.doc.appendChild(self.paid_date.get_document())
        if self.paid_time:
            self.doc.appendChild(self.paid_time.get_document())
|
# coding: utf-8
# dont add this, request.path is non unicode in python 2.7
# or add it, as request.path shoudl be unicode anyway?!
# from __future__ import unicode_literals
from ..models import Redirect
try:
reload
except NameError:
from importlib import reload
from django.contrib.sites.models import Site
from django.http import QueryDict
from django.test import TestCase
from django.test import override_settings
from mock import Mock
from painless_redirects import conf
from ..middleware import ManualRedirectMiddleware, ForceSiteDomainRedirectMiddleware
# Reusable override_settings decorators that toggle automatic creation
# of Redirect objects for unmatched 404s; tests call reload(conf) so the
# overridden setting is actually picked up.
no_auto_create = override_settings(
    PAINLESS_REDIRECTS_AUTO_CREATE=False,
)
auto_create = override_settings(
    PAINLESS_REDIRECTS_AUTO_CREATE=True,
)
class ForceSiteDomainRedirectMiddlewareTestCase(TestCase):
    """Requests whose host differs from the current Site are 301'd to the
    Site's domain, preserving path and query string; nothing happens when
    the host matches or DEBUG is on."""
    def setUp(self):
        # Fake request: insecure, wrong host "nogood.com", empty GET/META.
        self.middleware = ForceSiteDomainRedirectMiddleware()
        self.request = Mock()
        self.request.is_secure = lambda: False
        self.request.get_host = lambda: "nogood.com"
        self.request.META = {}
        self.request.GET = QueryDict("")
        self.request.path = "/"
    def test_no_redirect(self):
        # Host matches the default example.com Site fixture -> no action.
        self.request.get_host = lambda: "example.com"
        response = self.middleware.process_request(self.request)
        self.assertEqual(response, None)
    def test_debug_no_redirect(self):
        # DEBUG=True disables the forced redirect entirely.
        with self.settings(DEBUG=True):
            response = self.middleware.process_request(self.request)
            self.assertEqual(response, None)
    def test_must_redirect(self):
        response = self.middleware.process_request(self.request)
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response.url, "http://example.com/")
    def test_must_redirect_preserves_path(self):
        self.request.path = "/abc/def/yeah/"
        response = self.middleware.process_request(self.request)
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response.url, "http://example.com/abc/def/yeah/")
    def test_must_redirect_preserves_getvars(self):
        self.request.path = "/abc/def/yeah/"
        self.request.GET = QueryDict("karma=true")
        response = self.middleware.process_request(self.request)
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response.url, "http://example.com/abc/def/yeah/?karma=true")
class ManualRedirectMiddlewareTestCase(TestCase):
    """
    request.get_current_site() is always the default example.com fixture
    check: http://blog.namis.me/2012/05/13/writing-unit-tests-for-django-middleware/
    """
    def setUp(self):
        # One base Redirect fixture plus two extra Site objects used by
        # the site-scoped tests below.
        self._setup_request_response_middleware()
        self.redirect = Redirect.objects.create(
            old_path="/the-old-path/",
            new_path="/the-new-path/",
        )
        self.site = Site.objects.create(
            name="example site 1",
            domain="example1.com",
        )
        self.site2 = Site.objects.create(
            name="example site 2",
            domain="example2.com",
        )
    def _setup_request_response_middleware(self):
        # Fresh middleware plus mocked request/response; re-invoked
        # mid-test whenever a clean request state is needed.
        self.middleware = ManualRedirectMiddleware()
        self.request = Mock()
        self.request.META = {}
        self.request.get_host = lambda: 'host.com'
        self.response = Mock()
    def test_no_404_on_status_200(self):
        # Non-404 responses must pass through untouched.
        self.request.path = self.redirect.old_path
        self.response.status_code = 200
        self.assertEqual(
            self.middleware.process_response(self.request, self.response),
            self.response)
    @no_auto_create
    def test_no_redirect_found(self):
        # reload(conf) re-reads settings under the override decorator.
        reload(conf)
        self.request.path = "/some-other-path/"
        self.response.status_code = 404
        self.assertEqual(
            self.middleware.process_response(self.request, self.response),
            self.response)
        self.assertEqual(1, Redirect.objects.all().count())
    @no_auto_create
    def test_no_redirect_when_site_specified(self):
        # A redirect bound to a non-current site must not fire.
        reload(conf)
        self.redirect.site = self.site
        self.redirect.save()
        self.request.path = self.redirect.old_path
        self.response.status_code = 404
        self.assertEqual(
            self.middleware.process_response(self.request, self.response),
            self.response)
        self.assertEqual(1, Redirect.objects.all().count())
    def test_simple_redirect(self):
        reload(conf)
        self.response.status_code = 404
        self.request.path = self.redirect.old_path
        response = self.middleware.process_response(self.request, self.response)
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response.url, "/the-new-path/")
    def test_simple_redirect_302(self):
        # permanent=False -> 302; also verifies the hit counter grows by
        # one per processed response.
        reload(conf)
        self.redirect.permanent = False
        self.redirect.save()
        self.response.status_code = 404
        self.request.path = self.redirect.old_path
        response = self.middleware.process_response(self.request, self.response)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, "/the-new-path/")
        self.redirect.refresh_from_db()
        self.assertEqual(self.redirect.total_hits(), 1)
        self.middleware.process_response(self.request, self.response)
        self.middleware.process_response(self.request, self.response)
        self.redirect.refresh_from_db()
        self.assertEqual(self.redirect.total_hits(), 3)
    def test_redirect_not_enabled(self):
        # Disabled redirects fall through to the 404 but, per these
        # assertions, still count hits.
        reload(conf)
        self.redirect.permanent = False
        self.redirect.enabled = False
        self.redirect.save()
        self.response.status_code = 404
        self.request.path = self.redirect.old_path
        response = self.middleware.process_response(self.request, self.response)
        self.assertEqual(response.status_code, 404)
        self.redirect.refresh_from_db()
        self.assertEqual(self.redirect.total_hits(), 1)
        self.middleware.process_response(self.request, self.response)
        self.middleware.process_response(self.request, self.response)
        self.redirect.refresh_from_db()
        self.assertEqual(self.redirect.total_hits(), 3)
    def test_simple_redirect_keep_querystring(self):
        # keep_querystring=True: query part is matched and re-attached.
        self.response.status_code = 404
        self.request.path = self.redirect.old_path
        self.request.META['QUERY_STRING'] = 'a=b'
        self.redirect.keep_querystring = True
        self.redirect.old_path += "?a=b"
        self.redirect.save()
        response = self.middleware.process_response(self.request, self.response)
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response.url, "/the-new-path/?a=b")
    def test_simple_redirect_drop_querystring(self):
        # Default behaviour: the query string is discarded on redirect.
        self.response.status_code = 404
        self.request.path = self.redirect.old_path
        self.request.META['QUERY_STRING'] = 'a=xy'
        self.redirect.old_path += "?a=xy"
        self.redirect.save()
        response = self.middleware.process_response(self.request, self.response)
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response.url, "/the-new-path/")
    @auto_create
    def test_wildcard_should_work_with_existing_auto_created_that_is_disabled(self):
        """
        jap. it should!
        :return:
        """
        reload(conf)
        old_path = '/the-old-path/'
        self.response.status_code = 404
        self.request.path = '{}{}'.format(old_path, 'wildcard/maybe/')
        self.redirect.enabled = False
        self.redirect.save()
        self.assertEqual(Redirect.objects.filter(enabled=True).count(), 0)
        response = self.middleware.process_response(self.request, self.response)
        self.assertEqual(response.status_code, 404)
        self.assertEqual(Redirect.objects.all().count(), 2)  # auto created one!
        # the auto redirects
        self.redirect.enabled = True
        self.redirect.save()
        self.assertEqual(Redirect.objects.filter(enabled=True).count(), 1)
        # with existing auto created redirect!
        self.redirect.wildcard_match = True
        self.redirect.enabled = True
        self.redirect.save()
        self._setup_request_response_middleware()
        self.response.status_code = 404
        self.request.path = '{}{}'.format(self.redirect.old_path, 'wildcard/maybe/')
        response = self.middleware.process_response(self.request, self.response)
        self.assertEqual(response.status_code, 301)
        self.assertEqual("/the-new-path/", response.url, )
        self.assertEqual(Redirect.objects.count(), 2)
    @no_auto_create
    def test_special_chars_in_url(self):
        """
        in python 2.7, request.path seems to be ascii, in certain deployment scenarios
        only reproducable when not importing from __future__ import unicode_literals
        probably related: https://serverfault.com/questions/359934/unicodeencodeerror-when-uploading-files-in-django-admin
        only happened on a uwsgi configuration for now.
        """
        reload(conf)
        self.response.status_code = 404
        self.request.path = self.redirect.old_path
        self.request.path = "/2011/11/réééédirect/"
        self.request.META['QUERY_STRING'] = "?what=ééé"
        response = self.middleware.process_response(self.request, self.response)
        # only check if it doesnt fail for now.
        self.assertEqual(response.status_code, 404)
    def test_new_site_redirect(self):
        # A redirect pointing at another Site must yield an absolute URL
        # built from the request scheme and the target site's domain.
        self.redirect.new_site = self.site
        self.redirect.save()
        self.response.status_code = 404
        self.request.scheme = "https"
        self.request.path = "/the-old-path/"
        response = self.middleware.process_response(self.request, self.response)
        self.assertEqual(response.status_code, 301)
        self.assertEqual(
            response.url, "https://%s%s" % (self.redirect.new_site.domain, self.redirect.new_path))
    def test_wildcard_redirect(self):
        # wildcard_match: any path under old_path matches.
        self.redirect.old_path = "/the-wildcard/yes/"
        self.redirect.wildcard_match = True
        self.redirect.save()
        self.response.status_code = 404
        self.request.path = "%sthe/right/part/" % self.redirect.old_path
        response = self.middleware.process_response(self.request, self.response)
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response.url, "/the-new-path/")
    def test_wildcard_redirect_keep_tree(self):
        # keep_tree appends the wildcard remainder to new_path.
        self.redirect.old_path = "/the-wildcard/yes/"
        self.redirect.wildcard_match = True
        self.redirect.keep_tree = True
        self.redirect.save()
        self.response.status_code = 404
        self.request.path = "%sthe/right/part/" % self.redirect.old_path
        response = self.middleware.process_response(self.request, self.response)
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response.url, "/the-new-path/the/right/part/")
        # must work with site too
        # self.redirect.site = self.site
        self.redirect.save()
        self._setup_request_response_middleware()  # re-init
        self.response.status_code = 404
        self.request.path = "%sthe/right/part/2" % self.redirect.old_path
        response = self.middleware.process_response(self.request, self.response)
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response.url, "/the-new-path/the/right/part/2")
    def test_wildcard_redirect_with_site(self):
        self.redirect.site = Site.objects.get_current()
        self.redirect.old_path = "/the-wildcard/yes/"
        self.redirect.wildcard_match = True
        self.redirect.save()
        self.response.status_code = 404
        self.request.path = "%sthe/right/part/" % self.redirect.old_path
        response = self.middleware.process_response(self.request, self.response)
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response.url, "/the-new-path/")
    def test_redirect_without_slash(self):
        # Matching must not require a trailing slash on old_path.
        self.redirect.old_path = '/whatever/check.html'
        self.redirect.save()
        self.request.path = self.redirect.old_path
        self.response.status_code = 404
        response = self.middleware.process_response(self.request, self.response)
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response.url, "/the-new-path/")
    def test_from_custom_domain(self):
        # Domain-scoped redirects are handled in process_request (before
        # the view runs), so even a would-be-200 request redirects.
        self.redirect.domain = 'custom.com'
        self.redirect.old_path = '/'
        self.redirect.new_path = 'http://another.com/'
        self.redirect.save()
        self.request.path = self.redirect.old_path
        self.request.get_host = lambda: 'custom.com'
        self.response.status_code = 200
        response = self.middleware.process_request(self.request, )
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response.url, "http://another.com/")
    def test_from_custom_domain_false_positive(self):
        self.redirect.domain = 'custom.com'
        self.redirect.old_path = '/'
        self.redirect.new_path = 'http://another.com/'
        self.redirect.save()
        self.request.path = self.redirect.old_path
        # check for false positives!
        self.request.get_host = lambda: 'none-or-what.com'
        self.response.status_code = 200
        response = self.middleware.process_request(self.request)
        self.assertEqual(response, None)
        response = self.middleware.process_response(self.request, self.response)
        self.assertNotEqual(response.status_code, 301)
        # self.assertEqual(response.url, "http://another.com/")
    def test_old_path_too_long(self):
        # Overlong 404 paths are truncated to the indexed column length
        # when auto-recorded ('ccccc' adds 5 chars per iteration, so the
        # path is guaranteed to exceed the limit).
        reload(conf)
        very_long = '/'
        for c in range(0, conf.INDEXED_CHARFIELD_MAX_LENGTH):
            very_long += 'ccccc'
        self.assertGreater(len(very_long), conf.INDEXED_CHARFIELD_MAX_LENGTH)
        self.request.path = very_long + "/"
        # check for false positives!
        self.response.status_code = 404
        response = self.middleware.process_response(self.request, self.response)
        self.assertEqual(404, response.status_code)
        self.assertEqual(2, Redirect.objects.all().count())
        self.assertEqual(conf.INDEXED_CHARFIELD_MAX_LENGTH, len(Redirect.objects.all()[0].old_path))
    @auto_create
    def test_auto_create_with_locale_middleware(self):
        # will be redirected to /en/' by locale middleware later on!
        self.request.path = '/?test'
        self.response.status_code = 404
        self.assertEqual(Redirect.objects.all().count(), 1)
        response = self.middleware.process_response(self.request, self.response)
        self.assertEqual(response.status_code, 404)
        self.assertEqual(Redirect.objects.all().count(), 1)
        # 404 with lang slug > auto create ok!
        self.response.status_code = 404
        self.request.path = '/nothing-yet/'
        response = self.middleware.process_response(self.request, self.response)
        self.assertEqual(response.status_code, 404)
        self.assertEqual(Redirect.objects.all().count(), 2)
    @auto_create
    def test_auto_create_respect_append_slash(self):
        # will be redirected to /nope/' by locale commonmiddleware later on!
        self.request.path = '/nope'
        self.response.status_code = 404
        self.assertEqual(Redirect.objects.all().count(), 1)
        response = self.middleware.process_response(self.request, self.response)
        self.assertEqual(response.status_code, 404)
        self.assertEqual(Redirect.objects.all().count(), 1)
        # 404 with lang slug > auto create ok!
        self.response.status_code = 404
        self.request.path = '/nothing-yet/'
        response = self.middleware.process_response(self.request, self.response)
        self.assertEqual(response.status_code, 404)
        self.assertEqual(Redirect.objects.all().count(), 2)
|
import socket


def main():
    """Accept one client on localhost:1235 and print received commands
    until the client sends "end" or disconnects.

    Fixes over the original script: both sockets are closed on every
    exit path, the dead ``while command is None`` loop is gone (recv()
    returns a str, never None), and an empty recv() — the peer closing
    the connection — breaks out instead of busy-looping forever.
    """
    server = socket.socket()
    try:
        server.bind(("localhost", 1235))
        server.listen(0)
        conn, addr = server.accept()
        try:
            while True:
                print("Waiting for commands...")
                command = conn.recv(1024).decode()
                if not command:
                    # Peer closed the connection.
                    break
                print("Command: ", command)
                # command execution part
                if command == "end":
                    break
                print("\n")
        finally:
            conn.close()
    finally:
        server.close()


if __name__ == "__main__":
    main()
|
#coding:utf-8
import unittest
class Test(unittest.TestCase):
    u'''Unit-test demo: test case "test01".'''
    def testAdd(self): # test method names begin with 'test'
        a = 1
        b = 2
        result = a+b # actual result
        ex = 3 # expected result
        self.assertEqual(result,ex) # assert: compare actual result with expected result
        if result==ex:
            print("True")
        else:
            print("False")
    def testMultiply(self):
        a = 2
        b = 3
        result = a*b # actual result
        # NOTE(review): 2*3 == 6, so this assertion fails. The original
        # comment describes a pass-or-fail comparison against the
        # expected value 6, which suggests a deliberate failing-test
        # demonstration -- confirm intent before "fixing" it to 6.
        self.assertEqual(result,5) # assert (checkpoint): pass or fail, comparing result with the expected value 6
if __name__ == '__main__':
    unittest.main()
from base_viewer import BaseViewer
from latextools_utils import get_setting
from latextools_utils.external_command import external_command
from latextools_utils.sublime_utils import get_sublime_exe
import os
import re
import shlex
import sublime
import string
import sys
# shlex.quote exists on Python 3; fall back to pipes.quote on Python 2.
try:
    from shlex import quote
except ImportError:
    from pipes import quote
# Python 2/3 compatibility flags: strbase is the broadest string type
# for isinstance() checks (basestring covers str and unicode on PY2).
if sys.version_info >= (3, 0):
    PY2 = False
    strbase = str
else:
    PY2 = True
    strbase = basestring
# Matches a Windows shell executable (cmd/powershell, optional .exe).
# NOTE(review): the '.' before 'exe' is unescaped, so it matches any
# character (e.g. "cmdXexe"); likely meant r'\.exe' -- confirm.
WINDOWS_SHELL = re.compile(r'\b(?:cmd|powershell)(?:.exe)?\b', re.UNICODE)
# a viewer that runs a user-specified command
class CommandViewer(BaseViewer):
    """Viewer that launches a user-configured external command.

    Commands come from the ``viewer_settings`` entry for the current
    platform and may contain ``$``-variables that are expanded per
    component by :meth:`_replace_vars`.
    """

    # Quick pre-check: does a string contain any supported $variable?
    CONTAINS_VARIABLE = re.compile(
        r'\$(?:(?:pdf|src)_file(?:_(?:name|ext|base_name|path))?'
        r'|sublime_binary|src_file_rel_path|line|col)\b',
        re.IGNORECASE | re.UNICODE
    )

    def _replace_vars(self, s, pdf_file, tex_file=None, line='', col=''):
        '''
        Function to substitute various values into a user-provided string
        Returns a tuple consisting of the string with any substitutions made
        and a boolean indicating if any substitutions were made
        Provided Values:
        --------------------|-----------------------------------------------|
        $pdf_file           | full path of PDF file
        $pdf_file_name      | name of the PDF file
        $pdf_file_ext       | extension of the PDF file
        $pdf_file_base_name | name of the PDF file without the extension
        $pdf_file_path      | full path to directory containing PDF file
        $sublime_binary     | full path to the Sublime binary
        Forward Sync Only:
        --------------------|-----------------------------------------------|
        $src_file           | full path of the tex file
        $src_file_name      | name of the tex file
        $src_file_ext       | extension of the tex file
        $src_file_base_name | name of the tex file without the extension
        $src_file_path      | full path to directory containing tex file
        $line               | line to sync to
        $col                | column to sync to
        '''
        # only do the rest if we must
        if not self.CONTAINS_VARIABLE.search(s):
            return (s, False)

        sublime_binary = get_sublime_exe() or ''
        pdf_file_path = os.path.split(pdf_file)[0]
        pdf_file_name = os.path.basename(pdf_file)
        pdf_file_base_name, pdf_file_ext = os.path.splitext(pdf_file_name)

        if tex_file is None:
            src_file = ''
            src_file_path = ''
            src_file_name = ''
            src_file_ext = ''
            src_file_base_name = ''
        else:
            # A relative tex path is resolved against the PDF directory.
            if os.path.isabs(tex_file):
                src_file = tex_file
            else:
                src_file = os.path.normpath(
                    os.path.join(
                        pdf_file_path,
                        tex_file
                    )
                )
            src_file_path = os.path.split(src_file)[0]
            src_file_name = os.path.basename(src_file)
            src_file_base_name, src_file_ext = os.path.splitext(src_file_name)

        # safe_substitute leaves unknown $names untouched instead of
        # raising, so user typos degrade gracefully.
        template = string.Template(s)
        return (template.safe_substitute(
            pdf_file=pdf_file,
            pdf_file_path=pdf_file_path,
            pdf_file_name=pdf_file_name,
            pdf_file_ext=pdf_file_ext,
            pdf_file_base_name=pdf_file_base_name,
            sublime_binary=sublime_binary,
            src_file=src_file,
            src_file_path=src_file_path,
            src_file_name=src_file_name,
            src_file_ext=src_file_ext,
            src_file_base_name=src_file_base_name,
            line=line,
            col=col
        ), True)

    def _run_command(self, command, pdf_file, tex_file=None, line='', col=''):
        """Expand $variables in *command* and run it via external_command.

        If no substitution occurred anywhere in the command, the PDF path
        is appended as the final argument.
        """
        if isinstance(command, strbase):
            if PY2:
                command = str(command)
            command = shlex.split(command)
            if PY2:
                command = [unicode(c) for c in command]

        substitution_made = False
        for i, component in enumerate(command):
            command[i], replaced = self._replace_vars(
                component, pdf_file, tex_file, line, col)
            substitution_made = substitution_made or replaced

        # Bug fix: decide on the aggregate flag, not on whether the LAST
        # component happened to contain a variable. The original checked
        # `replaced`, so e.g. "viewer $pdf_file --flag" had the PDF path
        # appended a second time (and an empty command raised NameError).
        if not substitution_made:
            command.append(pdf_file)

        external_command(
            command,
            cwd=os.path.split(pdf_file)[0],
            # show the Window if not using a Windows shell, i.e.,
            # powershell or cmd
            show_window=not bool(WINDOWS_SHELL.match(command[0]))
            if sublime.platform() == 'windows' else False
        )

    def forward_sync(self, pdf_file, tex_file, line, col, **kwargs):
        """Jump the viewer to (line, col) of tex_file; falls back to a
        plain view when no forward_sync_command is configured."""
        command = get_setting('viewer_settings', {}).\
            get(sublime.platform(), {}).get('forward_sync_command')
        if command is None:
            self.view_file(pdf_file)
            return
        self._run_command(command, pdf_file, tex_file, line, col)

    def view_file(self, pdf_file, **kwargs):
        """Open pdf_file with the configured view_command, or show an
        error dialog when none is configured."""
        command = get_setting('viewer_settings', {}).\
            get(sublime.platform(), {}).get('view_command')
        if command is None:
            sublime.error_message(
                'You must set the command setting in viewer_settings before '
                'using the viewer.'
            )
            return
        self._run_command(command, pdf_file)
|
#main() contains a test case for the median finder
#stream(x) streams the next int into the median finder.
#From there, it inserts it into its running list, sorted
#getMedian() will find the median of the current sorted list
import bisect
def main():
    """Feed a fixed stream of ints into a MedianFinder, printing the
    running median after each insertion."""
    values = [2, 1, 5, 7, 2, 0, 5]
    finder = MedianFinder()
    for value in values:
        finder.stream(value)
        print(finder.getMedian())
class MedianFinder:
    """Maintain a sorted list of streamed numbers and report the median."""

    def __init__(self):
        # Kept sorted at all times via bisect.insort.
        self.sortedList = []

    def stream(self, x):
        """Insert x into the running sorted list (O(n) list insert)."""
        bisect.insort(self.sortedList, x)

    def getMedian(self):
        """Return the current median; 0 for an empty stream.

        Even lengths average the two middle values (may return float).
        """
        length = len(self.sortedList)
        if length == 0:
            return 0
        if length % 2 == 0:
            lower = self.sortedList[length // 2 - 1]
            upper = self.sortedList[length // 2]
            return (lower + upper) / 2
        # Bug fix: the middle element of an odd-length sorted list is at
        # index length // 2, not length // 2 - 1 (the original returned
        # 1 for [1, 2, 3]; the demo stream only masked this by chance).
        return self.sortedList[length // 2]
main()
|
import csv
from Mining_frequent_closed_itemsets_CLOSET.fpTree.fpTree import FPTree
from Mining_frequent_closed_itemsets_CLOSET.fpTree.frequentItemset import FrequentItemSet
from Mining_frequent_closed_itemsets_CLOSET.itemDictionary import ItemDictionary
class FPTreeBuilder:
    """Build an FP-tree from a CSV transaction file.

    The first CSV row is treated as a header and skipped; every other
    row is one transaction whose items are mapped to integer ids via an
    ItemDictionary.
    """

    def __init__(self, file_name, min_support):
        self.file_name = file_name      # CSV file, one transaction per row
        self.min_support = min_support  # minimum support count
        self.dictionary = ItemDictionary()

    def get_fptree(self):
        """Return the FP-tree built from the file's frequent items."""
        frequent_items = self.get_frequent_itemset()
        frequent_items.sort_descending()
        return self.build_fptree(frequent_items)

    def get_frequent_itemset(self):
        """First pass: count item occurrences and drop infrequent items.

        The original tracked a transaction_counter solely to skip the
        first row; build_fptree used a separate first_line flag for the
        same purpose. Both now use next(reader, None) consistently.
        """
        itemset = FrequentItemSet()
        with open(self.file_name, 'r') as file:
            reader = csv.reader(file, delimiter=',')
            next(reader, None)  # skip the header row
            for row in reader:
                for item in row:
                    itemset.add(self.dictionary.get_id(item))
        itemset.remove_not_frequent_items(self.min_support)
        return itemset

    def build_fptree(self, frequent_itemset):
        """Second pass: insert every transaction into a fresh FP-tree."""
        fptree = FPTree(frequent_itemset)
        with open(self.file_name, 'r') as file:
            reader = csv.reader(file, delimiter=',')
            next(reader, None)  # skip the header row
            for row in reader:
                fptree.add_itemset(
                    [self.dictionary.get_id(x) for x in row])
        fptree.extract_items_associated_in_every_transaction()
        return fptree
|
import os
# Build-path configuration for the bootloader and application targets.
# NOTE(review): all paths use literal Windows '\\' separators, so this
# script is Windows-only; switching to os.path.join would change the
# produced strings and anything that compares against them.
Path={}
if __name__ == '__main__':
    # Run directly: the script appears to live two levels below the
    # workspace root, so resolve '..\\..' to an absolute path.
    WorkSpace = os.path.abspath('..\\..')+'\\'
else:
    # Imported as a module: assumes the cwd is the workspace root --
    # TODO confirm callers guarantee this.
    WorkSpace = os.getcwd()+'\\'
Path['bootloader'] = {
    'WorkSpace'        :WorkSpace,
    'BuildRelativePath':'Build\\bootloader\\',
    'BuildPath'        :WorkSpace+'Build\\bootloader\\',
    'BinPath'          :WorkSpace+'Build\\bootloader\\bin\\',
    'Target'           :WorkSpace+'Build\\bootloader\\bin\\bootloader',
    'TargetName'       :'bootloader',
    'LinkerFile'       :WorkSpace+'Config\\LinkerFile\\STM32F407VETx_FLASH_BOOT.ld',
    'ObjPath'          :WorkSpace+'Build\\bootloader\\obj\\',
    'SourcePath'       :{
        'MCULibraryPath'   :WorkSpace+'Source\\Generic\\Driver\\Mcu\\',
        'DriverPath'       :WorkSpace+'Source\\Generic\\Driver\\',
        'ServicePath'      :WorkSpace+'Source\\Generic\\Service\\',
        'SupportPath'      :WorkSpace+'Source\\Generic\\Support\\',
        'PlatformPath'     :WorkSpace+'Source\\Generic\\Platform\\',
    }
}
# Same layout as the bootloader target, differing only in the build
# directory and the application linker script.
Path['application'] = {
    'WorkSpace'        :WorkSpace,
    'BuildRelativePath':'Build\\application\\',
    'BuildPath'        :WorkSpace+'Build\\application\\',
    'BinPath'          :WorkSpace+'Build\\application\\bin\\',
    'Target'           :WorkSpace+'Build\\application\\bin\\application',
    'TargetName'       :'application',
    'LinkerFile'       :WorkSpace+'Config\\LinkerFile\\STM32F407VETx_FLASH_APP.ld',
    'ObjPath'          :WorkSpace+'Build\\application\\obj\\',
    'SourcePath'       :{
        'MCULibraryPath'   :WorkSpace+'Source\\Generic\\Driver\\Mcu\\',
        'DriverPath'       :WorkSpace+'Source\\Generic\\Driver\\',
        'ServicePath'      :WorkSpace+'Source\\Generic\\Service\\',
        'SupportPath'      :WorkSpace+'Source\\Generic\\Support\\',
        'PlatformPath'     :WorkSpace+'Source\\Generic\\Platform\\',
    }
}
if __name__ == '__main__':
    # Debug entry point: dump the table and wait for a key (Windows).
    # The inner `import os` is redundant (already imported above).
    import os
    print(Path)
    os.system('pause')
# WorkSpace = os.getcwd()+'\\'
# SourceBasePath = 'Source\\'
# BOOT_TargetPath = 'Build\\Bootloader\\obj\\'
# BOOT_OutPath = 'Build\\Bootloader\\bin\\'
# BOOT_OutName = 'BOOT_TestCode'
# BOOT_Out = BOOT_OutPath+BOOT_OutName
# BOOT_LD = 'Config\\LinkerFile\\STM32F407VETx_FLASH_BOOT.ld'
# BOOT_Source = {}
# BOOT_Source['MCULibraryPath'] = SourceBasePath+'Generic\\Driver\\Mcu\\'
# BOOT_Source['DriverPath'] = SourceBasePath+'Generic\\Driver\\'
# BOOT_Source['ServicePath'] = SourceBasePath+'Generic\\Service\\'
# BOOT_Source['SupportPath'] = SourceBasePath+'Generic\\Support\\'
# BOOT_Source['PlatformPath'] = SourceBasePath+'Generic\\Platform\\'
# APP_TargetPath = 'Build\\Application\\obj\\'
# APP_OutPath = 'Build\\Application\\bin\\'
# APP_OutName = 'APP_TestCode'
# APP_Out = APP_OutPath+APP_OutName
# APP_LD = 'Config\\LinkerFile\\STM32F407VETx_FLASH_APP.ld'
# APP_Source = {}
# APP_Source['MCULibraryPath'] = SourceBasePath+'Generic\\Driver\\Mcu\\'
# APP_Source['DriverPath'] = SourceBasePath+'Generic\\Driver\\'
# APP_Source['ServicePath'] = SourceBasePath+'Generic\\Service\\'
# APP_Source['SupportPath'] = SourceBasePath+'Generic\\Support\\'
# APP_Source['PlatformPath'] = SourceBasePath+'Generic\\Platform\\'
|
class LibraryMethod:
    """Base catalogue item holding the fields common to every media type."""

    def __init__(self, id, title):
        self.id = id
        self.title = title

    def print_info(self):
        """Print the common item header (id and title)."""
        print("-----libraryMethod-----")
        print(f"The id is :{self.id}")
        print(f"the title is :{self.title}")
class Book(LibraryMethod):
    """A printed book: extends the base item with author and ISBN."""

    def __init__(self, id, title, author, ISBN):
        LibraryMethod.__init__(self, id, title)
        self.author = author
        self.ISBN = ISBN

    def print_info(self):
        """Print the base header followed by the book-specific section."""
        super().print_info()
        section = ["-----Book Info-----",
                   "the author name:" + self.author,
                   "the ISBN is :" + self.ISBN]
        print("\n".join(section))
class Audio(LibraryMethod):
    """An audio recording: extends the base item with singer and year."""

    def __init__(self, id, title, singer, year):
        LibraryMethod.__init__(self, id, title)
        self.singer = singer
        self.year = year

    def print_info(self):
        """Print the base header followed by the audio-specific section."""
        super().print_info()
        section = ["-----Audio-----",
                   "Singer Name:" + self.singer,
                   "Year is:" + str(self.year)]
        print("\n".join(section))
class AudioBook(Book,Audio):
    # Diamond inheritance: the MRO is AudioBook -> Book -> Audio -> LibraryMethod,
    # so print_info() resolves to Book.print_info(), whose super() call walks on
    # to Audio and then LibraryMethod, printing every section in turn.
    def __init__(self,id,title,author,singer,ISBN,year):
        # Both base initializers are called explicitly; LibraryMethod.__init__
        # therefore runs twice (harmless — it only reassigns id/title).
        Book.__init__(self,id,title,author,ISBN)
        Audio.__init__(self,id,title,singer,year)
# Demo: construct a combined audio book and print all inherited info sections.
ab1 = AudioBook(10,"show me the meaning","Hatem","Qusai","10-2202-5",1995)
ab1.print_info()
|
from django import forms
from .models import User
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django.contrib.auth.forms import UsernameField
from allauth.account.forms import SignupForm , LoginForm
from django.utils.translation import ugettext_lazy as _
from phonenumber_field.formfields import PhoneNumberField
class CustomUserCreationForm(UserCreationForm):
    """Admin/back-office creation form for the custom User model."""
    class Meta:
        model = User
        fields = ('email',"mobile_number")
        # Reuse Django's UsernameField normalization for the email field.
        field_classes = {"email":UsernameField }
class CustomUserChangeForm(UserChangeForm):
    """Change form for the custom User model; keeps the stock field set."""
    class Meta:
        model = User
        fields = UserChangeForm.Meta.fields
class CustomSignupForm(SignupForm):
    """allauth signup form extended with a mobile number field."""

    field_order = [
        'mobile_number',
        'email',
        "password1",
        "password2",
    ]

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Add the phone field dynamically so it participates in validation.
        self.fields['mobile_number'] = PhoneNumberField()

    def save(self, request):
        """Create the user via allauth, then attach the mobile number."""
        new_user = super().save(request)
        mobile_number = self.cleaned_data.get('mobile_number')
        if mobile_number:
            new_user.mobile_number = mobile_number
            new_user.save()
        return new_user
from setuptools import setup, find_packages


def _read_requirements(path='requirements.txt'):
    """Return the non-blank requirement lines from *path*.

    The original inline expression left the file handle open for the GC to
    collect; the with-block closes it deterministically, and blank lines are
    filtered out so they never reach install_requires.
    """
    with open(path) as req_file:
        return [line.strip() for line in req_file if line.strip()]


setup(
    name="tootstream",
    version="0.5.0",
    python_requires=">=3",
    install_requires=_read_requirements(),
    packages=find_packages('src'),
    package_dir={'': 'src'}, include_package_data=True,
    package_data={
    },
    author="Sara Murray",
    author_email="saramurray@protonmail.com",
    description="A command line interface for interacting with Mastodon instances",  # nopep8
    long_description="A command line interface for interacting with Mastodon instances",  # nopep8
    license="MIT",
    keywords="mastodon, mastodon.social, toot, tootstream",
    url="http://www.github.com/magicalraccoon/tootstream",
    entry_points={
        'console_scripts':
            ['tootstream=tootstream.toot:main']
    }
)
|
import pdb
from django.http import JsonResponse
from django.shortcuts import render_to_response
from django.views.generic import DetailView, CreateView
from article.models import Article, Comments
# TODO: Add comment pagination
# # For listing cover images ajax view
# @page_template('comments.html') # just add this decorator
# def article_comments_view(request, template='comments.html', extra_context=None, **kwargs):
# if 'slug' in kwargs.keys():
# article = Article.objects.get(slug=kwargs['slug'])
# else:
# return render(request, 'GET404.html')
#
# # Build context
# context = {
# 'comments': article.comments_set.all().order_by('-comment_time', ),
# }
# pdb.set_trace()
# if extra_context is not None:
# context.update(extra_context)
# return render(request, template, context)
class ArticleDetailView(DetailView):
    """Render a single article together with its comments, newest first."""
    model = Article
    template_name = 'article.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Expose the article's comments ordered by most recent first.
        context['comments'] = (
            self.get_object().comments_set.all().order_by('-comment_time')
        )
        return context
# default, the quill JS store image as base 64 format. This is ok for comments.
# If later we want to upload the image to server, refer to the following post:
# https://github.com/quilljs/quill/issues/1400
# https://github.com/quilljs/quill/issues/1089
class CommentPostView(CreateView):
    """Create a comment on an article.

    The target article is resolved from the URL path (the slug segment just
    before 'post_comments'); an optional parent comment id may be supplied
    in POST['parent'] for threaded replies.
    """
    model = Comments
    fields = ['comment', ]
    template_name = 'comments.html'

    def form_valid(self, form):
        # Attach the posting user to the comment instance.
        comment = form.save(commit=False)
        comment.profile = self.request.user
        # Find the article using the unique slug in the URL.
        url_string = self.request.path.split('/')
        comment.article = Article.objects.get(slug=url_string[url_string.index('post_comments') - 1])
        # Attach the parent comment if this is a threaded reply.
        if 'parent' in self.request.POST.keys():
            comment.parent_comment = Comments.objects.get(pk=int(self.request.POST['parent']))
        comment.save()
        # Re-render the full comment list for the article.
        results = {'comments': comment.article.comments_set.all().order_by('-comment_time')}
        return render_to_response(self.template_name, results)

    def form_invalid(self, form):
        # TODO: Edit error message
        # BUG FIX: removed a leftover pdb.set_trace() that blocked the
        # request worker on every invalid form submission.
        return JsonResponse({'errno': 1, 'data': ['']})
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-13 06:29
from __future__ import unicode_literals
from django.db import migrations, models
import student_gallery.models
class Migration(migrations.Migration):
    """Auto-generated (Django 1.9): tighten student_info file/email fields.

    Adds empty-string defaults and model-level upload_to callables for the
    audio_track, pic and resume fields, and a default + max_length for email.
    """

    dependencies = [
        ('student_gallery', '0010_student_info'),
    ]

    operations = [
        migrations.AlterField(
            model_name='student_info',
            name='audio_track',
            field=models.FileField(default='', upload_to=student_gallery.models.getAudio),
        ),
        migrations.AlterField(
            model_name='student_info',
            name='email',
            field=models.EmailField(default='', max_length=60),
        ),
        migrations.AlterField(
            model_name='student_info',
            name='pic',
            field=models.ImageField(upload_to=student_gallery.models.getImage),
        ),
        migrations.AlterField(
            model_name='student_info',
            name='resume',
            field=models.FileField(default='', upload_to=student_gallery.models.getResume),
        ),
    ]
|
""" Executes python code. Used in some field types, in dynamic roles """
# import os
import sys
# from loguru import logger
from dotmap import DotMap
import backend.dialects as ax_dialects
import backend.misc as ax_misc
this = sys.modules[__name__]
async def aexec(code, localz, **kwargs):
    """Wrap *code* (AxAction python source) in an async function and run it.

    The generated coroutine receives **kwargs as named parameters, executes
    the user code, and returns the `ax` object the code is expected to set.
    The compiled wrapper is left in `localz` under the name "func".
    """
    arg_names = ", ".join(kwargs)
    indented_code = code.replace("\n", "\n ")
    wrapper_src = (
        f"async def func({arg_names}):"
        f"\n {indented_code}"
        f"\n return ax"
    )
    exec(wrapper_src, {}, localz)  # pylint: disable=exec-used
    return await localz["func"](**kwargs)
async def execute_field_code(code, form, arguments=None, current_user=None):
    """ Used to execute AxField backend code. see /backend/fields for info

    Builds the `ax` convenience object (javascript-style attribute access),
    populates it with the form row, host, sql helper, current user and any
    extra arguments, then runs *code* via aexec.

    Returns the resulting `ax` object on success; on failure the exception
    object itself is returned rather than raised — callers are expected to
    check the return value.
    """
    localz = dict()
    host = await ax_misc.get_ax_host()
    ax = DotMap()  # javascript style dicts item['guid'] == item.guid
    ax.row.guid = form.row_guid
    ax.form = form
    ax.host = host
    ax.sql = ax_dialects.dialect.custom_query
    ax.user_email = None
    ax.user_guid = None
    if current_user:
        ax.user_email = current_user.get("email", None)
        ax.user_guid = current_user.get("user_id", None)
    # Expose every field of the form row as ax.row.<db_name>.
    for field in form.db_fields:
        ax.row[field.db_name] = field.value
    # Extra caller-supplied values become top-level ax attributes.
    if arguments:
        for key, value in arguments.items():
            ax[key] = value
    localz['ax'] = ax
    try:
        await aexec(code=str(code), localz=localz, ax=ax)
        ret_ax = localz['ax']
        return ret_ax
    except SyntaxError as err:
        # NOTE(review): this clause is redundant — the broad handler below
        # also catches SyntaxError with the same behavior.
        return err
    except Exception as err:  # pylint: disable=broad-except
        return err
|
"""
Run this program in the directory containing the gcode files
you would like to split into 2 files for inserting nuts, etc.
You will be prompted for the file name and the z-height to
stop the print at.
"""
import sys
import re
import time
while True:
filename = raw_input('\nEnter filename: ')
try:
f = open(filename,'r')
text = f.read()
f.close()
except Exception:
print filename, "doesn't exist.."
time.sleep(1)
continue
break
while True:
zpos = raw_input('\nEnter z-height to break file at: ')
m = re.search(r'G.*Z' + zpos, text)
if m:
break
print 'Invalid z-height!'
time.sleep(1)
i = m.start()
g1 = text[:i]
g2 = text[i:]
endText = """
******************End of file 1of2 code******************
G1 X190.000 Y190.000 ; move away to edge of bed
M104 S0 ; turn off nozzle temperature
M140 S0 ; turn off bed temperature
"""
g1 += endText
startText = """
******************Start of file 2of2 code******************
;resume with the rest of the print after nuts have been inserted
M190 S110 ; wait for bed temperature to be reached
M104 S230 ; set temperature
M109 S230 ; wait for temperature to be reached
"""
g2 = startText + g2
filename1 = filename.split('.')[0] + '(1of2)' + '.gcode'
filename2 = filename.split('.')[0] + '(2of2)' + '.gcode'
with open(filename1,'w') as f:
f.write(g1)
f.close()
with open(filename2,'w') as f:
f.write(g2)
f.close()
print '\nGCode file conversion complete!'
time.sleep(1)
|
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.item import Item, Field
import config
class BrokenItem(Item):
    """Scrapy item describing one broken-link occurrence."""
    url = Field()      # the URL that returned a watched status
    referer = Field()  # the page that linked to it
    status = Field()   # the HTTP status code received
class BrokenLinksSpider(CrawlSpider):
    """Crawl the configured domains and emit a BrokenItem for every response
    whose HTTP status is in the watched status list."""
    name = 'BrokenLinksSpider'
    rules = (Rule(LinkExtractor(), callback='parse_item', follow=True),)

    def __init__(self, name=None, urls=None, domains=None, httpstatus=None, **kwargs):
        self.name = name or config.name
        self.allowed_domains = domains.split(',') if domains else config.allowed_domains
        # Strip any scheme prefix / trailing slash so the offsite filter works.
        self.allowed_domains = [d.split('://')[-1].rstrip('/') for d in self.allowed_domains]
        self.start_urls = urls.split(',') if urls else config.start_urls
        # BUG FIX: map() returns a one-shot iterator on Python 3 — the first
        # membership test in parse_item() exhausted it, making every later
        # check False. Materialize the statuses as a list instead.
        self.handle_httpstatus_list = (
            [int(s) for s in httpstatus.split(',')] if httpstatus else config.httpstatus_list
        )
        super(BrokenLinksSpider, self).__init__(**kwargs)

    def parse_item(self, response):
        if response.status in self.handle_httpstatus_list:
            item = BrokenItem()
            item['url'] = response.url
            # The Referer header is absent on start URLs; guard before decoding.
            referer = response.request.headers.get('Referer')
            item['referer'] = referer.decode('utf8') if referer is not None else None
            item['status'] = response.status
            return item
|
from typing import List
# DP problem find the minimum path sum from top left to bottom right corner - LC 64
def min_path_sum(grid: List[List[int]]) -> int:
    """Minimum path sum from top-left to bottom-right, moving only right or
    down (LeetCode 64). Returns 0 for None or empty input.

    Uses a single rolling DP row instead of a full R x C table; the result
    is identical to the two-dimensional formulation.
    """
    if not grid:
        return 0
    rows, cols = len(grid), len(grid[0])
    dp = [0] * cols
    dp[0] = grid[0][0]
    # Seed the first row: only reachable by moving right.
    for c in range(1, cols):
        dp[c] = dp[c - 1] + grid[0][c]
    for r in range(1, rows):
        # First column: only reachable by moving down.
        dp[0] += grid[r][0]
        for c in range(1, cols):
            dp[c] = grid[r][c] + min(dp[c], dp[c - 1])
    # dp now holds the last row's costs; the answer is the last cell.
    return dp[-1]
# Smoke test: the expected minimum path sum is 7 (path 1->3->1->1->1).
input_1 = [[1,3,1],[1,5,1],[4,2,1]]
print(min_path_sum(input_1))
from concurrent.futures import ThreadPoolExecutor
import time
import random
from database.common import execute, get_conn
from database.queries import TAKE_TASK, RUN_TASK, FINISH_TASK
from logger import get_logger
LOGGER_NAME_PREFIX = 'worker-{}'
WORKERS_CNT = 2
TEST_TASK_SLEEP_MIN = 0
TEST_TASK_SLEEP_MAX = 10
NO_TASKS_TIMEOUT = 10
def test_task():
    """ A primitive test task """
    # Simulates work by sleeping a whole number of seconds drawn uniformly
    # from [TEST_TASK_SLEEP_MIN, TEST_TASK_SLEEP_MAX] (bounds inclusive).
    time.sleep(random.randint(TEST_TASK_SLEEP_MIN, TEST_TASK_SLEEP_MAX))
def worker(worker_id):
    """ Main worker function. Takes tasks from the queue and executes them

    Loops forever: claims a task (TAKE_TASK), marks it running (RUN_TASK),
    executes it, then marks it finished (FINISH_TASK). Sleeps
    NO_TASKS_TIMEOUT seconds when the queue is empty.
    """
    logger = get_logger(LOGGER_NAME_PREFIX.format(worker_id))
    logger.info(f'Worker {worker_id} launched')
    while True:
        # Claim the task inside its own connection scope so the connection
        # is released while the (potentially long) task body runs.
        with get_conn() as connection:
            task_row = execute(TAKE_TASK, connection=connection, logger=logger)
            if not task_row:
                # Queue empty: back off before polling again.
                time.sleep(NO_TASKS_TIMEOUT)
                continue
            task_id = task_row['id']
            execute(RUN_TASK, (task_id, ), connection=connection, logger=logger, fetch=False)
        logger.info(f'Task {task_id} was taken by worker {worker_id}')
        test_task()
        # NOTE(review): if test_task() raises, the task is never marked
        # finished — confirm whether a retry/failure path is needed.
        with get_conn() as connection:
            execute(FINISH_TASK, (task_id, ), connection=connection, logger=logger, fetch=False)
        logger.info(f'Task {task_id} finished')
def main():
    """Launch WORKERS_CNT worker threads; each runs the task loop forever."""
    with ThreadPoolExecutor(max_workers=WORKERS_CNT) as pool:
        for worker_id in range(WORKERS_CNT):
            pool.submit(worker, worker_id)
if __name__ == '__main__':
main()
|
from dataclasses import dataclass
@dataclass
class Config:
    """Training hyper-parameters.

    NOTE(review): the field names (sequence_length, burn_in_length, eta,
    enable_ngu) suggest an R2D2/NGU-style recurrent DQN setup — confirm
    against the training code that consumes this config.
    """
    gamma: float                  # discount factor
    batch_size: int
    lr: float                     # optimizer learning rate
    initial_exploration: int      # steps collected before learning starts (presumably) — TODO confirm
    log_interval: int
    update_target: int            # steps between target-network updates (presumably) — TODO confirm
    replay_memory_capacity: int
    device: str                   # e.g. "cpu" or "cuda"
    sequence_length: int
    burn_in_length: int
    eta: float
    local_mini_batch: int
    n_step: int
    over_lapping_length: int
    epsilon_decay: float
    random_seed: int
    enable_ngu: bool
    hidden_size: int
# Default configuration instance used by the training entry point.
config = Config(
    gamma=0.99,
    batch_size=32,
    lr=0.001,
    initial_exploration=1000,
    log_interval=10,
    update_target=1000,
    replay_memory_capacity=1000,
    device="cpu",
    sequence_length=32,
    burn_in_length=4,
    eta=0.9,
    local_mini_batch=8,
    n_step=2,
    over_lapping_length=16,
    epsilon_decay=0.00001,
    random_seed=42,
    enable_ngu=True,
    hidden_size=16,
)
|
from beamngpy import BeamNGpy, Scenario, Road, Vehicle, setup_logging, StaticObject, ProceduralRing
from BeamHome import getBeamngDirectory
def main():
    """Build a small BeamNG test scenario (ramp, ring, wall, straight road),
    spawn the test vehicle, and run until the user presses enter."""
    beamng = BeamNGpy('localhost', 64256,getBeamngDirectory())
    scenario = Scenario('smallgrid', 'road_test')
    vehicle = Vehicle('LIF_Mobile', model='etkc', licence='LIFLAB', colour='Blue')
    # Static ramp placed down-track for the vehicle to jump from.
    ramp = StaticObject(name='pyramp', pos=(250,0, 0), rot=(0, 0, 90), scale=(0.5, 0.5, 0.5),
                        shape='/levels/west_coast_usa/art/shapes/objects/ramp_massive.dae')
    # Elevated procedural ring beyond the ramp.
    ring = ProceduralRing(name='pyring', pos=(380, 0, 60), rot=(0, 0, 0), radius=5, thickness=2.5)
    wall= StaticObject(name="trumps_wall",pos=(420,0,0),rot=(0,0,0), scale=(1,10,75),
                       shape='/levels/smallgrid/art/shapes/misc/gm_alpha_barrier.dae')
    road_c = Road('track_editor_B_center', rid='jump_road')
    # Road nodes are (x, y, z, width) waypoints.
    roadC_Nodes=[(-2,0,0,10),(420,0,0,7)]
    road_c.nodes.extend(roadC_Nodes)
    scenario.add_road(road_c)
    scenario.add_procedural_mesh(ring)
    scenario.add_object(ramp)
    scenario.add_object(wall)
    scenario.add_vehicle(vehicle,(0,0,0),(0,0,-90))
    scenario.make(beamng)
    bng = beamng.open(launch=True)
    try:
        bng.load_scenario(scenario)
        bng.start_scenario()
        input('Press enter when done...')
    finally:
        # Always shut the simulator down, even if scenario loading fails.
        bng.close()

if __name__ == '__main__':
    main()
from rest_framework import permissions, renderers, viewsets
from rest_framework.decorators import action
from rest_framework.response import Response
from Estate.permissions import IsOwnerOrReadOnly, IsMyLike
from Estate.models import Profile, RealEstate, Liked
from Estate.serializers import ProfileSerializer, EstateSerializer, LikedSerializer
class ProfileViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for profiles; writes are limited by IsOwnerOrReadOnly."""
    queryset = Profile.objects.all()
    serializer_class = ProfileSerializer
    permission_classes = [
        permissions.IsAuthenticated, IsOwnerOrReadOnly
    ]
class EstateViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for real-estate listings; writes limited to the owner."""
    queryset = RealEstate.objects.all()
    serializer_class = EstateSerializer
    permission_classes = [
        permissions.IsAuthenticated, IsOwnerOrReadOnly
    ]

    def perform_create(self, serializer):
        # Stamp the authenticated user as the owner of the new listing.
        serializer.save(owner=self.request.user)
class LikedViewset(viewsets.ModelViewSet):
    """CRUD endpoints for likes; access restricted by the IsMyLike permission."""
    queryset = Liked.objects.all()
    serializer_class = LikedSerializer
    permission_classes = [
        permissions.IsAuthenticated, IsMyLike
    ]
|
from unittest import TestCase
import requests_mock
from test import file_to_string
from html_to_json.fetch_html import get_html_tree
class TestGetHtmlTree(TestCase):
    """Unit test for fetch_html.get_html_tree using a mocked HTTP response."""

    def test_get_html_tree(self):
        # Serve a local fixture instead of hitting the real site.
        with requests_mock.Mocker() as m:
            m.get('https://slashdot.org', text=file_to_string('fixtures/index.html'))
            element = get_html_tree('https://slashdot.org')
            self.assertEqual(2, len(element.getchildren()), 'Should have head and body')
            self.assertEqual('en', element.get('lang'), 'lang should be en')
            self.assertEqual('html', element.tag, 'tag should be html')
|
import numpy as np
# Paths to the dataset catalog files (left empty; to be filled per machine).
PATH_TO_TRAIN_SET_CATELOG = ''
PATH_TO_VAL_SET_CATELOG = ''
PATH_TO_TEST_SET_CATELOG = ''
# NOTE(review): presumably the number of frames grouped per sample — confirm.
GROUPED_SIZE = 2
# Input image geometry (square, RGB).
IMAGE_SIZE = 224
IMAGE_CHANNELS = 3
# Binary classification: one-hot labels for no-fight / fight.
NUMBER_OF_CATEGORIES = 2
NO_FIGHT_LABEL = [1., 0.]
FIGHT_LABEL = [0., 1.]
FLOAT_TYPE = np.float32
# Seconds to wait on the input queue before timing out.
TIMEOUT_FOR_WAIT_QUEUE = 100
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Test Server Settings
"""
import random
import string
def generate_random(size):
    """Generate random printable string"""
    alphabet = string.ascii_uppercase + string.digits + ' _+=\'"~@!#?/<>'
    picked = [random.choice(alphabet) for _ in range(size)]
    return ''.join(picked)
def test_server_settings_readonly(admin_session):
    """ Server settings readonly """
    print(test_server_settings_readonly.__doc__)
    # app_version is a read-only setting: the API must reject the update.
    server_api = admin_session.server_settings_update({"name": 'app_version', "value": '0.0.3'})
    assert server_api is not None
    assert server_api["result"] == "FAIL"
    print(server_api)
def test_server_settings_update(admin_session):
    """ Server settings

    Reads the current value of a writable setting, updates it with a random
    1 KiB string, then re-reads the settings and checks the new value took.
    """
    print(test_server_settings_update.__doc__)
    server_settings = admin_session.server_settings({})
    assert server_settings is not None
    assert server_settings["result"] == "DONE"
    _sett_name = 'mail_signature_text'
    # strip() mirrors server-side normalization of the stored value.
    _new_value = generate_random(1024).strip()
    # Locate the current setting record.
    # NOTE(review): if the setting is missing, _old_sett stays None and the
    # inequality assert below raises TypeError rather than failing cleanly.
    _old_sett = None
    for _sett in server_settings['data']:
        if _sett['name'] == _sett_name:
            _old_sett = _sett
    print(_old_sett)
    sett_updated = admin_session.server_settings_update({
        "name": _sett_name,
        "value": _new_value
    })
    assert sett_updated is not None
    assert sett_updated["result"] == "DONE"
    # Re-read and verify the stored value changed to the new random string.
    server_settings2 = admin_session.server_settings({})
    assert server_settings2 is not None
    assert server_settings2["result"] == "DONE"
    _new_sett = None
    for _sett in server_settings2['data']:
        if _sett['name'] == _sett_name:
            _new_sett = _sett
    print(_new_sett)
    assert _new_sett['value'] != _old_sett['value']
    assert _new_sett['value'] == _new_value
|
from copy import deepcopy
import random
from DeepPython import Params
class Slice:
    """Produces train/test slices over a dataset of matches.

    Two slicing strategies ("groups") are supported:
      * 'Lpack'   -- deterministic windows: the train slice always starts at
                     index 0 and grows step by step; the test slice is the
                     fixed-size window immediately after it.
      * 'Shuffle' -- a seeded random split; the train fraction is taken from
                     feed_dict['p_train'] and redrawn by next_slice().
    """

    def __init__(self, py_datas, group, feed_dict=None):
        if feed_dict is None:
            feed_dict = {}
        # Dispatch table: one set of strategy hooks per supported group.
        self.switcher = {
            'Lpack': {'init': self.__init_lpack,
                      'get_slice': self.__get_slice_lpack,
                      'next_slice': self.__next_slice_lpack,
                      'cget_slice': self.__get_current_slice_lpack},
            'Shuffle': {'init': self.__init_shuffle,
                        'get_slice': self.__get_slice_shuffle,
                        'next_slice': self.__shuffle_slice,
                        'cget_slice': self.__get_slice_shuffle}
        }
        if group not in self.switcher:
            # BUG FIX: the original used `raise('...')` on a plain string,
            # which on Python 3 raises "TypeError: exceptions must derive
            # from BaseException" instead of the intended message.
            raise ValueError('Unkown type {}, expected: {}'.format(group, list(self.switcher.keys())))
        self.__group = group
        self.__parameters = feed_dict
        self.__slices = {}
        self.__nb_matches = len(py_datas)
        self.nb_slices = 0
        self.switcher[self.__group]['init'](py_datas)
        if Params.CHECK_SLICE_OVERLAP:
            # NOTE(review): __check_slices is not defined on this class
            # (only the public check_slices(train, test) exists), so this
            # path raises AttributeError when the flag is set — confirm
            # the intended call.
            self.__check_slices()

    def get_slice(self, feed_dict):
        """Return a function mapping a list to its slice for `feed_dict`."""
        return lambda l: self.switcher[self.__group]['get_slice'](l, feed_dict)

    def cget_slice(self, feed_dict):
        """Like get_slice(), but always uses the *current* slice index."""
        return lambda l: self.switcher[self.__group]['cget_slice'](l, feed_dict)

    def next_slice(self):
        """Advance to the next slice (Lpack) or redraw the split (Shuffle)."""
        return self.switcher[self.__group]['next_slice']()

    def __init_lpack(self, py_datas):
        """Build the deterministic train/test window lists for 'Lpack'."""
        size = 10
        start = 6000
        train = [{'left': 0, 'right': start + size*i, 'name': 'train_'+str(i)} for i in range((len(py_datas) - start-size*2) // size)]
        test = [{'left': start + size * i, 'right': start + size * (i+1), 'name': 'test_'+str(i)} for i in range((len(py_datas) - start-size*2) // size)]
        self.nb_slices = len(train)
        self.__slices = {'train': train, 'test': test}
        self.__quick_check_slices(self.__group)
        self.__slices['i'] = 0

    def __old_init_lpack(self, py_datas):
        """Previous Lpack initialisation (unused); kept for reference."""
        self.__slices = {'train': [], 'test': []}
        current_nb_slice = -1
        nb_matchs = 0
        current_slice_test = {'left': 0, 'right': 0}
        current_slice_train = {'left': 0, 'right': 0}
        for row in py_datas:
            if nb_matchs > 4950:
                current_slice_test['right'] = nb_matchs + 1
                current_slice_test['name'] = 'test_' + str(current_nb_slice)
                current_slice_train['right'] = nb_matchs + 1
                current_slice_train['name'] = 'train_' + str(current_nb_slice + 1)
                current_nb_slice += 1
                if current_nb_slice > 0:
                    self.__slices['test'].append(deepcopy(current_slice_test))
                    self.__slices['train'].append(deepcopy(current_slice_train))
                current_slice_test['left'] = nb_matchs + 1
            nb_matchs += 1
        self.nb_slices = min(len(self.__slices['test']), len(self.__slices['train']))
        self.__quick_check_slices(self.__group)
        self.__slices['i'] = 0

    def __get_slice_lpack(self, l, feed_dict):
        """Slice `l` by the window named by feed_dict['label']/['index']."""
        left = self.__slices[feed_dict['label']][feed_dict['index']]['left']
        right = self.__slices[feed_dict['label']][feed_dict['index']]['right']
        return l[left:right]

    def __get_current_slice_lpack(self, l, feed_dict):
        """Slice `l` using the current internal window index."""
        feed_dict['index'] = self.__slices['i']
        return self.__get_slice_lpack(l, feed_dict)

    def __next_slice_lpack(self):
        self.__slices['i'] += 1

    # Shuffle
    def __init_shuffle(self, _):
        """Single random split controlled by an internal 64-bit seed."""
        self.nb_slices = 1
        self.__slices['internal_seed'] = random.getrandbits(64)

    def __get_slice_shuffle(self, l, feed_dict):
        """Deterministically reshuffle `l` from the stored seed and return
        the train prefix or test suffix according to feed_dict['label']."""
        n = len(l)
        random.seed(a=self.__slices['internal_seed'])
        if feed_dict['label'] == 'train':
            return random.sample(l, n)[:int(n * self.__parameters['p_train'])]
        else:
            return random.sample(l, n)[int(n * self.__parameters['p_train']):]

    def __shuffle_slice(self):
        """Redraw the split by picking a new seed."""
        self.__slices['internal_seed'] = random.getrandbits(64)

    def check_slices(self, train, test):
        """Raise if the given train/test slicing functions overlap."""
        datas = range(self.__nb_matches)
        train = train(datas)
        test = test(datas)
        if set(train) & set(test):
            # BUG FIX: was `raise 'overlapping test and train'` — raising a
            # string is a TypeError on Python 3; raise a real exception.
            raise ValueError('overlapping test and train')

    def __quick_check_slices(self, group):
        """Sanity-check Lpack windows: no overlap and consistent names."""
        if group == 'Lpack':
            for i in range(self.nb_slices):
                if self.__slices['train'][i]['right'] <= self.__slices['test'][i]['left'] \
                        or self.__slices['train'][i]['left'] >= self.__slices['test'][i]['right']:
                    for test_train in ['test', 'train']:
                        if self.__slices[test_train][i]['name'] != test_train + '_' + str(i):
                            raise Exception('Slice number {} error: expected {}, but name is {}'
                                            .format(i, test_train + '_' + str(i), self.__slices['train'][i]['name']))
                else:
                    raise Exception('Overlapping train and test')
|
from re import match
from rules import name_rules
import pytest
def test_aberP():
    """All Aber- place names should be collected by the 'aberP' rules.

    BUG FIX: the original body `return`ed before any assert could run, used
    the Python 2 `print found` statement (a SyntaxError under the pytest/
    Python 3 toolchain this file imports), matched each regex twice, and
    held three mutually exclusive asserts. The commented-out expectation
    matching the Aber- test data is kept as the live assert.
    """
    found = []
    strings = ['rat', 'mouse', 'Aberystwyth', 'Aberdyfi', 'Aberdeen', 'Abergavenny', 'Aberuthven']
    for string in strings:
        for regex in name_rules['aberP']:
            m = match(regex, string)
            if m is not None:
                found.append(m.group())
    assert found == ['Aberystwyth', 'Aberdyfi', 'Aberdeen', 'Abergavenny', 'Aberuthven']
def test_not_aberP():
    """Non-place words must not match any 'aberP' rule.

    BUG FIX: the original `return`ed 'No match' on the first non-matching
    regex, so the loop never finished and the final assert was unreachable.
    """
    found = []
    strings = ['rat', 'mouse']
    for string in strings:
        for regex in name_rules['aberP']:
            m = match(regex, string)
            if m is not None:
                found.append(m.group())
                assert m.group() == string
    assert found == []
def test_bergQberryS():
    """Blen-/Blaen-/Blan- place names should all be collected.

    BUG FIX: the original `return`ed 'No match' on the first non-matching
    string and `return found` before the assert, so nothing was checked.
    NOTE(review): the rules key is 'aberP' but the test name and expected
    values suggest a 'bergQberryS'-style key — confirm against rules.py.
    """
    found = []
    strings = ['cheese', 'echidna', 'Blencathra', 'Blencogo', 'Blaenau Ffestiniog', 'Blantyre']
    for string in strings:
        for regex in name_rules['aberP']:
            m = match(regex, string)
            if m is not None:
                found.append(m.group())
    assert found == ['Blencathra', 'Blencogo', 'Blaenau Ffestiniog', 'Blantyre']
# # def test_not_bergQberryS():
# found = []
# strings = ['cheese', 'echidna']
# for string in strings:
# for regex in name_rules['bergQberryS']:
# if match(regex, string):
# found.append(match(regex, string).group())
# else:
# found is None
# return found
# assert found is None
def test_casterSchasterScesterSceterScaisterQxeterS():
    """-caster/-chester/-cester/-eter style names should all be collected.

    BUG FIX: the original `return`ed 'No match' on the first non-matching
    regex and `return found` before the assert, so the expectation never
    ran; it also called match() twice per candidate. The expected list is
    preserved exactly as written (note it omits 'Lancaster').
    """
    found = []
    strings = ['Damaraland mole rat', 'Cape Dune mole rat', 'Lancaster', 'Doncaster', 'Gloucester', 'Caister', 'Manchester', 'Chichester', 'Worcester', 'Chester', 'Exeter', 'Cirencester', 'Colchester', 'Tadcaster', 'Leicester', 'Towcester']
    for string in strings:
        for regex in name_rules["casterSchasterScesterSceterScaisterQxeterS"]:
            m = match(regex, string)
            if m is not None:
                found.append(m.group())
    assert found == ['Doncaster', 'Gloucester', 'Caister', 'Manchester', 'Chichester', 'Worcester', 'Chester', 'Exeter', 'Cirencester', 'Colchester', 'Tadcaster', 'Leicester', 'Towcester']
|
#!python3
import sys
import traceback
import re
from config import parse
import utils
canteen_request = re.compile('/(?P<dirs>([\w-]+/)*[\w-]+)/(?P<file>[\w-]+.(xml|json))')
def handler(eniron, start_response):
    """WSGI application: parse /<dirs...>/<file>.(xml|json) requests, run the
    configured parser, and map parser errors onto 404 responses.

    BUG FIX: on Python 3 (this file is #!python3) WSGI response bodies must
    be bytes per PEP 3333; every non-success path previously returned str,
    which breaks under wsgiref. All bodies are now UTF-8 bytes.
    """
    # Allow mounting under a path prefix supplied by the server environment.
    prefix = eniron.get('PATH_PREFIX', None)
    uri = eniron['PATH_INFO']
    if prefix and uri.startswith(prefix):
        uri = uri[len(prefix):]
    match = canteen_request.match(uri)
    if not match:
        start_response("404 Wrong Path", [("Content-type", 'application/xml; charset=utf-8')])
        return [b'<xml version="1.0"><info>{provider}/{canteen}/{feed}.xml</info></xml>']
    request = utils.Request(eniron)
    try:
        # dirs segments + file name select the provider/canteen/feed parser.
        content = parse(request, *(match.group('dirs').split('/') + [match.group('file')]))
        content = content.encode('utf8')
        start_response('200 OK', [('Content-Type', 'application/xml; charset=utf-8'),
                                  ('Content-Length', str(len(content)))])
        return (content,)
    except utils.Redirect as e:
        start_response('301 Permanent Redirect', [('Location', e.location)])
        return (b'',)
    except utils.ParserNotFound as e:
        start_response('404 Parser not found', [('Content-Type', 'text/plain; charset=utf-8')])
        return (e.reason.encode('utf-8'),)
    except utils.SourceNotFound as e:
        start_response('404 Source not found', [('Content-Type', 'text/plain; charset=utf-8')])
        return (e.reason.encode('utf-8'),)
    except utils.FeedNotFound as e:
        start_response('404 Feed not found', [('Content-Type', 'text/plain; charset=utf-8')])
        return (e.reason.encode('utf-8'),)
    except utils.NotFoundError as e:
        start_response('404 Unknown file format', [('Content-Type', 'text/plain; charset=utf-8')])
        return (e.reason.encode('utf-8'),)
    except Exception:
        traceback.print_exception(*sys.exc_info())
        start_response('500 Internal Server Error', [])
        return (b'',)
|
from RecoHI.HiTracking.hiSecondPixelTripletStep_cff import *
from RecoHI.HiTracking.hiMixedTripletStep_cff import *
from RecoHI.HiTracking.hiPixelPairStep_cff import *
from RecoHI.HiTracking.MergeTrackCollectionsHI_cff import *
# Heavy-ion iterative tracking: run the extra seeding/tracking steps in
# order, then merge everything into the final hiGeneralTracks collection.
hiIterTracking = cms.Sequence(
    hiSecondPixelTripletStep
    *hiPixelPairStep
    *hiGeneralTracks
    )
|
import qRC.python.quantileRegression_chain_disc as qRCd
import numpy as np
import argparse
import yaml
import root_pandas
def main(options):
    """Apply the trained quantile-regression corrections to data/MC frames.

    Reads the YAML config, loads the data and MC dataframes for the chosen
    detector region (EB/EE), then corrects shower shapes, photon isolation
    and charged isolations in turn, optionally recomputes photon ID MVAs,
    and writes the corrected frames to HDF.

    NOTE(review): `file(...)` below is Python 2-only, and yaml.load without
    an explicit Loader is deprecated/unsafe on modern PyYAML.
    """
    stream = file(options.config,'r')
    inp=yaml.load(stream)
    dataframes = inp['dataframes']
    showerShapes = inp['showerShapes']
    chIsos = inp['chIsos']
    year = str(inp['year'])
    workDir = inp['workDir']
    weightsDirs = inp['weightsDirs']
    finalWeightsDirs = inp['finalWeightsDirs']
    # Column lists and MC tree names differ per data-taking year.
    if year == '2017':
        cols=["mass","probeScEnergy","probeScEta","probePhi","run","weight","weight_clf","rho","probeR9","probeSigmaIeIe","probePhiWidth","probeEtaWidth","probeCovarianceIeIp","probeCovarianceIpIp","probeS4","probePhoIso","probeChIso03","probeChIso03worst","probeSigmaRR","probeScPreshowerEnergy","probePt","tagPt","probePassEleVeto","tagScEta","probePass_invEleVeto"]
        treenameMC = '/tagAndProbeDumper/trees/DYJetsToLL_amcatnloFXFX_13TeV_All'
    elif year == '2018':
        cols=["mass","probeScEnergy","probeScEta","probePhi","run","weight","weight_clf","rho","probeR9","probeSigmaIeIe","probePhiWidth","probeEtaWidth","probeCovarianceIeIp","probeCovarianceIpIp","probeS4","probePhoIso","probeChIso03","probeChIso03worst","probeSigmaRR","probePt","tagPt","probePassEleVeto","tagScEta"]
        treenameMC = '/tagAndProbeDumper/trees/DYJetsToLL_amcatnloFXFX_13TeV_All'
    elif year == '2016':
        cols=["mass","probeScEnergy","probeScEta","probePhi","run","weight","weight_clf","rho","probeR9","probeSigmaIeIe","probePhiWidth","probeEtaWidth","probeCovarianceIetaIphi","probeCovarianceIphiIphi","probeS4","probePhoIso","probeChIso03","probeChIso03worst","probeSigmaRR","probeScPreshowerEnergy","probePt","tagPt","probePassEleVeto","tagScEta","probePass_invEleVeto"]
        treenameMC = '/tagAndProbeDumper/trees/DYJets_madgraph_13TeV_All'
    # Barrel/endcap selection on supercluster eta.
    EBEE_cut = 'abs(probeScEta)<1.4442' if options.EBEE == 'EB' else 'abs(probeScEta)>1.556'
    # --- Shower-shape corrections -------------------------------------
    qRC = qRCd.quantileRegression_chain(year,options.EBEE,workDir,showerShapes)
    # weight_clf only exists in pre-processed (non-ROOT) inputs.
    if dataframes['mc'][options.EBEE]['input'].split('.')[-1] == 'root':
        cols.pop(cols.index("weight_clf"))
        qRC.MC = root_pandas.read_root(dataframes['mc'][options.EBEE]['input'],treenameMC,columns=cols).query(EBEE_cut)
    else:
        qRC.loadMCDF(dataframes['mc'][options.EBEE]['input'],0,options.n_evts,columns=cols)
    if dataframes['data'][options.EBEE]['input'].split('.')[-1] == 'root':
        if "weight_clf" in cols:
            cols.pop(cols.index("weight_clf"))
        qRC.data = root_pandas.read_root(dataframes['data'][options.EBEE]['input'],'/tagAndProbeDumper/trees/Data_13TeV_All',columns=cols).query(EBEE_cut)
    else:
        qRC.loadDataDF(dataframes['data'][options.EBEE]['input'],0,options.n_evts,columns=cols)
    if options.backend is not None:
        qRC.setupJoblib(options.backend,cluster_id = options.clusterid)
    for var in qRC.vars:
        if options.final:
            qRC.loadFinalRegression(var,weightsDir=finalWeightsDirs['showerShapes'])
            qRC.loadScaler(var,weightsDir=finalWeightsDirs['showerShapes'])
            qRC.applyFinalRegression(var)
        qRC.loadClfs(var,weightsDir=weightsDirs['showerShapes'])
        qRC.correctY(var,n_jobs=options.n_jobs)
    # --- Photon isolation corrections (shares the loaded frames) -------
    qRC_PI = qRCd.quantileRegression_chain_disc(year,options.EBEE,workDir,['probePhoIso'])
    qRC_PI.MC = qRC.MC
    qRC_PI.data = qRC.data
    qRC_PI.loadp0tclf('probePhoIso',weightsDir=weightsDirs['phoIso'])
    if options.backend is not None:
        qRC_PI.setupJoblib(options.backend, cluster_id = options.clusterid)
    if options.final:
        qRC_PI.loadFinalRegression('probePhoIso',weightsDir=finalWeightsDirs['phoIso'])
        qRC_PI.loadFinalTailRegressor('probePhoIso',weightsDir=finalWeightsDirs['phoIso'])
        qRC_PI.loadScaler('probePhoIso',weightsDir=finalWeightsDirs['phoIso'])
        qRC_PI.applyFinalRegression('probePhoIso',n_jobs=options.n_jobs)
    qRC_PI.loadClfs('probePhoIso',weightsDir=weightsDirs['phoIso'])
    qRC_PI.correctY('probePhoIso',n_jobs=options.n_jobs)
    # --- Charged isolation corrections ---------------------------------
    qRC_ChI = qRCd.quantileRegression_chain_disc(year,options.EBEE,workDir,chIsos)
    qRC_ChI.MC = qRC_PI.MC
    qRC_ChI.data = qRC_PI.data
    qRC_ChI.load3Catclf(qRC_ChI.vars,weightsDir=weightsDirs['chIsos'])
    if options.backend is not None:
        qRC_ChI.setupJoblib(options.backend,cluster_id = options.clusterid)
    if options.final:
        qRC_ChI.loadFinalTailRegressor(qRC_ChI.vars,weightsDir=finalWeightsDirs['chIsos'])
        for var in qRC_ChI.vars:
            qRC_ChI.loadFinalRegression(var,weightsDir=finalWeightsDirs['chIsos'])
            qRC_ChI.loadScaler(var,weightsDir=finalWeightsDirs['chIsos'])
            qRC_ChI.applyFinalRegression(var,n_jobs=options.n_jobs)
    qRC_ChI.loadTailRegressors(qRC_ChI.vars,weightsDir=weightsDirs['chIsos'])
    for var in qRC_ChI.vars:
        qRC_ChI.loadClfs(var,weightsDir=weightsDirs['chIsos'])
        qRC_ChI.correctY(var,n_jobs=options.n_jobs)
    # --- Optional photon ID MVA recomputation ---------------------------
    if options.mvas:
        if year == '2017':
            weights = ("/mnt/t3nfs01/data01/shome/threiten/QReg/ReReco17_data/camp_3_1_0/PhoIdMVAweights/HggPhoId_94X_barrel_BDT_v2.weights.xml","/mnt/t3nfs01/data01/shome/threiten/QReg/ReReco17_data/camp_3_1_0/PhoIdMVAweights/HggPhoId_94X_endcap_BDT_v2.weights.xml")
            leg2016=False
        if year == '2018':
            weights = ("/mnt/t3nfs01/data01/shome/threiten/QReg/ReReco17_data/camp_3_1_0/PhoIdMVAweights/HggPhoId_94X_barrel_BDT_v2.weights.xml","/mnt/t3nfs01/data01/shome/threiten/QReg/ReReco17_data/camp_3_1_0/PhoIdMVAweights/HggPhoId_94X_endcap_BDT_v2.weights.xml")
            leg2016=False
            # 2018 columns have no preshower energy; fill a placeholder so
            # the MVA inputs are complete (-999 sentinel in EB, 0 in EE).
            if options.EBEE == 'EB':
                qRC_ChI.data['probeScPreshowerEnergy'] = -999.*np.ones(qRC_ChI.data.index.size)
                qRC_ChI.MC['probeScPreshowerEnergy'] = -999.*np.ones(qRC_ChI.MC.index.size)
            elif options.EBEE == 'EE':
                qRC_ChI.data['probeScPreshowerEnergy'] = np.zeros(qRC_ChI.data.index.size)
                qRC_ChI.MC['probeScPreshowerEnergy'] = np.zeros(qRC_ChI.MC.index.size)
        elif year == '2016':
            weights = ("/mnt/t3nfs01/data01/shome/threiten/QReg/ReReco16/PhoIdMVAweights/HggPhoId_barrel_Moriond2017_wRhoRew.weights.xml","/mnt/t3nfs01/data01/shome/threiten/QReg/ReReco16/PhoIdMVAweights/HggPhoId_endcap_Moriond2017_wRhoRew.weights.xml")
            leg2016=True
        if options.final:
            mvas = [ ("newPhoID","data",[]), ("newPhoIDcorrAll","qr",qRC.vars + qRC_PI.vars + qRC_ChI.vars), ("newPhoIDcorrAllFinal","final",qRC.vars + qRC_PI.vars + qRC_ChI.vars)]
        else:
            mvas = [ ("newPhoID","data",[]), ("newPhoIDcorrAll","qr",qRC.vars + qRC_PI.vars + qRC_ChI.vars)]
        qRC.computeIdMvas( mvas[:1], weights,'data', n_jobs=options.n_jobs, leg2016=leg2016)
        qRC.computeIdMvas( mvas, weights,'mc', n_jobs=options.n_jobs , leg2016=leg2016)
    # Persist the corrected frames (data only written when MVAs were run).
    qRC_ChI.MC.to_hdf('{}/{}'.format(workDir,dataframes['mc'][options.EBEE]['output']),'df',mode='w',format='t')
    if options.mvas:
        qRC_ChI.data.to_hdf('{}/{}'.format(workDir,dataframes['data'][options.EBEE]['output']),'df',mode='w',format='t')
if __name__=="__main__":
    parser=argparse.ArgumentParser()
    # Required: YAML config path, detector region (EB/EE), event count.
    requiredArgs = parser.add_argument_group('Required Arguments')
    requiredArgs.add_argument('-c','--config', action='store', default='quantile_config.yaml', type=str,required=True)
    requiredArgs.add_argument('-E','--EBEE', action='store', type=str, required=True)
    requiredArgs.add_argument('-N','--n_evts', action='store', type=int, required=True)
    # Optional: joblib backend/cluster, final-regression mode, MVA step, jobs.
    optArgs = parser.add_argument_group('Optional Arguments')
    optArgs.add_argument('-B','--backend', action='store', type=str)
    optArgs.add_argument('-i','--clusterid', action='store', type=str)
    optArgs.add_argument('-f','--final', action='store_true', default=False)
    optArgs.add_argument('-m','--mvas', action='store_true', default=False)
    optArgs.add_argument('-n','--n_jobs', action='store', type=int, default=1)
    options=parser.parse_args()
    main(options)
|
# https://projecteuler.net/problem=55
def is_palindrome(n):
    """Return True if the decimal representation of *n* reads the same both ways."""
    digits = str(n)
    return digits == digits[::-1]
def reverse_num(n):
    """Return the decimal digits of a non-negative integer *n* in reverse order."""
    reversed_value = 0
    while n > 0:
        n, last_digit = divmod(n, 10)
        reversed_value = reversed_value * 10 + last_digit
    return reversed_value
def is_lychrel(n, attempts=51):
    """Return True if *n* never forms a palindrome within *attempts* reverse-and-add steps."""
    candidate = n
    for _ in range(attempts):
        candidate += reverse_num(candidate)
        if is_palindrome(candidate):
            return False
    return True
if __name__ == "__main__":
    # Project Euler 55: count Lychrel candidates below 10000.
    limit = 10000
    count = sum(1 for candidate in range(2, limit + 1) if is_lychrel(candidate))
    print(f"The count is {count}")
|
# Read two integers and print the basic arithmetic operations on them.
a = int(input("enter a number: "))
b = int(input("enter another number: "))
print("the sum of two numbers are:", a + b)
print("the difference between two numbers are: ", a - b)
print("the product between two numbers are: ", a * b)
print("the division between two numbers are: ", a / b)
print("the floor division between two numbers are: ", a // b)
# Bug fix: this line claimed "modulus" but computed exponentiation (a ** b).
print("the modulus division between two numbers are: ", a % b)
|
import csv
import psycopg2
import os
from random import shuffle
def csv2sql(dirname, filename):
user = ""
connect_str = "dbname={user} user={user} password={user} host='localhost'"
conn = psycopg2.connect(connect_str.format(user=user))
cur = conn.cursor()
with open(dirname + "/" + filename , 'r') as csvfile:
# sql = "insert into monologue_test values ('{author}', '{date}', '{source}', '{content}'"
sql = "insert into monologue (author, date, source, content) values ($${author}$$, $${date}$$, $${source}$$, $${content}$$)"
date = filename[:-4]
rows = list(csv.DictReader(csvfile))
shuffle(rows)
for row in rows:
insert_sql = sql.format(author=row['name'],
date=date,
source="newsmax",
content=row['monologue'].strip())
try:
cur.execute(insert_sql)
except psycopg2.IntegrityError as e:
print dirname, filename, "existed"
print row['name'], row['monologue']
conn.commit()
continue
except Exception:
print dirname, filename
exit
conn.commit()
conn.close()
if __name__ == '__main__':
    # Walk each yearly sub-directory (plus the top-level folder, "") and load
    # every CSV found, in sorted order.
    year_dirs = ["/2009", "/2010", "/2011", "/2012", "/2013", "/2014", "/2015", "/2016", ""]
    for suffix in year_dirs:
        base = "newsmax" + suffix
        for entry in sorted(os.listdir(base)):
            if entry[-4:] == ".csv":
                csv2sql(base, entry)
|
# -*- coding: utf-8 -*-
"""Check for undercoordinated carbons"""
import numpy as np
from ..utils.get_indices import get_c_indices
from .base_missing_check import BaseMissingCheck
from .geometry import _maximum_angle, add_sp2_hydrogen, add_sp3_hydrogens_on_cn1
class UnderCoordinatedCarbonCheck(BaseMissingCheck):
    """Check for undercoordinated carbons"""
    def __init__(
        self, structure, structure_graph
    ):  # pylint: disable=super-init-not-called
        # structure: the structure to inspect; carbon site indices are
        # collected once up front.
        # structure_graph: bonding graph consumed by the BaseMissingCheck
        # helpers (get_cn / get_connected_sites) — TODO confirm.
        self.structure = structure
        self.c_indices = get_c_indices(self.structure)
        self.structure_graph = structure_graph
        self._position_candidates = None
    @property
    def name(self):
        # Human-readable name of this check.
        return "Undercoordinated carbon"
    @property
    def description(self):
        return "Checks, using geometric heuristics,\
        if there are any carbons that are likely undercoordinated."
    def _run_check(self):
        # Returns (passed, flagged_site_indices, candidate_H_positions);
        # the check passes when no undercoordinated carbon was found.
        (
            undercoordinated_carbons,
            candidate_positions,
        ) = self._get_undercoordinated_carbons()
        # One list of candidate H positions must exist per flagged carbon.
        assert len(undercoordinated_carbons) == len(
            candidate_positions
        ), "Unexpected check error"
        return (
            len(undercoordinated_carbons) == 0,
            undercoordinated_carbons,
            candidate_positions,
        )
    def _get_undercoordinated_carbons(self, tolerance: int = 10):
        """Idea is that carbon should at least have three neighbors if it is not sp1.
        In sp1 case it is linear. So we can just check if there are carbons with
        non-linear coordination with less than three neighbors. An example in CoRE
        MOF would be AHOKIR. In principle this should also flag the quite common
        case of benzene rings with missing hydrogens.

        tolerance: allowed deviation (degrees) from 180° before a CN-2 carbon
        is considered non-linear and therefore undercoordinated.
        """
        undercoordinated_carbons = []
        h_positions = []  # output must be list of lists to allow for filtering
        for site_index in self.c_indices:
            cn = self.get_cn(site_index)  # pylint:disable=invalid-name
            neighbors = self.get_connected_sites(site_index)
            if cn == 1:
                # this will fail for alkine
                undercoordinated_carbons.append(site_index)
                # make it sp3: propose three tetrahedral hydrogen positions
                h_positions.append(
                    add_sp3_hydrogens_on_cn1(self.structure[site_index], neighbors)
                )
            if cn == 2:
                # Angle at the carbon between its two neighbors; linear (~180°)
                # CN-2 carbons are treated as sp1 and left alone.
                angle = _maximum_angle(
                    self.structure.get_angle(
                        site_index, neighbors[0].index, neighbors[1].index
                    )
                )
                if np.abs(180 - angle) > tolerance:
                    # if (not is_metal(neighbors[0].site)) or (
                    #     not is_metal(neighbors[1].site)
                    # ):
                    # if len(_vdw_radius_neighbors(self.structure, site_index)) <= 2:
                    undercoordinated_carbons.append(site_index)
                    h_positions.append(
                        add_sp2_hydrogen(self.structure[site_index], neighbors)
                    )
        # CN 3 is not flagged here since that would need a careful evaluation
        # of the bond order.
        return undercoordinated_carbons, h_positions
|
#-*- coding:utf-8 -*
import entity
import time
import character
import movingent
import livingent
import hitbox
import files
# Map-tile marker characters. Meanings are inferred from the names — confirm
# against the map/collision code that consumes them.
void_collision ="0"
random_zone="O"
damage_Zone= "¤"
_wall = "X"
Gostwall = "-"
take_damage = "."
# NOTE(review): "files" is already imported above; this repeat is harmless but
# redundant.
import files
#_____Create____________________________________________________________________
def create_shooting_ent(Entity, damage, bulletSpeed, assetShot, shotDelay, color, lastShot=None) :
    """
    G{classtree}
    DESCRIPTION
    ===========
    Give an entity the ability to shoot.
    PARAM
    =====
    @param Entity: entity to modify
    @type Entity : dict
    @param damage: damage inflicted by a projectile
    @type damage : int
    @param bulletSpeed : speed of the projectiles
    @type bulletSpeed : int
    @param assetShot : asset of the projectile
    @type assetShot : list
    @param shotDelay : delay between two shots
    @type shotDelay : int
    @param lastShot : last shot as [time of the shot, shot number];
        defaults to [now, 0]
    @type : list
    RETOUR
    ======
    @return Entity : an entity able to shoot
    @rtype Entity : dict
    """
    assert type(Entity) is dict
    assert "entity" in Entity["Type"]
    if lastShot is None:
        # Bug fix: the old default argument [time.time(), 0] was built once at
        # definition time and shared (and mutated) by every shooter created
        # without an explicit lastShot; build a fresh list per call instead.
        lastShot = [time.time(), 0]
    Entity["damage"] = damage
    Entity["bulletSpeed"] = bulletSpeed
    Entity["assetShot"] = assetShot
    Entity["shotDelay"] = shotDelay
    Entity["baseShotDelay"] = shotDelay
    Entity["lastShot"] = lastShot
    Entity["Type"].append("shootingEnt")
    Entity["bulletColor"]=color
    return Entity
def create_bullet(Entity, damage, origine) :
    """
    G{classtree}
    DESCRIPTION
    ===========
    Tag an existing entity as a bullet.
    PARAM
    =====
    @param Entity: entity to modify
    @type Entity : dict
    @param damage: damage inflicted by the projectile
    @type damage : int
    @param origine: name of the entity that fired this projectile
    @type origine : str
    RETOUR
    ======
    @return Entity : an entity of type bullet
    @rtype Entity : dict
    """
    assert type(Entity) is dict
    assert "entity" in Entity["Type"]
    Entity["damageToInflict"] = damage
    Entity["origine"] = origine
    Entity["Type"].append("bullet")
    return Entity
#_____Accesseur____________________________________________________________________
def nb_shot(Entity) :
    """
    G{classtree}
    DESCRIPTION
    ===========
    Return the sequence number of the next bullet to fire.
    PARAM
    =====
    @param Entity: entity to read from
    @type Entity : dict
    RETOUR
    ======
    @return : number of the next bullet that will be fired
    @rtype : int
    """
    assert type(Entity) is dict
    assert "shootingEnt" in Entity["Type"]
    # lastShot holds [time of last shot, shot counter].
    last_shot_record = Entity["lastShot"]
    return last_shot_record[1]
#_____Modificateur____________________________________________________________________
def as_shot(Entity):
    """
    G{classtree}
    DESCRIPTION
    ===========
    Record that the entity just fired: bump the shot counter and stamp the
    time of the shot. Call right after a shot.
    PARAM
    =====
    @param Entity: entity whose shot counter is incremented
    @type Entity : dict
    RETOUR
    ======
    @return : the entity with its shot counter incremented
    @rtype : dict
    """
    assert type(Entity) is dict
    assert "shootingEnt" in Entity["Type"]
    # Mutate the list in place so any alias of lastShot stays in sync.
    last_shot_record = Entity["lastShot"]
    last_shot_record[1] += 1
    last_shot_record[0] = time.time()
    return Entity
def damageUp(Entity, amount):
    """
    G{classtree}
    DESCRIPTION
    ===========
    Increase the entity's bullet damage.
    PARAM
    =====
    @param Entity: entity whose damage is increased
    @type Entity : dict
    @param amount: extra damage to add
    @type amount : int
    RETOUR
    ======
    @return : the entity with its damage increased
    @rtype : dict
    """
    assert type(Entity) is dict
    assert "shootingEnt" in Entity["Type"]
    Entity["damage"] = Entity["damage"] + amount
    return Entity
def fireRateUp(Entity, amount) :
    """
    G{classtree}
    DESCRIPTION
    ===========
    Increase the entity's fire rate by lowering the delay between shots.
    The delay never drops below 0.01 s so shots cannot outpace the
    simulation loop.
    PARAM
    =====
    @param Entity: entity whose fire rate is increased
    @type Entity : dict
    @param amount: amount by which the shot delay is reduced
    @type amount : int
    RETOUR
    ======
    @return : the entity with its fire rate increased
    @rtype : dict
    """
    assert type(Entity) is dict
    assert "shootingEnt" in Entity["Type"]
    reduced_delay = Entity["shotDelay"] - amount
    if reduced_delay >= 0.01:
        Entity["shotDelay"] = reduced_delay
    return Entity
#_____Action____________________________________________________________________
def shoot(Entity) :
    """
    G{classtree}
    DESCRIPTION
    ===========
    Make an entity fire: build and return the projectile entity.
    PARAM
    =====
    @param Entity: the entity that fires
    @type Entity : dict
    RETOUR
    @return : an entity representing the fired projectile
    @rtype : dict
    """
    assert type(Entity) is dict
    assert "shootingEnt" in Entity["Type"]
    # Unique name: bullet_<shooter name>_<shot number>.
    bullet_name = "bullet"+"_"+Entity["Name"]+"_"+str(nb_shot(Entity))
    if "character" in Entity["Type"] :
        # Spawn the bullet at the muzzle of the character's gun.
        posture = character.get_posture(Entity)
        pos_gun = character.position_gun(posture)
        x = pos_gun[0]+Entity["x"]
        y = pos_gun[1]+Entity["y"]
    # posture[1] is the facing direction, posture[2] the aim angle; they pick
    # the bullet velocity (Vx, Vy) and which gun sprite to use.
    # NOTE(review): posture/x/y are only assigned in the "character" branch
    # above — confirm that only characters ever call shoot().
    if posture[2] in [90,-90] :
        Vx = 0
        name_asset = "Gun_Vertical"
    elif posture[1] == "Right" :
        Vx = 1
        if posture[2]>0 :
            name_asset="Gun_Slash"
        elif posture[2]<0 :
            name_asset="Gun_UnSlash"
    else :
        Vx = -1
        if posture[2]>0 :
            name_asset="Gun_UnSlash"
        elif posture[2]<0 :
            name_asset="Gun_Slash"
    if posture[2]>0 :
        Vy = -1
    elif posture[2]<0:
        Vy = 1
    else :
        Vy = 0
        name_asset = "Gun_Horizontal"
    # The bullet carries only the sprite matching its direction.
    asset = {}
    asset[name_asset] = Entity["assetShot"][name_asset]
    asset["Actual"] = Entity["assetShot"][name_asset]
    bullet = entity.create_entity(bullet_name,x,y,asset,Entity["bulletColor"])
    bullet = movingent.create_moving_ent(bullet, Vx, Vy, Entity["bulletSpeed"])
    bullet = create_bullet(bullet,Entity["damage"],Entity["Name"])
    return bullet
#____Jeux de Test________________________________________________________________
#____Test data___________________________________________________________________
if (__name__=="__main__"):
    Name = "Asheiya"
    X = 20
    Y = 37
    Asset = {}
    Asset["position"]=["Wait","Right",0]
    for Asheiya_doc in ["Run_Right_0","Wait_Right_0","Run_Left_0","Wait_Left_0","Run_Right_45","Wait_Right_45"]:# eventually an "Asheiya_asset" helper or a txt-based constructor will be used
        Asset[Asheiya_doc]=entity.create_asset("Asheiya/Asset/" + Asheiya_doc + ".txt") #load the asset file
    player = entity.create_entity(Name,X,Y,Asset)
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from pymongo import MongoClient
from scrapy.exceptions import DropItem
from scrapy.conf import settings
import logging
class LagouPipeline(object):
    """Persist scraped Lagou job items into a MongoDB collection."""

    def __init__(self):
        # Connection parameters come from the Scrapy settings module.
        client = MongoClient(settings['MONGODB_SERVER'], settings['MONGODB_PORT'])
        db = client[settings['MONGODB_DB']]
        self.collection = db[settings['MONGODB_COLLECTION']]

    def process_item(self, item, spider):
        """Store one item and return it unchanged for the next pipeline stage."""
        doc = {
            "page": item['page'],
            "name": item['name'],
            "location": item['location'],
            "position": item['position'],
            "exprience": item['exprience'],  # key spelled as the spider produces it
            "money": item['money'],
        }
        # Fix: Collection.insert() is deprecated in pymongo; insert a single
        # document with insert_one instead of wrapping it in a one-element list.
        self.collection.insert_one(doc)
        logging.debug("Item wrote to MongoDB database %s/%s"%(settings['MONGODB_DB'],settings['MONGODB_COLLECTION']))
        return item
|
#__author: Think
#date 2019/8/31
import os,configparser,logging
# Project root is two directory levels above this file; the server config
# lives at conf/server.conf relative to it.
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
config_file = os.path.join(base_dir, 'conf/server.conf')
cf = configparser.ConfigParser()
cf.read(config_file, encoding='utf-8')
#### Set the log file location ####
# NOTE(review): a configured log file that has not been created yet also
# fails this exists() check and falls back to the default — confirm intended.
if os.path.exists(cf.get('log', 'logfile')):
    logfile = cf.get('log', 'logfile')
else:
    logfile = os.path.join(base_dir, 'log/client.log')
#### Set the download/upload directory ####
if os.path.exists(cf.get('download', 'download_dir')):
    download_dir = cf.get('download', 'download_dir')
else:
    download_dir = os.path.join(base_dir, 'temp')
|
# Rewritten from the commented-out draft that was here: the live
# "for i in range(0, digits):" loop had an all-comment (empty) body, which is
# a syntax error, and referenced undefined names (digits, reverse, output).
# The draft converted a number digit-by-digit (ones/tens/hundreds/thousands)
# into Roman numerals; this implements the same conversion correctly.
def int_to_roman(number):
    """Return the Roman-numeral representation of *number* (0 <= number <= 3999)."""
    numerals = (
        (1000, "M"), (900, "CM"), (500, "D"), (400, "CD"),
        (100, "C"), (90, "XC"), (50, "L"), (40, "XL"),
        (10, "X"), (9, "IX"), (5, "V"), (4, "IV"), (1, "I"),
    )
    parts = []
    for value, symbol in numerals:
        count, number = divmod(number, value)
        parts.append(symbol * count)
    return "".join(parts)
|
"""
-------------------------------------------------------------------------------
CROSS-CORRELATION METHOD: cross_cor_method.py
-------------------------------------------------------------------------------
Oberseminar Regelungstechnik - Auto-tuning PID
--------
"""
from matplotlib.pyplot import plot, grid, show, figure, title, xlabel, ylabel, subplot, tight_layout, ylim, xlim, close, loglog, semilogx
import numpy as np
import control
def cross_cor_method(y,u,A,N,Lambda,t,o,dt,PT1):
    """Estimate a system's impulse and frequency response by cross-correlation.

    y, u      : measured output / PRBS input signal
    A, N      : PRBS amplitude and period length
    Lambda    : PRBS clock interval
    t         : time vector, dt the sample time
    o         : unused here; kept for interface compatibility
    PT1       : reference transfer function (analytic impulse response is
                plotted for comparison)
    Returns (G, w, g_s): frequency response, frequency axis, truncated
    impulse-response estimate.
    """
    g_a, tout = control.matlab.impulse(PT1,t)
    """ Correlation estimates -> weighting (impulse) function """
    R_uu = np.correlate(u,u,'full') / len(u)
    R_uy_inv = np.correlate(u,y,'full') / len(u)
    # np.correlate in 'full' mode computes c_{av}[k] = sum_n u[n+k]*conj(y[n]);
    # the cross-correlation needed here is its mirror image: c'[k] = c[-k].
    R_uy = R_uy_inv[::-1]
    # Bug fix: "/" yields a float under Python 3 and floats cannot index or
    # slice arrays — use integer floor division for the midpoint index.
    idx = len(R_uy) // 2
    # k = 2/(dt+0.1)
    # g = R_uy[idx:]*k / R_uu[idx]
    g = 1/(A**2 * ((N+1)/N) * Lambda) * (R_uy[idx:] + A**2 / N)
    g_s = g[:int(N*Lambda/dt)]
    """ Frequency response from the weighting function via FFT """
    # FFT parameters
    Fa = 1/dt  # sampling frequency
    L = len(g_s)
    G = np.fft.fft(g_s)
    # Bug fix: slice bound must be an int under Python 3 (was L/2+1).
    G = dt * G[1:L//2 + 1]
    # Frequency axis
    w = 2 * np.pi * Fa * np.arange(0,len(G))/L
    """ Overview plots """
    figure(1)
    grid()
    subplot(511)
    plot(t,u)
    title('PRBS Signal u')
    subplot(512)
    plot(t,y)
    title('Systemausgang y')
    subplot(513)
    plot(t,R_uu[len(t)-1:])
    title('Autokorrelation R_uu')
    xlabel('t in s')
    subplot(514)
    plot(R_uy)
    title('Kreuzkorrelation R_uy')
    subplot(515)
    plot(t,g)
    plot(tout,g_a,'--r')
    title('"perodische" Impulsantwort g')
    tight_layout()
    return G, w, g_s
# Competitive-programming task: for each test case, pair up indices holding
# unequal values; if all values are equal no pair exists.
for _ in range(int(input())):
    n = int(input())
    arr = list(map(int, input().split()))
    # "no" stays True only when every element equals the first one.
    no = True
    zeroth = arr[0]
    for i in arr:
        if i!=zeroth:
            no = False
            break
    if no:
        print('NO')
    else:
        print('YES')
        # Greedily connect index i to each later index j with a different
        # value while at least one of the two is still unconnected;
        # printed indices are 1-based.
        alr_con = [False]*n
        for i in range(n):
            assigned = False
            for j in range(i+1, n):
                if (not alr_con[j] or not alr_con[i]) and arr[j]!=arr[i]:
                    alr_con[i] = True
                    alr_con[j] = True
                    assigned = True
                    print(i+1, j+1)
            if not assigned:
                break
|
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications import densenet
from .base import SaliencyMap
class GradCam(SaliencyMap):
    """Grad-CAM saliency: weight the last conv layer's activations by the
    pooled gradients of the top predicted class."""

    def get_mask(self, image, last_conv_layer_name, preprocess=True):
        """Computes GradCAM saliency map.

        Args:
            image (ndarray): Input image batch fed to ``self.model``.
            last_conv_layer_name (str): Name of the final convolutional layer
                (e.g. 'conv5_block16_concat' for DenseNet).
            preprocess (bool): Unused here; kept for interface compatibility.

        Returns:
            ndarray: Heatmap over the conv layer's spatial grid, normalized
            to [0, 1].
        """
        # Split the model in two: input -> last conv layer, and
        # conv output -> final predictions.
        last_conv_layer = self.model.get_layer(last_conv_layer_name)
        last_conv_layer_idx = self.model.layers.index(last_conv_layer)
        last_conv_layer_model = tf.keras.Model(self.model.inputs, last_conv_layer.output)
        classifier_input = tf.keras.Input(shape=last_conv_layer.output.shape[1:])
        x = classifier_input
        classifier_layers = self.model.layers[last_conv_layer_idx+1:]
        for layer in classifier_layers:
            x = layer(x)
        classifier_model = tf.keras.Model(classifier_input, x)
        with tf.GradientTape() as tape:
            # Compute activations of the last conv layer and make the tape watch it
            last_conv_layer_output = last_conv_layer_model(image)
            tape.watch(last_conv_layer_output)
            # Compute class predictions
            preds = classifier_model(last_conv_layer_output)
            top_pred_index = tf.argmax(preds[0])
            top_class_channel = preds[:, top_pred_index]
        # Gradient of the top predicted class w.r.t. the conv feature map.
        grads = tape.gradient(top_class_channel, last_conv_layer_output)
        # Mean gradient intensity per feature-map channel.
        pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))
        # Weight each channel by "how important" it is for the top class.
        last_conv_layer_output = last_conv_layer_output.numpy()[0]
        pooled_grads = pooled_grads.numpy()
        for i in range(pooled_grads.shape[-1]):
            last_conv_layer_output[:, :, i] *= pooled_grads[i]
        # Channel-wise mean gives the class-activation heatmap.
        heatmap = np.mean(last_conv_layer_output, axis=-1)
        # Normalize to [0, 1] for visualization.
        heatmap = np.maximum(heatmap, 0)
        max_val = np.max(heatmap)
        # Bug fix: guard the division — an all-nonpositive heatmap previously
        # divided by zero and produced NaNs.
        if max_val > 0:
            heatmap = heatmap / max_val
        return heatmap
|
from radient import RadientClient
from integration import IntegrationClient
from integration_controller import IntegrationController
from radient_controller import RadientController
from gui import app
|
# Vanessa Dunford
# Github: https://github.com/vanicci
# Linkedin: https://www.linkedin.com/in/vanessa-dunford-08ab7663/
# Youtube: http://bit.ly/JoinMeOnYouTube
# Twitter: https://twitter.com/vaniccilondon
# Table of contents. Write a table of contents program here. Start the program with a list holding all of the information for your table of contents (chapter names, page numbers, and so on). Then print out the information from the list in a beautifully formatted table of contents. Use string formatting such as left align, right align, center.
# Table-of-contents data: [chapter label, subject title, page number].
mylist = [["Chapter 1", "Getting Started", 1],
          ["Chapter 2", "Numbers", 9],
          ["Chapter 3", "Letters", 13],]
print(": Chapter of the Book : Subject Title : Page Number :")
for chapter, subject, page in mylist:
    # Pad each column with spaces so the colons line up; print() inserts one
    # extra space between arguments, which the padding widths account for.
    print(":", chapter, " "*(18-len(chapter)), ":",
          subject, " "*(16-len(subject)), ":",
          page, " "*(10-len(str(page))), ":")
|
# Beginner exercises: arithmetic, a function, input, and a counting loop.
a = 9
b = 8
print(a + b)


def luaspersegi(panjang, lebar):
    """Print and return the area of a rectangle of size panjang x lebar."""
    total = panjang * lebar
    print("luasnya adalah ", total)
    return total


luaspersegi(10, 5)
teks = str(input("siapa namamu ?"))
print("nama saya adalah", teks)
x = 5
while x < 10:
    print(x)
    # Bug fix: the loop decremented x (x = x - 1), so "x < 10" stayed true
    # forever and the loop never terminated; count upward instead.
    x = x + 1
import cv2
import numpy as np
class PerspectiveTransform:
    ''' transforms the image to a bird-eyes view '''
    def __init__(self, image):
        # Source quadrilateral (src) outlines the road region in the camera
        # view; destination rectangle (dst) is where it maps to in the
        # top-down view. The hard-coded offsets are tuned for this camera
        # setup — presumably a 1280x720 dash-cam frame; confirm.
        self.image = image
        self.img_size = (image.shape[1], image.shape[0])
        self.src = np.float32(
            [[(self.img_size[0] / 2) - 65, self.img_size[1] / 2 + 100],
             [((self.img_size[0] / 6) - 10), self.img_size[1]],
             [(self.img_size[0] * 5 / 6) + 60, self.img_size[1]],
             [(self.img_size[0] / 2 + 65), self.img_size[1] / 2 + 100]])
        self.dst = np.float32(
            [[(self.img_size[0] / 5), -20],
             [(self.img_size[0] / 5), self.img_size[1]],
             [(self.img_size[0] * 4 / 5), self.img_size[1]],
             [(self.img_size[0] * 4 / 5), -20]])
        # Forward (M) and inverse (Minv) homographies between the two views.
        self.M = cv2.getPerspectiveTransform(self.src, self.dst)
        self.Minv = cv2.getPerspectiveTransform(self.dst, self.src)
    def get(self):
        ''' returns perspective transformed image using transformation matrix '''
        return cv2.warpPerspective(self.image, self.M, self.img_size, flags=cv2.INTER_LINEAR)
    def get_inverse(self):
        ''' returns perspective transformed image using inverse transformation matrix '''
        return cv2.warpPerspective(self.image, self.Minv, self.img_size, flags=cv2.INTER_LINEAR)
    def visualize(self):
        ''' visualize perspective transformation effect

        Draws the src quadrilateral on a copy of the input and the dst
        rectangle on the warped image, then returns both side by side.
        NOTE(review): the points are float32; recent OpenCV versions require
        integer coordinates for cv2.line — confirm against the cv2 in use.
        '''
        image = self.image.copy()
        cv2.line(image, tuple(self.src[0]), tuple(self.src[1]), (255, 0, 0), 3)
        cv2.line(image, tuple(self.src[1]), tuple(self.src[2]), (255, 0, 0), 3)
        cv2.line(image, tuple(self.src[2]), tuple(self.src[3]), (255, 0, 0), 3)
        cv2.line(image, tuple(self.src[3]), tuple(self.src[0]), (255, 0, 0), 3)
        image_perspective = self.get()
        cv2.line(image_perspective, tuple(self.dst[0]), tuple(self.dst[1]), (255, 0, 0), 3)
        cv2.line(image_perspective, tuple(self.dst[1]), tuple(self.dst[2]), (255, 0, 0), 3)
        cv2.line(image_perspective, tuple(self.dst[2]), tuple(self.dst[3]), (255, 0, 0), 3)
        cv2.line(image_perspective, tuple(self.dst[3]), tuple(self.dst[0]), (255, 0, 0), 3)
        # White border around each panel so the comparison reads clearly.
        image = cv2.copyMakeBorder(image, 10, 10, 10, 10,
                                   cv2.BORDER_CONSTANT, value=(255, 255, 255))
        image_perspective = cv2.copyMakeBorder(image_perspective, 10, 10, 10, 10,
                                               cv2.BORDER_CONSTANT, value=(255, 255, 255))
        return np.hstack((image, image_perspective))
|
from flask import Flask,render_template,url_for,request
import joblib
from textpreprocessing import pre_process
# Maps the classifier's numeric label to a human-readable topic name.
topics = {1:'Computer Science', 2:'Physics', 3:'Mathematics', 4:'Statistics', 5:'Quantitative Biology', 6:'Quantitative Finance'}
app = Flask(__name__)
# Trained classification pipeline, loaded once at startup.
pipeline = joblib.load('abstract_classification.joblib')
@app.route('/')
def home():
    # Landing page with the abstract-submission form.
    return render_template('home.html')
@app.route('/predict', methods=['POST'])
def predict():
    # Classify the submitted abstract text and render the predicted topic.
    abstract = request.form['message']
    prediction = pipeline.predict([abstract])
    topic_prediction = topics[prediction[0]]
    return render_template('result.html', prediction=topic_prediction)
if __name__ == '__main__':
    app.run()
|
import os
from django.contrib.gis.db import models
from django.contrib.auth.models import User , Group
from allauth.account.signals import user_signed_up
from django.contrib.sites.models import Site
from django.db.models.signals import post_save ,m2m_changed
from django.dispatch import receiver
from sorl.thumbnail.fields import ImageField
from cartoview2.core.apps_helper import delete_installed_app
from djangoratings.fields import RatingField
current_folder, filename = os.path.split(os.path.abspath(__file__))
temp_dir = os.path.join(current_folder,'temp')
class UserProfile(models.Model):
    """Extra per-user data; one profile per auth User."""
    user = models.OneToOneField(User)
    first_name = models.CharField(max_length=100)
    last_name = models.CharField(max_length=100)
class HomePage(models.Model):
    """A named home-page entry (title shown to users, name as identifier)."""
    title = models.CharField(max_length=100)
    name = models.CharField(max_length=100)
    def __unicode__(self):
        return self.title
@receiver(user_signed_up)
def new_user_signup(sender, **kwargs):
    # Create an (empty) profile for every account registered through allauth.
    # NOTE(review): first_name/last_name are left blank here — confirm they
    # are filled in elsewhere.
    p = UserProfile(user = kwargs['user'])
    p.save()
@receiver(post_save, sender=User)
def user_post_save(sender, instance, created, **kwargs):
    """ This method is executed whenever an user object is saved

    Ensures every user belongs to the "registered" group and staff users
    additionally to "admin". NOTE(review): this runs on every save, not just
    on creation ("created" is unused) — group adds are idempotent, so this is
    harmless but redundant.
    """
    if instance:
        registered_group, reg_created = Group.objects.get_or_create(name='registered')
        instance.groups.add(registered_group)
        if instance.is_staff:
            admin_group, admin_created = Group.objects.get_or_create(name='admin')
            instance.groups.add(admin_group)
class AppTag(models.Model):
    """A tag used to categorize installed apps."""
    name = models.CharField(max_length=200, unique=True, null=True, blank=True)
    def __unicode__(self):
        return self.name
class App(models.Model):
    """An installed Cartoview application and its catalog metadata."""
    def only_filename(instance, filename):
        # upload_to callback: store uploaded images under their bare filename.
        return filename
    name = models.CharField(max_length=200, unique=True, null=True, blank=True)
    title = models.CharField(max_length=200, null=True, blank=True)
    description = models.TextField(null=True, blank=True)
    short_description = models.TextField(null=True, blank=True)
    app_url = models.URLField(null=True, blank=True)
    author = models.CharField(max_length=200, null=True, blank=True)
    author_website = models.URLField(null=True, blank=True)
    license = models.CharField(max_length=200, null=True, blank=True)
    tags = models.ManyToManyField(AppTag, null=True, blank=True)
    date_installed = models.DateTimeField('Date Installed', auto_now_add=True)
    installed_by = models.ForeignKey(User, null=True, blank=True)
    single_instance = models.BooleanField(default=False, null=False, blank=False)
    order = models.SmallIntegerField(null=False, blank=False, default=0)
    owner_url = models.URLField(null=True, blank=True)
    help_url = models.URLField(null=True, blank=True)
    app_logo = ImageField(upload_to=only_filename, help_text="The site will resize this master image as necessary for page display", blank=True, null = True)
    is_suspended = models.NullBooleanField(null=True, blank=True , default= False)
    app_img = ImageField(upload_to=only_filename, help_text="The site will resize this master image as necessary for page display", blank=True, null = True)
    in_menu = models.NullBooleanField(null=True, blank=True , default= True)
    rating = RatingField(range=5, can_change_vote=True)
    contact_name = models.CharField(max_length=200, null=True, blank=True)
    contact_email = models.EmailField (null=True, blank=True)
    def delete(self, *args, **kwargs):
        # Remove the app's files/registration before deleting the DB row.
        delete_installed_app(self)
        # Bug fix: super(type(self), self) recurses forever when delete() is
        # invoked on a subclass instance; name the class explicitly. Arguments
        # are forwarded so Django's delete(using=..., ...) signature still works.
        super(App, self).delete(*args, **kwargs)
    def __unicode__(self):
        return self.title
def only_filename(instance, filename):
    # Module-level upload_to callback: store uploads under their bare filename.
    return filename
class AppInstance(models.Model):
    """A configured instance of an installed App, optionally with a map extent."""
    app = models.ForeignKey(App,null=True,blank=True)
    title = models.CharField(max_length=200)
    description = models.TextField(null=True, blank=True)
    thumbnail = models.ImageField(upload_to=only_filename, null=True,blank=True)
    date_installed = models.DateTimeField("Date Installed",auto_now_add=True)
    owner = models.ForeignKey(User,null=True, blank=True)
    # Geographic bounding polygon of the instance (WGS84).
    location_extent = models.PolygonField('Extent', srid = 4326, null = True, blank = True)
    objects = models.GeoManager()
    def __unicode__(self):
        return self.title
class KeyValueGroup(models.Model):
    """A named group of KeyValue settings."""
    name = models.CharField(max_length=200,unique=True)
    def __unicode__(self):
        return self.name
#TODO support data types (string/int/float/bool/etc...)
class KeyValue(models.Model):
    """A single string setting inside a KeyValueGroup."""
    key = models.CharField(max_length=200)
    value = models.CharField(max_length=200,null=True)
    group = models.ForeignKey(KeyValueGroup)
    def __unicode__(self):
        # Display as "<group>_<key>", lowercased with spaces replaced.
        name = '%s_%s' % (self.group.name, self.key)
        return name.replace(' ','_').lower()
class Settings(models.Model):
    """Per-site Cartoview display settings."""
    site = models.OneToOneField(Site , related_name='cartoview_settings')
    show_home_logo = models.BooleanField(default=True)
|
from NeuralNet import buildNeuralNet
truthTable = [([0, 0], [0]),
([1, 0], [1]),
([0, 1], [1]),
([1, 1], [0])]
def buildNet(hiddenLayer=None):
    """Train the XOR network 5 times with the given hidden-layer sizes and
    return the mean accuracy.

    hiddenLayer: list of hidden-layer sizes; defaults to no hidden layer.
    """
    # Bug fix: the old default argument "[]" was a single shared list created
    # at definition time; use None and build a fresh list per call.
    if hiddenLayer is None:
        hiddenLayer = []
    accuracies = []
    for _ in range(5):
        # The returned network object is not needed here, only the accuracy.
        _, accuracy = buildNeuralNet(examples=(truthTable, truthTable), hiddenLayerList=hiddenLayer, maxItr=5000)
        accuracies.append(accuracy)
    return float(sum(accuracies)) / float(len(accuracies))
# Sweep hidden-layer sizes 0..29 and report the mean accuracy for each.
results = []
results.append(buildNet([]))
for hidden_size in range(1, 30):
    results.append(buildNet([hidden_size]))
# Compatibility fix: the print STATEMENT form only parses under Python 2;
# the parenthesized single-argument form below works under both 2 and 3.
for i in range(len(results)):
    print("%d\t%f" % (i, results[i]))
# coding=utf-8
"""
@project : algorithmPython
@ide : PyCharm
@file : __init__.py
@author : illusion
@desc :
@create : 2021/6/7 1:56 下午:01
"""
|
#PF-Prac-1
'''
Created on Mar 23, 2019
@author: vijay.pal01
'''
def add_string(str1):
    """Return str1 with "ing" appended, or "ly" appended if it already ends
    in "ing". Strings shorter than 3 characters are returned unchanged.
    """
    if len(str1) < 3:
        return str1
    if str1.endswith("ing"):
        return str1 + "ly"
    # Bug fix: the original had a second, unreachable "return str1" after
    # this point (dead code); removed.
    return str1 + "ing"
# Demo: "com" has no "ing" suffix, so the result is "coming".
str1="com"
print(add_string(str1))
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.utils import timezone
class User(AbstractUser):
    """Project user model; inherits everything from Django's AbstractUser."""
    pass
class Post(models.Model):
    """A user's post with a free-text body and per-user likes."""
    body = models.CharField(max_length=255, blank=True)
    created_on = models.DateTimeField(default=timezone.now)
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    likes = models.ManyToManyField(User, blank=True, related_name='likes')
    def serialize(self):
        # JSON-friendly representation used by the API views.
        return {
            "id": self.id,
            "body": self.body,
            "created_on": self.created_on.strftime("%b %d %Y, %I:%M %p"),
            "author": self.author.username,
            "author_id":self.author.pk,
            "likes": [user.username for user in self.likes.all()],
        }
    def __str__(self) -> str:
        return f'{self.author.username} posted {self.body}'
# TO DO LATER AFTER SUBMISSION
class Comments(models.Model):
    """A user comment on a Post, with per-user like/dislike relations."""
    comment = models.CharField(max_length=255, blank=True)
    created_on = models.DateTimeField(default=timezone.now)
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    # Fix: "default=0" on a ManyToManyField is invalid — Django ignores it and
    # raises a system-check warning (fields.W340-style); the relation simply
    # starts empty, so the kwarg is removed.
    likes = models.ManyToManyField(User, blank=True, related_name='comment_likes')
    dislikes = models.ManyToManyField(User, blank=True, related_name='comment_dislikes')
    class Meta:
        verbose_name_plural = 'Comments'
    def __str__(self):
        return f'{self.author.username} commented {self.comment}'
'''CHECK OUT SIGNALS.PY AND APP.PY FILES FOR HOW THE PROFILE IS CREATED WHEN
EACH USER INSTANCE IS MADE DURING REGISTRATION'''
class Profile(models.Model):
    """A user's social profile holding follower relations; created per user
    via signals (see signals.py / apps.py)."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    followers = models.ManyToManyField(User, blank=True, related_name='followers')
    def __str__(self):
        return f'{self.user.username} Profile'
    def count_followers(self):
        # Number of users following this profile's user.
        return self.followers.count()
    def count_following(self):
        # Number of profiles this user follows (profiles listing them as follower).
        return Profile.objects.filter(followers = self.user).count()
    def serialize(self):
        # JSON-friendly representation used by the API views.
        return {
            "id": self.id,
            "username":self.user.username,
            "followers":[user.username for user in self.followers.all()],
            "followers_count": self.followers.count(),
            "following_count": Profile.objects.filter(followers = self.user).count()
        }
|
# Bug fix: "import config.py" tried to import a submodule named "py" from a
# package "config"; the module itself is just "config".
import config

# NOTE(review): SetAngle, pwm, GPIO, sleep, food_weight and tare are not
# defined in this file — presumably they come from the config/hardware
# modules; confirm they are in scope before running.
def cat_feed():
    """Run one feeding cycle: while the dish holds 500 units or less
    (grams, presumably), sweep the servo to dispense food, then wait
    three hours between checks.
    """
    # Bug fixes throughout: "def cat_feed:" lacked parentheses, "false"/"true"
    # are not Python names (use False/True), and str + int concatenation in
    # the print raised TypeError.
    get_weight = food_weight
    if get_weight <= 500:
        full = False
        while not full:
            # Sweep the servo back and forth to release food.
            SetAngle(90)
            sleep(5)
            SetAngle(-90)
            sleep(5)
            SetAngle(90)
            sleep(5)
            pwm.stop()
            GPIO.cleanup()
            print("The food weight is now" + str(get_weight))
            sleep(10800)
            if get_weight <= 500:
                full = False
            else:
                print("The dish is full")
                full = True
    sleep(10800)
    if get_weight <= 500:
        full = False
    # Bug fix: the bare names "tare" and "cat_feed" evaluated the functions
    # without calling them (no-ops); call them. NOTE(review): the recursive
    # re-invocation mirrors the original's apparent intent of looping forever
    # — a while-loop driver would be safer (recursion depth grows each cycle).
    tare()
    cat_feed()


cat_feed()
|
# 猫眼电影介绍url
# http://maoyan.com/films/1217236
import requests, time
from fake_useragent import UserAgent
import json, csv, os
import pandas as pd
import datetime
class Spidermaoyan():
    """Fetch short comments for one Maoyan movie from its JSON API and append
    them to a CSV file."""
    # Shared request headers; a random User-Agent lowers the chance of blocking.
    headers = {
        "User-Agent": UserAgent(verify_ssl=False).random,
        "Host": "m.maoyan.com",
        "Referer": "http://m.maoyan.com/movie/1200486/comments?_v_=yes"
    }
    def __init__(self, url):
        # url: one page of the comment API (offset/startTime baked in).
        self.url = url
    # Send the GET request and decode the JSON payload.
    def get_json(self):
        response_comment = requests.get(self.url, headers=self.headers)
        json_comment = response_comment.text
        json_comment = json.loads(json_comment)
        return json_comment
    # Extract the comment fields and hand them to file_do for storage.
    def get_data(self, json_comment):
        json_response = json_comment["cmts"]  # list of comment dicts
        list_info = []
        for data in json_response:
            cmtime = data["time"]
            userId = data["userId"]
            cityName = data["cityName"]
            content = data["content"]
            if "gender" in data:
                gender = data["gender"]
            else:
                gender = 0  # 0 = gender not provided
            nickName = data["nickName"]
            userLevel = data["userLevel"]
            score = data["score"]
            list_one = [cmtime, userId, nickName, gender, cityName, userLevel, score, content]
            list_info.append(list_one)
        self.file_do(list_info)
    # Append the rows to the CSV, writing the header row first when the file
    # is still empty. NOTE(review): the output path is hard-coded and the file
    # must already exist (os.path.getsize raises otherwise).
    def file_do(self, list_info):
        # Check the current file size to decide whether a header is needed.
        file_size = os.path.getsize(r'D:/graduate/DyingToSurvive.csv')
        if file_size == 0:
            # Header row: comment date, user id, nickname, gender, city,
            # Maoyan level, score, comment text.
            name = ['评论日期','用户ID','评论者昵称', '性别', '所在城市', '猫眼等级', '评分', '评论内容']
            # Build a DataFrame so pandas writes header + rows together.
            file_test = pd.DataFrame(columns=name, data=list_info)
            # Write the data out.
            file_test.to_csv(r"D:/graduate/DyingToSurvive.csv", encoding="utf_8_sig", index=False)
        else:
            with open(r"D:/graduate/DyingToSurvive.csv","a+", encoding="utf_8_sig", newline='') as file_test:
                # Append to the end of the existing file.
                writer = csv.writer(file_test)
                # Write the rows.
                writer.writerows(list_info)
# 猫眼电影短评接口
offset =15
# 电影是2018.9.21上映的
startTime=datetime.date(2018,7,6)
str_startTime=str(startTime)
count=0
try:
while startTime<datetime.date(2018,12,3):
comment_api ='http://m.maoyan.com/mmdb/comments/movie/1200486.json?_v_=yes&offset={0}&startTime={1}%2022%3A13%3A52'.format(
offset,str_startTime)
s0 = Spidermaoyan(comment_api)
json_comment = s0.get_json()
if json_comment["total"] == 0: # 当前时间内评论爬取完成
offset=15
startTime=startTime+datetime.timedelta(days=1)
str_startTime=str(startTime)
continue
s0.get_data(json_comment)
offset +=15
count+=1
print(count)
except:
count=count*15
print('当前可取数据:%d条爬取完成'%count*15)
#time.sleep()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2015-08-24 17:41:23
# @Author : Jintao Guo
# @Email : guojt-4451@163.com
import glob
import multiprocessing
import os
import subprocess
import sys
import tempfile
def get_args():
    """Parse command-line arguments for the batch-gzip tool.

    Prints the help text and exits when no arguments are supplied.
    """
    try:
        import argparse
    except ImportError as imerr:
        print("\033[1;31m" + str(imerr) + " \033[0m")
        sys.exit()
    parser = argparse.ArgumentParser(fromfile_prefix_chars='@',
                                     description="description")
    # type=int matches the INT metavar (was type=str, forcing callers to
    # convert manually).
    parser.add_argument("-p",
                        metavar="INT",
                        default=1,
                        dest="processes_number",
                        type=int,
                        help="gzip multiple samples simultaneously [1]")
    group = parser.add_mutually_exclusive_group()
    group.add_argument("-i",
                       metavar="File",
                       dest="input_file",
                       help="input file to gzip")
    group.add_argument("-I",
                       metavar="Files",
                       dest="input_list",
                       help="list of input files to gzip")
    group.add_argument("-r",
                       metavar="Regular_expression",
                       type=str,
                       dest="input_re",
                       help="input files to gzip")
    clustered_group = parser.add_argument_group("Clustered")
    clustered_group.add_argument("--qsub",
                                 action="store_true",
                                 default=False,
                                 dest="qsub",
                                 help="run crest in cluster [False]")
    clustered_group.add_argument("--nodes",
                                 metavar="STR",
                                 dest="node_name",
                                 type=str,
                                 help="name of nodes (e.g: n1,n2,...)")
    clustered_group.add_argument("-n",
                                 metavar="INT",
                                 default=1,
                                 dest="nodes_number",
                                 type=int,
                                 help="number of nodes [1]")
    if len(sys.argv) == 1:
        # No arguments at all: show usage and stop instead of doing nothing.
        parser.print_help()
        sys.exit()
    return parser.parse_args()
def gzip(f):
    """Compress file *f* with the system gzip binary.

    Uses an argument list (no shell) so filenames containing shell
    metacharacters cannot inject commands, unlike os.system("gzip " + f).
    """
    print("gzip " + f)
    subprocess.run(["gzip", f])
def qsub_gzip(f):
    """Submit a single-file gzip job to the PBS cluster via qsub.

    Reads the module-level ``args`` (node_name) set by main().
    """
    # "w+" text mode: NamedTemporaryFile defaults to binary, and writing
    # str to a binary file raises TypeError on Python 3.
    with tempfile.NamedTemporaryFile("w+") as ftmp:
        ftmp.write("#!/bin/bash\n")
        ftmp.write("#PBS -o " +
                   os.path.split(os.path.realpath(__file__))[0] + "/log\n")
        if args.node_name:
            ftmp.write("#PBS -l nodes=1:" + args.node_name + ":ppn=1,walltime=100:00:00\n")
        else:
            ftmp.write("#PBS -l nodes=1:ppn=1,walltime=100:00:00\n")
        ftmp.write("#PBS -j oe\ncd $PBS_O_WORKDIR\n")
        ftmp.write("gzip " + f)
        # Flush so qsub sees the complete script on disk before we submit.
        ftmp.flush()
        os.system("qsub " + ftmp.name)
def makedir(new_dir, exist_dir=None):
    """Make a directory if it doesn't exist, handling concurrent race conditions.

    :param new_dir: directory name (or path) to create
    :param exist_dir: optional parent directory to create new_dir under
    """
    if exist_dir:
        new_dir = os.path.join(exist_dir, new_dir)
    if os.path.exists(new_dir):
        print("The " + new_dir + " is already exist")
    else:
        print("Make " + new_dir)
        # exist_ok guards the check-then-create race that the docstring
        # promises to handle (another process may create it in between).
        os.makedirs(new_dir, exist_ok=True)
def main():
    """Entry point: parse args, then gzip locally or submit qsub jobs."""
    global args
    args = get_args()
    pool = multiprocessing.Pool(processes=int(args.processes_number))
    makedir(os.path.split(os.path.realpath(__file__))[0] + "/log")
    if args.input_file:
        if args.qsub:
            qsub_gzip(args.input_file)
        else:
            gzip(args.input_file)
    else:
        if args.input_list:
            with open(args.input_list) as f:
                f_list = [line.strip() for line in f]
        elif args.input_re:
            f_list = glob.glob(args.input_re)
        else:
            # The argparse group is not required: with no input flag the
            # original left f_list unbound and crashed with NameError.
            f_list = []
        if args.qsub:
            pool.map(qsub_gzip, f_list)
        else:
            pool.map(gzip, f_list)
    pool.close()
    pool.join()
# Script entry point; main() returns None, so the exit status is 0.
if __name__ == '__main__':
    sys.exit(main())
|
class Solution:
    # Do not treat the matrix as a 2d array: multiply m*n and binary-search
    # the flattened, fully-sorted index range instead.
    def searchMatrix(self, matrix, target):
        """Return True if target occurs in the row-wise sorted matrix."""
        if matrix:
            return self.binarySearch(0, len(matrix) * len(matrix[0]) - 1, matrix, target)
        return False

    def binarySearch(self, low, high, matrix, target):
        """Recursive binary search over flattened indices [low, high]."""
        if low > high:
            return False
        # Floor division: plain '/' yields a float on Python 3, which then
        # fails as a list index; '//' behaves identically on Python 2 and 3.
        mid = (low + high) // 2
        row = mid // len(matrix[0])
        col = mid % len(matrix[0])
        if matrix[row][col] == target:
            return True
        if matrix[row][col] < target:
            return self.binarySearch(mid + 1, high, matrix, target)
        return self.binarySearch(low, mid - 1, matrix, target)
# Quick manual check: 0 is not in [[1]], so this prints False.
S = Solution()
print(S.searchMatrix([[1]], 0))  # print() works on both Python 2 and 3
|
__author__ = "Yuzhou_1shu"

number = input("请输入一个不多于5位的正整数:")
# isdigit() rejects signs, letters and empty input up front; the original
# called int(number) unconditionally and crashed with ValueError on
# non-numeric input.
if not number.isdigit() or len(number) > 5:
    print("Error, 请输入一个不多于5位的正整数:")
else:
    print("输入正整数的长度为:", len(number))
    print("逆序打印出各位数字", number[::-1])
import wx
class MyApp(wx.App):
    """Application object: creates the main frame and makes it the top window."""

    def OnInit(self):
        # Called by wxPython during startup; must return True to continue.
        self.frame = MyFrame(None, title="The Main Frame")
        self.SetTopWindow(self.frame)
        self.frame.Show()
        return True
class MyFrame(wx.Frame):
    """Main frame: a panel holding a stock Cancel button and an Edit menu."""

    def __init__(self, parent, id=wx.ID_ANY, title="", pos=wx.DefaultPosition, size=wx.DefaultSize,
                 style=wx.DEFAULT_FRAME_STYLE, name="MyFrame"):
        super(MyFrame, self).__init__(parent, id, title, pos, size, style, name)
        # Attributes
        self.panel = wx.Panel(parent=self)
        # Using the stock ID wx.ID_CANCEL, so no explicit label is needed.
        self.button = wx.Button(self.panel, wx.ID_CANCEL, pos=(50, 50))
        # Remember the button's ID so the handler can look it up later.
        self.butid = self.button.GetId()
        print(self.butid)
        self.Bind(wx.EVT_BUTTON, self.button_click, self.button)
        menu_bar = wx.MenuBar()
        edit_menu = wx.Menu()
        edit_menu.Append(wx.NewId(), item="Test1", helpString="test when click")
        # Stock ID: the system supplies the default item text.
        edit_menu.Append(wx.ID_PREFERENCES)
        menu_bar.Append(edit_menu, title="Edit")
        self.SetMenuBar(menu_bar)

    def button_click(self, event):  # event handler for the button
        for children in self.GetChildren():
            print(children)
        # Demonstrate widget navigation: find the button again by ID ...
        button = self.panel.FindWindowById(self.butid)
        print(type(button))
        button.SetLabel("标签改变")
        panel = button.GetParent()  # ... walk up to its parent panel ...
        print(type(panel))
        app = wx.GetApp()  # ... fetch the application object ...
        print(app)
        frame = app.GetTopWindow()  # ... and finally the top-level frame.
        print(frame)
# Create the app (False: don't redirect stdout/stderr) and run the event loop.
if __name__ == "__main__":
    app = MyApp(False)
    app.MainLoop()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*
############################################################################
# Joshua R. Boverhof, LBNL
# See LBNLCopyright for copyright notice!
###########################################################################
import sys, unittest
from ServiceTest import main, ServiceTestCase, ServiceTestSuite, TestException
from ZSI.schema import ElementDeclaration, GED
from ZSI import ParsedSoap
"""
WSDL:
"""
# General targets
def dispatch():
    """Run all dispatch tests"""
    suite = ServiceTestSuite()
    # unittest.makeSuite was deprecated and removed in Python 3.13; a
    # TestLoader with a method prefix is the supported equivalent.
    loader = unittest.TestLoader()
    loader.testMethodPrefix = 'test_dispatch'
    suite.addTest(loader.loadTestsFromTestCase(TestCase))
    return suite
def local():
    """Run all local tests"""
    suite = ServiceTestSuite()
    # TestLoader replaces the removed unittest.makeSuite (gone in 3.13).
    loader = unittest.TestLoader()
    loader.testMethodPrefix = 'test_local'
    suite.addTest(loader.loadTestsFromTestCase(TestCase))
    return suite
def net():
    """Run all network tests"""
    suite = ServiceTestSuite()
    # TestLoader replaces the removed unittest.makeSuite (gone in 3.13).
    loader = unittest.TestLoader()
    loader.testMethodPrefix = 'test_net'
    suite.addTest(loader.loadTestsFromTestCase(TestCase))
    return suite
def all():
    """Run all tests"""
    suite = ServiceTestSuite()
    # TestLoader replaces the removed unittest.makeSuite (gone in 3.13).
    loader = unittest.TestLoader()
    loader.testMethodPrefix = 'test_'
    suite.addTest(loader.loadTestsFromTestCase(TestCase))
    return suite
class TestCase(ServiceTestCase):
    """Test case for WolframSearch."""
    name = "test_WolframSearch"
    client_file_name = "WolframSearchService_services"
    types_file_name = "WolframSearchService_services_types"
    server_file_name = "WolframSearchService_services_server"

    def __init__(self, methodName):
        ServiceTestCase.__init__(self, methodName)
        self.wsdl2py_args.append('-b')

    def test_local_import(self):
        # Smoke test: the generated client module can build a request object.
        wsreq = self.client_module.WolframSearchRequest()

    def _run_search(self):
        # Shared driver for the net and dispatch search tests (their bodies
        # were previously duplicated line for line).
        loc = self.client_module.WolframSearchServiceLocator()
        port = loc.getWolframSearchmyPortType(**self.getPortKWArgs())
        msg = self.client_module.WolframSearchRequest()
        opts = msg.new_Options()
        msg.Options = opts
        opts.Query = 'Newton'
        opts.set_element_Limit(10)
        rsp = port.WolframSearch(msg)
        # assertTrue replaces the long-deprecated failUnless alias.
        self.assertTrue(rsp.Result.SearchTime > 0, 'expecting a search time >0')

    def test_net_search(self):
        self._run_search()

    def test_dispatch_search(self):
        self._run_search()
# Run the ServiceTest driver when executed directly.
if __name__ == '__main__':
    main()
|
from urllib import request
import re
class Croller(object):
    """Base crawler: holds compiled title/url patterns.

    Subclasses must override yield_article.
    """

    def __init__(self):
        # The original returned self from __init__, which raises
        # "TypeError: __init__() should return None" on instantiation.
        self.title_pattern = re.compile('')
        self.url_pattern = re.compile('')

    def yield_article(self):
        # Abstract hook (the original def had no body at all, which is a
        # syntax error); subclasses provide the implementation.
        raise NotImplementedError
class AsahiCroller(Croller):
    """Crawler targeting the Asahi Shimbun child-rearing article list."""

    def __init__(self):
        # NOTE(review): the base __init__ is never invoked, so the
        # title/url patterns from Croller are not set on this instance —
        # confirm that is intended.
        self.url = 'http://www.asahi.com/edu/list/kosodate.html'

    def yield_article(self):
        # Not implemented yet for this source.
        pass
|
import csv
def remove_spaces(ls):
    """Return a copy of *ls* with surrounding whitespace stripped from each element."""
    return list(map(str.strip, ls))
# Print every data row of data.csv, skipping the header line.
with open('data.csv', 'r') as csv_file:
    reader = csv.reader(csv_file)
    next(reader)  # advance past the header row
    for row in reader:
        print(row)

# Copy data.csv into copy_data.csv using '-' as the output delimiter
# (use '\t' for tabs); strip stray spaces from every cell on the way.
with open('data.csv', 'r') as org_csv, open('copy_data.csv', 'w') as new_csv:
    reader = csv.reader(org_csv)  # add delimiter='-' if the input uses '-'
    writer = csv.writer(new_csv, delimiter='-')
    for row in reader:
        writer.writerow(remove_spaces(row))
|
import datetime
from django.db import models
from university.models import *
# Selectable academic years: 2021 through next year, as (value, label) pairs
# for a Django IntegerField's choices.
YEARS = [(year, year) for year in range(2021, datetime.datetime.now().year + 2)]
class Susi(models.Model):
    """A 'Susi' (early/rolling) admission type offered by a university."""

    class Meta:
        verbose_name = '수시전형'
        verbose_name_plural = '수시전형'

    # University offering this admission type.
    university = models.ForeignKey(
        verbose_name='대학',
        to='university.University',
        related_name='susis',
        on_delete=models.CASCADE,
    )
    # Name of the admission track.
    name = models.CharField(
        verbose_name='전형명',
        max_length=31,
    )
    # Academic year; choices come from the module-level YEARS list.
    year = models.IntegerField(
        verbose_name='학년도',
        choices=YEARS,
    )

    def __str__(self):
        return str(self.year) + '/' + self.university.name + '/수시전형/' + self.name
        # e.g.) 2021/서울대학교/수시전형/일반전형
class SusiSchedule(models.Model):
    """A dated schedule entry (e.g. application window) for a Susi admission type."""

    class Meta:
        verbose_name = '수시전형 일정'
        verbose_name_plural = '수시전형 일정'

    # Admission type this schedule entry belongs to.
    susi = models.ForeignKey(
        verbose_name='수시전형 종류',
        to='Susi',
        related_name='susi_schedules',
        on_delete=models.CASCADE,
    )
    # Block of majors this schedule applies to.
    major_block = models.ForeignKey(
        verbose_name='학과 블록',
        to='university.SusiMajorBlock',
        related_name='susi_schedules',
        on_delete=models.CASCADE,
    )
    # Human-readable label for the event, e.g. application submission.
    description = models.CharField(
        verbose_name='설명',
        max_length=255,
    )
    start_date = models.DateTimeField(
        verbose_name='시작시간',
    )
    end_date = models.DateTimeField(
        verbose_name='종료시간',
    )

    def __str__(self):
        return str(self.susi.year) + '/' + self.susi.university.name + '/수시전형/' + self.susi.name + '/' + self.description + '/' + self.major_block.name
        # e.g.) 2021/서울대학교/수시전형/일반전형/지원서 접수/의과대학, 수의과대학, 치의과대학
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the superReducedString function below.
def superReducedString(s):
    """Repeatedly delete adjacent equal-letter pairs from *s*.

    Returns the fully reduced string, or "Empty String" when nothing
    remains. The original single forward pass could not handle cascading
    deletions (e.g. "baab": removing "aa" exposes "bb") and mis-terminated
    on strings ending in a pair; a stack handles both in one O(n) pass.
    """
    stack = []
    for ch in s:
        if stack and stack[-1] == ch:
            # Current char pairs with the previous survivor: both vanish.
            stack.pop()
        else:
            stack.append(ch)
    return "".join(stack) if stack else "Empty String"
if __name__ == '__main__':
    # Read one string from stdin and print its fully reduced form.
    s = input()
    result = superReducedString(s)
    print(result)
from django.forms import ModelForm
from .models import NoteTitle, Notedetails
class Note_title(ModelForm):
    """ModelForm exposing every field of NoteTitle (empty exclude tuple)."""
    class Meta:
        model = NoteTitle
        exclude = ()
class Note_details(ModelForm):
    """ModelForm exposing every field of Notedetails (empty exclude tuple)."""
    class Meta:
        model = Notedetails
        exclude = ()
#
# Copyright (C) 2019 Luca Pasqualini
# University of Siena - Artificial Intelligence Laboratory - SAILab
#
#
# USienaRL is licensed under a BSD 3-Clause.
#
# You should have received a copy of the license along with this
# work. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
# Import packages
import logging
import numpy
# Import required src
from usienarl import Environment, SpaceType
class Interface:
    """
    Base interface abstract class to define interaction between agent and environment.

    An interface allows to translate the environment state and actions to the agent observations and actions.
    The default interface is the pass-through interface, implementing a simple fully observable interface for the
    environment. The environment is specific for each interface.

    You should always define your own interface or use a pass-trough, the base class cannot be used as-is.
    """

    def __init__(self,
                 environment: Environment):
        # Make sure parameters are valid. An assert would be stripped under
        # `python -O`, so validate with an explicit raise.
        if environment is None:
            raise ValueError("environment cannot be None")
        # Define internal attributes
        self._environment: Environment = environment

    def sample_agent_action(self,
                            logger: logging.Logger,
                            session) -> numpy.ndarray:
        """
        Sample a random action as seen by the agent, i.e. a random action in the environment translated in agent action.
        The way the action is sampled depends on the environment.

        :param logger: the logger used to print the interface information, warnings and errors
        :param session: the session of tensorflow currently running
        :return: the random action as seen by the agent, wrapped in a numpy array
        """
        # Sample an action from the environment
        environment_action: numpy.ndarray = self._environment.sample_action(logger, session)
        # Translate it to agent action and return it
        return self.environment_action_to_agent_action(logger, session, environment_action)

    def agent_action_to_environment_action(self,
                                           logger: logging.Logger,
                                           session,
                                           agent_action: numpy.ndarray) -> numpy.ndarray:
        """
        Translate the given agent action to the respective environment action, both wrapped in a numpy array to allow
        parallelization.

        :param logger: the logger used to print the interface information, warnings and errors
        :param session: the session of tensorflow currently running
        :param agent_action: the action as seen by the agent, wrapped in a numpy array
        :return: the action as seen by the environment relative to the given agent action, wrapped in a numpy array
        """
        # Abstract method, it should be implemented on a child class basis
        raise NotImplementedError()

    def environment_action_to_agent_action(self,
                                           logger: logging.Logger,
                                           session,
                                           environment_action: numpy.ndarray) -> numpy.ndarray:
        """
        Translate the given environment action to the respective agent action, both wrapped in a numpy array to allow
        parallelization.

        :param logger: the logger used to print the interface information, warnings and errors
        :param session: the session of tensorflow currently running
        :param environment_action: the action as seen by the environment, wrapped in a numpy array
        :return: the action as seen by the agent relative to the given environment action, wrapped in a numpy array
        """
        # Abstract method, it should be implemented on a child class basis
        raise NotImplementedError()

    def environment_state_to_observation(self,
                                         logger: logging.Logger,
                                         session,
                                         environment_state: numpy.ndarray) -> numpy.ndarray:
        """
        Translate the given environment state to the respective agent state, i.e. observation, both wrapped in a
        numpy array to allow parallelization.

        :param logger: the logger used to print the interface information, warnings and errors
        :param session: the session of tensorflow currently running
        :param environment_state: the state as seen by the environment, wrapped in a numpy array
        :return: the state as seen (observed) by the agent relative to the given environment state, wrapped in a numpy array
        """
        # Abstract method, it should be implemented on a child class basis
        raise NotImplementedError()

    def possible_agent_actions(self,
                               logger: logging.Logger,
                               session) -> list:
        """
        Get a list of all actions' indexes possible at the current states of the environment if the action space
        is discrete, translated into agent actions.
        Get a list containing the lower and the upper bounds at the current states of the environment, each
        wrapped in numpy array with the shape of the action space, translated into agent actions.

        :param logger: the logger used to print the environment information, warnings and errors
        :param session: the session of tensorflow currently running
        :return: a list of indices containing the possible actions or a list of upper and lower bounds arrays, translated into agent actions
        """
        # Translate each action in each batch of actions from environment to agent.
        # Annotations use the `list` type, not a `[]` literal as before.
        batch_environment_possible_actions: list = self._environment.possible_actions(logger, session)
        batch_agent_possible_actions: list = []
        for environment_possible_actions in batch_environment_possible_actions:
            agent_possible_actions: list = []
            for environment_action in environment_possible_actions:
                # If single environment action is discrete append it as a number, otherwise as an array (the lower/upper boundary)
                if self.agent_action_space_type == SpaceType.discrete:
                    agent_possible_actions.append(self.environment_action_to_agent_action(logger, session, numpy.array(environment_action)).tolist())
                else:
                    agent_possible_actions.append(self.environment_action_to_agent_action(logger, session, numpy.array(environment_action)))
            batch_agent_possible_actions.append(agent_possible_actions)
        # Return the agent version of the possible actions
        return batch_agent_possible_actions

    @property
    def environment(self) -> Environment:
        """
        The environment associated with the interface.
        """
        return self._environment

    @property
    def observation_space_type(self) -> SpaceType:
        """
        The type of the observation space of the agent.
        """
        # Abstract property, it should be implemented on a child class basis
        raise NotImplementedError()

    @property
    def observation_space_shape(self) -> tuple:
        """
        The shape of the observation space of the agent.
        Note: it may differ from the environment's state space shape.
        """
        # Abstract property, it should be implemented on a child class basis
        raise NotImplementedError()

    @property
    def agent_action_space_type(self) -> SpaceType:
        """
        The type of the action space of the agent.
        """
        # Abstract property, it should be implemented on a child class basis
        raise NotImplementedError()

    @property
    def agent_action_space_shape(self) -> tuple:
        """
        The shape of the action space of the agent.
        Note: it may differ from the environment's action space shape.
        """
        # Abstract property, it should be implemented on a child class basis
        raise NotImplementedError()
|
from pymongo import MongoClient
def get_db_connection():
    """Connect to the remote MongoDB Atlas cluster.

    Returns a client object usable for queries.

    SECURITY NOTE(review): the username/password are hard-coded in the
    connection URI below — move them to environment variables or a secrets
    store and rotate the exposed credentials.
    """
    client = MongoClient("mongodb+srv://amaury:motdepasse@moncluster.xximx.mongodb.net/<MonCluster>?retryWrites=true&w=majority")
    return client
def envoi_promesse_base(dico):
    """Insert one pledge document into the database.

    *dico* is built from the HTML form inputs handled by
    gestion_formulaire.
    """
    client = get_db_connection()
    try:
        client.db.promesses_de_dons.insert_one(dico)
    finally:
        # Close even if the insert raises, so connections are never leaked.
        client.close()
def recup_promesses_base():
    """Fetch all pledges and return them as a list of dicts (without _id)."""
    client = get_db_connection()
    try:
        return list(client.db.promesses_de_dons.find({}, {"_id": 0}))
    finally:
        # Close even if the query raises, so connections are never leaked.
        client.close()
from networking_modules.socketfunctions import TCPSocket,UDPSocket
from networking_modules.conversion import *
import threading
import math
import time
import random
class client:
    """TCP/UDP endpoint helper.

    NOTE(review): `socket` is used below but this module never imports it
    by name — presumably it arrives via the star import from
    networking_modules.conversion; confirm.
    NOTE(review): every attribute below is class-level, so the four socket
    objects (and ports/addresses) are shared by ALL instances of `client`
    — confirm that is intended.
    """
    host = '127.0.0.1'
    clientaddr = '127.0.0.6'
    engageFlag = 1
    tcpPort = 9000
    udpPort = 9001
    # Pre-created sockets (shared at class level, see note above).
    udpSend = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
    tcpSend = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
    tcpRecv = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
    udpRecv = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
    tcpSPort = 9000
    udpSPort = 9002
    tcpRPort = 9001
    udpRPort = 9003
    recvaddr = (clientaddr, tcpRPort)
    sendaddr = (clientaddr, tcpSPort)
    intPayload = 10
    stringPayload = "Yolo"

    def __init__(self, hostaddr='127.0.0.6'):
        # Store the address and bind both TCP sockets immediately.
        # NOTE(review): the parameter is named hostaddr but is stored as
        # clientaddr; the class-level `host` is never updated here.
        self.clientaddr = hostaddr
        print('hostaddr/clientaddr' + str(self.clientaddr))
        self.recvaddr = (self.clientaddr, self.tcpRPort)
        self.sendaddr = (self.clientaddr, self.tcpSPort)
        print('send addr' + str(self.sendaddr))
        self.tcpSend.bind((self.clientaddr, self.tcpSPort))
        self.tcpRecv.bind(self.recvaddr)

    def Connect(self, ip='127.0.0.1'):
        # Connect the receive socket to the peer's send port.
        self.tcpRecv.connect((ip, self.tcpSPort))

    def Listen(self):
        # Accept a single inbound connection and replace tcpSend with the
        # accepted connection socket.
        self.tcpSend.listen(1)
        conn, endPointAddr = self.tcpSend.accept()
        self.tcpSend = conn

    def SetIP(self, ip='127.0.0.1'):
        # Recompute addresses for a new IP and bind both sockets.
        # NOTE(review): binding an already-bound socket raises OSError, so
        # this can only work on sockets that were not bound yet — confirm.
        self.clientaddr = ip
        self.recvaddr = (self.clientaddr, self.tcpRPort)
        self.sendaddr = (self.clientaddr, self.tcpSPort)
        self.tcpSend.bind(self.sendaddr)
        self.tcpRecv.bind(self.recvaddr)

    def SendData(self, buf=bytes()):
        self.tcpSend.send(buf)
        print("Data Sent")

    def RecvData(self):
        # Poll tcpRecv until data arrives or ~2 seconds elapse; return the
        # received bytes, or -1 on timeout with nothing read.
        start = time.time()
        buf = bytes()
        while buf.__len__() == 0:
            print('Entering While: ' + str(buf.__len__()))
            buf = self.tcpRecv.recv(100)
            timeElapsed = time.time() - start
            if(timeElapsed >= 2):
                break
        if(buf.__len__() != 0):
            return buf
        else:
            return -1

    def TCPSendData(self, buf=bytes()):
        # Thin wrapper: send raw bytes over the TCP send socket.
        self.tcpSend.send(buf)
'''
print("Making Client Handler Thread: ")
th = threading.Thread(target = c.ClientHandler)
th.start()
print("Running Main Loop")
c.Loop()
'''
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Ce fichier contient la classe Periode, détaillée plus bas."""
from abstraits.obase import BaseObj
from corps.aleatoire import varier
from primaires.format.description import Description
from primaires.format.fonctions import supprimer_accents
from .element import Element
class Periode(BaseObj):
    """A period of time (one stage in a plant's growth cycle).

    A period holds:
        plante -- the plant prototype defining the period (via the cycle)
        nom -- the period's name
        nom_singulier, nom_pluriel -- display names for this period
        description -- the plant's description during this period
        fin -- a (day, month) tuple marking the end of the stage
        variation -- a random variation in days applied to the end [1]
        elements -- the list of elements harvestable during this period [2]

    [1] The first period of a prototype's list is always selected at the
        start of the year.  When several periods are defined, each stage
        gives way to the next on the configured day plus or minus the
        optional variation.  This randomness means, for instance, that a
        corn plant becomes harvestable on approximate dates.  After the
        last defined stage, the plant returns to stage 0 (the first one).

    [2] The element list contains Element objects (see the class defined
        in .element.py).
    """

    def __init__(self, nom, cycle):
        """Build the period."""
        BaseObj.__init__(self)
        self.nom = nom
        self.cycle = cycle
        self.nom_singulier = "une plante"
        self.nom_pluriel = "plantes"
        self.description = Description(parent=self)
        self.fin = (0, 0)       # (day, month) end of the stage
        self.variation = 0      # random spread in days around `fin`
        self.elements = []
        self.poids_max = 0
        self.visible = True

    def __getnewargs__(self):
        return ("", None)

    def __repr__(self):
        return "<période {} ({}-{} variation={})>".format(self.nom,
                self.fin[0] + 1, self.fin[1] + 1, self.variation)

    def __str__(self):
        return self.nom

    @property
    def date_fin(self):
        """Return the end date using data from the time module."""
        return "{} du {}".format(importeur.temps.cfg.noms_jours[ \
                self.fin[0]], importeur.temps.cfg.mois[self.fin[1]][0])

    @property
    def plante(self):
        return self.cycle and self.cycle.plante or None

    @property
    def periode_suivante(self):
        """Return the next period, if found.

        If no period follows this one, return the first of the cycle.
        If this period cannot be found in the cycle, raise IndexError.
        """
        indice = self.cycle.periodes.index(self)
        if indice == -1:
            raise IndexError("période introuvable {} dans la plante {}".format(
                    self, self.plante))
        try:
            return self.cycle.periodes[indice + 1]
        except IndexError:
            return self.cycle.periodes[0]

    @property
    def finie(self):
        """Return True if the period is over, False otherwise."""
        tps = importeur.temps.temps
        jour = tps.jour
        mois = tps.mois
        t_j, t_m = self.fin
        # NOTE(review): `t_j += varier(t_j, ...)` adds the varied value on
        # top of t_j; if varier returns t_j +/- variation this roughly
        # doubles the day — confirm varier's contract before relying on
        # the exact date arithmetic below.
        t_j += varier(t_j, self.variation, min=None)
        if t_j < 0:
            t_m -= t_j // 30
            t_j = t_j % 30
        if mois > t_m:
            return True
        elif mois == t_m and jour > t_j:
            return True
        return False

    def get_nom(self, nombre=1):
        """Return the singular or plural name depending on *nombre*."""
        if nombre == 1:
            return self.nom_singulier
        return str(nombre) + " " + self.nom_pluriel

    def ajouter_element(self, nom, objet, quantite):
        """Add an element.

        Raise ValueError when adding an element that already exists
        (same object or same accent-insensitive name).
        """
        sa_nom = supprimer_accents(nom).lower()
        for elt in self.elements:
            if elt.objet is objet or supprimer_accents(elt.nom) == sa_nom:
                raise ValueError("l'élément {} existe déjà".format(elt))
        elt = Element(self.plante, self, nom.lower(), objet, quantite)
        self.elements.append(elt)
        return elt

    def get_element(self, nom):
        """Return the element with this name.

        The lookup is insensitive to case and accents; raise ValueError
        when no element matches.
        """
        nom = supprimer_accents(nom).lower()
        for elt in self.elements:
            if supprimer_accents(elt.nom) == nom:
                return elt
        raise ValueError("le nom d'élément {} est introuvable".format(nom))

    def get_element_depuis_objet(self, objet):
        """Return the element associated with *objet*, or raise ValueError."""
        for elt in self.elements:
            if elt.objet is objet:
                return elt
        raise ValueError("aucun élément ne correspond à l'objet {}".format(
                objet.cle))

    def est_element(self, nom):
        """Return True if the element exists, False otherwise."""
        try:
            return bool(self.get_element(nom))
        except ValueError:
            # get_element raises ValueError (not KeyError) on a miss; the
            # original caught KeyError, letting the error escape.
            return False

    def supprimer_element(self, nom):
        """Remove the named element, or raise ValueError if absent."""
        nom = supprimer_accents(nom).lower()
        for elt in list(self.elements):
            # Strip accents from the stored name too, mirroring
            # get_element, so accented element names can be removed.
            if supprimer_accents(elt.nom) == nom:
                self.elements.remove(elt)
                elt.detruire()
                return
        raise ValueError("aucun élément ne porte le nom {}".format(nom))

    def detruire(self):
        """Destroy the period and all of its elements."""
        for elt in self.elements:
            elt.detruire()
        BaseObj.detruire(self)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.