text stringlengths 38 1.54M |
|---|
from flask import Flask, request, jsonify
# from backend.model import acc_binary
import joblib
import numpy as np
import pickle
import os
app = Flask(__name__)


@app.route('/predict', methods=['POST'])
def predict():
    """Predict presence of heart disease from JSON clinical features.

    Expects a JSON body with keys: age, gender, trestbps, chol, restecg,
    thalach, exang, cp, fbs.  Responds with ``{"heart_disease": bool}``.

    Fixes vs. original: the feature array was built twice identically, and
    ``y == 0`` compared a numpy array to a scalar — ``int(y[0])`` extracts
    the single prediction explicitly.
    """
    data = request.get_json()
    age = int(data['age'])
    sex = int(data['gender'])
    trestbps = float(data['trestbps'])
    chol = float(data['chol'])
    restecg = float(data['restecg'])
    thalach = float(data['thalach'])
    exang = int(data['exang'])
    cp = int(data['cp'])
    fbs = float(data['fbs'])
    # Feature order must match the column order used at training time.
    x = np.array([age, sex, cp, trestbps, chol, fbs, restecg,
                  thalach, exang]).reshape(1, -1)
    # Scale with the scaler fitted during training.
    scaler_path = os.path.join(os.path.dirname(__file__), 'models/scaler.pkl')
    with open(scaler_path, 'rb') as f:
        scaler = pickle.load(f)
    x = scaler.transform(x)
    model_path = os.path.join(os.path.dirname(__file__), 'models/rfc.sav')
    rfc = joblib.load(model_path)
    y = rfc.predict(x)
    # y == 0 means no heart disease; 1-4 are stages of heart disease.
    if int(y[0]) == 0:
        return jsonify({'heart_disease': False})
    else:
        return jsonify({'heart_disease': True})
if __name__ == "__main__":
    # Run the Flask development server locally.
    app.run(port=5000, debug=True)
# Assignment 1
import math
class DTNode:
    """A node of a decision tree.

    ``decision`` is either a class label (leaf) or a callable mapping a
    feature vector to the index of the child to descend into (decision node).
    """

    def __init__(self, decision):
        self.decision = decision
        self.children = None  # leaf nodes have no children

    def predict(self, feature_vector):
        """Return the predicted class label for ``feature_vector``."""
        if self.is_decision_node():
            return self.children[self.decision(feature_vector)].predict(feature_vector)
        return self.decision

    def leaves(self):
        """Return the number of leaf nodes in the subtree rooted here."""
        if self.children is None:
            return 1
        return sum(child.leaves() for child in self.children)

    def is_decision_node(self):
        # A callable decision routes to children; anything else is a label.
        return callable(self.decision)
# returns a separator function and a partitioned dataset
def partition_by_feature_value(feature_index, dataset):
    """Split ``dataset`` into partitions sharing a value at ``feature_index``.

    Returns ``(separator, partitions)`` where ``separator(v)`` maps a feature
    vector to the index of its partition.  Partitions appear in first-seen
    order of the feature value.  The original used ``setdefault(key)`` which
    first inserted a ``None`` placeholder and then overwrote it; using
    ``setdefault(key, [])`` builds each bucket directly.
    """
    buckets = {}  # insertion-ordered: feature value -> list of examples
    for example in dataset:
        vector, _ = example
        buckets.setdefault(vector[feature_index], []).append(example)
    value_order = list(buckets)
    separator = lambda v: value_order.index(v[feature_index])
    return separator, list(buckets.values())
def find_classes_from_data(data):
    """Return the set of distinct class labels appearing in ``data``."""
    labels = set()
    for _, label in data:
        labels.add(label)
    return labels
def proportion_with_k(data, k):
    """Return the fraction of examples in ``data`` labelled with class ``k``."""
    matches = sum(1 for _, label in data if label == k)
    return matches / len(data)
def misclassification(data):
    """Misclassification impurity: 1 minus the largest class proportion."""
    best = max(proportion_with_k(data, k) for k in find_classes_from_data(data))
    return 1 - best
def gini(data):
    """Gini impurity: sum over classes k of p_k * (1 - p_k).

    Computes each class proportion once instead of twice per class as the
    original did (proportion_with_k is an O(n) scan).
    """
    proportions = [proportion_with_k(data, k) for k in find_classes_from_data(data)]
    return sum(p * (1 - p) for p in proportions)
# Equal true and false classifications gives an entropy of 1
def entropy(data):
    """Shannon entropy of the class distribution, in bits (log base 2).

    The original used the natural log, which makes a 50/50 binary split come
    out as ln(2) ~= 0.693 rather than the 1 bit promised by the comment
    above; ID3 conventionally measures entropy with log2.
    """
    proportions = [proportion_with_k(data, k) for k in find_classes_from_data(data)]
    return -sum(p * math.log2(p) for p in proportions)
def impurity_at_node_m(data, feature, criterion):
    """Weighted impurity of splitting ``data`` on ``feature``.

    Returns ``(feature, impurity)`` so callers can take ``min`` over features.
    """
    _, partitions = partition_by_feature_value(feature, data)
    total = len(data)
    weighted = sum(len(part) / total * criterion(part) for part in partitions)
    return feature, weighted
def most_common_label(data):
    """Return the most frequent class label in ``data`` (first seen wins ties).

    Counts each label once — O(n) — instead of recomputing the class
    proportion for every example as the original did (O(n^2)).  Returns
    ``None`` for empty data, matching the original's fall-through.
    """
    counts = {}
    for _, label in data:
        counts[label] = counts.get(label, 0) + 1
    best_label = None
    best_count = 0
    for label, count in counts.items():
        if count > best_count:
            best_label, best_count = label, count
    return best_label
class DTree:
    """ID3-style decision tree built greedily from a categorical dataset."""

    def __init__(self, dataset, criterion):
        self.dataset = dataset
        self.criterion = criterion
        # Features are addressed by their index into each feature vector.
        self.features = set(range(len(dataset[0][0])))
        self.tree = self.split(self.dataset, self.features)

    def split(self, dataset, features):
        """Recursively grow a subtree for ``dataset`` over ``features``."""
        # All examples share one class: emit a leaf with that label.
        if proportion_with_k(dataset, dataset[0][1]) == 1:
            return DTNode(dataset[0][1])
        # No features left to split on: fall back to the majority label.
        if len(features) == 0:
            return DTNode(most_common_label(dataset))
        # Otherwise split on the feature with the lowest weighted impurity.
        impurities = [impurity_at_node_m(dataset, f, self.criterion) for f in features]
        best_feature, _ = min(impurities, key=lambda pair: pair[1])
        separator, partitions = partition_by_feature_value(best_feature, dataset)
        node = DTNode(separator)
        node.children = [
            self.split(partition, features - {best_feature})
            for partition in partitions
        ]
        return node
# Components of Iterative Dichotomiser 3
# 1. A way of partitioning the dataset by features;
# 2. A criterion for impurity of a dataset; and
# 3. An iterative loop of concurrently splitting the dataset and building the tree.
def train_tree(dataset, criterion):
    """Train a decision tree on ``dataset`` and return its root DTNode."""
    return DTree(dataset, criterion).tree
def main():
    """Exercise the decision-tree implementation on two UCI datasets.

    Expects ``car.data`` and ``balance-scale.data`` (CSV, no header) in the
    working directory.  The large commented-out snippets for Q3-Q7 that
    previously lived here were dead code and have been removed; the
    "Exprected" typo in the printed reference output is fixed.
    """
    from pprint import pprint

    # Example 3: UCI "car evaluation" dataset -- last column is the class.
    dataset = []
    with open('car.data', 'r') as f:
        for line in f:
            features = line.strip().split(",")
            dataset.append((tuple(features[:-1]), features[-1]))
    pprint(dataset[:5])
    print(
        "Expected: [(('vhigh', 'vhigh', '2', '2', 'small', 'low'), 'unacc'), \n (('vhigh', 'vhigh', '2', '2', 'small', 'med'), 'unacc'), \n (('vhigh', 'vhigh', '2', '2', 'small', 'high'), 'unacc'), \n (('vhigh', 'vhigh', '2', '2', 'med', 'low'), 'unacc'), \n (('vhigh', 'vhigh', '2', '2', 'med', 'med'), 'unacc')]")
    t = train_tree(dataset, misclassification)
    print(t.predict(("high", "vhigh", "2", "2", "med", "low")), "Expected: unacc")
    print(t.leaves(), len(dataset), len(dataset) / t.leaves())

    # Example 4: UCI "balance scale" dataset -- first column is the class.
    dataset = []
    with open('balance-scale.data', 'r') as f:
        for line in f:
            out, *features = line.strip().split(",")
            dataset.append((tuple(features), out))
    pprint(dataset[:5])
    print(
        "Expected: [(('1', '1', '1', '1'), 'B'), \n (('1', '1', '1', '2'), 'R'), \n (('1', '1', '1', '3'), 'R'), \n (('1', '1', '1', '4'), 'R'), \n (('1', '1', '1', '5'), 'R')]")
    t = train_tree(dataset, misclassification)
    print(t.predict(("1", "4", "3", "2")), "Expected: R")
    print(t.leaves(), len(dataset), len(dataset) / t.leaves())
if __name__ == "__main__":
    # Run the assignment examples when executed as a script.
    main()
class ListNode:
    """Singly linked list node holding ``val`` and a ``next`` pointer."""

    def __init__(self, val):
        self.val = val
        self.next = None
class Solution:
    def reorderList(self, head: ListNode) -> None:
        """Rewrite node values in place to L0, Ln, L1, Ln-1, ... (returns None).

        Collects all values into a list, then walks the list again writing
        values alternately from the front and the back.
        """
        values = []
        node = head
        while node:
            values.append(node.val)
            node = node.next
        lo, hi = 0, len(values) - 1
        node = head
        while lo <= hi:
            node.val = values[lo]
            node = node.next
            if lo != hi:
                node.val = values[hi]
                node = node.next
            lo += 1
            hi -= 1
if __name__ == '__main__':
    solution = Solution()
    head = ListNode(1)
    head.next = ListNode(2)
    head.next.next = ListNode(3)
    head.next.next.next = ListNode(4)
    # reorderList mutates the list in place and returns None; the original
    # iterated the None return value, so the loop below never printed
    # anything.  Traverse from `head` after the call instead.
    solution.reorderList(head)
    node = head
    while node:
        print(node.val)
        node = node.next
from typing import List
class Solution:
    def findNumberIn2DArray(self, matrix: List[List[int]], target: int) -> bool:
        """Return True if ``target`` occurs in a matrix whose rows and
        columns are both sorted in ascending order.

        Staircase search from the top-right corner: each comparison discards
        one whole row or one whole column, giving O(rows + cols) instead of
        the original O(rows * log(cols)) per-row binary search.  It also
        avoids the IndexError the per-row search raised on an empty row past
        the first (only ``matrix[0]`` was checked).
        """
        if not matrix or not matrix[0]:
            return False
        row, col = 0, len(matrix[0]) - 1
        while row < len(matrix) and col >= 0:
            value = matrix[row][col]
            if value == target:
                return True
            if value > target:
                col -= 1  # everything below in this column is even larger
            else:
                row += 1  # everything left in this row is even smaller
        return False
if __name__ == "__main__":
    # Smoke test: 5 is present in the sorted matrix.
    grid = [[1, 4, 7, 11, 15], [2, 5, 8, 12, 19], [3, 6, 9, 16, 22],
            [10, 13, 14, 17, 24], [18, 21, 23, 26, 30]]
    print(Solution().findNumberIn2DArray(grid, 5))
import os
import pandas as pd
from data_pipeline.core import DataPipeline
from multiprocessing import Process, Lock
from util.daemon import MyPool
from util.dataset import load_dataset
from util.logger import init_logger
class Worker:
    """Picklable callable that runs one DataPipeline shard in a pool worker."""

    def __init__(self, load, destination, outer_loop, inner_loop):
        self.load = load
        self.destination = destination
        self.out_loop = outer_loop
        self.in_loop = inner_loop

    def __call__(self, x):
        # ``x`` is the shard index; each shard writes to its own file.
        pipeline = DataPipeline(x, self.destination)
        pipeline.process(
            load=self.load,
            o_loop=self.out_loop,
            shadow=True,
            i_loop=self.in_loop,
        )
def worker(index, load, destination, lock, outer_loop, inner_loop):
    """Process entry point: run one DataPipeline shard guarded by ``lock``."""
    pipeline = DataPipeline(index, destination, lock)
    pipeline.process(load, o_loop=outer_loop, shadow=True, i_loop=inner_loop)
def collect_df(destination, num):
    """Merge per-worker CSV shards ``destination0..destination{num-1}``
    into ``destination`` and delete the shards.  Returns 0.

    Fixes an inverted branch in the original: it overwrote an existing file
    (mode 'w' with header) and appended header-less rows to a missing file.
    Now a new file is created with a header and an existing file is appended
    to without one, mirroring the append/create logic of ``collect_data``.
    """
    combined_df = pd.concat(
        [pd.read_csv(destination + str(i)) for i in range(num)], axis=0
    )
    if os.path.isfile(destination):
        # File already exists: append rows, no duplicate header.
        combined_df.to_csv(destination, mode='a', index=False, header=False)
    else:
        # First write: create the file with a header row.
        combined_df.to_csv(destination, mode='w', index=False, header=True)
    # check
    init_logger().info("final saved df's tail 5: \n{df}".format(df=pd.read_csv(destination).tail(5)))
    # delete the consumed shards
    for i in range(num):
        if os.path.exists(destination + str(i)):
            os.remove(destination + str(i))
    return 0
def collect_data(destination, num):
    """Concatenate binary shards ``destination0..destination{num-1}`` onto
    ``destination`` (appending if it already exists), then delete the shards.

    Uses ``with`` so the output handle is always closed (the original leaked
    ``f`` if a shard read raised, and called ``pf.close()`` redundantly
    inside its own ``with`` block).
    """
    mode = "ab" if os.path.isfile(destination) else "wb"
    with open(destination, mode) as out:
        for i in range(num):
            with open(destination + str(i), 'rb') as shard:
                out.write(shard.read())
    # delete the consumed shards
    for i in range(num):
        shard_path = destination + str(i)
        if os.path.exists(shard_path):
            os.remove(shard_path)
def pool_parallel(destination, p_num=4, outer_loop=2, inner_loop=1):
    """Fan ``p_num`` pipeline shards out over a process pool, then merge them.

    The original constructed ``Worker(load=..., destination=...)`` without the
    required ``outer_loop``/``inner_loop`` arguments, so the call raised
    TypeError before any work started.  The loop counts are now parameters
    (defaults match what ``main`` passes to ``parallel``) and are forwarded.
    """
    with MyPool(p_num) as pool:
        pool.map(Worker(
            load=load_dataset(batch_size=64),
            destination=destination,
            outer_loop=outer_loop,
            inner_loop=inner_loop,
        ), range(p_num))
        # wait for all child processes to finish
        pool.close()
        pool.join()
    # collect the per-shard CSVs into a single file
    collect_df(destination=destination, num=p_num)
    init_logger().info("success to collect data into '{dest}'".format(dest=destination))
def parallel(destination, outer_loop, inner_loop, p_num=4):
    """Run ``p_num`` pipeline shards as separate processes sharing one lock."""
    lock = Lock()
    procs = []
    for shard_id in range(p_num):
        proc = Process(
            target=worker,
            args=(shard_id, load_dataset(batch_size=32), destination, lock,
                  outer_loop, inner_loop),
        )
        procs.append(proc)
        proc.start()
    # block until every shard has finished
    for proc in procs:
        proc.join()
def single(destination, o_loop=250, i_loop=10, b_type=None, in_ch=None):
    """Run the whole pipeline in-process (shard id 0, no lock)."""
    pipeline = DataPipeline(0, destination, None)
    pipeline.process(
        load_dataset(batch_size=64),
        o_loop=o_loop,
        shadow=True,
        i_loop=i_loop,
        b_type=b_type,
        in_ch=in_ch,
    )
def main(arg="parallel"):
    """Dispatch to the parallel or single-process pipeline run.

    The original compared strings with ``is``, which tests object identity
    and only happened to work because CPython interns short literals; ``==``
    is the correct comparison for string values.
    """
    destination = "training_data/data"
    if arg == "parallel":
        logger = init_logger()
        logger.info("director id: %s" % (os.getpid()))
        parallel(destination=destination, outer_loop=2, inner_loop=1, p_num=4)
    else:
        single(destination=destination)
if __name__ == '__main__':
    # Run the single-process pipeline by default.
    main("single")
import tkinter
import sys
from .ctk_canvas import CTkCanvas
from ..theme_manager import ThemeManager
from ..settings import Settings
from ..draw_engine import DrawEngine
from .widget_base_class import CTkBaseClass
class CTkSwitch(CTkBaseClass):
    """Toggle-switch widget: a canvas-drawn slider plus a text label.

    Supports an optional tkinter control variable (onvalue/offvalue), a
    command callback fired on toggle, and normal/disabled states.

    Fix vs. original: the widget state was compared against the tkinter
    state constants with ``is``/``is not``.  Those constants are plain
    strings, so identity comparison only worked through CPython string
    interning; all state checks now use ``==``/``!=``.
    """

    def __init__(self, *args,
                 text="CTkSwitch",
                 text_font="default_theme",
                 text_color="default_theme",
                 text_color_disabled="default_theme",
                 bg_color=None,
                 border_color=None,
                 fg_color="default_theme",
                 progress_color="default_theme",
                 button_color="default_theme",
                 button_hover_color="default_theme",
                 width=36,
                 height=18,
                 corner_radius="default_theme",
                 # button_corner_radius="default_theme",
                 border_width="default_theme",
                 button_length="default_theme",
                 command=None,
                 onvalue=1,
                 offvalue=0,
                 variable=None,
                 textvariable=None,
                 state=tkinter.NORMAL,
                 **kwargs):

        # transfer basic functionality (bg_color, size, _appearance_mode, scaling) to CTkBaseClass
        super().__init__(*args, bg_color=bg_color, width=width, height=height, **kwargs)

        # color ("default_theme" placeholders resolve against the active theme)
        self.border_color = border_color
        self.fg_color = ThemeManager.theme["color"]["switch"] if fg_color == "default_theme" else fg_color
        self.progress_color = ThemeManager.theme["color"]["switch_progress"] if progress_color == "default_theme" else progress_color
        self.button_color = ThemeManager.theme["color"]["switch_button"] if button_color == "default_theme" else button_color
        self.button_hover_color = ThemeManager.theme["color"]["switch_button_hover"] if button_hover_color == "default_theme" else button_hover_color
        self.text_color = ThemeManager.theme["color"]["text"] if text_color == "default_theme" else text_color
        self.text_color_disabled = ThemeManager.theme["color"]["text_disabled"] if text_color_disabled == "default_theme" else text_color_disabled

        # text
        self.text = text
        self.text_label = None
        self.text_font = (ThemeManager.theme["text"]["font"], ThemeManager.theme["text"]["size"]) if text_font == "default_theme" else text_font

        # shape
        self.corner_radius = ThemeManager.theme["shape"]["switch_corner_radius"] if corner_radius == "default_theme" else corner_radius
        # self.button_corner_radius = ThemeManager.theme["shape"]["switch_button_corner_radius"] if button_corner_radius == "default_theme" else button_corner_radius
        self.border_width = ThemeManager.theme["shape"]["switch_border_width"] if border_width == "default_theme" else border_width
        self.button_length = ThemeManager.theme["shape"]["switch_button_length"] if button_length == "default_theme" else button_length
        self.hover_state = False
        self.check_state = False  # True if switch is activated
        self.state = state
        self.onvalue = onvalue
        self.offvalue = offvalue

        # callback and control variables
        self.command = command
        self.variable: tkinter.Variable = variable
        self.variable_callback_blocked = False
        self.variable_callback_name = None
        self.textvariable = textvariable

        # configure grid system (3x1): canvas | spacer | label
        self.grid_columnconfigure(0, weight=1)
        self.grid_columnconfigure(1, weight=0, minsize=self.apply_widget_scaling(6))
        self.grid_columnconfigure(2, weight=0)

        self.bg_canvas = CTkCanvas(master=self,
                                   highlightthickness=0,
                                   width=self.apply_widget_scaling(self._current_width),
                                   height=self.apply_widget_scaling(self._current_height))
        self.bg_canvas.grid(row=0, column=0, padx=0, pady=0, columnspan=3, rowspan=1, sticky="nswe")

        self.canvas = CTkCanvas(master=self,
                                highlightthickness=0,
                                width=self.apply_widget_scaling(self._current_width),
                                height=self.apply_widget_scaling(self._current_height))
        self.canvas.grid(row=0, column=0, padx=0, pady=0, columnspan=1, sticky="nswe")
        self.draw_engine = DrawEngine(self.canvas)

        self.canvas.bind("<Enter>", self.on_enter)
        self.canvas.bind("<Leave>", self.on_leave)
        self.canvas.bind("<Button-1>", self.toggle)

        self.text_label = tkinter.Label(master=self,
                                        bd=0,
                                        text=self.text,
                                        justify=tkinter.LEFT,
                                        font=self.apply_font_scaling(self.text_font),
                                        textvariable=self.textvariable)
        self.text_label.grid(row=0, column=2, padx=0, pady=0, sticky="w")
        self.text_label["anchor"] = "w"

        self.text_label.bind("<Enter>", self.on_enter)
        self.text_label.bind("<Leave>", self.on_leave)
        self.text_label.bind("<Button-1>", self.toggle)

        # sync initial state with the control variable, if one was given
        if self.variable is not None and self.variable != "":
            self.variable_callback_name = self.variable.trace_add("write", self.variable_callback)
            self.check_state = self.variable.get() == self.onvalue

        self.draw()  # initial draw
        self.set_cursor()

    def set_scaling(self, *args, **kwargs):
        """Re-apply widget/font scaling and redraw."""
        super().set_scaling(*args, **kwargs)

        self.grid_columnconfigure(1, weight=0, minsize=self.apply_widget_scaling(6))
        self.text_label.configure(font=self.apply_font_scaling(self.text_font))

        self.bg_canvas.configure(width=self.apply_widget_scaling(self._desired_width), height=self.apply_widget_scaling(self._desired_height))
        self.canvas.configure(width=self.apply_widget_scaling(self._desired_width), height=self.apply_widget_scaling(self._desired_height))
        self.draw()

    def destroy(self):
        # remove variable_callback from variable callbacks if variable exists
        if self.variable is not None:
            self.variable.trace_remove("write", self.variable_callback_name)

        super().destroy()

    def set_cursor(self):
        """Pick a platform cursor matching the current widget state."""
        if Settings.cursor_manipulation_enabled:
            if self.state == tkinter.DISABLED:
                if sys.platform == "darwin":
                    self.canvas.configure(cursor="arrow")
                    if self.text_label is not None:
                        self.text_label.configure(cursor="arrow")
                elif sys.platform.startswith("win"):
                    self.canvas.configure(cursor="arrow")
                    if self.text_label is not None:
                        self.text_label.configure(cursor="arrow")

            elif self.state == tkinter.NORMAL:
                if sys.platform == "darwin":
                    self.canvas.configure(cursor="pointinghand")
                    if self.text_label is not None:
                        self.text_label.configure(cursor="pointinghand")
                elif sys.platform.startswith("win"):
                    self.canvas.configure(cursor="hand2")
                    if self.text_label is not None:
                        self.text_label.configure(cursor="hand2")

    def draw(self, no_color_updates=False):
        """Render the slider; recolor unless ``no_color_updates`` and no
        recoloring is required by the draw engine."""
        if self.check_state is True:
            requires_recoloring = self.draw_engine.draw_rounded_slider_with_border_and_button(self.apply_widget_scaling(self._current_width),
                                                                                              self.apply_widget_scaling(self._current_height),
                                                                                              self.apply_widget_scaling(self.corner_radius),
                                                                                              self.apply_widget_scaling(self.border_width),
                                                                                              self.apply_widget_scaling(self.button_length),
                                                                                              self.apply_widget_scaling(self.corner_radius),
                                                                                              1, "w")
        else:
            requires_recoloring = self.draw_engine.draw_rounded_slider_with_border_and_button(self.apply_widget_scaling(self._current_width),
                                                                                              self.apply_widget_scaling(self._current_height),
                                                                                              self.apply_widget_scaling(self.corner_radius),
                                                                                              self.apply_widget_scaling(self.border_width),
                                                                                              self.apply_widget_scaling(self.button_length),
                                                                                              self.apply_widget_scaling(self.corner_radius),
                                                                                              0, "w")

        if no_color_updates is False or requires_recoloring:
            self.bg_canvas.configure(bg=ThemeManager.single_color(self.bg_color, self._appearance_mode))
            self.canvas.configure(bg=ThemeManager.single_color(self.bg_color, self._appearance_mode))

            if self.border_color is None:
                self.canvas.itemconfig("border_parts", fill=ThemeManager.single_color(self.bg_color, self._appearance_mode),
                                       outline=ThemeManager.single_color(self.bg_color, self._appearance_mode))
            else:
                self.canvas.itemconfig("border_parts", fill=ThemeManager.single_color(self.border_color, self._appearance_mode),
                                       outline=ThemeManager.single_color(self.border_color, self._appearance_mode))

            self.canvas.itemconfig("inner_parts", fill=ThemeManager.single_color(self.fg_color, self._appearance_mode),
                                   outline=ThemeManager.single_color(self.fg_color, self._appearance_mode))

            if self.progress_color is None:
                self.canvas.itemconfig("progress_parts", fill=ThemeManager.single_color(self.fg_color, self._appearance_mode),
                                       outline=ThemeManager.single_color(self.fg_color, self._appearance_mode))
            else:
                self.canvas.itemconfig("progress_parts", fill=ThemeManager.single_color(self.progress_color, self._appearance_mode),
                                       outline=ThemeManager.single_color(self.progress_color, self._appearance_mode))

            self.canvas.itemconfig("slider_parts", fill=ThemeManager.single_color(self.button_color, self._appearance_mode),
                                   outline=ThemeManager.single_color(self.button_color, self._appearance_mode))

            if self.state == tkinter.DISABLED:
                self.text_label.configure(fg=(ThemeManager.single_color(self.text_color_disabled, self._appearance_mode)))
            else:
                self.text_label.configure(fg=ThemeManager.single_color(self.text_color, self._appearance_mode))

            self.text_label.configure(bg=ThemeManager.single_color(self.bg_color, self._appearance_mode))

    def toggle(self, event=None):
        """Flip the switch (unless disabled), update the variable, run command."""
        if self.state != tkinter.DISABLED:
            self.check_state = not self.check_state

            self.draw(no_color_updates=True)

            if self.variable is not None:
                # block the write-trace so it doesn't re-trigger select/deselect
                self.variable_callback_blocked = True
                self.variable.set(self.onvalue if self.check_state is True else self.offvalue)
                self.variable_callback_blocked = False

            if self.command is not None:
                self.command()

    def select(self, from_variable_callback=False):
        """Turn the switch on (always allowed when driven by the variable)."""
        if self.state != tkinter.DISABLED or from_variable_callback:
            self.check_state = True

            self.draw(no_color_updates=True)

            if self.variable is not None and not from_variable_callback:
                self.variable_callback_blocked = True
                self.variable.set(self.onvalue)
                self.variable_callback_blocked = False

    def deselect(self, from_variable_callback=False):
        """Turn the switch off (always allowed when driven by the variable)."""
        if self.state != tkinter.DISABLED or from_variable_callback:
            self.check_state = False

            self.draw(no_color_updates=True)

            if self.variable is not None and not from_variable_callback:
                self.variable_callback_blocked = True
                self.variable.set(self.offvalue)
                self.variable_callback_blocked = False

    def get(self):
        """Return onvalue when the switch is on, otherwise offvalue."""
        return self.onvalue if self.check_state is True else self.offvalue

    def on_enter(self, event=0):
        self.hover_state = True
        if self.state != tkinter.DISABLED:
            self.canvas.itemconfig("slider_parts", fill=ThemeManager.single_color(self.button_hover_color, self._appearance_mode),
                                   outline=ThemeManager.single_color(self.button_hover_color, self._appearance_mode))

    def on_leave(self, event=0):
        self.hover_state = False
        self.canvas.itemconfig("slider_parts", fill=ThemeManager.single_color(self.button_color, self._appearance_mode),
                               outline=ThemeManager.single_color(self.button_color, self._appearance_mode))

    def variable_callback(self, var_name, index, mode):
        # mirror external variable writes into the widget state
        if not self.variable_callback_blocked:
            if self.variable.get() == self.onvalue:
                self.select(from_variable_callback=True)
            elif self.variable.get() == self.offvalue:
                self.deselect(from_variable_callback=True)

    def configure(self, require_redraw=False, **kwargs):
        """Update widget options in place; unknown options pass to the base."""
        if "text" in kwargs:
            self.text = kwargs.pop("text")
            self.text_label.configure(text=self.text)

        if "text_font" in kwargs:
            self.text_font = kwargs.pop("text_font")
            self.text_label.configure(font=self.apply_font_scaling(self.text_font))

        if "state" in kwargs:
            self.state = kwargs.pop("state")
            self.set_cursor()
            require_redraw = True

        if "fg_color" in kwargs:
            self.fg_color = kwargs.pop("fg_color")
            require_redraw = True

        if "progress_color" in kwargs:
            new_progress_color = kwargs.pop("progress_color")
            if new_progress_color is None:
                self.progress_color = self.fg_color
            else:
                self.progress_color = new_progress_color
            require_redraw = True

        if "button_color" in kwargs:
            self.button_color = kwargs.pop("button_color")
            require_redraw = True

        if "button_hover_color" in kwargs:
            self.button_hover_color = kwargs.pop("button_hover_color")
            require_redraw = True

        if "border_color" in kwargs:
            self.border_color = kwargs.pop("border_color")
            require_redraw = True

        if "border_width" in kwargs:
            self.border_width = kwargs.pop("border_width")
            require_redraw = True

        if "command" in kwargs:
            self.command = kwargs.pop("command")

        if "textvariable" in kwargs:
            self.textvariable = kwargs.pop("textvariable")
            self.text_label.configure(textvariable=self.textvariable)

        if "variable" in kwargs:
            # detach the trace from the old variable before swapping it out
            if self.variable is not None and self.variable != "":
                self.variable.trace_remove("write", self.variable_callback_name)

            self.variable = kwargs.pop("variable")

            if self.variable is not None and self.variable != "":
                self.variable_callback_name = self.variable.trace_add("write", self.variable_callback)
                self.check_state = self.variable.get() == self.onvalue
                require_redraw = True

        super().configure(require_redraw=require_redraw, **kwargs)
|
# def char_codes(string):
# # result = list(string)
# result = list(string).ord()
# return result
#
# print(char_codes('Create'))
# def char_codes(string):
# char = list(string)
# result = []
#
# for i in char:
# result.append(ord(i))
# return result
#
# print(char_codes('Create'))
def string(char_codes):
    """Convert a sequence of Unicode code points to the string they spell.

    The original had a syntax error (``result + = ...``) and built the
    string by repeated concatenation; ``str.join`` over a generator does it
    in one pass.
    """
    return "".join(chr(code) for code in char_codes)


print(string([67, 114, 101, 97, 116, 101]))
counts = [
    {'school_class': '4a', 'scores': [3, 4, 5, 5, 2]},
    {'school_class': '4b', 'scores': [2, 3, 4, 5, 4]},
    {'school_class': '4c', 'scores': [3, 4, 2, 2, 2]}
]

# Running totals across the whole school.
summa_school = 0
quantity = 0
for record in counts:
    scores = record['scores']
    summa_class = sum(scores)
    quantity += len(scores)
    print('Среднее по классу: ', summa_class / len(scores))
    summa_school += summa_class
print('Среднее по школе:', summa_school / quantity)
import scrapy
from scrapy.crawler import CrawlerProcess
class TrainSpider(scrapy.Spider):
    # Spider that walks a paginated listing by repeatedly following the
    # "next page" link; page content itself is not extracted yet.
    name = "trip"
    start_urls = ['http://baike.wdzj.com/list-letter-a-1.html']

    def parse(self, response):
        """Follow the pagination chain.

        Grabs the href of the "››" (next-page) anchor inside the #fenye
        pager and re-schedules this same callback until no link remains.
        """
        next_page = response.xpath('//div[@id="fenye"]/a[contains(text(),"››")]/@href').extract_first()
        print(next_page)
        if next_page is not None:
            next_page = response.urljoin(next_page)
            print(next_page)
            yield scrapy.Request(next_page, callback=self.parse)
# Run the spider in-process (no scrapy project needed) with a fixed UA.
process = CrawlerProcess({
    'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'
})
process.crawl(TrainSpider)
process.start()  # the script will block here until the crawling is finished
# %%
# import scrapy
# from scrapy.crawler import CrawlerProcess
#
# class TrainSpider()
|
import pandas as pd

# NOTE(review): hard-coded absolute Windows path — this breaks on any other
# machine; consider a relative path or a command-line argument.
Q1_2015 = pd.read_csv("C:/Users/Daniel/Desktop/Capital Bike Share Data/2015-Q1-Trips-History-Data.csv")
|
import math

# Solve a*x^2 + b*x + c = 0 for integer coefficients read from stdin.
a = int(input('Nhap a = '))
b = int(input('Nhap b = '))
c = int(input('Nhap c = '))
if a == 0:
    # Degenerate case: the equation is linear, b*x + c = 0.
    print('phuong trinh bac 2 thanh bac 1 bx+c=0 ')
    if b == 0:
        # Original printed "vsn" (infinitely many solutions) for every
        # b == 0, but 0*x + c = 0 holds for all x only when c is also 0;
        # otherwise there is no solution.
        if c == 0:
            print('phuong trinh vsn')
        else:
            print('phuong trinh vo nghiem')
    else:
        print('phuong trinh co 1 nghiem ', (-c / b))
else:
    deta = b * b - 4 * a * c  # discriminant
    if deta < 0:
        print('phuong trinh vo nghiem')
    elif deta == 0:
        print('phuong trinh co 1 nghiem', (-b / (2 * a)))
    else:
        x1 = (-b + math.sqrt(deta)) / (2 * a)
        x2 = (-b - math.sqrt(deta)) / (2 * a)
        print('phuong trinh co 2 nghiem x1 = {}, x2 = {}'.format(x1, x2))
# for i in range(2000, 3201):
#     if (i % 7 == 0) and (i % 5 != 0):
#         print(i, end=' , ')
from DnaData import get_db
class Batchlist:
    """Collects and prints the batch names found in the DNA database."""

    def __init__(self):
        self.__list_all_batch = []

    def execute(self):
        """Append every name under ``data["batch"]`` and print the list.

        The original used a bare ``except:``, which also swallowed
        SystemExit/KeyboardInterrupt and hid any bug inside the loop; only
        the lookup of the "batch" key is expected to fail, so catch the
        specific errors from that access.
        """
        data = get_db()
        try:
            batch_names = data["batch"]
        except (KeyError, TypeError):
            print("no such batch")
            return
        for name in batch_names:
            self.__list_all_batch.append(name)
        print(self.__list_all_batch)
|
#
# @lc app=leetcode.cn id=1115 lang=python
#
# [1115] 交替打印FooBar
#
# @lc code=start
import threading
empty = threading.Semaphore(1) # empty信号量初始值设为1 空缓冲区数量
full = threading.Semaphore(0) # full 信号量初始值设为0 满缓冲区数量
'''信号量为0时,不可被减,同时信号量不设上限
所以需要两个信号量empty、full共同监测两个边界[0,1]'''
class FooBar(object):
def __init__(self, n):
self.n = n
def foo(self, printFoo):
for i in range(self.n):
empty.acquire() # empty-1,申请一个空缓冲区,有空位时应执行生产者活动
printFoo()
full.release() # full+1,释放一个满缓冲区
def bar(self, printBar):
for i in range(self.n):
full.acquire() # full-1, 申请一个满缓冲区,当缓冲区有商品时才能实现消费者行为
printBar()
empty.release() # empty+1,释放一个空缓冲区
# Demo: print Foo/Bar 10 times each, strictly alternating (Foo first).
test = FooBar(10)


def printFoo():
    print("Foo")


def printBar():
    print("Bar")


thread1 = threading.Thread(target=test.foo, args=(printFoo,))
thread2 = threading.Thread(target=test.bar, args=(printBar,))
thread1.start()
thread2.start()
# @lc code=end
|
import argparse
import time
from copy import deepcopy
from pathlib import Path
import numpy as np
import pandas as pd
import sklearn
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
import qiqc
from qiqc.datasets import load_qiqc, build_datasets
from qiqc.preprocessing.modules import load_pretrained_vectors
from qiqc.training import classification_metrics, ClassificationResult
from qiqc.utils import set_seed, load_module
def main(args=None):
    """Parse the --modelfile argument, load its modules, and run training."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--modelfile', '-m', type=Path, required=True)
    known_args, _ = parser.parse_known_args(args)
    modules = load_module(known_args.modelfile)
    config = modules.ExperimentConfigBuilder().build(args=args)
    # Ask for confirmation before clobbering a previous run's output dir.
    qiqc.utils.rmtree_after_confirmation(config.outdir, config.test)
    train(config, modules)
def train(config, modules):
    """End-to-end training pipeline: preprocess text, build vocabulary and
    embedding features, train one model per CV fold with per-epoch snapshots,
    ensemble the best snapshots, optionally evaluate on a holdout split, and
    write submission.csv.  Returns a dict of summary scores."""
    print(config)
    start = time.time()
    set_seed(config.seed)
    config.outdir.mkdir(parents=True, exist_ok=True)
    # Resolve the experiment module's pluggable components once, up front.
    build_model = modules.build_model
    Preprocessor = modules.Preprocessor
    TextNormalizer = modules.TextNormalizer
    TextTokenizer = modules.TextTokenizer
    WordEmbeddingFeaturizer = modules.WordEmbeddingFeaturizer
    WordExtraFeaturizer = modules.WordExtraFeaturizer
    SentenceExtraFeaturizer = modules.SentenceExtraFeaturizer
    Ensembler = modules.Ensembler
    train_df, submit_df = load_qiqc(n_rows=config.n_rows)
    datasets = build_datasets(train_df, submit_df, config.holdout, config.seed)
    train_dataset, test_dataset, submit_dataset = datasets
    print('Tokenize texts...')
    preprocessor = Preprocessor()
    normalizer = TextNormalizer(config)
    tokenizer = TextTokenizer(config)
    train_dataset.tokens, test_dataset.tokens, submit_dataset.tokens = \
        preprocessor.tokenize(datasets, normalizer, tokenizer)
    print('Build vocabulary...')
    vocab = preprocessor.build_vocab(datasets, config)
    print('Build token ids...')
    train_dataset.tids, test_dataset.tids, submit_dataset.tids = \
        preprocessor.build_tokenids(datasets, vocab, config)
    print('Build sentence extra features...')
    sentence_extra_featurizer = SentenceExtraFeaturizer(config)
    train_dataset._X2, test_dataset._X2, submit_dataset._X2 = \
        preprocessor.build_sentence_features(
            datasets, sentence_extra_featurizer)
    # Side-effect only: move each dataset's tensors to the target device.
    [d.build(config.device) for d in datasets]
    print('Load pretrained vectors...')
    pretrained_vectors = load_pretrained_vectors(
        config.use_pretrained_vectors, vocab.token2id, test=config.test)
    print('Build word embedding matrix...')
    word_embedding_featurizer = WordEmbeddingFeaturizer(config, vocab)
    embedding_matrices = preprocessor.build_embedding_matrices(
        datasets, word_embedding_featurizer, vocab, pretrained_vectors)
    print('Build word extra features...')
    word_extra_featurizer = WordExtraFeaturizer(config, vocab)
    word_extra_features = word_extra_featurizer(vocab)
    print('Build models...')
    # One independent feature set / model per CV fold.
    word_features_cv = [
        preprocessor.build_word_features(
            word_embedding_featurizer, embedding_matrices, word_extra_features)
        for i in range(config.cv)]
    models = [
        build_model(
            config, word_features, sentence_extra_featurizer.n_dims
        ) for word_features in word_features_cv]
    print('Start training...')
    splitter = sklearn.model_selection.StratifiedKFold(
        n_splits=config.cv, shuffle=True, random_state=config.seed)
    train_results, valid_results = [], []
    best_models = []
    for i_cv, (train_indices, valid_indices) in enumerate(
            splitter.split(train_dataset.df, train_dataset.df.target)):
        # cv_part allows training on only the first few folds.
        if config.cv_part is not None and i_cv >= config.cv_part:
            break
        train_tensor = train_dataset.build_labeled_dataset(train_indices)
        valid_tensor = train_dataset.build_labeled_dataset(valid_indices)
        valid_iter = DataLoader(
            valid_tensor, batch_size=config.batchsize_valid)
        model = models.pop(0)
        model = model.to_device(config.device)
        model_snapshots = []
        optimizer = torch.optim.Adam(model.parameters(), config.lr)
        train_result = ClassificationResult('train', config.outdir, str(i_cv))
        valid_result = ClassificationResult('valid', config.outdir, str(i_cv))
        batchsize = config.batchsize
        for epoch in range(config.epochs):
            # Optionally double the batch size at configured epochs.
            if epoch in config.scale_batchsize:
                batchsize *= 2
                print(f'Batchsize: {batchsize}')
            epoch_start = time.time()
            sampler = None
            train_iter = DataLoader(
                train_tensor, sampler=sampler, drop_last=True,
                batch_size=batchsize, shuffle=sampler is None)
            _summary = []
            # Training loop
            for i, batch in enumerate(
                    tqdm(train_iter, desc='train', leave=False)):
                model.train()
                optimizer.zero_grad()
                loss, output = model.calc_loss(*batch)
                loss.backward()
                optimizer.step()
                train_result.add_record(**output)
            train_result.calc_score(epoch)
            _summary.append(train_result.summary.iloc[-1])
            # Validation loop
            if epoch >= config.validate_from:
                for i, batch in enumerate(
                        tqdm(valid_iter, desc='valid', leave=False)):
                    model.eval()
                    loss, output = model.calc_loss(*batch)
                    valid_result.add_record(**output)
                valid_result.calc_score(epoch)
                _summary.append(valid_result.summary.iloc[-1])
                # Snapshot the model (with its tuned threshold) each
                # validated epoch; the best snapshots feed the ensembler.
                _model = deepcopy(model)
                _model.threshold = valid_result.summary.threshold[epoch]
                model_snapshots.append(_model)
            summary = pd.DataFrame(_summary).set_index('name')
            epoch_time = time.time() - epoch_start
            # Text progress bar: one '#' per finished fold.
            pbar = '#' * (i_cv + 1) + '-' * (config.cv - 1 - i_cv)
            tqdm.write(f'\n{pbar} cv: {i_cv} / {config.cv}, epoch {epoch}, '
                       f'time: {epoch_time}')
            tqdm.write(str(summary))
        train_results.append(train_result)
        valid_results.append(valid_result)
        # Keep the top-n snapshots of this fold by validation F-beta.
        best_indices = valid_result.summary.fbeta.argsort()[::-1]
        best_models.extend([model_snapshots[i] for i in
                            best_indices[:config.ensembler_n_snapshots]])
    # Build ensembler
    train_X, train_X2, train_t = \
        train_dataset.X, train_dataset.X2, train_dataset.t
    ensembler = Ensembler(config, best_models, valid_results)
    ensembler.fit(train_X, train_X2, train_t)
    scores = dict(
        valid_fbeta=np.array([r.best_fbeta for r in valid_results]).mean(),
        valid_epoch=np.array([r.best_epoch for r in valid_results]).mean(),
        threshold_cv=ensembler.threshold_cv,
        threshold=ensembler.threshold,
        elapsed_time=time.time() - start,
    )
    if config.holdout:
        # Evaluate on the holdout split with both threshold choices, plus the
        # theoretically optimal threshold.
        test_X, test_X2, test_t = \
            test_dataset.X, test_dataset.X2, test_dataset._t
        y, t = ensembler.predict_proba(test_X, test_X2), test_t
        y_pred = y > ensembler.threshold
        y_pred_cv = y > ensembler.threshold_cv
        result = classification_metrics(y_pred, t)
        result_cv = classification_metrics(y_pred_cv, t)
        result_theoretical = classification_metrics(y, t)
        scores.update(dict(
            test_fbeta=result['fbeta'],
            test_fbeta_cv=result_cv['fbeta'],
            test_fbeta_theoretical=result_theoretical['fbeta'],
            test_threshold_theoretical=result_theoretical['threshold'],
        ))
    print(scores)
    # Predict submit datasets
    submit_y = ensembler.predict(submit_dataset.X, submit_dataset.X2)
    submit_df['prediction'] = submit_y
    submit_df = submit_df[['qid', 'prediction']]
    submit_df.to_csv(config.outdir / 'submission.csv', index=False)
    return scores
if __name__ == '__main__':
    main()
|
# For each of t query strings, decide whether "hackerrank" occurs in it as a
# subsequence (letters in order, not necessarily adjacent).
for _ in range(int(input())):
    # Expected letters, reversed so the next one can be popped off the end
    # in O(1).
    pending = list('hackerrank')[::-1]
    for ch in input():
        if not pending:
            # All letters already matched; rest of the string is irrelevant.
            break
        if ch == pending[-1]:
            pending.pop()
    # Matched completely iff nothing is left pending.
    print('NO' if pending else 'YES')
#!/usr/dev/env python3
#coding utf-8
# Minimal interactive login demo against hard-coded credentials.
usename = input("account:")
password = input("password:")
ok = (usename == 'hy') and (password == '123456')
if ok:
    print('auth success')
else:
    print('auth failed, please try again')
from django.db import models
from .region import Region
class State(models.Model):
    """A state (federative unit) that belongs to one region."""
    # User-facing verbose names are intentionally in Portuguese.
    name = models.CharField('Nome', blank=False, max_length=20)
    initials = models.CharField('Sigla', blank=False, max_length=2, null=True)
    # Deleting a Region cascades to its states; reverse accessor: region.states.
    region = models.ForeignKey(Region, on_delete=models.CASCADE, related_name="states", null=True,
                               verbose_name="Região")
    def __str__(self):
        # Admin/shell representation: the state's name.
        return self.name
    class Meta:
        verbose_name = 'Estado'
        verbose_name_plural = 'Estados'
        ordering = ['name']
|
# Dictionary keys cannot be duplicated:
# assigning with an existing key simply overwrites the stored value.
cabinet = {3:"유재석", 100: "김태호"}
print(cabinet[3])
print(cabinet.get(3))
# print(cabinet[5]) # would abort the program: there is no key 5
print(cabinet.get(5)) # .get() returns None instead of raising for a missing key
print(cabinet.get(5, "사용가능")) # .get() with a default returns it when the key is missing
print(3 in cabinet) # True if key 3 exists in the dict
# New customers arrive
str_cabinet = {"A-3":"유재석", "B-100": "김태호"}
print(str_cabinet)
str_cabinet["A-3"] = "김종국"
str_cabinet["C-20"] = "조세호"
print(str_cabinet)
# A customer leaves
del str_cabinet["A-3"]
print(str_cabinet)
# Print only the keys / values / key-value pairs
print(str_cabinet.keys())
print(str_cabinet.values())
print(str_cabinet.items())
# The bathhouse closes: empty the whole dict
str_cabinet.clear()
print(str_cabinet)
|
from mymed.setup.loggers import LOGGERS  # importing also initializes the loggers
log = LOGGERS.Setup
# BUG FIX: the original combined an f-string with .format()
# (f'...{0:<35}...'.format(...)).  The f-string consumed the placeholders and
# formatted the literal integers 0/1/2, so the .format() arguments were
# silently ignored.  Dropping the f-prefix lets .format() fill them in.
log.debug('__file__={0:<35} | __name__={1:<20} | __package__={2:<20}'.format(__file__, __name__, str(__package__)))
if any(__name__ == case for case in ['__main__', 'mymed']):
    from mymed.app import create_app
    app = create_app()
    if __name__ == '__main__':
        # Local development server only; not for production use.
        app.run(host='127.0.0.1', port=5000)
|
import pymx
import numpy as np
# Check inversion-symmetry eigenvalues (parities) of Bi2Se3 tight-binding
# states at the Gamma, L and X points.
scfoutfile = 'Bi2Se3.scfout'
pm = pymx.PyMX(scfoutfile,ver='3.8')
pm.default_setting()
center = (pm.a1+pm.a2+pm.a3)/2.
I_mat = pymx.inversion_mat()
# High-symmetry k-points in units of the reciprocal lattice vectors.
gamma = 0.*pm.b1
L = 0.5*pm.b1
X = 0.5*pm.b1+0.5*pm.b2
print('inversion pair')
print(pm.Crystal_symmetry_pair(I_mat,center=center))
# BUG FIX: the three expectation-value prints below were Python 2 print
# statements (`print expr`), a SyntaxError in this otherwise Python 3 file;
# converted to print() calls.
print('gamma')
INV = pm.Crystal_symmetry_mat(I_mat,gamma,center=center)
w,v = pm.TB_eigen(gamma)
for i in range(0,48,2):
    c = v[:,i]
    print(np.linalg.multi_dot([c.conjugate(),INV,c]))
print('L')
INV = pm.Crystal_symmetry_mat(I_mat,L,center=center)
w,v = pm.TB_eigen(L)
for i in range(0,48,2):
    c = v[:,i]
    print(np.linalg.multi_dot([c.conjugate(),INV,c]))
print('X')
INV = pm.Crystal_symmetry_mat(I_mat,X,center=center)
w,v = pm.TB_eigen(X)
for i in range(0,48,2):
    c = v[:,i]
    print(np.linalg.multi_dot([c.conjugate(),INV,c]))
print('\nparity of Bi2Se3 at Gamma point from DOI:10.1038/NPHYS1270 :')
print('+-+-+-+-+-+--+(HOMO);-(LUMO)')
|
import scipy.io as sio
import numpy as np
import json
import heapq
def generate_axis(wordcnt_path, n, word_tag=None):
    """Return the line indices of the n highest-weighted words in a file.

    Each line of *wordcnt_path* is space-separated; field 1 is a numeric
    weight and field 2 a word/tag.  When *word_tag* is non-empty, only lines
    whose field-2 token is contained in it are considered.

    Returns a numpy array of file-line indices, ordered by descending weight.
    Raises ValueError when fewer than n candidate lines exist.
    """
    with open(wordcnt_path) as f:
        wordcnt = f.readlines()
    indices = []
    values = []
    for i, raw in enumerate(wordcnt):
        line = raw.split('\n')[0]  # strip only the trailing newline
        tokens = line.split(' ')
        # Keep the line unless a (truthy) tag filter is active and misses.
        if not word_tag or tokens[2] in word_tag:
            indices.append(i)
            values.append(float(tokens[1]))
    if n > len(indices):
        # BUG FIX: previously exit(0), which silently terminated the whole
        # process with a *success* status; raise so callers see the failure.
        raise ValueError(
            'requested %d words but only %d candidates available'
            % (n, len(indices)))
    indices = np.array(indices).reshape(-1, )
    print(indices.shape)
    values = np.array(values).reshape(-1, )
    # Positions (into `values`) of the n largest weights, descending.
    n_largest = heapq.nlargest(n, range(len(indices)), values.take)
    return indices[n_largest]
if(__name__ == '__main__'):
    res = generate_axis('/data/zj/local_pyproject/TF-IDF/TF_IDF_TAG.txt', 1000)
    print(res)
|
from keras.datasets import mnist
import keras.utils.np_utils as ku
import keras.models as models
import keras.layers as layers
from keras import regularizers
import numpy as np
import matplotlib.pyplot as plt
# Train a one-hidden-layer dense network on MNIST and plot the loss curves.
(tr_im, tr_label), (te_im, te_label) = mnist.load_data()
class_names = [str(digit) for digit in range(10)]
# Preprocess: flatten each 28x28 image into a 784-vector scaled to [0, 1].
train_im = tr_im.reshape((60000, 28*28)).astype('float32') / 255
test_im = te_im.reshape((10000, 28*28)).astype('float32') / 255
# One-hot encode the labels for the 10-way softmax output.
train_label = ku.to_categorical(tr_label)
test_label = ku.to_categorical(te_label)
# Model: 512 ReLU units with L2 weight decay, then a softmax layer.
nn = models.Sequential()
nn.add(layers.Dense(512,
                    activation='relu',
                    input_shape=(28*28,),
                    kernel_regularizer=regularizers.l2(0.01)))
nn.add(layers.Dense(10, activation='softmax'))
nn.compile(optimizer='rmsprop',
           loss='categorical_crossentropy',
           metrics=['accuracy'])
# Train, validating against the test set after every epoch.
history = nn.fit(train_im, train_label,
                 validation_data=(test_im, test_label),
                 epochs=50, batch_size=120)
# Final evaluation on the held-out test set.
print(nn.evaluate(test_im, test_label))
# Plot training vs. validation loss per epoch.
train_loss = history.history['loss']
test_loss = history.history['val_loss']
x = [*range(1, len(test_loss) + 1)]
plt.plot(x, test_loss, color='red', label='test loss')
plt.plot(x, train_loss, label='traning loss')
plt.show()
|
##############################################################################
# Copyright (c) 2016 Max Breitenfeldt and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-05-05 08:15
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds the `dev_pod` flag to Resource and relaxes its owner/slave FKs.

    Auto-generated by Django; applied migrations should not be hand-edited.
    """
    dependencies = [
        ('dashboard', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='resource',
            name='dev_pod',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='resource',
            name='owner',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='user_lab_owner', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='resource',
            name='slave',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='jenkins.JenkinsSlave'),
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 29 15:53:49 2020
@author: karth
"""
#---------importing packages---------------
import re
import pandas as pd
import matplotlib.pyplot as plt
import emoji
import numpy as np
#---------reading file---------------
# Use a context manager so the handle is closed even if read() raises;
# the original opened the file and relied on an explicit close().
with open(r"C:\\Users\\karth\\Desktop\\WhatsApp Chat.txt", mode='r', encoding="utf8") as file:
    chat = file.read()
print(chat)
#---------using regular expressions to extract fields from the raw chat---------------
# Get date
date_regex=re.compile(r'(\d+/\d+/\d+)')#Match digit groups: [0-9]
date=date_regex.findall(chat)
print(date)
# Get time
# NOTE(review): the alternation binds the whole left side, so this matches
# "H:MM am" OR the bare literal "pm" — probably r'\d{1,2}:\d{2} (?:am|pm)'
# was intended; confirm against the chat export format before changing.
time_regex=re.compile(r'(\d{1,2}:\d{2} am|pm)') #(\d{1,2}) - captures up to two digits (i.e. one or two digits).
time=time_regex.findall(chat)
# Get Users
user_regex=re.compile(r'-(.*?):')
user=user_regex.findall(chat)
#\ Used to drop the special meaning of character following it (discussed below)
#[] Represent a character class
#^ Matches the beginning
#$ Matches the end
#. Matches any character except newline
#? Matches zero or one occurrence.
#| Means OR (Matches with any of the characters separated by it.
#* Any number of occurrences (including 0 occurrences)
#+ One or more occurrences
#{} Indicate number of occurrences of a preceding RE to match.
#() Enclose a group of REs
# Get Message
message_regex=re.compile(r'([^:]+):?$')
me_regex=re.compile(r"(\n)(?<=)(\d+/\d+/\d+)(.*)")
mess=me_regex.findall(chat)
message = [''.join(message_regex.findall(''.join(msg))).strip() for msg in mess]
# Zip date,time,user,message together
data=[]
for w,x,y,z in zip(date,time,user,message):
    data.append([str(w),str(x),str(y),str(z)])
print(data)
# Create DataFrame from the zipped rows above
df=pd.DataFrame(data,columns=("Date","Time","User","Message"))
print(df)
#-----------------Data Cleaning-------------------------
# Strip the leading "'<user>: " prefix from each message.
# NOTE(review): pandas>=2 no longer treats str.replace patterns as regex by
# default; this relies on the old regex=True default — verify the pandas
# version in use.
df['Message']=df['Message'].str.replace('\'(.*?): ','')
#Date: unparseable dates become NaT instead of raising
df['Date'] = pd.to_datetime(df['Date'], errors='coerce')
#df['Date'] = pd.to_datetime(df['Date'])
# Get Day, Month, Year from Date
df['Day']=df['Date'].dt.day
df['Month']=df['Date'].dt.month
df['Year']=df['Date'].dt.year
# Message words: split on runs of non-word characters
df['Words'] = df['Message'].str.strip().str.split('[\W_]+')
# Word length (minus 2, presumably to discount date/user tokens — TODO confirm)
df['Word Length'] = df['Words'].apply(len)-2
# Flag messages that are shared media
df['Media']=df['Message'].str.contains('<Media omitted>')
# Save the DataFrame to a csv file
df.to_csv("whatsapps.csv")
#Returns the first 5 rows of the dataframe
print(df.head())
#The shape attribute of pandas. DataFrame stores the number of rows and columns as a tuple (number of rows, number of columns)
print(df.shape[0])
#To display number of columns
print(df.columns)
#---------------------------------------------------------------------
#Summary: number of messages per date
dates=df.groupby('Date')['Date'].count()
print(dates)
#Summary by no. of msg by user
#message=df.groupby('User')['Message'].count()
#print(message)
#Summary: number of messages per month
month =df.groupby(['Month'])['Month'].count()
print(month)
#Summary: number of messages per day of month
Day=df.groupby('Day')['Day'].count()
print(Day)
#Summary: total word length per user
char_wordlength=df.groupby(['User'])['Word Length'].sum()
print(char_wordlength)
#Count of media messages (sum of the boolean Media flag per group)
media=df.groupby(['Media'])['Media'].sum()
print(media)
#---------------Plotting Visuals---------------------------
# Total No.of messages by user
plt.style.use('ggplot')
users=df.groupby('User')['Message'].count().nlargest(5)
print(users)
def bar_chart(users):
    """Bar chart of the message counts for the top users."""
    ax = users.plot(kind='bar', color = ['limegreen','darkorange','gold', 'yellow', 'red'], fontsize=12)
    # BUG FIX: removed `ax.seb_ackground_color = 'black'` — a typo'd
    # attribute assignment that silently created a junk attribute and had
    # no visual effect; the real background is set via set_facecolor below.
    ax.set_title("Total No.of messages by user\n", fontsize=16, fontweight='bold')
    ax.set_xlabel("Names", fontsize=10,fontweight='bold')
    ax.set_ylabel("No.of Messages", fontsize=10,fontweight='bold')
    ax.set_facecolor('snow')
    #ax.legend("User Names")
    # BUG FIX: plt.show('users') passed a bogus positional argument
    # (interpreted as `block` / rejected by recent Matplotlib).
    plt.show()
    #plt.savefig('whatsapp.png')
bar_chart(users)
# No. of Messages by Month
plt.style.use('ggplot')
# (The original computed this identical groupby twice in a row.)
message=df.groupby('Month')['Message'].count()
def line_chart(message):
    """Line chart of the message count per month."""
    ax = message.plot(kind='line', color = ['deeppink'], fontsize=12)
    # BUG FIX: removed `ax.seb_ackground_color = 'black'` — typo'd attribute,
    # silent no-op; set_facecolor below sets the real background.
    ax.set_title("No. of Messages by Month\n", fontsize=16, fontweight='bold')
    ax.set_xlabel("Month", fontsize=10,fontweight='bold')
    ax.set_ylabel("No.of Messages", fontsize=10,fontweight='bold')
    ax.set_facecolor('snow')
    # BUG FIX: plt.show('message') passed a bogus positional argument.
    plt.show()
    #plt.savefig('whatsapp.png')
line_chart(message)
#count emoji
#message=df.groupby('User')['Message'].count().nlargest(5)
def extract_emojis(message):
    # NOTE(review): the `message` parameter is ignored — the function always
    # scans the whole df['Message'] column, so every key in the loop below
    # gets the same emoji list.  Confirm whether per-user filtering was
    # intended.
    emojis=[]
    for string in df['Message']:
        my_str = str(string)
        for each in my_str:
            # NOTE(review): emoji.UNICODE_EMOJI was removed in emoji>=2.0
            # (use emoji.EMOJI_DATA / emoji.is_emoji there); this requires
            # emoji<2.0.
            if each in emoji.UNICODE_EMOJI:
                emojis.append(each)
    return emojis
emoji_dict={}
for keys in message.keys():
    print(keys)
    emoji_dict[keys] = extract_emojis(keys)
    emoji_df = pd.DataFrame(emoji_dict[keys])
    # Top-10 most frequent emojis (same list for every user; see NOTE above).
    print(emoji_df[0].value_counts()[:10])
#count of media per user
media_df=df[df['Media']==True]
media_per_user_group=media_df.groupby(['User'])['Media'].count().nlargest(4)
print(media_per_user_group)
def media_shared_pie(media_per_user_group):
    # Pie chart of media counts for the top-4 users.
    fig, ax = plt.subplots()
    # All-zero explode list: no slice is pulled out of the pie.
    explode=[]
    for i in np.arange(len(media_per_user_group)):
        explode.append(0)
    ax = media_per_user_group.plot(kind='pie', colors = ['limegreen','darkorange','gold','red'], explode=explode, fontsize=10, autopct='%1.1f%%', startangle=180)
    ax.axis('equal')
    ax.set_title(" No. of Media shared by Users\n", fontsize=18)
    plt.show()
media_shared_pie(media_per_user_group)
#count of text msg per user (Media == False rows)
media_df=df[df['Media']==False]
media_per_user_group=media_df.groupby(['User'])['Media'].count().nlargest(4)
print(media_per_user_group)
# NOTE(review): this redefinition shadows the media_shared_pie above (only
# the title differs); the first definition has already been called, so
# behavior is unchanged, but consider a single parameterized function.
def media_shared_pie(media_per_user_group):
    fig, ax = plt.subplots()
    explode=[]
    for i in np.arange(len(media_per_user_group)):
        explode.append(0)
    ax = media_per_user_group.plot(kind='pie', colors = ['limegreen','darkorange','gold','red'], explode=explode, fontsize=10, autopct= '%1.1f%%', startangle=180)
    ax.axis('equal')
    ax.set_title("No. of Text Message by Users\n", fontsize=18)
    plt.show()
media_shared_pie(media_per_user_group)
#count of media per user
plt.style.use('ggplot')
media_per_user_group=media_df.groupby(['User'])['Media'].count().nlargest(4)
print(media_per_user_group)
# NOTE: this redefines bar_chart from above (the earlier one has already run).
def bar_chart(users):
    """Horizontal bar chart of media counts for the top users."""
    # BUG FIX: the function ignored its `users` parameter and plotted the
    # global media_per_user_group; since it is called with exactly that
    # object, using the parameter is behavior-identical and reusable.
    ax = users.plot(kind='barh', color = ['limegreen','darkorange','gold', 'yellow', 'red'], fontsize=12)
    # BUG FIX: removed the typo'd no-op `ax.seb_ackground_color = 'black'`.
    ax.set_title("No. of Media shared by Users\n", fontsize=16, fontweight='bold')
    ax.set_xlabel("Names", fontsize=10,fontweight='bold')
    ax.set_ylabel("No.of Messages", fontsize=10,fontweight='bold')
    ax.set_facecolor('snow')
    # BUG FIX: plt.show('media_per_user_group') passed a bogus positional arg.
    plt.show()
    #plt.savefig('whatsapp.png')
bar_chart(media_per_user_group)
|
# Operation names recognized by the trace helpers below.
LOG_SUM256 = 'sum256'
LOG_SHA256 = 'sha256'
LOG_BCONCAT = 'bconcat'
LOG_TO_BYTES = 'to_bytes'


# Print one "[CALL]" trace line for a helper invocation.
# Unknown `func` values (including LOG_SUM256) print nothing.
def clog(caller, func, msg):
    if func == LOG_SHA256:
        print(f'[CALL] {func}({msg})')
    elif func == LOG_BCONCAT:
        print(f'[CALL] {func}: {msg}')
    elif func == LOG_TO_BYTES:
        print('[CALL] ' + caller + '.' + func + '()')


# Trace the call, then print each result on its own "- " line,
# followed by one blank separator line.
def rlog(caller, func, msg, *result):
    clog(caller, func, msg)
    for item in result:
        print('-', item)
    print()
#!/usr/bin/env python3
import os, subprocess, sys, time
from subprocess import PIPE
def sleep(seconds):
    """Announce the wait on stdout, then block for `seconds` seconds."""
    message = f"Waiting {seconds} seconds"
    print(message)
    time.sleep(seconds)
def exec(cmd):
    """Echo and run `cmd`; return its stripped stdout.

    Raises AssertionError when the command exits non-zero.
    (Note: intentionally shadows the builtin `exec`, as callers rely on it.)
    """
    print("Executing", cmd)
    proc = subprocess.run(cmd, stdout=PIPE, stderr=sys.stderr)
    if proc.returncode == 0:
        return proc.stdout.decode().strip()
    raise AssertionError(f"Command `{cmd}` failed with exit code {proc.returncode}")
# CI driver: build the test flatpak, push it through a local flat-manager
# instance (build -> commit -> publish), then verify it installs.
REPO_DIR = "_repo"
# Build the flatpak app
exec(["flatpak-builder", "--force-clean", "--repo", REPO_DIR, "_flatpak", "tests/org.flatpak.FlatManagerCI.yml"])
# Generate a flat-manager token
os.environ["REPO_TOKEN"] = exec(["cargo", "run", "--bin=gentoken", "--", "--secret=secret", "--repo=stable"])
# Create a new build and save the repo URL
build_repo = exec(["./flat-manager-client", "create", "http://127.0.0.1:8080", "stable"])
# Push to the upload repo
exec(["./flat-manager-client", "push", build_repo, REPO_DIR])
# Commit to the build repo
exec(["./flat-manager-client", "commit", build_repo])
# Wait for that job to finish
sleep(10)
# Publish to the main repo
exec(["./flat-manager-client", "publish", build_repo])
# Wait for the repository to be updated
sleep(15)
# Make sure the app installs successfully
exec(["flatpak", "remote-add", "flat-manager", "http://127.0.0.1:8080/repo/stable", "--gpg-import=key.gpg"])
exec(["flatpak", "install", "-y", "flat-manager", "org.flatpak.FlatManagerCI"])
import pygame
import os
import sys
import random
import neat
# Window / game layout constants.
W_WIDTH = 1500
W_HEIGHT = 900
F_HEIGHT = 50   # height of the floor strip
BIRD_X = 300    # fixed horizontal position of every bird
pygame.init()
screen = pygame.display.set_mode((W_WIDTH, W_HEIGHT))
clock = pygame.time.Clock()
font = pygame.font.SysFont('Comic Sans MS', 30)
ff_font = pygame.font.SysFont('Comic Sans MS', 100)  # "fast forwarding" banner
pygame.display.set_caption("Flappy Bird")
# Mutable globals shared between the game loop and the NEAT callback.
curr_gen = 0
high_score = 0
curr_score = 0
fast_forward = False
class Floor:
    """The static strip of ground along the bottom of the window."""

    def __init__(self):
        self.width, self.height = W_WIDTH, F_HEIGHT
        self.x, self.y = 0, W_HEIGHT - F_HEIGHT

    def draw(self):
        # Solid blue rectangle spanning the full window width.
        bounds = pygame.Rect(self.x, self.y, self.width, self.height)
        pygame.draw.rect(screen, (0, 0, 255), bounds)
class Pipe:
    """A pipe pair (upper + lower) with a vertical gap, scrolling leftwards."""
    def __init__(self, x = 1600):
        self.width = 100
        self.gap_height = 300
        self.gap_buffer = 50  # min distance of the gap from top / floor
        self.x = x
        # Random vertical position of the gap's top edge.
        self.gap_offset = random.randint(self.gap_buffer, W_HEIGHT - self.gap_height - self.gap_buffer - F_HEIGHT)
    def size_width(self):
        # Width clipped to the on-screen portion of the pipe.
        if (self.x < 0):
            return self.width + self.x
        elif (self.x > W_WIDTH - self.width):
            return W_WIDTH - self.x
        else:
            return self.width
    def position_upper(self):
        # Top-left corner of the upper pipe (clamped to the screen edge).
        left = self.x if self.x > 0 else 0
        top = 0
        return (left, top)
    def position_lower(self):
        # Top-left corner of the lower pipe, just below the gap.
        left = self.x if self.x > 0 else 0
        top = self.gap_offset + self.gap_height
        return (left, top)
    def size_upper(self):
        height = self.gap_offset
        return (self.size_width(), height)
    def size_lower(self):
        # Lower pipe reaches from the gap's bottom edge down to the floor.
        height = W_HEIGHT - self.gap_height - self.gap_offset - F_HEIGHT
        return (self.size_width(), height)
    def draw(self):
        # Entirely off-screen to the right: nothing to draw yet.
        if (self.x > W_WIDTH):
            return
        pygame.draw.rect(screen, (0, 255, 0), pygame.Rect(self.position_upper(), self.size_upper()))
        pygame.draw.rect(screen, (0, 255, 0), pygame.Rect(self.position_lower(), self.size_lower()))
    def move(self, speed):
        # Scroll left by `speed`.  Returns 2 once fully off-screen left,
        # 1 once the pipe's right edge has passed the bird column, else 0.
        self.x -= speed
        if (self.x + self.width < 0):
            return 2
        if (self.x + self.width < BIRD_X):
            return 1
        return 0
class Bird:
    """A NEAT-controlled bird: physics state plus its genome's network."""
    def __init__(self, genome, config):
        self.width = 90
        self.height = 90
        self.x = BIRD_X
        self.term = 8    # terminal (maximum) fall velocity
        self.grav = 0.1  # gravity added to velocity each frame
        self.up = -6     # upward velocity applied by a jump
        self.color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
        self.y = 400
        self.vel = 0
        # NN Inputs: bird: velocity, height, pipe: gap offset, distance to next
        # NOTE(review): the comment above lists four inputs but decide() feeds
        # only three values to the network — confirm which is intended.
        self.brain = neat.nn.FeedForwardNetwork.create(genome, config)
        self.genome = genome
        self.genome.fitness = 0
    def move(self):
        # Small per-frame survival reward, then apply velocity and gravity.
        self.genome.fitness += 0.1
        self.y += self.vel
        if (self.vel < self.term):
            self.vel += self.grav
    def jump(self):
        self.vel = self.up
    def decide(self, pipe):
        # Ask the network whether to jump this frame.
        # NOTE(review): `pipe.x - self.x + self.width` evaluates as
        # (pipe.x - self.x) + self.width; confirm whether
        # pipe.x - (self.x + self.width) was intended.
        output = self.brain.activate((self.vel, pipe.gap_offset + pipe.gap_height - self.y, pipe.x - self.x + self.width))
        if (output[0] > 0.5):
            self.jump()
    def draw(self):
        pygame.draw.rect(screen, self.color, pygame.Rect(self.x, self.y, self.width, self.height))
class FlappyBird:
    """One generation of the game: pipes, floor, and the surviving birds."""
    def __init__(self, generation):
        self.next_pipes = [Pipe(), Pipe(2400)]  # pipes still ahead of the birds
        self.past_pipes = []                    # pipes the birds already passed
        self.pipespeed = 3
        self.birds = generation
        self.floor = Floor()
    def handle_events(self):
        """Process quit and space (toggle fast-forward) events."""
        global fast_forward
        for event in pygame.event.get():
            if (event.type == pygame.QUIT):
                sys.exit()
            elif (event.type == pygame.KEYDOWN):
                if (event.key == pygame.K_SPACE):
                    fast_forward = not fast_forward
                if (fast_forward == True):
                    # Show a static banner while frames are skipped.
                    screen.fill((255, 255, 255))
                    ff_text = ff_font.render(">>> Fast Forwarding >>>", False, (0, 0, 0))
                    screen.blit(ff_text, (200, 350))
                    pygame.display.update()
    def birds_decide(self):
        """Let each bird's network decide on the nearest upcoming pipe."""
        for bird in self.birds:
            bird.decide(self.next_pipes[0])
    def move_entities(self):
        """Advance pipes and birds one frame, updating scores and fitness."""
        global high_score, curr_score
        # BUG FIX: the original popped elements from past_pipes / next_pipes
        # while iterating the same lists, which skips the element after each
        # removal.  Iterate over snapshots and remove from the real lists.
        for pipe in list(self.past_pipes):
            if (pipe.move(self.pipespeed) == 2):
                # Pipe scrolled fully off-screen: recycle it.
                self.past_pipes.remove(pipe)
                self.next_pipes.append(Pipe())
        for pipe in list(self.next_pipes):
            if (pipe.move(self.pipespeed) == 1):
                # Pipe just passed the bird column: score and reward.
                curr_score += 1
                if (curr_score > high_score):
                    high_score = curr_score
                self.next_pipes.remove(pipe)
                self.past_pipes.append(pipe)
                for bird in self.birds:
                    bird.genome.fitness += 10
        for bird in self.birds:
            bird.move()
    def check_collision(self, bird):
        """True when `bird` hits a pipe or the floor."""
        for pipe in self.next_pipes:
            if (bird.x + bird.width > pipe.x and pipe.x + pipe.width > bird.x):
                if (bird.y < pipe.gap_offset or bird.y + bird.height > pipe.gap_offset + pipe.gap_height):
                    return True
        if (bird.y + bird.height > self.floor.y):
            return True
        return False
    def draw_entities(self):
        """Redraw the whole scene and the score/generation HUD."""
        global high_score
        screen.fill((255, 255, 255))
        for pipe in self.next_pipes:
            pipe.draw()
        for pipe in self.past_pipes:
            pipe.draw()
        for bird in self.birds:
            bird.draw()
        self.floor.draw()
        text = font.render("High Score: " + str(high_score), False, (0, 0, 0))
        screen.blit(text, (100, 100))
        text = font.render("Score: " + str(curr_score), False, (0, 0, 0))
        screen.blit(text, (100, 150))
        text = font.render("Generation: " + str(curr_gen), False, (0, 0, 0))
        screen.blit(text, (100, 200))
        pygame.display.update()
    def tick(self):
        clock.tick(120)
    def step(self):
        """Run one frame; returns True once every bird has died."""
        self.handle_events()
        self.birds_decide()
        self.move_entities()
        # BUG FIX: the original removed birds from self.birds while iterating
        # it, which skipped the bird following each removal and let it
        # survive a collision for one extra frame.  Rebuild the list instead.
        self.birds = [b for b in self.birds if not self.check_collision(b)]
        return len(self.birds) == 0
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def train(genomes, config):
    """NEAT fitness callback: play one generation of FlappyBird until every
    bird dies, accumulating fitness on each genome as a side effect."""
    global curr_gen, fast_forward, curr_score
    curr_score = 0
    birds = []
    for genome_id, genome in genomes:
        birds.append(Bird(genome, config))
    flappy = FlappyBird(generation = birds)
    while True:
        if (flappy.step()):
            break
        # Skip drawing/frame-limiting in fast-forward mode, except when the
        # run is doing well (score > 15) so progress stays visible.
        if (not fast_forward or curr_score > 15):
            flappy.draw_entities()
            flappy.tick()
    curr_gen += 1
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Load the NEAT config next to this script and evolve for up to 500
# generations, using train() as the per-generation fitness function.
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, 'config.txt')
config = neat.config.Config(neat.DefaultGenome, neat.DefaultReproduction, neat.DefaultSpeciesSet, neat.DefaultStagnation, config_path)
p = neat.Population(config)
p.add_reporter(neat.StdOutReporter(True))
p.add_reporter(neat.StatisticsReporter())
winner = p.run(train, 500)
# show final stats
print('\nBest genome:\n{!s}'.format(winner))
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import sys
import time
import threading
import matplotlib.pyplot as plt
import librosa
import librosa.display
import numpy as np
import wave
import pyaudio
def waveplot():
    """Analyze one track's tempo/beats and display its waveform."""
    # y is an audio vector of length sample_rate * duration
    y, sr = librosa.load("/Users/mash5/Documents/python3-workspace/python-rosa/music/1/mp3/0001.mp3", sr=None, mono=True)
    onset_env = librosa.onset.onset_strength(y=y, sr=sr, hop_length=512, aggregate=np.median)
    peaks = librosa.util.peak_pick(onset_env, 3, 3, 3, 5, 0.6, 10)
    tempo, beats = librosa.beat.beat_track(onset_envelope=onset_env, sr=sr, hop_length=512)
    times = librosa.frames_to_time(beats, sr=sr)
    # print(peaks)
    print(tempo)
    print(beats)
    print(times)
    plt.figure()
    # Draw the waveform.
    # NOTE(review): librosa.display.waveplot was removed in librosa>=0.10
    # (renamed waveshow); this code needs an older librosa.
    # librosa.display.waveplot(y, sr)
    librosa.display.waveplot(y, sr)
    # Show the waveform figure.
    plt.show()
def remix():
    """Load three tracks, trim them to the shortest length, sum them, and
    play the mix through PyAudio.  Blocks until playback finishes."""
    # Each y is an audio vector of length sample_rate * duration.
    y1, sr1 = librosa.load("/Users/mash5/Documents/python3-workspace/python-rosa/music/1/mp3/0001.mp3", sr=None)
    y2, sr2 = librosa.load("/Users/mash5/Documents/python3-workspace/python-rosa/music/2/mp3/0002.mp3", sr=None)
    y3, sr3 = librosa.load("/Users/mash5/Documents/python3-workspace/python-rosa/music/4/mp3/0003.mp3", sr=None)
    print(y1.size)
    print(y1.dtype)
    print(y2.size)
    print(y2.dtype)
    # Trim all three tracks to the length of the shortest one.
    size = min(y1.size, y2.size, y3.size)
    y1 = y1[0: size: 1]
    y2 = y2[0: size: 1]
    y3 = y3[0: size: 1]
    # Unweighted sum of the three tracks (unit gain each).
    y4 = y1 * 1 + y2 * 1 + y3 * 1
    # librosa.output.write_wav("/Users/mash5/Documents/python3-workspace/python-rosa/music/temp.wav", y3, sr3)
    p = pyaudio.PyAudio()
    stream = p.open(format=pyaudio.paFloat32, channels=1, rate=sr1, output=True)
    # BUG FIX: ndarray.tostring() was deprecated and removed in NumPy 2.0;
    # tobytes() is the byte-identical replacement.
    audata = y4.astype(np.float32).tobytes()
    stream.write(audata)
    stream.stop_stream()
    stream.close()
    p.terminate()
def beats_slice():
    """Play one track forever, streaming it in beat-aligned slices
    (each at least one second long)."""
    # y1 is an audio vector of length sample_rate * duration
    y1, sr = librosa.load("/home/pi/Desktop/python-rosa/music/1/mp3/0002.mp3", sr=None)
    print(y1.size)
    tempo, beats = librosa.beat.beat_track(y=y1, sr=sr, hop_length=512)
    print(beats)
    # Convert beat frame indices to sample indices.
    beat_samples = librosa.frames_to_samples(beats, hop_length=512)
    print(beat_samples)
    p = pyaudio.PyAudio()
    stream = p.open(format=pyaudio.paFloat32, channels=1, rate=sr, output=True)
    pre_s = 0           # sample index where the current slice starts
    cur_beat_index = 0
    beat_count = len(beat_samples)
    while True:
        if cur_beat_index < beat_count:
            s = beat_samples[cur_beat_index]
            cur_beat_index += 1
            # Skip beats closer than one second (sr samples) to the slice
            # start so every played slice lasts at least one second.
            if (s - pre_s) < sr:
                continue
            else:
                y = y1[pre_s:s]
                pre_s = s
        else:
            # Past the last beat: play the tail, then loop from the start.
            y = y1[pre_s:]
            pre_s = 0
            cur_beat_index = 0
        # NOTE(review): ndarray.tostring() was removed in NumPy 2.0
        # (use tobytes()); this code needs an older NumPy.
        audata = y.astype(np.float32).tostring()
        stream.write(audata)
    # NOTE(review): unreachable — the while True above never exits, so the
    # stream/PyAudio cleanup below never runs.
    stream.stop_stream()
    stream.close()
    p.terminate()
# Current ultrasonic sensor readings, one slot per sensor.
sensor_value_list = [1, 1, 0, 1, 0]
# Overwrite the sensor readings with `values`; extra entries beyond the
# fixed number of sensors are ignored.
def set_sensor_value(values):
    global sensor_value_list
    limit = len(sensor_value_list)
    for slot, value in enumerate(values):
        if slot < limit:
            sensor_value_list[slot] = value
def dy_remix():
    """Continuously mix and play audio driven by the ultrasonic sensor values
    in `sensor_value_list`: each sensor controls one channel backed by a pool
    of five audio files, and newly activated channels start beat-aligned with
    the channels already playing.  Runs forever (meant for a daemon thread)."""
    global sensor_value_list
    sensor_count = len(sensor_value_list)
    PATH = "/home/pi/Desktop/python-rosa/music/"
    def get_audio_series(p_index, m_index, sr):
        # Load audio file m_index from pool p_index at sample rate sr.
        y_temp, sr_temp = librosa.load(PATH + str(p_index) + "/mp3/000" + str(m_index) + ".mp3", sr=sr)
        return y_temp, sr_temp
    def get_beats_samples(y, sr):
        # Onset strength (note attack envelope).
        onset_env = librosa.onset.onset_strength(y=y, sr=sr)
        # Beat positions (frame indices).
        _, beats = librosa.beat.beat_track(onset_envelope=onset_env, sr=sr, hop_length=512)
        # Beat positions (sample indices).
        beat_samples = librosa.frames_to_samples(beats, hop_length=512)
        return beats, beat_samples
    def get_fragment(y, start, s_count):
        # Slice s_count samples from y starting at `start`, wrapping around
        # the end of the track; returns (fragment, next_start).
        t_count = len(y)
        if start == t_count:
            start = 0
        s_y = None
        end = start + s_count
        if end <= t_count:
            s_y = y[start:end]
        else:
            end -= t_count
            s_y1 = y[start:]
            s_y2 = y[0:end]
            s_y = np.concatenate((s_y1, s_y2))
        return s_y, end
    p = pyaudio.PyAudio()
    stream = p.open(format=pyaudio.paFloat32, channels=1, rate=44100, output=True)
    # Sample rate (taken from the first loaded file; None = native rate).
    sr = None
    # Number of audio files per pool.
    audios_count = 5
    # Load every audio series and its beat data up front.
    audios_pool = []
    beat_samples_pool = []
    for x in range(0, sensor_count):
        audios_list = []
        beat_samples_list = []
        for i in range(0, audios_count):
            y, sr = get_audio_series(x + 1, i + 1, sr)
            beats, beat_samples = get_beats_samples(y, sr)
            audios_list.append(y)
            beat_samples_list.append(beat_samples)
        audios_pool.append(audios_list)
        beat_samples_pool.append(beat_samples_list)
        print("========%d=========",x)
    # Sensor values from the previous loop iteration.
    pre_sensor_value_list = []
    # Per-channel index of the currently assigned audio in its pool.
    audio_indexs = []
    # Per-channel currently assigned audio data.
    audios_list = []
    # Per-channel beat sample indices for the assigned audio.
    beat_samples_list = []
    # Per-channel playback start offset and the current fragment to play.
    starts = []
    s_ys = []
    for x in range(0, sensor_count):
        pre_sensor_value_list.append(0)
        audio_indexs.append(0)
        audios_list.append(audios_pool[x][0])
        beat_samples_list.append(beat_samples_pool[x][0])
        starts.append(0)
        s_ys.append(0)
    def get_yield_size(sensor_value_index):
        # Samples remaining until the next beat of the first *other* active
        # channel, so a newly activated channel starts in sync with it.
        for i in range(0, sensor_count):
            if sensor_value_index == i:
                continue
            if pre_sensor_value_list[i] > 0:
                start = starts[i]
                beat_samples = beat_samples_list[i]
                bs_count = len(beat_samples)
                for x in range(0, bs_count):
                    if start < beat_samples[x]:
                        t_x = x + 1
                        if t_x < bs_count:
                            return beat_samples[t_x] - start
                        else:
                            # NOTE(review): when t_x == bs_count this indexes
                            # beat_samples[-1] — confirm the wrap-around
                            # arithmetic is intended.
                            return len(audios_list[i]) - start + beat_samples[t_x - bs_count - 1]
                return len(audios_list[i]) - start + beat_samples[0]
        return 0
    s_count = sr
    while True:
        # Default fragment length: one second of audio.
        s_count = sr
        # If a channel was just activated, align its start with the beats of
        # an already-active channel.
        for x in range(0, sensor_count):
            sensor_value = sensor_value_list[x]
            if pre_sensor_value_list[x] == 0 and sensor_value > 0:
                yield_size = get_yield_size(x)
                if yield_size > 0:
                    s_count = yield_size
                break
        for x in range(0, sensor_count):
            sensor_value = sensor_value_list[x]
            if sensor_value > 0:
                if pre_sensor_value_list[x] > 0:
                    # Channel stays active: fetch its next fragment.
                    s_ys[x], starts[x] = get_fragment(audios_list[x], starts[x], s_count)
            else:
                if pre_sensor_value_list[x] > 0:
                    # Channel just deactivated: advance to the next audio.
                    audio_indexs[x] += 1
                    if audio_indexs[x] == 5:
                        audio_indexs[x] = 0
                    # Swap in the new audio data for this channel.
                    i = audio_indexs[x]
                    audios_list[x] = audios_pool[x][i]
                    beat_samples_list[x] = beat_samples_pool[x][i]
                    # Reset this channel's playback state.
                    s_ys[x] = 0
                    starts[x] = 0
            pre_sensor_value_list[x] = sensor_value
    # Mix the five channels (inactive channels contribute the int 0).
        y = s_ys[0] + s_ys[1] + s_ys[2] + s_ys[3] + s_ys[4]
        # Play the mixed fragment; an int result means no channel is active.
        if not isinstance(y, int):
            # NOTE(review): ndarray.tostring() was removed in NumPy 2.0
            # (use tobytes()); this code needs an older NumPy.
            audata = y.astype(np.float32).tostring()
            stream.write(audata)
    # NOTE(review): unreachable — the while True above never exits, so this
    # cleanup never runs.
    stream.stop_stream()
    stream.close()
    p.terminate()
if __name__ == '__main__':
    # Run the remix loop on a background thread; the main thread feeds it
    # sensor values typed on stdin as comma-separated integers ('c' quits).
    audio_thread = threading.Thread(target=dy_remix, name="超声波接收线程")
    audio_thread.start()
    while True:
        msg = input("input:")
        if msg == 'c':
            sys.exit()
        else:
            values = msg.split(',');
            for x in range(0, len(values)):
                values[x] = int(values[x])
            set_sensor_value(values)
|
# English names of the digits 0-9.
names = ["ZERO", "ONE", "TWO", "THREE", "FOUR", "FIVE",
         "SIX", "SEVEN", "EIGHT", "NINE"]
# Distinct letters of each digit word.
letters_list = [list(set(s)) for s in names]
# Greedy processing order: at each step the digit has a letter no
# remaining digit word contains.
order = [0, 2, 6, 7, 8, 3, 5, 4, 1, 9]
# That distinguishing marker letter for each digit in `order`.
unique = ['Z', 'W', 'X', 'S', 'G', 'H', 'V', 'R', 'O', 'I']
def replace_num(num, cnt, s):
    """Strip `cnt` occurrences of digit-word `num`'s letters from `s`."""
    word = names[num]
    for ch in set(word):
        s = s.replace(ch, '', word.count(ch) * cnt)
    return s
def make_num(count):
    """Assemble the answer: digit i repeated count[i] times, ascending."""
    return ''.join(str(digit) * count[digit] for digit in range(10))
def solve():
    """Recover the digits hidden in each scrambled digit-word line.

    Reads T cases from A-large.in; for each line, counts the marker
    letter of every digit (in the greedy `order`), removes that digit's
    letters, and writes "Case #i: <digits>" to A-large.out.
    """
    infile = open("A-large.in")
    outfile = open("A-large.out", 'w')
    T = int(infile.readline().strip())
    for i in range(T):
        occur = [0] * 10
        s = infile.readline().strip()
        for j in range(10):
            num = order[j]
            # The marker letter occurs once per occurrence of this digit.
            occur[num] = s.count(unique[j])
            if occur[num] != 0:
                s = replace_num(num, occur[num], s)
        res = "Case #%d: %s\n" % (i+1, make_num(occur))
        outfile.write(res)
    infile.close()
    outfile.close()
solve()
|
from django.db import models
from jsonfield import JSONField
from ckeditor.fields import RichTextField
# Create your models here.
class MarkedText(models.Model):
    """A text with attached annotation data and a rich-text analysis."""
    created_at = models.DateTimeField(auto_now_add=True)
    # The raw text being marked/analysed.
    text = models.TextField()
    # Arbitrary structured annotation data for the text.
    data = JSONField(blank=True, null=True)
    # Rich-text (HTML) analysis/commentary.
    analysis = RichTextField(blank=True, null=True)
    visible = models.BooleanField(default=True)
    published = models.BooleanField(default=False)
    video_url = models.URLField(blank=True, null=True)
    # Default 'Manjka naslov' is Slovenian for "title missing".
    title = models.TextField(blank=True, null=True, default='Manjka naslov')
    date = models.DateField(blank=True, null=True)
    photo = models.FileField(blank=True, null=True)
    # Open Graph metadata used when the page is shared.
    og_title = models.TextField(blank=True, null=True)
    og_desc = models.TextField(blank=True, null=True)
    def __str__(self):
        # First 50 characters serve as the display name.
        return self.text[:50]
    def __unicode__(self):
        # Python 2 counterpart of __str__ (unused under Python 3).
        return self.text[:50]
|
import os
import sys
import codecs
import chardet
#from subFunc_tools import *
def convert(file, in_enc="GBK", out_enc="UTF-8"):
    """Re-encode *file* in place from in_enc to out_enc.

    Args:
        file: Path of the file to rewrite.
        in_enc: Source encoding name (case-insensitive). May be None when
            the caller's detector (e.g. chardet) could not identify it.
        out_enc: Target encoding name (case-insensitive).

    Files already in UTF-8, or with an undetectable (None) encoding, are
    left untouched. Read/write errors are reported and swallowed so a
    batch run can continue with the remaining files.
    """
    if not in_enc:  # chardet.detect can return None for 'encoding'
        return
    in_enc = in_enc.upper()
    out_enc = out_enc.upper()
    if in_enc == 'UTF-8':
        return
    try:
        # Context managers close the handles even on decode errors
        # (the original left both codecs.open handles unclosed).
        with codecs.open(file, 'r', in_enc) as src:
            new_content = src.read()
        with codecs.open(file, 'w', out_enc) as dst:
            dst.write(new_content)
    except Exception as e:
        print(file)
        print("Fail", e)
if __name__ == "__main__":
    # Re-encode every file in both mail corpora to UTF-8 in place.
    # (The two copy-pasted per-directory loops were collapsed into one.)
    for path in (r'./easy_ham/', r'./spam/'):
        for fileName in os.listdir(path):
            filePath = path + fileName
            # Sniff the current encoding from the raw bytes.
            with open(filePath, "rb") as f:
                data = f.read()
            codeType = chardet.detect(data)['encoding']
            convert(filePath, codeType, 'UTF-8')
|
try:
from osgeo import ogr
except ImportError:
import ogr
import sys, os
def subtract(sourceFile, maskFile):
    """Write source features minus mask geometries to a 'difference' shapefile.

    Python 2 / OGR script: for every feature of sourceFile, subtracts all
    spatially intersecting geometries of maskFile and keeps the remainder
    (if any length is left), copying the attribute values across.
    """
    outputFileName = 'difference'
    driver = ogr.GetDriverByName("ESRI Shapefile")
    source = driver.Open(sourceFile,0)
    sourceLayer = source.GetLayer()
    # NOTE(review): this None check runs after source.GetLayer() above, so
    # a failed Open crashes before the message is printed.
    if source is None:
        print "Could not open file ", sourceFile
        sys.exit(1)
    mask = driver.Open(maskFile,0)
    maskLayer = mask.GetLayer()
    # NOTE(review): same late-check problem, and no sys.exit on this path.
    if mask is None:
        print "Could not open file ", maskFile
    ### Create output file ###
    if os.path.exists(outputFileName):
        os.remove(outputFileName)
    try:
        output = driver.CreateDataSource(outputFileName)
    except:
        print 'Could not create output datasource ', outputFileName
        sys.exit(1)
    # newLayer = output.CreateLayer('difference',geom_type=ogr.wkbPolygon,srs=sourceLayer.GetSpatialRef())
    newLayer = output.CreateLayer('difference',geom_type=ogr.wkbMultiLineString,srs=sourceLayer.GetSpatialRef())
    # Copy the attribute schema from the first source feature.
    prototypeFeature = sourceLayer.GetFeature(0)
    for i in range(prototypeFeature.GetFieldCount()):
        newLayer.CreateField(prototypeFeature.GetFieldDefnRef(i))
    prototypeFeature.Destroy()
    if newLayer is None:
        print "Could not create output layer"
        sys.exit(1)
    newLayerDef = newLayer.GetLayerDefn()
    ##############################
    processedCount = 0
    featureID = 0
    total = sourceLayer.GetFeatureCount()
    sourceFeature = sourceLayer.GetNextFeature()
    while sourceFeature:
        sourceGeom = sourceFeature.GetGeometryRef()
        # Only consider mask features spatially intersecting this feature.
        maskLayer.ResetReading()
        maskLayer.SetSpatialFilter(sourceGeom)
        maskFeature = maskLayer.GetNextFeature()
        while maskFeature:
            maskGeom = maskFeature.GetGeometryRef()
            sourceGeom = sourceGeom.Difference(maskGeom)
            maskFeature.Destroy()
            maskFeature = maskLayer.GetNextFeature()
        # Keep only what is left after the subtraction.
        if sourceGeom.Length() > 0:
            newFeature = ogr.Feature(newLayerDef)
            newFeature.SetGeometry(sourceGeom)
            newFeature.SetFID(featureID)
            for i in range(sourceFeature.GetFieldCount()):
                newFeature.SetField(i, sourceFeature.GetField(i))
            newLayer.CreateFeature(newFeature)
            featureID += 1
            newFeature.Destroy()
        sourceFeature.Destroy()
        sourceFeature = sourceLayer.GetNextFeature()
        processedCount += 1
        print "%d / %d / %d" % (processedCount, featureID, total)
    source.Destroy()
    mask.Destroy()
if __name__ == "__main__":
    # usage: python subtract.py <sourceFile> <maskFile>
    subtract(sys.argv[1], sys.argv[2])
|
"""Import objects"""
import re
import urllib
from flask import render_template, url_for, request, flash, redirect, session
from flaskapp import APP
from flaskapp.models import *
from flaskapp.helpers import *
# an instance of Account class (responsible for user registration and login)
REGISTRANT = Account()
# In-memory store of recipe lists shared across all requests.
LIST = Lists()
# In-memory store of recipe procedures (preparation steps).
PROCEDURES = Procedures()
@APP.route("/")
def index():
    """Root - display the homepage."""
    return render_template("index.html")
@APP.route("/login", methods=['POST', 'GET'])
def login():
    """Display login form (GET) and check submitted credentials (POST).

    NOTE(review): if REGISTRANT.login returns something other than
    True/False, the POST branch falls through and returns None.
    """
    # if user reached route via POST (as by submitting a form via POST)
    if request.method == "POST":
        username = request.form['username']
        password = request.form['password']
        # server side validation for users that might bypass
        # javascript check by disabling it in their browser
        if username == "" or password == "":
            return apology("please input required details")
        # NOTE(review): "rigistrant" is a typo for "registrant".
        login_rigistrant = REGISTRANT.login(username, password)
        if login_rigistrant is True:
            session["username"] = username
            return redirect(url_for('dashboard', username=username))
        elif login_rigistrant is False:
            return apology("please check your details and try again")
    else:
        return render_template("login.html")
@APP.route("/signup", methods=['GET', 'POST'])
def signup():
    """Display signup form (GET) and register a new user (POST).

    NOTE(review): if REGISTRANT.adduser returns something other than
    True/False/'pass_fail', the POST branch returns None.
    """
    # if user reached route via POST (as by submitting a form via POST)
    if request.method == "POST":
        # collect the submited data
        username = request.form['username']
        email = request.form['email']
        password = request.form['password']
        confirm = request.form['confirm']
        # server side validation for users that might bypass
        # javascript check by disabling it in their browser
        #if username == "" or email == "" or password == "" or confirm == "":
        # flash("please enable javascript in your broswer!")
        # return render_template("signup.html")
        # check for correct email format
        # check whether email has exactly one @ sign, and at least one . in the part after the @
        #if not re.match(r"[^@]+@[^@]+\.[^@]+", email):
        # return apology("please check your email format and try again")
        #if len(password) < 8 or not re.search(r"\d" and r"[A-Z]" and r"[a-z]" and r"\W", password):
        # return apology("password need 8 characters, digits, uppercase, lowercase and symbol")
        #else:
        signup_registrant = REGISTRANT.adduser(username, email, password, confirm)
        if signup_registrant is True:
            return redirect(url_for('login'))
        elif signup_registrant is False:
            return apology("email or username exists")
        elif signup_registrant == "pass_fail":
            return apology("password mismatch!")
    else:
        return render_template("signup.html")
@APP.route("/logout")
@login_required
def logout():
    """Log user out by clearing the session."""
    # forget any username
    session.clear()
    # redirect user to login form
    return redirect(url_for("login"))
@APP.route("/dashboard", methods=['GET'])
@login_required
def dashboard():
    """Display the logged-in user's recipes.

    Renders the populated dashboard as soon as one recipe belongs to the
    session user; otherwise renders an empty dashboard.
    (Fix: the docstring was previously placed after the first statement,
    so it was a discarded string, not a docstring.)
    """
    available_recipes = LIST.mylists()
    for recipe in available_recipes:
        if recipe['username'] == session['username']:
            return render_template("dashboard.html",
                                   username=session['username'],
                                   available_recipes=available_recipes)
    return render_template("dashboard.html")
@APP.route("/addrecipe", methods=['GET', 'POST'])
@login_required
def addrecipe():
    """Add a new recipe list.

    GET renders the add-recipe form; POST stores the submitted recipe
    and re-renders the dashboard.
    """
    # if user reached route via POST (as by submitting a form via POST)
    if request.method == 'POST':
        owner = session["username"]
        title = request.form['title']
        # Renamed local (`addrecipe` shadowed this view function) and use
        # identity comparison: LIST.addrecipe signals with real booleans.
        created = LIST.addrecipe(owner, title)
        if created is True:
            available_recipes = LIST.mylists()
            return render_template("dashboard.html",
                                   username=session["username"],
                                   available_recipes=available_recipes)
        elif created is False:
            return apology("please enter required details")
    elif request.method == 'GET':
        return render_template('add.html')
@APP.route("/edit/<id>", methods=['GET', 'POST'])
@login_required
def edit(id):
    """Display a form to edit a recipe (GET) or apply the edit (POST).

    NOTE(review): both branches return None when no recipe matches `id`.
    """
    recipes = LIST.mylists()
    procedures = PROCEDURES.allprocedures()  # NOTE(review): unused
    if request.method == 'POST':
        for idx, item in enumerate(recipes):
            if item['id'] == int(id):
                # Rebuild the entry in place with the submitted title.
                holder = dict()
                holder['id'] = int(id)
                holder['title'] = request.form['title']
                holder['username'] = session['username']
                recipes[idx] = holder
                return redirect(url_for('dashboard'))
    elif request.method == 'GET':
        for j in recipes:
            if j['id'] == int(id):
                title = j['title']
                return render_template("edit.html", id=id, title=title)
@APP.route("/review/<id>")
@login_required
def review(id):
    """Review a certain recipe: show its title and all procedures."""
    recipelist = LIST.mylists()
    available_procedures = PROCEDURES.allprocedures()
    # Look up the recipe title for this id; fall back to a bare view page.
    for i in recipelist:
        if i['id'] == int(id):
            recipename = i['title']
            return render_template("view.html", id=id, recipename=recipename, available_procedures=available_procedures)
    return render_template("view.html")
@APP.route("/view")
@login_required
def view():
    """Display the bare recipe view page."""
    return render_template("view.html")
@APP.route('/delete/<id>')
@login_required
def delete(id):
    """Remove the recipe with the given id from the in-memory list.

    NOTE(review): pops while iterating (safe only if ids are unique) and
    performs no ownership check on the recipe.
    """
    recipe = LIST.mylists()
    for i, d in enumerate(recipe):
        if d['id'] == int(id):
            recipe.pop(i)
    return redirect(url_for('dashboard'))
@APP.route('/addprocedure/<id>', methods=['GET', 'POST'])
@login_required
def addprocedure(id):
    """Add a procedure (step) to recipe `id`, or show the procedure page.

    GET renders the procedure page; POST stores the submitted procedure.
    """
    if request.method == 'GET':
        available_procedures = PROCEDURES.allprocedures()
        return render_template("procedure.html", id=id,
                               username=session['username'],
                               available_procedures=available_procedures)
    elif request.method == 'POST':
        owner = session['username']
        procedure = request.form['procedure']
        # Identity comparison: addprocedure signals with real booleans
        # (consistent with the other views).
        added = PROCEDURES.addprocedure(id, owner, procedure)
        if added is True:
            return redirect(url_for('dashboard'))
        elif added is False:
            return apology("Something went wrong!")
|
#!/usr/bin/env python2.7
import sys
import urllib2
import xml.dom.minidom as xml
import shelve
import logging
#Load bot settings
from settings import (app_key, app_secret, access_token, refresh_token, user_agent, scopes, subreddit, log_path, db_path)
#Configure logging
logging.basicConfig(filename=log_path, format='%(asctime)s %(message)s', datefmt='%m/%d/%Y @ %H:%M :', level=logging.INFO)
# Use a modified version of the legacy nflgame schedule import script to load
# all Bills games info into a dictionary and store it within a shelve. The dictionary will
# be used to determine when it is time to create gameday threads, how to title them, etc.
xml_base_url = 'http://www.nfl.com/ajax/scorestrip?'
start_year = 2018 # Years before 2009 don't seem to have JSON feeds.
end_year = 2018
team = 'BUF'  # only games involving Buffalo are stored
# (season label, week numbers within that season segment)
season_types = (
    ('PRE', xrange(1, 4 + 1)),
    ('REG', xrange(1, 17 + 1)),
    ('POST', [18, 19, 20, 22]),
)
schedule = shelve.open('bb-schedule.db')
logging.info("Beginning data retrieval...")
for year in xrange(start_year, end_year + 1):
    for season_type, weeks in season_types:
        for week, real_week in enumerate(weeks, 1):
            # Build the scorestrip query for this season segment and week.
            url = '%sseason=%d&seasonType=%s&week=%d' \
                % (xml_base_url, year, season_type, real_week)
            try:
                dom = xml.parse(urllib2.urlopen(url))
            except urllib2.HTTPError:
                logging.info('Could not load %s' % url)
                continue
            # Each <g> element describes one game.
            for g in dom.getElementsByTagName("g"):
                eid = g.getAttribute('eid')
                home = g.getAttribute('h')
                away = g.getAttribute('v')
                #A hack to deal with the fact that the nfl XML doesn't provide 24 hour times and we can't assume that all times
                # are PM since Buffalo will play Jacksonville in Londan this year at 9AM EST.
                #if home == 'JAC':
                # gameTime = unicode(g.getAttribute('t') + ' AM')
                # preGamePosted = False
                #else:
                # gameTime = unicode(g.getAttribute('t') + ' PM')
                # preGamePosted = False
                gameTime = unicode(g.getAttribute('t') + ' PM')
                preGamePosted = False
                info = {
                    'eid': eid,
                    'wday': g.getAttribute('d'),
                    'year': int(eid[:4]),
                    'month': int(eid[4:6]),
                    'day': int(eid[6:8]),
                    'time': gameTime,
                    'season_type': season_type,
                    'week': week,
                    'home': home,
                    'away': away,
                    'gamekey': g.getAttribute('gsis'),
                    'preGamePosted': preGamePosted,
                    'gameDayPosted': False,
                    'postGamePosted': False
                }
                # NOTE(review): gameinfo is built but never used.
                gameinfo = ((year, season_type, week, home, away), info)
                if home==team or away==team:
                    key = season_type + str(week)
                    logging.info(info)
                    logging.info('Storing ' + key)
                    schedule[key] = info
#Add an entry for testing
# Fake 'REG0' game so the bot's posting logic can be exercised off-season.
info = {
    'eid': '2015081452',
    'wday': 'Tues',
    'year': 2018,
    'month': 5,
    'day': 15,
    'time': unicode('5:05 PM'),
    'season_type': 'REG',
    'week': 0,
    'home': 'BUF',
    'away': 'PIT',
    'gamekey': '56767',
    'preGamePosted': False,
    'gameDayPosted': False,
    'postGamePosted': False
}
key = 'REG0'
logging.info(info)
logging.info('Storing ' + key)
schedule[key] = info
logging.info('Retrieval complete. Exiting...')
schedule.close()
|
from enum import IntEnum
from typing import List, Tuple
from UE4Parse.BinaryReader import BinaryStream
class EExportCommandType(IntEnum):
    """Phase of an export command within an export bundle."""
    ExportCommandType_Create = 0
    ExportCommandType_Serialize = 1
    ExportCommandType_Count = 2
class FExportBundleHeader:
    """Export bundle header: first entry index and number of entries."""
    FirstEntryIndex: int
    EntryCount: int
    def __init__(self, reader: BinaryStream):
        # Two consecutive uint32 values in the stream.
        self.FirstEntryIndex = reader.readUInt32()
        self.EntryCount = reader.readUInt32()
class FExportBundleEntry:
    """One export command: a local export index plus its command type."""
    LocalExportIndex: int
    CommandType: EExportCommandType
    def __init__(self, reader: BinaryStream):
        self.LocalExportIndex = reader.readUInt32()
        self.CommandType = EExportCommandType(reader.readUInt32())
class FExportBundle:
    """An export bundle: one header followed by Header.EntryCount entries."""
    Header: FExportBundleHeader
    Entries: Tuple[FExportBundleEntry]

    def __init__(self, reader: BinaryStream):
        # NOTE(review): only a single header is read here; confirm whether
        # multiple headers can occur.
        self.Header = FExportBundleHeader(reader)
        parsed = []
        for _ in range(self.Header.EntryCount):
            parsed.append(FExportBundleEntry(reader))
        self.Entries = tuple(parsed)

    def getOrder(self):
        """Local export indices of Serialize commands, clamped to EntryCount-1."""
        last = self.Header.EntryCount - 1
        return [min(last, entry.LocalExportIndex)
                for entry in self.Entries
                if entry.CommandType == EExportCommandType.ExportCommandType_Serialize]
|
#!/usr/bin/env python
from multiprocessing import Process
import time
class new_process(Process):
    """Inherits from the parent class multiprocessing.Process."""
    def __init__(self, arg):
        super().__init__()
        # Payload printed by run(); an int index in the demo below.
        self.arg = arg
    def run(self):
        """Entry point executed in the child process."""
        print("start:", self.name)
        print("asas", self.arg)
        time.sleep(1)
        print("exit:", self.name)
if __name__ == '__main__':
    # Spawn ten worker processes, each printing its index.
    for i in range(10):
        p = new_process(i)
        p.start()
|
a = input("digite a string")
# Keep every character of the input except 'a' and 'A'.
n = "".join(ch for ch in a if ch not in ("a", "A"))
print(n)
|
#! /usr/bin/python3
# Written by Bill Ballard January 2018 for MightyOhm Geiger counter
# interface to Raspberry Pi with a Pimoroni scrollpHat or scrollpHatHD
# Designed to run in python3
# python3 geiger.py &
#
# Hardware setup
# connect scrollpHat to the GPIO, then connect
# Pi GPIO pin 6 to Geiger J7 pin 1
# Pi GPIO pin 8 to Geiger J7 pin 4
# Pi GPIO pin 10 to Geiger J7 pin 5
#
# Software setup, after update/upgrade
# sudo apt-get install python3-pip (if using Stretch lite)
# sudo pip3 install pySerial flask
#
# for older 5x11 Pimoroni Scroll pHat version
# sudo apt-get install python3-scrollphat
#
# or for newer HD version, also needs the flask installation
# sudo apt-get install python3-scrollphathd
#
# set this line according to your version of the scroll pHat
# set this line according to your version of the scroll pHat
HD = True # set to true for HD version, false for old version
#
# sudo nano /boot/cmdline.txt
# and remove the console=serial0,115200 save and reboot
#
# License: GPL 2.0
# load all the modules we will need
import serial
import time
if (HD):
    import scrollphathd
    from scrollphathd.fonts import font5x7
else:
    import scrollphat
# file name for logging data, customize as you wish but use full path
# in case you background the job at boot
fname = "/home/pi/geiger.csv"
# open the mightyohm geiger counter terminal (9600 baud serial line)
ser = serial.Serial('/dev/ttyAMA0', baudrate=9600)
# initialize scrollphathd or scrollphat (display dimmed and rotated)
if (HD):
    scrollphathd.set_brightness(0.2)
    scrollphathd.rotate(180)
    scrollphathd.clear()
else:
    scrollphat.set_brightness(32)
    scrollphat.set_rotate(True)
    scrollphat.clear()
# read each line of input, reformat byte to string and write to file
# note that the decode trick is hard to find in Python documentation!
while True:
    try:
        lin=ser.readline()
        line=lin.decode('utf-8').split(',')
        # NOTE(review): CSV fields 5 and 4 are concatenated for display —
        # presumably a value and its unit; confirm against the MightyOhm
        # serial output format.
        outs = line[5] + line[4]
        print(outs)
        if (HD) :
            scrollphathd.clear()
            scrollphathd.write_string(outs, x=1, y=0, font=font5x7)
            (buf1, buf2) = scrollphathd.get_buffer_shape()
            for i in range(buf1):
                scrollphathd.show()
                scrollphathd.scroll()
                time.sleep(0.1)
        else:
            scrollphat.clear()
            scrollphat.write_string(outs, 11)
            # NOTE(review): `len` shadows the builtin here (local to loop).
            len = scrollphat.buffer_len()
            for i in range (len):
                scrollphat.scroll()
                time.sleep(0.1)
        # now write to file, close each time in case you stop the program
        geig = open(fname, 'a')
        geig.write(lin.decode('utf-8'))
        geig.close()
    except (KeyboardInterrupt, SystemError, SystemExit): #reasons to stop
        if (HD):
            scrollphathd.clear()
        else:
            scrollphat.clear()
        # NOTE(review): geig is undefined if the interrupt arrives before
        # the first open() above — this close may raise NameError.
        geig.close()
        ser.close()
|
# Dependencies
from random import random
import matplotlib.pyplot as plt
import numpy as np
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.sem.html
from scipy.stats import sem
# "Will you vote for a republican in this election?"
sample_size = 100
# Draw ten independent Boolean samples of `sample_size` coin flips each
# (a comparison already yields a bool, so no ternary is needed).
samples = [[random() < 0.5 for _ in range(sample_size)]
           for _ in range(10)]
x_axis = np.arange(0, len(samples), 1)
# Mean of each sample.
means = [np.mean(s) for s in samples]
# Standard error of the mean for each sample.
standard_errors = [sem(s) for s in samples]
# Errorbar plot: each point is a sample mean, whiskers show +/- SEM.
plt.errorbar(
    x_axis,
    means,
    yerr=standard_errors,
    color='r',
    marker='o',
    markersize=5,
    linestyle='dashed',
    linewidth=0.5)
# Display the plot
plt.show()
|
from unittest import mock
import pytest
import requests
from celery.exceptions import Retry
from project.users.factories import UserFactory
from project.users.tasks import task_add_subscribe
def test_post_succeed(db_session, monkeypatch, user):
    """The task POSTs the user's email to the subscription endpoint."""
    mock_requests_post = mock.MagicMock()
    monkeypatch.setattr(requests, "post", mock_requests_post)
    task_add_subscribe(user.id)
    mock_requests_post.assert_called_with(
        "https://httpbin.org/delay/5",
        data={"email": user.email}
    )
def test_exception(db_session, monkeypatch, user):
    """When the POST raises, the task retries (Retry propagates)."""
    mock_requests_post = mock.MagicMock()
    monkeypatch.setattr(requests, "post", mock_requests_post)
    # The task's retry() is stubbed to raise celery's Retry sentinel.
    mock_task_add_subscribe_retry = mock.MagicMock()
    monkeypatch.setattr(task_add_subscribe, "retry", mock_task_add_subscribe_retry)
    mock_task_add_subscribe_retry.side_effect = Retry()
    mock_requests_post.side_effect = Exception()
    with pytest.raises(Retry):
        task_add_subscribe(user.id)
from rest_framework.serializers import HyperlinkedIdentityField, ModelSerializer, ValidationError, SerializerMethodField
from account.models import User
from rest_framework import status, serializers
|
from socket import *
import sys, time
if len(sys.argv) <= 1:
    print 'Usage: "python proxy.py server_ip"\n[server_ip : It is the IP Address of the Proxy Server'
    sys.exit(2)
# Create a server socket, bind it to a port and start listening
tcpSERVERPort = 8080
tcpSERVERSock = socket(AF_INET, SOCK_STREAM)
# Timing/byte-count log (truncated on every run).
fp = open('log.txt','w')
# Prepare a server socket
tcpSERVERSock.bind((sys.argv[1], tcpSERVERPort))
tcpSERVERSock.listen(5)
while True:
    # Start receiving data from the client
    print 'Ready to serve...'
    tcpCLIENTSock, addr = tcpSERVERSock.accept()
    print 'Received a connection from: ', addr
    t = time.time()
    message = tcpCLIENTSock.recv(4096)
    print "message= Hello ",message
    fp.write(message)
    a = len(message)
    print 'number of bytes sent =',a
    # Extract the filename from the given message
    # NOTE(review): `b` stays undefined on the empty-message path, so the
    # "bytes received" fp.write below raises NameError in that case.
    if message == '':
        print "No data"
    else:
        print "m2=::::",message.split()[1]
        filename = message.split()[1].partition("/")[2]
        print "filename = ",filename
        fileExist = "false"
        filetouse = "/" + filename
        print "filetouse= :",filetouse
        try:
            # Check whether the file exists in the cache
            f = open(filetouse[1:], "r")
            outputdata = f.readlines()
            b = len(outputdata)
            print "bytes received from server = ",b
            print "outputdata = ",outputdata
            fileExist = "true"
            print 'File Exists!'
            # ProxyServer finds a cache hit and generates a response message
            tcpCLIENTSock.send("HTTP/1.0 200 OK\r\n")
            print "HTTP/1.0 200 OK\r\n"
            tcpCLIENTSock.send("Content-Type:text/html\r\n")
            # Send the content of the requested file to the client
            for i in range(0, len(outputdata)):
                tcpCLIENTSock.send(outputdata[i])
            print 'Read from cache'
        # Error handling for file not found in cache
        except IOError:
            print 'File Exist: ', fileExist
            if fileExist == "false":
                # Create a socket on the proxyserver
                print 'Creating socket on proxyserver'
                c = socket(AF_INET, SOCK_STREAM)
                hostn = filename.replace("www.", "", 1)
                print 'Host Name: ', hostn
                try:
                    # Connect to the socket to port 80
                    c.connect((hostn, 80))
                    print 'Socket connected to port 80 of the host'
                    # Create a temporary file on this socket and ask port 80
                    # for the file requested by the client
                    fileobj = c.makefile('r', 0)
                    fileobj.write("GET " + "http://" + filename + " HTTP/1.0\n\n")
                    # Read the response into buffer
                    buffer = fileobj.readlines()
                    b = len(buffer)
                    print 'bytes received =' ,b
                    #resp = c.recv(4096)
                    #response = ""
                    #while resp:
                    #response += resp
                    # Create a new file in the cache for the requested file.
                    # Also send the response in the buffer to client socket
                    # and the corresponding file in the cache
                    tempFile = open("./" + filename, "wb")
                    #tempFile.write(response)
                    #tempFile.close()
                    #tcpcLIENTsock.send(response)
                    for i in range(0, len(buffer)):
                        tempFile.write(buffer[i])
                        tcpCLIENTSock.send(buffer[i])
                except:
                    print 'illegal request'
            else:
                # HTTP response message for file not found
                print 'File Not Found...'
    elap = time.time()
    diff = elap - t
    # Close the socket and the server sockets
    tcpCLIENTSock.close()
    fp.write("\n time taken =" + str(diff))
    fp.write("\n bytes sent =" + str(a))
    fp.write("\n bytes received =" + str(b))
    fp.write("\n")
    # NOTE(review): fp is closed on every iteration — the next iteration's
    # fp.write raises ValueError on a closed file.
    fp.close()
# NOTE(review): unreachable — the `while True` loop above never breaks.
print "Closing the server connection"
tcpSERVERSock.close()
|
import pygame
class Color_Ball:
    """A colored circle with a position and a per-step velocity."""

    def __init__(self, color, radius, x, y, x_speed, y_speed):
        self.color = color
        self.radius = radius
        self.x = x
        self.y = y
        self.x_speed = x_speed
        self.y_speed = y_speed

    def move(self):
        """Advance the ball one step along its velocity vector."""
        self.x, self.y = self.x + self.x_speed, self.y + self.y_speed

    def draw(self, screen):
        """Render the ball as a filled circle on the given surface."""
        center = (self.x, self.y)
        pygame.draw.circle(screen, self.color, center, self.radius)
|
import abc
import datetime
import uuid
from typing import Callable, TypeVar, Union, Mapping
from pyservices.utilities.exceptions import ModelInitException, MetaTypeException
from pyservices.data_descriptors.meta_model import MetaModel
class Field(abc.ABC):
    """ Abstract class which represents a field in a MetaModel.

    Class attributes:
        _T (typing.TypeVar): The generic parameter used in field_type.
    """
    _T = TypeVar('_T')

    @abc.abstractmethod
    def __init__(self,
                 name: str,
                 field_type: _T,
                 default: Union[_T, Callable[..., _T], None] = None) -> None:
        """ Store the field's name, type and default.

        Attributes:
            name (str): Field name; must start with a lowercase letter
                (a capitalized name may indicate the field's type instead).
            field_type (_T): The type of the related field.
            default: Either a field_type value or a callable returning one.

        Raises:
            ValueError: If the first letter of the name is uppercase.
        """
        if name[0].isupper():
            raise ValueError('The case of the name must be lowercase.')
        self.name = name
        self.field_type = field_type
        self.default = default

    def __repr__(self) -> str:
        cls_name = self.__class__.__name__
        return f'<Field {self.name}:{cls_name}[{self.field_type.__name__};{self.default}]>'

    def init_value(self, value, strict: bool = True):
        """ Return the value after checking it against field_type.

        Attributes:
            value: The value used to initialize the model.
            strict (bool): Flag used to perform a strict initialization.

        Raises:
            ModelInitException: If value is not an instance of field_type.
        """
        # noinspection PyTypeHints
        if isinstance(value, self.field_type):
            return value
        raise ModelInitException(
            f'{value}({type(value)}) is not an instance of '
            f'{self.field_type}.')
class SimpleField(Field):
    """ A Field whose type is fixed statically by the subclass.

    Class attributes:
        static_field_type (type): The static type of the Field.
    """
    static_field_type = None

    def __init__(self,
                 name: str,
                 default: Union[static_field_type,
                                Callable[..., static_field_type], None] = None
                 ) -> None:
        field_type = type(self).static_field_type
        super().__init__(name, field_type, default)

    def init_value(self, value, strict: bool = True):
        """ Initialize a SimpleField value.

        When strict is False, a cast to static_field_type is attempted
        first (so e.g. an Integer field accepts the string '1'); a failed
        cast simply falls back to the ordinary strict type check.
        """
        if strict:
            return super().init_value(value)
        try:
            value = self.static_field_type(value)
        except TypeError:
            pass
        return super().init_value(value)
class StringField(SimpleField):
    """ A string field.
    """
    static_field_type = str
class BooleanField(SimpleField):
    """ A boolean field.
    """
    static_field_type = bool
class IntegerField(SimpleField):
    """ An integer field.
    """
    static_field_type = int
class FloatField(SimpleField):
    """ A float field.
    """
    static_field_type = float
class DateTimeField(SimpleField):
    """ A datetime field.
    """
    static_field_type = datetime.datetime
    # TODO strict could be used as false to perform the following conversions on
    # the __init__ of an extension of datetime.datetime
    def init_value(self, value, strict: bool = True):
        """ Initialize the datetime value.

        Accepts, besides datetime instances: an ISO-format string or a
        POSIX timestamp (float) — both converted before the type check.
        """
        if isinstance(value, str):
            value = datetime.datetime.fromisoformat(value)
        elif isinstance(value, float):
            value = datetime.datetime.fromtimestamp(value)
        return super().init_value(value, strict)
class ComposedField(Field):
    """ A group of fields.
    If a ComposedField is initialized through a MetaModel __call__ method,
    the field_type is already cached on MetaModel.modelClasses.
    The field_type is obtained from the MetaModel.
    """
    def __init__(self,
                 name: str,
                 *args: Field,
                 meta_model: MetaModel = None) -> None:
        """ Initialize the ComposedField.
        Attributes:
            *args (Field); The fields which compose the
                ComposedField
            meta_model (type): The related MetaModel. If passed, the composed
                field is generated from an existing MetaModel. If not, a
                MetaModel is created from the ComposedField.
        """
        if meta_model:
            self.meta_model = meta_model
        else:
            # A uuid suffix keeps the generated MetaModel name unique.
            self.meta_model = MetaModel(
                name.capitalize() + '_' + str(uuid.uuid4()), *args)
        super().__init__(name, self.meta_model.get_class(), None)
    def get_class(self):
        """ Return the class of the MetaModel.
        """
        return self.meta_model.get_class()
    def init_value(self, value, strict: bool = True):
        """ Initialize the ComposedField.
        Args:
            value: If the type is dict, the map represent the values used to
                initialize the meta model related to the ComposedField.
            strict (bool): Flag used to perform a strict initialization.
        Returns:
            An instance of the class of the meta model related to the composed
            field.
        """
        if isinstance(value, dict):
            for field in self.meta_model.fields:
                v = value.get(field.name, None)
                if v is None:
                    # NOTE(review): the doubled quote in '""' below looks
                    # like a typo in the message text.
                    raise ModelInitException(f'The id is not valid. '
                                             f'"{field.name}"" is missing.')
                value[field.name] = field.init_value(v, strict=strict)
            value = self.meta_model.get_class()(**value)
        return super().init_value(value)
class ListField(Field):
    """ A list field.
    """
    def __init__(self,
                 name: str,
                 data_type: Union[SimpleField.__class__, MetaModel],
                 default: Union[list, Callable[..., list], None] = None
                 ) -> None:
        """ Initialize the ListField
        Attributes:
            data_type (Union[SimpleField.__class__, MetaModel]): An object used
                to discover the type of the data represented by this ListField.
        Raises:
            MetaTypeException: If data_type is neither a SimpleField subclass
                nor a MetaModel instance.
        """
        if isinstance(data_type, MetaModel) \
                or issubclass(data_type, SimpleField):
            self.data_type = data_type
        else:
            raise MetaTypeException(f'The data_type must be either a '
                                    f'SimpleField or a MetaModel instance, not '
                                    f'a {type(data_type)}.')
        super().__init__(name, list, default)
    def init_value(self, value, strict: bool = True):
        """ Return the value of a correct type.
        Checks the type of the elements of the list.
        """
        # NOTE(review): `strict` is not forwarded to the list-level check.
        value = super().init_value(value)
        if isinstance(self.data_type, MetaModel):
            t = self.data_type.get_class()
        elif issubclass(self.data_type, SimpleField):
            t = self.data_type.static_field_type
        else:
            raise MetaTypeException(f'The data_type must be either a '
                                    f'SimpleField or a MetaModel instance, not '
                                    f'a {type(self.data_type)}.')
        for el in value:
            # noinspection PyTypeHints
            if not isinstance(el, t):
                raise ModelInitException(f'The type of the {el} is not {t}')
        return value
class DictField(SimpleField):
    """ A field representing a dict.

    Keys and values are not further type-checked.
    """
    static_field_type = dict
class ConditionalField(Field):
    """ A field with different MetaModels associated.
    """
    def __init__(self,
                 name: str,
                 meta_models: Mapping[str, MetaModel],
                 evaluation_field_name: str) -> None:
        """ Initialize the ConditionalField.
        The field_type is set to None and it will be dynamically evaluated when
        a new model is initialized.
        Attributes:
            meta_models (Mapping[str, MetaModel]): The dict containing the
                relations between field values and MetaModels.
            evaluation_field_name (str): The str which indicated the title of
                the field which will be used to select the right MetaModel.
        """
        self.meta_models = meta_models
        self.evaluation_field_name = evaluation_field_name
        super().__init__(name, None, None)
    def init_value(self, value, strict: bool = True):
        """ Type-check the value when the new model is being created.
        Attributes:
            value (tuple): the first element must contain the value,
                the second element must contain the MetaModel identifier (str)
        Raises:
            ModelInitException: If no MetaModel matches the identifier.
        """
        # Select the MetaModel registered under the identifier in value[1].
        conditional_meta_model = self.meta_models.get(value[1])
        if not conditional_meta_model:
            raise ModelInitException(f'There are no matching MetaModels '
                                     f'identified by {value[1]}.')
        # NOTE(review): this calls the MetaModel instance — presumably
        # MetaModel.__call__ returns a field-like object; confirm.
        return conditional_meta_model().init_value(value[0])
|
import JavaScriptCore
from PyObjCTools.TestSupport import TestCase
class TestJSStrintRefCF(TestCase):
    # Exercises the CFString <-> JSStringRef bridging helpers exposed by the
    # PyObjC JavaScriptCore bindings.
    def testFunctions(self):
        # Round-trip a Python str through JSStringCreateWithCFString and back.
        v = JavaScriptCore.JSStringCreateWithCFString("hello world")
        self.assertIsInstance(v, JavaScriptCore.JSStringRef)
        o = JavaScriptCore.JSStringCopyCFString(None, v)
        self.assertEqual(o, "hello world")
        self.assertIsInstance(o, str)
        # PyObjC metadata check: a CF "Copy" function must return a retained
        # object.
        self.assertResultIsCFRetained(JavaScriptCore.JSStringCopyCFString)
        JavaScriptCore.JSStringRelease(v)
|
class Player:
    """A Rock-Paper-Scissors-Lizard-Spock player with a name, a score and
    the gesture chosen for the current round."""

    def __init__(self, name):
        self.name = name
        self.gestures = ['Rock', 'Paper', 'Scissors', 'Lizard', 'Spock']
        self.score = 0
        # Gesture name chosen this round (0 until a choice has been made).
        self.chosen_gesture = 0

    def set_name(self):
        """Prompt the user for a name, store it and return it."""
        self.name = input("What is your name?")
        return self.name

    def choosing_gesture(self):
        """Prompt until a valid gesture index (0-4) is entered; store and
        return the chosen gesture name.

        BUG FIX: the original recursed on invalid input but then overwrote
        chosen_gesture with the invalid raw input and returned it. A loop
        now retries until the input is valid.
        """
        while True:
            user_input = input("Pick a gesture, ( Rock = 0, Paper = 1, Scissors = 2, Lizard = 3, Spock = 4 )")
            if user_input in ("0", "1", "2", "3", "4"):
                gesture = self.gestures[int(user_input)]
                print(f'{self.name} used {gesture}')
                self.chosen_gesture = gesture
                return self.chosen_gesture
            print("Input invalid try again.")
|
#!/usr/bin/env python
# File name: projecteuler040.py
# Author: Matt McGranaghan
# Date Created: 2014/05/12
# Date Modified: 2014/05/12
# Python Version: 2.7
def solution040():
    """Project Euler 40: build the first million digits of Champernowne's
    constant (0.123456789101112...) and return the product of the digits
    at positions 1, 10, 100, ..., 10**6.

    Returns:
        int: the product of the seven selected digits.
    """
    target_dec = 10**6
    # Collect chunks and join once: O(n) instead of the original quadratic
    # repeated string concatenation.
    parts = []
    total_len = 0
    count = 1
    while total_len < target_dec:
        chunk = str(count)
        parts.append(chunk)
        total_len += len(chunk)
        count += 1
    dec = ''.join(parts)
    product = 1
    for i in range(7):
        # dec is 0-indexed; digit positions are 1-indexed.
        product = product * int(dec[10**(i) - 1])
    return product


# Parenthesized call prints identically under Python 2 and Python 3.
print(solution040())
from urllib.request import urlopen
from bs4 import BeautifulSoup
# Scrape pythonscraping.com's "War and Peace" demo page: print the text of
# character names (green spans) and of red spans, then demo other finders.
html = urlopen('http://www.pythonscraping.com/pages/warandpeace.html')
bs = BeautifulSoup(html.read(),'html.parser')
# findAll is the legacy alias of find_all; both used here interchangeably.
nameList = bs.findAll('span', {'class':'green'})
#print(nameList)
for name in nameList:
    print(name.get_text())
tags = bs.find_all('span', {'class':'red'})
for tag in tags:
    print(tag.get_text())
# NOTE(review): `text=` is a deprecated alias of `string=` in bs4 >= 4.4 —
# consider migrating; behavior kept as-is here.
namenum = bs.find_all(text="the prince")
print(len(namenum))
# class_ avoids shadowing the `class` keyword; matches any tag with the class.
greenfind= bs.find_all(class_ = 'green')
print(greenfind)
|
"""
See https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/sample_guide.md
"""
from azure.template import template_main
def simple_sample():
    """Run the azure-template package's entry point, logging start/end."""
    print("Running simple sample")
    template_main()
    print("Completed running simple sample")


if __name__ == "__main__":
    simple_sample()
|
def main(n):
    # Project Euler 39 style search (Python 2 source: xrange, print
    # statements, and integer division via /): for each perimeter p up to n,
    # count the integer right triangles a+b+c == p and return the p with
    # the most solutions.
    maxcount = 0
    maxp = 0
    for p in xrange(1, n + 1):
        count = 0
        print '', p
        # a is the shortest side, so a < p/3; b follows from the rearranged
        # Pythagorean identity b = (p^2 - 2pa) / (2p - 2a).
        for a in xrange(1, (p + 1) / 3):
            if (p * p - 2 * p * a) % (2 * p - 2 * a) == 0:
                b = (p * p - 2 * p * a) / (2 * p - 2 * a)
                c = p - a - b
                print a, b, c
                count += 1
        if count > maxcount:
            maxcount = count
            maxp = p
    return maxp


print main(1000)
#!/usr/bin/python3
"""
Contains the route for delivering a list of states using an HTML file
"""
from flask import Flask, render_template
from models import storage
app = Flask(__name__)
@app.route('/states_list', strict_slashes=False)
def deliver_states_html():
    """Render the list of State objects from storage, sorted by name.

    Returns:
        an HTML page (7-states_list.html) listing every State in the
        storage medium.
    """
    states = storage.all("State").values()
    ordered = sorted(states, key=lambda state: state.name)
    return render_template(
        '7-states_list.html',
        sorted_state_instance_list=ordered)
@app.teardown_appcontext
def close_session(exception):
    """Close the storage session when the app context tears down.

    Flask passes (positionally) the exception that ended the context, or
    None; the original parameter was misleadingly named ``func``.
    """
    storage.close()


if __name__ == "__main__":
    app.run(host='0.0.0.0')
|
# Example 8-1. Variables a and b hold references to the same list, not copies of the list
a = [1, 2, 3]
b = a  # b is an alias: both names refer to the one list object
a.append(4)
print(a, b)  # both print [1, 2, 3, 4] — there is only one underlying list
# pylint: disable=too-many-branches
from __future__ import absolute_import, division, print_function, unicode_literals
import urllib
# import tldextract as _tldextract
import urlparse4 as urlparse
from pyfaup.faup import Faup
from . import py2_unicode
def tld_extract(domain):
    # Lazily create one process-wide Faup parser and stash it in
    # __builtins__ so every importer shares a single instance.
    # NOTE(review): this relies on __builtins__ being a dict, which is a
    # CPython main-module implementation detail — confirm before reuse.
    if "_faup" not in __builtins__:
        __builtins__["_faup"] = Faup()
    _faup = __builtins__["_faup"]
    # domain arrives as bytes (Python 2 str); strip surrounding dots before
    # decoding with faup.
    _faup.decode(domain.decode("utf-8").strip(b"."))
    # Returns (subdomain, domain-without-tld, tld), each defaulting to b"".
    return (_faup.get_subdomain() or b"", _faup.get_domain_without_tld() or b"", _faup.get_tld() or b"")
# TODO init lazily
# _tldextractor = _tldextract.TLDExtract(suffix_list_urls=None)
class URL(object):
    """ Base class for manipulating an URL without context.

    URLs are stored internally as bytes (Python 2 codebase). Derived
    attributes (parsed, domain, normalized, ...) are computed lazily in
    __getattr__ and memoized into the instance __dict__.
    """

    def __init__(self, url, check_encoding=False):
        # Normalize to bytes; py2_unicode is the unicode type alias.
        if isinstance(url, py2_unicode):
            self.url = url.encode("utf-8")
        else:
            self.url = url
        if check_encoding:
            try:
                self.url.decode('ascii')
            except UnicodeDecodeError:
                # Percent-encode the non-ASCII path/query/fragment parts so
                # the stored URL is pure ASCII.
                p = urlparse.urlsplit(self.url)
                # TODO: check the rightfulness of this!
                self.url = urlparse.urlunsplit((
                    p[0],
                    p[1],
                    urllib.quote(p[2], safe=b"/"),
                    urllib.quote(p[3], safe=b"&?="),
                    urllib.quote(p[4])
                ))

    def urljoin(self, href):
        """ Optimized version of urlparse.urljoin() """
        return urlparse.urljoin(self.url, href)

    # Allow picking/unpickling
    def __getstate__(self):
        return self.url

    def __setstate__(self, state):
        self.url = state

    # This is only called when the attribute is still missing; each derived
    # attribute is computed once here and memoized at the bottom, so later
    # reads never reach __getattr__ again (except normalized_path, which
    # returns early and is therefore recomputed every time).
    def __getattr__(self, attr):
        # pylint: disable=redefined-variable-type
        if attr == "parsed":
            # try:
            value = urlparse.urlsplit(self.url)
            # except ValueError:
            #     value = urlparse.urlsplit("about:blank")
        elif attr == "tldextracted":
            # (subdomain, domain-without-tld, tld) triple from faup.
            value = tld_extract(self.parsed.netloc)
            # value = _tldextractor(self.url)
        elif attr == "normalized":
            # Scheme-less, www/port-stripped form with a guaranteed path.
            value = urlparse.urlunsplit((
                None,
                self.normalized_domain,
                self.parsed.path if self.parsed.path else b"/",
                self.parsed.query,
                b""
            )).lstrip(b"/")
            if value.count(b"/") == 1:
                value = value.strip(b"/")
        elif attr == "normalized_without_query":
            # Same as `normalized` but with the query string dropped.
            value = urlparse.urlunsplit((
                None,
                self.normalized_domain,
                self.parsed.path if self.parsed.path else b"/",
                b"",
                b""
            )).lstrip(b"/")
            if value.count(b"/") == 1:
                value = value.strip(b"/")
        elif attr == "homepage":
            # scheme://domain/ with trailing slash trimmed.
            value = urlparse.urlunsplit((
                self.parsed.scheme,
                self.domain,
                b"/",
                b"",
                b""
            )).strip(b"/")
        # Pay-level domain
        elif attr == "pld":
            value = b"%s.%s" % (self.tldextracted[1], self.tldextracted[2])
        elif attr == "domain":
            value = self.parsed.netloc
        elif attr == "subdomain":
            value = self.tldextracted[0]
        elif attr == "normalized_domain":
            # Strip dots, any leading "www." labels, and default ports.
            value = self.domain.strip(b".")
            while value.startswith(b"www."):
                value = value[4:]
            if value.endswith(b':80'):
                value = value[:-3]
            elif value.endswith(b':443'):
                value = value[:-4]
            value = value.strip(b".")
        elif attr == "normalized_subdomain":
            value = self.subdomain.strip(b".")
            if value == b"www":
                value = b""
            else:
                while value.startswith(b"www."):
                    value = value[4:]
        elif attr == "normalized_path":
            # Early returns: this attribute is intentionally(?) not memoized.
            if self.parsed.path == b"/":
                return b""
            return self.parsed.path
        # https://en.wikipedia.org/wiki/Public_Suffix_List
        # Returns the domain name suffix ("co.uk" for "bbc.co.uk")
        elif attr == "suffix":
            value = self.tldextracted[2]
        else:
            raise Exception("Unknown attribute %s !" % attr)
        # Memoize so __getattr__ is bypassed on the next access.
        self.__dict__[attr] = value
        return value
|
# Read an integer and print its first five multiples (Python 2: raw_input).
try:
    n=int(raw_input())
except ValueError:
    print("Enter only integers")
else:
    # Runs only when the conversion succeeded.
    for i in range(1,6):
        print(n*i)
from .dump import EnsemblFungiBioMart
from .upload import (EnsemblFungiPrositeUploader, EnsemblFungiPfamUploader,
EnsemblFungiInterproUploader, EnsemblFungiGenomicPosUploader,
EnsemblFungiGeneUploader, EnsemblFungiAccUploader)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/4/12 22:00
# @Author : zengsk in HoHai
'''
FTP批量下载数据
'''
import os
import sys
from ftplib import FTP
class FtpDownloadCls:
    """Batch-download files from an FTP server, recursing into
    subdirectories."""

    def __init__(self, ftpserver, port, usrname, pwd):
        self.ftpserver = ftpserver  # FTP host
        self.port = port            # FTP port
        self.usrname = usrname      # login user name
        self.pwd = pwd              # login password
        self.ftp = self.ftpConnect()

    def ftpConnect(self):
        """Connect and log in; raise IOError on failure.

        The original bare ``except:`` also swallowed KeyboardInterrupt and
        SystemExit; narrowed to Exception.
        """
        ftp = FTP()
        try:
            ftp.connect(self.ftpserver, self.port)
            ftp.login(self.usrname, self.pwd)
        except Exception:
            raise IOError('\n FTP login failed!!!')
        else:
            print(ftp.getwelcome())
            print('\n+------- FTP connection successful!!! --------+\n')
            return ftp

    def downloadFile(self, ftpfile, localfile):
        """Download a single remote file to ``localfile``."""
        bufsize = 1024
        with open(localfile, 'wb') as fid:
            self.ftp.retrbinary('RETR {0}'.format(ftpfile), fid.write, bufsize)
        return True

    def _is_remote_dir(self, name):
        """Return True when ``name`` is a directory on the *server*,
        detected by attempting to cwd into it."""
        from ftplib import error_perm
        try:
            self.ftp.cwd(name)
        except error_perm:
            return False
        self.ftp.cwd('..')
        return True

    def downloadFiles(self, ftpath, localpath):
        """Download every entry under ``ftpath`` into ``localpath``,
        recursing into subdirectories."""
        print('FTP PATH: {0}'.format(ftpath))
        if not os.path.exists(localpath):
            os.makedirs(localpath)
        self.ftp.cwd(ftpath)
        print('\n+----------- downloading!!! -----------+\n')
        for i, file in enumerate(self.ftp.nlst()):
            print('{0} <> {1}'.format(i, file))
            local = os.path.join(localpath, file)
            # BUG FIX: the original used os.path.isdir(file), which checks
            # the *local* filesystem for a remote name and so essentially
            # never detected remote directories. Probe the server instead.
            if self._is_remote_dir(file):
                if not os.path.exists(local):
                    os.makedirs(local)
                self.downloadFiles(file, local)
            else:
                self.downloadFile(file, local)
        self.ftp.cwd('..')
        return True

    def ftpDisConnect(self):
        """Politely close the FTP session."""
        self.ftp.quit()
if __name__ == '__main__':
    # Input parameters for the JAXA GSMaP archive download.
    # NOTE(review): credentials are hardcoded in source — move them to
    # environment variables or a config file.
    ftpserver = 'hokusai.eorc.jaxa.jp' # FTP host
    port = 21 # FTP port
    usrname = 'rainmap' # login user
    pwd = 'Niskur+1404' # login password
    ftpath = '/standard/v7/daily/00Z-23Z/201507/' # remote directory
    localpath = 'D:/data/' # local destination directory
    Ftp = FtpDownloadCls(ftpserver, port, usrname, pwd)
    Ftp.downloadFiles(ftpath, localpath)
    Ftp.ftpDisConnect()
    print("\n+-------- OK!!! --------+\n")
|
#!/usr/bin/env python
from functools import reduce
from copy import deepcopy
# fileIn = "dataSet/a_example.txt"
# fileIn = "dataSet/b_read_on.txt"
# fileIn = "dataSet/c_incunabula.txt"
# fileIn = "dataSet/d_tough_choices.txt"
# fileIn = "dataSet/e_so_many_books.txt"
# fileIn = "dataSet/f_libraries_of_the_world.txt"
import sys
sys.setrecursionlimit(10**6)
totalSc = 0
# Google Hash Code 2020 "Book Scanning": greedy plan per input file, scored
# locally instead of writing submission files.
for fileIn in [
        "dataSet/a_example.txt",
        "dataSet/b_read_on.txt",
        "dataSet/c_incunabula.txt",
        "dataSet/d_tough_choices.txt",
        "dataSet/e_so_many_books.txt",
        "dataSet/f_libraries_of_the_world.txt"]:
    with open(fileIn, "r") as f:
        # B books, L libraries, D days.
        B, L, D = map(int, f.readline().strip().split())
        bookSc = [int(x) for x in f.readline().strip().split()]
        N = [0 for _ in range(L)]
        T = [0 for _ in range(L)]
        M = [0 for _ in range(L)]
        libBList = [[] for _ in range(L)]
        for lib in range(L):
            # T is signup time, M is shipping number per day
            N[lib], T[lib], M[lib] = map(int, f.readline().strip().split())
            libBList[lib] = map(int, f.readline().strip().split())
            # Sort each library's books by descending score (negate + sort).
            tmpB = [[-bookSc[bid], bid] for bid in libBList[lib]]
            libBList[lib] = [bid for _, bid in sorted(tmpB)]
    # Greedy signup order: shortest signup, then most daily shipping,
    # then most books.
    sortLibList = sorted([[T[lib], -M[lib], -N[lib], lib] for lib in range(L)])
    sortLibList = [lib for t, _, _, lib in sortLibList]
    # Skip output, directly scoring it
    # with open(fileIn[0] + ".txt", "w") as of:
    #     of.write(str(L) + "\n")
    #     for lib in range(L):
    #         of.write(" ".join([str(v) for v in [lib, N[lib]]]) + "\n")
    #         of.write(" ".join([str(v) for v in libBList[lib]]) + "\n")
    bVis = set()  # books already scanned (shared across scoreL calls)
    def scoreL(startD, lib, bList):
        # Score one library signed up at day startD: only the books that fit
        # in the remaining shipping capacity and were not scanned before.
        sc = 0
        global T, D, bVis
        remainD = D - (startD + T[lib])
        if remainD > 0:
            maxShipN = remainD * M[lib]
            for bid in bList[:maxShipN]:
                if bid not in bVis:
                    sc += bookSc[bid]
                    bVis.add(bid)
        return sc
    def score(ofList):
        # Total score of an ordered plan of [library, book-list] pairs;
        # signups are sequential, so each library starts after the previous.
        sc, currD = 0, 0
        for lib, bList in ofList:
            sc += scoreL(currD, lib, bList)
            global T
            currD += T[lib]
        return sc
    # Pre-deduplicate books across libraries in signup order.
    bbVis = set()
    ofList = []
    for lib, bList in [[lib, libBList[lib]] for lib in sortLibList]:
        tmpBList = []
        for bid in bList:
            if bid not in bbVis:
                bbVis.add(bid)
                tmpBList.append(bid)
        ofList.append([lib, tmpBList])
    t = score(ofList)
    # t = score([[lib, libBList[lib]] for lib in range(L)])
    totalSc += t
    print(t, totalSc)
print(totalSc)
|
from files_treatment_new.xls_gen_bank_rast import Xls_gen_bank
from files_treatment_new.fasta_contigs_RAST import Fasta_contigs_RAST
from files_treatment_new.genbank_file_RAST import Genbank_proteic_RAST
from files_treatment_new.fasta_contigs_RAST import Fasta_contigs_RAST
import glob
import os
from configuration.configuration_api import ConfigurationAPI
from rest_client.AuthenticationRest import AuthenticationAPI
from files_treatment_new.xls_gen_bank_rast import Xls_gen_bank
from files_treatment_new.fasta_contigs_RAST import Fasta_contigs_RAST
from objects_new.Contigs_new import Contig
from objects_API.ContigJ import ContigJson
from objects_API.StrainJ import StrainJson
from objects_API.BacteriumJ import BacteriumJson
from objects_API.GeneJ import GeneJson
from objects_API.ProteinJ import ProteinJson
from Patric.ImportFiles import ImportFilesPatric
def check_file_exits(file_path):
    """Verify whether a given path exists (give the complete path).

    :param file_path: complete path of the file
    :type file_path: string - required
    :return: True or False according to existence
    :rtype: boolean
    """
    # os.path.exists already returns a bool; the original
    # `if x is True: return True / else: return False` was redundant.
    return os.path.exists(file_path)
def get_list_ids_files_in_path(path):
    """List the names of all entries under ``os.getcwd() + path``.

    :param path: path suffix appended to the current working directory
    :type path: string - required
    :return: list of entry names found there
    :rtype: list(str)
    :note when the start point is smaller than the end point (in the
        contig), the "Strand" field in the excel file is negative
    """
    full_path = os.getcwd() + path
    return os.listdir(full_path)
def createContig(contigObj, organismID):
    """Insert a Contig into the REST API.

    :param contigObj: Contig DBA object to insert
    :param organismID: ID of the organism owning this contig
    :type contigObj: WholeDNA
    :type organismID: int
    :return: id of the Contig inserted
    :rtype: int
    """
    payload = ContigJson(
        id_db_online=contigObj.id_contig_db_outside,
        sequence_DNA=contigObj.sequence,
        fasta_head=contigObj.head,
        organism=organismID)
    created = payload.setContig()
    return created['id']
def createContigNew(contigObj, bacteriumId):
    """Normalize the fasta head (ensure a leading '>') and insert the
    contig for the given bacterium; return the new contig id."""
    head = contigObj.head if '>' in contigObj.head else '>' + contigObj.head
    normalized = Contig(
        id_contig_db_outside=contigObj.id_contig_db_outside,
        head=head,
        sequence=contigObj.sequence)
    return createContig(normalized, bacteriumId)
def createGene(id_bacterium, dna_sequence, start_contig, end_contig, fk_contig, id_db_online, function = None, fasta_head = None):
    """
    insert a Gene into a REST API
    :param id_bacterium: ID of the organisms
    :param dna_sequence: DNA sequence of the gene
    :param start_contig: start position of the gene in the contig
    :param end_contig: end position of the gene in the contig
    :param fk_contig: id of the contig
    :param function: function of the gene
    :param fasta_head: fasta head of the gene
    :type id_bacterium: int
    :type dna_sequence: str
    :type start_contig: int - can be None
    :type end_contig: int - can be None
    :type fk_contig: int - can be None
    :type function: str - can be None
    :type fasta_head: str - can be None
    :return: id of the gene inserted
    :rtype int
    """
    # NOTE(review): the `function` parameter is accepted but never forwarded
    # to GeneJson below — confirm whether the API supports it and wire it in.
    geneObjJson = GeneJson(sequence_DNA = dna_sequence, organism = id_bacterium, position_start_contig = start_contig, position_end_contig = end_contig, contig = fk_contig, fasta_head = fasta_head, id_db_online = id_db_online)
    geneObjJson = geneObjJson.setGene()
    return geneObjJson.id
def createProtein(id_db_online, fk_organism, fk_gene, sequence_aa, description):
    """Insert a Protein into the REST API.

    :param id_db_online: accession id of the protein in the online DB
    :param fk_organism: id of the organism
    :param fk_gene: id of the gene
    :param sequence_aa: amino-acid sequence
    :param description: protein description
    :return: id of the protein inserted
    :rtype: int
    """
    payload = ProteinJson(
        id_db_online=id_db_online,
        organism=fk_organism,
        gene=fk_gene,
        sequence_AA=sequence_aa,
        description=description)
    saved = payload.setProtein()
    return saved.id
def createAndInsertElements(contig_obj, id_bacterium, xls_genbank_rast_obj):
    # Insert one contig plus all of its genes/proteins (looked up from the
    # RAST excel sheet by contig head) through the REST API.
    #listProts = xls_genbank_patric_obj.get_proteins_information_in_excel()
    list_proteins = xls_genbank_rast_obj.get_proteins_objects_by_contig_id(contig_obj.head)
    # NOTE(review): list_proteins_ids is computed but never used.
    list_proteins_ids = xls_genbank_rast_obj.get_proteins_ids_by_contig_id(contig_obj.head)
    contig_id = createContigNew(contig_obj, id_bacterium)
    for protein_obj in list_proteins:
        gene_function = None
        gene_id_db_online = None
        # Entries without a protein sequence are treated as plain genes.
        if protein_obj.sequence_prot == None:
            gene_function = protein_obj.description
            gene_id_db_online = protein_obj.id_accession
        fasta_head_gene = '>' + protein_obj.id_accession
        id_gene = createGene(id_bacterium, protein_obj.sequence_dna, protein_obj.start_point_cnt, protein_obj.end_point_cnt, fk_contig = contig_id, function = gene_function, fasta_head = fasta_head_gene, id_db_online = gene_id_db_online)
        # Only keep proteins with a meaningful (>5 aa) sequence.
        if protein_obj.sequence_prot != None and len(protein_obj.sequence_prot) > 5:
            createProtein(id_db_online = protein_obj.id_accession, fk_organism = id_bacterium, fk_gene = id_gene, sequence_aa = protein_obj.sequence_prot, description = protein_obj.designation)
def createStrain(designation, fk_specie):
    """Create a Strain through the REST API and return the API response
    object for the newly created strain."""
    payload = StrainJson(designation=designation, specie=fk_specie)
    return payload.setStrain()
def createBacterium(acc_number, person_responsible, source_data, fk_strain):
    """Create a Bacterium through the REST API and return its new id."""
    payload = BacteriumJson(
        acc_number=acc_number,
        person_responsible=person_responsible,
        source_data=source_data,
        strain=fk_strain)
    saved = payload.setBacterium()
    return saved.id
#Token connection
conf_obj = ConfigurationAPI()
conf_obj.load_data_from_ini()
AuthenticationAPI().createAutenthicationToken()
#End token connection
#Uncomment this line for the first insertion
list_files = get_list_ids_files_in_path('/RAST/Xls/')
list_files_error = []
list_files_done = []
cwd = os.getcwd()
# Map RAST genome ids (file name minus extension) to organism names.
dict_names = {}
dict_names['470.1135'] = 'Acinetobacter baumannii MDR-ZJ06'
dict_names['470.1134'] = 'Acinetobacter baumannii MDR-TJ'
for file_name in list_files:
    if file_name[:-4] in dict_names:
        path_file_xls = cwd + '/RAST/Xls/' + file_name[:-3] + 'xls'
        path_file_contig = cwd + '/RAST/CONTIG/' + file_name[:-3] + 'contigs.fa'
        xls_file_exists = check_file_exits(path_file_xls)
        contig_file_exist = check_file_exits(path_file_contig)
        assert xls_file_exists == True and contig_file_exist == True, 'A file is missing'
        xls_obj = Xls_gen_bank(path_file_xls, sheet_name = 'Sheet1')
        contig_fasta_file_patric_obj = Fasta_contigs_RAST(path_file = path_file_contig)
        qty_cntg = contig_fasta_file_patric_obj.get_qty_of_contigs()
        list_cnt = contig_fasta_file_patric_obj.create_contigs_from_file()
        gi_name = "Greg_" + dict_names[file_name[:-4]]
        acc_value = "Greg_" + dict_names[file_name[:-4]]
        # Hardcoded foreign keys for this import batch.
        person_responsible = 2
        source_data = 3
        fk_specie = 613
        strain = createStrain(dict_names[file_name[:-4]], fk_specie)
        id_strain = strain.id
        id_bacterium = createBacterium(acc_value, person_responsible, source_data, id_strain)
        for contig_old in list_cnt:
            createAndInsertElements(contig_old, id_bacterium, xls_obj)
dir_path = os.path.dirname(os.path.realpath(__file__))
print(dir_path)
path = dir_path + '/Patric/organisms/'
print(path)
import_files_obj = ImportFilesPatric(path, '.contigs.fasta','.xls')
dict_files = import_files_obj.getOrganismsFiles()
#Information for the bacterium
# NOTE(review): manageOrganismsContent is not defined anywhere in this file;
# this line raises NameError at runtime.
manageOrganismsContent(dict_files)
#### Test old methods
# NOTE(review): list_contigs_ids_fasta and xls_genbank_patric_obj are also
# undefined — this leftover test code cannot run as-is.
for contig_id in list_contigs_ids_fasta:
    print(contig_id)
    list_proteins = xls_genbank_patric_obj.get_proteins_objects_by_contig_id(contig_id)
print('fini')
import unittest
from programy.clients.events.console.config import ConsoleConfiguration
from programy.config.bot.splitter import BotSentenceSplitterConfiguration
from programy.config.file.yaml_file import YamlConfigurationFile
from programy.utils.license.keys import LicenseKeys
class BotSentenceSplitterConfigurationTests(unittest.TestCase):
    """Config-section loading tests for BotSentenceSplitterConfiguration."""

    def test_with_data(self):
        # classname given, split_chars omitted -> default split chars apply.
        yaml = YamlConfigurationFile()
        self.assertIsNotNone(yaml)
        yaml.load_from_text("""
        bot:
            splitter:
                classname: programy.dialog.splitter.regex.RegexSentenceSplitter
        """, ConsoleConfiguration(), ".")

        bot_config = yaml.get_section("bot")
        splitter_config = BotSentenceSplitterConfiguration()
        splitter_config.load_config_section(yaml, bot_config, ".")

        # Splitter config declares no license keys; must not raise.
        license_keys = LicenseKeys()
        splitter_config.check_for_license_keys(license_keys)

        self.assertEqual("programy.dialog.splitter.regex.RegexSentenceSplitter", splitter_config.classname)
        self.assertEqual('[:;,.?!]', splitter_config.split_chars)

    def test_with_default_data(self):
        # Explicit split_chars overrides the default.
        yaml = YamlConfigurationFile()
        self.assertIsNotNone(yaml)
        yaml.load_from_text("""
        bot:
            splitter:
                classname: programy.dialog.splitter.regex.RegexSentenceSplitter
                split_chars: .:'
        """, ConsoleConfiguration(), ".")

        bot_config = yaml.get_section("bot")
        splitter_config = BotSentenceSplitterConfiguration()
        splitter_config.load_config_section(yaml, bot_config, ".")

        self.assertEqual("programy.dialog.splitter.regex.RegexSentenceSplitter", splitter_config.classname)
        self.assertEqual(".:'", splitter_config.split_chars)

    def test_without_data(self):
        # Empty splitter section: only verifies loading raises nothing.
        yaml = YamlConfigurationFile()
        self.assertIsNotNone(yaml)
        yaml.load_from_text("""
        bot:
            splitter:
        """, ConsoleConfiguration(), ".")

        bot_config = yaml.get_section("bot")
        splitter_config = BotSentenceSplitterConfiguration()
        splitter_config.load_config_section(yaml, bot_config, ".")

    def test_with_no_data(self):
        # No splitter section at all: only verifies loading raises nothing.
        yaml = YamlConfigurationFile()
        self.assertIsNotNone(yaml)
        yaml.load_from_text("""
        bot:
        """, ConsoleConfiguration(), ".")

        bot_config = yaml.get_section("bot")
        splitter_config = BotSentenceSplitterConfiguration()
        splitter_config.load_config_section(yaml, bot_config, ".")

    def test_defaults(self):
        # to_yaml with defaults=True must emit the documented default values.
        splitter_config = BotSentenceSplitterConfiguration()
        data = {}
        splitter_config.to_yaml(data, True)
        BotSentenceSplitterConfigurationTests.assert_defaults(self, data)

    @staticmethod
    def assert_defaults(test, data):
        # Shared with other test modules, hence the (test, data) signature.
        test.assertEqual(data['classname'], BotSentenceSplitterConfiguration.DEFAULT_CLASSNAME)
        test.assertEqual(data['split_chars'], BotSentenceSplitterConfiguration.DEFAULT_SPLITCHARS)
|
import socket
import argparse
from icmp import ICMPSocket
if __name__ == '__main__':
    # Minimal ping(8) clone built on the project's ICMPSocket wrapper.
    parser = argparse.ArgumentParser(description='send ICMP ECHO_REQUEST to network hosts')
    parser.add_argument('-c', metavar='count', dest='count', default=5, type=int, help='Stop after sending count ECHO_REQUEST packets.')
    parser.add_argument('-s', metavar='packetsize', dest='packetsize', default=56, type=int, help='Specifies the number of data bytes to be sent.')
    parser.add_argument('target')
    args = parser.parse_args()
    # Resolve the hostname once up front.
    # NOTE(review): ICMP sockets typically need raw-socket privileges — TODO confirm.
    ipaddr = socket.gethostbyname(args.target)
    sock = ICMPSocket(ipaddr, args.packetsize)
    print('PING {}({}) {} bytes of data.'.format(args.target, ipaddr, args.packetsize))
    for i in range(args.count):
        sock.request()
|
from django.db import models
from django.core.cache import cache
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import settings
from mezzanine.core.models import Slugged, RichText
from mezzanine.core.fields import FileField, RichTextField
from mezzanine.utils.models import AdminThumbMixin
from .category import BlockCategory
class BaseBlock(Slugged):
    """Base Block

    Abstract base for all block models: slugged title plus an optional
    category and flags controlling visibility and title rendering.
    """
    # NOTE(review): ForeignKey without on_delete is pre-Django-2.0 syntax.
    category = models.ForeignKey(BlockCategory, null=True, blank=True)
    login_required = models.BooleanField(_("Login required"), help_text=_("If checked, only logged in users can view this page"), default=False)
    show_title = models.BooleanField(_("Show title"), help_text=_("If checked, show block title"), default=False)

    def save(self, *args, **kwargs):
        # Invalidate the per-slug cache entry whenever the block changes.
        super(BaseBlock, self).save(*args, **kwargs)
        cache.delete('%s%s' % ('mezzanine_blocks', self.slug))

    class Meta:
        abstract = True
class Block(BaseBlock):
    """Content Block

    Plain-text/HTML content block (no rich-text editor).
    """
    content = models.TextField(blank=True)

    class Meta:
        verbose_name = _('Block')
        verbose_name_plural = _('Blocks')
class RichBlock(BaseBlock, RichText):
    """RichText Block

    Block whose content comes from Mezzanine's RichText mixin (WYSIWYG).
    """
    class Meta:
        verbose_name = _('Rich Block')
        verbose_name_plural = _('Rich Blocks')
class ImageBlock(BaseBlock, AdminThumbMixin):
    """An image Block

    Image plus optional description/external URL, with configurable
    thumbnail dimensions and JPEG quality.
    """
    image = FileField(verbose_name=_("Image"), upload_to="images", format="Image", max_length=255, null=True, blank=True)
    description = RichTextField(_("Description"), blank=True, null=True)
    url = models.URLField(_("External URL"), max_length=255, blank=True, null=True, help_text=_("Optional URL."))
    height = models.IntegerField(_("Height"), default=100, help_text=_("Height in pixels."))
    width = models.IntegerField(_("Width"), default=200, help_text=_("Width in pixels."))
    quality = models.IntegerField(_("Quality"), default=80)
    # Field used by AdminThumbMixin to render the admin list thumbnail.
    admin_thumb_field = "image"

    class Meta:
        verbose_name = _('Image Block')
        verbose_name_plural = _('Image Blocks')

    def get_url(self):
        # External URL exactly as entered (may be None/blank).
        return self.url

    def get_thumb_url(self):
        # MEDIA_URL-prefixed thumbnail at the configured size/quality;
        # empty string when no image is set. Imported lazily to avoid a
        # circular import with mezzanine's template tags.
        from mezzanine.core.templatetags.mezzanine_tags import thumbnail
        thumb = None
        if self.admin_thumb_field:
            thumb = getattr(self, self.admin_thumb_field, None)
        if thumb is None:
            return ""
        return "%s%s" % (settings.MEDIA_URL, thumbnail(thumb, self.width, self.height, self.quality))
|
import pygame
import sys
import numpy as np
pygame.init() # initializing the pygame module
WIDTH = 600
HEIGHT = 600
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)     # player 1 (circles)
BLUE = (0, 0, 255)    # player 2 (crosses)
LINE_WIDTH = 10
BOARD_ROWS = 3
BOARD_COLS = 3
CIRCLE_RADIUS = 60
CIRCLE_WIDTH = 10
CROSS_WIDTH = 15
CROSS_SPACE = 50      # inset of the cross strokes inside a square
SQUARE_SIZE = 200     # WIDTH // BOARD_COLS
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption('TIC TAC TOE')
screen.fill(WHITE)
#board: 0 = empty, 1 = player 1, 2 = player 2
board = np.zeros((BOARD_ROWS, BOARD_COLS))
def draw_lines():
    """Draw the two horizontal and two vertical grid lines of the board."""
    for y in (200, 400):
        pygame.draw.line(screen, BLACK, (0, y), (600, y), LINE_WIDTH)
    for x in (200, 400):
        pygame.draw.line(screen, BLACK, (x, 0), (x, 600), LINE_WIDTH)
def draw_figures():
    # Render the current board: red circles for player 1, blue crosses for
    # player 2; each figure is centered in its 200x200 square.
    for row in range(BOARD_ROWS):
        for col in range(BOARD_COLS):
            if board[row][col] == 1:
                pygame.draw.circle(screen, RED, (int(
                    col * 200 + 100), int(row * 200 + 100)), CIRCLE_RADIUS, CIRCLE_WIDTH)
            elif board[row][col] == 2:
                # Two diagonal strokes inset by CROSS_SPACE form the X.
                pygame.draw.line(screen, BLUE, (col * 200 + CROSS_SPACE, row * 200 + 200 - CROSS_SPACE),
                                 (col * 200 + 200 - CROSS_SPACE, row * 200 + CROSS_SPACE), CROSS_WIDTH)
                pygame.draw.line(screen, BLUE, (col * 200 + CROSS_SPACE, row * 200 + CROSS_SPACE),
                                 (col * 200 + 200 - CROSS_SPACE, row * 200 + 200 - CROSS_SPACE), CROSS_WIDTH)
def mark_square(row, col, player):
    """Record `player`'s move at (row, col) on the global board."""
    board[row, col] = player
def available_square(row, col):
    """Return True while the square at (row, col) is still unclaimed."""
    return board[row, col] == 0
def check_win(player):
    # Return True (and draw the winning line) if `player` has three in a
    # row; side effect: the matching draw_* helper paints on the screen.
    # vertical win check
    for col in range(BOARD_COLS):
        if board[0][col] == player and board[1][col] == player and board[2][col] == player:
            draw_vertical_win_line(col, player)
            return True
    # horizontal win check
    for row in range(BOARD_ROWS):
        if board[row][0] == player and board[row][1] == player and board[row][2] == player:
            draw_horizontal_win_line(row, player)
            return True
    # ascending diagonal win check (bottom-left to top-right)
    if board[2][0] == player and board[1][1] == player and board[0][2] == player:
        draw_asc_diagonal(player)
        return True
    # descending diagonal win check (top-left to bottom-right)
    if board[0][0] == player and board[1][1] == player and board[2][2] == player:
        draw_desc_diagonal(player)
        return True
    return False
def draw_vertical_win_line(col, player):
    # Draw the winning stroke down column `col` in the winner's color.
    # NOTE(review): `color` is unbound if player is ever not 1 or 2; the
    # current callers only pass 1 or 2.
    posX = col * SQUARE_SIZE + SQUARE_SIZE//2
    if player == 1:
        color = RED
    elif player == 2:
        color = BLUE
    pygame.draw.line(screen, color, (posX, 15), (posX, HEIGHT - 15), LINE_WIDTH)
def draw_horizontal_win_line(row, player):
    # Draw the winning stroke across row `row` in the winner's color.
    posY = row * SQUARE_SIZE + SQUARE_SIZE//2
    if player == 1:
        color = RED
    elif player == 2:
        color = BLUE
    pygame.draw.line(screen, color, (15, posY), (WIDTH - 15, posY), 15)
def draw_asc_diagonal(player):
    # Winning stroke from bottom-left to top-right.
    if player == 1:
        color = RED
    elif player == 2:
        color = BLUE
    pygame.draw.line(screen, color, (15, HEIGHT - 15), (WIDTH - 15, 15), 15)
def draw_desc_diagonal(player):
    # Winning stroke from top-left to bottom-right.
    if player == 1:
        color = RED
    elif player == 2:
        color = BLUE
    pygame.draw.line(screen, color, (15, 15), (WIDTH - 15, HEIGHT - 15), 15)
def restart():
    """Wipe the screen, redraw the grid, and clear every board square."""
    screen.fill(WHITE)
    draw_lines()
    # numpy slice assignment zeroes the whole board in one step.
    board[:] = 0
def is_board_full():
    """Return True when no empty (0) squares remain on the board."""
    return not (board == 0).any()
draw_lines()
player = 1
game_over = False
# mainloop: handle quit, mouse moves, and 'r' to restart.
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
        # BUG FIX: the original never checked game_over here, so players
        # could keep placing marks after a win; moves are now blocked
        # until the game is restarted with 'r'.
        if event.type == pygame.MOUSEBUTTONDOWN and not game_over:
            mouseX = event.pos[0]
            mouseY = event.pos[1]
            clicked_row = int(mouseY // 200)
            clicked_col = int(mouseX // 200)
            if available_square(clicked_row, clicked_col):
                mark_square(clicked_row, clicked_col, player)
                if check_win(player):
                    game_over = True
                # Alternate between players 1 and 2.
                player = player % 2 + 1
                draw_figures()
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_r:
                restart()
                player = 1
                game_over = False
    pygame.display.update()
|
# Extract the methylation rows for the CpG sites selected for `target` from
# the pan-cancer tumor/normal matrices, annotating each site with its gene.
target = "PCDHG"
file_path = "/home/kwangsookim_lab/joonhyeong_park/Practice/"
input_target_site = open(file_path + target + ".Selected.CpGsites.txt", 'r')
data = input_target_site.read().splitlines()
# Column index of the gene annotation in the header row.
gene_index = data[0].split().index("UCSC_RefGene_Name")
target_site = list(map(lambda x : data[x].split()[0], range(1, len(data))))
target_gene = list(map(lambda x : data[x].split()[gene_index], range(1, len(data))))
cancerlist = ["PANCANCER"]
index = 0
for i in range(len(cancerlist)) :
    input_tumor = open(file_path + cancerlist[i] + ".humanmethylation450.tumor.31tumors.txt", 'r')
    input_normal = open(file_path + cancerlist[i] + ".humanmethylation450.normal.31tumors.txt", 'r')
    output_tumor = open(file_path + cancerlist[i] + "." + target + ".Methylation.Pattern.tumor.Selected.txt", 'w')
    output_normal = open(file_path + cancerlist[i] + "." + target + ".Methylation.Pattern.normal.Selected.txt", 'w')
    sample_tumor = input_tumor.readline().split()[1:]
    sample_normal = input_normal.readline().split()[1:]
    output_tumor.write("Site\tRelatedGene\t" + "\t".join(sample_tumor) + "\n")
    output_normal.write("Site\tRelatedGene\t" + "\t".join(sample_normal) + "\n")
    # NOTE(review): assumes tumor and normal files list the same sites in
    # the same order; an earlier tumor-file EOF would raise IndexError.
    while(True) :
        line1 = input_tumor.readline().split()
        line2 = input_normal.readline().split()
        if(len(line2) == 0) : break
        # target_site is assumed sorted in file order, so a single cursor
        # (`index`) is enough to match selected sites.
        if(line1[0] == target_site[index]) :
            index += 1
            output_tumor.write(line1[0] + "\t" + target_gene[index - 1] + "\t" + "\t".join(line1[1:]) + "\n")
            output_normal.write(line2[0] + "\t" + target_gene[index - 1] + "\t" + "\t".join(line2[1:]) + "\n")
            if(index == len(target_site)) : break
    # NOTE(review): none of the opened files are explicitly closed.
|
# Compute the length of the longest influence chain: start from everyone who
# influences someone, then repeatedly advance the frontier along the edges.
n = int(input()) # the number of relationships of influence
influences = [input().split() for i in range(n)]
length = 0
people_left = {x for (x, _) in influences}
# NOTE(review): a cycle in the influence graph makes this loop run forever.
while len(people_left) > 0:
    # Next frontier: people influenced by someone in the current frontier.
    people_left = {y for (x, y) in influences if x in people_left}
    length += 1
print(length)
|
import sys
import re
from solver.game import Game
from solver.actions.add import Add
from solver.actions.append import Append
from solver.actions.button_add import ButtonAdd
from solver.actions.divide import Divide
from solver.actions.inv10 import Inv10
from solver.actions.invert import Invert
from solver.actions.mirror import Mirror
from solver.actions.multiply import Multiply
from solver.actions.pow import Pow
from solver.actions.replace import Replace
from solver.actions.reverse import Reverse
from solver.actions.shift import Shift
from solver.actions.store_use import StoreUse
from solver.actions.subtract import Subtract
from solver.actions.truncate import Truncate
from solver.actions.sum import Sum
class Solver:
def __init__(self):
    # Game state, UI state ('solve' / 'cancel' / None after setup), and
    # whether a store/use button was added (it changes combination
    # generation in run()). __setup() runs the interactive menu immediately.
    self.__game = Game()
    self.__state = None
    self.__has_store_button = False
    self.__setup()
def __setup(self):
    """Interactive menu loop: keep adding buttons until the user picks
    Solve (1) or Cancel (2); any other integer adds a button."""
    print ("""What do you want to do?
[0] Add button
[1] Solve
[2] Cancel
""")
    userChoice = None
    while True:
        try:
            userChoice = int(input('Chose an action: '))
        except ValueError:
            # Non-integer input: re-prompt.
            userChoice = None
            continue
        if userChoice == 2:
            self.__state = 'cancel'
            return
        elif userChoice == 1:
            self.__state = 'solve'
            return
        else:
            self.__add_button()
def __add_button(self):
message = "What button do you want to add?\n"
for actionIndex, action in enumerate(self.__game.available_actions()):
message += '[' + str(actionIndex) + '] ' + action + "\n"
print (message)
userChoice = None
while userChoice not in range(len(self.__game.available_actions())):
try:
userChoice = int(input('Chose a button: '))
except ValueError:
userChoice = None
continue
actionName = self.__game.available_actions()[userChoice]
actionClassName = re.sub('_.',lambda x: x.group()[1].upper(), actionName)
actionClassName = actionClassName[0].upper() + actionClassName[1:]
action = getattr(sys.modules[__name__], actionClassName)()
identifier = str(action.get_identifier())
key = actionName + ('_' + identifier if identifier else '')
self.__game.add_action(key, action)
if actionName == 'store_use':
self.__has_store_button = True
def run(self):
if self.__state != 'solve':
return
if len(self.__game.actions) == 0:
print ("No buttons to press")
return
print ('solving')
pool = list(self.__game.actions.keys())
combinationKeySets = self.__combinate(pool, self.__game.moves)
if self.__has_store_button == True:
#since the store action doesn't count for a move it's added to the combinationSets after these have been generated
storeAction = StoreUse()
storeAction.type = 'store'
self.__game.add_action('store_init', storeAction)
# print (combinationKeySets)
combinationKeySets = self.__combinateWithStore(combinationKeySets)
# print (combinationKeySets)
# print ("-"*10)
self.__game.backup()
found = False
for combinationKeys in combinationKeySets:
self.__game.restore_backup()
newTotal = self.__game.start
# print (combinationKeys)
for combinationKey in combinationKeys:
# print (combinationKey)
newTotal = self.__game.actions[combinationKey].run(newTotal, self.__game)
if not isinstance(newTotal, int) or len(str(newTotal)) > 6:
#should always be int and calculator cannot handle more than 6 digits
break
# print(newTotal)
if newTotal > 0 and self.__game.portal_in != None and self.__game.portal_out != None:
result = list(str(newTotal))
while len(result) - 1 >= self.__game.portal_in:
multipler = pow(10, self.__game.portal_out) #keep base the same (when adding number to second index = *10)
index = len(result) - self.__game.portal_in - 1
toAdd = result.pop(index)
toAdd = int(toAdd) * multipler
# print(result)
result = int("".join(result))
result += toAdd
result = list(str(result))
# print(result)
newTotal = int("".join(result))
if newTotal == self.__game.end:
print (combinationKeys)
found = True
if not self.__game.find_all:
break;
# print("-----")
if found and not self.__game.find_all:
break
if not found:
print ("Solution not found :(")
def __combinate(self, pool, combinationLength):
#stored to speed processing
poolSize = len(pool)
currentCombination = 0
result = []#[[]]*(poolSize**combinationLength)
#set all indexes to zero as starter
indexes = [0]*combinationLength
while(indexes[0] < poolSize): #if the first index is bigger then poolSize, we are done
#determine the current permutation
combination = []
for i in range(0, combinationLength):
combination.append(pool[indexes[i]])
result.append(combination) #append to combination list
currentCombination += 1
#increment indexes
indexes[-1] += 1
i = combinationLength - 1
while indexes[i] == poolSize and i > 0: #if increment overflows
indexes[i-1] += 1 #increment previous index
indexes[i] = 0 #set current index to zero
i -= 1
return result
def __combinateWithStore(self, combinationKeySets):
def removeSetsWithoutStore(combinationKeySet):
return 'store_use' in combinationKeySet
combinationKeySets = [combinationKeySet for combinationKeySet in combinationKeySets if removeSetsWithoutStore(combinationKeySet)]
newCombinationKeySets = []
for combinationKeySet in combinationKeySets:
storeUsages = combinationKeySet.count('store_use') #count the amount of store_use usages
indexes = [None]*storeUsages #initialize as None, to allow store_init to occure only once
indexes[0] = 0 #need to store_init at least once
storeUseIndexes = [index for index, actionName in enumerate(combinationKeySet) if actionName == 'store_use'] #get the indices of the store_use usages
# print ('####'*5)
# print(combinationKeySet)
while(indexes[0] <= storeUseIndexes[0]): #continue until all store_inits are inserted in each index before store_use
newCombinationKeySet = list(combinationKeySet) #copy list to insert store_inits
#loop in reversed order since inserting actions in the list shifts the indexes later on. Reversing it places the last actions to be placed first, so the action at the beginning isn't placed yet. preventing the shift in index
for i in indexes[::-1]:
if i == None:
continue
newCombinationKeySet.insert(i, 'store_init')
newCombinationKeySets.append(newCombinationKeySet)
# print(newCombinationKeySet)
# print('index_before', indexes)
indexes[-1] = indexes[-1] + 1 if indexes[-1] != None else storeUseIndexes[-2] + 1 #start index after previous store_use button and continue from there
i = storeUsages - 1
while indexes[i] == storeUseIndexes[i]+1 and i > 0: #if index hits next store_use
# print('index_between_before', indexes)
indexes[i-1] = indexes[i-1] + 1 if indexes[i-1] != None else storeUseIndexes[i-2] + 1 #increment previous index
indexes[i] = None #set current index to zero
# print('index_between_after', indexes)
i -= 1
# print('index_after', indexes)
return newCombinationKeySets
|
from django.contrib import admin
from live.models import Profile,Hotel,Room,Booking
# Register the hotel-booking models with the default admin site so they can
# be created and edited through the Django admin UI.
admin.site.register(Profile)
admin.site.register(Hotel)
admin.site.register(Room)
admin.site.register(Booking)
from gmlConvert import txttogml
import sys
#takes input graph from text file (path given as first CLI argument)
textInput=sys.argv[1]
#converting graph to gml format
g=txttogml(textInput)
# influence[i]: node count reached by a controlled DFS from node i+1
influence=[]
# NOTE(review): this top-level k is never used -- idfs() builds its own local k
k={}
# nodes[i]: list of distinct nodes reached from node i+1
nodes=[]
#Controlled Iterative Depth First Search
def idfs(g,start):
    """Controlled iterative depth-first search from *start*.

    Expands a neighbour only while the accumulated cost (each hop costs
    1/edge-weight) stays within one timestamp (<= 1).

    Args:
        g: graph supporting ``g.neighbors(v)`` and ``g[v][i]['weight']``
           (e.g. a networkx graph).
        start: node to start the search from.

    Returns:
        tuple: ``(count, reached)`` where ``count`` is the number of nodes
        popped from the stack and ``reached`` the distinct nodes reached
        (excluding *start*).
    """
    stack = [start]
    cost = {start: 0}   # best known accumulated cost per visited node
    reached = []
    count = 0
    while stack:
        v = stack.pop()
        count = count + 1
        for i in g.neighbors(v):
            # BUG FIX: dict.has_key() was removed in Python 3; `in` works in
            # both Python 2 and 3.  We consider spread within 1 timestamp.
            if i not in cost and cost[v] + 1 / g[v][i]['weight'] <= 1:
                stack.append(i)
                cost[i] = cost[v] + 1 / g[v][i]['weight']
                if reached.count(i) == 0:
                    reached.append(i)
    return count, reached
#computing influence of all the nodes
# nodes are numbered 1..g.order(); influence[i] is how many nodes a controlled
# DFS from node i+1 pops, nodes[i] is the list of nodes it reaches
for i in range(g.order()):
    count1,b=idfs(g,i+1)
    influence.append(count1)
    nodes.append(b)
#propagation of influence
# greedy cover: repeatedly pick the node with the highest remaining reach
# ("black") and mark it plus everything it reaches as covered ("grey")
newCount=influence
max1=0;
pos=0;
grey=[];
black=[];
while(len(grey)!=g.order()):
    max1=0;
    for i in range(len(newCount)):
        if(newCount[i]>max1):
            # NOTE(review): the comparison uses newCount[i] but max1 is set
            # from influence[i]; max1=newCount[i] looks intended -- confirm.
            max1=influence[i];
            pos=i;
    print("Black node entered "+str(pos+1))
    black.append(pos+1)
    if(grey.count(pos+1)==0):
        grey.append(pos+1)
    for a in nodes[pos]:
        if(grey.count(a)==0):
            grey.append(a);
    # recompute each node's remaining (not yet covered) reach
    newCount=[]
    for a in range(len(nodes)):
        count1=0
        for b in nodes[a]:
            if(grey.count(b)==0):
                count1=count1+1
        newCount.append(count1)
print("The nodes in grey")
print(grey)
|
from typing import List
class Solution:
    """Evaluate arithmetic expressions with '+', '-', parentheses and spaces
    (LeetCode 224, "Basic Calculator")."""

    def calculate(self, s: str) -> int:
        """Return the integer value of expression *s*."""
        # The "\0" sentinel guarantees the final operand is flushed and
        # the top-level eval() returns.
        ret, _ = self.eval(s + "\0", 0, [])
        return ret

    def eval(self, s: str, start: int, stk: List[int]) -> "tuple[int, int]":
        """Evaluate s[start:] up to the matching ')' or the "\0" sentinel.

        Args:
            s: expression text, terminated by "\0".
            start: index to begin scanning at.
            stk: accumulator of signed operands for this nesting level.

        Returns:
            (value, index of the terminating ')' / '\0').
            BUG FIX: the annotation previously claimed ``int`` although a
            tuple is returned (``calculate`` unpacks two values).

        Raises:
            ValueError: on an unexpected character.
        """
        prev_op = "+"
        operand = 0
        i = start
        while i < len(s):
            if s[i] == " ":
                pass
            elif s[i].isdigit():
                operand = operand * 10 + int(s[i])
            elif s[i] in ("+", "-", ")", "\0"):
                # Flush the pending operand with the sign of the previous op.
                if prev_op == "+":
                    stk.append(operand)
                elif prev_op == "-":
                    stk.append(-operand)
                if s[i] in ("+", "-"):
                    operand = 0
                    prev_op = s[i]
                elif s[i] in (")", "\0"):
                    return sum(stk), i
            elif s[i] == "(":
                # Recurse for the sub-expression; resume after its ')'.
                operand, i = self.eval(s, i + 1, [])
            else:
                # BUG FIX: was a bare `raise` with no active exception, which
                # surfaces as an unhelpful RuntimeError.
                raise ValueError("unexpected character %r at index %d" % (s[i], i))
            i += 1
        # Unreachable: the "\0" sentinel always triggers a return above.
        raise ValueError("unterminated expression")
if __name__ == "__main__":
    # Smoke test: the LeetCode 224 example expression.
    assert Solution().calculate("(1+(4+5+2)-3)+(6+8)") == 23
|
import shelve
import os
def all_files_exist(*f):
    """Return True when every path in *f* exists as a regular file.

    Args:
        *f: any number of filesystem paths.

    Returns:
        bool: True if all paths are existing regular files (vacuously True
        when called with no arguments), False otherwise.
    """
    # all() short-circuits on the first missing file, like the original loop.
    return all(os.path.isfile(file) for file in f)
|
from yahoofinancials import YahooFinancials
import pandas as pd
import datetime as dt
def get_downloader(start_date,
                   end_date,
                   granularity='daily',):
    """returns a downloader closure for yahoo

    :param start_date: the first day on which data are downloaded
    :param end_date: the last day on which data are downloaded
    :param granularity: the frequency of price data, 'D' for daily and 'M1' for 1-minute data

    :type start_date: str in format YYYY-MM-DD
    :type end_date: str in format YYYY-MM-DD
    :type granularity: str
    """
    def downloader(symbol):
        """downloads symbol price data using yahoo REST API

        :param symbol: the symbol name
        :type symbol: str
        :raises ValueError: when no price data could be fetched for *symbol*
        """
        yf = YahooFinancials(symbol)
        res = yf.get_historical_price_data(str(start_date), str(end_date), granularity)
        if not res or symbol not in res or 'prices' not in res[symbol]:
            # BUG FIX: the original constructed the ValueError but never
            # raised it, so the failure surfaced later as a confusing KeyError.
            raise ValueError('Fetching price data for "{}" failed.'.format(symbol))
        prices = res[symbol]['prices']
        df = pd.DataFrame({'open': [p['open'] for p in prices],
                           'close': [p['close'] for p in prices],
                           'low': [p['low'] for p in prices],
                           'high': [p['high'] for p in prices],
                           'volume': [p['volume'] for p in prices],},
                          index=[pd.Timestamp(d['formatted_date']) for d in prices])
        # BUG FIX: `prices` is a list of per-day dicts, so `'dividend' in
        # prices` was always False; inspect the day records instead.
        if prices and 'dividend' in prices[0]:
            df['dividend'] = [p.get('dividend', 0) for p in prices]
        else:
            df['dividend'] = 0
        if prices and 'split' in prices[0]:
            df['split'] = [p.get('split', 1) for p in prices]
        else:
            df['split'] = 1
        print(df.head(3))
        return df

    return downloader
|
import os
from selenium import webdriver
# NOTE(review): the browser starts at import time with a hard-coded
# chromedriver path -- consider moving this behind a main guard.
driver = webdriver.Chrome("C:/Program/pythonpackage/chromedriver.exe")
driver.maximize_window()
from os import path
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import UnexpectedAlertPresentException
import time,unittest, re
from selenium.webdriver.common.keys import Keys
# default delay (seconds) between simulated keystrokes
sleepTime = 0.1
# product description fields loaded from product.txt by loadData()
product = {}
# page/form parameters loaded from param.txt by loadData()
param = {}
def loadData():
    """Populate the global ``product`` and ``param`` dicts from their files.

    Each file holds a single line shaped like ``{'k1':'v1','k2':'v2'}``
    which is parsed by :func:`convertToDict`.
    """
    global product,param
    # BUG FIX: the original rebound `f` to the second file without closing
    # the first handle; `with` guarantees both files are closed.
    with open(r"F:/Python/Code/tb_auto_publsh/product.txt",encoding='UTF-8') as f:
        product = convertToDict(f.readline())
    with open(r"F:/Python/Code/tb_auto_publsh/param.txt",encoding='UTF-8') as f:
        param = convertToDict(f.readline())
# string convert to dictionary
def convertToDict(str):  # parameter name kept for backward compatibility
    """Parse a ``"{'k1':'v1','k2':'v2'}"`` style line into a plain dict.

    Braces and single quotes are stripped; entries split on ',' and each
    entry on its FIRST ':' only (BUG FIX: the original split on every colon
    and kept only the first fragment, truncating values such as URLs).
    Entries without a ':' map to an empty string instead of raising.

    Args:
        str: the raw one-line text (note: shadows the builtin; kept so
            existing keyword callers don't break).

    Returns:
        dict: string keys mapped to string values.
    """
    text = str.replace("{","").replace("}","")
    result = {}
    for entry in text.split(','):
        key, _, value = entry.replace("'","").partition(':')
        result[key] = value
    return result
def loginWithScan():
    """Open taobao.com, click the login trigger link named by
    param['loginTrigger'], then wait ~6s for the user to log in manually
    (e.g. by scanning the QR code)."""
    driver.get("https://www.taobao.com/")
    time.sleep(2)
    driver.find_element_by_link_text(param['loginTrigger']).click()
    time.sleep(6)
def inputDelay(obj,value):
    """Type *value* into the ``<input>`` child of *obj*, one character at a
    time with a short pause so the page's JS handlers can keep up.

    Args:
        obj: a selenium element containing an ``<input>`` child.
        value: the string to type.
    """
    # BUG FIX: the original used the Python-2-only `xrange` (NameError under
    # Python 3, which this file targets via open(..., encoding=...)) and sent
    # the keys to the global `param` dict instead of the `obj` argument.
    for ch in value:
        obj.find_element_by_tag_name('input').send_keys(ch)
        time.sleep(0.1)
def choiceParam(id,value):
    """Pick option *value* in the dropdown form field with DOM id *id*.

    Clicks the field open, clicks the element titled *value*, and recurses
    until the chosen value shows up inside the field.
    NOTE(review): there is no retry limit -- if the option never appears this
    recurses until RecursionError; confirm whether a cap (cf. writeParam's
    maxCount) is needed here too.
    """
    excitation = driver.find_element_by_id(id)
    excitation.find_element_by_class_name('content').click()
    driver.find_element_by_xpath("//*[@title='"+value+"']").click()
    if excitation.find_element_by_link_text(value):
        return
    else:
        choiceParam(id,value)
        time.sleep(1)
# counting: retry budget shared by writeParam (reset to 5 after each success)
maxCount = 5
def writeParam(id,value):
    """Type *value* into the text input of field *id* and verify the result.

    Sends one character at a time, reads the input's value back, and on a
    mismatch clears the field and retries; gives up after `maxCount`
    consecutive failed attempts.
    """
    global maxCount
    maxCount -= 1
    if maxCount < 0 :
        return
    # NOTE(review): this local `param` shadows the module-level param dict.
    param = driver.find_element_by_id(id)
    for idx in range(0,len(value)):
        param.find_element_by_tag_name('input').send_keys(value[idx])
        time.sleep(0.2)
    time.sleep(1)
    # read back what actually landed in the input
    text = param.find_element_by_tag_name('input').get_attribute('value')
    if len(text) <= 0 or text != value:
        param.find_element_by_tag_name('input').clear()
        writeParam(id,value)
        time.sleep(1)
    else:
        maxCount = 5
def writeDoubleParam(id,value1,value2):
    """Fill a two-operand measurement field (e.g. width x height):
    *value1* goes into the first operand input, *value2* into the second."""
    field = driver.find_element_by_id(id)
    operands = field.find_elements_by_class_name('sell-o-measurement-operand')
    first_input = operands[0].find_element_by_tag_name('input')
    first_input.send_keys(value1)
    second_input = operands[1].find_element_by_tag_name('input')
    second_input.send_keys(value2)
def setCatogory():
    """Open Taobao's publish page and pick the product category by searching
    for param['createCategory'], then confirm the category choice."""
    driver.get('https://upload.taobao.com/auction/sell.jhtml?spm=a313o.201708ban.category.d48.64f0197aLZBDbE&mytmenu=wym')
    time.sleep(1)
    # choice main category
    driver.find_element_by_id('J_SearchKeyWord').send_keys(param['createCategory'])
    time.sleep(1)
    driver.find_element_by_id('J_SearchButton').click()
    time.sleep(1)
    driver.find_element_by_id('J_CatePubBtn').click()
    time.sleep(1)
def publishProd():
    """Fill the publish form from the global `product` dict: pick the
    category, set the title, switch the global-stock radio, then fill the
    category-specific attribute inputs (Taobao's struct-p-* field ids)."""
    setCatogory()
    # set title
    driver.find_element_by_id('title').send_keys(product['title'])
    # toggle the "global stock" radio group to the other option
    oriPlace = driver.find_element_by_id('struct-globalStock')
    oriPlace.find_element_by_xpath("//input[@aria-checked='true']").send_keys(Keys.SPACE)
    oriPlace.find_element_by_xpath("//input[@aria-checked='false']").click()
    # .find_element_by_tag_name('input').click()
    # mods = oriPlace.find_elements_by_class_name('tabNest-radio-info')
    # mods[1].find_element_by_class_name('next-radio').find_element_by_tag_name('input').click()
    # next-radio-inner press
    # radios = oriPlace.find_element_by_class_name('info-content').find_element_by_class_name('next-radio-inner').click()
    # oriPlaceRadios = oriPlace.find_elements_by_class_name('tabNest-radio-info')
    # oriPlaceRadios[1].find_element_by_tag_name('input').click()
    # oriPlaceRadios[1].find_element_by_link_text(product['originPlace']).send_keys(keys.space)
    # oriPlace.find_element_by_link_text(product['originPlace']).click()
    # oriPlaceRadios[1].find_element_by_link_text(product['originPlace']).click()
    # set left module param
    writeParam('struct-p-20000',product['brand'])
    writeDoubleParam('struct-p-148060595',product['sizeX'],product['sizeY'])
    writeParam('struct-p-10016',product['model'])
    writeParam('struct-p-29112',product['installMethod'])
    writeParam('struct-p-192254056',product['temperature'])
    writeParam('struct-p-186826808',product['lineLength'])
    writeParam('struct-p-191164129',product['encodeType'])
    writeParam('struct-p-195174015',product['rotation'])
    # set right module param
    # choiceParam('struct-p-195270003',product['axlehead'])
    writeParam('struct-p-122216515',product['scene'])
    writeParam('struct-p-147908493',product['weight'])
    writeParam('struct-p-159198215',product['power'])
    writeParam('struct-p-192190064',product['torque'])
    writeParam('struct-p-180944594',product['voltage'])
    writeParam('struct-p-195206008',product['electric'])
    writeParam('struct-p-195206009',product['speed'])
    writeParam('struct-p-191164130',product['gear'])
    choiceParam('struct-p-159662152',product['protectlevel'])
    choiceParam('struct-p-21299',product['place'])
    choiceParam('struct-p-192256056',product['excitation'])
def getPage():
    """Open the shop's category listing page and print each product name.

    NOTE(review): the loop variable `product` shadows the module-level
    product dict inside this function -- rename if that dict is needed here.
    """
    driver.get('https://shop70362492.taobao.com/category-1056421148.htm?spm=a1z10.1-c.0.0.19475140cHJ39v&search=y&catName=%B0%B2%B4%A8%CB%C5%B7%FE')
    productLines = driver.find_elements_by_class_name('item3line1')
    print(productLines)
    for idx in range(0,len(productLines)):
        products = productLines[idx]
        products = products.find_elements_by_class_name('item')
        for idx2 in range(0,len(products)):
            product = products[idx2]
            text = product.find_elements_by_class_name('item-name')
            print(text[0].text)
def publishProd_l():
    """Variant of publishProd() that fills the same struct-p-* attribute
    fields in a different order and skips the global-stock radio toggle."""
    setCatogory()
    # set title
    driver.find_element_by_id('title').send_keys(product['title'])
    # set left module param
    choiceParam('struct-p-21299',product['place'])
    writeParam('struct-p-20000',product['brand'])
    writeDoubleParam('struct-p-148060595',product['sizeX'],product['sizeY'])
    choiceParam('struct-p-192256056',product['excitation'])
    writeParam('struct-p-10016',product['model'])
    writeParam('struct-p-29112',product['installMethod'])
    writeParam('struct-p-192254056',product['temperature'])
    writeParam('struct-p-186826808',product['lineLength'])
    writeParam('struct-p-191164129',product['encodeType'])
    writeParam('struct-p-195174015',product['rotation'])
    # set right module param
    # choiceParam('struct-p-195270003',product['axlehead'])
    writeParam('struct-p-147908493',product['weight'])
    choiceParam('struct-p-159662152',product['protectlevel'])
    writeParam('struct-p-159198215',product['power'])
    writeParam('struct-p-192190064',product['torque'])
    writeParam('struct-p-180944594',product['voltage'])
    writeParam('struct-p-195206008',product['electric'])
    writeParam('struct-p-195206009',product['speed'])
    writeParam('struct-p-191164130',product['gear'])
# script entry point: load product/param data, wait for a manual QR login,
# then publish the product
loadData()
loginWithScan()
time.sleep(2)
publishProd()
|
from skimage import io,transform
import numpy as np
from keras.models import *
from keras.layers import *
import keras
import os
from keras import backend as K
np.set_printoptions(threshold=np.inf)
def read_data(dir_str):
    """Read a whitespace-separated numeric text file into a numpy array.

    Args:
        dir_str: path of the text file; each line holds one row of numbers.

    Returns:
        numpy.ndarray built from the parsed rows.
    """
    rows = []
    with open(dir_str) as fdata:
        for line in fdata:
            rows.append([float(token) for token in line.split()])
    return np.array(rows)
def Pred(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r):
    """Predict a class index (0-2) from two fixed image files plus 18 scalars.

    The 18 positional arguments are the "matrix features" fed to the dense
    branch.  Two 128x128 images are read from imgs/img1.txt and imgs/img2.txt,
    rescaled to a 0-255 range, and pushed through a twin-CNN + feature network
    whose weights are loaded from ./NetWeights/P100_weights.h5.

    Returns:
        Index (argmax) of the 3-way softmax output, i.e. the chosen class.
    """
    feature_input = [a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r]
    print("matrix features:", feature_input)
    # load both input images from whitespace-separated text dumps and
    # normalise each one so its maximum maps to 255
    img_1 = read_data("imgs/img1.txt")
    max_num = np.max(img_1)
    img_1 = np.reshape(img_1, (128, 128));
    img_1 = img_1 * 255 / max_num;
    img_2 = read_data("imgs/img2.txt")
    max_num = np.max(img_2)
    img_2 = np.reshape(img_2, (128, 128));
    img_2 = img_2 * 255 / max_num;
    # add the batch and channel dimensions the conv layers expect
    Test_img_1 = np.reshape(img_1,[1,128,128,1])
    Test_img_2 = np.reshape(img_2,[1,128,128,1])
    Test_feature = np.reshape(feature_input,[1,18])
    # CNN branch for image 1
    input1 = keras.layers.Input(shape=(128,128,1))
    conv1_1 = keras.layers.Conv2D(16,(3,3), activation='tanh')(input1)
    max_pooling1_1 = keras.layers.MaxPooling2D(2,2)(conv1_1)
    conv1_2 = keras.layers.Conv2D(16,(5,5), strides=(2, 2), padding='same', activation='tanh')(max_pooling1_1)
    max_pooling1_2 = keras.layers.MaxPooling2D(2,2)(conv1_2)
    conv1_3 = keras.layers.Conv2D(16,(5,5), strides=(2, 2), padding='same', activation='tanh')(max_pooling1_2)
    max_pooling1_3 = keras.layers.MaxPooling2D(2,2)(conv1_3)
    flatten1 = keras.layers.Flatten()(max_pooling1_3)
    # CNN branch for image 2 (same architecture, separate weights)
    input2 = keras.layers.Input(shape=(128,128,1))
    conv2_1 = keras.layers.Conv2D(16,(3,3), activation='tanh')(input2)
    max_pooling2_1 = keras.layers.MaxPooling2D(2,2)(conv2_1)
    conv2_2 = keras.layers.Conv2D(16,(5,5), strides=(2, 2), padding='same', activation='tanh')(max_pooling2_1)
    max_pooling2_2 = keras.layers.MaxPooling2D(2,2)(conv2_2)
    conv2_3 = keras.layers.Conv2D(16,(5,5), strides=(2, 2), padding='same', activation='tanh')(max_pooling2_2)
    max_pooling2_3 = keras.layers.MaxPooling2D(2,2)(conv2_3)
    flatten2 = keras.layers.Flatten()(max_pooling2_3)
    # dense branch for the 18 scalar features
    input3 = keras.layers.Input(shape=(18,))
    feature_dense1 = keras.layers.Dense(18, activation='tanh')(input3)
    image_dense1 = keras.layers.Dense(32, activation='tanh')(flatten1)
    image_dense2 = keras.layers.Dense(32, activation='tanh')(flatten2)
    # merge the three branches and classify into 3 classes
    added_layer = keras.layers.Concatenate()([image_dense1, image_dense2, feature_dense1])
    output= keras.layers.Dense(3, activation='softmax')(added_layer)
    model = keras.models.Model(inputs=[input1,input2,input3], outputs=output)
    model.load_weights('./NetWeights/P100_weights.h5')
    #model.summary()
    result = model.predict([Test_img_1, Test_img_2, Test_feature])
    Chosen_One = np.argmax(result[0])
    #print(Chosen_One)
    # release the backend session so repeated calls don't leak graph state
    K.clear_session()
    return Chosen_One
|
import sys
import dbf
import os
# get parameters from file (Python 2 script: print statements, dbf library)
# parameters.txt holds "key=value" lines naming the input folders and DBF file
parameter_file = open('parameters.txt','r')
prms = parameter_file.readlines()
for line in prms:
    txt = line.split('=')
    if txt[0] == 'pluvio_folder':
        pluvio_folder = txt[1].rstrip('\n')
    if txt[0] == 'daily_folder':
        daily_folder = txt[1].rstrip('\n')
    if txt[0] == 'dbf_file':
        dbf_file = txt[1].rstrip('\n')
parameter_file.close()
table = dbf.Table(dbf_file)
table.open()
record_count = 0
stn_list = {}
pluvio_list = {}
daily_list = {}
# collect station ids from the *.dp files in each folder
# NOTE(review): lstrip('0')/rstrip('.dp') strip *character sets*, not
# prefixes/suffixes -- a stem ending in d/p loses characters; confirm naming.
for file in os.listdir(pluvio_folder):
    if file.endswith('.dp'):
        station = file.lstrip('0').rstrip('.dp')
        pluvio_list[station] = 'ALERT'
print len(pluvio_list)
for file in os.listdir(daily_folder):
    if file.endswith('.dp'):
        station = file.lstrip('0').rstrip('.dp')
        daily_list[station] = 'Daily'
# merge both lists with priority on pluvio if clashes exist
daily_list.update(pluvio_list)
log = {}
# loop through available rainfall files and update DBF as required
for idx,stat in enumerate(daily_list):
    log[stat] = 'False'
    print 'Now processing %s of %s' %(idx,len(daily_list))
    print 'Searching for %s' %stat
    # dbf.Process yields records opened for in-place editing
    for idx,record in enumerate(dbf.Process(table)):
        if stat == str(record.num):
            record.priority = '1'
            record.comms = daily_list[stat]
            record_count += 1
            log[stat] = 'True'
            print 'Searching for %s: Type = %s... FOUND!' %(stat,daily_list[stat])
    os.system('cls')
table.close()
# summary: how many stations were matched and which ones failed
print 'Done. %s out of %s stations written to DBF file' %(record_count,len(daily_list) )
print 'Stations that failed to write to DBF are:'
for x in log:
    if log[x] == 'False':
        print 'Station: %s' %(x)
print 'Done!'
# -*- coding: utf-8 -*-
from __future__ import print_function
import os.path as pth
from timeit import default_timer as timer
from koala import *
import matplotlib.pyplot as plt
plt.ion()  # interactive plotting so figures don't block the pipeline
# reduction configuration
DO_PLOTTING = False
DATE = "20180310"
GRATING = "385R"
PIXEL_SIZE = 0.6 # Just 0.1 precision
KERNEL_SIZE = 1.25
OBJECT = "POX4"
DESCRIPTION = "POX4 CUBE"
# input files; DATA_PATH presumably comes from `from koala import *` -- TODO confirm
PATH_SKYFLAT = pth.join(DATA_PATH, GRATING)
SKY_FLAT_RED_FILENAME = "10mar2_combined.fits"
THROUGHPUT_RED_FILENAME = DATE+"_"+GRATING+"_throughput_correction.dat"
FLUX_CALIBRATION_RED_FILENAME = "flux_calibration_20180310_385R_0p6_1k8.dat"
TELLURIC_CORRECTION_RED_FILENAME = "telluric_correction_20180310_385R_0p6_1k25.dat"
FILE_SKY_FLAT_RED = pth.join(
    PATH_SKYFLAT, SKY_FLAT_RED_FILENAME
)  # FILE NOT DIVIDED BY THE FLAT
THROUGHPUT_FILE_RED = pth.join(PATH_SKYFLAT, THROUGHPUT_RED_FILENAME)
FLUX_CAL_FILE = pth.join(DATA_PATH, FLUX_CALIBRATION_RED_FILENAME)
TELLURIC_CORRECTION_FILE = pth.join(DATA_PATH, TELLURIC_CORRECTION_RED_FILENAME)
SCIENCE_RED_1_FILENAME = pth.join(DATA_PATH, GRATING, "10mar20091red.fits")
SCIENCE_RED_2_FILENAME = pth.join(DATA_PATH, GRATING, "10mar20092red.fits")
SCIENCE_RED_3_FILENAME = pth.join(DATA_PATH, GRATING, "10mar20093red.fits")
# NOTE: name appear to be generated in commented code
fits_file_red = pth.join(
    DATA_PATH, GRATING, "POX4_A_red_combined_cube_2_TEST_GitHub.fits"
)
start = timer()  # wall-clock start of the reduction run
if __name__ == "__main__":
print("\n> Testing KOALA RSS class. Running", version)
print("\n\n\n> ANGEL is having a lot of FUN with GitHub!")
# -----------------------------------------------------------------------------
# TESTING PyKOALA in GitHub - Taylah, Sarah, James, Sam, Blake, Ángel
# -----------------------------------------------------------------------------
# Data are 10 Mar 2018 RED
# -----------------------------------------------------------------------------
pk = (
"_"
+ "{}".format(int(PIXEL_SIZE))
+ "p"
+ "{}".format(int((abs(PIXEL_SIZE) - abs(int(PIXEL_SIZE))) * 10))
+ "_"
+ "{}".format(int(KERNEL_SIZE))
+ "k"
+ "{}".format(int((abs(KERNEL_SIZE) - abs(int(KERNEL_SIZE))) * 100))
)
# ---------------------------------------------------------------------------
# THROUGHPUT CORRECTION USING SKYFLAT
# ---------------------------------------------------------------------------
#
# The very first thing that we need is to get the throughput correction.
# IMPORTANT: We use a skyflat that has not been divided by a flatfield in 2dFdr !!!!!!
# If this has been done before, we can read the file containing the throughput correction
# TODO: throughput_red isn't used anywhere
#throughput_red = read_table(THROUGHPUT_FILE_RED, ["f"] )
# Now we read the RSS file, we ONLY correct for ccd defects and high cosmics
skyflat_red = KOALA_RSS(
FILE_SKY_FLAT_RED,
flat="",
apply_throughput=False,
sky_method="none",
#skyflat = skyflat_red,
do_extinction=False,
correct_ccd_defects = False,
correct_high_cosmics = False,
clip_high = 100,
step_ccd = 50,
plot=DO_PLOTTING,
)
# Next we find the relative throughput.
# If the data have been normalized by the FLATFIELD, we only need a SCALE
# between fibres, We consider the median value in the range
# [wave_min_scale, wave_max_scale] for all fibres and scale
skyflat_red.find_relative_throughput(
ymin=0,
ymax=800000,
wave_min_scale=6300,
wave_max_scale=6500,
plot=DO_PLOTTING,
)
# The relative throughput is an array stored in
# skyflat_red.relative_throughput
# We save that array in a text file that we can read in the future without
# the need of repeating this
array_to_text_file(
skyflat_red.relative_throughput, filename=THROUGHPUT_FILE_RED
)
# ---------------------------------------------------------------------------
# OBTAIN ABSOLUTE FLUX CALIBRATION AND TELLURIC CORRECTION USING CALIBRATION STARS
# ---------------------------------------------------------------------------
# If these have been obtained already, we can read files containing arrays with the calibrations
# Uncomment the next two sections and skip the rest till "OBTAIN SKY SPECTRA"
# Read flux calibration red
w_star, flux_calibration = read_table(FLUX_CAL_FILE, ["f", "f"] )
print(flux_calibration)
# Read telluric correction from file
w_star, telluric_correction = read_table(TELLURIC_CORRECTION_FILE, ["f", "f"])
print(telluric_correction)
# # READ STAR 1
# # First we provide names, paths, files...
# star1="H600"
# path_star1 = DATA_PATH+DATE+"/"+GRATING+"/"
# starpos1r = path_star1+"10mar20082red.fits"
# starpos2r = path_star1+"10mar20083red.fits"
# starpos3r = path_star1+"10mar20084red.fits"
# fits_file_red = path_star1+star1+"_"+GRATING+pk
# response_file_red = path_star1+star1+"_"+GRATING+pk+"_response.dat"
# telluric_file = path_star1+star1+"_"+GRATING+pk+"_telluric_correction.dat"
# #
# # ------------------- IF USING ONLY 1 FILE FOR CALIBRATION STAR -----------
# #
# # Read RSS file and apply throughput correction, substract sky using n_sky=400 lowest intensity fibres,
# # correct for CCD defects and high cosmics
#
# star1r = KOALA_RSS(starpos3r,
# apply_throughput=True, skyflat = skyflat_red, plot_skyflat=False,
# sky_method="self", n_sky=400,
# correct_ccd_defects = True, correct_high_cosmics = True, clip_high = 50, step_ccd=50,
# valid_wave_min = 6085, valid_wave_max = 9305,
# plot=DO_PLOTTING, warnings=True)
#
# # Now we search for the telluric correction
# # For this we stack n_fibres=15 highest intensity fibres, derive a continuum in steps of step=15 A
# # excluding problematic wavelenght ranges (exclude_wlm), normalize flux by continuum & derive flux/flux_normalized
# # This is done in the range [wave_min, wave_max], but only correct telluric in [correct_from, correct_to]
# # Including apply_tc=True will apply correction to the data (do it when happy with results, as need this for flux calibration)
# #
# telluric_correction_star1 = star1r.get_telluric_correction(n_fibres=15, correct_from=6830., correct_to=8380.,
# exclude_wlm=[[6000,6350],[6460,6720],[6830,7450], [7550,7750],[8050,8400]],
# apply_tc=True,
# combined_cube = False, weight_fit_median = 1.,
# step = 15, wave_min=6085, wave_max=9305)
# #
# # Next we CREATE THE CUBE for this star, using THE SAME PARAMETERS we will later using for our objects
# # 0.6 is the pixel size, 1.25 is the kernel size.
# #
# cubes1r=Interpolated_cube(star1r, PIXEL_SIZE, KERNEL_SIZE, plot=DO_PLOTTING, ADR=True) #, force_ADR = True) # CASCA con lo de Matt
# #
# #
# # ------------------- IF USING AT LEAST 2 FILES FOR CALIBRATION STAR -----------
# #
# # Run KOALA_reduce and get a combined datacube with given PIXEL_SIZE and KERNEL_SIZE
# #
# rss_list = [starpos1r,starpos2r,starpos3r]
# H600r=KOALA_reduce(rss_list, fits_file=fits_file_red+".fits", obj_name=star1, description=star1,
# apply_throughput=True, skyflat = skyflat_red,
# correct_ccd_defects = True, correct_high_cosmics = True, clip_high = 50, step_ccd=50,
# sky_method="self", n_sky=400,
# pixel_size_arcsec=PIXEL_SIZE, kernel_size_arcsec=KERNEL_SIZE,
# ADR= False,
# valid_wave_min = 6085, valid_wave_max = 9305,
# plot=DO_PLOTTING, warnings=False )
# #
# # Extract the integrated spectrum of the star & save it
# #
# H600r.combined_cube.half_light_spectrum(r_max=5, plot=DO_PLOTTING)
# spectrum_to_text_file(H600r.combined_cube.wavelength,H600r.combined_cube.integrated_star_flux, filename=fits_file_red+"_integrated_star_flux.dat")
# #
# # Find telluric correction CAREFUL WITH apply_tc=True
# #
# telluric_correction_star1 = H600r.get_telluric_correction(n_fibres=15, correct_from=6830., correct_to=8400.,
# exclude_wlm=[[6000,6350],[6460,6720],[6830,7450], [7550,7750],[8050,8400]],
# apply_tc=True,
# combined_cube = True, weight_fit_median = 1.,
# step = 15, wave_min=6085, wave_max=9305)
# # We can save this calibration as a text file
# spectrum_to_text_file(H600r.combined_cube.wavelength,telluric_correction_star1, filename=telluric_file)
# #
# # ------------------- FLUX CALIBRATION (COMMON) -----------
# #
# # Now we read the absolute flux calibration data of the calibration star and get the response curve
# # (Response curve: correspondence between counts and physical values)
# # Include exp_time of the calibration star, as the results are given per second
# # For this BE CAREFUL WITH ABSORPTIONS (Halpha) and check behaviour in the edges of the CCD
# # Change fit_degree (3,5,7), step, min_wave, max_wave to get better fits !!!
#
# H600r.combined_cube.do_response_curve('FLUX_CAL/fhilt600_edited.dat', plot=DO_PLOTTING, min_wave=6110., max_wave=9305.,
# step=20, exp_time=120., fit_degree=7)
# # Now we can save this calibration as a text file
# spectrum_to_text_file(H600r.combined_cube.response_wavelength,H600r.combined_cube.response_curve, filename=response_file_red)
# # STAR 2
# star="HD60753"
# path_star = DATA_PATH+DATE+"/"+GRATING+"/"
# starpos1r = path_star+"10mar20079red.fits"
# starpos2r = path_star+"10mar20080red.fits"
# starpos3r = path_star+"10mar20081red.fits"
# fits_file_red = path_star+star+"_"+GRATING+pk
# response_file_red = path_star+star+"_"+GRATING+pk+"_response.dat"
# telluric_file = path_star+star+"_"+GRATING+pk+"_telluric_correction.dat"
# #
# rss_list = [starpos1r,starpos2r,starpos3r]
# HD60753r=KOALA_reduce(rss_list, fits_file=fits_file_red+".fits", obj_name=star, description=star,
# apply_throughput=True, skyflat = skyflat_red,
# correct_ccd_defects = True, correct_high_cosmics = True, clip_high = 50, step_ccd=50,
# sky_method="self", n_sky=400,
# pixel_size_arcsec=PIXEL_SIZE, kernel_size_arcsec=KERNEL_SIZE,
# ADR= False,
# valid_wave_min = 6085, valid_wave_max = 9305,
# plot=DO_PLOTTING, warnings=False )
# #
# HD60753r.combined_cube.half_light_spectrum(r_max=5, plot=DO_PLOTTING)
# spectrum_to_text_file(HD60753r.combined_cube.wavelength,HD60753r.combined_cube.integrated_star_flux, filename=fits_file_red+"_integrated_star_flux.dat")
# #
# telluric_correction_star2 = HD60753r.get_telluric_correction(apply_tc=True, combined_cube = True,
# weight_fit_median = 1., step = 15, wave_min=6085, wave_max=9305,
# correct_from=6830., correct_to=8400.,
# exclude_wlm=[[6000,6330],[6460,6720],[6830,7450], [7550,7750],[8050,8400]])
# #
# spectrum_to_text_file(HD60753r.combined_cube.wavelength,telluric_correction_star2, filename=telluric_file)
# #
# HD60753r.combined_cube.do_response_curve('FLUX_CAL/fhd60753.dat', plot=DO_PLOTTING, min_wave=6110., max_wave=9305.,
# step=20, exp_time=15., fit_degree=5)
#
# spectrum_to_text_file(HD60753r.combined_cube.response_wavelength,HD60753r.combined_cube.response_curve, filename=response_file_red)
# # STAR 3
# star="HR3454"
# path_star = DATA_PATH+DATE+"/"+GRATING+"/"
# starpos1r = path_star+"10mar20094red.fits"
# starpos2r = path_star+"10mar20095red.fits"
# starpos3r = path_star+"10mar20096red.fits"
# fits_file_red = path_star+star+"_"+GRATING+pk
# response_file_red = path_star+star+"_"+GRATING+pk+"_response.dat"
# telluric_file = path_star+star+"_"+GRATING+pk+"_telluric_correction.dat"
# #
# rss_list = [starpos1r,starpos2r,starpos3r]
# HR3454r=KOALA_reduce(rss_list, fits_file=fits_file_red+".fits", obj_name=star, description=star,
# apply_throughput=True, skyflat = skyflat_red,
# correct_ccd_defects = True, correct_high_cosmics = True, clip_high = 50, step_ccd=50,
# sky_method="self", n_sky=400,
# pixel_size_arcsec=PIXEL_SIZE, kernel_size_arcsec=KERNEL_SIZE,
# ADR= False,
# valid_wave_min = 6085, valid_wave_max = 9305,
# plot=DO_PLOTTING, warnings=False )
# #
# HR3454r.combined_cube.half_light_spectrum(r_max=5, plot=DO_PLOTTING)
# spectrum_to_text_file(HR3454r.combined_cube.wavelength,HR3454r.combined_cube.integrated_star_flux, filename=fits_file_red+"_integrated_star_flux.dat")
# #
# telluric_correction_star3 = HR3454r.get_telluric_correction(apply_tc=True, combined_cube = True,
# weight_fit_median = 1., step = 15, wave_min=6085, wave_max=9305,
# correct_from=6830., correct_to=8420.,
# exclude_wlm=[[6000,6330],[6460,6720],[6830,7450], [7550,7750],[8050,8420]])
# #
# spectrum_to_text_file(HR3454r.combined_cube.wavelength,telluric_correction_star3, filename=telluric_file)
# #
# HR3454r.combined_cube.do_response_curve('FLUX_CAL/fhr3454_edited.dat', plot=DO_PLOTTING, min_wave=6110., max_wave=9305.,
# step=20, exp_time=2., fit_degree=7)
#
# spectrum_to_text_file(HR3454r.combined_cube.response_wavelength,HR3454r.combined_cube.response_curve, filename=response_file_red)
# STAR 4
# star="EG274"
# path_star = DATA_PATH+DATE+"/"+GRATING+"/"
# starpos1r = path_star+"10mar20104red.fits"
# starpos2r = path_star+"10mar20105red.fits"
# starpos3r = path_star+"10mar20106red.fits"
# fits_file_red = path_star+star+"_"+GRATING+pk
# response_file_red = path_star+star+"_"+GRATING+pk+"_response.dat"
# telluric_file = path_star+star+"_"+GRATING+pk+"_telluric_correction.dat"
#
#
# star3r = KOALA_RSS(starpos3r,
# apply_throughput=True, skyflat = skyflat_red, plot_skyflat=False,
# correct_ccd_defects = True, correct_high_cosmics = False, clip_high = 50, step_ccd=50,
# fix_wavelengths = True, sol = [0.10198480885572622, -0.0006885696621193424, 1.8422163305742697e-07],
# sky_method="self", n_sky=50, correct_negative_sky = True,
# telluric_correction = telluric_correction_20180310,
# valid_wave_min = 6085, valid_wave_max = 9305,
# plot=DO_PLOTTING, warnings=True)
#
# cubes3r=Interpolated_cube(star3r, PIXEL_SIZE, KERNEL_SIZE, plot=DO_PLOTTING) #, force_ADR = True)
#
# cubes3r.do_response_curve('FLUX_CAL/feg274_edited.dat', plot=DO_PLOTTING, min_wave=6100., max_wave=9305.,
# step=25, exp_time=180., fit_degree=7, ha_width=150)
# rss_list = [starpos1r,starpos2r,starpos3r]
# EG274r=KOALA_reduce(rss_list, fits_file=fits_file_red+".fits", obj_name=star, description=star,
# apply_throughput=True, skyflat = skyflat_red,
# correct_ccd_defects = True, correct_high_cosmics = False, clip_high = 50, step_ccd=50,
# fix_wavelengths = True, sol = [0.10198480885572622, -0.0006885696621193424, 1.8422163305742697e-07],
# sky_method="self", n_sky=50, correct_negative_sky = True,
# pixel_size_arcsec=PIXEL_SIZE, kernel_size_arcsec=KERNEL_SIZE,
# ADR= False,
# valid_wave_min = 6085, valid_wave_max = 9305,
# plot=DO_PLOTTING, warnings=False )
#
# EG274r.combined_cube.half_light_spectrum(r_max=5, plot=DO_PLOTTING)
### spectrum_to_text_file(EG274r.combined_cube.wavelength,EG274r.combined_cube.integrated_star_flux, filename=fits_file_red+"_integrated_star_flux.dat")
#
# telluric_correction_star4 = EG274r.get_telluric_correction(apply_tc=True, combined_cube = True,
# weight_fit_median = 1., step = 15, wave_min=6085, wave_max=9305,
# correct_from=6830., correct_to=8420.,
# exclude_wlm=[[6000,6330],[6460,6720],[6830,7450], [7550,7750],[8050,8420]])
# #
# spectrum_to_text_file(EG274r.combined_cube.wavelength,telluric_correction_star4, filename=telluric_file)
# #
# EG274r.combined_cube.do_response_curve('FLUX_CAL/feg274_edited.dat', plot=DO_PLOTTING, min_wave=6080., max_wave=9305., # FIX BLUE END !!!
# step=10, exp_time=180., fit_degree=7, ha_width=150)
# #
# spectrum_to_text_file(EG274r.combined_cube.response_wavelength,EG274r.combined_cube.response_curve, filename=response_file_red)
# # CHECK AND GET THE FLUX CALIBRATION FOR THE NIGHT RED
# # First we take another look to the RSS data ploting the integrated fibre values in a map
# star1r.RSS_map(star1r.integrated_fibre, norm=colors.PowerNorm(gamma=1./4.)) # Dead fibre!!!
# star2r.RSS_map(star2r.integrated_fibre, norm=colors.PowerNorm(gamma=1./4.))
# star3r.RSS_map(star3r.integrated_fibre, norm=colors.PowerNorm(gamma=1./4.))
# # We check again that star1 is on a dead fibre, we don't use this star for absolute flux calibration
# # Define in "stars" the 2 cubes we are using, and plotting their responses to check
# stars=[H600r.combined_cube,HD60753r.combined_cube,HR3454r.combined_cube,EG274r.combined_cube]
# plot_response(stars, scale=[1,1.14,1.48,1])
# stars=[EG274r.combined_cube] #H600r.combined_cube,EG274r.combined_cube]
# plot_response(stars, scale=[1,1])
# # The shape of the curves are ~OK but they have a variation of ~5% in flux...
# # Probably this would have been corrected obtaining at least TWO exposures per star..
# # We obtain the flux calibration applying:
# flux_calibration_20180310_385R_0p6_1k25 = obtain_flux_calibration(stars)
# # And we save this absolute flux calibration as a text file
# flux_calibration_file = DATA_PATH+DATE+"/flux_calibration_"+DATE+"_"+GRATING+pk+".dat"
# spectrum_to_text_file(H600r.combined_cube.wavelength,flux_calibration_20180310_385R_0p6_1k8, filename=flux_calibration_file)
# # CHECK AND GET THE TELLURIC CORRECTION
# # Similarly, provide a list with the telluric corrections and apply:
# telluric_correction_list=[telluric_correction_star1,telluric_correction_star2,telluric_correction_star3,telluric_correction_star4]
# telluric_correction_list=[telluric_correction_star4] # [telluric_correction_star1,]
# telluric_correction_20180310 = obtain_telluric_correction(EG274r.combined_cube.wavelength, telluric_correction_list)
# # Save this telluric correction to a file
# telluric_correction_file = DATA_PATH+DATE+"/telluric_correction_"+DATE+"_"+GRATING+pk+".dat"
# spectrum_to_text_file(EG274r.combined_cube.wavelength,telluric_correction_20180310, filename=telluric_correction_file )
# ---------------------------------------------------------------------------
# OBTAIN SKY SPECTRA IF NEEDED
# ---------------------------------------------------------------------------
# Using the same files than objects but choosing fibres without object emission
#
# sky_r1 = KOALA_RSS(SCIENCE_RED_1_FILENAME, apply_throughput=True, skyflat = skyflat_red, do_extinction=False,
# correct_ccd_defects = True, correct_high_cosmics = False, clip_high = 100, step_ccd = 50,
# sky_method="none", is_sky=True, win_sky=151,
# plot=DO_PLOTTING)
#
# sky1=sky_r1.plot_combined_spectrum(list_spectra=[870,871,872,873,875,876,877,878,879,880,881,882,883,884,885,886,887,888,889,900], median=True)
#
# sky_r2 = KOALA_RSS(SCIENCE_RED_2_FILENAME, apply_throughput=True, skyflat = skyflat_red, do_extinction=False,
# correct_ccd_defects = True, correct_high_cosmics = False, clip_high = 100, step_ccd = 50,
# sky_method="none", is_sky=True, win_sky=151,
# plot=DO_PLOTTING)
#
# sky2=sky_r2.plot_combined_spectrum(list_spectra=[870,871,872,873,875,876,877,878,879,880,881,882,883,884,885,886,887,888,889,900], median=True)
# Build an RSS object from the third science frame with sky subtraction
# disabled (sky_method="none") so its fibres can serve as a sky reference.
sky_r3 = KOALA_RSS(
    SCIENCE_RED_3_FILENAME,
    apply_throughput=True,
    skyflat=skyflat_red,
    do_extinction=False,          # extinction correction deferred to the science reduction
    correct_ccd_defects=True,
    correct_high_cosmics=False,
    clip_high=100,
    step_ccd=50,
    sky_method="none",            # no subtraction: this frame IS the sky source
    is_sky=True,
    win_sky=151,
    plot=DO_PLOTTING
)
# Median-combine the listed fibres into a single 1-D sky spectrum for the
# later "1D" sky subtraction. NOTE(review): the fibre list presumably avoids
# object emission -- confirm against an RSS map before reusing it.
sky3 = sky_r3.plot_combined_spectrum(
    list_spectra = [
        870, 871, 872, 873, 875, 876, 877, 878, 879, 880, 881, 882, 883,
        884, 885, 886, 887, 888, 889, 900
    ], median=True
)
# ---------------------------------------------------------------------------
# TIME FOR THE OBJECT !!
# ---------------------------------------------------------------------------
# rss3_all = KOALA_RSS(SCIENCE_RED_3_FILENAME, #save_rss_to_fits_file=DATA_PATH+"Tol30Ar3_rss_tcwsreu.fits",
# apply_throughput=True, skyflat = skyflat_red,
# correct_ccd_defects = True,
# fix_wavelengths = True, sol = [0.119694453613, -0.000707644207572, 2.03806478671e-07],
# #sky_method="none",
# sky_method="1D", sky_spectrum=sky3, auto_scale_sky = True,
# id_el=False, high_fibres=10, brightest_line="Ha", cut=1.5, plot_id_el=True, broad=1.8, brightest_line_wavelength =6641., #fibre=422, #422
# id_list=[6300.30, 6312.1, 6363.78, 6548.03, 6562.82, 6583.41, 6678.15, 6716.47, 6730.85, 7065.28, 7135.78, 7318.39, 7329.66, 8750.47, 8862.79, 9014.91, 9069.0],
# clean_sky_residuals = False, dclip=3.0, extra_w = 1.3, step_csr = 25, fibre = 0,
# telluric_correction = telluric_correction,
# do_extinction=False, correct_negative_sky = True,
# plot=DO_PLOTTING, warnings=True)
#
# Fitting a second-order polynomy a0x + a1x * fibre + a2x * fibre**2:
# a0x = 0.119694453613 a1x = -0.000707644207572 a2x = 2.03806478671e-07
# cube_test=Interpolated_cube(rss3_all, PIXEL_SIZE, KERNEL_SIZE, flux_calibration=flux_calibration, plot=DO_PLOTTING)
# save_fits_file(cube_test, DATA_PATH+"/"+GRATING+"/POX4_d_cube_test.fits", ADR=False)
# Science frames to combine (frames 2 and 3).
rss_list = [
    SCIENCE_RED_2_FILENAME,SCIENCE_RED_3_FILENAME
] #,SCIENCE_RED_3_FILENAME] #,file4r,file5r,file6r,file7r]
# sky_list=[sky1,sky2,sky3]
# One 1-D sky spectrum per RSS file; sky3 is reused for both frames.
sky_list = [
    sky3, sky3
]
# Full reduction of the science frames: throughput, CCD defects, 1-D sky
# subtraction, telluric correction, extinction, flux calibration, then cubing
# and combining into `fits_file_red`.
hikids_red = KOALA_reduce(
    rss_list,
    obj_name=OBJECT,
    description=DESCRIPTION,
    #rss_clean=True,
    fits_file=fits_file_red,
    #save_rss_to_fits_file_list=save_rss_list,
    apply_throughput=True,
    skyflat=skyflat_red,
    plot_skyflat=False,
    correct_ccd_defects=True,
    correct_high_cosmics=False,
    clip_high=100,
    step_ccd=50,
    #fix_wavelengths=True,
    #sol=[0.119694453613, -0.000707644207572, 2.03806478671e-07],
    #sky_method="1Dfit",
    sky_method="1D",
    sky_list=sky_list,
    scale_sky_1D=1.,
    auto_scale_sky=True,
    brightest_line="Ha",
    brightest_line_wavelength = 6641.,
    id_el=False,
    high_fibres=10,
    cut=1.5,
    plot_id_el=True,
    broad=1.8,
    # Rest-frame emission lines used for line identification when id_el=True.
    id_list=[
        6300.30, 6312.1, 6363.78, 6548.03, 6562.82, 6583.41, 6678.15,
        6716.47, 6730.85, 7065.28, 7135.78, 7318.39, 7329.66, 8750.47,
        8862.79, 9014.91, 9069.0
    ],
    #clean_sky_residuals=False,
    #dclip=3.0,
    #extra_w=1.3,
    #step_csr = 25,
    telluric_correction=telluric_correction,
    do_extinction=True,
    correct_negative_sky=False,
    pixel_size_arcsec=PIXEL_SIZE,
    kernel_size_arcsec=KERNEL_SIZE,
    #offsets=[-0.54, -0.87, 1.58, -1.26] # EAST-/WEST+ NORTH-/SOUTH+
    ADR=False,
    flux_calibration=flux_calibration,
    #size_arcsec=[60,60],
    valid_wave_min = 6085,
    valid_wave_max = 9305,
    plot=DO_PLOTTING,
    warnings=False
)
# Report the total wall-clock time of the reduction run.
end = timer()
print("\n> Elapsing time = ", end - start, "s")
# -----------------------------------------------------------------------------
# ... Paranoy@ Rulz! ;^D & Angel R. :-)
# -----------------------------------------------------------------------------
|
# Map each choice letter to its numeric "like" score.
# BUG FIX: the dictionary was previously defined twice with identical
# contents; the duplicate definition has been removed.
like_num = {
    'A': 1,
    'B': 2,
    'C': 3,
    'D': 4,
    'E': 5,
}
print(like_num)
# Individual lookups (kept for parity with the original script's names).
A_like = like_num['A']
B_like = like_num['B']
C_like = like_num['C']
D_like = like_num['D']
E_like = like_num['E']
like = [A_like, B_like, C_like, D_like, E_like]
for l in like:
    print("He like number is " + str(l))
|
import threading
import serial
import sys
import time
class DataObject(object):
    """Base class for data objects: a writable sink that reports incoming
    data through a registered callback."""
    def __init__(self):
        # BUG FIX: was `__init` (missing trailing underscores), so it was
        # never invoked as the constructor.
        pass
    def Write(self, buf, bytes):
        """Write `bytes` bytes from `buf`; no-op in the base class."""
        pass
    def signalwhenReady(self, callback):
        # Register the callable invoked when data is available.
        self.callback = callback
#
# TODO: check how threading works i.e run()-method , simulate the serial port with socat.
#
class SerialWrapper(DataObject, threading.Thread):
    # Serial-port backed DataObject; a reader thread pushes received bytes to
    # the registered callback. NOTE: Python 2 code (print statements,
    # `except ..., e` syntax).
    def __init__(self, port):
        # Open `port` at 38400 baud with a 1 s read timeout; exit on failure.
        print "Running constructor in SerialWrapper"
        threading.Thread.__init__(self)
        DataObject.__init__(self)
        try:
            self.m_fh = serial.Serial(port, 38400, timeout=1)
            self.m_fh.open()
        except serial.SerialException, e:
            # NOTE(review): if Serial() itself raised, self.m_fh was never
            # assigned and this error message would raise AttributeError --
            # verify before relying on this path.
            sys.stderr.write("Could not open serial port %s: %s\n" % (self.m_fh.portstr, e))
            sys.exit(1)
    def Write(self, buf, bytes):
        # Send `buf` (an iterable of integer byte values) over the line.
        print 'serialWrapper:', buf
        # self.m_fh.write(buf)
        self.m_fh.write(''.join(chr(i) for i in buf))
        pass
    def run(self):
        # Reader thread: block on one byte, then drain whatever else has
        # arrived and hand the whole chunk (n+1 bytes) to the callback.
        # return
        while True:
            print "SerialWrapper, before read()"
            data = self.m_fh.read(1) # read one, blocking
            print "SerialWrapper, after read()"
            n = self.m_fh.inWaiting() # look if there is more
            if n:
                print ' got:', n, 'bytes'
                data = data + self.m_fh.read(n) # and get as much as possible
            time.sleep(0.5 )
            self.callback(data, n+1)
class LoopDataObject(DataObject):
    """Loopback data object: echoes written data straight back to the
    registered callback (useful for testing without a serial port)."""
    def Write(self, buf, bytes):
        # Copy the input so the callback receives an independent list
        # (idiomatic list() instead of the previous element-by-element loop).
        self.buffer = list(buf)
        self.callback(self.buffer, bytes)
class NewLoopDataObject(DataObject):
    """Loopback variant that forwards the written buffer to the callback
    without copying it."""
    def Write(self, buf, bytes):
        # print 'NewLoopDataobject, bytes:', bytes
        self.callback(buf, bytes)
|
from django.shortcuts import render,redirect
from django.core.mail import send_mail
# Create your views here.
def contact_info(request):
    """Render the static contact page."""
    return render(request,'contact/contact.html')
def send(request):
    """Handle the contact-form submission and forward it by email.

    Reads name/mail/subject/messages from the POST data, emails them to the
    site owner, then redirects back to the contact page. Non-POST requests
    redirect without sending anything.
    """
    if request.method == 'POST':
        name = request.POST.get('name')
        print(name)
        mail = request.POST.get('mail')
        print(mail)
        subject = request.POST.get('subject')
        print(subject)
        messages = request.POST.get('messages')
        print(messages)
        # BUG FIX: the message body previously ran labels and values together
        # ("sender nameJohn,sender mailj@x.com,..."); add separators.
        # NOTE(review): using the visitor-supplied address as from_email may be
        # rejected or spoof-flagged by the mail backend -- consider a fixed
        # sender plus reply_to instead.
        send_mail(
            subject,
            f'sender name: {name}, sender mail: {mail}, subject: {subject}, message: {messages}',
            mail,
            ['eng_mina_hosam@yahoo.com'],
            fail_silently=False,
        )
    return redirect('contact:contact')
from fabric.api import task, local, settings
import sys
import os
import time
# Command used to open the built documentation; macOS uses `open` instead.
browser = "firefox"
if sys.platform == 'darwin':
    browser = "open"
@task
def all():
    """Build every documentation output format: HTML, PDF and EPUB."""
    html()
    pdf()
    epub()
@task
def clean():
    """Placeholder for removing build artefacts; currently a no-op."""
    pass
@task
def view(kind='html'):
    """View the documentation in a browser.

    :param kind: 'html' opens the HTML build with the platform browser;
                 anything else opens the EPUB build.
    """
    # FIX: the docstring was previously misplaced inside the `if` body, where
    # it was a dead string expression rather than the function's docstring.
    if kind == 'html':
        local("{browser} docs/build/html/index.html".format(browser=browser))
    else:
        local("open docs/build/epub/cloudaqua.epub")
def theme(name='bootstrap'):
    """Select the Sphinx theme and install the matching layout template.

    :param name: theme name; 'bootstrap' installs the bootstrap layout,
                 anything else installs the simple layout.
    """
    os.environ['SPHINX_THEME'] = name
    # Compare `name` directly instead of re-reading the env var just set.
    if name == 'bootstrap':
        local('cp docs/source/_templates/layout_bootstrap.html docs/source/_templates/layout.html')
    else:
        local('cp docs/source/_templates/layout_simple.html docs/source/_templates/layout.html')
@task
def html(theme_name='bootstrap'):
    """Build the HTML docs locally with the given theme."""
    # FIX: the docstring was previously buried mid-body as a dead string
    # expression; moved to the top of the function.
    # Disable Flask RSTPAGES due to sphinx incompatibility.
    os.environ['RSTPAGES'] = 'FALSE'
    theme(theme_name)
    # api()
    # man()
    clean()
    local("cd docs; make html")
@task
def pdf():
    """Build the PDF docs via LaTeX and copy the result into the HTML tree."""
    theme('simple')
    with settings(warn_only=True):
        # `echo 'r'` answers latex's interactive error prompt so the build
        # can run to completion unattended.
        local("cd docs; echo 'r' | make latexpdf")
    local("cp docs/build/latex/myCloudmesh.pdf docs/build/html/myCloudmesh.pdf")
@task
def epub():
    """Build the EPUB docs and copy the result into the HTML tree."""
    theme('simple')
    with settings(warn_only=True):
        local("cd docs; make epub")
    local("cp docs/build/epub/myCloudmesh.epub docs/build/html/myCloudmesh.epub")
@task
def fast(theme_name='bootstrap'):
    """Rebuild the HTML docs without the env setup/clean performed by `html`."""
    theme(theme_name)
    local("cd docs; make html")
@task
def simple():
    """Run the Sphinx HTML build with whatever theme is currently installed."""
    local("cd docs; make html")
@task
def publish():
    """deploy the documentation on gh-pages"""
    #html()
    # Push the built HTML to the gh-pages branch, then record the build on
    # the master branch.
    local('cd docs/build/html && git add . && git commit -m "site generated" && git push origin gh-pages')
    local('git commit -a -m "build site"')
    local("git push origin master")
@task
def man():
    """Generate docs/source/man/man.rst from the output of `cm man`.

    (Docstring corrected: previously a copy-paste of publish()'s docstring.)
    """
    #TODO: match on "Commands"
    local("cm man | grep -A10000 \"Commands\" | sed \$d > docs/source/man/man.rst")
@task
def api():
    # Regenerate the Sphinx API docs for each cloudmesh package.
    # NOTE: Python 2 print statements -- this fabfile predates Python 3.
    for modulename in ["cloudmesh", "cloudmesh_common", "cloudmesh_install", "cmd3local", "cloudmesh_web"]:
        print 70 * "="
        print "Building API Doc:", modulename
        print 70 * "="
        local("sphinx-apidoc -f -o docs/source/api/{0} {0}".format(modulename))
|
"""@package sqp_linsearch
Implements the SQP linesearch method.
"""
from .sqp_solver import SqpSolver
import numpy as np
import numpy.linalg as la
from quadprog import solve_qp
from .modified_cholesky import modified_cholesky
from collections import OrderedDict
class SqpLinesearch(SqpSolver):
    def __init__(self, objective_function, constraint, dumper=None):
        """ SQP minimization that employs a line-search and the l-1 non-smooth merit function for global convergence.
        :param MatModelErrorNda objective_function: Defines the objective function, and its gradient and Hessian.
        :param AugLagConstraint constraint: Defines inequality constraints applied to the problem.
        """
        SqpSolver.__init__(self, objective_function, constraint, dumper)
        self.line_search_failure = 4  # exit tag reported when the line-search fails to converge
    def set_active_constraints(self, lagrange_multipliers):
        """ Active constraints are those that have non-zero Lagrange multipliers. """
        self.active_constraints_set = True
        self.active_constraints_index = lagrange_multipliers != 0.
        return
    def get_active_constraints(self):
        """ Returns indices for the active constraints.
        :return np.array : (m, 1) Array of bools, True for the active constraints and False for the non-active.
        Use this function instead of the class member to ensure that the indices have been set.
        """
        if self.active_constraints_set:
            return self.active_constraints_index
        else:
            raise Exception('Active constraints not set yet!')
    def get_active_constraint_array(self, x, active_constraints=None):
        """ Returns the constraint values restricted to the given active set. """
        c = self.get_constraint_array(x)
        return c[active_constraints]
    def merit_fun(self, x, c):
        """ Nonsmooth, exact, L-1 merit function.
        :param np.array x: Primal variables.
        :param float c: Penalty parameter.
        """
        ca = self.get_constraint_array(x)
        ca_active = ca[self.get_active_constraints()]
        return float(self.objective_fun.value(x) + c * la.norm(ca_active, 1))
    def quadprog(self, x, hessian, gradient, constraint_array):
        """ Returns the primal and dual solutions to the QP subproblem.
        :param np.array x: (n, 1) Primal variables.
        :param np.array hessian: (n, n) Hessian of the Lagrangian w.r.t. xx, assumed to be positive-definite.
        :param np.array gradient: (n, 1) Gradient of the objective function w.r.t x.
        :param np.array constraint_array: (m, 1) Values of each of the constraints.
        :return list: [(n, 1), (m, 1)] Primal and dual solution variables.
        Uses the quadprog package to solve the quadratic programming (QP) subproblem. See this package for more details.
        The form of the problem assumed in the package is:
        Minimize_x 1/2 x^T G x - a^T x
        Subject to C.T x >= b
        The problem we want to solve is:
        Minimize_x 1/2 d^T H d + grad[f(x)]^T d
        Subject to grad[h(x)]^T d + h(x) <= 0
        So we set:
        G = hessian (H)
        a = -gradient (-grad[f(x)])
        C = -constraint_grads (-grad[h(x)])
        b = constraint_array (h(x))
        The constraints are multiplied by -1. to obtain the form C.T <= b assumed in the SQP algorithm. Therefore,
        no factor is applied to constraint_array since the result is -1 * -1 * constraint_array to move it to the rhs.
        Note the "solve_qp" function states that the returned value "lagrangian" is the "vector with the Lagrangian at
        the solution", however more specifically this is the vector of Lagrange multipliers (dual variables). For the
        actual definition see the solve.QP.c file (https://github.com/rmcgibbo/quadprog/tree/master/quadprog, 29/08/18).
        """
        b = constraint_array.reshape(-1)
        if len(b) == 0:
            # Unconstrained subproblem.
            qp_solution = solve_qp(hessian, -1. * gradient.reshape(-1))
        else:
            constraint_grads = -1 * self.get_constraint_gradient_array(x)
            qp_solution = solve_qp(hessian, -1. * gradient.reshape(-1), constraint_grads, b)
        d_x = qp_solution[0]
        if len(b) > 0:
            d_lambda = qp_solution[4]
        else:
            d_lambda = np.array([])
        return [d_x.reshape(len(d_x), 1), d_lambda.reshape(len(d_lambda), 1)]
    def globalized_sqp(self, x_0, dual_x_0):
        """ Uses a globalized SQP method with a line-search to solve the minimization problem.
        :param np.array x_0: (n, 1) Initial guess at primal variables.
        :param np.array dual_x_0: (m, 1) Initial guess at dual variables, m is the number of constraints specified.
        :return list: Primal and dual solutions, and exit information.
        Follows Algorithm 20.2 from Bierlaire (2015) "Optimization: Principles and Algorithms", pg. 480.
        Raises a RuntimeError if the line-search algorithm does not converge.
        """
        # Initialization
        maximum_iterations = self.maximum_iterations
        tol = self.precision
        x = x_0
        dual_x = dual_x_0
        c_bar = 0.1  # basic penalty parameter value
        if len(dual_x) == 0:
            penalty_parameter = 0.
        else:
            penalty_parameter = la.norm(dual_x, ord=np.inf) + c_bar
        self.set_active_constraints(dual_x)
        constraint_array = self.get_constraint_array(x)
        grad_f = self.objective_fun.grad(x)
        hess_f = self.objective_fun.hess(x)
        convergence_criteria = la.norm(self.grad_lagrangian(x, grad_f, dual_x, constraint_array,
                                                            self.get_active_constraints()))
        # Calculate the primal and dual solutions
        while convergence_criteria > tol and self.total_iterations < maximum_iterations:
            # Set the Hessian and get a positive-definite approximation
            hess_lagrangian = self.hess_xx_lagrangian(x, hess_f, dual_x)
            [hess_posdef, id_factor] = modified_cholesky(hess_lagrangian)
            # Solve the quadratic programming sub-problem to get the step direction
            [x_step, dual_x_step] = self.quadprog(x, hess_posdef, grad_f, constraint_array)
            self.set_active_constraints(dual_x_step)
            # Update the penalty parameter
            if len(dual_x_0) == 0:
                c_upper_bound = 0.
                penalty_parameter = 0.
            else:
                c_upper_bound = la.norm(dual_x_step, np.inf)
                if penalty_parameter >= 1.1 * c_upper_bound:
                    penalty_parameter = 0.5 * (penalty_parameter + c_upper_bound)
                # If c_upper_bound <= penalty_parameter < 1.1 * c_upper_bound -> don't change penalty_parameter
                elif penalty_parameter < c_upper_bound:
                    penalty_parameter = np.max([1.5 * penalty_parameter, c_upper_bound])
            # Calculate the step length using a line-search
            active_constraints = constraint_array[self.active_constraints_index]
            merit_descent = float(np.dot(grad_f.transpose(), x_step)
                                  - penalty_parameter * la.norm(active_constraints, 1))
            [step_trajectory, step_size, ls_conv] = self.basic_linesearch(x, x_step, penalty_parameter, merit_descent)
            # Exit the solver if the line-search does not converge
            if not ls_conv:
                break
            # Update parameters for the next step
            x = x + step_trajectory
            dual_x = dual_x_step
            grad_f = self.objective_fun.grad(x)
            hess_f = self.objective_fun.hess(x)
            constraint_array = self.get_constraint_array(x)
            self.total_iterations += 1
            convergence_criteria = float(la.norm(self.grad_lagrangian(x, grad_f, dual_x, constraint_array,
                                                                      self.get_active_constraints())))
            # Dump the progress when appropriate
            if self.use_dumper:
                dump_info = OrderedDict([('it_num', self.total_iterations),
                                         ('step_factor', step_size),
                                         ('f_val', self.objective_fun.value(x)),
                                         ('norm_grad_lag', convergence_criteria),
                                         ('x', x)])
                self.dumper.dump(dump_info)
        # Let the solver know how it exited
        if convergence_criteria <= tol:
            exit_info = {'tag': self.convergence_reached_tag, 'val': convergence_criteria,
                         'msg': "SQP line-search converged in {0} iterations.".format(self.total_iterations)}
        elif self.total_iterations >= maximum_iterations:
            exit_info = {'tag': self.maximum_iterations_reached_tag, 'val': convergence_criteria,
                         'msg': "\nMaximum iterations reached in SQP."}
        elif not ls_conv:
            exit_info = {'tag': self.line_search_failure, 'val': convergence_criteria,
                         'its': self.total_iterations,
                         'msg': "\nLine search did not converge in 50 iterations."}
        else:
            exit_info = {'tag': self.unknown_exit, 'val': convergence_criteria,
                         'msg': "Unknown exit condition reached."}
        return [x, dual_x, exit_info]
    def basic_linesearch(self, x, d_x, c, merit_descent):
        """ Backtracking line-search using a full-Newton descent direction.
        :param np.array x: Primal variables.
        :param np.array d_x: Step in primal variables.
        :param float c: Penalty parameter.
        :param float merit_descent: Directional derivative of the merit function.
        :return float: The step length factor.
        Only the first Wolfe conditions are imposed because a back-tracking line-search strategy is used.
        See Nocedal and Wright (2006) Ch. 3 for more details.
        """
        max_line_its = 50  # this will give a minimum step length factor of around 8*10^-16
        sufficient_decrease_factor = 0.3
        step_length_decrease = 2.  # halve the step length at each failed iteration
        step_length = 1.
        merit_init = self.merit_fun(x, c)
        merit_trial = self.merit_fun(x + d_x, c)
        merit_check = merit_init + step_length * sufficient_decrease_factor * merit_descent
        line_search_iterations = 0
        line_search_converged = True
        while merit_trial > merit_check and line_search_iterations < max_line_its:
            step_length = step_length / step_length_decrease
            merit_trial = self.merit_fun(x + step_length * d_x, c)
            merit_check = merit_init + step_length * sufficient_decrease_factor * merit_descent
            line_search_iterations += 1
        # Flag failure if the line-search did not converge
        if line_search_iterations >= max_line_its:
            line_search_converged = False
        return [d_x * step_length, step_length, line_search_converged]
    def corrected_linesearch(self, x, d_x, c, merit_descent):
        """ Similar to the basic line-search but with a 2nd order correction to alleviate the Maratos effect.
        Based on Algorithm 15.2 from Nocedal and Wright (2006), pg. 443-444.
        """
        max_line_its = 50  # this will give a minimum step length factor of around 8*10^-16
        sufficient_decrease_factor = 0.3
        step_length_decrease = 2.  # halve the step length at each failed iteration
        step_length = 1.
        merit_init = self.merit_fun(x, c)
        line_search_iterations = 0
        new_point = False
        used_correction = False
        correction = 0.
        line_search_converged = True
        while new_point is False and line_search_iterations < max_line_its:
            # Start with regular line search
            merit_trial = self.merit_fun(x + step_length * d_x, c)
            merit_check = merit_init + step_length * sufficient_decrease_factor * merit_descent
            if merit_trial <= merit_check:
                new_point = True
            elif step_length == 1.:
                # Apply the 2nd order correction only on the first iteration
                correction = self.calc_2nd_correction(x, d_x)
                merit_trial = self.merit_fun(x + d_x + correction, c)
                merit_check = merit_init + sufficient_decrease_factor * merit_descent  # step_length == 1 here
                if merit_trial <= merit_check:
                    used_correction = True
                    new_point = True
                else:
                    step_length = step_length / step_length_decrease
            else:
                # If the 2nd order correction doesn't give a sufficient decrease, backtrack on the original direction
                step_length = step_length / step_length_decrease
            line_search_iterations += 1
        if used_correction:
            dx_total = d_x + correction
            print ("Used 2nd order correction in line-search.")  # todo: remove this at some point
        else:
            dx_total = d_x * step_length
        # Flag failure if the line-search did not converge
        if line_search_iterations >= max_line_its:
            line_search_converged = False
        return [dx_total, step_length, line_search_converged]
    def calc_2nd_correction(self, x, d_x):
        """ Calculates the 2nd order correction step.
        :param np.array x: (n, 1) Primal variables.
        :param np.array d_x: (n, 1) Step direction.
        :return np.array: (n, 1) 2nd order correction step.
        """
        # todo: not sure if this works with active constraints in the current formulation -> maybe doesn't do anything
        ca = self.get_constraint_array(x + d_x)
        active_index = self.get_active_constraints()
        ca_active = ca[active_index]
        if len(ca_active) == 0:
            d_second_order = 0.
        else:
            c_jacobian = self.get_constraint_gradient_array(x)
            c_jacobian = c_jacobian[:, active_index.reshape(-1)]
            if len(ca_active) == 1:
                # Only have one active constraint, need to adjust the matrix algebra since we get scalars
                c_jacobian = c_jacobian.reshape(1, -1)
                a = -1. * np.matmul(c_jacobian.transpose(), la.inv(np.matmul(c_jacobian, c_jacobian.transpose())))
                d_second_order = a * float(ca_active)
            else:
                c_jacobian = c_jacobian.transpose()
                a = -1. * np.matmul(c_jacobian.transpose(), la.inv(np.matmul(c_jacobian, c_jacobian.transpose())))
                # BUG FIX: multiply by the ACTIVE constraint values only. `a` has
                # one column per active constraint, so using the full array `ca`
                # was dimensionally wrong whenever some constraints were inactive
                # (the single-constraint branch above already uses ca_active).
                d_second_order = np.matmul(a, ca_active)
        return d_second_order
|
from fastapi import FastAPI
from pydantic import BaseModel
from fastapi.encoders import jsonable_encoder
from fastapi.middleware.cors import CORSMiddleware
import cgi
# FastAPI application instance for the student-info service.
a_pi = FastAPI()
class info(BaseModel):
    """Request body describing a student enrolment (all fields are strings)."""
    name: str  # student's full name
    course_name: str  # enrolled course
    join_date: str  # joining date (format not validated here)
    ph_no: str  # phone number
    srn: str  # student registration number
@a_pi.get("/packet")
def stu_date():
    """GET /packet: returns the literal string "Student" (simple liveness check)."""
    return "Student"
@a_pi.post("/send")
def create(name_var: info):
    """POST /send: log the submitted student record and echo its fields back."""
    # Serialize the pydantic model into a plain dict for field access/logging.
    payload = jsonable_encoder(name_var)
    print(payload)
    name = payload["name"]
    print("Name: ", name)
    course_name = payload["course_name"]
    print("course name: ", course_name)
    join_date = payload["join_date"]
    print("join date: ", join_date)
    ph_no = payload["ph_no"]
    srn = payload["srn"]
    print("Phone number: ", ph_no)
    print("SRN: ", srn)
    # FastAPI serializes this tuple as a JSON array of the five fields.
    return name, course_name, join_date, ph_no, srn
# NOTE(review): leftover CGI-style form handling -- this runs at import time,
# is unrelated to the FastAPI routes above, and the `cgi` module is deprecated
# (removed in Python 3.13). Confirm whether it can be deleted.
form = cgi.FieldStorage()
d = form.getvalue('stu_date')
from __future__ import annotations
from typing import Any
import jax.numpy as jnp
from jax import grad, vmap
from jax.tree_util import tree_map
from numpy.random import Generator
from tjax import assert_tree_allclose
from efax import HasConjugatePrior, HasGeneralizedConjugatePrior
from .distribution_info import DistributionInfo
def test_conjugate_prior(generator: Generator,
                         cp_distribution_info: DistributionInfo[Any, Any, Any],
                         distribution_name: None | str) -> None:
    """Test that the conjugate prior actually matches the distribution.

    Builds the conjugate prior around a randomly drawn distribution p with a
    large observation count, then checks that the prior's density is
    stationary (zero gradient) at the observation implied by p.
    """
    cp_distribution_info.skip_if_deselected(distribution_name)
    shape = (4, 3)
    n = 100.0 * jnp.ones(shape)
    # Choose a random distribution.
    p = cp_distribution_info.exp_parameter_generator(generator, shape=shape)
    assert isinstance(p, HasConjugatePrior)
    # Find its conjugate prior at that point with many observations.
    cp_q = p.conjugate_prior_distribution(n)
    assert cp_q.shape == p.shape
    # Produce a copy of p that matches the conjugate prior distribution.
    cp_x = p.conjugate_prior_observation()
    # Produce a function that calculates the gradient of the density with respect to p. Ensure that
    # it is broadcasted according to the shape.
    density_gradient = grad(type(cp_q).pdf, argnums=1)
    for _ in range(len(shape)):
        density_gradient = vmap(density_gradient)
    # Check the gradient of the density of the conjugate prior at p is zero.
    # NOTE(review): atol=1.5 is loose -- presumably tolerating pdf scale
    # differences across distributions; confirm before tightening.
    derivative = density_gradient(cp_q, cp_x)
    zero_derivative = tree_map(jnp.zeros_like, derivative)
    assert_tree_allclose(derivative, zero_derivative, atol=1.5)
def test_generalized_conjugate_prior(generator: Generator,
                                     gcp_distribution_info: DistributionInfo[Any, Any, Any],
                                     distribution_name: None | str
                                     ) -> None:
    """Same as test_conjugate_prior, but with generalized_conjugate_prior_distribution.

    The only structural difference: the observation count n carries one entry
    per distribution dimension rather than a single scalar per element.
    """
    gcp_distribution_info.skip_if_deselected(distribution_name)
    shape = (4, 3)
    # Choose a random distribution.
    p = gcp_distribution_info.exp_parameter_generator(generator, shape=shape)
    assert isinstance(p, HasGeneralizedConjugatePrior)
    # Find its conjugate prior at that point with many observations.
    n = 100.0 * jnp.ones((*shape, p.dimensions()))
    gcp_q = p.generalized_conjugate_prior_distribution(n)
    assert gcp_q.shape == p.shape
    # Produce a copy of p that matches the conjugate prior distribution.
    cp_x = p.conjugate_prior_observation()
    # Produce a function that calculates the gradient of the density with respect to p. Ensure that
    # it is broadcasted according to the shape.
    density_gradient = grad(type(gcp_q).pdf, argnums=1)
    for _ in range(len(shape)):
        density_gradient = vmap(density_gradient)
    # Check the gradient of the density of the conjugate prior at p is zero.
    derivative = density_gradient(gcp_q, cp_x)
    zero_derivative = tree_map(jnp.zeros_like, derivative)
    assert_tree_allclose(derivative, zero_derivative, atol=1.5)
|
'''
DQ1 Battle Simulator - App
'''
from view import View
from controller import Controller
from model import model
if __name__ == "__main__":
    # Wire the MVC pieces together and start the GUI main loop.
    view = View()
    controller = Controller(model, view)
    controller.view.mainloop()
|
# (c) 2015, Ian Clegg <ian.clegg@sourcewarp.com>
#
# winrmlib is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'ian.clegg@sourcewarp.com'
import unittest
import mock
from winrmlib.shell import CommandShell
class ShellOpenCase(unittest.TestCase):
    """
    Test cases covering the Shell.open() method
    """
    @mock.patch('winrmlib.shell.Session')
    def test_should_open_set_shell_id(self, mock_session):
        """open() should capture the ShellId returned by the service and use
        it as the selector for subsequent commands."""
        expected_id = '0000'
        mock_instance = mock_session.return_value
        # Stub the WSMan create/command responses the shell parses.
        mock_instance.create.return_value = {'rsp:Shell': {'rsp:ShellId': expected_id}}
        mock_instance.command.return_value = {'rsp:CommandResponse': {'rsp:CommandId': '9999'}}
        shell = CommandShell('http://server:5985', 'username', 'password')
        shell.open()
        shell.run('unittest')
        # The command must be issued against the shell id returned by create().
        args, kwargs = mock_instance.command.call_args
        self.assertEqual(expected_id, args[0].selectors['ShellId'])
class ShellRunCase(unittest.TestCase):
    """
    Test cases covering the Shell.run() method
    (Docstring corrected: previously a copy-paste of the open() suite's.)
    """
    # NOTE(review): the test below is disabled by being wrapped in a bare
    # string literal; re-enable or delete it once Shell.run() is testable.
    """
    @mock.patch('winrmlib.shell.Session')
    def test_should_open_set_shell_id(self, mock_session):
        mock_instance = mock_session.return_value
        mock_instance.create.return_value = {'rsp:CommandResponse': {'rsp:CommandId': '123'}}
        mock_instance.command.return_value = {'rsp:CommandResponse': {'rsp:CommandId': '9999'}}
        shell = CommandShell('http://server:5985', 'username', 'password')
        shell.__shell_id = 123
        shell.open()
        shell.run('')
        self.assertEqual('123', '123')
    """
class ShellReceiveCase(unittest.TestCase):
    """
    Test cases covering the Shell.receive() method
    (Docstring corrected: previously a copy-paste of the open() suite's.)
    """
    @mock.patch('winrmlib.shell.Session')
    def test_should_receive(self, mock_session):
        # TODO: receive() behaviour is not yet exercised.
        pass
if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/python
import os, argparse
parser = argparse.ArgumentParser(prog='python PiStation.py', description='Broadcasts WAV/MP3 file over FM using RPI GPIO #4 pin.')
parser.add_argument("song_file")
parser.add_argument("-f", "--frequency", help="Set TX frequency. Acceptable range 87.1-108.2", type=float)
arg = parser.parse_args()
def main():
os.system('clear')
frequency = 0
#frequency=freq()
print ("Welcome to PiStation! \nVersion 1.0 \nGPLv3 License\n")
#This block is for setting default values for frequency in case argument is not provided
if arg.frequency is None:
frequency = raw_input("Enter the frequency (press Enter to set default frequency of 103.3 MHz) : ")
if frequency == "":
frequency = '103.3'
elif 87.1 >= arg.frequency >= 108.2:
print "Frequency argument out of range.";exit()
else:
frequency = str(arg.frequency)
print frequency
try:
if ".mp3" in arg.song_file.lower():
os.system("ffmpeg -i "+arg.song_file+" "+"-f s16le -ar 22.05k -ac 1 - | sudo ./fm_transmitter -f"+" "+frequency+" "+" - ")
elif ".wav" in arg.song_file.lower():
os.system("sudo ./fm_transmitter -f"+" "+frequency+" "+arg.song_file)
else:
print "That file extension is not supported."
print "File name provided: %s" %arg.song_file
raise IOError
except Exception:
print "Something went wrong. Halting."; exit()
except IOError:
print "There was an error regarding file selection. Halting."; exit()
if __name__ == '__main__':
main()
|
$NetBSD: patch-setup.py,v 1.1 2019/01/28 08:40:07 adam Exp $
Allow newer pytest.
--- setup.py.orig 2019-01-28 08:19:14.000000000 +0000
+++ setup.py
@@ -23,7 +23,7 @@ classifiers = [
install_requires = ['pytest-fixture-config',
'pytest-shutil',
- 'pytest<4.0.0',
+ 'pytest',
]
tests_require = [
|
def fun(array):
    """Print the sum and product of the numbers in `array` — O(n) overall."""
    total = 0        # O(1); renamed from `sum`, which shadowed the builtin
    product = 1      # O(1)
    for value in array:      # O(n)
        total += value       # O(1)
    for value in array:      # O(n)
        product *= value     # O(1)
    print("Sum: {}, Product: {}".format(total, product))  # O(1)
# If we combine all the steps equation can be simplified to 5 + 2(O(n)).
# If we remove the constants from the equation, it can be further simplified to O(n) time complexity
fun([1, 2, 3, 4, 5])
# Calculate the time complexity of below example
def print_pairs(array):
    """Print every ordered pair (i, j) drawn from `array`."""
    lines = [str(left) + ", " + str(right)
             for left in array
             for right in array]
    for line in lines:
        print(line)
# The inner loop alone is O(n), but because it is nested inside another pass
# over the same array, every element is paired with every element: the
# function as a whole runs in O(n^2) time.
print_pairs([1, 2, 3, 4, 5])
print()
def print_unordered_pairs(array):
    """Print each unordered pair of distinct positions exactly once."""
    for pos, first in enumerate(array):
        for second in array[pos + 1:]:
            print("{}, {}".format(first, second))
# The outer loop runs n times and the inner loop averages n/2 iterations,
# i.e. about n^2 / 2 prints in total; dropping the constant factor leaves
# an overall time complexity of O(n^2).
print_unordered_pairs([1, 2, 3, 4, 5])
print()
def print_pairs(array1, array2):
    """Print each pair (a, b), a from array1 and b from array2, where a < b."""
    for a in array1:
        for b in array2:
            if a < b:
                print("{}, {}".format(a, b))
# Complexity depends on the two lengths: for each of the n elements of array1
# we scan all m elements of array2, giving O(n*m); when the arrays have equal
# length this degenerates to the quadratic O(n^2).
print_pairs([1, 2, 3, 4, 5], [1, 2, 3, 4, 5])
print()
def print_10_pairs(arrayA, arrayB):
    # Prints every (arrayA[i], arrayB[j]) pair 10 times — O(n*m) overall.
    for i in range(len(arrayA)):
        for j in range(len(arrayB)):
            for k in range(0, 10):
                print("{}, {}".format(arrayA[i], arrayB[j]))
# The two outer loops contribute O(n*m) as in the previous example. The third
# loop iterates over a constant range of 10 (the original comment said 1000,
# which did not match the code), so it contributes only O(1); the function
# therefore remains O(n*m).
print_10_pairs([1, 2, 3, 4, 5], [1, 2, 3, 4, 5])
def reverse_array(array):
    """Reverse `array` in place, then print it."""
    last = len(array) - 1
    for i in range(len(array) // 2):
        # Tuple assignment swaps the mirrored elements without a temp var.
        array[i], array[last - i] = array[last - i], array[i]
    print(array)
# Only the first half of the array is visited (n/2 swaps), i.e. O(N/2);
# dropping the constant gives a final time complexity of O(N).
reverse_array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
def powersof2(n):
    """Recursively halve n to 0, then double the result back up,
    printing the intermediate state at every step."""
    if n == 0:
        return 1
    half = int(n / 2)
    print("a is now: {}".format(half))
    below = powersof2(half)
    print("Prev is now: {}".format(below))
    doubled = below * 2
    print("Curr is now: {}".format(doubled))
    return doubled
print(powersof2(5))
# Imports ###########################################################
from django import http
from django.conf import settings
from django.utils import simplejson as json
# View Mixins #######################################################
class JSONResponseMixin(object):
    # Mixin for Django class-based views: renders the context dict as JSON
    # instead of an HTML template.
    def render_to_response(self, context, progressive=False, **httpresponse_kwargs):
        """Returns a JSON response containing 'context' as payload

        When `progressive` is True this returns a raw unicode chunk (the JSON
        followed by the configured separator line) rather than an
        HttpResponse — presumably consumed by a streaming/progressive
        response writer; confirm against callers.
        """
        if progressive:
            return u'{content}\n{separator}\n'\
                .format(content=self.convert_context_to_json(context),
                        separator=settings.PROGRESSIVE_RESPONSE_SEPARATOR)
        else:
            return self.get_json_response(self.convert_context_to_json(context),
                                          **httpresponse_kwargs)
    def get_json_response(self, content, **httpresponse_kwargs):
        """Construct an `HttpResponse` object."""
        return http.HttpResponse(content,
                                 content_type='application/json',
                                 **httpresponse_kwargs)
    def convert_context_to_json(self, context):
        """Convert the context dictionary into a JSON object"""
        # TODO: Convert querysets/models
        # NOTE(review): django.utils.simplejson was removed in Django 1.5;
        # the file-level import implies a legacy Django — verify before upgrading.
        return json.dumps(context, sort_keys=True, indent=2)
|
from torchvision import datasets, transforms
from torch.utils.data import Dataset, DataLoader
import torchvision
import numpy
from utils import *
def imagenet_transformer():
    """Return the standard ImageNet training transform pipeline.

    BUG FIX: the original built the Compose object but never returned it,
    so every caller received None.
    """
    return transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        # Canonical ImageNet channel statistics.
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
def cifar10_transformer():
    """Return the CIFAR training transform: random horizontal flip plus
    tensor conversion (no normalization)."""
    return torchvision.transforms.Compose([
        torchvision.transforms.RandomHorizontalFlip(),
        torchvision.transforms.ToTensor(),
    ])
class CIFAR10(Dataset):
    """CIFAR-10 training split that also returns each sample's index
    (useful e.g. for samplers that track individual samples)."""
    def __init__(self, path):
        self.cifar10 = datasets.CIFAR10(root=path,
                                        download=True,
                                        train=True,
                                        transform=cifar10_transformer())
    def __getitem__(self, index):
        # Callers may hand over numpy floats; coerce to an integer index.
        if isinstance(index, numpy.float64):
            index = index.astype(numpy.int64)
        data, target = self.cifar10[index]
        return data, target, index
    def __len__(self):
        return len(self.cifar10)
class CIFAR100(Dataset):
    """CIFAR-100 training split that also returns each sample's index.
    Reuses the CIFAR-10 transform pipeline."""
    def __init__(self, path):
        self.cifar100 = datasets.CIFAR100(root=path,
                                          download=True,
                                          train=True,
                                          transform=cifar10_transformer())
    def __getitem__(self, index):
        # Callers may hand over numpy floats; coerce to an integer index.
        if isinstance(index, numpy.float64):
            index = index.astype(numpy.int64)
        data, target = self.cifar100[index]
        # Your transformations here (or set it in CIFAR10)
        return data, target, index
    def __len__(self):
        return len(self.cifar100)
class ImageNet(Dataset):
    """ImageFolder-backed ImageNet training set that also returns each
    sample's index."""
    def __init__(self, path):
        # BUG FIX: the original passed the `imagenet_transformer` function
        # object itself (transform=imagenet_transformer); ImageFolder then
        # calls the transform with the image as argument, which would fail.
        # Call it once here to obtain the Compose pipeline.
        self.imagenet = datasets.ImageFolder(root=path, transform=imagenet_transformer())
    def __getitem__(self, index):
        # Callers may hand over numpy floats; coerce to an integer index.
        if isinstance(index, numpy.float64):
            index = index.astype(numpy.int64)
        data, target = self.imagenet[index]
        return data, target, index
    def __len__(self):
        return len(self.imagenet)
|
import sqlite3

# Connect to an in-memory database; its contents vanish when the
# program terminates.
conn = sqlite3.connect(':memory:')

# Single cursor used for every statement below.
cursor = conn.cursor()

# Create the currency table.
cursor.execute("""CREATE TABLE divisas (
    id INTEGER PRIMARY KEY, nombre TEXT, simbolo TEXT);""")

# Seed it with two rows.
cursor.execute("INSERT INTO divisas VALUES(1, 'Peso(MXN)', '$');")
cursor.execute("INSERT INTO divisas VALUES(2, 'Dolar USD', 'U$S');")

# Persist the changes.
conn.commit()

# Run a query and collect every row it returns.
query = "SELECT * FROM divisas;"
divisas = cursor.execute(query).fetchall()
print(divisas)

# Close the database connection.
conn.close()
########################################################################
## Dataloader and Code
########################################################################
import Code
import numpy as np
import torch

# Number of simulation timesteps used throughout.
Times=15
'''
kernels = [Code.DoGKernel(3, 3 / 9, 6 / 9),
           Code.DoGKernel(3, 6 / 9, 3 / 9),
           Code.DoGKernel(3, 7 / 9, 14 / 9),
           Code.DoGKernel(3, 14 / 9, 7 / 9),
           Code.DoGKernel(3, 13 / 9, 26 / 9),
           Code.DoGKernel(3, 26 / 9, 13 / 9)]
'''
# Single difference-of-Gaussians kernel (the full bank above is disabled).
kernels = [Code.DoGKernel(3, 26 / 9, 13 / 9)]
# Renamed from `filter`, which shadowed the builtin of the same name.
dog_filter = Code.Filter(kernels, padding = 1, thresholds = 50)
s1c1 = Code.S1C1Transform(dog_filter, timesteps=Times)
path='/home/sunhongze/PycharmProjects/Wide_Narrow/data/MNIST_BIN'
# NOTE(review): the keyword is spelled `shuttle` — presumably `shuffle`;
# confirm against Code.dataloader's signature before renaming it.
trainset=Code.dataloader(path,s1c1,batch_size=1, shuttle=True)
########################################################################
## END
########################################################################
########################################################################
## Define the Network
########################################################################
from Network import Neuron
import torch.nn as nn

# Network: 256 inputs, hidden layers of 400 and 100, 2 outputs.
sun=Neuron([256,400,100,2],[20,20],time=Times)
for data,targets in trainset:
    # Flatten each sample into a (Times, 256) spike-wave input.
    data=torch.squeeze(data.view(1,Times,1,1,256)).float()
    sun.training(data_in=data)
    print(targets)
print('END')
class AuthenticationManager:
    """Tracks tokens that expire `timeToLive` time units after they are
    generated or renewed (a token expiring exactly at t is expired at t)."""

    def __init__(self, timeToLive: int):
        self.ttl = timeToLive
        # Absolute expiration time per token id.
        self.et = {}

    def generate(self, tokenId: str, currentTime: int) -> None:
        """Create (or overwrite) a token expiring at currentTime + ttl."""
        self.et[tokenId] = currentTime + self.ttl

    def renew(self, tokenId: str, currentTime: int) -> None:
        """Extend an unexpired token; drop it if already expired."""
        expiry = self.et.get(tokenId)
        if expiry is None:
            return
        if expiry <= currentTime:
            self.et.pop(tokenId)
            return
        self.et[tokenId] = currentTime + self.ttl

    def countUnexpiredTokens(self, currentTime: int) -> int:
        """Purge expired tokens and return how many remain."""
        self.et = {tok: exp for tok, exp in self.et.items() if exp > currentTime}
        return len(self.et)
# Your AuthenticationManager object will be instantiated and called as such:
# obj = AuthenticationManager(timeToLive)
# obj.generate(tokenId,currentTime)
# obj.renew(tokenId,currentTime)
# param_3 = obj.countUnexpiredTokens(currentTime) |
#!/usr/bin/env
"""
class definitions for ctd profile plots
limit to four variables
"""
import datetime
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
from matplotlib.dates import (DateFormatter, DayLocator, HourLocator,
MonthLocator, WeekdayLocator, YearLocator)
class CTDProfilePlot(object):
def __init__(self, fontsize=10, labelsize=10, plotstyle='k-.', stylesheet='seaborn-ticks'):
"""Initialize the timeseries with items that do not change.
This sets up the axes and station locations. The `fontsize` and `spacing`
are also specified here to ensure that they are consistent between individual
station elements.
Parameters
----------
fontsize : int
The fontsize to use for drawing text
labelsize : int
The fontsize to use for labels
stylesheet : str
Choose a mpl stylesheet [u'seaborn-darkgrid',
u'seaborn-notebook', u'classic', u'seaborn-ticks',
u'grayscale', u'bmh', u'seaborn-talk', u'dark_background',
u'ggplot', u'fivethirtyeight', u'seaborn-colorblind',
u'seaborn-deep', u'seaborn-whitegrid', u'seaborn-bright',
u'seaborn-poster', u'seaborn-muted', u'seaborn-paper',
u'seaborn-white', u'seaborn-pastel', u'seaborn-dark',
u'seaborn-dark-palette']
"""
self.fontsize = fontsize
self.labelsize = labelsize
self.plotstyle = plotstyle
self.max_xticks = 10
plt.style.use(stylesheet)
mpl.rcParams['svg.fonttype'] = 'none'
mpl.rcParams['ps.fonttype'] = 42 #truetype/type2 fonts instead of type3
mpl.rcParams['pdf.fonttype'] = 42 #truetype/type2 fonts instead of type3
mpl.rcParams['axes.grid'] = True
mpl.rcParams['axes.edgecolor'] = 'white'
mpl.rcParams['axes.linewidth'] = 0.25
mpl.rcParams['grid.linestyle'] = '--'
mpl.rcParams['grid.linestyle'] = '--'
mpl.rcParams['xtick.major.size'] = 2
mpl.rcParams['xtick.minor.size'] = 1
mpl.rcParams['xtick.major.width'] = 0.25
mpl.rcParams['xtick.minor.width'] = 0.25
mpl.rcParams['ytick.major.size'] = 2
mpl.rcParams['ytick.minor.size'] = 1
mpl.rcParams['xtick.major.width'] = 0.25
mpl.rcParams['xtick.minor.width'] = 0.25
mpl.rcParams['ytick.direction'] = 'out'
mpl.rcParams['xtick.direction'] = 'out'
mpl.rcParams['ytick.color'] = 'grey'
mpl.rcParams['xtick.color'] = 'grey'
@staticmethod
def add_title(cruiseid='', fileid='', stationid='',castid='',castdate=datetime.datetime.now(),lat=-99.9,lon=-99.9):
"""Pass parameters to annotate the title of the plot
This sets the standard plot title using common meta information from PMEL/EPIC style netcdf files
Parameters
----------
cruiseid : str
Cruise Identifier
fileid : str
File Identifier
stationid : str
Station Identifier
lat : float
The latitude of the mooring
lon : float
The longitude of the mooring
"""
ptitle = ("Plotted on: {time:%Y/%m/%d %H:%M} \n from {fileid} \n "
"Cruise: {cruiseid} Cast: {castid} Stn: {stationid} \n"
"Lat: {latitude:3.3f} Lon: {longitude:3.3f} at {castdate}"
" ").format(
time=datetime.datetime.now(),
cruiseid=cruiseid,
stationid=stationid,
castid=castid,
fileid=fileid,
latitude=lat,
longitude=lon,
castdate=datetime.datetime.strftime(castdate,"%Y-%m-%d %H:%M GMT" ) )
return ptitle
    def plot1var(self, epic_key=None, xdata=None, ydata=None, xlabel=None, secondary=False, **kwargs):
        """Plot a single variable against depth (y axis inverted so depth
        increases downward), with an optional secondary trace xdata[1].

        Parameters
        ----------
        epic_key : list of str
            EPIC variable codes; styling is looked up per code via var2format
        xdata : sequence of arrays
            xdata[0] primary trace, xdata[1] secondary trace
        ydata : array
            depth values (dB)
        xlabel : str
            label for the x axis

        Returns (plt, fig).
        """
        fig = plt.figure(1)
        ax1 = fig.add_subplot(111)
        p1 = ax1.plot(xdata[0], ydata)
        plt.setp(p1, color=self.var2format(epic_key[0])['color'],
                 linestyle=self.var2format(epic_key[0])['linestyle'],
                 linewidth=self.var2format(epic_key[0])['linewidth'])
        if secondary:
            p1 = ax1.plot(xdata[1], ydata)
            plt.setp(p1, color=self.var2format(epic_key[1])['color'],
                     linestyle=self.var2format(epic_key[1])['linestyle'],
                     linewidth=self.var2format(epic_key[1])['linewidth'])
        # Depth axis grows downward.
        ax1.invert_yaxis()
        plt.ylabel('Depth (dB)', fontsize=self.labelsize, fontweight='bold')
        plt.xlabel(xlabel, fontsize=self.labelsize, fontweight='bold')
        fmt = mpl.ticker.StrMethodFormatter(self.var2format(epic_key[0])['format'])
        ax1.xaxis.set_major_formatter(fmt)
        ax1.tick_params(axis='both', which='major', labelsize=self.labelsize)
        return plt, fig
    def plot2var(self, epic_key=None, xdata=None, ydata=None, xlabel=None, secondary=False, **kwargs):
        """Plot two variables against depth: xdata[0]/xdata[1] on ax1 and
        xdata[2]/xdata[3] on a twinned top axis (ax2). Odd-indexed entries
        are optional secondary traces (drawn when `secondary` is True and
        the array is non-empty). Returns (plt, fig).
        """
        fig = plt.figure(1)
        ax1 = fig.add_subplot(111)
        p1 = ax1.plot(xdata[0], ydata)
        plt.setp(p1, color=self.var2format(epic_key[0])['color'],
                 linestyle=self.var2format(epic_key[0])['linestyle'],
                 linewidth=self.var2format(epic_key[0])['linewidth'])
        if secondary and not (xdata[1].size == 0):
            p1 = ax1.plot(xdata[1], ydata)
            plt.setp(p1, color=self.var2format(epic_key[1])['color'],
                     linestyle=self.var2format(epic_key[1])['linestyle'],
                     linewidth=self.var2format(epic_key[1])['linewidth'])
            # set plot limits for two vars by finding the absolute range and adding 10%
            abmin = np.nanmin([np.nanmin(xdata[0]), np.nanmin(xdata[1])])
            abmax = np.nanmax([np.nanmax(xdata[0]), np.nanmax(xdata[1])])
            ax1.set_xlim([abmin - 0.1*(abmax-abmin), abmax + 0.1*(abmax-abmin)])
        ax1.invert_yaxis()
        plt.ylabel('Depth (dB)', fontsize=self.labelsize, fontweight='bold')
        plt.xlabel(xlabel[0], fontsize=self.labelsize, fontweight='bold')
        fmt = mpl.ticker.StrMethodFormatter(self.var2format(epic_key[0])['format'])
        ax1.xaxis.set_major_formatter(fmt)
        ax1.tick_params(axis='both', which='major', labelsize=self.labelsize)
        # plot second param
        ax2 = ax1.twiny()
        p1 = ax2.plot(xdata[2], ydata)
        plt.setp(p1, color=self.var2format(epic_key[2])['color'],
                 linestyle=self.var2format(epic_key[2])['linestyle'],
                 linewidth=self.var2format(epic_key[2])['linewidth'])
        if secondary and not (xdata[3].size == 0):
            p1 = ax2.plot(xdata[3], ydata)
            plt.setp(p1, color=self.var2format(epic_key[3])['color'],
                     linestyle=self.var2format(epic_key[3])['linestyle'],
                     linewidth=self.var2format(epic_key[3])['linewidth'])
            # set plot limits for two vars by finding the absolute range and adding 10%
            abmin = np.nanmin([np.nanmin(xdata[2]), np.nanmin(xdata[3])])
            abmax = np.nanmax([np.nanmax(xdata[2]), np.nanmax(xdata[3])])
            try:
                ax2.set_xlim([abmin - 0.1*(abmax-abmin), abmax + 0.1*(abmax-abmin)])
            except:
                # Fall back to a unit range when the limits cannot be computed
                # (e.g. all-NaN input).
                ax2.set_xlim([0, 1])
        plt.ylabel('Depth (dB)', fontsize=self.labelsize, fontweight='bold')
        plt.xlabel(xlabel[1], fontsize=self.labelsize, fontweight='bold')
        # set xticks and labels to be at the same spot for all three vars
        ax1.set_xticks(np.linspace(ax1.get_xbound()[0], ax1.get_xbound()[1], self.max_xticks))
        ax2.set_xticks(np.linspace(ax2.get_xbound()[0], ax2.get_xbound()[1], self.max_xticks))
        fmt = mpl.ticker.StrMethodFormatter(self.var2format(epic_key[2])['format'])
        ax2.xaxis.set_major_formatter(fmt)
        ax2.tick_params(axis='x', which='major', labelsize=self.labelsize)
        return plt, fig
def plot3var(self, epic_key=None, xdata=None, ydata=None, xlabel=None, secondary=False, **kwargs):
fig = plt.figure(1)
ax1 = fig.add_subplot(111)
p1 = ax1.plot(xdata[0], ydata)
plt.setp(p1, color=self.var2format(epic_key[0])['color'],
linestyle=self.var2format(epic_key[0])['linestyle'],
linewidth=self.var2format(epic_key[0])['linewidth'])
if secondary and not (xdata[1].size == 0):
p1 = ax1.plot(xdata[1],ydata)
plt.setp(p1, color=self.var2format(epic_key[1])['color'],
linestyle=self.var2format(epic_key[1])['linestyle'],
linewidth=self.var2format(epic_key[1])['linewidth'])
#set plot limits for two vars by finding the absolute range and adding 10%
abmin=np.nanmin([np.nanmin(xdata[0]),np.nanmin(xdata[1])])
abmax=np.nanmax([np.nanmax(xdata[0]),np.nanmax(xdata[1])])
ax1.set_xlim([abmin - 0.1*(abmax-abmin),abmax + 0.1*(abmax-abmin)])
ax1.invert_yaxis()
plt.ylabel('Depth (dB)', fontsize=self.labelsize, fontweight='bold')
plt.xlabel(xlabel[0], fontsize=self.labelsize, fontweight='bold')
fmt=mpl.ticker.StrMethodFormatter(self.var2format(epic_key[0])['format'])
ax1.xaxis.set_major_formatter(fmt)
ax1.tick_params(axis='both', which='major', labelsize=self.labelsize)
#plot second param
ax2 = ax1.twiny()
p1 = ax2.plot(xdata[2], ydata)
plt.setp(p1, color=self.var2format(epic_key[2])['color'],
linestyle=self.var2format(epic_key[2])['linestyle'],
linewidth=self.var2format(epic_key[2])['linewidth'])
if secondary and not (xdata[3].size == 0):
p1 = ax2.plot(xdata[3],ydata)
plt.setp(p1, color=self.var2format(epic_key[3])['color'],
linestyle=self.var2format(epic_key[3])['linestyle'],
linewidth=self.var2format(epic_key[3])['linewidth'])
#set plot limits for two vars by finding the absolute range and adding 10%
abmin=np.nanmin([np.nanmin(xdata[2]),np.nanmin(xdata[3])])
abmax=np.nanmax([np.nanmax(xdata[2]),np.nanmax(xdata[3])])
try:
ax2.set_xlim([abmin - 0.1*(abmax-abmin),abmax + 0.1*(abmax-abmin)])
except:
ax2.set_xlim([0,1])
plt.ylabel('Depth (dB)', fontsize=self.labelsize, fontweight='bold')
plt.xlabel(xlabel[1], fontsize=self.labelsize, fontweight='bold')
fmt=mpl.ticker.StrMethodFormatter(self.var2format(epic_key[2])['format'])
ax2.xaxis.set_major_formatter(fmt)
ax2.tick_params(axis='x', which='major', labelsize=self.labelsize)
ax3 = ax1.twiny()
ax3.spines["top"].set_position(("axes", 1.05))
self.make_patch_spines_invisible(ax3)
# Second, show the right spine.
ax3.spines["top"].set_visible(True)
p1 = ax3.plot(xdata[4], ydata)
plt.setp(p1, color=self.var2format(epic_key[4])['color'],
linestyle=self.var2format(epic_key[4])['linestyle'],
linewidth=self.var2format(epic_key[4])['linewidth'])
if secondary and not (xdata[5].size == 0):
p1 = ax2.plot(xdata[5],ydata)
plt.setp(p1, color=self.var2format(epic_key[5])['color'],
linestyle=self.var2format(epic_key[5])['linestyle'],
linewidth=self.var2format(epic_key[5])['linewidth'])
#set plot limits for two vars by finding the absolute range and adding 10%
abmin=np.nanmin([np.nanmin(xdata[4]),np.nanmin(xdata[5])])
abmax=np.nanmax([np.nanmax(xdata[4]),np.nanmax(xdata[5])])
ax3.set_xlim([abmin - 0.1*(abmax-abmin),abmax + 0.1*(abmax-abmin)])
plt.ylabel('Depth (dB)', fontsize=self.labelsize, fontweight='bold')
plt.xlabel(xlabel[2], fontsize=self.labelsize, fontweight='bold')
#set bounds based on max and min values
#set xticks and labels to be at the same spot for all three vars
ax1.set_xticks(np.linspace(ax1.get_xbound()[0], ax1.get_xbound()[1], self.max_xticks))
ax2.set_xticks(np.linspace(ax2.get_xbound()[0], ax2.get_xbound()[1], self.max_xticks))
ax3.set_xticks(np.linspace(ax3.get_xbound()[0], ax3.get_xbound()[1], self.max_xticks))
fmt=mpl.ticker.StrMethodFormatter(self.var2format(epic_key[4])['format'])
ax3.xaxis.set_major_formatter(fmt)
ax3.tick_params(axis='x', which='major', labelsize=self.labelsize)
return plt, fig
    def plot3var2y(self, epic_key=None, xdata=None, ydata=None, ydata2=None, xlabel=None, secondary=False, **kwargs):
        """Like plot3var but secondary traces are plotted against a second
        depth array `ydata2` (primary traces use `ydata`). No axis-limit
        rescaling is performed here. Returns (plt, fig).
        """
        fig = plt.figure(1)
        ax1 = fig.add_subplot(111)
        p1 = ax1.plot(xdata[0], ydata)
        plt.setp(p1, color=self.var2format(epic_key[0])['color'],
                 linestyle=self.var2format(epic_key[0])['linestyle'],
                 linewidth=self.var2format(epic_key[0])['linewidth'])
        if secondary and not (xdata[1].size == 0):
            p1 = ax1.plot(xdata[1], ydata2)
            plt.setp(p1, color=self.var2format(epic_key[1])['color'],
                     linestyle=self.var2format(epic_key[1])['linestyle'],
                     linewidth=self.var2format(epic_key[1])['linewidth'])
        ax1.invert_yaxis()
        plt.ylabel('Depth (dB)', fontsize=self.labelsize, fontweight='bold')
        plt.xlabel(xlabel[0], fontsize=self.labelsize, fontweight='bold')
        fmt = mpl.ticker.StrMethodFormatter(self.var2format(epic_key[0])['format'])
        ax1.xaxis.set_major_formatter(fmt)
        ax1.tick_params(axis='both', which='major', labelsize=self.labelsize)
        # plot second param
        ax2 = ax1.twiny()
        p1 = ax2.plot(xdata[2], ydata)
        plt.setp(p1, color=self.var2format(epic_key[2])['color'],
                 linestyle=self.var2format(epic_key[2])['linestyle'],
                 linewidth=self.var2format(epic_key[2])['linewidth'])
        if secondary and not (xdata[3].size == 0):
            p1 = ax2.plot(xdata[3], ydata2)
            plt.setp(p1, color=self.var2format(epic_key[3])['color'],
                     linestyle=self.var2format(epic_key[3])['linestyle'],
                     linewidth=self.var2format(epic_key[3])['linewidth'])
        plt.ylabel('Depth (dB)', fontsize=self.labelsize, fontweight='bold')
        plt.xlabel(xlabel[1], fontsize=self.labelsize, fontweight='bold')
        fmt = mpl.ticker.StrMethodFormatter(self.var2format(epic_key[2])['format'])
        ax2.xaxis.set_major_formatter(fmt)
        ax2.tick_params(axis='x', which='major', labelsize=self.labelsize)
        # Third variable on its own twinned axis with an offset top spine.
        ax3 = ax1.twiny()
        ax3.spines["top"].set_position(("axes", 1.05))
        self.make_patch_spines_invisible(ax3)
        # Second, show the right spine.
        ax3.spines["top"].set_visible(True)
        p1 = ax3.plot(xdata[4], ydata)
        plt.setp(p1, color=self.var2format(epic_key[4])['color'],
                 linestyle=self.var2format(epic_key[4])['linestyle'],
                 linewidth=self.var2format(epic_key[4])['linewidth'])
        if secondary and not (xdata[5].size == 0):
            p1 = ax3.plot(xdata[5], ydata2)
            plt.setp(p1, color=self.var2format(epic_key[5])['color'],
                     linestyle=self.var2format(epic_key[5])['linestyle'],
                     linewidth=self.var2format(epic_key[5])['linewidth'])
        plt.ylabel('Depth (dB)', fontsize=self.labelsize, fontweight='bold')
        plt.xlabel(xlabel[2], fontsize=self.labelsize, fontweight='bold')
        # set xticks and labels to be at the same spot for all three vars
        ax1.set_xticks(np.linspace(ax1.get_xbound()[0], ax1.get_xbound()[1], self.max_xticks))
        ax2.set_xticks(np.linspace(ax2.get_xbound()[0], ax2.get_xbound()[1], self.max_xticks))
        ax3.set_xticks(np.linspace(ax3.get_xbound()[0], ax3.get_xbound()[1], self.max_xticks))
        fmt = mpl.ticker.StrMethodFormatter(self.var2format(epic_key[4])['format'])
        ax3.xaxis.set_major_formatter(fmt)
        ax3.tick_params(axis='x', which='major', labelsize=self.labelsize)
        return plt, fig
def change_range(self, plt=None, xlim=[None,None], ylim=[None,None]):
if not len(xlim) == 2:
raise ValueError('Modified xlim must be a list of two values')
if not len(ylim) == 2:
raise ValueError('Modified ylim must be a list of two values')
if xlim[0] != None:
ax = plt.gca()
ax.set_xlim(xlim)
if ylim[0] != None:
ax = plt.gca()
ax.set_ylim(ylim)
ax.invert_yaxis()
return plt
@staticmethod
def var2format(epic_key):
"""list of plot specifics based on variable name"""
plotdic={}
if epic_key in ['T_28']:
plotdic['color']='red'
plotdic['linestyle']='-'
plotdic['linewidth']=0.5
plotdic['format']='{x:.3f}'
elif epic_key in ['T2_35']:
plotdic['color']='magenta'
plotdic['linestyle']='--'
plotdic['linewidth']=0.5
plotdic['format']='{x:.3f}'
elif epic_key in ['S_41', 'OST_62', 'O_65']:
plotdic['color']='blue'
plotdic['linestyle']='-'
plotdic['linewidth']=0.5
if epic_key in ['S_41']:
plotdic['format']='{x:.3f}'
else:
plotdic['format']='{x:3.1f}'
elif epic_key in ['S_42', 'CTDOST_4220', 'CTDOXY_4221']:
plotdic['color']='cyan'
plotdic['linestyle']='--'
plotdic['linewidth']=0.5
plotdic['format']='{x:3.1f}'
if epic_key in ['S_42']:
plotdic['format']='{x:.3f}'
else:
plotdic['format']='{x:3.1f}'
elif epic_key in ['ST_70','Trb_980','SigmaT']:
plotdic['color']='black'
plotdic['linestyle']='-'
plotdic['linewidth']=0.5
plotdic['format']='{x:.3f}'
elif epic_key in ['F_903','fWS_973','Fch_906']:
plotdic['color']='green'
plotdic['linestyle']='-'
plotdic['linewidth']=0.5
plotdic['format']='{x:.2f}'
elif epic_key in ['PAR_905']:
plotdic['color']='darkorange'
plotdic['linestyle']='-'
plotdic['linewidth']=0.75
plotdic['format']='{x:5.0f}'
else:
plotdic['color']='black'
plotdic['linestyle']='--'
plotdic['linewidth']=1.0
plotdic['format']='{x:.3f}'
return plotdic
    @staticmethod
    #python3 change as dictionaries no longer have itervalues methods
    def make_patch_spines_invisible(ax):
        # Keep the frame active but hide the background patch and every
        # spine, so a twinned axis can selectively re-show only its own
        # (offset) top spine.
        ax.set_frame_on(True)
        ax.patch.set_visible(False)
        for sp in ax.spines.values():
            sp.set_visible(False)
|
import socket
from tools import parseHttpHeader
from iterator2 import *

# Minimal blocking HTTP server: accept one connection at a time, parse the
# request header, and hand the request dict to `handler` (brought in by the
# wildcard import from iterator2).
svrScoket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
svrScoket.bind(("localhost", 8080))
svrScoket.listen(10)
while True:
    con, addr = svrScoket.accept()
    # NOTE(review): a single 1024-byte recv may truncate larger requests.
    buf = con.recv(1024)
    request = parseHttpHeader(buf)
    request["client"] = addr[0]
    request["client_port"] = addr[1]
    response = handler(request)
    # BUG FIX: socket.send may transmit only part of the buffer; sendall
    # guarantees the whole response is written before the socket closes.
    con.sendall(response)
    con.close()
__author__ = "Gil Ortiz"
__version__ = "1.0"
__date_last_modification__ = "4/28/2019"
__python_version__ = "3"
# This application generates scratch off tickets in batches (Double Dollars NY Lottery style)
# Results are currently being printed on-screen but the output can be easily tweaked to a TXT file output instead
# Game model (Double Dollars style - if "your_number" is greater than "their_number", you win the "PRIZE")
# game 1: your_number their_number PRIZE
# game 2: your_number their_number PRIZE
# game 3: your_number their_number PRIZE
# game 4: your_number their_number PRIZE
# game 5: your_number their_number PRIZE
import random
# Prize frequency table: maps a formatted prize amount to "one winning ticket
# per N tickets printed".
# 1: 15 - this represents $1 dollar prize should be available every 15 tickets (common prize)
# 3000: 15000 - the prize of $3,000 dollars can be obtained every 15000 tickets printed (very rare prize)
winning_ratio = {'${:1,.2f}'.format(1): 15,
                 '${:1,.2f}'.format(3): 50,
                 '${:1,.2f}'.format(5): 100,
                 '${:1,.2f}'.format(10): 250,
                 '${:1,.2f}'.format(100): 5000,
                 '${:1,.2f}'.format(300): 10000,
                 '${:1,.2f}'.format(3000): 15000}
def create_loser_game():
    """Print one 5-line ticket on which every game loses.

    their_number is always drawn strictly above your_number, so no line wins.
    """
    print("\nGenerating loser ticket...")
    for line_no in range(1, 6):
        your_number = random.randint(1, 30)
        their_number = random.randint(your_number + 1, 31)
        prize = random.choice(list(winning_ratio.keys()))
        print(f"game {line_no} : {your_number} {their_number} {prize}")
def create_winner_game(prize):
    """Print one 5-line ticket with exactly one winning line for `prize`.

    BUG FIX: the winning line was chosen with randint(1, 6) while only lines
    1-5 are printed, so roughly 1 in 6 "winner" tickets contained no winning
    line at all.
    """
    print("\nGenerating winner ticket...")
    winner_line = random.randint(1, 5)
    counter = 1
    while counter < 6:
        if counter == winner_line:
            their_number = random.randint(1, 30)
            your_number = random.randint(their_number + 1, 31)  # my number is greater than the computer's, so I'll win here
            print(f"game {counter} : {your_number} {their_number} {prize}")
        else:
            # Losing line: their_number strictly above your_number.
            your_number = random.randint(1, 30)
            their_number = random.randint(your_number + 1, 31)
            print(f"game {counter} : {your_number} {their_number} {random.choice(list(winning_ratio.keys()))}")
        counter += 1
# Ask how many scratch offs will be printed in the batch
print_ticket_qty = 0
valid_entry = False
while valid_entry is False:
    print_ticket_qty = input("\nEnter the number of tickets you want to generate in this batch (min. 10 tickets):")
    if print_ticket_qty.isdigit():
        if int(print_ticket_qty) >= 10:
            print_ticket_qty = int(print_ticket_qty)
            valid_entry = True
        else:
            print("\nEach batch should contain a minimum of 10 tickets")
    else:
        print("\nThis is an invalid number!")
already_printed = 0
# Step 1/2: Loop through the winning ratio dictionary and call the function
# create_winner_game as many times as each prize's frequency dictates.
for prize, frequency in winning_ratio.items():
    quant = round(print_ticket_qty / frequency)
    if quant > 0:
        for n in range(0, quant):
            create_winner_game(prize)
            already_printed += 1
# Step 2/2: All winning tickets are printed; fill the remainder of the batch
# with losing tickets.
# BUG FIX: the original used range(1, remaining), which generated one losing
# ticket too few, so the batch came up short of the requested quantity.
for i in range(print_ticket_qty - already_printed):
    create_loser_game()
|
#! usr/bin/python3
# -*- coding:utf-8 -*-
# cx_Freeze build script for the PyQt "valueReader" application:
# `python setup.py build` produces a frozen Windows executable.
import os
import sys
from cx_Freeze import setup, Executable

# Extra data files/directories to bundle next to the executable.
local_files = []
# if os.path.exists("log/"):
#     local_files.append("log/")
# elif os.path.exists("userdata/"):
#     local_files.append("userdata/")
# elif os.path.exists("config/"):
#     local_files.append("config/")
# Dependencies are detected automatically, but some fine-tuning is needed.
build_exe_options = {
    "packages": ["sqlalchemy"],   # force-include packages the scanner misses
    "excludes": ["tkinter"],      # trim unused GUI toolkit from the build
    "includes": [],
    "include_files": local_files,
}
# 'Win32GUI' suppresses the console window on Windows builds.
base = 'Win32GUI' if sys.platform == 'win32' else None
executables = [
    # NOTE(review): cx_Freeze 6.26+ renamed `targetName` to `target_name` —
    # confirm the pinned cx_Freeze version before upgrading.
    Executable('main.py', targetName="valueReader.exe", base=base)
]
setup(
    name="valueReader",
    version="1.0",
    description="A PyQt Value Reader Program",
    options={"build_exe": build_exe_options},
    executables=executables
)
|
#!/usr/bin/python
import argparse, logging, sys
import zmq
import communication
import configuration
import serialization
import payload
class Commander(object):
    """CLI front-end that serializes a payload and sends a single command to
    the controller over a ZeroMQ REQ socket."""

    def __init__(self, config=None, serializer=None, payload=None):
        # BUG FIX: the original used call-once defaults
        # (config=configuration.Config(), serializer=..., payload=...),
        # evaluated at class-definition time, so every Commander instance
        # silently shared the same mutable objects. Build fresh ones per call.
        if config is None:
            config = configuration.Config()
        if serializer is None:
            serializer = serialization.Serializer()
        if payload is None:
            # Local import alias: the `payload` parameter shadows the module.
            import payload as _payload_module
            payload = _payload_module.Payload()
        self.context = zmq.Context()
        self.settings = config
        self.serializer = serializer
        self.payload = payload
        self.controller = communication.connect_socket(self.context, socket_type=zmq.REQ, connection=self.settings.connections["commander"])
        self.logger = logging.getLogger("commander")

    def process(self, args):
        """Find the single selected command in argparse `args`, build its
        payload, and send it; exits the process on any failure."""
        try:
            # argparse enforces a mutually exclusive group, so at most one
            # attribute is truthy; this loop locates it.
            for arg in vars(args):
                if getattr(args, arg):
                    command, argument = arg, getattr(args, arg)
            self.logger.info("Command: %s", command)
            topic = self.get_topic(command)
            self.logger.debug("Topic: %s", topic)
            payload = self.payload.read(path=argument, topic=topic)
            self.logger.debug("Payload: %s", payload)
            self.send_command(command, payload)
        except Exception:
            self.logger.exception("Processing command failed.")
            self.logger.info("Shutting down commander.")
            sys.exit()

    def get_topic(self, command):
        """Return the topic for write commands ('write_X' -> 'X'), else None."""
        if command in self.settings.write_commands:
            return self.remove_prefix(command, "write_")
        return None

    def send_command(self, command, payload):
        """Serialize `payload`, send '<command> <buffer>', and await the
        controller's echo of the command name as acknowledgement."""
        payload_buffer = self.serializer.write_buffer(payload, topic=self.get_topic(command))
        # NOTE(review): py3 pyzmq sockets expect bytes; this sends a str —
        # confirm the pyzmq version / socket configuration in use.
        self.controller.send("%s %s" % (command, payload_buffer))
        response = self.controller.recv()
        self.logger.debug("Controller executed command: %s", response)
        assert (command == response)
        self.logger.info("Done.")

    def remove_prefix(self, message, prefix):
        """Strip `prefix` from the front of `message` when present."""
        if message.startswith(prefix):
            return message[len(prefix):]
        return message
def main(args):
    """Entry point: build a Commander and let it process the parsed arguments."""
    commander = Commander()
    commander.process(args)
if __name__ == '__main__':
    # Setup for application logging: full DEBUG detail goes to the log file
    # (filemode="w" truncates it on each run; assumes ./log/ already exists —
    # TODO confirm the directory is created elsewhere), while the console
    # handler below only shows INFO and above.
    logging.basicConfig(level=logging.DEBUG, format='%(asctime)s: %(name)s: -- %(levelname)s -- %(message)s', filename="./log/commander.log", filemode="w")
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter('%(name)s: -- %(levelname)s -- %(message)s')
    console.setFormatter(formatter)
    logging.getLogger("commander").addHandler(console)
    # Setup for parsing command line arguments
    # see configuration module for options
    # All options live in one mutually exclusive group, so at most one
    # command is accepted per invocation. The write_* options take an
    # optional file path (nargs='?') and fall back to an example file.
    parser = argparse.ArgumentParser(prog="commander", description='Generates and sends commands to the controller which interacts with the neural network.')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-q', '--quit', help='shutdown controller', action='store_true')
    group.add_argument('-p','--pause', help='put controller in idle state and listen for further commands', action='store_true')
    group.add_argument('-rw','--read_weights', action='store_true', help='tell controller to continuously read weights', )
    group.add_argument('-ww','--write_weights', metavar="weight file", nargs='?', const = "config/weights/example.file", help='tell controller to write weights once', )
    group.add_argument('-rp','--read_parameters', action='store_true', help='tell controller to continuously read parameters', )
    group.add_argument('-wp','--write_parameters', metavar="parameters file", nargs='?', const = "config/parameters/example.file", help='tell controller to write parameters once', )
    group.add_argument('-rt','--read_topology', action='store_true', help='tell controller to continuously read network topology', )
    group.add_argument('-wt','--write_topology', metavar="topology file", nargs='?', const = "config/topology/example.file", help='tell controller to write network topology once', )
    group.add_argument('-rs','--read_spikes', action='store_true', help='tell controller to continuously read neuron spikes', )
    args = parser.parse_args()
    main(args)
# Read n lines of "state city city ...", mapping every city to its state,
# then answer m lookup queries, one city per line.
n = int(input())
state_by_city = {}
for _ in range(n):
    tokens = input().split()
    state_name = tokens[0]
    for city in tokens[1:]:
        state_by_city[city] = state_name
m = int(input())
for _ in range(m):
    print(state_by_city[input()])
|
from bisect import bisect_right
from itertools import accumulate
# Treat A as one repeating cycle and find the 1-based position where the
# running total first exceeds the remainder of X within the cycle.
n = int(input())
a = list(map(int, input().split()))
x = int(input())
prefix_sums = list(accumulate(a))
cycle_total = prefix_sums[-1]
full_cycles, remainder = divmod(x, cycle_total)
print(n * full_cycles + bisect_right(prefix_sums, remainder) + 1)
|
# MIT License
# Copyright (c) 2018 Nathan Wilson
import os
import re
import sys
from time import (
sleep,
time,
)
from log_parser.block_summary import BlockSummary
from log_parser.traffic_event_block import TrafficEventBlock
from log_parser.traffic_watcher import TrafficWatcher
from log_parser.utils import (
display_time,
epoch_time,
)
class LogParser(object):
    """Tails an access log, reporting per-block and cumulative traffic stats.

    Two TrafficWatchers track alert conditions: one keyed on wall-clock
    time, one keyed on the timestamps parsed from the log lines.
    """

    def __init__(self, args, output_stream=None):
        """Configure from parsed CLI args; write reports to output_stream (default stdout)."""
        self.referer_expected = args.referer_expected
        self.window = args.window
        self.sleep_interval = args.interval
        # The compiled pattern intentionally replaces the bound method of
        # the same name on this instance; build it once, reuse per line.
        self.line_regex = self.line_regex()
        self.bad_lines = 0
        self.line_counter = 0
        self.total_traffic = 0
        self.error_count = 0
        self.total_bytes_sent = 0
        self.real_time_watcher = TrafficWatcher(args.window, args.threshold)
        self.log_time_watcher = TrafficWatcher(args.window, args.threshold)
        self.output_stream = output_stream or sys.stdout

    def line_regex(self):
        """Build and return the compiled regex for one access-log line.

        Appends referer/user-agent groups when self.referer_expected is set.
        """
        # Regular expression for log parsing inspired by
        # https://gist.github.com/hreeder/f1ffe1408d296ce0591d
        regex = (
            r"(?P<ip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}) "
            r"(?P<client>[^ ]+) "
            r"(?P<user>[^ ]+) "
            r"\[(?P<datetime>\d{2}\/[a-z]{3}\/\d{4}:\d{2}:\d{2}:\d{2} "
            r"(\+|\-)\d{4})\] "
            r"\"(?P<method>[^ ]+) "
            r"(?P<url>[^ ]+) "
            r"(?P<protocol>[^\"]+)\" "
            r"(?P<statuscode>\d{3}) "
            r"(?P<bytessent>\d+)")
        if self.referer_expected:
            regex += (r' (["](?P<referer>(\-)|(.+))["])'
                      r' (["](?P<useragent>.+)["])')
        return re.compile(regex, re.IGNORECASE)

    def run(self, filename):
        """Tail `filename` forever, emitting a summary every sleep interval.

        Never returns normally; exits only via an exception (e.g. Ctrl-C).
        """
        # `with` guarantees the log file is closed on any exit path;
        # the original opened the file and never closed it.
        with open(filename) as stream:
            while True:
                block = []
                try:
                    block = self.process_lines(stream.readlines())
                except UnicodeDecodeError:
                    # Binary garbage in the log: skip to the end and resume.
                    self.output_stream.write(
                        "\nIgnoring binary data in {}".format(filename))
                    stream.seek(0, os.SEEK_END)
                self.total_traffic += len(block)
                for line in self.summarize(block):
                    self.output_stream.write(line + "\n")
                self.output_stream.flush()
                sleep(self.sleep_interval)

    def process_lines(self, lines):
        """Parse raw lines into a list of match-group dicts.

        Counts every line; reports and counts non-blank unparsable lines.
        """
        block = []
        for line in lines:
            self.line_counter += 1
            # self.line_regex is the compiled pattern; match directly
            # instead of routing through re.match.
            match = self.line_regex.match(line)
            if match:
                block.append(match.groupdict())
            elif line.strip():
                self.output_stream.write(
                    "{line_number}: Unable to parse: {line}\n".format(
                        line_number=self.line_counter, line=line))
                self.bad_lines += 1
        return block

    def summarize(self, block):
        """Return report lines: block summary, watcher alerts, running totals."""
        report = []
        report_time = time()
        report.append("\nDate: {}".format(display_time(report_time)))
        block_summary = BlockSummary(block)
        report += block_summary.summarize()
        report += self.update_whole(report_time, block_summary)
        report += self.summarize_whole()
        return report

    def update_whole(self, report_time, block_summary):
        """Fold the block into cumulative totals; return any watcher alerts."""
        self.error_count += block_summary.error_count
        self.total_bytes_sent += block_summary.total_bytes_sent
        return self.update_watchers(report_time, block_summary)

    def update_watchers(self, report_time, block_summary):
        """Update both watchers; return their combined alert report lines."""
        return (self.update_real_time_watcher(report_time, block_summary) +
                self.update_log_time_watcher(block_summary))

    def update_real_time_watcher(self, report_time, block_summary):
        """Feed the wall-clock watcher one event block covering this interval."""
        start_time = report_time - self.sleep_interval
        alerts = self.real_time_watcher.update(
            TrafficEventBlock(start_time=start_time,
                              end_time=report_time,
                              count=block_summary.traffic()))
        if not alerts:
            return []
        return alert_report("Real-time", alerts)

    def update_log_time_watcher(self, block_summary):
        """Feed the log-timestamp watcher, splitting the block into 10s buckets."""
        alerts = []
        for event_block in logtime_split(block_summary.block, 10):
            alerts += self.log_time_watcher.update(event_block)
        if not alerts:
            return []
        return alert_report("Log-time", alerts)

    def summarize_whole(self):
        """Return the cumulative 'Overall data' report lines."""
        return [
            "Overall data:",
            "\tTotal lines parsed: {}".format(self.line_counter),
            "\tUnparsable lines: {}".format(self.bad_lines),
            "\tTotal traffic: {}".format(self.total_traffic),
            "\tTotal errors: {}".format(self.error_count),
            "\tTotal bytes sent: {}".format(self.total_bytes_sent),
        ]
def logtime_split(block, seconds):
    """Split parsed log entries into TrafficEventBlocks of at most `seconds` span."""
    if not block:
        return []
    blocks = []
    current = TrafficEventBlock(0, epoch_time(block[0]["datetime"]))
    for record in block:
        timestamp = epoch_time(record["datetime"])
        if current.add_if_in_range(timestamp, seconds):
            continue
        # Timestamp falls outside the current window: close it, start a new one.
        blocks.append(current)
        current = TrafficEventBlock(0, timestamp)
    blocks.append(current)
    return blocks
def alert_report(prefix, alerts):
    """Format alerts as a titled, tab-indented report block."""
    report = [prefix + " Alerts:"]
    for alert in alerts:
        report.append("\t" + alert)
    return report
|
# 8) Defina uma função que, dado um valor e uma lista, insira esse valor de forma ordenada na
# lista.
def ehMaior(a, b):
    """Return True when a is strictly greater than b."""
    resultado = a > b
    return resultado
def ehMenor(a, b):
    """Return True when a is strictly less than b."""
    resultado = a < b
    return resultado
def insereOrdenado(x, lista):
    """Insert x into an already-sorted list, returning a new sorted list.

    Fix: the original partitioned with strict `<` and `>`, which silently
    dropped any existing elements equal to x (e.g. inserting 3 into
    [1, 3, 4] produced [1, 3, 4] instead of [1, 3, 3, 4]).
    """
    menores_ou_iguais = [y for y in lista if y <= x]
    maiores = [y for y in lista if y > x]
    return menores_ou_iguais + [x] + maiores
# Quick demo: insert 2 into the sorted list [1, 3, 4].
numeros = [1, 3, 4]
valor = 2
print(insereOrdenado(valor, numeros))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.